text stringlengths 9 39.2M | dir stringlengths 25 226 | lang stringclasses 163 values | created_date timestamp[s] | updated_date timestamp[s] | repo_name stringclasses 751 values | repo_full_name stringclasses 752 values | star int64 1.01k 183k | len_tokens int64 1 18.5M |
|---|---|---|---|---|---|---|---|---|
```c
/*
*
* Based on adc_mcux_adc16.c and adc_mcux_adc12.c, which are:
*
*/
#define DT_DRV_COMPAT nxp_lpc_lpadc
#include <errno.h>
#include <zephyr/drivers/adc.h>
#include <zephyr/sys/util.h>
#include <zephyr/drivers/regulator.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/pinctrl.h>
#define LOG_LEVEL CONFIG_ADC_LOG_LEVEL
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
#include <fsl_lpadc.h>
LOG_MODULE_REGISTER(nxp_mcux_lpadc);
/*
* Currently, no instance of the ADC IP has more than
* 8 channels present. Therefore, we treat channels
* with an index 8 or higher as a side b channel, with
* the channel index given by channel_num % 8
*/
#define CHANNELS_PER_SIDE 0x8
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
struct mcux_lpadc_config {
ADC_Type *base;
lpadc_reference_voltage_source_t voltage_ref;
uint8_t power_level;
uint32_t calibration_average;
uint32_t offset_a;
uint32_t offset_b;
void (*irq_config_func)(const struct device *dev);
const struct pinctrl_dev_config *pincfg;
const struct device *ref_supplies;
const struct device *clock_dev;
clock_control_subsys_t clock_subsys;
int32_t ref_supply_val;
};
struct mcux_lpadc_data {
const struct device *dev;
struct adc_context ctx;
uint16_t *buffer;
uint16_t *repeat_buffer;
uint32_t channels;
lpadc_conv_command_config_t cmd_config[CONFIG_LPADC_CHANNEL_COUNT];
};
static int mcux_lpadc_acquisition_time_setup(const struct device *dev, uint16_t acq_time,
lpadc_conv_command_config_t *cmd)
{
const struct mcux_lpadc_config *config = dev->config;
uint32_t adc_freq_hz = 0;
uint32_t conversion_factor = 0;
uint32_t acquisition_time_value = ADC_ACQ_TIME_VALUE(acq_time);
uint8_t acquisition_time_unit = ADC_ACQ_TIME_UNIT(acq_time);
if (ADC_ACQ_TIME_DEFAULT == acquisition_time_value) {
return 0;
}
/* If the acquisition time is expressed in ADC ticks, then directly compare
* the acquisition time with configuration items (3, 5, 7, etc. ADC ticks)
* supported by the LPADC. The conversion factor is set to 1 (means do not need
* to convert configuration items from ADC ticks to nanoseconds).
* If the acquisition time is expressed in microseconds or nanoseconds, First
* calculate the ADC cycle based on the ADC clock, then convert the configuration
* items supported by LPADC into nanoseconds, and finally compare the acquisition
* time with configuration items. The conversion factor is equal to the ADC cycle
* (means convert configuration items from ADC ticks to nanoseconds).
*/
if (ADC_ACQ_TIME_TICKS == acquisition_time_unit) {
conversion_factor = 1;
} else {
if (clock_control_get_rate(config->clock_dev, config->clock_subsys, &adc_freq_hz)) {
LOG_ERR("Get clock rate failed");
return -EINVAL;
}
conversion_factor = 1000000000 / adc_freq_hz;
if (ADC_ACQ_TIME_MICROSECONDS == acquisition_time_unit) {
acquisition_time_value *= 1000;
}
}
if ((3 * conversion_factor) >= acquisition_time_value) {
cmd->sampleTimeMode = kLPADC_SampleTimeADCK3;
} else if ((5 * conversion_factor) >= acquisition_time_value) {
cmd->sampleTimeMode = kLPADC_SampleTimeADCK5;
} else if ((7 * conversion_factor) >= acquisition_time_value) {
cmd->sampleTimeMode = kLPADC_SampleTimeADCK7;
} else if ((11 * conversion_factor) >= acquisition_time_value) {
cmd->sampleTimeMode = kLPADC_SampleTimeADCK11;
} else if ((19 * conversion_factor) >= acquisition_time_value) {
cmd->sampleTimeMode = kLPADC_SampleTimeADCK19;
} else if ((35 * conversion_factor) >= acquisition_time_value) {
cmd->sampleTimeMode = kLPADC_SampleTimeADCK35;
} else if ((67 * conversion_factor) >= acquisition_time_value) {
cmd->sampleTimeMode = kLPADC_SampleTimeADCK67;
} else if ((131 * conversion_factor) >= acquisition_time_value) {
cmd->sampleTimeMode = kLPADC_SampleTimeADCK131;
} else {
return -EINVAL;
}
return 0;
}
static int mcux_lpadc_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
const struct mcux_lpadc_config *config = dev->config;
const struct device *regulator = config->ref_supplies;
int32_t vref_uv = config->ref_supply_val * 1000;
struct mcux_lpadc_data *data = dev->data;
lpadc_conv_command_config_t *cmd;
uint8_t channel_side;
uint8_t channel_num;
int err;
/* User may configure maximum number of active channels */
if (channel_cfg->channel_id >= CONFIG_LPADC_CHANNEL_COUNT) {
LOG_ERR("Channel %d is not valid", channel_cfg->channel_id);
return -EINVAL;
}
/* Select ADC CMD register to configure based off channel ID */
cmd = &data->cmd_config[channel_cfg->channel_id];
/* If bit 5 of input_positive is set, then channel side B is used */
channel_side = 0x20 & channel_cfg->input_positive;
/* Channel number is selected by lower 4 bits of input_positive */
channel_num = ADC_CMDL_ADCH(channel_cfg->input_positive);
LOG_DBG("Channel num: %u, channel side: %c", channel_num,
channel_side == 0 ? 'A' : 'B');
LPADC_GetDefaultConvCommandConfig(cmd);
/* Configure LPADC acquisition time. */
if (mcux_lpadc_acquisition_time_setup(dev, channel_cfg->acquisition_time, cmd)) {
LOG_ERR("LPADC acquisition time setting failed");
return -EINVAL;
}
if (channel_cfg->differential) {
/* Channel pairs must match in differential mode */
if ((ADC_CMDL_ADCH(channel_cfg->input_positive)) !=
(ADC_CMDL_ADCH(channel_cfg->input_negative))) {
return -ENOTSUP;
}
#if defined(FSL_FEATURE_LPADC_HAS_CMDL_DIFF) && FSL_FEATURE_LPADC_HAS_CMDL_DIFF
/* Check to see which channel is the positive input */
if (channel_cfg->input_positive & 0x20) {
/* Channel B is positive side */
cmd->sampleChannelMode =
kLPADC_SampleChannelDiffBothSideBA;
} else {
/* Channel A is positive side */
cmd->sampleChannelMode =
kLPADC_SampleChannelDiffBothSideAB;
}
#else
cmd->sampleChannelMode = kLPADC_SampleChannelDiffBothSide;
#endif
} else if (channel_side != 0) {
cmd->sampleChannelMode = kLPADC_SampleChannelSingleEndSideB;
} else {
/* Default value for sampleChannelMode is SideA */
}
#if defined(FSL_FEATURE_LPADC_HAS_CMDL_CSCALE) && FSL_FEATURE_LPADC_HAS_CMDL_CSCALE
/*
* The true scaling factor used by the LPADC is 30/64, instead of
* 1/2. Select 1/2 as this is the closest scaling factor available
* in Zephyr.
*/
if (channel_cfg->gain == ADC_GAIN_1_2) {
LOG_INF("Channel gain of 30/64 selected");
cmd->sampleScaleMode = kLPADC_SamplePartScale;
} else if (channel_cfg->gain == ADC_GAIN_1) {
cmd->sampleScaleMode = kLPADC_SampleFullScale;
} else {
LOG_ERR("Invalid channel gain");
return -EINVAL;
}
#else
if (channel_cfg->gain != ADC_GAIN_1) {
LOG_ERR("Invalid channel gain");
return -EINVAL;
}
#endif
/*
* ADC_REF_EXTERNAL1: Use SoC internal regulator as LPADC reference voltage.
* ADC_REF_EXTERNAL0: Use other voltage source (maybe also within the SoCs)
* as LPADC reference voltage, like VREFH, VDDA, etc.
*/
if (channel_cfg->reference == ADC_REF_EXTERNAL1) {
LOG_DBG("ref external1");
if (regulator != NULL) {
err = regulator_set_voltage(regulator, vref_uv, vref_uv);
if (err < 0) {
return err;
}
} else {
return -EINVAL;
}
} else if (channel_cfg->reference == ADC_REF_EXTERNAL0) {
LOG_DBG("ref external0");
} else {
LOG_DBG("ref not support");
return -EINVAL;
}
cmd->channelNumber = channel_num;
return 0;
}
static int mcux_lpadc_start_read(const struct device *dev,
const struct adc_sequence *sequence)
{
const struct mcux_lpadc_config *config = dev->config;
struct mcux_lpadc_data *data = dev->data;
lpadc_hardware_average_mode_t hardware_average_mode;
uint8_t channel, last_enabled;
#if defined(FSL_FEATURE_LPADC_HAS_CMDL_MODE) \
&& FSL_FEATURE_LPADC_HAS_CMDL_MODE
lpadc_conversion_resolution_mode_t resolution_mode;
switch (sequence->resolution) {
case 12:
case 13:
resolution_mode = kLPADC_ConversionResolutionStandard;
break;
case 16:
resolution_mode = kLPADC_ConversionResolutionHigh;
break;
default:
LOG_ERR("Unsupported resolution %d", sequence->resolution);
return -ENOTSUP;
}
#else
/* If FSL_FEATURE_LPADC_HAS_CMDL_MODE is not defined
only 12/13 bit resolution is supported. */
if (sequence->resolution != 12 && sequence->resolution != 13) {
LOG_ERR("Unsupported resolution %d", sequence->resolution);
return -ENOTSUP;
}
#endif /* FSL_FEATURE_LPADC_HAS_CMDL_MODE */
switch (sequence->oversampling) {
case 0:
hardware_average_mode = kLPADC_HardwareAverageCount1;
break;
case 1:
hardware_average_mode = kLPADC_HardwareAverageCount2;
break;
case 2:
hardware_average_mode = kLPADC_HardwareAverageCount4;
break;
case 3:
hardware_average_mode = kLPADC_HardwareAverageCount8;
break;
case 4:
hardware_average_mode = kLPADC_HardwareAverageCount16;
break;
case 5:
hardware_average_mode = kLPADC_HardwareAverageCount32;
break;
case 6:
hardware_average_mode = kLPADC_HardwareAverageCount64;
break;
case 7:
hardware_average_mode = kLPADC_HardwareAverageCount128;
break;
default:
LOG_ERR("Unsupported oversampling value %d",
sequence->oversampling);
return -ENOTSUP;
}
/*
* Now, look at the selected channels to determine which ADC channels
* we need to configure, and set those channels up.
*
* Since this ADC supports chaining channels in hardware, we will
* start with the highest channel ID and work downwards, chaining
* channels as we go.
*/
channel = CONFIG_LPADC_CHANNEL_COUNT;
last_enabled = 0;
while (channel-- > 0) {
if (sequence->channels & BIT(channel)) {
/* Setup this channel command */
#if defined(FSL_FEATURE_LPADC_HAS_CMDL_MODE) && FSL_FEATURE_LPADC_HAS_CMDL_MODE
data->cmd_config[channel].conversionResolutionMode =
resolution_mode;
#endif
data->cmd_config[channel].hardwareAverageMode =
hardware_average_mode;
if (last_enabled) {
/* Chain channel */
data->cmd_config[channel].chainedNextCommandNumber =
last_enabled + 1;
LOG_DBG("Chaining channel %u to %u",
channel, last_enabled);
} else {
/* End of chain */
data->cmd_config[channel].chainedNextCommandNumber = 0;
}
last_enabled = channel;
LPADC_SetConvCommandConfig(config->base,
channel + 1, &data->cmd_config[channel]);
}
};
data->buffer = sequence->buffer;
adc_context_start_read(&data->ctx, sequence);
int error = adc_context_wait_for_completion(&data->ctx);
return error;
}
static int mcux_lpadc_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
struct mcux_lpadc_data *data = dev->data;
int error;
adc_context_lock(&data->ctx, async ? true : false, async);
error = mcux_lpadc_start_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
static int mcux_lpadc_read(const struct device *dev,
const struct adc_sequence *sequence)
{
return mcux_lpadc_read_async(dev, sequence, NULL);
}
static void mcux_lpadc_start_channel(const struct device *dev)
{
const struct mcux_lpadc_config *config = dev->config;
struct mcux_lpadc_data *data = dev->data;
lpadc_conv_trigger_config_t trigger_config;
uint8_t first_channel;
first_channel = find_lsb_set(data->channels) - 1;
LOG_DBG("Starting channel %d, input %d", first_channel,
data->cmd_config[first_channel].channelNumber);
LPADC_GetDefaultConvTriggerConfig(&trigger_config);
trigger_config.targetCommandId = first_channel + 1;
/* configures trigger0. */
LPADC_SetConvTriggerConfig(config->base, 0, &trigger_config);
/* 1 is trigger0 mask. */
LPADC_DoSoftwareTrigger(config->base, 1);
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct mcux_lpadc_data *data =
CONTAINER_OF(ctx, struct mcux_lpadc_data, ctx);
data->channels = ctx->sequence.channels;
data->repeat_buffer = data->buffer;
mcux_lpadc_start_channel(data->dev);
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
bool repeat_sampling)
{
struct mcux_lpadc_data *data =
CONTAINER_OF(ctx, struct mcux_lpadc_data, ctx);
if (repeat_sampling) {
data->buffer = data->repeat_buffer;
}
}
static void mcux_lpadc_isr(const struct device *dev)
{
const struct mcux_lpadc_config *config = dev->config;
struct mcux_lpadc_data *data = dev->data;
ADC_Type *base = config->base;
lpadc_conv_result_t conv_result;
lpadc_sample_channel_mode_t conv_mode;
int16_t result;
uint16_t channel;
#if (defined(FSL_FEATURE_LPADC_FIFO_COUNT) \
&& (FSL_FEATURE_LPADC_FIFO_COUNT == 2U))
LPADC_GetConvResult(base, &conv_result, 0U);
#else
LPADC_GetConvResult(base, &conv_result);
#endif /* FSL_FEATURE_LPADC_FIFO_COUNT */
channel = conv_result.commandIdSource - 1;
LOG_DBG("Finished channel %d. Raw result is 0x%04x",
channel, conv_result.convValue);
/*
* For 12 or 13 bit resolution the LSBs will be 0, so a bit shift
* is needed. For differential modes, the ADC conversion to
* millivolts expects to use a shift one less than the resolution.
*
* For 16 bit modes, the adc value can be left untouched. ADC
* API should treat the value as signed if the channel is
* in differential mode
*/
conv_mode = data->cmd_config[channel].sampleChannelMode;
if (data->ctx.sequence.resolution < 15) {
result = ((conv_result.convValue >> 3) & 0xFFF);
#if defined(FSL_FEATURE_LPADC_HAS_CMDL_DIFF) && FSL_FEATURE_LPADC_HAS_CMDL_DIFF
if (conv_mode == kLPADC_SampleChannelDiffBothSideAB ||
conv_mode == kLPADC_SampleChannelDiffBothSideBA) {
#else
if (conv_mode == kLPADC_SampleChannelDiffBothSide) {
#endif
if ((conv_result.convValue & 0x8000)) {
/* 13 bit mode, MSB is sign bit. (2's complement) */
result -= 0x1000;
}
}
*data->buffer++ = result;
} else {
*data->buffer++ = conv_result.convValue;
}
data->channels &= ~BIT(channel);
/*
* Hardware will automatically continue sampling, so no need
* to issue new trigger
*/
if (data->channels == 0) {
adc_context_on_sampling_done(&data->ctx, dev);
}
}
static int mcux_lpadc_init(const struct device *dev)
{
const struct mcux_lpadc_config *config = dev->config;
struct mcux_lpadc_data *data = dev->data;
ADC_Type *base = config->base;
lpadc_config_t adc_config;
int err;
err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
if (err) {
return err;
}
/* Enable necessary regulators */
const struct device *regulator = config->ref_supplies;
if (regulator != NULL) {
err = regulator_enable(regulator);
if (err) {
return err;
}
}
LPADC_GetDefaultConfig(&adc_config);
adc_config.enableAnalogPreliminary = true;
adc_config.referenceVoltageSource = config->voltage_ref;
#if defined(FSL_FEATURE_LPADC_HAS_CTRL_CAL_AVGS) \
&& FSL_FEATURE_LPADC_HAS_CTRL_CAL_AVGS
adc_config.conversionAverageMode = config->calibration_average;
#endif /* FSL_FEATURE_LPADC_HAS_CTRL_CAL_AVGS */
adc_config.powerLevelMode = config->power_level;
LPADC_Init(base, &adc_config);
/* Do ADC calibration. */
#if defined(FSL_FEATURE_LPADC_HAS_CTRL_CALOFS) \
&& FSL_FEATURE_LPADC_HAS_CTRL_CALOFS
#if defined(FSL_FEATURE_LPADC_HAS_OFSTRIM) \
&& FSL_FEATURE_LPADC_HAS_OFSTRIM
/* Request offset calibration. */
#if defined(CONFIG_LPADC_DO_OFFSET_CALIBRATION) \
&& CONFIG_LPADC_DO_OFFSET_CALIBRATION
LPADC_DoOffsetCalibration(base);
#else
LPADC_SetOffsetValue(base,
config->offset_a,
config->offset_b);
#endif /* DEMO_LPADC_DO_OFFSET_CALIBRATION */
#endif /* FSL_FEATURE_LPADC_HAS_OFSTRIM */
/* Request gain calibration. */
LPADC_DoAutoCalibration(base);
#endif /* FSL_FEATURE_LPADC_HAS_CTRL_CALOFS */
#if (defined(FSL_FEATURE_LPADC_HAS_CFG_CALOFS) \
&& FSL_FEATURE_LPADC_HAS_CFG_CALOFS)
/* Do auto calibration. */
LPADC_DoAutoCalibration(base);
#endif /* FSL_FEATURE_LPADC_HAS_CFG_CALOFS */
/* Enable the watermark interrupt. */
#if (defined(FSL_FEATURE_LPADC_FIFO_COUNT) \
&& (FSL_FEATURE_LPADC_FIFO_COUNT == 2U))
LPADC_EnableInterrupts(base, kLPADC_FIFO0WatermarkInterruptEnable);
#else
LPADC_EnableInterrupts(base, kLPADC_FIFOWatermarkInterruptEnable);
#endif /* FSL_FEATURE_LPADC_FIFO_COUNT */
config->irq_config_func(dev);
data->dev = dev;
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
static const struct adc_driver_api mcux_lpadc_driver_api = {
.channel_setup = mcux_lpadc_channel_setup,
.read = mcux_lpadc_read,
#ifdef CONFIG_ADC_ASYNC
.read_async = mcux_lpadc_read_async,
#endif
};
#define LPADC_MCUX_INIT(n) \
\
static void mcux_lpadc_config_func_##n(const struct device *dev); \
\
PINCTRL_DT_INST_DEFINE(n); \
static const struct mcux_lpadc_config mcux_lpadc_config_##n = { \
.base = (ADC_Type *)DT_INST_REG_ADDR(n), \
.voltage_ref = DT_INST_PROP(n, voltage_ref), \
.calibration_average = DT_INST_ENUM_IDX_OR(n, calibration_average, 0), \
.power_level = DT_INST_PROP(n, power_level), \
.offset_a = DT_INST_PROP(n, offset_value_a), \
.offset_b = DT_INST_PROP(n, offset_value_b), \
.irq_config_func = mcux_lpadc_config_func_##n, \
.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \
.ref_supplies = COND_CODE_1(DT_INST_NODE_HAS_PROP(n, nxp_references),\
(DEVICE_DT_GET(DT_PHANDLE(DT_DRV_INST(n),\
nxp_references))), (NULL)),\
.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \
.clock_subsys = (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, name),\
.ref_supply_val = COND_CODE_1(\
DT_INST_NODE_HAS_PROP(n, nxp_references),\
(DT_PHA(DT_DRV_INST(n), nxp_references, vref_mv)), \
(0)),\
}; \
static struct mcux_lpadc_data mcux_lpadc_data_##n = { \
ADC_CONTEXT_INIT_TIMER(mcux_lpadc_data_##n, ctx), \
ADC_CONTEXT_INIT_LOCK(mcux_lpadc_data_##n, ctx), \
ADC_CONTEXT_INIT_SYNC(mcux_lpadc_data_##n, ctx), \
}; \
\
DEVICE_DT_INST_DEFINE(n, \
&mcux_lpadc_init, NULL, &mcux_lpadc_data_##n, \
&mcux_lpadc_config_##n, POST_KERNEL, \
CONFIG_ADC_INIT_PRIORITY, \
&mcux_lpadc_driver_api); \
\
static void mcux_lpadc_config_func_##n(const struct device *dev) \
{ \
IRQ_CONNECT(DT_INST_IRQN(n), \
DT_INST_IRQ(n, priority), mcux_lpadc_isr, \
DEVICE_DT_INST_GET(n), 0); \
\
irq_enable(DT_INST_IRQN(n)); \
}
DT_INST_FOREACH_STATUS_OKAY(LPADC_MCUX_INIT)
``` | /content/code_sandbox/drivers/adc/adc_mcux_lpadc.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 5,073 |
```c
/* TI ADS7052 ADC
*
*
*/
#define DT_DRV_COMPAT ti_ads7052
#include <zephyr/device.h>
#include <zephyr/drivers/adc.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/logging/log.h>
#include <zephyr/kernel.h>
LOG_MODULE_REGISTER(adc_ads7052);
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
#define ADS7052_RESOLUTION 14U
struct ads7052_config {
struct spi_dt_spec bus;
uint8_t channels;
};
struct ads7052_data {
struct adc_context ctx;
const struct device *dev;
uint16_t *buffer;
uint16_t *repeat_buffer;
uint8_t channels;
struct k_thread thread;
struct k_sem sem;
K_KERNEL_STACK_MEMBER(stack, CONFIG_ADC_ADS7052_ACQUISITION_THREAD_STACK_SIZE);
};
static int adc_ads7052_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
const struct ads7052_config *config = dev->config;
if (channel_cfg->gain != ADC_GAIN_1) {
LOG_ERR("unsupported channel gain '%d'", channel_cfg->gain);
return -ENOTSUP;
}
if (channel_cfg->reference != ADC_REF_VDD_1) {
LOG_ERR("unsupported channel reference '%d'", channel_cfg->reference);
return -ENOTSUP;
}
if (channel_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
LOG_ERR("unsupported acquisition_time '%d'", channel_cfg->acquisition_time);
return -ENOTSUP;
}
if (channel_cfg->channel_id >= config->channels) {
LOG_ERR("unsupported channel id '%d'", channel_cfg->channel_id);
return -ENOTSUP;
}
return 0;
}
static int ads7052_validate_buffer_size(const struct device *dev,
const struct adc_sequence *sequence)
{
uint8_t channels = 0;
size_t needed;
channels = POPCOUNT(sequence->channels);
needed = channels * sizeof(uint16_t);
if (sequence->options) {
needed *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < needed) {
return -ENOMEM;
}
return 0;
}
/**
* @brief Send ADS7052 Offset calibration request
*
* On power-up, the host must provide 24 SCLKs in the first serial transfer to enter the OFFCAL
* state. During normal operation, the host must provide 64 SCLKs in the serial transfer frame to
* enter the OFFCAL state.
*/
static int ads7052_send_calibration(const struct device *dev, bool power_up)
{
const struct ads7052_config *config = dev->config;
int err;
uint8_t sclks_needed = power_up ? 24 : 64;
uint8_t num_bytes = sclks_needed / 8;
uint8_t tx_bytes[8] = {0};
const struct spi_buf tx_buf = {.buf = tx_bytes, .len = num_bytes};
const struct spi_buf_set tx = {.buffers = &tx_buf, .count = 1};
err = spi_write_dt(&config->bus, &tx);
return err;
}
static int ads7052_start_read(const struct device *dev, const struct adc_sequence *sequence)
{
const struct ads7052_config *config = dev->config;
struct ads7052_data *data = dev->data;
int err;
if (sequence->resolution != ADS7052_RESOLUTION) {
LOG_ERR("unsupported resolution %d", sequence->resolution);
return -ENOTSUP;
}
if (find_msb_set(sequence->channels) > config->channels) {
LOG_ERR("unsupported channels in mask: 0x%08x", sequence->channels);
return -ENOTSUP;
}
if (sequence->calibrate) {
ads7052_send_calibration(dev, false);
}
err = ads7052_validate_buffer_size(dev, sequence);
if (err) {
LOG_ERR("buffer size too small");
return err;
}
data->buffer = sequence->buffer;
adc_context_start_read(&data->ctx, sequence);
return adc_context_wait_for_completion(&data->ctx);
}
static int adc_ads7052_read_async(const struct device *dev, const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
struct ads7052_data *data = dev->data;
int error;
adc_context_lock(&data->ctx, async ? true : false, async);
error = ads7052_start_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
static int adc_ads7052_read(const struct device *dev, const struct adc_sequence *sequence)
{
return adc_ads7052_read_async(dev, sequence, NULL);
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct ads7052_data *data = CONTAINER_OF(ctx, struct ads7052_data, ctx);
data->channels = ctx->sequence.channels;
data->repeat_buffer = data->buffer;
k_sem_give(&data->sem);
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx, bool repeat_sampling)
{
struct ads7052_data *data = CONTAINER_OF(ctx, struct ads7052_data, ctx);
if (repeat_sampling) {
data->buffer = data->repeat_buffer;
}
}
/**
* @brief Get a 14-bit integer from raw ADC data.
*
* @param src Location of the big-endian 14-bit integer to get.
*
* @return 14-bit integer in host endianness.
*/
static inline int ads7052_get_be14(const uint8_t src[2])
{
return ((src[0] & 0x7F) << 7) | (src[1] >> 1);
}
/**
* @brief Read ADS7052 over SPI interface
*
* A leading 0 is output on the SDO pin on the CS falling edge.
* The most significant bit (MSB) of the output data is launched on the SDO pin on the rising edge
* after the first SCLK falling edge. Subsequent output bits are launched on the subsequent rising
* edges provided on SCLK. When all 14 output bits are shifted out, the device outputs 0's on the
* subsequent SCLK rising edges. The device enters the ACQ state after 18 clocks and a minimum time
* of tACQ must be provided for acquiring the next sample. If the device is provided with less than
* 18 SCLK falling edges in the present serial transfer frame, the device provides an invalid
* conversion result in the next serial transfer frame
*/
static int ads7052_read_channel(const struct device *dev, uint8_t channel, uint16_t *result)
{
const struct ads7052_config *config = dev->config;
int err;
uint8_t rx_bytes[3];
const struct spi_buf rx_buf[1] = {{.buf = rx_bytes, .len = sizeof(rx_bytes)}};
const struct spi_buf_set rx = {.buffers = rx_buf, .count = ARRAY_SIZE(rx_buf)};
err = spi_read_dt(&config->bus, &rx);
if (err) {
return err;
}
*result = ads7052_get_be14(rx_bytes);
*result &= BIT_MASK(ADS7052_RESOLUTION);
return 0;
}
static void ads7052_acquisition_thread(void *p1, void *p2, void *p3)
{
ARG_UNUSED(p2);
ARG_UNUSED(p3);
struct ads7052_data *data = p1;
uint16_t result = 0;
uint8_t channel;
int err = 0;
err = ads7052_send_calibration(data->dev, true);
if (err) {
LOG_ERR("failed to send powerup sequence (err %d)", err);
}
while (true) {
k_sem_take(&data->sem, K_FOREVER);
while (data->channels != 0) {
channel = find_lsb_set(data->channels) - 1;
LOG_DBG("reading channel %d", channel);
err = ads7052_read_channel(data->dev, channel, &result);
if (err) {
LOG_ERR("failed to read channel %d (err %d)", channel, err);
adc_context_complete(&data->ctx, err);
break;
}
LOG_DBG("read channel %d, result = %d", channel, result);
*data->buffer++ = result;
WRITE_BIT(data->channels, channel, 0);
}
adc_context_on_sampling_done(&data->ctx, data->dev);
}
}
static int adc_ads7052_init(const struct device *dev)
{
const struct ads7052_config *config = dev->config;
struct ads7052_data *data = dev->data;
data->dev = dev;
adc_context_init(&data->ctx);
k_sem_init(&data->sem, 0, 1);
if (!spi_is_ready_dt(&config->bus)) {
LOG_ERR("SPI bus %s not ready", config->bus.bus->name);
return -ENODEV;
}
k_thread_create(&data->thread, data->stack,
K_KERNEL_STACK_SIZEOF(data->stack),
ads7052_acquisition_thread, data, NULL, NULL,
CONFIG_ADC_ADS7052_ACQUISITION_THREAD_PRIO, 0, K_NO_WAIT);
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
static const struct adc_driver_api ads7052_api = {
.channel_setup = adc_ads7052_channel_setup,
.read = adc_ads7052_read,
#ifdef CONFIG_ADC_ASYNC
.read_async = adc_ads7052_read_async,
#endif
};
#define ADC_ADS7052_SPI_CFG \
SPI_OP_MODE_MASTER | SPI_MODE_CPOL | SPI_MODE_CPHA | SPI_WORD_SET(8) | SPI_TRANSFER_MSB
#define ADC_ADS7052_INIT(n) \
\
static const struct ads7052_config ads7052_cfg_##n = { \
.bus = SPI_DT_SPEC_INST_GET(n, ADC_ADS7052_SPI_CFG, 1U), \
.channels = 1, \
}; \
\
static struct ads7052_data ads7052_data_##n = { \
ADC_CONTEXT_INIT_TIMER(ads7052_data_##n, ctx), \
ADC_CONTEXT_INIT_LOCK(ads7052_data_##n, ctx), \
ADC_CONTEXT_INIT_SYNC(ads7052_data_##n, ctx), \
}; \
\
DEVICE_DT_INST_DEFINE(n, adc_ads7052_init, NULL, &ads7052_data_##n, &ads7052_cfg_##n, \
POST_KERNEL, CONFIG_ADC_ADS7052_INIT_PRIORITY, &ads7052_api);
DT_INST_FOREACH_STATUS_OKAY(ADC_ADS7052_INIT)
``` | /content/code_sandbox/drivers/adc/adc_ads7052.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,330 |
```c
/*
*
*/
#include <stdbool.h>
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/adc.h>
#include <zephyr/logging/log.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/util.h>
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
LOG_MODULE_REGISTER(ADC_MAX1125X, CONFIG_ADC_LOG_LEVEL);
#define MAX1125X_CONFIG_PGA(x) BIT(x)
#define MAX1125X_CONFIG_CHANNEL(x) ((x) << 5)
#define MAX1125X_CONFIG_CHMAP(x) ((x << 2) | BIT(1))
#define MAX1125X_REG_DATA(x) (MAX1125X_REG_DATA0 + (x << 1))
#define MAX1125X_CMD_READ 0xC1
#define MAX1125X_CMD_WRITE 0xC0
#define MAX1125X_CMD_CONV 0x80
#define MAX1125X_CMD_CALIBRATION 0x20
#define MAX1125X_CMD_SEQUENCER 0x30
enum max1125x_mode {
MAX1125X_MODE_POWERDOWN = 0x01,
MAX1125X_MODE_CALIBRATION = 0x02,
MAX1125X_MODE_SEQUENCER = 0x03,
};
enum {
MAX1125X_CONFIG_RATE_1_9 = 0x00,
MAX1125X_CONFIG_RATE_3_9 = 0x01,
MAX1125X_CONFIG_RATE_7_8 = 0x02,
MAX1125X_CONFIG_RATE_15_6 = 0x03,
MAX1125X_CONFIG_RATE_31_2 = 0x04,
MAX1125X_CONFIG_RATE_62_5 = 0x05,
MAX1125X_CONFIG_RATE_125 = 0x06,
MAX1125X_CONFIG_RATE_250 = 0x07,
MAX1125X_CONFIG_RATE_500 = 0x08,
MAX1125X_CONFIG_RATE_1000 = 0x09,
MAX1125X_CONFIG_RATE_2000 = 0x0A,
MAX1125X_CONFIG_RATE_4000 = 0x0B,
MAX1125X_CONFIG_RATE_8000 = 0x0C,
MAX1125X_CONFIG_RATE_16000 = 0x0D,
MAX1125X_CONFIG_RATE_32000 = 0x0E,
MAX1125X_CONFIG_RATE_64000 = 0x0F,
};
enum max1125x_reg {
MAX1125X_REG_STAT = 0x00,
MAX1125X_REG_CTRL1 = 0x02,
MAX1125X_REG_CTRL2 = 0x04,
MAX1125X_REG_CTRL3 = 0x06,
MAX1125X_REG_GPIO_CTRL = 0x08,
MAX1125X_REG_DELAY = 0x0A,
MAX1125X_REG_CHMAP1 = 0x0C,
MAX1125X_REG_CHMAP0 = 0x0E,
MAX1125X_REG_SEQ = 0x10,
MAX1125X_REG_GPO_DIR = 0x12,
MAX1125X_REG_SOC = 0x14,
MAX1125X_REG_SGC = 0x16,
MAX1125X_REG_SCOC = 0x18,
MAX1125X_REG_SCGC = 0x1A,
MAX1125X_REG_DATA0 = 0x1C,
MAX1125X_REG_DATA1 = 0x1E,
MAX1125X_REG_DATA2 = 0x20,
MAX1125X_REG_DATA3 = 0x22,
MAX1125X_REG_DATA4 = 0x24,
MAX1125X_REG_DATA5 = 0x26,
};
enum {
MAX1125X_REG_STAT_LEN = 3,
MAX1125X_REG_CTRL1_LEN = 1,
MAX1125X_REG_CTRL2_LEN = 1,
MAX1125X_REG_CTRL3_LEN = 1,
MAX1125X_REG_GPIO_CTRL_LEN = 1,
MAX1125X_REG_DELAY_LEN = 2,
MAX1125X_REG_CHMAP1_LEN = 3,
MAX1125X_REG_CHMAP0_LEN = 3,
MAX1125X_REG_SEQ_LEN = 1,
MAX1125X_REG_GPO_DIR_LEN = 1,
MAX1125X_REG_SOC_LEN = 3,
MAX1125X_REG_SGC_LEN = 3,
MAX1125X_REG_SCOC_LEN = 3,
MAX1125X_REG_SCGC_LEN = 3,
};
enum {
MAX1125X_CTRL1_CAL_SELF = 0,
MAX1125X_CTRL1_CAL_OFFSET = 1,
MAX1125X_CTRL1_CAL_FULLSCALE = 2,
};
enum {
MAX1125X_CTRL1_PD_NOP = 0,
MAX1125X_CTRL1_DP_SLEEP = 1,
MAX1125X_CTRL1_DP_STANDBY = 2,
MAX1125X_CTRL1_DP_RESET = 3,
};
enum {
MAX1125X_CTRL1_CONTSC = 0,
MAX1125X_CTRL1_SCYCLE = 1,
MAX1125X_CTRL1_FORMAT = 2,
MAX1125X_CTRL1_UBPOLAR = 3,
};
enum {
MAX1125X_CTRL2_PGA_GAIN_1 = 0,
MAX1125X_CTRL2_PGA_GAIN_2 = 1,
MAX1125X_CTRL2_PGA_GAIN_4 = 2,
MAX1125X_CTRL2_PGA_GAIN_8 = 3,
MAX1125X_CTRL2_PGA_GAIN_16 = 4,
MAX1125X_CTRL2_PGA_GAIN_32 = 5,
MAX1125X_CTRL2_PGA_GAIN_64 = 6,
MAX1125X_CTRL2_PGA_GAIN_128 = 7,
};
enum {
MAX1125X_CTRL2_PGAEN = 3,
MAX1125X_CTRL2_LPMODE = 4,
MAX1125X_CTRL2_LDOEN = 5,
MAX1125X_CTRL2_CSSEN = 6,
MAX1125X_CTRL2_EXTCLK = 7,
};
enum {
MAX1125X_CTRL3_NOSCO = 0,
MAX1125X_CTRL3_NOSCG = 1,
MAX1125X_CTRL3_NOSYSO = 2,
MAX1125X_CTRL3_NOSYSG = 3,
MAX1125X_CTRL3_CALREGSEL = 4,
MAX1125X_CTRL3_SYNC_MODE = 5,
MAX1125X_CTRL3_GPO_MODE = 6,
};
enum {
MAX1125X_GPIO_CTRL_DIO0 = 0,
MAX1125X_GPIO_CTRL_DIO1 = 1,
MAX1125X_GPIO_CTRL_DIRO = 3,
MAX1125X_GPIO_CTRL_DIR1 = 4,
MAX1125X_GPIO_CTRL_GPIO0_EN = 6,
MAX1125X_GPIO_CTRL_GPIO1_EN = 7,
};
enum {
MAX1125X_SEQ_RDYBEN = 0,
MAX1125X_SEQ_MDREN = 1,
MAX1125X_SEQ_GPODREN = 2,
MAX1125X_SEQ_MODE0 = 3,
MAX1125X_SEQ_MODE1 = 4,
MAX1125X_SEQ_MUX0 = 5,
MAX1125X_SEQ_MUX1 = 6,
MAX1125X_SEQ_MUX2 = 7,
};
enum {
MAX1125X_GPO_DIR_GPO0 = 0,
MAX1125X_GPO_DIR_GPO1 = 1,
};
enum {
MAX1125X_CMD_RATE0 = 0,
MAX1125X_CMD_RATE1 = 1,
MAX1125X_CMD_RATE2 = 2,
MAX1125X_CMD_RATE3 = 3,
};
enum {
MAX1125X_CHANNEL_0 = 0x0,
MAX1125X_CHANNEL_1 = 0x1,
MAX1125X_CHANNEL_2 = 0x2,
MAX1125X_CHANNEL_3 = 0x3,
MAX1125X_CHANNEL_4 = 0x4,
MAX1125X_CHANNEL_5 = 0x5,
};
enum {
MAX1125X_CMD_MODE0 = 4,
MAX1125X_CMD_MODE1 = 5,
};
struct max1125x_gpio_ctrl {
bool gpio0_enable;
bool gpio1_enable;
bool gpio0_direction;
bool gpio1_direction;
};
struct max1125x_gpo_ctrl {
bool gpo0_enable;
bool gpo1_enable;
};
struct max1125x_config {
struct spi_dt_spec bus;
struct gpio_dt_spec drdy_gpio;
const uint32_t odr_delay[16];
uint8_t resolution;
bool multiplexer;
bool pga;
bool self_calibration;
struct max1125x_gpio_ctrl gpio;
struct max1125x_gpo_ctrl gpo;
};
struct max1125x_data {
const struct device *dev;
struct adc_context ctx;
uint8_t rate;
struct gpio_callback callback_data_ready;
struct k_sem acq_sem;
struct k_sem data_ready_signal;
int32_t *buffer;
int32_t *repeat_buffer;
struct k_thread thread;
bool differential;
K_KERNEL_STACK_MEMBER(stack, CONFIG_ADC_MAX1125X_ACQUISITION_THREAD_STACK_SIZE);
};
static void max1125x_data_ready_handler(const struct device *dev, struct gpio_callback *gpio_cb,
uint32_t pins)
{
ARG_UNUSED(dev);
ARG_UNUSED(pins);
struct max1125x_data *data =
CONTAINER_OF(gpio_cb, struct max1125x_data, callback_data_ready);
k_sem_give(&data->data_ready_signal);
}
static int max1125x_read_reg(const struct device *dev, enum max1125x_reg reg_addr, uint8_t *buffer,
size_t reg_size)
{
int ret;
const struct max1125x_config *config = dev->config;
uint8_t buffer_tx[3];
uint8_t buffer_rx[ARRAY_SIZE(buffer_tx)];
const struct spi_buf tx_buf[] = {{
.buf = buffer_tx,
.len = ARRAY_SIZE(buffer_tx),
}};
const struct spi_buf rx_buf[] = {{
.buf = buffer_rx,
.len = ARRAY_SIZE(buffer_rx),
}};
const struct spi_buf_set tx = {
.buffers = tx_buf,
.count = ARRAY_SIZE(tx_buf),
};
const struct spi_buf_set rx = {
.buffers = rx_buf,
.count = ARRAY_SIZE(rx_buf),
};
buffer_tx[0] = MAX1125X_CMD_READ | reg_addr;
/* read one register */
buffer_tx[1] = 0x00;
ret = spi_transceive_dt(&config->bus, &tx, &rx);
if (ret != 0) {
LOG_ERR("MAX1125X: error writing register 0x%X (%d)", reg_addr, ret);
return ret;
}
*buffer = buffer_rx[1];
LOG_DBG("read from register 0x%02X value 0x%02X", reg_addr, *buffer);
return 0;
}
static int max1125x_write_reg(const struct device *dev, enum max1125x_reg reg_addr,
uint8_t *reg_val, size_t reg_size)
{
int ret;
const struct max1125x_config *config = dev->config;
uint8_t command = MAX1125X_CMD_WRITE | reg_addr;
const struct spi_buf spi_buf[2] = {{.buf = &command, .len = sizeof(command)},
{.buf = reg_val, .len = reg_size}};
const struct spi_buf_set tx = {.buffers = spi_buf, .count = ARRAY_SIZE(spi_buf)};
ret = spi_write_dt(&config->bus, &tx);
if (ret != 0) {
LOG_ERR("MAX1125X: error writing register 0x%X (%d)", reg_addr, ret);
return ret;
}
return 0;
}
static int max1125x_send_command(const struct device *dev, enum max1125x_mode mode, uint8_t rate)
{
int ret;
const struct max1125x_config *config = dev->config;
uint8_t command = MAX1125X_CMD_CONV | mode | rate;
const struct spi_buf spi_buf = {.buf = &command, .len = sizeof(command)};
const struct spi_buf_set tx = {.buffers = &spi_buf, .count = 1};
ret = spi_write_dt(&config->bus, &tx);
if (ret != 0) {
LOG_ERR("MAX1125X: error writing register 0x%X (%d)", rate, ret);
return ret;
}
return 0;
}
static int max1125x_start_conversion(const struct device *dev)
{
const struct max1125x_data *data = dev->data;
return max1125x_send_command(dev, MAX1125X_CMD_SEQUENCER, data->rate);
}
static inline int max1125x_acq_time_to_dr(const struct device *dev, uint16_t acq_time)
{
struct max1125x_data *data = dev->data;
const struct max1125x_config *config = dev->config;
const uint32_t *odr_delay = config->odr_delay;
uint32_t odr_delay_us = 0;
uint16_t acq_value = ADC_ACQ_TIME_VALUE(acq_time);
int odr = -EINVAL;
if (acq_time != ADC_ACQ_TIME_DEFAULT && ADC_ACQ_TIME_UNIT(acq_time) != ADC_ACQ_TIME_TICKS) {
LOG_ERR("MAX1125X: invalid acq time value (%d)", acq_time);
return -EINVAL;
}
if (acq_value < MAX1125X_CONFIG_RATE_1_9 || acq_value > MAX1125X_CONFIG_RATE_64000) {
LOG_ERR("MAX1125X: invalid acq value (%d)", acq_value);
return -EINVAL;
}
odr = acq_value;
odr_delay_us = odr_delay[acq_value];
data->rate = odr;
return odr;
}
static int max1125x_wait_data_ready(const struct device *dev)
{
struct max1125x_data *data = dev->data;
return k_sem_take(&data->data_ready_signal, ADC_CONTEXT_WAIT_FOR_COMPLETION_TIMEOUT);
}
static int max1125x_read_sample(const struct device *dev)
{
const struct max1125x_config *config = dev->config;
struct max1125x_data *data = dev->data;
bool is_positive;
uint8_t buffer_tx[(config->resolution / 8) + 1];
uint8_t buffer_rx[ARRAY_SIZE(buffer_tx)];
uint8_t current_channel = find_msb_set(data->ctx.sequence.channels) - 1;
int rc;
const struct spi_buf tx_buf[] = {{
.buf = buffer_tx,
.len = ARRAY_SIZE(buffer_tx),
}};
const struct spi_buf rx_buf[] = {{
.buf = buffer_rx,
.len = ARRAY_SIZE(buffer_rx),
}};
const struct spi_buf_set tx = {
.buffers = tx_buf,
.count = ARRAY_SIZE(tx_buf),
};
const struct spi_buf_set rx = {
.buffers = rx_buf,
.count = ARRAY_SIZE(rx_buf),
};
buffer_tx[0] = MAX1125X_CMD_READ | MAX1125X_REG_DATA(current_channel);
rc = spi_transceive_dt(&config->bus, &tx, &rx);
if (rc != 0) {
LOG_ERR("spi_transceive failed with error %i", rc);
return rc;
}
/* The data format while in unipolar mode is always offset binary.
* In offset binary format the most negative value is 0x000000,
* the midscale value is 0x800000 and the most positive value is
* 0xFFFFFF. In bipolar mode if the FORMAT bit = 1 then the
* data format is offset binary. If the FORMAT bit = 0, then
* the data format is twos complement. In twos complement the
* negative full-scale value is 0x800000, the midscale is 0x000000
* and the positive full scale is 0x7FFFFF. Any input exceeding
* the available input range is limited to the minimum or maximum
* data value.
*/
is_positive = buffer_rx[(config->resolution / 8)] >> 7;
if (is_positive) {
*data->buffer++ = sys_get_be24(buffer_rx) - (1 << (config->resolution - 1));
} else {
*data->buffer++ = sys_get_be24(buffer_rx + 1);
}
adc_context_on_sampling_done(&data->ctx, dev);
return rc;
}
static int max1125x_configure_chmap(const struct device *dev, const uint8_t channel_id)
{
uint8_t last_order = 0;
uint8_t chmap1_register[3] = {0};
uint8_t chmap0_register[3] = {0};
if (channel_id > 6) {
LOG_ERR("MAX1125X: invalid channel (%u)", channel_id);
return -EINVAL;
}
max1125x_read_reg(dev, MAX1125X_REG_CHMAP1, chmap1_register, MAX1125X_REG_CHMAP1_LEN);
for (int index = 0; index < 3; index++) {
if ((chmap1_register[index] >> 2) >= last_order) {
last_order = chmap1_register[index] >> 2;
} else {
continue;
}
}
max1125x_read_reg(dev, MAX1125X_REG_CHMAP0, chmap0_register, MAX1125X_REG_CHMAP0_LEN);
for (int index = 0; index < 3; index++) {
if ((chmap0_register[index] >> 2) >= last_order) {
last_order = chmap0_register[index] >> 2;
} else {
continue;
}
}
last_order++;
switch (channel_id) {
case MAX1125X_CHANNEL_0:
chmap0_register[2] = MAX1125X_CONFIG_CHMAP(last_order);
break;
case MAX1125X_CHANNEL_1:
chmap0_register[1] = MAX1125X_CONFIG_CHMAP(last_order);
break;
case MAX1125X_CHANNEL_2:
chmap0_register[0] = MAX1125X_CONFIG_CHMAP(last_order);
break;
case MAX1125X_CHANNEL_3:
chmap1_register[2] = MAX1125X_CONFIG_CHMAP(last_order);
break;
case MAX1125X_CHANNEL_4:
chmap1_register[1] = MAX1125X_CONFIG_CHMAP(last_order);
break;
case MAX1125X_CHANNEL_5:
chmap1_register[0] = MAX1125X_CONFIG_CHMAP(last_order);
break;
default:
break;
}
if (channel_id > 3) {
/* CHMAP 1 register configuration */
max1125x_write_reg(dev, MAX1125X_REG_CHMAP1, chmap1_register,
MAX1125X_REG_CHMAP1_LEN);
} else {
/* CHMAP 0 register configuration */
max1125x_write_reg(dev, MAX1125X_REG_CHMAP0, chmap0_register,
MAX1125X_REG_CHMAP0_LEN);
}
return 0;
}
static int max1125x_self_calibration(const struct device *dev)
{
uint8_t seq_register = 0;
uint8_t ctrl1_register = BIT(MAX1125X_CTRL1_SCYCLE);
max1125x_write_reg(dev, MAX1125X_REG_SEQ, &seq_register, MAX1125X_REG_SEQ_LEN);
max1125x_write_reg(dev, MAX1125X_REG_CTRL1, &ctrl1_register, MAX1125X_REG_CTRL1_LEN);
max1125x_send_command(dev, MAX1125X_CMD_CALIBRATION, 0x00);
k_sleep(K_MSEC(200));
return 0;
}
static int max1125x_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
const struct max1125x_config *max_config = dev->config;
uint8_t seq_register = 0;
uint8_t ctrl2_register = 0;
uint8_t gpio_reg = 0;
uint8_t gpo_reg = 0;
/* sequencer register configuration */
max1125x_read_reg(dev, MAX1125X_REG_SEQ, &seq_register, MAX1125X_REG_SEQ_LEN);
seq_register |= BIT(MAX1125X_SEQ_MDREN);
seq_register |= BIT(MAX1125X_SEQ_MODE0);
max1125x_write_reg(dev, MAX1125X_REG_SEQ, &seq_register, MAX1125X_REG_SEQ_LEN);
/* configuration multiplexer */
if (max_config->multiplexer) {
if (!channel_cfg->differential) {
LOG_ERR("6 channel fully supported only supported differential "
"differemtial option %i",
channel_cfg->differential);
return -ENOTSUP;
}
}
max1125x_acq_time_to_dr(dev, channel_cfg->acquisition_time);
/* ctrl2 register configuration */
if (max_config->pga) {
/* programmable gain amplifier support */
ctrl2_register |= MAX1125X_CONFIG_PGA(MAX1125X_CTRL2_PGAEN);
switch (channel_cfg->gain) {
case ADC_GAIN_1:
ctrl2_register |= MAX1125X_CTRL2_PGA_GAIN_1;
break;
case ADC_GAIN_2:
ctrl2_register |= MAX1125X_CTRL2_PGA_GAIN_2;
break;
case ADC_GAIN_4:
ctrl2_register |= MAX1125X_CTRL2_PGA_GAIN_4;
break;
case ADC_GAIN_8:
ctrl2_register |= MAX1125X_CTRL2_PGA_GAIN_8;
break;
case ADC_GAIN_16:
ctrl2_register |= MAX1125X_CTRL2_PGA_GAIN_16;
break;
case ADC_GAIN_32:
ctrl2_register |= MAX1125X_CTRL2_PGA_GAIN_32;
break;
case ADC_GAIN_64:
ctrl2_register |= MAX1125X_CTRL2_PGA_GAIN_64;
break;
case ADC_GAIN_128:
ctrl2_register |= MAX1125X_CTRL2_PGA_GAIN_128;
break;
default:
LOG_ERR("MAX1125X: unsupported channel gain '%d'", channel_cfg->gain);
return -ENOTSUP;
}
}
if (channel_cfg->reference == ADC_REF_INTERNAL) {
ctrl2_register |= BIT(MAX1125X_CTRL2_LDOEN);
} else if (channel_cfg->reference == ADC_REF_EXTERNAL1) {
ctrl2_register &= ~BIT(MAX1125X_CTRL2_LDOEN);
} else {
LOG_ERR("MAX1125X: unsupported channel reference type '%d'",
channel_cfg->reference);
return -ENOTSUP;
}
max1125x_write_reg(dev, MAX1125X_REG_CTRL2, &ctrl2_register, MAX1125X_REG_CTRL2_LEN);
/* GPIO_CTRL register configuration */
gpio_reg |= max_config->gpio.gpio0_enable << MAX1125X_GPIO_CTRL_GPIO0_EN;
gpio_reg |= max_config->gpio.gpio1_enable << MAX1125X_GPIO_CTRL_GPIO1_EN;
gpio_reg |= max_config->gpio.gpio0_direction << MAX1125X_GPIO_CTRL_DIRO;
gpio_reg |= max_config->gpio.gpio1_direction << MAX1125X_GPIO_CTRL_DIR1;
max1125x_write_reg(dev, MAX1125X_REG_GPIO_CTRL, &gpio_reg, MAX1125X_REG_GPIO_CTRL_LEN);
/* GPO_DIR register configuration */
gpo_reg |= max_config->gpo.gpo0_enable << MAX1125X_GPO_DIR_GPO0;
gpo_reg |= max_config->gpo.gpo1_enable << MAX1125X_GPO_DIR_GPO1;
max1125x_write_reg(dev, MAX1125X_REG_GPO_DIR, &gpo_reg, MAX1125X_REG_GPO_DIR_LEN);
/* configuration of channel order */
max1125x_configure_chmap(dev, channel_cfg->channel_id);
return 0;
}
static int max1125x_validate_buffer_size(const struct adc_sequence *sequence)
{
size_t needed = sizeof(uint8_t) * (sequence->resolution / 8);
if (sequence->options) {
needed *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < needed) {
return -ENOMEM;
}
return 0;
}
static int max1125x_validate_sequence(const struct device *dev, const struct adc_sequence *sequence)
{
int err;
if (sequence->oversampling) {
LOG_ERR("MAX1125X: oversampling not supported");
return -ENOTSUP;
}
err = max1125x_validate_buffer_size(sequence);
if (err) {
LOG_ERR("MAX1125X: buffer size too small");
return -ENOTSUP;
}
return 0;
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx, bool repeat_sampling)
{
struct max1125x_data *data = CONTAINER_OF(ctx, struct max1125x_data, ctx);
if (repeat_sampling) {
data->buffer = data->repeat_buffer;
}
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct max1125x_data *data = CONTAINER_OF(ctx, struct max1125x_data, ctx);
data->repeat_buffer = data->buffer;
max1125x_start_conversion(data->dev);
k_sem_give(&data->acq_sem);
}
static int max1125x_adc_start_read(const struct device *dev, const struct adc_sequence *sequence)
{
int rc;
struct max1125x_data *data = dev->data;
rc = max1125x_validate_sequence(dev, sequence);
if (rc != 0) {
return rc;
}
data->buffer = sequence->buffer;
adc_context_start_read(&data->ctx, sequence);
return adc_context_wait_for_completion(&data->ctx);
}
static int max1125x_adc_read_async(const struct device *dev, const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
int rc;
struct max1125x_data *data = dev->data;
adc_context_lock(&data->ctx, async ? true : false, async);
rc = max1125x_adc_start_read(dev, sequence);
adc_context_release(&data->ctx, rc);
return rc;
}
static int max1125x_adc_perform_read(const struct device *dev)
{
struct max1125x_data *data = dev->data;
int rc;
rc = max1125x_read_sample(dev);
if (rc != 0) {
LOG_ERR("reading sample failed (err %d)", rc);
adc_context_complete(&data->ctx, rc);
return rc;
}
return rc;
}
static int max1125x_read(const struct device *dev, const struct adc_sequence *sequence)
{
return max1125x_adc_read_async(dev, sequence, NULL);
}
static void max1125x_acquisition_thread(void *p1, void *p2, void *p3)
{
ARG_UNUSED(p2);
ARG_UNUSED(p3);
const struct device *dev = p1;
struct max1125x_data *data = dev->data;
int rc;
while (true) {
k_sem_take(&data->acq_sem, K_FOREVER);
rc = max1125x_wait_data_ready(dev);
if (rc != 0) {
LOG_ERR("MAX1125X: failed to get ready status (err %d)", rc);
adc_context_complete(&data->ctx, rc);
break;
}
max1125x_adc_perform_read(dev);
}
}
static int max1125x_init(const struct device *dev)
{
int err;
const struct max1125x_config *config = dev->config;
struct max1125x_data *data = dev->data;
data->dev = dev;
k_sem_init(&data->acq_sem, 0, 1);
k_sem_init(&data->data_ready_signal, 0, 1);
if (!spi_is_ready_dt(&config->bus)) {
LOG_ERR("spi bus %s not ready", config->bus.bus->name);
return -ENODEV;
}
if (config->self_calibration) {
LOG_INF("performing self calibration process");
max1125x_self_calibration(dev);
}
err = gpio_pin_configure_dt(&config->drdy_gpio, GPIO_INPUT);
if (err != 0) {
LOG_ERR("failed to initialize GPIO for data ready (err %d)", err);
return err;
}
err = gpio_pin_interrupt_configure_dt(&config->drdy_gpio, GPIO_INT_EDGE_TO_ACTIVE);
if (err != 0) {
LOG_ERR("failed to configure data ready interrupt (err %d)", err);
return -EIO;
}
gpio_init_callback(&data->callback_data_ready, max1125x_data_ready_handler,
BIT(config->drdy_gpio.pin));
err = gpio_add_callback(config->drdy_gpio.port, &data->callback_data_ready);
if (err != 0) {
LOG_ERR("failed to add data ready callback (err %d)", err);
return -EIO;
}
k_tid_t tid = k_thread_create(
&data->thread, data->stack, K_KERNEL_STACK_SIZEOF(data->stack),
max1125x_acquisition_thread, (void *)dev, NULL, NULL,
CONFIG_ADC_MAX1125X_ACQUISITION_THREAD_PRIORITY, 0, K_NO_WAIT);
k_thread_name_set(tid, "adc_max1125x");
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
static const struct adc_driver_api max1125x_api = {
.channel_setup = max1125x_channel_setup,
.read = max1125x_read,
.ref_internal = 2048,
#ifdef CONFIG_ADC_ASYNC
.read_async = max1125x_adc_read_async,
#endif
};
#define DT_INST_MAX1125X(inst, t) DT_INST(inst, maxim_max##t)
#define MAX1125X_INIT(t, n, odr_delay_us, res, mux, pgab) \
static const struct max1125x_config max##t##_cfg_##n = { \
.bus = SPI_DT_SPEC_GET(DT_INST_MAX1125X(n, t), \
SPI_OP_MODE_MASTER | SPI_WORD_SET(8) | SPI_TRANSFER_MSB, \
1), \
.odr_delay = odr_delay_us, \
.resolution = res, \
.multiplexer = mux, \
.pga = pgab, \
.drdy_gpio = GPIO_DT_SPEC_GET_OR(DT_INST_MAX1125X(n, t), drdy_gpios, {0}), \
.self_calibration = DT_PROP_OR(DT_INST_MAX1125X(n, t), self_calibration, 0), \
.gpio.gpio0_enable = DT_PROP_OR(DT_INST_MAX1125X(n, t), gpio0_enable, 1), \
.gpio.gpio1_enable = DT_PROP_OR(DT_INST_MAX1125X(n, t), gpio1_enable, 0), \
.gpio.gpio0_direction = DT_PROP_OR(DT_INST_MAX1125X(n, t), gpio0_direction, 0), \
.gpio.gpio1_direction = DT_PROP_OR(DT_INST_MAX1125X(n, t), gpio1_direction, 0), \
.gpo.gpo0_enable = DT_PROP_OR(DT_INST_MAX1125X(n, t), gpo1_enable, 0), \
.gpo.gpo1_enable = DT_PROP_OR(DT_INST_MAX1125X(n, t), gpo1_enable, 0), \
}; \
static struct max1125x_data max##t##_data_##n = { \
ADC_CONTEXT_INIT_LOCK(max##t##_data_##n, ctx), \
ADC_CONTEXT_INIT_TIMER(max##t##_data_##n, ctx), \
ADC_CONTEXT_INIT_SYNC(max##t##_data_##n, ctx), \
}; \
DEVICE_DT_DEFINE(DT_INST_MAX1125X(n, t), max1125x_init, NULL, &max##t##_data_##n, \
&max##t##_cfg_##n, POST_KERNEL, CONFIG_ADC_MAX1125X_INIT_PRIORITY, \
&max1125x_api);
/* Each data register is a 16-bit read-only register. Any attempt to write
* data to this location will have no effect. The data read from these
* registers is clocked out MSB first. The result is stored in a format
* according to the FORMAT bit in the CTRL1 register. The data format
* while in unipolar mode is always offset binary. In offset binary
* format the most negative value is 0x0000, the midscale value is 0x8000 and
* the most positive value is 0xFFFF. In bipolar mode if the FORMAT
* bit = 1 then the data format is offset binary. If the FORMAT
* bit= 0, then the data format is twos complement. In twos
* complement the negative full-scale value is 0x8000, the midscale is 0x0000
* and the positive full scale is 0x7FFF. Any input exceeding the available
* input range is limited to the minimum or maximum data value.
*/
#define MAX11253_RESOLUTION 16
/* Each data register is a 24-bit read-only register. Any attempt to write
* data to this location will have no effect. The data read from these
* registers is clocked out MSB first. The result is stored in a format
* according to the FORMAT bit in the CTRL1 register. The data format
* while in unipolar mode is always offset binary. In offset binary format
* the most negative value is 0x000000, the midscale value is 0x800000 and
* the most positive value is 0xFFFFFF. In bipolar mode if the FORMAT
* bit = 1 then the data format is offset binary. If the FORMAT bit = 0,
* then the data format is twos complement. In twos complement the negative
* full-scale value is 0x800000, the midscale is 0x000000 and the positive
* full scale is 0x7FFFFF. Any input exceeding the available input range is
* limited to the minimum or maximum data value.
*/
#define MAX11254_RESOLUTION 24
/*
* Approximated MAX1125X acquisition times in microseconds. These are
* used for the initial delay when polling for data ready.
*
* {1.9 SPS, 3.9 SPS, 7.8 SPS, 15.6 SPS, 31.2 SPS, 62.5 SPS, 125 SPS, 250 SPS, 500 SPS,
* 1000 SPS, 2000 SPS, 4000 SPS, 8000 SPS, 16000 SPS, 32000 SPS, 64000 SPS}
*/
#define MAX1125X_ODR_DELAY_US \
{ \
526315, 256410, 128205, 64102, 32051, 16000, 8000, 4000, 2000, 1000, 500, 250, \
125, 62, 31, 15 \
}
/*
* MAX11253: 16 bit, 6-channel, programmable gain amplifier, delta-sigma
*/
#define MAX11253_INIT(n) \
MAX1125X_INIT(11253, n, MAX1125X_ODR_DELAY_US, MAX11253_RESOLUTION, false, true)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT maxim_max11253
DT_INST_FOREACH_STATUS_OKAY(MAX11253_INIT)
/*
* MAX1125X: 24 bit, 6-channel, programmable gain amplifier, delta-sigma
*/
#define MAX11254_INIT(n) \
MAX1125X_INIT(11254, n, MAX1125X_ODR_DELAY_US, MAX11254_RESOLUTION, false, true)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT maxim_max11254
DT_INST_FOREACH_STATUS_OKAY(MAX11254_INIT)
``` | /content/code_sandbox/drivers/adc/adc_max1125x.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 8,039 |
```c
/*
*
*/
/**
* @file
* @brief ADC driver for the LMP90xxx AFE.
*/
#include <zephyr/drivers/adc.h>
#include <zephyr/drivers/adc/lmp90xxx.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/crc.h>
#define LOG_LEVEL CONFIG_ADC_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(adc_lmp90xxx);
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
/* LMP90xxx register addresses */
#define LMP90XXX_REG_RESETCN 0x00U
#define LMP90XXX_REG_SPI_HANDSHAKECN 0x01U
#define LMP90XXX_REG_SPI_RESET 0x02U
#define LMP90XXX_REG_SPI_STREAMCN 0x03U
#define LMP90XXX_REG_PWRCN 0x08U
#define LMP90XXX_REG_DATA_ONLY_1 0x09U
#define LMP90XXX_REG_DATA_ONLY_2 0x0AU
#define LMP90XXX_REG_ADC_RESTART 0x0BU
#define LMP90XXX_REG_GPIO_DIRCN 0x0EU
#define LMP90XXX_REG_GPIO_DAT 0x0FU
#define LMP90XXX_REG_BGCALCN 0x10U
#define LMP90XXX_REG_SPI_DRDYBCN 0x11U
#define LMP90XXX_REG_ADC_AUXCN 0x12U
#define LMP90XXX_REG_SPI_CRC_CN 0x13U
#define LMP90XXX_REG_SENDIAG_THLDH 0x14U
#define LMP90XXX_REG_SENDIAG_THLDL 0x15U
#define LMP90XXX_REG_SCALCN 0x17U
#define LMP90XXX_REG_ADC_DONE 0x18U
#define LMP90XXX_REG_SENDIAG_FLAGS 0x19U
#define LMP90XXX_REG_ADC_DOUT 0x1AU
#define LMP90XXX_REG_SPI_CRC_DAT 0x1DU
#define LMP90XXX_REG_CH_STS 0x1EU
#define LMP90XXX_REG_CH_SCAN 0x1FU
/* LMP90xxx channel input and configuration registers */
#define LMP90XXX_REG_CH_INPUTCN(ch) (0x20U + (2 * ch))
#define LMP90XXX_REG_CH_CONFIG(ch) (0x21U + (2 * ch))
/* LMP90xxx upper (URA) and lower (LRA) register addresses */
#define LMP90XXX_URA(addr) ((addr >> 4U) & GENMASK(2, 0))
#define LMP90XXX_LRA(addr) (addr & GENMASK(3, 0))
/* LMP90xxx instruction byte 1 (INST1) */
#define LMP90XXX_INST1_WAB 0x10U
#define LMP90XXX_INST1_RA 0x90U
/* LMP90xxx instruction byte 2 (INST2) */
#define LMP90XXX_INST2_WB 0U
#define LMP90XXX_INST2_R BIT(7)
#define LMP90XXX_INST2_SZ_1 (0x0U << 5)
#define LMP90XXX_INST2_SZ_2 (0x1U << 5)
#define LMP90XXX_INST2_SZ_3 (0x2U << 5)
#define LMP90XXX_INST2_SZ_STREAM (0x3U << 5)
/* LMP90xxx register values/commands */
#define LMP90XXX_REG_AND_CNV_RST 0xC3U
#define LMP90XXX_SDO_DRDYB_DRIVER(x) ((x & BIT_MASK(3)) << 1)
#define LMP90XXX_PWRCN(x) (x & BIT_MASK(2))
#define LMP90XXX_RTD_CUR_SEL(x) (x & BIT_MASK(4))
#define LMP90XXX_SPI_DRDYB_D6(x) ((x & BIT(0)) << 7)
#define LMP90XXX_EN_CRC(x) ((x & BIT(0)) << 4)
#define LMP90XXX_DRDYB_AFT_CRC(x) ((x & BIT(0)) << 2)
#define LMP90XXX_CH_SCAN_SEL(x) ((x & BIT_MASK(2)) << 6)
#define LMP90XXX_LAST_CH(x) ((x & BIT_MASK(3)) << 3)
#define LMP90XXX_FIRST_CH(x) (x & BIT_MASK(3))
#define LMP90XXX_BURNOUT_EN(x) ((x & BIT(0)) << 7)
#define LMP90XXX_VREF_SEL(x) ((x & BIT(0)) << 6)
#define LMP90XXX_VINP(x) ((x & BIT_MASK(3)) << 3)
#define LMP90XXX_VINN(x) (x & BIT_MASK(3))
#define LMP90XXX_BGCALN(x) (x & BIT_MASK(3))
#define LMP90XXX_ODR_SEL(x) ((x & BIT_MASK(3)) << 4)
#define LMP90XXX_GAIN_SEL(x) ((x & BIT_MASK(3)) << 1)
#define LMP90XXX_BUF_EN(x) (x & BIT(0))
#define LMP90XXX_GPIO_DAT_MASK BIT_MASK(LMP90XXX_GPIO_MAX)
/* Invalid (never used) Upper Register Address */
#define LMP90XXX_INVALID_URA UINT8_MAX
/* Maximum number of ADC channels */
#define LMP90XXX_MAX_CHANNELS 7
/* Maximum number of ADC inputs */
#define LMP90XXX_MAX_INPUTS 8
/* Default Output Data Rate (ODR) is 214.65 SPS */
#define LMP90XXX_DEFAULT_ODR 7
/* Macro for checking if Data Ready Bar IRQ is in use */
#define LMP90XXX_HAS_DRDYB(config) (config->drdyb.port != NULL)
struct lmp90xxx_config {
struct spi_dt_spec bus;
struct gpio_dt_spec drdyb;
uint8_t rtd_current;
uint8_t resolution;
uint8_t channels;
};
struct lmp90xxx_data {
struct adc_context ctx;
const struct device *dev;
struct gpio_callback drdyb_cb;
struct k_mutex ura_lock;
uint8_t ura;
int32_t *buffer;
int32_t *repeat_buffer;
uint32_t channels;
bool calibrate;
uint8_t channel_odr[LMP90XXX_MAX_CHANNELS];
#ifdef CONFIG_ADC_LMP90XXX_GPIO
struct k_mutex gpio_lock;
uint8_t gpio_dircn;
uint8_t gpio_dat;
#endif /* CONFIG_ADC_LMP90XXX_GPIO */
struct k_thread thread;
struct k_sem acq_sem;
struct k_sem drdyb_sem;
K_KERNEL_STACK_MEMBER(stack,
CONFIG_ADC_LMP90XXX_ACQUISITION_THREAD_STACK_SIZE);
};
/*
* Approximated LMP90xxx acquisition times in milliseconds. These are
* used for the initial delay when polling for data ready.
*/
static const int32_t lmp90xxx_odr_delay_tbl[8] = {
596, /* 13.42/8 = 1.6775 SPS */
298, /* 13.42/4 = 3.355 SPS */
149, /* 13.42/2 = 6.71 SPS */
75, /* 13.42 SPS */
37, /* 214.65/8 = 26.83125 SPS */
19, /* 214.65/4 = 53.6625 SPS */
9, /* 214.65/2 = 107.325 SPS */
5, /* 214.65 SPS (default) */
};
static inline uint8_t lmp90xxx_inst2_sz(size_t len)
{
if (len == 1) {
return LMP90XXX_INST2_SZ_1;
} else if (len == 2) {
return LMP90XXX_INST2_SZ_2;
} else if (len == 3) {
return LMP90XXX_INST2_SZ_3;
} else {
return LMP90XXX_INST2_SZ_STREAM;
}
}
static int lmp90xxx_read_reg(const struct device *dev, uint8_t addr,
uint8_t *dptr,
size_t len)
{
const struct lmp90xxx_config *config = dev->config;
struct lmp90xxx_data *data = dev->data;
uint8_t ura = LMP90XXX_URA(addr);
uint8_t inst1_uab[2] = { LMP90XXX_INST1_WAB, ura };
uint8_t inst2 = LMP90XXX_INST2_R | LMP90XXX_LRA(addr);
struct spi_buf tx_buf[2];
struct spi_buf rx_buf[2];
struct spi_buf_set tx;
struct spi_buf_set rx;
int dummy = 0;
int i = 0;
int err;
if (len == 0) {
LOG_ERR("attempt to read 0 bytes from register 0x%02x", addr);
return -EINVAL;
}
if (k_is_in_isr()) {
/* Prevent SPI transactions from an ISR */
return -EWOULDBLOCK;
}
k_mutex_lock(&data->ura_lock, K_FOREVER);
if (ura != data->ura) {
/* Instruction Byte 1 + Upper Address Byte */
tx_buf[i].buf = inst1_uab;
tx_buf[i].len = sizeof(inst1_uab);
dummy += sizeof(inst1_uab);
i++;
}
/* Instruction Byte 2 */
inst2 |= lmp90xxx_inst2_sz(len);
tx_buf[i].buf = &inst2;
tx_buf[i].len = sizeof(inst2);
dummy += sizeof(inst2);
i++;
/* Dummy RX Bytes */
rx_buf[0].buf = NULL;
rx_buf[0].len = dummy;
/* Data Byte(s) */
rx_buf[1].buf = dptr;
rx_buf[1].len = len;
tx.buffers = tx_buf;
tx.count = i;
rx.buffers = rx_buf;
rx.count = 2;
err = spi_transceive_dt(&config->bus, &tx, &rx);
if (!err) {
data->ura = ura;
} else {
/* Force INST1 + UAB on next access */
data->ura = LMP90XXX_INVALID_URA;
}
k_mutex_unlock(&data->ura_lock);
return err;
}
static int lmp90xxx_read_reg8(const struct device *dev, uint8_t addr,
uint8_t *val)
{
return lmp90xxx_read_reg(dev, addr, val, sizeof(*val));
}
static int lmp90xxx_write_reg(const struct device *dev, uint8_t addr,
uint8_t *dptr,
size_t len)
{
const struct lmp90xxx_config *config = dev->config;
struct lmp90xxx_data *data = dev->data;
uint8_t ura = LMP90XXX_URA(addr);
uint8_t inst1_uab[2] = { LMP90XXX_INST1_WAB, ura };
uint8_t inst2 = LMP90XXX_INST2_WB | LMP90XXX_LRA(addr);
struct spi_buf tx_buf[3];
struct spi_buf_set tx;
int i = 0;
int err;
if (len == 0) {
LOG_ERR("attempt write 0 bytes to register 0x%02x", addr);
return -EINVAL;
}
if (k_is_in_isr()) {
/* Prevent SPI transactions from an ISR */
return -EWOULDBLOCK;
}
k_mutex_lock(&data->ura_lock, K_FOREVER);
if (ura != data->ura) {
/* Instruction Byte 1 + Upper Address Byte */
tx_buf[i].buf = inst1_uab;
tx_buf[i].len = sizeof(inst1_uab);
i++;
}
/* Instruction Byte 2 */
inst2 |= lmp90xxx_inst2_sz(len);
tx_buf[i].buf = &inst2;
tx_buf[i].len = sizeof(inst2);
i++;
/* Data Byte(s) */
tx_buf[i].buf = dptr;
tx_buf[i].len = len;
i++;
tx.buffers = tx_buf;
tx.count = i;
err = spi_write_dt(&config->bus, &tx);
if (!err) {
data->ura = ura;
} else {
/* Force INST1 + UAB on next access */
data->ura = LMP90XXX_INVALID_URA;
}
k_mutex_unlock(&data->ura_lock);
return err;
}
static int lmp90xxx_write_reg8(const struct device *dev, uint8_t addr,
uint8_t val)
{
return lmp90xxx_write_reg(dev, addr, &val, sizeof(val));
}
static int lmp90xxx_soft_reset(const struct device *dev)
{
int err;
err = lmp90xxx_write_reg8(dev, LMP90XXX_REG_RESETCN,
LMP90XXX_REG_AND_CNV_RST);
if (err) {
return err;
}
/* Write to RESETCN twice in order to reset mode as well as registers */
return lmp90xxx_write_reg8(dev, LMP90XXX_REG_RESETCN,
LMP90XXX_REG_AND_CNV_RST);
}
static inline bool lmp90xxx_has_channel(const struct device *dev,
uint8_t channel)
{
const struct lmp90xxx_config *config = dev->config;
if (channel >= config->channels) {
return false;
} else {
return true;
}
}
static inline bool lmp90xxx_has_input(const struct device *dev, uint8_t input)
{
const struct lmp90xxx_config *config = dev->config;
if (input >= LMP90XXX_MAX_INPUTS) {
return false;
} else if (config->channels < LMP90XXX_MAX_CHANNELS &&
(input >= 3 && input <= 5)) {
/* This device only has inputs 0, 1, 2, 6, and 7 */
return false;
} else {
return true;
}
}
static inline int lmp90xxx_acq_time_to_odr(uint16_t acq_time)
{
uint16_t acq_value;
if (acq_time == ADC_ACQ_TIME_DEFAULT) {
return LMP90XXX_DEFAULT_ODR;
}
if (ADC_ACQ_TIME_UNIT(acq_time) != ADC_ACQ_TIME_TICKS) {
return -EINVAL;
}
/*
* The LMP90xxx supports odd (and very slow) output data
* rates. Allow the caller to specify the ODR directly using
* ADC_ACQ_TIME_TICKS
*/
acq_value = ADC_ACQ_TIME_VALUE(acq_time);
if (acq_value <= LMP90XXX_DEFAULT_ODR) {
return acq_value;
}
return -EINVAL;
}
static int lmp90xxx_adc_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
struct lmp90xxx_data *data = dev->data;
uint8_t chx_inputcn = LMP90XXX_BURNOUT_EN(0); /* No burnout currents */
uint8_t chx_config = LMP90XXX_BUF_EN(0); /* No buffer */
uint8_t payload[2];
uint8_t addr;
int ret;
switch (channel_cfg->reference) {
case ADC_REF_EXTERNAL0:
chx_inputcn |= LMP90XXX_VREF_SEL(0);
break;
case ADC_REF_EXTERNAL1:
chx_inputcn |= LMP90XXX_VREF_SEL(1);
break;
default:
LOG_ERR("unsupported channel reference type '%d'",
channel_cfg->reference);
return -ENOTSUP;
}
if (!lmp90xxx_has_channel(dev, channel_cfg->channel_id)) {
LOG_ERR("unsupported channel id '%d'", channel_cfg->channel_id);
return -ENOTSUP;
}
if (!lmp90xxx_has_input(dev, channel_cfg->input_positive)) {
LOG_ERR("unsupported positive input '%d'",
channel_cfg->input_positive);
return -ENOTSUP;
}
chx_inputcn |= LMP90XXX_VINP(channel_cfg->input_positive);
if (!lmp90xxx_has_input(dev, channel_cfg->input_negative)) {
LOG_ERR("unsupported negative input '%d'",
channel_cfg->input_negative);
return -ENOTSUP;
}
chx_inputcn |= LMP90XXX_VINN(channel_cfg->input_negative);
ret = lmp90xxx_acq_time_to_odr(channel_cfg->acquisition_time);
if (ret < 0) {
LOG_ERR("unsupported channel acquisition time 0x%02x",
channel_cfg->acquisition_time);
return -ENOTSUP;
}
chx_config |= LMP90XXX_ODR_SEL(ret);
data->channel_odr[channel_cfg->channel_id] = ret;
switch (channel_cfg->gain) {
case ADC_GAIN_1:
chx_config |= LMP90XXX_GAIN_SEL(0);
break;
case ADC_GAIN_2:
chx_config |= LMP90XXX_GAIN_SEL(1);
break;
case ADC_GAIN_4:
chx_config |= LMP90XXX_GAIN_SEL(2);
break;
case ADC_GAIN_8:
chx_config |= LMP90XXX_GAIN_SEL(3);
break;
case ADC_GAIN_16:
chx_config |= LMP90XXX_GAIN_SEL(4);
break;
case ADC_GAIN_32:
chx_config |= LMP90XXX_GAIN_SEL(5);
break;
case ADC_GAIN_64:
chx_config |= LMP90XXX_GAIN_SEL(6);
break;
case ADC_GAIN_128:
chx_config |= LMP90XXX_GAIN_SEL(7);
break;
default:
LOG_ERR("unsupported channel gain '%d'", channel_cfg->gain);
return -ENOTSUP;
}
payload[0] = chx_inputcn;
payload[1] = chx_config;
addr = LMP90XXX_REG_CH_INPUTCN(channel_cfg->channel_id);
ret = lmp90xxx_write_reg(dev, addr, payload, sizeof(payload));
if (ret) {
LOG_ERR("failed to configure channel (err %d)", ret);
}
return ret;
}
static int lmp90xxx_validate_buffer_size(const struct adc_sequence *sequence)
{
uint8_t channels = 0;
size_t needed;
uint32_t mask;
for (mask = BIT(LMP90XXX_MAX_CHANNELS - 1); mask != 0; mask >>= 1) {
if (mask & sequence->channels) {
channels++;
}
}
needed = channels * sizeof(int32_t);
if (sequence->options) {
needed *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < needed) {
return -ENOMEM;
}
return 0;
}
static int lmp90xxx_adc_start_read(const struct device *dev,
const struct adc_sequence *sequence)
{
const struct lmp90xxx_config *config = dev->config;
struct lmp90xxx_data *data = dev->data;
int err;
if (sequence->resolution != config->resolution) {
LOG_ERR("unsupported resolution %d", sequence->resolution);
return -ENOTSUP;
}
if (!lmp90xxx_has_channel(dev, find_msb_set(sequence->channels) - 1)) {
LOG_ERR("unsupported channels in mask: 0x%08x",
sequence->channels);
return -ENOTSUP;
}
err = lmp90xxx_validate_buffer_size(sequence);
if (err) {
LOG_ERR("buffer size too small");
return err;
}
data->buffer = sequence->buffer;
data->calibrate = sequence->calibrate;
adc_context_start_read(&data->ctx, sequence);
return adc_context_wait_for_completion(&data->ctx);
}
static int lmp90xxx_adc_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
struct lmp90xxx_data *data = dev->data;
int err;
adc_context_lock(&data->ctx, async ? true : false, async);
err = lmp90xxx_adc_start_read(dev, sequence);
adc_context_release(&data->ctx, err);
return err;
}
static int lmp90xxx_adc_read(const struct device *dev,
const struct adc_sequence *sequence)
{
return lmp90xxx_adc_read_async(dev, sequence, NULL);
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct lmp90xxx_data *data =
CONTAINER_OF(ctx, struct lmp90xxx_data, ctx);
data->channels = ctx->sequence.channels;
data->repeat_buffer = data->buffer;
k_sem_give(&data->acq_sem);
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
bool repeat_sampling)
{
struct lmp90xxx_data *data =
CONTAINER_OF(ctx, struct lmp90xxx_data, ctx);
if (repeat_sampling) {
data->buffer = data->repeat_buffer;
}
}
static int lmp90xxx_adc_read_channel(const struct device *dev,
uint8_t channel,
int32_t *result)
{
const struct lmp90xxx_config *config = dev->config;
struct lmp90xxx_data *data = dev->data;
uint8_t adc_done;
uint8_t ch_scan;
uint8_t buf[4]; /* ADC_DOUT + CRC */
int32_t delay;
uint8_t odr;
int err;
/* Single channel, single scan mode */
ch_scan = LMP90XXX_CH_SCAN_SEL(0x1) | LMP90XXX_FIRST_CH(channel) |
LMP90XXX_LAST_CH(channel);
err = lmp90xxx_write_reg8(dev, LMP90XXX_REG_CH_SCAN, ch_scan);
if (err) {
LOG_ERR("failed to setup scan channels (err %d)", err);
return err;
}
/* Start scan */
err = lmp90xxx_write_reg8(dev, LMP90XXX_REG_PWRCN, LMP90XXX_PWRCN(0));
if (err) {
LOG_ERR("failed to set active mode (err %d)", err);
return err;
}
if (LMP90XXX_HAS_DRDYB(config)) {
k_sem_take(&data->drdyb_sem, K_FOREVER);
} else {
odr = data->channel_odr[channel];
delay = lmp90xxx_odr_delay_tbl[odr];
LOG_DBG("sleeping for %d ms", delay);
k_msleep(delay);
/* Poll for data ready */
do {
err = lmp90xxx_read_reg8(dev, LMP90XXX_REG_ADC_DONE,
&adc_done);
if (err) {
LOG_ERR("failed to read done (err %d)", err);
return err;
}
if (adc_done == 0xFFU) {
LOG_DBG("sleeping for 1 ms");
k_msleep(1);
} else {
break;
}
} while (true);
}
if (IS_ENABLED(CONFIG_ADC_LMP90XXX_CRC)) {
err = lmp90xxx_read_reg(dev, LMP90XXX_REG_ADC_DOUT, buf,
sizeof(buf));
} else {
err = lmp90xxx_read_reg(dev, LMP90XXX_REG_ADC_DOUT, buf,
config->resolution / 8);
}
if (err) {
LOG_ERR("failed to read ADC DOUT (err %d)", err);
return err;
}
if (IS_ENABLED(CONFIG_ADC_LMP90XXX_CRC)) {
uint8_t crc = crc8(buf, 3, 0x31, 0, false) ^ 0xFFU;
if (buf[3] != crc) {
LOG_ERR("CRC mismatch (0x%02x vs. 0x%02x)", buf[3],
crc);
return -EIO;
}
}
/* Read result, get rid of CRC, and sign extend result */
*result = (int32_t)sys_get_be32(buf);
*result >>= (32 - config->resolution);
return 0;
}
static void lmp90xxx_acquisition_thread(void *p1, void *p2, void *p3)
{
ARG_UNUSED(p2);
ARG_UNUSED(p3);
struct lmp90xxx_data *data = p1;
uint8_t bgcalcn = LMP90XXX_BGCALN(0x3); /* Default to BgCalMode3 */
int32_t result = 0;
uint8_t channel;
int err;
while (true) {
k_sem_take(&data->acq_sem, K_FOREVER);
if (data->calibrate) {
/* Use BgCalMode2 */
bgcalcn = LMP90XXX_BGCALN(0x2);
}
LOG_DBG("using BGCALCN = 0x%02x", bgcalcn);
err = lmp90xxx_write_reg8(data->dev,
LMP90XXX_REG_BGCALCN, bgcalcn);
if (err) {
LOG_ERR("failed to setup background calibration "
"(err %d)", err);
adc_context_complete(&data->ctx, err);
break;
}
while (data->channels) {
channel = find_lsb_set(data->channels) - 1;
LOG_DBG("reading channel %d", channel);
err = lmp90xxx_adc_read_channel(data->dev,
channel, &result);
if (err) {
adc_context_complete(&data->ctx, err);
break;
}
LOG_DBG("finished channel %d, result = %d", channel,
result);
/*
* ADC samples are stored as int32_t regardless of the
* resolution in order to provide a uniform interface
* for the driver.
*/
*data->buffer++ = result;
WRITE_BIT(data->channels, channel, 0);
}
adc_context_on_sampling_done(&data->ctx, data->dev);
}
}
static void lmp90xxx_drdyb_callback(const struct device *port,
struct gpio_callback *cb, uint32_t pins)
{
struct lmp90xxx_data *data =
CONTAINER_OF(cb, struct lmp90xxx_data, drdyb_cb);
/* Signal thread that data is now ready */
k_sem_give(&data->drdyb_sem);
}
#ifdef CONFIG_ADC_LMP90XXX_GPIO
int lmp90xxx_gpio_set_output(const struct device *dev, uint8_t pin)
{
struct lmp90xxx_data *data = dev->data;
int err = 0;
uint8_t tmp;
if (pin > LMP90XXX_GPIO_MAX) {
return -EINVAL;
}
k_mutex_lock(&data->gpio_lock, K_FOREVER);
tmp = data->gpio_dircn | BIT(pin);
if (tmp != data->gpio_dircn) {
err = lmp90xxx_write_reg8(dev, LMP90XXX_REG_GPIO_DIRCN, tmp);
if (!err) {
data->gpio_dircn = tmp;
}
}
k_mutex_unlock(&data->gpio_lock);
return err;
}
int lmp90xxx_gpio_set_input(const struct device *dev, uint8_t pin)
{
struct lmp90xxx_data *data = dev->data;
int err = 0;
uint8_t tmp;
if (pin > LMP90XXX_GPIO_MAX) {
return -EINVAL;
}
k_mutex_lock(&data->gpio_lock, K_FOREVER);
tmp = data->gpio_dircn & ~BIT(pin);
if (tmp != data->gpio_dircn) {
err = lmp90xxx_write_reg8(dev, LMP90XXX_REG_GPIO_DIRCN, tmp);
if (!err) {
data->gpio_dircn = tmp;
}
}
k_mutex_unlock(&data->gpio_lock);
return err;
}
int lmp90xxx_gpio_set_pin_value(const struct device *dev, uint8_t pin,
bool value)
{
struct lmp90xxx_data *data = dev->data;
int err = 0;
uint8_t tmp;
if (pin > LMP90XXX_GPIO_MAX) {
return -EINVAL;
}
k_mutex_lock(&data->gpio_lock, K_FOREVER);
tmp = data->gpio_dat;
WRITE_BIT(tmp, pin, value);
if (tmp != data->gpio_dat) {
err = lmp90xxx_write_reg8(dev, LMP90XXX_REG_GPIO_DAT, tmp);
if (!err) {
data->gpio_dat = tmp;
}
}
k_mutex_unlock(&data->gpio_lock);
return err;
}
int lmp90xxx_gpio_get_pin_value(const struct device *dev, uint8_t pin,
bool *value)
{
struct lmp90xxx_data *data = dev->data;
int err = 0;
uint8_t tmp;
if (pin > LMP90XXX_GPIO_MAX) {
return -EINVAL;
}
k_mutex_lock(&data->gpio_lock, K_FOREVER);
err = lmp90xxx_read_reg8(dev, LMP90XXX_REG_GPIO_DAT, &tmp);
if (!err) {
*value = tmp & BIT(pin);
}
k_mutex_unlock(&data->gpio_lock);
return err;
}
int lmp90xxx_gpio_port_get_raw(const struct device *dev,
gpio_port_value_t *value)
{
struct lmp90xxx_data *data = dev->data;
uint8_t tmp;
int err;
k_mutex_lock(&data->gpio_lock, K_FOREVER);
err = lmp90xxx_read_reg8(dev, LMP90XXX_REG_GPIO_DAT, &tmp);
tmp &= ~(data->gpio_dircn);
k_mutex_unlock(&data->gpio_lock);
*value = tmp;
return err;
}
int lmp90xxx_gpio_port_set_masked_raw(const struct device *dev,
gpio_port_pins_t mask,
gpio_port_value_t value)
{
struct lmp90xxx_data *data = dev->data;
int err = 0;
uint8_t tmp;
mask &= LMP90XXX_GPIO_DAT_MASK;
k_mutex_lock(&data->gpio_lock, K_FOREVER);
tmp = (data->gpio_dat & ~mask) | (value & mask);
if (tmp != data->gpio_dat) {
err = lmp90xxx_write_reg8(dev, LMP90XXX_REG_GPIO_DAT, tmp);
if (!err) {
data->gpio_dat = tmp;
}
}
k_mutex_unlock(&data->gpio_lock);
return err;
}
int lmp90xxx_gpio_port_set_bits_raw(const struct device *dev,
gpio_port_pins_t pins)
{
struct lmp90xxx_data *data = dev->data;
int err = 0;
uint8_t tmp;
tmp = pins & LMP90XXX_GPIO_DAT_MASK;
k_mutex_lock(&data->gpio_lock, K_FOREVER);
if (tmp != data->gpio_dat) {
tmp |= data->gpio_dat;
err = lmp90xxx_write_reg8(dev, LMP90XXX_REG_GPIO_DAT, tmp);
if (!err) {
data->gpio_dat = tmp;
}
}
k_mutex_unlock(&data->gpio_lock);
return err;
}
int lmp90xxx_gpio_port_clear_bits_raw(const struct device *dev,
gpio_port_pins_t pins)
{
struct lmp90xxx_data *data = dev->data;
int err = 0;
uint8_t tmp;
tmp = pins & LMP90XXX_GPIO_DAT_MASK;
k_mutex_lock(&data->gpio_lock, K_FOREVER);
if ((tmp & data->gpio_dat) != 0) {
tmp = data->gpio_dat & ~tmp;
err = lmp90xxx_write_reg8(dev, LMP90XXX_REG_GPIO_DAT, tmp);
if (!err) {
data->gpio_dat = tmp;
}
}
k_mutex_unlock(&data->gpio_lock);
return err;
}
int lmp90xxx_gpio_port_toggle_bits(const struct device *dev,
gpio_port_pins_t pins)
{
struct lmp90xxx_data *data = dev->data;
uint8_t tmp;
int err;
tmp = pins & LMP90XXX_GPIO_DAT_MASK;
k_mutex_lock(&data->gpio_lock, K_FOREVER);
tmp ^= data->gpio_dat;
err = lmp90xxx_write_reg8(dev, LMP90XXX_REG_GPIO_DAT, tmp);
if (!err) {
data->gpio_dat = tmp;
}
k_mutex_unlock(&data->gpio_lock);
return err;
}
#endif /* CONFIG_ADC_LMP90XXX_GPIO */
static int lmp90xxx_init(const struct device *dev)
{
const struct lmp90xxx_config *config = dev->config;
struct lmp90xxx_data *data = dev->data;
k_tid_t tid;
int err;
data->dev = dev;
k_mutex_init(&data->ura_lock);
k_sem_init(&data->acq_sem, 0, 1);
k_sem_init(&data->drdyb_sem, 0, 1);
#ifdef CONFIG_ADC_LMP90XXX_GPIO
k_mutex_init(&data->gpio_lock);
#endif /* CONFIG_ADC_LMP90XXX_GPIO */
/* Force INST1 + UAB on first access */
data->ura = LMP90XXX_INVALID_URA;
if (!spi_is_ready_dt(&config->bus)) {
LOG_ERR("SPI bus %s not ready", config->bus.bus->name);
return -ENODEV;
}
err = lmp90xxx_soft_reset(dev);
if (err) {
LOG_ERR("failed to request soft reset (err %d)", err);
return err;
}
err = lmp90xxx_write_reg8(dev, LMP90XXX_REG_SPI_HANDSHAKECN,
LMP90XXX_SDO_DRDYB_DRIVER(0x4));
if (err) {
LOG_ERR("failed to set SPI handshake control (err %d)",
err);
return err;
}
if (config->rtd_current) {
err = lmp90xxx_write_reg8(dev, LMP90XXX_REG_ADC_AUXCN,
LMP90XXX_RTD_CUR_SEL(config->rtd_current));
if (err) {
LOG_ERR("failed to set RTD current (err %d)", err);
return err;
}
}
if (IS_ENABLED(CONFIG_ADC_LMP90XXX_CRC)) {
err = lmp90xxx_write_reg8(dev, LMP90XXX_REG_SPI_CRC_CN,
LMP90XXX_EN_CRC(1) |
LMP90XXX_DRDYB_AFT_CRC(1));
if (err) {
LOG_ERR("failed to enable CRC (err %d)", err);
return err;
}
}
if (LMP90XXX_HAS_DRDYB(config)) {
err = gpio_pin_configure_dt(&config->drdyb, GPIO_INPUT);
if (err) {
LOG_ERR("failed to configure DRDYB GPIO pin (err %d)",
err);
return -EINVAL;
}
gpio_init_callback(&data->drdyb_cb, lmp90xxx_drdyb_callback,
BIT(config->drdyb.pin));
err = gpio_add_callback(config->drdyb.port, &data->drdyb_cb);
if (err) {
LOG_ERR("failed to add DRDYB callback (err %d)", err);
return -EINVAL;
}
err = lmp90xxx_write_reg8(dev, LMP90XXX_REG_SPI_DRDYBCN,
LMP90XXX_SPI_DRDYB_D6(1));
if (err) {
LOG_ERR("failed to configure D6 as DRDYB (err %d)",
err);
return err;
}
err = gpio_pin_interrupt_configure_dt(&config->drdyb,
GPIO_INT_EDGE_TO_ACTIVE);
if (err) {
LOG_ERR("failed to configure DRDBY interrupt (err %d)",
err);
return -EINVAL;
}
}
tid = k_thread_create(&data->thread, data->stack,
K_KERNEL_STACK_SIZEOF(data->stack),
lmp90xxx_acquisition_thread,
data, NULL, NULL,
CONFIG_ADC_LMP90XXX_ACQUISITION_THREAD_PRIO,
0, K_NO_WAIT);
k_thread_name_set(tid, "adc_lmp90xxx");
/* Put device in stand-by to prepare it for single-shot conversion */
err = lmp90xxx_write_reg8(dev, LMP90XXX_REG_PWRCN, LMP90XXX_PWRCN(0x3));
if (err) {
LOG_ERR("failed to request stand-by mode (err %d)", err);
return err;
}
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
static const struct adc_driver_api lmp90xxx_adc_api = {
.channel_setup = lmp90xxx_adc_channel_setup,
.read = lmp90xxx_adc_read,
#ifdef CONFIG_ADC_ASYNC
.read_async = lmp90xxx_adc_read_async,
#endif
};
#define ASSERT_LMP90XXX_CURRENT_VALID(v) \
BUILD_ASSERT(v == 0 || v == 100 || v == 200 || v == 300 || \
v == 400 || v == 500 || v == 600 || v == 700 || \
v == 800 || v == 900 || v == 1000, \
"unsupported RTD current (" #v ")")
#define LMP90XXX_UAMPS_TO_RTD_CUR_SEL(x) (x / 100)
#define DT_INST_LMP90XXX(inst, t) DT_INST(inst, ti_lmp##t)
#define LMP90XXX_INIT(t, n, res, ch) \
ASSERT_LMP90XXX_CURRENT_VALID(UTIL_AND( \
DT_NODE_HAS_PROP(DT_INST_LMP90XXX(n, t), rtd_current), \
DT_PROP(DT_INST_LMP90XXX(n, t), rtd_current))); \
static struct lmp90xxx_data lmp##t##_data_##n = { \
ADC_CONTEXT_INIT_TIMER(lmp##t##_data_##n, ctx), \
ADC_CONTEXT_INIT_LOCK(lmp##t##_data_##n, ctx), \
ADC_CONTEXT_INIT_SYNC(lmp##t##_data_##n, ctx), \
}; \
static const struct lmp90xxx_config lmp##t##_config_##n = { \
.bus = SPI_DT_SPEC_GET(DT_INST_LMP90XXX(n, t), SPI_OP_MODE_MASTER | \
SPI_TRANSFER_MSB | SPI_WORD_SET(8), 0), \
.drdyb = GPIO_DT_SPEC_GET_OR(DT_INST_LMP90XXX(n, t), drdyb_gpios, {0}), \
.rtd_current = LMP90XXX_UAMPS_TO_RTD_CUR_SEL( \
DT_PROP_OR(DT_INST_LMP90XXX(n, t), rtd_current, 0)), \
.resolution = res, \
.channels = ch, \
}; \
DEVICE_DT_DEFINE(DT_INST_LMP90XXX(n, t), \
&lmp90xxx_init, NULL, \
&lmp##t##_data_##n, \
&lmp##t##_config_##n, POST_KERNEL, \
CONFIG_ADC_INIT_PRIORITY, \
&lmp90xxx_adc_api);
/*
* LMP90077: 16 bit, 2 diff/4 se (4 channels), 0 currents
*/
#define LMP90077_INIT(n) LMP90XXX_INIT(90077, n, 16, 4)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT ti_lmp90077
DT_INST_FOREACH_STATUS_OKAY(LMP90077_INIT)
/*
* LMP90078: 16 bit, 2 diff/4 se (4 channels), 2 currents
*/
#define LMP90078_INIT(n) LMP90XXX_INIT(90078, n, 16, 4)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT ti_lmp90078
DT_INST_FOREACH_STATUS_OKAY(LMP90078_INIT)
/*
* LMP90079: 16 bit, 4 diff/7 se (7 channels), 0 currents, has VIN3-5
*/
#define LMP90079_INIT(n) LMP90XXX_INIT(90079, n, 16, 7)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT ti_lmp90079
DT_INST_FOREACH_STATUS_OKAY(LMP90079_INIT)
/*
* LMP90080: 16 bit, 4 diff/7 se (7 channels), 2 currents, has VIN3-5
*/
#define LMP90080_INIT(n) LMP90XXX_INIT(90080, n, 16, 7)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT ti_lmp90080
DT_INST_FOREACH_STATUS_OKAY(LMP90080_INIT)
/*
* LMP90097: 24 bit, 2 diff/4 se (4 channels), 0 currents
*/
#define LMP90097_INIT(n) LMP90XXX_INIT(90097, n, 24, 4)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT ti_lmp90097
DT_INST_FOREACH_STATUS_OKAY(LMP90097_INIT)
/*
* LMP90098: 24 bit, 2 diff/4 se (4 channels), 2 currents
*/
#define LMP90098_INIT(n) LMP90XXX_INIT(90098, n, 24, 4)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT ti_lmp90098
DT_INST_FOREACH_STATUS_OKAY(LMP90098_INIT)
/*
* LMP90099: 24 bit, 4 diff/7 se (7 channels), 0 currents, has VIN3-5
*/
#define LMP90099_INIT(n) LMP90XXX_INIT(90099, n, 24, 7)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT ti_lmp90099
DT_INST_FOREACH_STATUS_OKAY(LMP90099_INIT)
/*
* LMP90100: 24 bit, 4 diff/7 se (7 channels), 2 currents, has VIN3-5
*/
#define LMP90100_INIT(n) LMP90XXX_INIT(90100, n, 24, 7)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT ti_lmp90100
DT_INST_FOREACH_STATUS_OKAY(LMP90100_INIT)
``` | /content/code_sandbox/drivers/adc/adc_lmp90xxx.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 9,248 |
```unknown
# IADC configuration options
config ADC_GECKO_IADC
bool "Gecko Incremental ADC driver"
default y
depends on DT_HAS_SILABS_GECKO_IADC_ENABLED
select SOC_GECKO_IADC
select ADC_CONFIGURABLE_INPUTS
help
Enable the driver implementation for the Silabs GeckoEXX32 Incremental ADC
config ADC_GECKO_ADC
bool "Gecko ADC driver"
default y
depends on DT_HAS_SILABS_GECKO_ADC_ENABLED
select SOC_GECKO_ADC
select ADC_CONFIGURABLE_INPUTS
help
Enable the driver implementation for the Silabs GeckoEFM32 ADC
``` | /content/code_sandbox/drivers/adc/Kconfig.gecko | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 140 |
```c
/*
* an affiliate of Cypress Semiconductor Corporation
*
*/
/**
* @brief ADC driver for Infineon CAT1 MCU family.
*/
#define DT_DRV_COMPAT infineon_cat1_adc
#include <zephyr/drivers/adc.h>
#include <cyhal_adc.h>
#include <cyhal_utils_impl.h>
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(ifx_cat1_adc, CONFIG_ADC_LOG_LEVEL);
#if defined(PASS_SARMUX_PADS0_PORT)
#define _ADCSAR_PORT PASS_SARMUX_PADS0_PORT
#else
#error The selected device does not supported ADC
#endif
#define ADC_CAT1_EVENTS_MASK (CYHAL_ADC_EOS | CYHAL_ADC_ASYNC_READ_COMPLETE)
#define ADC_CAT1_DEFAULT_ACQUISITION_NS (1000u)
#define ADC_CAT1_RESOLUTION (12u)
#define ADC_CAT1_REF_INTERNAL_MV (1200u)
struct ifx_cat1_adc_data {
struct adc_context ctx;
const struct device *dev;
cyhal_adc_t adc_obj;
cyhal_adc_channel_t adc_chan_obj[CY_SAR_SEQ_NUM_CHANNELS];
uint16_t *buffer;
uint16_t *repeat_buffer;
uint32_t channels;
uint32_t channels_mask;
};
struct ifx_cat1_adc_config {
uint8_t irq_priority;
};
static void _cyhal_adc_event_callback(void *callback_arg, cyhal_adc_event_t event)
{
const struct device *dev = (const struct device *) callback_arg;
struct ifx_cat1_adc_data *data = dev->data;
uint32_t channels = data->channels;
int32_t result;
uint32_t channel_id;
while (channels != 0) {
channel_id = find_lsb_set(channels) - 1;
channels &= ~BIT(channel_id);
result = Cy_SAR_GetResult32(data->adc_chan_obj[channel_id].adc->base,
data->adc_chan_obj[channel_id].channel_idx);
/* Legacy API for BWC. Convert from signed to unsigned by adding 0x800 to
* convert the lowest signed 12-bit number to 0x0.
*/
*data->buffer = (uint16_t)(result + 0x800);
data->buffer++;
}
adc_context_on_sampling_done(&data->ctx, dev);
LOG_DBG("%s ISR triggered.", dev->name);
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct ifx_cat1_adc_data *data = CONTAINER_OF(ctx, struct ifx_cat1_adc_data, ctx);
data->repeat_buffer = data->buffer;
Cy_SAR_StartConvert(data->adc_obj.base, CY_SAR_START_CONVERT_SINGLE_SHOT);
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
bool repeat_sampling)
{
struct ifx_cat1_adc_data *data = CONTAINER_OF(ctx, struct ifx_cat1_adc_data, ctx);
if (repeat_sampling) {
data->buffer = data->repeat_buffer;
}
}
static int ifx_cat1_adc_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
struct ifx_cat1_adc_data *data = dev->data;
cy_rslt_t result;
cyhal_gpio_t vplus = CYHAL_GET_GPIO(_ADCSAR_PORT, channel_cfg->input_positive);
cyhal_gpio_t vminus = channel_cfg->differential ?
CYHAL_GET_GPIO(_ADCSAR_PORT, channel_cfg->input_negative) :
CYHAL_ADC_VNEG;
uint32_t acquisition_ns = ADC_CAT1_DEFAULT_ACQUISITION_NS;
if (channel_cfg->reference != ADC_REF_INTERNAL) {
LOG_ERR("Selected ADC reference is not valid");
return -EINVAL;
}
if (channel_cfg->gain != ADC_GAIN_1) {
LOG_ERR("Selected ADC gain is not valid");
return -EINVAL;
}
if (channel_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
switch (ADC_ACQ_TIME_UNIT(channel_cfg->acquisition_time)) {
case ADC_ACQ_TIME_MICROSECONDS:
acquisition_ns = ADC_ACQ_TIME_VALUE(channel_cfg->acquisition_time) * 1000;
break;
case ADC_ACQ_TIME_NANOSECONDS:
acquisition_ns = ADC_ACQ_TIME_VALUE(channel_cfg->acquisition_time);
break;
default:
LOG_ERR("Selected ADC acquisition time units is not valid");
return -EINVAL;
}
}
/* ADC channel configuration */
const cyhal_adc_channel_config_t channel_config = {
/* Disable averaging for channel */
.enable_averaging = false,
/* Minimum acquisition time set to 1us */
.min_acquisition_ns = acquisition_ns,
/* Sample channel when ADC performs a scan */
.enabled = true
};
/* Initialize a channel and configure it to scan the input pin(s). */
cyhal_adc_channel_free(&data->adc_chan_obj[channel_cfg->channel_id]);
result = cyhal_adc_channel_init_diff(&data->adc_chan_obj[channel_cfg->channel_id],
&data->adc_obj, vplus, vminus, &channel_config);
if (result != CY_RSLT_SUCCESS) {
LOG_ERR("ADC channel initialization failed. Error: 0x%08X\n", (unsigned int)result);
return -EIO;
}
data->channels_mask |= BIT(channel_cfg->channel_id);
return 0;
}
static int validate_buffer_size(const struct adc_sequence *sequence)
{
int active_channels = 0;
int total_buffer_size;
for (int i = 0; i < CY_SAR_SEQ_NUM_CHANNELS; i++) {
if (sequence->channels & BIT(i)) {
active_channels++;
}
}
total_buffer_size = active_channels * sizeof(uint16_t);
if (sequence->options) {
total_buffer_size *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < total_buffer_size) {
return -ENOMEM;
}
return 0;
}
static int start_read(const struct device *dev,
const struct adc_sequence *sequence)
{
struct ifx_cat1_adc_data *data = dev->data;
uint32_t channels = sequence->channels;
uint32_t unconfigured_channels = channels & ~data->channels_mask;
if (sequence->resolution != ADC_CAT1_RESOLUTION) {
LOG_ERR("Invalid ADC resolution (%d)", sequence->resolution);
return -EINVAL;
}
if (unconfigured_channels != 0) {
LOG_ERR("ADC channel(s) not configured: 0x%08X\n", unconfigured_channels);
return -EINVAL;
}
if (sequence->oversampling) {
LOG_ERR("Oversampling not supported");
return -ENOTSUP;
}
int return_val = validate_buffer_size(sequence);
if (return_val < 0) {
LOG_ERR("Invalid sequence buffer size");
return return_val;
}
data->channels = channels;
data->buffer = sequence->buffer;
adc_context_start_read(&data->ctx, sequence);
return adc_context_wait_for_completion(&data->ctx);
}
static int ifx_cat1_adc_read(const struct device *dev,
const struct adc_sequence *sequence)
{
int ret;
struct ifx_cat1_adc_data *data = dev->data;
adc_context_lock(&data->ctx, false, NULL);
ret = start_read(dev, sequence);
adc_context_release(&data->ctx, ret);
return ret;
}
#ifdef CONFIG_ADC_ASYNC
static int ifx_cat1_adc_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
int ret;
struct ifx_cat1_adc_data *data = dev->data;
adc_context_lock(&data->ctx, true, async);
ret = start_read(dev, sequence);
adc_context_release(&data->ctx, ret);
return ret;
}
#endif
static int ifx_cat1_adc_init(const struct device *dev)
{
struct ifx_cat1_adc_data *data = dev->data;
const struct ifx_cat1_adc_config *config = dev->config;
cy_rslt_t result;
data->dev = dev;
/* Initialize ADC. The ADC block which can connect to the input pin is selected */
result = cyhal_adc_init(&data->adc_obj, CYHAL_GET_GPIO(_ADCSAR_PORT, 0), NULL);
if (result != CY_RSLT_SUCCESS) {
LOG_ERR("ADC initialization failed. Error: 0x%08X\n", (unsigned int)result);
return -EIO;
}
/* Enable ADC Interrupt */
cyhal_adc_enable_event(&data->adc_obj, (cyhal_adc_event_t)ADC_CAT1_EVENTS_MASK,
config->irq_priority, true);
cyhal_adc_register_callback(&data->adc_obj, _cyhal_adc_event_callback, (void *) dev);
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
static const struct adc_driver_api adc_cat1_driver_api = {
.channel_setup = ifx_cat1_adc_channel_setup,
.read = ifx_cat1_adc_read,
#ifdef CONFIG_ADC_ASYNC
.read_async = ifx_cat1_adc_read_async,
#endif
.ref_internal = ADC_CAT1_REF_INTERNAL_MV
};
/* Macros for ADC instance declaration */
#define INFINEON_CAT1_ADC_INIT(n) \
static struct ifx_cat1_adc_data ifx_cat1_adc_data##n = { \
ADC_CONTEXT_INIT_TIMER(ifx_cat1_adc_data##n, ctx), \
ADC_CONTEXT_INIT_LOCK(ifx_cat1_adc_data##n, ctx), \
ADC_CONTEXT_INIT_SYNC(ifx_cat1_adc_data##n, ctx), \
}; \
\
static const struct ifx_cat1_adc_config adc_cat1_cfg_##n = { \
.irq_priority = DT_INST_IRQ(n, priority), \
}; \
\
DEVICE_DT_INST_DEFINE(n, ifx_cat1_adc_init, \
NULL, &ifx_cat1_adc_data##n, \
&adc_cat1_cfg_##n, \
POST_KERNEL, CONFIG_ADC_INIT_PRIORITY, \
&adc_cat1_driver_api);
DT_INST_FOREACH_STATUS_OKAY(INFINEON_CAT1_ADC_INIT)
``` | /content/code_sandbox/drivers/adc/adc_ifx_cat1.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,200 |
```c
/*
*
*/
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/adc.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/logging/log.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/util.h>
#define ADC_CONTEXT_USES_KERNEL_TIMER 1
#include "adc_context.h"
LOG_MODULE_REGISTER(max11102_17, CONFIG_ADC_LOG_LEVEL);
struct max11102_17_config {
struct spi_dt_spec bus;
const struct gpio_dt_spec gpio_chsel;
uint8_t resolution;
uint8_t channel_count;
};
struct max11102_17_data {
struct adc_context ctx;
struct k_sem acquire_signal;
int16_t *buffer;
int16_t *buffer_ptr;
uint8_t current_channel_id;
uint8_t sequence_channel_id;
#if CONFIG_ADC_ASYNC
struct k_thread thread;
K_KERNEL_STACK_MEMBER(stack, CONFIG_ADC_MAX11102_17_ACQUISITION_THREAD_STACK_SIZE);
#endif /* CONFIG_ADC_ASYNC */
};
static int max11102_17_switch_channel(const struct device *dev)
{
const struct max11102_17_config *config = dev->config;
struct max11102_17_data *data = dev->data;
int result;
uint8_t buffer_rx[1];
const struct spi_buf rx_buf[] = {{
.buf = buffer_rx,
.len = ARRAY_SIZE(buffer_rx),
}};
const struct spi_buf_set rx = {
.buffers = rx_buf,
.count = ARRAY_SIZE(rx_buf),
};
struct spi_dt_spec bus;
memcpy(&bus, &config->bus, sizeof(bus));
bus.config.operation |= SPI_HOLD_ON_CS;
result = spi_read_dt(&bus, &rx);
if (result != 0) {
LOG_ERR("read failed with error %i", result);
return result;
}
gpio_pin_set_dt(&config->gpio_chsel, data->current_channel_id);
result = spi_read_dt(&config->bus, &rx);
if (result != 0) {
LOG_ERR("read failed with error %i", result);
return result;
}
return 0;
}
static int max11102_17_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
const struct max11102_17_config *config = dev->config;
LOG_DBG("read from ADC channel %i", channel_cfg->channel_id);
if (channel_cfg->reference != ADC_REF_EXTERNAL0) {
LOG_ERR("invalid reference %i", channel_cfg->reference);
return -EINVAL;
}
if (channel_cfg->gain != ADC_GAIN_1) {
LOG_ERR("invalid gain %i", channel_cfg->gain);
return -EINVAL;
}
if (channel_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
LOG_ERR("invalid acquisition time %i", channel_cfg->acquisition_time);
return -EINVAL;
}
if (channel_cfg->differential != 0) {
LOG_ERR("differential inputs are not supported");
return -EINVAL;
}
if (channel_cfg->channel_id > config->channel_count) {
LOG_ERR("invalid channel selection %i", channel_cfg->channel_id);
return -EINVAL;
}
return 0;
}
static int max11102_17_validate_buffer_size(const struct adc_sequence *sequence)
{
size_t necessary = sizeof(int16_t);
if (sequence->options) {
necessary *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < necessary) {
return -ENOMEM;
}
return 0;
}
static int max11102_17_validate_sequence(const struct device *dev,
const struct adc_sequence *sequence)
{
const struct max11102_17_config *config = dev->config;
struct max11102_17_data *data = dev->data;
size_t sequence_channel_count = 0;
const size_t channel_maximum = 8*sizeof(sequence->channels);
if (sequence->resolution != config->resolution) {
LOG_ERR("invalid resolution");
return -EINVAL;
}
for (size_t i = 0; i < channel_maximum; ++i) {
if ((BIT(i) & sequence->channels) == 0) {
continue;
}
if (i > config->channel_count) {
LOG_ERR("invalid channel selection");
return -EINVAL;
}
sequence_channel_count++;
data->sequence_channel_id = i;
}
if (sequence_channel_count == 0) {
LOG_ERR("no channel selected");
return -EINVAL;
}
if (sequence_channel_count > 1) {
LOG_ERR("multiple channels selected");
return -EINVAL;
}
if (sequence->oversampling) {
LOG_ERR("oversampling is not supported");
return -EINVAL;
}
return max11102_17_validate_buffer_size(sequence);
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx, bool repeat_sampling)
{
struct max11102_17_data *data = CONTAINER_OF(ctx, struct max11102_17_data, ctx);
if (repeat_sampling) {
data->buffer = data->buffer_ptr;
}
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct max11102_17_data *data = CONTAINER_OF(ctx, struct max11102_17_data, ctx);
data->buffer_ptr = data->buffer;
k_sem_give(&data->acquire_signal);
}
static int max11102_17_adc_start_read(const struct device *dev, const struct adc_sequence *sequence,
bool wait)
{
int result;
struct max11102_17_data *data = dev->data;
result = max11102_17_validate_sequence(dev, sequence);
if (result != 0) {
LOG_ERR("sequence validation failed");
return result;
}
data->buffer = sequence->buffer;
adc_context_start_read(&data->ctx, sequence);
if (wait) {
result = adc_context_wait_for_completion(&data->ctx);
}
return result;
}
static int max11102_17_read_sample(const struct device *dev, int16_t *sample)
{
const struct max11102_17_config *config = dev->config;
int result;
size_t trailing_bits = 15 - config->resolution;
uint8_t buffer_rx[2];
const struct spi_buf rx_buf[] = {{
.buf = buffer_rx,
.len = ARRAY_SIZE(buffer_rx),
}};
const struct spi_buf_set rx = {
.buffers = rx_buf,
.count = ARRAY_SIZE(rx_buf),
};
result = spi_read_dt(&config->bus, &rx);
if (result != 0) {
LOG_ERR("read failed with error %i", result);
return result;
}
*sample = sys_get_be16(buffer_rx);
LOG_DBG("raw sample: 0x%04X", *sample);
*sample = *sample >> trailing_bits;
*sample = *sample & GENMASK(config->resolution, 0);
LOG_DBG("sample: 0x%04X", *sample);
return 0;
}
static int max11102_17_adc_perform_read(const struct device *dev)
{
int result;
struct max11102_17_data *data = dev->data;
k_sem_take(&data->acquire_signal, K_FOREVER);
if (data->sequence_channel_id != data->current_channel_id) {
LOG_DBG("switch channel selection");
data->current_channel_id = data->sequence_channel_id;
max11102_17_switch_channel(dev);
}
result = max11102_17_read_sample(dev, data->buffer);
if (result != 0) {
LOG_ERR("reading sample failed");
adc_context_complete(&data->ctx, result);
return result;
}
data->buffer++;
adc_context_on_sampling_done(&data->ctx, dev);
return result;
}
#if CONFIG_ADC_ASYNC
static int max11102_17_adc_read_async(const struct device *dev, const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
int result;
struct max11102_17_data *data = dev->data;
adc_context_lock(&data->ctx, true, async);
result = max11102_17_adc_start_read(dev, sequence, true);
adc_context_release(&data->ctx, result);
return result;
}
static int max11102_17_read(const struct device *dev, const struct adc_sequence *sequence)
{
int result;
struct max11102_17_data *data = dev->data;
adc_context_lock(&data->ctx, false, NULL);
result = max11102_17_adc_start_read(dev, sequence, true);
adc_context_release(&data->ctx, result);
return result;
}
#else
static int max11102_17_read(const struct device *dev, const struct adc_sequence *sequence)
{
int result;
struct max11102_17_data *data = dev->data;
adc_context_lock(&data->ctx, false, NULL);
result = max11102_17_adc_start_read(dev, sequence, false);
while (result == 0 && k_sem_take(&data->ctx.sync, K_NO_WAIT) != 0) {
result = max11102_17_adc_perform_read(dev);
}
adc_context_release(&data->ctx, result);
return result;
}
#endif
#if CONFIG_ADC_ASYNC
static void max11102_17_acquisition_thread(void *p1, void *p2, void *p3)
{
ARG_UNUSED(p2);
ARG_UNUSED(p3);
const struct device *dev = p1;
while (true) {
max11102_17_adc_perform_read(dev);
}
}
#endif
static int max11102_17_init(const struct device *dev)
{
int result;
const struct max11102_17_config *config = dev->config;
struct max11102_17_data *data = dev->data;
int16_t sample;
adc_context_init(&data->ctx);
k_sem_init(&data->acquire_signal, 0, 1);
if (!spi_is_ready_dt(&config->bus)) {
LOG_ERR("SPI device is not ready");
return -ENODEV;
}
switch (config->channel_count) {
case 1:
if (config->gpio_chsel.port != NULL) {
LOG_ERR("GPIO for chsel set with only one channel");
return -EINVAL;
}
break;
case 2:
if (config->gpio_chsel.port == NULL) {
LOG_ERR("no GPIO for chsel set with two channels");
return -EINVAL;
}
result = gpio_pin_configure_dt(&config->gpio_chsel, GPIO_OUTPUT_INACTIVE);
if (result != 0) {
LOG_ERR("failed to initialize GPIO for chsel");
return result;
}
break;
default:
LOG_ERR("invalid number of channels (%i)", config->channel_count);
return -EINVAL;
}
data->current_channel_id = 0;
#if CONFIG_ADC_ASYNC
k_tid_t tid = k_thread_create(
&data->thread, data->stack, CONFIG_ADC_MAX11102_17_ACQUISITION_THREAD_STACK_SIZE,
max11102_17_acquisition_thread, (void *)dev, NULL, NULL,
CONFIG_ADC_MAX11102_17_ACQUISITION_THREAD_INIT_PRIO, 0, K_NO_WAIT);
k_thread_name_set(tid, "adc_max11102_17");
#endif
/* power up time is one conversion cycle */
result = max11102_17_read_sample(dev, &sample);
if (result != 0) {
LOG_ERR("unable to read dummy sample for power up timing");
return result;
}
adc_context_unlock_unconditionally(&data->ctx);
return result;
}
static const struct adc_driver_api api = {
.channel_setup = max11102_17_channel_setup,
.read = max11102_17_read,
.ref_internal = 0,
#ifdef CONFIG_ADC_ASYNC
.read_async = max11102_17_adc_read_async,
#endif
};
BUILD_ASSERT(CONFIG_ADC_INIT_PRIORITY > CONFIG_SPI_INIT_PRIORITY,
"CONFIG_ADC_INIT_PRIORITY must be higher than CONFIG_SPI_INIT_PRIORITY");
#define ADC_MAX11102_17_INST_DEFINE(index, name, res, channels) \
static const struct max11102_17_config config_##name##_##index = { \
.bus = SPI_DT_SPEC_INST_GET( \
index, \
SPI_OP_MODE_MASTER | SPI_MODE_CPOL | SPI_MODE_CPHA | SPI_WORD_SET(8), 0), \
.gpio_chsel = GPIO_DT_SPEC_INST_GET_OR(index, chsel_gpios, {0}), \
.resolution = res, \
.channel_count = channels, \
}; \
static struct max11102_17_data data_##name##_##index; \
DEVICE_DT_INST_DEFINE(index, max11102_17_init, NULL, &data_##name##_##index, \
&config_##name##_##index, POST_KERNEL, CONFIG_ADC_INIT_PRIORITY, \
&api);
#define DT_DRV_COMPAT maxim_max11102
#define ADC_MAX11102_RESOLUTION 12
#define ADC_MAX11102_CHANNELS 2
DT_INST_FOREACH_STATUS_OKAY_VARGS(ADC_MAX11102_17_INST_DEFINE, DT_DRV_COMPAT,
ADC_MAX11102_RESOLUTION, ADC_MAX11102_CHANNELS)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT maxim_max11103
#define ADC_MAX11103_RESOLUTION 12
#define ADC_MAX11103_CHANNELS 2
DT_INST_FOREACH_STATUS_OKAY_VARGS(ADC_MAX11102_17_INST_DEFINE, DT_DRV_COMPAT,
ADC_MAX11103_RESOLUTION, ADC_MAX11103_CHANNELS)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT maxim_max11105
#define ADC_MAX11105_RESOLUTION 12
#define ADC_MAX11105_CHANNELS 1
DT_INST_FOREACH_STATUS_OKAY_VARGS(ADC_MAX11102_17_INST_DEFINE, DT_DRV_COMPAT,
ADC_MAX11105_RESOLUTION, ADC_MAX11105_CHANNELS)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT maxim_max11106
#define ADC_MAX11106_RESOLUTION 10
#define ADC_MAX11106_CHANNELS 2
DT_INST_FOREACH_STATUS_OKAY_VARGS(ADC_MAX11102_17_INST_DEFINE, DT_DRV_COMPAT,
ADC_MAX11106_RESOLUTION, ADC_MAX11106_CHANNELS)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT maxim_max11110
#define ADC_MAX11110_RESOLUTION 10
#define ADC_MAX11110_CHANNELS 1
DT_INST_FOREACH_STATUS_OKAY_VARGS(ADC_MAX11102_17_INST_DEFINE, DT_DRV_COMPAT,
ADC_MAX11110_RESOLUTION, ADC_MAX11110_CHANNELS)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT maxim_max11111
#define ADC_MAX11111_RESOLUTION 8
#define ADC_MAX11111_CHANNELS 2
DT_INST_FOREACH_STATUS_OKAY_VARGS(ADC_MAX11102_17_INST_DEFINE, DT_DRV_COMPAT,
ADC_MAX11111_RESOLUTION, ADC_MAX11111_CHANNELS)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT maxim_max11115
#define ADC_MAX11115_RESOLUTION 8
#define ADC_MAX11115_CHANNELS 1
DT_INST_FOREACH_STATUS_OKAY_VARGS(ADC_MAX11102_17_INST_DEFINE, DT_DRV_COMPAT,
ADC_MAX11115_RESOLUTION, ADC_MAX11115_CHANNELS)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT maxim_max11116
#define ADC_MAX11116_RESOLUTION 8
#define ADC_MAX11116_CHANNELS 1
DT_INST_FOREACH_STATUS_OKAY_VARGS(ADC_MAX11102_17_INST_DEFINE, DT_DRV_COMPAT,
ADC_MAX11116_RESOLUTION, ADC_MAX11116_CHANNELS)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT maxim_max11117
#define ADC_MAX11117_RESOLUTION 10
#define ADC_MAX11117_CHANNELS 1
DT_INST_FOREACH_STATUS_OKAY_VARGS(ADC_MAX11102_17_INST_DEFINE, DT_DRV_COMPAT,
ADC_MAX11117_RESOLUTION, ADC_MAX11117_CHANNELS)
#undef DT_DRV_COMPAT
``` | /content/code_sandbox/drivers/adc/adc_max11102_17.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,429 |
```unknown
config ADC_CC13XX_CC26XX
bool "CC13XX/CC26XX ADC driver"
default y
depends on DT_HAS_TI_CC13XX_CC26XX_ADC_ENABLED
help
Enable the TI CC13XX/CC26XX ADC driver.
``` | /content/code_sandbox/drivers/adc/Kconfig.cc13xx_cc26xx | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 57 |
```unknown
# ADC configuration options
config ADC_STM32
bool "STM32 ADC driver"
default y
depends on DT_HAS_ST_STM32_ADC_ENABLED
help
Enable the driver implementation for the stm32xx ADC
if ADC_STM32
config ADC_STM32_DMA
bool "STM32 MCU ADC DMA Support"
select DMA
help
Enable the ADC DMA mode for ADC instances
that enable dma channels in their device tree node.
endif
``` | /content/code_sandbox/drivers/adc/Kconfig.stm32 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 98 |
```c
/*
*
*/
#define DT_DRV_COMPAT atmel_sam_afec
/** @file
* @brief Atmel SAM MCU family ADC (AFEC) driver.
*
* This is an implementation of the Zephyr ADC driver using the SAM Analog
* Front-End Controller (AFEC) peripheral.
*/
#include <errno.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/util.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <soc.h>
#include <zephyr/drivers/adc.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/clock_control/atmel_sam_pmc.h>
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
#define LOG_LEVEL CONFIG_ADC_LOG_LEVEL
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(adc_sam_afec);
#define NUM_CHANNELS 12
#define CONF_ADC_PRESCALER ((SOC_ATMEL_SAM_MCK_FREQ_HZ / 15000000) - 1)
#ifndef AFEC_MR_ONE
#define AFEC_MR_ONE AFEC_MR_ANACH
#endif
typedef void (*cfg_func_t)(const struct device *dev);
struct adc_sam_data {
struct adc_context ctx;
const struct device *dev;
/* Pointer to the buffer in the sequence. */
uint16_t *buffer;
/* Pointer to the beginning of a sample. Consider the number of
* channels in the sequence: this buffer changes by that amount
* so all the channels would get repeated.
*/
uint16_t *repeat_buffer;
/* Bit mask of the channels to be sampled. */
uint32_t channels;
/* Index of the channel being sampled. */
uint8_t channel_id;
};
struct adc_sam_cfg {
Afec *regs;
cfg_func_t cfg_func;
const struct atmel_sam_pmc_config clock_cfg;
const struct pinctrl_dev_config *pcfg;
};
static int adc_sam_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
const struct adc_sam_cfg * const cfg = dev->config;
Afec *const afec = cfg->regs;
uint8_t channel_id = channel_cfg->channel_id;
/* Clear the gain bits for the channel. */
afec->AFEC_CGR &= ~(3 << channel_id * 2U);
switch (channel_cfg->gain) {
case ADC_GAIN_1:
/* A value of 0 in this register is a gain of 1. */
break;
case ADC_GAIN_1_2:
afec->AFEC_CGR |= (1 << (channel_id * 2U));
break;
case ADC_GAIN_1_4:
afec->AFEC_CGR |= (2 << (channel_id * 2U));
break;
default:
LOG_ERR("Selected ADC gain is not valid");
return -EINVAL;
}
if (channel_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
LOG_ERR("Selected ADC acquisition time is not valid");
return -EINVAL;
}
if (channel_cfg->reference != ADC_REF_EXTERNAL0) {
LOG_ERR("Selected reference is not valid");
return -EINVAL;
}
if (channel_cfg->differential) {
LOG_ERR("Differential input is not supported");
return -EINVAL;
}
#ifdef AFEC_11147
/* Set single ended channels to unsigned and differential channels
* to signed conversions.
*/
afec->AFEC_EMR &= ~(AFEC_EMR_SIGNMODE(
AFEC_EMR_SIGNMODE_SE_UNSG_DF_SIGN_Val));
#endif
return 0;
}
static void adc_sam_start_conversion(const struct device *dev)
{
const struct adc_sam_cfg *const cfg = dev->config;
struct adc_sam_data *data = dev->data;
Afec *const afec = cfg->regs;
data->channel_id = find_lsb_set(data->channels) - 1;
LOG_DBG("Starting channel %d", data->channel_id);
/* Disable all channels. */
afec->AFEC_CHDR = 0xfff;
afec->AFEC_IDR = 0xfff;
/* Enable the ADC channel. This also enables/selects the channel pin as
* an input to the AFEC (50.5.1 SAM E70 datasheet).
*/
afec->AFEC_CHER = (1 << data->channel_id);
/* Enable the interrupt for the channel. */
afec->AFEC_IER = (1 << data->channel_id);
/* Start the conversions. */
afec->AFEC_CR = AFEC_CR_START;
}
/**
* This is only called once at the beginning of all the conversions,
* all channels as a group.
*/
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct adc_sam_data *data = CONTAINER_OF(ctx, struct adc_sam_data, ctx);
data->channels = ctx->sequence.channels;
adc_sam_start_conversion(data->dev);
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
bool repeat_sampling)
{
struct adc_sam_data *data = CONTAINER_OF(ctx, struct adc_sam_data, ctx);
if (repeat_sampling) {
data->buffer = data->repeat_buffer;
}
}
static int check_buffer_size(const struct adc_sequence *sequence,
uint8_t active_channels)
{
size_t needed_buffer_size;
needed_buffer_size = active_channels * sizeof(uint16_t);
if (sequence->options) {
needed_buffer_size *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < needed_buffer_size) {
LOG_ERR("Provided buffer is too small (%u/%u)",
sequence->buffer_size, needed_buffer_size);
return -ENOMEM;
}
return 0;
}
static int start_read(const struct device *dev,
const struct adc_sequence *sequence)
{
struct adc_sam_data *data = dev->data;
int error = 0;
uint32_t channels = sequence->channels;
data->channels = 0U;
/* Signal an error if the channel selection is invalid (no channels or
* a non-existing one is selected).
*/
if (channels == 0U ||
(channels & (~0UL << NUM_CHANNELS))) {
LOG_ERR("Invalid selection of channels");
return -EINVAL;
}
if (sequence->oversampling != 0U) {
LOG_ERR("Oversampling is not supported");
return -EINVAL;
}
if (sequence->resolution != 12U) {
/* TODO JKW: Support the Enhanced Resolution Mode 50.6.3 page
* 1544.
*/
LOG_ERR("ADC resolution value %d is not valid",
sequence->resolution);
return -EINVAL;
}
uint8_t num_active_channels = 0U;
uint8_t channel = 0U;
while (channels > 0) {
if (channels & 1) {
++num_active_channels;
}
channels >>= 1;
++channel;
}
error = check_buffer_size(sequence, num_active_channels);
if (error) {
return error;
}
/* In the context you have a pointer to the adc_sam_data structure
* only.
*/
data->buffer = sequence->buffer;
data->repeat_buffer = sequence->buffer;
/* At this point we allow the scheduler to do other things while
* we wait for the conversions to complete. This is provided by the
* adc_context functions. However, the caller of this function is
* blocked until the results are in.
*/
adc_context_start_read(&data->ctx, sequence);
error = adc_context_wait_for_completion(&data->ctx);
return error;
}
static int adc_sam_read(const struct device *dev,
const struct adc_sequence *sequence)
{
struct adc_sam_data *data = dev->data;
int error;
adc_context_lock(&data->ctx, false, NULL);
error = start_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
static int adc_sam_init(const struct device *dev)
{
const struct adc_sam_cfg *const cfg = dev->config;
struct adc_sam_data *data = dev->data;
Afec *const afec = cfg->regs;
int retval;
/* Reset the AFEC. */
afec->AFEC_CR = AFEC_CR_SWRST;
afec->AFEC_MR = AFEC_MR_TRGEN_DIS
| AFEC_MR_SLEEP_NORMAL
| AFEC_MR_FWUP_OFF
| AFEC_MR_FREERUN_OFF
| AFEC_MR_PRESCAL(CONF_ADC_PRESCALER)
| AFEC_MR_STARTUP_SUT96
| AFEC_MR_ONE
| AFEC_MR_USEQ_NUM_ORDER;
/* Set all channels CM voltage to Vrefp/2 (512). */
for (int i = 0; i < NUM_CHANNELS; i++) {
afec->AFEC_CSELR = i;
afec->AFEC_COCR = 512;
}
/* Enable PGA and Current Bias. */
afec->AFEC_ACR = AFEC_ACR_IBCTL(1)
#ifdef AFEC_11147
| AFEC_ACR_PGA0EN
| AFEC_ACR_PGA1EN
#endif
;
/* Enable AFEC clock in PMC */
(void)clock_control_on(SAM_DT_PMC_CONTROLLER,
(clock_control_subsys_t)&cfg->clock_cfg);
/* Connect pins to the peripheral */
retval = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
if (retval < 0) {
return retval;
}
cfg->cfg_func(dev);
data->dev = dev;
adc_context_unlock_unconditionally(&data->ctx);
return retval;
}
#ifdef CONFIG_ADC_ASYNC
static int adc_sam_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
struct adc_sam_data *data = dev->data;
int error;
adc_context_lock(&data->ctx, true, async);
error = start_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
#endif
static const struct adc_driver_api adc_sam_api = {
.channel_setup = adc_sam_channel_setup,
.read = adc_sam_read,
#ifdef CONFIG_ADC_ASYNC
.read_async = adc_sam_read_async,
#endif
};
static void adc_sam_isr(const struct device *dev)
{
struct adc_sam_data *data = dev->data;
const struct adc_sam_cfg *const cfg = dev->config;
Afec *const afec = cfg->regs;
uint16_t result;
afec->AFEC_CHDR |= BIT(data->channel_id);
afec->AFEC_IDR |= BIT(data->channel_id);
afec->AFEC_CSELR = AFEC_CSELR_CSEL(data->channel_id);
result = (uint16_t)(afec->AFEC_CDR);
*data->buffer++ = result;
data->channels &= ~BIT(data->channel_id);
if (data->channels) {
adc_sam_start_conversion(dev);
} else {
/* Called once all conversions have completed.*/
adc_context_on_sampling_done(&data->ctx, dev);
}
}
#define ADC_SAM_INIT(n) \
PINCTRL_DT_INST_DEFINE(n); \
static void adc##n##_sam_cfg_func(const struct device *dev); \
\
static const struct adc_sam_cfg adc##n##_sam_cfg = { \
.regs = (Afec *)DT_INST_REG_ADDR(n), \
.cfg_func = adc##n##_sam_cfg_func, \
.clock_cfg = SAM_DT_INST_CLOCK_PMC_CFG(n), \
.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \
}; \
\
static struct adc_sam_data adc##n##_sam_data = { \
ADC_CONTEXT_INIT_TIMER(adc##n##_sam_data, ctx), \
ADC_CONTEXT_INIT_LOCK(adc##n##_sam_data, ctx), \
ADC_CONTEXT_INIT_SYNC(adc##n##_sam_data, ctx), \
}; \
\
DEVICE_DT_INST_DEFINE(n, adc_sam_init, NULL, \
&adc##n##_sam_data, \
&adc##n##_sam_cfg, POST_KERNEL, \
CONFIG_ADC_INIT_PRIORITY, \
&adc_sam_api); \
\
static void adc##n##_sam_cfg_func(const struct device *dev) \
{ \
IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), \
adc_sam_isr, \
DEVICE_DT_INST_GET(n), 0); \
irq_enable(DT_INST_IRQN(n)); \
}
DT_INST_FOREACH_STATUS_OKAY(ADC_SAM_INIT)
``` | /content/code_sandbox/drivers/adc/adc_sam_afec.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,832 |
```c
/*
*
*/
#include <zephyr/logging/log.h>
#include <zephyr/drivers/adc.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/irq.h>
#include <Adc_Sar_Ip_HwAccess.h>
#include <Adc_Sar_Ip.h>
#include <Adc_Sar_Ip_Irq.h>
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
#define DT_DRV_COMPAT nxp_s32_adc_sar
LOG_MODULE_REGISTER(adc_nxp_s32_adc_sar, CONFIG_ADC_LOG_LEVEL);
/* Convert channel of group ADC to channel of physical ADC instance */
#define ADC_NXP_S32_GROUPCHAN_2_PHYCHAN(group, channel) \
(ADC_SAR_IP_HW_REG_SIZE * group + channel)
struct adc_nxp_s32_config {
ADC_Type *base;
uint8_t instance;
uint8_t group_channel;
uint8_t callback_select;
Adc_Sar_Ip_ConfigType *adc_cfg;
void (*irq_config_func)(const struct device *dev);
const struct pinctrl_dev_config *pin_cfg;
};
struct adc_nxp_s32_data {
const struct device *dev;
struct adc_context ctx;
uint16_t *buffer;
uint16_t *buf_end;
uint16_t *repeat_buffer;
uint32_t mask_channels;
uint8_t num_channels;
};
static int adc_nxp_s32_init(const struct device *dev)
{
const struct adc_nxp_s32_config *config = dev->config;
struct adc_nxp_s32_data *data = dev->data;
Adc_Sar_Ip_StatusType status;
/* This array shows max number of channels of each group */
uint8_t map_chan_group[ADC_SAR_IP_INSTANCE_COUNT][ADC_SAR_IP_NUM_GROUP_CHAN]
= FEATURE_ADC_MAX_CHN_COUNT;
data->num_channels = map_chan_group[config->instance][config->group_channel];
if (config->pin_cfg) {
if (pinctrl_apply_state(config->pin_cfg, PINCTRL_STATE_DEFAULT)) {
return -EIO;
}
}
status = Adc_Sar_Ip_Init(config->instance, config->adc_cfg);
if (status) {
return -EIO;
}
#if FEATURE_ADC_HAS_CALIBRATION
status = Adc_Sar_Ip_DoCalibration(config->instance);
if (status) {
return -EIO;
}
#endif
Adc_Sar_Ip_EnableNotifications(config->instance,
config->callback_select ?
ADC_SAR_IP_NOTIF_FLAG_NORMAL_ENDCHAIN
: ADC_SAR_IP_NOTIF_FLAG_NORMAL_EOC);
data->dev = dev;
config->irq_config_func(dev);
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
static int adc_nxp_s32_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
struct adc_nxp_s32_data *data = dev->data;
if (channel_cfg->channel_id >= data->num_channels) {
LOG_ERR("Channel %d is not valid", channel_cfg->channel_id);
return -EINVAL;
}
if (channel_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
LOG_ERR("Unsupported channel acquisition time");
return -ENOTSUP;
}
if (channel_cfg->differential) {
LOG_ERR("Differential channels are not supported");
return -ENOTSUP;
}
if (channel_cfg->gain != ADC_GAIN_1) {
LOG_ERR("Unsupported channel gain %d", channel_cfg->gain);
return -ENOTSUP;
}
if (channel_cfg->reference != ADC_REF_INTERNAL) {
LOG_ERR("Unsupported channel reference");
return -ENOTSUP;
}
return 0;
}
static int adc_nxp_s32_validate_buffer_size(const struct device *dev,
const struct adc_sequence *sequence)
{
uint8_t active_channels = 0;
size_t needed_size;
active_channels = POPCOUNT(sequence->channels);
needed_size = active_channels * sizeof(uint16_t);
if (sequence->options) {
needed_size *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < needed_size) {
return -ENOSPC;
}
return 0;
}
#if FEATURE_ADC_HAS_AVERAGING
static int adc_nxp_s32_set_averaging(const struct device *dev, uint8_t oversampling)
{
const struct adc_nxp_s32_config *config = dev->config;
Adc_Sar_Ip_AvgSelectType avg_sel = ADC_SAR_IP_AVG_4_CONV;
bool avg_en = true;
switch (oversampling) {
case 0:
avg_en = false;
break;
case 2:
avg_sel = ADC_SAR_IP_AVG_4_CONV;
break;
case 3:
avg_sel = ADC_SAR_IP_AVG_8_CONV;
break;
case 4:
avg_sel = ADC_SAR_IP_AVG_16_CONV;
break;
case 5:
avg_sel = ADC_SAR_IP_AVG_32_CONV;
break;
default:
LOG_ERR("Unsupported oversampling value");
return -ENOTSUP;
}
Adc_Sar_Ip_SetAveraging(config->instance, avg_en, avg_sel);
return 0;
}
#endif
#if (ADC_SAR_IP_SET_RESOLUTION == STD_ON)
static int adc_nxp_s32_set_resolution(const struct device *dev, uint8_t adc_resol)
{
const struct adc_nxp_s32_config *config = dev->config;
Adc_Sar_Ip_Resolution resolution;
switch (adc_resol) {
case 8:
resolution = ADC_SAR_IP_RESOLUTION_8;
break;
case 10:
resolution = ADC_SAR_IP_RESOLUTION_10;
break;
case 12:
resolution = ADC_SAR_IP_RESOLUTION_12;
break;
case 14:
resolution = ADC_SAR_IP_RESOLUTION_14;
break;
default:
LOG_ERR("Unsupported resolution");
return -ENOTSUP;
}
Adc_Sar_Ip_SetResolution(config->instance, resolution);
return 0;
}
#endif
static int adc_nxp_s32_start_read_async(const struct device *dev,
const struct adc_sequence *sequence)
{
const struct adc_nxp_s32_config *config = dev->config;
struct adc_nxp_s32_data *data = dev->data;
int error;
uint32_t mask;
uint8_t channel;
if (find_msb_set(sequence->channels) > data->num_channels) {
LOG_ERR("Channels out of bit map");
return -EINVAL;
}
error = adc_nxp_s32_validate_buffer_size(dev, sequence);
if (error) {
LOG_ERR("Buffer size isn't enough");
return -EINVAL;
}
#if FEATURE_ADC_HAS_AVERAGING
error = adc_nxp_s32_set_averaging(dev, sequence->oversampling);
if (error) {
return -ENOTSUP;
}
#else
if (sequence->oversampling) {
LOG_ERR("Oversampling can't be changed");
return -ENOTSUP;
}
#endif
#if (ADC_SAR_IP_SET_RESOLUTION == STD_ON)
error = adc_nxp_s32_set_resolution(dev, sequence->resolution);
if (error) {
return -ENOTSUP;
}
#else
if (sequence->resolution != ADC_SAR_IP_MAX_RESOLUTION) {
LOG_ERR("Resolution can't be changed");
return -ENOTSUP;
}
#endif
if (sequence->calibrate) {
#if FEATURE_ADC_HAS_CALIBRATION
error = Adc_Sar_Ip_DoCalibration(config->instance);
if (error) {
LOG_ERR("Error during calibration");
return -EIO;
}
#else
LOG_ERR("Unsupported calibration");
return -ENOTSUP;
#endif
}
for (int i = 0; i < data->num_channels; i++) {
mask = (sequence->channels >> i) & 0x1;
channel = ADC_NXP_S32_GROUPCHAN_2_PHYCHAN(config->group_channel, i);
if (mask) {
Adc_Sar_Ip_EnableChannelNotifications(config->instance,
channel, ADC_SAR_IP_CHAN_NOTIF_EOC);
Adc_Sar_Ip_EnableChannel(config->instance,
ADC_SAR_IP_CONV_CHAIN_NORMAL, channel);
} else {
Adc_Sar_Ip_DisableChannelNotifications(config->instance,
channel, ADC_SAR_IP_CHAN_NOTIF_EOC);
Adc_Sar_Ip_DisableChannel(config->instance,
ADC_SAR_IP_CONV_CHAIN_NORMAL, channel);
}
}
/* Save ADC sequence sampling buffer and its end pointer address */
data->buffer = sequence->buffer;
if (config->callback_select) {
data->buf_end = data->buffer + sequence->buffer_size / sizeof(uint16_t);
}
adc_context_start_read(&data->ctx, sequence);
error = adc_context_wait_for_completion(&data->ctx);
return error;
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct adc_nxp_s32_data *data = CONTAINER_OF(ctx, struct adc_nxp_s32_data, ctx);
const struct adc_nxp_s32_config *config = data->dev->config;
data->mask_channels = ctx->sequence.channels;
data->repeat_buffer = data->buffer;
Adc_Sar_Ip_StartConversion(config->instance, ADC_SAR_IP_CONV_CHAIN_NORMAL);
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
bool repeat_sampling)
{
struct adc_nxp_s32_data *const data =
CONTAINER_OF(ctx, struct adc_nxp_s32_data, ctx);
if (repeat_sampling) {
data->buffer = data->repeat_buffer;
}
}
static int adc_nxp_s32_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
struct adc_nxp_s32_data *data = dev->data;
int error = 0;
adc_context_lock(&data->ctx, async ? true : false, async);
error = adc_nxp_s32_start_read_async(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
static int adc_nxp_s32_read(const struct device *dev,
const struct adc_sequence *sequence)
{
return adc_nxp_s32_read_async(dev, sequence, NULL);
}
static void adc_nxp_s32_isr(const struct device *dev)
{
const struct adc_nxp_s32_config *config = dev->config;
Adc_Sar_Ip_IRQHandler(config->instance);
}
#define ADC_NXP_S32_DRIVER_API(n) \
static const struct adc_driver_api adc_nxp_s32_driver_api_##n = { \
.channel_setup = adc_nxp_s32_channel_setup, \
.read = adc_nxp_s32_read, \
IF_ENABLED(CONFIG_ADC_ASYNC, (.read_async = adc_nxp_s32_read_async,))\
.ref_internal = DT_INST_PROP(n, vref_mv), \
};
#define ADC_NXP_S32_IRQ_CONFIG(n) \
static void adc_nxp_s32_adc_sar_config_func_##n(const struct device *dev)\
{ \
IRQ_CONNECT(DT_INST_IRQN(n), \
DT_INST_IRQ(n, priority), \
adc_nxp_s32_isr, DEVICE_DT_INST_GET(n), 0); \
irq_enable(DT_INST_IRQN(n)); \
};
#define ADC_NXP_S32_CALLBACK_DEFINE(n) \
void adc_nxp_s32_normal_end_conversion_callback##n(const uint16 PhysicalChanId)\
{ \
const struct device *dev = DEVICE_DT_INST_GET(n); \
const struct adc_nxp_s32_config *config = dev->config; \
struct adc_nxp_s32_data *data = dev->data; \
uint16_t result = 0; \
\
result = Adc_Sar_Ip_GetConvData(n, PhysicalChanId); \
LOG_DBG("End conversion, channel %d, group %d, result = %d", \
ADC_SAR_IP_CHAN_2_BIT(PhysicalChanId), \
config->group_channel, result); \
\
*data->buffer++ = result; \
data->mask_channels &= \
~BIT(ADC_SAR_IP_CHAN_2_BIT(PhysicalChanId)); \
\
if (!data->mask_channels) { \
adc_context_on_sampling_done(&data->ctx, \
(struct device *)dev); \
} \
}; \
void adc_nxp_s32_normal_endchain_callback##n(void) \
{ \
const struct device *dev = DEVICE_DT_INST_GET(n); \
const struct adc_nxp_s32_config *config = dev->config; \
struct adc_nxp_s32_data *data = dev->data; \
uint16_t result = 0; \
uint8_t channel; \
\
while (data->mask_channels) { \
channel = ADC_NXP_S32_GROUPCHAN_2_PHYCHAN( \
config->group_channel, \
(find_lsb_set(data->mask_channels)-1)); \
result = Adc_Sar_Ip_GetConvData(n, channel); \
LOG_DBG("End chain, channel %d, group %d, result = %d", \
ADC_SAR_IP_CHAN_2_BIT(channel), \
config->group_channel, result); \
if (data->buffer < data->buf_end) { \
*data->buffer++ = result; \
} \
data->mask_channels &= \
~BIT(ADC_SAR_IP_CHAN_2_BIT(channel)); \
} \
\
adc_context_on_sampling_done(&data->ctx, (struct device *)dev); \
};
#define ADC_NXP_S32_INSTANCE_CHECK(indx, n) \
((DT_INST_REG_ADDR(n) == IP_ADC_##indx##_BASE) ? indx : 0)
#define ADC_NXP_S32_GET_INSTANCE(n) \
LISTIFY(__DEBRACKET ADC_INSTANCE_COUNT, ADC_NXP_S32_INSTANCE_CHECK, (|), n)
#define ADC_NXP_S32_INIT_DEVICE(n) \
ADC_NXP_S32_DRIVER_API(n) \
ADC_NXP_S32_CALLBACK_DEFINE(n) \
ADC_NXP_S32_IRQ_CONFIG(n) \
COND_CODE_1(DT_INST_NUM_PINCTRL_STATES(n), \
(PINCTRL_DT_INST_DEFINE(n);), (EMPTY)) \
static const Adc_Sar_Ip_ConfigType adc_nxp_s32_default_config##n = \
{ \
.ConvMode = ADC_SAR_IP_CONV_MODE_ONESHOT, \
.AdcResolution = ADC_SAR_IP_RESOLUTION_14, \
.HighSpeedConvEn = DT_INST_PROP(n, high_speed), \
.EndOfNormalChainNotification = \
adc_nxp_s32_normal_endchain_callback##n, \
.EndOfConvNotification = \
adc_nxp_s32_normal_end_conversion_callback##n, \
}; \
static struct adc_nxp_s32_data adc_nxp_s32_data_##n = { \
ADC_CONTEXT_INIT_TIMER(adc_nxp_s32_data_##n, ctx), \
ADC_CONTEXT_INIT_LOCK(adc_nxp_s32_data_##n, ctx), \
ADC_CONTEXT_INIT_SYNC(adc_nxp_s32_data_##n, ctx), \
}; \
static const struct adc_nxp_s32_config adc_nxp_s32_config_##n = { \
.base = (ADC_Type *)DT_INST_REG_ADDR(n), \
.instance = ADC_NXP_S32_GET_INSTANCE(n), \
.group_channel = DT_INST_ENUM_IDX(n, group_channel), \
.callback_select = DT_INST_ENUM_IDX(n, callback_select), \
.adc_cfg = (Adc_Sar_Ip_ConfigType *)&adc_nxp_s32_default_config##n,\
.irq_config_func = adc_nxp_s32_adc_sar_config_func_##n, \
.pin_cfg = COND_CODE_1(DT_INST_NUM_PINCTRL_STATES(n), \
(PINCTRL_DT_INST_DEV_CONFIG_GET(n)), (NULL)), \
}; \
DEVICE_DT_INST_DEFINE(n, \
&adc_nxp_s32_init, \
NULL, \
&adc_nxp_s32_data_##n, \
&adc_nxp_s32_config_##n, \
POST_KERNEL, \
CONFIG_ADC_INIT_PRIORITY, \
&adc_nxp_s32_driver_api_##n);
DT_INST_FOREACH_STATUS_OKAY(ADC_NXP_S32_INIT_DEVICE)
``` | /content/code_sandbox/drivers/adc/adc_nxp_s32_adc_sar.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,751 |
```c
/*
*
*/
#define DT_DRV_COMPAT renesas_smartbond_adc
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include <DA1469xAB.h>
#include <da1469x_pd.h>
#include "adc_context.h"
#include <zephyr/dt-bindings/adc/smartbond-adc.h>
#define LOG_LEVEL CONFIG_ADC_LOG_LEVEL
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/clock_control/smartbond_clock_control.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/pm/device.h>
#include <zephyr/pm/policy.h>
#include <zephyr/pm/device_runtime.h>
LOG_MODULE_REGISTER(adc_smartbond_adc);
struct adc_smartbond_cfg {
const struct pinctrl_dev_config *pcfg;
};
struct adc_smartbond_data {
struct adc_context ctx;
/* Buffer to store channel data */
uint16_t *buffer;
/* Copy of channel mask from sequence */
uint32_t channel_read_mask;
/* Number of bits in sequence channels */
uint8_t sequence_channel_count;
/* Index in buffer to store current value to */
uint8_t result_index;
};
#define SMARTBOND_ADC_CHANNEL_COUNT 8
/*
* Channels are handled by software this array holds individual
* settings for each channel that must be applied before conversion.
*/
struct adc_smartbond_channel_cfg {
uint32_t gp_adc_ctrl_reg;
uint32_t gp_adc_ctrl2_reg;
} m_channels[SMARTBOND_ADC_CHANNEL_COUNT];
/* Implementation of the ADC driver API function: adc_channel_setup. */
static int adc_smartbond_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
uint8_t channel_id = channel_cfg->channel_id;
struct adc_smartbond_channel_cfg *config = &m_channels[channel_id];
if (channel_id >= SMARTBOND_ADC_CHANNEL_COUNT) {
return -EINVAL;
}
if (channel_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
LOG_ERR("Selected ADC acquisition time is not valid");
return -EINVAL;
}
if (channel_cfg->differential) {
if (channel_cfg->input_positive != SMARTBOND_GPADC_P1_09 &&
channel_cfg->input_positive != SMARTBOND_GPADC_P0_08) {
LOG_ERR("Differential channels supported only for P1_09 and P0_08");
return -EINVAL;
}
}
switch (channel_cfg->gain) {
case ADC_GAIN_1_3:
/* Turn on attenuator and increase sample time to 32 cycles */
config->gp_adc_ctrl2_reg = 0x101;
break;
case ADC_GAIN_1:
config->gp_adc_ctrl2_reg = 0;
break;
default:
LOG_ERR("Selected ADC gain is not valid");
return -EINVAL;
}
switch (channel_cfg->reference) {
case ADC_REF_INTERNAL:
break;
default:
LOG_ERR("Selected ADC reference is not valid");
return -EINVAL;
}
config->gp_adc_ctrl_reg =
channel_cfg->input_positive << GPADC_GP_ADC_CTRL_REG_GP_ADC_SEL_Pos;
if (!channel_cfg->differential) {
config->gp_adc_ctrl_reg |= GPADC_GP_ADC_CTRL_REG_GP_ADC_SE_Msk;
}
return 0;
}
static inline void gpadc_smartbond_pm_policy_state_lock_get(const struct device *dev,
struct adc_smartbond_data *data)
{
#if defined(CONFIG_PM_DEVICE)
pm_device_runtime_get(dev);
/*
* Prevent the SoC from entering the normal sleep state.
*/
pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
#endif
}
static inline void gpadc_smartbond_pm_policy_state_lock_put(const struct device *dev,
struct adc_smartbond_data *data)
{
#if defined(CONFIG_PM_DEVICE)
/*
* Allow the SoC to enter the normal sleep state once GPADC is done.
*/
pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
pm_device_runtime_put(dev);
#endif
}
#define PER_CHANNEL_ADC_CONFIG_MASK (GPADC_GP_ADC_CTRL_REG_GP_ADC_SEL_Msk | \
GPADC_GP_ADC_CTRL_REG_GP_ADC_SE_Msk)
static int pop_count(uint32_t n)
{
return __builtin_popcount(n);
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
uint32_t val;
struct adc_smartbond_data *data =
CONTAINER_OF(ctx, struct adc_smartbond_data, ctx);
/* Extract lower channel from sequence mask */
int current_channel = u32_count_trailing_zeros(data->channel_read_mask);
if (ctx->sequence.calibrate) {
/* TODO: Add calibration code */
} else {
val = GPADC->GP_ADC_CTRL_REG & ~PER_CHANNEL_ADC_CONFIG_MASK;
val |= m_channels[current_channel].gp_adc_ctrl_reg;
val |= GPADC_GP_ADC_CTRL_REG_GP_ADC_START_Msk |
GPADC_GP_ADC_CTRL_REG_GP_ADC_MINT_Msk;
GPADC->GP_ADC_CTRL2_REG = m_channels[current_channel].gp_adc_ctrl2_reg;
GPADC->GP_ADC_CTRL_REG = val;
}
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
bool repeat)
{
struct adc_smartbond_data *data =
CONTAINER_OF(ctx, struct adc_smartbond_data, ctx);
if (!repeat) {
data->buffer += data->sequence_channel_count;
}
}
static int check_buffer_size(const struct adc_sequence *sequence,
uint8_t active_channels)
{
size_t needed_buffer_size;
needed_buffer_size = active_channels * sizeof(uint16_t);
if (sequence->options) {
needed_buffer_size *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < needed_buffer_size) {
LOG_ERR("Provided buffer is too small (%u/%u)",
sequence->buffer_size, needed_buffer_size);
return -ENOMEM;
}
return 0;
}
static int start_read(const struct device *dev,
const struct adc_sequence *sequence)
{
int error;
struct adc_smartbond_data *data = dev->data;
if (sequence->oversampling > 7U) {
LOG_ERR("Invalid oversampling");
return -EINVAL;
}
if ((sequence->channels == 0) ||
((sequence->channels & ~BIT_MASK(SMARTBOND_ADC_CHANNEL_COUNT)) != 0)) {
LOG_ERR("Channel scanning is not supported");
return -EINVAL;
}
if (sequence->resolution < 8 || sequence->resolution > 15) {
LOG_ERR("ADC resolution value %d is not valid",
sequence->resolution);
return -EINVAL;
}
error = check_buffer_size(sequence, 1);
if (error) {
return error;
}
data->buffer = sequence->buffer;
data->channel_read_mask = sequence->channels;
data->sequence_channel_count = pop_count(sequence->channels);
data->result_index = 0;
adc_context_start_read(&data->ctx, sequence);
error = adc_context_wait_for_completion(&data->ctx);
return error;
}
static void adc_smartbond_isr(const struct device *dev)
{
struct adc_smartbond_data *data = dev->data;
int current_channel = u32_count_trailing_zeros(data->channel_read_mask);
GPADC->GP_ADC_CLEAR_INT_REG = 0;
/* Store current channel value, result is left justified, move bits right */
data->buffer[data->result_index++] = ((uint16_t)GPADC->GP_ADC_RESULT_REG) >>
(16 - data->ctx.sequence.resolution);
/* Exclude channel from mask for further reading */
data->channel_read_mask ^= 1 << current_channel;
if (data->channel_read_mask == 0) {
gpadc_smartbond_pm_policy_state_lock_put(dev, data);
adc_context_on_sampling_done(&data->ctx, dev);
} else {
adc_context_start_sampling(&data->ctx);
}
LOG_DBG("%s ISR triggered.", dev->name);
}
/* Implementation of the ADC driver API function: adc_read. */
static int adc_smartbond_read(const struct device *dev,
const struct adc_sequence *sequence)
{
int error;
struct adc_smartbond_data *data = dev->data;
adc_context_lock(&data->ctx, false, NULL);
gpadc_smartbond_pm_policy_state_lock_get(dev, data);
error = start_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
#if defined(CONFIG_ADC_ASYNC)
/* Implementation of the ADC driver API function: adc_read_sync. */
static int adc_smartbond_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
struct adc_smartbond_data *data = dev->data;
int error;
adc_context_lock(&data->ctx, true, async);
gpadc_smartbond_pm_policy_state_lock_get(dev, data);
error = start_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
#endif /* CONFIG_ADC_ASYNC */
static int gpadc_smartbond_resume(const struct device *dev)
{
int ret, rate;
const struct adc_smartbond_cfg *config = dev->config;
const struct device *clock_dev = DEVICE_DT_GET(DT_NODELABEL(osc));
da1469x_pd_acquire(MCU_PD_DOMAIN_PER);
/* Get current clock to determine GP_ADC_EN_DEL */
clock_control_get_rate(clock_dev, (clock_control_subsys_t)SMARTBOND_CLK_SYS_CLK, &rate);
GPADC->GP_ADC_CTRL3_REG = (rate/1600000)&0xff;
GPADC->GP_ADC_CTRL_REG = GPADC_GP_ADC_CTRL_REG_GP_ADC_EN_Msk;
/*
* Configure dt provided device signals when available.
* pinctrl is optional so ENOENT is not setup failure.
*/
ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
if (ret < 0 && ret != -ENOENT) {
/* Disable the GPADC LDO */
GPADC->GP_ADC_CTRL_REG = 0;
/* Release the peripheral domain */
da1469x_pd_release(MCU_PD_DOMAIN_PER);
LOG_ERR("ADC pinctrl setup failed (%d)", ret);
return ret;
}
return 0;
}
#ifdef CONFIG_PM_DEVICE
static int gpadc_smartbond_suspend(const struct device *dev)
{
int ret;
const struct adc_smartbond_cfg *config = dev->config;
/* Disable the GPADC LDO */
GPADC->GP_ADC_CTRL_REG = 0;
/* Release the peripheral domain */
da1469x_pd_release(MCU_PD_DOMAIN_PER);
/*
* Configure dt provided device signals for sleep.
* pinctrl is optional so ENOENT is not setup failure.
*/
ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_SLEEP);
if (ret < 0 && ret != -ENOENT) {
LOG_WRN("Failed to configure the GPADC pins to inactive state");
return ret;
}
return 0;
}
static int gpadc_smartbond_pm_action(const struct device *dev,
enum pm_device_action action)
{
int ret;
switch (action) {
case PM_DEVICE_ACTION_RESUME:
ret = gpadc_smartbond_resume(dev);
break;
case PM_DEVICE_ACTION_SUSPEND:
ret = gpadc_smartbond_suspend(dev);
break;
default:
return -ENOTSUP;
}
return ret;
}
#endif /* CONFIG_PM_DEVICE */
static int adc_smartbond_init(const struct device *dev)
{
int ret;
struct adc_smartbond_data *data = dev->data;
#ifdef CONFIG_PM_DEVICE_RUNTIME
/* Make sure device state is marked as suspended */
pm_device_init_suspended(dev);
ret = pm_device_runtime_enable(dev);
#else
ret = gpadc_smartbond_resume(dev);
#endif
IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority),
adc_smartbond_isr, DEVICE_DT_INST_GET(0), 0);
NVIC_ClearPendingIRQ(DT_INST_IRQN(0));
NVIC_EnableIRQ(DT_INST_IRQN(0));
adc_context_unlock_unconditionally(&data->ctx);
return ret;
}
static const struct adc_driver_api adc_smartbond_driver_api = {
.channel_setup = adc_smartbond_channel_setup,
.read = adc_smartbond_read,
#ifdef CONFIG_ADC_ASYNC
.read_async = adc_smartbond_read_async,
#endif
.ref_internal = 1200,
};
/*
* There is only one instance on supported SoCs, so inst is guaranteed
* to be 0 if any instance is okay. (We use adc_0 above, so the driver
* is relying on the numeric instance value in a way that happens to
* be safe.)
*
* Just in case that assumption becomes invalid in the future, we use
* a BUILD_ASSERT().
*/
#define ADC_INIT(inst) \
BUILD_ASSERT((inst) == 0, \
"multiple instances not supported"); \
PINCTRL_DT_INST_DEFINE(inst); \
static const struct adc_smartbond_cfg adc_smartbond_cfg_##inst = {\
.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst), \
}; \
static struct adc_smartbond_data adc_smartbond_data_##inst = { \
ADC_CONTEXT_INIT_TIMER(adc_smartbond_data_##inst, ctx), \
ADC_CONTEXT_INIT_LOCK(adc_smartbond_data_##inst, ctx), \
ADC_CONTEXT_INIT_SYNC(adc_smartbond_data_##inst, ctx), \
}; \
PM_DEVICE_DT_INST_DEFINE(inst, gpadc_smartbond_pm_action); \
DEVICE_DT_INST_DEFINE(inst, \
adc_smartbond_init, \
PM_DEVICE_DT_INST_GET(inst), \
&adc_smartbond_data_##inst, \
&adc_smartbond_cfg_##inst, \
POST_KERNEL, \
CONFIG_ADC_INIT_PRIORITY, \
&adc_smartbond_driver_api);
DT_INST_FOREACH_STATUS_OKAY(ADC_INIT)
``` | /content/code_sandbox/drivers/adc/adc_smartbond_gpadc.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,042 |
```c
/*
*
*/
#include <zephyr/drivers/adc.h>
int adc_gain_invert(enum adc_gain gain,
int32_t *value)
{
struct gain_desc {
uint8_t mul;
uint8_t div;
};
static const struct gain_desc gains[] = {
[ADC_GAIN_1_6] = {.mul = 6, .div = 1},
[ADC_GAIN_1_5] = {.mul = 5, .div = 1},
[ADC_GAIN_1_4] = {.mul = 4, .div = 1},
[ADC_GAIN_1_3] = {.mul = 3, .div = 1},
[ADC_GAIN_2_5] = {.mul = 5, .div = 2},
[ADC_GAIN_1_2] = {.mul = 2, .div = 1},
[ADC_GAIN_2_3] = {.mul = 3, .div = 2},
[ADC_GAIN_4_5] = {.mul = 5, .div = 4},
[ADC_GAIN_1] = {.mul = 1, .div = 1},
[ADC_GAIN_2] = {.mul = 1, .div = 2},
[ADC_GAIN_3] = {.mul = 1, .div = 3},
[ADC_GAIN_4] = {.mul = 1, .div = 4},
[ADC_GAIN_6] = {.mul = 1, .div = 6},
[ADC_GAIN_8] = {.mul = 1, .div = 8},
[ADC_GAIN_12] = {.mul = 1, .div = 12},
[ADC_GAIN_16] = {.mul = 1, .div = 16},
[ADC_GAIN_24] = {.mul = 1, .div = 24},
[ADC_GAIN_32] = {.mul = 1, .div = 32},
[ADC_GAIN_64] = {.mul = 1, .div = 64},
[ADC_GAIN_128] = {.mul = 1, .div = 128},
};
int rv = -EINVAL;
if ((uint8_t)gain < ARRAY_SIZE(gains)) {
const struct gain_desc *gdp = &gains[gain];
if ((gdp->mul != 0) && (gdp->div != 0)) {
*value = (gdp->mul * *value) / gdp->div;
rv = 0;
}
}
return rv;
}
``` | /content/code_sandbox/drivers/adc/adc_common.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 589 |
```unknown
# MCP320x ADC configuration options
config ADC_MCP320X
bool "MCP3204/MCP3208 driver"
default y
depends on DT_HAS_MICROCHIP_MCP3204_ENABLED || DT_HAS_MICROCHIP_MCP3208_ENABLED
select SPI
help
Enable MCP3204/MCP3208 ADC driver.
The MCP3204/MCP3208 are 4/8 channel 12-bit A/D converters
with SPI interface.
if ADC_MCP320X
config ADC_MCP320X_ACQUISITION_THREAD_STACK_SIZE
int "Stack size for the ADC data acquisition thread"
default 512
help
Size of the stack used for the internal data acquisition
thread.
config ADC_MCP320X_ACQUISITION_THREAD_PRIO
int "Priority for the ADC data acquisition thread"
default 0
help
Priority level for the internal ADC data acquisition thread.
endif # ADC_MCP320X
``` | /content/code_sandbox/drivers/adc/Kconfig.mcp320x | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 202 |
```unknown
config ADC_NXP_S32_ADC_SAR
bool "NXP S32 ADC SAR driver"
default y
depends on DT_HAS_NXP_S32_ADC_SAR_ENABLED
help
This option enables the NXP S32 ADC SAR driver.
``` | /content/code_sandbox/drivers/adc/Kconfig.nxp_s32 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 54 |
```c
/*
*
*/
#define DT_DRV_COMPAT raspberrypi_pico_adc
#include <zephyr/drivers/adc.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/reset.h>
#include <zephyr/logging/log.h>
#include <hardware/adc.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(adc_rpi, CONFIG_ADC_LOG_LEVEL);
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
#define ADC_RPI_MAX_RESOLUTION 12
/** Bits numbers of rrobin register mean an available number of channels. */
#define ADC_RPI_CHANNEL_NUM (ADC_CS_RROBIN_MSB - ADC_CS_RROBIN_LSB + 1)
/**
* @brief RaspberryPi Pico ADC config
*
* This structure contains constant data for given instance of RaspberryPi Pico ADC.
*/
struct adc_rpi_config {
/** Number of supported channels */
uint8_t num_channels;
/** pinctrl configs */
const struct pinctrl_dev_config *pcfg;
/** function pointer to irq setup */
void (*irq_configure)(void);
/** Pointer to clock controller device */
const struct device *clk_dev;
/** Clock id of ADC clock */
clock_control_subsys_t clk_id;
/** Reset controller config */
const struct reset_dt_spec reset;
};
/**
* @brief RaspberryPi Pico ADC data
*
* This structure contains data structures used by a RaspberryPi Pico ADC.
*/
struct adc_rpi_data {
/** Structure that handle state of ongoing read operation */
struct adc_context ctx;
/** Pointer to RaspberryPi Pico ADC own device structure */
const struct device *dev;
/** Pointer to memory where next sample will be written */
uint16_t *buf;
/** Pointer to where will be data stored in case of repeated sampling */
uint16_t *repeat_buf;
/** Mask with channels that will be sampled */
uint32_t channels;
};
static inline void adc_start_once(void)
{
hw_set_bits(&adc_hw->cs, ADC_CS_START_ONCE_BITS);
}
static inline uint16_t adc_get_result(void)
{
return (uint16_t)adc_hw->result;
}
static inline bool adc_get_err(void)
{
return (adc_hw->cs & ADC_CS_ERR_BITS) ? true : false;
}
static inline void adc_clear_errors(void)
{
/* write 1 to clear */
hw_set_bits(&adc_hw->fcs, ADC_FCS_OVER_BITS);
hw_set_bits(&adc_hw->fcs, ADC_FCS_UNDER_BITS);
hw_set_bits(&adc_hw->fcs, ADC_FCS_ERR_BITS);
hw_set_bits(&adc_hw->cs, ADC_CS_ERR_STICKY_BITS);
}
static inline void adc_enable(void)
{
adc_hw->cs = ADC_CS_EN_BITS;
while (!(adc_hw->cs & ADC_CS_READY_BITS)) {
;
}
}
static int adc_rpi_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
const struct adc_rpi_config *config = dev->config;
if (channel_cfg->channel_id >= config->num_channels) {
LOG_ERR("unsupported channel id '%d'", channel_cfg->channel_id);
return -ENOTSUP;
}
if (channel_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
LOG_ERR("Acquisition time is not valid");
return -EINVAL;
}
if (channel_cfg->differential) {
LOG_ERR("unsupported differential mode");
return -ENOTSUP;
}
if (channel_cfg->gain != ADC_GAIN_1) {
LOG_ERR("Gain is not valid");
return -EINVAL;
}
return 0;
}
/**
* @brief Check if buffer in @p sequence is big enough to hold all ADC samples
*
* @param dev RaspberryPi Pico ADC device
* @param sequence ADC sequence description
*
* @return 0 on success
* @return -ENOMEM if buffer is not big enough
*/
static int adc_rpi_check_buffer_size(const struct device *dev,
const struct adc_sequence *sequence)
{
const struct adc_rpi_config *config = dev->config;
uint8_t channels = 0;
size_t needed;
uint32_t mask;
for (mask = BIT(config->num_channels - 1); mask != 0; mask >>= 1) {
if (mask & sequence->channels) {
channels++;
}
}
needed = channels * sizeof(uint16_t);
if (sequence->options) {
needed *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < needed) {
return -ENOMEM;
}
return 0;
}
/**
* @brief Start processing read request
*
* @param dev RaspberryPi Pico ADC device
* @param sequence ADC sequence description
*
* @return 0 on success
* @return -ENOTSUP if requested resolution or channel is out side of supported
* range
* @return -ENOMEM if buffer is not big enough
* (see @ref adc_rpi_check_buffer_size)
* @return other error code returned by adc_context_wait_for_completion
*/
static int adc_rpi_start_read(const struct device *dev,
const struct adc_sequence *sequence)
{
const struct adc_rpi_config *config = dev->config;
struct adc_rpi_data *data = dev->data;
int err;
if (sequence->resolution > ADC_RPI_MAX_RESOLUTION ||
sequence->resolution == 0) {
LOG_ERR("unsupported resolution %d", sequence->resolution);
return -ENOTSUP;
}
if (find_msb_set(sequence->channels) > config->num_channels) {
LOG_ERR("unsupported channels in mask: 0x%08x",
sequence->channels);
return -ENOTSUP;
}
err = adc_rpi_check_buffer_size(dev, sequence);
if (err) {
LOG_ERR("buffer size too small");
return err;
}
data->buf = sequence->buffer;
adc_context_start_read(&data->ctx, sequence);
return adc_context_wait_for_completion(&data->ctx);
}
/**
* Interrupt handler
*/
static void adc_rpi_isr(const struct device *dev)
{
struct adc_rpi_data *data = dev->data;
uint16_t result;
uint8_t ainsel;
/* Fetch result */
result = adc_get_result();
ainsel = adc_get_selected_input();
/* Drain FIFO */
while (!adc_fifo_is_empty()) {
(void)adc_fifo_get();
}
/* Abort converting if error detected. */
if (adc_get_err()) {
adc_context_complete(&data->ctx, -EIO);
return;
}
/* Copy to buffer and mark this channel as completed to channels bitmap. */
*data->buf++ = result;
data->channels &= ~(BIT(ainsel));
/* Notify result if all data gathered. */
if (data->channels == 0) {
adc_context_on_sampling_done(&data->ctx, dev);
return;
}
/* Kick next channel conversion */
ainsel = (uint8_t)(find_lsb_set(data->channels) - 1);
adc_select_input(ainsel);
adc_start_once();
}
static int adc_rpi_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
struct adc_rpi_data *data = dev->data;
int err;
adc_context_lock(&data->ctx, async ? true : false, async);
err = adc_rpi_start_read(dev, sequence);
adc_context_release(&data->ctx, err);
return err;
}
static int adc_rpi_read(const struct device *dev,
const struct adc_sequence *sequence)
{
return adc_rpi_read_async(dev, sequence, NULL);
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct adc_rpi_data *data = CONTAINER_OF(ctx, struct adc_rpi_data,
ctx);
data->channels = ctx->sequence.channels;
data->repeat_buf = data->buf;
adc_clear_errors();
/* Find next channel and start conversion */
adc_select_input(find_lsb_set(data->channels) - 1);
adc_start_once();
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
bool repeat_sampling)
{
struct adc_rpi_data *data = CONTAINER_OF(ctx, struct adc_rpi_data,
ctx);
if (repeat_sampling) {
data->buf = data->repeat_buf;
}
}
/**
* @brief Function called on init for each RaspberryPi Pico ADC device. It setups all
* channels to return constant 0 mV and create acquisition thread.
*
* @param dev RaspberryPi Pico ADC device
*
* @return 0 on success
*/
static int adc_rpi_init(const struct device *dev)
{
const struct adc_rpi_config *config = dev->config;
struct adc_rpi_data *data = dev->data;
int ret;
ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
if (ret < 0) {
return ret;
}
ret = clock_control_on(config->clk_dev, config->clk_id);
if (ret < 0) {
return ret;
}
ret = reset_line_toggle_dt(&config->reset);
if (ret < 0) {
return ret;
}
config->irq_configure();
/*
* Configure the FIFO control register.
* Set the threshold as 1 for getting notification immediately
* on converting completed.
*/
adc_fifo_setup(true, false, 1, true, true);
/* Set max speed to conversion */
adc_set_clkdiv(0.f);
/* Enable ADC and wait becoming READY */
adc_enable();
/* Enable FIFO interrupt */
adc_irq_set_enabled(true);
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
#define IRQ_CONFIGURE_FUNC(idx) \
static void adc_rpi_configure_func_##idx(void) \
{ \
IRQ_CONNECT(DT_INST_IRQN(idx), DT_INST_IRQ(idx, priority), \
adc_rpi_isr, DEVICE_DT_INST_GET(idx), 0); \
irq_enable(DT_INST_IRQN(idx)); \
}
#define IRQ_CONFIGURE_DEFINE(idx) .irq_configure = adc_rpi_configure_func_##idx
#define ADC_RPI_INIT(idx) \
IRQ_CONFIGURE_FUNC(idx) \
PINCTRL_DT_INST_DEFINE(idx); \
static struct adc_driver_api adc_rpi_api_##idx = { \
.channel_setup = adc_rpi_channel_setup, \
.read = adc_rpi_read, \
.ref_internal = DT_INST_PROP(idx, vref_mv), \
IF_ENABLED(CONFIG_ADC_ASYNC, (.read_async = adc_rpi_read_async,)) \
}; \
static const struct adc_rpi_config adc_rpi_config_##idx = { \
.num_channels = ADC_RPI_CHANNEL_NUM, \
.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(idx), \
.clk_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(idx)), \
.clk_id = (clock_control_subsys_t)DT_INST_PHA_BY_IDX(idx, clocks, 0, clk_id), \
.reset = RESET_DT_SPEC_INST_GET(idx), \
IRQ_CONFIGURE_DEFINE(idx), \
}; \
static struct adc_rpi_data adc_rpi_data_##idx = { \
ADC_CONTEXT_INIT_TIMER(adc_rpi_data_##idx, ctx), \
ADC_CONTEXT_INIT_LOCK(adc_rpi_data_##idx, ctx), \
ADC_CONTEXT_INIT_SYNC(adc_rpi_data_##idx, ctx), \
.dev = DEVICE_DT_INST_GET(idx), \
}; \
\
DEVICE_DT_INST_DEFINE(idx, adc_rpi_init, NULL, \
&adc_rpi_data_##idx, \
&adc_rpi_config_##idx, POST_KERNEL, \
CONFIG_ADC_INIT_PRIORITY, \
&adc_rpi_api_##idx)
DT_INST_FOREACH_STATUS_OKAY(ADC_RPI_INIT);
``` | /content/code_sandbox/drivers/adc/adc_rpi_pico.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,592 |
```c
/*
*
*/
#define DT_DRV_COMPAT nuvoton_npcx_adc
#include <assert.h>
#include <zephyr/drivers/adc.h>
#include <zephyr/drivers/adc/adc_npcx_threshold.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/kernel.h>
#include <zephyr/pm/policy.h>
#include <soc.h>
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(adc_npcx, CONFIG_ADC_LOG_LEVEL);
/* ADC speed/delay values during initialization */
#define ADC_REGULAR_DLY_VAL 0x03
#define ADC_REGULAR_ADCCNF2_VAL 0x8B07
#define ADC_REGULAR_GENDLY_VAL 0x0100
#define ADC_REGULAR_MEAST_VAL 0x0001
/* ADC targeted operating frequency (2MHz) */
#define NPCX_ADC_CLK 2000000
/* ADC conversion mode */
#define NPCX_ADC_CHN_CONVERSION_MODE 0
#define NPCX_ADC_SCAN_CONVERSION_MODE 1
/* Max channel number to be converted in ADCCS */
#define NPCX_ADCCS_MAX_CHANNEL_COUNT 16
#define ADC_NPCX_THRVAL_RESOLUTION 10
#define ADC_NPCX_THRVAL_MAX BIT_MASK(ADC_NPCX_THRVAL_RESOLUTION)
/* Device config */
struct adc_npcx_config {
/* adc controller base address */
uintptr_t base;
/* clock configuration */
struct npcx_clk_cfg clk_cfg;
/* the number of ADC channels */
const uint8_t channel_count;
/* amount of thresholds supported */
const uint8_t threshold_count;
/* routine for configuring ADC's ISR */
void (*irq_cfg_func)(void);
const struct pinctrl_dev_config *pcfg;
};
struct adc_npcx_threshold_control {
/*
* Selects ADC channel number, for which the measured data is compared
* for threshold detection.
*/
uint8_t chnsel;
/*
* Sets relation between measured value and assetion threshold value.
* in thrval:
* 0: Threshold event is generated if Measured data > thrval.
* 1: Threshold event is generated if Measured data <= thrval.
*/
bool l_h;
/* Sets the threshold value to which measured data is compared. */
uint16_t thrval;
/*
* Pointer of work queue item to be notified when threshold assertion
* occurs.
*/
struct k_work *work;
};
struct adc_npcx_threshold_data {
/*
* While threshold interruption is enabled we need to resume to repetitive
* sampling mode after adc_npcx_read is called. This variable records
* channels being used in repetitive mode in order to set ADC registers
* back to threshold detection when adc_npcx_read is completed.
*/
uint32_t repetitive_channels;
/*
* While threshold interruption is enabled, adc_npcx_read must disable
* all active threshold running to avoid race condition, this variable
* helps restore active threshods after adc_npcs_read has finnished.
*/
uint8_t active_thresholds;
/* This array holds current configuration for each threshold. */
struct adc_npcx_threshold_control
control[DT_INST_PROP(0, threshold_count)];
};
/* Driver data */
struct adc_npcx_data {
/* Input clock for ADC converter */
uint32_t input_clk;
/* mutex of ADC channels */
struct adc_context ctx;
/*
* Bit-mask indicating the channels to be included in each sampling
* of this sequence.
*/
uint32_t channels;
/* ADC Device pointer used in api functions */
const struct device *adc_dev;
uint16_t *buffer;
uint16_t *repeat_buffer;
/* end pointer of buffer to ensure enough space for storing ADC data. */
uint16_t *buf_end;
/* Threshold comparator data pointer */
struct adc_npcx_threshold_data *threshold_data;
#ifdef CONFIG_PM
atomic_t current_pm_lock;
#endif
};
/*
* Pointer of internal work queue thread to be notified when threshold assertion
* occurs if CONFIG_ADC_CMP_NPCX_WORKQUEUE is enabled.
*/
struct k_work_q *work_q;
/* Driver convenience defines */
#define HAL_INSTANCE(dev) ((struct adc_reg *)((const struct adc_npcx_config *)(dev)->config)->base)
/* ADC local functions */
#ifdef CONFIG_PM
static void adc_npcx_pm_policy_state_lock_get(struct adc_npcx_data *data)
{
if (atomic_test_and_set_bit(&data->current_pm_lock, 0) == 0) {
pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
}
}
static void adc_npcx_pm_policy_state_lock_put(struct adc_npcx_data *data)
{
if (atomic_test_and_clear_bit(&data->current_pm_lock, 0) == 1) {
pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
}
}
#endif
static inline void adc_npcx_config_channels(const struct device *dev, uint32_t channels)
{
const struct adc_npcx_config *config = dev->config;
struct adc_reg *const inst = HAL_INSTANCE(dev);
inst->ADCCS = channels & BIT_MASK(NPCX_ADCCS_MAX_CHANNEL_COUNT);
/* Only npcx4 and later series support over 16 ADC channels */
if (config->channel_count > NPCX_ADCCS_MAX_CHANNEL_COUNT) {
inst->ADCCS2 = (channels >> NPCX_ADCCS_MAX_CHANNEL_COUNT) &
BIT_MASK(NPCX_ADCCS_MAX_CHANNEL_COUNT);
}
}
static inline void adc_npcx_enable_threshold_detect(const struct device *dev, uint8_t th_sel,
bool enable)
{
const struct adc_npcx_config *config = dev->config;
if (enable) {
#ifdef CONFIG_ADC_NPCX_CMP_V2
THEN(config->base) |= BIT(th_sel);
#else /* CONFIG_ADC_NPCX_CMP_V1 */
THRCTL(config->base, th_sel) |= BIT(NPCX_THRCTL_THEN);
#endif
} else {
#ifdef CONFIG_ADC_NPCX_CMP_V2
THEN(config->base) &= ~BIT(th_sel);
#else /* CONFIG_ADC_NPCX_CMP_V1 */
THRCTL(config->base, th_sel) &= ~BIT(NPCX_THRCTL_THEN);
#endif
}
}
static void adc_npcx_isr(const struct device *dev)
{
const struct adc_npcx_config *config = dev->config;
struct adc_npcx_data *const data = dev->data;
struct adc_reg *const inst = HAL_INSTANCE(dev);
struct adc_npcx_threshold_data *const t_data = data->threshold_data;
uint16_t status = inst->ADCSTS;
uint16_t result, channel;
/* Clear status pending bits first */
inst->ADCSTS = status;
LOG_DBG("%s: status is %04X\n", __func__, status);
/* Is end of conversion cycle event? ie. Scan conversion is done. */
if (IS_BIT_SET(status, NPCX_ADCSTS_EOCCEV) &&
IS_BIT_SET(inst->ADCCNF, NPCX_ADCCNF_INTECCEN)) {
/* Stop conversion for scan conversion mode */
inst->ADCCNF |= BIT(NPCX_ADCCNF_STOP);
/* Get result for each ADC selected channel */
while (data->channels) {
channel = find_lsb_set(data->channels) - 1;
result = GET_FIELD(CHNDAT(config->base, channel),
NPCX_CHNDAT_CHDAT_FIELD);
/*
* Save ADC result and adc_npcx_validate_buffer_size()
* already ensures that the buffer has enough space for
* storing result.
*/
if (data->buffer < data->buf_end) {
*data->buffer++ = result;
}
data->channels &= ~BIT(channel);
}
/* Disable End of cyclic conversion interruption */
inst->ADCCNF &= ~BIT(NPCX_ADCCNF_INTECCEN);
if (IS_ENABLED(CONFIG_ADC_CMP_NPCX) &&
t_data->active_thresholds) {
/* Set repetitive channels back */
adc_npcx_config_channels(dev, t_data->repetitive_channels);
/* Start conversion */
inst->ADCCNF |= BIT(NPCX_ADCCNF_START);
} else {
/* Disable all channels */
adc_npcx_config_channels(dev, 0);
/* Turn off ADC */
inst->ADCCNF &= ~(BIT(NPCX_ADCCNF_ADCEN));
#ifdef CONFIG_PM
adc_npcx_pm_policy_state_lock_put(data);
#endif
}
/* Inform sampling is done */
adc_context_on_sampling_done(&data->ctx, data->adc_dev);
}
if (!(IS_ENABLED(CONFIG_ADC_CMP_NPCX) && t_data->active_thresholds)) {
return;
}
uint16_t thrcts;
for (uint8_t i = 0; i < config->threshold_count; i++) {
if (IS_BIT_SET(inst->THRCTS, i) && IS_BIT_SET(inst->THRCTS,
(NPCX_THRCTS_THR1_IEN + i))) {
/* Avoid clearing other threshold status */
thrcts = inst->THRCTS &
~GENMASK(config->threshold_count - 1, 0);
/* Clear threshold status */
thrcts |= BIT(i);
inst->THRCTS = thrcts;
if (t_data->control[i].work) {
/* Notify work thread */
k_work_submit_to_queue(work_q ? work_q : &k_sys_work_q,
t_data->control[i].work);
}
}
}
}
/*
* Validate the buffer size with adc channels mask. If it is lower than what
* we need return -ENOSPC.
*/
static int adc_npcx_validate_buffer_size(const struct device *dev,
const struct adc_sequence *sequence)
{
const struct adc_npcx_config *config = dev->config;
uint8_t channels = 0;
uint32_t mask;
size_t needed;
for (mask = BIT(config->channel_count - 1); mask != 0; mask >>= 1) {
if (mask & sequence->channels) {
channels++;
}
}
needed = channels * sizeof(uint16_t);
if (sequence->options) {
needed *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < needed) {
return -ENOSPC;
}
return 0;
}
static void adc_npcx_start_scan(const struct device *dev)
{
const struct adc_npcx_config *config = dev->config;
struct adc_npcx_data *const data = dev->data;
struct adc_reg *const inst = HAL_INSTANCE(dev);
#ifdef CONFIG_PM
adc_npcx_pm_policy_state_lock_get(data);
#endif
/* Turn on ADC first */
inst->ADCCNF |= BIT(NPCX_ADCCNF_ADCEN);
/* Stop conversion for scan conversion mode */
inst->ADCCNF |= BIT(NPCX_ADCCNF_STOP);
/* Clear end of cyclic conversion event status flag */
inst->ADCSTS |= BIT(NPCX_ADCSTS_EOCCEV);
/* Update selected channels in scan mode by channels mask */
adc_npcx_config_channels(dev, data->channels);
/* Select 'Scan' Conversion mode. */
SET_FIELD(inst->ADCCNF, NPCX_ADCCNF_ADCMD_FIELD,
NPCX_ADC_SCAN_CONVERSION_MODE);
/* Enable end of cyclic conversion event interrupt */
inst->ADCCNF |= BIT(NPCX_ADCCNF_INTECCEN);
/* Start conversion */
inst->ADCCNF |= BIT(NPCX_ADCCNF_START);
if (config->channel_count > NPCX_ADCCS_MAX_CHANNEL_COUNT) {
LOG_DBG("Start ADC scan conversion and ADCCNF,ADCCS, ADCCS2 are "
"(%04X,%04X,%04X)\n", inst->ADCCNF, inst->ADCCS, inst->ADCCS2);
} else {
LOG_DBG("Start ADC scan conversion and ADCCNF,ADCCS are (%04X,%04X)\n",
inst->ADCCNF, inst->ADCCS);
}
}
static int adc_npcx_start_read(const struct device *dev,
const struct adc_sequence *sequence)
{
const struct adc_npcx_config *config = dev->config;
struct adc_npcx_data *const data = dev->data;
int error = 0;
if (!sequence->channels ||
(sequence->channels & ~BIT_MASK(config->channel_count))) {
LOG_ERR("Invalid ADC channels");
return -EINVAL;
}
/* Fixed 10 bit resolution of npcx ADC */
if (sequence->resolution != 10) {
LOG_ERR("Unfixed 10 bit ADC resolution");
return -ENOTSUP;
}
error = adc_npcx_validate_buffer_size(dev, sequence);
if (error) {
LOG_ERR("ADC buffer size too small");
return error;
}
/* Save ADC sequence sampling buffer and its end pointer address */
data->buffer = sequence->buffer;
data->buf_end = data->buffer + sequence->buffer_size / sizeof(uint16_t);
/* Start ADC conversion */
adc_context_start_read(&data->ctx, sequence);
error = adc_context_wait_for_completion(&data->ctx);
return error;
}
/* ADC api functions */
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct adc_npcx_data *const data =
CONTAINER_OF(ctx, struct adc_npcx_data, ctx);
data->repeat_buffer = data->buffer;
data->channels = ctx->sequence.channels;
/* Start ADC scan conversion */
adc_npcx_start_scan(data->adc_dev);
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
bool repeat_sampling)
{
struct adc_npcx_data *const data =
CONTAINER_OF(ctx, struct adc_npcx_data, ctx);
if (repeat_sampling) {
data->buffer = data->repeat_buffer;
}
}
static int adc_npcx_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
const struct adc_npcx_config *config = dev->config;
uint8_t channel_id = channel_cfg->channel_id;
if (channel_id >= config->channel_count) {
LOG_ERR("Invalid channel %d", channel_id);
return -EINVAL;
}
if (channel_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
LOG_ERR("Unsupported channel acquisition time");
return -ENOTSUP;
}
if (channel_cfg->differential) {
LOG_ERR("Differential channels are not supported");
return -ENOTSUP;
}
if (channel_cfg->gain != ADC_GAIN_1) {
LOG_ERR("Unsupported channel gain %d", channel_cfg->gain);
return -ENOTSUP;
}
if (channel_cfg->reference != ADC_REF_INTERNAL) {
LOG_ERR("Unsupported channel reference");
return -ENOTSUP;
}
LOG_DBG("ADC channel %d configured", channel_cfg->channel_id);
return 0;
}
static int adc_npcx_read(const struct device *dev,
const struct adc_sequence *sequence)
{
struct adc_npcx_data *const data = dev->data;
int error;
adc_context_lock(&data->ctx, false, NULL);
error = adc_npcx_start_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
#if defined(CONFIG_ADC_ASYNC)
static int adc_npcx_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
struct adc_npcx_data *const data = dev->data;
int error;
adc_context_lock(&data->ctx, true, async);
error = adc_npcx_start_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
#endif /* CONFIG_ADC_ASYNC */
static void adc_npcx_set_repetitive(const struct device *dev, int chnsel,
uint8_t enable)
{
struct adc_reg *const inst = HAL_INSTANCE(dev);
struct adc_npcx_data *const data = dev->data;
struct adc_npcx_threshold_data *const t_data = data->threshold_data;
/* Stop ADC conversion */
inst->ADCCNF |= BIT(NPCX_ADCCNF_STOP);
if (enable) {
#ifdef CONFIG_PM
adc_npcx_pm_policy_state_lock_get(data);
#endif
/* Turn on ADC */
inst->ADCCNF |= BIT(NPCX_ADCCNF_ADCEN);
/* Set ADC conversion code to SW conversion mode */
SET_FIELD(inst->ADCCNF, NPCX_ADCCNF_ADCMD_FIELD,
NPCX_ADC_SCAN_CONVERSION_MODE);
/* Add selected ADC channel to be converted */
t_data->repetitive_channels |= BIT(chnsel);
adc_npcx_config_channels(dev, t_data->repetitive_channels);
/* Set conversion type to repetitive (runs continuously) */
inst->ADCCNF |= BIT(NPCX_ADCCNF_ADCRPTC);
/* Start conversion */
inst->ADCCNF |= BIT(NPCX_ADCCNF_START);
} else {
/* Remove selected ADC channel to be converted */
t_data->repetitive_channels &= ~BIT(chnsel);
adc_npcx_config_channels(dev, t_data->repetitive_channels);
if (!t_data->repetitive_channels) {
/* No thesholdd active left, disable repetitive mode */
inst->ADCCNF &= ~BIT(NPCX_ADCCNF_ADCRPTC);
/* Turn off ADC */
inst->ADCCNF &= ~BIT(NPCX_ADCCNF_ADCEN);
#ifdef CONFIG_PM
adc_npcx_pm_policy_state_lock_put(data);
#endif
} else {
/* Start conversion again */
inst->ADCCNF |= BIT(NPCX_ADCCNF_START);
}
}
}
int adc_npcx_threshold_ctrl_set_param(const struct device *dev,
const uint8_t th_sel,
const struct adc_npcx_threshold_param
*param)
{
const struct adc_npcx_config *config = dev->config;
struct adc_npcx_data *const data = dev->data;
struct adc_npcx_threshold_data *const t_data = data->threshold_data;
struct adc_npcx_threshold_control *const t_ctrl =
&t_data->control[th_sel];
int ret = 0;
if (!IS_ENABLED(CONFIG_ADC_CMP_NPCX)) {
return -EOPNOTSUPP;
}
if (!param || th_sel >= config->threshold_count) {
return -EINVAL;
}
adc_context_lock(&data->ctx, false, NULL);
switch (param->type) {
case ADC_NPCX_THRESHOLD_PARAM_CHNSEL:
if (param->val >= config->channel_count) {
ret = -EINVAL;
break;
}
t_ctrl->chnsel = (uint8_t)param->val;
break;
case ADC_NPCX_THRESHOLD_PARAM_L_H:
t_ctrl->l_h = !!param->val;
break;
case ADC_NPCX_THRESHOLD_PARAM_THVAL:
if (param->val == 0 || param->val >= ADC_NPCX_THRVAL_MAX) {
ret = -EINVAL;
break;
}
t_ctrl->thrval = (uint16_t)param->val;
break;
case ADC_NPCX_THRESHOLD_PARAM_WORK:
if (param->val == 0) {
ret = -EINVAL;
break;
}
t_ctrl->work = (struct k_work *)param->val;
break;
default:
ret = -EINVAL;
}
adc_context_release(&data->ctx, 0);
return ret;
}
static int adc_npcx_threshold_ctrl_setup(const struct device *dev,
const uint8_t th_sel)
{
struct adc_npcx_data *const data = dev->data;
struct adc_driver_api *api = (struct adc_driver_api *)dev->api;
struct adc_npcx_threshold_data *const t_data = data->threshold_data;
const struct adc_npcx_config *config = dev->config;
struct adc_npcx_threshold_control *const t_ctrl =
&t_data->control[th_sel];
if (th_sel >= config->threshold_count) {
return -EINVAL;
}
adc_context_lock(&data->ctx, false, NULL);
if (t_data->active_thresholds & BIT(th_sel)) {
/* Unable to setup threshold parameters while active */
adc_context_release(&data->ctx, 0);
LOG_ERR("Threshold selected (%d) is active!", th_sel);
return -EBUSY;
}
if (t_ctrl->chnsel >= config->channel_count ||
t_ctrl->thrval >= api->ref_internal ||
t_ctrl->thrval == 0 || t_ctrl->work == 0) {
adc_context_release(&data->ctx, 0);
LOG_ERR("Threshold selected (%d) is not configured!", th_sel);
return -EINVAL;
}
SET_FIELD(THRCTL(config->base, th_sel),
NPCX_THRCTL_CHNSEL, t_ctrl->chnsel);
if (t_ctrl->l_h) {
THRCTL(config->base, th_sel) |= BIT(NPCX_THRCTL_L_H);
} else {
THRCTL(config->base, th_sel) &= ~BIT(NPCX_THRCTL_L_H);
}
/* Set the threshold value. */
SET_FIELD(THRCTL(config->base, th_sel), NPCX_THRCTL_THRVAL,
t_ctrl->thrval);
adc_context_release(&data->ctx, 0);
return 0;
}
static int adc_npcx_threshold_enable_irq(const struct device *dev,
const uint8_t th_sel)
{
struct adc_reg *const inst = HAL_INSTANCE(dev);
struct adc_driver_api *api = (struct adc_driver_api *)dev->api;
struct adc_npcx_data *const data = dev->data;
const struct adc_npcx_config *config = dev->config;
struct adc_npcx_threshold_data *const t_data = data->threshold_data;
struct adc_npcx_threshold_control *const t_ctrl =
&t_data->control[th_sel];
uint16_t thrcts;
if (th_sel >= config->threshold_count) {
LOG_ERR("Invalid ADC threshold selection! (%d)", th_sel);
return -EINVAL;
}
adc_context_lock(&data->ctx, false, NULL);
if (t_ctrl->chnsel >= config->channel_count ||
t_ctrl->thrval >= api->ref_internal ||
t_ctrl->thrval == 0 || t_ctrl->work == 0) {
adc_context_release(&data->ctx, 0);
LOG_ERR("Threshold selected (%d) is not configured!", th_sel);
return -EINVAL;
}
/* Record new active threshold */
t_data->active_thresholds |= BIT(th_sel);
/* avoid clearing other threshold status */
thrcts = inst->THRCTS & ~GENMASK(config->threshold_count - 1, 0);
/* Enable threshold detection */
adc_npcx_enable_threshold_detect(dev, th_sel, true);
/* clear threshold status */
thrcts |= BIT(th_sel);
/* set enable threshold status */
thrcts |= BIT(NPCX_THRCTS_THR1_IEN + th_sel);
inst->THRCTS = thrcts;
adc_npcx_set_repetitive(dev, t_data->control[th_sel].chnsel, true);
adc_context_release(&data->ctx, 0);
return 0;
}
int adc_npcx_threshold_disable_irq(const struct device *dev,
const uint8_t th_sel)
{
struct adc_reg *const inst = HAL_INSTANCE(dev);
const struct adc_npcx_config *config = dev->config;
struct adc_npcx_data *const data = dev->data;
struct adc_npcx_threshold_data *const t_data = data->threshold_data;
uint16_t thrcts;
if (!IS_ENABLED(CONFIG_ADC_CMP_NPCX)) {
return -EOPNOTSUPP;
}
if (th_sel >= config->threshold_count) {
LOG_ERR("Invalid ADC threshold selection! (%d)", th_sel);
return -EINVAL;
}
adc_context_lock(&data->ctx, false, NULL);
if (!(t_data->active_thresholds & BIT(th_sel))) {
adc_context_release(&data->ctx, 0);
LOG_ERR("Threshold selection (%d) is not enabled", th_sel);
return -ENODEV;
}
/* avoid clearing other threshold status */
thrcts = inst->THRCTS & ~GENMASK(config->threshold_count - 1, 0);
/* set enable threshold status */
thrcts &= ~BIT(NPCX_THRCTS_THR1_IEN + th_sel);
inst->THRCTS = thrcts;
/* Disable threshold detection */
adc_npcx_enable_threshold_detect(dev, th_sel, false);
/* Update active threshold */
t_data->active_thresholds &= ~BIT(th_sel);
adc_npcx_set_repetitive(dev, t_data->control[th_sel].chnsel, false);
adc_context_release(&data->ctx, 0);
return 0;
}
int adc_npcx_threshold_ctrl_enable(const struct device *dev, uint8_t th_sel,
const bool enable)
{
int ret;
if (!IS_ENABLED(CONFIG_ADC_CMP_NPCX)) {
return -EOPNOTSUPP;
}
/* Enable/Disable threshold IRQ */
if (enable) {
/* Set control threshold registers */
ret = adc_npcx_threshold_ctrl_setup(dev, th_sel);
if (ret) {
return ret;
}
ret = adc_npcx_threshold_enable_irq(dev, th_sel);
} else {
ret = adc_npcx_threshold_disable_irq(dev, th_sel);
}
return ret;
}
int adc_npcx_threshold_mv_to_thrval(const struct device *dev, uint32_t val_mv,
uint32_t *thrval)
{
struct adc_driver_api *api = (struct adc_driver_api *)dev->api;
if (!IS_ENABLED(CONFIG_ADC_CMP_NPCX)) {
return -EOPNOTSUPP;
}
if (val_mv >= api->ref_internal) {
return -EINVAL;
}
*thrval = (val_mv << ADC_NPCX_THRVAL_RESOLUTION) /
api->ref_internal;
return 0;
}
#if defined(CONFIG_ADC_CMP_NPCX_WORKQUEUE)
struct k_work_q adc_npcx_work_q;
static K_KERNEL_STACK_DEFINE(adc_npcx_work_q_stack,
CONFIG_ADC_CMP_NPCX_WORKQUEUE_STACK_SIZE);
static int adc_npcx_init_cmp_work_q(void)
{
struct k_work_queue_config cfg = {
.name = "adc_cmp_work",
.no_yield = false,
};
k_work_queue_start(&adc_npcx_work_q,
adc_npcx_work_q_stack,
K_KERNEL_STACK_SIZEOF(adc_npcx_work_q_stack),
CONFIG_ADC_CMP_NPCX_WORKQUEUE_PRIORITY, &cfg);
work_q = &adc_npcx_work_q;
return 0;
}
SYS_INIT(adc_npcx_init_cmp_work_q, POST_KERNEL, CONFIG_SENSOR_INIT_PRIORITY);
#endif
static int adc_npcx_init(const struct device *dev)
{
const struct adc_npcx_config *const config = dev->config;
struct adc_npcx_data *const data = dev->data;
struct adc_reg *const inst = HAL_INSTANCE(dev);
const struct device *const clk_dev = DEVICE_DT_GET(NPCX_CLK_CTRL_NODE);
int prescaler = 0, ret;
if (!device_is_ready(clk_dev)) {
LOG_ERR("clock control device not ready");
return -ENODEV;
}
/* Save ADC device in data */
data->adc_dev = dev;
/* Turn on device clock first and get source clock freq. */
ret = clock_control_on(clk_dev, (clock_control_subsys_t)
&config->clk_cfg);
if (ret < 0) {
LOG_ERR("Turn on ADC clock fail %d", ret);
return ret;
}
ret = clock_control_get_rate(clk_dev, (clock_control_subsys_t)
&config->clk_cfg, &data->input_clk);
if (ret < 0) {
LOG_ERR("Get ADC clock rate error %d", ret);
return ret;
}
/* Configure the ADC clock */
prescaler = DIV_ROUND_UP(data->input_clk, NPCX_ADC_CLK);
if (prescaler > 0x40) {
prescaler = 0x40;
}
/* Set Core Clock Division Factor in order to obtain the ADC clock */
SET_FIELD(inst->ATCTL, NPCX_ATCTL_SCLKDIV_FIELD, prescaler - 1);
/* Set regular ADC delay */
SET_FIELD(inst->ATCTL, NPCX_ATCTL_DLY_FIELD, ADC_REGULAR_DLY_VAL);
/* Set ADC speed sequentially */
inst->ADCCNF2 = ADC_REGULAR_ADCCNF2_VAL;
inst->GENDLY = ADC_REGULAR_GENDLY_VAL;
inst->MEAST = ADC_REGULAR_MEAST_VAL;
/* Configure ADC interrupt and enable it */
config->irq_cfg_func();
/* Initialize mutex of ADC channels */
adc_context_unlock_unconditionally(&data->ctx);
/* Configure pin-mux for ADC device */
ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
if (ret < 0) {
LOG_ERR("ADC pinctrl setup failed (%d)", ret);
return ret;
}
return 0;
}
#define NPCX_ADC_INIT(n) \
\
static void adc_npcx_irq_cfg_func_##n(void) \
{ \
IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), \
adc_npcx_isr, DEVICE_DT_INST_GET(n), 0); \
irq_enable(DT_INST_IRQN(n)); \
} \
\
static const struct adc_driver_api adc_npcx_driver_api_##n = { \
.channel_setup = adc_npcx_channel_setup, \
.read = adc_npcx_read, \
.ref_internal = DT_INST_PROP(n, vref_mv), \
IF_ENABLED(CONFIG_ADC_ASYNC, \
(.read_async = adc_npcx_read_async,)) \
}; \
\
PINCTRL_DT_INST_DEFINE(n); \
\
static const struct adc_npcx_config adc_npcx_cfg_##n = { \
.base = DT_INST_REG_ADDR(n), \
.clk_cfg = NPCX_DT_CLK_CFG_ITEM(n), \
.channel_count = DT_INST_PROP(n, channel_count), \
.threshold_count = DT_INST_PROP(n, threshold_count), \
.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \
.irq_cfg_func = adc_npcx_irq_cfg_func_##n, \
}; \
static struct adc_npcx_threshold_data threshold_data_##n; \
static struct adc_npcx_data adc_npcx_data_##n = { \
ADC_CONTEXT_INIT_TIMER(adc_npcx_data_##n, ctx), \
ADC_CONTEXT_INIT_LOCK(adc_npcx_data_##n, ctx), \
ADC_CONTEXT_INIT_SYNC(adc_npcx_data_##n, ctx), \
.threshold_data = &threshold_data_##n, \
}; \
DEVICE_DT_INST_DEFINE(n, \
adc_npcx_init, NULL, \
&adc_npcx_data_##n, &adc_npcx_cfg_##n, \
PRE_KERNEL_1, CONFIG_ADC_INIT_PRIORITY, \
&adc_npcx_driver_api_##n);
DT_INST_FOREACH_STATUS_OKAY(NPCX_ADC_INIT)
``` | /content/code_sandbox/drivers/adc/adc_npcx.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6,848 |
```c
/*
*
*/
#include <zephyr/drivers/adc.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/kernel.h>
static inline int z_vrfy_adc_channel_setup(const struct device *dev,
const struct adc_channel_cfg *user_channel_cfg)
{
struct adc_channel_cfg channel_cfg;
K_OOPS(K_SYSCALL_DRIVER_ADC(dev, channel_setup));
K_OOPS(k_usermode_from_copy(&channel_cfg,
(struct adc_channel_cfg *)user_channel_cfg,
sizeof(struct adc_channel_cfg)));
return z_impl_adc_channel_setup((const struct device *)dev,
&channel_cfg);
}
#include <zephyr/syscalls/adc_channel_setup_mrsh.c>
static bool copy_sequence(struct adc_sequence *dst,
struct adc_sequence_options *options,
struct adc_sequence *src)
{
if (k_usermode_from_copy(dst, src, sizeof(struct adc_sequence)) != 0) {
printk("couldn't copy adc_sequence struct\n");
return false;
}
if (dst->options) {
if (k_usermode_from_copy(options, dst->options,
sizeof(struct adc_sequence_options)) != 0) {
printk("couldn't copy adc_options struct\n");
return false;
}
dst->options = options;
}
if (K_SYSCALL_MEMORY_WRITE(dst->buffer, dst->buffer_size) != 0) {
printk("no access to buffer memory\n");
return false;
}
return true;
}
static inline int z_vrfy_adc_read(const struct device *dev,
const struct adc_sequence *user_sequence)
{
struct adc_sequence sequence;
struct adc_sequence_options options;
K_OOPS(K_SYSCALL_DRIVER_ADC(dev, read));
K_OOPS(K_SYSCALL_VERIFY_MSG(copy_sequence(&sequence, &options,
(struct adc_sequence *)user_sequence),
"invalid ADC sequence"));
if (sequence.options != NULL) {
K_OOPS(K_SYSCALL_VERIFY_MSG(sequence.options->callback == NULL,
"ADC sequence callbacks forbidden from user mode"));
}
return z_impl_adc_read((const struct device *)dev, &sequence);
}
#include <zephyr/syscalls/adc_read_mrsh.c>
#ifdef CONFIG_ADC_ASYNC
static inline int z_vrfy_adc_read_async(const struct device *dev,
const struct adc_sequence *user_sequence,
struct k_poll_signal *async)
{
struct adc_sequence sequence;
struct adc_sequence_options options;
K_OOPS(K_SYSCALL_DRIVER_ADC(dev, read_async));
K_OOPS(K_SYSCALL_VERIFY_MSG(copy_sequence(&sequence, &options,
(struct adc_sequence *)user_sequence),
"invalid ADC sequence"));
if (sequence.options != NULL) {
K_OOPS(K_SYSCALL_VERIFY_MSG(sequence.options->callback == NULL,
"ADC sequence callbacks forbidden from user mode"));
}
K_OOPS(K_SYSCALL_OBJ(async, K_OBJ_POLL_SIGNAL));
return z_impl_adc_read_async((const struct device *)dev, &sequence,
(struct k_poll_signal *)async);
}
#include <zephyr/syscalls/adc_read_async_mrsh.c>
#endif /* CONFIG_ADC_ASYNC */
``` | /content/code_sandbox/drivers/adc/adc_handlers.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 647 |
```unknown
config ADC_AD559X
bool "AD559x ADC driver"
default y
depends on DT_HAS_ADI_AD559X_ADC_ENABLED
select MFD
help
Enable the AD559x ADC driver.
config ADC_AD559X_ACQUISITION_THREAD_STACK_SIZE
int "Stack size for the ADC data acquisition thread"
depends on ADC_AD559X
default 384
help
Size of the stack used for the internal data acquisition
thread.
config ADC_AD559X_ACQUISITION_THREAD_PRIO
int "Priority for the ADC data acquisition thread"
depends on ADC_AD559X
default 0
help
Priority level for the internal ADC data acquisition thread.
``` | /content/code_sandbox/drivers/adc/Kconfig.ad559x | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 148 |
```unknown
#
config ADC_ESP32
bool "ESP32 ADC driver"
default y
depends on DT_HAS_ESPRESSIF_ESP32_ADC_ENABLED
help
Enable the driver implementation for the ESP32 ADC
if ADC_ESP32
config ADC_ESP32_DMA
bool "ESP32 ADC DMA Support"
default n
depends on DT_HAS_ESPRESSIF_ESP32_GDMA_ENABLED
help
Enable the ADC DMA mode for ADC instances
that enable dma channels in their device tree node.
endif
``` | /content/code_sandbox/drivers/adc/Kconfig.esp32 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 109 |
```c
/*
*
*/
#define DT_DRV_COMPAT silabs_gecko_iadc
#include <zephyr/drivers/adc.h>
#include <em_iadc.h>
#include <em_cmu.h>
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(iadc_gecko, CONFIG_ADC_LOG_LEVEL);
/* Number of channels available. */
#define GECKO_CHANNEL_COUNT 16
#define GECKO_INTERNAL_REFERENCE_mV 1210
#define GECKO_DATA_RES12BIT(DATA) ((DATA) & 0x0FFF)
struct adc_gecko_channel_config {
IADC_CfgAnalogGain_t gain;
IADC_CfgReference_t reference;
IADC_PosInput_t input_positive;
IADC_NegInput_t input_negative;
bool initialized;
};
struct adc_gecko_data {
const struct device *dev;
struct adc_context ctx;
uint16_t *buffer;
uint16_t *repeat_buffer;
uint32_t channels;
uint8_t channel_id;
struct adc_gecko_channel_config channel_config[GECKO_CHANNEL_COUNT];
};
struct adc_gecko_config {
IADC_Config_t config;
IADC_TypeDef *base;
void (*irq_cfg_func)(void);
};
static void adc_gecko_set_config(const struct device *dev)
{
struct adc_gecko_data *data = dev->data;
struct adc_gecko_channel_config *channel_config = NULL;
const struct adc_gecko_config *config = dev->config;
IADC_TypeDef *iadc = (IADC_TypeDef *)config->base;
IADC_InitSingle_t sInit = IADC_INITSINGLE_DEFAULT;
IADC_SingleInput_t initSingleInput = IADC_SINGLEINPUT_DEFAULT;
IADC_Init_t init = IADC_INIT_DEFAULT;
IADC_AllConfigs_t initAllConfigs = IADC_ALLCONFIGS_DEFAULT;
channel_config = &data->channel_config[data->channel_id];
initSingleInput.posInput = channel_config->input_positive;
initSingleInput.negInput = channel_config->input_negative;
initAllConfigs.configs[0].analogGain = channel_config->gain;
initAllConfigs.configs[0].reference = channel_config->reference;
IADC_init(iadc, &init, &initAllConfigs);
IADC_initSingle(iadc, &sInit, &initSingleInput);
}
static int adc_gecko_check_buffer_size(const struct adc_sequence *sequence,
uint8_t active_channels)
{
size_t needed_buffer_size;
needed_buffer_size = active_channels * sizeof(uint16_t);
if (sequence->options) {
needed_buffer_size *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < needed_buffer_size) {
LOG_DBG("Provided buffer is too small (%u/%u)",
sequence->buffer_size, needed_buffer_size);
return -ENOMEM;
}
return 0;
}
static int adc_gecko_check_resolution(const struct adc_sequence *sequence)
{
int value = sequence->resolution;
/* Base resolution is on 12, it can be changed only up by oversampling */
if (value != 12) {
return -EINVAL;
}
return value;
}
static int start_read(const struct device *dev, const struct adc_sequence *sequence)
{
struct adc_gecko_data *data = dev->data;
uint32_t channels;
uint8_t channel_count;
uint8_t index;
int res;
/* Check if at least 1 channel is requested */
if (sequence->channels == 0) {
LOG_DBG("No channel requested");
return -EINVAL;
}
if (sequence->oversampling) {
LOG_ERR("Oversampling is not supported");
return -ENOTSUP;
}
/* Check resolution setting */
res = adc_gecko_check_resolution(sequence);
if (res < 0) {
return -EINVAL;
}
/* Verify all requested channels are initialized and store resolution */
channels = sequence->channels;
channel_count = 0;
while (channels) {
/* Iterate through all channels and check if they are initialized */
index = find_lsb_set(channels) - 1;
if (index >= GECKO_CHANNEL_COUNT) {
LOG_DBG("Requested channel index not available: %d", index);
return -EINVAL;
}
if (!data->channel_config[index].initialized) {
LOG_DBG("Channel not initialized");
return -EINVAL;
}
channel_count++;
channels &= ~BIT(index);
}
/* Check buffer size */
res = adc_gecko_check_buffer_size(sequence, channel_count);
if (res < 0) {
return res;
}
data->buffer = sequence->buffer;
adc_context_start_read(&data->ctx, sequence);
res = adc_context_wait_for_completion(&data->ctx);
return res;
}
static void adc_gecko_start_channel(const struct device *dev)
{
const struct adc_gecko_config *config = dev->config;
struct adc_gecko_data *data = dev->data;
IADC_TypeDef *iadc = (IADC_TypeDef *)config->base;
data->channel_id = find_lsb_set(data->channels) - 1;
LOG_DBG("Starting channel %d", data->channel_id);
adc_gecko_set_config(data->dev);
/* Enable single conversion interrupt */
IADC_enableInt(iadc, IADC_IEN_SINGLEDONE);
/* Start single conversion */
IADC_command(iadc, iadcCmdStartSingle);
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct adc_gecko_data *data =
CONTAINER_OF(ctx, struct adc_gecko_data, ctx);
data->channels = ctx->sequence.channels;
data->repeat_buffer = data->buffer;
adc_gecko_start_channel(data->dev);
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
bool repeat_sampling)
{
struct adc_gecko_data *data =
CONTAINER_OF(ctx, struct adc_gecko_data, ctx);
if (repeat_sampling) {
data->buffer = data->repeat_buffer;
}
}
static void adc_gecko_isr(void *arg)
{
const struct device *dev = (const struct device *)arg;
const struct adc_gecko_config *config = dev->config;
struct adc_gecko_data *data = dev->data;
IADC_TypeDef *iadc = config->base;
IADC_Result_t sample;
uint32_t flags, err;
/*
* IRQ is enabled only for SINGLEDONE. However, other
* interrupt flags - the ones singaling an error - may be
* set simultaneously with SINGLEDONE. We read & clear them
* to determine if conversion is successful or not.
*/
flags = IADC_getInt(iadc);
__ASSERT(flags & IADC_IF_SINGLEDONE,
"unexpected IADC IRQ (flags=0x%08x)!", flags);
err = flags & (IADC_IF_PORTALLOCERR |
IADC_IF_POLARITYERR |
IADC_IF_EM23ABORTERROR);
if (!err) {
sample = IADC_readSingleResult(iadc);
*data->buffer++ = GECKO_DATA_RES12BIT((uint16_t)sample.data);
data->channels &= ~BIT(data->channel_id);
if (data->channels) {
adc_gecko_start_channel(dev);
} else {
adc_context_on_sampling_done(&data->ctx, dev);
}
} else {
LOG_ERR("IADC conversion error, flags=%08x", err);
adc_context_complete(&data->ctx, -EIO);
}
IADC_clearInt(iadc, IADC_IF_SINGLEDONE | err);
}
static int adc_gecko_read(const struct device *dev,
const struct adc_sequence *sequence)
{
struct adc_gecko_data *data = dev->data;
int error;
adc_context_lock(&data->ctx, false, NULL);
error = start_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
#ifdef CONFIG_ADC_ASYNC
static int adc_gecko_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
struct adc_gecko_data *data = dev->data;
int error;
adc_context_lock(&data->ctx, true, async);
error = start_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
#endif
static void adc_gecko_gpio_busalloc_pos(IADC_PosInput_t input)
{
uint32_t port = ((input << _IADC_SCAN_PINPOS_SHIFT) &
_IADC_SCAN_PORTPOS_MASK) >> _IADC_SCAN_PORTPOS_SHIFT;
uint32_t pin = ((input << _IADC_SCAN_PINPOS_SHIFT) &
_IADC_SCAN_PINPOS_MASK) >> _IADC_SCAN_PINPOS_SHIFT;
switch (port) {
case _IADC_SCAN_PORTPOS_PORTA:
if (pin & 1) {
GPIO->ABUSALLOC |= GPIO_ABUSALLOC_AODD0_ADC0;
} else {
GPIO->ABUSALLOC |= GPIO_ABUSALLOC_AEVEN0_ADC0;
}
break;
case _IADC_SCAN_PORTPOS_PORTB:
if (pin & 1) {
GPIO->BBUSALLOC |= GPIO_BBUSALLOC_BODD0_ADC0;
} else {
GPIO->BBUSALLOC |= GPIO_BBUSALLOC_BEVEN0_ADC0;
}
break;
case _IADC_SCAN_PORTPOS_PORTC:
case _IADC_SCAN_PORTPOS_PORTD:
if (pin & 1) {
GPIO->CDBUSALLOC |= GPIO_CDBUSALLOC_CDODD0_ADC0;
} else {
GPIO->CDBUSALLOC |= GPIO_CDBUSALLOC_CDEVEN0_ADC0;
}
break;
default:
}
}
static void adc_gecko_gpio_busalloc_neg(IADC_NegInput_t input)
{
uint32_t port = ((input << _IADC_SCAN_PINNEG_SHIFT) &
_IADC_SCAN_PORTNEG_MASK) >> _IADC_SCAN_PORTNEG_SHIFT;
uint32_t pin = ((input << _IADC_SCAN_PINNEG_SHIFT) &
_IADC_SCAN_PINNEG_MASK) >> _IADC_SCAN_PINNEG_SHIFT;
switch (port) {
case _IADC_SCAN_PORTNEG_PORTA:
if (pin & 1) {
GPIO->ABUSALLOC |= GPIO_ABUSALLOC_AODD0_ADC0;
} else {
GPIO->ABUSALLOC |= GPIO_ABUSALLOC_AEVEN0_ADC0;
}
break;
case _IADC_SCAN_PORTNEG_PORTB:
if (pin & 1) {
GPIO->BBUSALLOC |= GPIO_BBUSALLOC_BODD0_ADC0;
} else {
GPIO->BBUSALLOC |= GPIO_BBUSALLOC_BEVEN0_ADC0;
}
break;
case _IADC_SCAN_PORTNEG_PORTC:
case _IADC_SCAN_PORTNEG_PORTD:
if (pin & 1) {
GPIO->CDBUSALLOC |= GPIO_CDBUSALLOC_CDODD0_ADC0;
} else {
GPIO->CDBUSALLOC |= GPIO_CDBUSALLOC_CDEVEN0_ADC0;
}
break;
default:
}
}
static int adc_gecko_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
struct adc_gecko_data *data = dev->data;
struct adc_gecko_channel_config *channel_config = NULL;
if (channel_cfg->channel_id < GECKO_CHANNEL_COUNT) {
channel_config = &data->channel_config[channel_cfg->channel_id];
} else {
LOG_DBG("Requested channel index not available: %d", channel_cfg->channel_id);
return -EINVAL;
}
channel_config->initialized = false;
channel_config->input_positive = channel_cfg->input_positive;
if (channel_cfg->differential) {
channel_config->input_negative = channel_cfg->input_negative;
} else {
channel_config->input_negative = iadcNegInputGnd;
}
/* Setup input */
switch (channel_cfg->gain) {
#if defined(_IADC_CFG_ANALOGGAIN_ANAGAIN0P25)
case ADC_GAIN_1_4:
channel_config->gain = iadcCfgAnalogGain0P25x;
break;
#endif
case ADC_GAIN_1_2:
channel_config->gain = iadcCfgAnalogGain0P5x;
break;
case ADC_GAIN_1:
channel_config->gain = iadcCfgAnalogGain1x;
break;
case ADC_GAIN_2:
channel_config->gain = iadcCfgAnalogGain2x;
break;
case ADC_GAIN_3:
channel_config->gain = iadcCfgAnalogGain3x;
break;
case ADC_GAIN_4:
channel_config->gain = iadcCfgAnalogGain4x;
break;
default:
LOG_ERR("unsupported channel gain '%d'", channel_cfg->gain);
return -ENOTSUP;
}
/* Setup reference */
switch (channel_cfg->reference) {
case ADC_REF_VDD_1:
channel_config->reference = iadcCfgReferenceVddx;
break;
case ADC_REF_INTERNAL:
channel_config->reference = iadcCfgReferenceInt1V2;
break;
#if defined(_IADC_CFG_REFSEL_VREF2P5)
case ADC_REF_EXTERNAL1:
channel_config->reference = iadcCfgReferenceExt2V5;
break;
#endif
case ADC_REF_EXTERNAL0:
channel_config->reference = iadcCfgReferenceExt1V25;
break;
default:
LOG_ERR("unsupported channel reference type '%d'",
channel_cfg->reference);
return -ENOTSUP;
}
/* Setup GPIO xBUSALLOC registers if channel uses GPIO pin */
adc_gecko_gpio_busalloc_pos(channel_config->input_positive);
adc_gecko_gpio_busalloc_neg(channel_config->input_negative);
channel_config->initialized = true;
LOG_DBG("Channel setup succeeded!");
return 0;
}
static int adc_gecko_init(const struct device *dev)
{
const struct adc_gecko_config *config = dev->config;
struct adc_gecko_data *data = dev->data;
CMU_ClockEnable(cmuClock_IADC0, true);
/* Select clock for IADC */
CMU_ClockSelectSet(cmuClock_IADCCLK, cmuSelect_FSRCO); /* FSRCO - 20MHz */
data->dev = dev;
config->irq_cfg_func();
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
static const struct adc_driver_api api_gecko_adc_driver_api = {
.channel_setup = adc_gecko_channel_setup,
.read = adc_gecko_read,
#ifdef CONFIG_ADC_ASYNC
.read_async = adc_gecko_read_async,
#endif
.ref_internal = GECKO_INTERNAL_REFERENCE_mV,
};
#define GECKO_IADC_INIT(n) \
\
static void adc_gecko_config_func_##n(void); \
\
const static struct adc_gecko_config adc_gecko_config_##n = { \
.base = (IADC_TypeDef *)DT_INST_REG_ADDR(n),\
.irq_cfg_func = adc_gecko_config_func_##n, \
}; \
static struct adc_gecko_data adc_gecko_data_##n = { \
ADC_CONTEXT_INIT_TIMER(adc_gecko_data_##n, ctx), \
ADC_CONTEXT_INIT_LOCK(adc_gecko_data_##n, ctx), \
ADC_CONTEXT_INIT_SYNC(adc_gecko_data_##n, ctx), \
}; \
static void adc_gecko_config_func_##n(void) \
{ \
IRQ_CONNECT(DT_INST_IRQN(n), \
DT_INST_IRQ(n, priority), \
adc_gecko_isr, DEVICE_DT_INST_GET(n), 0); \
irq_enable(DT_INST_IRQN(n)); \
}; \
DEVICE_DT_INST_DEFINE(n, \
&adc_gecko_init, NULL, \
&adc_gecko_data_##n, &adc_gecko_config_##n,\
POST_KERNEL, CONFIG_ADC_INIT_PRIORITY, \
&api_gecko_adc_driver_api);
DT_INST_FOREACH_STATUS_OKAY(GECKO_IADC_INIT)
``` | /content/code_sandbox/drivers/adc/iadc_gecko.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,478 |
```unknown
# Renesas RA Family
config ADC_RENESAS_RA
bool "Renesas RA ADC"
default y
depends on DT_HAS_RENESAS_RA_ADC_ENABLED
select USE_RA_FSP_ADC
help
Enable Renesas RA ADC Driver.
``` | /content/code_sandbox/drivers/adc/Kconfig.renesas_ra | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 56 |
```c
/*
*
*/
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
#include <nrfx_adc.h>
#include <zephyr/dt-bindings/adc/nrf-adc.h>
#define LOG_LEVEL CONFIG_ADC_LOG_LEVEL
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(adc_nrfx_adc);
#define DT_DRV_COMPAT nordic_nrf_adc
/* Ensure that definitions in nrf-adc.h match MDK. */
BUILD_ASSERT((NRF_ADC_AIN0 == NRF_ADC_CONFIG_INPUT_0) &&
(NRF_ADC_AIN1 == NRF_ADC_CONFIG_INPUT_1) &&
(NRF_ADC_AIN2 == NRF_ADC_CONFIG_INPUT_2) &&
(NRF_ADC_AIN3 == NRF_ADC_CONFIG_INPUT_3) &&
(NRF_ADC_AIN4 == NRF_ADC_CONFIG_INPUT_4) &&
(NRF_ADC_AIN5 == NRF_ADC_CONFIG_INPUT_5) &&
(NRF_ADC_AIN6 == NRF_ADC_CONFIG_INPUT_6) &&
(NRF_ADC_AIN7 == NRF_ADC_CONFIG_INPUT_7),
"Definitions from nrf-adc.h do not match those from nrf_adc.h");
struct driver_data {
struct adc_context ctx;
nrf_adc_value_t *buffer;
uint8_t active_channels;
};
static struct driver_data m_data = {
ADC_CONTEXT_INIT_TIMER(m_data, ctx),
ADC_CONTEXT_INIT_LOCK(m_data, ctx),
ADC_CONTEXT_INIT_SYNC(m_data, ctx),
};
static nrfx_adc_channel_t m_channels[CONFIG_ADC_NRFX_ADC_CHANNEL_COUNT];
/* Implementation of the ADC driver API function: adc_channel_setup. */
static int adc_nrfx_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
uint8_t channel_id = channel_cfg->channel_id;
nrf_adc_config_t *config = &m_channels[channel_id].config;
if (channel_id >= CONFIG_ADC_NRFX_ADC_CHANNEL_COUNT) {
return -EINVAL;
}
if (channel_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
LOG_ERR("Selected ADC acquisition time is not valid");
return -EINVAL;
}
if (channel_cfg->differential) {
LOG_ERR("Differential channels are not supported");
return -EINVAL;
}
switch (channel_cfg->gain) {
case ADC_GAIN_1_3:
config->scaling = NRF_ADC_CONFIG_SCALING_INPUT_ONE_THIRD;
break;
case ADC_GAIN_2_3:
config->scaling = NRF_ADC_CONFIG_SCALING_INPUT_TWO_THIRDS;
break;
case ADC_GAIN_1:
config->scaling = NRF_ADC_CONFIG_SCALING_INPUT_FULL_SCALE;
break;
default:
LOG_ERR("Selected ADC gain is not valid");
return -EINVAL;
}
switch (channel_cfg->reference) {
case ADC_REF_INTERNAL:
config->reference = NRF_ADC_CONFIG_REF_VBG;
config->extref = NRF_ADC_CONFIG_EXTREFSEL_NONE;
break;
case ADC_REF_VDD_1_2:
config->reference = NRF_ADC_CONFIG_REF_SUPPLY_ONE_HALF;
config->extref = NRF_ADC_CONFIG_EXTREFSEL_NONE;
break;
case ADC_REF_VDD_1_3:
config->reference = NRF_ADC_CONFIG_REF_SUPPLY_ONE_THIRD;
config->extref = NRF_ADC_CONFIG_EXTREFSEL_NONE;
break;
case ADC_REF_EXTERNAL0:
config->reference = NRF_ADC_CONFIG_REF_EXT;
config->extref = NRF_ADC_CONFIG_EXTREFSEL_AREF0;
break;
case ADC_REF_EXTERNAL1:
config->reference = NRF_ADC_CONFIG_REF_EXT;
config->extref = NRF_ADC_CONFIG_EXTREFSEL_AREF1;
break;
default:
LOG_ERR("Selected ADC reference is not valid");
return -EINVAL;
}
config->input = channel_cfg->input_positive;
config->resolution = NRF_ADC_CONFIG_RES_8BIT;
return 0;
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
ARG_UNUSED(ctx);
nrfx_adc_buffer_convert(m_data.buffer, m_data.active_channels);
nrfx_adc_sample();
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
bool repeat)
{
ARG_UNUSED(ctx);
if (!repeat) {
m_data.buffer += m_data.active_channels;
}
}
static int check_buffer_size(const struct adc_sequence *sequence,
uint8_t active_channels)
{
size_t needed_buffer_size;
needed_buffer_size = active_channels * sizeof(nrf_adc_value_t);
if (sequence->options) {
needed_buffer_size *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < needed_buffer_size) {
LOG_ERR("Provided buffer is too small (%u/%u)",
sequence->buffer_size, needed_buffer_size);
return -ENOMEM;
}
return 0;
}
static int start_read(const struct device *dev,
const struct adc_sequence *sequence)
{
int error;
uint32_t selected_channels = sequence->channels;
uint8_t active_channels;
uint8_t channel_id;
nrf_adc_config_resolution_t nrf_resolution;
/* Signal an error if channel selection is invalid (no channels or
* a non-existing one is selected).
*/
if (!selected_channels ||
(selected_channels &
~BIT_MASK(CONFIG_ADC_NRFX_ADC_CHANNEL_COUNT))) {
LOG_ERR("Invalid selection of channels");
return -EINVAL;
}
if (sequence->oversampling != 0U) {
LOG_ERR("Oversampling is not supported");
return -EINVAL;
}
switch (sequence->resolution) {
case 8:
nrf_resolution = NRF_ADC_CONFIG_RES_8BIT;
break;
case 9:
nrf_resolution = NRF_ADC_CONFIG_RES_9BIT;
break;
case 10:
nrf_resolution = NRF_ADC_CONFIG_RES_10BIT;
break;
default:
LOG_ERR("ADC resolution value %d is not valid",
sequence->resolution);
return -EINVAL;
}
active_channels = 0U;
nrfx_adc_all_channels_disable();
/* Enable the channels selected for the pointed sequence.
*/
channel_id = 0U;
while (selected_channels) {
if (selected_channels & BIT(0)) {
/* The nrfx driver requires setting the resolution
* for each enabled channel individually.
*/
m_channels[channel_id].config.resolution =
nrf_resolution;
nrfx_adc_channel_enable(&m_channels[channel_id]);
++active_channels;
}
selected_channels >>= 1;
++channel_id;
}
error = check_buffer_size(sequence, active_channels);
if (error) {
return error;
}
m_data.buffer = sequence->buffer;
m_data.active_channels = active_channels;
adc_context_start_read(&m_data.ctx, sequence);
error = adc_context_wait_for_completion(&m_data.ctx);
return error;
}
/* Implementation of the ADC driver API function: adc_read. */
static int adc_nrfx_read(const struct device *dev,
const struct adc_sequence *sequence)
{
int error;
adc_context_lock(&m_data.ctx, false, NULL);
error = start_read(dev, sequence);
adc_context_release(&m_data.ctx, error);
return error;
}
#ifdef CONFIG_ADC_ASYNC
/* Implementation of the ADC driver API function: adc_read_sync. */
static int adc_nrfx_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
int error;
adc_context_lock(&m_data.ctx, true, async);
error = start_read(dev, sequence);
adc_context_release(&m_data.ctx, error);
return error;
}
#endif /* CONFIG_ADC_ASYNC */
static void event_handler(const nrfx_adc_evt_t *p_event)
{
const struct device *const dev = DEVICE_DT_INST_GET(0);
if (p_event->type == NRFX_ADC_EVT_DONE) {
adc_context_on_sampling_done(&m_data.ctx, dev);
}
}
static int init_adc(const struct device *dev)
{
const nrfx_adc_config_t config = NRFX_ADC_DEFAULT_CONFIG;
nrfx_err_t result = nrfx_adc_init(&config, event_handler);
if (result != NRFX_SUCCESS) {
LOG_ERR("Failed to initialize device: %s",
dev->name);
return -EBUSY;
}
IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority),
nrfx_isr, nrfx_adc_irq_handler, 0);
adc_context_unlock_unconditionally(&m_data.ctx);
return 0;
}
static const struct adc_driver_api adc_nrfx_driver_api = {
.channel_setup = adc_nrfx_channel_setup,
.read = adc_nrfx_read,
#ifdef CONFIG_ADC_ASYNC
.read_async = adc_nrfx_read_async,
#endif
.ref_internal = 1200,
};
/*
* There is only one instance on supported SoCs, so inst is guaranteed
* to be 0 if any instance is okay. (We use adc_0 above, so the driver
* is relying on the numeric instance value in a way that happens to
* be safe.)
*
* Just in case that assumption becomes invalid in the future, we use
* a BUILD_ASSERT().
*/
#define ADC_INIT(inst) \
BUILD_ASSERT((inst) == 0, \
"multiple instances not supported"); \
DEVICE_DT_INST_DEFINE(0, \
init_adc, NULL, NULL, NULL, \
POST_KERNEL, \
CONFIG_ADC_INIT_PRIORITY, \
&adc_nrfx_driver_api);
DT_INST_FOREACH_STATUS_OKAY(ADC_INIT)
``` | /content/code_sandbox/drivers/adc/adc_nrfx_adc.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,075 |
```unknown
# ADC configuration options
config ADC_VF610
bool "VF610 ADC driver"
depends on DT_HAS_NXP_VF610_ADC_ENABLED
default y
help
Enable the VF610 ADC driver.
``` | /content/code_sandbox/drivers/adc/Kconfig.vf610 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 43 |
```c
/*
*
*/
#define DT_DRV_COMPAT ite_it8xxx2_adc
#define LOG_LEVEL CONFIG_ADC_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(adc_ite_it8xxx2);
#include <zephyr/drivers/adc.h>
#include <zephyr/drivers/pinctrl.h>
#include <soc.h>
#include <soc_dt.h>
#include <errno.h>
#include <assert.h>
#include <zephyr/irq.h>
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
/* ADC internal reference voltage (Unit:mV) */
#ifdef CONFIG_ADC_IT8XXX2_VOL_FULL_SCALE
#define IT8XXX2_ADC_VREF_VOL 3300
#else
#define IT8XXX2_ADC_VREF_VOL 3000
#endif
/* ADC channels disabled */
#define IT8XXX2_ADC_CHANNEL_DISABLED 0x1F
/* ADC sample time delay (Unit:us) */
#define IT8XXX2_ADC_SAMPLE_TIME_US 500
/* Wait next clock rising (Clock source 32.768K) */
#define IT8XXX2_WAIT_NEXT_CLOCK_TIME_US 31
/* ADC channels offset */
#define ADC_CHANNEL_SHIFT 5
#define ADC_CHANNEL_OFFSET(ch) ((ch)-CHIP_ADC_CH13-ADC_CHANNEL_SHIFT)
#ifdef CONFIG_ADC_IT8XXX2_VOL_FULL_SCALE
#define ADC_0_7_FULL_SCALE_MASK GENMASK(7, 0)
#define ADC_8_10_FULL_SCALE_MASK GENMASK(2, 0)
#define ADC_13_16_FULL_SCALE_MASK GENMASK(3, 0)
#endif
#ifdef CONFIG_SOC_IT8XXX2_EC_BUS_24MHZ
/* Select analog clock division factor */
#define ADC_SACLKDIV_MASK GENMASK(6, 4)
#define ADC_SACLKDIV(div) FIELD_PREP(ADC_SACLKDIV_MASK, div)
#endif
/* List of ADC channels. */
enum chip_adc_channel {
CHIP_ADC_CH0 = 0,
CHIP_ADC_CH1,
CHIP_ADC_CH2,
CHIP_ADC_CH3,
CHIP_ADC_CH4,
CHIP_ADC_CH5,
CHIP_ADC_CH6,
CHIP_ADC_CH7,
CHIP_ADC_CH13,
CHIP_ADC_CH14,
CHIP_ADC_CH15,
CHIP_ADC_CH16,
CHIP_ADC_COUNT,
};
struct adc_it8xxx2_data {
struct adc_context ctx;
struct k_sem sem;
/* Channel ID */
uint32_t ch;
/* Save ADC result to the buffer. */
uint16_t *buffer;
/*
* The sample buffer pointer should be prepared
* for writing of next sampling results.
*/
uint16_t *repeat_buffer;
};
/*
* Structure adc_it8xxx2_cfg is about the setting of adc
* this config will be used at initial time
*/
struct adc_it8xxx2_cfg {
/* ADC alternate configuration */
const struct pinctrl_dev_config *pcfg;
};
#define ADC_IT8XXX2_REG_BASE \
((struct adc_it8xxx2_regs *)(DT_INST_REG_ADDR(0)))
static int adc_it8xxx2_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
uint8_t channel_id = channel_cfg->channel_id;
if (channel_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
LOG_ERR("Selected ADC acquisition time is not valid");
return -EINVAL;
}
/* Support channels 0~7 and 13~16 */
if (!((channel_id >= 0 && channel_id <= 7) ||
(channel_id >= 13 && channel_id <= 16))) {
LOG_ERR("Channel %d is not valid", channel_id);
return -EINVAL;
}
/* Channels 13~16 should be shifted by 5 */
if (channel_id > CHIP_ADC_CH7) {
channel_id -= ADC_CHANNEL_SHIFT;
}
if (channel_cfg->gain != ADC_GAIN_1) {
LOG_ERR("Invalid channel gain");
return -EINVAL;
}
if (channel_cfg->reference != ADC_REF_INTERNAL) {
LOG_ERR("Invalid channel reference");
return -EINVAL;
}
LOG_DBG("Channel setup succeeded!");
return 0;
}
static void adc_disable_measurement(uint32_t ch)
{
struct adc_it8xxx2_regs *const adc_regs = ADC_IT8XXX2_REG_BASE;
if (ch <= CHIP_ADC_CH7) {
/*
* Disable measurement.
* bit(4:0) = 0x1f : channel disable
*/
adc_regs->VCH0CTL = IT8XXX2_ADC_DATVAL |
IT8XXX2_ADC_CHANNEL_DISABLED;
} else {
/*
* Channels 13~16 controller setting.
* bit7 = 1: End of conversion. New data is available in
* VCHDATL/VCHDATM.
*/
adc_regs->adc_vchs_ctrl[ADC_CHANNEL_OFFSET(ch)].VCHCTL =
IT8XXX2_ADC_DATVAL;
}
/* ADC module disable */
adc_regs->ADCCFG &= ~IT8XXX2_ADC_ADCEN;
/* disable adc interrupt */
irq_disable(DT_INST_IRQN(0));
}
static int adc_data_valid(const struct device *dev)
{
struct adc_it8xxx2_regs *const adc_regs = ADC_IT8XXX2_REG_BASE;
struct adc_it8xxx2_data *data = dev->data;
return (data->ch <= CHIP_ADC_CH7) ?
(adc_regs->VCH0CTL & IT8XXX2_ADC_DATVAL) :
(adc_regs->ADCDVSTS2 & BIT(ADC_CHANNEL_OFFSET(data->ch)));
}
/* Get result for each ADC selected channel. */
static void adc_it8xxx2_get_sample(const struct device *dev)
{
struct adc_it8xxx2_data *data = dev->data;
struct adc_it8xxx2_regs *const adc_regs = ADC_IT8XXX2_REG_BASE;
if (adc_data_valid(dev)) {
if (data->ch <= CHIP_ADC_CH7) {
/* Read adc raw data of msb and lsb */
*data->buffer++ = adc_regs->VCH0DATM << 8 |
adc_regs->VCH0DATL;
} else {
/* Read adc channels 13~16 raw data of msb and lsb */
*data->buffer++ =
adc_regs->adc_vchs_ctrl[ADC_CHANNEL_OFFSET(data->ch)].VCHDATM << 8 |
adc_regs->adc_vchs_ctrl[ADC_CHANNEL_OFFSET(data->ch)].VCHDATL;
}
} else {
LOG_WRN("ADC failed to read (regs=%x, ch=%d)",
adc_regs->ADCDVSTS, data->ch);
}
adc_disable_measurement(data->ch);
}
static void adc_poll_valid_data(void)
{
const struct device *const dev = DEVICE_DT_INST_GET(0);
int valid = 0;
/*
* If the polling waits for a valid data longer than
* the sampling time limit, the program will return.
*/
for (int i = 0U; i < (IT8XXX2_ADC_SAMPLE_TIME_US /
IT8XXX2_WAIT_NEXT_CLOCK_TIME_US); i++) {
/* Wait next clock time (1/32.768K~=30.5us) */
k_busy_wait(IT8XXX2_WAIT_NEXT_CLOCK_TIME_US);
if (adc_data_valid(dev)) {
valid = 1;
break;
}
}
if (valid) {
adc_it8xxx2_get_sample(dev);
} else {
LOG_ERR("Sampling timeout.");
return;
}
}
static void adc_enable_measurement(uint32_t ch)
{
struct adc_it8xxx2_regs *const adc_regs = ADC_IT8XXX2_REG_BASE;
const struct device *const dev = DEVICE_DT_INST_GET(0);
struct adc_it8xxx2_data *data = dev->data;
if (ch <= CHIP_ADC_CH7) {
/* Select and enable a voltage channel input for measurement */
adc_regs->VCH0CTL = (IT8XXX2_ADC_DATVAL | IT8XXX2_ADC_INTDVEN) + ch;
} else {
/* Channels 13~16 controller setting */
adc_regs->adc_vchs_ctrl[ADC_CHANNEL_OFFSET(ch)].VCHCTL =
IT8XXX2_ADC_DATVAL | IT8XXX2_ADC_INTDVEN | IT8XXX2_ADC_VCHEN;
}
/* ADC module enable */
adc_regs->ADCCFG |= IT8XXX2_ADC_ADCEN;
/*
* In the sampling process, it is possible to read multiple channels
* at a time. The ADC sampling of it8xxx2 needs to read each channel
* in sequence, so it needs to wait for an interrupt to read data in
* the loop through k_sem_take(). But k_timer_start() is used in the
* interval test in test_adc.c, so we need to use polling wait instead
* of k_sem_take() to wait, otherwise it will cause kernel panic.
*
* k_is_in_isr() can determine whether to use polling or k_sem_take()
* at present.
*/
if (k_is_in_isr()) {
/* polling wait for a valid data */
adc_poll_valid_data();
} else {
/* Enable adc interrupt */
irq_enable(DT_INST_IRQN(0));
/* Wait for an interrupt to read valid data. */
k_sem_take(&data->sem, K_FOREVER);
}
}
static int check_buffer_size(const struct adc_sequence *sequence,
uint8_t active_channels)
{
size_t needed_buffer_size;
needed_buffer_size = active_channels * sizeof(uint16_t);
if (sequence->options) {
needed_buffer_size *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < needed_buffer_size) {
LOG_ERR("Provided buffer is too small (%u/%u)",
sequence->buffer_size, needed_buffer_size);
return -ENOMEM;
}
return 0;
}
static int adc_it8xxx2_start_read(const struct device *dev,
const struct adc_sequence *sequence)
{
struct adc_it8xxx2_data *data = dev->data;
uint32_t channel_mask = sequence->channels;
/* Channels 13~16 should be shifted to the right by 5 */
if (channel_mask > BIT(CHIP_ADC_CH7)) {
channel_mask >>= ADC_CHANNEL_SHIFT;
}
if (!channel_mask || channel_mask & ~BIT_MASK(CHIP_ADC_COUNT)) {
LOG_ERR("Invalid selection of channels");
return -EINVAL;
}
if (!sequence->resolution) {
LOG_ERR("ADC resolution is not valid");
return -EINVAL;
}
LOG_DBG("Configure resolution=%d", sequence->resolution);
data->buffer = sequence->buffer;
adc_context_start_read(&data->ctx, sequence);
return adc_context_wait_for_completion(&data->ctx);
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct adc_it8xxx2_data *data =
CONTAINER_OF(ctx, struct adc_it8xxx2_data, ctx);
uint32_t channels = ctx->sequence.channels;
uint8_t channel_count = 0;
data->repeat_buffer = data->buffer;
/*
* The ADC sampling of it8xxx2 needs to read each channel
* in sequence.
*/
while (channels) {
data->ch = find_lsb_set(channels) - 1;
channels &= ~BIT(data->ch);
adc_enable_measurement(data->ch);
channel_count++;
}
if (check_buffer_size(&ctx->sequence, channel_count)) {
return;
}
adc_context_on_sampling_done(&data->ctx, DEVICE_DT_INST_GET(0));
}
static int adc_it8xxx2_read(const struct device *dev,
const struct adc_sequence *sequence)
{
struct adc_it8xxx2_data *data = dev->data;
int err;
adc_context_lock(&data->ctx, false, NULL);
err = adc_it8xxx2_start_read(dev, sequence);
adc_context_release(&data->ctx, err);
return err;
}
#ifdef CONFIG_ADC_ASYNC
static int adc_it8xxx2_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
struct adc_it8xxx2_data *data = dev->data;
int err;
adc_context_lock(&data->ctx, true, async);
err = adc_it8xxx2_start_read(dev, sequence);
adc_context_release(&data->ctx, err);
return err;
}
#endif /* CONFIG_ADC_ASYNC */
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
bool repeat_sampling)
{
struct adc_it8xxx2_data *data =
CONTAINER_OF(ctx, struct adc_it8xxx2_data, ctx);
if (repeat_sampling) {
data->buffer = data->repeat_buffer;
}
}
static void adc_it8xxx2_isr(const struct device *dev)
{
struct adc_it8xxx2_data *data = dev->data;
LOG_DBG("ADC ISR triggered.");
adc_it8xxx2_get_sample(dev);
k_sem_give(&data->sem);
}
static const struct adc_driver_api api_it8xxx2_driver_api = {
.channel_setup = adc_it8xxx2_channel_setup,
.read = adc_it8xxx2_read,
#ifdef CONFIG_ADC_ASYNC
.read_async = adc_it8xxx2_read_async,
#endif
.ref_internal = IT8XXX2_ADC_VREF_VOL,
};
/*
* ADC analog accuracy initialization (only once after VSTBY power on)
*
* Write 1 to this bit and write 0 to this bit immediately once and
* only once during the firmware initialization and do not write 1 again
* after initialization since IT83xx takes much power consumption
* if this bit is set as 1
*/
static void adc_accuracy_initialization(void)
{
struct adc_it8xxx2_regs *const adc_regs = ADC_IT8XXX2_REG_BASE;
/* Start adc accuracy initialization */
adc_regs->ADCSTS |= IT8XXX2_ADC_AINITB;
/* Enable automatic HW calibration. */
adc_regs->KDCTL |= IT8XXX2_ADC_AHCE;
/* Stop adc accuracy initialization */
adc_regs->ADCSTS &= ~IT8XXX2_ADC_AINITB;
}
static int adc_it8xxx2_init(const struct device *dev)
{
const struct adc_it8xxx2_cfg *config = dev->config;
struct adc_it8xxx2_data *data = dev->data;
struct adc_it8xxx2_regs *const adc_regs = ADC_IT8XXX2_REG_BASE;
int status;
#ifdef CONFIG_ADC_IT8XXX2_VOL_FULL_SCALE
/* ADC input voltage 0V ~ AVCC (3.3V) is mapped into 0h-3FFh */
adc_regs->ADCIVMFSCS1 = ADC_0_7_FULL_SCALE_MASK;
adc_regs->ADCIVMFSCS2 = ADC_8_10_FULL_SCALE_MASK;
adc_regs->ADCIVMFSCS3 = ADC_13_16_FULL_SCALE_MASK;
#endif
/* ADC analog accuracy initialization */
adc_accuracy_initialization();
/* Set the pin to ADC alternate function. */
status = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
if (status < 0) {
LOG_ERR("Failed to configure ADC pins");
return status;
}
/*
* The ADC channel conversion time is 30.8*(SCLKDIV+1) us.
* (Current setting is 61.6us)
*
* NOTE: A sample time delay (60us) also need to be included in
* conversion time.
* In addition, the ADC has a waiting time of 202.8us for
* voltage stabilization.
*
* So the final ADC sample time result is ~= 324.4us.
*/
adc_regs->ADCSTS &= ~IT8XXX2_ADC_ADCCTS1;
adc_regs->ADCCFG &= ~IT8XXX2_ADC_ADCCTS0;
/*
* bit[5-0]@ADCCTL : SCLKDIV
* SCLKDIV has to be equal to or greater than 1h;
*/
adc_regs->ADCCTL = 1;
#ifdef CONFIG_SOC_IT8XXX2_EC_BUS_24MHZ
adc_regs->ADCCTL1 =
(adc_regs->ADCCTL1 & ~ADC_SACLKDIV_MASK) | ADC_SACLKDIV(2);
#endif
/*
* Enable this bit, and data of VCHxDATL/VCHxDATM will be
* kept until data valid is cleared.
*/
adc_regs->ADCGCR |= IT8XXX2_ADC_DBKEN;
IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority),
adc_it8xxx2_isr, DEVICE_DT_INST_GET(0), 0);
k_sem_init(&data->sem, 0, 1);
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
static struct adc_it8xxx2_data adc_it8xxx2_data_0 = {
ADC_CONTEXT_INIT_TIMER(adc_it8xxx2_data_0, ctx),
ADC_CONTEXT_INIT_LOCK(adc_it8xxx2_data_0, ctx),
ADC_CONTEXT_INIT_SYNC(adc_it8xxx2_data_0, ctx),
};
PINCTRL_DT_INST_DEFINE(0);
static const struct adc_it8xxx2_cfg adc_it8xxx2_cfg_0 = {
.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0),
};
DEVICE_DT_INST_DEFINE(0, adc_it8xxx2_init,
NULL,
&adc_it8xxx2_data_0,
&adc_it8xxx2_cfg_0, PRE_KERNEL_1,
CONFIG_ADC_INIT_PRIORITY,
&api_it8xxx2_driver_api);
``` | /content/code_sandbox/drivers/adc/adc_ite_it8xxx2.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,840 |
```unknown
# NUMAKER ADC Driver configuration options
config ADC_NUMAKER
bool "Nuvoton NuMaker MCU ADC driver"
default y
select HAS_NUMAKER_ADC
depends on DT_HAS_NUVOTON_NUMAKER_ADC_ENABLED
help
This option enables the ADC driver for Nuvoton NuMaker family of
processors.
Say y if you wish to enable NuMaker ADC.
``` | /content/code_sandbox/drivers/adc/Kconfig.numaker | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 81 |
```c
/*
*
*/
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/adc.h>
#include <zephyr/drivers/adc/ads114s0x.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/dt-bindings/adc/ads114s0x_adc.h>
#include <zephyr/logging/log.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/util.h>
#define ADC_CONTEXT_USES_KERNEL_TIMER 1
#define ADC_CONTEXT_WAIT_FOR_COMPLETION_TIMEOUT \
K_MSEC(CONFIG_ADC_ADS114S0X_WAIT_FOR_COMPLETION_TIMEOUT_MS)
#include "adc_context.h"
LOG_MODULE_REGISTER(ads114s0x, CONFIG_ADC_LOG_LEVEL);
#define ADS114S0X_CLK_FREQ_IN_KHZ 4096
#define ADS114S0X_RESET_LOW_TIME_IN_CLOCK_CYCLES 4
#define ADS114S0X_START_SYNC_PULSE_DURATION_IN_CLOCK_CYCLES 4
#define ADS114S0X_SETUP_TIME_IN_CLOCK_CYCLES 32
#define ADS114S0X_INPUT_SELECTION_AINCOM 12
#define ADS114S0X_RESOLUTION 16
#define ADS114S0X_REF_INTERNAL 2500
#define ADS114S0X_GPIO_MAX 3
#define ADS114S0X_POWER_ON_RESET_TIME_IN_US 2200
#define ADS114S0X_VBIAS_PIN_MAX 7
#define ADS114S0X_VBIAS_PIN_MIN 0
/* Not mentioned in the datasheet, but instead determined experimentally. */
#define ADS114S0X_RESET_DELAY_TIME_SAFETY_MARGIN_IN_US 1000
#define ADS114S0X_RESET_DELAY_TIME_IN_US \
(4096 * 1000 / ADS114S0X_CLK_FREQ_IN_KHZ + ADS114S0X_RESET_DELAY_TIME_SAFETY_MARGIN_IN_US)
#define ADS114S0X_RESET_LOW_TIME_IN_US \
(ADS114S0X_RESET_LOW_TIME_IN_CLOCK_CYCLES * 1000 / ADS114S0X_CLK_FREQ_IN_KHZ)
#define ADS114S0X_START_SYNC_PULSE_DURATION_IN_US \
(ADS114S0X_START_SYNC_PULSE_DURATION_IN_CLOCK_CYCLES * 1000 / ADS114S0X_CLK_FREQ_IN_KHZ)
#define ADS114S0X_SETUP_TIME_IN_US \
(ADS114S0X_SETUP_TIME_IN_CLOCK_CYCLES * 1000 / ADS114S0X_CLK_FREQ_IN_KHZ)
enum ads114s0x_command {
ADS114S0X_COMMAND_NOP = 0x00,
ADS114S0X_COMMAND_WAKEUP = 0x02,
ADS114S0X_COMMAND_POWERDOWN = 0x04,
ADS114S0X_COMMAND_RESET = 0x06,
ADS114S0X_COMMAND_START = 0x08,
ADS114S0X_COMMAND_STOP = 0x0A,
ADS114S0X_COMMAND_SYOCAL = 0x16,
ADS114S0X_COMMAND_SYGCAL = 0x17,
ADS114S0X_COMMAND_SFOCAL = 0x19,
ADS114S0X_COMMAND_RDATA = 0x12,
ADS114S0X_COMMAND_RREG = 0x20,
ADS114S0X_COMMAND_WREG = 0x40,
};
enum ads114s0x_register {
ADS114S0X_REGISTER_ID = 0x00,
ADS114S0X_REGISTER_STATUS = 0x01,
ADS114S0X_REGISTER_INPMUX = 0x02,
ADS114S0X_REGISTER_PGA = 0x03,
ADS114S0X_REGISTER_DATARATE = 0x04,
ADS114S0X_REGISTER_REF = 0x05,
ADS114S0X_REGISTER_IDACMAG = 0x06,
ADS114S0X_REGISTER_IDACMUX = 0x07,
ADS114S0X_REGISTER_VBIAS = 0x08,
ADS114S0X_REGISTER_SYS = 0x09,
ADS114S0X_REGISTER_OFCAL0 = 0x0B,
ADS114S0X_REGISTER_OFCAL1 = 0x0C,
ADS114S0X_REGISTER_FSCAL0 = 0x0E,
ADS114S0X_REGISTER_FSCAL1 = 0x0F,
ADS114S0X_REGISTER_GPIODAT = 0x10,
ADS114S0X_REGISTER_GPIOCON = 0x11,
};
#define ADS114S0X_REGISTER_GET_VALUE(value, pos, length) \
FIELD_GET(GENMASK(pos + length - 1, pos), value)
#define ADS114S0X_REGISTER_SET_VALUE(target, value, pos, length) \
target &= ~GENMASK(pos + length - 1, pos); \
target |= FIELD_PREP(GENMASK(pos + length - 1, pos), value)
#define ADS114S0X_REGISTER_ID_DEV_ID_LENGTH 3
#define ADS114S0X_REGISTER_ID_DEV_ID_POS 0
#define ADS114S0X_REGISTER_ID_DEV_ID_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_ID_DEV_ID_POS, \
ADS114S0X_REGISTER_ID_DEV_ID_LENGTH)
#define ADS114S0X_REGISTER_ID_DEV_ID_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_ID_DEV_ID_POS, \
ADS114S0X_REGISTER_ID_DEV_ID_LENGTH)
#define ADS114S0X_REGISTER_STATUS_FL_POR_LENGTH 1
#define ADS114S0X_REGISTER_STATUS_FL_POR_POS 7
#define ADS114S0X_REGISTER_STATUS_FL_POR_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_STATUS_FL_POR_POS, \
ADS114S0X_REGISTER_STATUS_FL_POR_LENGTH)
#define ADS114S0X_REGISTER_STATUS_FL_POR_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_STATUS_FL_POR_POS, \
ADS114S0X_REGISTER_STATUS_FL_POR_LENGTH)
#define ADS114S0X_REGISTER_STATUS_NOT_RDY_LENGTH 1
#define ADS114S0X_REGISTER_STATUS_NOT_RDY_POS 6
#define ADS114S0X_REGISTER_STATUS_NOT_RDY_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_STATUS_NOT_RDY_POS, \
ADS114S0X_REGISTER_STATUS_NOT_RDY_LENGTH)
#define ADS114S0X_REGISTER_STATUS_NOT_RDY_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_STATUS_NOT_RDY_POS, \
ADS114S0X_REGISTER_STATUS_NOT_RDY_LENGTH)
#define ADS114S0X_REGISTER_STATUS_FL_P_RAILP_LENGTH 1
#define ADS114S0X_REGISTER_STATUS_FL_P_RAILP_POS 5
#define ADS114S0X_REGISTER_STATUS_FL_P_RAILP_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_STATUS_FL_P_RAILP_POS, \
ADS114S0X_REGISTER_STATUS_FL_P_RAILP_LENGTH)
#define ADS114S0X_REGISTER_STATUS_FL_P_RAILP_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_STATUS_FL_P_RAILP_POS, \
ADS114S0X_REGISTER_STATUS_FL_P_RAILP_LENGTH)
#define ADS114S0X_REGISTER_STATUS_FL_P_RAILN_LENGTH 1
#define ADS114S0X_REGISTER_STATUS_FL_P_RAILN_POS 4
#define ADS114S0X_REGISTER_STATUS_FL_P_RAILN_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_STATUS_FL_P_RAILN_POS, \
ADS114S0X_REGISTER_STATUS_FL_P_RAILN_LENGTH)
#define ADS114S0X_REGISTER_STATUS_FL_P_RAILN_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_STATUS_FL_P_RAILN_POS, \
ADS114S0X_REGISTER_STATUS_FL_P_RAILN_LENGTH)
#define ADS114S0X_REGISTER_STATUS_FL_N_RAILP_LENGTH 1
#define ADS114S0X_REGISTER_STATUS_FL_N_RAILP_POS 3
#define ADS114S0X_REGISTER_STATUS_FL_N_RAILP_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_STATUS_FL_N_RAILP_POS, \
ADS114S0X_REGISTER_STATUS_FL_N_RAILP_LENGTH)
#define ADS114S0X_REGISTER_STATUS_FL_N_RAILP_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_STATUS_FL_N_RAILP_POS, \
ADS114S0X_REGISTER_STATUS_FL_N_RAILP_LENGTH)
#define ADS114S0X_REGISTER_STATUS_FL_N_RAILN_LENGTH 1
#define ADS114S0X_REGISTER_STATUS_FL_N_RAILN_POS 2
#define ADS114S0X_REGISTER_STATUS_FL_N_RAILN_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_STATUS_FL_N_RAILN_POS, \
ADS114S0X_REGISTER_STATUS_FL_N_RAILN_LENGTH)
#define ADS114S0X_REGISTER_STATUS_FL_N_RAILN_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_STATUS_FL_N_RAILN_POS, \
ADS114S0X_REGISTER_STATUS_FL_N_RAILN_LENGTH)
#define ADS114S0X_REGISTER_STATUS_FL_REF_L1_LENGTH 1
#define ADS114S0X_REGISTER_STATUS_FL_REF_L1_POS 1
#define ADS114S0X_REGISTER_STATUS_FL_REF_L1_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_STATUS_FL_REF_L1_POS, \
ADS114S0X_REGISTER_STATUS_FL_REF_L1_LENGTH)
#define ADS114S0X_REGISTER_STATUS_FL_REF_L1_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_STATUS_FL_REF_L1_POS, \
ADS114S0X_REGISTER_STATUS_FL_REF_L1_LENGTH)
#define ADS114S0X_REGISTER_STATUS_FL_REF_L0_LENGTH 1
#define ADS114S0X_REGISTER_STATUS_FL_REF_L0_POS 0
#define ADS114S0X_REGISTER_STATUS_FL_REF_L0_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_STATUS_FL_REF_L0_POS, \
ADS114S0X_REGISTER_STATUS_FL_REF_L0_LENGTH)
#define ADS114S0X_REGISTER_STATUS_FL_REF_L0_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_STATUS_FL_REF_L0_POS, \
ADS114S0X_REGISTER_STATUS_FL_REF_L0_LENGTH)
#define ADS114S0X_REGISTER_INPMUX_MUXP_LENGTH 4
#define ADS114S0X_REGISTER_INPMUX_MUXP_POS 4
#define ADS114S0X_REGISTER_INPMUX_MUXP_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_INPMUX_MUXP_POS, \
ADS114S0X_REGISTER_INPMUX_MUXP_LENGTH)
#define ADS114S0X_REGISTER_INPMUX_MUXP_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_INPMUX_MUXP_POS, \
ADS114S0X_REGISTER_INPMUX_MUXP_LENGTH)
#define ADS114S0X_REGISTER_INPMUX_MUXN_LENGTH 4
#define ADS114S0X_REGISTER_INPMUX_MUXN_POS 0
#define ADS114S0X_REGISTER_INPMUX_MUXN_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_INPMUX_MUXN_POS, \
ADS114S0X_REGISTER_INPMUX_MUXN_LENGTH)
#define ADS114S0X_REGISTER_INPMUX_MUXN_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_INPMUX_MUXN_POS, \
ADS114S0X_REGISTER_INPMUX_MUXN_LENGTH)
#define ADS114S0X_REGISTER_PGA_DELAY_LENGTH 3
#define ADS114S0X_REGISTER_PGA_DELAY_POS 5
#define ADS114S0X_REGISTER_PGA_DELAY_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_PGA_DELAY_POS, \
ADS114S0X_REGISTER_PGA_DELAY_LENGTH)
#define ADS114S0X_REGISTER_PGA_DELAY_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_PGA_DELAY_POS, \
ADS114S0X_REGISTER_PGA_DELAY_LENGTH)
#define ADS114S0X_REGISTER_PGA_PGA_EN_LENGTH 2
#define ADS114S0X_REGISTER_PGA_PGA_EN_POS 3
#define ADS114S0X_REGISTER_PGA_PGA_EN_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_PGA_PGA_EN_POS, \
ADS114S0X_REGISTER_PGA_PGA_EN_LENGTH)
#define ADS114S0X_REGISTER_PGA_PGA_EN_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_PGA_PGA_EN_POS, \
ADS114S0X_REGISTER_PGA_PGA_EN_LENGTH)
#define ADS114S0X_REGISTER_PGA_GAIN_LENGTH 3
#define ADS114S0X_REGISTER_PGA_GAIN_POS 0
#define ADS114S0X_REGISTER_PGA_GAIN_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_PGA_GAIN_POS, \
ADS114S0X_REGISTER_PGA_GAIN_LENGTH)
#define ADS114S0X_REGISTER_PGA_GAIN_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_PGA_GAIN_POS, \
ADS114S0X_REGISTER_PGA_GAIN_LENGTH)
#define ADS114S0X_REGISTER_DATARATE_G_CHOP_LENGTH 1
#define ADS114S0X_REGISTER_DATARATE_G_CHOP_POS 7
#define ADS114S0X_REGISTER_DATARATE_G_CHOP_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_DATARATE_G_CHOP_POS, \
ADS114S0X_REGISTER_DATARATE_G_CHOP_LENGTH)
#define ADS114S0X_REGISTER_DATARATE_G_CHOP_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_DATARATE_G_CHOP_POS, \
ADS114S0X_REGISTER_DATARATE_G_CHOP_LENGTH)
#define ADS114S0X_REGISTER_DATARATE_CLK_LENGTH 1
#define ADS114S0X_REGISTER_DATARATE_CLK_POS 6
#define ADS114S0X_REGISTER_DATARATE_CLK_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_DATARATE_CLK_POS, \
ADS114S0X_REGISTER_DATARATE_CLK_LENGTH)
#define ADS114S0X_REGISTER_DATARATE_CLK_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_DATARATE_CLK_POS, \
ADS114S0X_REGISTER_DATARATE_CLK_LENGTH)
#define ADS114S0X_REGISTER_DATARATE_MODE_LENGTH 1
#define ADS114S0X_REGISTER_DATARATE_MODE_POS 5
#define ADS114S0X_REGISTER_DATARATE_MODE_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_DATARATE_MODE_POS, \
ADS114S0X_REGISTER_DATARATE_MODE_LENGTH)
#define ADS114S0X_REGISTER_DATARATE_MODE_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_DATARATE_MODE_POS, \
ADS114S0X_REGISTER_DATARATE_MODE_LENGTH)
#define ADS114S0X_REGISTER_DATARATE_FILTER_LENGTH 1
#define ADS114S0X_REGISTER_DATARATE_FILTER_POS 4
#define ADS114S0X_REGISTER_DATARATE_FILTER_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_DATARATE_FILTER_POS, \
ADS114S0X_REGISTER_DATARATE_FILTER_LENGTH)
#define ADS114S0X_REGISTER_DATARATE_FILTER_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_DATARATE_FILTER_POS, \
ADS114S0X_REGISTER_DATARATE_FILTER_LENGTH)
#define ADS114S0X_REGISTER_DATARATE_DR_LENGTH 4
#define ADS114S0X_REGISTER_DATARATE_DR_POS 0
#define ADS114S0X_REGISTER_DATARATE_DR_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_DATARATE_DR_POS, \
ADS114S0X_REGISTER_DATARATE_DR_LENGTH)
#define ADS114S0X_REGISTER_DATARATE_DR_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_DATARATE_DR_POS, \
ADS114S0X_REGISTER_DATARATE_DR_LENGTH)
#define ADS114S0X_REGISTER_REF_FL_REF_EN_LENGTH 2
#define ADS114S0X_REGISTER_REF_FL_REF_EN_POS 6
#define ADS114S0X_REGISTER_REF_FL_REF_EN_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_REF_FL_REF_EN_POS, \
ADS114S0X_REGISTER_REF_FL_REF_EN_LENGTH)
#define ADS114S0X_REGISTER_REF_FL_REF_EN_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_REF_FL_REF_EN_POS, \
ADS114S0X_REGISTER_REF_FL_REF_EN_LENGTH)
#define ADS114S0X_REGISTER_REF_NOT_REFP_BUF_LENGTH 1
#define ADS114S0X_REGISTER_REF_NOT_REFP_BUF_POS 5
#define ADS114S0X_REGISTER_REF_NOT_REFP_BUF_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_REF_NOT_REFP_BUF_POS, \
ADS114S0X_REGISTER_REF_NOT_REFP_BUF_LENGTH)
#define ADS114S0X_REGISTER_REF_NOT_REFP_BUF_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_REF_NOT_REFP_BUF_POS, \
ADS114S0X_REGISTER_REF_NOT_REFP_BUF_LENGTH)
#define ADS114S0X_REGISTER_REF_NOT_REFN_BUF_LENGTH 1
#define ADS114S0X_REGISTER_REF_NOT_REFN_BUF_POS 4
#define ADS114S0X_REGISTER_REF_NOT_REFN_BUF_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_REF_NOT_REFN_BUF_POS, \
ADS114S0X_REGISTER_REF_NOT_REFN_BUF_LENGTH)
#define ADS114S0X_REGISTER_REF_NOT_REFN_BUF_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_REF_NOT_REFN_BUF_POS, \
ADS114S0X_REGISTER_REF_NOT_REFN_BUF_LENGTH)
#define ADS114S0X_REGISTER_REF_REFSEL_LENGTH 2
#define ADS114S0X_REGISTER_REF_REFSEL_POS 2
#define ADS114S0X_REGISTER_REF_REFSEL_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_REF_REFSEL_POS, \
ADS114S0X_REGISTER_REF_REFSEL_LENGTH)
#define ADS114S0X_REGISTER_REF_REFSEL_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_REF_REFSEL_POS, \
ADS114S0X_REGISTER_REF_REFSEL_LENGTH)
#define ADS114S0X_REGISTER_REF_REFCON_LENGTH 2
#define ADS114S0X_REGISTER_REF_REFCON_POS 0
#define ADS114S0X_REGISTER_REF_REFCON_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_REF_REFCON_POS, \
ADS114S0X_REGISTER_REF_REFCON_LENGTH)
#define ADS114S0X_REGISTER_REF_REFCON_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_REF_REFCON_POS, \
ADS114S0X_REGISTER_REF_REFCON_LENGTH)
#define ADS114S0X_REGISTER_IDACMAG_FL_RAIL_EN_LENGTH 1
#define ADS114S0X_REGISTER_IDACMAG_FL_RAIL_EN_POS 7
#define ADS114S0X_REGISTER_IDACMAG_FL_RAIL_EN_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_IDACMAG_FL_RAIL_EN_POS, \
ADS114S0X_REGISTER_IDACMAG_FL_RAIL_EN_LENGTH)
#define ADS114S0X_REGISTER_IDACMAG_FL_RAIL_EN_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_IDACMAG_FL_RAIL_EN_POS, \
ADS114S0X_REGISTER_IDACMAG_FL_RAIL_EN_LENGTH)
#define ADS114S0X_REGISTER_IDACMAG_PSW_LENGTH 1
#define ADS114S0X_REGISTER_IDACMAG_PSW_POS 6
#define ADS114S0X_REGISTER_IDACMAG_PSW_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_IDACMAG_PSW_POS, \
ADS114S0X_REGISTER_IDACMAG_PSW_LENGTH)
#define ADS114S0X_REGISTER_IDACMAG_PSW_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_IDACMAG_PSW_POS, \
ADS114S0X_REGISTER_IDACMAG_PSW_LENGTH)
#define ADS114S0X_REGISTER_IDACMAG_IMAG_LENGTH 4
#define ADS114S0X_REGISTER_IDACMAG_IMAG_POS 0
#define ADS114S0X_REGISTER_IDACMAG_IMAG_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_IDACMAG_IMAG_POS, \
ADS114S0X_REGISTER_IDACMAG_IMAG_LENGTH)
#define ADS114S0X_REGISTER_IDACMAG_IMAG_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_IDACMAG_IMAG_POS, \
ADS114S0X_REGISTER_IDACMAG_IMAG_LENGTH)
#define ADS114S0X_REGISTER_IDACMUX_I2MUX_LENGTH 4
#define ADS114S0X_REGISTER_IDACMUX_I2MUX_POS 4
#define ADS114S0X_REGISTER_IDACMUX_I2MUX_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_IDACMUX_I2MUX_POS, \
ADS114S0X_REGISTER_IDACMUX_I2MUX_LENGTH)
#define ADS114S0X_REGISTER_IDACMUX_I2MUX_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_IDACMUX_I2MUX_POS, \
ADS114S0X_REGISTER_IDACMUX_I2MUX_LENGTH)
#define ADS114S0X_REGISTER_IDACMUX_I1MUX_LENGTH 4
#define ADS114S0X_REGISTER_IDACMUX_I1MUX_POS 0
#define ADS114S0X_REGISTER_IDACMUX_I1MUX_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_IDACMUX_I1MUX_POS, \
ADS114S0X_REGISTER_IDACMUX_I1MUX_LENGTH)
#define ADS114S0X_REGISTER_IDACMUX_I1MUX_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_IDACMUX_I1MUX_POS, \
ADS114S0X_REGISTER_IDACMUX_I1MUX_LENGTH)
#define ADS114S0X_REGISTER_VBIAS_VB_LEVEL_LENGTH 1
#define ADS114S0X_REGISTER_VBIAS_VB_LEVEL_POS 7
#define ADS114S0X_REGISTER_VBIAS_VB_LEVEL_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_VBIAS_VB_LEVEL_POS, \
ADS114S0X_REGISTER_VBIAS_VB_LEVEL_LENGTH)
#define ADS114S0X_REGISTER_VBIAS_VB_LEVEL_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_VBIAS_VB_LEVEL_POS, \
ADS114S0X_REGISTER_VBIAS_VB_LEVEL_LENGTH)
#define ADS114S0X_REGISTER_GPIODAT_DIR_LENGTH 4
#define ADS114S0X_REGISTER_GPIODAT_DIR_POS 4
#define ADS114S0X_REGISTER_GPIODAT_DIR_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_GPIODAT_DIR_POS, \
ADS114S0X_REGISTER_GPIODAT_DIR_LENGTH)
#define ADS114S0X_REGISTER_GPIODAT_DIR_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_GPIODAT_DIR_POS, \
ADS114S0X_REGISTER_GPIODAT_DIR_LENGTH)
#define ADS114S0X_REGISTER_GPIODAT_DAT_LENGTH 4
#define ADS114S0X_REGISTER_GPIODAT_DAT_POS 0
#define ADS114S0X_REGISTER_GPIODAT_DAT_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_GPIODAT_DAT_POS, \
ADS114S0X_REGISTER_GPIODAT_DAT_LENGTH)
#define ADS114S0X_REGISTER_GPIODAT_DAT_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_GPIODAT_DAT_POS, \
ADS114S0X_REGISTER_GPIODAT_DAT_LENGTH)
#define ADS114S0X_REGISTER_GPIOCON_CON_LENGTH 4
#define ADS114S0X_REGISTER_GPIOCON_CON_POS 0
#define ADS114S0X_REGISTER_GPIOCON_CON_GET(value) \
ADS114S0X_REGISTER_GET_VALUE(value, ADS114S0X_REGISTER_GPIOCON_CON_POS, \
ADS114S0X_REGISTER_GPIOCON_CON_LENGTH)
#define ADS114S0X_REGISTER_GPIOCON_CON_SET(target, value) \
ADS114S0X_REGISTER_SET_VALUE(target, value, ADS114S0X_REGISTER_GPIOCON_CON_POS, \
ADS114S0X_REGISTER_GPIOCON_CON_LENGTH)
/*
* - AIN0 as positive input
* - AIN1 as negative input
*/
#define ADS114S0X_REGISTER_INPMUX_SET_DEFAULTS(target) \
ADS114S0X_REGISTER_INPMUX_MUXP_SET(target, 0b0000); \
ADS114S0X_REGISTER_INPMUX_MUXN_SET(target, 0b0001)
/*
* - disable reference monitor
* - enable positive reference buffer
* - disable negative reference buffer
* - use internal reference
* - enable internal voltage reference
*/
#define ADS114S0X_REGISTER_REF_SET_DEFAULTS(target) \
ADS114S0X_REGISTER_REF_FL_REF_EN_SET(target, 0b00); \
ADS114S0X_REGISTER_REF_NOT_REFP_BUF_SET(target, 0b0); \
ADS114S0X_REGISTER_REF_NOT_REFN_BUF_SET(target, 0b1); \
ADS114S0X_REGISTER_REF_REFSEL_SET(target, 0b10); \
ADS114S0X_REGISTER_REF_REFCON_SET(target, 0b01)
/*
* - disable global chop
* - use internal oscillator
* - single shot conversion mode
* - low latency filter
* - 20 samples per second
*/
#define ADS114S0X_REGISTER_DATARATE_SET_DEFAULTS(target) \
ADS114S0X_REGISTER_DATARATE_G_CHOP_SET(target, 0b0); \
ADS114S0X_REGISTER_DATARATE_CLK_SET(target, 0b0); \
ADS114S0X_REGISTER_DATARATE_MODE_SET(target, 0b1); \
ADS114S0X_REGISTER_DATARATE_FILTER_SET(target, 0b1); \
ADS114S0X_REGISTER_DATARATE_DR_SET(target, 0b0100)
/*
* - delay of 14*t_mod
* - disable gain
* - gain 1
*/
#define ADS114S0X_REGISTER_PGA_SET_DEFAULTS(target) \
ADS114S0X_REGISTER_PGA_DELAY_SET(target, 0b000); \
ADS114S0X_REGISTER_PGA_PGA_EN_SET(target, 0b00); \
ADS114S0X_REGISTER_PGA_GAIN_SET(target, 0b000)
/*
* - disable PGA output rail flag
* - low-side power switch
* - IDAC off
*/
#define ADS114S0X_REGISTER_IDACMAG_SET_DEFAULTS(target) \
ADS114S0X_REGISTER_IDACMAG_FL_RAIL_EN_SET(target, 0b0); \
ADS114S0X_REGISTER_IDACMAG_PSW_SET(target, 0b0); \
ADS114S0X_REGISTER_IDACMAG_IMAG_SET(target, 0b0000)
/*
* - disconnect IDAC1
* - disconnect IDAC2
*/
#define ADS114S0X_REGISTER_IDACMUX_SET_DEFAULTS(target) \
ADS114S0X_REGISTER_IDACMUX_I1MUX_SET(target, 0b1111); \
ADS114S0X_REGISTER_IDACMUX_I2MUX_SET(target, 0b1111)
struct ads114s0x_config {
struct spi_dt_spec bus;
#if CONFIG_ADC_ASYNC
k_thread_stack_t *stack;
#endif
const struct gpio_dt_spec gpio_reset;
const struct gpio_dt_spec gpio_data_ready;
const struct gpio_dt_spec gpio_start_sync;
int idac_current;
uint8_t vbias_level;
};
struct ads114s0x_data {
struct adc_context ctx;
#if CONFIG_ADC_ASYNC
struct k_thread thread;
#endif /* CONFIG_ADC_ASYNC */
struct gpio_callback callback_data_ready;
struct k_sem data_ready_signal;
struct k_sem acquire_signal;
int16_t *buffer;
int16_t *buffer_ptr;
#if CONFIG_ADC_ADS114S0X_GPIO
struct k_mutex gpio_lock;
uint8_t gpio_enabled; /* one bit per GPIO, 1 = enabled */
uint8_t gpio_direction; /* one bit per GPIO, 1 = input */
uint8_t gpio_value; /* one bit per GPIO, 1 = high */
#endif /* CONFIG_ADC_ADS114S0X_GPIO */
};
static void ads114s0x_data_ready_handler(const struct device *dev, struct gpio_callback *gpio_cb,
uint32_t pins)
{
ARG_UNUSED(dev);
ARG_UNUSED(pins);
struct ads114s0x_data *data =
CONTAINER_OF(gpio_cb, struct ads114s0x_data, callback_data_ready);
k_sem_give(&data->data_ready_signal);
}
static int ads114s0x_read_register(const struct device *dev,
enum ads114s0x_register register_address, uint8_t *value)
{
const struct ads114s0x_config *config = dev->config;
uint8_t buffer_tx[3];
uint8_t buffer_rx[ARRAY_SIZE(buffer_tx)];
const struct spi_buf tx_buf[] = {{
.buf = buffer_tx,
.len = ARRAY_SIZE(buffer_tx),
}};
const struct spi_buf rx_buf[] = {{
.buf = buffer_rx,
.len = ARRAY_SIZE(buffer_rx),
}};
const struct spi_buf_set tx = {
.buffers = tx_buf,
.count = ARRAY_SIZE(tx_buf),
};
const struct spi_buf_set rx = {
.buffers = rx_buf,
.count = ARRAY_SIZE(rx_buf),
};
buffer_tx[0] = ((uint8_t)ADS114S0X_COMMAND_RREG) | ((uint8_t)register_address);
/* read one register */
buffer_tx[1] = 0x00;
int result = spi_transceive_dt(&config->bus, &tx, &rx);
if (result != 0) {
LOG_ERR("%s: spi_transceive failed with error %i", dev->name, result);
return result;
}
*value = buffer_rx[2];
LOG_DBG("%s: read from register 0x%02X value 0x%02X", dev->name, register_address, *value);
return 0;
}
static int ads114s0x_write_register(const struct device *dev,
enum ads114s0x_register register_address, uint8_t value)
{
const struct ads114s0x_config *config = dev->config;
uint8_t buffer_tx[3];
const struct spi_buf tx_buf[] = {{
.buf = buffer_tx,
.len = ARRAY_SIZE(buffer_tx),
}};
const struct spi_buf_set tx = {
.buffers = tx_buf,
.count = ARRAY_SIZE(tx_buf),
};
buffer_tx[0] = ((uint8_t)ADS114S0X_COMMAND_WREG) | ((uint8_t)register_address);
/* write one register */
buffer_tx[1] = 0x00;
buffer_tx[2] = value;
LOG_DBG("%s: writing to register 0x%02X value 0x%02X", dev->name, register_address, value);
int result = spi_write_dt(&config->bus, &tx);
if (result != 0) {
LOG_ERR("%s: spi_write failed with error %i", dev->name, result);
return result;
}
return 0;
}
static int ads114s0x_write_multiple_registers(const struct device *dev,
enum ads114s0x_register *register_addresses,
uint8_t *values, size_t count)
{
const struct ads114s0x_config *config = dev->config;
uint8_t buffer_tx[2];
const struct spi_buf tx_buf[] = {
{
.buf = buffer_tx,
.len = ARRAY_SIZE(buffer_tx),
},
{
.buf = values,
.len = count,
},
};
const struct spi_buf_set tx = {
.buffers = tx_buf,
.count = ARRAY_SIZE(tx_buf),
};
if (count == 0) {
LOG_WRN("%s: ignoring the command to write 0 registers", dev->name);
return -EINVAL;
}
buffer_tx[0] = ((uint8_t)ADS114S0X_COMMAND_WREG) | ((uint8_t)register_addresses[0]);
buffer_tx[1] = count - 1;
LOG_HEXDUMP_DBG(register_addresses, count, "writing to registers");
LOG_HEXDUMP_DBG(values, count, "values");
/* ensure that the register addresses are in the correct order */
for (size_t i = 1; i < count; ++i) {
__ASSERT(register_addresses[i - 1] + 1 == register_addresses[i],
"register addresses are not consecutive");
}
int result = spi_write_dt(&config->bus, &tx);
if (result != 0) {
LOG_ERR("%s: spi_write failed with error %i", dev->name, result);
return result;
}
return 0;
}
static int ads114s0x_send_command(const struct device *dev, enum ads114s0x_command command)
{
const struct ads114s0x_config *config = dev->config;
uint8_t buffer_tx[1];
const struct spi_buf tx_buf[] = {{
.buf = buffer_tx,
.len = ARRAY_SIZE(buffer_tx),
}};
const struct spi_buf_set tx = {
.buffers = tx_buf,
.count = ARRAY_SIZE(tx_buf),
};
buffer_tx[0] = (uint8_t)command;
LOG_DBG("%s: sending command 0x%02X", dev->name, command);
int result = spi_write_dt(&config->bus, &tx);
if (result != 0) {
LOG_ERR("%s: spi_write failed with error %i", dev->name, result);
return result;
}
return 0;
}
static int ads114s0x_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
const struct ads114s0x_config *config = dev->config;
uint8_t input_mux = 0;
uint8_t reference_control = 0;
uint8_t data_rate = 0;
uint8_t gain = 0;
uint8_t idac_magnitude = 0;
uint8_t idac_mux = 0;
uint8_t pin_selections[4];
uint8_t vbias = 0;
size_t pin_selections_size;
int result;
enum ads114s0x_register register_addresses[7];
uint8_t values[ARRAY_SIZE(register_addresses)];
uint16_t acquisition_time_value = ADC_ACQ_TIME_VALUE(channel_cfg->acquisition_time);
uint16_t acquisition_time_unit = ADC_ACQ_TIME_UNIT(channel_cfg->acquisition_time);
ADS114S0X_REGISTER_INPMUX_SET_DEFAULTS(gain);
ADS114S0X_REGISTER_REF_SET_DEFAULTS(reference_control);
ADS114S0X_REGISTER_DATARATE_SET_DEFAULTS(data_rate);
ADS114S0X_REGISTER_PGA_SET_DEFAULTS(gain);
ADS114S0X_REGISTER_IDACMAG_SET_DEFAULTS(idac_magnitude);
ADS114S0X_REGISTER_IDACMUX_SET_DEFAULTS(idac_mux);
if (channel_cfg->channel_id != 0) {
LOG_ERR("%s: only one channel is supported", dev->name);
return -EINVAL;
}
/* The ADS114 uses samples per seconds units with the lowest being 2.5SPS
* and with acquisition_time only having 14b for time, this will not fit
* within here for microsecond units. Use Tick units and allow the user to
* specify the ODR directly.
*/
if (channel_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT &&
acquisition_time_unit != ADC_ACQ_TIME_TICKS) {
LOG_ERR("%s: invalid acquisition time %i", dev->name,
channel_cfg->acquisition_time);
return -EINVAL;
}
if (channel_cfg->acquisition_time == ADC_ACQ_TIME_DEFAULT) {
ADS114S0X_REGISTER_DATARATE_DR_SET(data_rate, ADS114S0X_CONFIG_DR_20);
} else {
ADS114S0X_REGISTER_DATARATE_DR_SET(data_rate, acquisition_time_value);
}
switch (channel_cfg->reference) {
case ADC_REF_INTERNAL:
/* disable negative reference buffer */
ADS114S0X_REGISTER_REF_NOT_REFN_BUF_SET(reference_control, 0b1);
/* disable positive reference buffer */
ADS114S0X_REGISTER_REF_NOT_REFP_BUF_SET(reference_control, 0b1);
/* use internal reference */
ADS114S0X_REGISTER_REF_REFSEL_SET(reference_control, 0b10);
break;
case ADC_REF_EXTERNAL0:
/* enable negative reference buffer */
ADS114S0X_REGISTER_REF_NOT_REFN_BUF_SET(reference_control, 0b0);
/* enable positive reference buffer */
ADS114S0X_REGISTER_REF_NOT_REFP_BUF_SET(reference_control, 0b0);
/* use external reference 0*/
ADS114S0X_REGISTER_REF_REFSEL_SET(reference_control, 0b00);
break;
case ADC_REF_EXTERNAL1:
/* enable negative reference buffer */
ADS114S0X_REGISTER_REF_NOT_REFN_BUF_SET(reference_control, 0b0);
/* enable positive reference buffer */
ADS114S0X_REGISTER_REF_NOT_REFP_BUF_SET(reference_control, 0b0);
/* use external reference 0*/
ADS114S0X_REGISTER_REF_REFSEL_SET(reference_control, 0b01);
break;
default:
LOG_ERR("%s: reference %i is not supported", dev->name, channel_cfg->reference);
return -EINVAL;
}
if (channel_cfg->differential) {
LOG_DBG("%s: configuring channel for a differential measurement from the pins (p, "
"n) (%i, %i)",
dev->name, channel_cfg->input_positive, channel_cfg->input_negative);
if (channel_cfg->input_positive >= ADS114S0X_INPUT_SELECTION_AINCOM) {
LOG_ERR("%s: positive channel input %i is invalid", dev->name,
channel_cfg->input_positive);
return -EINVAL;
}
if (channel_cfg->input_negative >= ADS114S0X_INPUT_SELECTION_AINCOM) {
LOG_ERR("%s: negative channel input %i is invalid", dev->name,
channel_cfg->input_negative);
return -EINVAL;
}
if (channel_cfg->input_positive == channel_cfg->input_negative) {
LOG_ERR("%s: negative and positive channel inputs must be different",
dev->name);
return -EINVAL;
}
ADS114S0X_REGISTER_INPMUX_MUXP_SET(input_mux, channel_cfg->input_positive);
ADS114S0X_REGISTER_INPMUX_MUXN_SET(input_mux, channel_cfg->input_negative);
pin_selections[0] = channel_cfg->input_positive;
pin_selections[1] = channel_cfg->input_negative;
} else {
LOG_DBG("%s: configuring channel for single ended measurement from input %i",
dev->name, channel_cfg->input_positive);
if (channel_cfg->input_positive >= ADS114S0X_INPUT_SELECTION_AINCOM) {
LOG_ERR("%s: channel input %i is invalid", dev->name,
channel_cfg->input_positive);
return -EINVAL;
}
ADS114S0X_REGISTER_INPMUX_MUXP_SET(input_mux, channel_cfg->input_positive);
ADS114S0X_REGISTER_INPMUX_MUXN_SET(input_mux, ADS114S0X_INPUT_SELECTION_AINCOM);
pin_selections[0] = channel_cfg->input_positive;
pin_selections[1] = ADS114S0X_INPUT_SELECTION_AINCOM;
}
switch (channel_cfg->gain) {
case ADC_GAIN_1:
/* set gain value */
ADS114S0X_REGISTER_PGA_GAIN_SET(gain, 0b000);
break;
case ADC_GAIN_2:
ADS114S0X_REGISTER_PGA_GAIN_SET(gain, 0b001);
break;
case ADC_GAIN_4:
ADS114S0X_REGISTER_PGA_GAIN_SET(gain, 0b010);
break;
case ADC_GAIN_8:
ADS114S0X_REGISTER_PGA_GAIN_SET(gain, 0b011);
break;
case ADC_GAIN_16:
ADS114S0X_REGISTER_PGA_GAIN_SET(gain, 0b100);
break;
case ADC_GAIN_32:
ADS114S0X_REGISTER_PGA_GAIN_SET(gain, 0b101);
break;
case ADC_GAIN_64:
ADS114S0X_REGISTER_PGA_GAIN_SET(gain, 0b110);
break;
case ADC_GAIN_128:
ADS114S0X_REGISTER_PGA_GAIN_SET(gain, 0b111);
break;
default:
LOG_ERR("%s: gain value %i not supported", dev->name, channel_cfg->gain);
return -EINVAL;
}
if (channel_cfg->gain != ADC_GAIN_1) {
/* enable gain */
ADS114S0X_REGISTER_PGA_PGA_EN_SET(gain, 0b01);
}
switch (config->idac_current) {
case 0:
ADS114S0X_REGISTER_IDACMAG_IMAG_SET(idac_magnitude, 0b0000);
break;
case 10:
ADS114S0X_REGISTER_IDACMAG_IMAG_SET(idac_magnitude, 0b0001);
break;
case 50:
ADS114S0X_REGISTER_IDACMAG_IMAG_SET(idac_magnitude, 0b0010);
break;
case 100:
ADS114S0X_REGISTER_IDACMAG_IMAG_SET(idac_magnitude, 0b0011);
break;
case 250:
ADS114S0X_REGISTER_IDACMAG_IMAG_SET(idac_magnitude, 0b0100);
break;
case 500:
ADS114S0X_REGISTER_IDACMAG_IMAG_SET(idac_magnitude, 0b0101);
break;
case 750:
ADS114S0X_REGISTER_IDACMAG_IMAG_SET(idac_magnitude, 0b0110);
break;
case 1000:
ADS114S0X_REGISTER_IDACMAG_IMAG_SET(idac_magnitude, 0b0111);
break;
case 1500:
ADS114S0X_REGISTER_IDACMAG_IMAG_SET(idac_magnitude, 0b1000);
break;
case 2000:
ADS114S0X_REGISTER_IDACMAG_IMAG_SET(idac_magnitude, 0b1001);
break;
default:
LOG_ERR("%s: IDAC magnitude %i not supported", dev->name, config->idac_current);
return -EINVAL;
}
if (channel_cfg->current_source_pin_set) {
LOG_DBG("%s: current source pin set to %i and %i", dev->name,
channel_cfg->current_source_pin[0], channel_cfg->current_source_pin[1]);
if (channel_cfg->current_source_pin[0] > 0b1111) {
LOG_ERR("%s: invalid selection %i for I1MUX", dev->name,
channel_cfg->current_source_pin[0]);
return -EINVAL;
}
if (channel_cfg->current_source_pin[1] > 0b1111) {
LOG_ERR("%s: invalid selection %i for I2MUX", dev->name,
channel_cfg->current_source_pin[1]);
return -EINVAL;
}
ADS114S0X_REGISTER_IDACMUX_I1MUX_SET(idac_mux, channel_cfg->current_source_pin[0]);
ADS114S0X_REGISTER_IDACMUX_I2MUX_SET(idac_mux, channel_cfg->current_source_pin[1]);
pin_selections[2] = channel_cfg->current_source_pin[0];
pin_selections[3] = channel_cfg->current_source_pin[1];
pin_selections_size = 4;
} else {
LOG_DBG("%s: current source pins not set", dev->name);
pin_selections_size = 2;
}
for (size_t i = 0; i < pin_selections_size; ++i) {
if (pin_selections[i] > ADS114S0X_INPUT_SELECTION_AINCOM) {
continue;
}
for (size_t j = i + 1; j < pin_selections_size; ++j) {
if (pin_selections[j] > ADS114S0X_INPUT_SELECTION_AINCOM) {
continue;
}
if (pin_selections[i] == pin_selections[j]) {
LOG_ERR("%s: pins for inputs and current sources must be different",
dev->name);
return -EINVAL;
}
}
}
ADS114S0X_REGISTER_VBIAS_VB_LEVEL_SET(vbias, config->vbias_level);
if ((channel_cfg->vbias_pins &
~GENMASK(ADS114S0X_VBIAS_PIN_MAX, ADS114S0X_VBIAS_PIN_MIN)) != 0) {
LOG_ERR("%s: invalid VBIAS pin selection 0x%08X", dev->name,
channel_cfg->vbias_pins);
return -EINVAL;
}
vbias |= channel_cfg->vbias_pins;
register_addresses[0] = ADS114S0X_REGISTER_INPMUX;
register_addresses[1] = ADS114S0X_REGISTER_PGA;
register_addresses[2] = ADS114S0X_REGISTER_DATARATE;
register_addresses[3] = ADS114S0X_REGISTER_REF;
register_addresses[4] = ADS114S0X_REGISTER_IDACMAG;
register_addresses[5] = ADS114S0X_REGISTER_IDACMUX;
register_addresses[6] = ADS114S0X_REGISTER_VBIAS;
BUILD_ASSERT(ARRAY_SIZE(register_addresses) == 7);
values[0] = input_mux;
values[1] = gain;
values[2] = data_rate;
values[3] = reference_control;
values[4] = idac_magnitude;
values[5] = idac_mux;
values[6] = vbias;
BUILD_ASSERT(ARRAY_SIZE(values) == 7);
result = ads114s0x_write_multiple_registers(dev, register_addresses, values,
ARRAY_SIZE(values));
if (result != 0) {
LOG_ERR("%s: unable to configure registers", dev->name);
return result;
}
return 0;
}
static int ads114s0x_validate_buffer_size(const struct adc_sequence *sequence)
{
size_t needed = sizeof(int16_t);
if (sequence->options) {
needed *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < needed) {
return -ENOMEM;
}
return 0;
}
static int ads114s0x_validate_sequence(const struct device *dev,
const struct adc_sequence *sequence)
{
if (sequence->resolution != ADS114S0X_RESOLUTION) {
LOG_ERR("%s: invalid resolution", dev->name);
return -EINVAL;
}
if (sequence->channels != BIT(0)) {
LOG_ERR("%s: invalid channel", dev->name);
return -EINVAL;
}
if (sequence->oversampling) {
LOG_ERR("%s: oversampling is not supported", dev->name);
return -EINVAL;
}
return ads114s0x_validate_buffer_size(sequence);
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx, bool repeat_sampling)
{
struct ads114s0x_data *data = CONTAINER_OF(ctx, struct ads114s0x_data, ctx);
if (repeat_sampling) {
data->buffer = data->buffer_ptr;
}
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct ads114s0x_data *data = CONTAINER_OF(ctx, struct ads114s0x_data, ctx);
data->buffer_ptr = data->buffer;
k_sem_give(&data->acquire_signal);
}
static int ads114s0x_adc_start_read(const struct device *dev, const struct adc_sequence *sequence,
bool wait)
{
int result;
struct ads114s0x_data *data = dev->data;
result = ads114s0x_validate_sequence(dev, sequence);
if (result != 0) {
LOG_ERR("%s: sequence validation failed", dev->name);
return result;
}
data->buffer = sequence->buffer;
adc_context_start_read(&data->ctx, sequence);
if (wait) {
result = adc_context_wait_for_completion(&data->ctx);
}
return result;
}
static int ads114s0x_send_start_read(const struct device *dev)
{
const struct ads114s0x_config *config = dev->config;
int result;
if (config->gpio_start_sync.port == 0) {
result = ads114s0x_send_command(dev, ADS114S0X_COMMAND_START);
if (result != 0) {
LOG_ERR("%s: unable to send START/SYNC command", dev->name);
return result;
}
} else {
result = gpio_pin_set_dt(&config->gpio_start_sync, 1);
if (result != 0) {
LOG_ERR("%s: unable to start ADC operation", dev->name);
return result;
}
k_sleep(K_USEC(ADS114S0X_START_SYNC_PULSE_DURATION_IN_US +
ADS114S0X_SETUP_TIME_IN_US));
result = gpio_pin_set_dt(&config->gpio_start_sync, 0);
if (result != 0) {
LOG_ERR("%s: unable to start ADC operation", dev->name);
return result;
}
}
return 0;
}
static int ads114s0x_wait_data_ready(const struct device *dev)
{
struct ads114s0x_data *data = dev->data;
return k_sem_take(&data->data_ready_signal, ADC_CONTEXT_WAIT_FOR_COMPLETION_TIMEOUT);
}
static int ads114s0x_read_sample(const struct device *dev, uint16_t *buffer)
{
const struct ads114s0x_config *config = dev->config;
uint8_t buffer_tx[3];
uint8_t buffer_rx[ARRAY_SIZE(buffer_tx)];
const struct spi_buf tx_buf[] = {{
.buf = buffer_tx,
.len = ARRAY_SIZE(buffer_tx),
}};
const struct spi_buf rx_buf[] = {{
.buf = buffer_rx,
.len = ARRAY_SIZE(buffer_rx),
}};
const struct spi_buf_set tx = {
.buffers = tx_buf,
.count = ARRAY_SIZE(tx_buf),
};
const struct spi_buf_set rx = {
.buffers = rx_buf,
.count = ARRAY_SIZE(rx_buf),
};
buffer_tx[0] = (uint8_t)ADS114S0X_COMMAND_RDATA;
int result = spi_transceive_dt(&config->bus, &tx, &rx);
if (result != 0) {
LOG_ERR("%s: spi_transceive failed with error %i", dev->name, result);
return result;
}
*buffer = sys_get_be16(buffer_rx + 1);
LOG_DBG("%s: read ADC sample %i", dev->name, *buffer);
return 0;
}
static int ads114s0x_adc_perform_read(const struct device *dev)
{
int result;
struct ads114s0x_data *data = dev->data;
k_sem_take(&data->acquire_signal, K_FOREVER);
k_sem_reset(&data->data_ready_signal);
result = ads114s0x_send_start_read(dev);
if (result != 0) {
LOG_ERR("%s: unable to start ADC conversion", dev->name);
adc_context_complete(&data->ctx, result);
return result;
}
result = ads114s0x_wait_data_ready(dev);
if (result != 0) {
LOG_ERR("%s: waiting for data to be ready failed", dev->name);
adc_context_complete(&data->ctx, result);
return result;
}
result = ads114s0x_read_sample(dev, data->buffer);
if (result != 0) {
LOG_ERR("%s: reading sample failed", dev->name);
adc_context_complete(&data->ctx, result);
return result;
}
data->buffer++;
adc_context_on_sampling_done(&data->ctx, dev);
return result;
}
#if CONFIG_ADC_ASYNC
static int ads114s0x_adc_read_async(const struct device *dev, const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
int result;
struct ads114s0x_data *data = dev->data;
adc_context_lock(&data->ctx, true, async);
result = ads114s0x_adc_start_read(dev, sequence, true);
adc_context_release(&data->ctx, result);
return result;
}
static int ads114s0x_read(const struct device *dev, const struct adc_sequence *sequence)
{
int result;
struct ads114s0x_data *data = dev->data;
adc_context_lock(&data->ctx, false, NULL);
result = ads114s0x_adc_start_read(dev, sequence, true);
adc_context_release(&data->ctx, result);
return result;
}
#else
static int ads114s0x_read(const struct device *dev, const struct adc_sequence *sequence)
{
int result;
struct ads114s0x_data *data = dev->data;
adc_context_lock(&data->ctx, false, NULL);
result = ads114s0x_adc_start_read(dev, sequence, false);
while (result == 0 && k_sem_take(&data->ctx.sync, K_NO_WAIT) != 0) {
result = ads114s0x_adc_perform_read(dev);
}
adc_context_release(&data->ctx, result);
return result;
}
#endif
#if CONFIG_ADC_ASYNC
static void ads114s0x_acquisition_thread(void *p1, void *p2, void *p3)
{
ARG_UNUSED(p2);
ARG_UNUSED(p3);
const struct device *dev = p1;
while (true) {
ads114s0x_adc_perform_read(dev);
}
}
#endif
#ifdef CONFIG_ADC_ADS114S0X_GPIO
static int ads114s0x_gpio_write_config(const struct device *dev)
{
struct ads114s0x_data *data = dev->data;
enum ads114s0x_register register_addresses[2];
uint8_t register_values[ARRAY_SIZE(register_addresses)];
uint8_t gpio_dat = 0;
uint8_t gpio_con = 0;
ADS114S0X_REGISTER_GPIOCON_CON_SET(gpio_con, data->gpio_enabled);
ADS114S0X_REGISTER_GPIODAT_DAT_SET(gpio_dat, data->gpio_value);
ADS114S0X_REGISTER_GPIODAT_DIR_SET(gpio_dat, data->gpio_direction);
register_values[0] = gpio_dat;
register_values[1] = gpio_con;
register_addresses[0] = ADS114S0X_REGISTER_GPIODAT;
register_addresses[1] = ADS114S0X_REGISTER_GPIOCON;
return ads114s0x_write_multiple_registers(dev, register_addresses, register_values,
ARRAY_SIZE(register_values));
}
static int ads114s0x_gpio_write_value(const struct device *dev)
{
struct ads114s0x_data *data = dev->data;
uint8_t gpio_dat = 0;
ADS114S0X_REGISTER_GPIODAT_DAT_SET(gpio_dat, data->gpio_value);
ADS114S0X_REGISTER_GPIODAT_DIR_SET(gpio_dat, data->gpio_direction);
return ads114s0x_write_register(dev, ADS114S0X_REGISTER_GPIODAT, gpio_dat);
}
int ads114s0x_gpio_set_output(const struct device *dev, uint8_t pin, bool initial_value)
{
struct ads114s0x_data *data = dev->data;
int result = 0;
if (pin > ADS114S0X_GPIO_MAX) {
LOG_ERR("%s: invalid pin %i", dev->name, pin);
return -EINVAL;
}
k_mutex_lock(&data->gpio_lock, K_FOREVER);
data->gpio_enabled |= BIT(pin);
data->gpio_direction &= ~BIT(pin);
if (initial_value) {
data->gpio_value |= BIT(pin);
} else {
data->gpio_value &= ~BIT(pin);
}
result = ads114s0x_gpio_write_config(dev);
k_mutex_unlock(&data->gpio_lock);
return result;
}
int ads114s0x_gpio_set_input(const struct device *dev, uint8_t pin)
{
struct ads114s0x_data *data = dev->data;
int result = 0;
if (pin > ADS114S0X_GPIO_MAX) {
LOG_ERR("%s: invalid pin %i", dev->name, pin);
return -EINVAL;
}
k_mutex_lock(&data->gpio_lock, K_FOREVER);
data->gpio_enabled |= BIT(pin);
data->gpio_direction |= BIT(pin);
data->gpio_value &= ~BIT(pin);
result = ads114s0x_gpio_write_config(dev);
k_mutex_unlock(&data->gpio_lock);
return result;
}
int ads114s0x_gpio_deconfigure(const struct device *dev, uint8_t pin)
{
struct ads114s0x_data *data = dev->data;
int result = 0;
if (pin > ADS114S0X_GPIO_MAX) {
LOG_ERR("%s: invalid pin %i", dev->name, pin);
return -EINVAL;
}
k_mutex_lock(&data->gpio_lock, K_FOREVER);
data->gpio_enabled &= ~BIT(pin);
data->gpio_direction |= BIT(pin);
data->gpio_value &= ~BIT(pin);
result = ads114s0x_gpio_write_config(dev);
k_mutex_unlock(&data->gpio_lock);
return result;
}
int ads114s0x_gpio_set_pin_value(const struct device *dev, uint8_t pin, bool value)
{
struct ads114s0x_data *data = dev->data;
int result = 0;
if (pin > ADS114S0X_GPIO_MAX) {
LOG_ERR("%s: invalid pin %i", dev->name, pin);
return -EINVAL;
}
k_mutex_lock(&data->gpio_lock, K_FOREVER);
if ((BIT(pin) & data->gpio_enabled) == 0) {
LOG_ERR("%s: gpio pin %i not configured", dev->name, pin);
result = -EINVAL;
} else if ((BIT(pin) & data->gpio_direction) != 0) {
LOG_ERR("%s: gpio pin %i not configured as output", dev->name, pin);
result = -EINVAL;
} else {
data->gpio_value |= BIT(pin);
result = ads114s0x_gpio_write_value(dev);
}
k_mutex_unlock(&data->gpio_lock);
return result;
}
int ads114s0x_gpio_get_pin_value(const struct device *dev, uint8_t pin, bool *value)
{
struct ads114s0x_data *data = dev->data;
int result = 0;
uint8_t gpio_dat;
if (pin > ADS114S0X_GPIO_MAX) {
LOG_ERR("%s: invalid pin %i", dev->name, pin);
return -EINVAL;
}
k_mutex_lock(&data->gpio_lock, K_FOREVER);
if ((BIT(pin) & data->gpio_enabled) == 0) {
LOG_ERR("%s: gpio pin %i not configured", dev->name, pin);
result = -EINVAL;
} else if ((BIT(pin) & data->gpio_direction) == 0) {
LOG_ERR("%s: gpio pin %i not configured as input", dev->name, pin);
result = -EINVAL;
} else {
result = ads114s0x_read_register(dev, ADS114S0X_REGISTER_GPIODAT, &gpio_dat);
data->gpio_value = ADS114S0X_REGISTER_GPIODAT_DAT_GET(gpio_dat);
*value = (BIT(pin) & data->gpio_value) != 0;
}
k_mutex_unlock(&data->gpio_lock);
return result;
}
int ads114s0x_gpio_port_get_raw(const struct device *dev, gpio_port_value_t *value)
{
struct ads114s0x_data *data = dev->data;
int result = 0;
uint8_t gpio_dat;
k_mutex_lock(&data->gpio_lock, K_FOREVER);
result = ads114s0x_read_register(dev, ADS114S0X_REGISTER_GPIODAT, &gpio_dat);
data->gpio_value = ADS114S0X_REGISTER_GPIODAT_DAT_GET(gpio_dat);
*value = data->gpio_value;
k_mutex_unlock(&data->gpio_lock);
return result;
}
int ads114s0x_gpio_port_set_masked_raw(const struct device *dev, gpio_port_pins_t mask,
gpio_port_value_t value)
{
struct ads114s0x_data *data = dev->data;
int result = 0;
k_mutex_lock(&data->gpio_lock, K_FOREVER);
data->gpio_value = ((data->gpio_value & ~mask) | (mask & value)) & data->gpio_enabled &
~data->gpio_direction;
result = ads114s0x_gpio_write_value(dev);
k_mutex_unlock(&data->gpio_lock);
return result;
}
int ads114s0x_gpio_port_toggle_bits(const struct device *dev, gpio_port_pins_t pins)
{
struct ads114s0x_data *data = dev->data;
int result = 0;
k_mutex_lock(&data->gpio_lock, K_FOREVER);
data->gpio_value = (data->gpio_value ^ pins) & data->gpio_enabled & ~data->gpio_direction;
result = ads114s0x_gpio_write_value(dev);
k_mutex_unlock(&data->gpio_lock);
return result;
}
#endif /* CONFIG_ADC_ADS114S0X_GPIO */
static int ads114s0x_init(const struct device *dev)
{
uint8_t status = 0;
uint8_t reference_control = 0;
uint8_t reference_control_read;
int result;
const struct ads114s0x_config *config = dev->config;
struct ads114s0x_data *data = dev->data;
adc_context_init(&data->ctx);
k_sem_init(&data->data_ready_signal, 0, 1);
k_sem_init(&data->acquire_signal, 0, 1);
#ifdef CONFIG_ADC_ADS114S0X_GPIO
k_mutex_init(&data->gpio_lock);
#endif /* CONFIG_ADC_ADS114S0X_GPIO */
if (!spi_is_ready_dt(&config->bus)) {
LOG_ERR("%s: SPI device is not ready", dev->name);
return -ENODEV;
}
if (config->gpio_reset.port != NULL) {
result = gpio_pin_configure_dt(&config->gpio_reset, GPIO_OUTPUT_ACTIVE);
if (result != 0) {
LOG_ERR("%s: failed to initialize GPIO for reset", dev->name);
return result;
}
}
if (config->gpio_start_sync.port != NULL) {
result = gpio_pin_configure_dt(&config->gpio_start_sync, GPIO_OUTPUT_INACTIVE);
if (result != 0) {
LOG_ERR("%s: failed to initialize GPIO for start/sync", dev->name);
return result;
}
}
result = gpio_pin_configure_dt(&config->gpio_data_ready, GPIO_INPUT);
if (result != 0) {
LOG_ERR("%s: failed to initialize GPIO for data ready", dev->name);
return result;
}
result = gpio_pin_interrupt_configure_dt(&config->gpio_data_ready, GPIO_INT_EDGE_TO_ACTIVE);
if (result != 0) {
LOG_ERR("%s: failed to configure data ready interrupt", dev->name);
return -EIO;
}
gpio_init_callback(&data->callback_data_ready, ads114s0x_data_ready_handler,
BIT(config->gpio_data_ready.pin));
result = gpio_add_callback(config->gpio_data_ready.port, &data->callback_data_ready);
if (result != 0) {
LOG_ERR("%s: failed to add data ready callback", dev->name);
return -EIO;
}
#if CONFIG_ADC_ASYNC
k_tid_t tid = k_thread_create(&data->thread, config->stack,
CONFIG_ADC_ADS114S0X_ACQUISITION_THREAD_STACK_SIZE,
ads114s0x_acquisition_thread, (void *)dev, NULL, NULL,
CONFIG_ADC_ADS114S0X_ASYNC_THREAD_INIT_PRIO, 0, K_NO_WAIT);
k_thread_name_set(tid, "adc_ads114s0x");
#endif
k_busy_wait(ADS114S0X_POWER_ON_RESET_TIME_IN_US);
if (config->gpio_reset.port == NULL) {
result = ads114s0x_send_command(dev, ADS114S0X_COMMAND_RESET);
if (result != 0) {
LOG_ERR("%s: unable to send RESET command", dev->name);
return result;
}
} else {
k_busy_wait(ADS114S0X_RESET_LOW_TIME_IN_US);
gpio_pin_set_dt(&config->gpio_reset, 0);
}
k_busy_wait(ADS114S0X_RESET_DELAY_TIME_IN_US);
result = ads114s0x_read_register(dev, ADS114S0X_REGISTER_STATUS, &status);
if (result != 0) {
LOG_ERR("%s: unable to read status register", dev->name);
return result;
}
if (ADS114S0X_REGISTER_STATUS_NOT_RDY_GET(status) == 0x01) {
LOG_ERR("%s: ADS114 is not yet ready", dev->name);
return -EBUSY;
}
/*
* Activate internal voltage reference during initialization to
* avoid the necessary setup time for it to settle later on.
*/
ADS114S0X_REGISTER_REF_SET_DEFAULTS(reference_control);
result = ads114s0x_write_register(dev, ADS114S0X_REGISTER_REF, reference_control);
if (result != 0) {
LOG_ERR("%s: unable to set default reference control values", dev->name);
return result;
}
/*
* Ensure that the internal voltage reference is active.
*/
result = ads114s0x_read_register(dev, ADS114S0X_REGISTER_REF, &reference_control_read);
if (result != 0) {
LOG_ERR("%s: unable to read reference control values", dev->name);
return result;
}
if (reference_control != reference_control_read) {
LOG_ERR("%s: reference control register is incorrect: 0x%02X", dev->name,
reference_control_read);
return -EIO;
}
#ifdef CONFIG_ADC_ADS114S0X_GPIO
data->gpio_enabled = 0x00;
data->gpio_direction = 0x0F;
data->gpio_value = 0x00;
result = ads114s0x_gpio_write_config(dev);
if (result != 0) {
LOG_ERR("%s: unable to configure defaults for GPIOs", dev->name);
return result;
}
#endif
adc_context_unlock_unconditionally(&data->ctx);
return result;
}
static const struct adc_driver_api api = {
.channel_setup = ads114s0x_channel_setup,
.read = ads114s0x_read,
.ref_internal = ADS114S0X_REF_INTERNAL,
#ifdef CONFIG_ADC_ASYNC
.read_async = ads114s0x_adc_read_async,
#endif
};
BUILD_ASSERT(CONFIG_ADC_INIT_PRIORITY > CONFIG_SPI_INIT_PRIORITY,
"CONFIG_ADC_INIT_PRIORITY must be higher than CONFIG_SPI_INIT_PRIORITY");
#define DT_DRV_COMPAT ti_ads114s08
#define ADC_ADS114S0X_INST_DEFINE(n) \
IF_ENABLED( \
CONFIG_ADC_ASYNC, \
(static K_KERNEL_STACK_DEFINE( \
thread_stack_##n, CONFIG_ADC_ADS114S0X_ACQUISITION_THREAD_STACK_SIZE);)) \
static const struct ads114s0x_config config_##n = { \
.bus = SPI_DT_SPEC_INST_GET( \
n, SPI_OP_MODE_MASTER | SPI_MODE_CPHA | SPI_WORD_SET(8), 0), \
IF_ENABLED(CONFIG_ADC_ASYNC, (.stack = thread_stack_##n,)) \
.gpio_reset = GPIO_DT_SPEC_INST_GET_OR(n, reset_gpios, {0}), \
.gpio_data_ready = GPIO_DT_SPEC_INST_GET(n, drdy_gpios), \
.gpio_start_sync = GPIO_DT_SPEC_INST_GET_OR(n, start_sync_gpios, {0}), \
.idac_current = DT_INST_PROP(n, idac_current), \
.vbias_level = DT_INST_PROP(n, vbias_level), \
}; \
static struct ads114s0x_data data_##n; \
DEVICE_DT_INST_DEFINE(n, ads114s0x_init, NULL, &data_##n, &config_##n, POST_KERNEL, \
CONFIG_ADC_INIT_PRIORITY, &api);
DT_INST_FOREACH_STATUS_OKAY(ADC_ADS114S0X_INST_DEFINE);
``` | /content/code_sandbox/drivers/adc/adc_ads114s0x.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 15,802 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_DRIVERS_ADC_ADC_CONTEXT_H_
#define ZEPHYR_DRIVERS_ADC_ADC_CONTEXT_H_
#include <zephyr/drivers/adc.h>
#include <zephyr/sys/atomic.h>
#ifdef __cplusplus
extern "C" {
#endif
struct adc_context;
/*
* Each driver should provide implementations of the following two functions:
* - adc_context_start_sampling() that will be called when a sampling (of one
* or more channels, depending on the realized sequence) is to be started
* - adc_context_update_buffer_pointer() that will be called when the sample
* buffer pointer should be prepared for writing of next sampling results,
* the "repeat_sampling" parameter indicates if the results should be written
* in the same place as before (when true) or as consecutive ones (otherwise).
*/
static void adc_context_start_sampling(struct adc_context *ctx);
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
bool repeat_sampling);
/*
* If a given driver uses some dedicated hardware timer to trigger consecutive
* samplings, it should implement also the following two functions. Otherwise,
* it should define the ADC_CONTEXT_USES_KERNEL_TIMER macro to enable parts of
* this module that utilize a standard kernel timer.
*/
static void adc_context_enable_timer(struct adc_context *ctx);
static void adc_context_disable_timer(struct adc_context *ctx);
/*
* If a driver needs to do something after a context complete then
* then this optional function can be overwritten. This will be called
* after a sequence has ended, and *not* when restarted with ADC_ACTION_REPEAT.
* To enable this function define ADC_CONTEXT_ENABLE_ON_COMPLETE.
*/
#ifdef ADC_CONTEXT_ENABLE_ON_COMPLETE
static void adc_context_on_complete(struct adc_context *ctx, int status);
#endif /* ADC_CONTEXT_ENABLE_ON_COMPLETE */
#ifndef ADC_CONTEXT_WAIT_FOR_COMPLETION_TIMEOUT
#define ADC_CONTEXT_WAIT_FOR_COMPLETION_TIMEOUT K_FOREVER
#endif
struct adc_context {
atomic_t sampling_requested;
#ifdef ADC_CONTEXT_USES_KERNEL_TIMER
struct k_timer timer;
#endif /* ADC_CONTEXT_USES_KERNEL_TIMER */
struct k_sem lock;
struct k_sem sync;
int status;
#ifdef CONFIG_ADC_ASYNC
struct k_poll_signal *signal;
bool asynchronous;
#endif /* CONFIG_ADC_ASYNC */
struct adc_sequence sequence;
struct adc_sequence_options options;
uint16_t sampling_index;
};
#ifdef ADC_CONTEXT_USES_KERNEL_TIMER
#define ADC_CONTEXT_INIT_TIMER(_data, _ctx_name) \
._ctx_name.timer = Z_TIMER_INITIALIZER(_data._ctx_name.timer, \
adc_context_on_timer_expired, \
NULL)
#endif /* ADC_CONTEXT_USES_KERNEL_TIMER */
#define ADC_CONTEXT_INIT_LOCK(_data, _ctx_name) \
._ctx_name.lock = Z_SEM_INITIALIZER(_data._ctx_name.lock, 0, 1)
#define ADC_CONTEXT_INIT_SYNC(_data, _ctx_name) \
._ctx_name.sync = Z_SEM_INITIALIZER(_data._ctx_name.sync, 0, 1)
#ifdef ADC_CONTEXT_USES_KERNEL_TIMER
static void adc_context_on_timer_expired(struct k_timer *timer_id);
#endif
static inline void adc_context_init(struct adc_context *ctx)
{
#ifdef ADC_CONTEXT_USES_KERNEL_TIMER
k_timer_init(&ctx->timer, adc_context_on_timer_expired, NULL);
#endif
k_sem_init(&ctx->lock, 0, 1);
k_sem_init(&ctx->sync, 0, 1);
}
static inline void adc_context_request_next_sampling(struct adc_context *ctx)
{
if (atomic_inc(&ctx->sampling_requested) == 0) {
adc_context_start_sampling(ctx);
} else {
/*
* If a sampling was already requested and was not finished yet,
* do not start another one from here, this will be done from
* adc_context_on_sampling_done() after the current sampling is
* complete. Instead, note this fact, and inform the user about
* it after the sequence is done.
*/
ctx->status = -EBUSY;
}
}
#ifdef ADC_CONTEXT_USES_KERNEL_TIMER
static inline void adc_context_enable_timer(struct adc_context *ctx)
{
k_timer_start(&ctx->timer, K_NO_WAIT, K_USEC(ctx->options.interval_us));
}
static inline void adc_context_disable_timer(struct adc_context *ctx)
{
k_timer_stop(&ctx->timer);
}
static void adc_context_on_timer_expired(struct k_timer *timer_id)
{
struct adc_context *ctx =
CONTAINER_OF(timer_id, struct adc_context, timer);
adc_context_request_next_sampling(ctx);
}
#endif /* ADC_CONTEXT_USES_KERNEL_TIMER */
static inline void adc_context_lock(struct adc_context *ctx,
bool asynchronous,
struct k_poll_signal *signal)
{
k_sem_take(&ctx->lock, K_FOREVER);
#ifdef CONFIG_ADC_ASYNC
ctx->asynchronous = asynchronous;
ctx->signal = signal;
#endif /* CONFIG_ADC_ASYNC */
}
static inline void adc_context_release(struct adc_context *ctx, int status)
{
#ifdef CONFIG_ADC_ASYNC
if (ctx->asynchronous && (status == 0)) {
return;
}
#endif /* CONFIG_ADC_ASYNC */
k_sem_give(&ctx->lock);
}
static inline void adc_context_unlock_unconditionally(struct adc_context *ctx)
{
if (!k_sem_count_get(&ctx->lock)) {
k_sem_give(&ctx->lock);
}
}
static inline int adc_context_wait_for_completion(struct adc_context *ctx)
{
#ifdef CONFIG_ADC_ASYNC
if (ctx->asynchronous) {
return 0;
}
#endif /* CONFIG_ADC_ASYNC */
int status = k_sem_take(&ctx->sync, ADC_CONTEXT_WAIT_FOR_COMPLETION_TIMEOUT);
if (status != 0) {
ctx->status = status;
}
return ctx->status;
}
static inline void adc_context_complete(struct adc_context *ctx, int status)
{
#ifdef ADC_CONTEXT_ENABLE_ON_COMPLETE
adc_context_on_complete(ctx, status);
#endif /* ADC_CONTEXT_ENABLE_ON_COMPLETE */
#ifdef CONFIG_ADC_ASYNC
if (ctx->asynchronous) {
if (ctx->signal) {
k_poll_signal_raise(ctx->signal, status);
}
k_sem_give(&ctx->lock);
return;
}
#endif /* CONFIG_ADC_ASYNC */
/*
* Override the status only when an error is signaled to this function.
* Please note that adc_context_request_next_sampling() might have set
* this field.
*/
if (status != 0) {
ctx->status = status;
}
k_sem_give(&ctx->sync);
}
static inline void adc_context_start_read(struct adc_context *ctx,
const struct adc_sequence *sequence)
{
ctx->sequence = *sequence;
ctx->status = 0;
if (sequence->options) {
ctx->options = *sequence->options;
ctx->sequence.options = &ctx->options;
ctx->sampling_index = 0U;
if (ctx->options.interval_us != 0U) {
atomic_set(&ctx->sampling_requested, 0);
adc_context_enable_timer(ctx);
return;
}
}
adc_context_start_sampling(ctx);
}
/*
* This function should be called after a sampling (of one or more channels,
* depending on the realized sequence) is done. It calls the defined callback
* function if required and takes further actions accordingly.
*/
static inline void adc_context_on_sampling_done(struct adc_context *ctx,
const struct device *dev)
{
if (ctx->sequence.options) {
adc_sequence_callback callback = ctx->options.callback;
enum adc_action action;
bool finish = false;
bool repeat = false;
if (callback) {
action = callback(dev,
&ctx->sequence,
ctx->sampling_index);
} else {
action = ADC_ACTION_CONTINUE;
}
switch (action) {
case ADC_ACTION_REPEAT:
repeat = true;
break;
case ADC_ACTION_FINISH:
finish = true;
break;
default: /* ADC_ACTION_CONTINUE */
if (ctx->sampling_index <
ctx->options.extra_samplings) {
++ctx->sampling_index;
} else {
finish = true;
}
}
if (!finish) {
adc_context_update_buffer_pointer(ctx, repeat);
/*
* Immediately start the next sampling if working with
* a zero interval or if the timer expired again while
* the current sampling was in progress.
*/
if (ctx->options.interval_us == 0U) {
adc_context_start_sampling(ctx);
} else if (atomic_dec(&ctx->sampling_requested) > 1) {
adc_context_start_sampling(ctx);
}
return;
}
if (ctx->options.interval_us != 0U) {
adc_context_disable_timer(ctx);
}
}
adc_context_complete(ctx, 0);
}
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_DRIVERS_ADC_ADC_CONTEXT_H_ */
``` | /content/code_sandbox/drivers/adc/adc_context.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,888 |
```c
/*
*
*/
#define DT_DRV_COMPAT nuvoton_numaker_adc
#include <zephyr/kernel.h>
#include <zephyr/drivers/reset.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/adc.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/clock_control/clock_control_numaker.h>
#include <zephyr/logging/log.h>
#include <soc.h>
#include <NuMicro.h>
#define ADC_CONTEXT_USES_KERNEL_TIMER
#define ADC_CONTEXT_ENABLE_ON_COMPLETE
#include "adc_context.h"
LOG_MODULE_REGISTER(adc_numaker, CONFIG_ADC_LOG_LEVEL);
/* Device config */
struct adc_numaker_config {
/* eadc base address */
EADC_T *eadc_base;
uint8_t channel_cnt;
const struct reset_dt_spec reset;
/* clock configuration */
uint32_t clk_modidx;
uint32_t clk_src;
uint32_t clk_div;
const struct device *clk_dev;
const struct pinctrl_dev_config *pincfg;
void (*irq_config_func)(const struct device *dev);
};
/* Driver context/data */
struct adc_numaker_data {
struct adc_context ctx;
const struct device *dev;
uint16_t *buffer;
uint16_t *buf_end;
uint16_t *repeat_buffer;
bool is_differential;
uint32_t channels;
uint32_t acq_time;
};
static int adc_numaker_channel_setup(const struct device *dev,
const struct adc_channel_cfg *chan_cfg)
{
const struct adc_numaker_config *cfg = dev->config;
struct adc_numaker_data *data = dev->data;
if (chan_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
if ((ADC_ACQ_TIME_UNIT(chan_cfg->acquisition_time) != ADC_ACQ_TIME_TICKS) ||
(ADC_ACQ_TIME_VALUE(chan_cfg->acquisition_time) > 255)) {
LOG_ERR("Selected ADC acquisition time is not in 0~255 ticks");
return -EINVAL;
}
}
data->acq_time = ADC_ACQ_TIME_VALUE(chan_cfg->acquisition_time);
if (chan_cfg->gain != ADC_GAIN_1) {
LOG_ERR("Not support channel gain");
return -ENOTSUP;
}
if (chan_cfg->reference != ADC_REF_INTERNAL) {
LOG_ERR("Not support channel reference");
return -ENOTSUP;
}
if (chan_cfg->channel_id >= cfg->channel_cnt) {
LOG_ERR("Invalid channel (%u)", chan_cfg->channel_id);
return -EINVAL;
}
data->is_differential = (chan_cfg->differential) ? true : false;
return 0;
}
static int m_adc_numaker_validate_buffer_size(const struct device *dev,
const struct adc_sequence *sequence)
{
const struct adc_numaker_config *cfg = dev->config;
uint8_t channel_cnt = 0;
uint32_t mask;
size_t needed_size;
for (mask = BIT(cfg->channel_cnt - 1); mask != 0; mask >>= 1) {
if (mask & sequence->channels) {
channel_cnt++;
}
}
needed_size = channel_cnt * sizeof(uint16_t);
if (sequence->options) {
needed_size *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < needed_size) {
return -ENOBUFS;
}
return 0;
}
static void adc_numaker_isr(const struct device *dev)
{
const struct adc_numaker_config *cfg = dev->config;
EADC_T *eadc = cfg->eadc_base;
struct adc_numaker_data *const data = dev->data;
uint32_t channel_mask = data->channels;
uint32_t module_mask = channel_mask;
uint32_t module_id;
uint16_t conv_data;
uint32_t pend_flag;
LOG_DBG("ADC ISR pend flag: 0x%X\n", pend_flag);
LOG_DBG("ADC ISR STATUS2[0x%x] STATUS3[0x%x]", eadc->STATUS2, eadc->STATUS3);
/* Complete the conversion of channels.
* Check EAC idle by EADC_STATUS2_BUSY_Msk
* Check trigger source coming by EADC_STATUS2_ADOVIF_Msk
* Confirm all sample modules are idle by EADC_STATUS2_ADOVIF_Msk
*/
if (!(eadc->STATUS2 & EADC_STATUS2_BUSY_Msk) &&
((eadc->STATUS3 & EADC_STATUS3_CURSPL_Msk) == EADC_STATUS3_CURSPL_Msk)) {
/* Stop the conversion for sample module */
EADC_STOP_CONV(eadc, module_mask);
/* Disable sample module A/D ADINT0 interrupt. */
EADC_DISABLE_INT(eadc, BIT0);
/* Disable the sample module ADINT0 interrupt source */
EADC_DISABLE_SAMPLE_MODULE_INT(eadc, 0, module_mask);
/* Get conversion data of each sample module for selected channel */
while (module_mask) {
module_id = find_lsb_set(module_mask) - 1;
conv_data = EADC_GET_CONV_DATA(eadc, module_id);
if (data->buffer < data->buf_end) {
*data->buffer++ = conv_data;
LOG_DBG("ADC ISR id=%d, data=0x%x", module_id, conv_data);
}
module_mask &= ~BIT(module_id);
/* Disable all channels on each sample module */
eadc->SCTL[module_id] = 0;
}
/* Inform sampling is done */
adc_context_on_sampling_done(&data->ctx, data->dev);
}
/* Clear the A/D ADINT0 interrupt flag */
EADC_CLR_INT_FLAG(eadc, EADC_STATUS2_ADIF0_Msk);
}
static void m_adc_numaker_start_scan(const struct device *dev)
{
const struct adc_numaker_config *cfg = dev->config;
EADC_T *eadc = cfg->eadc_base;
struct adc_numaker_data *const data = dev->data;
uint32_t channel_mask = data->channels;
uint32_t module_mask = channel_mask;
uint32_t channel_id;
uint32_t module_id;
/* Configure the sample module, analog input channel and software trigger source */
while (channel_mask) {
channel_id = find_lsb_set(channel_mask) - 1;
module_id = channel_id;
channel_mask &= ~BIT(channel_id);
EADC_ConfigSampleModule(eadc, module_id,
EADC_SOFTWARE_TRIGGER, channel_id);
/* Set sample module external sampling time to 0 */
EADC_SetExtendSampleTime(eadc, module_id, data->acq_time);
}
/* Clear the A/D ADINT0 interrupt flag for safe */
EADC_CLR_INT_FLAG(eadc, EADC_STATUS2_ADIF0_Msk);
/* Enable sample module A/D ADINT0 interrupt. */
EADC_ENABLE_INT(eadc, BIT0);
/* Enable sample module interrupt ADINT0. */
EADC_ENABLE_SAMPLE_MODULE_INT(eadc, 0, module_mask);
/* Start conversion */
EADC_START_CONV(eadc, module_mask);
}
/* Implement ADC API functions of adc_context.h
* - adc_context_start_sampling()
* - adc_context_update_buffer_pointer()
*/
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct adc_numaker_data *const data =
CONTAINER_OF(ctx, struct adc_numaker_data, ctx);
data->repeat_buffer = data->buffer;
data->channels = ctx->sequence.channels;
/* Start ADC conversion for sample modules/channels */
m_adc_numaker_start_scan(data->dev);
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
bool repeat_sampling)
{
struct adc_numaker_data *data =
CONTAINER_OF(ctx, struct adc_numaker_data, ctx);
if (repeat_sampling) {
data->buffer = data->repeat_buffer;
}
}
static void adc_context_on_complete(struct adc_context *ctx, int status)
{
struct adc_numaker_data *data =
CONTAINER_OF(ctx, struct adc_numaker_data, ctx);
const struct adc_numaker_config *cfg = data->dev->config;
EADC_T *eadc = cfg->eadc_base;
ARG_UNUSED(status);
/* Disable ADC */
EADC_Close(eadc);
}
static int m_adc_numaker_start_read(const struct device *dev,
const struct adc_sequence *sequence)
{
const struct adc_numaker_config *cfg = dev->config;
struct adc_numaker_data *data = dev->data;
EADC_T *eadc = cfg->eadc_base;
int err;
err = m_adc_numaker_validate_buffer_size(dev, sequence);
if (err) {
LOG_ERR("ADC provided buffer is too small");
return err;
}
if (!sequence->resolution) {
LOG_ERR("ADC resolution is not valid");
return -EINVAL;
}
LOG_DBG("Configure resolution=%d", sequence->resolution);
/* Enable the A/D converter */
if (data->is_differential) {
EADC_Open(eadc, EADC_CTL_DIFFEN_DIFFERENTIAL);
} else {
EADC_Open(eadc, EADC_CTL_DIFFEN_SINGLE_END);
}
data->buffer = sequence->buffer;
data->buf_end = data->buffer + sequence->buffer_size / sizeof(uint16_t);
/* Start ADC conversion */
adc_context_start_read(&data->ctx, sequence);
return adc_context_wait_for_completion(&data->ctx);
}
static int adc_numaker_read(const struct device *dev,
const struct adc_sequence *sequence)
{
struct adc_numaker_data *data = dev->data;
int err;
adc_context_lock(&data->ctx, false, NULL);
err = m_adc_numaker_start_read(dev, sequence);
adc_context_release(&data->ctx, err);
return err;
}
#ifdef CONFIG_ADC_ASYNC
static int adc_numaker_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
struct adc_numaker_data *data = dev->data;
int err;
adc_context_lock(&data->ctx, true, async);
err = m_adc_numaker_start_read(dev, sequence);
adc_context_release(&data->ctx, err);
return err;
}
#endif
static const struct adc_driver_api adc_numaker_driver_api = {
.channel_setup = adc_numaker_channel_setup,
.read = adc_numaker_read,
#ifdef CONFIG_ADC_ASYNC
.read_async = adc_numaker_read_async,
#endif
};
static int adc_numaker_init(const struct device *dev)
{
const struct adc_numaker_config *cfg = dev->config;
struct adc_numaker_data *data = dev->data;
int err;
struct numaker_scc_subsys scc_subsys;
/* Validate this module's reset object */
if (!device_is_ready(cfg->reset.dev)) {
LOG_ERR("reset controller not ready");
return -ENODEV;
}
data->dev = dev;
SYS_UnlockReg();
/* CLK controller */
memset(&scc_subsys, 0x00, sizeof(scc_subsys));
scc_subsys.subsys_id = NUMAKER_SCC_SUBSYS_ID_PCC;
scc_subsys.pcc.clk_modidx = cfg->clk_modidx;
scc_subsys.pcc.clk_src = cfg->clk_src;
scc_subsys.pcc.clk_div = cfg->clk_div;
/* Equivalent to CLK_EnableModuleClock() */
err = clock_control_on(cfg->clk_dev, (clock_control_subsys_t)&scc_subsys);
if (err != 0) {
goto done;
}
/* Equivalent to CLK_SetModuleClock() */
err = clock_control_configure(cfg->clk_dev, (clock_control_subsys_t)&scc_subsys, NULL);
if (err != 0) {
goto done;
}
err = pinctrl_apply_state(cfg->pincfg, PINCTRL_STATE_DEFAULT);
if (err) {
LOG_ERR("Failed to apply pinctrl state");
goto done;
}
/* Reset EADC to default state, same as BSP's SYS_ResetModule(id_rst) */
reset_line_toggle_dt(&cfg->reset);
/* Enable NVIC */
cfg->irq_config_func(dev);
/* Init mutex of adc_context */
adc_context_unlock_unconditionally(&data->ctx);
done:
SYS_LockReg();
return err;
}
#define ADC_NUMAKER_IRQ_CONFIG_FUNC(n) \
static void adc_numaker_irq_config_func_##n(const struct device *dev) \
{ \
IRQ_CONNECT(DT_INST_IRQN(n), \
DT_INST_IRQ(n, priority), \
adc_numaker_isr, \
DEVICE_DT_INST_GET(n), 0); \
\
irq_enable(DT_INST_IRQN(n)); \
}
#define ADC_NUMAKER_INIT(inst) \
PINCTRL_DT_INST_DEFINE(inst); \
ADC_NUMAKER_IRQ_CONFIG_FUNC(inst) \
\
static const struct adc_numaker_config adc_numaker_cfg_##inst = { \
.eadc_base = (EADC_T *)DT_INST_REG_ADDR(inst), \
.channel_cnt = DT_INST_PROP(inst, channels), \
.reset = RESET_DT_SPEC_INST_GET(inst), \
.clk_modidx = DT_INST_CLOCKS_CELL(inst, clock_module_index), \
.clk_src = DT_INST_CLOCKS_CELL(inst, clock_source), \
.clk_div = DT_INST_CLOCKS_CELL(inst, clock_divider), \
.clk_dev = DEVICE_DT_GET(DT_PARENT(DT_INST_CLOCKS_CTLR(inst))), \
.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst), \
.irq_config_func = adc_numaker_irq_config_func_##inst, \
}; \
\
static struct adc_numaker_data adc_numaker_data_##inst = { \
ADC_CONTEXT_INIT_TIMER(adc_numaker_data_##inst, ctx), \
ADC_CONTEXT_INIT_LOCK(adc_numaker_data_##inst, ctx), \
ADC_CONTEXT_INIT_SYNC(adc_numaker_data_##inst, ctx), \
}; \
DEVICE_DT_INST_DEFINE(inst, \
&adc_numaker_init, NULL, \
&adc_numaker_data_##inst, &adc_numaker_cfg_##inst, \
POST_KERNEL, CONFIG_ADC_INIT_PRIORITY, \
&adc_numaker_driver_api);
DT_INST_FOREACH_STATUS_OKAY(ADC_NUMAKER_INIT)
``` | /content/code_sandbox/drivers/adc/adc_numaker.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,091 |
```c
/*
*
*/
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/adc.h>
#include <zephyr/drivers/i2c.h>
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/util.h>
#define ADC_CONTEXT_USES_KERNEL_TIMER 1
#include "adc_context.h"
#define DT_DRV_COMPAT ti_ads1112
LOG_MODULE_REGISTER(ADS1112, CONFIG_ADC_LOG_LEVEL);
#define ADS1112_CONFIG_GAIN(x) ((x)&BIT_MASK(2))
#define ADS1112_CONFIG_DR(x) (((x)&BIT_MASK(2)) << 2)
#define ADS1112_CONFIG_CM(x) (((x)&BIT_MASK(1)) << 4)
#define ADS1112_CONFIG_MUX(x) (((x)&BIT_MASK(2)) << 5)
#define ADS1112_CONFIG_MASK_READY BIT(7)
#define ADS1112_DEFAULT_CONFIG 0x8C
#define ADS1112_REF_INTERNAL 2048
enum ads1112_reg {
ADS1112_REG_OUTPUT = 0,
ADS1112_REG_CONFIG = 1,
};
enum {
ADS1112_CONFIG_MUX_DIFF_0_1 = 0,
ADS1112_CONFIG_MUX_BOTH_2_3 = 1,
ADS1112_CONFIG_MUX_SINGLE_0_3 = 2,
ADS1112_CONFIG_MUX_SINGLE_1_3 = 3,
};
enum {
ADS1112_CONFIG_DR_RATE_240_RES_12 = 0,
ADS1112_CONFIG_DR_RATE_60_RES_14 = 1,
ADS1112_CONFIG_DR_RATE_30_RES_15 = 2,
ADS1112_CONFIG_DR_RATE_15_RES_16 = 3,
ADS1112_CONFIG_DR_DEFAULT = ADS1112_CONFIG_DR_RATE_15_RES_16,
};
enum {
ADS1112_CONFIG_GAIN_1 = 0,
ADS1112_CONFIG_GAIN_2 = 1,
ADS1112_CONFIG_GAIN_4 = 2,
ADS1112_CONFIG_GAIN_8 = 3,
};
enum {
ADS1112_CONFIG_CM_SINGLE = 0,
ADS1112_CONFIG_CM_CONTINUOUS = 1,
};
struct ads1112_config {
const struct i2c_dt_spec bus;
};
struct ads1112_data {
struct adc_context ctx;
k_timeout_t ready_time;
struct k_sem acq_sem;
int16_t *buffer;
int16_t *buffer_ptr;
bool differential;
};
static int ads1112_read_reg(const struct device *dev, enum ads1112_reg reg_addr, uint8_t *reg_val)
{
const struct ads1112_config *config = dev->config;
uint8_t buf[3] = {0};
int rc = i2c_read_dt(&config->bus, buf, sizeof(buf));
if (reg_addr == ADS1112_REG_OUTPUT) {
reg_val[0] = buf[0];
reg_val[1] = buf[1];
} else {
reg_val[0] = buf[2];
}
return rc;
}
static int ads1112_write_reg(const struct device *dev, uint8_t reg)
{
uint8_t msg[1] = {reg};
const struct ads1112_config *config = dev->config;
/* It's only possible to write the config register, so the ADS1112
* assumes all writes are going to that register and omits the register
* parameter from write transactions
*/
return i2c_write_dt(&config->bus, msg, sizeof(msg));
}
static inline int ads1112_acq_time_to_dr(const struct device *dev, uint16_t acq_time)
{
struct ads1112_data *data = dev->data;
int odr = -EINVAL;
uint16_t acq_value = ADC_ACQ_TIME_VALUE(acq_time);
uint32_t ready_time_us = 0;
if (acq_time == ADC_ACQ_TIME_DEFAULT) {
acq_value = ADS1112_CONFIG_DR_DEFAULT;
} else if (ADC_ACQ_TIME_UNIT(acq_time) != ADC_ACQ_TIME_TICKS) {
return -EINVAL;
}
switch (acq_value) {
case ADS1112_CONFIG_DR_RATE_15_RES_16:
odr = ADS1112_CONFIG_DR_RATE_15_RES_16;
ready_time_us = (1000 * 1000) / 15;
break;
case ADS1112_CONFIG_DR_RATE_30_RES_15:
odr = ADS1112_CONFIG_DR_RATE_30_RES_15;
ready_time_us = (1000 * 1000) / 30;
break;
case ADS1112_CONFIG_DR_RATE_60_RES_14:
odr = ADS1112_CONFIG_DR_RATE_60_RES_14;
ready_time_us = (1000 * 1000) / 60;
break;
case ADS1112_CONFIG_DR_RATE_240_RES_12:
odr = ADS1112_CONFIG_DR_RATE_240_RES_12;
ready_time_us = (1000 * 1000) / 240;
break;
default:
break;
}
/* Add some additional time to ensure that the data is truly ready,
* as chips in this family often require some additional time beyond
* the listed times
*/
data->ready_time = K_USEC(ready_time_us + 10);
return odr;
}
static int ads1112_wait_data_ready(const struct device *dev)
{
int rc;
struct ads1112_data *data = dev->data;
k_sleep(data->ready_time);
uint8_t status = 0;
rc = ads1112_read_reg(dev, ADS1112_REG_CONFIG, &status);
if (rc != 0) {
return rc;
}
while ((status & ADS1112_CONFIG_MASK_READY) == 0) {
k_sleep(K_USEC(100));
rc = ads1112_read_reg(dev, ADS1112_REG_CONFIG, &status);
if (rc != 0) {
return rc;
}
}
return 0;
}
static int ads1112_read_sample(const struct device *dev, uint16_t *buff)
{
int res;
uint8_t sample[2] = {0};
const struct ads1112_config *config = dev->config;
res = ads1112_read_reg(dev, ADS1112_REG_OUTPUT, sample);
buff[0] = sys_get_be16(sample);
return res;
}
static int ads1112_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
struct ads1112_data *data = dev->data;
uint8_t config = 0;
int dr = 0;
if (channel_cfg->channel_id != 0) {
return -EINVAL;
}
if (channel_cfg->differential) {
if (channel_cfg->input_positive == 0 && channel_cfg->input_negative == 1) {
config |= ADS1112_CONFIG_MUX(ADS1112_CONFIG_MUX_DIFF_0_1);
} else if (channel_cfg->input_positive == 2 && channel_cfg->input_negative == 3) {
config |= ADS1112_CONFIG_MUX(ADS1112_CONFIG_MUX_BOTH_2_3);
} else {
return -EINVAL;
}
} else {
if (channel_cfg->input_positive == 0) {
config |= ADS1112_CONFIG_MUX(ADS1112_CONFIG_MUX_SINGLE_0_3);
} else if (channel_cfg->input_positive == 1) {
config |= ADS1112_CONFIG_MUX(ADS1112_CONFIG_MUX_SINGLE_1_3);
} else if (channel_cfg->input_positive == 2) {
config |= ADS1112_CONFIG_MUX(ADS1112_CONFIG_MUX_BOTH_2_3);
} else {
return -EINVAL;
}
}
data->differential = channel_cfg->differential;
dr = ads1112_acq_time_to_dr(dev, channel_cfg->acquisition_time);
if (dr < 0) {
return dr;
}
config |= ADS1112_CONFIG_DR(dr);
switch (channel_cfg->gain) {
case ADC_GAIN_1:
config |= ADS1112_CONFIG_GAIN(ADS1112_CONFIG_GAIN_1);
break;
case ADC_GAIN_2:
config |= ADS1112_CONFIG_GAIN(ADS1112_CONFIG_GAIN_2);
break;
case ADC_GAIN_3:
config |= ADS1112_CONFIG_GAIN(ADS1112_CONFIG_GAIN_4);
break;
case ADC_GAIN_4:
config |= ADS1112_CONFIG_GAIN(ADS1112_CONFIG_GAIN_8);
break;
default:
return -EINVAL;
}
config |= ADS1112_CONFIG_CM(ADS1112_CONFIG_CM_SINGLE); /* Only single shot supported */
return ads1112_write_reg(dev, config);
}
static int ads1112_validate_buffer_size(const struct adc_sequence *sequence)
{
size_t needed = sizeof(int16_t);
if (sequence->options) {
needed *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < needed) {
LOG_ERR("Insufficient buffer %i < %i", sequence->buffer_size, needed);
return -ENOMEM;
}
return 0;
}
static int ads1112_validate_sequence(const struct device *dev, const struct adc_sequence *sequence)
{
const struct ads1112_data *data = dev->data;
if (sequence->channels != BIT(0)) {
LOG_ERR("Invalid Channel 0x%x", sequence->channels);
return -EINVAL;
}
if (sequence->oversampling) {
LOG_ERR("Oversampling not supported");
return -EINVAL;
}
return ads1112_validate_buffer_size(sequence);
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx, bool repeat_sampling)
{
struct ads1112_data *data = CONTAINER_OF(ctx, struct ads1112_data, ctx);
if (repeat_sampling) {
data->buffer = data->buffer_ptr;
}
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct ads1112_data *data = CONTAINER_OF(ctx, struct ads1112_data, ctx);
data->buffer_ptr = data->buffer;
k_sem_give(&data->acq_sem);
}
static int ads1112_adc_start_read(const struct device *dev, const struct adc_sequence *sequence,
bool wait)
{
int rc = 0;
struct ads1112_data *data = dev->data;
rc = ads1112_validate_sequence(dev, sequence);
if (rc != 0) {
return rc;
}
data->buffer = sequence->buffer;
adc_context_start_read(&data->ctx, sequence);
if (wait) {
rc = adc_context_wait_for_completion(&data->ctx);
}
return rc;
}
static int ads1112_adc_perform_read(const struct device *dev)
{
int rc;
struct ads1112_data *data = dev->data;
k_sem_take(&data->acq_sem, K_FOREVER);
rc = ads1112_wait_data_ready(dev);
if (rc != 0) {
adc_context_complete(&data->ctx, rc);
return rc;
}
rc = ads1112_read_sample(dev, data->buffer);
if (rc != 0) {
adc_context_complete(&data->ctx, rc);
return rc;
}
data->buffer++;
adc_context_on_sampling_done(&data->ctx, dev);
return rc;
}
static int ads1112_read(const struct device *dev, const struct adc_sequence *sequence)
{
int rc;
struct ads1112_data *data = dev->data;
adc_context_lock(&data->ctx, false, NULL);
rc = ads1112_adc_start_read(dev, sequence, false);
while (rc == 0 && k_sem_take(&data->ctx.sync, K_NO_WAIT) != 0) {
rc = ads1112_adc_perform_read(dev);
}
adc_context_release(&data->ctx, rc);
return rc;
}
static int ads1112_init(const struct device *dev)
{
int rc = 0;
uint8_t status;
const struct ads1112_config *config = dev->config;
struct ads1112_data *data = dev->data;
adc_context_init(&data->ctx);
k_sem_init(&data->acq_sem, 0, 1);
if (!device_is_ready(config->bus.bus)) {
return -ENODEV;
}
rc = ads1112_write_reg(dev, ADS1112_DEFAULT_CONFIG);
if (rc) {
LOG_ERR("Could not set default config 0x%x", ADS1112_DEFAULT_CONFIG);
return rc;
}
adc_context_unlock_unconditionally(&data->ctx);
return rc;
}
static const struct adc_driver_api api = {
.channel_setup = ads1112_channel_setup,
.read = ads1112_read,
.ref_internal = ADS1112_REF_INTERNAL,
};
#define ADC_ADS1112_INST_DEFINE(n) \
static const struct ads1112_config config_##n = {.bus = I2C_DT_SPEC_INST_GET(n)}; \
static struct ads1112_data data_##n; \
DEVICE_DT_INST_DEFINE(n, ads1112_init, NULL, &data_##n, &config_##n, POST_KERNEL, \
CONFIG_ADC_INIT_PRIORITY, &api);
DT_INST_FOREACH_STATUS_OKAY(ADC_ADS1112_INST_DEFINE);
``` | /content/code_sandbox/drivers/adc/adc_ads1112.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,878 |
```c
/*
*
*/
#define DT_DRV_COMPAT atmel_sam_adc
#include <soc.h>
#include <zephyr/drivers/adc.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/clock_control/atmel_sam_pmc.h>
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(adc_sam, CONFIG_ADC_LOG_LEVEL);
#define SAM_ADC_NUM_CHANNELS 16
#define SAM_ADC_TEMP_CHANNEL 15
struct adc_sam_config {
Adc *regs;
const struct atmel_sam_pmc_config clock_cfg;
uint8_t prescaler;
uint8_t startup_time;
uint8_t settling_time;
uint8_t tracking_time;
const struct pinctrl_dev_config *pcfg;
void (*config_func)(const struct device *dev);
};
struct adc_sam_data {
struct adc_context ctx;
const struct device *dev;
/* Pointer to the buffer in the sequence. */
uint16_t *buffer;
/* Pointer to the beginning of a sample. Consider the number of
* channels in the sequence: this buffer changes by that amount
* so all the channels would get repeated.
*/
uint16_t *repeat_buffer;
/* Number of active channels to fill buffer */
uint8_t num_active_channels;
};
static uint8_t count_bits(uint32_t val)
{
uint8_t res = 0;
while (val) {
res += val & 1U;
val >>= 1;
}
return res;
}
static int adc_sam_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
const struct adc_sam_config *const cfg = dev->config;
Adc *const adc = cfg->regs;
uint8_t channel_id = channel_cfg->channel_id;
if (channel_cfg->differential) {
if (channel_id != (channel_cfg->input_positive / 2U)
|| channel_id != (channel_cfg->input_negative / 2U)) {
LOG_ERR("Invalid ADC differential input for channel %u", channel_id);
return -EINVAL;
}
} else {
if (channel_id != channel_cfg->input_positive) {
LOG_ERR("Invalid ADC single-ended input for channel %u", channel_id);
return -EINVAL;
}
}
if (channel_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
LOG_ERR("Invalid ADC channel acquisition time");
return -EINVAL;
}
if (channel_cfg->reference != ADC_REF_EXTERNAL0) {
LOG_ERR("Invalid ADC channel reference (%d)", channel_cfg->reference);
return -EINVAL;
}
/* Enable internal temperature sensor (channel 15 / single-ended) */
if (channel_cfg->channel_id == SAM_ADC_TEMP_CHANNEL) {
adc->ADC_ACR |= ADC_ACR_TSON;
}
/* Set channel mode, always on both inputs */
if (channel_cfg->differential) {
adc->ADC_COR |= (ADC_COR_DIFF0 | ADC_COR_DIFF1) << (channel_id * 2U);
} else {
adc->ADC_COR &= ~((ADC_COR_DIFF0 | ADC_COR_DIFF1) << (channel_id * 2U));
}
/* Reset current gain */
adc->ADC_CGR &= ~(ADC_CGR_GAIN0_Msk << (channel_id * 2U));
switch (channel_cfg->gain) {
case ADC_GAIN_1_2:
if (!channel_cfg->differential) {
LOG_ERR("ADC 1/2x gain only allowed for differential channel");
return -EINVAL;
}
/* NOP */
break;
case ADC_GAIN_1:
adc->ADC_CGR |= ADC_CGR_GAIN0(1) << (channel_id * 2U);
break;
case ADC_GAIN_2:
adc->ADC_CGR |= ADC_CGR_GAIN0(2) << (channel_id * 2U);
break;
case ADC_GAIN_4:
if (channel_cfg->differential) {
LOG_ERR("ADC 4x gain only allowed for single-ended channel");
return -EINVAL;
}
adc->ADC_CGR |= ADC_CGR_GAIN0(3) << (channel_id * 2U);
break;
default:
LOG_ERR("Invalid ADC channel gain (%d)", channel_cfg->gain);
return -EINVAL;
}
return 0;
}
static void adc_sam_start_conversion(const struct device *dev)
{
const struct adc_sam_config *const cfg = dev->config;
Adc *const adc = cfg->regs;
adc->ADC_CR = ADC_CR_START;
}
/**
* This is only called once at the beginning of all the conversions,
* all channels as a group.
*/
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct adc_sam_data *data = CONTAINER_OF(ctx, struct adc_sam_data, ctx);
const struct adc_sam_config *const cfg = data->dev->config;
Adc *const adc = cfg->regs;
data->num_active_channels = count_bits(ctx->sequence.channels);
/* Disable all */
adc->ADC_CHDR = 0xffff;
/* Enable selected */
adc->ADC_CHER = ctx->sequence.channels;
LOG_DBG("Starting conversion for %u channels", data->num_active_channels);
adc_sam_start_conversion(data->dev);
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx, bool repeat)
{
struct adc_sam_data *data = CONTAINER_OF(ctx, struct adc_sam_data, ctx);
if (repeat) {
data->buffer = data->repeat_buffer;
}
}
static int check_buffer_size(const struct adc_sequence *sequence,
uint8_t active_channels)
{
size_t needed_buffer_size = active_channels * sizeof(uint16_t);
if (sequence->options) {
needed_buffer_size *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < needed_buffer_size) {
LOG_ERR("Provided buffer is too small (%u/%u)",
sequence->buffer_size, needed_buffer_size);
return -ENOMEM;
}
return 0;
}
static int start_read(const struct device *dev,
const struct adc_sequence *sequence)
{
struct adc_sam_data *data = dev->data;
uint32_t channels = sequence->channels;
int error;
/* Signal an error if the channel selection is invalid (no channels or
* a non-existing one is selected).
*/
if (channels == 0U ||
(channels & (~0UL << SAM_ADC_NUM_CHANNELS))) {
LOG_ERR("Invalid selection of channels");
return -EINVAL;
}
if (sequence->oversampling != 0U) {
LOG_ERR("Oversampling is not supported");
return -EINVAL;
}
if (sequence->resolution != 12U) {
LOG_ERR("ADC resolution %d is not valid", sequence->resolution);
return -EINVAL;
}
data->num_active_channels = count_bits(channels);
error = check_buffer_size(sequence, data->num_active_channels);
if (error) {
return error;
}
data->buffer = sequence->buffer;
data->repeat_buffer = sequence->buffer;
/* At this point we allow the scheduler to do other things while
* we wait for the conversions to complete. This is provided by the
* adc_context functions. However, the caller of this function is
* blocked until the results are in.
*/
adc_context_start_read(&data->ctx, sequence);
return adc_context_wait_for_completion(&data->ctx);
}
static int adc_sam_read(const struct device *dev,
const struct adc_sequence *sequence)
{
struct adc_sam_data *data = dev->data;
int error;
adc_context_lock(&data->ctx, false, NULL);
error = start_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
static void adc_sam_isr(const struct device *dev)
{
const struct adc_sam_config *const cfg = dev->config;
struct adc_sam_data *data = dev->data;
Adc *const adc = cfg->regs;
uint16_t result;
if (adc->ADC_ISR & ADC_ISR_DRDY) {
result = adc->ADC_LCDR & ADC_LCDR_LDATA_Msk;
*data->buffer++ = result;
data->num_active_channels--;
if (data->num_active_channels == 0) {
/* Called once all conversions have completed.*/
adc_context_on_sampling_done(&data->ctx, dev);
} else {
adc_sam_start_conversion(dev);
}
}
}
static int adc_sam_init(const struct device *dev)
{
const struct adc_sam_config *const cfg = dev->config;
struct adc_sam_data *data = dev->data;
Adc *const adc = cfg->regs;
int ret;
uint32_t frequency, conv_periods;
/* Get peripheral clock frequency */
ret = clock_control_get_rate(SAM_DT_PMC_CONTROLLER,
(clock_control_subsys_t)&cfg->clock_cfg,
&frequency);
if (ret < 0) {
LOG_ERR("Failed to get ADC peripheral clock rate (%d)", ret);
return -ENODEV;
}
/* Calculate ADC clock frequency */
frequency = frequency / 2U / (cfg->prescaler + 1U);
if (frequency < 1000000U || frequency > 22000000U) {
LOG_ERR("Invalid ADC clock frequency %d (1MHz < freq < 22Mhz)", frequency);
return -EINVAL;
}
/* The number of ADC pulses for conversion */
conv_periods = MAX(20U, cfg->tracking_time + 6U);
/* Calculate the sampling frequency */
frequency /= conv_periods;
/* Reset ADC controller */
adc->ADC_CR = ADC_CR_SWRST;
/* Reset Mode */
adc->ADC_MR = 0U;
/* Reset PDC transfer */
adc->ADC_PTCR = ADC_PTCR_RXTDIS | ADC_PTCR_TXTDIS;
adc->ADC_RCR = 0U;
adc->ADC_RNCR = 0U;
/* Set prescaler, timings and allow different analog settings for each channel */
adc->ADC_MR = ADC_MR_PRESCAL(cfg->prescaler)
| ADC_MR_STARTUP(cfg->startup_time)
| ADC_MR_SETTLING(cfg->settling_time)
| ADC_MR_TRACKTIM(cfg->tracking_time)
| ADC_MR_TRANSFER(2U) /* Should be 2 to guarantee the optimal hold time. */
| ADC_MR_ANACH_ALLOWED;
/**
* Set bias current control
* IBCTL = 00 is the required value for a sampling frequency below 500 kHz,
* and IBCTL = 01 for a sampling frequency between 500 kHz and 1 MHz.
*/
adc->ADC_ACR = ADC_ACR_IBCTL(frequency < 500000U ? 0U : 1U);
/* Enable ADC clock in PMC */
ret = clock_control_on(SAM_DT_PMC_CONTROLLER,
(clock_control_subsys_t)&cfg->clock_cfg);
if (ret < 0) {
LOG_ERR("Failed to enable ADC clock (%d)", ret);
return -ENODEV;
}
ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
if (ret < 0) {
return ret;
}
cfg->config_func(dev);
/* Enable data ready interrupt */
adc->ADC_IER = ADC_IER_DRDY;
data->dev = dev;
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
#ifdef CONFIG_ADC_ASYNC
static int adc_sam_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
struct adc_sam_data *data = dev->data;
int error;
adc_context_lock(&data->ctx, true, async);
error = start_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
#endif
static const struct adc_driver_api adc_sam_api = {
.channel_setup = adc_sam_channel_setup,
.read = adc_sam_read,
#ifdef CONFIG_ADC_ASYNC
.read_async = adc_sam_read_async,
#endif
};
#define ADC_SAM_DEVICE(n) \
PINCTRL_DT_INST_DEFINE(n); \
static void adc_sam_irq_config_##n(const struct device *dev) \
{ \
IRQ_CONNECT(DT_INST_IRQN(n), \
DT_INST_IRQ(n, priority), \
adc_sam_isr, \
DEVICE_DT_INST_GET(n), 0); \
irq_enable(DT_INST_IRQN(n)); \
} \
static const struct adc_sam_config adc_sam_config_##n = { \
.regs = (Adc *)DT_INST_REG_ADDR(n), \
.clock_cfg = SAM_DT_INST_CLOCK_PMC_CFG(n), \
.prescaler = DT_INST_PROP(n, prescaler), \
.startup_time = DT_INST_ENUM_IDX(n, startup_time), \
.settling_time = DT_INST_ENUM_IDX(n, settling_time), \
.tracking_time = DT_INST_ENUM_IDX(n, tracking_time), \
.config_func = &adc_sam_irq_config_##n, \
.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \
}; \
static struct adc_sam_data adc_sam_data_##n = { \
ADC_CONTEXT_INIT_TIMER(adc_sam_data_##n, ctx), \
ADC_CONTEXT_INIT_LOCK(adc_sam_data_##n, ctx), \
ADC_CONTEXT_INIT_SYNC(adc_sam_data_##n, ctx), \
.dev = DEVICE_DT_INST_GET(n), \
}; \
DEVICE_DT_INST_DEFINE(n, adc_sam_init, NULL, \
&adc_sam_data_##n, \
&adc_sam_config_##n, POST_KERNEL, \
CONFIG_ADC_INIT_PRIORITY, \
&adc_sam_api);
DT_INST_FOREACH_STATUS_OKAY(ADC_SAM_DEVICE)
``` | /content/code_sandbox/drivers/adc/adc_sam.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,085 |
```c
/*
*
*/
#define DT_DRV_COMPAT espressif_esp32_adc
#include <errno.h>
#include <hal/adc_hal.h>
#include <hal/adc_types.h>
#include <soc/adc_periph.h>
#include <esp_adc_cal.h>
#include <esp_clk_tree.h>
#include <esp_private/periph_ctrl.h>
#include <esp_private/sar_periph_ctrl.h>
#include <esp_private/adc_share_hw_ctrl.h>
#if defined(CONFIG_ADC_ESP32_DMA)
#if !SOC_GDMA_SUPPORTED
#error "SoCs without GDMA peripheral are not supported!"
#endif
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/dma/dma_esp32.h>
#endif
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/drivers/adc.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(adc_esp32, CONFIG_ADC_LOG_LEVEL);
#define ADC_RESOLUTION_MIN SOC_ADC_DIGI_MIN_BITWIDTH
#define ADC_RESOLUTION_MAX SOC_ADC_DIGI_MAX_BITWIDTH
#if CONFIG_SOC_SERIES_ESP32
#define ADC_CALI_SCHEME ESP_ADC_CAL_VAL_EFUSE_VREF
/* Due to significant measurement discrepancy in higher voltage range, we
* clip the value instead of yet another correction. The IDF implementation
* for ESP32-S2 is doing it, so we copy that approach in Zephyr driver
*/
#define ADC_CLIP_MVOLT_11DB 2550
#elif CONFIG_SOC_SERIES_ESP32S3
#define ADC_CALI_SCHEME ESP_ADC_CAL_VAL_EFUSE_TP_FIT
#else
#define ADC_CALI_SCHEME ESP_ADC_CAL_VAL_EFUSE_TP
#endif
/* Validate if resolution in bits is within allowed values */
#define VALID_RESOLUTION(r) ((r) >= ADC_RESOLUTION_MIN && (r) <= ADC_RESOLUTION_MAX)
#define INVALID_RESOLUTION(r) (!VALID_RESOLUTION(r))
/* Default internal reference voltage */
#define ADC_ESP32_DEFAULT_VREF_INTERNAL (1100)
#define ADC_DMA_BUFFER_SIZE DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED
struct adc_esp32_conf {
adc_unit_t unit;
uint8_t channel_count;
#if defined(CONFIG_ADC_ESP32_DMA)
const struct device *gpio_port;
const struct device *dma_dev;
uint8_t dma_channel;
#endif /* defined(CONFIG_ADC_ESP32_DMA) */
};
struct adc_esp32_data {
adc_atten_t attenuation[SOC_ADC_MAX_CHANNEL_NUM];
uint8_t resolution[SOC_ADC_MAX_CHANNEL_NUM];
esp_adc_cal_characteristics_t chars[SOC_ADC_MAX_CHANNEL_NUM];
uint16_t meas_ref_internal;
uint16_t *buffer;
bool calibrate;
#if defined(CONFIG_ADC_ESP32_DMA)
adc_hal_dma_ctx_t adc_hal_dma_ctx;
uint8_t *dma_buffer;
struct k_sem dma_conv_wait_lock;
#endif /* defined(CONFIG_ADC_ESP32_DMA) */
};
/* Convert zephyr,gain property to the ESP32 attenuation */
static inline int gain_to_atten(enum adc_gain gain, adc_atten_t *atten)
{
switch (gain) {
case ADC_GAIN_1:
*atten = ADC_ATTEN_DB_0;
break;
case ADC_GAIN_4_5:
*atten = ADC_ATTEN_DB_2_5;
break;
case ADC_GAIN_1_2:
*atten = ADC_ATTEN_DB_6;
break;
case ADC_GAIN_1_4:
*atten = ADC_ATTEN_DB_11;
break;
default:
return -ENOTSUP;
}
return 0;
}
#if !defined(CONFIG_ADC_ESP32_DMA)
/* Convert voltage by inverted attenuation to support zephyr gain values */
static void atten_to_gain(adc_atten_t atten, uint32_t *val_mv)
{
if (!val_mv) {
return;
}
switch (atten) {
case ADC_ATTEN_DB_2_5:
*val_mv = (*val_mv * 4) / 5; /* 1/ADC_GAIN_4_5 */
break;
case ADC_ATTEN_DB_6:
*val_mv = *val_mv >> 1; /* 1/ADC_GAIN_1_2 */
break;
case ADC_ATTEN_DB_11:
*val_mv = *val_mv / 4; /* 1/ADC_GAIN_1_4 */
break;
case ADC_ATTEN_DB_0: /* 1/ADC_GAIN_1 */
default:
break;
}
}
#endif /* !defined(CONFIG_ADC_ESP32_DMA) */
static void adc_hw_calibration(adc_unit_t unit)
{
#if SOC_ADC_CALIBRATION_V1_SUPPORTED
adc_hal_calibration_init(unit);
for (int j = 0; j < SOC_ADC_ATTEN_NUM; j++) {
adc_calc_hw_calibration_code(unit, j);
#if SOC_ADC_CALIB_CHAN_COMPENS_SUPPORTED
/* Load the channel compensation from efuse */
for (int k = 0; k < SOC_ADC_CHANNEL_NUM(unit); k++) {
adc_load_hw_calibration_chan_compens(unit, k, j);
}
#endif /* SOC_ADC_CALIB_CHAN_COMPENS_SUPPORTED */
}
#endif /* SOC_ADC_CALIBRATION_V1_SUPPORTED */
}
static bool adc_calibration_init(const struct device *dev)
{
struct adc_esp32_data *data = dev->data;
switch (esp_adc_cal_check_efuse(ADC_CALI_SCHEME)) {
case ESP_ERR_NOT_SUPPORTED:
LOG_WRN("Skip software calibration - Not supported!");
break;
case ESP_ERR_INVALID_VERSION:
LOG_WRN("Skip software calibration - Invalid version!");
break;
case ESP_OK:
LOG_DBG("Software calibration possible");
return true;
default:
LOG_ERR("Invalid arg");
break;
}
return false;
}
#if defined(CONFIG_ADC_ESP32_DMA)
static void IRAM_ATTR adc_esp32_dma_conv_done(const struct device *dma_dev, void *user_data,
uint32_t channel, int status)
{
ARG_UNUSED(dma_dev);
ARG_UNUSED(status);
const struct device *dev = user_data;
struct adc_esp32_data *data = dev->data;
k_sem_give(&data->dma_conv_wait_lock);
}
static int adc_esp32_dma_start(const struct device *dev, uint8_t *buf, size_t len)
{
const struct adc_esp32_conf *conf = dev->config;
struct adc_esp32_data *data = dev->data;
int err = 0;
struct dma_config dma_cfg = {0};
struct dma_status dma_status = {0};
struct dma_block_config dma_blk = {0};
err = dma_get_status(conf->dma_dev, conf->dma_channel, &dma_status);
if (err) {
LOG_ERR("Unable to get dma channel[%u] status (%d)",
(unsigned int)conf->dma_channel, err);
return -EINVAL;
}
if (dma_status.busy) {
LOG_ERR("dma channel[%u] is busy!", (unsigned int)conf->dma_channel);
return -EBUSY;
}
unsigned int key = irq_lock();
dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY;
dma_cfg.dma_callback = adc_esp32_dma_conv_done;
dma_cfg.user_data = (void *)dev;
dma_cfg.dma_slot = ESP_GDMA_TRIG_PERIPH_ADC0;
dma_cfg.block_count = 1;
dma_cfg.head_block = &dma_blk;
dma_blk.block_size = len;
dma_blk.dest_address = (uint32_t)buf;
err = dma_config(conf->dma_dev, conf->dma_channel, &dma_cfg);
if (err) {
LOG_ERR("Error configuring dma (%d)", err);
goto unlock;
}
err = dma_start(conf->dma_dev, conf->dma_channel);
if (err) {
LOG_ERR("Error starting dma (%d)", err);
goto unlock;
}
unlock:
irq_unlock(key);
return err;
}
static int adc_esp32_dma_stop(const struct device *dev)
{
const struct adc_esp32_conf *conf = dev->config;
unsigned int key = irq_lock();
int err = 0;
err = dma_stop(conf->dma_dev, conf->dma_channel);
if (err) {
LOG_ERR("Error stopping dma (%d)", err);
}
irq_unlock(key);
return err;
}
static int adc_esp32_fill_digi_pattern(const struct device *dev, const struct adc_sequence *seq,
void *pattern_config, uint32_t *pattern_len, uint32_t *unit_attenuation)
{
const struct adc_esp32_conf *conf = dev->config;
struct adc_esp32_data *data = dev->data;
adc_digi_pattern_config_t *adc_digi_pattern_config =
(adc_digi_pattern_config_t *)pattern_config;
const uint32_t unit_atten_uninit = 999;
uint32_t channel_mask = 1, channels_copy = seq->channels;
*pattern_len = 0;
*unit_attenuation = unit_atten_uninit;
for (uint8_t channel_id = 0; channel_id < conf->channel_count; channel_id++) {
if (channels_copy & channel_mask) {
if (*unit_attenuation == unit_atten_uninit) {
*unit_attenuation = data->attenuation[channel_id];
} else if (*unit_attenuation != data->attenuation[channel_id]) {
LOG_ERR("Channel[%u] attenuation different of unit[%u] attenuation",
(unsigned int)channel_id, (unsigned int)conf->unit);
return -EINVAL;
}
adc_digi_pattern_config->atten = data->attenuation[channel_id];
adc_digi_pattern_config->channel = channel_id;
adc_digi_pattern_config->unit = conf->unit;
adc_digi_pattern_config->bit_width = seq->resolution;
adc_digi_pattern_config++;
*pattern_len += 1;
if (*pattern_len > SOC_ADC_PATT_LEN_MAX) {
LOG_ERR("Max pattern len is %d", SOC_ADC_PATT_LEN_MAX);
return -EINVAL;
}
channels_copy &= ~channel_mask;
if (!channels_copy) {
break;
}
}
channel_mask <<= 1;
}
return 0;
}
static void adc_esp32_digi_start(const struct device *dev, void *pattern_config,
uint32_t pattern_len, uint32_t number_of_samplings,
uint32_t sample_freq_hz, uint32_t unit_attenuation)
{
const struct adc_esp32_conf *conf = dev->config;
struct adc_esp32_data *data = dev->data;
sar_periph_ctrl_adc_continuous_power_acquire();
adc_lock_acquire(conf->unit);
#if SOC_ADC_CALIBRATION_V1_SUPPORTED
adc_set_hw_calibration_code(conf->unit, unit_attenuation);
#endif /* SOC_ADC_CALIBRATION_V1_SUPPORTED */
#if SOC_ADC_ARBITER_SUPPORTED
if (conf->unit == ADC_UNIT_2) {
adc_arbiter_t config = ADC_ARBITER_CONFIG_DEFAULT();
adc_hal_arbiter_config(&config);
}
#endif /* SOC_ADC_ARBITER_SUPPORTED */
adc_hal_digi_ctrlr_cfg_t adc_hal_digi_ctrlr_cfg;
soc_module_clk_t clk_src = ADC_DIGI_CLK_SRC_DEFAULT;
uint32_t clk_src_freq_hz = 0;
esp_clk_tree_src_get_freq_hz(clk_src, ESP_CLK_TREE_SRC_FREQ_PRECISION_CACHED,
&clk_src_freq_hz);
adc_hal_digi_ctrlr_cfg.conv_mode =
(conf->unit == ADC_UNIT_1)?ADC_CONV_SINGLE_UNIT_1:ADC_CONV_SINGLE_UNIT_2;
adc_hal_digi_ctrlr_cfg.clk_src = clk_src;
adc_hal_digi_ctrlr_cfg.clk_src_freq_hz = clk_src_freq_hz;
adc_hal_digi_ctrlr_cfg.sample_freq_hz = sample_freq_hz;
adc_hal_digi_ctrlr_cfg.adc_pattern = (adc_digi_pattern_config_t *)pattern_config;
adc_hal_digi_ctrlr_cfg.adc_pattern_len = pattern_len;
uint32_t number_of_adc_digi_samples = number_of_samplings * pattern_len;
adc_hal_dma_config_t adc_hal_dma_config = {
.dev = (void *)GDMA_LL_GET_HW(0),
.eof_desc_num = 1,
.eof_step = 1,
.dma_chan = conf->dma_channel,
.eof_num = number_of_adc_digi_samples,
};
adc_hal_dma_ctx_config(&data->adc_hal_dma_ctx, &adc_hal_dma_config);
adc_hal_set_controller(conf->unit, ADC_HAL_CONTINUOUS_READ_MODE);
adc_hal_digi_init(&data->adc_hal_dma_ctx);
adc_hal_digi_controller_config(&data->adc_hal_dma_ctx, &adc_hal_digi_ctrlr_cfg);
adc_hal_digi_start(&data->adc_hal_dma_ctx, data->dma_buffer);
}
static void adc_esp32_digi_stop(const struct device *dev)
{
const struct adc_esp32_conf *conf = dev->config;
struct adc_esp32_data *data = dev->data;
adc_hal_digi_dis_intr(&data->adc_hal_dma_ctx, ADC_HAL_DMA_INTR_MASK);
adc_hal_digi_clr_intr(&data->adc_hal_dma_ctx, ADC_HAL_DMA_INTR_MASK);
adc_hal_digi_stop(&data->adc_hal_dma_ctx);
adc_hal_digi_deinit(&data->adc_hal_dma_ctx);
adc_lock_release(conf->unit);
sar_periph_ctrl_adc_continuous_power_release();
}
static void adc_esp32_fill_seq_buffer(const void *seq_buffer, const void *dma_buffer,
uint32_t number_of_samples)
{
uint16_t *sample = (uint16_t *)seq_buffer;
adc_digi_output_data_t *digi_data = (adc_digi_output_data_t *)dma_buffer;
for (uint32_t k = 0; k < number_of_samples; k++) {
*sample++ = (uint16_t)(digi_data++)->type2.data;
}
}
static int adc_esp32_wait_for_dma_conv_done(const struct device *dev)
{
struct adc_esp32_data *data = dev->data;
int err = 0;
err = k_sem_take(&data->dma_conv_wait_lock, K_FOREVER);
if (err) {
LOG_ERR("Error taking dma_conv_wait_lock (%d)", err);
}
return err;
}
#endif /* defined(CONFIG_ADC_ESP32_DMA) */
static int adc_esp32_read(const struct device *dev, const struct adc_sequence *seq)
{
const struct adc_esp32_conf *conf = dev->config;
struct adc_esp32_data *data = dev->data;
int reading;
uint32_t cal, cal_mv;
uint8_t channel_id = find_lsb_set(seq->channels) - 1;
if (seq->buffer_size < 2) {
LOG_ERR("Sequence buffer space too low '%d'", seq->buffer_size);
return -ENOMEM;
}
#if !defined(CONFIG_ADC_ESP32_DMA)
if (seq->channels > BIT(channel_id)) {
LOG_ERR("Multi-channel readings not supported");
return -ENOTSUP;
}
#endif /* !defined(CONFIG_ADC_ESP32_DMA) */
if (seq->options) {
if (seq->options->extra_samplings) {
LOG_ERR("Extra samplings not supported");
return -ENOTSUP;
}
#if !defined(CONFIG_ADC_ESP32_DMA)
if (seq->options->interval_us) {
LOG_ERR("Interval between samplings not supported");
return -ENOTSUP;
}
#endif /* !defined(CONFIG_ADC_ESP32_DMA) */
}
if (INVALID_RESOLUTION(seq->resolution)) {
LOG_ERR("unsupported resolution (%d)", seq->resolution);
return -ENOTSUP;
}
if (seq->calibrate) {
/* TODO: Does this mean actual Vref measurement on selected GPIO ?*/
LOG_ERR("calibration is not supported");
return -ENOTSUP;
}
data->resolution[channel_id] = seq->resolution;
#if CONFIG_SOC_SERIES_ESP32C3
/* NOTE: nothing to set on ESP32C3 SoC */
if (conf->unit == ADC_UNIT_1) {
adc1_config_width(ADC_WIDTH_BIT_DEFAULT);
}
#else
adc_set_data_width(conf->unit, data->resolution[channel_id]);
#endif /* CONFIG_SOC_SERIES_ESP32C3 */
#if !defined(CONFIG_ADC_ESP32_DMA)
/* Read raw value */
if (conf->unit == ADC_UNIT_1) {
reading = adc1_get_raw(channel_id);
}
if (conf->unit == ADC_UNIT_2) {
if (adc2_get_raw(channel_id, ADC_WIDTH_BIT_DEFAULT, &reading)) {
LOG_ERR("Conversion timeout on '%s' channel %d", dev->name, channel_id);
return -ETIMEDOUT;
}
}
/* Calibration scheme is available */
if (data->calibrate) {
data->chars[channel_id].bit_width = data->resolution[channel_id];
/* Get corrected voltage output */
cal = cal_mv = esp_adc_cal_raw_to_voltage(reading, &data->chars[channel_id]);
#if CONFIG_SOC_SERIES_ESP32
if (data->attenuation[channel_id] == ADC_ATTEN_DB_11) {
if (cal > ADC_CLIP_MVOLT_11DB) {
cal = ADC_CLIP_MVOLT_11DB;
}
}
#endif /* CONFIG_SOC_SERIES_ESP32 */
/* Fit according to selected attenuation */
atten_to_gain(data->attenuation[channel_id], &cal);
if (data->meas_ref_internal > 0) {
cal = (cal << data->resolution[channel_id]) / data->meas_ref_internal;
}
} else {
LOG_DBG("Using uncalibrated values!");
/* Uncalibrated raw value */
cal = reading;
}
/* Store result */
data->buffer = (uint16_t *) seq->buffer;
data->buffer[0] = cal;
#else /* !defined(CONFIG_ADC_ESP32_DMA) */
int err = 0;
uint32_t adc_pattern_len, unit_attenuation;
adc_digi_pattern_config_t adc_digi_pattern_config[SOC_ADC_MAX_CHANNEL_NUM];
err = adc_esp32_fill_digi_pattern(dev, seq, &adc_digi_pattern_config,
&adc_pattern_len, &unit_attenuation);
if (err || adc_pattern_len == 0) {
return -EINVAL;
}
const struct adc_sequence_options *options = seq->options;
uint32_t sample_freq_hz = SOC_ADC_SAMPLE_FREQ_THRES_HIGH,
number_of_samplings = 1;
if (options != NULL) {
number_of_samplings = seq->buffer_size / (adc_pattern_len * sizeof(uint16_t));
if (options->interval_us) {
sample_freq_hz = MHZ(1) / options->interval_us;
}
}
if (!number_of_samplings) {
LOG_ERR("buffer_size insufficient to store at least one set of samples!");
return -EINVAL;
}
if (sample_freq_hz < SOC_ADC_SAMPLE_FREQ_THRES_LOW ||
sample_freq_hz > SOC_ADC_SAMPLE_FREQ_THRES_HIGH) {
LOG_ERR("ADC sampling frequency out of range: %uHz", sample_freq_hz);
return -EINVAL;
}
uint32_t number_of_adc_samples = number_of_samplings * adc_pattern_len;
uint32_t number_of_adc_dma_data_bytes =
number_of_adc_samples * SOC_ADC_DIGI_DATA_BYTES_PER_CONV;
if (number_of_adc_dma_data_bytes > ADC_DMA_BUFFER_SIZE) {
LOG_ERR("dma buffer size insufficient to store a complete sequence!");
return -EINVAL;
}
err = adc_esp32_dma_start(dev, data->dma_buffer, number_of_adc_dma_data_bytes);
if (err) {
return err;
}
adc_esp32_digi_start(dev, &adc_digi_pattern_config, adc_pattern_len, number_of_samplings,
sample_freq_hz, unit_attenuation);
err = adc_esp32_wait_for_dma_conv_done(dev);
if (err) {
return err;
}
adc_esp32_digi_stop(dev);
err = adc_esp32_dma_stop(dev);
if (err) {
return err;
}
adc_esp32_fill_seq_buffer(seq->buffer, data->dma_buffer, number_of_adc_samples);
#endif /* !defined(CONFIG_ADC_ESP32_DMA) */
return 0;
}
#ifdef CONFIG_ADC_ASYNC
static int adc_esp32_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
(void)(dev);
(void)(sequence);
(void)(async);
return -ENOTSUP;
}
#endif /* CONFIG_ADC_ASYNC */
static int adc_esp32_channel_setup(const struct device *dev, const struct adc_channel_cfg *cfg)
{
const struct adc_esp32_conf *conf = (const struct adc_esp32_conf *)dev->config;
struct adc_esp32_data *data = (struct adc_esp32_data *) dev->data;
int err;
if (cfg->channel_id >= conf->channel_count) {
LOG_ERR("Unsupported channel id '%d'", cfg->channel_id);
return -ENOTSUP;
}
if (cfg->reference != ADC_REF_INTERNAL) {
LOG_ERR("Unsupported channel reference '%d'", cfg->reference);
return -ENOTSUP;
}
if (cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
LOG_ERR("Unsupported acquisition_time '%d'", cfg->acquisition_time);
return -ENOTSUP;
}
if (cfg->differential) {
LOG_ERR("Differential channels are not supported");
return -ENOTSUP;
}
if (gain_to_atten(cfg->gain, &data->attenuation[cfg->channel_id])) {
LOG_ERR("Unsupported gain value '%d'", cfg->gain);
return -ENOTSUP;
}
/* Prepare channel */
if (conf->unit == ADC_UNIT_1) {
adc1_config_channel_atten(cfg->channel_id, data->attenuation[cfg->channel_id]);
}
if (conf->unit == ADC_UNIT_2) {
adc2_config_channel_atten(cfg->channel_id, data->attenuation[cfg->channel_id]);
}
if (data->calibrate) {
esp_adc_cal_value_t cal = esp_adc_cal_characterize(conf->unit,
data->attenuation[cfg->channel_id],
data->resolution[cfg->channel_id],
data->meas_ref_internal,
&data->chars[cfg->channel_id]);
if (cal >= ESP_ADC_CAL_VAL_NOT_SUPPORTED) {
LOG_ERR("Calibration error or not supported");
return -EIO;
}
LOG_DBG("Using ADC calibration method %d", cal);
}
#if defined(CONFIG_ADC_ESP32_DMA)
if (!SOC_ADC_DIG_SUPPORTED_UNIT(conf->unit)) {
LOG_ERR("ADC2 dma mode is no longer supported, please use ADC1!");
return -EINVAL;
}
int io_num = adc_channel_io_map[conf->unit][cfg->channel_id];
if (io_num < 0) {
LOG_ERR("Channel %u not supported!", cfg->channel_id);
return -ENOTSUP;
}
struct gpio_dt_spec gpio = {
.port = conf->gpio_port,
.dt_flags = 0,
.pin = io_num,
};
err = gpio_pin_configure_dt(&gpio, GPIO_DISCONNECTED);
if (err) {
LOG_ERR("Error disconnecting io (%d)", io_num);
return err;
}
#endif /* defined(CONFIG_ADC_ESP32_DMA) */
return 0;
}
static int adc_esp32_init(const struct device *dev)
{
struct adc_esp32_data *data = (struct adc_esp32_data *) dev->data;
const struct adc_esp32_conf *conf = (struct adc_esp32_conf *) dev->config;
adc_hw_calibration(conf->unit);
#if defined(CONFIG_ADC_ESP32_DMA)
if (!device_is_ready(conf->gpio_port)) {
LOG_ERR("gpio0 port not ready");
return -ENODEV;
}
if (k_sem_init(&data->dma_conv_wait_lock, 0, 1)) {
LOG_ERR("dma_conv_wait_lock initialization failed!");
return -EINVAL;
}
data->adc_hal_dma_ctx.rx_desc = k_aligned_alloc(sizeof(uint32_t),
sizeof(dma_descriptor_t));
if (!data->adc_hal_dma_ctx.rx_desc) {
LOG_ERR("rx_desc allocation failed!");
return -ENOMEM;
}
LOG_DBG("rx_desc = 0x%08X", (unsigned int)data->adc_hal_dma_ctx.rx_desc);
data->dma_buffer = k_aligned_alloc(sizeof(uint32_t), ADC_DMA_BUFFER_SIZE);
if (!data->dma_buffer) {
LOG_ERR("dma buffer allocation failed!");
k_free(data->adc_hal_dma_ctx.rx_desc);
return -ENOMEM;
}
LOG_DBG("data->dma_buffer = 0x%08X", (unsigned int)data->dma_buffer);
#endif /* defined(CONFIG_ADC_ESP32_DMA) */
for (uint8_t i = 0; i < ARRAY_SIZE(data->resolution); i++) {
data->resolution[i] = ADC_RESOLUTION_MAX;
}
for (uint8_t i = 0; i < ARRAY_SIZE(data->attenuation); i++) {
data->attenuation[i] = ADC_ATTEN_DB_0;
}
/* Default reference voltage. This could be calibrated externaly */
data->meas_ref_internal = ADC_ESP32_DEFAULT_VREF_INTERNAL;
/* Check if calibration is possible */
data->calibrate = adc_calibration_init(dev);
return 0;
}
static const struct adc_driver_api api_esp32_driver_api = {
.channel_setup = adc_esp32_channel_setup,
.read = adc_esp32_read,
#ifdef CONFIG_ADC_ASYNC
.read_async = adc_esp32_read_async,
#endif /* CONFIG_ADC_ASYNC */
.ref_internal = ADC_ESP32_DEFAULT_VREF_INTERNAL,
};
#if defined(CONFIG_ADC_ESP32_DMA)
#define ADC_ESP32_CONF_GPIO_PORT_INIT .gpio_port = DEVICE_DT_GET(DT_NODELABEL(gpio0)),
#define ADC_ESP32_CONF_DMA_INIT(n) .dma_dev = COND_CODE_1(DT_INST_NODE_HAS_PROP(n, dmas), \
(DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_IDX(n, 0))), \
(NULL)), \
.dma_channel = COND_CODE_1(DT_INST_NODE_HAS_PROP(n, dmas), \
(DT_INST_DMAS_CELL_BY_IDX(n, 0, channel)), \
(0xff)),
#else
#define ADC_ESP32_CONF_GPIO_PORT_INIT
#define ADC_ESP32_CONF_DMA_INIT(inst)
#endif /* defined(CONFIG_ADC_ESP32_DMA) */
#define ESP32_ADC_INIT(inst) \
\
static const struct adc_esp32_conf adc_esp32_conf_##inst = { \
.unit = DT_PROP(DT_DRV_INST(inst), unit) - 1, \
.channel_count = DT_PROP(DT_DRV_INST(inst), channel_count), \
ADC_ESP32_CONF_GPIO_PORT_INIT \
ADC_ESP32_CONF_DMA_INIT(inst) \
}; \
\
static struct adc_esp32_data adc_esp32_data_##inst = { \
}; \
\
DEVICE_DT_INST_DEFINE(inst, &adc_esp32_init, NULL, \
&adc_esp32_data_##inst, \
&adc_esp32_conf_##inst, \
POST_KERNEL, \
CONFIG_ADC_INIT_PRIORITY, \
&api_esp32_driver_api);
DT_INST_FOREACH_STATUS_OKAY(ESP32_ADC_INIT)
``` | /content/code_sandbox/drivers/adc/adc_esp32.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6,037 |
```unknown
# ADC configuration options
config ADC_ITE_IT8XXX2
bool "ITE IT8XXX2 ADC driver"
default y
depends on DT_HAS_ITE_IT8XXX2_ADC_ENABLED
help
This option enables the ADC driver for IT8XXX2
family of processors.
Voltage range 0 to 3000mV.
Support 10-bit resolution.
Support 8 channels: ch0~ch7.
if ADC_ITE_IT8XXX2
config ADC_IT8XXX2_VOL_FULL_SCALE
bool "ADC internal voltage as full-scale"
help
This option enables ADC internal reference
voltage as full-scale 3300mV.
endif # ADC_ITE_IT8XXX2
``` | /content/code_sandbox/drivers/adc/Kconfig.it8xxx2 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 152 |
```unknown
config ADC_TLA2021
bool "Texas Instruments TLA2021 Low-Power ADC"
default y
depends on DT_HAS_TI_TLA2021_ENABLED
select I2C
help
TLA202x Cost-Optimized, Ultra-Small, 12-Bit, System-Monitoring ADCs
if ADC_TLA2021
config ADC_TLA2021_INIT_PRIORITY
int "Priority for the driver initialization"
default 80
help
Fine tune the priority for the driver initialization. Make sure it's
higher (-> lower priority) than I2C_INIT_PRIORITY.
config ADC_TLA2021_ACQUISITION_THREAD_PRIORITY
int "Priority for the data acquisition thread"
default 0
help
Execution priority for the internal data acquisition thread.
config ADC_TLA2021_ACQUISITION_THREAD_STACK_SIZE
int "Stack size for the data acquisition thread"
default 512
help
Stack size for the internal data acquisition thread. Requires room
for I2C operations.
endif # ADC_TLA2021
``` | /content/code_sandbox/drivers/adc/Kconfig.tla2021 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 225 |
```c
/* TI ADS1X1X ADC
*
*
*/
#include <stdbool.h>
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/adc.h>
#include <zephyr/logging/log.h>
#include <zephyr/drivers/i2c.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/util.h>
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
LOG_MODULE_REGISTER(ADS1X1X, CONFIG_ADC_LOG_LEVEL);
#define ADS1X1X_CONFIG_OS BIT(15)
#define ADS1X1X_CONFIG_MUX(x) ((x) << 12)
#define ADS1X1X_CONFIG_PGA(x) ((x) << 9)
#define ADS1X1X_CONFIG_MODE BIT(8)
#define ADS1X1X_CONFIG_DR(x) ((x) << 5)
#define ADS1X1X_CONFIG_COMP_MODE BIT(4)
#define ADS1X1X_CONFIG_COMP_POL BIT(3)
#define ADS1X1X_CONFIG_COMP_LAT BIT(2)
#define ADS1X1X_CONFIG_COMP_QUE(x) (x)
enum ads1x1x_reg {
ADS1X1X_REG_CONV = 0x00,
ADS1X1X_REG_CONFIG = 0x01,
ADS1X1X_REG_LO_THRESH = 0x02,
ADS1X1X_REG_HI_THRESH = 0x03,
};
enum {
ADS1X15_CONFIG_MUX_DIFF_0_1 = 0,
ADS1X15_CONFIG_MUX_DIFF_0_3 = 1,
ADS1X15_CONFIG_MUX_DIFF_1_3 = 2,
ADS1X15_CONFIG_MUX_DIFF_2_3 = 3,
ADS1X15_CONFIG_MUX_SINGLE_0 = 4,
ADS1X15_CONFIG_MUX_SINGLE_1 = 5,
ADS1X15_CONFIG_MUX_SINGLE_2 = 6,
ADS1X15_CONFIG_MUX_SINGLE_3 = 7,
};
enum {
/* ADS111X, ADS101X samples per second */
/* 8, 128 samples per second */
ADS1X1X_CONFIG_DR_8_128 = 0,
/* 16, 250 samples per second */
ADS1X1X_CONFIG_DR_16_250 = 1,
/* 32, 490 samples per second */
ADS1X1X_CONFIG_DR_32_490 = 2,
/* 64, 920 samples per second */
ADS1X1X_CONFIG_DR_64_920 = 3,
/* 128, 1600 samples per second (default) */
ADS1X1X_CONFIG_DR_128_1600 = 4,
/* 250, 2400 samples per second */
ADS1X1X_CONFIG_DR_250_2400 = 5,
/* 475, 3300 samples per second */
ADS1X1X_CONFIG_DR_475_3300 = 6,
/* 860, 3300 samples per second */
ADS1X1X_CONFIG_DR_860_3300 = 7,
/* Default data rate */
ADS1X1X_CONFIG_DR_DEFAULT = ADS1X1X_CONFIG_DR_128_1600
};
enum {
/* +/-6.144V range = Gain 1/3 */
ADS1X1X_CONFIG_PGA_6144 = 0,
/* +/-4.096V range = Gain 1/2 */
ADS1X1X_CONFIG_PGA_4096 = 1,
/* +/-2.048V range = Gain 1 (default) */
ADS1X1X_CONFIG_PGA_2048 = 2,
/* +/-1.024V range = Gain 2 */
ADS1X1X_CONFIG_PGA_1024 = 3,
/* +/-0.512V range = Gain 4 */
ADS1X1X_CONFIG_PGA_512 = 4,
/* +/-0.256V range = Gain 8 */
ADS1X1X_CONFIG_PGA_256 = 5
};
enum {
ADS1X1X_CONFIG_MODE_CONTINUOUS = 0,
ADS1X1X_CONFIG_MODE_SINGLE_SHOT = 1,
};
enum {
/* Traditional comparator with hysteresis (default) */
ADS1X1X_CONFIG_COMP_MODE_TRADITIONAL = 0,
/* Window comparator */
ADS1X1X_CONFIG_COMP_MODE_WINDOW = 1
};
enum {
/* ALERT/RDY pin is low when active (default) */
ADS1X1X_CONFIG_COMP_POLARITY_ACTIVE_LO = 0,
/* ALERT/RDY pin is high when active */
ADS1X1X_CONFIG_COMP_POLARITY_ACTIVE_HI = 1
};
enum {
/* Non-latching comparator (default) */
ADS1X1X_CONFIG_COMP_NON_LATCHING = 0,
/* Latching comparator */
ADS1X1X_CONFIG_COMP_LATCHING = 1
};
enum {
/* Assert ALERT/RDY after one conversions */
ADS1X1X_CONFIG_COMP_QUEUE_1 = 0,
/* Assert ALERT/RDY after two conversions */
ADS1X1X_CONFIG_COMP_QUEUE_2 = 1,
/* Assert ALERT/RDY after four conversions */
ADS1X1X_CONFIG_COMP_QUEUE_4 = 2,
/* Disable the comparator and put ALERT/RDY in high state (default) */
ADS1X1X_CONFIG_COMP_QUEUE_NONE = 3
};
struct ads1x1x_config {
struct i2c_dt_spec bus;
const uint32_t odr_delay[8];
uint8_t resolution;
bool multiplexer;
bool pga;
};
struct ads1x1x_data {
const struct device *dev;
struct adc_context ctx;
k_timeout_t ready_time;
struct k_sem acq_sem;
int16_t *buffer;
int16_t *repeat_buffer;
struct k_thread thread;
bool differential;
K_KERNEL_STACK_MEMBER(stack, CONFIG_ADC_ADS1X1X_ACQUISITION_THREAD_STACK_SIZE);
};
static int ads1x1x_read_reg(const struct device *dev, enum ads1x1x_reg reg_addr, uint16_t *buf)
{
const struct ads1x1x_config *config = dev->config;
uint16_t reg_val;
int ret;
ret = i2c_burst_read_dt(&config->bus, reg_addr, (uint8_t *)®_val, sizeof(reg_val));
if (ret != 0) {
LOG_ERR("ADS1X1X[0x%X]: error reading register 0x%X (%d)", config->bus.addr,
reg_addr, ret);
return ret;
}
*buf = sys_be16_to_cpu(reg_val);
return 0;
}
static int ads1x1x_write_reg(const struct device *dev, enum ads1x1x_reg reg_addr, uint16_t reg_val)
{
const struct ads1x1x_config *config = dev->config;
uint8_t buf[3];
int ret;
buf[0] = reg_addr;
sys_put_be16(reg_val, &buf[1]);
ret = i2c_write_dt(&config->bus, buf, sizeof(buf));
if (ret != 0) {
LOG_ERR("ADS1X1X[0x%X]: error writing register 0x%X (%d)", config->bus.addr,
reg_addr, ret);
return ret;
}
return 0;
}
static int ads1x1x_start_conversion(const struct device *dev)
{
/* send start sampling command */
uint16_t config;
int ret;
ret = ads1x1x_read_reg(dev, ADS1X1X_REG_CONFIG, &config);
if (ret != 0) {
return ret;
}
config |= ADS1X1X_CONFIG_OS;
ret = ads1x1x_write_reg(dev, ADS1X1X_REG_CONFIG, config);
return ret;
}
static inline int ads1x1x_acq_time_to_dr(const struct device *dev, uint16_t acq_time)
{
struct ads1x1x_data *data = dev->data;
const struct ads1x1x_config *ads_config = dev->config;
const uint32_t *odr_delay = ads_config->odr_delay;
uint32_t odr_delay_us = 0;
int odr = -EINVAL;
uint16_t acq_value = ADC_ACQ_TIME_VALUE(acq_time);
/* The ADS1x1x uses samples per seconds units with the lowest being 8SPS
* and with acquisition_time only having 14b for time, this will not fit
* within here for microsecond units. Use Tick units and allow the user to
* specify the ODR directly.
*/
if (acq_time != ADC_ACQ_TIME_DEFAULT && ADC_ACQ_TIME_UNIT(acq_time) != ADC_ACQ_TIME_TICKS) {
return -EINVAL;
}
if (acq_time == ADC_ACQ_TIME_DEFAULT) {
odr = ADS1X1X_CONFIG_DR_DEFAULT;
odr_delay_us = odr_delay[ADS1X1X_CONFIG_DR_DEFAULT];
} else {
switch (acq_value) {
case ADS1X1X_CONFIG_DR_8_128:
odr = ADS1X1X_CONFIG_DR_8_128;
odr_delay_us = odr_delay[ADS1X1X_CONFIG_DR_8_128];
break;
case ADS1X1X_CONFIG_DR_16_250:
odr = ADS1X1X_CONFIG_DR_16_250;
odr_delay_us = odr_delay[ADS1X1X_CONFIG_DR_16_250];
break;
case ADS1X1X_CONFIG_DR_32_490:
odr = ADS1X1X_CONFIG_DR_32_490;
odr_delay_us = odr_delay[ADS1X1X_CONFIG_DR_32_490];
break;
case ADS1X1X_CONFIG_DR_64_920:
odr = ADS1X1X_CONFIG_DR_64_920;
odr_delay_us = odr_delay[ADS1X1X_CONFIG_DR_64_920];
break;
case ADS1X1X_CONFIG_DR_128_1600:
odr = ADS1X1X_CONFIG_DR_128_1600;
odr_delay_us = odr_delay[ADS1X1X_CONFIG_DR_128_1600];
break;
case ADS1X1X_CONFIG_DR_250_2400:
odr = ADS1X1X_CONFIG_DR_250_2400;
odr_delay_us = odr_delay[ADS1X1X_CONFIG_DR_250_2400];
break;
case ADS1X1X_CONFIG_DR_475_3300:
odr = ADS1X1X_CONFIG_DR_475_3300;
odr_delay_us = odr_delay[ADS1X1X_CONFIG_DR_475_3300];
break;
case ADS1X1X_CONFIG_DR_860_3300:
odr = ADS1X1X_CONFIG_DR_860_3300;
odr_delay_us = odr_delay[ADS1X1X_CONFIG_DR_860_3300];
break;
default:
break;
}
}
/* As per the datasheet, 25us is needed to wake-up from power down mode
*/
odr_delay_us += 25;
data->ready_time = K_USEC(odr_delay_us);
return odr;
}
static int ads1x1x_wait_data_ready(const struct device *dev)
{
int rc;
struct ads1x1x_data *data = dev->data;
k_sleep(data->ready_time);
uint16_t status = 0;
rc = ads1x1x_read_reg(dev, ADS1X1X_REG_CONFIG, &status);
if (rc != 0) {
return rc;
}
while (!(status & ADS1X1X_CONFIG_OS)) {
k_sleep(K_USEC(100));
rc = ads1x1x_read_reg(dev, ADS1X1X_REG_CONFIG, &status);
if (rc != 0) {
return rc;
}
}
return rc;
}
static int ads1x1x_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
const struct ads1x1x_config *ads_config = dev->config;
struct ads1x1x_data *data = dev->data;
uint16_t config = 0;
int dr = 0;
if (channel_cfg->channel_id != 0) {
LOG_ERR("unsupported channel id '%d'", channel_cfg->channel_id);
return -ENOTSUP;
}
if (channel_cfg->reference != ADC_REF_INTERNAL) {
LOG_ERR("unsupported channel reference type '%d'", channel_cfg->reference);
return -ENOTSUP;
}
if (ads_config->multiplexer) {
/* the device has an input multiplexer */
if (channel_cfg->differential) {
if (channel_cfg->input_positive == 0 && channel_cfg->input_negative == 1) {
config |= ADS1X1X_CONFIG_MUX(ADS1X15_CONFIG_MUX_DIFF_0_1);
} else if (channel_cfg->input_positive == 0 &&
channel_cfg->input_negative == 3) {
config |= ADS1X1X_CONFIG_MUX(ADS1X15_CONFIG_MUX_DIFF_0_3);
} else if (channel_cfg->input_positive == 1 &&
channel_cfg->input_negative == 3) {
config |= ADS1X1X_CONFIG_MUX(ADS1X15_CONFIG_MUX_DIFF_1_3);
} else if (channel_cfg->input_positive == 2 &&
channel_cfg->input_negative == 3) {
config |= ADS1X1X_CONFIG_MUX(ADS1X15_CONFIG_MUX_DIFF_2_3);
} else {
LOG_ERR("unsupported input positive '%d' and input negative '%d'",
channel_cfg->input_positive, channel_cfg->input_negative);
return -ENOTSUP;
}
} else {
if (channel_cfg->input_positive == 0) {
config |= ADS1X1X_CONFIG_MUX(ADS1X15_CONFIG_MUX_SINGLE_0);
} else if (channel_cfg->input_positive == 1) {
config |= ADS1X1X_CONFIG_MUX(ADS1X15_CONFIG_MUX_SINGLE_1);
} else if (channel_cfg->input_positive == 2) {
config |= ADS1X1X_CONFIG_MUX(ADS1X15_CONFIG_MUX_SINGLE_2);
} else if (channel_cfg->input_positive == 3) {
config |= ADS1X1X_CONFIG_MUX(ADS1X15_CONFIG_MUX_SINGLE_3);
} else {
LOG_ERR("unsupported input positive '%d'",
channel_cfg->input_positive);
return -ENOTSUP;
}
}
} else {
/* only differential supported without multiplexer */
if (!((channel_cfg->differential) &&
(channel_cfg->input_positive == 0 && channel_cfg->input_negative == 1))) {
LOG_ERR("unsupported input positive '%d' and input negative '%d'",
channel_cfg->input_positive, channel_cfg->input_negative);
return -ENOTSUP;
}
}
/* store differential mode to determine supported resolution */
data->differential = channel_cfg->differential;
dr = ads1x1x_acq_time_to_dr(dev, channel_cfg->acquisition_time);
if (dr < 0) {
LOG_ERR("unsupported channel acquisition time 0x%02x",
channel_cfg->acquisition_time);
return -ENOTSUP;
}
config |= ADS1X1X_CONFIG_DR(dr);
if (ads_config->pga) {
/* programmable gain amplifier support */
switch (channel_cfg->gain) {
case ADC_GAIN_1_3:
config |= ADS1X1X_CONFIG_PGA(ADS1X1X_CONFIG_PGA_6144);
break;
case ADC_GAIN_1_2:
config |= ADS1X1X_CONFIG_PGA(ADS1X1X_CONFIG_PGA_4096);
break;
case ADC_GAIN_1:
config |= ADS1X1X_CONFIG_PGA(ADS1X1X_CONFIG_PGA_2048);
break;
case ADC_GAIN_2:
config |= ADS1X1X_CONFIG_PGA(ADS1X1X_CONFIG_PGA_1024);
break;
case ADC_GAIN_4:
config |= ADS1X1X_CONFIG_PGA(ADS1X1X_CONFIG_PGA_512);
break;
case ADC_GAIN_8:
config |= ADS1X1X_CONFIG_PGA(ADS1X1X_CONFIG_PGA_256);
break;
default:
LOG_ERR("unsupported channel gain '%d'", channel_cfg->gain);
return -ENOTSUP;
}
} else {
/* no programmable gain amplifier, so only allow ADC_GAIN_1 */
if (channel_cfg->gain != ADC_GAIN_1) {
LOG_ERR("unsupported channel gain '%d'", channel_cfg->gain);
return -ENOTSUP;
}
}
/* Only single shot supported */
config |= ADS1X1X_CONFIG_MODE;
/* disable comparator */
config |= ADS1X1X_CONFIG_COMP_MODE;
return ads1x1x_write_reg(dev, ADS1X1X_REG_CONFIG, config);
}
static int ads1x1x_validate_buffer_size(const struct adc_sequence *sequence)
{
size_t needed = sizeof(int16_t);
if (sequence->options) {
needed *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < needed) {
return -ENOMEM;
}
return 0;
}
static int ads1x1x_validate_sequence(const struct device *dev, const struct adc_sequence *sequence)
{
const struct ads1x1x_config *config = dev->config;
struct ads1x1x_data *data = dev->data;
uint8_t resolution = data->differential ? config->resolution : config->resolution - 1;
int err;
if (sequence->resolution != resolution) {
LOG_ERR("unsupported resolution %d", sequence->resolution);
return -ENOTSUP;
}
if (sequence->channels != BIT(0)) {
LOG_ERR("only channel 0 supported");
return -ENOTSUP;
}
if (sequence->oversampling) {
LOG_ERR("oversampling not supported");
return -ENOTSUP;
}
err = ads1x1x_validate_buffer_size(sequence);
if (err) {
LOG_ERR("buffer size too small");
return -ENOTSUP;
}
return 0;
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx, bool repeat_sampling)
{
struct ads1x1x_data *data = CONTAINER_OF(ctx, struct ads1x1x_data, ctx);
if (repeat_sampling) {
data->buffer = data->repeat_buffer;
}
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct ads1x1x_data *data = CONTAINER_OF(ctx, struct ads1x1x_data, ctx);
int ret;
data->repeat_buffer = data->buffer;
ret = ads1x1x_start_conversion(data->dev);
if (ret != 0) {
/* if we fail to complete the I2C operations to start
* sampling, return an immediate error (likely -EIO) rather
* than handing it off to the acquisition thread.
*/
adc_context_complete(ctx, ret);
return;
}
k_sem_give(&data->acq_sem);
}
static int ads1x1x_adc_start_read(const struct device *dev, const struct adc_sequence *sequence)
{
int rc;
struct ads1x1x_data *data = dev->data;
rc = ads1x1x_validate_sequence(dev, sequence);
if (rc != 0) {
return rc;
}
data->buffer = sequence->buffer;
adc_context_start_read(&data->ctx, sequence);
return adc_context_wait_for_completion(&data->ctx);
}
static int ads1x1x_adc_read_async(const struct device *dev, const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
int rc;
struct ads1x1x_data *data = dev->data;
adc_context_lock(&data->ctx, async ? true : false, async);
rc = ads1x1x_adc_start_read(dev, sequence);
adc_context_release(&data->ctx, rc);
return rc;
}
static int ads1x1x_adc_perform_read(const struct device *dev)
{
int rc;
struct ads1x1x_data *data = dev->data;
const struct ads1x1x_config *config = dev->config;
int16_t buf;
rc = ads1x1x_read_reg(dev, ADS1X1X_REG_CONV, &buf);
if (rc != 0) {
adc_context_complete(&data->ctx, rc);
return rc;
}
/* The ads101x stores it's 12b data in the upper part
* while the ads111x uses all 16b in the register, so
* shift down. Data is also signed, so perform
* division rather than shifting
*/
*data->buffer++ = buf / (1 << (16 - config->resolution));
adc_context_on_sampling_done(&data->ctx, dev);
return rc;
}
static int ads1x1x_read(const struct device *dev, const struct adc_sequence *sequence)
{
return ads1x1x_adc_read_async(dev, sequence, NULL);
}
static void ads1x1x_acquisition_thread(void *p1, void *p2, void *p3)
{
ARG_UNUSED(p2);
ARG_UNUSED(p3);
const struct device *dev = p1;
struct ads1x1x_data *data = dev->data;
int rc;
while (true) {
k_sem_take(&data->acq_sem, K_FOREVER);
rc = ads1x1x_wait_data_ready(dev);
if (rc != 0) {
LOG_ERR("failed to get ready status (err %d)", rc);
adc_context_complete(&data->ctx, rc);
continue;
}
ads1x1x_adc_perform_read(dev);
}
}
static int ads1x1x_init(const struct device *dev)
{
const struct ads1x1x_config *config = dev->config;
struct ads1x1x_data *data = dev->data;
data->dev = dev;
k_sem_init(&data->acq_sem, 0, 1);
if (!device_is_ready(config->bus.bus)) {
LOG_ERR("I2C bus %s not ready", config->bus.bus->name);
return -ENODEV;
}
k_tid_t tid =
k_thread_create(&data->thread, data->stack, K_THREAD_STACK_SIZEOF(data->stack),
ads1x1x_acquisition_thread, (void *)dev, NULL,
NULL, CONFIG_ADC_ADS1X1X_ACQUISITION_THREAD_PRIO, 0, K_NO_WAIT);
k_thread_name_set(tid, "adc_ads1x1x");
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
static const struct adc_driver_api ads1x1x_api = {
.channel_setup = ads1x1x_channel_setup,
.read = ads1x1x_read,
.ref_internal = 2048,
#ifdef CONFIG_ADC_ASYNC
.read_async = ads1x1x_adc_read_async,
#endif
};
#define DT_INST_ADS1X1X(inst, t) DT_INST(inst, ti_ads##t)
#define ADS1X1X_INIT(t, n, odr_delay_us, res, mux, pgab) \
static const struct ads1x1x_config ads##t##_config_##n = { \
.bus = I2C_DT_SPEC_GET(DT_INST_ADS1X1X(n, t)), \
.odr_delay = odr_delay_us, \
.resolution = res, \
.multiplexer = mux, \
.pga = pgab, \
}; \
static struct ads1x1x_data ads##t##_data_##n = { \
ADC_CONTEXT_INIT_LOCK(ads##t##_data_##n, ctx), \
ADC_CONTEXT_INIT_TIMER(ads##t##_data_##n, ctx), \
ADC_CONTEXT_INIT_SYNC(ads##t##_data_##n, ctx), \
}; \
DEVICE_DT_DEFINE(DT_INST_ADS1X1X(n, t), ads1x1x_init, NULL, &ads##t##_data_##n, \
&ads##t##_config_##n, POST_KERNEL, CONFIG_ADC_ADS1X1X_INIT_PRIORITY, \
&ads1x1x_api);
/* The ADS111X provides 16 bits of data in binary two's complement format
* A positive full-scale (+FS) input produces an output code of 7FFFh and a
* negative full-scale (FS) input produces an output code of 8000h. Single
* ended signal measurements only use the positive code range from
* 0000h to 7FFFh
*/
#define ADS111X_RESOLUTION 16
/*
* Approximated ADS111x acquisition times in microseconds. These are
* used for the initial delay when polling for data ready.
* {8 SPS, 16 SPS, 32 SPS, 64 SPS, 128 SPS (default), 250 SPS, 475 SPS, 860 SPS}
*/
#define ADS111X_ODR_DELAY_US \
{ \
125000, 62500, 31250, 15625, 7813, 4000, 2105, 1163 \
}
/*
* ADS1115: 16 bit, multiplexer, programmable gain amplifier
*/
#define ADS1115_INIT(n) ADS1X1X_INIT(1115, n, ADS111X_ODR_DELAY_US, ADS111X_RESOLUTION, true, true)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT ti_ads1115
DT_INST_FOREACH_STATUS_OKAY(ADS1115_INIT)
/*
* ADS1114: 16 bit, no multiplexer, programmable gain amplifier
*/
#define ADS1114_INIT(n) ADS1X1X_INIT(1114, n, ADS111X_ODR_DELAY_US, ADS111X_RESOLUTION, false, true)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT ti_ads1114
DT_INST_FOREACH_STATUS_OKAY(ADS1114_INIT)
/*
* ADS1113: 16 bit, no multiplexer, no programmable gain amplifier
*/
#define ADS1113_INIT(n) \
ADS1X1X_INIT(1113, n, ADS111X_ODR_DELAY_US, ADS111X_RESOLUTION, false, false)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT ti_ads1113
DT_INST_FOREACH_STATUS_OKAY(ADS1113_INIT)
/* The ADS101X provides 12 bits of data in binary two's complement format
* A positive full-scale (+FS) input produces an output code of 7FFh and a
* negative full-scale (FS) input produces an output code of 800h. Single
* ended signal measurements only use the positive code range from
* 000h to 7FFh
*/
#define ADS101X_RESOLUTION 12
/*
* Approximated ADS101x acquisition times in microseconds. These are
* used for the initial delay when polling for data ready.
* {128 SPS, 250 SPS, 490 SPS, 920 SPS, 1600 SPS (default), 2400 SPS, 3300 SPS, 3300 SPS}
*/
#define ADS101X_ODR_DELAY_US \
{ \
7813, 4000, 2041, 1087, 625, 417, 303, 303 \
}
/*
* ADS1015: 12 bit, multiplexer, programmable gain amplifier
*/
#define ADS1015_INIT(n) ADS1X1X_INIT(1015, n, ADS101X_ODR_DELAY_US, ADS101X_RESOLUTION, true, true)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT ti_ads1015
DT_INST_FOREACH_STATUS_OKAY(ADS1015_INIT)
/*
* ADS1014: 12 bit, no multiplexer, programmable gain amplifier
*/
#define ADS1014_INIT(n) ADS1X1X_INIT(1014, n, ADS101X_ODR_DELAY_US, ADS101X_RESOLUTION, false, true)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT ti_ads1014
DT_INST_FOREACH_STATUS_OKAY(ADS1014_INIT)
/*
* ADS1013: 12 bit, no multiplexer, no programmable gain amplifier
*/
#define ADS1013_INIT(n) \
ADS1X1X_INIT(1013, n, ADS101X_ODR_DELAY_US, ADS101X_RESOLUTION, false, false)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT ti_ads1013
DT_INST_FOREACH_STATUS_OKAY(ADS1013_INIT)
``` | /content/code_sandbox/drivers/adc/adc_ads1x1x.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6,375 |
```c
/* LLTC LTC2451 ADC
*
*
*/
#include <zephyr/device.h>
#include <zephyr/drivers/adc.h>
#include <zephyr/drivers/i2c.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/byteorder.h>
LOG_MODULE_REGISTER(ltc2451, CONFIG_ADC_LOG_LEVEL);
#define DT_DRV_COMPAT lltc_ltc2451
struct ltc2451_config {
struct i2c_dt_spec i2c;
uint8_t conversion_speed;
};
static int ltc2451_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
ARG_UNUSED(dev);
if (channel_cfg->channel_id != 0) {
LOG_ERR("Invalid channel id '%d'", channel_cfg->channel_id);
return -EINVAL;
}
return 0;
}
static int ltc2451_set_conversion_speed(const struct device *dev, uint8_t conversion_speed)
{
const struct ltc2451_config *config = dev->config;
uint8_t wr_buf[1];
int err;
if (conversion_speed == 60) {
wr_buf[0] = 0;
} else if (conversion_speed == 30) {
wr_buf[0] = 1;
} else {
LOG_ERR("Invalid conversion speed selected");
return -EINVAL;
}
err = i2c_write_dt(&config->i2c, wr_buf, sizeof(wr_buf));
if (err != 0) {
LOG_ERR("LTC write failed (err %d)", err);
}
return err;
}
static int ltc2451_read_latest_conversion(const struct device *dev,
const struct adc_sequence *sequence)
{
const struct ltc2451_config *config = dev->config;
uint8_t rd_buf[2];
uint16_t *value_buf;
int err = i2c_read_dt(&config->i2c, rd_buf, sizeof(rd_buf));
if (err == 0) {
value_buf = (uint16_t *)sequence->buffer;
value_buf[0] = sys_get_be16(rd_buf);
} else {
LOG_ERR("LTC read failed (err %d)", err);
}
return err;
}
static int ltc2451_init(const struct device *dev)
{
const struct ltc2451_config *config = dev->config;
if (!device_is_ready(config->i2c.bus)) {
LOG_ERR("I2C device not ready");
return -ENODEV;
}
return ltc2451_set_conversion_speed(dev, config->conversion_speed);
}
static const struct adc_driver_api ltc2451_api = {
.channel_setup = ltc2451_channel_setup,
.read = ltc2451_read_latest_conversion,
};
#define LTC2451_DEFINE(index) \
static const struct ltc2451_config ltc2451_cfg_##index = { \
.i2c = I2C_DT_SPEC_INST_GET(index), \
.conversion_speed = DT_INST_PROP(index, conversion_speed), \
}; \
\
DEVICE_DT_INST_DEFINE(index, <c2451_init, NULL, NULL, \
<c2451_cfg_##index, POST_KERNEL, CONFIG_ADC_INIT_PRIORITY, \
<c2451_api);
DT_INST_FOREACH_STATUS_OKAY(LTC2451_DEFINE)
``` | /content/code_sandbox/drivers/adc/adc_ltc2451.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 707 |
```unknown
# ADS1X1X ADC configuration options
config ADC_ADS1X1X
bool "ADS1X1X driver"
default y
depends on DT_HAS_TI_ADS1013_ENABLED || DT_HAS_TI_ADS1014_ENABLED || \
DT_HAS_TI_ADS1015_ENABLED || DT_HAS_TI_ADS1113_ENABLED || \
DT_HAS_TI_ADS1114_ENABLED || DT_HAS_TI_ADS1115_ENABLED || \
DT_HAS_TI_ADS1119_ENABLED
select I2C
select ADC_CONFIGURABLE_INPUTS
help
Enable ADS1X1X ADC driver.
if ADC_ADS1X1X
config ADC_ADS1X1X_INIT_PRIORITY
int "Init priority"
default 80
help
ADS1X1X ADC device driver initialization priority.
config ADC_ADS1X1X_ACQUISITION_THREAD_PRIO
int "Priority for the ADC data acquisition thread"
default 0
help
Priority level for the internal ADC data acquisition thread.
config ADC_ADS1X1X_ACQUISITION_THREAD_STACK_SIZE
int "Stack size for the ADC data acquisition thread"
default 1024
help
Size of the stack used for the internal data acquisition
thread.
endif # ADC_ADS1X1X
``` | /content/code_sandbox/drivers/adc/Kconfig.ads1x1x | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 274 |
```c
/*
*
*/
#define DT_DRV_COMPAT atmel_sam0_adc
#include <soc.h>
#include <zephyr/drivers/adc.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(adc_sam0, CONFIG_ADC_LOG_LEVEL);
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
#if defined(CONFIG_SOC_SERIES_SAMD21) || defined(CONFIG_SOC_SERIES_SAMR21) || \
defined(CONFIG_SOC_SERIES_SAMD20)
/*
* SAMD21 Manual 33.6.2.1: The first conversion after changing the reference
* is invalid, so we have to discard it.
*/
#define ADC_SAM0_REFERENCE_GLITCH 1
#endif
struct adc_sam0_data {
struct adc_context ctx;
const struct device *dev;
uint16_t *buffer;
/*
* Saved initial start, so we can reset the advances we've done
* if required
*/
uint16_t *repeat_buffer;
#ifdef ADC_SAM0_REFERENCE_GLITCH
uint8_t reference_changed;
#endif
};
struct adc_sam0_cfg {
Adc *regs;
const struct pinctrl_dev_config *pcfg;
#ifdef MCLK
uint32_t mclk_mask;
uint32_t gclk_mask;
uint16_t gclk_id;
#else
uint32_t gclk;
#endif
uint32_t freq;
uint16_t prescaler;
void (*config_func)(const struct device *dev);
};
static void wait_synchronization(Adc *const adc)
{
while ((ADC_SYNC(adc) & ADC_SYNC_MASK) != 0) {
}
}
static int adc_sam0_acquisition_to_clocks(const struct device *dev,
uint16_t acquisition_time)
{
const struct adc_sam0_cfg *const cfg = dev->config;
uint64_t scaled_acq;
switch (ADC_ACQ_TIME_UNIT(acquisition_time)) {
case ADC_ACQ_TIME_TICKS:
if (ADC_ACQ_TIME_VALUE(acquisition_time) > 64U) {
return -EINVAL;
}
return (int)ADC_ACQ_TIME_VALUE(acquisition_time) - 1;
case ADC_ACQ_TIME_MICROSECONDS:
scaled_acq = (uint64_t)ADC_ACQ_TIME_VALUE(acquisition_time) *
1000000U;
break;
case ADC_ACQ_TIME_NANOSECONDS:
scaled_acq = (uint64_t)ADC_ACQ_TIME_VALUE(acquisition_time) *
1000U;
break;
default:
return -EINVAL;
}
/*
* sample_time = (sample_length+1) * (clk_adc / 2)
* sample_length = sample_time * (2/clk_adc) - 1,
*/
scaled_acq *= 2U;
scaled_acq += cfg->freq / 2U;
scaled_acq /= cfg->freq;
if (scaled_acq <= 1U) {
return 0;
}
scaled_acq -= 1U;
if (scaled_acq >= 64U) {
return -EINVAL;
}
return (int)scaled_acq;
}
static int adc_sam0_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
const struct adc_sam0_cfg *const cfg = dev->config;
Adc *const adc = cfg->regs;
int retval;
uint8_t sampctrl = 0;
if (channel_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
retval = adc_sam0_acquisition_to_clocks(dev,
channel_cfg->acquisition_time);
if (retval < 0) {
LOG_ERR("Selected ADC acquisition time is not valid");
return retval;
}
sampctrl |= ADC_SAMPCTRL_SAMPLEN(retval);
}
adc->SAMPCTRL.reg = sampctrl;
wait_synchronization(adc);
uint8_t refctrl;
switch (channel_cfg->reference) {
case ADC_REF_INTERNAL:
refctrl = ADC_REFCTRL_REFSEL_INTERNAL | ADC_REFCTRL_REFCOMP;
/* Enable the internal bandgap reference */
ADC_BGEN = 1;
break;
#ifdef ADC_REFCTRL_REFSEL_VDD_1
case ADC_REF_VDD_1:
refctrl = ADC_REFCTRL_REFSEL_VDD_1 | ADC_REFCTRL_REFCOMP;
break;
#endif
case ADC_REF_VDD_1_2:
refctrl = ADC_REFCTRL_REFSEL_VDD_1_2 | ADC_REFCTRL_REFCOMP;
break;
case ADC_REF_EXTERNAL0:
refctrl = ADC_REFCTRL_REFSEL_AREFA;
break;
#ifdef ADC_REFCTRL_REFSEL_AREFB
case ADC_REF_EXTERNAL1:
refctrl = ADC_REFCTRL_REFSEL_AREFB;
break;
#endif
default:
LOG_ERR("Selected reference is not valid");
return -EINVAL;
}
if (adc->REFCTRL.reg != refctrl) {
#ifdef ADC_SAM0_REFERENCE_ENABLE_PROTECTED
adc->CTRLA.bit.ENABLE = 0;
wait_synchronization(adc);
#endif
adc->REFCTRL.reg = refctrl;
wait_synchronization(adc);
#ifdef ADC_SAM0_REFERENCE_ENABLE_PROTECTED
adc->CTRLA.bit.ENABLE = 1;
wait_synchronization(adc);
#endif
#ifdef ADC_SAM0_REFERENCE_GLITCH
struct adc_sam0_data *data = dev->data;
data->reference_changed = 1;
#endif
}
uint32_t inputctrl = 0;
switch (channel_cfg->gain) {
case ADC_GAIN_1:
#ifdef ADC_INPUTCTRL_GAIN_1X
inputctrl = ADC_INPUTCTRL_GAIN_1X;
#endif
break;
#ifdef ADC_INPUTCTRL_GAIN_DIV2
case ADC_GAIN_1_2:
inputctrl = ADC_INPUTCTRL_GAIN_DIV2;
break;
#endif
#ifdef ADC_INPUTCTRL_GAIN_2X
case ADC_GAIN_2:
inputctrl = ADC_INPUTCTRL_GAIN_2X;
break;
#endif
#ifdef ADC_INPUTCTRL_GAIN_4X
case ADC_GAIN_4:
inputctrl = ADC_INPUTCTRL_GAIN_4X;
break;
#endif
#ifdef ADC_INPUTCTRL_GAIN_8X
case ADC_GAIN_8:
inputctrl = ADC_INPUTCTRL_GAIN_8X;
break;
#endif
#ifdef ADC_INPUTCTRL_GAIN_16X
case ADC_GAIN_16:
inputctrl = ADC_INPUTCTRL_GAIN_16X;
break;
#endif
default:
LOG_ERR("Selected ADC gain is not valid");
return -EINVAL;
}
inputctrl |= ADC_INPUTCTRL_MUXPOS(channel_cfg->input_positive);
if (channel_cfg->differential) {
inputctrl |= ADC_INPUTCTRL_MUXNEG(channel_cfg->input_negative);
ADC_DIFF(adc) |= ADC_DIFF_MASK;
} else {
inputctrl |= ADC_INPUTCTRL_MUXNEG_GND;
ADC_DIFF(adc) &= ~ADC_DIFF_MASK;
}
wait_synchronization(adc);
adc->INPUTCTRL.reg = inputctrl;
wait_synchronization(adc);
/* Enable references if they're selected */
switch (channel_cfg->input_positive) {
#ifdef ADC_INPUTCTRL_MUXPOS_TEMP_Val
case ADC_INPUTCTRL_MUXPOS_TEMP_Val:
ADC_TSEN = 1;
break;
#endif
#ifdef ADC_INPUTCTRL_MUXPOS_PTAT_Val
case ADC_INPUTCTRL_MUXPOS_PTAT_Val:
ADC_TSEN = 1;
break;
#endif
#ifdef ADC_INPUTCTRL_MUXPOS_CTAT_Val
case ADC_INPUTCTRL_MUXPOS_CTAT_Val:
ADC_TSEN = 1;
break;
#endif
case ADC_INPUTCTRL_MUXPOS_BANDGAP_Val:
ADC_BGEN = 1;
break;
default:
break;
}
return 0;
}
static void adc_sam0_start_conversion(const struct device *dev)
{
const struct adc_sam0_cfg *const cfg = dev->config;
Adc *const adc = cfg->regs;
LOG_DBG("Starting conversion");
adc->SWTRIG.reg = ADC_SWTRIG_START;
/*
* Should be safe to not synchronize here because the only things
* that might access the ADC after this will wait for it to complete
* (synchronize finished implicitly)
*/
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct adc_sam0_data *data =
CONTAINER_OF(ctx, struct adc_sam0_data, ctx);
adc_sam0_start_conversion(data->dev);
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
bool repeat_sampling)
{
struct adc_sam0_data *data =
CONTAINER_OF(ctx, struct adc_sam0_data, ctx);
if (repeat_sampling) {
data->buffer = data->repeat_buffer;
}
}
static int check_buffer_size(const struct adc_sequence *sequence,
uint8_t active_channels)
{
size_t needed_buffer_size;
needed_buffer_size = active_channels * sizeof(uint16_t);
if (sequence->options) {
needed_buffer_size *= (1U + sequence->options->extra_samplings);
}
if (sequence->buffer_size < needed_buffer_size) {
LOG_ERR("Provided buffer is too small (%u/%u)",
sequence->buffer_size, needed_buffer_size);
return -ENOMEM;
}
return 0;
}
static int start_read(const struct device *dev,
const struct adc_sequence *sequence)
{
const struct adc_sam0_cfg *const cfg = dev->config;
struct adc_sam0_data *data = dev->data;
Adc *const adc = cfg->regs;
int error;
if (sequence->oversampling > 10U) {
LOG_ERR("Invalid oversampling");
return -EINVAL;
}
adc->AVGCTRL.reg = ADC_AVGCTRL_SAMPLENUM(sequence->oversampling);
/* AVGCTRL is not synchronized */
#ifdef CONFIG_SOC_SERIES_SAMD20
/*
* Errata: silicon revisions B and C do not perform the automatic right
* shifts in accumulation
*/
if (sequence->oversampling > 4U && DSU->DID.bit.REVISION < 3) {
adc->AVGCTRL.bit.ADJRES = sequence->oversampling - 4U;
}
#endif
switch (sequence->resolution) {
case 8:
if (sequence->oversampling) {
LOG_ERR("Oversampling requires 12 bit resolution");
return -EINVAL;
}
ADC_RESSEL(adc) = ADC_RESSEL_8BIT;
break;
case 10:
if (sequence->oversampling) {
LOG_ERR("Oversampling requires 12 bit resolution");
return -EINVAL;
}
ADC_RESSEL(adc) = ADC_RESSEL_10BIT;
break;
case 12:
if (sequence->oversampling) {
ADC_RESSEL(adc) = ADC_RESSEL_16BIT;
} else {
ADC_RESSEL(adc) = ADC_RESSEL_12BIT;
}
break;
default:
LOG_ERR("ADC resolution value %d is not valid",
sequence->resolution);
return -EINVAL;
}
wait_synchronization(adc);
if ((sequence->channels == 0)
|| ((sequence->channels & (sequence->channels - 1)) != 0)) {
/* The caller is expected to identify a single input channel, which will
* typically be the positive input, though no check is made for this...
*
* While ensuring that the channels bitfield matches the positive input
* might be sensible, this will likely break users before this revision
* was put in place.
*/
LOG_ERR("Channel scanning is not supported");
return -ENOTSUP;
}
error = check_buffer_size(sequence, 1);
if (error) {
return error;
}
data->buffer = sequence->buffer;
data->repeat_buffer = sequence->buffer;
/* At this point we allow the scheduler to do other things while
* we wait for the conversions to complete. This is provided by the
* adc_context functions. However, the caller of this function is
* blocked until the results are in.
*/
adc_context_start_read(&data->ctx, sequence);
error = adc_context_wait_for_completion(&data->ctx);
return error;
}
static int adc_sam0_read(const struct device *dev,
const struct adc_sequence *sequence)
{
struct adc_sam0_data *data = dev->data;
int error;
adc_context_lock(&data->ctx, false, NULL);
error = start_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
static void adc_sam0_isr(const struct device *dev)
{
struct adc_sam0_data *data = dev->data;
const struct adc_sam0_cfg *const cfg = dev->config;
Adc *const adc = cfg->regs;
uint16_t result;
adc->INTFLAG.reg = ADC_INTFLAG_MASK;
result = (uint16_t)(adc->RESULT.reg);
#ifdef ADC_SAM0_REFERENCE_GLITCH
if (data->reference_changed) {
data->reference_changed = 0;
LOG_DBG("Discarded initial conversion due to reference change");
adc_sam0_start_conversion(dev);
return;
}
#endif
*data->buffer++ = result;
adc_context_on_sampling_done(&data->ctx, dev);
}
static int adc_sam0_init(const struct device *dev)
{
const struct adc_sam0_cfg *const cfg = dev->config;
struct adc_sam0_data *data = dev->data;
Adc *const adc = cfg->regs;
int retval;
#ifdef MCLK
GCLK->PCHCTRL[cfg->gclk_id].reg = cfg->gclk_mask | GCLK_PCHCTRL_CHEN;
MCLK_ADC |= cfg->mclk_mask;
#else
PM->APBCMASK.bit.ADC_ = 1;
GCLK->CLKCTRL.reg = cfg->gclk | GCLK_CLKCTRL_CLKEN;
#endif
retval = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
if (retval < 0) {
return retval;
}
ADC_PRESCALER(adc) = cfg->prescaler;
wait_synchronization(adc);
adc->INTENCLR.reg = ADC_INTENCLR_MASK;
adc->INTFLAG.reg = ADC_INTFLAG_MASK;
cfg->config_func(dev);
adc->INTENSET.reg = ADC_INTENSET_RESRDY;
data->dev = dev;
#ifdef ADC_SAM0_REFERENCE_GLITCH
data->reference_changed = 1;
#endif
adc->CTRLA.bit.ENABLE = 1;
wait_synchronization(adc);
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
#ifdef CONFIG_ADC_ASYNC
static int adc_sam0_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
struct adc_sam0_data *data = dev->data;
int error;
adc_context_lock(&data->ctx, true, async);
error = start_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
#endif
static const struct adc_driver_api adc_sam0_api = {
.channel_setup = adc_sam0_channel_setup,
.read = adc_sam0_read,
.ref_internal = 1000U, /* Fixed 1.0 V reference */
#ifdef CONFIG_ADC_ASYNC
.read_async = adc_sam0_read_async,
#endif
};
#ifdef MCLK
#define ADC_SAM0_CLOCK_CONTROL(n) \
.mclk_mask = BIT(DT_INST_CLOCKS_CELL_BY_NAME(n, mclk, bit)), \
.gclk_mask = UTIL_CAT(GCLK_PCHCTRL_GEN_GCLK, \
DT_INST_PROP(n, gclk)), \
.gclk_id = DT_INST_CLOCKS_CELL_BY_NAME(n, gclk, periph_ch), \
.prescaler = UTIL_CAT(ADC_CTRLx_PRESCALER_DIV, \
UTIL_CAT(DT_INST_PROP(n, prescaler), _Val)),
#define ADC_SAM0_CONFIGURE(n) \
do { \
const struct adc_sam0_cfg *const cfg = dev->config; \
Adc * const adc = cfg->regs; \
adc->CALIB.reg = ADC_SAM0_BIASCOMP(n) \
| ADC_SAM0_BIASR2R(n) \
| ADC_SAM0_BIASREFBUF(n); \
} while (false)
#else
#define ADC_SAM0_CLOCK_CONTROL(n) \
.gclk = UTIL_CAT(GCLK_CLKCTRL_GEN_GCLK, DT_INST_PROP(n, gclk)) |\
GCLK_CLKCTRL_ID_ADC, \
.prescaler = UTIL_CAT(ADC_CTRLx_PRESCALER_DIV, \
UTIL_CAT(DT_INST_PROP(n, prescaler), _Val)),
#define ADC_SAM0_CONFIGURE(n) \
do { \
const struct adc_sam0_cfg *const cfg = dev->config; \
Adc * const adc = cfg->regs; \
/* Linearity is split across two words */ \
uint32_t lin = ((*(uint32_t *)ADC_FUSES_LINEARITY_0_ADDR) & \
ADC_FUSES_LINEARITY_0_Msk) >> \
ADC_FUSES_LINEARITY_0_Pos; \
lin |= (((*(uint32_t *)ADC_FUSES_LINEARITY_1_ADDR) & \
ADC_FUSES_LINEARITY_1_Msk) >> \
ADC_FUSES_LINEARITY_1_Pos) << 4; \
uint32_t bias = ((*(uint32_t *)ADC_FUSES_BIASCAL_ADDR) & \
ADC_FUSES_BIASCAL_Msk) >> ADC_FUSES_BIASCAL_Pos; \
adc->CALIB.reg = ADC_CALIB_BIAS_CAL(bias) | \
ADC_CALIB_LINEARITY_CAL(lin); \
} while (false)
#endif
#define ADC_SAM0_DEVICE(n) \
PINCTRL_DT_INST_DEFINE(n); \
static void adc_sam0_config_##n(const struct device *dev); \
static const struct adc_sam0_cfg adc_sam_cfg_##n = { \
.regs = (Adc *)DT_INST_REG_ADDR(n), \
ADC_SAM0_CLOCK_CONTROL(n) \
.freq = UTIL_CAT(UTIL_CAT(SOC_ATMEL_SAM0_GCLK, \
DT_INST_PROP(n, gclk)), \
_FREQ_HZ) / \
DT_INST_PROP(n, prescaler), \
.config_func = &adc_sam0_config_##n, \
.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \
}; \
static struct adc_sam0_data adc_sam_data_##n = { \
ADC_CONTEXT_INIT_TIMER(adc_sam_data_##n, ctx), \
ADC_CONTEXT_INIT_LOCK(adc_sam_data_##n, ctx), \
ADC_CONTEXT_INIT_SYNC(adc_sam_data_##n, ctx), \
}; \
DEVICE_DT_INST_DEFINE(n, adc_sam0_init, NULL, \
&adc_sam_data_##n, \
&adc_sam_cfg_##n, POST_KERNEL, \
CONFIG_ADC_INIT_PRIORITY, \
&adc_sam0_api); \
static void adc_sam0_config_##n(const struct device *dev) \
{ \
IRQ_CONNECT(DT_INST_IRQ_BY_NAME(n, resrdy, irq), \
DT_INST_IRQ_BY_NAME(n, resrdy, priority), \
adc_sam0_isr, \
DEVICE_DT_INST_GET(n), 0); \
irq_enable(DT_INST_IRQ_BY_NAME(n, resrdy, irq)); \
ADC_SAM0_CONFIGURE(n); \
}
DT_INST_FOREACH_STATUS_OKAY(ADC_SAM0_DEVICE)
``` | /content/code_sandbox/drivers/adc/adc_sam0.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,375 |
```c
/*
*
*/
/**
* @file
* @brief ADC driver for the MCP3204/MCP3208 ADCs.
*/
#include <zephyr/drivers/adc.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/util.h>
LOG_MODULE_REGISTER(adc_mcp320x, CONFIG_ADC_LOG_LEVEL);
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
#define MCP320X_RESOLUTION 12U
struct mcp320x_config {
struct spi_dt_spec bus;
uint8_t channels;
};
struct mcp320x_data {
struct adc_context ctx;
const struct device *dev;
uint16_t *buffer;
uint16_t *repeat_buffer;
uint8_t channels;
uint8_t differential;
struct k_thread thread;
struct k_sem sem;
K_KERNEL_STACK_MEMBER(stack,
CONFIG_ADC_MCP320X_ACQUISITION_THREAD_STACK_SIZE);
};
static int mcp320x_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
const struct mcp320x_config *config = dev->config;
struct mcp320x_data *data = dev->data;
if (channel_cfg->gain != ADC_GAIN_1) {
LOG_ERR("unsupported channel gain '%d'", channel_cfg->gain);
return -ENOTSUP;
}
if (channel_cfg->reference != ADC_REF_EXTERNAL0) {
LOG_ERR("unsupported channel reference '%d'",
channel_cfg->reference);
return -ENOTSUP;
}
if (channel_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
LOG_ERR("unsupported acquisition_time '%d'",
channel_cfg->acquisition_time);
return -ENOTSUP;
}
if (channel_cfg->channel_id >= config->channels) {
LOG_ERR("unsupported channel id '%d'", channel_cfg->channel_id);
return -ENOTSUP;
}
WRITE_BIT(data->differential, channel_cfg->channel_id,
channel_cfg->differential);
return 0;
}
static int mcp320x_validate_buffer_size(const struct device *dev,
const struct adc_sequence *sequence)
{
const struct mcp320x_config *config = dev->config;
uint8_t channels = 0;
size_t needed;
uint32_t mask;
for (mask = BIT(config->channels - 1); mask != 0; mask >>= 1) {
if (mask & sequence->channels) {
channels++;
}
}
needed = channels * sizeof(uint16_t);
if (sequence->options) {
needed *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < needed) {
return -ENOMEM;
}
return 0;
}
static int mcp320x_start_read(const struct device *dev,
const struct adc_sequence *sequence)
{
const struct mcp320x_config *config = dev->config;
struct mcp320x_data *data = dev->data;
int err;
if (sequence->resolution != MCP320X_RESOLUTION) {
LOG_ERR("unsupported resolution %d", sequence->resolution);
return -ENOTSUP;
}
if (find_msb_set(sequence->channels) > config->channels) {
LOG_ERR("unsupported channels in mask: 0x%08x",
sequence->channels);
return -ENOTSUP;
}
err = mcp320x_validate_buffer_size(dev, sequence);
if (err) {
LOG_ERR("buffer size too small");
return err;
}
data->buffer = sequence->buffer;
adc_context_start_read(&data->ctx, sequence);
return adc_context_wait_for_completion(&data->ctx);
}
static int mcp320x_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
struct mcp320x_data *data = dev->data;
int err;
adc_context_lock(&data->ctx, async ? true : false, async);
err = mcp320x_start_read(dev, sequence);
adc_context_release(&data->ctx, err);
return err;
}
static int mcp320x_read(const struct device *dev,
const struct adc_sequence *sequence)
{
return mcp320x_read_async(dev, sequence, NULL);
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct mcp320x_data *data = CONTAINER_OF(ctx, struct mcp320x_data, ctx);
data->channels = ctx->sequence.channels;
data->repeat_buffer = data->buffer;
k_sem_give(&data->sem);
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
bool repeat_sampling)
{
struct mcp320x_data *data = CONTAINER_OF(ctx, struct mcp320x_data, ctx);
if (repeat_sampling) {
data->buffer = data->repeat_buffer;
}
}
static int mcp320x_read_channel(const struct device *dev, uint8_t channel,
uint16_t *result)
{
const struct mcp320x_config *config = dev->config;
struct mcp320x_data *data = dev->data;
uint8_t tx_bytes[2];
uint8_t rx_bytes[2];
int err;
const struct spi_buf tx_buf[2] = {
{
.buf = tx_bytes,
.len = sizeof(tx_bytes)
},
{
.buf = NULL,
.len = 1
}
};
const struct spi_buf rx_buf[2] = {
{
.buf = NULL,
.len = 1
},
{
.buf = rx_bytes,
.len = sizeof(rx_bytes)
}
};
const struct spi_buf_set tx = {
.buffers = tx_buf,
.count = ARRAY_SIZE(tx_buf)
};
const struct spi_buf_set rx = {
.buffers = rx_buf,
.count = ARRAY_SIZE(rx_buf)
};
/*
* Configuration bits consists of: 5 dummy bits + start bit +
* SGL/#DIFF bit + D2 + D1 + D0 + 6 dummy bits
*/
tx_bytes[0] = BIT(2) | channel >> 2;
tx_bytes[1] = channel << 6;
if ((data->differential & BIT(channel)) == 0) {
tx_bytes[0] |= BIT(1);
}
err = spi_transceive_dt(&config->bus, &tx, &rx);
if (err) {
return err;
}
*result = sys_get_be16(rx_bytes);
*result &= BIT_MASK(MCP320X_RESOLUTION);
return 0;
}
static void mcp320x_acquisition_thread(void *p1, void *p2, void *p3)
{
ARG_UNUSED(p2);
ARG_UNUSED(p3);
struct mcp320x_data *data = p1;
uint16_t result = 0;
uint8_t channel;
int err;
while (true) {
k_sem_take(&data->sem, K_FOREVER);
while (data->channels) {
channel = find_lsb_set(data->channels) - 1;
LOG_DBG("reading channel %d", channel);
err = mcp320x_read_channel(data->dev, channel, &result);
if (err) {
LOG_ERR("failed to read channel %d (err %d)",
channel, err);
adc_context_complete(&data->ctx, err);
break;
}
LOG_DBG("read channel %d, result = %d", channel,
result);
*data->buffer++ = result;
WRITE_BIT(data->channels, channel, 0);
}
adc_context_on_sampling_done(&data->ctx, data->dev);
}
}
static int mcp320x_init(const struct device *dev)
{
const struct mcp320x_config *config = dev->config;
struct mcp320x_data *data = dev->data;
data->dev = dev;
k_sem_init(&data->sem, 0, 1);
if (!spi_is_ready_dt(&config->bus)) {
LOG_ERR("SPI bus is not ready");
return -ENODEV;
}
k_tid_t tid = k_thread_create(&data->thread, data->stack,
K_KERNEL_STACK_SIZEOF(data->stack),
mcp320x_acquisition_thread,
data, NULL, NULL,
CONFIG_ADC_MCP320X_ACQUISITION_THREAD_PRIO,
0, K_NO_WAIT);
k_thread_name_set(tid, dev->name);
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
static const struct adc_driver_api mcp320x_adc_api = {
.channel_setup = mcp320x_channel_setup,
.read = mcp320x_read,
#ifdef CONFIG_ADC_ASYNC
.read_async = mcp320x_read_async,
#endif
};
#define INST_DT_MCP320X(inst, t) DT_INST(inst, microchip_mcp##t)
#define MCP320X_DEVICE(t, n, ch) \
static struct mcp320x_data mcp##t##_data_##n = { \
ADC_CONTEXT_INIT_TIMER(mcp##t##_data_##n, ctx), \
ADC_CONTEXT_INIT_LOCK(mcp##t##_data_##n, ctx), \
ADC_CONTEXT_INIT_SYNC(mcp##t##_data_##n, ctx), \
}; \
static const struct mcp320x_config mcp##t##_config_##n = { \
.bus = SPI_DT_SPEC_GET(INST_DT_MCP320X(n, t), \
SPI_OP_MODE_MASTER | SPI_TRANSFER_MSB | \
SPI_WORD_SET(8), 0), \
.channels = ch, \
}; \
DEVICE_DT_DEFINE(INST_DT_MCP320X(n, t), \
&mcp320x_init, NULL, \
&mcp##t##_data_##n, \
&mcp##t##_config_##n, POST_KERNEL, \
CONFIG_ADC_INIT_PRIORITY, \
&mcp320x_adc_api)
/*
* MCP3204: 4 channels
*/
#define MCP3204_DEVICE(n) MCP320X_DEVICE(3204, n, 4)
/*
* MCP3208: 8 channels
*/
#define MCP3208_DEVICE(n) MCP320X_DEVICE(3208, n, 8)
#define CALL_WITH_ARG(arg, expr) expr(arg)
#define INST_DT_MCP320X_FOREACH(t, inst_expr) \
LISTIFY(DT_NUM_INST_STATUS_OKAY(microchip_mcp##t), \
CALL_WITH_ARG, (;), inst_expr)
INST_DT_MCP320X_FOREACH(3204, MCP3204_DEVICE);
INST_DT_MCP320X_FOREACH(3208, MCP3208_DEVICE);
``` | /content/code_sandbox/drivers/adc/adc_mcp320x.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,330 |
```c
/*
*
*/
#define DT_DRV_COMPAT renesas_ra_adc
#include <zephyr/drivers/adc.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/reset.h>
#include <zephyr/logging/log.h>
#include <instances/r_adc.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(adc_ra, CONFIG_ADC_LOG_LEVEL);
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
#define ADC_RA_MAX_RESOLUTION 12
void adc_scan_end_isr(void);
/**
* @brief RA ADC config
*
* This structure contains constant data for given instance of RA ADC.
*/
struct adc_ra_config {
/** Number of supported channels */
uint8_t num_channels;
/** pinctrl configs */
const struct pinctrl_dev_config *pcfg;
/** function pointer to irq setup */
void (*irq_configure)(void);
};
/**
* @brief RA ADC data
*
* This structure contains data structures used by a RA ADC.
*/
struct adc_ra_data {
/** Structure that handle state of ongoing read operation */
struct adc_context ctx;
/** Pointer to RA ADC own device structure */
const struct device *dev;
/** Structure that handle fsp ADC */
adc_instance_ctrl_t adc;
/** Structure that handle fsp ADC config */
struct st_adc_cfg f_config;
/** Structure that handle fsp ADC channel config */
adc_channel_cfg_t f_channel_cfg;
/** Pointer to memory where next sample will be written */
uint16_t *buf;
/** Mask with channels that will be sampled */
uint32_t channels;
/** Buffer id */
uint16_t buf_id;
};
/**
* @brief Setup channels before starting to scan ADC
*
* @param dev RA ADC device
* @param channel_cfg channel configuration
*
* @return 0 on success
* @return -ENOTSUP if channel id or differential is wrong value
* @return -EINVAL if channel configuration is invalid
*/
static int adc_ra_channel_setup(const struct device *dev, const struct adc_channel_cfg *channel_cfg)
{
fsp_err_t fsp_err = FSP_SUCCESS;
struct adc_ra_data *data = dev->data;
if (!((channel_cfg->channel_id >= 0 && channel_cfg->channel_id <= 2) ||
(channel_cfg->channel_id >= 4 && channel_cfg->channel_id <= 8) ||
(channel_cfg->channel_id >= 16 && channel_cfg->channel_id <= 19))) {
LOG_ERR("unsupported channel id '%d'", channel_cfg->channel_id);
return -ENOTSUP;
}
if (channel_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
LOG_ERR("Acquisition time is not valid");
return -EINVAL;
}
if (channel_cfg->differential) {
LOG_ERR("unsupported differential mode");
return -ENOTSUP;
}
if (channel_cfg->gain != ADC_GAIN_1) {
LOG_ERR("Gain is not valid");
return -EINVAL;
}
data->f_channel_cfg.scan_mask |= (1U << channel_cfg->channel_id);
/* Configure ADC channel specific settings */
fsp_err = R_ADC_ScanCfg(&data->adc, &data->f_channel_cfg);
if (FSP_SUCCESS != fsp_err) {
return -ENOTSUP;
}
return 0;
}
/**
* Interrupt handler
*/
static void adc_ra_isr(const struct device *dev)
{
struct adc_ra_data *data = dev->data;
fsp_err_t fsp_err = FSP_SUCCESS;
adc_channel_t channel_id = 0;
uint32_t channels = 0;
int16_t *sample_buffer = (int16_t *)data->buf;
channels = data->channels;
for (channel_id = 0; channels > 0; channel_id++) {
/* Check if it is right channel id */
if ((channels & 0x01) != 0) {
fsp_err = R_ADC_Read(&data->adc, channel_id, &sample_buffer[data->buf_id]);
if (FSP_SUCCESS != fsp_err) {
break;
}
data->buf_id = data->buf_id + 1;
}
channels = channels >> 1;
}
adc_scan_end_isr();
adc_context_on_sampling_done(&data->ctx, dev);
}
/**
* @brief Check if buffer in @p sequence is big enough to hold all ADC samples
*
* @param dev RA ADC device
* @param sequence ADC sequence description
*
* @return 0 on success
* @return -ENOMEM if buffer is not big enough
*/
static int adc_ra_check_buffer_size(const struct device *dev, const struct adc_sequence *sequence)
{
const struct adc_ra_config *config = dev->config;
uint8_t channels = 0;
size_t needed;
uint32_t mask;
for (mask = BIT(config->num_channels - 1); mask != 0; mask >>= 1) {
if (mask & sequence->channels) {
channels++;
}
}
needed = channels * sizeof(uint16_t);
if (sequence->options) {
needed *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < needed) {
return -ENOMEM;
}
return 0;
}
/**
* @brief Start processing read request
*
* @param dev RA ADC device
* @param sequence ADC sequence description
*
* @return 0 on success
* @return -ENOTSUP if requested resolution or channel is out side of supported
* range
* @return -ENOMEM if buffer is not big enough
* (see @ref adc_ra_check_buffer_size)
* @return other error code returned by adc_context_wait_for_completion
*/
static int adc_ra_start_read(const struct device *dev, const struct adc_sequence *sequence)
{
const struct adc_ra_config *config = dev->config;
struct adc_ra_data *data = dev->data;
int err;
if (sequence->resolution > ADC_RA_MAX_RESOLUTION || sequence->resolution == 0) {
LOG_ERR("unsupported resolution %d", sequence->resolution);
return -ENOTSUP;
}
if (find_msb_set(sequence->channels) > config->num_channels) {
LOG_ERR("unsupported channels in mask: 0x%08x", sequence->channels);
return -ENOTSUP;
}
err = adc_ra_check_buffer_size(dev, sequence);
if (err) {
LOG_ERR("buffer size too small");
return err;
}
data->buf_id = 0;
data->buf = sequence->buffer;
adc_context_start_read(&data->ctx, sequence);
adc_context_wait_for_completion(&data->ctx);
return 0;
}
/**
* @brief Start processing read request asynchronously
*
* @param dev RA ADC device
* @param sequence ADC sequence description
* @param async async pointer to asynchronous signal
*
* @return 0 on success
* @return -ENOTSUP if requested resolution or channel is out side of supported
* range
* @return -ENOMEM if buffer is not big enough
* (see @ref adc_ra_check_buffer_size)
* @return other error code returned by adc_context_wait_for_completion
*/
static int adc_ra_read_async(const struct device *dev, const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
struct adc_ra_data *data = dev->data;
int err;
adc_context_lock(&data->ctx, async ? true : false, async);
err = adc_ra_start_read(dev, sequence);
adc_context_release(&data->ctx, err);
return err;
}
/**
* @brief Start processing read request synchronously
*
* @param dev RA ADC device
* @param sequence ADC sequence description
*
* @return 0 on success
* @return -ENOTSUP if requested resolution or channel is out side of supported
* range
* @return -ENOMEM if buffer is not big enough
* (see @ref adc_ra_check_buffer_size)
* @return other error code returned by adc_context_wait_for_completion
*/
static int adc_ra_read(const struct device *dev, const struct adc_sequence *sequence)
{
return adc_ra_read_async(dev, sequence, NULL);
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct adc_ra_data *data = CONTAINER_OF(ctx, struct adc_ra_data, ctx);
data->channels = ctx->sequence.channels;
R_ADC_ScanStart(&data->adc);
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx, bool repeat_sampling)
{
struct adc_ra_data *data = CONTAINER_OF(ctx, struct adc_ra_data, ctx);
if (repeat_sampling) {
data->buf_id = 0;
}
}
/**
* @brief Function called on init for each RA ADC device. It setups all
* channels to return constant 0 mV and create acquisition thread.
*
* @param dev RA ADC device
*
* @return -EIO if error
*
* @return 0 on success
*/
static int adc_ra_init(const struct device *dev)
{
const struct adc_ra_config *config = dev->config;
struct adc_ra_data *data = dev->data;
int ret;
fsp_err_t fsp_err = FSP_SUCCESS;
ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
if (ret < 0) {
return ret;
}
/* Open ADC module */
fsp_err = R_ADC_Open(&data->adc, &data->f_config);
if (FSP_SUCCESS != fsp_err) {
return -EIO;
}
config->irq_configure();
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
const adc_extended_cfg_t g_adc_cfg_extend = {
.add_average_count = ADC_ADD_OFF,
.clearing = ADC_CLEAR_AFTER_READ_ON,
.trigger_group_b = ADC_START_SOURCE_DISABLED,
.double_trigger_mode = ADC_DOUBLE_TRIGGER_DISABLED,
.adc_vref_control = ADC_VREF_CONTROL_VREFH,
.enable_adbuf = 0,
.window_a_irq = FSP_INVALID_VECTOR,
.window_a_ipl = (1),
.window_b_irq = FSP_INVALID_VECTOR,
.window_b_ipl = (BSP_IRQ_DISABLED),
.trigger = ADC_START_SOURCE_DISABLED, /* Use Software trigger */
};
#define IRQ_CONFIGURE_FUNC(idx) \
static void adc_ra_configure_func_##idx(void) \
{ \
R_ICU->IELSR[DT_INST_IRQ_BY_NAME(idx, scanend, irq)] = \
ELC_EVENT_ADC##idx##_SCAN_END; \
IRQ_CONNECT(DT_INST_IRQ_BY_NAME(idx, scanend, irq), \
DT_INST_IRQ_BY_NAME(idx, scanend, priority), adc_ra_isr, \
DEVICE_DT_INST_GET(idx), 0); \
irq_enable(DT_INST_IRQ_BY_NAME(idx, scanend, irq)); \
}
#define IRQ_CONFIGURE_DEFINE(idx) .irq_configure = adc_ra_configure_func_##idx
#define ADC_RA_INIT(idx) \
IRQ_CONFIGURE_FUNC(idx) \
PINCTRL_DT_INST_DEFINE(idx); \
static struct adc_driver_api adc_ra_api_##idx = { \
.channel_setup = adc_ra_channel_setup, \
.read = adc_ra_read, \
.ref_internal = DT_INST_PROP(idx, vref_mv), \
IF_ENABLED(CONFIG_ADC_ASYNC, (.read_async = adc_ra_read_async))}; \
static const struct adc_ra_config adc_ra_config_##idx = { \
.num_channels = DT_INST_PROP(idx, channels_num), \
.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(idx), \
IRQ_CONFIGURE_DEFINE(idx), \
}; \
static struct adc_ra_data adc_ra_data_##idx = { \
ADC_CONTEXT_INIT_TIMER(adc_ra_data_##idx, ctx), \
ADC_CONTEXT_INIT_LOCK(adc_ra_data_##idx, ctx), \
ADC_CONTEXT_INIT_SYNC(adc_ra_data_##idx, ctx), \
.dev = DEVICE_DT_INST_GET(idx), \
.f_config = \
{ \
.unit = idx, \
.mode = ADC_MODE_SINGLE_SCAN, \
.resolution = ADC_RESOLUTION_12_BIT, \
.alignment = (adc_alignment_t)ADC_ALIGNMENT_RIGHT, \
.trigger = 0, \
.p_callback = NULL, \
.p_context = NULL, \
.p_extend = &g_adc_cfg_extend, \
.scan_end_irq = DT_INST_IRQ_BY_NAME(idx, scanend, irq), \
.scan_end_ipl = DT_INST_IRQ_BY_NAME(idx, scanend, priority), \
.scan_end_b_irq = FSP_INVALID_VECTOR, \
.scan_end_b_ipl = (BSP_IRQ_DISABLED), \
}, \
.f_channel_cfg = \
{ \
.scan_mask = 0, \
.scan_mask_group_b = 0, \
.priority_group_a = ADC_GROUP_A_PRIORITY_OFF, \
.add_mask = 0, \
.sample_hold_mask = 0, \
.sample_hold_states = 24, \
.p_window_cfg = NULL, \
}, \
}; \
\
DEVICE_DT_INST_DEFINE(idx, adc_ra_init, NULL, &adc_ra_data_##idx, &adc_ra_config_##idx, \
POST_KERNEL, CONFIG_ADC_INIT_PRIORITY, &adc_ra_api_##idx)
DT_INST_FOREACH_STATUS_OKAY(ADC_RA_INIT);
``` | /content/code_sandbox/drivers/adc/adc_renesas_ra.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,926 |
```unknown
config ADC_EMUL
bool "ADC emulator"
default y
depends on DT_HAS_ZEPHYR_ADC_EMUL_ENABLED
help
Enable the ADC emulator driver. This is a fake driver in that it
does not talk to real hardware. It pretends to be actual ADC. It
is used for testing higher-level API for ADC devices.
if ADC_EMUL
config ADC_EMUL_ACQUISITION_THREAD_STACK_SIZE
int "Stack size for the ADC data acquisition thread"
default 512
help
Size of the stack used for the internal data acquisition
thread. Increasing size may be required when value function for
emulated ADC require a lot of memory.
config ADC_EMUL_ACQUISITION_THREAD_PRIO
int "Priority for the ADC data acquisition thread"
default 0
help
Priority level for the internal ADC data acquisition thread.
endif # ADC_EMUL
``` | /content/code_sandbox/drivers/adc/Kconfig.adc_emul | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 191 |
```c
/*
*
*/
#define DT_DRV_COMPAT nxp_kinetis_adc16
#include <errno.h>
#include <zephyr/drivers/adc.h>
#include <zephyr/drivers/pinctrl.h>
#ifdef CONFIG_ADC_MCUX_ADC16_ENABLE_EDMA
#include <zephyr/drivers/dma.h>
#endif
#include <fsl_adc16.h>
#define LOG_LEVEL CONFIG_ADC_LOG_LEVEL
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(adc_mcux_adc16);
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
struct mcux_adc16_config {
ADC_Type *base;
#ifndef CONFIG_ADC_MCUX_ADC16_ENABLE_EDMA
void (*irq_config_func)(const struct device *dev);
#endif
uint32_t clk_source; /* ADC clock source selection */
uint32_t long_sample; /* ADC long sample mode selection */
uint32_t hw_trigger_src; /* ADC hardware trigger source */
/* defined in SIM module SOPT7 */
#ifdef CONFIG_ADC_MCUX_ADC16_ENABLE_EDMA
uint32_t dma_slot; /* ADC DMA MUX slot */
#endif
uint32_t trg_offset;
uint32_t trg_bits;
uint32_t alt_offset;
uint32_t alt_bits;
bool periodic_trigger; /* ADC enable periodic trigger */
bool channel_mux_b;
bool high_speed; /* ADC enable high speed mode*/
bool continuous_convert; /* ADC enable continuous convert*/
const struct pinctrl_dev_config *pincfg;
};
#ifdef CONFIG_ADC_MCUX_ADC16_ENABLE_EDMA
struct adc_edma_config {
int32_t state;
uint32_t dma_channel;
void (*irq_call_back)(void);
struct dma_config dma_cfg;
struct dma_block_config dma_block;
};
#endif
struct mcux_adc16_data {
const struct device *dev;
struct adc_context ctx;
#ifdef CONFIG_ADC_MCUX_ADC16_ENABLE_EDMA
const struct device *dev_dma;
struct adc_edma_config adc_dma_config;
#endif
uint16_t *buffer;
uint16_t *repeat_buffer;
uint32_t channels;
uint8_t channel_id;
};
#ifdef CONFIG_ADC_MCUX_ADC16_HW_TRIGGER
#define SIM_SOPT7_ADCSET(x, shifts, mask) \
(((uint32_t)(((uint32_t)(x)) << shifts)) & mask)
#endif
#ifdef CONFIG_ADC_MCUX_ADC16_ENABLE_EDMA
static void adc_dma_callback(const struct device *dma_dev, void *callback_arg,
uint32_t channel, int status)
{
const struct device *dev = (const struct device *)callback_arg;
struct mcux_adc16_data *data = dev->data;
LOG_DBG("DMA done");
data->buffer++;
adc_context_on_sampling_done(&data->ctx, dev);
}
#endif
#ifdef CONFIG_ADC_MCUX_ADC16_HW_TRIGGER
static void adc_hw_trigger_enable(const struct device *dev)
{
const struct mcux_adc16_config *config = dev->config;
/* enable ADC trigger channel */
SIM->SOPT7 |= SIM_SOPT7_ADCSET(config->hw_trigger_src,
config->trg_offset, config->trg_bits) |
SIM_SOPT7_ADCSET(1, config->alt_offset, config->alt_bits);
}
#endif
static int mcux_adc16_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
uint8_t channel_id = channel_cfg->channel_id;
if (channel_id > (ADC_SC1_ADCH_MASK >> ADC_SC1_ADCH_SHIFT)) {
LOG_ERR("Channel %d is not valid", channel_id);
return -EINVAL;
}
if (channel_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
LOG_ERR("Invalid channel acquisition time");
return -EINVAL;
}
if (channel_cfg->differential) {
LOG_ERR("Differential channels are not supported");
return -EINVAL;
}
if (channel_cfg->gain != ADC_GAIN_1) {
LOG_ERR("Invalid channel gain");
return -EINVAL;
}
if (channel_cfg->reference != ADC_REF_INTERNAL) {
LOG_ERR("Invalid channel reference");
return -EINVAL;
}
#ifdef CONFIG_ADC_MCUX_ADC16_HW_TRIGGER
adc_hw_trigger_enable(dev);
#endif
return 0;
}
static int start_read(const struct device *dev,
const struct adc_sequence *sequence)
{
const struct mcux_adc16_config *config = dev->config;
struct mcux_adc16_data *data = dev->data;
adc16_hardware_average_mode_t mode;
adc16_resolution_t resolution;
int error;
uint32_t tmp32;
ADC_Type *base = config->base;
switch (sequence->resolution) {
case 8:
case 9:
resolution = kADC16_Resolution8or9Bit;
break;
case 10:
case 11:
resolution = kADC16_Resolution10or11Bit;
break;
case 12:
case 13:
resolution = kADC16_Resolution12or13Bit;
break;
#if defined(FSL_FEATURE_ADC16_MAX_RESOLUTION) && \
(FSL_FEATURE_ADC16_MAX_RESOLUTION >= 16U)
case 16:
resolution = kADC16_Resolution16Bit;
break;
#endif
default:
LOG_ERR("Invalid resolution");
return -EINVAL;
}
tmp32 = base->CFG1 & ~(ADC_CFG1_MODE_MASK);
tmp32 |= ADC_CFG1_MODE(resolution);
base->CFG1 = tmp32;
switch (sequence->oversampling) {
case 0:
mode = kADC16_HardwareAverageDisabled;
break;
case 2:
mode = kADC16_HardwareAverageCount4;
break;
case 3:
mode = kADC16_HardwareAverageCount8;
break;
case 4:
mode = kADC16_HardwareAverageCount16;
break;
case 5:
mode = kADC16_HardwareAverageCount32;
break;
default:
LOG_ERR("Invalid oversampling");
return -EINVAL;
}
ADC16_SetHardwareAverage(config->base, mode);
if (sequence->buffer_size < 2) {
LOG_ERR("sequence buffer size too small %d < 2", sequence->buffer_size);
return -EINVAL;
}
if (sequence->options) {
if (sequence->buffer_size <
2 * (sequence->options->extra_samplings + 1)) {
LOG_ERR("sequence buffer size too small < 2 * extra + 2");
return -EINVAL;
}
}
data->buffer = sequence->buffer;
adc_context_start_read(&data->ctx, sequence);
error = adc_context_wait_for_completion(&data->ctx);
#ifdef CONFIG_ADC_MCUX_ADC16_ENABLE_EDMA
dma_stop(data->dev_dma, data->adc_dma_config.dma_channel);
#endif
return error;
}
static int mcux_adc16_read(const struct device *dev,
const struct adc_sequence *sequence)
{
struct mcux_adc16_data *data = dev->data;
int error;
adc_context_lock(&data->ctx, false, NULL);
error = start_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
#ifdef CONFIG_ADC_ASYNC
static int mcux_adc16_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
struct mcux_adc16_data *data = dev->data;
int error;
adc_context_lock(&data->ctx, true, async);
error = start_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
#endif
static void mcux_adc16_start_channel(const struct device *dev)
{
const struct mcux_adc16_config *config = dev->config;
struct mcux_adc16_data *data = dev->data;
adc16_channel_config_t channel_config;
uint32_t channel_group = 0U;
data->channel_id = find_lsb_set(data->channels) - 1;
LOG_DBG("Starting channel %d", data->channel_id);
#if defined(FSL_FEATURE_ADC16_HAS_DIFF_MODE) && FSL_FEATURE_ADC16_HAS_DIFF_MODE
channel_config.enableDifferentialConversion = false;
#endif
channel_config.enableInterruptOnConversionCompleted = true;
channel_config.channelNumber = data->channel_id;
ADC16_SetChannelConfig(config->base, channel_group, &channel_config);
#ifdef CONFIG_ADC_MCUX_ADC16_ENABLE_EDMA
LOG_DBG("Starting EDMA");
dma_start(data->dev_dma, data->adc_dma_config.dma_channel);
#endif
LOG_DBG("Starting channel done");
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct mcux_adc16_data *data =
CONTAINER_OF(ctx, struct mcux_adc16_data, ctx);
data->channels = ctx->sequence.channels;
data->repeat_buffer = data->buffer;
#ifdef CONFIG_ADC_MCUX_ADC16_ENABLE_EDMA
LOG_DBG("config dma");
data->adc_dma_config.dma_block.block_size = 2;
data->adc_dma_config.dma_block.dest_address = (uint32_t)data->buffer;
data->adc_dma_config.dma_cfg.head_block =
&(data->adc_dma_config.dma_block);
dma_config(data->dev_dma, data->adc_dma_config.dma_channel,
&data->adc_dma_config.dma_cfg);
#endif
mcux_adc16_start_channel(data->dev);
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
bool repeat_sampling)
{
struct mcux_adc16_data *data =
CONTAINER_OF(ctx, struct mcux_adc16_data, ctx);
if (repeat_sampling) {
data->buffer = data->repeat_buffer;
}
}
#ifndef CONFIG_ADC_MCUX_ADC16_ENABLE_EDMA
static void mcux_adc16_isr(const struct device *dev)
{
const struct mcux_adc16_config *config = dev->config;
struct mcux_adc16_data *data = dev->data;
ADC_Type *base = config->base;
uint32_t channel_group = 0U;
uint16_t result;
result = ADC16_GetChannelConversionValue(base, channel_group);
LOG_DBG("Finished channel %d. Result is 0x%04x", data->channel_id,
result);
*data->buffer++ = result;
data->channels &= ~BIT(data->channel_id);
if (data->channels) {
mcux_adc16_start_channel(dev);
} else {
adc_context_on_sampling_done(&data->ctx, dev);
}
}
#endif
static int mcux_adc16_init(const struct device *dev)
{
const struct mcux_adc16_config *config = dev->config;
struct mcux_adc16_data *data = dev->data;
ADC_Type *base = config->base;
adc16_config_t adc_config;
int err;
LOG_DBG("init adc");
ADC16_GetDefaultConfig(&adc_config);
#ifdef CONFIG_ADC_MCUX_ADC16_ENABLE_EDMA
adc_config.clockSource = (adc16_clock_source_t)config->clk_source;
adc_config.longSampleMode =
(adc16_long_sample_mode_t)config->long_sample;
adc_config.enableHighSpeed = config->high_speed;
adc_config.enableContinuousConversion = config->continuous_convert;
#endif
#if CONFIG_ADC_MCUX_ADC16_VREF_DEFAULT
adc_config.referenceVoltageSource = kADC16_ReferenceVoltageSourceVref;
#else /* CONFIG_ADC_MCUX_ADC16_VREF_ALTERNATE */
adc_config.referenceVoltageSource = kADC16_ReferenceVoltageSourceValt;
#endif
#if CONFIG_ADC_MCUX_ADC16_CLK_DIV_RATIO_1
adc_config.clockDivider = kADC16_ClockDivider1;
#elif CONFIG_ADC_MCUX_ADC16_CLK_DIV_RATIO_2
adc_config.clockDivider = kADC16_ClockDivider2;
#elif CONFIG_ADC_MCUX_ADC16_CLK_DIV_RATIO_4
adc_config.clockDivider = kADC16_ClockDivider4;
#else /* CONFIG_ADC_MCUX_ADC16_CLK_DIV_RATIO_8 */
adc_config.clockDivider = kADC16_ClockDivider8;
#endif
ADC16_Init(base, &adc_config);
#if defined(FSL_FEATURE_ADC16_HAS_CALIBRATION) && \
FSL_FEATURE_ADC16_HAS_CALIBRATION
ADC16_SetHardwareAverage(base, kADC16_HardwareAverageCount32);
ADC16_DoAutoCalibration(base);
#endif
if (config->channel_mux_b) {
ADC16_SetChannelMuxMode(base, kADC16_ChannelMuxB);
}
if (IS_ENABLED(CONFIG_ADC_MCUX_ADC16_HW_TRIGGER)) {
ADC16_EnableHardwareTrigger(base, true);
} else {
ADC16_EnableHardwareTrigger(base, false);
}
err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
if (err != 0) {
return err;
}
data->dev = dev;
/* dma related init */
#ifdef CONFIG_ADC_MCUX_ADC16_ENABLE_EDMA
/* Enable DMA. */
ADC16_EnableDMA(base, true);
data->adc_dma_config.dma_cfg.block_count = 1U;
data->adc_dma_config.dma_cfg.dma_slot = config->dma_slot;
data->adc_dma_config.dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY;
data->adc_dma_config.dma_cfg.source_burst_length = 2U;
data->adc_dma_config.dma_cfg.dest_burst_length = 2U;
data->adc_dma_config.dma_cfg.channel_priority = 0U;
data->adc_dma_config.dma_cfg.dma_callback = adc_dma_callback;
data->adc_dma_config.dma_cfg.user_data = (void *)dev;
data->adc_dma_config.dma_cfg.source_data_size = 2U;
data->adc_dma_config.dma_cfg.dest_data_size = 2U;
data->adc_dma_config.dma_block.source_address = (uint32_t)&base->R[0];
if (data->dev_dma == NULL || !device_is_ready(data->dev_dma)) {
LOG_ERR("dma binding fail");
return -EINVAL;
}
if (config->periodic_trigger) {
enum dma_channel_filter adc_filter = DMA_CHANNEL_PERIODIC;
data->adc_dma_config.dma_channel =
dma_request_channel(data->dev_dma, (void *)&adc_filter);
} else {
enum dma_channel_filter adc_filter = DMA_CHANNEL_NORMAL;
data->adc_dma_config.dma_channel =
dma_request_channel(data->dev_dma, (void *)&adc_filter);
}
if (data->adc_dma_config.dma_channel == -EINVAL) {
LOG_ERR("can not allocate dma channel");
return -EINVAL;
}
LOG_DBG("dma allocated channel %d", data->adc_dma_config.dma_channel);
#else
config->irq_config_func(dev);
#endif
LOG_DBG("adc init done");
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
static const struct adc_driver_api mcux_adc16_driver_api = {
.channel_setup = mcux_adc16_channel_setup,
.read = mcux_adc16_read,
#ifdef CONFIG_ADC_ASYNC
.read_async = mcux_adc16_read_async,
#endif
};
#ifdef CONFIG_ADC_MCUX_ADC16_ENABLE_EDMA
#define ADC16_MCUX_EDMA_INIT(n) \
.hw_trigger_src = \
DT_INST_PROP_OR(n, hw_trigger_src, 0), \
.dma_slot = DT_INST_DMAS_CELL_BY_IDX(n, 0, source), \
.trg_offset = DT_INST_CLOCKS_CELL_BY_IDX(n, 0, offset), \
.trg_bits = DT_INST_CLOCKS_CELL_BY_IDX(n, 0, bits), \
.alt_offset = DT_INST_CLOCKS_CELL_BY_IDX(n, 1, offset), \
.alt_bits = DT_INST_CLOCKS_CELL_BY_IDX(n, 1, bits),
#define ADC16_MCUX_EDMA_DATA(n) \
.dev_dma = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, adc##n))
#define ADC16_MCUX_IRQ_INIT(n)
#define ADC16_MCUX_IRQ_DECLARE(n)
#else
#define ADC16_MCUX_EDMA_INIT(n)
#define ADC16_MCUX_EDMA_DATA(n)
#define ADC16_MCUX_IRQ_INIT(n) .irq_config_func = mcux_adc16_config_func_##n,
#define ADC16_MCUX_IRQ_DECLARE(n) \
static void mcux_adc16_config_func_##n(const struct device *dev) \
{ \
IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), \
mcux_adc16_isr, \
DEVICE_DT_INST_GET(n), 0); \
\
irq_enable(DT_INST_IRQN(n)); \
}
#endif /* CONFIG_ADC_MCUX_ADC16_ENABLE_EDMA */
#define ACD16_MCUX_INIT(n) \
ADC16_MCUX_IRQ_DECLARE(n) \
PINCTRL_DT_INST_DEFINE(n); \
\
static const struct mcux_adc16_config mcux_adc16_config_##n = { \
.base = (ADC_Type *)DT_INST_REG_ADDR(n), \
ADC16_MCUX_IRQ_INIT(n) \
.channel_mux_b = DT_INST_PROP(n, channel_mux_b), \
.clk_source = DT_INST_PROP_OR(n, clk_source, 0), \
.long_sample = DT_INST_PROP_OR(n, long_sample, 0), \
.high_speed = DT_INST_PROP(n, high_speed), \
.periodic_trigger = DT_INST_PROP(n, periodic_trigger), \
.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \
.continuous_convert = \
DT_INST_PROP(n, continuous_convert), \
ADC16_MCUX_EDMA_INIT(n) \
}; \
\
static struct mcux_adc16_data mcux_adc16_data_##n = { \
ADC_CONTEXT_INIT_TIMER(mcux_adc16_data_##n, ctx), \
ADC_CONTEXT_INIT_LOCK(mcux_adc16_data_##n, ctx), \
ADC_CONTEXT_INIT_SYNC(mcux_adc16_data_##n, ctx), \
ADC16_MCUX_EDMA_DATA(n) \
}; \
\
DEVICE_DT_INST_DEFINE(n, &mcux_adc16_init, \
NULL, \
&mcux_adc16_data_##n, \
&mcux_adc16_config_##n, \
POST_KERNEL, \
CONFIG_ADC_INIT_PRIORITY, \
&mcux_adc16_driver_api); \
DT_INST_FOREACH_STATUS_OKAY(ACD16_MCUX_INIT)
``` | /content/code_sandbox/drivers/adc/adc_mcux_adc16.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,975 |
```c
/*
*/
#define DT_DRV_COMPAT adi_ad559x_adc
#include <zephyr/drivers/adc.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/drivers/mfd/ad559x.h>
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(adc_ad559x, CONFIG_ADC_LOG_LEVEL);
#define AD559X_ADC_RD_POINTER_SIZE 1
#define AD559X_ADC_RD_POINTER 0x40
#define AD559X_ADC_RESOLUTION 12U
#define AD559X_ADC_VREF_MV 2500U
#define AD559X_ADC_RES_IND_BIT BIT(15)
#define AD559X_ADC_RES_CHAN_MASK GENMASK(14, 12)
#define AD559X_ADC_RES_VAL_MASK GENMASK(11, 0)
struct adc_ad559x_config {
const struct device *mfd_dev;
};
struct adc_ad559x_data {
struct adc_context ctx;
const struct device *dev;
uint8_t adc_conf;
uint16_t *buffer;
uint16_t *repeat_buffer;
uint8_t channels;
struct k_thread thread;
struct k_sem sem;
K_KERNEL_STACK_MEMBER(stack, CONFIG_ADC_AD559X_ACQUISITION_THREAD_STACK_SIZE);
};
static int adc_ad559x_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
const struct adc_ad559x_config *config = dev->config;
struct adc_ad559x_data *data = dev->data;
if (channel_cfg->channel_id >= AD559X_PIN_MAX) {
LOG_ERR("invalid channel id %d", channel_cfg->channel_id);
return -EINVAL;
}
data->adc_conf |= BIT(channel_cfg->channel_id);
return mfd_ad559x_write_reg(config->mfd_dev, AD559X_REG_ADC_CONFIG, data->adc_conf);
}
static int adc_ad559x_validate_buffer_size(const struct device *dev,
const struct adc_sequence *sequence)
{
uint8_t channels;
size_t needed;
channels = POPCOUNT(sequence->channels);
needed = channels * sizeof(uint16_t);
if (sequence->buffer_size < needed) {
return -ENOMEM;
}
return 0;
}
static int adc_ad559x_start_read(const struct device *dev, const struct adc_sequence *sequence)
{
struct adc_ad559x_data *data = dev->data;
int ret;
if (sequence->resolution != AD559X_ADC_RESOLUTION) {
LOG_ERR("invalid resolution %d", sequence->resolution);
return -EINVAL;
}
if (find_msb_set(sequence->channels) > AD559X_PIN_MAX) {
LOG_ERR("invalid channels in mask: 0x%08x", sequence->channels);
return -EINVAL;
}
ret = adc_ad559x_validate_buffer_size(dev, sequence);
if (ret < 0) {
LOG_ERR("insufficient buffer size");
return ret;
}
data->buffer = sequence->buffer;
adc_context_start_read(&data->ctx, sequence);
return adc_context_wait_for_completion(&data->ctx);
}
static int adc_ad559x_read_channel(const struct device *dev, uint8_t channel, uint16_t *result)
{
const struct adc_ad559x_config *config = dev->config;
uint16_t val;
uint8_t conv_channel;
int ret;
/* Select channel */
ret = mfd_ad559x_write_reg(config->mfd_dev, AD559X_REG_SEQ_ADC, BIT(channel));
if (ret < 0) {
return ret;
}
if (mfd_ad559x_has_pointer_byte_map(config->mfd_dev)) {
/* Start readback */
val = AD559X_ADC_RD_POINTER;
ret = mfd_ad559x_write_raw(config->mfd_dev, (uint8_t *)&val,
AD559X_ADC_RD_POINTER_SIZE);
if (ret < 0) {
return ret;
}
/* Read channel */
ret = mfd_ad559x_read_raw(config->mfd_dev, (uint8_t *)&val, sizeof(val));
if (ret < 0) {
return ret;
}
} else {
/*
* Invalid data:
* See Figure 46. Single-Channel ADC Conversion Sequence.
* The first conversion result always returns invalid data.
*/
(void)mfd_ad559x_read_raw(config->mfd_dev, (uint8_t *)&val, sizeof(val));
ret = mfd_ad559x_read_raw(config->mfd_dev, (uint8_t *)&val, sizeof(val));
if (ret < 0) {
return ret;
}
}
val = sys_be16_to_cpu(val);
/*
* Invalid data:
* See AD5592 "ADC section" in "Theory of operation" chapter.
* Valid ADC result has MSB bit set to 0.
*/
if ((val & AD559X_ADC_RES_IND_BIT) != 0) {
return -EAGAIN;
}
/*
* Invalid channel converted:
* See AD5592 "ADC section" in "Theory of operation" chapter.
* Conversion result contains channel number which should match requested channel.
*/
conv_channel = FIELD_GET(AD559X_ADC_RES_CHAN_MASK, val);
if (conv_channel != channel) {
return -EIO;
}
*result = val & AD559X_ADC_RES_VAL_MASK;
return 0;
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct adc_ad559x_data *data = CONTAINER_OF(ctx, struct adc_ad559x_data, ctx);
data->channels = ctx->sequence.channels;
data->repeat_buffer = data->buffer;
k_sem_give(&data->sem);
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx, bool repeat_sampling)
{
struct adc_ad559x_data *data = CONTAINER_OF(ctx, struct adc_ad559x_data, ctx);
if (repeat_sampling) {
data->buffer = data->repeat_buffer;
}
}
static void adc_ad559x_acquisition_thread(struct adc_ad559x_data *data)
{
uint16_t result;
uint8_t channel;
int ret;
while (true) {
k_sem_take(&data->sem, K_FOREVER);
while (data->channels != 0) {
channel = find_lsb_set(data->channels) - 1;
ret = adc_ad559x_read_channel(data->dev, channel, &result);
if (ret < 0) {
LOG_ERR("failed to read channel %d (ret %d)", channel, ret);
adc_context_complete(&data->ctx, ret);
break;
}
*data->buffer++ = result;
WRITE_BIT(data->channels, channel, 0);
}
adc_context_on_sampling_done(&data->ctx, data->dev);
}
}
static int adc_ad559x_read_async(const struct device *dev, const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
struct adc_ad559x_data *data = dev->data;
int ret;
adc_context_lock(&data->ctx, async ? true : false, async);
ret = adc_ad559x_start_read(dev, sequence);
adc_context_release(&data->ctx, ret);
return ret;
}
static int adc_ad559x_read(const struct device *dev, const struct adc_sequence *sequence)
{
return adc_ad559x_read_async(dev, sequence, NULL);
}
static int adc_ad559x_init(const struct device *dev)
{
const struct adc_ad559x_config *config = dev->config;
struct adc_ad559x_data *data = dev->data;
k_tid_t tid;
int ret;
if (!device_is_ready(config->mfd_dev)) {
return -ENODEV;
}
ret = mfd_ad559x_write_reg(config->mfd_dev, AD559X_REG_PD_REF_CTRL, AD559X_EN_REF);
if (ret < 0) {
return ret;
}
data->dev = dev;
k_sem_init(&data->sem, 0, 1);
adc_context_init(&data->ctx);
tid = k_thread_create(&data->thread, data->stack,
K_KERNEL_STACK_SIZEOF(data->stack),
(k_thread_entry_t)adc_ad559x_acquisition_thread, data, NULL, NULL,
CONFIG_ADC_AD559X_ACQUISITION_THREAD_PRIO, 0, K_NO_WAIT);
if (IS_ENABLED(CONFIG_THREAD_NAME)) {
ret = k_thread_name_set(tid, "adc_ad559x");
if (ret < 0) {
return ret;
}
}
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
static const struct adc_driver_api adc_ad559x_api = {
.channel_setup = adc_ad559x_channel_setup,
.read = adc_ad559x_read,
#ifdef CONFIG_ADC_ASYNC
.read_async = adc_ad559x_read_async,
#endif
.ref_internal = AD559X_ADC_VREF_MV,
};
#define ADC_AD559X_DEFINE(inst) \
static const struct adc_ad559x_config adc_ad559x_config##inst = { \
.mfd_dev = DEVICE_DT_GET(DT_INST_PARENT(inst)), \
}; \
\
static struct adc_ad559x_data adc_ad559x_data##inst; \
\
DEVICE_DT_INST_DEFINE(inst, adc_ad559x_init, NULL, &adc_ad559x_data##inst, \
&adc_ad559x_config##inst, POST_KERNEL, CONFIG_MFD_INIT_PRIORITY, \
&adc_ad559x_api);
DT_INST_FOREACH_STATUS_OKAY(ADC_AD559X_DEFINE)
``` | /content/code_sandbox/drivers/adc/adc_ad559x.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,074 |
```unknown
# ADC configuration options
config ADC_SMARTBOND_GPADC
bool "Renesas SmartBond(tm) ADC driver for ADC"
default y
depends on DT_HAS_RENESAS_SMARTBOND_ADC_ENABLED
select ADC_CONFIGURABLE_INPUTS
help
Enable support for ADC driver for Renesas SmartBond(tm) MCU series.
config ADC_SMARTBOND_SDADC
bool "Renesas SmartBond(tm) ADC driver for Sigma-Delta ADC"
default y
depends on DT_HAS_RENESAS_SMARTBOND_SDADC_ENABLED
select ADC_CONFIGURABLE_INPUTS
help
Enable support for ADC driver for Renesas SmartBond(tm) MCU series.
``` | /content/code_sandbox/drivers/adc/Kconfig.smartbond | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 146 |
```unknown
# ADC configuration options
config ADC_GD32
bool "GD32 ADC driver"
default y
depends on DT_HAS_GD_GD32_ADC_ENABLED
help
Enable GigaDevice GD32 ADC driver
``` | /content/code_sandbox/drivers/adc/Kconfig.gd32 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 47 |
```unknown
# ADC configuration options
config ADC_SAM_AFEC
bool "SAM ADC Driver"
default y
depends on DT_HAS_ATMEL_SAM_AFEC_ENABLED
help
Enable Atmel SAM MCU Family Analog-to-Digital Converter (ADC) driver
based on AFEC module.
``` | /content/code_sandbox/drivers/adc/Kconfig.sam_afec | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 62 |
```unknown
# Microchip XEC ADC configuration
config ADC_XEC
bool "Microchip XEC series ADC driver"
default y
depends on DT_HAS_MICROCHIP_XEC_ADC_ENABLED
help
Enable ADC driver for Microchip XEC MCU series.
``` | /content/code_sandbox/drivers/adc/Kconfig.xec | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 54 |
```unknown
#
config ADC_ADS1119
bool "Texas instruments ADS1119 I2C"
default y
depends on DT_HAS_TI_ADS1119_ENABLED
select I2C
select ADC_CONFIGURABLE_INPUTS
help
Enable the driver implementation for the ADS1119
if ADC_ADS1119
config ADC_ADS1119_ASYNC_THREAD_INIT_PRIO
int "ADC ADS1119 async thread priority"
default 0
config ADC_ADS1119_ACQUISITION_THREAD_STACK_SIZE
int "Stack size for the ADC data acquisition thread"
default 400
help
Size of the stack used for the internal data acquisition
thread.
endif
``` | /content/code_sandbox/drivers/adc/Kconfig.ads1119 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 142 |
```unknown
# NPCX ADC driver configuration options
config ADC_NPCX
bool "Nuvoton NPCX embedded controller (EC) ADC driver"
default y
depends on DT_HAS_NUVOTON_NPCX_ADC_ENABLED
help
This option enables the ADC driver for NPCX family of
processors.
Say y if you wish to use ADC channels on NPCX MCU.
if ADC_NPCX
config ADC_NPCX_CMP_V1
bool "ADC comparator version 1 support"
default y if SOC_SERIES_NPCX7 || SOC_SERIES_NPCX9
help
This option enables ADC comparator V1 support.
config ADC_NPCX_CMP_V2
bool "ADC comparator version 2 support"
default y if SOC_SERIES_NPCX4
help
This option enables ADC comparator V2 support.
endif #ADC_NPCX
``` | /content/code_sandbox/drivers/adc/Kconfig.npcx | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 175 |
```unknown
# MAX1125X ADC configuration options
config ADC_MAX1125X
bool "MAX1125X driver"
default y
depends on DT_HAS_MAXIM_MAX11254_ENABLED || DT_HAS_MAXIM_MAX11253_ENABLED
select SPI
select ADC_CONFIGURABLE_INPUTS
help
Enable the driver implementation for the MAX1125X
if ADC_MAX1125X
config ADC_MAX1125X_INIT_PRIORITY
int "Init priority"
default 80
help
ADS1X1X ADC device driver initialization priority.
config ADC_MAX1125X_ASYNC_THREAD_INIT_PRIORITY
int "ADC MAX1125X async thread priority"
default 0
config ADC_MAX1125X_ACQUISITION_THREAD_PRIORITY
int "Priority for the ADC data acquisition thread"
default 0
help
Priority level for the internal ADC data acquisition thread.
config ADC_MAX1125X_ACQUISITION_THREAD_STACK_SIZE
int "Stack size for the ADC data acquisition thread"
default 400
help
Size of the stack used for the internal data acquisition
thread.
endif # ADC_MAX1125X
``` | /content/code_sandbox/drivers/adc/Kconfig.max1125x | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 233 |
```c
/*
*
*/
#define DT_DRV_COMPAT nxp_gau_adc
#include <zephyr/drivers/adc.h>
#include <zephyr/irq.h>
#include <errno.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(adc_mcux_gau_adc, CONFIG_ADC_LOG_LEVEL);
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
#include <fsl_adc.h>
#define NUM_ADC_CHANNELS 16
struct mcux_gau_adc_config {
ADC_Type *base;
void (*irq_config_func)(const struct device *dev);
adc_clock_divider_t clock_div;
adc_analog_portion_power_mode_t power_mode;
bool input_gain_buffer;
adc_calibration_ref_t cal_volt;
};
struct mcux_gau_adc_data {
const struct device *dev;
struct adc_context ctx;
adc_channel_source_t channel_sources[NUM_ADC_CHANNELS];
uint8_t scan_length;
uint16_t *results;
size_t results_length;
uint16_t *repeat;
struct k_work read_samples_work;
};
static int mcux_gau_adc_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
const struct mcux_gau_adc_config *config = dev->config;
struct mcux_gau_adc_data *data = dev->data;
ADC_Type *base = config->base;
uint8_t channel_id = channel_cfg->channel_id;
uint8_t source_channel = channel_cfg->input_positive;
uint32_t tmp_reg;
if (channel_cfg->differential) {
LOG_ERR("Differential channels not yet supported");
return -ENOTSUP;
}
if (channel_id >= NUM_ADC_CHANNELS) {
LOG_ERR("ADC does not support more than %d channels", NUM_ADC_CHANNELS);
return -ENOTSUP;
}
if (source_channel > 12 && source_channel != 15) {
LOG_ERR("Invalid source channel");
return -EINVAL;
}
/* Set Acquisition/Warmup time */
tmp_reg = base->ADC_REG_INTERVAL;
base->ADC_REG_INTERVAL &= ~ADC_ADC_REG_INTERVAL_WARMUP_TIME_MASK;
base->ADC_REG_INTERVAL &= ~ADC_ADC_REG_INTERVAL_BYPASS_WARMUP_MASK;
if (channel_cfg->acquisition_time == 0) {
base->ADC_REG_INTERVAL |= ADC_ADC_REG_INTERVAL_BYPASS_WARMUP_MASK;
} else if (channel_cfg->acquisition_time <= 32) {
base->ADC_REG_INTERVAL |=
ADC_ADC_REG_INTERVAL_WARMUP_TIME(channel_cfg->acquisition_time - 1);
} else {
LOG_ERR("Invalid acquisition time requested of ADC");
return -EINVAL;
}
/* If user changed the warmup time, warn */
if (base->ADC_REG_INTERVAL != tmp_reg) {
LOG_WRN("Acquisition/Warmup time is global to entire ADC peripheral, "
"i.e. channel_setup will override this property for all previous channels.");
}
/* Set Input Gain */
tmp_reg = base->ADC_REG_ANA;
base->ADC_REG_ANA &= ~ADC_ADC_REG_ANA_INBUF_GAIN_MASK;
if (channel_cfg->gain == ADC_GAIN_1) {
base->ADC_REG_ANA |= ADC_ADC_REG_ANA_INBUF_GAIN(kADC_InputGain1);
} else if (channel_cfg->gain == ADC_GAIN_1_2) {
base->ADC_REG_ANA |= ADC_ADC_REG_ANA_INBUF_GAIN(kADC_InputGain0P5);
} else if (channel_cfg->gain == ADC_GAIN_2) {
base->ADC_REG_ANA |= ADC_ADC_REG_ANA_INBUF_GAIN(kADC_InputGain2);
} else {
LOG_ERR("Invalid gain");
return -EINVAL;
}
/* If user changed the gain, warn */
if (base->ADC_REG_ANA != tmp_reg) {
LOG_WRN("Input gain is global to entire ADC peripheral, "
"i.e. channel_setup will override this property for all previous channels.");
}
/* Set Reference voltage of ADC */
tmp_reg = base->ADC_REG_ANA;
base->ADC_REG_ANA &= ~ADC_ADC_REG_ANA_VREF_SEL_MASK;
if (channel_cfg->reference == ADC_REF_INTERNAL) {
base->ADC_REG_ANA |= ADC_ADC_REG_ANA_VREF_SEL(kADC_Vref1P2V);
} else if (channel_cfg->reference == ADC_REF_EXTERNAL0) {
base->ADC_REG_ANA |= ADC_ADC_REG_ANA_VREF_SEL(kADC_VrefExternal);
} else if (channel_cfg->reference == ADC_REF_VDD_1) {
base->ADC_REG_ANA |= ADC_ADC_REG_ANA_VREF_SEL(kADC_Vref1P8V);
} else {
LOG_ERR("Vref not supported");
return -ENOTSUP;
}
/* if user changed the reference voltage, warn */
if (base->ADC_REG_ANA != tmp_reg) {
LOG_WRN("Reference voltage is global to entire ADC peripheral, "
"i.e. channel_setup will override this property for all previous channels.");
}
data->channel_sources[channel_id] = source_channel;
return 0;
}
static void mcux_gau_adc_read_samples(struct k_work *work)
{
struct mcux_gau_adc_data *data =
CONTAINER_OF(work, struct mcux_gau_adc_data,
read_samples_work);
const struct device *dev = data->dev;
const struct mcux_gau_adc_config *config = dev->config;
ADC_Type *base = config->base;
/* using this variable to prevent buffer overflow */
size_t length = data->results_length;
while ((ADC_GetFifoDataCount(base) > 0) && (--length > 0)) {
*(data->results++) = (uint16_t)ADC_GetConversionResult(base);
}
adc_context_on_sampling_done(&data->ctx, dev);
}
static void mcux_gau_adc_isr(const struct device *dev)
{
const struct mcux_gau_adc_config *config = dev->config;
struct mcux_gau_adc_data *data = dev->data;
ADC_Type *base = config->base;
if (ADC_GetStatusFlags(base) & kADC_DataReadyInterruptFlag) {
/* Clear flag to avoid infinite interrupt */
ADC_ClearStatusFlags(base, kADC_DataReadyInterruptFlag);
/* offload and do not block during irq */
k_work_submit(&data->read_samples_work);
} else {
LOG_ERR("ADC received unimplemented interrupt");
}
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct mcux_gau_adc_data *data =
CONTAINER_OF(ctx, struct mcux_gau_adc_data, ctx);
const struct mcux_gau_adc_config *config = data->dev->config;
ADC_Type *base = config->base;
ADC_StopConversion(base);
ADC_DoSoftwareTrigger(base);
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
bool repeat_sampling)
{
struct mcux_gau_adc_data *data =
CONTAINER_OF(ctx, struct mcux_gau_adc_data, ctx);
if (repeat_sampling) {
data->results = data->repeat;
}
}
static int mcux_gau_adc_do_read(const struct device *dev,
const struct adc_sequence *sequence)
{
const struct mcux_gau_adc_config *config = dev->config;
ADC_Type *base = config->base;
struct mcux_gau_adc_data *data = dev->data;
uint8_t num_channels = 0;
/* if user selected channel >= NUM_ADC_CHANNELS that is invalid */
if (sequence->channels & (0xFFFF << NUM_ADC_CHANNELS)) {
LOG_ERR("Invalid channels selected for sequence");
return -EINVAL;
}
/* Count channels */
for (int i = 0; i < NUM_ADC_CHANNELS; i++) {
num_channels += ((sequence->channels & (0x1 << i)) ? 1 : 0);
}
/* Buffer must hold (number of samples per channel) * (number of channels) samples */
if ((sequence->options != NULL && sequence->buffer_size <
((1 + sequence->options->extra_samplings) * num_channels)) ||
(sequence->options == NULL && sequence->buffer_size < num_channels)) {
LOG_ERR("Buffer size too small");
return -ENOMEM;
}
/* Set scan length in data struct for isr to understand & set scan length register */
base->ADC_REG_CONFIG &= ~ADC_ADC_REG_CONFIG_SCAN_LENGTH_MASK;
data->scan_length = num_channels;
/* Register Value is 1 less than what it represents */
base->ADC_REG_CONFIG |= ADC_ADC_REG_CONFIG_SCAN_LENGTH(data->scan_length - 1);
/* Set up scan channels */
for (int channel = 0; channel < NUM_ADC_CHANNELS; channel++) {
if (sequence->channels & (0x1 << channel)) {
ADC_SetScanChannel(base,
data->scan_length - num_channels--,
data->channel_sources[channel]);
}
}
/* Set resolution of ADC */
base->ADC_REG_ANA &= ~ADC_ADC_REG_ANA_RES_SEL_MASK;
/* odd numbers are for differential channels */
if (sequence->resolution == 12 || sequence->resolution == 11) {
base->ADC_REG_ANA |= ADC_ADC_REG_ANA_RES_SEL(kADC_Resolution12Bit);
} else if (sequence->resolution == 14 || sequence->resolution == 13) {
base->ADC_REG_ANA |= ADC_ADC_REG_ANA_RES_SEL(kADC_Resolution14Bit);
} else if (sequence->resolution == 16 || sequence->resolution == 15) {
base->ADC_REG_ANA |= ADC_ADC_REG_ANA_RES_SEL(kADC_Resolution16Bit);
} else {
LOG_ERR("Invalid resolution");
return -EINVAL;
}
/* Set oversampling */
base->ADC_REG_CONFIG &= ~ADC_ADC_REG_CONFIG_AVG_SEL_MASK;
if (sequence->oversampling == 0) {
base->ADC_REG_CONFIG |= ADC_ADC_REG_CONFIG_AVG_SEL(kADC_AverageNone);
} else if (sequence->oversampling == 1) {
base->ADC_REG_CONFIG |= ADC_ADC_REG_CONFIG_AVG_SEL(kADC_Average2);
} else if (sequence->oversampling == 2) {
base->ADC_REG_CONFIG |= ADC_ADC_REG_CONFIG_AVG_SEL(kADC_Average4);
} else if (sequence->oversampling == 3) {
base->ADC_REG_CONFIG |= ADC_ADC_REG_CONFIG_AVG_SEL(kADC_Average8);
} else if (sequence->oversampling == 4) {
base->ADC_REG_CONFIG |= ADC_ADC_REG_CONFIG_AVG_SEL(kADC_Average16);
} else {
LOG_ERR("Invalid oversampling setting");
return -EINVAL;
}
/* Calibrate if requested */
if (sequence->calibrate) {
if (ADC_DoAutoCalibration(base, config->cal_volt)) {
LOG_WRN("Calibration of ADC failed!");
}
}
data->results = sequence->buffer;
data->results_length = sequence->buffer_size;
data->repeat = sequence->buffer;
adc_context_start_read(&data->ctx, sequence);
return adc_context_wait_for_completion(&data->ctx);
}
static int mcux_gau_adc_read(const struct device *dev,
const struct adc_sequence *sequence)
{
struct mcux_gau_adc_data *data = dev->data;
int error;
adc_context_lock(&data->ctx, false, NULL);
error = mcux_gau_adc_do_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
#ifdef CONFIG_ADC_ASYNC
static int mcux_gau_adc_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
struct mcux_gau_adc_data *data = dev->data;
int error;
adc_context_lock(&data->ctx, true, async);
error = mcux_gau_adc_do_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
#endif
static int mcux_gau_adc_init(const struct device *dev)
{
const struct mcux_gau_adc_config *config = dev->config;
struct mcux_gau_adc_data *data = dev->data;
ADC_Type *base = config->base;
adc_config_t adc_config;
data->dev = dev;
LOG_DBG("Initializing ADC");
ADC_GetDefaultConfig(&adc_config);
/* DT configs */
adc_config.clockDivider = config->clock_div;
adc_config.powerMode = config->power_mode;
adc_config.enableInputGainBuffer = config->input_gain_buffer;
adc_config.triggerSource = kADC_TriggerSourceSoftware;
adc_config.inputMode = kADC_InputSingleEnded;
/* One shot meets the needs of the current zephyr adc context/api */
adc_config.conversionMode = kADC_ConversionOneShot;
/* since using one shot mode, just interrupt on one sample (agnostic to # channels) */
adc_config.fifoThreshold = kADC_FifoThresholdData1;
/* 32 bit width not supported in this driver; zephyr seems to use 16 bit */
adc_config.resultWidth = kADC_ResultWidth16;
adc_config.enableDMA = false;
adc_config.enableADC = true;
ADC_Init(base, &adc_config);
if (ADC_DoAutoCalibration(base, config->cal_volt)) {
LOG_WRN("Calibration of ADC failed!");
}
ADC_ClearStatusFlags(base, kADC_DataReadyInterruptFlag);
config->irq_config_func(dev);
ADC_EnableInterrupts(base, kADC_DataReadyInterruptEnable);
k_work_init(&data->read_samples_work, &mcux_gau_adc_read_samples);
adc_context_init(&data->ctx);
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
static const struct adc_driver_api mcux_gau_adc_driver_api = {
.channel_setup = mcux_gau_adc_channel_setup,
.read = mcux_gau_adc_read,
#ifdef CONFIG_ADC_ASYNC
.read_async = mcux_gau_adc_read_async,
#endif
.ref_internal = 1200,
};
#define GAU_ADC_MCUX_INIT(n) \
\
static void mcux_gau_adc_config_func_##n(const struct device *dev); \
\
static const struct mcux_gau_adc_config mcux_gau_adc_config_##n = { \
.base = (ADC_Type *)DT_INST_REG_ADDR(n), \
.irq_config_func = mcux_gau_adc_config_func_##n, \
/* Minus one because DT starts at 1, HAL enum starts at 0 */ \
.clock_div = DT_INST_PROP(n, nxp_clock_divider) - 1, \
.power_mode = DT_INST_ENUM_IDX(n, nxp_power_mode), \
.input_gain_buffer = DT_INST_PROP(n, nxp_input_buffer), \
.cal_volt = DT_INST_ENUM_IDX(n, nxp_calibration_voltage), \
}; \
\
static struct mcux_gau_adc_data mcux_gau_adc_data_##n = {0}; \
\
DEVICE_DT_INST_DEFINE(n, &mcux_gau_adc_init, NULL, \
&mcux_gau_adc_data_##n, &mcux_gau_adc_config_##n, \
POST_KERNEL, CONFIG_ADC_INIT_PRIORITY, \
&mcux_gau_adc_driver_api); \
\
static void mcux_gau_adc_config_func_##n(const struct device *dev) \
{ \
IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), \
mcux_gau_adc_isr, DEVICE_DT_INST_GET(n), 0); \
irq_enable(DT_INST_IRQN(n)); \
}
DT_INST_FOREACH_STATUS_OKAY(GAU_ADC_MCUX_INIT)
``` | /content/code_sandbox/drivers/adc/adc_mcux_gau_adc.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,424 |
```unknown
# ADC configuration options
config ADC_NRFX_ADC
bool "nRF ADC nrfx driver"
default y
depends on DT_HAS_NORDIC_NRF_ADC_ENABLED
select NRFX_ADC
select ADC_CONFIGURABLE_INPUTS
help
Enable support for nrfx ADC driver for nRF51 MCU series.
config ADC_NRFX_ADC_CHANNEL_COUNT
int "Number of ADC channels"
depends on ADC_NRFX_ADC
range 1 8
default 1
help
Number of ADC channels to be supported by the driver. Each channel
needs a dedicated structure in RAM that stores the ADC settings
to be used when sampling this channel.
config ADC_NRFX_SAADC
bool "nRF SAADC nrfx driver"
default y
depends on DT_HAS_NORDIC_NRF_SAADC_ENABLED
select ADC_CONFIGURABLE_INPUTS
help
Enable support for nrfx SAADC driver.
``` | /content/code_sandbox/drivers/adc/Kconfig.nrfx | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 197 |
```c
/*
*/
#define DT_DRV_COMPAT nxp_vf610_adc
#include <errno.h>
#include <zephyr/drivers/adc.h>
#include <adc_imx6sx.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(vf610_adc, CONFIG_ADC_LOG_LEVEL);
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
struct vf610_adc_config {
ADC_Type *base;
uint8_t clock_source;
uint8_t divide_ratio;
void (*irq_config_func)(const struct device *dev);
};
struct vf610_adc_data {
const struct device *dev;
struct adc_context ctx;
uint16_t *buffer;
uint16_t *repeat_buffer;
uint32_t channels;
uint8_t channel_id;
};
static int vf610_adc_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
uint8_t channel_id = channel_cfg->channel_id;
if (channel_id > (ADC_HC0_ADCH_MASK >> ADC_HC0_ADCH_SHIFT)) {
LOG_ERR("Channel %d is not valid", channel_id);
return -EINVAL;
}
if (channel_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
LOG_ERR("Invalid channel acquisition time");
return -EINVAL;
}
if (channel_cfg->differential) {
LOG_ERR("Differential channels are not supported");
return -EINVAL;
}
if (channel_cfg->gain != ADC_GAIN_1) {
LOG_ERR("Invalid channel gain");
return -EINVAL;
}
if (channel_cfg->reference != ADC_REF_INTERNAL) {
LOG_ERR("Invalid channel reference");
return -EINVAL;
}
return 0;
}
static int start_read(const struct device *dev, const struct adc_sequence *sequence)
{
const struct vf610_adc_config *config = dev->config;
struct vf610_adc_data *data = dev->data;
enum _adc_average_number mode;
enum _adc_resolution_mode resolution;
int error;
ADC_Type *base = config->base;
switch (sequence->resolution) {
case 8:
resolution = adcResolutionBit8;
break;
case 10:
resolution = adcResolutionBit10;
break;
case 12:
resolution = adcResolutionBit12;
break;
default:
LOG_ERR("Invalid resolution");
return -EINVAL;
}
ADC_SetResolutionMode(base, resolution);
switch (sequence->oversampling) {
case 0:
mode = adcAvgNumNone;
break;
case 2:
mode = adcAvgNum4;
break;
case 3:
mode = adcAvgNum8;
break;
case 4:
mode = adcAvgNum16;
break;
case 5:
mode = adcAvgNum32;
break;
default:
LOG_ERR("Invalid oversampling");
return -EINVAL;
}
ADC_SetAverageNum(config->base, mode);
data->buffer = sequence->buffer;
adc_context_start_read(&data->ctx, sequence);
error = adc_context_wait_for_completion(&data->ctx);
return error;
}
static int vf610_adc_read(const struct device *dev,
const struct adc_sequence *sequence)
{
struct vf610_adc_data *data = dev->data;
int error;
adc_context_lock(&data->ctx, false, NULL);
error = start_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
#ifdef CONFIG_ADC_ASYNC
static int vf610_adc_read_async(struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
struct vf610_adc_data *data = dev->driver_data;
int error;
adc_context_lock(&data->ctx, true, async);
error = start_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
#endif
static void vf610_adc_start_channel(const struct device *dev)
{
const struct vf610_adc_config *config = dev->config;
struct vf610_adc_data *data = dev->data;
data->channel_id = find_lsb_set(data->channels) - 1;
LOG_DBG("Starting channel %d", data->channel_id);
ADC_SetIntCmd(config->base, true);
ADC_TriggerSingleConvert(config->base, data->channel_id);
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct vf610_adc_data *data =
CONTAINER_OF(ctx, struct vf610_adc_data, ctx);
data->channels = ctx->sequence.channels;
data->repeat_buffer = data->buffer;
vf610_adc_start_channel(data->dev);
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
bool repeat_sampling)
{
struct vf610_adc_data *data =
CONTAINER_OF(ctx, struct vf610_adc_data, ctx);
if (repeat_sampling) {
data->buffer = data->repeat_buffer;
}
}
static void vf610_adc_isr(void *arg)
{
struct device *dev = (struct device *)arg;
const struct vf610_adc_config *config = dev->config;
struct vf610_adc_data *data = dev->data;
ADC_Type *base = config->base;
uint16_t result;
result = ADC_GetConvertResult(base);
LOG_DBG("Finished channel %d. Result is 0x%04x",
data->channel_id, result);
*data->buffer++ = result;
data->channels &= ~BIT(data->channel_id);
if (data->channels) {
vf610_adc_start_channel(dev);
} else {
adc_context_on_sampling_done(&data->ctx, dev);
}
}
static int vf610_adc_init(const struct device *dev)
{
const struct vf610_adc_config *config = dev->config;
struct vf610_adc_data *data = dev->data;
ADC_Type *base = config->base;
adc_init_config_t adc_config;
adc_config.averageNumber = adcAvgNumNone;
adc_config.resolutionMode = adcResolutionBit12;
adc_config.clockSource = config->clock_source;
adc_config.divideRatio = config->divide_ratio;
ADC_Init(base, &adc_config);
ADC_SetConvertTrigMode(base, adcSoftwareTrigger);
ADC_SetCalibration(base, true);
config->irq_config_func(dev);
data->dev = dev;
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
static const struct adc_driver_api vf610_adc_driver_api = {
.channel_setup = vf610_adc_channel_setup,
.read = vf610_adc_read,
#ifdef CONFIG_ADC_ASYNC
.read_async = vf610_adc_read_async,
#endif
};
#define VF610_ADC_INIT(n) \
static void vf610_adc_config_func_##n(const struct device *dev);\
\
static const struct vf610_adc_config vf610_adc_config_##n = { \
.base = (ADC_Type *)DT_INST_REG_ADDR(n), \
.clock_source = DT_INST_PROP(n, clk_source), \
.divide_ratio = DT_INST_PROP(n, clk_divider), \
.irq_config_func = vf610_adc_config_func_##n, \
}; \
\
static struct vf610_adc_data vf610_adc_data_##n = { \
ADC_CONTEXT_INIT_TIMER(vf610_adc_data_##n, ctx), \
ADC_CONTEXT_INIT_LOCK(vf610_adc_data_##n, ctx), \
ADC_CONTEXT_INIT_SYNC(vf610_adc_data_##n, ctx), \
}; \
\
DEVICE_DT_INST_DEFINE(n, &vf610_adc_init, \
NULL, &vf610_adc_data_##n, \
&vf610_adc_config_##n, POST_KERNEL, \
CONFIG_ADC_INIT_PRIORITY, \
&vf610_adc_driver_api); \
\
static void vf610_adc_config_func_##n(const struct device *dev) \
{ \
IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), \
vf610_adc_isr, \
DEVICE_DT_INST_GET(n), 0); \
\
irq_enable(DT_INST_IRQN(n)); \
}
DT_INST_FOREACH_STATUS_OKAY(VF610_ADC_INIT)
``` | /content/code_sandbox/drivers/adc/adc_vf610.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,754 |
```c
/*
*
*/
#define DT_DRV_COMPAT gd_gd32_adc
#include <errno.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/clock_control/gd32.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/adc.h>
#include <zephyr/drivers/reset.h>
#include <zephyr/devicetree.h>
#include <zephyr/irq.h>
#include <gd32_adc.h>
#include <gd32_rcu.h>
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(adc_gd32, CONFIG_ADC_LOG_LEVEL);
/**
* @brief gd32 adc irq have some special cases as below:
* 1. adc number no larger than 3.
* 2. adc0 and adc1 share the same irq number.
* 3. For gd32f4xx, adc2 share the same irq number with adc0 and adc1.
*
* To cover this cases, gd32_adc driver use node-label 'adc0', 'adc1' and
* 'adc2' to handle gd32 adc irq config directly.'
*
* @note Sorry for the restriction, But new added gd32 adc node-label must be 'adc0',
* 'adc1' and 'adc2'.
*/
#define ADC0_NODE DT_NODELABEL(adc0)
#define ADC1_NODE DT_NODELABEL(adc1)
#define ADC2_NODE DT_NODELABEL(adc2)
#define ADC0_ENABLE DT_NODE_HAS_STATUS(ADC0_NODE, okay)
#define ADC1_ENABLE DT_NODE_HAS_STATUS(ADC1_NODE, okay)
#define ADC2_ENABLE DT_NODE_HAS_STATUS(ADC2_NODE, okay)
#ifndef ADC0
/**
* @brief The name of gd32 ADC HAL are different between single and multi ADC SoCs.
* This adjust the single ADC SoC HAL, so we can call gd32 ADC HAL in a common way.
*/
#undef ADC_STAT
#undef ADC_CTL0
#undef ADC_CTL1
#undef ADC_SAMPT0
#undef ADC_SAMPT1
#undef ADC_RSQ2
#undef ADC_RDATA
#define ADC_STAT(adc0) REG32((adc0) + 0x00000000U)
#define ADC_CTL0(adc0) REG32((adc0) + 0x00000004U)
#define ADC_CTL1(adc0) REG32((adc0) + 0x00000008U)
#define ADC_SAMPT0(adc0) REG32((adc0) + 0x0000000CU)
#define ADC_SAMPT1(adc0) REG32((adc0) + 0x00000010U)
#define ADC_RSQ2(adc0) REG32((adc0) + 0x00000034U)
#define ADC_RDATA(adc0) REG32((adc0) + 0x0000004CU)
#endif
#define SPT_WIDTH 3U
#define SAMPT1_SIZE 10U
#if defined(CONFIG_SOC_SERIES_GD32F4XX)
#define SMP_TIME(x) ADC_SAMPLETIME_##x
static const uint16_t acq_time_tbl[8] = {3, 15, 28, 56, 84, 112, 144, 480};
static const uint32_t table_samp_time[] = {
SMP_TIME(3),
SMP_TIME(15),
SMP_TIME(28),
SMP_TIME(56),
SMP_TIME(84),
SMP_TIME(112),
SMP_TIME(144),
SMP_TIME(480)
};
#elif defined(CONFIG_SOC_SERIES_GD32L23X)
#define SMP_TIME(x) ADC_SAMPLETIME_##x##POINT5
static const uint16_t acq_time_tbl[8] = {3, 8, 14, 29, 42, 56, 72, 240};
static const uint32_t table_samp_time[] = {
SMP_TIME(2),
SMP_TIME(7),
SMP_TIME(13),
SMP_TIME(28),
SMP_TIME(41),
SMP_TIME(55),
SMP_TIME(71),
SMP_TIME(239),
};
#elif defined(CONFIG_SOC_SERIES_GD32A50X)
#define SMP_TIME(x) ADC_SAMPLETIME_##x##POINT5
static const uint16_t acq_time_tbl[8] = {3, 15, 28, 56, 84, 112, 144, 480};
static const uint32_t table_samp_time[] = {
SMP_TIME(2),
SMP_TIME(14),
SMP_TIME(27),
SMP_TIME(55),
SMP_TIME(83),
SMP_TIME(111),
SMP_TIME(143),
SMP_TIME(479)
};
#else
#define SMP_TIME(x) ADC_SAMPLETIME_##x##POINT5
static const uint16_t acq_time_tbl[8] = {2, 8, 14, 29, 42, 56, 72, 240};
static const uint32_t table_samp_time[] = {
SMP_TIME(1),
SMP_TIME(7),
SMP_TIME(13),
SMP_TIME(28),
SMP_TIME(41),
SMP_TIME(55),
SMP_TIME(71),
SMP_TIME(239)
};
#endif
struct adc_gd32_config {
uint32_t reg;
#ifdef CONFIG_SOC_SERIES_GD32F3X0
uint32_t rcu_clock_source;
#endif
uint16_t clkid;
struct reset_dt_spec reset;
uint8_t channels;
const struct pinctrl_dev_config *pcfg;
uint8_t irq_num;
void (*irq_config_func)(void);
};
struct adc_gd32_data {
struct adc_context ctx;
const struct device *dev;
uint16_t *buffer;
uint16_t *repeat_buffer;
};
static void adc_gd32_isr(const struct device *dev)
{
struct adc_gd32_data *data = dev->data;
const struct adc_gd32_config *cfg = dev->config;
if (ADC_STAT(cfg->reg) & ADC_STAT_EOC) {
*data->buffer++ = ADC_RDATA(cfg->reg);
/* Disable EOC interrupt. */
ADC_CTL0(cfg->reg) &= ~ADC_CTL0_EOCIE;
/* Clear EOC bit. */
ADC_STAT(cfg->reg) &= ~ADC_STAT_EOC;
adc_context_on_sampling_done(&data->ctx, dev);
}
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct adc_gd32_data *data = CONTAINER_OF(ctx, struct adc_gd32_data, ctx);
const struct device *dev = data->dev;
const struct adc_gd32_config *cfg = dev->config;
data->repeat_buffer = data->buffer;
/* Enable EOC interrupt */
ADC_CTL0(cfg->reg) |= ADC_CTL0_EOCIE;
/* Set ADC software conversion trigger. */
ADC_CTL1(cfg->reg) |= ADC_CTL1_SWRCST;
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
bool repeat_sampling)
{
struct adc_gd32_data *data = CONTAINER_OF(ctx, struct adc_gd32_data, ctx);
if (repeat_sampling) {
data->buffer = data->repeat_buffer;
}
}
static inline void adc_gd32_calibration(const struct adc_gd32_config *cfg)
{
ADC_CTL1(cfg->reg) |= ADC_CTL1_RSTCLB;
/* Wait for calibration registers initialized. */
while (ADC_CTL1(cfg->reg) & ADC_CTL1_RSTCLB) {
}
ADC_CTL1(cfg->reg) |= ADC_CTL1_CLB;
/* Wait for calibration complete. */
while (ADC_CTL1(cfg->reg) & ADC_CTL1_CLB) {
}
}
static int adc_gd32_configure_sampt(const struct adc_gd32_config *cfg,
uint8_t channel, uint16_t acq_time)
{
uint8_t index = 0, offset;
if (acq_time != ADC_ACQ_TIME_DEFAULT) {
/* Acquisition time unit is adc clock cycle. */
if (ADC_ACQ_TIME_UNIT(acq_time) != ADC_ACQ_TIME_TICKS) {
return -EINVAL;
}
for ( ; index < ARRAY_SIZE(acq_time_tbl); index++) {
if (ADC_ACQ_TIME_VALUE(acq_time) <= acq_time_tbl[index]) {
break;
}
}
if (ADC_ACQ_TIME_VALUE(acq_time) != acq_time_tbl[index]) {
return -ENOTSUP;
}
}
if (channel < SAMPT1_SIZE) {
offset = SPT_WIDTH * channel;
ADC_SAMPT1(cfg->reg) &= ~(ADC_SAMPTX_SPTN << offset);
ADC_SAMPT1(cfg->reg) |= table_samp_time[index] << offset;
} else {
offset = SPT_WIDTH * (channel - SAMPT1_SIZE);
ADC_SAMPT0(cfg->reg) &= ~(ADC_SAMPTX_SPTN << offset);
ADC_SAMPT0(cfg->reg) |= table_samp_time[index] << offset;
}
return 0;
}
static int adc_gd32_channel_setup(const struct device *dev,
const struct adc_channel_cfg *chan_cfg)
{
const struct adc_gd32_config *cfg = dev->config;
if (chan_cfg->gain != ADC_GAIN_1) {
LOG_ERR("Gain is not valid");
return -ENOTSUP;
}
if (chan_cfg->reference != ADC_REF_INTERNAL) {
LOG_ERR("Reference is not valid");
return -ENOTSUP;
}
if (chan_cfg->differential) {
LOG_ERR("Differential sampling not supported");
return -ENOTSUP;
}
if (chan_cfg->channel_id >= cfg->channels) {
LOG_ERR("Invalid channel (%u)", chan_cfg->channel_id);
return -EINVAL;
}
return adc_gd32_configure_sampt(cfg, chan_cfg->channel_id,
chan_cfg->acquisition_time);
}
static int adc_gd32_start_read(const struct device *dev,
const struct adc_sequence *sequence)
{
struct adc_gd32_data *data = dev->data;
const struct adc_gd32_config *cfg = dev->config;
uint8_t resolution_id;
uint32_t index;
index = find_lsb_set(sequence->channels) - 1;
if (sequence->channels > BIT(index)) {
LOG_ERR("Only single channel supported");
return -ENOTSUP;
}
switch (sequence->resolution) {
case 12U:
resolution_id = 0U;
break;
case 10U:
resolution_id = 1U;
break;
case 8U:
resolution_id = 2U;
break;
case 6U:
resolution_id = 3U;
break;
default:
return -EINVAL;
}
#if defined(CONFIG_SOC_SERIES_GD32F4XX) || \
defined(CONFIG_SOC_SERIES_GD32F3X0) || \
defined(CONFIG_SOC_SERIES_GD32L23X)
ADC_CTL0(cfg->reg) &= ~ADC_CTL0_DRES;
ADC_CTL0(cfg->reg) |= CTL0_DRES(resolution_id);
#elif defined(CONFIG_SOC_SERIES_GD32F403) || \
defined(CONFIG_SOC_SERIES_GD32A50X)
ADC_OVSAMPCTL(cfg->reg) &= ~ADC_OVSAMPCTL_DRES;
ADC_OVSAMPCTL(cfg->reg) |= OVSAMPCTL_DRES(resolution_id);
#elif defined(CONFIG_SOC_SERIES_GD32VF103)
ADC_OVSCR(cfg->reg) &= ~ADC_OVSCR_DRES;
ADC_OVSCR(cfg->reg) |= OVSCR_DRES(resolution_id);
#endif
if (sequence->calibrate) {
adc_gd32_calibration(cfg);
}
/* Signle conversion mode with regular group. */
ADC_RSQ2(cfg->reg) &= ~ADC_RSQX_RSQN;
ADC_RSQ2(cfg->reg) = index;
data->buffer = sequence->buffer;
adc_context_start_read(&data->ctx, sequence);
return adc_context_wait_for_completion(&data->ctx);
}
static int adc_gd32_read(const struct device *dev,
const struct adc_sequence *sequence)
{
struct adc_gd32_data *data = dev->data;
int error;
adc_context_lock(&data->ctx, false, NULL);
error = adc_gd32_start_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
#ifdef CONFIG_ADC_ASYNC
static int adc_gd32_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
struct adc_gd32_data *data = dev->data;
int error;
adc_context_lock(&data->ctx, true, async);
error = adc_gd32_start_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
#endif /* CONFIG_ADC_ASYNC */
static struct adc_driver_api adc_gd32_driver_api = {
.channel_setup = adc_gd32_channel_setup,
.read = adc_gd32_read,
#ifdef CONFIG_ADC_ASYNC
.read_async = adc_gd32_read_async,
#endif /* CONFIG_ADC_ASYNC */
};
static int adc_gd32_init(const struct device *dev)
{
struct adc_gd32_data *data = dev->data;
const struct adc_gd32_config *cfg = dev->config;
int ret;
data->dev = dev;
ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
if (ret < 0) {
return ret;
}
#ifdef CONFIG_SOC_SERIES_GD32F3X0
/* Select adc clock source and its prescaler. */
rcu_adc_clock_config(cfg->rcu_clock_source);
#endif
(void)clock_control_on(GD32_CLOCK_CONTROLLER,
(clock_control_subsys_t)&cfg->clkid);
(void)reset_line_toggle_dt(&cfg->reset);
#if defined(CONFIG_SOC_SERIES_GD32F403) || \
defined(CONFIG_SOC_SERIES_GD32VF103) || \
defined(CONFIG_SOC_SERIES_GD32F3X0) || \
defined(CONFIG_SOC_SERIES_GD32L23X)
/* Set SWRCST as the regular channel external trigger. */
ADC_CTL1(cfg->reg) &= ~ADC_CTL1_ETSRC;
ADC_CTL1(cfg->reg) |= CTL1_ETSRC(7);
/* Enable external trigger for regular channel. */
ADC_CTL1(cfg->reg) |= ADC_CTL1_ETERC;
#endif
#ifdef CONFIG_SOC_SERIES_GD32A50X
ADC_CTL1(cfg->reg) |= ADC_CTL1_ETSRC;
ADC_CTL1(cfg->reg) |= ADC_CTL1_ETERC;
#endif
/* Enable ADC */
ADC_CTL1(cfg->reg) |= ADC_CTL1_ADCON;
adc_gd32_calibration(cfg);
cfg->irq_config_func();
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
#define HANDLE_SHARED_IRQ(n, active_irq) \
static const struct device *const dev_##n = DEVICE_DT_INST_GET(n); \
const struct adc_gd32_config *cfg_##n = dev_##n->config; \
\
if ((cfg_##n->irq_num == active_irq) && \
(ADC_CTL0(cfg_##n->reg) & ADC_CTL0_EOCIE)) { \
adc_gd32_isr(dev_##n); \
}
static void adc_gd32_global_irq_handler(const struct device *dev)
{
const struct adc_gd32_config *cfg = dev->config;
LOG_DBG("global irq handler: %u", cfg->irq_num);
DT_INST_FOREACH_STATUS_OKAY_VARGS(HANDLE_SHARED_IRQ, (cfg->irq_num));
}
static void adc_gd32_global_irq_cfg(void)
{
static bool global_irq_init = true;
if (!global_irq_init) {
return;
}
global_irq_init = false;
#if ADC0_ENABLE
/* Shared irq config default to adc0. */
IRQ_CONNECT(DT_IRQN(ADC0_NODE),
DT_IRQ(ADC0_NODE, priority),
adc_gd32_global_irq_handler,
DEVICE_DT_GET(ADC0_NODE),
0);
irq_enable(DT_IRQN(ADC0_NODE));
#elif ADC1_ENABLE
IRQ_CONNECT(DT_IRQN(ADC1_NODE),
DT_IRQ(ADC1_NODE, priority),
adc_gd32_global_irq_handler,
DEVICE_DT_GET(ADC1_NODE),
0);
irq_enable(DT_IRQN(ADC1_NODE));
#endif
#if (ADC0_ENABLE || ADC1_ENABLE) && \
defined(CONFIG_SOC_SERIES_GD32F4XX)
/* gd32f4xx adc2 share the same irq number with adc0 and adc1. */
#elif ADC2_ENABLE
IRQ_CONNECT(DT_IRQN(ADC2_NODE),
DT_IRQ(ADC2_NODE, priority),
adc_gd32_global_irq_handler,
DEVICE_DT_GET(ADC2_NODE),
0);
irq_enable(DT_IRQN(ADC2_NODE));
#endif
}
#ifdef CONFIG_SOC_SERIES_GD32F3X0
#define ADC_CLOCK_SOURCE(n) \
.rcu_clock_source = DT_INST_PROP(n, rcu_clock_source)
#else
#define ADC_CLOCK_SOURCE(n)
#endif
#define ADC_GD32_INIT(n) \
PINCTRL_DT_INST_DEFINE(n); \
static struct adc_gd32_data adc_gd32_data_##n = { \
ADC_CONTEXT_INIT_TIMER(adc_gd32_data_##n, ctx), \
ADC_CONTEXT_INIT_LOCK(adc_gd32_data_##n, ctx), \
ADC_CONTEXT_INIT_SYNC(adc_gd32_data_##n, ctx), \
}; \
const static struct adc_gd32_config adc_gd32_config_##n = { \
.reg = DT_INST_REG_ADDR(n), \
.clkid = DT_INST_CLOCKS_CELL(n, id), \
.reset = RESET_DT_SPEC_INST_GET(n), \
.channels = DT_INST_PROP(n, channels), \
.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \
.irq_num = DT_INST_IRQN(n), \
.irq_config_func = adc_gd32_global_irq_cfg, \
ADC_CLOCK_SOURCE(n) \
}; \
DEVICE_DT_INST_DEFINE(n, \
&adc_gd32_init, NULL, \
&adc_gd32_data_##n, &adc_gd32_config_##n, \
POST_KERNEL, CONFIG_ADC_INIT_PRIORITY, \
&adc_gd32_driver_api); \
DT_INST_FOREACH_STATUS_OKAY(ADC_GD32_INIT)
``` | /content/code_sandbox/drivers/adc/adc_gd32.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,125 |
```c
/*
*
*/
#define DT_DRV_COMPAT silabs_gecko_adc
#include <zephyr/drivers/adc.h>
#include <em_adc.h>
#include <em_cmu.h>
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(adc_gecko, CONFIG_ADC_LOG_LEVEL);
/* Number of channels available. */
#define GECKO_CHANNEL_COUNT 16
struct adc_gecko_channel_config {
bool initialized;
ADC_Ref_TypeDef reference;
ADC_PosSel_TypeDef input_select;
};
struct adc_gecko_data {
const struct device *dev;
struct adc_context ctx;
uint16_t *buffer;
uint16_t *repeat_buffer;
uint32_t channels;
uint8_t channel_id;
ADC_Res_TypeDef resolution;
struct adc_gecko_channel_config channel_config[GECKO_CHANNEL_COUNT];
};
struct adc_gecko_config {
ADC_TypeDef *base;
void (*irq_cfg_func)(void);
uint32_t frequency;
};
static void adc_gecko_set_config(const struct device *dev)
{
struct adc_gecko_data *data = dev->data;
struct adc_gecko_channel_config *channel_config = NULL;
const struct adc_gecko_config *config = dev->config;
ADC_TypeDef *adc_base = (ADC_TypeDef *)config->base;
ADC_Init_TypeDef init = ADC_INIT_DEFAULT;
ADC_InitSingle_TypeDef initSingle = ADC_INITSINGLE_DEFAULT;
channel_config = &data->channel_config[data->channel_id];
init.prescale = ADC_PrescaleCalc(config->frequency, 0);
init.timebase = ADC_TimebaseCalc(0);
initSingle.diff = false;
initSingle.reference = channel_config->reference;
initSingle.resolution = data->resolution;
initSingle.acqTime = adcAcqTime4;
initSingle.posSel = channel_config->input_select;
ADC_Init(adc_base, &init);
ADC_InitSingle(adc_base, &initSingle);
}
static int adc_gecko_check_buffer_size(const struct adc_sequence *sequence,
uint8_t active_channels)
{
size_t needed_buffer_size;
needed_buffer_size = active_channels * sizeof(uint16_t);
if (sequence->options) {
needed_buffer_size *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < needed_buffer_size) {
LOG_DBG("Provided buffer is too small (%u/%u)",
sequence->buffer_size, needed_buffer_size);
return -ENOMEM;
}
return 0;
}
static int start_read(const struct device *dev, const struct adc_sequence *sequence)
{
struct adc_gecko_data *data = dev->data;
uint32_t channels;
uint8_t channel_count;
uint8_t index;
int res;
/* Check if at least 1 channel is requested */
if (sequence->channels == 0) {
LOG_DBG("No channel requested");
return -EINVAL;
}
if (sequence->oversampling) {
LOG_ERR("Oversampling is not supported");
return -ENOTSUP;
}
/* Verify all requested channels are initialized and store resolution */
channels = sequence->channels;
channel_count = 0;
while (channels) {
/* Iterate through all channels and check if they are initialized */
index = find_lsb_set(channels) - 1;
if (index >= GECKO_CHANNEL_COUNT) {
LOG_DBG("Requested channel index not available: %d", index);
return -EINVAL;
}
if (!data->channel_config[index].initialized) {
LOG_DBG("Channel not initialized");
return -EINVAL;
}
channel_count++;
channels &= ~BIT(index);
}
res = adc_gecko_check_buffer_size(sequence, channel_count);
if (res < 0) {
return res;
}
data->buffer = sequence->buffer;
adc_context_start_read(&data->ctx, sequence);
res = adc_context_wait_for_completion(&data->ctx);
return res;
}
static void adc_gecko_start_channel(const struct device *dev)
{
const struct adc_gecko_config *config = dev->config;
struct adc_gecko_data *data = dev->data;
ADC_TypeDef *adc_base = (ADC_TypeDef *)config->base;
data->channel_id = find_lsb_set(data->channels) - 1;
adc_gecko_set_config(data->dev);
ADC_IntEnable(adc_base, ADC_IEN_SINGLE);
ADC_Start(adc_base, adcStartSingle);
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct adc_gecko_data *data = CONTAINER_OF(ctx, struct adc_gecko_data, ctx);
data->channels = ctx->sequence.channels;
adc_gecko_start_channel(data->dev);
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx, bool repeat_sampling)
{
struct adc_gecko_data *data = CONTAINER_OF(ctx, struct adc_gecko_data, ctx);
if (repeat_sampling) {
data->buffer = data->repeat_buffer;
}
}
static void adc_gecko_isr(void *arg)
{
const struct device *dev = (const struct device *)arg;
const struct adc_gecko_config *config = dev->config;
struct adc_gecko_data *data = dev->data;
ADC_TypeDef *adc_base = config->base;
uint32_t sample = 0;
uint32_t flags, err;
flags = ADC_IntGet(adc_base);
__ASSERT(flags & ADC_IF_SINGLE, "unexpected ADC IRQ (flags=0x%08x)!", flags);
err = flags & (ADC_IF_EM23ERR | ADC_IF_PROGERR | ADC_IF_VREFOV | ADC_IF_SINGLEOF);
if (!err) {
sample = ADC_DataSingleGet(adc_base);
*data->buffer++ = (uint16_t)sample;
data->channels &= ~BIT(data->channel_id);
if (data->channels) {
adc_gecko_start_channel(dev);
} else {
adc_context_on_sampling_done(&data->ctx, dev);
}
} else {
LOG_ERR("ADC conversion error, flags=%08x", err);
adc_context_complete(&data->ctx, -EIO);
}
ADC_IntClear(adc_base, ADC_IF_SINGLE | err);
}
static int adc_gecko_read(const struct device *dev,
const struct adc_sequence *sequence)
{
struct adc_gecko_data *data = dev->data;
int error;
adc_context_lock(&data->ctx, false, NULL);
error = start_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
static int adc_gecko_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
struct adc_gecko_data *data = dev->data;
struct adc_gecko_channel_config *channel_config = NULL;
if (channel_cfg->channel_id < GECKO_CHANNEL_COUNT) {
channel_config = &data->channel_config[channel_cfg->channel_id];
} else {
LOG_DBG("Requested channel index not available: %d", channel_cfg->channel_id);
return -EINVAL;
}
channel_config->initialized = false;
channel_config->input_select = channel_cfg->input_positive;
switch (channel_cfg->gain) {
case ADC_GAIN_1:
break;
default:
LOG_ERR("unsupported channel gain '%d'", channel_cfg->gain);
return -ENOTSUP;
}
switch (channel_cfg->reference) {
case ADC_REF_VDD_1:
channel_config->reference = adcRef5V;
break;
case ADC_REF_VDD_1_2:
channel_config->reference = adcRef2V5;
break;
case ADC_REF_VDD_1_4:
channel_config->reference = adcRef1V25;
break;
default:
LOG_ERR("unsupported channel reference type '%d'", channel_cfg->reference);
return -ENOTSUP;
}
channel_config->initialized = true;
return 0;
}
static int adc_gecko_init(const struct device *dev)
{
const struct adc_gecko_config *config = dev->config;
struct adc_gecko_data *data = dev->data;
CMU_ClockEnable(cmuClock_HFPER, true);
CMU_ClockEnable(cmuClock_ADC0, true);
data->dev = dev;
data->resolution = adcRes12Bit;
config->irq_cfg_func();
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
static const struct adc_driver_api api_gecko_adc_driver_api = {
.channel_setup = adc_gecko_channel_setup,
.read = adc_gecko_read,
};
#define GECKO_ADC_INIT(n) \
\
static void adc_gecko_config_func_##n(void); \
\
const static struct adc_gecko_config adc_gecko_config_##n = { \
.base = (ADC_TypeDef *)DT_INST_REG_ADDR(n), \
.irq_cfg_func = adc_gecko_config_func_##n, \
.frequency = DT_INST_PROP(n, frequency), \
}; \
static struct adc_gecko_data adc_gecko_data_##n = { \
ADC_CONTEXT_INIT_TIMER(adc_gecko_data_##n, ctx), \
ADC_CONTEXT_INIT_LOCK(adc_gecko_data_##n, ctx), \
ADC_CONTEXT_INIT_SYNC(adc_gecko_data_##n, ctx), \
}; \
static void adc_gecko_config_func_##n(void) \
{ \
IRQ_CONNECT(DT_INST_IRQN(n), \
DT_INST_IRQ(n, priority), \
adc_gecko_isr, DEVICE_DT_INST_GET(n), 0); \
irq_enable(DT_INST_IRQN(n)); \
}; \
DEVICE_DT_INST_DEFINE(n, \
&adc_gecko_init, NULL, \
&adc_gecko_data_##n, &adc_gecko_config_##n,\
POST_KERNEL, CONFIG_ADC_INIT_PRIORITY, \
&api_gecko_adc_driver_api);
DT_INST_FOREACH_STATUS_OKAY(GECKO_ADC_INIT)
``` | /content/code_sandbox/drivers/adc/adc_gecko.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,142 |
```unknown
# ADC configuration options
#
# ADC options
#
menuconfig ADC
bool "Analog-to-Digital Converter (ADC) drivers"
# All platforms that implement the ADC driver are now required to
# provide relevant DTS entries.
help
Enable ADC (Analog to Digital Converter) driver configuration.
if ADC
config ADC_SHELL
bool "ADC Shell"
depends on SHELL
help
Enable ADC Shell for testing.
# By selecting or not this option particular ADC drivers indicate if it is
# required to explicitly specify analog inputs when configuring channels or
# just the channel identifier is sufficient.
config ADC_CONFIGURABLE_INPUTS
bool
# By selecting or not this option particular ADC drivers indicate if it is
# required to explicitly specify for the excitation current source the pin
# which should be used.
config ADC_CONFIGURABLE_EXCITATION_CURRENT_SOURCE_PIN
bool
# By selecting or not this option particular ADC drivers indicate if they
# allow a configurable voltage bias pin.
config ADC_CONFIGURABLE_VBIAS_PIN
bool
config ADC_ASYNC
bool "Asynchronous call support"
select POLL
help
This option enables the asynchronous API calls.
config ADC_INIT_PRIORITY
int "ADC init priority"
default KERNEL_INIT_PRIORITY_DEVICE
help
ADC driver device initialization priority.
module = ADC
module-str = ADC
source "subsys/logging/Kconfig.template.log_config"
source "drivers/adc/Kconfig.b91"
source "drivers/adc/Kconfig.it8xxx2"
source "drivers/adc/Kconfig.mcux"
source "drivers/adc/Kconfig.nrfx"
source "drivers/adc/Kconfig.sam_afec"
source "drivers/adc/Kconfig.sam"
source "drivers/adc/Kconfig.sam0"
source "drivers/adc/Kconfig.stm32"
source "drivers/adc/Kconfig.esp32"
source "drivers/adc/Kconfig.xec"
source "drivers/adc/Kconfig.lmp90xxx"
source "drivers/adc/Kconfig.mcp320x"
source "drivers/adc/Kconfig.npcx"
source "drivers/adc/Kconfig.cc32xx"
source "drivers/adc/Kconfig.cc13xx_cc26xx"
source "drivers/adc/Kconfig.adc_emul"
source "drivers/adc/Kconfig.vf610"
source "drivers/adc/Kconfig.test"
source "drivers/adc/Kconfig.ads1x1x"
source "drivers/adc/Kconfig.gd32"
source "drivers/adc/Kconfig.ads1112"
source "drivers/adc/Kconfig.ads1119"
source "drivers/adc/Kconfig.ads7052"
source "drivers/adc/Kconfig.ads114s0x"
source "drivers/adc/Kconfig.rpi_pico"
source "drivers/adc/Kconfig.xmc4xxx"
source "drivers/adc/Kconfig.gecko"
source "drivers/adc/Kconfig.ifx_cat1"
source "drivers/adc/Kconfig.smartbond"
source "drivers/adc/Kconfig.tla2021"
source "drivers/adc/Kconfig.nxp_s32"
source "drivers/adc/Kconfig.max1125x"
source "drivers/adc/Kconfig.max11102_17"
source "drivers/adc/Kconfig.ad559x"
source "drivers/adc/Kconfig.ltc2451"
source "drivers/adc/Kconfig.numaker"
source "drivers/adc/Kconfig.ene"
source "drivers/adc/Kconfig.ambiq"
source "drivers/adc/Kconfig.renesas_ra"
endif # ADC
``` | /content/code_sandbox/drivers/adc/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 735 |
```c
/*
*
* Based on adc_mcux_adc16.c, which is:
*
*/
#define DT_DRV_COMPAT nxp_kinetis_adc12
#include <zephyr/drivers/adc.h>
#include <fsl_adc12.h>
#include <zephyr/drivers/pinctrl.h>
#define LOG_LEVEL CONFIG_ADC_LOG_LEVEL
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(adc_mcux_adc12);
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
struct mcux_adc12_config {
ADC_Type *base;
adc12_clock_source_t clock_src;
adc12_clock_divider_t clock_div;
adc12_reference_voltage_source_t ref_src;
uint32_t sample_clk_count;
void (*irq_config_func)(const struct device *dev);
const struct pinctrl_dev_config *pincfg;
};
struct mcux_adc12_data {
const struct device *dev;
struct adc_context ctx;
uint16_t *buffer;
uint16_t *repeat_buffer;
uint32_t channels;
uint8_t channel_id;
};
static int mcux_adc12_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
uint8_t channel_id = channel_cfg->channel_id;
if (channel_id > (ADC_SC1_ADCH_MASK >> ADC_SC1_ADCH_SHIFT)) {
LOG_ERR("Invalid channel %d", channel_id);
return -EINVAL;
}
if (channel_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
LOG_ERR("Unsupported channel acquisition time");
return -ENOTSUP;
}
if (channel_cfg->differential) {
LOG_ERR("Differential channels are not supported");
return -ENOTSUP;
}
if (channel_cfg->gain != ADC_GAIN_1) {
LOG_ERR("Unsupported channel gain %d", channel_cfg->gain);
return -ENOTSUP;
}
if (channel_cfg->reference != ADC_REF_INTERNAL) {
LOG_ERR("Unsupported channel reference");
return -ENOTSUP;
}
return 0;
}
static int mcux_adc12_start_read(const struct device *dev,
const struct adc_sequence *sequence)
{
const struct mcux_adc12_config *config = dev->config;
struct mcux_adc12_data *data = dev->data;
adc12_hardware_average_mode_t mode;
adc12_resolution_t resolution;
ADC_Type *base = config->base;
int error;
uint32_t tmp32;
switch (sequence->resolution) {
case 8:
resolution = kADC12_Resolution8Bit;
break;
case 10:
resolution = kADC12_Resolution10Bit;
break;
case 12:
resolution = kADC12_Resolution12Bit;
break;
default:
LOG_ERR("Unsupported resolution %d", sequence->resolution);
return -ENOTSUP;
}
tmp32 = base->CFG1 & ~(ADC_CFG1_MODE_MASK);
tmp32 |= ADC_CFG1_MODE(resolution);
base->CFG1 = tmp32;
switch (sequence->oversampling) {
case 0:
mode = kADC12_HardwareAverageDisabled;
break;
case 2:
mode = kADC12_HardwareAverageCount4;
break;
case 3:
mode = kADC12_HardwareAverageCount8;
break;
case 4:
mode = kADC12_HardwareAverageCount16;
break;
case 5:
mode = kADC12_HardwareAverageCount32;
break;
default:
LOG_ERR("Unsupported oversampling value %d",
sequence->oversampling);
return -ENOTSUP;
}
ADC12_SetHardwareAverage(config->base, mode);
data->buffer = sequence->buffer;
adc_context_start_read(&data->ctx, sequence);
error = adc_context_wait_for_completion(&data->ctx);
return error;
}
static int mcux_adc12_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
struct mcux_adc12_data *data = dev->data;
int error;
adc_context_lock(&data->ctx, async ? true : false, async);
error = mcux_adc12_start_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
static int mcux_adc12_read(const struct device *dev,
const struct adc_sequence *sequence)
{
return mcux_adc12_read_async(dev, sequence, NULL);
}
static void mcux_adc12_start_channel(const struct device *dev)
{
const struct mcux_adc12_config *config = dev->config;
struct mcux_adc12_data *data = dev->data;
adc12_channel_config_t channel_config;
uint32_t channel_group = 0U;
data->channel_id = find_lsb_set(data->channels) - 1;
LOG_DBG("Starting channel %d", data->channel_id);
channel_config.enableInterruptOnConversionCompleted = true;
channel_config.channelNumber = data->channel_id;
ADC12_SetChannelConfig(config->base, channel_group, &channel_config);
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct mcux_adc12_data *data =
CONTAINER_OF(ctx, struct mcux_adc12_data, ctx);
data->channels = ctx->sequence.channels;
data->repeat_buffer = data->buffer;
mcux_adc12_start_channel(data->dev);
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
bool repeat_sampling)
{
struct mcux_adc12_data *data =
CONTAINER_OF(ctx, struct mcux_adc12_data, ctx);
if (repeat_sampling) {
data->buffer = data->repeat_buffer;
}
}
static void mcux_adc12_isr(const struct device *dev)
{
const struct mcux_adc12_config *config = dev->config;
struct mcux_adc12_data *data = dev->data;
ADC_Type *base = config->base;
uint32_t channel_group = 0U;
uint16_t result;
result = ADC12_GetChannelConversionValue(base, channel_group);
LOG_DBG("Finished channel %d. Result is 0x%04x",
data->channel_id, result);
*data->buffer++ = result;
data->channels &= ~BIT(data->channel_id);
if (data->channels) {
mcux_adc12_start_channel(dev);
} else {
adc_context_on_sampling_done(&data->ctx, dev);
}
}
static int mcux_adc12_init(const struct device *dev)
{
const struct mcux_adc12_config *config = dev->config;
struct mcux_adc12_data *data = dev->data;
ADC_Type *base = config->base;
adc12_config_t adc_config;
int err;
ADC12_GetDefaultConfig(&adc_config);
adc_config.referenceVoltageSource = config->ref_src;
adc_config.clockSource = config->clock_src;
adc_config.clockDivider = config->clock_div;
adc_config.sampleClockCount = config->sample_clk_count;
adc_config.resolution = kADC12_Resolution12Bit;
adc_config.enableContinuousConversion = false;
ADC12_Init(base, &adc_config);
ADC12_DoAutoCalibration(base);
ADC12_EnableHardwareTrigger(base, false);
config->irq_config_func(dev);
data->dev = dev;
err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
if (err) {
return err;
}
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
static const struct adc_driver_api mcux_adc12_driver_api = {
.channel_setup = mcux_adc12_channel_setup,
.read = mcux_adc12_read,
#ifdef CONFIG_ADC_ASYNC
.read_async = mcux_adc12_read_async,
#endif
};
#define ASSERT_WITHIN_RANGE(val, min, max, str) \
BUILD_ASSERT(val >= min && val <= max, str)
#define ASSERT_ADC12_CLK_DIV_VALID(val, str) \
BUILD_ASSERT(val == 1 || val == 2 || val == 4 || val == 8, str)
#define TO_ADC12_CLOCK_SRC(val) _DO_CONCAT(kADC12_ClockSourceAlt, val)
#define TO_ADC12_CLOCK_DIV(val) _DO_CONCAT(kADC12_ClockDivider, val)
#define ADC12_REF_SRC(n) \
COND_CODE_1(DT_INST_PROP(0, alternate_voltage_reference), \
(kADC12_ReferenceVoltageSourceValt), \
(kADC12_ReferenceVoltageSourceVref))
#define ACD12_MCUX_INIT(n) \
static void mcux_adc12_config_func_##n(const struct device *dev); \
\
PINCTRL_DT_INST_DEFINE(n); \
\
ASSERT_WITHIN_RANGE(DT_INST_PROP(n, clk_source), 0, 3, \
"Invalid clock source"); \
ASSERT_ADC12_CLK_DIV_VALID(DT_INST_PROP(n, clk_divider), \
"Invalid clock divider"); \
ASSERT_WITHIN_RANGE(DT_INST_PROP(n, sample_time), 2, 256, \
"Invalid sample time"); \
static const struct mcux_adc12_config mcux_adc12_config_##n = { \
.base = (ADC_Type *)DT_INST_REG_ADDR(n), \
.clock_src = TO_ADC12_CLOCK_SRC(DT_INST_PROP(n, clk_source)),\
.clock_div = \
TO_ADC12_CLOCK_DIV(DT_INST_PROP(n, clk_divider)),\
.ref_src = ADC12_REF_SRC(n), \
.sample_clk_count = DT_INST_PROP(n, sample_time), \
.irq_config_func = mcux_adc12_config_func_##n, \
.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \
}; \
\
static struct mcux_adc12_data mcux_adc12_data_##n = { \
ADC_CONTEXT_INIT_TIMER(mcux_adc12_data_##n, ctx), \
ADC_CONTEXT_INIT_LOCK(mcux_adc12_data_##n, ctx), \
ADC_CONTEXT_INIT_SYNC(mcux_adc12_data_##n, ctx), \
}; \
\
DEVICE_DT_INST_DEFINE(n, &mcux_adc12_init, \
NULL, &mcux_adc12_data_##n, \
&mcux_adc12_config_##n, POST_KERNEL, \
CONFIG_ADC_INIT_PRIORITY, \
&mcux_adc12_driver_api); \
\
static void mcux_adc12_config_func_##n(const struct device *dev) \
{ \
IRQ_CONNECT(DT_INST_IRQN(n), \
DT_INST_IRQ(n, priority), mcux_adc12_isr, \
DEVICE_DT_INST_GET(n), 0); \
\
irq_enable(DT_INST_IRQN(n)); \
}
DT_INST_FOREACH_STATUS_OKAY(ACD12_MCUX_INIT)
``` | /content/code_sandbox/drivers/adc/adc_mcux_adc12.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,351 |
```unknown
config ADC_CC32XX
bool "CC32XX ADC driver"
default y
depends on DT_HAS_TI_CC32XX_ADC_ENABLED
help
This option enables the CC32XX ADC driver.
``` | /content/code_sandbox/drivers/adc/Kconfig.cc32xx | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 44 |
```unknown
config ADC_LTC2451
bool "LTC2451 driver"
default y
depends on DT_HAS_LLTC_LTC2451_ENABLED
select I2C
``` | /content/code_sandbox/drivers/adc/Kconfig.ltc2451 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 38 |
```c
/*
*
*/
#define DT_DRV_COMPAT st_stm32_adc
#include <errno.h>
#include <zephyr/drivers/adc.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/device.h>
#include <zephyr/kernel.h>
#include <zephyr/init.h>
#include <soc.h>
#include <zephyr/pm/device.h>
#include <zephyr/pm/policy.h>
#include <stm32_ll_adc.h>
#include <stm32_ll_system.h>
#if defined(CONFIG_SOC_SERIES_STM32U5X)
#include <stm32_ll_pwr.h>
#endif /* CONFIG_SOC_SERIES_STM32U5X */
#ifdef CONFIG_ADC_STM32_DMA
#include <zephyr/drivers/dma/dma_stm32.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/toolchain.h>
#include <stm32_ll_dma.h>
#endif
#define ADC_CONTEXT_USES_KERNEL_TIMER
#define ADC_CONTEXT_ENABLE_ON_COMPLETE
#include "adc_context.h"
#define LOG_LEVEL CONFIG_ADC_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(adc_stm32);
#include <zephyr/drivers/clock_control/stm32_clock_control.h>
#include <zephyr/dt-bindings/adc/stm32_adc.h>
#include <zephyr/irq.h>
#include <zephyr/mem_mgmt/mem_attr.h>
#if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H7RSX)
#include <zephyr/dt-bindings/memory-attr/memory-attr-arm.h>
#endif
#ifdef CONFIG_NOCACHE_MEMORY
#include <zephyr/linker/linker-defs.h>
#elif defined(CONFIG_CACHE_MANAGEMENT)
#include <zephyr/arch/cache.h>
#endif /* CONFIG_NOCACHE_MEMORY */
#if defined(CONFIG_SOC_SERIES_STM32F3X)
#if defined(ADC1_V2_5)
/* ADC1_V2_5 is the ADC version for STM32F37x */
#define STM32F3X_ADC_V2_5
#elif defined(ADC5_V1_1)
/* ADC5_V1_1 is the ADC version for other STM32F3x */
#define STM32F3X_ADC_V1_1
#endif
#endif
/*
* Other ADC versions:
* ADC_VER_V5_V90 -> STM32H72x/H73x
* ADC_VER_V5_X -> STM32H74x/H75x && U5
* ADC_VER_V5_3 -> STM32H7Ax/H7Bx
* compat st_stm32f1_adc -> STM32F1, F37x (ADC1_V2_5)
* compat st_stm32f4_adc -> STM32F2, F4, F7, L1
*/
#define ANY_NUM_COMMON_SAMPLING_TIME_CHANNELS_IS(value) \
(DT_INST_FOREACH_STATUS_OKAY_VARGS(IS_EQ_PROP_OR, \
num_sampling_time_common_channels,\
0, value) 0)
#define ANY_ADC_SEQUENCER_TYPE_IS(value) \
(DT_INST_FOREACH_STATUS_OKAY_VARGS(IS_EQ_PROP_OR, \
st_adc_sequencer,\
0, value) 0)
#define IS_EQ_PROP_OR(inst, prop, default_value, compare_value) \
IS_EQ(DT_INST_PROP_OR(inst, prop, default_value), compare_value) ||
/* reference voltage for the ADC */
#define STM32_ADC_VREF_MV DT_INST_PROP(0, vref_mv)
#if ANY_ADC_SEQUENCER_TYPE_IS(FULLY_CONFIGURABLE)
#define RANK(n) LL_ADC_REG_RANK_##n
static const uint32_t table_rank[] = {
RANK(1),
RANK(2),
RANK(3),
RANK(4),
RANK(5),
RANK(6),
RANK(7),
RANK(8),
RANK(9),
RANK(10),
RANK(11),
RANK(12),
RANK(13),
RANK(14),
RANK(15),
RANK(16),
#if defined(LL_ADC_REG_RANK_17)
RANK(17),
RANK(18),
RANK(19),
RANK(20),
RANK(21),
RANK(22),
RANK(23),
RANK(24),
RANK(25),
RANK(26),
RANK(27),
#if defined(LL_ADC_REG_RANK_28)
RANK(28),
#endif /* LL_ADC_REG_RANK_28 */
#endif /* LL_ADC_REG_RANK_17 */
};
#define SEQ_LEN(n) LL_ADC_REG_SEQ_SCAN_ENABLE_##n##RANKS
/* Length of this array signifies the maximum sequence length */
static const uint32_t table_seq_len[] = {
LL_ADC_REG_SEQ_SCAN_DISABLE,
SEQ_LEN(2),
SEQ_LEN(3),
SEQ_LEN(4),
SEQ_LEN(5),
SEQ_LEN(6),
SEQ_LEN(7),
SEQ_LEN(8),
SEQ_LEN(9),
SEQ_LEN(10),
SEQ_LEN(11),
SEQ_LEN(12),
SEQ_LEN(13),
SEQ_LEN(14),
SEQ_LEN(15),
SEQ_LEN(16),
#if defined(LL_ADC_REG_SEQ_SCAN_ENABLE_17RANKS)
SEQ_LEN(17),
SEQ_LEN(18),
SEQ_LEN(19),
SEQ_LEN(20),
SEQ_LEN(21),
SEQ_LEN(22),
SEQ_LEN(23),
SEQ_LEN(24),
SEQ_LEN(25),
SEQ_LEN(26),
SEQ_LEN(27),
#if defined(LL_ADC_REG_SEQ_SCAN_ENABLE_28RANKS)
SEQ_LEN(28),
#endif /* LL_ADC_REG_SEQ_SCAN_ENABLE_28RANKS */
#endif /* LL_ADC_REG_SEQ_SCAN_ENABLE_17RANKS */
};
#endif /* ANY_ADC_SEQUENCER_TYPE_IS(FULLY_CONFIGURABLE) */
/* Number of different sampling time values */
#define STM32_NB_SAMPLING_TIME 8
#ifdef CONFIG_ADC_STM32_DMA
struct stream {
const struct device *dma_dev;
uint32_t channel;
struct dma_config dma_cfg;
struct dma_block_config dma_blk_cfg;
uint8_t priority;
bool src_addr_increment;
bool dst_addr_increment;
};
#endif /* CONFIG_ADC_STM32_DMA */
struct adc_stm32_data {
struct adc_context ctx;
const struct device *dev;
uint16_t *buffer;
uint16_t *repeat_buffer;
uint8_t resolution;
uint32_t channels;
uint8_t channel_count;
uint8_t samples_count;
int8_t acq_time_index[2];
#ifdef CONFIG_ADC_STM32_DMA
volatile int dma_error;
struct stream dma;
#endif
};
struct adc_stm32_cfg {
ADC_TypeDef *base;
void (*irq_cfg_func)(void);
const struct stm32_pclken *pclken;
size_t pclk_len;
uint32_t clk_prescaler;
const struct pinctrl_dev_config *pcfg;
const uint16_t sampling_time_table[STM32_NB_SAMPLING_TIME];
int8_t num_sampling_time_common_channels;
int8_t sequencer_type;
int8_t res_table_size;
const uint32_t res_table[];
};
#ifdef CONFIG_ADC_STM32_DMA
static void adc_stm32_enable_dma_support(ADC_TypeDef *adc)
{
/* Allow ADC to create DMA request and set to one-shot mode as implemented in HAL drivers */
#if defined(CONFIG_SOC_SERIES_STM32H7X)
#if defined(ADC_VER_V5_V90)
if (adc == ADC3) {
LL_ADC_REG_SetDMATransferMode(adc, LL_ADC3_REG_DMA_TRANSFER_LIMITED);
} else {
LL_ADC_REG_SetDataTransferMode(adc, LL_ADC_REG_DMA_TRANSFER_LIMITED);
}
#elif defined(ADC_VER_V5_X)
LL_ADC_REG_SetDataTransferMode(adc, LL_ADC_REG_DMA_TRANSFER_LIMITED);
#else
#error "Unsupported ADC version"
#endif
#elif DT_HAS_COMPAT_STATUS_OKAY(st_stm32f1_adc) /* defined(CONFIG_SOC_SERIES_STM32H7X) */
#error "The STM32F1 ADC + DMA is not yet supported"
#elif defined(CONFIG_SOC_SERIES_STM32U5X) /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32f1_adc) */
if (adc == ADC4) {
LL_ADC_REG_SetDMATransfer(adc, LL_ADC_REG_DMA_TRANSFER_LIMITED_ADC4);
} else {
LL_ADC_REG_SetDataTransferMode(adc, LL_ADC_REG_DMA_TRANSFER_LIMITED);
}
#else /* defined(CONFIG_SOC_SERIES_STM32U5X) */
/* Default mechanism for other MCUs */
LL_ADC_REG_SetDMATransfer(adc, LL_ADC_REG_DMA_TRANSFER_LIMITED);
#endif
}
static int adc_stm32_dma_start(const struct device *dev,
void *buffer, size_t channel_count)
{
const struct adc_stm32_cfg *config = dev->config;
ADC_TypeDef *adc = (ADC_TypeDef *)config->base;
struct adc_stm32_data *data = dev->data;
struct dma_block_config *blk_cfg;
int ret;
struct stream *dma = &data->dma;
blk_cfg = &dma->dma_blk_cfg;
/* prepare the block */
blk_cfg->block_size = channel_count * sizeof(int16_t);
/* Source and destination */
blk_cfg->source_address = (uint32_t)LL_ADC_DMA_GetRegAddr(adc, LL_ADC_DMA_REG_REGULAR_DATA);
blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
blk_cfg->source_reload_en = 0;
blk_cfg->dest_address = (uint32_t)buffer;
blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
blk_cfg->dest_reload_en = 0;
/* Manually set the FIFO threshold to 1/4 because the
* dmamux DTS entry does not contain fifo threshold
*/
blk_cfg->fifo_mode_control = 0;
/* direction is given by the DT */
dma->dma_cfg.head_block = blk_cfg;
dma->dma_cfg.user_data = data;
ret = dma_config(data->dma.dma_dev, data->dma.channel,
&dma->dma_cfg);
if (ret != 0) {
LOG_ERR("Problem setting up DMA: %d", ret);
return ret;
}
adc_stm32_enable_dma_support(adc);
data->dma_error = 0;
ret = dma_start(data->dma.dma_dev, data->dma.channel);
if (ret != 0) {
LOG_ERR("Problem starting DMA: %d", ret);
return ret;
}
LOG_DBG("DMA started");
return ret;
}
#endif /* CONFIG_ADC_STM32_DMA */
#if defined(CONFIG_ADC_STM32_DMA) && defined(CONFIG_SOC_SERIES_STM32H7X)
/* Returns true if given buffer is in a non-cacheable SRAM region.
* This is determined using the device tree, meaning the .nocache region won't work.
* The entire buffer must be in a single region.
* An example of how the SRAM region can be defined in the DTS:
* &sram4 {
* zephyr,memory-attr = <( DT_MEM_ARM(ATTR_MPU_RAM_NOCACHE) | ... )>;
* };
*/
static bool buf_in_nocache(uintptr_t buf, size_t len_bytes)
{
bool buf_within_nocache = false;
#ifdef CONFIG_NOCACHE_MEMORY
buf_within_nocache = (buf >= ((uintptr_t)_nocache_ram_start)) &&
((buf + len_bytes - 1) <= ((uintptr_t)_nocache_ram_end));
if (buf_within_nocache) {
return true;
}
#endif /* CONFIG_NOCACHE_MEMORY */
buf_within_nocache = mem_attr_check_buf(
(void *)buf, len_bytes, DT_MEM_ARM(ATTR_MPU_RAM_NOCACHE)) == 0;
return buf_within_nocache;
}
#endif /* defined(CONFIG_ADC_STM32_DMA) && defined(CONFIG_SOC_SERIES_STM32H7X) */
static int check_buffer(const struct adc_sequence *sequence,
uint8_t active_channels)
{
size_t needed_buffer_size;
needed_buffer_size = active_channels * sizeof(uint16_t);
if (sequence->options) {
needed_buffer_size *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < needed_buffer_size) {
LOG_ERR("Provided buffer is too small (%u/%u)",
sequence->buffer_size, needed_buffer_size);
return -ENOMEM;
}
#if defined(CONFIG_ADC_STM32_DMA) && defined(CONFIG_SOC_SERIES_STM32H7X)
/* Buffer is forced to be in non-cacheable SRAM region to avoid cache maintenance */
if (!buf_in_nocache((uintptr_t)sequence->buffer, needed_buffer_size)) {
LOG_ERR("Supplied buffer is not in a non-cacheable region according to DTS.");
return -EINVAL;
}
#endif
return 0;
}
/*
* Enable ADC peripheral, and wait until ready if required by SOC.
*/
static int adc_stm32_enable(ADC_TypeDef *adc)
{
if (LL_ADC_IsEnabled(adc) == 1UL) {
return 0;
}
#if !DT_HAS_COMPAT_STATUS_OKAY(st_stm32f1_adc) && \
!DT_HAS_COMPAT_STATUS_OKAY(st_stm32f4_adc)
LL_ADC_ClearFlag_ADRDY(adc);
LL_ADC_Enable(adc);
/*
* Enabling ADC modules in many series may fail if they are
* still not stabilized, this will wait for a short time (about 1ms)
* to ensure ADC modules are properly enabled.
*/
uint32_t count_timeout = 0;
while (LL_ADC_IsActiveFlag_ADRDY(adc) == 0) {
#ifdef CONFIG_SOC_SERIES_STM32F0X
/* For F0, continue to write ADEN=1 until ADRDY=1 */
if (LL_ADC_IsEnabled(adc) == 0UL) {
LL_ADC_Enable(adc);
}
#endif /* CONFIG_SOC_SERIES_STM32F0X */
count_timeout++;
k_busy_wait(100);
if (count_timeout >= 10) {
return -ETIMEDOUT;
}
}
#else
/*
* On STM32F1, F2, F37x, F4, F7 and L1, do not re-enable the ADC.
* On F1 and F37x if ADON holds 1 (LL_ADC_IsEnabled is true) and 1 is
* written, then conversion starts. That's not what is expected.
*/
LL_ADC_Enable(adc);
#endif
return 0;
}
static void adc_stm32_start_conversion(const struct device *dev)
{
const struct adc_stm32_cfg *config = dev->config;
ADC_TypeDef *adc = (ADC_TypeDef *)config->base;
LOG_DBG("Starting conversion");
#if !defined(CONFIG_SOC_SERIES_STM32F1X) && \
!DT_HAS_COMPAT_STATUS_OKAY(st_stm32f4_adc)
LL_ADC_REG_StartConversion(adc);
#else
LL_ADC_REG_StartConversionSWStart(adc);
#endif
}
/*
* Disable ADC peripheral, and wait until it is disabled
*/
static void adc_stm32_disable(ADC_TypeDef *adc)
{
if (LL_ADC_IsEnabled(adc) != 1UL) {
return;
}
/* Stop ongoing conversion if any
* Software must poll ADSTART (or JADSTART) until the bit is reset before assuming
* the ADC is completely stopped.
*/
#if !DT_HAS_COMPAT_STATUS_OKAY(st_stm32f1_adc) && \
!DT_HAS_COMPAT_STATUS_OKAY(st_stm32f4_adc)
if (LL_ADC_REG_IsConversionOngoing(adc)) {
LL_ADC_REG_StopConversion(adc);
while (LL_ADC_REG_IsConversionOngoing(adc)) {
}
}
#endif
#if !defined(CONFIG_SOC_SERIES_STM32C0X) && \
!defined(CONFIG_SOC_SERIES_STM32F0X) && \
!DT_HAS_COMPAT_STATUS_OKAY(st_stm32f1_adc) && \
!DT_HAS_COMPAT_STATUS_OKAY(st_stm32f4_adc) && \
!defined(CONFIG_SOC_SERIES_STM32G0X) && \
!defined(CONFIG_SOC_SERIES_STM32L0X) && \
!defined(CONFIG_SOC_SERIES_STM32WBAX) && \
!defined(CONFIG_SOC_SERIES_STM32WLX)
if (LL_ADC_INJ_IsConversionOngoing(adc)) {
LL_ADC_INJ_StopConversion(adc);
while (LL_ADC_INJ_IsConversionOngoing(adc)) {
}
}
#endif
LL_ADC_Disable(adc);
/* Wait ADC is fully disabled so that we don't leave the driver into intermediate state
* which could prevent enabling the peripheral
*/
while (LL_ADC_IsEnabled(adc) == 1UL) {
}
}
#if !DT_HAS_COMPAT_STATUS_OKAY(st_stm32f4_adc)
#define HAS_CALIBRATION
/* Number of ADC clock cycles to wait before of after starting calibration */
#if defined(LL_ADC_DELAY_CALIB_ENABLE_ADC_CYCLES)
#define ADC_DELAY_CALIB_ADC_CYCLES LL_ADC_DELAY_CALIB_ENABLE_ADC_CYCLES
#elif defined(LL_ADC_DELAY_ENABLE_CALIB_ADC_CYCLES)
#define ADC_DELAY_CALIB_ADC_CYCLES LL_ADC_DELAY_ENABLE_CALIB_ADC_CYCLES
#elif defined(LL_ADC_DELAY_DISABLE_CALIB_ADC_CYCLES)
#define ADC_DELAY_CALIB_ADC_CYCLES LL_ADC_DELAY_DISABLE_CALIB_ADC_CYCLES
#endif
static void adc_stm32_calibration_delay(const struct device *dev)
{
/*
* Calibration of F1 and F3 (ADC1_V2_5) must start two cycles after ADON
* is set.
* Other ADC modules have to wait for some cycles after calibration to
* be enabled.
*/
const struct adc_stm32_cfg *config = dev->config;
const struct device *const clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE);
uint32_t adc_rate, wait_cycles;
if (clock_control_get_rate(clk,
(clock_control_subsys_t) &config->pclken[0], &adc_rate) < 0) {
LOG_ERR("ADC clock rate get error.");
}
if (adc_rate == 0) {
LOG_ERR("ADC Clock rate null");
return;
}
wait_cycles = SystemCoreClock / adc_rate *
ADC_DELAY_CALIB_ADC_CYCLES;
for (int i = wait_cycles; i >= 0; i--) {
}
}
static void adc_stm32_calibration_start(const struct device *dev)
{
const struct adc_stm32_cfg *config =
(const struct adc_stm32_cfg *)dev->config;
ADC_TypeDef *adc = config->base;
#if defined(STM32F3X_ADC_V1_1) || \
defined(CONFIG_SOC_SERIES_STM32L4X) || \
defined(CONFIG_SOC_SERIES_STM32L5X) || \
defined(CONFIG_SOC_SERIES_STM32H5X) || \
defined(CONFIG_SOC_SERIES_STM32H7RSX) || \
defined(CONFIG_SOC_SERIES_STM32WBX) || \
defined(CONFIG_SOC_SERIES_STM32G4X)
LL_ADC_StartCalibration(adc, LL_ADC_SINGLE_ENDED);
#elif defined(CONFIG_SOC_SERIES_STM32C0X) || \
defined(CONFIG_SOC_SERIES_STM32F0X) || \
DT_HAS_COMPAT_STATUS_OKAY(st_stm32f1_adc) || \
defined(CONFIG_SOC_SERIES_STM32G0X) || \
defined(CONFIG_SOC_SERIES_STM32L0X) || \
defined(CONFIG_SOC_SERIES_STM32WLX) || \
defined(CONFIG_SOC_SERIES_STM32WBAX)
LL_ADC_StartCalibration(adc);
#elif defined(CONFIG_SOC_SERIES_STM32U5X)
if (adc != ADC4) {
uint32_t dev_id = LL_DBGMCU_GetDeviceID();
uint32_t rev_id = LL_DBGMCU_GetRevisionID();
/* Some U5 implement an extended calibration to enhance ADC performance.
* It is not available for ADC4.
* It is available on all U5 except U575/585 (dev ID 482) revision X (rev ID 2001).
* The code below applies the procedure described in RM0456 in the ADC chapter:
* "Extended calibration mode"
*/
if ((dev_id != 0x482UL) && (rev_id != 0x2001UL)) {
adc_stm32_enable(adc);
MODIFY_REG(adc->CR, ADC_CR_CALINDEX, 0x9UL << ADC_CR_CALINDEX_Pos);
MODIFY_REG(adc->CALFACT2, 0xFFFFFF00UL, 0x03021100UL);
SET_BIT(adc->CALFACT, ADC_CALFACT_LATCH_COEF);
adc_stm32_disable(adc);
}
}
LL_ADC_StartCalibration(adc, LL_ADC_CALIB_OFFSET);
#elif defined(CONFIG_SOC_SERIES_STM32H7X)
LL_ADC_StartCalibration(adc, LL_ADC_CALIB_OFFSET, LL_ADC_SINGLE_ENDED);
#endif
/* Make sure ADCAL is cleared before returning for proper operations
* on the ADC control register, for enabling the peripheral for example
*/
while (LL_ADC_IsCalibrationOnGoing(adc)) {
}
}
static int adc_stm32_calibrate(const struct device *dev)
{
const struct adc_stm32_cfg *config =
(const struct adc_stm32_cfg *)dev->config;
ADC_TypeDef *adc = config->base;
int err;
#if defined(CONFIG_ADC_STM32_DMA)
#if defined(CONFIG_SOC_SERIES_STM32C0X) || \
defined(CONFIG_SOC_SERIES_STM32F0X) || \
defined(CONFIG_SOC_SERIES_STM32G0X) || \
defined(CONFIG_SOC_SERIES_STM32H7RSX) || \
defined(CONFIG_SOC_SERIES_STM32L0X) || \
defined(CONFIG_SOC_SERIES_STM32WBAX) || \
defined(CONFIG_SOC_SERIES_STM32WLX)
/* Make sure DMA is disabled before starting calibration */
LL_ADC_REG_SetDMATransfer(adc, LL_ADC_REG_DMA_TRANSFER_NONE);
#elif defined(CONFIG_SOC_SERIES_STM32U5X)
if (adc == ADC4) {
/* Make sure DMA is disabled before starting calibration */
LL_ADC_REG_SetDMATransfer(adc, LL_ADC_REG_DMA_TRANSFER_NONE);
}
#endif /* CONFIG_SOC_SERIES_* */
#endif /* CONFIG_ADC_STM32_DMA */
#if !DT_HAS_COMPAT_STATUS_OKAY(st_stm32f1_adc)
adc_stm32_disable(adc);
adc_stm32_calibration_start(dev);
adc_stm32_calibration_delay(dev);
#endif /* !DT_HAS_COMPAT_STATUS_OKAY(st_stm32f1_adc) */
err = adc_stm32_enable(adc);
if (err < 0) {
return err;
}
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32f1_adc)
adc_stm32_calibration_delay(dev);
adc_stm32_calibration_start(dev);
#endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32f1_adc) */
#if defined(CONFIG_SOC_SERIES_STM32H7X) && \
defined(CONFIG_CPU_CORTEX_M7)
/*
* To ensure linearity the factory calibration values
* should be loaded on initialization.
*/
uint32_t channel_offset = 0U;
uint32_t linear_calib_buffer = 0U;
if (adc == ADC1) {
channel_offset = 0UL;
} else if (adc == ADC2) {
channel_offset = 8UL;
} else /*Case ADC3*/ {
channel_offset = 16UL;
}
/* Read factory calibration factors */
for (uint32_t count = 0UL; count < ADC_LINEAR_CALIB_REG_COUNT; count++) {
linear_calib_buffer = *(uint32_t *)(
ADC_LINEAR_CALIB_REG_1_ADDR + channel_offset + count
);
LL_ADC_SetCalibrationLinearFactor(
adc, LL_ADC_CALIB_LINEARITY_WORD1 << count,
linear_calib_buffer
);
}
#endif /* CONFIG_SOC_SERIES_STM32H7X */
return 0;
}
#endif /* !DT_HAS_COMPAT_STATUS_OKAY(st_stm32f4_adc) */
#if !defined(CONFIG_SOC_SERIES_STM32F0X) && \
!defined(CONFIG_SOC_SERIES_STM32F1X) && \
!defined(CONFIG_SOC_SERIES_STM32F3X) && \
!DT_HAS_COMPAT_STATUS_OKAY(st_stm32f4_adc)
#define HAS_OVERSAMPLING
#define OVS_SHIFT(n) LL_ADC_OVS_SHIFT_RIGHT_##n
static const uint32_t table_oversampling_shift[] = {
LL_ADC_OVS_SHIFT_NONE,
OVS_SHIFT(1),
OVS_SHIFT(2),
OVS_SHIFT(3),
OVS_SHIFT(4),
OVS_SHIFT(5),
OVS_SHIFT(6),
OVS_SHIFT(7),
OVS_SHIFT(8),
#if defined(CONFIG_SOC_SERIES_STM32H7X) || \
defined(CONFIG_SOC_SERIES_STM32U5X)
OVS_SHIFT(9),
OVS_SHIFT(10),
#endif
};
#ifdef LL_ADC_OVS_RATIO_2
#define OVS_RATIO(n) LL_ADC_OVS_RATIO_##n
static const uint32_t table_oversampling_ratio[] = {
0,
OVS_RATIO(2),
OVS_RATIO(4),
OVS_RATIO(8),
OVS_RATIO(16),
OVS_RATIO(32),
OVS_RATIO(64),
OVS_RATIO(128),
OVS_RATIO(256),
};
#endif
/*
* Function to configure the oversampling scope. It is basically a wrapper over
* LL_ADC_SetOverSamplingScope() which in addition stops the ADC if needed.
*/
static void adc_stm32_oversampling_scope(ADC_TypeDef *adc, uint32_t ovs_scope)
{
#if defined(CONFIG_SOC_SERIES_STM32G0X) || \
defined(CONFIG_SOC_SERIES_STM32L0X) || \
defined(CONFIG_SOC_SERIES_STM32WLX)
/*
* Setting OVS bits is conditioned to ADC state: ADC must be disabled
* or enabled without conversion on going : disable it, it will stop.
* For the G0 series, ADC must be disabled to prevent CKMODE bitfield
* from getting reset, see errata ES0418 section 2.6.4.
*/
if (LL_ADC_GetOverSamplingScope(adc) == ovs_scope) {
return;
}
adc_stm32_disable(adc);
#endif
LL_ADC_SetOverSamplingScope(adc, ovs_scope);
}
/*
* Function to configure the oversampling ratio and shift. It is basically a
* wrapper over LL_ADC_SetOverSamplingRatioShift() which in addition stops the
* ADC if needed.
*/
static void adc_stm32_oversampling_ratioshift(ADC_TypeDef *adc, uint32_t ratio, uint32_t shift)
{
/*
* setting OVS bits is conditioned to ADC state: ADC must be disabled
* or enabled without conversion on going : disable it, it will stop
*/
if ((LL_ADC_GetOverSamplingRatio(adc) == ratio)
&& (LL_ADC_GetOverSamplingShift(adc) == shift)) {
return;
}
adc_stm32_disable(adc);
LL_ADC_ConfigOverSamplingRatioShift(adc, ratio, shift);
}
/*
* Function to configure the oversampling ratio and shift using stm32 LL
* ratio is directly the sequence->oversampling (a 2^n value)
* shift is the corresponding LL_ADC_OVS_SHIFT_RIGHT_x constant
*/
static int adc_stm32_oversampling(ADC_TypeDef *adc, uint8_t ratio)
{
if (ratio == 0) {
adc_stm32_oversampling_scope(adc, LL_ADC_OVS_DISABLE);
return 0;
} else if (ratio < ARRAY_SIZE(table_oversampling_shift)) {
adc_stm32_oversampling_scope(adc, LL_ADC_OVS_GRP_REGULAR_CONTINUED);
} else {
LOG_ERR("Invalid oversampling");
return -EINVAL;
}
uint32_t shift = table_oversampling_shift[ratio];
#if defined(CONFIG_SOC_SERIES_STM32H7X)
/* Certain variants of the H7, such as STM32H72x/H73x has ADC3
* as a separate entity and require special handling.
*/
#if defined(ADC_VER_V5_V90)
if (adc != ADC3) {
/* the LL function expects a value from 1 to 1024 */
adc_stm32_oversampling_ratioshift(adc, 1 << ratio, shift);
} else {
/* the LL function expects a value LL_ADC_OVS_RATIO_x */
adc_stm32_oversampling_ratioshift(adc, table_oversampling_ratio[ratio], shift);
}
#else
/* the LL function expects a value from 1 to 1024 */
adc_stm32_oversampling_ratioshift(adc, 1 << ratio, shift);
#endif /* defined(ADC_VER_V5_V90) */
#elif defined(CONFIG_SOC_SERIES_STM32U5X)
if (adc != ADC4) {
/* the LL function expects a value from 1 to 1024 */
adc_stm32_oversampling_ratioshift(adc, (1 << ratio), shift);
} else {
/* the LL function expects a value LL_ADC_OVS_RATIO_x */
adc_stm32_oversampling_ratioshift(adc, table_oversampling_ratio[ratio], shift);
}
#else /* CONFIG_SOC_SERIES_STM32H7X */
adc_stm32_oversampling_ratioshift(adc, table_oversampling_ratio[ratio], shift);
#endif /* CONFIG_SOC_SERIES_STM32H7X */
return 0;
}
#endif /* CONFIG_SOC_SERIES_STM32xxx */
#ifdef CONFIG_ADC_STM32_DMA
static void dma_callback(const struct device *dev, void *user_data,
uint32_t channel, int status)
{
/* user_data directly holds the adc device */
struct adc_stm32_data *data = user_data;
const struct adc_stm32_cfg *config = data->dev->config;
ADC_TypeDef *adc = (ADC_TypeDef *)config->base;
LOG_DBG("dma callback");
if (channel == data->dma.channel) {
#if !DT_HAS_COMPAT_STATUS_OKAY(st_stm32f1_adc)
if (LL_ADC_IsActiveFlag_OVR(adc) || (status >= 0)) {
#else
if (status >= 0) {
#endif /* !DT_HAS_COMPAT_STATUS_OKAY(st_stm32f1_adc) */
data->samples_count = data->channel_count;
data->buffer += data->channel_count;
/* Stop the DMA engine, only to start it again when the callback returns
* ADC_ACTION_REPEAT or ADC_ACTION_CONTINUE, or the number of samples
* haven't been reached Starting the DMA engine is done
* within adc_context_start_sampling
*/
dma_stop(data->dma.dma_dev, data->dma.channel);
#if !DT_HAS_COMPAT_STATUS_OKAY(st_stm32f1_adc)
LL_ADC_ClearFlag_OVR(adc);
#endif /* !DT_HAS_COMPAT_STATUS_OKAY(st_stm32f1_adc) */
/* No need to invalidate the cache because it's assumed that
* the address is in a non-cacheable SRAM region.
*/
adc_context_on_sampling_done(&data->ctx, dev);
pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE,
PM_ALL_SUBSTATES);
if (IS_ENABLED(CONFIG_PM_S2RAM)) {
pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_RAM,
PM_ALL_SUBSTATES);
}
} else if (status < 0) {
LOG_ERR("DMA sampling complete, but DMA reported error %d", status);
data->dma_error = status;
#if !DT_HAS_COMPAT_STATUS_OKAY(st_stm32f4_adc)
LL_ADC_REG_StopConversion(adc);
#endif
dma_stop(data->dma.dma_dev, data->dma.channel);
adc_context_complete(&data->ctx, status);
}
}
}
#endif /* CONFIG_ADC_STM32_DMA */
static uint8_t get_reg_value(const struct device *dev, uint32_t reg,
uint32_t shift, uint32_t mask)
{
const struct adc_stm32_cfg *config = dev->config;
ADC_TypeDef *adc = (ADC_TypeDef *)config->base;
uintptr_t addr = (uintptr_t)adc + reg;
return ((*(volatile uint32_t *)addr >> shift) & mask);
}
static void set_reg_value(const struct device *dev, uint32_t reg,
uint32_t shift, uint32_t mask, uint32_t value)
{
const struct adc_stm32_cfg *config = dev->config;
ADC_TypeDef *adc = (ADC_TypeDef *)config->base;
uintptr_t addr = (uintptr_t)adc + reg;
MODIFY_REG(*(volatile uint32_t *)addr, (mask << shift), (value << shift));
}
static int set_resolution(const struct device *dev,
const struct adc_sequence *sequence)
{
const struct adc_stm32_cfg *config = dev->config;
ADC_TypeDef *adc = (ADC_TypeDef *)config->base;
uint8_t res_reg_addr = 0xFF;
uint8_t res_shift = 0;
uint8_t res_mask = 0;
uint8_t res_reg_val = 0;
int i;
for (i = 0; i < config->res_table_size; i++) {
if (sequence->resolution == STM32_ADC_GET_REAL_VAL(config->res_table[i])) {
res_reg_addr = STM32_ADC_GET_REG(config->res_table[i]);
res_shift = STM32_ADC_GET_SHIFT(config->res_table[i]);
res_mask = STM32_ADC_GET_MASK(config->res_table[i]);
res_reg_val = STM32_ADC_GET_REG_VAL(config->res_table[i]);
break;
}
}
if (i == config->res_table_size) {
LOG_ERR("Invalid resolution");
return -EINVAL;
}
/*
* Some MCUs (like STM32F1x) have no register to configure resolution.
* These MCUs have a register address value of 0xFF and should be
* ignored.
*/
if (res_reg_addr != 0xFF) {
/*
* We don't use LL_ADC_SetResolution and LL_ADC_GetResolution
* because they don't strictly use hardware resolution values
* and makes internal conversions for some series.
* (see stm32h7xx_ll_adc.h)
* Instead we set the register ourselves if needed.
*/
if (get_reg_value(dev, res_reg_addr, res_shift, res_mask) != res_reg_val) {
/*
* Writing ADC_CFGR1 register while ADEN bit is set
* resets RES[1:0] bitfield. We need to disable and enable adc.
*/
adc_stm32_disable(adc);
set_reg_value(dev, res_reg_addr, res_shift, res_mask, res_reg_val);
}
}
return 0;
}
static int set_sequencer(const struct device *dev)
{
const struct adc_stm32_cfg *config = dev->config;
struct adc_stm32_data *data = dev->data;
ADC_TypeDef *adc = (ADC_TypeDef *)config->base;
uint8_t channel_id;
uint8_t channel_index = 0;
uint32_t channels_mask = 0;
/* Iterate over selected channels in bitmask keeping track of:
* - channel_index: ranging from 0 -> ( data->channel_count - 1 )
* - channel_id: ordinal position of channel in data->channels bitmask
*/
for (uint32_t channels = data->channels; channels;
channels &= ~BIT(channel_id), channel_index++) {
channel_id = find_lsb_set(channels) - 1;
uint32_t channel = __LL_ADC_DECIMAL_NB_TO_CHANNEL(channel_id);
channels_mask |= channel;
#if ANY_ADC_SEQUENCER_TYPE_IS(FULLY_CONFIGURABLE)
if (config->sequencer_type == FULLY_CONFIGURABLE) {
#if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32U5X)
/*
* Each channel in the sequence must be previously enabled in PCSEL.
* This register controls the analog switch integrated in the IO level.
*/
LL_ADC_SetChannelPreselection(adc, channel);
#endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32U5X */
LL_ADC_REG_SetSequencerRanks(adc, table_rank[channel_index], channel);
LL_ADC_REG_SetSequencerLength(adc, table_seq_len[channel_index]);
}
#endif /* ANY_ADC_SEQUENCER_TYPE_IS(FULLY_CONFIGURABLE) */
}
#if ANY_ADC_SEQUENCER_TYPE_IS(NOT_FULLY_CONFIGURABLE)
if (config->sequencer_type == NOT_FULLY_CONFIGURABLE) {
LL_ADC_REG_SetSequencerChannels(adc, channels_mask);
#if !defined(CONFIG_SOC_SERIES_STM32F0X) && \
!defined(CONFIG_SOC_SERIES_STM32L0X) && \
!defined(CONFIG_SOC_SERIES_STM32U5X) && \
!defined(CONFIG_SOC_SERIES_STM32WBAX)
/*
* After modifying sequencer it is mandatory to wait for the
* assertion of CCRDY flag
*/
while (LL_ADC_IsActiveFlag_CCRDY(adc) == 0) {
}
LL_ADC_ClearFlag_CCRDY(adc);
#endif /* !CONFIG_SOC_SERIES_STM32F0X && !L0X && !U5X && !WBAX */
}
#endif /* ANY_ADC_SEQUENCER_TYPE_IS(NOT_FULLY_CONFIGURABLE) */
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32f1_adc) || \
DT_HAS_COMPAT_STATUS_OKAY(st_stm32f4_adc)
LL_ADC_SetSequencersScanMode(adc, LL_ADC_SEQ_SCAN_ENABLE);
#endif /* st_stm32f1_adc || st_stm32f4_adc */
return 0;
}
static int start_read(const struct device *dev,
const struct adc_sequence *sequence)
{
const struct adc_stm32_cfg *config = dev->config;
struct adc_stm32_data *data = dev->data;
ADC_TypeDef *adc = (ADC_TypeDef *)config->base;
int err;
data->buffer = sequence->buffer;
data->channels = sequence->channels;
data->channel_count = POPCOUNT(data->channels);
data->samples_count = 0;
if (data->channel_count == 0) {
LOG_ERR("No channels selected");
return -EINVAL;
}
#if ANY_ADC_SEQUENCER_TYPE_IS(FULLY_CONFIGURABLE)
if (data->channel_count > ARRAY_SIZE(table_seq_len)) {
LOG_ERR("Too many channels for sequencer. Max: %d", ARRAY_SIZE(table_seq_len));
return -EINVAL;
}
#endif /* ANY_ADC_SEQUENCER_TYPE_IS(FULLY_CONFIGURABLE) */
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32f1_adc) && !defined(CONFIG_ADC_STM32_DMA)
/* Multiple samplings is only supported with DMA for F1 */
if (data->channel_count > 1) {
LOG_ERR("Without DMA, this device only supports single channel sampling");
return -EINVAL;
}
#endif /* DT_HAS_COMPAT_STATUS_OKAY(st_stm32f1_adc) && !CONFIG_ADC_STM32_DMA */
/* Check and set the resolution */
err = set_resolution(dev, sequence);
if (err < 0) {
return err;
}
/* Configure the sequencer */
err = set_sequencer(dev);
if (err < 0) {
return err;
}
err = check_buffer(sequence, data->channel_count);
if (err) {
return err;
}
#ifdef HAS_OVERSAMPLING
err = adc_stm32_oversampling(adc, sequence->oversampling);
if (err) {
return err;
}
#else
if (sequence->oversampling) {
LOG_ERR("Oversampling not supported");
return -ENOTSUP;
}
#endif /* HAS_OVERSAMPLING */
if (sequence->calibrate) {
#if defined(HAS_CALIBRATION)
adc_stm32_calibrate(dev);
#else
LOG_ERR("Calibration not supported");
return -ENOTSUP;
#endif
}
/*
* Make sure the ADC is enabled as it might have been disabled earlier
* to set the resolution, to set the oversampling or to perform the
* calibration.
*/
adc_stm32_enable(adc);
#if !DT_HAS_COMPAT_STATUS_OKAY(st_stm32f1_adc)
LL_ADC_ClearFlag_OVR(adc);
#endif /* !DT_HAS_COMPAT_STATUS_OKAY(st_stm32f1_adc) */
#if !defined(CONFIG_ADC_STM32_DMA)
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32f4_adc)
/* Trigger an ISR after each sampling (not just end of sequence) */
LL_ADC_REG_SetFlagEndOfConversion(adc, LL_ADC_REG_FLAG_EOC_UNITARY_CONV);
LL_ADC_EnableIT_EOCS(adc);
#elif DT_HAS_COMPAT_STATUS_OKAY(st_stm32f1_adc)
LL_ADC_EnableIT_EOS(adc);
#else
LL_ADC_EnableIT_EOC(adc);
#endif
#endif /* CONFIG_ADC_STM32_DMA */
/* This call will start the DMA */
adc_context_start_read(&data->ctx, sequence);
int result = adc_context_wait_for_completion(&data->ctx);
#ifdef CONFIG_ADC_STM32_DMA
/* check if there's anything wrong with dma start */
result = (data->dma_error ? data->dma_error : result);
#endif
return result;
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct adc_stm32_data *data =
CONTAINER_OF(ctx, struct adc_stm32_data, ctx);
const struct device *dev = data->dev;
const struct adc_stm32_cfg *config = dev->config;
ADC_TypeDef *adc = (ADC_TypeDef *)config->base;
/* Remove warning for some series */
ARG_UNUSED(adc);
data->repeat_buffer = data->buffer;
#ifdef CONFIG_ADC_STM32_DMA
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32f4_adc)
/* Make sure DMA bit of ADC register CR2 is set to 0 before starting a DMA transfer */
LL_ADC_REG_SetDMATransfer(adc, LL_ADC_REG_DMA_TRANSFER_NONE);
#endif
adc_stm32_dma_start(dev, data->buffer, data->channel_count);
#endif
adc_stm32_start_conversion(dev);
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
bool repeat_sampling)
{
struct adc_stm32_data *data =
CONTAINER_OF(ctx, struct adc_stm32_data, ctx);
if (repeat_sampling) {
data->buffer = data->repeat_buffer;
}
}
#ifndef CONFIG_ADC_STM32_DMA
static void adc_stm32_isr(const struct device *dev)
{
struct adc_stm32_data *data = dev->data;
const struct adc_stm32_cfg *config =
(const struct adc_stm32_cfg *)dev->config;
ADC_TypeDef *adc = config->base;
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32f1_adc)
if (LL_ADC_IsActiveFlag_EOS(adc) == 1) {
#elif DT_HAS_COMPAT_STATUS_OKAY(st_stm32f4_adc)
if (LL_ADC_IsActiveFlag_EOCS(adc) == 1) {
#else
if (LL_ADC_IsActiveFlag_EOC(adc) == 1) {
#endif
*data->buffer++ = LL_ADC_REG_ReadConversionData32(adc);
/* ISR is triggered after each conversion, and at the end-of-sequence. */
if (++data->samples_count == data->channel_count) {
data->samples_count = 0;
adc_context_on_sampling_done(&data->ctx, dev);
pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE,
PM_ALL_SUBSTATES);
if (IS_ENABLED(CONFIG_PM_S2RAM)) {
pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_RAM,
PM_ALL_SUBSTATES);
}
}
}
LOG_DBG("%s ISR triggered.", dev->name);
}
#endif /* !CONFIG_ADC_STM32_DMA */
static void adc_context_on_complete(struct adc_context *ctx, int status)
{
struct adc_stm32_data *data =
CONTAINER_OF(ctx, struct adc_stm32_data, ctx);
const struct adc_stm32_cfg *config = data->dev->config;
ADC_TypeDef *adc = (ADC_TypeDef *)config->base;
ARG_UNUSED(status);
/* Reset acquisition time used for the sequence */
data->acq_time_index[0] = -1;
data->acq_time_index[1] = -1;
#if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32U5X)
/* Reset channel preselection register */
LL_ADC_SetChannelPreselection(adc, 0);
#else
ARG_UNUSED(adc);
#endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32U5X */
}
static int adc_stm32_read(const struct device *dev,
const struct adc_sequence *sequence)
{
struct adc_stm32_data *data = dev->data;
int error;
adc_context_lock(&data->ctx, false, NULL);
pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
if (IS_ENABLED(CONFIG_PM_S2RAM)) {
pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_RAM, PM_ALL_SUBSTATES);
}
error = start_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
#ifdef CONFIG_ADC_ASYNC
static int adc_stm32_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
struct adc_stm32_data *data = dev->data;
int error;
adc_context_lock(&data->ctx, true, async);
pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
if (IS_ENABLED(CONFIG_PM_S2RAM)) {
pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_RAM, PM_ALL_SUBSTATES);
}
error = start_read(dev, sequence);
adc_context_release(&data->ctx, error);
return error;
}
#endif
static int adc_stm32_sampling_time_check(const struct device *dev, uint16_t acq_time)
{
const struct adc_stm32_cfg *config =
(const struct adc_stm32_cfg *)dev->config;
if (acq_time == ADC_ACQ_TIME_DEFAULT) {
return 0;
}
if (acq_time == ADC_ACQ_TIME_MAX) {
return STM32_NB_SAMPLING_TIME - 1;
}
for (int i = 0; i < STM32_NB_SAMPLING_TIME; i++) {
if (acq_time == ADC_ACQ_TIME(ADC_ACQ_TIME_TICKS,
config->sampling_time_table[i])) {
return i;
}
}
LOG_ERR("Sampling time value not supported.");
return -EINVAL;
}
static int adc_stm32_sampling_time_setup(const struct device *dev, uint8_t id,
uint16_t acq_time)
{
const struct adc_stm32_cfg *config =
(const struct adc_stm32_cfg *)dev->config;
ADC_TypeDef *adc = config->base;
struct adc_stm32_data *data = dev->data;
int acq_time_index;
acq_time_index = adc_stm32_sampling_time_check(dev, acq_time);
if (acq_time_index < 0) {
return acq_time_index;
}
/*
* For all series we use the fact that the macros LL_ADC_SAMPLINGTIME_*
* that should be passed to the set functions are all coded on 3 bits
* with 0 shift (ie 0 to 7). So acq_time_index is equivalent to the
* macro we would use for the desired sampling time.
*/
switch (config->num_sampling_time_common_channels) {
case 0:
#if ANY_NUM_COMMON_SAMPLING_TIME_CHANNELS_IS(0)
ARG_UNUSED(data);
LL_ADC_SetChannelSamplingTime(adc,
__LL_ADC_DECIMAL_NB_TO_CHANNEL(id),
(uint32_t)acq_time_index);
#endif
break;
case 1:
#if ANY_NUM_COMMON_SAMPLING_TIME_CHANNELS_IS(1)
/* Only one sampling time can be selected for all channels.
* The first one we find is used, all others must match.
*/
if ((data->acq_time_index[0] == -1) ||
(acq_time_index == data->acq_time_index[0])) {
/* Reg is empty or value matches */
data->acq_time_index[0] = acq_time_index;
LL_ADC_SetSamplingTimeCommonChannels(adc,
(uint32_t)acq_time_index);
} else {
/* Reg is used and value does not match */
LOG_ERR("Multiple sampling times not supported");
return -EINVAL;
}
#endif
break;
case 2:
#if ANY_NUM_COMMON_SAMPLING_TIME_CHANNELS_IS(2)
/* Two different sampling times can be selected for all channels.
* The first two we find are used, all others must match either one.
*/
if ((data->acq_time_index[0] == -1) ||
(acq_time_index == data->acq_time_index[0])) {
/* 1st reg is empty or value matches 1st reg */
data->acq_time_index[0] = acq_time_index;
LL_ADC_SetChannelSamplingTime(adc,
__LL_ADC_DECIMAL_NB_TO_CHANNEL(id),
LL_ADC_SAMPLINGTIME_COMMON_1);
LL_ADC_SetSamplingTimeCommonChannels(adc,
LL_ADC_SAMPLINGTIME_COMMON_1,
(uint32_t)acq_time_index);
} else if ((data->acq_time_index[1] == -1) ||
(acq_time_index == data->acq_time_index[1])) {
/* 2nd reg is empty or value matches 2nd reg */
data->acq_time_index[1] = acq_time_index;
LL_ADC_SetChannelSamplingTime(adc,
__LL_ADC_DECIMAL_NB_TO_CHANNEL(id),
LL_ADC_SAMPLINGTIME_COMMON_2);
LL_ADC_SetSamplingTimeCommonChannels(adc,
LL_ADC_SAMPLINGTIME_COMMON_2,
(uint32_t)acq_time_index);
} else {
/* Both regs are used, value does not match any of them */
LOG_ERR("Only two different sampling times supported");
return -EINVAL;
}
#endif
break;
default:
LOG_ERR("Number of common sampling time channels not supported");
return -EINVAL;
}
return 0;
}
static int adc_stm32_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
if (channel_cfg->differential) {
LOG_ERR("Differential channels are not supported");
return -EINVAL;
}
if (channel_cfg->gain != ADC_GAIN_1) {
LOG_ERR("Invalid channel gain");
return -EINVAL;
}
if (channel_cfg->reference != ADC_REF_INTERNAL) {
LOG_ERR("Invalid channel reference");
return -EINVAL;
}
if (adc_stm32_sampling_time_setup(dev, channel_cfg->channel_id,
channel_cfg->acquisition_time) != 0) {
LOG_ERR("Invalid sampling time");
return -EINVAL;
}
LOG_DBG("Channel setup succeeded!");
return 0;
}
/* This symbol takes the value 1 if one of the device instances */
/* is configured in dts with a domain clock */
#if STM32_DT_INST_DEV_DOMAIN_CLOCK_SUPPORT
#define STM32_ADC_DOMAIN_CLOCK_SUPPORT 1
#else
#define STM32_ADC_DOMAIN_CLOCK_SUPPORT 0
#endif
static int adc_stm32_set_clock(const struct device *dev)
{
const struct adc_stm32_cfg *config = dev->config;
const struct device *const clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE);
ADC_TypeDef *adc = (ADC_TypeDef *)config->base;
ARG_UNUSED(adc); /* Necessary to avoid warnings on some series */
if (clock_control_on(clk,
(clock_control_subsys_t) &config->pclken[0]) != 0) {
return -EIO;
}
if (IS_ENABLED(STM32_ADC_DOMAIN_CLOCK_SUPPORT) && (config->pclk_len > 1)) {
/* Enable ADC clock source */
if (clock_control_configure(clk,
(clock_control_subsys_t) &config->pclken[1],
NULL) != 0) {
return -EIO;
}
}
#if defined(CONFIG_SOC_SERIES_STM32F0X)
LL_ADC_SetClock(adc, config->clk_prescaler);
#elif defined(CONFIG_SOC_SERIES_STM32C0X) || \
defined(CONFIG_SOC_SERIES_STM32G0X) || \
defined(CONFIG_SOC_SERIES_STM32L0X) || \
(defined(CONFIG_SOC_SERIES_STM32WBX) && defined(ADC_SUPPORT_2_5_MSPS)) || \
defined(CONFIG_SOC_SERIES_STM32WLX)
if ((config->clk_prescaler == LL_ADC_CLOCK_SYNC_PCLK_DIV1) ||
(config->clk_prescaler == LL_ADC_CLOCK_SYNC_PCLK_DIV2) ||
(config->clk_prescaler == LL_ADC_CLOCK_SYNC_PCLK_DIV4)) {
LL_ADC_SetClock(adc, config->clk_prescaler);
} else {
LL_ADC_SetCommonClock(__LL_ADC_COMMON_INSTANCE(adc),
config->clk_prescaler);
LL_ADC_SetClock(adc, LL_ADC_CLOCK_ASYNC);
}
#elif !DT_HAS_COMPAT_STATUS_OKAY(st_stm32f1_adc)
LL_ADC_SetCommonClock(__LL_ADC_COMMON_INSTANCE(adc),
config->clk_prescaler);
#endif
return 0;
}
static int adc_stm32_init(const struct device *dev)
{
struct adc_stm32_data *data = dev->data;
const struct adc_stm32_cfg *config = dev->config;
const struct device *const clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE);
ADC_TypeDef *adc = (ADC_TypeDef *)config->base;
int err;
ARG_UNUSED(adc); /* Necessary to avoid warnings on some series */
LOG_DBG("Initializing %s", dev->name);
if (!device_is_ready(clk)) {
LOG_ERR("clock control device not ready");
return -ENODEV;
}
data->dev = dev;
/*
* For series that use common channels for sampling time, all
* conversion time for all channels on one ADC instance has to
* be the same.
* For series that use two common channels, there can be up to two
* conversion times selected for all channels in a sequence.
* This additional table is for checking that the conversion time
* selection of all channels respects these requirements.
*/
data->acq_time_index[0] = -1;
data->acq_time_index[1] = -1;
adc_stm32_set_clock(dev);
/* Configure dt provided device signals when available */
err = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
if (err < 0) {
LOG_ERR("ADC pinctrl setup failed (%d)", err);
return err;
}
#if defined(CONFIG_SOC_SERIES_STM32U5X)
/* Enable the independent analog supply */
LL_PWR_EnableVDDA();
#endif /* CONFIG_SOC_SERIES_STM32U5X */
#ifdef CONFIG_ADC_STM32_DMA
if ((data->dma.dma_dev != NULL) &&
!device_is_ready(data->dma.dma_dev)) {
LOG_ERR("%s device not ready", data->dma.dma_dev->name);
return -ENODEV;
}
#endif
#if defined(CONFIG_SOC_SERIES_STM32L4X) || \
defined(CONFIG_SOC_SERIES_STM32L5X) || \
defined(CONFIG_SOC_SERIES_STM32WBX) || \
defined(CONFIG_SOC_SERIES_STM32G4X) || \
defined(CONFIG_SOC_SERIES_STM32H5X) || \
defined(CONFIG_SOC_SERIES_STM32H7X) || \
defined(CONFIG_SOC_SERIES_STM32H7RSX) || \
defined(CONFIG_SOC_SERIES_STM32U5X)
/*
* L4, WB, G4, H5, H7 and U5 series STM32 needs to be awaken from deep sleep
* mode, and restore its calibration parameters if there are some
* previously stored calibration parameters.
*/
LL_ADC_DisableDeepPowerDown(adc);
#endif
/*
* Many ADC modules need some time to be stabilized before performing
* any enable or calibration actions.
*/
#if !defined(CONFIG_SOC_SERIES_STM32F0X) && \
!DT_HAS_COMPAT_STATUS_OKAY(st_stm32f1_adc) && \
!DT_HAS_COMPAT_STATUS_OKAY(st_stm32f4_adc)
LL_ADC_EnableInternalRegulator(adc);
/* Wait for Internal regulator stabilisation
* Some series have a dedicated status bit, others relie on a delay
*/
#if defined(CONFIG_SOC_SERIES_STM32H7X) && defined(ADC_VER_V5_V90)
/* ADC3 on H72x/H73x doesn't have the LDORDY status bit */
if (adc == ADC3) {
k_busy_wait(LL_ADC_DELAY_INTERNAL_REGUL_STAB_US);
} else {
while (LL_ADC_IsActiveFlag_LDORDY(adc) == 0) {
}
}
#elif defined(CONFIG_SOC_SERIES_STM32H7X) || \
defined(CONFIG_SOC_SERIES_STM32U5X) || \
defined(CONFIG_SOC_SERIES_STM32WBAX)
/* Don't use LL_ADC_IsActiveFlag_LDORDY since not present in U5 LL (1.5.0)
* (internal issue 185106)
*/
while ((READ_BIT(adc->ISR, LL_ADC_FLAG_LDORDY) != (LL_ADC_FLAG_LDORDY))) {
}
#else
k_busy_wait(LL_ADC_DELAY_INTERNAL_REGUL_STAB_US);
#endif
#endif
if (config->irq_cfg_func) {
config->irq_cfg_func();
}
#if defined(HAS_CALIBRATION)
adc_stm32_calibrate(dev);
LL_ADC_REG_SetTriggerSource(adc, LL_ADC_REG_TRIG_SOFTWARE);
#endif /* HAS_CALIBRATION */
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
#ifdef CONFIG_PM_DEVICE
static int adc_stm32_suspend_setup(const struct device *dev)
{
const struct adc_stm32_cfg *config = dev->config;
ADC_TypeDef *adc = (ADC_TypeDef *)config->base;
const struct device *const clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE);
int err;
/* Disable ADC */
adc_stm32_disable(adc);
#if !defined(CONFIG_SOC_SERIES_STM32F0X) && \
!DT_HAS_COMPAT_STATUS_OKAY(st_stm32f1_adc) && \
!DT_HAS_COMPAT_STATUS_OKAY(st_stm32f4_adc)
/* Disable ADC internal voltage regulator */
LL_ADC_DisableInternalRegulator(adc);
while (LL_ADC_IsInternalRegulatorEnabled(adc) == 1U) {
}
#endif
#if defined(CONFIG_SOC_SERIES_STM32L4X) || \
defined(CONFIG_SOC_SERIES_STM32L5X) || \
defined(CONFIG_SOC_SERIES_STM32WBX) || \
defined(CONFIG_SOC_SERIES_STM32G4X) || \
defined(CONFIG_SOC_SERIES_STM32H5X) || \
defined(CONFIG_SOC_SERIES_STM32H7X) || \
defined(CONFIG_SOC_SERIES_STM32H7RSX) || \
defined(CONFIG_SOC_SERIES_STM32U5X)
/*
* L4, WB, G4, H5, H7 and U5 series STM32 needs to be put into
* deep sleep mode.
*/
LL_ADC_EnableDeepPowerDown(adc);
#endif
#if defined(CONFIG_SOC_SERIES_STM32U5X)
/* Disable the independent analog supply */
LL_PWR_DisableVDDA();
#endif /* CONFIG_SOC_SERIES_STM32U5X */
/* Stop device clock. Note: fixed clocks are not handled yet. */
err = clock_control_off(clk, (clock_control_subsys_t)&config->pclken[0]);
if (err != 0) {
LOG_ERR("Could not disable ADC clock");
return err;
}
/* Move pins to sleep state */
err = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_SLEEP);
if ((err < 0) && (err != -ENOENT)) {
/*
* If returning -ENOENT, no pins where defined for sleep mode :
* Do not output on console (might sleep already) when going to sleep,
* "ADC pinctrl sleep state not available"
* and don't block PM suspend.
* Else return the error.
*/
return err;
}
return 0;
}
static int adc_stm32_pm_action(const struct device *dev,
enum pm_device_action action)
{
switch (action) {
case PM_DEVICE_ACTION_RESUME:
return adc_stm32_init(dev);
case PM_DEVICE_ACTION_SUSPEND:
return adc_stm32_suspend_setup(dev);
default:
return -ENOTSUP;
}
return 0;
}
#endif /* CONFIG_PM_DEVICE */
static const struct adc_driver_api api_stm32_driver_api = {
.channel_setup = adc_stm32_channel_setup,
.read = adc_stm32_read,
#ifdef CONFIG_ADC_ASYNC
.read_async = adc_stm32_read_async,
#endif
.ref_internal = STM32_ADC_VREF_MV, /* VREF is usually connected to VDD */
};
#if defined(CONFIG_SOC_SERIES_STM32F0X)
/* LL_ADC_CLOCK_ASYNC_DIV1 doesn't exist in F0 LL. Define it here. */
#define LL_ADC_CLOCK_ASYNC_DIV1 LL_ADC_CLOCK_ASYNC
#endif
/* st_prescaler property requires 2 elements : clock ASYNC/SYNC and DIV */
#define ADC_STM32_CLOCK(x) DT_INST_PROP(x, st_adc_clock_source)
#define ADC_STM32_DIV(x) DT_INST_PROP(x, st_adc_prescaler)
/* Macro to set the prefix depending on the 1st element: check if it is SYNC or ASYNC */
#define ADC_STM32_CLOCK_PREFIX(x) \
COND_CODE_1(IS_EQ(ADC_STM32_CLOCK(x), SYNC), \
(LL_ADC_CLOCK_SYNC_PCLK_DIV), \
(LL_ADC_CLOCK_ASYNC_DIV))
/* Concat prefix (1st element) and DIV value (2nd element) of st,adc-prescaler */
#if DT_HAS_COMPAT_STATUS_OKAY(st_stm32f1_adc)
#define ADC_STM32_DT_PRESC(x) 0
#else
#define ADC_STM32_DT_PRESC(x) \
_CONCAT(ADC_STM32_CLOCK_PREFIX(x), ADC_STM32_DIV(x))
#endif
#if defined(CONFIG_ADC_STM32_DMA)
#define ADC_DMA_CHANNEL_INIT(index, src_dev, dest_dev) \
.dma = { \
.dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_IDX(index, 0)), \
.channel = DT_INST_DMAS_CELL_BY_IDX(index, 0, channel), \
.dma_cfg = { \
.dma_slot = STM32_DMA_SLOT_BY_IDX(index, 0, slot), \
.channel_direction = STM32_DMA_CONFIG_DIRECTION( \
STM32_DMA_CHANNEL_CONFIG_BY_IDX(index, 0)), \
.source_data_size = STM32_DMA_CONFIG_##src_dev##_DATA_SIZE( \
STM32_DMA_CHANNEL_CONFIG_BY_IDX(index, 0)), \
.dest_data_size = STM32_DMA_CONFIG_##dest_dev##_DATA_SIZE( \
STM32_DMA_CHANNEL_CONFIG_BY_IDX(index, 0)), \
.source_burst_length = 1, /* SINGLE transfer */ \
.dest_burst_length = 1, /* SINGLE transfer */ \
.channel_priority = STM32_DMA_CONFIG_PRIORITY( \
STM32_DMA_CHANNEL_CONFIG_BY_IDX(index, 0)), \
.dma_callback = dma_callback, \
.block_count = 2, \
}, \
.src_addr_increment = STM32_DMA_CONFIG_##src_dev##_ADDR_INC( \
STM32_DMA_CHANNEL_CONFIG_BY_IDX(index, 0)), \
.dst_addr_increment = STM32_DMA_CONFIG_##dest_dev##_ADDR_INC( \
STM32_DMA_CHANNEL_CONFIG_BY_IDX(index, 0)), \
}
#define ADC_STM32_IRQ_FUNC(index) \
.irq_cfg_func = NULL,
#else /* CONFIG_ADC_STM32_DMA */
/*
* For series that share interrupt lines for multiple ADC instances
* and have separate interrupt lines for other ADCs (example,
* STM32G473 has 5 ADC instances, ADC1 and ADC2 share IRQn 18 while
* ADC3, ADC4 and ADC5 use IRQns 47, 61 and 62 respectively), generate
* a single common ISR function for each IRQn and call adc_stm32_isr
* for each device using that interrupt line for all enabled ADCs.
*
* To achieve the above, a "first" ADC instance must be chosen for all
* ADC instances sharing the same IRQn. This "first" ADC instance
* generates the code for the common ISR and for installing and
* enabling it while any other ADC sharing the same IRQn skips this
* code generation and does nothing. The common ISR code is generated
* to include calls to adc_stm32_isr for all instances using that same
* IRQn. From the example above, four ISR functions would be generated
* for IRQn 18, 47, 61 and 62, with possible "first" ADC instances
* being ADC1, ADC3, ADC4 and ADC5 if all ADCs were enabled, with the
* ISR function 18 calling adc_stm32_isr for both ADC1 and ADC2.
*
* For some of the macros below, pseudo-code is provided to describe
* its function.
*/
/*
* return (irqn == device_irqn(index)) ? index : NULL
*/
#define FIRST_WITH_IRQN_INTERNAL(index, irqn) \
COND_CODE_1(IS_EQ(irqn, DT_INST_IRQN(index)), (index,), (EMPTY,))
/*
* Returns the "first" instance's index:
*
* instances = []
* for instance in all_active_adcs:
* instances.append(first_with_irqn_internal(device_irqn(index)))
* for instance in instances:
* if instance == NULL:
* instances.remove(instance)
* return instances[0]
*/
#define FIRST_WITH_IRQN(index) \
GET_ARG_N(1, LIST_DROP_EMPTY(DT_INST_FOREACH_STATUS_OKAY_VARGS(FIRST_WITH_IRQN_INTERNAL, \
DT_INST_IRQN(index))))
/*
* Provides code for calling adc_stm32_isr for an instance if its IRQn
* matches:
*
* if (irqn == device_irqn(index)):
* return "adc_stm32_isr(DEVICE_DT_INST_GET(index));"
*/
#define HANDLE_IRQS(index, irqn) \
COND_CODE_1(IS_EQ(irqn, DT_INST_IRQN(index)), (adc_stm32_isr(DEVICE_DT_INST_GET(index));), \
(EMPTY))
/*
* Name of the common ISR for a given IRQn (taken from a device with a
* given index). Example, for an ADC instance with IRQn 18, returns
* "adc_stm32_isr_18".
*/
#define ISR_FUNC(index) UTIL_CAT(adc_stm32_isr_, DT_INST_IRQN(index))
/*
* Macro for generating code for the common ISRs (by looping of all
* ADC instances that share the same IRQn as that of the given device
* by index) and the function for setting up the ISR.
*
* Here is where both "first" and non-"first" instances have code
* generated for their interrupts via HANDLE_IRQS.
*/
#define GENERATE_ISR_CODE(index) \
static void ISR_FUNC(index)(void) \
{ \
DT_INST_FOREACH_STATUS_OKAY_VARGS(HANDLE_IRQS, DT_INST_IRQN(index)) \
} \
\
static void UTIL_CAT(ISR_FUNC(index), _init)(void) \
{ \
IRQ_CONNECT(DT_INST_IRQN(index), DT_INST_IRQ(index, priority), ISR_FUNC(index), \
NULL, 0); \
irq_enable(DT_INST_IRQN(index)); \
}
/*
* Limit generating code to only the "first" instance:
*
* if (first_with_irqn(index) == index):
* generate_isr_code(index)
*/
#define GENERATE_ISR(index) \
COND_CODE_1(IS_EQ(index, FIRST_WITH_IRQN(index)), (GENERATE_ISR_CODE(index)), (EMPTY))
DT_INST_FOREACH_STATUS_OKAY(GENERATE_ISR)
/* Only "first" instances need to call the ISR setup function */
#define ADC_STM32_IRQ_FUNC(index) \
.irq_cfg_func = COND_CODE_1(IS_EQ(index, FIRST_WITH_IRQN(index)), \
(UTIL_CAT(ISR_FUNC(index), _init)), (NULL)),
#define ADC_DMA_CHANNEL_INIT(index, src_dev, dest_dev)
#endif /* CONFIG_ADC_STM32_DMA */
#define ADC_DMA_CHANNEL(id, src, dest) \
COND_CODE_1(DT_INST_DMAS_HAS_IDX(id, 0), \
(ADC_DMA_CHANNEL_INIT(id, src, dest)), \
(/* Required for other adc instances without dma */))
#define ADC_STM32_INIT(index) \
\
PINCTRL_DT_INST_DEFINE(index); \
\
static const struct stm32_pclken pclken_##index[] = \
STM32_DT_INST_CLOCKS(index); \
\
static const struct adc_stm32_cfg adc_stm32_cfg_##index = { \
.base = (ADC_TypeDef *)DT_INST_REG_ADDR(index), \
ADC_STM32_IRQ_FUNC(index) \
.pclken = pclken_##index, \
.pclk_len = DT_INST_NUM_CLOCKS(index), \
.clk_prescaler = ADC_STM32_DT_PRESC(index), \
.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(index), \
.sequencer_type = DT_INST_PROP(index, st_adc_sequencer), \
.sampling_time_table = DT_INST_PROP(index, sampling_times), \
.num_sampling_time_common_channels = \
DT_INST_PROP_OR(index, num_sampling_time_common_channels, 0),\
.res_table_size = DT_INST_PROP_LEN(index, resolutions), \
.res_table = DT_INST_PROP(index, resolutions), \
}; \
\
static struct adc_stm32_data adc_stm32_data_##index = { \
ADC_CONTEXT_INIT_TIMER(adc_stm32_data_##index, ctx), \
ADC_CONTEXT_INIT_LOCK(adc_stm32_data_##index, ctx), \
ADC_CONTEXT_INIT_SYNC(adc_stm32_data_##index, ctx), \
ADC_DMA_CHANNEL(index, PERIPHERAL, MEMORY) \
}; \
\
PM_DEVICE_DT_INST_DEFINE(index, adc_stm32_pm_action); \
\
DEVICE_DT_INST_DEFINE(index, \
&adc_stm32_init, PM_DEVICE_DT_INST_GET(index), \
&adc_stm32_data_##index, &adc_stm32_cfg_##index, \
POST_KERNEL, CONFIG_ADC_INIT_PRIORITY, \
&api_stm32_driver_api);
DT_INST_FOREACH_STATUS_OKAY(ADC_STM32_INIT)
``` | /content/code_sandbox/drivers/adc/adc_stm32.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 15,721 |
```c
/*
*
*/
#define DT_DRV_COMPAT telink_b91_adc
/* Local driver headers */
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
/* Zephyr Device Tree headers */
#include <zephyr/dt-bindings/adc/b91-adc.h>
/* Zephyr Logging headers */
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(adc_b91, CONFIG_ADC_LOG_LEVEL);
/* Telink HAL headers */
#include <adc.h>
/* ADC B91 defines */
#define SIGN_BIT_POSITION (13)
#define AREG_ADC_DATA_STATUS (0xf6)
#define ADC_DATA_READY BIT(0)
/* B91 ADC driver data */
struct b91_adc_data {
struct adc_context ctx;
int16_t *buffer;
int16_t *repeat_buffer;
uint8_t differential;
uint8_t resolution_divider;
struct k_sem acq_sem;
struct k_thread thread;
K_KERNEL_STACK_MEMBER(stack, CONFIG_ADC_B91_ACQUISITION_THREAD_STACK_SIZE);
};
struct b91_adc_cfg {
uint32_t sample_freq;
uint16_t vref_internal_mv;
};
/* Validate ADC data buffer size */
static int adc_b91_validate_buffer_size(const struct adc_sequence *sequence)
{
size_t needed = sizeof(int16_t);
if (sequence->options) {
needed *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < needed) {
return -ENOMEM;
}
return 0;
}
/* Validate ADC read API input parameters */
static int adc_b91_validate_sequence(const struct adc_sequence *sequence)
{
int status;
if (sequence->channels != BIT(0)) {
LOG_ERR("Only channel 0 is supported.");
return -ENOTSUP;
}
if (sequence->oversampling) {
LOG_ERR("Oversampling is not supported.");
return -ENOTSUP;
}
status = adc_b91_validate_buffer_size(sequence);
if (status) {
LOG_ERR("Buffer size too small.");
return status;
}
return 0;
}
/* Convert dts pin to B91 SDK pin */
static adc_input_pin_def_e adc_b91_get_pin(uint8_t dt_pin)
{
adc_input_pin_def_e adc_pin;
switch (dt_pin) {
case DT_ADC_GPIO_PB0:
adc_pin = ADC_GPIO_PB0;
break;
case DT_ADC_GPIO_PB1:
adc_pin = ADC_GPIO_PB1;
break;
case DT_ADC_GPIO_PB2:
adc_pin = ADC_GPIO_PB2;
break;
case DT_ADC_GPIO_PB3:
adc_pin = ADC_GPIO_PB3;
break;
case DT_ADC_GPIO_PB4:
adc_pin = ADC_GPIO_PB4;
break;
case DT_ADC_GPIO_PB5:
adc_pin = ADC_GPIO_PB5;
break;
case DT_ADC_GPIO_PB6:
adc_pin = ADC_GPIO_PB6;
break;
case DT_ADC_GPIO_PB7:
adc_pin = ADC_GPIO_PB7;
break;
case DT_ADC_GPIO_PD0:
adc_pin = ADC_GPIO_PD0;
break;
case DT_ADC_GPIO_PD1:
adc_pin = ADC_GPIO_PD1;
break;
case DT_ADC_VBAT:
adc_pin = ADC_VBAT;
break;
default:
adc_pin = NOINPUTN;
break;
}
return adc_pin;
}
/* Get ADC value */
static signed short adc_b91_get_code(void)
{
signed short adc_code;
analog_write_reg8(areg_adc_data_sample_control,
analog_read_reg8(areg_adc_data_sample_control) | FLD_NOT_SAMPLE_ADC_DATA);
adc_code = analog_read_reg16(areg_adc_misc_l);
analog_write_reg8(areg_adc_data_sample_control,
analog_read_reg8(areg_adc_data_sample_control) & (~FLD_NOT_SAMPLE_ADC_DATA));
return adc_code;
}
/* ADC Context API implementation: start sampling */
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct b91_adc_data *data =
CONTAINER_OF(ctx, struct b91_adc_data, ctx);
data->repeat_buffer = data->buffer;
adc_power_on();
k_sem_give(&data->acq_sem);
}
/* ADC Context API implementation: buffer pointer */
static void adc_context_update_buffer_pointer(struct adc_context *ctx, bool repeat_sampling)
{
struct b91_adc_data *data =
CONTAINER_OF(ctx, struct b91_adc_data, ctx);
if (repeat_sampling) {
data->buffer = data->repeat_buffer;
}
}
/* Start ADC measurements */
static int adc_b91_adc_start_read(const struct device *dev, const struct adc_sequence *sequence)
{
int status;
struct b91_adc_data *data = dev->data;
/* Validate input parameters */
status = adc_b91_validate_sequence(sequence);
if (status != 0) {
return status;
}
/* Set resolution */
switch (sequence->resolution) {
case 14:
adc_set_resolution(ADC_RES14);
data->resolution_divider = 1;
break;
case 12:
adc_set_resolution(ADC_RES12);
data->resolution_divider = 4;
break;
case 10:
adc_set_resolution(ADC_RES10);
data->resolution_divider = 16;
break;
case 8:
adc_set_resolution(ADC_RES8);
data->resolution_divider = 64;
break;
default:
LOG_ERR("Selected ADC resolution is not supported.");
return -EINVAL;
}
/* Save buffer */
data->buffer = sequence->buffer;
/* Start ADC conversion */
adc_context_start_read(&data->ctx, sequence);
return adc_context_wait_for_completion(&data->ctx);
}
/* Main ADC Acquisition thread */
static void adc_b91_acquisition_thread(void *p1, void *p2, void *p3)
{
ARG_UNUSED(p2);
ARG_UNUSED(p3);
const struct device *dev = p1;
int16_t adc_code;
struct b91_adc_data *data = dev->data;
while (true) {
/* Wait for Acquisition semaphore */
k_sem_take(&data->acq_sem, K_FOREVER);
/* Wait for ADC data ready */
while ((analog_read_reg8(AREG_ADC_DATA_STATUS) & ADC_DATA_READY)
!= ADC_DATA_READY) {
}
/* Perform read */
adc_code = (adc_b91_get_code() / data->resolution_divider);
if (!data->differential) {
/* Sign bit is not used in case of single-ended configuration */
adc_code = adc_code * 2;
/* Do not return negative value for single-ended configuration */
if (adc_code < 0) {
adc_code = 0;
}
}
*data->buffer++ = adc_code;
/* Power off ADC */
adc_power_off();
/* Release ADC context */
adc_context_on_sampling_done(&data->ctx, dev);
}
}
/* ADC Driver initialization */
static int adc_b91_init(const struct device *dev)
{
struct b91_adc_data *data = dev->data;
k_sem_init(&data->acq_sem, 0, 1);
k_thread_create(&data->thread, data->stack,
CONFIG_ADC_B91_ACQUISITION_THREAD_STACK_SIZE,
adc_b91_acquisition_thread,
(void *)dev, NULL, NULL,
CONFIG_ADC_B91_ACQUISITION_THREAD_PRIO,
0, K_NO_WAIT);
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
/* API implementation: channel_setup */
static int adc_b91_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
adc_ref_vol_e vref_internal_mv;
adc_sample_freq_e sample_freq;
adc_pre_scale_e pre_scale;
adc_sample_cycle_e sample_cycl;
adc_input_pin_def_e input_positive;
adc_input_pin_def_e input_negative;
struct b91_adc_data *data = dev->data;
const struct b91_adc_cfg *config = dev->config;
/* Check channel ID */
if (channel_cfg->channel_id > 0) {
LOG_ERR("Only channel 0 is supported.");
return -EINVAL;
}
/* Check reference */
if (channel_cfg->reference != ADC_REF_INTERNAL) {
LOG_ERR("Selected ADC reference is not supported.");
return -EINVAL;
}
/* Check internal reference */
switch (config->vref_internal_mv) {
case 900:
vref_internal_mv = ADC_VREF_0P9V;
break;
case 1200:
vref_internal_mv = ADC_VREF_1P2V;
break;
default:
LOG_ERR("Selected reference voltage is not supported.");
return -EINVAL;
}
/* Check sample frequency */
switch (config->sample_freq) {
case 23000:
sample_freq = ADC_SAMPLE_FREQ_23K;
break;
case 48000:
sample_freq = ADC_SAMPLE_FREQ_48K;
break;
case 96000:
sample_freq = ADC_SAMPLE_FREQ_96K;
break;
default:
LOG_ERR("Selected sample frequency is not supported.");
return -EINVAL;
}
/* Check gain */
switch (channel_cfg->gain) {
case ADC_GAIN_1:
pre_scale = ADC_PRESCALE_1;
break;
case ADC_GAIN_1_4:
pre_scale = ADC_PRESCALE_1F4;
break;
default:
LOG_ERR("Selected ADC gain is not supported.");
return -EINVAL;
}
/* Check acquisition time */
switch (channel_cfg->acquisition_time) {
case ADC_ACQ_TIME(ADC_ACQ_TIME_TICKS, 3):
sample_cycl = ADC_SAMPLE_CYC_3;
break;
case ADC_ACQ_TIME_DEFAULT:
case ADC_ACQ_TIME(ADC_ACQ_TIME_TICKS, 6):
sample_cycl = ADC_SAMPLE_CYC_6;
break;
case ADC_ACQ_TIME(ADC_ACQ_TIME_TICKS, 9):
sample_cycl = ADC_SAMPLE_CYC_9;
break;
case ADC_ACQ_TIME(ADC_ACQ_TIME_TICKS, 12):
sample_cycl = ADC_SAMPLE_CYC_12;
break;
case ADC_ACQ_TIME(ADC_ACQ_TIME_TICKS, 18):
sample_cycl = ADC_SAMPLE_CYC_18;
break;
case ADC_ACQ_TIME(ADC_ACQ_TIME_TICKS, 24):
sample_cycl = ADC_SAMPLE_CYC_24;
break;
case ADC_ACQ_TIME(ADC_ACQ_TIME_TICKS, 36):
sample_cycl = ADC_SAMPLE_CYC_36;
break;
case ADC_ACQ_TIME(ADC_ACQ_TIME_TICKS, 48):
sample_cycl = ADC_SAMPLE_CYC_48;
break;
default:
LOG_ERR("Selected ADC acquisition time is not supported.");
return -EINVAL;
}
/* Check for valid pins configuration */
input_positive = adc_b91_get_pin(channel_cfg->input_positive);
input_negative = adc_b91_get_pin(channel_cfg->input_negative);
if ((input_positive == (uint8_t)ADC_VBAT || input_negative == (uint8_t)ADC_VBAT) &&
channel_cfg->differential) {
LOG_ERR("VBAT pin is not available for differential mode.");
return -EINVAL;
} else if (channel_cfg->differential && (input_negative == (uint8_t)NOINPUTN)) {
LOG_ERR("Negative input is not selected.");
return -EINVAL;
}
/* Init ADC */
data->differential = channel_cfg->differential;
adc_init(vref_internal_mv, pre_scale, sample_freq);
adc_set_vbat_divider(ADC_VBAT_DIV_OFF);
adc_set_tsample_cycle(sample_cycl);
/* Init ADC Pins */
if (channel_cfg->differential) {
/* Differential pins configuration */
adc_pin_config(ADC_GPIO_MODE, input_positive);
adc_pin_config(ADC_GPIO_MODE, input_negative);
adc_set_diff_input(channel_cfg->input_positive, channel_cfg->input_negative);
} else if (input_positive == (uint8_t)ADC_VBAT) {
/* Single-ended Vbat pin configuration */
adc_set_diff_input(ADC_VBAT, GND);
} else {
/* Single-ended GPIO pin configuration */
adc_pin_config(ADC_GPIO_MODE, input_positive);
adc_set_diff_input(channel_cfg->input_positive, GND);
}
return 0;
}
/* API implementation: read */
static int adc_b91_read(const struct device *dev,
const struct adc_sequence *sequence)
{
int status;
struct b91_adc_data *data = dev->data;
adc_context_lock(&data->ctx, false, NULL);
status = adc_b91_adc_start_read(dev, sequence);
adc_context_release(&data->ctx, status);
return status;
}
#ifdef CONFIG_ADC_ASYNC
/* API implementation: read_async */
static int adc_b91_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
int status;
struct b91_adc_data *data = dev->data;
adc_context_lock(&data->ctx, true, async);
status = adc_b91_adc_start_read(dev, sequence);
adc_context_release(&data->ctx, status);
return status;
}
#endif /* CONFIG_ADC_ASYNC */
static struct b91_adc_data data_0 = {
ADC_CONTEXT_INIT_TIMER(data_0, ctx),
ADC_CONTEXT_INIT_LOCK(data_0, ctx),
ADC_CONTEXT_INIT_SYNC(data_0, ctx),
};
static const struct b91_adc_cfg cfg_0 = {
.sample_freq = DT_INST_PROP(0, sample_freq),
.vref_internal_mv = DT_INST_PROP(0, vref_internal_mv),
};
static const struct adc_driver_api adc_b91_driver_api = {
.channel_setup = adc_b91_channel_setup,
.read = adc_b91_read,
#ifdef CONFIG_ADC_ASYNC
.read_async = adc_b91_read_async,
#endif
.ref_internal = cfg_0.vref_internal_mv,
};
DEVICE_DT_INST_DEFINE(0, adc_b91_init, NULL,
&data_0, &cfg_0,
POST_KERNEL,
CONFIG_ADC_INIT_PRIORITY,
&adc_b91_driver_api);
``` | /content/code_sandbox/drivers/adc/adc_b91.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,076 |
```c
/*
*
*/
#include <zephyr/shell/shell.h>
#include <stdlib.h>
#include <zephyr/drivers/adc.h>
#include <ctype.h>
#include <zephyr/sys/util.h>
#include <zephyr/devicetree.h>
#define LOG_LEVEL CONFIG_LOG_DEFAULT_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(adc_shell);
#define CMD_HELP_ACQ_TIME \
"Configure acquisition time." \
"\nUsage: acq_time <time> <unit>" \
"\nunits: us, ns, ticks\n"
#define CMD_HELP_CHANNEL \
"Configure ADC channel\n" \
#define CMD_HELP_CH_ID \
"Configure channel id\n" \
"Usage: id <channel_id>\n"
#define CMD_HELP_DIFF \
"Configure differential\n" \
"Usage: differential <0||1>\n"
#define CMD_HELP_CH_NEG \
"Configure channel negative input\n" \
"Usage: negative <negative_input_id>\n"
#define CMD_HELP_CH_POS \
"Configure channel positive input\n" \
"Usage: positive <positive_input_id>\n"
#define CMD_HELP_READ \
"Read adc value\n" \
"Usage: read <channel>\n"
#define CMD_HELP_RES \
"Configure resolution\n" \
"Usage: resolution <resolution>\n"
#define CMD_HELP_REF "Configure reference\n"
#define CMD_HELP_GAIN "Configure gain.\n"
#define CMD_HELP_PRINT "Print current configuration"
#define ADC_HDL_LIST_ENTRY(node_id) \
{ \
.dev = DEVICE_DT_GET(node_id), \
.channel_config = \
{ \
.gain = ADC_GAIN_1, \
.reference = ADC_REF_INTERNAL, \
.acquisition_time = ADC_ACQ_TIME_DEFAULT, \
.channel_id = 0, \
}, \
.resolution = 0, \
},
#define CHOSEN_STR_LEN 20
static char chosen_reference[CHOSEN_STR_LEN + 1] = "INTERNAL";
static char chosen_gain[CHOSEN_STR_LEN + 1] = "1";
static struct adc_hdl {
const struct device *dev;
struct adc_channel_cfg channel_config;
uint8_t resolution;
} adc_list[] = {
/* zephyr-keep-sorted-start */
DT_FOREACH_STATUS_OKAY(adi_ad559x_adc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(atmel_sam0_adc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(atmel_sam_adc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(atmel_sam_afec, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(espressif_esp32_adc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(gd_gd32_adc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(infineon_cat1_adc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(infineon_xmc4xxx_adc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(ite_it8xxx2_adc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(lltc_ltc2451, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(maxim_max11102, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(maxim_max11103, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(maxim_max11105, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(maxim_max11106, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(maxim_max11110, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(maxim_max11111, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(maxim_max11115, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(maxim_max11116, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(maxim_max11117, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(maxim_max11253, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(maxim_max11254, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(microchip_mcp3204, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(microchip_mcp3208, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(microchip_xec_adc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(nordic_nrf_adc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(nordic_nrf_saadc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(nuvoton_npcx_adc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(nuvoton_numaker_adc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(nxp_kinetis_adc12, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(nxp_kinetis_adc16, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(nxp_lpc_lpadc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(nxp_mcux_12b1msps_sar, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(nxp_s32_adc_sar, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(nxp_vf610_adc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(raspberrypi_pico_adc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(renesas_smartbond_adc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(renesas_smartbond_sdadc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(silabs_gecko_adc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(silabs_gecko_iadc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(st_stm32_adc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(st_stm32f1_adc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(st_stm32f4_adc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(telink_b91_adc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(ti_ads1013, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(ti_ads1014, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(ti_ads1015, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(ti_ads1112, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(ti_ads1113, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(ti_ads1114, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(ti_ads1115, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(ti_ads1119, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(ti_ads114s08, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(ti_ads7052, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(ti_cc13xx_cc26xx_adc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(ti_cc32xx_adc, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(ti_lmp90077, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(ti_lmp90078, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(ti_lmp90079, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(ti_lmp90080, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(ti_lmp90097, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(ti_lmp90098, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(ti_lmp90099, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(ti_lmp90100, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(ti_tla2021, ADC_HDL_LIST_ENTRY)
DT_FOREACH_STATUS_OKAY(zephyr_adc_emul, ADC_HDL_LIST_ENTRY)
/* zephyr-keep-sorted-stop */
};
static struct adc_hdl *get_adc(const char *device_label)
{
for (int i = 0; i < ARRAY_SIZE(adc_list); i++) {
if (!strcmp(device_label, adc_list[i].dev->name)) {
return &adc_list[i];
}
}
/* This will never happen because ADC was prompted by shell */
__ASSERT_NO_MSG(false);
return NULL;
}
static int cmd_adc_ch_id(const struct shell *sh, size_t argc, char **argv)
{
/* -2: index of ADC label name */
struct adc_hdl *adc = get_adc(argv[-2]);
int retval = 0;
if (!device_is_ready(adc->dev)) {
shell_error(sh, "ADC device not ready");
return -ENODEV;
}
if (isdigit((unsigned char)argv[1][0]) == 0) {
shell_error(sh, "<channel> must be digits");
return -EINVAL;
}
adc->channel_config.channel_id = (uint8_t)strtol(argv[1], NULL, 10);
retval = adc_channel_setup(adc->dev, &adc->channel_config);
LOG_DBG("Channel setup returned %i", retval);
return retval;
}
static int cmd_adc_ch_diff(const struct shell *sh, size_t argc, char **argv)
{
/* -2: index of ADC label name */
struct adc_hdl *adc = get_adc(argv[-2]);
int retval = 0;
char *endptr;
long diff;
if (!device_is_ready(adc->dev)) {
shell_error(sh, "ADC device not ready");
return -ENODEV;
}
endptr = argv[1];
diff = strtol(argv[1], &endptr, 10);
if ((endptr == argv[1]) || ((diff != 0) && (diff != 1))) {
shell_error(sh, "<differential> must be 0 or 1");
return -EINVAL;
}
adc->channel_config.differential = (uint8_t)diff;
retval = adc_channel_setup(adc->dev, &adc->channel_config);
LOG_DBG("Channel setup returned %i", retval);
return retval;
}
static int cmd_adc_ch_neg(const struct shell *sh, size_t argc, char **argv)
{
#if CONFIG_ADC_CONFIGURABLE_INPUTS
/* -2: index of ADC label name */
struct adc_hdl *adc = get_adc(argv[-2]);
int retval = 0;
if (!device_is_ready(adc->dev)) {
shell_error(sh, "ADC device not ready");
return -ENODEV;
}
if (isdigit((unsigned char)argv[1][0]) == 0) {
shell_error(sh, "<negative input> must be digits");
return -EINVAL;
}
adc->channel_config.input_negative = (uint8_t)strtol(argv[1], NULL, 10);
retval = adc_channel_setup(adc->dev, &adc->channel_config);
LOG_DBG("Channel setup returned %i", retval);
return retval;
#else
return -EINVAL;
#endif
}
static int cmd_adc_ch_pos(const struct shell *sh, size_t argc, char **argv)
{
#if CONFIG_ADC_CONFIGURABLE_INPUTS
/* -2: index of ADC label name */
struct adc_hdl *adc = get_adc(argv[-2]);
int retval = 0;
if (!device_is_ready(adc->dev)) {
shell_error(sh, "ADC device not ready");
return -ENODEV;
}
if (isdigit((unsigned char)argv[1][0]) == 0) {
shell_error(sh, "<positive input> must be digits");
return -EINVAL;
}
adc->channel_config.input_positive = (uint8_t)strtol(argv[1], NULL, 10);
retval = adc_channel_setup(adc->dev, &adc->channel_config);
LOG_DBG("Channel setup returned %i", retval);
return retval;
#else
return -EINVAL;
#endif
}
static int cmd_adc_gain(const struct shell *sh, size_t argc, char **argv,
void *data)
{
/* -2: index of ADC label name */
struct adc_hdl *adc = get_adc(argv[-2]);
enum adc_gain gain = (enum adc_gain)data;
int retval = -EINVAL;
if (!device_is_ready(adc->dev)) {
shell_error(sh, "ADC device not ready");
return -ENODEV;
}
adc->channel_config.gain = gain;
int len = strlen(argv[0]) > CHOSEN_STR_LEN ? CHOSEN_STR_LEN
: strlen(argv[0]);
memcpy(chosen_gain, argv[0], len);
chosen_gain[len] = '\0';
retval = adc_channel_setup(adc->dev, &adc->channel_config);
LOG_DBG("Channel setup returned %i", retval);
return retval;
}
static int cmd_adc_acq(const struct shell *sh, size_t argc, char **argv)
{
/* -1 index of ADC label name */
struct adc_hdl *adc = get_adc(argv[-1]);
uint16_t acq_time;
int retval;
if (!device_is_ready(adc->dev)) {
shell_error(sh, "ADC device not ready");
return -ENODEV;
}
if (isdigit((unsigned char)argv[1][0]) == 0) {
shell_error(sh, "<time> must be digits");
return -EINVAL;
}
acq_time = (uint16_t)strtol(argv[1], NULL, 10);
if (!strcmp(argv[2], "us")) {
adc->channel_config.acquisition_time =
ADC_ACQ_TIME(ADC_ACQ_TIME_MICROSECONDS, acq_time);
} else if (!strcmp(argv[2], "ns")) {
adc->channel_config.acquisition_time =
ADC_ACQ_TIME(ADC_ACQ_TIME_NANOSECONDS, acq_time);
} else if (!strcmp(argv[2], "ticks")) {
adc->channel_config.acquisition_time =
ADC_ACQ_TIME(ADC_ACQ_TIME_TICKS, acq_time);
} else {
adc->channel_config.acquisition_time =
ADC_ACQ_TIME_DEFAULT;
}
retval = adc_channel_setup(adc->dev, &adc->channel_config);
LOG_DBG("Channel setup returned %i", retval);
return retval;
}
static int cmd_adc_reso(const struct shell *sh, size_t argc, char **argv)
{
/* -1 index of ADC label name */
struct adc_hdl *adc = get_adc(argv[-1]);
int retval;
if (!device_is_ready(adc->dev)) {
shell_error(sh, "ADC device not ready");
return -ENODEV;
}
if (isdigit((unsigned char)argv[1][0]) == 0) {
shell_error(sh, "<resolution> must be digits");
return -EINVAL;
}
adc->resolution = (uint8_t)strtol(argv[1], NULL, 10);
retval = adc_channel_setup(adc->dev, &adc->channel_config);
return retval;
}
static int cmd_adc_ref(const struct shell *sh, size_t argc, char **argv,
void *data)
{
/* -2 index of ADC label name */
struct adc_hdl *adc = get_adc(argv[-2]);
enum adc_reference reference = (enum adc_reference)data;
int retval = -EINVAL;
if (!device_is_ready(adc->dev)) {
shell_error(sh, "ADC device not ready");
return -ENODEV;
}
int len = strlen(argv[0]) > CHOSEN_STR_LEN ? CHOSEN_STR_LEN
: strlen(argv[0]);
memcpy(chosen_reference, argv[0], len);
chosen_reference[len] = '\0';
adc->channel_config.reference = reference;
retval = adc_channel_setup(adc->dev, &adc->channel_config);
LOG_DBG("Channel setup returned %i", retval);
return retval;
}
#define BUFFER_SIZE 1
static int cmd_adc_read(const struct shell *sh, size_t argc, char **argv)
{
uint8_t adc_channel_id = strtol(argv[1], NULL, 10);
/* -1 index of adc label name */
struct adc_hdl *adc = get_adc(argv[-1]);
int16_t m_sample_buffer[BUFFER_SIZE];
int retval;
if (!device_is_ready(adc->dev)) {
shell_error(sh, "ADC device not ready");
return -ENODEV;
}
adc->channel_config.channel_id = adc_channel_id;
const struct adc_sequence sequence = {
.channels = BIT(adc->channel_config.channel_id),
.buffer = m_sample_buffer,
.buffer_size = sizeof(m_sample_buffer),
.resolution = adc->resolution,
};
retval = adc_read(adc->dev, &sequence);
if (retval >= 0) {
shell_print(sh, "read: %i", m_sample_buffer[0]);
}
return retval;
}
static int cmd_adc_print(const struct shell *sh, size_t argc, char **argv)
{
/* -1 index of ADC label name */
struct adc_hdl *adc = get_adc(argv[-1]);
shell_print(sh, "%s:\n"
"Gain: %s\n"
"Reference: %s\n"
"Acquisition Time: %u\n"
"Channel ID: %u\n"
"Differential: %u\n"
"Resolution: %u",
adc->dev->name,
chosen_gain,
chosen_reference,
adc->channel_config.acquisition_time,
adc->channel_config.channel_id,
adc->channel_config.differential,
adc->resolution);
#if CONFIG_ADC_CONFIGURABLE_INPUTS
shell_print(sh, "Input positive: %u",
adc->channel_config.input_positive);
if (adc->channel_config.differential != 0) {
shell_print(sh, "Input negative: %u",
adc->channel_config.input_negative);
}
#endif
return 0;
}
SHELL_SUBCMD_DICT_SET_CREATE(sub_ref_cmds, cmd_adc_ref,
(VDD_1, ADC_REF_VDD_1, "VDD"),
(VDD_1_2, ADC_REF_VDD_1_2, "VDD/2"),
(VDD_1_3, ADC_REF_VDD_1_3, "VDD/3"),
(VDD_1_4, ADC_REF_VDD_1_4, "VDD/4"),
(INTERNAL, ADC_REF_INTERNAL, "Internal"),
(EXTERNAL_0, ADC_REF_EXTERNAL0, "External, input 0"),
(EXTERNAL_1, ADC_REF_EXTERNAL1, "External, input 1")
);
SHELL_SUBCMD_DICT_SET_CREATE(sub_gain_cmds, cmd_adc_gain,
(GAIN_1_6, ADC_GAIN_1_6, "x 1/6"),
(GAIN_1_5, ADC_GAIN_1_5, "x 1/5"),
(GAIN_1_4, ADC_GAIN_1_4, "x 1/4"),
(GAIN_1_3, ADC_GAIN_1_3, "x 1/3"),
(GAIN_1_2, ADC_GAIN_1_2, "x 1/2"),
(GAIN_2_3, ADC_GAIN_2_3, "x 2/3"),
(GAIN_1, ADC_GAIN_1, "x 1"),
(GAIN_2, ADC_GAIN_2, "x 2"),
(GAIN_3, ADC_GAIN_3, "x 3"),
(GAIN_4, ADC_GAIN_4, "x 4"),
(GAIN_8, ADC_GAIN_8, "x 8"),
(GAIN_16, ADC_GAIN_16, "x 16"),
(GAIN_32, ADC_GAIN_32, "x 32"),
(GAIN_64, ADC_GAIN_64, "x 64")
);
SHELL_STATIC_SUBCMD_SET_CREATE(sub_channel_cmds,
SHELL_CMD_ARG(id, NULL, CMD_HELP_CH_ID, cmd_adc_ch_id, 2, 0),
SHELL_CMD_ARG(differential, NULL, CMD_HELP_DIFF, cmd_adc_ch_diff, 2, 0),
SHELL_COND_CMD_ARG(CONFIG_ADC_CONFIGURABLE_INPUTS,
negative, NULL, CMD_HELP_CH_NEG, cmd_adc_ch_neg, 2, 0),
SHELL_COND_CMD_ARG(CONFIG_ADC_CONFIGURABLE_INPUTS,
positive, NULL, CMD_HELP_CH_POS, cmd_adc_ch_pos, 2, 0),
SHELL_SUBCMD_SET_END
);
SHELL_STATIC_SUBCMD_SET_CREATE(sub_adc_cmds,
/* Alphabetically sorted. */
SHELL_CMD_ARG(acq_time, NULL, CMD_HELP_ACQ_TIME, cmd_adc_acq, 3, 0),
SHELL_CMD_ARG(channel, &sub_channel_cmds, CMD_HELP_CHANNEL, NULL, 3, 0),
SHELL_CMD(gain, &sub_gain_cmds, CMD_HELP_GAIN, NULL),
SHELL_CMD_ARG(print, NULL, CMD_HELP_PRINT, cmd_adc_print, 1, 0),
SHELL_CMD_ARG(read, NULL, CMD_HELP_READ, cmd_adc_read, 2, 0),
SHELL_CMD(reference, &sub_ref_cmds, CMD_HELP_REF, NULL),
SHELL_CMD_ARG(resolution, NULL, CMD_HELP_RES, cmd_adc_reso, 2, 0),
SHELL_SUBCMD_SET_END /* Array terminated. */
);
static void cmd_adc_dev_get(size_t idx, struct shell_static_entry *entry)
{
if (idx < ARRAY_SIZE(adc_list)) {
entry->syntax = adc_list[idx].dev->name;
entry->handler = NULL;
entry->subcmd = &sub_adc_cmds;
entry->help = "Select subcommand for ADC property label.";
} else {
entry->syntax = NULL;
}
}
SHELL_DYNAMIC_CMD_CREATE(sub_adc_dev, cmd_adc_dev_get);
SHELL_CMD_REGISTER(adc, &sub_adc_dev, "ADC commands", NULL);
``` | /content/code_sandbox/drivers/adc/adc_shell.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,757 |
```c
/*
*
*/
#define DT_DRV_COMPAT ambiq_adc
#include <zephyr/drivers/adc.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/pm/device.h>
#include <zephyr/pm/device_runtime.h>
#include <zephyr/kernel.h>
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
/* ambiq-sdk includes */
#include <am_mcu_apollo.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(adc_ambiq, CONFIG_ADC_LOG_LEVEL);
typedef int (*ambiq_adc_pwr_func_t)(void);
#define PWRCTRL_MAX_WAIT_US 5
/* Number of slots available. */
#define AMBIQ_ADC_SLOT_BUMBER AM_HAL_ADC_MAX_SLOTS
struct adc_ambiq_config {
uint32_t base;
int size;
uint8_t num_channels;
void (*irq_config_func)(void);
const struct pinctrl_dev_config *pin_cfg;
ambiq_adc_pwr_func_t pwr_func;
};
struct adc_ambiq_data {
struct adc_context ctx;
void *adcHandle;
uint16_t *buffer;
uint16_t *repeat_buffer;
uint8_t active_channels;
};
static int adc_ambiq_set_resolution(am_hal_adc_slot_prec_e *prec, uint8_t adc_resolution)
{
switch (adc_resolution) {
case 8:
*prec = AM_HAL_ADC_SLOT_8BIT;
break;
case 10:
*prec = AM_HAL_ADC_SLOT_10BIT;
break;
case 12:
*prec = AM_HAL_ADC_SLOT_12BIT;
break;
case 14:
*prec = AM_HAL_ADC_SLOT_14BIT;
break;
default:
return -ENOTSUP;
}
return 0;
}
static int adc_ambiq_slot_config(const struct device *dev, const struct adc_sequence *sequence,
am_hal_adc_slot_chan_e channel, uint32_t ui32SlotNumber)
{
struct adc_ambiq_data *data = dev->data;
am_hal_adc_slot_config_t ADCSlotConfig;
if (adc_ambiq_set_resolution(&ADCSlotConfig.ePrecisionMode, sequence->resolution) != 0) {
LOG_ERR("unsupported resolution %d", sequence->resolution);
return -ENOTSUP;
}
/* Set up an ADC slot */
ADCSlotConfig.eMeasToAvg = AM_HAL_ADC_SLOT_AVG_1;
ADCSlotConfig.eChannel = channel;
ADCSlotConfig.bWindowCompare = false;
ADCSlotConfig.bEnabled = true;
if (AM_HAL_STATUS_SUCCESS !=
am_hal_adc_configure_slot(data->adcHandle, ui32SlotNumber, &ADCSlotConfig)) {
LOG_ERR("configuring ADC Slot 0 failed.\n");
return -ENODEV;
}
return 0;
}
static void adc_ambiq_isr(const struct device *dev)
{
struct adc_ambiq_data *data = dev->data;
uint32_t ui32IntMask;
uint32_t ui32NumSamples;
am_hal_adc_sample_t Sample;
/* Read the interrupt status. */
am_hal_adc_interrupt_status(data->adcHandle, &ui32IntMask, true);
/* Clear the ADC interrupt.*/
am_hal_adc_interrupt_clear(data->adcHandle, ui32IntMask);
/*
* If we got a conversion completion interrupt (which should be our only
* ADC interrupt), go ahead and read the data.
*/
if (ui32IntMask & AM_HAL_ADC_INT_CNVCMP) {
for (uint32_t i = 0; i < data->active_channels; i++) {
/* Read the value from the FIFO. */
ui32NumSamples = 1;
am_hal_adc_samples_read(data->adcHandle, false, NULL, &ui32NumSamples,
&Sample);
*data->buffer++ = Sample.ui32Sample;
}
adc_context_on_sampling_done(&data->ctx, dev);
}
}
static int adc_ambiq_check_buffer_size(const struct adc_sequence *sequence, uint8_t active_channels)
{
size_t needed_buffer_size;
needed_buffer_size = active_channels * sizeof(uint16_t);
if (sequence->options) {
needed_buffer_size *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < needed_buffer_size) {
LOG_DBG("Provided buffer is too small (%u/%u)", sequence->buffer_size,
needed_buffer_size);
return -ENOMEM;
}
return 0;
}
static int adc_ambiq_start_read(const struct device *dev, const struct adc_sequence *sequence)
{
struct adc_ambiq_data *data = dev->data;
const struct adc_ambiq_config *cfg = dev->config;
uint8_t channel_id = 0;
uint32_t channels = 0;
uint8_t active_channels = 0;
uint8_t slot_index;
int error = 0;
if (sequence->channels & ~BIT_MASK(cfg->num_channels)) {
LOG_ERR("Incorrect channels, bitmask 0x%x", sequence->channels);
return -EINVAL;
}
if (sequence->channels == 0UL) {
LOG_ERR("No channel selected");
return -EINVAL;
}
active_channels = POPCOUNT(sequence->channels);
if (active_channels > AMBIQ_ADC_SLOT_BUMBER) {
LOG_ERR("Too many channels for sequencer. Max: %d", AMBIQ_ADC_SLOT_BUMBER);
return -ENOTSUP;
}
channels = sequence->channels;
for (slot_index = 0; slot_index < active_channels; slot_index++) {
channel_id = find_lsb_set(channels) - 1;
error = adc_ambiq_slot_config(dev, sequence, channel_id, slot_index);
if (error < 0) {
return error;
}
channels &= ~BIT(channel_id);
}
__ASSERT_NO_MSG(channels == 0);
/* Enable the ADC. */
am_hal_adc_enable(data->adcHandle);
error = adc_ambiq_check_buffer_size(sequence, active_channels);
if (error < 0) {
return error;
}
data->active_channels = active_channels;
data->buffer = sequence->buffer;
/* Start ADC conversion */
adc_context_start_read(&data->ctx, sequence);
error = adc_context_wait_for_completion(&data->ctx);
return error;
}
static int adc_ambiq_read(const struct device *dev, const struct adc_sequence *sequence)
{
struct adc_ambiq_data *data = dev->data;
int error;
#if defined(CONFIG_PM_DEVICE_RUNTIME)
error = pm_device_runtime_get(dev);
if (error < 0) {
LOG_ERR("pm_device_runtime_get failed: %d", error);
}
#endif
adc_context_lock(&data->ctx, false, NULL);
error = adc_ambiq_start_read(dev, sequence);
adc_context_release(&data->ctx, error);
#if defined(CONFIG_PM_DEVICE_RUNTIME)
error = pm_device_runtime_put(dev);
if (error < 0) {
LOG_ERR("pm_device_runtime_put failed: %d", error);
}
#endif
return error;
}
static int adc_ambiq_channel_setup(const struct device *dev, const struct adc_channel_cfg *chan_cfg)
{
const struct adc_ambiq_config *cfg = dev->config;
if (chan_cfg->channel_id >= cfg->num_channels) {
LOG_ERR("unsupported channel id '%d'", chan_cfg->channel_id);
return -ENOTSUP;
}
if (chan_cfg->gain != ADC_GAIN_1) {
LOG_ERR("Gain is not valid");
return -ENOTSUP;
}
if (chan_cfg->reference != ADC_REF_INTERNAL) {
LOG_ERR("Reference is not valid");
return -ENOTSUP;
}
if (chan_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
LOG_ERR("unsupported acquisition_time '%d'", chan_cfg->acquisition_time);
return -ENOTSUP;
}
if (chan_cfg->differential) {
LOG_ERR("Differential sampling not supported");
return -ENOTSUP;
}
return 0;
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx, bool repeat_sampling)
{
struct adc_ambiq_data *data = CONTAINER_OF(ctx, struct adc_ambiq_data, ctx);
if (repeat_sampling) {
data->buffer = data->repeat_buffer;
}
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct adc_ambiq_data *data = CONTAINER_OF(ctx, struct adc_ambiq_data, ctx);
data->repeat_buffer = data->buffer;
/*Trigger the ADC*/
am_hal_adc_sw_trigger(data->adcHandle);
}
static int adc_ambiq_init(const struct device *dev)
{
struct adc_ambiq_data *data = dev->data;
const struct adc_ambiq_config *cfg = dev->config;
am_hal_adc_config_t ADCConfig;
int ret;
/* Initialize the ADC and get the handle*/
if (AM_HAL_STATUS_SUCCESS !=
am_hal_adc_initialize((cfg->base - REG_ADC_BASEADDR) / (cfg->size * 4),
&data->adcHandle)) {
ret = -ENODEV;
LOG_ERR("Faile to initialize ADC, code:%d", ret);
return ret;
}
/* power on ADC*/
ret = cfg->pwr_func();
/* Set up the ADC configuration parameters. These settings are reasonable
* for accurate measurements at a low sample rate.
*/
ADCConfig.eClock = AM_HAL_ADC_CLKSEL_HFRC;
ADCConfig.ePolarity = AM_HAL_ADC_TRIGPOL_RISING;
ADCConfig.eTrigger = AM_HAL_ADC_TRIGSEL_SOFTWARE;
ADCConfig.eReference = AM_HAL_ADC_REFSEL_INT_1P5;
ADCConfig.eClockMode = AM_HAL_ADC_CLKMODE_LOW_POWER;
ADCConfig.ePowerMode = AM_HAL_ADC_LPMODE0;
ADCConfig.eRepeat = AM_HAL_ADC_SINGLE_SCAN;
if (AM_HAL_STATUS_SUCCESS != am_hal_adc_configure(data->adcHandle, &ADCConfig)) {
ret = -ENODEV;
LOG_ERR("Configuring ADC failed, code:%d", ret);
return ret;
}
ret = pinctrl_apply_state(cfg->pin_cfg, PINCTRL_STATE_DEFAULT);
if (ret < 0) {
return ret;
}
/* Enable the ADC interrupts in the ADC. */
cfg->irq_config_func();
am_hal_adc_interrupt_enable(data->adcHandle, AM_HAL_ADC_INT_CNVCMP);
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
#ifdef CONFIG_PM_DEVICE
static int adc_ambiq_pm_action(const struct device *dev, enum pm_device_action action)
{
struct adc_ambiq_data *data = dev->data;
uint32_t ret;
am_hal_sysctrl_power_state_e status;
switch (action) {
case PM_DEVICE_ACTION_RESUME:
status = AM_HAL_SYSCTRL_WAKE;
break;
case PM_DEVICE_ACTION_SUSPEND:
status = AM_HAL_SYSCTRL_DEEPSLEEP;
break;
default:
return -ENOTSUP;
}
ret = am_hal_adc_power_control(data->adcHandle, status, true);
if (ret != AM_HAL_STATUS_SUCCESS) {
return -EPERM;
} else {
return 0;
}
}
#endif /* CONFIG_PM_DEVICE */
#define ADC_AMBIQ_DRIVER_API(n) \
static const struct adc_driver_api adc_ambiq_driver_api_##n = { \
.channel_setup = adc_ambiq_channel_setup, \
.read = adc_ambiq_read, \
.ref_internal = DT_INST_PROP(n, internal_vref_mv), \
};
#define ADC_AMBIQ_INIT(n) \
PINCTRL_DT_INST_DEFINE(n); \
ADC_AMBIQ_DRIVER_API(n); \
static int pwr_on_ambiq_adc_##n(void) \
{ \
uint32_t addr = DT_REG_ADDR(DT_INST_PHANDLE(n, ambiq_pwrcfg)) + \
DT_INST_PHA(n, ambiq_pwrcfg, offset); \
sys_write32((sys_read32(addr) | DT_INST_PHA(n, ambiq_pwrcfg, mask)), addr); \
k_busy_wait(PWRCTRL_MAX_WAIT_US); \
return 0; \
} \
static void adc_irq_config_func_##n(void) \
{ \
IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), adc_ambiq_isr, \
DEVICE_DT_INST_GET(n), 0); \
irq_enable(DT_INST_IRQN(n)); \
}; \
static struct adc_ambiq_data adc_ambiq_data_##n = { \
ADC_CONTEXT_INIT_TIMER(adc_ambiq_data_##n, ctx), \
ADC_CONTEXT_INIT_LOCK(adc_ambiq_data_##n, ctx), \
ADC_CONTEXT_INIT_SYNC(adc_ambiq_data_##n, ctx), \
}; \
const static struct adc_ambiq_config adc_ambiq_config_##n = { \
.base = DT_INST_REG_ADDR(n), \
.size = DT_INST_REG_SIZE(n), \
.num_channels = DT_PROP(DT_DRV_INST(n), channel_count), \
.irq_config_func = adc_irq_config_func_##n, \
.pin_cfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \
.pwr_func = pwr_on_ambiq_adc_##n, \
}; \
PM_DEVICE_DT_INST_DEFINE(n, adc_ambiq_pm_action); \
DEVICE_DT_INST_DEFINE(n, &adc_ambiq_init, PM_DEVICE_DT_INST_GET(n), \
&adc_ambiq_data_##n, \
&adc_ambiq_config_##n, POST_KERNEL, CONFIG_ADC_INIT_PRIORITY, \
&adc_ambiq_driver_api_##n);
DT_INST_FOREACH_STATUS_OKAY(ADC_AMBIQ_INIT)
``` | /content/code_sandbox/drivers/adc/adc_ambiq.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,034 |
```unknown
# Infineon CAT1 ADC configuration options
# an affiliate of Cypress Semiconductor Corporation
#
config ADC_INFINEON_CAT1
bool "Infineon CAT1 ADC driver"
default y
depends on DT_HAS_INFINEON_CAT1_ADC_ENABLED
select USE_INFINEON_ADC
select ADC_CONFIGURABLE_INPUTS
help
This option enables the ADC driver for Infineon CAT1 family.
``` | /content/code_sandbox/drivers/adc/Kconfig.ifx_cat1 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 86 |
```unknown
# XMC4XXX ADC configuration options
config ADC_XMC4XXX
bool "XMC4XXX ADC"
default y
depends on DT_HAS_INFINEON_XMC4XXX_ADC_ENABLED
help
Enable XMC4XXX adc driver.
``` | /content/code_sandbox/drivers/adc/Kconfig.xmc4xxx | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 54 |
```unknown
config ADC_SAM0
bool "Atmel SAM0 series ADC Driver"
default y
depends on DT_HAS_ATMEL_SAM0_ADC_ENABLED
select ADC_CONFIGURABLE_INPUTS
help
Enable Atmel SAM0 MCU Family Analog-to-Digital Converter (ADC) driver.
``` | /content/code_sandbox/drivers/adc/Kconfig.sam0 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 63 |
```c
/**
* @file
*
* @brief Emulated ADC driver
*/
/*
*
*/
#define DT_DRV_COMPAT zephyr_adc_emul
#include <zephyr/drivers/adc.h>
#include <zephyr/drivers/adc/adc_emul.h>
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/util.h>
LOG_MODULE_REGISTER(adc_emul, CONFIG_ADC_LOG_LEVEL);
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
#define ADC_EMUL_MAX_RESOLUTION 16
typedef uint16_t adc_emul_res_t;
enum adc_emul_input_source {
ADC_EMUL_CONST_VALUE,
ADC_EMUL_CUSTOM_FUNC,
};
/**
* @brief Channel of emulated ADC config
*
* This structure contains configuration of one channel of emulated ADC.
*/
struct adc_emul_chan_cfg {
/** Pointer to function used to obtain input mV */
adc_emul_value_func func;
/** Pointer to data that are passed to @a func on call */
void *func_data;
/** Constant mV input value */
uint32_t const_value;
/** Gain used on output value */
enum adc_gain gain;
/** Reference source */
enum adc_reference ref;
/** Input source which is used to obtain input value */
enum adc_emul_input_source input;
};
/**
* @brief Emulated ADC config
*
* This structure contains constant data for given instance of emulated ADC.
*/
struct adc_emul_config {
/** Number of supported channels */
uint8_t num_channels;
};
/**
* @brief Emulated ADC data
*
* This structure contains data structures used by a emulated ADC.
*/
struct adc_emul_data {
/** Structure that handle state of ongoing read operation */
struct adc_context ctx;
/** Pointer to ADC emulator own device structure */
const struct device *dev;
/** Pointer to memory where next sample will be written */
uint16_t *buf;
/** Pointer to where will be data stored in case of repeated sampling */
uint16_t *repeat_buf;
/** Mask with channels that will be sampled */
uint32_t channels;
/** Mask created from requested resolution in read operation */
uint16_t res_mask;
/** Reference voltage for ADC_REF_VDD_1 source */
uint16_t ref_vdd;
/** Reference voltage for ADC_REF_EXTERNAL0 source */
uint16_t ref_ext0;
/** Reference voltage for ADC_REF_EXTERNAL1 source */
uint16_t ref_ext1;
/** Reference voltage for ADC_REF_INTERNAL source */
uint16_t ref_int;
/** Array of each channel configuration */
struct adc_emul_chan_cfg *chan_cfg;
/** Structure used for acquisition thread */
struct k_thread thread;
/** Semaphore used to control acquisition thread */
struct k_sem sem;
/** Mutex used to control access to channels config and ref voltages */
struct k_mutex cfg_mtx;
/** Stack for acquisition thread */
K_KERNEL_STACK_MEMBER(stack,
CONFIG_ADC_EMUL_ACQUISITION_THREAD_STACK_SIZE);
};
int adc_emul_const_value_set(const struct device *dev, unsigned int chan,
uint32_t value)
{
const struct adc_emul_config *config = dev->config;
struct adc_emul_data *data = dev->data;
struct adc_emul_chan_cfg *chan_cfg;
if (chan >= config->num_channels) {
LOG_ERR("unsupported channel %d", chan);
return -EINVAL;
}
chan_cfg = &data->chan_cfg[chan];
k_mutex_lock(&data->cfg_mtx, K_FOREVER);
chan_cfg->input = ADC_EMUL_CONST_VALUE;
chan_cfg->const_value = value;
k_mutex_unlock(&data->cfg_mtx);
return 0;
}
int adc_emul_value_func_set(const struct device *dev, unsigned int chan,
adc_emul_value_func func, void *func_data)
{
const struct adc_emul_config *config = dev->config;
struct adc_emul_data *data = dev->data;
struct adc_emul_chan_cfg *chan_cfg;
if (chan >= config->num_channels) {
LOG_ERR("unsupported channel %d", chan);
return -EINVAL;
}
chan_cfg = &data->chan_cfg[chan];
k_mutex_lock(&data->cfg_mtx, K_FOREVER);
chan_cfg->func = func;
chan_cfg->func_data = func_data;
chan_cfg->input = ADC_EMUL_CUSTOM_FUNC;
k_mutex_unlock(&data->cfg_mtx);
return 0;
}
int adc_emul_ref_voltage_set(const struct device *dev, enum adc_reference ref,
uint16_t value)
{
struct adc_driver_api *api = (struct adc_driver_api *)dev->api;
struct adc_emul_data *data = dev->data;
int err = 0;
k_mutex_lock(&data->cfg_mtx, K_FOREVER);
switch (ref) {
case ADC_REF_VDD_1:
data->ref_vdd = value;
break;
case ADC_REF_INTERNAL:
data->ref_int = value;
api->ref_internal = value;
break;
case ADC_REF_EXTERNAL0:
data->ref_ext0 = value;
break;
case ADC_REF_EXTERNAL1:
data->ref_ext1 = value;
break;
default:
err = -EINVAL;
}
k_mutex_unlock(&data->cfg_mtx);
return err;
}
/**
* @brief Convert @p ref to reference voltage value in mV
*
* @param data Internal data of ADC emulator
* @param ref Select which reference source should be used
*
* @return Reference voltage in mV
* @return 0 on error
*/
static uint16_t adc_emul_get_ref_voltage(struct adc_emul_data *data,
enum adc_reference ref)
{
uint16_t voltage;
k_mutex_lock(&data->cfg_mtx, K_FOREVER);
switch (ref) {
case ADC_REF_VDD_1:
voltage = data->ref_vdd;
break;
case ADC_REF_VDD_1_2:
voltage = data->ref_vdd / 2;
break;
case ADC_REF_VDD_1_3:
voltage = data->ref_vdd / 3;
break;
case ADC_REF_VDD_1_4:
voltage = data->ref_vdd / 4;
break;
case ADC_REF_INTERNAL:
voltage = data->ref_int;
break;
case ADC_REF_EXTERNAL0:
voltage = data->ref_ext0;
break;
case ADC_REF_EXTERNAL1:
voltage = data->ref_ext1;
break;
default:
voltage = 0;
}
k_mutex_unlock(&data->cfg_mtx);
return voltage;
}
static int adc_emul_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
const struct adc_emul_config *config = dev->config;
struct adc_emul_chan_cfg *emul_chan_cfg;
struct adc_emul_data *data = dev->data;
if (channel_cfg->channel_id >= config->num_channels) {
LOG_ERR("unsupported channel id '%d'", channel_cfg->channel_id);
return -ENOTSUP;
}
if (adc_emul_get_ref_voltage(data, channel_cfg->reference) == 0) {
LOG_ERR("unsupported channel reference '%d'",
channel_cfg->reference);
return -ENOTSUP;
}
if (channel_cfg->differential) {
LOG_ERR("unsupported differential mode");
return -ENOTSUP;
}
emul_chan_cfg = &data->chan_cfg[channel_cfg->channel_id];
k_mutex_lock(&data->cfg_mtx, K_FOREVER);
emul_chan_cfg->gain = channel_cfg->gain;
emul_chan_cfg->ref = channel_cfg->reference;
k_mutex_unlock(&data->cfg_mtx);
return 0;
}
/**
* @brief Check if buffer in @p sequence is big enough to hold all ADC samples
*
* @param dev ADC emulator device
* @param sequence ADC sequence description
*
* @return 0 on success
* @return -ENOMEM if buffer is not big enough
*/
static int adc_emul_check_buffer_size(const struct device *dev,
const struct adc_sequence *sequence)
{
const struct adc_emul_config *config = dev->config;
uint8_t channels = 0;
size_t needed;
uint32_t mask;
for (mask = BIT(config->num_channels - 1); mask != 0; mask >>= 1) {
if (mask & sequence->channels) {
channels++;
}
}
needed = channels * sizeof(adc_emul_res_t);
if (sequence->options) {
needed *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < needed) {
return -ENOMEM;
}
return 0;
}
/**
* @brief Start processing read request
*
* @param dev ADC emulator device
* @param sequence ADC sequence description
*
* @return 0 on success
* @return -ENOTSUP if requested resolution or channel is out side of supported
* range
* @return -ENOMEM if buffer is not big enough
* (see @ref adc_emul_check_buffer_size)
* @return other error code returned by adc_context_wait_for_completion
*/
static int adc_emul_start_read(const struct device *dev,
const struct adc_sequence *sequence)
{
const struct adc_emul_config *config = dev->config;
struct adc_emul_data *data = dev->data;
int err;
if (sequence->resolution > ADC_EMUL_MAX_RESOLUTION ||
sequence->resolution == 0) {
LOG_ERR("unsupported resolution %d", sequence->resolution);
return -ENOTSUP;
}
if (find_msb_set(sequence->channels) > config->num_channels) {
LOG_ERR("unsupported channels in mask: 0x%08x",
sequence->channels);
return -ENOTSUP;
}
err = adc_emul_check_buffer_size(dev, sequence);
if (err) {
LOG_ERR("buffer size too small");
return err;
}
data->res_mask = BIT_MASK(sequence->resolution);
data->buf = sequence->buffer;
adc_context_start_read(&data->ctx, sequence);
return adc_context_wait_for_completion(&data->ctx);
}
static int adc_emul_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
struct adc_emul_data *data = dev->data;
int err;
adc_context_lock(&data->ctx, async ? true : false, async);
err = adc_emul_start_read(dev, sequence);
adc_context_release(&data->ctx, err);
return err;
}
static int adc_emul_read(const struct device *dev,
const struct adc_sequence *sequence)
{
return adc_emul_read_async(dev, sequence, NULL);
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct adc_emul_data *data = CONTAINER_OF(ctx, struct adc_emul_data,
ctx);
data->channels = ctx->sequence.channels;
data->repeat_buf = data->buf;
k_sem_give(&data->sem);
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
bool repeat_sampling)
{
struct adc_emul_data *data = CONTAINER_OF(ctx, struct adc_emul_data,
ctx);
if (repeat_sampling) {
data->buf = data->repeat_buf;
}
}
/**
* @brief Convert input voltage of ADC @p chan to raw output value
*
* @param data Internal data of ADC emulator
* @param chan ADC channel to sample
* @param result Raw output value
*
* @return 0 on success
* @return -EINVAL if failed to get reference voltage or unknown input is
* selected
* @return other error code returned by custom function
*/
static int adc_emul_get_chan_value(struct adc_emul_data *data,
unsigned int chan,
adc_emul_res_t *result)
{
struct adc_emul_chan_cfg *chan_cfg = &data->chan_cfg[chan];
uint32_t input_mV;
uint32_t ref_v;
uint64_t temp; /* Temporary 64 bit value prevent overflows */
int err = 0;
k_mutex_lock(&data->cfg_mtx, K_FOREVER);
/* Get input voltage */
switch (chan_cfg->input) {
case ADC_EMUL_CONST_VALUE:
input_mV = chan_cfg->const_value;
break;
case ADC_EMUL_CUSTOM_FUNC:
err = chan_cfg->func(data->dev, chan, chan_cfg->func_data,
&input_mV);
if (err) {
LOG_ERR("failed to read channel %d (err %d)",
chan, err);
goto out;
}
break;
default:
LOG_ERR("unknown input source %d", chan_cfg->input);
err = -EINVAL;
goto out;
}
/* Get reference voltage and apply inverted gain */
ref_v = adc_emul_get_ref_voltage(data, chan_cfg->ref);
err = adc_gain_invert(chan_cfg->gain, &ref_v);
if (ref_v == 0 || err) {
LOG_ERR("failed to get ref voltage (channel %d)", chan);
err = -EINVAL;
goto out;
}
/* Calculate output value */
temp = (uint64_t)input_mV * data->res_mask / ref_v;
/* If output value is greater than resolution, it has to be trimmed */
if (temp > data->res_mask) {
temp = data->res_mask;
}
*result = temp;
out:
k_mutex_unlock(&data->cfg_mtx);
return err;
}
/**
* @brief Main function of thread which is used to collect samples from
* emulated ADC. When adc_context_start_sampling give semaphore,
* for each requested channel value function is called. Returned
* mV value is converted to output using reference voltage, gain
* and requested resolution.
*
* @param data Internal data of ADC emulator
*
* @return This thread should not end
*/
static void adc_emul_acquisition_thread(void *p1, void *p2, void *p3)
{
ARG_UNUSED(p2);
ARG_UNUSED(p3);
struct adc_emul_data *data = p1;
int err;
while (true) {
k_sem_take(&data->sem, K_FOREVER);
err = 0;
while (data->channels) {
adc_emul_res_t result = 0;
unsigned int chan = find_lsb_set(data->channels) - 1;
LOG_DBG("reading channel %d", chan);
err = adc_emul_get_chan_value(data, chan, &result);
if (err) {
adc_context_complete(&data->ctx, err);
break;
}
LOG_DBG("read channel %d, result = %d", chan, result);
*data->buf++ = result;
WRITE_BIT(data->channels, chan, 0);
}
if (!err) {
adc_context_on_sampling_done(&data->ctx, data->dev);
}
}
}
/**
* @brief Function called on init for each ADC emulator device. It setups all
* channels to return constant 0 mV and create acquisition thread.
*
* @param dev ADC emulator device
*
* @return 0 on success
*/
static int adc_emul_init(const struct device *dev)
{
const struct adc_emul_config *config = dev->config;
struct adc_emul_data *data = dev->data;
int chan;
data->dev = dev;
k_sem_init(&data->sem, 0, 1);
k_mutex_init(&data->cfg_mtx);
for (chan = 0; chan < config->num_channels; chan++) {
struct adc_emul_chan_cfg *chan_cfg = &data->chan_cfg[chan];
chan_cfg->func = NULL;
chan_cfg->func_data = NULL;
chan_cfg->input = ADC_EMUL_CONST_VALUE;
chan_cfg->const_value = 0;
}
k_thread_create(&data->thread, data->stack,
CONFIG_ADC_EMUL_ACQUISITION_THREAD_STACK_SIZE,
adc_emul_acquisition_thread,
data, NULL, NULL,
CONFIG_ADC_EMUL_ACQUISITION_THREAD_PRIO,
0, K_NO_WAIT);
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
#define ADC_EMUL_INIT(_num) \
static struct adc_driver_api adc_emul_api_##_num = { \
.channel_setup = adc_emul_channel_setup, \
.read = adc_emul_read, \
.ref_internal = DT_INST_PROP(_num, ref_internal_mv), \
IF_ENABLED(CONFIG_ADC_ASYNC, \
(.read_async = adc_emul_read_async,)) \
}; \
\
static struct adc_emul_chan_cfg \
adc_emul_ch_cfg_##_num[DT_INST_PROP(_num, nchannels)]; \
\
static const struct adc_emul_config adc_emul_config_##_num = { \
.num_channels = DT_INST_PROP(_num, nchannels), \
}; \
\
static struct adc_emul_data adc_emul_data_##_num = { \
ADC_CONTEXT_INIT_TIMER(adc_emul_data_##_num, ctx), \
ADC_CONTEXT_INIT_LOCK(adc_emul_data_##_num, ctx), \
ADC_CONTEXT_INIT_SYNC(adc_emul_data_##_num, ctx), \
.chan_cfg = adc_emul_ch_cfg_##_num, \
.ref_vdd = DT_INST_PROP(_num, ref_vdd_mv), \
.ref_ext0 = DT_INST_PROP(_num, ref_external0_mv), \
.ref_ext1 = DT_INST_PROP(_num, ref_external1_mv), \
.ref_int = DT_INST_PROP(_num, ref_internal_mv), \
}; \
\
DEVICE_DT_INST_DEFINE(_num, adc_emul_init, NULL, \
&adc_emul_data_##_num, \
&adc_emul_config_##_num, POST_KERNEL, \
CONFIG_ADC_INIT_PRIORITY, \
&adc_emul_api_##_num);
DT_INST_FOREACH_STATUS_OKAY(ADC_EMUL_INIT)
``` | /content/code_sandbox/drivers/adc/adc_emul.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,939 |
```c
/*
* Organisation (CSIRO) ABN 41 687 119 230.
*
*/
/*
* This is not a real ADC driver. It is used to instantiate struct
* devices for the "vnd,adc" devicetree compatible used in test code.
*/
#define DT_DRV_COMPAT vnd_adc
#include <zephyr/drivers/adc.h>
#include <zephyr/device.h>
#include <zephyr/kernel.h>
static int vnd_adc_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
return -ENOTSUP;
}
static int vnd_adc_read(const struct device *dev,
const struct adc_sequence *sequence)
{
return -ENOTSUP;
}
#ifdef CONFIG_ADC_ASYNC
static int vnd_adc_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
return -ENOTSUP;
}
#endif
static const struct adc_driver_api vnd_adc_api = {
.channel_setup = vnd_adc_channel_setup,
.read = vnd_adc_read,
#ifdef CONFIG_ADC_ASYNC
.read_async = vnd_adc_read_async,
#endif
};
#define VND_ADC_INIT(n) \
DEVICE_DT_INST_DEFINE(n, NULL, NULL, NULL, NULL, \
POST_KERNEL, \
CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \
&vnd_adc_api);
DT_INST_FOREACH_STATUS_OKAY(VND_ADC_INIT)
``` | /content/code_sandbox/drivers/adc/adc_test.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 302 |
```unknown
#
menuconfig ADC_ADS114S0X
bool "Texas instruments ADS114S0x"
default y
depends on DT_HAS_TI_ADS114S08_ENABLED
select SPI
select ADC_CONFIGURABLE_INPUTS
select ADC_CONFIGURABLE_EXCITATION_CURRENT_SOURCE_PIN
select ADC_CONFIGURABLE_VBIAS_PIN
help
Enable the driver implementation for the ADS114S0X family
config ADC_ADS114S0X_ASYNC_THREAD_INIT_PRIO
int "ADC ADS114S0x async thread priority"
default 0
depends on ADC_ADS114S0X
config ADC_ADS114S0X_ACQUISITION_THREAD_STACK_SIZE
int "Stack size for the ADC data acquisition thread"
default 400
depends on ADC_ADS114S0X
help
Size of the stack used for the internal data acquisition
thread.
config ADC_ADS114S0X_GPIO
bool "GPIO support"
default n
depends on GPIO && ADC_ADS114S0X
help
Enable GPIO child device support in the ADS114S0x ADC driver.
The GPIO functionality is handled by the ADS114S0x GPIO
driver.
config ADC_ADS114S0X_WAIT_FOR_COMPLETION_TIMEOUT_MS
int "Timeout for wait for completion of a read in ms"
default 1000
depends on ADC_ADS114S0X
help
This is the wait time in ms until a read is completed.
``` | /content/code_sandbox/drivers/adc/Kconfig.ads114s0x | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 316 |
```unknown
# LMP90xxx ADC configuration options
config ADC_LMP90XXX
bool "LMP90xxx driver"
default y
depends on DT_HAS_TI_LMP90077_ENABLED || DT_HAS_TI_LMP90078_ENABLED || \
DT_HAS_TI_LMP90079_ENABLED || DT_HAS_TI_LMP90080_ENABLED || \
DT_HAS_TI_LMP90097_ENABLED || DT_HAS_TI_LMP90098_ENABLED || \
DT_HAS_TI_LMP90099_ENABLED || DT_HAS_TI_LMP90100_ENABLED
select SPI
select ADC_CONFIGURABLE_INPUTS
select CRC
help
Enable LMP90xxx ADC driver.
The LMP90xxx is a multi-channel, low power sensor analog
frontend (AFE).
if ADC_LMP90XXX
config ADC_LMP90XXX_ACQUISITION_THREAD_STACK_SIZE
int "Stack size for the ADC data acquisition thread"
default 400
help
Size of the stack used for the internal data acquisition
thread.
config ADC_LMP90XXX_ACQUISITION_THREAD_PRIO
int "Priority for the ADC data acquisition thread"
default 0
help
Priority level for the internal ADC data acquisition thread.
config ADC_LMP90XXX_CRC
bool "Use Cyclic Redundancy Check (CRC)"
default y
help
Use Cyclic Redundancy Check (CRC) to verify the integrity of
the data read from the LMP90xxx.
config ADC_LMP90XXX_GPIO
bool "GPIO support"
depends on GPIO
select GPIO_LMP90XXX
help
Enable GPIO child device support in the LMP90xxx ADC driver.
The GPIO functionality is handled by the LMP90xxx GPIO
driver.
endif # ADC_LMP90XXX
``` | /content/code_sandbox/drivers/adc/Kconfig.lmp90xxx | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 369 |
```unknown
config ADC_ENE_KB1200
bool "ENE KB1200 ADC driver"
default y
depends on DT_HAS_ENE_KB1200_ADC_ENABLED
select PINCTRL
help
Enable ADC driver for ENE KB1200.
``` | /content/code_sandbox/drivers/adc/Kconfig.ene | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 54 |
```unknown
config ADC_TELINK_B91
bool "Telink Semiconductor B91 ADC driver"
default y
depends on DT_HAS_TELINK_B91_ADC_ENABLED
select ADC_CONFIGURABLE_INPUTS
help
Enables Telink B91 ADC driver.
if ADC_TELINK_B91
config ADC_B91_ACQUISITION_THREAD_STACK_SIZE
int "Stack size for the ADC data acquisition thread"
default 512
help
Size of the stack used for the internal data acquisition
thread.
config ADC_B91_ACQUISITION_THREAD_PRIO
int "Priority for the ADC data acquisition thread"
default 0
help
Priority level for the internal ADC data acquisition thread.
endif # ADC_TELINK_B91
``` | /content/code_sandbox/drivers/adc/Kconfig.b91 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 155 |
```unknown
config ADC_RPI_PICO
bool "Raspberry Pi Pico ADC driver"
default y
depends on DT_HAS_RASPBERRYPI_PICO_ADC_ENABLED
select PICOSDK_USE_ADC
depends on RESET
``` | /content/code_sandbox/drivers/adc/Kconfig.rpi_pico | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 49 |
```c
/*
*
*/
#define DT_DRV_COMPAT ti_cc32xx_adc
#include <errno.h>
#include <zephyr/drivers/adc.h>
#include <zephyr/device.h>
#include <zephyr/kernel.h>
#include <zephyr/init.h>
#include <soc.h>
/* Driverlib includes */
#include <inc/hw_types.h>
#include <driverlib/pin.h>
#include <driverlib/rom.h>
#include <driverlib/rom_map.h>
#include <driverlib/prcm.h>
#include <driverlib/adc.h>
#define CHAN_COUNT 4
#define ADC_CONTEXT_USES_KERNEL_TIMER
#include "adc_context.h"
#define LOG_LEVEL CONFIG_ADC_LOG_LEVEL
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(adc_cc32xx);
#define ISR_MASK (ADC_DMA_DONE | ADC_FIFO_OVERFLOW | ADC_FIFO_UNDERFLOW \
| ADC_FIFO_EMPTY | ADC_FIFO_FULL)
struct adc_cc32xx_data {
struct adc_context ctx;
const struct device *dev;
uint16_t *buffer;
uint16_t *repeat_buffer;
uint32_t channels;
uint8_t offset[CHAN_COUNT];
size_t active_channels;
};
struct adc_cc32xx_cfg {
unsigned long base;
void (*irq_cfg_func)(void);
};
static const int s_chPin[CHAN_COUNT] = {
PIN_57,
PIN_58,
PIN_59,
PIN_60,
};
static const int s_channel[CHAN_COUNT] = {
ADC_CH_0,
ADC_CH_1,
ADC_CH_2,
ADC_CH_3,
};
static inline void start_sampling(unsigned long base, int ch)
{
MAP_ADCChannelEnable(base, ch);
for (int i = 0; i < 5; i++) {
while (!MAP_ADCFIFOLvlGet(base, ch)) {
}
MAP_ADCFIFORead(base, ch);
}
MAP_ADCIntClear(base, ch, ISR_MASK);
MAP_ADCIntEnable(base, ch, ISR_MASK);
}
static void adc_context_start_sampling(struct adc_context *ctx)
{
struct adc_cc32xx_data *data =
CONTAINER_OF(ctx, struct adc_cc32xx_data, ctx);
const struct adc_cc32xx_cfg *config = data->dev->config;
data->channels = ctx->sequence.channels;
data->repeat_buffer = data->buffer;
for (int i = 0; i < CHAN_COUNT; ++i) {
if (ctx->sequence.channels & BIT(i)) {
start_sampling(config->base, s_channel[i]);
}
}
}
static void adc_context_update_buffer_pointer(struct adc_context *ctx,
bool repeat)
{
struct adc_cc32xx_data *data =
CONTAINER_OF(ctx, struct adc_cc32xx_data, ctx);
if (repeat) {
data->buffer = data->repeat_buffer;
} else {
data->buffer += data->active_channels;
}
}
static int adc_cc32xx_init(const struct device *dev)
{
struct adc_cc32xx_data *data = dev->data;
const struct adc_cc32xx_cfg *config = dev->config;
data->dev = dev;
LOG_DBG("Initializing....");
for (int i = 0; i < CHAN_COUNT; ++i) {
const int ch = s_channel[i];
MAP_ADCIntDisable(config->base, ch, ISR_MASK);
MAP_ADCChannelDisable(config->base, ch);
MAP_ADCDMADisable(config->base, ch);
MAP_ADCIntClear(config->base, ch, ISR_MASK);
}
MAP_ADCEnable(config->base);
config->irq_cfg_func();
adc_context_unlock_unconditionally(&data->ctx);
return 0;
}
static int adc_cc32xx_channel_setup(const struct device *dev,
const struct adc_channel_cfg *channel_cfg)
{
const struct adc_cc32xx_cfg *config = dev->config;
const uint8_t ch = channel_cfg->channel_id;
if (ch >= CHAN_COUNT) {
LOG_ERR("Channel %d is not supported, max %d", ch, CHAN_COUNT);
return -EINVAL;
}
if (channel_cfg->acquisition_time != ADC_ACQ_TIME_DEFAULT) {
LOG_ERR("Acquisition time is not valid");
return -EINVAL;
}
if (channel_cfg->differential) {
LOG_ERR("Differential channels are not supported");
return -EINVAL;
}
if (channel_cfg->gain != ADC_GAIN_1) {
LOG_ERR("Gain is not valid");
return -EINVAL;
}
if (channel_cfg->reference != ADC_REF_INTERNAL) {
LOG_ERR("Reference is not valid");
return -EINVAL;
}
LOG_DBG("Setup %d", ch);
MAP_ADCChannelDisable(config->base, s_channel[ch]);
MAP_ADCIntDisable(config->base, s_channel[ch], ISR_MASK);
MAP_PinDirModeSet(s_chPin[ch], PIN_DIR_MODE_IN);
MAP_PinTypeADC(s_chPin[ch], PIN_MODE_255);
return 0;
}
static int cc32xx_read(const struct device *dev,
const struct adc_sequence *sequence,
bool asynchronous,
struct k_poll_signal *sig)
{
struct adc_cc32xx_data *data = dev->data;
int rv;
size_t exp_size;
if (sequence->resolution != 12) {
LOG_ERR("Only 12 Resolution is supported, but %d got",
sequence->resolution);
return -EINVAL;
}
data->active_channels = 0;
for (int i = 0; i < CHAN_COUNT; ++i) {
if (!(sequence->channels & BIT(i))) {
continue;
}
data->offset[i] = data->active_channels++;
}
exp_size = data->active_channels * sizeof(uint16_t);
if (sequence->options) {
exp_size *= (1 + sequence->options->extra_samplings);
}
if (sequence->buffer_size < exp_size) {
LOG_ERR("Required buffer size is %u, but %u got",
exp_size, sequence->buffer_size);
return -ENOMEM;
}
data->buffer = sequence->buffer;
adc_context_lock(&data->ctx, asynchronous, sig);
adc_context_start_read(&data->ctx, sequence);
rv = adc_context_wait_for_completion(&data->ctx);
adc_context_release(&data->ctx, rv);
return rv;
}
static int adc_cc32xx_read(const struct device *dev,
const struct adc_sequence *sequence)
{
return cc32xx_read(dev, sequence, false, NULL);
}
#ifdef CONFIG_ADC_ASYNC
static int adc_cc32xx_read_async(const struct device *dev,
const struct adc_sequence *sequence,
struct k_poll_signal *async)
{
return cc32xx_read(dev, sequence, true, async);
}
#endif
static void adc_cc32xx_isr(const struct device *dev, int no)
{
const struct adc_cc32xx_cfg *config = dev->config;
struct adc_cc32xx_data *data = dev->data;
const int chan = s_channel[no];
unsigned long mask = MAP_ADCIntStatus(config->base, chan);
int cnt = 0;
int rv = 0;
MAP_ADCIntClear(config->base, chan, mask);
if ((mask & ADC_FIFO_EMPTY) || !(mask & ADC_FIFO_FULL)) {
return;
}
while (MAP_ADCFIFOLvlGet(config->base, chan)) {
rv += (MAP_ADCFIFORead(config->base, chan) >> 2) & 0x0FFF;
cnt++;
}
*(data->buffer + data->offset[no]) = rv / cnt;
data->channels &= ~BIT(no);
MAP_ADCIntDisable(config->base, chan, ISR_MASK);
MAP_ADCChannelDisable(config->base, chan);
LOG_DBG("ISR %d, 0x%lX %d %d", chan, mask, rv, cnt);
if (!data->channels) {
adc_context_on_sampling_done(&data->ctx, dev);
}
}
static void adc_cc32xx_isr_ch0(const struct device *dev)
{
adc_cc32xx_isr(dev, 0);
}
static void adc_cc32xx_isr_ch1(const struct device *dev)
{
adc_cc32xx_isr(dev, 1);
}
static void adc_cc32xx_isr_ch2(const struct device *dev)
{
adc_cc32xx_isr(dev, 2);
}
static void adc_cc32xx_isr_ch3(const struct device *dev)
{
adc_cc32xx_isr(dev, 3);
}
static const struct adc_driver_api cc32xx_driver_api = {
.channel_setup = adc_cc32xx_channel_setup,
.read = adc_cc32xx_read,
#ifdef CONFIG_ADC_ASYNC
.read_async = adc_cc32xx_read_async,
#endif
.ref_internal = 1467,
};
#define cc32xx_ADC_IRQ_CONNECT(index, chan) \
do { \
IRQ_CONNECT(DT_INST_IRQ_BY_IDX(index, chan, irq), \
DT_INST_IRQ_BY_IDX(index, chan, priority), \
adc_cc32xx_isr_ch##chan, \
DEVICE_DT_INST_GET(index), 0); \
irq_enable(DT_INST_IRQ_BY_IDX(index, chan, irq)); \
} while (false)
#define cc32xx_ADC_INIT(index) \
\
static void adc_cc32xx_cfg_func_##index(void); \
\
static const struct adc_cc32xx_cfg adc_cc32xx_cfg_##index = { \
.base = DT_INST_REG_ADDR(index), \
.irq_cfg_func = adc_cc32xx_cfg_func_##index, \
}; \
static struct adc_cc32xx_data adc_cc32xx_data_##index = { \
ADC_CONTEXT_INIT_TIMER(adc_cc32xx_data_##index, ctx), \
ADC_CONTEXT_INIT_LOCK(adc_cc32xx_data_##index, ctx), \
ADC_CONTEXT_INIT_SYNC(adc_cc32xx_data_##index, ctx), \
}; \
\
DEVICE_DT_INST_DEFINE(index, \
&adc_cc32xx_init, NULL, &adc_cc32xx_data_##index, \
&adc_cc32xx_cfg_##index, POST_KERNEL, \
CONFIG_ADC_INIT_PRIORITY, \
&cc32xx_driver_api); \
\
static void adc_cc32xx_cfg_func_##index(void) \
{ \
cc32xx_ADC_IRQ_CONNECT(index, 0); \
cc32xx_ADC_IRQ_CONNECT(index, 1); \
cc32xx_ADC_IRQ_CONNECT(index, 2); \
cc32xx_ADC_IRQ_CONNECT(index, 3); \
}
DT_INST_FOREACH_STATUS_OKAY(cc32xx_ADC_INIT)
``` | /content/code_sandbox/drivers/adc/adc_cc32xx.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,294 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_DRIVERS_PSCI_PSCI_H_
#define ZEPHYR_DRIVERS_PSCI_PSCI_H_
#include <zephyr/drivers/pm_cpu_ops/psci.h>
#ifdef CONFIG_64BIT
#define PSCI_FN_NATIVE(version, name) PSCI_##version##_FN64_##name
#else
#define PSCI_FN_NATIVE(version, name) PSCI_##version##_FN_##name
#endif
/* PSCI v0.2 interface */
#define PSCI_0_2_FN_BASE 0x84000000
#define PSCI_0_2_FN(n) (PSCI_0_2_FN_BASE + (n))
#define PSCI_0_2_64BIT 0x40000000
#define PSCI_0_2_FN64_BASE (PSCI_0_2_FN_BASE + PSCI_0_2_64BIT)
#define PSCI_0_2_FN64(n) (PSCI_0_2_FN64_BASE + (n))
#define PSCI_0_2_FN_PSCI_VERSION PSCI_0_2_FN(0)
#define PSCI_0_2_FN_CPU_SUSPEND PSCI_0_2_FN(1)
#define PSCI_0_2_FN_CPU_OFF PSCI_0_2_FN(2)
#define PSCI_0_2_FN_CPU_ON PSCI_0_2_FN(3)
#define PSCI_0_2_FN_AFFINITY_INFO PSCI_0_2_FN(4)
#define PSCI_0_2_FN_MIGRATE PSCI_0_2_FN(5)
#define PSCI_0_2_FN_MIGRATE_INFO_TYPE PSCI_0_2_FN(6)
#define PSCI_0_2_FN_MIGRATE_INFO_UP_CPU PSCI_0_2_FN(7)
#define PSCI_0_2_FN_SYSTEM_OFF PSCI_0_2_FN(8)
#define PSCI_0_2_FN_SYSTEM_RESET PSCI_0_2_FN(9)
#define PSCI_0_2_FN64_CPU_SUSPEND PSCI_0_2_FN64(1)
#define PSCI_0_2_FN64_CPU_ON PSCI_0_2_FN64(3)
#define PSCI_0_2_FN64_AFFINITY_INFO PSCI_0_2_FN64(4)
#define PSCI_0_2_FN64_MIGRATE PSCI_0_2_FN64(5)
#define PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU PSCI_0_2_FN64(7)
#define PSCI_0_2_FN64_SYSTEM_RESET PSCI_0_2_FN(9)
/* PSCI v1.0 interface */
#define PSCI_1_0_FN_BASE (0x84000000U)
#define PSCI_1_0_64BIT (0x40000000U)
#define PSCI_1_0_FN64_BASE (PSCI_1_0_FN_BASE + PSCI_1_0_64BIT)
#define PSCI_1_0_FN(n) (PSCI_1_0_FN_BASE + (n))
#define PSCI_1_0_FN64(n) (PSCI_1_0_FN64_BASE + (n))
#define PSCI_1_0_FN_PSCI_VERSION PSCI_1_0_FN(0)
#define PSCI_1_0_FN_CPU_SUSPEND PSCI_1_0_FN(1)
#define PSCI_1_0_FN_CPU_OFF PSCI_1_0_FN(2)
#define PSCI_1_0_FN_CPU_ON PSCI_1_0_FN(3)
#define PSCI_1_0_FN_AFFINITY_INFO PSCI_1_0_FN(4)
#define PSCI_1_0_FN_MIGRATE PSCI_1_0_FN(5)
#define PSCI_1_0_FN_MIGRATE_INFO_TYPE PSCI_1_0_FN(6)
#define PSCI_1_0_FN_MIGRATE_INFO_UP_CPU PSCI_1_0_FN(7)
#define PSCI_1_0_FN_SYSTEM_OFF PSCI_1_0_FN(8)
#define PSCI_1_0_FN_SYSTEM_RESET PSCI_1_0_FN(9)
#define PSCI_1_0_FN_PSCI_FEATURES PSCI_1_0_FN(10)
#define PSCI_1_0_FN64_CPU_SUSPEND PSCI_1_0_FN64(1)
#define PSCI_1_0_FN64_CPU_ON PSCI_1_0_FN64(3)
#define PSCI_1_0_FN64_AFFINITY_INFO PSCI_1_0_FN64(4)
#define PSCI_1_0_FN64_MIGRATE PSCI_1_0_FN64(5)
#define PSCI_1_0_FN64_MIGRATE_INFO_UP_CPU PSCI_1_0_FN64(7)
/* PSCI function ID is same for both 32 and 64 bit.*/
#define PSCI_1_0_FN64_SYSTEM_RESET PSCI_1_0_FN(9)
#define PSCI_1_0_FN64_PSCI_FEATURES PSCI_1_0_FN(10)
/* PSCI v1.1 interface. */
#define PSCI_1_1_FN_BASE (0x84000000U)
#define PSCI_1_1_64BIT (0x40000000U)
#define PSCI_1_1_FN64_BASE (PSCI_1_1_FN_BASE + PSCI_1_1_64BIT)
#define PSCI_1_1_FN(n) (PSCI_1_1_FN_BASE + (n))
#define PSCI_1_1_FN64(n) (PSCI_1_1_FN64_BASE + (n))
#define PSCI_1_1_FN_PSCI_VERSION PSCI_1_1_FN(0)
#define PSCI_1_1_FN_CPU_SUSPEND PSCI_1_1_FN(1)
#define PSCI_1_1_FN_CPU_OFF PSCI_1_1_FN(2)
#define PSCI_1_1_FN_CPU_ON PSCI_1_1_FN(3)
#define PSCI_1_1_FN_AFFINITY_INFO PSCI_1_1_FN(4)
#define PSCI_1_1_FN_MIGRATE PSCI_1_1_FN(5)
#define PSCI_1_1_FN_MIGRATE_INFO_TYPE PSCI_1_1_FN(6)
#define PSCI_1_1_FN_MIGRATE_INFO_UP_CPU PSCI_1_1_FN(7)
#define PSCI_1_1_FN_SYSTEM_OFF PSCI_1_1_FN(8)
#define PSCI_1_1_FN_SYSTEM_RESET PSCI_1_1_FN(9)
#define PSCI_1_1_FN_PSCI_FEATURES PSCI_1_1_FN(10)
#define PSCI_1_1_FN_SYSTEM_RESET2 PSCI_1_1_FN(18)
#define PSCI_1_1_FN64_CPU_SUSPEND PSCI_1_1_FN64(1)
#define PSCI_1_1_FN64_CPU_ON PSCI_1_1_FN64(3)
#define PSCI_1_1_FN64_AFFINITY_INFO PSCI_1_1_FN64(4)
#define PSCI_1_1_FN64_MIGRATE PSCI_1_1_FN64(5)
#define PSCI_1_1_FN64_MIGRATE_INFO_UP_CPU PSCI_1_1_FN64(7)
/* PSCI function ID is same for both 32 and 64 bit.*/
#define PSCI_1_1_FN64_SYSTEM_RESET PSCI_1_1_FN(9)
#define PSCI_1_1_FN64_PSCI_FEATURES PSCI_1_1_FN(10)
#define PSCI_1_1_FN64_SYSTEM_RESET2 PSCI_1_1_FN64(18)
/* PSCI return values (inclusive of all PSCI versions) */
#define PSCI_RET_SUCCESS 0
#define PSCI_RET_NOT_SUPPORTED -1
#define PSCI_RET_INVALID_PARAMS -2
#define PSCI_RET_DENIED -3
#define PSCI_RET_ALREADY_ON -4
#define PSCI_RET_ON_PENDING -5
#define PSCI_RET_INTERNAL_FAILURE -6
#define PSCI_RET_NOT_PRESENT -7
#define PSCI_RET_DISABLED -8
#define PSCI_RET_INVALID_ADDRESS -9
typedef unsigned long (psci_fn)(unsigned long, unsigned long,
unsigned long, unsigned long);
struct psci_data_t {
enum arm_smccc_conduit conduit;
psci_fn *invoke_psci_fn;
uint32_t ver;
};
/* PSCI configuration data. */
struct psci_config_t {
const char *method;
};
#endif /* ZEPHYR_DRIVERS_PSCI_PSCI_H_ */
``` | /content/code_sandbox/drivers/pm_cpu_ops/pm_cpu_ops_psci.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,941 |
```c
/*
*
*/
#include <zephyr/kernel.h>
int __weak pm_cpu_on(unsigned long cpuid, uintptr_t entry_point)
{
return -ENOTSUP;
}
int __weak pm_cpu_off(void)
{
return -ENOTSUP;
}
``` | /content/code_sandbox/drivers/pm_cpu_ops/pm_cpu_ops_weak_impl.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 52 |
```unknown
# CPU power management driver configuration options
menuconfig PM_CPU_OPS
bool "CPU power management drivers"
help
Enable CPU power management drivers configuration
if PM_CPU_OPS
module = PM_CPU_OPS
module-str = pm_cpu_ops
source "subsys/logging/Kconfig.template.log_config"
config PM_CPU_OPS_HAS_DRIVER
bool
config PM_CPU_OPS_PSCI
bool "Support for the ARM Power State Coordination Interface (PSCI)"
default y
depends on DT_HAS_ARM_PSCI_0_2_ENABLED || DT_HAS_ARM_PSCI_1_1_ENABLED
select PM_CPU_OPS_HAS_DRIVER
select HAS_POWEROFF
help
Say Y here if you want Zephyr to communicate with system firmware
implementing the PSCI specification for CPU-centric power
management operations described in ARM document number ARM DEN
0022A ("Power State Coordination Interface System Software on
ARM processors").
config PSCI_SHELL
bool "Support for PSCI interface shell commands"
depends on SHELL && PM_CPU_OPS_PSCI
help
Say Y here if you need to enable PSCI interface shell commands
like 'warm' and 'cold' reset commands.
endif
``` | /content/code_sandbox/drivers/pm_cpu_ops/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 259 |
```c
/*
*
*/
#include <zephyr/shell/shell.h>
#include <zephyr/drivers/pm_cpu_ops.h>
/* Zephyr kernel start address. */
extern void __start(void);
static int cmd_reboot_warm(const struct shell *shctx, size_t argc, char **argv)
{
ARG_UNUSED(shctx);
ARG_UNUSED(argc);
ARG_UNUSED(argv);
int ret;
ret = pm_system_reset(SYS_WARM_RESET);
if (ret != 0) {
shell_error(shctx, "Failed to perform system warm reset");
}
return ret;
}
static int cmd_reboot_cold(const struct shell *shctx, size_t argc, char **argv)
{
ARG_UNUSED(shctx);
ARG_UNUSED(argc);
ARG_UNUSED(argv);
int ret;
ret = pm_system_reset(SYS_COLD_RESET);
if (ret != 0) {
shell_error(shctx, "Failed to perform system cold reset");
}
return ret;
}
static int cmd_psci_cpuon(const struct shell *shctx, size_t argc, char **argv)
{
ARG_UNUSED(shctx);
ARG_UNUSED(argc);
long cpu_id;
int result;
errno = 0;
cpu_id = strtol(argv[1], NULL, 10);
if (cpu_id == 0 || cpu_id == LONG_MIN || cpu_id == LONG_MAX) {
if (errno != 0) {
shell_error(shctx, "psci: invalid input:%ld", cpu_id);
return -EINVAL;
}
}
result = pm_cpu_on((unsigned long)cpu_id, (uintptr_t)&__start);
return result;
}
SHELL_STATIC_SUBCMD_SET_CREATE(
sub_reboot,
SHELL_CMD_ARG(warm, NULL, "System warm reset. Usage: <psci warm>", cmd_reboot_warm, 1, 0),
SHELL_CMD_ARG(cold, NULL, "System cold reset. Usage: <psci cold>", cmd_reboot_cold, 1, 0),
SHELL_CMD_ARG(cpuon, NULL, "Power-up the secondary CPU. Usage: <psci cpuon <cpuid>>",
cmd_psci_cpuon, 2, 0),
SHELL_SUBCMD_SET_END /* Array terminated. */
);
SHELL_CMD_REGISTER(psci, &sub_reboot, "ARM PSCI interface commands", NULL);
``` | /content/code_sandbox/drivers/pm_cpu_ops/psci_shell.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 497 |
```c
/*
*
*
*/
#define DT_DRV_COMPAT arm_psci_0_2
#define LOG_LEVEL CONFIG_PM_CPU_OPS_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(psci);
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/drivers/pm_cpu_ops.h>
#include "pm_cpu_ops_psci.h"
#ifdef CONFIG_POWEROFF
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/poweroff.h>
#endif /* CONFIG_POWEROFF */
/* PSCI data object. */
static struct psci_data_t psci_data;
static int psci_to_dev_err(int ret)
{
switch (ret) {
case PSCI_RET_SUCCESS:
return 0;
case PSCI_RET_NOT_SUPPORTED:
return -ENOTSUP;
case PSCI_RET_INVALID_PARAMS:
case PSCI_RET_INVALID_ADDRESS:
return -EINVAL;
case PSCI_RET_DENIED:
return -EPERM;
}
return -EINVAL;
}
int pm_cpu_off(void)
{
int ret;
if (psci_data.conduit == SMCCC_CONDUIT_NONE) {
return -EINVAL;
}
ret = psci_data.invoke_psci_fn(PSCI_0_2_FN_CPU_OFF, 0, 0, 0);
return psci_to_dev_err(ret);
}
int pm_cpu_on(unsigned long cpuid,
uintptr_t entry_point)
{
int ret;
if (psci_data.conduit == SMCCC_CONDUIT_NONE) {
return -EINVAL;
}
ret = psci_data.invoke_psci_fn(PSCI_FN_NATIVE(0_2, CPU_ON), cpuid,
(unsigned long) entry_point, 0);
return psci_to_dev_err(ret);
}
#ifdef CONFIG_POWEROFF
void z_sys_poweroff(void)
{
int ret;
__ASSERT_NO_MSG(psci_data.conduit != SMCCC_CONDUIT_NONE);
ret = psci_data.invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
if (ret < 0) {
printk("System power off failed (%d) - halting\n", ret);
}
for (;;) {
/* wait for power off */
}
}
#endif /* CONFIG_POWEROFF */
/**
* This function checks whether the given ID is supported or not, using
* PSCI_FEATURES command.PSCI_FEATURES is supported from version 1.0 onwards.
*/
static int psci_features_check(unsigned long function_id)
{
/* PSCI_FEATURES function ID is supported from PSCI 1.0 onwards. */
if (!(PSCI_VERSION_MAJOR(psci_data.ver) >= 1)) {
LOG_ERR("Function ID %lu not supported", function_id);
return -ENOTSUP;
}
return psci_data.invoke_psci_fn(PSCI_FN_NATIVE(1_0, PSCI_FEATURES), function_id, 0, 0);
}
int pm_system_reset(unsigned char reset_type)
{
int ret;
if (psci_data.conduit == SMCCC_CONDUIT_NONE) {
return -EINVAL;
}
if ((reset_type == SYS_WARM_RESET) &&
(!psci_features_check(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2)))) {
ret = psci_data.invoke_psci_fn(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2), 0, 0, 0);
} else if (reset_type == SYS_COLD_RESET) {
ret = psci_data.invoke_psci_fn(PSCI_FN_NATIVE(0_2, SYSTEM_RESET), 0, 0, 0);
} else {
LOG_ERR("Invalid system reset type issued");
return -EINVAL;
}
return psci_to_dev_err(ret);
}
static unsigned long __invoke_psci_fn_hvc(unsigned long function_id,
unsigned long arg0,
unsigned long arg1,
unsigned long arg2)
{
struct arm_smccc_res res;
arm_smccc_hvc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);
return res.a0;
}
static unsigned long __invoke_psci_fn_smc(unsigned long function_id,
unsigned long arg0,
unsigned long arg1,
unsigned long arg2)
{
struct arm_smccc_res res;
arm_smccc_smc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);
return res.a0;
}
static uint32_t psci_get_version(void)
{
return psci_data.invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
}
static int set_conduit_method(const struct device *dev)
{
const struct psci_config_t *dev_config = (const struct psci_config_t *)dev->config;
if (!strcmp("hvc", dev_config->method)) {
psci_data.conduit = SMCCC_CONDUIT_HVC;
psci_data.invoke_psci_fn = __invoke_psci_fn_hvc;
} else if (!strcmp("smc", dev_config->method)) {
psci_data.conduit = SMCCC_CONDUIT_SMC;
psci_data.invoke_psci_fn = __invoke_psci_fn_smc;
} else {
LOG_ERR("Invalid conduit method");
return -EINVAL;
}
return 0;
}
static int psci_detect(void)
{
uint32_t ver = psci_get_version();
LOG_DBG("Detected PSCIv%d.%d",
PSCI_VERSION_MAJOR(ver),
PSCI_VERSION_MINOR(ver));
if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
LOG_ERR("PSCI unsupported version");
return -ENOTSUP;
}
psci_data.ver = ver;
return 0;
}
uint32_t psci_version(void)
{
return psci_data.ver;
}
static int psci_init(const struct device *dev)
{
psci_data.conduit = SMCCC_CONDUIT_NONE;
if (set_conduit_method(dev)) {
return -ENOTSUP;
}
return psci_detect();
}
/**
* Each PSCI interface versions have different DT compatible strings like arm,psci-0.2,
* arm,psci-1.1 and so on. However, the same driver can be used for all the versions with
* the below mentioned DT method where we need to #undef the default version arm,psci-0.2
* and #define the required version like arm,psci-1.0 or arm,psci-1.1.
*/
#define PSCI_DEFINE(inst, ver) \
static const struct psci_config_t psci_config_##inst##ver = { \
.method = DT_PROP(DT_DRV_INST(inst), method) \
}; \
DEVICE_DT_INST_DEFINE(inst, \
&psci_init, \
NULL, \
&psci_data, \
&psci_config_##inst##ver, \
PRE_KERNEL_1, \
CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \
NULL);
#define PSCI_0_2_INIT(n) PSCI_DEFINE(n, PSCI_0_2)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT arm_psci_0_2
DT_INST_FOREACH_STATUS_OKAY(PSCI_0_2_INIT)
#define PSCI_1_1_INIT(n) PSCI_DEFINE(n, PSCI_1_1)
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT arm_psci_1_1
DT_INST_FOREACH_STATUS_OKAY(PSCI_1_1_INIT)
``` | /content/code_sandbox/drivers/pm_cpu_ops/pm_cpu_ops_psci.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,655 |
```c
/*
*
*/
/** @file
*
* @brief Per-thread errno accessor function
*
* Allow accessing the errno for the current thread without involving the
* context switching.
*/
#include <zephyr/kernel.h>
#include <zephyr/internal/syscall_handler.h>
/*
* Define _k_neg_eagain for use in assembly files as errno.h is
* not assembly language safe.
* FIXME: wastes 4 bytes
*/
const int _k_neg_eagain = -EAGAIN;
#ifdef CONFIG_ERRNO
#if defined(CONFIG_LIBC_ERRNO)
/* nothing needed here */
#elif defined(CONFIG_ERRNO_IN_TLS)
__thread int z_errno_var;
#else
#ifdef CONFIG_USERSPACE
int *z_impl_z_errno(void)
{
/* Initialized to the lowest address in the stack so the thread can
* directly read/write it
*/
return &_current->userspace_local_data->errno_var;
}
static inline int *z_vrfy_z_errno(void)
{
return z_impl_z_errno();
}
#include <zephyr/syscalls/z_errno_mrsh.c>
#else
int *z_impl_z_errno(void)
{
return &_current->errno_var;
}
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_ERRNO_IN_TLS */
#endif /* CONFIG_ERRNO */
``` | /content/code_sandbox/kernel/errno.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 259 |
```unknown
#
menu "SMP Options"
config SMP
bool "Symmetric multiprocessing support"
depends on USE_SWITCH
depends on !ATOMIC_OPERATIONS_C
help
When true, kernel will be built with SMP support, allowing
more than one CPU to schedule Zephyr tasks at a time.
config USE_SWITCH
bool "Use new-style _arch_switch instead of arch_swap"
depends on USE_SWITCH_SUPPORTED
help
The _arch_switch() API is a lower level context switching
primitive than the original arch_swap mechanism. It is required
for an SMP-aware scheduler, or if the architecture does not
provide arch_swap. In uniprocess situations where the
architecture provides both, _arch_switch incurs more somewhat
overhead and may be slower.
config USE_SWITCH_SUPPORTED
bool
help
Indicates whether _arch_switch() API is supported by the
currently enabled platform. This option should be selected by
platforms that implement it.
config SMP_BOOT_DELAY
bool "Delay booting secondary cores"
depends on SMP
help
By default Zephyr will boot all available CPUs during start up.
Select this option to skip this and allow custom code
(architecture/SoC/board/application) to boot secondary CPUs at
a later time.
config MP_NUM_CPUS
int "Number of CPUs/cores [DEPRECATED]"
default MP_MAX_NUM_CPUS
range 1 12
help
This is deprecated, please use MP_MAX_NUM_CPUS instead.
config MP_MAX_NUM_CPUS
int "Maximum number of CPUs/cores"
default 1
range 1 12
help
Maximum number of multiprocessing-capable cores available to the
multicpu API and SMP features.
config SCHED_IPI_SUPPORTED
bool
help
True if the architecture supports a call to arch_sched_broadcast_ipi()
to broadcast an interrupt that will call z_sched_ipi() on other CPUs
in the system. Required for k_thread_abort() to operate with
reasonable latency (otherwise we might have to wait for the other
thread to take an interrupt, which can be arbitrarily far in the
future).
config SCHED_IPI_CASCADE
bool "Use cascading IPIs to correct localized scheduling"
depends on SCHED_CPU_MASK && !SCHED_CPU_MASK_PIN_ONLY
default n
help
Threads that are preempted by a local thread (a thread that is
restricted by its CPU mask to execute on a subset of all CPUs) may
trigger additional IPIs when the preempted thread is of higher
priority than a currently executing thread on another CPU. Although
these cascading IPIs will ensure that the system will settle upon a
valid set of high priority threads, it comes at a performance cost.
config TRACE_SCHED_IPI
bool "Test IPI"
help
When true, it will add a hook into z_sched_ipi(), in order
to check if schedule IPI has called or not, for testing
purpose.
depends on SCHED_IPI_SUPPORTED
depends on MP_MAX_NUM_CPUS>1
config IPI_OPTIMIZE
bool "Optimize IPI delivery"
default n
depends on SCHED_IPI_SUPPORTED && MP_MAX_NUM_CPUS>1
help
When selected, the kernel will attempt to determine the minimum
set of CPUs that need an IPI to trigger a reschedule in response to
a thread newly made ready for execution. This increases the
computation required at every scheduler operation by a value that is
O(N) in the number of CPUs, and in exchange reduces the number of
interrupts delivered. Which to choose is going to depend on
application behavior. If the architecture also supports directing
IPIs to specific CPUs then this has the potential to significantly
reduce the number of IPIs (and consequently ISRs) processed by the
system as the number of CPUs increases. If not, the only benefit
would be to not issue any IPIs if the newly readied thread is of
lower priority than all the threads currently executing on other CPUs.
config KERNEL_COHERENCE
bool "Place all shared data into coherent memory"
depends on ARCH_HAS_COHERENCE
default y if SMP && MP_MAX_NUM_CPUS > 1
select THREAD_STACK_INFO
help
When available and selected, the kernel will build in a mode
where all shared data is placed in multiprocessor-coherent
(generally "uncached") memory. Thread stacks will remain
cached, as will application memory declared with
__incoherent. This is intended for Zephyr SMP kernels
running on cache-incoherent architectures only. Note that
when this is selected, there is an implicit API change that
assumes cache coherence to any memory passed to the kernel.
Code that creates kernel data structures in uncached regions
may fail strangely. Some assertions exist to catch these
mistakes, but not all circumstances can be tested.
config TICKET_SPINLOCKS
bool "Ticket spinlocks for lock acquisition fairness [EXPERIMENTAL]"
select EXPERIMENTAL
help
Basic spinlock implementation is based on single
atomic variable and doesn't guarantee locking fairness
across multiple CPUs. It's even possible that single CPU
will win the contention every time which will result
in a live-lock.
Ticket spinlocks provide a FIFO order of lock acquisition
which resolves such unfairness issue at the cost of slightly
increased memory footprint.
endmenu
``` | /content/code_sandbox/kernel/Kconfig.smp | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,185 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <ksched.h>
#include <zephyr/spinlock.h>
extern struct k_spinlock _sched_spinlock;
# ifdef CONFIG_SMP
/* Right now we use a two byte for this mask */
BUILD_ASSERT(CONFIG_MP_MAX_NUM_CPUS <= 16, "Too many CPUs for mask word");
# endif /* CONFIG_SMP */
static int cpu_mask_mod(k_tid_t thread, uint32_t enable_mask, uint32_t disable_mask)
{
int ret = 0;
#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
__ASSERT(z_is_thread_prevented_from_running(thread),
"Running threads cannot change CPU pin");
#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
K_SPINLOCK(&_sched_spinlock) {
if (z_is_thread_prevented_from_running(thread)) {
thread->base.cpu_mask |= enable_mask;
thread->base.cpu_mask &= ~disable_mask;
} else {
ret = -EINVAL;
}
}
#if defined(CONFIG_ASSERT) && defined(CONFIG_SCHED_CPU_MASK_PIN_ONLY)
int m = thread->base.cpu_mask;
__ASSERT((m == 0) || ((m & (m - 1)) == 0),
"Only one CPU allowed in mask when PIN_ONLY");
#endif /* defined(CONFIG_ASSERT) && defined(CONFIG_SCHED_CPU_MASK_PIN_ONLY) */
return ret;
}
int k_thread_cpu_mask_clear(k_tid_t thread)
{
return cpu_mask_mod(thread, 0, 0xffffffff);
}
int k_thread_cpu_mask_enable_all(k_tid_t thread)
{
return cpu_mask_mod(thread, 0xffffffff, 0);
}
int k_thread_cpu_mask_enable(k_tid_t thread, int cpu)
{
return cpu_mask_mod(thread, BIT(cpu), 0);
}
int k_thread_cpu_mask_disable(k_tid_t thread, int cpu)
{
return cpu_mask_mod(thread, 0, BIT(cpu));
}
int k_thread_cpu_pin(k_tid_t thread, int cpu)
{
int ret;
ret = k_thread_cpu_mask_clear(thread);
if (ret == 0) {
return k_thread_cpu_mask_enable(thread, cpu);
}
return ret;
}
``` | /content/code_sandbox/kernel/cpu_mask.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 451 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/toolchain.h>
#include <ksched.h>
#include <wait_q.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/init.h>
#ifdef CONFIG_OBJ_CORE_CONDVAR
static struct k_obj_type obj_type_condvar;
#endif /* CONFIG_OBJ_CORE_CONDVAR */
static struct k_spinlock lock;
int z_impl_k_condvar_init(struct k_condvar *condvar)
{
z_waitq_init(&condvar->wait_q);
k_object_init(condvar);
#ifdef CONFIG_OBJ_CORE_CONDVAR
k_obj_core_init_and_link(K_OBJ_CORE(condvar), &obj_type_condvar);
#endif /* CONFIG_OBJ_CORE_CONDVAR */
SYS_PORT_TRACING_OBJ_INIT(k_condvar, condvar, 0);
return 0;
}
#ifdef CONFIG_USERSPACE
int z_vrfy_k_condvar_init(struct k_condvar *condvar)
{
K_OOPS(K_SYSCALL_OBJ_INIT(condvar, K_OBJ_CONDVAR));
return z_impl_k_condvar_init(condvar);
}
#include <zephyr/syscalls/k_condvar_init_mrsh.c>
#endif /* CONFIG_USERSPACE */
int z_impl_k_condvar_signal(struct k_condvar *condvar)
{
k_spinlock_key_t key = k_spin_lock(&lock);
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_condvar, signal, condvar);
struct k_thread *thread = z_unpend_first_thread(&condvar->wait_q);
if (thread != NULL) {
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_condvar, signal, condvar, K_FOREVER);
arch_thread_return_value_set(thread, 0);
z_ready_thread(thread);
z_reschedule(&lock, key);
} else {
k_spin_unlock(&lock, key);
}
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_condvar, signal, condvar, 0);
return 0;
}
#ifdef CONFIG_USERSPACE
int z_vrfy_k_condvar_signal(struct k_condvar *condvar)
{
K_OOPS(K_SYSCALL_OBJ(condvar, K_OBJ_CONDVAR));
return z_impl_k_condvar_signal(condvar);
}
#include <zephyr/syscalls/k_condvar_signal_mrsh.c>
#endif /* CONFIG_USERSPACE */
int z_impl_k_condvar_broadcast(struct k_condvar *condvar)
{
struct k_thread *pending_thread;
k_spinlock_key_t key;
int woken = 0;
key = k_spin_lock(&lock);
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_condvar, broadcast, condvar);
/* wake up any threads that are waiting to write */
for (pending_thread = z_unpend_first_thread(&condvar->wait_q); pending_thread != NULL;
pending_thread = z_unpend_first_thread(&condvar->wait_q)) {
woken++;
arch_thread_return_value_set(pending_thread, 0);
z_ready_thread(pending_thread);
}
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_condvar, broadcast, condvar, woken);
z_reschedule(&lock, key);
return woken;
}
#ifdef CONFIG_USERSPACE
int z_vrfy_k_condvar_broadcast(struct k_condvar *condvar)
{
K_OOPS(K_SYSCALL_OBJ(condvar, K_OBJ_CONDVAR));
return z_impl_k_condvar_broadcast(condvar);
}
#include <zephyr/syscalls/k_condvar_broadcast_mrsh.c>
#endif /* CONFIG_USERSPACE */
int z_impl_k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
k_timeout_t timeout)
{
k_spinlock_key_t key;
int ret;
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_condvar, wait, condvar);
key = k_spin_lock(&lock);
k_mutex_unlock(mutex);
ret = z_pend_curr(&lock, key, &condvar->wait_q, timeout);
k_mutex_lock(mutex, K_FOREVER);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_condvar, wait, condvar, ret);
return ret;
}
#ifdef CONFIG_USERSPACE
int z_vrfy_k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
k_timeout_t timeout)
{
K_OOPS(K_SYSCALL_OBJ(condvar, K_OBJ_CONDVAR));
K_OOPS(K_SYSCALL_OBJ(mutex, K_OBJ_MUTEX));
return z_impl_k_condvar_wait(condvar, mutex, timeout);
}
#include <zephyr/syscalls/k_condvar_wait_mrsh.c>
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_OBJ_CORE_CONDVAR
static int init_condvar_obj_core_list(void)
{
/* Initialize condvar object type */
z_obj_type_init(&obj_type_condvar, K_OBJ_TYPE_CONDVAR_ID,
offsetof(struct k_condvar, obj_core));
/* Initialize and link statically defined condvars */
STRUCT_SECTION_FOREACH(k_condvar, condvar) {
k_obj_core_init_and_link(K_OBJ_CORE(condvar),
&obj_type_condvar);
}
return 0;
}
SYS_INIT(init_condvar_obj_core_list, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJ_CORE_CONDVAR */
``` | /content/code_sandbox/kernel/condvar.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,080 |
```c
/*
*
*/
/**
* @file
* @brief Message queues.
*/
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <string.h>
#include <ksched.h>
#include <wait_q.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/init.h>
#include <zephyr/internal/syscall_handler.h>
#include <kernel_internal.h>
#include <zephyr/sys/check.h>
#ifdef CONFIG_OBJ_CORE_MSGQ
static struct k_obj_type obj_type_msgq;
#endif /* CONFIG_OBJ_CORE_MSGQ */
#ifdef CONFIG_POLL
static inline void handle_poll_events(struct k_msgq *msgq, uint32_t state)
{
z_handle_obj_poll_events(&msgq->poll_events, state);
}
#endif /* CONFIG_POLL */
void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
uint32_t max_msgs)
{
msgq->msg_size = msg_size;
msgq->max_msgs = max_msgs;
msgq->buffer_start = buffer;
msgq->buffer_end = buffer + (max_msgs * msg_size);
msgq->read_ptr = buffer;
msgq->write_ptr = buffer;
msgq->used_msgs = 0;
msgq->flags = 0;
z_waitq_init(&msgq->wait_q);
msgq->lock = (struct k_spinlock) {};
#ifdef CONFIG_POLL
sys_dlist_init(&msgq->poll_events);
#endif /* CONFIG_POLL */
#ifdef CONFIG_OBJ_CORE_MSGQ
k_obj_core_init_and_link(K_OBJ_CORE(msgq), &obj_type_msgq);
#endif /* CONFIG_OBJ_CORE_MSGQ */
SYS_PORT_TRACING_OBJ_INIT(k_msgq, msgq);
k_object_init(msgq);
}
int z_impl_k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
uint32_t max_msgs)
{
void *buffer;
int ret;
size_t total_size;
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_msgq, alloc_init, msgq);
if (size_mul_overflow(msg_size, max_msgs, &total_size)) {
ret = -EINVAL;
} else {
buffer = z_thread_malloc(total_size);
if (buffer != NULL) {
k_msgq_init(msgq, buffer, msg_size, max_msgs);
msgq->flags = K_MSGQ_FLAG_ALLOC;
ret = 0;
} else {
ret = -ENOMEM;
}
}
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_msgq, alloc_init, msgq, ret);
return ret;
}
#ifdef CONFIG_USERSPACE
int z_vrfy_k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
uint32_t max_msgs)
{
K_OOPS(K_SYSCALL_OBJ_NEVER_INIT(msgq, K_OBJ_MSGQ));
return z_impl_k_msgq_alloc_init(msgq, msg_size, max_msgs);
}
#include <zephyr/syscalls/k_msgq_alloc_init_mrsh.c>
#endif /* CONFIG_USERSPACE */
int k_msgq_cleanup(struct k_msgq *msgq)
{
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_msgq, cleanup, msgq);
CHECKIF(z_waitq_head(&msgq->wait_q) != NULL) {
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_msgq, cleanup, msgq, -EBUSY);
return -EBUSY;
}
if ((msgq->flags & K_MSGQ_FLAG_ALLOC) != 0U) {
k_free(msgq->buffer_start);
msgq->flags &= ~K_MSGQ_FLAG_ALLOC;
}
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_msgq, cleanup, msgq, 0);
return 0;
}
int z_impl_k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout)
{
__ASSERT(!arch_is_in_isr() || K_TIMEOUT_EQ(timeout, K_NO_WAIT), "");
struct k_thread *pending_thread;
k_spinlock_key_t key;
int result;
key = k_spin_lock(&msgq->lock);
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_msgq, put, msgq, timeout);
if (msgq->used_msgs < msgq->max_msgs) {
/* message queue isn't full */
pending_thread = z_unpend_first_thread(&msgq->wait_q);
if (pending_thread != NULL) {
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_msgq, put, msgq, timeout, 0);
/* give message to waiting thread */
(void)memcpy(pending_thread->base.swap_data, data,
msgq->msg_size);
/* wake up waiting thread */
arch_thread_return_value_set(pending_thread, 0);
z_ready_thread(pending_thread);
z_reschedule(&msgq->lock, key);
return 0;
} else {
/* put message in queue */
__ASSERT_NO_MSG(msgq->write_ptr >= msgq->buffer_start &&
msgq->write_ptr < msgq->buffer_end);
(void)memcpy(msgq->write_ptr, (char *)data, msgq->msg_size);
msgq->write_ptr += msgq->msg_size;
if (msgq->write_ptr == msgq->buffer_end) {
msgq->write_ptr = msgq->buffer_start;
}
msgq->used_msgs++;
#ifdef CONFIG_POLL
handle_poll_events(msgq, K_POLL_STATE_MSGQ_DATA_AVAILABLE);
#endif /* CONFIG_POLL */
}
result = 0;
} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
/* don't wait for message space to become available */
result = -ENOMSG;
} else {
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_msgq, put, msgq, timeout);
/* wait for put message success, failure, or timeout */
_current->base.swap_data = (void *) data;
result = z_pend_curr(&msgq->lock, key, &msgq->wait_q, timeout);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_msgq, put, msgq, timeout, result);
return result;
}
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_msgq, put, msgq, timeout, result);
k_spin_unlock(&msgq->lock, key);
return result;
}
#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_msgq_put(struct k_msgq *msgq, const void *data,
k_timeout_t timeout)
{
K_OOPS(K_SYSCALL_OBJ(msgq, K_OBJ_MSGQ));
K_OOPS(K_SYSCALL_MEMORY_READ(data, msgq->msg_size));
return z_impl_k_msgq_put(msgq, data, timeout);
}
#include <zephyr/syscalls/k_msgq_put_mrsh.c>
#endif /* CONFIG_USERSPACE */
void z_impl_k_msgq_get_attrs(struct k_msgq *msgq, struct k_msgq_attrs *attrs)
{
attrs->msg_size = msgq->msg_size;
attrs->max_msgs = msgq->max_msgs;
attrs->used_msgs = msgq->used_msgs;
}
#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_msgq_get_attrs(struct k_msgq *msgq,
struct k_msgq_attrs *attrs)
{
K_OOPS(K_SYSCALL_OBJ(msgq, K_OBJ_MSGQ));
K_OOPS(K_SYSCALL_MEMORY_WRITE(attrs, sizeof(struct k_msgq_attrs)));
z_impl_k_msgq_get_attrs(msgq, attrs);
}
#include <zephyr/syscalls/k_msgq_get_attrs_mrsh.c>
#endif /* CONFIG_USERSPACE */
int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout)
{
__ASSERT(!arch_is_in_isr() || K_TIMEOUT_EQ(timeout, K_NO_WAIT), "");
k_spinlock_key_t key;
struct k_thread *pending_thread;
int result;
key = k_spin_lock(&msgq->lock);
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_msgq, get, msgq, timeout);
if (msgq->used_msgs > 0U) {
/* take first available message from queue */
(void)memcpy((char *)data, msgq->read_ptr, msgq->msg_size);
msgq->read_ptr += msgq->msg_size;
if (msgq->read_ptr == msgq->buffer_end) {
msgq->read_ptr = msgq->buffer_start;
}
msgq->used_msgs--;
/* handle first thread waiting to write (if any) */
pending_thread = z_unpend_first_thread(&msgq->wait_q);
if (pending_thread != NULL) {
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_msgq, get, msgq, timeout);
/* add thread's message to queue */
__ASSERT_NO_MSG(msgq->write_ptr >= msgq->buffer_start &&
msgq->write_ptr < msgq->buffer_end);
(void)memcpy(msgq->write_ptr, (char *)pending_thread->base.swap_data,
msgq->msg_size);
msgq->write_ptr += msgq->msg_size;
if (msgq->write_ptr == msgq->buffer_end) {
msgq->write_ptr = msgq->buffer_start;
}
msgq->used_msgs++;
/* wake up waiting thread */
arch_thread_return_value_set(pending_thread, 0);
z_ready_thread(pending_thread);
z_reschedule(&msgq->lock, key);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_msgq, get, msgq, timeout, 0);
return 0;
}
result = 0;
} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
/* don't wait for a message to become available */
result = -ENOMSG;
} else {
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_msgq, get, msgq, timeout);
/* wait for get message success or timeout */
_current->base.swap_data = data;
result = z_pend_curr(&msgq->lock, key, &msgq->wait_q, timeout);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_msgq, get, msgq, timeout, result);
return result;
}
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_msgq, get, msgq, timeout, result);
k_spin_unlock(&msgq->lock, key);
return result;
}
#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_msgq_get(struct k_msgq *msgq, void *data,
k_timeout_t timeout)
{
K_OOPS(K_SYSCALL_OBJ(msgq, K_OBJ_MSGQ));
K_OOPS(K_SYSCALL_MEMORY_WRITE(data, msgq->msg_size));
return z_impl_k_msgq_get(msgq, data, timeout);
}
#include <zephyr/syscalls/k_msgq_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
int z_impl_k_msgq_peek(struct k_msgq *msgq, void *data)
{
k_spinlock_key_t key;
int result;
key = k_spin_lock(&msgq->lock);
if (msgq->used_msgs > 0U) {
/* take first available message from queue */
(void)memcpy((char *)data, msgq->read_ptr, msgq->msg_size);
result = 0;
} else {
/* don't wait for a message to become available */
result = -ENOMSG;
}
SYS_PORT_TRACING_OBJ_FUNC(k_msgq, peek, msgq, result);
k_spin_unlock(&msgq->lock, key);
return result;
}
#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_msgq_peek(struct k_msgq *msgq, void *data)
{
K_OOPS(K_SYSCALL_OBJ(msgq, K_OBJ_MSGQ));
K_OOPS(K_SYSCALL_MEMORY_WRITE(data, msgq->msg_size));
return z_impl_k_msgq_peek(msgq, data);
}
#include <zephyr/syscalls/k_msgq_peek_mrsh.c>
#endif /* CONFIG_USERSPACE */
int z_impl_k_msgq_peek_at(struct k_msgq *msgq, void *data, uint32_t idx)
{
k_spinlock_key_t key;
int result;
uint32_t bytes_to_end;
uint32_t byte_offset;
char *start_addr;
key = k_spin_lock(&msgq->lock);
if (msgq->used_msgs > idx) {
bytes_to_end = (msgq->buffer_end - msgq->read_ptr);
byte_offset = idx * msgq->msg_size;
start_addr = msgq->read_ptr;
/* check item available in start/end of ring buffer */
if (bytes_to_end <= byte_offset) {
/* Tweak the values in case */
byte_offset -= bytes_to_end;
/* wrap-around is required */
start_addr = msgq->buffer_start;
}
(void)memcpy(data, start_addr + byte_offset, msgq->msg_size);
result = 0;
} else {
/* don't wait for a message to become available */
result = -ENOMSG;
}
SYS_PORT_TRACING_OBJ_FUNC(k_msgq, peek, msgq, result);
k_spin_unlock(&msgq->lock, key);
return result;
}
#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_msgq_peek_at(struct k_msgq *msgq, void *data, uint32_t idx)
{
K_OOPS(K_SYSCALL_OBJ(msgq, K_OBJ_MSGQ));
K_OOPS(K_SYSCALL_MEMORY_WRITE(data, msgq->msg_size));
return z_impl_k_msgq_peek_at(msgq, data, idx);
}
#include <zephyr/syscalls/k_msgq_peek_at_mrsh.c>
#endif /* CONFIG_USERSPACE */
void z_impl_k_msgq_purge(struct k_msgq *msgq)
{
k_spinlock_key_t key;
struct k_thread *pending_thread;
key = k_spin_lock(&msgq->lock);
SYS_PORT_TRACING_OBJ_FUNC(k_msgq, purge, msgq);
/* wake up any threads that are waiting to write */
for (pending_thread = z_unpend_first_thread(&msgq->wait_q); pending_thread != NULL;
pending_thread = z_unpend_first_thread(&msgq->wait_q)) {
arch_thread_return_value_set(pending_thread, -ENOMSG);
z_ready_thread(pending_thread);
}
msgq->used_msgs = 0;
msgq->read_ptr = msgq->write_ptr;
z_reschedule(&msgq->lock, key);
}
#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_msgq_purge(struct k_msgq *msgq)
{
K_OOPS(K_SYSCALL_OBJ(msgq, K_OBJ_MSGQ));
z_impl_k_msgq_purge(msgq);
}
#include <zephyr/syscalls/k_msgq_purge_mrsh.c>
static inline uint32_t z_vrfy_k_msgq_num_free_get(struct k_msgq *msgq)
{
K_OOPS(K_SYSCALL_OBJ(msgq, K_OBJ_MSGQ));
return z_impl_k_msgq_num_free_get(msgq);
}
#include <zephyr/syscalls/k_msgq_num_free_get_mrsh.c>
static inline uint32_t z_vrfy_k_msgq_num_used_get(struct k_msgq *msgq)
{
K_OOPS(K_SYSCALL_OBJ(msgq, K_OBJ_MSGQ));
return z_impl_k_msgq_num_used_get(msgq);
}
#include <zephyr/syscalls/k_msgq_num_used_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_OBJ_CORE_MSGQ
static int init_msgq_obj_core_list(void)
{
/* Initialize msgq object type */
z_obj_type_init(&obj_type_msgq, K_OBJ_TYPE_MSGQ_ID,
offsetof(struct k_msgq, obj_core));
/* Initialize and link statically defined message queues */
STRUCT_SECTION_FOREACH(k_msgq, msgq) {
k_obj_core_init_and_link(K_OBJ_CORE(msgq), &obj_type_msgq);
}
return 0;
};
SYS_INIT(init_msgq_obj_core_list, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJ_CORE_MSGQ */
``` | /content/code_sandbox/kernel/msg_q.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,509 |
```c
/*
*
*/
/**
* @file
*
* Second generation work queue implementation
*/
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <wait_q.h>
#include <zephyr/spinlock.h>
#include <errno.h>
#include <ksched.h>
#include <zephyr/sys/printk.h>
static inline void flag_clear(uint32_t *flagp,
uint32_t bit)
{
*flagp &= ~BIT(bit);
}
static inline void flag_set(uint32_t *flagp,
uint32_t bit)
{
*flagp |= BIT(bit);
}
static inline bool flag_test(const uint32_t *flagp,
uint32_t bit)
{
return (*flagp & BIT(bit)) != 0U;
}
static inline bool flag_test_and_clear(uint32_t *flagp,
int bit)
{
bool ret = flag_test(flagp, bit);
flag_clear(flagp, bit);
return ret;
}
static inline void flags_set(uint32_t *flagp,
uint32_t flags)
{
*flagp = flags;
}
static inline uint32_t flags_get(const uint32_t *flagp)
{
return *flagp;
}
/* Lock to protect the internal state of all work items, work queues,
* and pending_cancels.
*/
static struct k_spinlock lock;
/* Invoked by work thread */
static void handle_flush(struct k_work *work) { }
static inline void init_flusher(struct z_work_flusher *flusher)
{
struct k_work *work = &flusher->work;
k_sem_init(&flusher->sem, 0, 1);
k_work_init(&flusher->work, handle_flush);
flag_set(&work->flags, K_WORK_FLUSHING_BIT);
}
/* List of pending cancellations. */
static sys_slist_t pending_cancels;
/* Initialize a canceler record and add it to the list of pending
* cancels.
*
* Invoked with work lock held.
*
* @param canceler the structure used to notify a waiting process.
* @param work the work structure that is to be canceled
*/
static inline void init_work_cancel(struct z_work_canceller *canceler,
struct k_work *work)
{
k_sem_init(&canceler->sem, 0, 1);
canceler->work = work;
sys_slist_append(&pending_cancels, &canceler->node);
}
/* Complete flushing of a work item.
*
* Invoked with work lock held.
*
* Invoked from a work queue thread.
*
* Reschedules.
*
* @param work the work structure that has completed flushing.
*/
static void finalize_flush_locked(struct k_work *work)
{
struct z_work_flusher *flusher
= CONTAINER_OF(work, struct z_work_flusher, work);
flag_clear(&work->flags, K_WORK_FLUSHING_BIT);
k_sem_give(&flusher->sem);
};
/* Complete cancellation of a work item and unlock held lock.
*
* Invoked with work lock held.
*
* Invoked from a work queue thread.
*
* Reschedules.
*
* @param work the work structure that has completed cancellation
*/
static void finalize_cancel_locked(struct k_work *work)
{
struct z_work_canceller *wc, *tmp;
sys_snode_t *prev = NULL;
/* Clear this first, so released high-priority threads don't
* see it when doing things.
*/
flag_clear(&work->flags, K_WORK_CANCELING_BIT);
/* Search for and remove the matching container, and release
* what's waiting for the completion. The same work item can
* appear multiple times in the list if multiple threads
* attempt to cancel it.
*/
SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&pending_cancels, wc, tmp, node) {
if (wc->work == work) {
sys_slist_remove(&pending_cancels, prev, &wc->node);
k_sem_give(&wc->sem);
break;
}
prev = &wc->node;
}
}
void k_work_init(struct k_work *work,
k_work_handler_t handler)
{
__ASSERT_NO_MSG(work != NULL);
__ASSERT_NO_MSG(handler != NULL);
*work = (struct k_work)Z_WORK_INITIALIZER(handler);
SYS_PORT_TRACING_OBJ_INIT(k_work, work);
}
static inline int work_busy_get_locked(const struct k_work *work)
{
return flags_get(&work->flags) & K_WORK_MASK;
}
int k_work_busy_get(const struct k_work *work)
{
k_spinlock_key_t key = k_spin_lock(&lock);
int ret = work_busy_get_locked(work);
k_spin_unlock(&lock, key);
return ret;
}
/* Add a flusher work item to the queue.
*
* Invoked with work lock held.
*
* Caller must notify queue of pending work.
*
* @param queue queue on which a work item may appear.
* @param work the work item that is either queued or running on @p
* queue
* @param flusher an uninitialized/unused flusher object
*/
static void queue_flusher_locked(struct k_work_q *queue,
struct k_work *work,
struct z_work_flusher *flusher)
{
bool in_list = false;
struct k_work *wn;
/* Determine whether the work item is still queued. */
SYS_SLIST_FOR_EACH_CONTAINER(&queue->pending, wn, node) {
if (wn == work) {
in_list = true;
break;
}
}
init_flusher(flusher);
if (in_list) {
sys_slist_insert(&queue->pending, &work->node,
&flusher->work.node);
} else {
sys_slist_prepend(&queue->pending, &flusher->work.node);
}
}
/* Try to remove a work item from the given queue.
*
* Invoked with work lock held.
*
* @param queue the queue from which the work should be removed
* @param work work that may be on the queue
*/
static inline void queue_remove_locked(struct k_work_q *queue,
struct k_work *work)
{
if (flag_test_and_clear(&work->flags, K_WORK_QUEUED_BIT)) {
(void)sys_slist_find_and_remove(&queue->pending, &work->node);
}
}
/* Potentially notify a queue that it needs to look for pending work.
*
* This may make the work queue thread ready, but as the lock is held it
* will not be a reschedule point. Callers should yield after the lock is
* released where appropriate (generally if this returns true).
*
* @param queue to be notified. If this is null no notification is required.
*
* @return true if and only if the queue was notified and woken, i.e. a
* reschedule is pending.
*/
static inline bool notify_queue_locked(struct k_work_q *queue)
{
bool rv = false;
if (queue != NULL) {
rv = z_sched_wake(&queue->notifyq, 0, NULL);
}
return rv;
}
/* Submit an work item to a queue if queue state allows new work.
*
* Submission is rejected if no queue is provided, or if the queue is
* draining and the work isn't being submitted from the queue's
* thread (chained submission).
*
* Invoked with work lock held.
* Conditionally notifies queue.
*
* @param queue the queue to which work should be submitted. This may
* be null, in which case the submission will fail.
*
* @param work to be submitted
*
* @retval 1 if successfully queued
* @retval -EINVAL if no queue is provided
* @retval -ENODEV if the queue is not started
* @retval -EBUSY if the submission was rejected (draining, plugged)
*/
static inline int queue_submit_locked(struct k_work_q *queue,
struct k_work *work)
{
if (queue == NULL) {
return -EINVAL;
}
int ret;
bool chained = (_current == &queue->thread) && !k_is_in_isr();
bool draining = flag_test(&queue->flags, K_WORK_QUEUE_DRAIN_BIT);
bool plugged = flag_test(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT);
/* Test for acceptability, in priority order:
*
* * -ENODEV if the queue isn't running.
* * -EBUSY if draining and not chained
* * -EBUSY if plugged and not draining
* * otherwise OK
*/
if (!flag_test(&queue->flags, K_WORK_QUEUE_STARTED_BIT)) {
ret = -ENODEV;
} else if (draining && !chained) {
ret = -EBUSY;
} else if (plugged && !draining) {
ret = -EBUSY;
} else {
sys_slist_append(&queue->pending, &work->node);
ret = 1;
(void)notify_queue_locked(queue);
}
return ret;
}
/* Attempt to submit work to a queue.
*
* The submission can fail if:
* * the work is cancelling,
* * no candidate queue can be identified;
* * the candidate queue rejects the submission.
*
* Invoked with work lock held.
* Conditionally notifies queue.
*
* @param work the work structure to be submitted
* @param queuep pointer to a queue reference. On input this should
* dereference to the proposed queue (which may be null); after completion it
* will be null if the work was not submitted or if submitted will reference
* the queue it was submitted to. That may or may not be the queue provided
* on input.
*
* @retval 0 if work was already submitted to a queue
* @retval 1 if work was not submitted and has been queued to @p queue
* @retval 2 if work was running and has been queued to the queue that was
* running it
* @retval -EBUSY if canceling or submission was rejected by queue
* @retval -EINVAL if no queue is provided
* @retval -ENODEV if the queue is not started
*/
static int submit_to_queue_locked(struct k_work *work,
struct k_work_q **queuep)
{
int ret = 0;
if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
/* Disallowed */
ret = -EBUSY;
} else if (!flag_test(&work->flags, K_WORK_QUEUED_BIT)) {
/* Not currently queued */
ret = 1;
/* If no queue specified resubmit to last queue.
*/
if (*queuep == NULL) {
*queuep = work->queue;
}
/* If the work is currently running we have to use the
* queue it's running on to prevent handler
* re-entrancy.
*/
if (flag_test(&work->flags, K_WORK_RUNNING_BIT)) {
__ASSERT_NO_MSG(work->queue != NULL);
*queuep = work->queue;
ret = 2;
}
int rc = queue_submit_locked(*queuep, work);
if (rc < 0) {
ret = rc;
} else {
flag_set(&work->flags, K_WORK_QUEUED_BIT);
work->queue = *queuep;
}
} else {
/* Already queued, do nothing. */
}
if (ret <= 0) {
*queuep = NULL;
}
return ret;
}
/* Submit work to a queue but do not yield the current thread.
*
* Intended for internal use.
*
* See also submit_to_queue_locked().
*
* @param queuep pointer to a queue reference.
* @param work the work structure to be submitted
*
* @retval see submit_to_queue_locked()
*/
int z_work_submit_to_queue(struct k_work_q *queue,
struct k_work *work)
{
__ASSERT_NO_MSG(work != NULL);
__ASSERT_NO_MSG(work->handler != NULL);
k_spinlock_key_t key = k_spin_lock(&lock);
int ret = submit_to_queue_locked(work, &queue);
k_spin_unlock(&lock, key);
return ret;
}
int k_work_submit_to_queue(struct k_work_q *queue,
struct k_work *work)
{
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit_to_queue, queue, work);
int ret = z_work_submit_to_queue(queue, work);
/* submit_to_queue_locked() won't reschedule on its own
* (really it should, otherwise this process will result in
* spurious calls to z_swap() due to the race), so do it here
* if the queue state changed.
*/
if (ret > 0) {
z_reschedule_unlocked();
}
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, submit_to_queue, queue, work, ret);
return ret;
}
int k_work_submit(struct k_work *work)
{
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit, work);
int ret = k_work_submit_to_queue(&k_sys_work_q, work);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, submit, work, ret);
return ret;
}
/* Flush the work item if necessary.
*
* Flushing is necessary only if the work is either queued or running.
*
* Invoked with work lock held by key.
* Sleeps.
*
* @param work the work item that is to be flushed
* @param flusher state used to synchronize the flush
*
* @retval true if work is queued or running. If this happens the
* caller must take the flusher semaphore after releasing the lock.
*
* @retval false otherwise. No wait required.
*/
static bool work_flush_locked(struct k_work *work,
struct z_work_flusher *flusher)
{
bool need_flush = (flags_get(&work->flags)
& (K_WORK_QUEUED | K_WORK_RUNNING)) != 0U;
if (need_flush) {
struct k_work_q *queue = work->queue;
__ASSERT_NO_MSG(queue != NULL);
queue_flusher_locked(queue, work, flusher);
notify_queue_locked(queue);
}
return need_flush;
}
bool k_work_flush(struct k_work *work,
struct k_work_sync *sync)
{
__ASSERT_NO_MSG(work != NULL);
__ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));
__ASSERT_NO_MSG(!k_is_in_isr());
__ASSERT_NO_MSG(sync != NULL);
#ifdef CONFIG_KERNEL_COHERENCE
__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif /* CONFIG_KERNEL_COHERENCE */
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush, work);
struct z_work_flusher *flusher = &sync->flusher;
k_spinlock_key_t key = k_spin_lock(&lock);
bool need_flush = work_flush_locked(work, flusher);
k_spin_unlock(&lock, key);
/* If necessary wait until the flusher item completes */
if (need_flush) {
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_work, flush, work, K_FOREVER);
k_sem_take(&flusher->sem, K_FOREVER);
}
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush, work, need_flush);
return need_flush;
}
/* Execute the non-waiting steps necessary to cancel a work item.
*
* Invoked with work lock held.
*
* @param work the work item to be canceled.
*
* @retval true if we need to wait for the work item to finish canceling
* @retval false if the work item is idle
*
* @return k_busy_wait() captured under lock
*/
static int cancel_async_locked(struct k_work *work)
{
/* If we haven't already started canceling, do it now. */
if (!flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
/* Remove it from the queue, if it's queued. */
queue_remove_locked(work->queue, work);
}
/* If it's still busy after it's been dequeued, then flag it
* as canceling.
*/
int ret = work_busy_get_locked(work);
if (ret != 0) {
flag_set(&work->flags, K_WORK_CANCELING_BIT);
ret = work_busy_get_locked(work);
}
return ret;
}
/* Complete cancellation necessary, release work lock, and wait if
* necessary.
*
* Invoked with work lock held by key.
* Sleeps.
*
* @param work work that is being canceled
* @param canceller state used to synchronize the cancellation
* @param key used by work lock
*
* @retval true if and only if the work was still active on entry. The caller
* must wait on the canceller semaphore after releasing the lock.
*
* @retval false if work was idle on entry. The caller need not wait.
*/
static bool cancel_sync_locked(struct k_work *work,
struct z_work_canceller *canceller)
{
bool ret = flag_test(&work->flags, K_WORK_CANCELING_BIT);
/* If something's still running then we have to wait for
* completion, which is indicated when finish_cancel() gets
* invoked.
*/
if (ret) {
init_work_cancel(canceller, work);
}
return ret;
}
int k_work_cancel(struct k_work *work)
{
__ASSERT_NO_MSG(work != NULL);
__ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel, work);
k_spinlock_key_t key = k_spin_lock(&lock);
int ret = cancel_async_locked(work);
k_spin_unlock(&lock, key);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel, work, ret);
return ret;
}
bool k_work_cancel_sync(struct k_work *work,
struct k_work_sync *sync)
{
__ASSERT_NO_MSG(work != NULL);
__ASSERT_NO_MSG(sync != NULL);
__ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));
__ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE
__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif /* CONFIG_KERNEL_COHERENCE */
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_sync, work, sync);
struct z_work_canceller *canceller = &sync->canceller;
k_spinlock_key_t key = k_spin_lock(&lock);
bool pending = (work_busy_get_locked(work) != 0U);
bool need_wait = false;
if (pending) {
(void)cancel_async_locked(work);
need_wait = cancel_sync_locked(work, canceller);
}
k_spin_unlock(&lock, key);
if (need_wait) {
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_work, cancel_sync, work, sync);
k_sem_take(&canceller->sem, K_FOREVER);
}
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_sync, work, sync, pending);
return pending;
}
/* Loop executed by a work queue thread.
*
* @param workq_ptr pointer to the work queue structure
*/
static void work_queue_main(void *workq_ptr, void *p2, void *p3)
{
ARG_UNUSED(p2);
ARG_UNUSED(p3);
struct k_work_q *queue = (struct k_work_q *)workq_ptr;
while (true) {
sys_snode_t *node;
struct k_work *work = NULL;
k_work_handler_t handler = NULL;
k_spinlock_key_t key = k_spin_lock(&lock);
bool yield;
/* Check for and prepare any new work. */
node = sys_slist_get(&queue->pending);
if (node != NULL) {
/* Mark that there's some work active that's
* not on the pending list.
*/
flag_set(&queue->flags, K_WORK_QUEUE_BUSY_BIT);
work = CONTAINER_OF(node, struct k_work, node);
flag_set(&work->flags, K_WORK_RUNNING_BIT);
flag_clear(&work->flags, K_WORK_QUEUED_BIT);
/* Static code analysis tool can raise a false-positive violation
* in the line below that 'work' is checked for null after being
* dereferenced.
*
* The work is figured out by CONTAINER_OF, as a container
* of type struct k_work that contains the node.
* The only way for it to be NULL is if node would be a member
* of struct k_work object that has been placed at address NULL,
* which should never happen, even line 'if (work != NULL)'
* ensures that.
* This means that if node is not NULL, then work will not be NULL.
*/
handler = work->handler;
} else if (flag_test_and_clear(&queue->flags,
K_WORK_QUEUE_DRAIN_BIT)) {
/* Not busy and draining: move threads waiting for
* drain to ready state. The held spinlock inhibits
* immediate reschedule; released threads get their
* chance when this invokes z_sched_wait() below.
*
* We don't touch K_WORK_QUEUE_PLUGGABLE, so getting
* here doesn't mean that the queue will allow new
* submissions.
*/
(void)z_sched_wake_all(&queue->drainq, 1, NULL);
} else {
/* No work is available and no queue state requires
* special handling.
*/
;
}
if (work == NULL) {
/* Nothing's had a chance to add work since we took
* the lock, and we didn't find work nor got asked to
* stop. Just go to sleep: when something happens the
* work thread will be woken and we can check again.
*/
(void)z_sched_wait(&lock, key, &queue->notifyq,
K_FOREVER, NULL);
continue;
}
k_spin_unlock(&lock, key);
__ASSERT_NO_MSG(handler != NULL);
handler(work);
/* Mark the work item as no longer running and deal
* with any cancellation and flushing issued while it
* was running. Clear the BUSY flag and optionally
* yield to prevent starving other threads.
*/
key = k_spin_lock(&lock);
flag_clear(&work->flags, K_WORK_RUNNING_BIT);
if (flag_test(&work->flags, K_WORK_FLUSHING_BIT)) {
finalize_flush_locked(work);
}
if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
finalize_cancel_locked(work);
}
flag_clear(&queue->flags, K_WORK_QUEUE_BUSY_BIT);
yield = !flag_test(&queue->flags, K_WORK_QUEUE_NO_YIELD_BIT);
k_spin_unlock(&lock, key);
/* Optionally yield to prevent the work queue from
* starving other threads.
*/
if (yield) {
k_yield();
}
}
}
void k_work_queue_init(struct k_work_q *queue)
{
__ASSERT_NO_MSG(queue != NULL);
*queue = (struct k_work_q) {
.flags = 0,
};
SYS_PORT_TRACING_OBJ_INIT(k_work_queue, queue);
}
void k_work_queue_start(struct k_work_q *queue,
k_thread_stack_t *stack,
size_t stack_size,
int prio,
const struct k_work_queue_config *cfg)
{
__ASSERT_NO_MSG(queue);
__ASSERT_NO_MSG(stack);
__ASSERT_NO_MSG(!flag_test(&queue->flags, K_WORK_QUEUE_STARTED_BIT));
uint32_t flags = K_WORK_QUEUE_STARTED;
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, start, queue);
sys_slist_init(&queue->pending);
z_waitq_init(&queue->notifyq);
z_waitq_init(&queue->drainq);
if ((cfg != NULL) && cfg->no_yield) {
flags |= K_WORK_QUEUE_NO_YIELD;
}
/* It hasn't actually been started yet, but all the state is in place
* so we can submit things and once the thread gets control it's ready
* to roll.
*/
flags_set(&queue->flags, flags);
(void)k_thread_create(&queue->thread, stack, stack_size,
work_queue_main, queue, NULL, NULL,
prio, 0, K_FOREVER);
if ((cfg != NULL) && (cfg->name != NULL)) {
k_thread_name_set(&queue->thread, cfg->name);
}
if ((cfg != NULL) && (cfg->essential)) {
queue->thread.base.user_options |= K_ESSENTIAL;
}
k_thread_start(&queue->thread);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, start, queue);
}
int k_work_queue_drain(struct k_work_q *queue,
bool plug)
{
__ASSERT_NO_MSG(queue);
__ASSERT_NO_MSG(!k_is_in_isr());
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, drain, queue);
int ret = 0;
k_spinlock_key_t key = k_spin_lock(&lock);
if (((flags_get(&queue->flags)
& (K_WORK_QUEUE_BUSY | K_WORK_QUEUE_DRAIN)) != 0U)
|| plug
|| !sys_slist_is_empty(&queue->pending)) {
flag_set(&queue->flags, K_WORK_QUEUE_DRAIN_BIT);
if (plug) {
flag_set(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT);
}
notify_queue_locked(queue);
ret = z_sched_wait(&lock, key, &queue->drainq,
K_FOREVER, NULL);
} else {
k_spin_unlock(&lock, key);
}
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, drain, queue, ret);
return ret;
}
int k_work_queue_unplug(struct k_work_q *queue)
{
__ASSERT_NO_MSG(queue);
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, unplug, queue);
int ret = -EALREADY;
k_spinlock_key_t key = k_spin_lock(&lock);
if (flag_test_and_clear(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT)) {
ret = 0;
}
k_spin_unlock(&lock, key);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, unplug, queue, ret);
return ret;
}
#ifdef CONFIG_SYS_CLOCK_EXISTS
/* Timeout handler for delayable work.
*
* Invoked by timeout infrastructure.
* Takes and releases work lock.
* Conditionally reschedules.
*/
static void work_timeout(struct _timeout *to)
{
struct k_work_delayable *dw
= CONTAINER_OF(to, struct k_work_delayable, timeout);
struct k_work *wp = &dw->work;
k_spinlock_key_t key = k_spin_lock(&lock);
struct k_work_q *queue = NULL;
/* If the work is still marked delayed (should be) then clear that
* state and submit it to the queue. If successful the queue will be
* notified of new work at the next reschedule point.
*
* If not successful there is no notification that the work has been
* abandoned. Sorry.
*/
if (flag_test_and_clear(&wp->flags, K_WORK_DELAYED_BIT)) {
queue = dw->queue;
(void)submit_to_queue_locked(wp, &queue);
}
k_spin_unlock(&lock, key);
}
void k_work_init_delayable(struct k_work_delayable *dwork,
k_work_handler_t handler)
{
__ASSERT_NO_MSG(dwork != NULL);
__ASSERT_NO_MSG(handler != NULL);
*dwork = (struct k_work_delayable){
.work = {
.handler = handler,
.flags = K_WORK_DELAYABLE,
},
};
z_init_timeout(&dwork->timeout);
SYS_PORT_TRACING_OBJ_INIT(k_work_delayable, dwork);
}
static inline int work_delayable_busy_get_locked(const struct k_work_delayable *dwork)
{
return flags_get(&dwork->work.flags) & K_WORK_MASK;
}
int k_work_delayable_busy_get(const struct k_work_delayable *dwork)
{
k_spinlock_key_t key = k_spin_lock(&lock);
int ret = work_delayable_busy_get_locked(dwork);
k_spin_unlock(&lock, key);
return ret;
}
/* Attempt to schedule a work item for future (maybe immediate)
* submission.
*
* Invoked with work lock held.
*
* See also submit_to_queue_locked(), which implements this for a no-wait
* delay.
*
* Invoked with work lock held.
*
* @param queuep pointer to a pointer to a queue. On input this
* should dereference to the proposed queue (which may be null); after
* completion it will be null if the work was not submitted or if
* submitted will reference the queue it was submitted to. That may
* or may not be the queue provided on input.
*
* @param dwork the delayed work structure
*
* @param delay the delay to use before scheduling.
*
* @retval from submit_to_queue_locked() if delay is K_NO_WAIT; otherwise
* @retval 1 to indicate successfully scheduled.
*/
static int schedule_for_queue_locked(struct k_work_q **queuep,
struct k_work_delayable *dwork,
k_timeout_t delay)
{
int ret = 1;
struct k_work *work = &dwork->work;
if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
return submit_to_queue_locked(work, queuep);
}
flag_set(&work->flags, K_WORK_DELAYED_BIT);
dwork->queue = *queuep;
/* Add timeout */
z_add_timeout(&dwork->timeout, work_timeout, delay);
return ret;
}
/* Unschedule delayable work.
*
* If the work is delayed, cancel the timeout and clear the delayed
* flag.
*
* Invoked with work lock held.
*
* @param dwork pointer to delayable work structure.
*
* @return true if and only if work had been delayed so the timeout
* was cancelled.
*/
static inline bool unschedule_locked(struct k_work_delayable *dwork)
{
bool ret = false;
struct k_work *work = &dwork->work;
/* If scheduled, try to cancel. If it fails, that means the
* callback has been dequeued and will inevitably run (or has
* already run), so treat that as "undelayed" and return
* false.
*/
if (flag_test_and_clear(&work->flags, K_WORK_DELAYED_BIT)) {
ret = z_abort_timeout(&dwork->timeout) == 0;
}
return ret;
}
/* Full cancellation of a delayable work item.
*
* Unschedules the delayed part then delegates to standard work
* cancellation.
*
* Invoked with work lock held.
*
* @param dwork delayable work item
*
* @return k_work_busy_get() flags
*/
static int cancel_delayable_async_locked(struct k_work_delayable *dwork)
{
(void)unschedule_locked(dwork);
return cancel_async_locked(&dwork->work);
}
int k_work_schedule_for_queue(struct k_work_q *queue,
struct k_work_delayable *dwork,
k_timeout_t delay)
{
__ASSERT_NO_MSG(dwork != NULL);
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, schedule_for_queue, queue, dwork, delay);
struct k_work *work = &dwork->work;
int ret = 0;
k_spinlock_key_t key = k_spin_lock(&lock);
/* Schedule the work item if it's idle or running. */
if ((work_busy_get_locked(work) & ~K_WORK_RUNNING) == 0U) {
ret = schedule_for_queue_locked(&queue, dwork, delay);
}
k_spin_unlock(&lock, key);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, schedule_for_queue, queue, dwork, delay, ret);
return ret;
}
int k_work_schedule(struct k_work_delayable *dwork,
k_timeout_t delay)
{
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, schedule, dwork, delay);
int ret = k_work_schedule_for_queue(&k_sys_work_q, dwork, delay);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, schedule, dwork, delay, ret);
return ret;
}
int k_work_reschedule_for_queue(struct k_work_q *queue,
struct k_work_delayable *dwork,
k_timeout_t delay)
{
__ASSERT_NO_MSG(dwork != NULL);
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, reschedule_for_queue, queue, dwork, delay);
int ret;
k_spinlock_key_t key = k_spin_lock(&lock);
/* Remove any active scheduling. */
(void)unschedule_locked(dwork);
/* Schedule the work item with the new parameters. */
ret = schedule_for_queue_locked(&queue, dwork, delay);
k_spin_unlock(&lock, key);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, reschedule_for_queue, queue, dwork, delay, ret);
return ret;
}
int k_work_reschedule(struct k_work_delayable *dwork,
k_timeout_t delay)
{
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, reschedule, dwork, delay);
int ret = k_work_reschedule_for_queue(&k_sys_work_q, dwork, delay);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, reschedule, dwork, delay, ret);
return ret;
}
int k_work_cancel_delayable(struct k_work_delayable *dwork)
{
__ASSERT_NO_MSG(dwork != NULL);
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_delayable, dwork);
k_spinlock_key_t key = k_spin_lock(&lock);
int ret = cancel_delayable_async_locked(dwork);
k_spin_unlock(&lock, key);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_delayable, dwork, ret);
return ret;
}
bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
struct k_work_sync *sync)
{
__ASSERT_NO_MSG(dwork != NULL);
__ASSERT_NO_MSG(sync != NULL);
__ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE
__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif /* CONFIG_KERNEL_COHERENCE */
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_delayable_sync, dwork, sync);
struct z_work_canceller *canceller = &sync->canceller;
k_spinlock_key_t key = k_spin_lock(&lock);
bool pending = (work_delayable_busy_get_locked(dwork) != 0U);
bool need_wait = false;
if (pending) {
(void)cancel_delayable_async_locked(dwork);
need_wait = cancel_sync_locked(&dwork->work, canceller);
}
k_spin_unlock(&lock, key);
if (need_wait) {
k_sem_take(&canceller->sem, K_FOREVER);
}
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_delayable_sync, dwork, sync, pending);
return pending;
}
bool k_work_flush_delayable(struct k_work_delayable *dwork,
struct k_work_sync *sync)
{
__ASSERT_NO_MSG(dwork != NULL);
__ASSERT_NO_MSG(sync != NULL);
__ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE
__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif /* CONFIG_KERNEL_COHERENCE */
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush_delayable, dwork, sync);
struct k_work *work = &dwork->work;
struct z_work_flusher *flusher = &sync->flusher;
k_spinlock_key_t key = k_spin_lock(&lock);
/* If it's idle release the lock and return immediately. */
if (work_busy_get_locked(work) == 0U) {
k_spin_unlock(&lock, key);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush_delayable, dwork, sync, false);
return false;
}
/* If unscheduling did something then submit it. Ignore a
* failed submission (e.g. when cancelling).
*/
if (unschedule_locked(dwork)) {
struct k_work_q *queue = dwork->queue;
(void)submit_to_queue_locked(work, &queue);
}
/* Wait for it to finish */
bool need_flush = work_flush_locked(work, flusher);
k_spin_unlock(&lock, key);
/* If necessary wait until the flusher item completes */
if (need_flush) {
k_sem_take(&flusher->sem, K_FOREVER);
}
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush_delayable, dwork, sync, need_flush);
return need_flush;
}
#endif /* CONFIG_SYS_CLOCK_EXISTS */
``` | /content/code_sandbox/kernel/work.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 7,939 |
```c
/*
*
*/
/**
* @file
* @brief Kernel thread support
*
* This module provides general purpose thread support.
*/
#include <zephyr/kernel.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/sys_clock.h>
#include <ksched.h>
#include <kthread.h>
#include <wait_q.h>
#include <zephyr/internal/syscall_handler.h>
#include <kernel_internal.h>
#include <kswap.h>
#include <zephyr/init.h>
#include <zephyr/tracing/tracing.h>
#include <string.h>
#include <stdbool.h>
#include <zephyr/sys/check.h>
#include <zephyr/random/random.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/logging/log.h>
#include <zephyr/llext/symbol.h>
#include <zephyr/sys/iterable_sections.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
#ifdef CONFIG_OBJ_CORE_THREAD
static struct k_obj_type obj_type_thread;
#ifdef CONFIG_OBJ_CORE_STATS_THREAD
static struct k_obj_core_stats_desc thread_stats_desc = {
.raw_size = sizeof(struct k_cycle_stats),
.query_size = sizeof(struct k_thread_runtime_stats),
.raw = z_thread_stats_raw,
.query = z_thread_stats_query,
.reset = z_thread_stats_reset,
.disable = z_thread_stats_disable,
.enable = z_thread_stats_enable,
};
#endif /* CONFIG_OBJ_CORE_STATS_THREAD */
static int init_thread_obj_core_list(void)
{
/* Initialize mem_slab object type */
#ifdef CONFIG_OBJ_CORE_THREAD
z_obj_type_init(&obj_type_thread, K_OBJ_TYPE_THREAD_ID,
offsetof(struct k_thread, obj_core));
#endif /* CONFIG_OBJ_CORE_THREAD */
#ifdef CONFIG_OBJ_CORE_STATS_THREAD
k_obj_type_stats_init(&obj_type_thread, &thread_stats_desc);
#endif /* CONFIG_OBJ_CORE_STATS_THREAD */
return 0;
}
SYS_INIT(init_thread_obj_core_list, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJ_CORE_THREAD */
#define _FOREACH_STATIC_THREAD(thread_data) \
STRUCT_SECTION_FOREACH(_static_thread_data, thread_data)
bool k_is_in_isr(void)
{
return arch_is_in_isr();
}
EXPORT_SYMBOL(k_is_in_isr);
#ifdef CONFIG_THREAD_CUSTOM_DATA
void z_impl_k_thread_custom_data_set(void *value)
{
_current->custom_data = value;
}
#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_custom_data_set(void *data)
{
z_impl_k_thread_custom_data_set(data);
}
#include <zephyr/syscalls/k_thread_custom_data_set_mrsh.c>
#endif /* CONFIG_USERSPACE */
void *z_impl_k_thread_custom_data_get(void)
{
return _current->custom_data;
}
#ifdef CONFIG_USERSPACE
static inline void *z_vrfy_k_thread_custom_data_get(void)
{
return z_impl_k_thread_custom_data_get();
}
#include <zephyr/syscalls/k_thread_custom_data_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_THREAD_CUSTOM_DATA */
int z_impl_k_is_preempt_thread(void)
{
return !arch_is_in_isr() && thread_is_preemptible(_current);
}
#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_is_preempt_thread(void)
{
return z_impl_k_is_preempt_thread();
}
#include <zephyr/syscalls/k_is_preempt_thread_mrsh.c>
#endif /* CONFIG_USERSPACE */
int z_impl_k_thread_priority_get(k_tid_t thread)
{
return thread->base.prio;
}
#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_thread_priority_get(k_tid_t thread)
{
K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
return z_impl_k_thread_priority_get(thread);
}
#include <zephyr/syscalls/k_thread_priority_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
int z_impl_k_thread_name_set(k_tid_t thread, const char *str)
{
#ifdef CONFIG_THREAD_NAME
if (thread == NULL) {
thread = _current;
}
strncpy(thread->name, str, CONFIG_THREAD_MAX_NAME_LEN - 1);
thread->name[CONFIG_THREAD_MAX_NAME_LEN - 1] = '\0';
SYS_PORT_TRACING_OBJ_FUNC(k_thread, name_set, thread, 0);
return 0;
#else
ARG_UNUSED(thread);
ARG_UNUSED(str);
SYS_PORT_TRACING_OBJ_FUNC(k_thread, name_set, thread, -ENOSYS);
return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_thread_name_set(k_tid_t thread, const char *str)
{
#ifdef CONFIG_THREAD_NAME
char name[CONFIG_THREAD_MAX_NAME_LEN];
if (thread != NULL) {
if (K_SYSCALL_OBJ(thread, K_OBJ_THREAD) != 0) {
return -EINVAL;
}
}
/* In theory we could copy directly into thread->name, but
* the current z_vrfy / z_impl split does not provide a
* means of doing so.
*/
if (k_usermode_string_copy(name, str, sizeof(name)) != 0) {
return -EFAULT;
}
return z_impl_k_thread_name_set(thread, name);
#else
return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
#include <zephyr/syscalls/k_thread_name_set_mrsh.c>
#endif /* CONFIG_USERSPACE */
const char *k_thread_name_get(k_tid_t thread)
{
#ifdef CONFIG_THREAD_NAME
return (const char *)thread->name;
#else
ARG_UNUSED(thread);
return NULL;
#endif /* CONFIG_THREAD_NAME */
}
int z_impl_k_thread_name_copy(k_tid_t thread, char *buf, size_t size)
{
#ifdef CONFIG_THREAD_NAME
strncpy(buf, thread->name, size);
return 0;
#else
ARG_UNUSED(thread);
ARG_UNUSED(buf);
ARG_UNUSED(size);
return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
static size_t copy_bytes(char *dest, size_t dest_size, const char *src, size_t src_size)
{
size_t bytes_to_copy;
bytes_to_copy = MIN(dest_size, src_size);
memcpy(dest, src, bytes_to_copy);
return bytes_to_copy;
}
const char *k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size)
{
size_t off = 0;
uint8_t bit;
uint8_t thread_state = thread_id->base.thread_state;
static const struct {
const char *str;
size_t len;
} state_string[] = {
{ Z_STATE_STR_DUMMY, sizeof(Z_STATE_STR_DUMMY) - 1},
{ Z_STATE_STR_PENDING, sizeof(Z_STATE_STR_PENDING) - 1},
{ Z_STATE_STR_PRESTART, sizeof(Z_STATE_STR_PRESTART) - 1},
{ Z_STATE_STR_DEAD, sizeof(Z_STATE_STR_DEAD) - 1},
{ Z_STATE_STR_SUSPENDED, sizeof(Z_STATE_STR_SUSPENDED) - 1},
{ Z_STATE_STR_ABORTING, sizeof(Z_STATE_STR_ABORTING) - 1},
{ Z_STATE_STR_SUSPENDING, sizeof(Z_STATE_STR_SUSPENDING) - 1},
{ Z_STATE_STR_QUEUED, sizeof(Z_STATE_STR_QUEUED) - 1},
};
if ((buf == NULL) || (buf_size == 0)) {
return "";
}
buf_size--; /* Reserve 1 byte for end-of-string character */
/*
* Loop through each bit in the thread_state. Stop once all have
* been processed. If more than one thread_state bit is set, then
* separate the descriptive strings with a '+'.
*/
for (unsigned int index = 0; thread_state != 0; index++) {
bit = BIT(index);
if ((thread_state & bit) == 0) {
continue;
}
off += copy_bytes(buf + off, buf_size - off,
state_string[index].str,
state_string[index].len);
thread_state &= ~bit;
if (thread_state != 0) {
off += copy_bytes(buf + off, buf_size - off, "+", 1);
}
}
buf[off] = '\0';
return (const char *)buf;
}
#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_thread_name_copy(k_tid_t thread,
char *buf, size_t size)
{
#ifdef CONFIG_THREAD_NAME
size_t len;
struct k_object *ko = k_object_find(thread);
/* Special case: we allow reading the names of initialized threads
* even if we don't have permission on them
*/
if ((thread == NULL) || (ko->type != K_OBJ_THREAD) ||
((ko->flags & K_OBJ_FLAG_INITIALIZED) == 0)) {
return -EINVAL;
}
if (K_SYSCALL_MEMORY_WRITE(buf, size) != 0) {
return -EFAULT;
}
len = strlen(thread->name);
if ((len + 1) > size) {
return -ENOSPC;
}
return k_usermode_to_copy((void *)buf, thread->name, len + 1);
#else
ARG_UNUSED(thread);
ARG_UNUSED(buf);
ARG_UNUSED(size);
return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
#include <zephyr/syscalls/k_thread_name_copy_mrsh.c>
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_STACK_SENTINEL
/* Check that the stack sentinel is still present
*
* The stack sentinel feature writes a magic value to the lowest 4 bytes of
* the thread's stack when the thread is initialized. This value gets checked
* in a few places:
*
* 1) In k_yield() if the current thread is not swapped out
* 2) After servicing a non-nested interrupt
* 3) In z_swap(), check the sentinel in the outgoing thread
*
* Item 2 requires support in arch/ code.
*
* If the check fails, the thread will be terminated appropriately through
* the system fatal error handler.
*/
void z_check_stack_sentinel(void)
{
uint32_t *stack;
if ((_current->base.thread_state & _THREAD_DUMMY) != 0) {
return;
}
stack = (uint32_t *)_current->stack_info.start;
if (*stack != STACK_SENTINEL) {
/* Restore it so further checks don't trigger this same error */
*stack = STACK_SENTINEL;
z_except_reason(K_ERR_STACK_CHK_FAIL);
}
}
#endif /* CONFIG_STACK_SENTINEL */
void z_impl_k_thread_start(k_tid_t thread)
{
SYS_PORT_TRACING_OBJ_FUNC(k_thread, start, thread);
z_sched_start(thread);
}
#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_start(k_tid_t thread)
{
K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
return z_impl_k_thread_start(thread);
}
#include <zephyr/syscalls/k_thread_start_mrsh.c>
#endif /* CONFIG_USERSPACE */
#if defined(CONFIG_STACK_POINTER_RANDOM) && (CONFIG_STACK_POINTER_RANDOM != 0)
int z_stack_adjust_initialized;
static size_t random_offset(size_t stack_size)
{
size_t random_val;
if (!z_stack_adjust_initialized) {
z_early_rand_get((uint8_t *)&random_val, sizeof(random_val));
} else {
sys_rand_get((uint8_t *)&random_val, sizeof(random_val));
}
/* Don't need to worry about alignment of the size here,
* arch_new_thread() is required to do it.
*
* FIXME: Not the best way to get a random number in a range.
* See #6493
*/
const size_t fuzz = random_val % CONFIG_STACK_POINTER_RANDOM;
if (unlikely(fuzz * 2 > stack_size)) {
return 0;
}
return fuzz;
}
#if defined(CONFIG_STACK_GROWS_UP)
/* This is so rare not bothering for now */
#error "Stack pointer randomization not implemented for upward growing stacks"
#endif /* CONFIG_STACK_GROWS_UP */
#endif /* CONFIG_STACK_POINTER_RANDOM */
static char *setup_thread_stack(struct k_thread *new_thread,
k_thread_stack_t *stack, size_t stack_size)
{
size_t stack_obj_size, stack_buf_size;
char *stack_ptr, *stack_buf_start;
size_t delta = 0;
#ifdef CONFIG_USERSPACE
if (z_stack_is_user_capable(stack)) {
stack_obj_size = K_THREAD_STACK_LEN(stack_size);
stack_buf_start = K_THREAD_STACK_BUFFER(stack);
stack_buf_size = stack_obj_size - K_THREAD_STACK_RESERVED;
} else
#endif /* CONFIG_USERSPACE */
{
/* Object cannot host a user mode thread */
stack_obj_size = K_KERNEL_STACK_LEN(stack_size);
stack_buf_start = K_KERNEL_STACK_BUFFER(stack);
stack_buf_size = stack_obj_size - K_KERNEL_STACK_RESERVED;
/* Zephyr treats stack overflow as an app bug. But
* this particular overflow can be seen by static
* analysis so needs to be handled somehow.
*/
if (K_KERNEL_STACK_RESERVED > stack_obj_size) {
k_panic();
}
}
#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
/* Map the stack into virtual memory and use that as the base to
* calculate the initial stack pointer at the high end of the stack
* object. The stack pointer may be reduced later in this function
* by TLS or random offset.
*
* K_MEM_MAP_UNINIT is used to mimic the behavior of non-mapped
* stack. If CONFIG_INIT_STACKS is enabled, the stack will be
* cleared below.
*/
void *stack_mapped = k_mem_map_phys_guard((uintptr_t)stack, stack_obj_size,
K_MEM_PERM_RW | K_MEM_CACHE_WB | K_MEM_MAP_UNINIT,
false);
__ASSERT_NO_MSG((uintptr_t)stack_mapped != 0);
#ifdef CONFIG_USERSPACE
if (z_stack_is_user_capable(stack)) {
stack_buf_start = K_THREAD_STACK_BUFFER(stack_mapped);
} else
#endif /* CONFIG_USERSPACE */
{
stack_buf_start = K_KERNEL_STACK_BUFFER(stack_mapped);
}
stack_ptr = (char *)stack_mapped + stack_obj_size;
/* Need to store the info on mapped stack so we can remove the mappings
* when the thread ends.
*/
new_thread->stack_info.mapped.addr = stack_mapped;
new_thread->stack_info.mapped.sz = stack_obj_size;
#else /* CONFIG_THREAD_STACK_MEM_MAPPED */
/* Initial stack pointer at the high end of the stack object, may
* be reduced later in this function by TLS or random offset
*/
stack_ptr = (char *)stack + stack_obj_size;
#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
LOG_DBG("stack %p for thread %p: obj_size=%zu buf_start=%p "
" buf_size %zu stack_ptr=%p",
stack, new_thread, stack_obj_size, (void *)stack_buf_start,
stack_buf_size, (void *)stack_ptr);
#ifdef CONFIG_INIT_STACKS
memset(stack_buf_start, 0xaa, stack_buf_size);
#endif /* CONFIG_INIT_STACKS */
#ifdef CONFIG_STACK_SENTINEL
/* Put the stack sentinel at the lowest 4 bytes of the stack area.
* We periodically check that it's still present and kill the thread
* if it isn't.
*/
*((uint32_t *)stack_buf_start) = STACK_SENTINEL;
#endif /* CONFIG_STACK_SENTINEL */
#ifdef CONFIG_THREAD_LOCAL_STORAGE
/* TLS is always last within the stack buffer */
delta += arch_tls_stack_setup(new_thread, stack_ptr);
#endif /* CONFIG_THREAD_LOCAL_STORAGE */
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
size_t tls_size = sizeof(struct _thread_userspace_local_data);
/* reserve space on highest memory of stack buffer for local data */
delta += tls_size;
new_thread->userspace_local_data =
(struct _thread_userspace_local_data *)(stack_ptr - delta);
#endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */
#if defined(CONFIG_STACK_POINTER_RANDOM) && (CONFIG_STACK_POINTER_RANDOM != 0)
delta += random_offset(stack_buf_size);
#endif /* CONFIG_STACK_POINTER_RANDOM */
delta = ROUND_UP(delta, ARCH_STACK_PTR_ALIGN);
#ifdef CONFIG_THREAD_STACK_INFO
/* Initial values. Arches which implement MPU guards that "borrow"
* memory from the stack buffer (not tracked in K_THREAD_STACK_RESERVED)
* will need to appropriately update this.
*
* The bounds tracked here correspond to the area of the stack object
* that the thread can access, which includes TLS.
*/
new_thread->stack_info.start = (uintptr_t)stack_buf_start;
new_thread->stack_info.size = stack_buf_size;
new_thread->stack_info.delta = delta;
#endif /* CONFIG_THREAD_STACK_INFO */
stack_ptr -= delta;
return stack_ptr;
}
/*
* The provided stack_size value is presumed to be either the result of
* K_THREAD_STACK_SIZEOF(stack), or the size value passed to the instance
* of K_THREAD_STACK_DEFINE() which defined 'stack'.
*/
char *z_setup_new_thread(struct k_thread *new_thread,
k_thread_stack_t *stack, size_t stack_size,
k_thread_entry_t entry,
void *p1, void *p2, void *p3,
int prio, uint32_t options, const char *name)
{
char *stack_ptr;
Z_ASSERT_VALID_PRIO(prio, entry);
#ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
k_thread_abort_cleanup_check_reuse(new_thread);
#endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */
#ifdef CONFIG_OBJ_CORE_THREAD
k_obj_core_init_and_link(K_OBJ_CORE(new_thread), &obj_type_thread);
#ifdef CONFIG_OBJ_CORE_STATS_THREAD
k_obj_core_stats_register(K_OBJ_CORE(new_thread),
&new_thread->base.usage,
sizeof(new_thread->base.usage));
#endif /* CONFIG_OBJ_CORE_STATS_THREAD */
#endif /* CONFIG_OBJ_CORE_THREAD */
#ifdef CONFIG_USERSPACE
__ASSERT((options & K_USER) == 0U || z_stack_is_user_capable(stack),
"user thread %p with kernel-only stack %p",
new_thread, stack);
k_object_init(new_thread);
k_object_init(stack);
new_thread->stack_obj = stack;
new_thread->syscall_frame = NULL;
/* Any given thread has access to itself */
k_object_access_grant(new_thread, new_thread);
#endif /* CONFIG_USERSPACE */
z_waitq_init(&new_thread->join_queue);
/* Initialize various struct k_thread members */
z_init_thread_base(&new_thread->base, prio, _THREAD_PRESTART, options);
stack_ptr = setup_thread_stack(new_thread, stack, stack_size);
#ifdef CONFIG_KERNEL_COHERENCE
/* Check that the thread object is safe, but that the stack is
* still cached!
*/
__ASSERT_NO_MSG(arch_mem_coherent(new_thread));
/* When dynamic thread stack is available, the stack may come from
* uncached area.
*/
#ifndef CONFIG_DYNAMIC_THREAD
__ASSERT_NO_MSG(!arch_mem_coherent(stack));
#endif /* CONFIG_DYNAMIC_THREAD */
#endif /* CONFIG_KERNEL_COHERENCE */
arch_new_thread(new_thread, stack, stack_ptr, entry, p1, p2, p3);
/* static threads overwrite it afterwards with real value */
new_thread->init_data = NULL;
#ifdef CONFIG_USE_SWITCH
/* switch_handle must be non-null except when inside z_swap()
* for synchronization reasons. Historically some notional
* USE_SWITCH architectures have actually ignored the field
*/
__ASSERT(new_thread->switch_handle != NULL,
"arch layer failed to initialize switch_handle");
#endif /* CONFIG_USE_SWITCH */
#ifdef CONFIG_THREAD_CUSTOM_DATA
/* Initialize custom data field (value is opaque to kernel) */
new_thread->custom_data = NULL;
#endif /* CONFIG_THREAD_CUSTOM_DATA */
#ifdef CONFIG_EVENTS
new_thread->no_wake_on_timeout = false;
#endif /* CONFIG_EVENTS */
#ifdef CONFIG_THREAD_MONITOR
new_thread->entry.pEntry = entry;
new_thread->entry.parameter1 = p1;
new_thread->entry.parameter2 = p2;
new_thread->entry.parameter3 = p3;
k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);
new_thread->next_thread = _kernel.threads;
_kernel.threads = new_thread;
k_spin_unlock(&z_thread_monitor_lock, key);
#endif /* CONFIG_THREAD_MONITOR */
#ifdef CONFIG_THREAD_NAME
if (name != NULL) {
strncpy(new_thread->name, name,
CONFIG_THREAD_MAX_NAME_LEN - 1);
/* Ensure NULL termination, truncate if longer */
new_thread->name[CONFIG_THREAD_MAX_NAME_LEN - 1] = '\0';
} else {
new_thread->name[0] = '\0';
}
#endif /* CONFIG_THREAD_NAME */
#ifdef CONFIG_SCHED_CPU_MASK
if (IS_ENABLED(CONFIG_SCHED_CPU_MASK_PIN_ONLY)) {
new_thread->base.cpu_mask = 1; /* must specify only one cpu */
} else {
new_thread->base.cpu_mask = -1; /* allow all cpus */
}
#endif /* CONFIG_SCHED_CPU_MASK */
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
/* _current may be null if the dummy thread is not used */
if (!_current) {
new_thread->resource_pool = NULL;
return stack_ptr;
}
#endif /* CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN */
#ifdef CONFIG_USERSPACE
z_mem_domain_init_thread(new_thread);
if ((options & K_INHERIT_PERMS) != 0U) {
k_thread_perms_inherit(_current, new_thread);
}
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_SCHED_DEADLINE
new_thread->base.prio_deadline = 0;
#endif /* CONFIG_SCHED_DEADLINE */
new_thread->resource_pool = _current->resource_pool;
#ifdef CONFIG_SMP
z_waitq_init(&new_thread->halt_queue);
#endif /* CONFIG_SMP */
#ifdef CONFIG_SCHED_THREAD_USAGE
new_thread->base.usage = (struct k_cycle_stats) {};
new_thread->base.usage.track_usage =
CONFIG_SCHED_THREAD_USAGE_AUTO_ENABLE;
#endif /* CONFIG_SCHED_THREAD_USAGE */
SYS_PORT_TRACING_OBJ_FUNC(k_thread, create, new_thread);
return stack_ptr;
}
k_tid_t z_impl_k_thread_create(struct k_thread *new_thread,
k_thread_stack_t *stack,
size_t stack_size, k_thread_entry_t entry,
void *p1, void *p2, void *p3,
int prio, uint32_t options, k_timeout_t delay)
{
__ASSERT(!arch_is_in_isr(), "Threads may not be created in ISRs");
z_setup_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
prio, options, NULL);
if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
thread_schedule_new(new_thread, delay);
}
return new_thread;
}
#ifdef CONFIG_USERSPACE
bool z_stack_is_user_capable(k_thread_stack_t *stack)
{
return k_object_find(stack) != NULL;
}
k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
k_thread_stack_t *stack,
size_t stack_size, k_thread_entry_t entry,
void *p1, void *p2, void *p3,
int prio, uint32_t options, k_timeout_t delay)
{
size_t total_size, stack_obj_size;
struct k_object *stack_object;
/* The thread and stack objects *must* be in an uninitialized state */
K_OOPS(K_SYSCALL_OBJ_NEVER_INIT(new_thread, K_OBJ_THREAD));
/* No need to check z_stack_is_user_capable(), it won't be in the
* object table if it isn't
*/
stack_object = k_object_find(stack);
K_OOPS(K_SYSCALL_VERIFY_MSG(k_object_validation_check(stack_object, stack,
K_OBJ_THREAD_STACK_ELEMENT,
_OBJ_INIT_FALSE) == 0,
"bad stack object"));
/* Verify that the stack size passed in is OK by computing the total
* size and comparing it with the size value in the object metadata
*/
K_OOPS(K_SYSCALL_VERIFY_MSG(!size_add_overflow(K_THREAD_STACK_RESERVED,
stack_size, &total_size),
"stack size overflow (%zu+%zu)",
stack_size,
K_THREAD_STACK_RESERVED));
/* Testing less-than-or-equal since additional room may have been
* allocated for alignment constraints
*/
#ifdef CONFIG_GEN_PRIV_STACKS
stack_obj_size = stack_object->data.stack_data->size;
#else
stack_obj_size = stack_object->data.stack_size;
#endif /* CONFIG_GEN_PRIV_STACKS */
K_OOPS(K_SYSCALL_VERIFY_MSG(total_size <= stack_obj_size,
"stack size %zu is too big, max is %zu",
total_size, stack_obj_size));
/* User threads may only create other user threads and they can't
* be marked as essential
*/
K_OOPS(K_SYSCALL_VERIFY(options & K_USER));
K_OOPS(K_SYSCALL_VERIFY(!(options & K_ESSENTIAL)));
/* Check validity of prio argument; must be the same or worse priority
* than the caller
*/
K_OOPS(K_SYSCALL_VERIFY(_is_valid_prio(prio, NULL)));
K_OOPS(K_SYSCALL_VERIFY(z_is_prio_lower_or_equal(prio,
_current->base.prio)));
z_setup_new_thread(new_thread, stack, stack_size,
entry, p1, p2, p3, prio, options, NULL);
if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
thread_schedule_new(new_thread, delay);
}
return new_thread;
}
#include <zephyr/syscalls/k_thread_create_mrsh.c>
#endif /* CONFIG_USERSPACE */
void z_init_thread_base(struct _thread_base *thread_base, int priority,
uint32_t initial_state, unsigned int options)
{
/* k_q_node is initialized upon first insertion in a list */
thread_base->pended_on = NULL;
thread_base->user_options = (uint8_t)options;
thread_base->thread_state = (uint8_t)initial_state;
thread_base->prio = priority;
thread_base->sched_locked = 0U;
#ifdef CONFIG_SMP
thread_base->is_idle = 0;
#endif /* CONFIG_SMP */
#ifdef CONFIG_TIMESLICE_PER_THREAD
thread_base->slice_ticks = 0;
thread_base->slice_expired = NULL;
#endif /* CONFIG_TIMESLICE_PER_THREAD */
/* swap_data does not need to be initialized */
z_init_thread_timeout(thread_base);
}
FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
void *p1, void *p2, void *p3)
{
SYS_PORT_TRACING_FUNC(k_thread, user_mode_enter);
_current->base.user_options |= K_USER;
z_thread_essential_clear(_current);
#ifdef CONFIG_THREAD_MONITOR
_current->entry.pEntry = entry;
_current->entry.parameter1 = p1;
_current->entry.parameter2 = p2;
_current->entry.parameter3 = p3;
#endif /* CONFIG_THREAD_MONITOR */
#ifdef CONFIG_USERSPACE
__ASSERT(z_stack_is_user_capable(_current->stack_obj),
"dropping to user mode with kernel-only stack object");
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
memset(_current->userspace_local_data, 0,
sizeof(struct _thread_userspace_local_data));
#endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */
#ifdef CONFIG_THREAD_LOCAL_STORAGE
arch_tls_stack_setup(_current,
(char *)(_current->stack_info.start +
_current->stack_info.size));
#endif /* CONFIG_THREAD_LOCAL_STORAGE */
arch_user_mode_enter(entry, p1, p2, p3);
#else
/* XXX In this case we do not reset the stack */
z_thread_entry(entry, p1, p2, p3);
#endif /* CONFIG_USERSPACE */
}
#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
#ifdef CONFIG_STACK_GROWS_UP
#error "Unsupported configuration for stack analysis"
#endif /* CONFIG_STACK_GROWS_UP */
int z_stack_space_get(const uint8_t *stack_start, size_t size, size_t *unused_ptr)
{
size_t unused = 0;
const uint8_t *checked_stack = stack_start;
/* Take the address of any local variable as a shallow bound for the
* stack pointer. Addresses above it are guaranteed to be
* accessible.
*/
const uint8_t *stack_pointer = (const uint8_t *)&stack_start;
/* If we are currently running on the stack being analyzed, some
* memory management hardware will generate an exception if we
* read unused stack memory.
*
* This never happens when invoked from user mode, as user mode
* will always run this function on the privilege elevation stack.
*/
if ((stack_pointer > stack_start) && (stack_pointer <= (stack_start + size)) &&
IS_ENABLED(CONFIG_NO_UNUSED_STACK_INSPECTION)) {
/* TODO: We could add an arch_ API call to temporarily
* disable the stack checking in the CPU, but this would
* need to be properly managed wrt context switches/interrupts
*/
return -ENOTSUP;
}
if (IS_ENABLED(CONFIG_STACK_SENTINEL)) {
/* First 4 bytes of the stack buffer reserved for the
* sentinel value, it won't be 0xAAAAAAAA for thread
* stacks.
*
* FIXME: thread->stack_info.start ought to reflect
* this!
*/
checked_stack += 4;
size -= 4;
}
for (size_t i = 0; i < size; i++) {
if ((checked_stack[i]) == 0xaaU) {
unused++;
} else {
break;
}
}
*unused_ptr = unused;
return 0;
}
int z_impl_k_thread_stack_space_get(const struct k_thread *thread,
size_t *unused_ptr)
{
#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
if (thread->stack_info.mapped.addr == NULL) {
return -EINVAL;
}
#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
return z_stack_space_get((const uint8_t *)thread->stack_info.start,
thread->stack_info.size, unused_ptr);
}
#ifdef CONFIG_USERSPACE
int z_vrfy_k_thread_stack_space_get(const struct k_thread *thread,
size_t *unused_ptr)
{
size_t unused;
int ret;
ret = K_SYSCALL_OBJ(thread, K_OBJ_THREAD);
CHECKIF(ret != 0) {
return ret;
}
ret = z_impl_k_thread_stack_space_get(thread, &unused);
CHECKIF(ret != 0) {
return ret;
}
ret = k_usermode_to_copy(unused_ptr, &unused, sizeof(size_t));
CHECKIF(ret != 0) {
return ret;
}
return 0;
}
#include <zephyr/syscalls/k_thread_stack_space_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_INIT_STACKS && CONFIG_THREAD_STACK_INFO */
#ifdef CONFIG_USERSPACE
static inline k_ticks_t z_vrfy_k_thread_timeout_remaining_ticks(
const struct k_thread *thread)
{
K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
return z_impl_k_thread_timeout_remaining_ticks(thread);
}
#include <zephyr/syscalls/k_thread_timeout_remaining_ticks_mrsh.c>
static inline k_ticks_t z_vrfy_k_thread_timeout_expires_ticks(
const struct k_thread *thread)
{
K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
return z_impl_k_thread_timeout_expires_ticks(thread);
}
#include <zephyr/syscalls/k_thread_timeout_expires_ticks_mrsh.c>
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
void z_thread_mark_switched_in(void)
{
#if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH)
z_sched_usage_start(_current);
#endif /* CONFIG_SCHED_THREAD_USAGE && !CONFIG_USE_SWITCH */
#ifdef CONFIG_TRACING
SYS_PORT_TRACING_FUNC(k_thread, switched_in);
#endif /* CONFIG_TRACING */
}
void z_thread_mark_switched_out(void)
{
#if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH)
z_sched_usage_stop();
#endif /*CONFIG_SCHED_THREAD_USAGE && !CONFIG_USE_SWITCH */
#ifdef CONFIG_TRACING
#ifdef CONFIG_THREAD_LOCAL_STORAGE
/* Dummy thread won't have TLS set up to run arbitrary code */
if (!_current_cpu->current ||
(_current_cpu->current->base.thread_state & _THREAD_DUMMY) != 0)
return;
#endif /* CONFIG_THREAD_LOCAL_STORAGE */
SYS_PORT_TRACING_FUNC(k_thread, switched_out);
#endif /* CONFIG_TRACING */
}
#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */
int k_thread_runtime_stats_get(k_tid_t thread,
k_thread_runtime_stats_t *stats)
{
if ((thread == NULL) || (stats == NULL)) {
return -EINVAL;
}
#ifdef CONFIG_SCHED_THREAD_USAGE
z_sched_thread_usage(thread, stats);
#else
*stats = (k_thread_runtime_stats_t) {};
#endif /* CONFIG_SCHED_THREAD_USAGE */
return 0;
}
int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats)
{
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
k_thread_runtime_stats_t tmp_stats;
#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
if (stats == NULL) {
return -EINVAL;
}
*stats = (k_thread_runtime_stats_t) {};
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
/* Retrieve the usage stats for each core and amalgamate them. */
unsigned int num_cpus = arch_num_cpus();
for (uint8_t i = 0; i < num_cpus; i++) {
z_sched_cpu_usage(i, &tmp_stats);
stats->execution_cycles += tmp_stats.execution_cycles;
stats->total_cycles += tmp_stats.total_cycles;
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
stats->current_cycles += tmp_stats.current_cycles;
stats->peak_cycles += tmp_stats.peak_cycles;
stats->average_cycles += tmp_stats.average_cycles;
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
stats->idle_cycles += tmp_stats.idle_cycles;
}
#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
return 0;
}
int k_thread_runtime_stats_cpu_get(int cpu, k_thread_runtime_stats_t *stats)
{
if (stats == NULL) {
return -EINVAL;
}
*stats = (k_thread_runtime_stats_t) {};
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
#ifdef CONFIG_SMP
z_sched_cpu_usage(cpu, stats);
#else
__ASSERT(cpu == 0, "cpu filter out of bounds");
ARG_UNUSED(cpu);
z_sched_cpu_usage(0, stats);
#endif
#endif
return 0;
}
#ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
/** Pointer to thread which needs to be cleaned up. */
static struct k_thread *thread_to_cleanup;
/** Spinlock for thread abort cleanup. */
static struct k_spinlock thread_cleanup_lock;
#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
static void *thread_cleanup_stack_addr;
static size_t thread_cleanup_stack_sz;
#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
void defer_thread_cleanup(struct k_thread *thread)
{
/* Note when adding new deferred cleanup steps:
* - The thread object may have been overwritten by the time
* the actual cleanup is being done (e.g. thread object
* allocated on a stack). So stash any necessary data here
* that will be used in the actual cleanup steps.
*/
thread_to_cleanup = thread;
#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
/* Note that the permission of the stack should have been
* stripped of user thread access due to the thread having
* already exited from a memory domain. That is done via
* k_thread_abort().
*/
/* Stash the address and size so the region can be unmapped
* later.
*/
thread_cleanup_stack_addr = thread->stack_info.mapped.addr;
thread_cleanup_stack_sz = thread->stack_info.mapped.sz;
/* The stack is now considered un-usable. This should prevent any functions
* from looking directly into the mapped stack if they are made to be aware
* of memory mapped stacks, e.g., z_stack_space_get().
*/
thread->stack_info.mapped.addr = NULL;
thread->stack_info.mapped.sz = 0;
#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
}
void do_thread_cleanup(struct k_thread *thread)
{
/* Note when adding new actual cleanup steps:
* - The thread object may have been overwritten when this is
* called. So avoid using any data from the thread object.
*/
ARG_UNUSED(thread);
#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
if (thread_cleanup_stack_addr != NULL) {
k_mem_unmap_phys_guard(thread_cleanup_stack_addr,
thread_cleanup_stack_sz, false);
thread_cleanup_stack_addr = NULL;
}
#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
}
void k_thread_abort_cleanup(struct k_thread *thread)
{
K_SPINLOCK(&thread_cleanup_lock) {
if (thread_to_cleanup != NULL) {
/* Finish the pending one first. */
do_thread_cleanup(thread_to_cleanup);
thread_to_cleanup = NULL;
}
if (thread == _current) {
/* Need to defer for current running thread as the cleanup
* might result in exception. Actual cleanup will be done
* at the next time k_thread_abort() is called, or at thread
* creation if the same thread object is being reused. This
* is to make sure the cleanup code no longer needs this
* thread's stack. This is not exactly ideal as the stack
* may still be memory mapped for a while. However, this is
* a simple solution without a) the need to workaround
* the schedule lock during k_thread_abort(), b) creating
* another thread to perform the cleanup, and c) does not
* require architecture code support (e.g. via exception).
*/
defer_thread_cleanup(thread);
} else {
/* Not the current running thread, so we are safe to do
* cleanups.
*/
do_thread_cleanup(thread);
}
}
}
void k_thread_abort_cleanup_check_reuse(struct k_thread *thread)
{
K_SPINLOCK(&thread_cleanup_lock) {
/* This is to guard reuse of the same thread object and make sure
* any pending cleanups of it needs to be finished before the thread
* object can be reused.
*/
if (thread_to_cleanup == thread) {
do_thread_cleanup(thread_to_cleanup);
thread_to_cleanup = NULL;
}
}
}
#endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */
``` | /content/code_sandbox/kernel/thread.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 8,185 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/toolchain.h>
static struct k_object *validate_kernel_object(const void *obj,
enum k_objects otype,
enum _obj_init_check init)
{
struct k_object *ko;
int ret;
ko = k_object_find(obj);
/* This can be any kernel object and it doesn't have to be
* initialized
*/
ret = k_object_validate(ko, K_OBJ_ANY, _OBJ_INIT_ANY);
if (ret != 0) {
#ifdef CONFIG_LOG
k_object_dump_error(ret, obj, ko, otype);
#endif /* CONFIG_LOG */
return NULL;
}
return ko;
}
static ALWAYS_INLINE struct k_object *validate_any_object(const void *obj)
{
return validate_kernel_object(obj, K_OBJ_ANY, _OBJ_INIT_ANY);
}
bool k_object_is_valid(const void *obj, enum k_objects otype)
{
struct k_object *ko;
ko = validate_kernel_object(obj, otype, _OBJ_INIT_TRUE);
return (ko != NULL);
}
/* Normally these would be included in userspace.c, but the way
* syscall_dispatch.c declares weak handlers results in build errors if these
* are located in userspace.c. Just put in a separate file.
*
* To avoid double k_object_find() lookups, we don't call the implementation
* function, but call a level deeper.
*/
static inline void z_vrfy_k_object_access_grant(const void *object,
struct k_thread *thread)
{
struct k_object *ko;
K_OOPS(K_SYSCALL_OBJ_INIT(thread, K_OBJ_THREAD));
ko = validate_any_object(object);
K_OOPS(K_SYSCALL_VERIFY_MSG(ko != NULL, "object %p access denied",
object));
k_thread_perms_set(ko, thread);
}
#include <zephyr/syscalls/k_object_access_grant_mrsh.c>
static inline void z_vrfy_k_object_release(const void *object)
{
struct k_object *ko;
ko = validate_any_object(object);
K_OOPS(K_SYSCALL_VERIFY_MSG(ko != NULL, "object %p access denied", object));
k_thread_perms_clear(ko, _current);
}
#include <zephyr/syscalls/k_object_release_mrsh.c>
static inline void *z_vrfy_k_object_alloc(enum k_objects otype)
{
return z_impl_k_object_alloc(otype);
}
#include <zephyr/syscalls/k_object_alloc_mrsh.c>
static inline void *z_vrfy_k_object_alloc_size(enum k_objects otype, size_t size)
{
return z_impl_k_object_alloc_size(otype, size);
}
#include <zephyr/syscalls/k_object_alloc_size_mrsh.c>
``` | /content/code_sandbox/kernel/userspace_handler.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 591 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <zephyr/irq_offload.h>
/* Make offload_sem visible outside testing, in order to release
* it outside when error happened.
*/
K_SEM_DEFINE(offload_sem, 1, 1);
void irq_offload(irq_offload_routine_t routine, const void *parameter)
{
#ifdef CONFIG_IRQ_OFFLOAD_NESTED
arch_irq_offload(routine, parameter);
#else
k_sem_take(&offload_sem, K_FOREVER);
arch_irq_offload(routine, parameter);
k_sem_give(&offload_sem);
#endif /* CONFIG_IRQ_OFFLOAD_NESTED */
}
``` | /content/code_sandbox/kernel/irq_offload.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 141 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <kswap.h>
#include <ksched.h>
#include <ipi.h>
static int slice_ticks = DIV_ROUND_UP(CONFIG_TIMESLICE_SIZE * Z_HZ_ticks, Z_HZ_ms);
static int slice_max_prio = CONFIG_TIMESLICE_PRIORITY;
static struct _timeout slice_timeouts[CONFIG_MP_MAX_NUM_CPUS];
static bool slice_expired[CONFIG_MP_MAX_NUM_CPUS];
#ifdef CONFIG_SWAP_NONATOMIC
/* If z_swap() isn't atomic, then it's possible for a timer interrupt
* to try to timeslice away _current after it has already pended
* itself but before the corresponding context switch. Treat that as
* a noop condition in z_time_slice().
*/
struct k_thread *pending_current;
#endif
static inline int slice_time(struct k_thread *thread)
{
int ret = slice_ticks;
#ifdef CONFIG_TIMESLICE_PER_THREAD
if (thread->base.slice_ticks != 0) {
ret = thread->base.slice_ticks;
}
#else
ARG_UNUSED(thread);
#endif
return ret;
}
bool thread_is_sliceable(struct k_thread *thread)
{
bool ret = thread_is_preemptible(thread)
&& slice_time(thread) != 0
&& !z_is_prio_higher(thread->base.prio, slice_max_prio)
&& !z_is_thread_prevented_from_running(thread)
&& !z_is_idle_thread_object(thread);
#ifdef CONFIG_TIMESLICE_PER_THREAD
ret |= thread->base.slice_ticks != 0;
#endif
return ret;
}
static void slice_timeout(struct _timeout *timeout)
{
int cpu = ARRAY_INDEX(slice_timeouts, timeout);
slice_expired[cpu] = true;
/* We need an IPI if we just handled a timeslice expiration
* for a different CPU.
*/
if (cpu != _current_cpu->id) {
flag_ipi(IPI_CPU_MASK(cpu));
}
}
void z_reset_time_slice(struct k_thread *thread)
{
int cpu = _current_cpu->id;
z_abort_timeout(&slice_timeouts[cpu]);
slice_expired[cpu] = false;
if (thread_is_sliceable(thread)) {
z_add_timeout(&slice_timeouts[cpu], slice_timeout,
K_TICKS(slice_time(thread) - 1));
}
}
void k_sched_time_slice_set(int32_t slice, int prio)
{
K_SPINLOCK(&_sched_spinlock) {
slice_ticks = k_ms_to_ticks_ceil32(slice);
slice_max_prio = prio;
z_reset_time_slice(_current);
}
}
#ifdef CONFIG_TIMESLICE_PER_THREAD
void k_thread_time_slice_set(struct k_thread *thread, int32_t thread_slice_ticks,
k_thread_timeslice_fn_t expired, void *data)
{
K_SPINLOCK(&_sched_spinlock) {
thread->base.slice_ticks = thread_slice_ticks;
thread->base.slice_expired = expired;
thread->base.slice_data = data;
z_reset_time_slice(thread);
}
}
#endif
/* Called out of each timer interrupt */
void z_time_slice(void)
{
k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
struct k_thread *curr = _current;
#ifdef CONFIG_SWAP_NONATOMIC
if (pending_current == curr) {
z_reset_time_slice(curr);
k_spin_unlock(&_sched_spinlock, key);
return;
}
pending_current = NULL;
#endif
if (slice_expired[_current_cpu->id] && thread_is_sliceable(curr)) {
#ifdef CONFIG_TIMESLICE_PER_THREAD
if (curr->base.slice_expired) {
k_spin_unlock(&_sched_spinlock, key);
curr->base.slice_expired(curr, curr->base.slice_data);
key = k_spin_lock(&_sched_spinlock);
}
#endif
if (!z_is_thread_prevented_from_running(curr)) {
move_thread_to_end_of_prio_q(curr);
}
z_reset_time_slice(curr);
}
k_spin_unlock(&_sched_spinlock, key);
}
``` | /content/code_sandbox/kernel/timeslicing.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 850 |
```c
/*
*
*/
/**
* @brief fixed-size stack object
*/
#include <zephyr/sys/math_extras.h>
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/toolchain.h>
#include <ksched.h>
#include <wait_q.h>
#include <zephyr/sys/check.h>
#include <zephyr/init.h>
#include <zephyr/internal/syscall_handler.h>
#include <kernel_internal.h>
#ifdef CONFIG_OBJ_CORE_STACK
static struct k_obj_type obj_type_stack;
#endif /* CONFIG_OBJ_CORE_STACK */
void k_stack_init(struct k_stack *stack, stack_data_t *buffer,
uint32_t num_entries)
{
z_waitq_init(&stack->wait_q);
stack->lock = (struct k_spinlock) {};
stack->next = buffer;
stack->base = buffer;
stack->top = stack->base + num_entries;
SYS_PORT_TRACING_OBJ_INIT(k_stack, stack);
k_object_init(stack);
#ifdef CONFIG_OBJ_CORE_STACK
k_obj_core_init_and_link(K_OBJ_CORE(stack), &obj_type_stack);
#endif /* CONFIG_OBJ_CORE_STACK */
}
int32_t z_impl_k_stack_alloc_init(struct k_stack *stack, uint32_t num_entries)
{
void *buffer;
int32_t ret;
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_stack, alloc_init, stack);
buffer = z_thread_malloc(num_entries * sizeof(stack_data_t));
if (buffer != NULL) {
k_stack_init(stack, buffer, num_entries);
stack->flags = K_STACK_FLAG_ALLOC;
ret = 0;
} else {
ret = -ENOMEM;
}
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_stack, alloc_init, stack, ret);
return ret;
}
#ifdef CONFIG_USERSPACE
static inline int32_t z_vrfy_k_stack_alloc_init(struct k_stack *stack,
uint32_t num_entries)
{
size_t total_size;
K_OOPS(K_SYSCALL_OBJ_NEVER_INIT(stack, K_OBJ_STACK));
K_OOPS(K_SYSCALL_VERIFY(num_entries > 0));
K_OOPS(K_SYSCALL_VERIFY(!size_mul_overflow(num_entries, sizeof(stack_data_t),
&total_size)));
return z_impl_k_stack_alloc_init(stack, num_entries);
}
#include <zephyr/syscalls/k_stack_alloc_init_mrsh.c>
#endif /* CONFIG_USERSPACE */
int k_stack_cleanup(struct k_stack *stack)
{
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_stack, cleanup, stack);
CHECKIF(z_waitq_head(&stack->wait_q) != NULL) {
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_stack, cleanup, stack, -EAGAIN);
return -EAGAIN;
}
if ((stack->flags & K_STACK_FLAG_ALLOC) != (uint8_t)0) {
k_free(stack->base);
stack->base = NULL;
stack->flags &= ~K_STACK_FLAG_ALLOC;
}
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_stack, cleanup, stack, 0);
return 0;
}
int z_impl_k_stack_push(struct k_stack *stack, stack_data_t data)
{
struct k_thread *first_pending_thread;
int ret = 0;
k_spinlock_key_t key = k_spin_lock(&stack->lock);
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_stack, push, stack);
CHECKIF(stack->next == stack->top) {
ret = -ENOMEM;
goto out;
}
first_pending_thread = z_unpend_first_thread(&stack->wait_q);
if (first_pending_thread != NULL) {
z_thread_return_value_set_with_data(first_pending_thread,
0, (void *)data);
z_ready_thread(first_pending_thread);
z_reschedule(&stack->lock, key);
goto end;
} else {
*(stack->next) = data;
stack->next++;
goto out;
}
out:
k_spin_unlock(&stack->lock, key);
end:
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_stack, push, stack, ret);
return ret;
}
#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_stack_push(struct k_stack *stack, stack_data_t data)
{
K_OOPS(K_SYSCALL_OBJ(stack, K_OBJ_STACK));
return z_impl_k_stack_push(stack, data);
}
#include <zephyr/syscalls/k_stack_push_mrsh.c>
#endif /* CONFIG_USERSPACE */
int z_impl_k_stack_pop(struct k_stack *stack, stack_data_t *data,
k_timeout_t timeout)
{
k_spinlock_key_t key;
int result;
key = k_spin_lock(&stack->lock);
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_stack, pop, stack, timeout);
if (likely(stack->next > stack->base)) {
stack->next--;
*data = *(stack->next);
k_spin_unlock(&stack->lock, key);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_stack, pop, stack, timeout, 0);
return 0;
}
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_stack, pop, stack, timeout);
if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
k_spin_unlock(&stack->lock, key);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_stack, pop, stack, timeout, -EBUSY);
return -EBUSY;
}
result = z_pend_curr(&stack->lock, key, &stack->wait_q, timeout);
if (result == -EAGAIN) {
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_stack, pop, stack, timeout, -EAGAIN);
return -EAGAIN;
}
*data = (stack_data_t)_current->base.swap_data;
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_stack, pop, stack, timeout, 0);
return 0;
}
#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_stack_pop(struct k_stack *stack,
stack_data_t *data, k_timeout_t timeout)
{
K_OOPS(K_SYSCALL_OBJ(stack, K_OBJ_STACK));
K_OOPS(K_SYSCALL_MEMORY_WRITE(data, sizeof(stack_data_t)));
return z_impl_k_stack_pop(stack, data, timeout);
}
#include <zephyr/syscalls/k_stack_pop_mrsh.c>
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_OBJ_CORE_STACK
static int init_stack_obj_core_list(void)
{
/* Initialize stack object type */
z_obj_type_init(&obj_type_stack, K_OBJ_TYPE_STACK_ID,
offsetof(struct k_stack, obj_core));
/* Initialize and link statically defined stacks */
STRUCT_SECTION_FOREACH(k_stack, stack) {
k_obj_core_init_and_link(K_OBJ_CORE(stack), &obj_type_stack);
}
return 0;
}
SYS_INIT(init_stack_obj_core_list, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJ_CORE_STACK */
``` | /content/code_sandbox/kernel/stack.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,427 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/init.h>
#include <zephyr/sys/check.h>
#include <zephyr/sys/iterable_sections.h>
#include <string.h>
/* private kernel APIs */
#include <ksched.h>
#include <wait_q.h>
#ifdef CONFIG_OBJ_CORE_MEM_SLAB
static struct k_obj_type obj_type_mem_slab;
#ifdef CONFIG_OBJ_CORE_STATS_MEM_SLAB
static int k_mem_slab_stats_raw(struct k_obj_core *obj_core, void *stats)
{
__ASSERT((obj_core != NULL) && (stats != NULL), "NULL parameter");
struct k_mem_slab *slab;
k_spinlock_key_t key;
slab = CONTAINER_OF(obj_core, struct k_mem_slab, obj_core);
key = k_spin_lock(&slab->lock);
memcpy(stats, &slab->info, sizeof(slab->info));
k_spin_unlock(&slab->lock, key);
return 0;
}
static int k_mem_slab_stats_query(struct k_obj_core *obj_core, void *stats)
{
__ASSERT((obj_core != NULL) && (stats != NULL), "NULL parameter");
struct k_mem_slab *slab;
k_spinlock_key_t key;
struct sys_memory_stats *ptr = stats;
slab = CONTAINER_OF(obj_core, struct k_mem_slab, obj_core);
key = k_spin_lock(&slab->lock);
ptr->free_bytes = (slab->info.num_blocks - slab->info.num_used) *
slab->info.block_size;
ptr->allocated_bytes = slab->info.num_used * slab->info.block_size;
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
ptr->max_allocated_bytes = slab->info.max_used * slab->info.block_size;
#else
ptr->max_allocated_bytes = 0;
#endif /* CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION */
k_spin_unlock(&slab->lock, key);
return 0;
}
static int k_mem_slab_stats_reset(struct k_obj_core *obj_core)
{
__ASSERT(obj_core != NULL, "NULL parameter");
struct k_mem_slab *slab;
k_spinlock_key_t key;
slab = CONTAINER_OF(obj_core, struct k_mem_slab, obj_core);
key = k_spin_lock(&slab->lock);
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
slab->info.max_used = slab->info.num_used;
#endif /* CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION */
k_spin_unlock(&slab->lock, key);
return 0;
}
static struct k_obj_core_stats_desc mem_slab_stats_desc = {
.raw_size = sizeof(struct k_mem_slab_info),
.query_size = sizeof(struct sys_memory_stats),
.raw = k_mem_slab_stats_raw,
.query = k_mem_slab_stats_query,
.reset = k_mem_slab_stats_reset,
.disable = NULL,
.enable = NULL,
};
#endif /* CONFIG_OBJ_CORE_STATS_MEM_SLAB */
#endif /* CONFIG_OBJ_CORE_MEM_SLAB */
/**
* @brief Initialize kernel memory slab subsystem.
*
* Perform any initialization of memory slabs that wasn't done at build time.
* Currently this just involves creating the list of free blocks for each slab.
*
* @retval 0 on success.
* @retval -EINVAL if @p slab contains invalid configuration and/or values.
*/
static int create_free_list(struct k_mem_slab *slab)
{
char *p;
/* blocks must be word aligned */
CHECKIF(((slab->info.block_size | (uintptr_t)slab->buffer) &
(sizeof(void *) - 1)) != 0U) {
return -EINVAL;
}
slab->free_list = NULL;
p = slab->buffer + slab->info.block_size * (slab->info.num_blocks - 1);
while (p >= slab->buffer) {
*(char **)p = slab->free_list;
slab->free_list = p;
p -= slab->info.block_size;
}
return 0;
}
/**
* @brief Complete initialization of statically defined memory slabs.
*
* Perform any initialization that wasn't done at build time.
*
* @return 0 on success, fails otherwise.
*/
static int init_mem_slab_obj_core_list(void)
{
int rc = 0;
/* Initialize mem_slab object type */
#ifdef CONFIG_OBJ_CORE_MEM_SLAB
z_obj_type_init(&obj_type_mem_slab, K_OBJ_TYPE_MEM_SLAB_ID,
offsetof(struct k_mem_slab, obj_core));
#ifdef CONFIG_OBJ_CORE_STATS_MEM_SLAB
k_obj_type_stats_init(&obj_type_mem_slab, &mem_slab_stats_desc);
#endif /* CONFIG_OBJ_CORE_STATS_MEM_SLAB */
#endif /* CONFIG_OBJ_CORE_MEM_SLAB */
/* Initialize statically defined mem_slabs */
STRUCT_SECTION_FOREACH(k_mem_slab, slab) {
rc = create_free_list(slab);
if (rc < 0) {
goto out;
}
k_object_init(slab);
#ifdef CONFIG_OBJ_CORE_MEM_SLAB
k_obj_core_init_and_link(K_OBJ_CORE(slab), &obj_type_mem_slab);
#ifdef CONFIG_OBJ_CORE_STATS_MEM_SLAB
k_obj_core_stats_register(K_OBJ_CORE(slab), &slab->info,
sizeof(struct k_mem_slab_info));
#endif /* CONFIG_OBJ_CORE_STATS_MEM_SLAB */
#endif /* CONFIG_OBJ_CORE_MEM_SLAB */
}
out:
return rc;
}
SYS_INIT(init_mem_slab_obj_core_list, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
size_t block_size, uint32_t num_blocks)
{
int rc;
slab->info.num_blocks = num_blocks;
slab->info.block_size = block_size;
slab->buffer = buffer;
slab->info.num_used = 0U;
slab->lock = (struct k_spinlock) {};
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
slab->info.max_used = 0U;
#endif /* CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION */
rc = create_free_list(slab);
if (rc < 0) {
goto out;
}
#ifdef CONFIG_OBJ_CORE_MEM_SLAB
k_obj_core_init_and_link(K_OBJ_CORE(slab), &obj_type_mem_slab);
#endif /* CONFIG_OBJ_CORE_MEM_SLAB */
#ifdef CONFIG_OBJ_CORE_STATS_MEM_SLAB
k_obj_core_stats_register(K_OBJ_CORE(slab), &slab->info,
sizeof(struct k_mem_slab_info));
#endif /* CONFIG_OBJ_CORE_STATS_MEM_SLAB */
z_waitq_init(&slab->wait_q);
k_object_init(slab);
out:
SYS_PORT_TRACING_OBJ_INIT(k_mem_slab, slab, rc);
return rc;
}
#if __ASSERT_ON
static bool slab_ptr_is_good(struct k_mem_slab *slab, const void *ptr)
{
const char *p = ptr;
ptrdiff_t offset = p - slab->buffer;
return (offset >= 0) &&
(offset < (slab->info.block_size * slab->info.num_blocks)) &&
((offset % slab->info.block_size) == 0);
}
#endif
int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
{
k_spinlock_key_t key = k_spin_lock(&slab->lock);
int result;
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mem_slab, alloc, slab, timeout);
if (slab->free_list != NULL) {
/* take a free block */
*mem = slab->free_list;
slab->free_list = *(char **)(slab->free_list);
slab->info.num_used++;
__ASSERT((slab->free_list == NULL &&
slab->info.num_used == slab->info.num_blocks) ||
slab_ptr_is_good(slab, slab->free_list),
"slab corruption detected");
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
slab->info.max_used = MAX(slab->info.num_used,
slab->info.max_used);
#endif /* CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION */
result = 0;
} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT) ||
!IS_ENABLED(CONFIG_MULTITHREADING)) {
/* don't wait for a free block to become available */
*mem = NULL;
result = -ENOMEM;
} else {
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mem_slab, alloc, slab, timeout);
/* wait for a free block or timeout */
result = z_pend_curr(&slab->lock, key, &slab->wait_q, timeout);
if (result == 0) {
*mem = _current->base.swap_data;
}
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, alloc, slab, timeout, result);
return result;
}
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, alloc, slab, timeout, result);
k_spin_unlock(&slab->lock, key);
return result;
}
void k_mem_slab_free(struct k_mem_slab *slab, void *mem)
{
k_spinlock_key_t key = k_spin_lock(&slab->lock);
__ASSERT(slab_ptr_is_good(slab, mem), "Invalid memory pointer provided");
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mem_slab, free, slab);
if ((slab->free_list == NULL) && IS_ENABLED(CONFIG_MULTITHREADING)) {
struct k_thread *pending_thread = z_unpend_first_thread(&slab->wait_q);
if (pending_thread != NULL) {
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, free, slab);
z_thread_return_value_set_with_data(pending_thread, 0, mem);
z_ready_thread(pending_thread);
z_reschedule(&slab->lock, key);
return;
}
}
*(char **) mem = slab->free_list;
slab->free_list = (char *) mem;
slab->info.num_used--;
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, free, slab);
k_spin_unlock(&slab->lock, key);
}
int k_mem_slab_runtime_stats_get(struct k_mem_slab *slab, struct sys_memory_stats *stats)
{
if ((slab == NULL) || (stats == NULL)) {
return -EINVAL;
}
k_spinlock_key_t key = k_spin_lock(&slab->lock);
stats->allocated_bytes = slab->info.num_used * slab->info.block_size;
stats->free_bytes = (slab->info.num_blocks - slab->info.num_used) *
slab->info.block_size;
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
stats->max_allocated_bytes = slab->info.max_used *
slab->info.block_size;
#else
stats->max_allocated_bytes = 0;
#endif /* CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION */
k_spin_unlock(&slab->lock, key);
return 0;
}
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
int k_mem_slab_runtime_stats_reset_max(struct k_mem_slab *slab)
{
if (slab == NULL) {
return -EINVAL;
}
k_spinlock_key_t key = k_spin_lock(&slab->lock);
slab->info.max_used = slab->info.num_used;
k_spin_unlock(&slab->lock, key);
return 0;
}
#endif /* CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION */
``` | /content/code_sandbox/kernel/mem_slab.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,483 |
```c
/*
*
*/
void __do_global_ctors_aux(void);
void __do_init_array_aux(void);
void z_init_static(void)
{
#if defined(CONFIG_STATIC_INIT_GNU)
__do_global_ctors_aux();
__do_init_array_aux();
#elif defined(__CCAC__) /* ARC MWDT */
__do_global_ctors_aux();
#endif
}
/**
* @section - Constructor module
* @brief
* The ctors section contains a list of function pointers that execute both the C++ constructors of
* static global objects, as well as either C or C++ initializer functions (declared with the
* attribute constructor). These must be executed before the application's main() routine.
*
* NOTE: Not all compilers put those function pointers into the ctors section;
* some put them into the init_array section instead.
*/
#ifdef CONFIG_STATIC_INIT_GNU
/* What a constructor function pointer looks like */
typedef void (*CtorFuncPtr)(void);
/* Constructor function pointer list is generated by the linker script. */
extern CtorFuncPtr __ZEPHYR_CTOR_LIST__[];
extern CtorFuncPtr __ZEPHYR_CTOR_END__[];
/**
*
* @brief Invoke all C++ style global object constructors
*
* This routine is invoked by the kernel prior to the execution of the
* application's main().
*/
void __do_global_ctors_aux(void)
{
unsigned int nCtors;
nCtors = (unsigned long)__ZEPHYR_CTOR_LIST__[0];
while (nCtors >= 1U) {
__ZEPHYR_CTOR_LIST__[nCtors--]();
}
}
#endif
/*
* @section
* @brief Execute initialization routines referenced in .init_array section
*/
#ifdef CONFIG_STATIC_INIT_GNU
typedef void (*func_ptr)(void);
extern func_ptr __zephyr_init_array_start[];
extern func_ptr __zephyr_init_array_end[];
/**
* @brief Execute initialization routines referenced in .init_array section
*/
void __do_init_array_aux(void)
{
for (func_ptr *func = __zephyr_init_array_start;
func < __zephyr_init_array_end;
func++) {
(*func)();
}
}
#endif
``` | /content/code_sandbox/kernel/init_static.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 463 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <zephyr/internal/syscall_handler.h>
#include <kernel_arch_interface.h>
int z_impl_k_float_disable(struct k_thread *thread)
{
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
return arch_float_disable(thread);
#else
ARG_UNUSED(thread);
return -ENOTSUP;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
}
int z_impl_k_float_enable(struct k_thread *thread, unsigned int options)
{
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
return arch_float_enable(thread, options);
#else
ARG_UNUSED(thread);
ARG_UNUSED(options);
return -ENOTSUP;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
}
#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_float_disable(struct k_thread *thread)
{
K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
return z_impl_k_float_disable(thread);
}
#include <zephyr/syscalls/k_float_disable_mrsh.c>
static inline int z_vrfy_k_float_enable(struct k_thread *thread, unsigned int options)
{
K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
return z_impl_k_float_enable(thread, options);
}
#include <zephyr/syscalls/k_float_enable_mrsh.c>
#endif /* CONFIG_USERSPACE */
``` | /content/code_sandbox/kernel/float.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 282 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <zephyr/init.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/sys/iterable_sections.h>
/* private kernel APIs */
#include <ksched.h>
#include <wait_q.h>
void k_heap_init(struct k_heap *heap, void *mem, size_t bytes)
{
z_waitq_init(&heap->wait_q);
sys_heap_init(&heap->heap, mem, bytes);
SYS_PORT_TRACING_OBJ_INIT(k_heap, heap);
}
static int statics_init(void)
{
STRUCT_SECTION_FOREACH(k_heap, heap) {
#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
/* Some heaps may not present at boot, so we need to wait for
* paging mechanism to be initialized before we can initialize
* each heap.
*/
extern bool z_sys_post_kernel;
bool do_clear = z_sys_post_kernel;
/* During pre-kernel init, z_sys_post_kernel == false,
* initialize if within pinned region. Otherwise skip.
* In post-kernel init, z_sys_post_kernel == true, skip those in
* pinned region as they have already been initialized and
* possibly already in use. Otherwise initialize.
*/
if (lnkr_is_pinned((uint8_t *)heap) &&
lnkr_is_pinned((uint8_t *)&heap->wait_q) &&
lnkr_is_region_pinned((uint8_t *)heap->heap.init_mem,
heap->heap.init_bytes)) {
do_clear = !do_clear;
}
if (do_clear)
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */
{
k_heap_init(heap, heap->heap.init_mem, heap->heap.init_bytes);
}
}
return 0;
}
SYS_INIT_NAMED(statics_init_pre, statics_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
/* Need to wait for paging mechanism to be initialized before
* heaps that are not in pinned sections can be initialized.
*/
SYS_INIT_NAMED(statics_init_post, statics_init, POST_KERNEL, 0);
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */
void *k_heap_aligned_alloc(struct k_heap *heap, size_t align, size_t bytes,
k_timeout_t timeout)
{
k_timepoint_t end = sys_timepoint_calc(timeout);
void *ret = NULL;
k_spinlock_key_t key = k_spin_lock(&heap->lock);
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap, aligned_alloc, heap, timeout);
__ASSERT(!arch_is_in_isr() || K_TIMEOUT_EQ(timeout, K_NO_WAIT), "");
bool blocked_alloc = false;
while (ret == NULL) {
ret = sys_heap_aligned_alloc(&heap->heap, align, bytes);
if (!IS_ENABLED(CONFIG_MULTITHREADING) ||
(ret != NULL) || K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
break;
}
if (!blocked_alloc) {
blocked_alloc = true;
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_heap, aligned_alloc, heap, timeout);
} else {
/**
* @todo Trace attempt to avoid empty trace segments
*/
}
timeout = sys_timepoint_timeout(end);
(void) z_pend_curr(&heap->lock, key, &heap->wait_q, timeout);
key = k_spin_lock(&heap->lock);
}
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap, aligned_alloc, heap, timeout, ret);
k_spin_unlock(&heap->lock, key);
return ret;
}
void *k_heap_alloc(struct k_heap *heap, size_t bytes, k_timeout_t timeout)
{
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap, alloc, heap, timeout);
void *ret = k_heap_aligned_alloc(heap, sizeof(void *), bytes, timeout);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap, alloc, heap, timeout, ret);
return ret;
}
void *k_heap_realloc(struct k_heap *heap, void *ptr, size_t bytes, k_timeout_t timeout)
{
k_timepoint_t end = sys_timepoint_calc(timeout);
void *ret = NULL;
k_spinlock_key_t key = k_spin_lock(&heap->lock);
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap, realloc, heap, ptr, bytes, timeout);
__ASSERT(!arch_is_in_isr() || K_TIMEOUT_EQ(timeout, K_NO_WAIT), "");
while (ret == NULL) {
ret = sys_heap_aligned_realloc(&heap->heap, ptr, sizeof(void *), bytes);
if (!IS_ENABLED(CONFIG_MULTITHREADING) ||
(ret != NULL) || K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
break;
}
timeout = sys_timepoint_timeout(end);
(void) z_pend_curr(&heap->lock, key, &heap->wait_q, timeout);
key = k_spin_lock(&heap->lock);
}
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap, realloc, heap, ptr, bytes, timeout, ret);
k_spin_unlock(&heap->lock, key);
return ret;
}
void k_heap_free(struct k_heap *heap, void *mem)
{
k_spinlock_key_t key = k_spin_lock(&heap->lock);
sys_heap_free(&heap->heap, mem);
SYS_PORT_TRACING_OBJ_FUNC(k_heap, free, heap);
if (IS_ENABLED(CONFIG_MULTITHREADING) && (z_unpend_all(&heap->wait_q) != 0)) {
z_reschedule(&heap->lock, key);
} else {
k_spin_unlock(&heap->lock, key);
}
}
``` | /content/code_sandbox/kernel/kheap.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,247 |
```c
/*
*
*/
/**
* @file
*
* @brief Pipes
*/
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/toolchain.h>
#include <ksched.h>
#include <wait_q.h>
#include <zephyr/init.h>
#include <zephyr/internal/syscall_handler.h>
#include <kernel_internal.h>
#include <zephyr/sys/check.h>
struct waitq_walk_data {
sys_dlist_t *list;
size_t bytes_requested;
size_t bytes_available;
};
static int pipe_get_internal(k_spinlock_key_t key, struct k_pipe *pipe,
void *data, size_t bytes_to_read,
size_t *bytes_read, size_t min_xfer,
k_timeout_t timeout);
#ifdef CONFIG_OBJ_CORE_PIPE
static struct k_obj_type obj_type_pipe;
#endif /* CONFIG_OBJ_CORE_PIPE */
void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size)
{
pipe->buffer = buffer;
pipe->size = size;
pipe->bytes_used = 0U;
pipe->read_index = 0U;
pipe->write_index = 0U;
pipe->lock = (struct k_spinlock){};
z_waitq_init(&pipe->wait_q.writers);
z_waitq_init(&pipe->wait_q.readers);
SYS_PORT_TRACING_OBJ_INIT(k_pipe, pipe);
pipe->flags = 0;
#if defined(CONFIG_POLL)
sys_dlist_init(&pipe->poll_events);
#endif /* CONFIG_POLL */
k_object_init(pipe);
#ifdef CONFIG_OBJ_CORE_PIPE
k_obj_core_init_and_link(K_OBJ_CORE(pipe), &obj_type_pipe);
#endif /* CONFIG_OBJ_CORE_PIPE */
}
int z_impl_k_pipe_alloc_init(struct k_pipe *pipe, size_t size)
{
void *buffer;
int ret;
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_pipe, alloc_init, pipe);
if (size != 0U) {
buffer = z_thread_malloc(size);
if (buffer != NULL) {
k_pipe_init(pipe, buffer, size);
pipe->flags = K_PIPE_FLAG_ALLOC;
ret = 0;
} else {
ret = -ENOMEM;
}
} else {
k_pipe_init(pipe, NULL, 0U);
ret = 0;
}
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, alloc_init, pipe, ret);
return ret;
}
#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_pipe_alloc_init(struct k_pipe *pipe, size_t size)
{
K_OOPS(K_SYSCALL_OBJ_NEVER_INIT(pipe, K_OBJ_PIPE));
return z_impl_k_pipe_alloc_init(pipe, size);
}
#include <zephyr/syscalls/k_pipe_alloc_init_mrsh.c>
#endif /* CONFIG_USERSPACE */
static inline void handle_poll_events(struct k_pipe *pipe)
{
#ifdef CONFIG_POLL
z_handle_obj_poll_events(&pipe->poll_events, K_POLL_STATE_PIPE_DATA_AVAILABLE);
#else
ARG_UNUSED(pipe);
#endif /* CONFIG_POLL */
}
void z_impl_k_pipe_flush(struct k_pipe *pipe)
{
size_t bytes_read;
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_pipe, flush, pipe);
k_spinlock_key_t key = k_spin_lock(&pipe->lock);
(void) pipe_get_internal(key, pipe, NULL, (size_t) -1, &bytes_read, 0U,
K_NO_WAIT);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, flush, pipe);
}
#ifdef CONFIG_USERSPACE
void z_vrfy_k_pipe_flush(struct k_pipe *pipe)
{
K_OOPS(K_SYSCALL_OBJ(pipe, K_OBJ_PIPE));
z_impl_k_pipe_flush(pipe);
}
#include <zephyr/syscalls/k_pipe_flush_mrsh.c>
#endif /* CONFIG_USERSPACE */
void z_impl_k_pipe_buffer_flush(struct k_pipe *pipe)
{
size_t bytes_read;
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_pipe, buffer_flush, pipe);
k_spinlock_key_t key = k_spin_lock(&pipe->lock);
if (pipe->buffer != NULL) {
(void) pipe_get_internal(key, pipe, NULL, pipe->size,
&bytes_read, 0U, K_NO_WAIT);
} else {
k_spin_unlock(&pipe->lock, key);
}
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, buffer_flush, pipe);
}
#ifdef CONFIG_USERSPACE
void z_vrfy_k_pipe_buffer_flush(struct k_pipe *pipe)
{
K_OOPS(K_SYSCALL_OBJ(pipe, K_OBJ_PIPE));
z_impl_k_pipe_buffer_flush(pipe);
}
#endif /* CONFIG_USERSPACE */
int k_pipe_cleanup(struct k_pipe *pipe)
{
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_pipe, cleanup, pipe);
k_spinlock_key_t key = k_spin_lock(&pipe->lock);
CHECKIF((z_waitq_head(&pipe->wait_q.readers) != NULL) ||
(z_waitq_head(&pipe->wait_q.writers) != NULL)) {
k_spin_unlock(&pipe->lock, key);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, cleanup, pipe, -EAGAIN);
return -EAGAIN;
}
if ((pipe->flags & K_PIPE_FLAG_ALLOC) != 0U) {
k_free(pipe->buffer);
pipe->buffer = NULL;
/*
* Freeing the buffer changes the pipe into a bufferless
* pipe. Reset the pipe's counters to prevent malfunction.
*/
pipe->size = 0U;
pipe->bytes_used = 0U;
pipe->read_index = 0U;
pipe->write_index = 0U;
pipe->flags &= ~K_PIPE_FLAG_ALLOC;
}
k_spin_unlock(&pipe->lock, key);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, cleanup, pipe, 0U);
return 0;
}
/**
* @brief Copy bytes from @a src to @a dest
*
* @return Number of bytes copied
*/
static size_t pipe_xfer(unsigned char *dest, size_t dest_size,
const unsigned char *src, size_t src_size)
{
size_t num_bytes = MIN(dest_size, src_size);
if (dest == NULL) {
/* Data is being flushed. Pretend the data was copied. */
return num_bytes;
}
(void) memcpy(dest, src, num_bytes);
return num_bytes;
}
/**
* @brief Callback routine used to populate wait list
*
* @return 1 to stop further walking; 0 to continue walking
*/
static int pipe_walk_op(struct k_thread *thread, void *data)
{
struct waitq_walk_data *walk_data = data;
struct _pipe_desc *desc = (struct _pipe_desc *)thread->base.swap_data;
sys_dlist_append(walk_data->list, &desc->node);
walk_data->bytes_available += desc->bytes_to_xfer;
if (walk_data->bytes_available >= walk_data->bytes_requested) {
return 1;
}
return 0;
}
/**
* @brief Popluate pipe descriptors for copying to/from waiters' buffers
*
* This routine cycles through the waiters on the wait queue and creates
* a list of threads that will have data directly copied to / read from
* their buffers. This list helps us avoid double copying later.
*
* @return # of bytes available for direct copying
*/
static size_t pipe_waiter_list_populate(sys_dlist_t *list,
_wait_q_t *wait_q,
size_t bytes_to_xfer)
{
struct waitq_walk_data walk_data;
walk_data.list = list;
walk_data.bytes_requested = bytes_to_xfer;
walk_data.bytes_available = 0;
(void) z_sched_waitq_walk(wait_q, pipe_walk_op, &walk_data);
return walk_data.bytes_available;
}
/**
* @brief Populate pipe descriptors for copying to/from pipe buffer
*
* This routine is only called if the pipe buffer is not empty (when reading),
* or if not full (when writing).
*/
static size_t pipe_buffer_list_populate(sys_dlist_t *list,
struct _pipe_desc *desc,
unsigned char *buffer,
size_t size,
size_t start,
size_t end)
{
sys_dlist_append(list, &desc[0].node);
desc[0].thread = NULL;
desc[0].buffer = &buffer[start];
if (start < end) {
desc[0].bytes_to_xfer = end - start;
return end - start;
}
desc[0].bytes_to_xfer = size - start;
desc[1].thread = NULL;
desc[1].buffer = &buffer[0];
desc[1].bytes_to_xfer = end;
sys_dlist_append(list, &desc[1].node);
return size - start + end;
}
/**
* @brief Determine the correct return code
*
* Bytes Xferred No Wait Wait
* >= Minimum 0 0
* < Minimum -EIO* -EAGAIN
*
* * The "-EIO No Wait" case was already checked after the list of pipe
* descriptors was created.
*
* @return See table above
*/
static int pipe_return_code(size_t min_xfer, size_t bytes_remaining,
size_t bytes_requested)
{
if ((bytes_requested - bytes_remaining) >= min_xfer) {
/*
* At least the minimum number of requested
* bytes have been transferred.
*/
return 0;
}
return -EAGAIN;
}
/**
* @brief Copy data from source(s) to destination(s)
*/
static size_t pipe_write(struct k_pipe *pipe, sys_dlist_t *src_list,
sys_dlist_t *dest_list, bool *reschedule)
{
struct _pipe_desc *src;
struct _pipe_desc *dest;
size_t bytes_copied;
size_t num_bytes_written = 0U;
src = (struct _pipe_desc *)sys_dlist_get(src_list);
dest = (struct _pipe_desc *)sys_dlist_get(dest_list);
while ((src != NULL) && (dest != NULL)) {
bytes_copied = pipe_xfer(dest->buffer, dest->bytes_to_xfer,
src->buffer, src->bytes_to_xfer);
num_bytes_written += bytes_copied;
dest->buffer += bytes_copied;
dest->bytes_to_xfer -= bytes_copied;
src->buffer += bytes_copied;
src->bytes_to_xfer -= bytes_copied;
if (dest->thread == NULL) {
/* Writing to the pipe buffer. Update details. */
pipe->bytes_used += bytes_copied;
pipe->write_index += bytes_copied;
if (pipe->write_index >= pipe->size) {
pipe->write_index -= pipe->size;
}
} else if (dest->bytes_to_xfer == 0U) {
/* The thread's read request has been satisfied. */
z_unpend_thread(dest->thread);
z_ready_thread(dest->thread);
*reschedule = true;
}
if (src->bytes_to_xfer == 0U) {
src = (struct _pipe_desc *)sys_dlist_get(src_list);
}
if (dest->bytes_to_xfer == 0U) {
dest = (struct _pipe_desc *)sys_dlist_get(dest_list);
}
}
return num_bytes_written;
}
int z_impl_k_pipe_put(struct k_pipe *pipe, const void *data,
size_t bytes_to_write, size_t *bytes_written,
size_t min_xfer, k_timeout_t timeout)
{
struct _pipe_desc pipe_desc[2];
struct _pipe_desc isr_desc;
struct _pipe_desc *src_desc;
sys_dlist_t dest_list;
sys_dlist_t src_list;
size_t bytes_can_write;
bool reschedule_needed = false;
__ASSERT(((arch_is_in_isr() == false) ||
K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_pipe, put, pipe, timeout);
CHECKIF((min_xfer > bytes_to_write) || (bytes_written == NULL)) {
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, put, pipe, timeout,
-EINVAL);
return -EINVAL;
}
sys_dlist_init(&src_list);
sys_dlist_init(&dest_list);
k_spinlock_key_t key = k_spin_lock(&pipe->lock);
/*
* First, write to any waiting readers, if any exist.
* Second, write to the pipe buffer, if it exists.
*/
bytes_can_write = pipe_waiter_list_populate(&dest_list,
&pipe->wait_q.readers,
bytes_to_write);
if (pipe->bytes_used != pipe->size) {
bytes_can_write += pipe_buffer_list_populate(&dest_list,
pipe_desc,
pipe->buffer,
pipe->size,
pipe->write_index,
pipe->read_index);
}
if ((bytes_can_write < min_xfer) &&
(K_TIMEOUT_EQ(timeout, K_NO_WAIT))) {
/* The request can not be fulfilled. */
k_spin_unlock(&pipe->lock, key);
*bytes_written = 0U;
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, put, pipe,
timeout, -EIO);
return -EIO;
}
/*
* Do not use the pipe descriptor stored within k_thread if
* invoked from within an ISR as that is not safe to do.
*/
src_desc = k_is_in_isr() ? &isr_desc : &_current->pipe_desc;
src_desc->buffer = (unsigned char *)data;
src_desc->bytes_to_xfer = bytes_to_write;
src_desc->thread = _current;
sys_dlist_append(&src_list, &src_desc->node);
*bytes_written = pipe_write(pipe, &src_list,
&dest_list, &reschedule_needed);
/*
* Only handle poll events if the pipe has had some bytes written and
* there are bytes remaining after any pending readers have read from it
*/
if ((pipe->bytes_used != 0U) && (*bytes_written != 0U)) {
handle_poll_events(pipe);
}
/*
* The immediate success conditions below are backwards
* compatible with an earlier pipe implementation.
*/
if ((*bytes_written == bytes_to_write) ||
(K_TIMEOUT_EQ(timeout, K_NO_WAIT)) ||
((*bytes_written >= min_xfer) && (min_xfer > 0U))) {
/* The minimum amount of data has been copied */
if (reschedule_needed) {
z_reschedule(&pipe->lock, key);
} else {
k_spin_unlock(&pipe->lock, key);
}
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, put, pipe, timeout, 0);
return 0;
}
/* The minimum amount of data has not been copied. Block. */
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_pipe, put, pipe, timeout);
_current->base.swap_data = src_desc;
z_sched_wait(&pipe->lock, key, &pipe->wait_q.writers, timeout, NULL);
/*
* On SMP systems, threads in the processing list may timeout before
* the data has finished copying. The following spin lock/unlock pair
* prevents those threads from executing further until the data copying
* is complete.
*/
key = k_spin_lock(&pipe->lock);
k_spin_unlock(&pipe->lock, key);
*bytes_written = bytes_to_write - src_desc->bytes_to_xfer;
int ret = pipe_return_code(min_xfer, src_desc->bytes_to_xfer,
bytes_to_write);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, put, pipe, timeout, ret);
return ret;
}
#ifdef CONFIG_USERSPACE
int z_vrfy_k_pipe_put(struct k_pipe *pipe, const void *data,
size_t bytes_to_write, size_t *bytes_written,
size_t min_xfer, k_timeout_t timeout)
{
K_OOPS(K_SYSCALL_OBJ(pipe, K_OBJ_PIPE));
K_OOPS(K_SYSCALL_MEMORY_WRITE(bytes_written, sizeof(*bytes_written)));
K_OOPS(K_SYSCALL_MEMORY_READ(data, bytes_to_write));
return z_impl_k_pipe_put(pipe, data,
bytes_to_write, bytes_written, min_xfer,
timeout);
}
#include <zephyr/syscalls/k_pipe_put_mrsh.c>
#endif /* CONFIG_USERSPACE */
static int pipe_get_internal(k_spinlock_key_t key, struct k_pipe *pipe,
void *data, size_t bytes_to_read,
size_t *bytes_read, size_t min_xfer,
k_timeout_t timeout)
{
sys_dlist_t src_list;
struct _pipe_desc pipe_desc[2];
struct _pipe_desc isr_desc;
struct _pipe_desc *dest_desc;
struct _pipe_desc *src_desc;
size_t num_bytes_read = 0U;
size_t bytes_copied;
size_t bytes_can_read = 0U;
bool reschedule_needed = false;
/*
* Data copying takes place in the following order.
* 1. Copy data from the pipe buffer to the receive buffer.
* 2. Copy data from the waiting writer(s) to the receive buffer.
* 3. Refill the pipe buffer from the waiting writer(s).
*/
sys_dlist_init(&src_list);
if (pipe->bytes_used != 0) {
bytes_can_read = pipe_buffer_list_populate(&src_list,
pipe_desc,
pipe->buffer,
pipe->size,
pipe->read_index,
pipe->write_index);
}
bytes_can_read += pipe_waiter_list_populate(&src_list,
&pipe->wait_q.writers,
bytes_to_read);
if ((bytes_can_read < min_xfer) &&
(K_TIMEOUT_EQ(timeout, K_NO_WAIT))) {
/* The request can not be fulfilled. */
k_spin_unlock(&pipe->lock, key);
*bytes_read = 0;
return -EIO;
}
/*
* Do not use the pipe descriptor stored within k_thread if
* invoked from within an ISR as that is not safe to do.
*/
dest_desc = k_is_in_isr() ? &isr_desc : &_current->pipe_desc;
dest_desc->buffer = data;
dest_desc->bytes_to_xfer = bytes_to_read;
dest_desc->thread = _current;
src_desc = (struct _pipe_desc *)sys_dlist_get(&src_list);
while (src_desc != NULL) {
bytes_copied = pipe_xfer(dest_desc->buffer,
dest_desc->bytes_to_xfer,
src_desc->buffer,
src_desc->bytes_to_xfer);
num_bytes_read += bytes_copied;
src_desc->buffer += bytes_copied;
src_desc->bytes_to_xfer -= bytes_copied;
if (dest_desc->buffer != NULL) {
dest_desc->buffer += bytes_copied;
}
dest_desc->bytes_to_xfer -= bytes_copied;
if (src_desc->thread == NULL) {
/* Reading from the pipe buffer. Update details. */
pipe->bytes_used -= bytes_copied;
pipe->read_index += bytes_copied;
if (pipe->read_index >= pipe->size) {
pipe->read_index -= pipe->size;
}
} else if (src_desc->bytes_to_xfer == 0U) {
/* The thread's write request has been satisfied. */
z_unpend_thread(src_desc->thread);
z_ready_thread(src_desc->thread);
reschedule_needed = true;
}
src_desc = (struct _pipe_desc *)sys_dlist_get(&src_list);
}
if (pipe->bytes_used != pipe->size) {
sys_dlist_t pipe_list;
/*
* The pipe is not full. If there are any waiting writers,
* refill the pipe.
*/
sys_dlist_init(&src_list);
sys_dlist_init(&pipe_list);
(void) pipe_waiter_list_populate(&src_list,
&pipe->wait_q.writers,
pipe->size - pipe->bytes_used);
(void) pipe_buffer_list_populate(&pipe_list, pipe_desc,
pipe->buffer, pipe->size,
pipe->write_index,
pipe->read_index);
(void) pipe_write(pipe, &src_list,
&pipe_list, &reschedule_needed);
}
/*
* The immediate success conditions below are backwards
* compatible with an earlier pipe implementation.
*/
if ((num_bytes_read == bytes_to_read) ||
(K_TIMEOUT_EQ(timeout, K_NO_WAIT)) ||
((num_bytes_read >= min_xfer) && (min_xfer > 0U))) {
/* The minimum amount of data has been copied */
*bytes_read = num_bytes_read;
if (reschedule_needed) {
z_reschedule(&pipe->lock, key);
} else {
k_spin_unlock(&pipe->lock, key);
}
return 0;
}
/* The minimum amount of data has not been copied. Block. */
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_pipe, get, pipe, timeout);
_current->base.swap_data = dest_desc;
z_sched_wait(&pipe->lock, key, &pipe->wait_q.readers, timeout, NULL);
/*
* On SMP systems, threads in the processing list may timeout before
* the data has finished copying. The following spin lock/unlock pair
* prevents those threads from executing further until the data copying
* is complete.
*/
key = k_spin_lock(&pipe->lock);
k_spin_unlock(&pipe->lock, key);
*bytes_read = bytes_to_read - dest_desc->bytes_to_xfer;
int ret = pipe_return_code(min_xfer, dest_desc->bytes_to_xfer,
bytes_to_read);
return ret;
}
int z_impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
size_t *bytes_read, size_t min_xfer, k_timeout_t timeout)
{
__ASSERT(((arch_is_in_isr() == false) ||
K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_pipe, get, pipe, timeout);
CHECKIF((min_xfer > bytes_to_read) || (bytes_read == NULL)) {
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, get, pipe,
timeout, -EINVAL);
return -EINVAL;
}
k_spinlock_key_t key = k_spin_lock(&pipe->lock);
int ret = pipe_get_internal(key, pipe, data, bytes_to_read, bytes_read,
min_xfer, timeout);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, get, pipe, timeout, ret);
return ret;
}
#ifdef CONFIG_USERSPACE
int z_vrfy_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
size_t *bytes_read, size_t min_xfer, k_timeout_t timeout)
{
K_OOPS(K_SYSCALL_OBJ(pipe, K_OBJ_PIPE));
K_OOPS(K_SYSCALL_MEMORY_WRITE(bytes_read, sizeof(*bytes_read)));
K_OOPS(K_SYSCALL_MEMORY_WRITE(data, bytes_to_read));
return z_impl_k_pipe_get(pipe, data,
bytes_to_read, bytes_read, min_xfer,
timeout);
}
#include <zephyr/syscalls/k_pipe_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
size_t z_impl_k_pipe_read_avail(struct k_pipe *pipe)
{
size_t res;
k_spinlock_key_t key;
/* Buffer and size are fixed. No need to spin. */
if ((pipe->buffer == NULL) || (pipe->size == 0U)) {
res = 0;
goto out;
}
key = k_spin_lock(&pipe->lock);
if (pipe->read_index == pipe->write_index) {
res = pipe->bytes_used;
} else if (pipe->read_index < pipe->write_index) {
res = pipe->write_index - pipe->read_index;
} else {
res = pipe->size - (pipe->read_index - pipe->write_index);
}
k_spin_unlock(&pipe->lock, key);
out:
return res;
}
#ifdef CONFIG_USERSPACE
size_t z_vrfy_k_pipe_read_avail(struct k_pipe *pipe)
{
K_OOPS(K_SYSCALL_OBJ(pipe, K_OBJ_PIPE));
return z_impl_k_pipe_read_avail(pipe);
}
#include <zephyr/syscalls/k_pipe_read_avail_mrsh.c>
#endif /* CONFIG_USERSPACE */
size_t z_impl_k_pipe_write_avail(struct k_pipe *pipe)
{
size_t res;
k_spinlock_key_t key;
/* Buffer and size are fixed. No need to spin. */
if ((pipe->buffer == NULL) || (pipe->size == 0U)) {
res = 0;
goto out;
}
key = k_spin_lock(&pipe->lock);
if (pipe->write_index == pipe->read_index) {
res = pipe->size - pipe->bytes_used;
} else if (pipe->write_index < pipe->read_index) {
res = pipe->read_index - pipe->write_index;
} else {
res = pipe->size - (pipe->write_index - pipe->read_index);
}
k_spin_unlock(&pipe->lock, key);
out:
return res;
}
#ifdef CONFIG_USERSPACE
size_t z_vrfy_k_pipe_write_avail(struct k_pipe *pipe)
{
K_OOPS(K_SYSCALL_OBJ(pipe, K_OBJ_PIPE));
return z_impl_k_pipe_write_avail(pipe);
}
#include <zephyr/syscalls/k_pipe_write_avail_mrsh.c>
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_OBJ_CORE_PIPE
static int init_pipe_obj_core_list(void)
{
/* Initialize pipe object type */
z_obj_type_init(&obj_type_pipe, K_OBJ_TYPE_PIPE_ID,
offsetof(struct k_pipe, obj_core));
/* Initialize and link statically defined pipes */
STRUCT_SECTION_FOREACH(k_pipe, pipe) {
k_obj_core_init_and_link(K_OBJ_CORE(pipe), &obj_type_pipe);
}
return 0;
}
SYS_INIT(init_pipe_obj_core_list, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJ_CORE_PIPE */
``` | /content/code_sandbox/kernel/pipes.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 5,673 |
```c
/**
*/
#include <zephyr/kernel.h>
#include <kswap.h>
#include <ksched.h>
#include <ipi.h>
#ifdef CONFIG_TRACE_SCHED_IPI
extern void z_trace_sched_ipi(void);
#endif
void flag_ipi(uint32_t ipi_mask)
{
#if defined(CONFIG_SCHED_IPI_SUPPORTED)
if (arch_num_cpus() > 1) {
atomic_or(&_kernel.pending_ipi, (atomic_val_t)ipi_mask);
}
#endif /* CONFIG_SCHED_IPI_SUPPORTED */
}
/* Create a bitmask of CPUs that need an IPI. Note: sched_spinlock is held. */
atomic_val_t ipi_mask_create(struct k_thread *thread)
{
if (!IS_ENABLED(CONFIG_IPI_OPTIMIZE)) {
return (CONFIG_MP_MAX_NUM_CPUS > 1) ? IPI_ALL_CPUS_MASK : 0;
}
uint32_t ipi_mask = 0;
uint32_t num_cpus = (uint32_t)arch_num_cpus();
uint32_t id = _current_cpu->id;
struct k_thread *cpu_thread;
bool executable_on_cpu = true;
for (uint32_t i = 0; i < num_cpus; i++) {
if (id == i) {
continue;
}
/*
* An IPI absolutely does not need to be sent if ...
* 1. the CPU is not active, or
* 2. <thread> can not execute on the target CPU
* ... and might not need to be sent if ...
* 3. the target CPU's active thread is not preemptible, or
* 4. the target CPU's active thread has a higher priority
* (Items 3 & 4 may be overridden by a metaIRQ thread)
*/
#if defined(CONFIG_SCHED_CPU_MASK)
executable_on_cpu = ((thread->base.cpu_mask & BIT(i)) != 0);
#endif
cpu_thread = _kernel.cpus[i].current;
if ((cpu_thread != NULL) &&
(((z_sched_prio_cmp(cpu_thread, thread) < 0) &&
(thread_is_preemptible(cpu_thread))) ||
thread_is_metairq(thread)) && executable_on_cpu) {
ipi_mask |= BIT(i);
}
}
return (atomic_val_t)ipi_mask;
}
void signal_pending_ipi(void)
{
/* Synchronization note: you might think we need to lock these
* two steps, but an IPI is idempotent. It's OK if we do it
* twice. All we require is that if a CPU sees the flag true,
* it is guaranteed to send the IPI, and if a core sets
* pending_ipi, the IPI will be sent the next time through
* this code.
*/
#if defined(CONFIG_SCHED_IPI_SUPPORTED)
if (arch_num_cpus() > 1) {
uint32_t cpu_bitmap;
cpu_bitmap = (uint32_t)atomic_clear(&_kernel.pending_ipi);
if (cpu_bitmap != 0) {
#ifdef CONFIG_ARCH_HAS_DIRECTED_IPIS
arch_sched_directed_ipi(cpu_bitmap);
#else
arch_sched_broadcast_ipi();
#endif
}
}
#endif /* CONFIG_SCHED_IPI_SUPPORTED */
}
void z_sched_ipi(void)
{
/* NOTE: When adding code to this, make sure this is called
* at appropriate location when !CONFIG_SCHED_IPI_SUPPORTED.
*/
#ifdef CONFIG_TRACE_SCHED_IPI
z_trace_sched_ipi();
#endif /* CONFIG_TRACE_SCHED_IPI */
#ifdef CONFIG_TIMESLICING
if (thread_is_sliceable(_current)) {
z_time_slice();
}
#endif /* CONFIG_TIMESLICING */
}
``` | /content/code_sandbox/kernel/ipi.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 787 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <zephyr/kernel/obj_core.h>
static struct k_spinlock lock;
sys_slist_t z_obj_type_list = SYS_SLIST_STATIC_INIT(&z_obj_type_list);
struct k_obj_type *z_obj_type_init(struct k_obj_type *type,
uint32_t id, size_t off)
{
sys_slist_init(&type->list);
sys_slist_append(&z_obj_type_list, &type->node);
type->id = id;
type->obj_core_offset = off;
return type;
}
void k_obj_core_init(struct k_obj_core *obj_core, struct k_obj_type *type)
{
obj_core->node.next = NULL;
obj_core->type = type;
#ifdef CONFIG_OBJ_CORE_STATS
obj_core->stats = NULL;
#endif /* CONFIG_OBJ_CORE_STATS */
}
void k_obj_core_link(struct k_obj_core *obj_core)
{
k_spinlock_key_t key = k_spin_lock(&lock);
sys_slist_append(&obj_core->type->list, &obj_core->node);
k_spin_unlock(&lock, key);
}
void k_obj_core_init_and_link(struct k_obj_core *obj_core,
struct k_obj_type *type)
{
k_obj_core_init(obj_core, type);
k_obj_core_link(obj_core);
}
void k_obj_core_unlink(struct k_obj_core *obj_core)
{
k_spinlock_key_t key = k_spin_lock(&lock);
sys_slist_find_and_remove(&obj_core->type->list, &obj_core->node);
k_spin_unlock(&lock, key);
}
struct k_obj_type *k_obj_type_find(uint32_t type_id)
{
struct k_obj_type *type;
struct k_obj_type *rv = NULL;
sys_snode_t *node;
k_spinlock_key_t key = k_spin_lock(&lock);
SYS_SLIST_FOR_EACH_NODE(&z_obj_type_list, node) {
type = CONTAINER_OF(node, struct k_obj_type, node);
if (type->id == type_id) {
rv = type;
break;
}
}
k_spin_unlock(&lock, key);
return rv;
}
int k_obj_type_walk_locked(struct k_obj_type *type,
int (*func)(struct k_obj_core *, void *),
void *data)
{
k_spinlock_key_t key;
struct k_obj_core *obj_core;
sys_snode_t *node;
int status = 0;
key = k_spin_lock(&lock);
SYS_SLIST_FOR_EACH_NODE(&type->list, node) {
obj_core = CONTAINER_OF(node, struct k_obj_core, node);
status = func(obj_core, data);
if (status != 0) {
break;
}
}
k_spin_unlock(&lock, key);
return status;
}
int k_obj_type_walk_unlocked(struct k_obj_type *type,
int (*func)(struct k_obj_core *, void *),
void *data)
{
struct k_obj_core *obj_core;
sys_snode_t *node;
sys_snode_t *next;
int status = 0;
SYS_SLIST_FOR_EACH_NODE_SAFE(&type->list, node, next) {
obj_core = CONTAINER_OF(node, struct k_obj_core, node);
status = func(obj_core, data);
if (status != 0) {
break;
}
}
return status;
}
#ifdef CONFIG_OBJ_CORE_STATS
int k_obj_core_stats_register(struct k_obj_core *obj_core, void *stats,
size_t stats_len)
{
int rv;
k_spinlock_key_t key = k_spin_lock(&lock);
if (obj_core->type->stats_desc == NULL) {
/* Object type not configured for statistics. */
rv = -ENOTSUP;
} else if (obj_core->type->stats_desc->raw_size != stats_len) {
/* Buffer size mismatch */
rv = -EINVAL;
} else {
obj_core->stats = stats;
rv = 0;
}
k_spin_unlock(&lock, key);
return rv;
}
int k_obj_core_stats_deregister(struct k_obj_core *obj_core)
{
int rv;
k_spinlock_key_t key = k_spin_lock(&lock);
if (obj_core->type->stats_desc == NULL) {
/* Object type not configured for statistics. */
rv = -ENOTSUP;
} else {
obj_core->stats = NULL;
rv = 0;
}
k_spin_unlock(&lock, key);
return rv;
}
int k_obj_core_stats_raw(struct k_obj_core *obj_core, void *stats,
size_t stats_len)
{
int rv;
struct k_obj_core_stats_desc *desc;
k_spinlock_key_t key = k_spin_lock(&lock);
desc = obj_core->type->stats_desc;
if ((desc == NULL) || (desc->raw == NULL)) {
/* The object type is not configured for this operation */
rv = -ENOTSUP;
} else if ((desc->raw_size != stats_len) || (obj_core->stats == NULL)) {
/*
* Either the size of the stats buffer is wrong or
* the kernel object was not registered for statistics.
*/
rv = -EINVAL;
} else {
rv = desc->raw(obj_core, stats);
}
k_spin_unlock(&lock, key);
return rv;
}
int k_obj_core_stats_query(struct k_obj_core *obj_core, void *stats,
size_t stats_len)
{
int rv;
struct k_obj_core_stats_desc *desc;
k_spinlock_key_t key = k_spin_lock(&lock);
desc = obj_core->type->stats_desc;
if ((desc == NULL) || (desc->query == NULL)) {
/* The object type is not configured for this operation */
rv = -ENOTSUP;
} else if ((desc->query_size != stats_len) || (obj_core->stats == NULL)) {
/*
* Either the size of the stats buffer is wrong or
* the kernel object was not registered for statistics.
*/
rv = -EINVAL;
} else {
rv = desc->query(obj_core, stats);
}
k_spin_unlock(&lock, key);
return rv;
}
int k_obj_core_stats_reset(struct k_obj_core *obj_core)
{
int rv;
struct k_obj_core_stats_desc *desc;
k_spinlock_key_t key = k_spin_lock(&lock);
desc = obj_core->type->stats_desc;
if ((desc == NULL) || (desc->reset == NULL)) {
/* The object type is not configured for this operation */
rv = -ENOTSUP;
} else if (obj_core->stats == NULL) {
/* This kernel object is not configured for statistics */
rv = -EINVAL;
} else {
rv = desc->reset(obj_core);
}
k_spin_unlock(&lock, key);
return rv;
}
int k_obj_core_stats_disable(struct k_obj_core *obj_core)
{
int rv;
struct k_obj_core_stats_desc *desc;
k_spinlock_key_t key = k_spin_lock(&lock);
desc = obj_core->type->stats_desc;
if ((desc == NULL) || (desc->disable == NULL)) {
/* The object type is not configured for this operation */
rv = -ENOTSUP;
} else if (obj_core->stats == NULL) {
/* This kernel object is not configured for statistics */
rv = -EINVAL;
} else {
rv = desc->disable(obj_core);
}
k_spin_unlock(&lock, key);
return rv;
}
int k_obj_core_stats_enable(struct k_obj_core *obj_core)
{
int rv;
struct k_obj_core_stats_desc *desc;
k_spinlock_key_t key = k_spin_lock(&lock);
desc = obj_core->type->stats_desc;
if ((desc == NULL) || (desc->enable == NULL)) {
/* The object type is not configured for this operation */
rv = -ENOTSUP;
} else if (obj_core->stats == NULL) {
/* This kernel object is not configured for statistics */
rv = -EINVAL;
} else {
rv = desc->enable(obj_core);
}
k_spin_unlock(&lock, key);
return rv;
}
#endif /* CONFIG_OBJ_CORE_STATS */
``` | /content/code_sandbox/kernel/obj_core.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,767 |
```unknown
# Kernel configuration options
menu "Memory Domains"
config MAX_DOMAIN_PARTITIONS
int "Maximum number of partitions per memory domain"
default 16
range 0 $(UINT8_MAX)
depends on USERSPACE
help
Configure the maximum number of partitions per memory domain.
config ARCH_MEM_DOMAIN_DATA
bool
depends on USERSPACE
help
This hidden option is selected by the target architecture if
architecture-specific data is needed on a per memory domain basis.
If so, the architecture defines a 'struct arch_mem_domain' which is
embedded within every struct k_mem_domain. The architecture
must also define the arch_mem_domain_init() function to set this up
when a memory domain is created.
Typical uses might be a set of page tables for that memory domain.
config ARCH_MEM_DOMAIN_SYNCHRONOUS_API
bool
depends on USERSPACE
help
This hidden option is selected by the target architecture if
modifying a memory domain's partitions at runtime, or changing
a memory domain's thread membership requires synchronous calls
into the architecture layer.
If enabled, the architecture layer must implement the following
APIs:
arch_mem_domain_thread_add
arch_mem_domain_thread_remove
arch_mem_domain_partition_remove
arch_mem_domain_partition_add
It's important to note that although supervisor threads can be
members of memory domains, they have no implications on supervisor
thread access to memory. Memory domain APIs may only be invoked from
supervisor mode.
For these reasons, on uniprocessor systems unless memory access
policy is managed in separate software constructions like page
tables, these APIs don't need to be implemented as the underlying
memory management hardware will be reprogrammed on context switch
anyway.
config ARCH_MEM_DOMAIN_SUPPORTS_ISOLATED_STACKS
bool
help
This hidden option is selected by the target architecture if
the architecture supports isolating thread stacks for threads
within the same memory domain.
config MEM_DOMAIN_ISOLATED_STACKS
bool
default y
depends on (MMU || MPU) && ARCH_MEM_DOMAIN_SUPPORTS_ISOLATED_STACKS
help
If enabled, thread stacks within the same memory domains are
isolated which means threads within the same memory domains
have no access to others threads' stacks.
If disabled, threads within the same memory domains can access
other threads' stacks.
Regardless of this settings, threads cannot access the stacks of
threads outside of their domains.
endmenu
``` | /content/code_sandbox/kernel/Kconfig.mem_domain | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 534 |
```c
/*
*
*/
#include <stddef.h>
#include <string.h>
#include <zephyr/device.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/iterable_sections.h>
#include <zephyr/sys/kobject.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/toolchain.h>
/**
* @brief Initialize state for all static devices.
*
* The state object is always zero-initialized, but this may not be
* sufficient.
*/
void z_device_state_init(void)
{
STRUCT_SECTION_FOREACH(device, dev) {
k_object_init(dev);
}
}
const struct device *z_impl_device_get_binding(const char *name)
{
/* A null string identifies no device. So does an empty
* string.
*/
if ((name == NULL) || (name[0] == '\0')) {
return NULL;
}
/* Split the search into two loops: in the common scenario, where
* device names are stored in ROM (and are referenced by the user
* with CONFIG_* macros), only cheap pointer comparisons will be
* performed. Reserve string comparisons for a fallback.
*/
STRUCT_SECTION_FOREACH(device, dev) {
if (z_impl_device_is_ready(dev) && (dev->name == name)) {
return dev;
}
}
STRUCT_SECTION_FOREACH(device, dev) {
if (z_impl_device_is_ready(dev) && (strcmp(name, dev->name) == 0)) {
return dev;
}
}
return NULL;
}
#ifdef CONFIG_USERSPACE
static inline const struct device *z_vrfy_device_get_binding(const char *name)
{
char name_copy[Z_DEVICE_MAX_NAME_LEN];
if (k_usermode_string_copy(name_copy, name, sizeof(name_copy))
!= 0) {
return NULL;
}
return z_impl_device_get_binding(name_copy);
}
#include <zephyr/syscalls/device_get_binding_mrsh.c>
static inline bool z_vrfy_device_is_ready(const struct device *dev)
{
K_OOPS(K_SYSCALL_OBJ_INIT(dev, K_OBJ_ANY));
return z_impl_device_is_ready(dev);
}
#include <zephyr/syscalls/device_is_ready_mrsh.c>
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_DEVICE_DT_METADATA
const struct device *z_impl_device_get_by_dt_nodelabel(const char *nodelabel)
{
/* For consistency with device_get_binding(). */
if ((nodelabel == NULL) || (nodelabel[0] == '\0')) {
return NULL;
}
/* Unlike device_get_binding(), which has a history of being
* used in application code, we don't expect
* device_get_by_dt_nodelabel() to be used outside of
* scenarios where a human is in the loop. The shell is the
* main expected use case. Therefore, nodelabel is probably
* not the same pointer as any of the entry->nodelabel
* elements. We therefore skip the pointer comparison that
* device_get_binding() does.
*/
STRUCT_SECTION_FOREACH(device, dev) {
const struct device_dt_nodelabels *nl = device_get_dt_nodelabels(dev);
if (!z_impl_device_is_ready(dev) || nl == NULL) {
continue;
}
for (size_t i = 0; i < nl->num_nodelabels; i++) {
const char *dev_nodelabel = nl->nodelabels[i];
if (strcmp(nodelabel, dev_nodelabel) == 0) {
return dev;
}
}
}
return NULL;
}
#ifdef CONFIG_USERSPACE
static inline const struct device *z_vrfy_device_get_by_dt_nodelabel(const char *nodelabel)
{
char nl_copy[Z_DEVICE_MAX_NODELABEL_LEN];
if (k_usermode_string_copy(nl_copy, (char *)nodelabel, sizeof(nl_copy)) != 0) {
return NULL;
}
return z_impl_device_get_by_dt_nodelabel(nl_copy);
}
#include <zephyr/syscalls/device_get_by_dt_nodelabel_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_DEVICE_DT_METADATA */
size_t z_device_get_all_static(struct device const **devices)
{
size_t cnt;
STRUCT_SECTION_GET(device, 0, devices);
STRUCT_SECTION_COUNT(device, &cnt);
return cnt;
}
bool z_impl_device_is_ready(const struct device *dev)
{
/*
* if an invalid device pointer is passed as argument, this call
* reports the `device` as not ready for usage.
*/
if (dev == NULL) {
return false;
}
return dev->state->initialized && (dev->state->init_res == 0U);
}
#ifdef CONFIG_DEVICE_DEPS
static int device_visitor(const device_handle_t *handles,
size_t handle_count,
device_visitor_callback_t visitor_cb,
void *context)
{
/* Iterate over fixed devices */
for (size_t i = 0; i < handle_count; ++i) {
device_handle_t dh = handles[i];
const struct device *rdev = device_from_handle(dh);
int rc = visitor_cb(rdev, context);
if (rc < 0) {
return rc;
}
}
return handle_count;
}
int device_required_foreach(const struct device *dev,
device_visitor_callback_t visitor_cb,
void *context)
{
size_t handle_count = 0;
const device_handle_t *handles = device_required_handles_get(dev, &handle_count);
return device_visitor(handles, handle_count, visitor_cb, context);
}
int device_supported_foreach(const struct device *dev,
device_visitor_callback_t visitor_cb,
void *context)
{
size_t handle_count = 0;
const device_handle_t *handles = device_supported_handles_get(dev, &handle_count);
return device_visitor(handles, handle_count, visitor_cb, context);
}
#endif /* CONFIG_DEVICE_DEPS */
``` | /content/code_sandbox/kernel/device.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,272 |
```c
/*
*
*/
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <zephyr/sys/__assert.h>
#include <stdbool.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/check.h>
#include <zephyr/sys/libc-hooks.h>
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
struct k_spinlock z_mem_domain_lock;
static uint8_t max_partitions;
struct k_mem_domain k_mem_domain_default;
static bool check_add_partition(struct k_mem_domain *domain,
struct k_mem_partition *part)
{
int i;
uintptr_t pstart, pend, dstart, dend;
if (part == NULL) {
LOG_ERR("NULL k_mem_partition provided");
return false;
}
#ifdef CONFIG_EXECUTE_XOR_WRITE
/* Arches where execution cannot be disabled should always return
* false to this check
*/
if (K_MEM_PARTITION_IS_EXECUTABLE(part->attr) &&
K_MEM_PARTITION_IS_WRITABLE(part->attr)) {
LOG_ERR("partition is writable and executable <start %lx>",
part->start);
return false;
}
#endif /* CONFIG_EXECUTE_XOR_WRITE */
if (part->size == 0U) {
LOG_ERR("zero sized partition at %p with base 0x%lx",
part, part->start);
return false;
}
pstart = part->start;
pend = part->start + part->size;
if (pend <= pstart) {
LOG_ERR("invalid partition %p, wraparound detected. base 0x%lx size %zu",
part, part->start, part->size);
return false;
}
/* Check that this partition doesn't overlap any existing ones already
* in the domain
*/
for (i = 0; i < domain->num_partitions; i++) {
struct k_mem_partition *dpart = &domain->partitions[i];
if (dpart->size == 0U) {
/* Unused slot */
continue;
}
dstart = dpart->start;
dend = dstart + dpart->size;
if (pend > dstart && dend > pstart) {
LOG_ERR("partition %p base %lx (size %zu) overlaps existing base %lx (size %zu)",
part, part->start, part->size,
dpart->start, dpart->size);
return false;
}
}
return true;
}
int k_mem_domain_init(struct k_mem_domain *domain, uint8_t num_parts,
struct k_mem_partition *parts[])
{
k_spinlock_key_t key;
int ret = 0;
CHECKIF(domain == NULL) {
ret = -EINVAL;
goto out;
}
CHECKIF(!(num_parts == 0U || parts != NULL)) {
LOG_ERR("parts array is NULL and num_parts is nonzero");
ret = -EINVAL;
goto out;
}
CHECKIF(!(num_parts <= max_partitions)) {
LOG_ERR("num_parts of %d exceeds maximum allowable partitions (%d)",
num_parts, max_partitions);
ret = -EINVAL;
goto out;
}
key = k_spin_lock(&z_mem_domain_lock);
domain->num_partitions = 0U;
(void)memset(domain->partitions, 0, sizeof(domain->partitions));
sys_dlist_init(&domain->mem_domain_q);
#ifdef CONFIG_ARCH_MEM_DOMAIN_DATA
ret = arch_mem_domain_init(domain);
if (ret != 0) {
LOG_ERR("architecture-specific initialization failed for domain %p with %d",
domain, ret);
ret = -ENOMEM;
goto unlock_out;
}
#endif /* CONFIG_ARCH_MEM_DOMAIN_DATA */
if (num_parts != 0U) {
uint32_t i;
for (i = 0U; i < num_parts; i++) {
CHECKIF(!check_add_partition(domain, parts[i])) {
LOG_ERR("invalid partition index %d (%p)",
i, parts[i]);
ret = -EINVAL;
goto unlock_out;
}
domain->partitions[i] = *parts[i];
domain->num_partitions++;
#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
int ret2 = arch_mem_domain_partition_add(domain, i);
ARG_UNUSED(ret2);
CHECKIF(ret2 != 0) {
ret = ret2;
}
#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */
}
}
unlock_out:
k_spin_unlock(&z_mem_domain_lock, key);
out:
return ret;
}
int k_mem_domain_add_partition(struct k_mem_domain *domain,
struct k_mem_partition *part)
{
int p_idx;
k_spinlock_key_t key;
int ret = 0;
CHECKIF(domain == NULL) {
ret = -EINVAL;
goto out;
}
CHECKIF(!check_add_partition(domain, part)) {
LOG_ERR("invalid partition %p", part);
ret = -EINVAL;
goto out;
}
key = k_spin_lock(&z_mem_domain_lock);
for (p_idx = 0; p_idx < max_partitions; p_idx++) {
/* A zero-sized partition denotes it's a free partition */
if (domain->partitions[p_idx].size == 0U) {
break;
}
}
CHECKIF(!(p_idx < max_partitions)) {
LOG_ERR("no free partition slots available");
ret = -ENOSPC;
goto unlock_out;
}
LOG_DBG("add partition base %lx size %zu to domain %p\n",
part->start, part->size, domain);
domain->partitions[p_idx].start = part->start;
domain->partitions[p_idx].size = part->size;
domain->partitions[p_idx].attr = part->attr;
domain->num_partitions++;
#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
ret = arch_mem_domain_partition_add(domain, p_idx);
#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */
unlock_out:
k_spin_unlock(&z_mem_domain_lock, key);
out:
return ret;
}
int k_mem_domain_remove_partition(struct k_mem_domain *domain,
struct k_mem_partition *part)
{
int p_idx;
k_spinlock_key_t key;
int ret = 0;
CHECKIF((domain == NULL) || (part == NULL)) {
ret = -EINVAL;
goto out;
}
key = k_spin_lock(&z_mem_domain_lock);
/* find a partition that matches the given start and size */
for (p_idx = 0; p_idx < max_partitions; p_idx++) {
if ((domain->partitions[p_idx].start == part->start) &&
(domain->partitions[p_idx].size == part->size)) {
break;
}
}
CHECKIF(!(p_idx < max_partitions)) {
LOG_ERR("no matching partition found");
ret = -ENOENT;
goto unlock_out;
}
LOG_DBG("remove partition base %lx size %zu from domain %p\n",
part->start, part->size, domain);
#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
ret = arch_mem_domain_partition_remove(domain, p_idx);
#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */
/* A zero-sized partition denotes it's a free partition */
domain->partitions[p_idx].size = 0U;
domain->num_partitions--;
unlock_out:
k_spin_unlock(&z_mem_domain_lock, key);
out:
return ret;
}
static int add_thread_locked(struct k_mem_domain *domain,
k_tid_t thread)
{
int ret = 0;
__ASSERT_NO_MSG(domain != NULL);
__ASSERT_NO_MSG(thread != NULL);
LOG_DBG("add thread %p to domain %p\n", thread, domain);
sys_dlist_append(&domain->mem_domain_q,
&thread->mem_domain_info.mem_domain_q_node);
thread->mem_domain_info.mem_domain = domain;
#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
ret = arch_mem_domain_thread_add(thread);
#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */
return ret;
}
static int remove_thread_locked(struct k_thread *thread)
{
int ret = 0;
__ASSERT_NO_MSG(thread != NULL);
LOG_DBG("remove thread %p from memory domain %p\n",
thread, thread->mem_domain_info.mem_domain);
sys_dlist_remove(&thread->mem_domain_info.mem_domain_q_node);
#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
ret = arch_mem_domain_thread_remove(thread);
#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */
return ret;
}
/* Called from thread object initialization */
void z_mem_domain_init_thread(struct k_thread *thread)
{
int ret;
k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);
/* New threads inherit memory domain configuration from parent */
ret = add_thread_locked(_current->mem_domain_info.mem_domain, thread);
__ASSERT_NO_MSG(ret == 0);
ARG_UNUSED(ret);
k_spin_unlock(&z_mem_domain_lock, key);
}
/* Called when thread aborts during teardown tasks. _sched_spinlock is held */
void z_mem_domain_exit_thread(struct k_thread *thread)
{
int ret;
k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);
ret = remove_thread_locked(thread);
__ASSERT_NO_MSG(ret == 0);
ARG_UNUSED(ret);
k_spin_unlock(&z_mem_domain_lock, key);
}
int k_mem_domain_add_thread(struct k_mem_domain *domain, k_tid_t thread)
{
int ret = 0;
k_spinlock_key_t key;
key = k_spin_lock(&z_mem_domain_lock);
if (thread->mem_domain_info.mem_domain != domain) {
ret = remove_thread_locked(thread);
if (ret == 0) {
ret = add_thread_locked(domain, thread);
}
}
k_spin_unlock(&z_mem_domain_lock, key);
return ret;
}
static int init_mem_domain_module(void)
{
int ret;
ARG_UNUSED(ret);
max_partitions = arch_mem_domain_max_partitions_get();
/*
* max_partitions must be less than or equal to
* CONFIG_MAX_DOMAIN_PARTITIONS, or would encounter array index
* out of bounds error.
*/
__ASSERT(max_partitions <= CONFIG_MAX_DOMAIN_PARTITIONS, "");
ret = k_mem_domain_init(&k_mem_domain_default, 0, NULL);
__ASSERT(ret == 0, "failed to init default mem domain");
#ifdef Z_LIBC_PARTITION_EXISTS
ret = k_mem_domain_add_partition(&k_mem_domain_default,
&z_libc_partition);
__ASSERT(ret == 0, "failed to add default libc mem partition");
#endif /* Z_LIBC_PARTITION_EXISTS */
return 0;
}
SYS_INIT(init_mem_domain_module, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
``` | /content/code_sandbox/kernel/mem_domain.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,324 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.