text
stringlengths
9
39.2M
dir
stringlengths
25
226
lang
stringclasses
163 values
created_date
timestamp[s]
updated_date
timestamp[s]
repo_name
stringclasses
751 values
repo_full_name
stringclasses
752 values
star
int64
1.01k
183k
len_tokens
int64
1
18.5M
```c /* * */ #define DT_DRV_COMPAT espressif_esp32_touch #include <zephyr/device.h> #include <zephyr/input/input.h> #include <zephyr/kernel.h> #include <zephyr/logging/log.h> #include <esp_err.h> #include <soc/soc_pins.h> #include <soc/periph_defs.h> #include <hal/touch_sensor_types.h> #include <hal/touch_sensor_hal.h> #include <driver/rtc_io.h> #include <esp_intr_alloc.h> LOG_MODULE_REGISTER(espressif_esp32_touch, CONFIG_INPUT_LOG_LEVEL); BUILD_ASSERT(!IS_ENABLED(CONFIG_COUNTER_RTC_ESP32), "Conflict detected: COUNTER_RTC_ESP32 enabled"); #define ESP32_SCAN_DONE_MAX_COUNT 5 #if defined(CONFIG_SOC_SERIES_ESP32) #define ESP32_RTC_INTR_MSK RTC_CNTL_TOUCH_INT_ST_M #elif defined(CONFIG_SOC_SERIES_ESP32S2) || defined(CONFIG_SOC_SERIES_ESP32S3) #define ESP32_RTC_INTR_MSK (RTC_CNTL_TOUCH_DONE_INT_ST_M | \ RTC_CNTL_TOUCH_ACTIVE_INT_ST_M | \ RTC_CNTL_TOUCH_INACTIVE_INT_ST_M | \ RTC_CNTL_TOUCH_SCAN_DONE_INT_ST_M | \ RTC_CNTL_TOUCH_TIMEOUT_INT_ST_M) #define ESP32_TOUCH_PAD_INTR_MASK (TOUCH_PAD_INTR_MASK_ACTIVE | \ TOUCH_PAD_INTR_MASK_INACTIVE | \ TOUCH_PAD_INTR_MASK_TIMEOUT | \ TOUCH_PAD_INTR_MASK_SCAN_DONE) #endif /* defined(CONFIG_SOC_SERIES_ESP32) */ struct esp32_touch_sensor_channel_config { int32_t channel_num; int32_t channel_sens; uint32_t zephyr_code; }; struct esp32_touch_sensor_config { uint32_t debounce_interval_ms; int num_channels; int href_microvolt_enum_idx; int lref_microvolt_enum_idx; int href_atten_microvolt_enum_idx; int filter_mode; int filter_debounce_cnt; int filter_noise_thr; int filter_jitter_step; int filter_smooth_level; const struct esp32_touch_sensor_channel_config *channel_cfg; struct esp32_touch_sensor_channel_data *channel_data; }; struct esp32_touch_sensor_channel_data { const struct device *dev; struct k_work_delayable work; uint32_t status; #if defined(CONFIG_SOC_SERIES_ESP32S2) || defined(CONFIG_SOC_SERIES_ESP32S3) uint32_t last_status; #endif /* defined(CONFIG_SOC_SERIES_ESP32S2) || defined(CONFIG_SOC_SERIES_ESP32S3) */ }; struct 
esp32_touch_sensor_data { uint32_t rtc_intr_msk; }; static void esp32_touch_sensor_interrupt_cb(void *arg) { const struct device *dev = arg; struct esp32_touch_sensor_data *dev_data = dev->data; const struct esp32_touch_sensor_config *dev_cfg = dev->config; const struct esp32_touch_sensor_channel_config *channel_cfg; const int num_channels = dev_cfg->num_channels; uint32_t pad_status; #if defined(CONFIG_SOC_SERIES_ESP32) touch_hal_intr_clear(); #elif defined(CONFIG_SOC_SERIES_ESP32S2) || defined(CONFIG_SOC_SERIES_ESP32S3) static uint8_t scan_done_counter; touch_pad_intr_mask_t intr_mask = touch_hal_read_intr_status_mask(); if (intr_mask & TOUCH_PAD_INTR_MASK_SCAN_DONE) { if (++scan_done_counter == ESP32_SCAN_DONE_MAX_COUNT) { touch_hal_intr_disable(TOUCH_PAD_INTR_MASK_SCAN_DONE); for (int i = 0; i < num_channels; i++) { channel_cfg = &dev_cfg->channel_cfg[i]; /* Set interrupt threshold */ uint32_t benchmark_value; touch_hal_read_benchmark(channel_cfg->channel_num, &benchmark_value); touch_hal_set_threshold(channel_cfg->channel_num, channel_cfg->channel_sens * benchmark_value / 100); } } return; } #endif /* defined(CONFIG_SOC_SERIES_ESP32) */ touch_hal_read_trigger_status_mask(&pad_status); #if defined(CONFIG_SOC_SERIES_ESP32) touch_hal_clear_trigger_status_mask(); #endif /* defined(CONFIG_SOC_SERIES_ESP32) */ for (int i = 0; i < num_channels; i++) { uint32_t channel_status; channel_cfg = &dev_cfg->channel_cfg[i]; channel_status = (pad_status >> channel_cfg->channel_num) & 0x01; #if defined(CONFIG_SOC_SERIES_ESP32) if (channel_status != 0) { #elif defined(CONFIG_SOC_SERIES_ESP32S2) || defined(CONFIG_SOC_SERIES_ESP32S3) uint32_t channel_num = (uint32_t)touch_hal_get_current_meas_channel(); if (channel_cfg->channel_num == channel_num) { #endif /* CONFIG_SOC_SERIES_ESP32 */ struct esp32_touch_sensor_channel_data *channel_data = &dev_cfg->channel_data[i]; channel_data->status = channel_status; (void)k_work_reschedule(&channel_data->work, 
K_MSEC(dev_cfg->debounce_interval_ms)); } } } static void esp32_rtc_isr(void *arg) { uint32_t status = REG_READ(RTC_CNTL_INT_ST_REG); if (arg != NULL) { const struct device *dev = arg; struct esp32_touch_sensor_data *dev_data = dev->data; if (dev_data->rtc_intr_msk & status) { esp32_touch_sensor_interrupt_cb(arg); } } REG_WRITE(RTC_CNTL_INT_CLR_REG, status); } static esp_err_t esp32_rtc_isr_install(intr_handler_t intr_handler, const void *handler_arg) { esp_err_t err; REG_WRITE(RTC_CNTL_INT_ENA_REG, 0); REG_WRITE(RTC_CNTL_INT_CLR_REG, UINT32_MAX); err = esp_intr_alloc(ETS_RTC_CORE_INTR_SOURCE, 0, intr_handler, (void *)handler_arg, NULL); return err; } /** * Handle debounced touch sensor touch state. */ static void esp32_touch_sensor_change_deferred(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct esp32_touch_sensor_channel_data *channel_data = CONTAINER_OF(dwork, struct esp32_touch_sensor_channel_data, work); const struct device *dev = channel_data->dev; const struct esp32_touch_sensor_config *dev_cfg = dev->config; int key_index = channel_data - &dev_cfg->channel_data[0]; const struct esp32_touch_sensor_channel_config *channel_cfg = &dev_cfg->channel_cfg[key_index]; #if defined(CONFIG_SOC_SERIES_ESP32S2) || defined(CONFIG_SOC_SERIES_ESP32S3) if (channel_data->last_status != channel_data->status) { #endif /* defined(CONFIG_SOC_SERIES_ESP32S2) || defined(CONFIG_SOC_SERIES_ESP32S3) */ input_report_key(dev, channel_cfg->zephyr_code, channel_data->status, true, K_FOREVER); #if defined(CONFIG_SOC_SERIES_ESP32S2) || defined(CONFIG_SOC_SERIES_ESP32S3) channel_data->last_status = channel_data->status; } #endif /* defined(CONFIG_SOC_SERIES_ESP32S2) || defined(CONFIG_SOC_SERIES_ESP32S3) */ } static int esp32_touch_sensor_init(const struct device *dev) { struct esp32_touch_sensor_data *dev_data = dev->data; const struct esp32_touch_sensor_config *dev_cfg = dev->config; const int num_channels = dev_cfg->num_channels; 
touch_hal_init(); #if defined(CONFIG_SOC_SERIES_ESP32) touch_hal_volt_t volt = { .refh = dev_cfg->href_microvolt_enum_idx, .refh = dev_cfg->href_microvolt_enum_idx, .atten = dev_cfg->href_atten_microvolt_enum_idx }; touch_hal_set_voltage(&volt); touch_hal_set_fsm_mode(TOUCH_FSM_MODE_TIMER); #endif /* defined(CONFIG_SOC_SERIES_ESP32) */ for (int i = 0; i < num_channels; i++) { struct esp32_touch_sensor_channel_data *channel_data = &dev_cfg->channel_data[i]; const struct esp32_touch_sensor_channel_config *channel_cfg = &dev_cfg->channel_cfg[i]; if (!(channel_cfg->channel_num > 0 && channel_cfg->channel_num < SOC_TOUCH_SENSOR_NUM)) { LOG_ERR("Touch %d configuration failed: " "Touch channel error", i); return -EINVAL; } #if defined(CONFIG_SOC_SERIES_ESP32S2) || defined(CONFIG_SOC_SERIES_ESP32S3) if (channel_cfg->channel_num == SOC_TOUCH_DENOISE_CHANNEL) { LOG_ERR("Touch %d configuration failed: " "TOUCH0 is internal denoise channel", i); return -EINVAL; } #endif /* defined(CONFIG_SOC_SERIES_ESP32S2) || defined(CONFIG_SOC_SERIES_ESP32S3) */ gpio_num_t gpio_num = touch_sensor_channel_io_map[channel_cfg->channel_num]; rtc_gpio_init(gpio_num); rtc_gpio_set_direction(gpio_num, RTC_GPIO_MODE_DISABLED); rtc_gpio_pulldown_dis(gpio_num); rtc_gpio_pullup_dis(gpio_num); touch_hal_config(channel_cfg->channel_num); #if defined(CONFIG_SOC_SERIES_ESP32) touch_hal_set_threshold(channel_cfg->channel_num, 0); touch_hal_set_group_mask(BIT(channel_cfg->channel_num), BIT(channel_cfg->channel_num)); #endif /* defined(CONFIG_SOC_SERIES_ESP32) */ touch_hal_set_channel_mask(BIT(channel_cfg->channel_num)); channel_data->status = 0; #if defined(CONFIG_SOC_SERIES_ESP32S2) || defined(CONFIG_SOC_SERIES_ESP32S3) channel_data->last_status = 0; #endif /* defined(CONFIG_SOC_SERIES_ESP32S2) || defined(CONFIG_SOC_SERIES_ESP32S3) */ channel_data->dev = dev; k_work_init_delayable(&channel_data->work, esp32_touch_sensor_change_deferred); } #if defined(CONFIG_SOC_SERIES_ESP32) for (int i = 0; i < 
num_channels; i++) { const struct esp32_touch_sensor_channel_config *channel_cfg = &dev_cfg->channel_cfg[i]; uint32_t ref_time; ref_time = k_uptime_get_32(); while (!touch_hal_meas_is_done()) { if (k_uptime_get_32() - ref_time > 500) { return -ETIMEDOUT; } k_busy_wait(1000); } uint16_t touch_value = touch_hal_read_raw_data(channel_cfg->channel_num); touch_hal_set_threshold(channel_cfg->channel_num, touch_value * (100 - channel_cfg->channel_num) / 100); } #elif defined(CONFIG_SOC_SERIES_ESP32S2) || defined(CONFIG_SOC_SERIES_ESP32S3) touch_filter_config_t filter_info = { .mode = dev_cfg->filter_mode, .debounce_cnt = dev_cfg->filter_debounce_cnt, .noise_thr = dev_cfg->filter_noise_thr, .jitter_step = dev_cfg->filter_jitter_step, .smh_lvl = dev_cfg->filter_smooth_level, }; touch_hal_filter_set_config(&filter_info); touch_hal_filter_enable(); touch_hal_timeout_enable(); touch_hal_timeout_set_threshold(SOC_TOUCH_PAD_THRESHOLD_MAX); #endif /* defined(CONFIG_SOC_SERIES_ESP32) */ dev_data->rtc_intr_msk = ESP32_RTC_INTR_MSK; esp32_rtc_isr_install(&esp32_rtc_isr, dev); #if defined(CONFIG_SOC_SERIES_ESP32) touch_hal_intr_enable(); #elif defined(CONFIG_SOC_SERIES_ESP32S2) || defined(CONFIG_SOC_SERIES_ESP32S3) touch_hal_intr_enable(ESP32_TOUCH_PAD_INTR_MASK); touch_hal_set_fsm_mode(TOUCH_FSM_MODE_TIMER); #endif /* defined(CONFIG_SOC_SERIES_ESP32) */ touch_hal_start_fsm(); return 0; } #define ESP32_TOUCH_SENSOR_CHANNEL_CFG_INIT(node_id) \ { \ .channel_num = DT_PROP(node_id, channel_num), \ .channel_sens = DT_PROP(node_id, channel_sens), \ .zephyr_code = DT_PROP(node_id, zephyr_code), \ } #define ESP32_TOUCH_SENSOR_INIT(inst) \ static const struct esp32_touch_sensor_channel_config \ esp32_touch_sensor_channel_config_##inst[] = { \ DT_INST_FOREACH_CHILD_STATUS_OKAY_SEP(inst, \ ESP32_TOUCH_SENSOR_CHANNEL_CFG_INIT, (,)) \ }; \ \ static struct esp32_touch_sensor_channel_data esp32_touch_sensor_channel_data_##inst \ [ARRAY_SIZE(esp32_touch_sensor_channel_config_##inst)]; \ \ static 
const struct esp32_touch_sensor_config esp32_touch_sensor_config_##inst = { \ .debounce_interval_ms = DT_INST_PROP(inst, debounce_interval_ms), \ .num_channels = ARRAY_SIZE(esp32_touch_sensor_channel_config_##inst), \ .href_microvolt_enum_idx = DT_INST_ENUM_IDX(inst, href_microvolt), \ .lref_microvolt_enum_idx = DT_INST_ENUM_IDX(inst, lref_microvolt), \ .href_atten_microvolt_enum_idx = DT_INST_ENUM_IDX(inst, href_atten_microvolt), \ .filter_mode = DT_INST_PROP(inst, filter_mode), \ .filter_debounce_cnt = DT_INST_PROP(inst, filter_debounce_cnt), \ .filter_noise_thr = DT_INST_PROP(inst, filter_noise_thr), \ .filter_jitter_step = DT_INST_PROP(inst, filter_jitter_step), \ .filter_smooth_level = DT_INST_PROP(inst, filter_smooth_level), \ .channel_cfg = esp32_touch_sensor_channel_config_##inst, \ .channel_data = esp32_touch_sensor_channel_data_##inst, \ }; \ \ static struct esp32_touch_sensor_data esp32_touch_sensor_data_##inst; \ \ DEVICE_DT_INST_DEFINE(inst, \ &esp32_touch_sensor_init, \ NULL, \ &esp32_touch_sensor_data_##inst, \ &esp32_touch_sensor_config_##inst, \ POST_KERNEL, CONFIG_INPUT_INIT_PRIORITY, \ NULL); DT_INST_FOREACH_STATUS_OKAY(ESP32_TOUCH_SENSOR_INIT) ```
/content/code_sandbox/drivers/input/input_esp32_touch_sensor.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,178
```c /* * */ #define DT_DRV_COMPAT futaba_sbus #include <zephyr/device.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/input/input.h> #include <zephyr/irq.h> #include <zephyr/kernel.h> #include <zephyr/logging/log.h> #include <zephyr/sys/time_units.h> #include <zephyr/sys_clock.h> #include <zephyr/drivers/uart.h> LOG_MODULE_REGISTER(futaba_sbus, CONFIG_INPUT_LOG_LEVEL); /* Driver config */ struct sbus_input_channel { uint32_t sbus_channel; uint32_t type; uint32_t zephyr_code; }; const struct uart_config uart_cfg_sbus = { .baudrate = 100000, .parity = UART_CFG_PARITY_EVEN, .stop_bits = UART_CFG_STOP_BITS_2, .data_bits = UART_CFG_DATA_BITS_8, .flow_ctrl = UART_CFG_FLOW_CTRL_NONE }; struct input_sbus_config { uint8_t num_channels; const struct sbus_input_channel *channel_info; const struct device *uart_dev; uart_irq_callback_user_data_t cb; }; #define SBUS_FRAME_LEN 25 #define SBUS_HEADER 0x0F #define SBUS_FOOTER 0x00 #define SBUS_SERVO_LEN 22 #define SBUS_SERVO_CH_MASK 0x7FF #define SBUS_BYTE24_IDX 23 #define SBUS_BYTE24_CH17 0x01 #define SBUS_BYTE24_CH18 0x02 #define SBUS_BYTE24_FRAME_LOST 0x04 #define SBUS_BYTE24_FAILSAFE 0x08 #define SBUS_TRANSMISSION_TIME_MS 4 /* Max transmission of a single SBUS frame */ #define SBUS_INTERFRAME_SPACING_MS 20 /* Max spacing between SBUS frames */ #define SBUS_CHANNEL_COUNT 16 #define REPORT_FILTER CONFIG_INPUT_SBUS_REPORT_FILTER #define CHANNEL_VALUE_ZERO CONFIG_INPUT_SBUS_CHANNEL_VALUE_ZERO #define CHANNEL_VALUE_ONE CONFIG_INPUT_SBUS_CHANNEL_VALUE_ONE struct input_sbus_data { struct k_thread thread; struct k_sem report_lock; uint16_t xfer_bytes; uint8_t rd_data[SBUS_FRAME_LEN]; uint8_t sbus_frame[SBUS_FRAME_LEN]; bool partial_sync; bool in_sync; uint32_t last_rx_time; uint16_t last_reported_value[SBUS_CHANNEL_COUNT]; int8_t channel_mapping[SBUS_CHANNEL_COUNT]; K_KERNEL_STACK_MEMBER(thread_stack, CONFIG_INPUT_SBUS_THREAD_STACK_SIZE); }; static void input_sbus_report(const struct device *dev, unsigned int sbus_channel, 
unsigned int value) { const struct input_sbus_config *const config = dev->config; struct input_sbus_data *const data = dev->data; int channel = data->channel_mapping[sbus_channel]; /* Not Mapped */ if (channel == -1) { return; } if (value >= (data->last_reported_value[channel] + REPORT_FILTER) || value <= (data->last_reported_value[channel] - REPORT_FILTER)) { switch (config->channel_info[channel].type) { case INPUT_EV_ABS: case INPUT_EV_MSC: input_report(dev, config->channel_info[channel].type, config->channel_info[channel].zephyr_code, value, false, K_FOREVER); break; default: if (value > CHANNEL_VALUE_ONE) { input_report_key(dev, config->channel_info[channel].zephyr_code, 1, false, K_FOREVER); } else if (value < CHANNEL_VALUE_ZERO) { input_report_key(dev, config->channel_info[channel].zephyr_code, 0, false, K_FOREVER); } } data->last_reported_value[channel] = value; } } static void input_sbus_input_report_thread(const struct device *dev, void *dummy2, void *dummy3) { struct input_sbus_data *const data = dev->data; ARG_UNUSED(dummy2); ARG_UNUSED(dummy3); uint8_t i, channel; uint8_t *sbus_channel_data = &data->sbus_frame[1]; /* Omit header */ uint16_t value; int bits_read; unsigned int key; int ret; bool connected_reported = false; while (true) { if (!data->in_sync) { k_sem_take(&data->report_lock, K_FOREVER); if (data->in_sync) { LOG_DBG("SBUS receiver connected"); } else { continue; } } else { ret = k_sem_take(&data->report_lock, K_MSEC(SBUS_INTERFRAME_SPACING_MS)); if (ret == -EBUSY) { continue; } else if (ret < 0 || !data->in_sync) { /* We've lost sync with the UART receiver */ key = irq_lock(); data->partial_sync = false; data->in_sync = false; data->xfer_bytes = 0; irq_unlock(key); connected_reported = false; LOG_DBG("SBUS receiver connection lost"); /* Report connection lost */ continue; } } if (connected_reported && data->sbus_frame[SBUS_BYTE24_IDX] & SBUS_BYTE24_FRAME_LOST) { LOG_DBG("SBUS controller connection lost"); connected_reported = false; } else 
if (!connected_reported && !(data->sbus_frame[SBUS_BYTE24_IDX] & SBUS_BYTE24_FRAME_LOST)) { LOG_DBG("SBUS controller connected"); connected_reported = true; } /* Parse the data */ channel = 0; value = 0; bits_read = 0; for (i = 0; i < SBUS_SERVO_LEN; i++) { /* Read the next byte */ unsigned char byte = sbus_channel_data[i]; /* Extract bits and construct the 11-bit value */ value |= byte << bits_read; bits_read += 8; /* Check if we've read enough bits to form a full 11-bit value */ while (bits_read >= 11) { input_sbus_report(dev, channel, value & SBUS_SERVO_CH_MASK); /* Shift right to prepare for the next 11 bits */ value >>= 11; bits_read -= 11; channel++; } } #ifdef CONFIG_INPUT_SBUS_SEND_SYNC input_report(dev, 0, 0, 0, true, K_FOREVER); #endif } } static void sbus_resync(const struct device *uart_dev, struct input_sbus_data *const data) { uint8_t *rd_data = data->rd_data; if (data->partial_sync) { data->xfer_bytes += uart_fifo_read(uart_dev, &rd_data[data->xfer_bytes], SBUS_FRAME_LEN - data->xfer_bytes); if (data->xfer_bytes == SBUS_FRAME_LEN) { /* Transfer took longer then 4ms probably faulty */ if (k_uptime_get_32() - data->last_rx_time > SBUS_TRANSMISSION_TIME_MS) { data->xfer_bytes = 0; data->partial_sync = false; } else if (rd_data[0] == SBUS_HEADER && rd_data[SBUS_FRAME_LEN - 1] == SBUS_FOOTER) { data->in_sync = true; } else { /* Dummy read to clear fifo */ uart_fifo_read(uart_dev, &rd_data[0], 1); data->xfer_bytes = 0; data->partial_sync = false; } } } else { if (uart_fifo_read(uart_dev, &rd_data[0], 1) == 1) { if (rd_data[0] == SBUS_HEADER) { data->partial_sync = true; data->xfer_bytes = 1; data->last_rx_time = k_uptime_get_32(); } } } } static void sbus_uart_isr(const struct device *uart_dev, void *user_data) { const struct device *dev = user_data; struct input_sbus_data *const data = dev->data; uint8_t *rd_data = data->rd_data; if (uart_dev == NULL) { LOG_DBG("UART device is NULL"); return; } if (!uart_irq_update(uart_dev)) { LOG_DBG("Unable to start 
processing interrupts"); return; } while (uart_irq_rx_ready(uart_dev) && data->xfer_bytes <= SBUS_FRAME_LEN) { if (data->in_sync) { if (data->xfer_bytes == 0) { data->last_rx_time = k_uptime_get_32(); } data->xfer_bytes += uart_fifo_read(uart_dev, &rd_data[data->xfer_bytes], SBUS_FRAME_LEN - data->xfer_bytes); } else { sbus_resync(uart_dev, data); } } if (data->in_sync && (k_uptime_get_32() - data->last_rx_time > SBUS_INTERFRAME_SPACING_MS)) { data->partial_sync = false; data->in_sync = false; data->xfer_bytes = 0; k_sem_give(&data->report_lock); } else if (data->in_sync && data->xfer_bytes == SBUS_FRAME_LEN) { data->xfer_bytes = 0; if (rd_data[0] == SBUS_HEADER && rd_data[SBUS_FRAME_LEN - 1] == SBUS_FOOTER) { memcpy(data->sbus_frame, rd_data, SBUS_FRAME_LEN); k_sem_give(&data->report_lock); } else { data->partial_sync = false; data->in_sync = false; } } } /* * @brief Initialize sbus driver */ static int input_sbus_init(const struct device *dev) { const struct input_sbus_config *const config = dev->config; struct input_sbus_data *const data = dev->data; int i, ret; uart_irq_rx_disable(config->uart_dev); uart_irq_tx_disable(config->uart_dev); LOG_DBG("Initializing SBUS driver"); for (i = 0; i < SBUS_CHANNEL_COUNT; i++) { data->last_reported_value[i] = 0; data->channel_mapping[i] = -1; } data->xfer_bytes = 0; data->in_sync = false; data->partial_sync = false; data->last_rx_time = 0; for (i = 0; i < config->num_channels; i++) { data->channel_mapping[config->channel_info[i].sbus_channel - 1] = i; } ret = uart_configure(config->uart_dev, &uart_cfg_sbus); if (ret < 0) { LOG_ERR("Unable to configure UART port: %d", ret); return ret; } ret = uart_irq_callback_user_data_set(config->uart_dev, config->cb, (void *)dev); if (ret < 0) { if (ret == -ENOTSUP) { LOG_ERR("Interrupt-driven UART API support not enabled"); } else if (ret == -ENOSYS) { LOG_ERR("UART device does not support interrupt-driven API"); } else { LOG_ERR("Error setting UART callback: %d", ret); } return ret; } 
uart_irq_rx_enable(config->uart_dev); k_sem_init(&data->report_lock, 0, 1); k_thread_create(&data->thread, data->thread_stack, K_KERNEL_STACK_SIZEOF(data->thread_stack), (k_thread_entry_t)input_sbus_input_report_thread, (void *)dev, NULL, NULL, CONFIG_INPUT_SBUS_THREAD_PRIORITY, 0, K_NO_WAIT); k_thread_name_set(&data->thread, dev->name); return ret; } #define INPUT_CHANNEL_CHECK(input_channel_id) \ BUILD_ASSERT(IN_RANGE(DT_PROP(input_channel_id, channel), 1, 16), \ "invalid channel number"); \ BUILD_ASSERT(DT_PROP(input_channel_id, type) == INPUT_EV_ABS || \ DT_PROP(input_channel_id, type) == INPUT_EV_KEY || \ DT_PROP(input_channel_id, type) == INPUT_EV_MSC, \ "invalid channel type"); #define SBUS_INPUT_CHANNEL_INITIALIZER(input_channel_id) \ { \ .sbus_channel = DT_PROP(input_channel_id, channel), \ .type = DT_PROP(input_channel_id, type), \ .zephyr_code = DT_PROP(input_channel_id, zephyr_code), \ }, #define INPUT_SBUS_INIT(n) \ \ static const struct sbus_input_channel input_##id[] = { \ DT_INST_FOREACH_CHILD(n, SBUS_INPUT_CHANNEL_INITIALIZER) \ }; \ DT_INST_FOREACH_CHILD(n, INPUT_CHANNEL_CHECK) \ \ static struct input_sbus_data sbus_data_##n; \ \ static const struct input_sbus_config sbus_cfg_##n = { \ .channel_info = input_##id, \ .uart_dev = DEVICE_DT_GET(DT_INST_BUS(n)), \ .num_channels = ARRAY_SIZE(input_##id), \ .cb = sbus_uart_isr, \ }; \ \ DEVICE_DT_INST_DEFINE(n, input_sbus_init, NULL, &sbus_data_##n, &sbus_cfg_##n, \ POST_KERNEL, CONFIG_INPUT_INIT_PRIORITY, NULL); DT_INST_FOREACH_STATUS_OKAY(INPUT_SBUS_INIT) ```
/content/code_sandbox/drivers/input/input_sbus.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,014
```unknown
menuconfig INPUT_CST816S
	bool "CST816S capacitive touch panel driver"
	default y
	depends on DT_HAS_HYNITRON_CST816S_ENABLED
	select I2C
	help
	  Enable driver for Hynitron CST816S touch panel.

if INPUT_CST816S

config INPUT_CST816S_PERIOD
	int "Sample period"
	depends on !INPUT_CST816S_INTERRUPT
	default 20
	help
	  Sample period in milliseconds when in polling mode.

config INPUT_CST816S_INTERRUPT
	bool "Interrupt support"
	default y
	depends on GPIO
	help
	  Enable interrupt support (requires GPIO).

config INPUT_CST816S_EV_DEVICE
	bool "Device specific event support"
	help
	  Enable device specific event support.

endif # INPUT_CST816S
```
/content/code_sandbox/drivers/input/Kconfig.cst816s
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
172
```unknown
# Chipsemi CHSC6X capacitive touch panel driver configuration options

config INPUT_CHSC6X
	bool "Use CHSC6X capacitive touch panel driver"
	default y
	depends on DT_HAS_CHIPSEMI_CHSC6X_ENABLED
	select GPIO
	select I2C
	help
	  Enable out of tree driver for CHSC6X touch panel.
```
/content/code_sandbox/drivers/input/Kconfig.chsc6x
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
63
```unknown
menuconfig INPUT_CAP1203
	bool "CAP1203 3-channel capacitive touch sensor driver"
	default y
	depends on DT_HAS_MICROCHIP_CAP1203_ENABLED
	select I2C
	help
	  Enable driver for Microchip CAP1203 3-channel capacitive touch
	  sensor.

if INPUT_CAP1203

config INPUT_CAP1203_POLL
	bool "Polling"
	help
	  Enable polling mode when interrupt GPIO is not specified.

config INPUT_CAP1203_PERIOD
	int "Sample period"
	depends on INPUT_CAP1203_POLL
	default 10
	help
	  Sample period in milliseconds when in polling mode.

endif # INPUT_CAP1203
```
/content/code_sandbox/drivers/input/Kconfig.cap1203
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
142
```unknown
# Xptek XPT2046 resistive touch panel driver configuration options

config INPUT_XPT2046
	bool "XPT2046 resistive touch panel driver"
	default y
	depends on DT_HAS_XPTEK_XPT2046_ENABLED
	select SPI
	help
	  Enable driver for Xptek XPT2046 resistive touch panel. This driver
	  is very similar to ADS7843, but differs on channel numbering.
```
/content/code_sandbox/drivers/input/Kconfig.xpt2046
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
77
```unknown
# Microchip XEC Keyboard Scan Matrix configuration options

config INPUT_XEC_KBD
	bool "Microchip XEC series keyboard matrix driver"
	default y
	depends on DT_HAS_MICROCHIP_XEC_KBD_ENABLED
	select INPUT_KBD_MATRIX
	select MULTITHREADING
	select PINCTRL
	help
	  Enable the Microchip XEC Kscan IO driver.
```
/content/code_sandbox/drivers/input/Kconfig.xec
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
76
```unknown
# NPCX Keyboard scan driver configuration options

config INPUT_NPCX_KBD
	bool "Nuvoton NPCX embedded controller (EC) keyboard scan driver"
	default y
	depends on DT_HAS_NUVOTON_NPCX_KBD_ENABLED
	select INPUT_KBD_MATRIX
	help
	  This option enables the keyboard scan driver for NPCX family of
	  processors.

config INPUT_NPCX_KBD_KSO_HIGH_DRIVE
	bool "Select quasi-bidirectional buffers for KSO pins"
	default y
	depends on INPUT_NPCX_KBD
	help
	  Select quasi-bidirectional buffers for KSO pins to reduce the
	  low-to-high transition time.
```
/content/code_sandbox/drivers/input/Kconfig.npcx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
136
```unknown
# STMicroelectronics STMPE811 touch panel driver configuration options

config INPUT_STMPE811
	bool "STMPE811 touch driver"
	default y
	depends on DT_HAS_ST_STMPE811_ENABLED
	select I2C
	help
	  Enable driver for STMPE811 touch panel.
```
/content/code_sandbox/drivers/input/Kconfig.stmpe811
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
49
```c
/*
 * Microchip XEC series keyboard-scan matrix driver, built on the common
 * input_kbd_matrix helper: this file only provides the column-drive,
 * row-read and detect-mode hooks plus GIRQ/PM plumbing.
 */
#define DT_DRV_COMPAT microchip_xec_kbd

#include <cmsis_core.h>
#include <errno.h>
#include <soc.h>
#include <zephyr/device.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/input/input.h>
#include <zephyr/input/input_kbd_matrix.h>
#include <zephyr/irq.h>
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <zephyr/pm/device.h>
#include <zephyr/pm/policy.h>

#ifdef CONFIG_SOC_SERIES_MEC172X
#include <zephyr/drivers/clock_control/mchp_xec_clock_control.h>
#include <zephyr/drivers/interrupt_controller/intc_mchp_xec_ecia.h>
#endif

LOG_MODULE_REGISTER(input_xec_kbd, CONFIG_INPUT_LOG_LEVEL);

struct xec_kbd_config {
	struct input_kbd_matrix_common_config common;
	/* Keyboard-scan register block (from devicetree reg address) */
	struct kscan_regs *regs;
	const struct pinctrl_dev_config *pcfg;
	/* GIRQ bank and bit position for the kscan interrupt */
	uint8_t girq;
	uint8_t girq_pos;
#ifdef CONFIG_SOC_SERIES_MEC172X
	/* PCR sleep-enable register index/bit (MEC172x only) */
	uint8_t pcr_idx;
	uint8_t pcr_pos;
#endif
	bool wakeup_source;
};

struct xec_kbd_data {
	struct input_kbd_matrix_common_data common;
	/* True while the suspend-to-idle PM lock is held */
	bool pm_lock_taken;
};

/* Acknowledge the kscan GIRQ source bit. */
static void xec_kbd_clear_girq_status(const struct device *dev)
{
	struct xec_kbd_config const *cfg = dev->config;

#ifdef CONFIG_SOC_SERIES_MEC172X
	mchp_xec_ecia_girq_src_clr(cfg->girq, cfg->girq_pos);
#else
	MCHP_GIRQ_SRC(cfg->girq) = BIT(cfg->girq_pos);
#endif
}

/* Enable the kscan GIRQ source so it can reach the NVIC. */
static void xec_kbd_configure_girq(const struct device *dev)
{
	struct xec_kbd_config const *cfg = dev->config;

#ifdef CONFIG_SOC_SERIES_MEC172X
	mchp_xec_ecia_enable(cfg->girq, cfg->girq_pos);
#else
	MCHP_GIRQ_ENSET(cfg->girq) = BIT(cfg->girq_pos);
#endif
}

/* Clear the keyboard-scan block's PCR sleep enable (ungate its clock). */
static void xec_kbd_clr_slp_en(const struct device *dev)
{
#ifdef CONFIG_SOC_SERIES_MEC172X
	struct xec_kbd_config const *cfg = dev->config;

	z_mchp_xec_pcr_periph_sleep(cfg->pcr_idx, cfg->pcr_pos, 0);
#else
	ARG_UNUSED(dev);
	mchp_pcr_periph_slp_ctrl(PCR_KEYSCAN, 0);
#endif
}

/* kbd-matrix hook: select which KSO column(s) are driven. */
static void xec_kbd_drive_column(const struct device *dev, int data)
{
	struct xec_kbd_config const *cfg = dev->config;
	struct kscan_regs *regs = cfg->regs;

	if (data == INPUT_KBD_MATRIX_COLUMN_DRIVE_ALL) {
		/* KSO output controlled by the KSO_SELECT field */
		regs->KSO_SEL = MCHP_KSCAN_KSO_ALL;
	} else if (data == INPUT_KBD_MATRIX_COLUMN_DRIVE_NONE) {
		/* Keyboard scan disabled. All KSO output buffers disabled */
		regs->KSO_SEL = MCHP_KSCAN_KSO_EN;
	} else {
		/* Assume, ALL was previously set */
		regs->KSO_SEL = data;
	}
}

/* kbd-matrix hook: sample the 8 KSI row inputs. */
static kbd_row_t xec_kbd_read_row(const struct device *dev)
{
	struct xec_kbd_config const *cfg = dev->config;
	struct kscan_regs *regs = cfg->regs;

	/* In this implementation a 1 means key pressed */
	return ~(regs->KSI_IN & 0xff);
}

/* Key-press interrupt: mask further IRQs and start the matrix poll loop. */
static void xec_kbd_isr(const struct device *dev)
{
	xec_kbd_clear_girq_status(dev);
	irq_disable(DT_INST_IRQN(0));

	input_kbd_matrix_poll_start(dev);
}

/* kbd-matrix hook: arm (enabled) or disarm interrupt-based key detection.
 * While detection is disarmed the driver holds a suspend-to-idle PM lock so
 * polling keeps running.
 */
static void xec_kbd_set_detect_mode(const struct device *dev, bool enabled)
{
	struct xec_kbd_config const *cfg = dev->config;
	struct xec_kbd_data *data = dev->data;
	struct kscan_regs *regs = cfg->regs;

	if (enabled) {
		if (data->pm_lock_taken) {
			pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE,
						 PM_ALL_SUBSTATES);
		}

		/* Clear any stale KSI status before unmasking the IRQ */
		regs->KSI_STS = MCHP_KSCAN_KSO_SEL_REG_MASK;

		xec_kbd_clear_girq_status(dev);
		NVIC_ClearPendingIRQ(DT_INST_IRQN(0));
		irq_enable(DT_INST_IRQN(0));
	} else {
		pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE,
					 PM_ALL_SUBSTATES);
		data->pm_lock_taken = true;
	}
}

#ifdef CONFIG_PM_DEVICE
/* Device PM hook: gate/ungate the scan block and pinctrl states. A
 * wakeup-source node skips all of this so scanning keeps working in sleep.
 */
static int xec_kbd_pm_action(const struct device *dev, enum pm_device_action action)
{
	struct xec_kbd_config const *cfg = dev->config;
	struct kscan_regs *regs = cfg->regs;
	int ret;

	if (cfg->wakeup_source) {
		return 0;
	}

	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
		if (ret != 0) {
			LOG_ERR("XEC KSCAN pinctrl init failed (%d)", ret);
			return ret;
		}
		regs->KSO_SEL &= ~BIT(MCHP_KSCAN_KSO_EN_POS);
		/* Clear status register */
		regs->KSI_STS = MCHP_KSCAN_KSO_SEL_REG_MASK;
		regs->KSI_IEN = MCHP_KSCAN_KSI_IEN_REG_MASK;
		break;
	case PM_DEVICE_ACTION_SUSPEND:
		regs->KSO_SEL |= BIT(MCHP_KSCAN_KSO_EN_POS);
		regs->KSI_IEN = (~MCHP_KSCAN_KSI_IEN_REG_MASK);
		ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_SLEEP);
		if (ret != -ENOENT) {
			/* pinctrl-1 does not exist */
			return ret;
		}
		break;
	default:
		return -ENOTSUP;
	}

	return 0;
}
#endif /* CONFIG_PM_DEVICE */

/* Ungate the block, enable predrive and KSI interrupts, wire the GIRQ, then
 * hand off to the common kbd-matrix init.
 */
static int xec_kbd_init(const struct device *dev)
{
	struct xec_kbd_config const *cfg = dev->config;
	struct kscan_regs *regs = cfg->regs;
	int ret;

	ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret != 0) {
		LOG_ERR("XEC KSCAN pinctrl init failed (%d)", ret);
		return ret;
	}

	xec_kbd_clr_slp_en(dev);

	/* Enable predrive */
	regs->KSO_SEL |= BIT(MCHP_KSCAN_KSO_EN_POS);
	regs->EXT_CTRL = MCHP_KSCAN_EXT_CTRL_PREDRV_EN;
	regs->KSO_SEL &= ~BIT(MCHP_KSCAN_KSO_EN_POS);
	regs->KSI_IEN = MCHP_KSCAN_KSI_IEN_REG_MASK;

	/* Interrupts are enabled in the thread function */
	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), xec_kbd_isr,
		    DEVICE_DT_INST_GET(0), 0);

	xec_kbd_clear_girq_status(dev);
	xec_kbd_configure_girq(dev);

	return input_kbd_matrix_common_init(dev);
}

PINCTRL_DT_INST_DEFINE(0);
PM_DEVICE_DT_INST_DEFINE(0, xec_kbd_pm_action);
INPUT_KBD_MATRIX_DT_INST_DEFINE(0);

static const struct input_kbd_matrix_api xec_kbd_api = {
	.drive_column = xec_kbd_drive_column,
	.read_row = xec_kbd_read_row,
	.set_detect_mode = xec_kbd_set_detect_mode,
};

/* To enable wakeup, set the "wakeup-source" on the keyboard scanning device
 * node.
 */
static struct xec_kbd_config xec_kbd_cfg_0 = {
	.common = INPUT_KBD_MATRIX_DT_INST_COMMON_CONFIG_INIT(0, &xec_kbd_api),
	.regs = (struct kscan_regs *)(DT_INST_REG_ADDR(0)),
	.girq = DT_INST_PROP_BY_IDX(0, girqs, 0),
	.girq_pos = DT_INST_PROP_BY_IDX(0, girqs, 1),
#ifdef CONFIG_SOC_SERIES_MEC172X
	.pcr_idx = DT_INST_PROP_BY_IDX(0, pcrs, 0),
	.pcr_pos = DT_INST_PROP_BY_IDX(0, pcrs, 1),
#endif
	.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0),
	.wakeup_source = DT_INST_PROP(0, wakeup_source)
};

static struct xec_kbd_data kbd_data_0;

DEVICE_DT_INST_DEFINE(0, xec_kbd_init, PM_DEVICE_DT_INST_GET(0), &kbd_data_0,
		      &xec_kbd_cfg_0, POST_KERNEL, CONFIG_INPUT_INIT_PRIORITY, NULL);

BUILD_ASSERT(DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) == 1,
	     "only one microchip,xec-kbd compatible node can be supported");
BUILD_ASSERT(IN_RANGE(DT_INST_PROP(0, row_size), 1, 8), "invalid row-size");
BUILD_ASSERT(IN_RANGE(DT_INST_PROP(0, col_size), 1, 18), "invalid col-size");
```
/content/code_sandbox/drivers/input/input_xec_kbd.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,013
```c
/*
 * FocalTech FT5336 capacitive touch controller driver (I2C). Reports the
 * first touch point as ABS X/Y plus BTN_TOUCH, either from an interrupt
 * GPIO or a polling timer.
 *
 * NOTE: SOURCE view is truncated after ft5336_init (trailing "#ifdef") —
 * the remainder of the file is not visible here.
 */

#define DT_DRV_COMPAT focaltech_ft5336

#include <zephyr/drivers/gpio.h>
#include <zephyr/drivers/i2c.h>
#include <zephyr/input/input.h>
#include <zephyr/pm/device.h>
#include <zephyr/pm/device_runtime.h>
#include <zephyr/sys/util.h>
#include <zephyr/logging/log.h>

LOG_MODULE_REGISTER(ft5336, CONFIG_INPUT_LOG_LEVEL);

/* FT5336 used registers */
#define REG_TD_STATUS		0x02U
#define REG_P1_XH		0x03U
#define REG_G_PMODE		0xA5U

/* REG_TD_STATUS: Touch points. */
#define TOUCH_POINTS_POS	0U
#define TOUCH_POINTS_MSK	0x0FU

/* REG_Pn_XH: Events. */
#define EVENT_POS		6U
#define EVENT_MSK		0x03U
#define EVENT_PRESS_DOWN	0x00U
#define EVENT_LIFT_UP		0x01U
#define EVENT_CONTACT		0x02U
#define EVENT_NONE		0x03U

/* REG_Pn_YH: Touch ID */
#define TOUCH_ID_POS		4U
#define TOUCH_ID_MSK		0x0FU
#define TOUCH_ID_INVALID	0x0FU

/* REG_Pn_XH and REG_Pn_YH: Position */
#define POSITION_H_MSK		0x0FU

/* REG_G_PMODE: Power Consume Mode */
#define PMOD_HIBERNATE		0x03U

/** FT5336 configuration (DT). */
struct ft5336_config {
	/** I2C bus. */
	struct i2c_dt_spec bus;
	struct gpio_dt_spec reset_gpio;
#ifdef CONFIG_INPUT_FT5336_INTERRUPT
	/** Interrupt GPIO information. */
	struct gpio_dt_spec int_gpio;
#endif
};

/** FT5336 data. */
struct ft5336_data {
	/** Device pointer. */
	const struct device *dev;
	/** Work queue (for deferred read). */
	struct k_work work;
#ifdef CONFIG_INPUT_FT5336_INTERRUPT
	/** Interrupt GPIO callback. */
	struct gpio_callback int_gpio_cb;
#else
	/** Timer (polling mode). */
	struct k_timer timer;
#endif
	/** Last pressed state. */
	bool pressed_old;
};

/* Read the touch status over I2C and emit input events for the first touch
 * point; releases are reported only on a pressed->released transition.
 */
static int ft5336_process(const struct device *dev)
{
	const struct ft5336_config *config = dev->config;
	struct ft5336_data *data = dev->data;

	int r;
	uint8_t points;
	uint8_t coords[4U];
	uint16_t row, col;
	bool pressed;

	/* obtain number of touch points */
	r = i2c_reg_read_byte_dt(&config->bus, REG_TD_STATUS, &points);
	if (r < 0) {
		return r;
	}

	points = FIELD_GET(TOUCH_POINTS_MSK, points);
	if (points != 0) {
		/* Any number of touches still counts as one touch. All touch
		 * points except the first are ignored. Obtain first point
		 * X, Y coordinates from:
		 * REG_P1_XH, REG_P1_XL, REG_P1_YH, REG_P1_YL.
		 * We ignore the Event Flag because Zephyr only cares about
		 * pressed / not pressed and not press down / lift up
		 */
		r = i2c_burst_read_dt(&config->bus, REG_P1_XH, coords, sizeof(coords));
		if (r < 0) {
			return r;
		}

		row = ((coords[0] & POSITION_H_MSK) << 8U) | coords[1];
		col = ((coords[2] & POSITION_H_MSK) << 8U) | coords[3];

		uint8_t touch_id = FIELD_GET(TOUCH_ID_MSK, coords[2]);

		if (touch_id != TOUCH_ID_INVALID) {
			pressed = true;
			LOG_DBG("points: %d, touch_id: %d, row: %d, col: %d",
				points, touch_id, row, col);
		} else {
			pressed = false;
			LOG_WRN("bad TOUCH_ID: row: %d, col: %d", row, col);
		}
	} else {
		/* no touch = no press */
		pressed = false;
	}

	if (pressed) {
		input_report_abs(dev, INPUT_ABS_X, col, false, K_FOREVER);
		input_report_abs(dev, INPUT_ABS_Y, row, false, K_FOREVER);
		input_report_key(dev, INPUT_BTN_TOUCH, 1, true, K_FOREVER);
	} else if (data->pressed_old && !pressed) {
		input_report_key(dev, INPUT_BTN_TOUCH, 0, true, K_FOREVER);
	}
	data->pressed_old = pressed;

	return 0;
}

/* Deferred-read work item: runs ft5336_process outside IRQ context. */
static void ft5336_work_handler(struct k_work *work)
{
	struct ft5336_data *data = CONTAINER_OF(work, struct ft5336_data, work);

	ft5336_process(data->dev);
}

#ifdef CONFIG_INPUT_FT5336_INTERRUPT
/* Interrupt-pin callback: defer the I2C read to the work queue. */
static void ft5336_isr_handler(const struct device *dev,
			       struct gpio_callback *cb, uint32_t pins)
{
	struct ft5336_data *data = CONTAINER_OF(cb, struct ft5336_data, int_gpio_cb);

	k_work_submit(&data->work);
}
#else
/* Polling-mode timer tick: defer the I2C read to the work queue. */
static void ft5336_timer_handler(struct k_timer *timer)
{
	struct ft5336_data *data = CONTAINER_OF(timer, struct ft5336_data, timer);

	k_work_submit(&data->work);
}
#endif

/* Driver init: optional hardware reset, then interrupt or polling setup and
 * runtime-PM enablement.
 */
static int ft5336_init(const struct device *dev)
{
	const struct ft5336_config *config = dev->config;
	struct ft5336_data *data = dev->data;
	int r;

	if (!device_is_ready(config->bus.bus)) {
		LOG_ERR("I2C controller device not ready");
		return -ENODEV;
	}

	data->dev = dev;

	k_work_init(&data->work, ft5336_work_handler);

	if (config->reset_gpio.port != NULL) {
		/* Enable reset GPIO and assert reset */
		r = gpio_pin_configure_dt(&config->reset_gpio, GPIO_OUTPUT_ACTIVE);
		if (r < 0) {
			LOG_ERR("Could not enable reset GPIO");
			return r;
		}

		/*
		 * Datasheet requires reset be held low 1 ms, or
		 * 1 ms + 100us if powering on controller. Hold low for
		 * 5 ms to be safe.
		 */
		k_sleep(K_MSEC(5));

		/* Pull reset pin high to complete reset sequence */
		r = gpio_pin_set_dt(&config->reset_gpio, 0);
		if (r < 0) {
			return r;
		}
	}

#ifdef CONFIG_INPUT_FT5336_INTERRUPT
	if (!gpio_is_ready_dt(&config->int_gpio)) {
		LOG_ERR("Interrupt GPIO controller device not ready");
		return -ENODEV;
	}

	r = gpio_pin_configure_dt(&config->int_gpio, GPIO_INPUT);
	if (r < 0) {
		LOG_ERR("Could not configure interrupt GPIO pin");
		return r;
	}

	r = gpio_pin_interrupt_configure_dt(&config->int_gpio, GPIO_INT_EDGE_TO_ACTIVE);
	if (r < 0) {
		LOG_ERR("Could not configure interrupt GPIO interrupt.");
		return r;
	}

	gpio_init_callback(&data->int_gpio_cb, ft5336_isr_handler,
			   BIT(config->int_gpio.pin));

	r = gpio_add_callback(config->int_gpio.port, &data->int_gpio_cb);
	if (r < 0) {
		LOG_ERR("Could not set gpio callback");
		return r;
	}
#else
	k_timer_init(&data->timer, ft5336_timer_handler, NULL);
	k_timer_start(&data->timer, K_MSEC(CONFIG_INPUT_FT5336_PERIOD),
		      K_MSEC(CONFIG_INPUT_FT5336_PERIOD));
#endif

	r = pm_device_runtime_enable(dev);
	if (r < 0 && r != -ENOTSUP) {
		LOG_ERR("Failed to enable runtime power management");
		return r;
	}

	return 0;
}

#ifdef 
```
CONFIG_PM_DEVICE static int ft5336_pm_action(const struct device *dev, enum pm_device_action action) { const struct ft5336_config *config = dev->config; #ifndef CONFIG_INPUT_FT5336_INTERRUPT struct ft5336_data *data = dev->data; #endif int ret; if (config->reset_gpio.port == NULL) { return -ENOTSUP; } switch (action) { case PM_DEVICE_ACTION_SUSPEND: ret = i2c_reg_write_byte_dt(&config->bus, REG_G_PMODE, PMOD_HIBERNATE); if (ret < 0) { return ret; } #ifndef CONFIG_INPUT_FT5336_INTERRUPT k_timer_stop(&data->timer); #endif break; case PM_DEVICE_ACTION_RESUME: ret = gpio_pin_set_dt(&config->reset_gpio, 1); if (ret < 0) { return ret; } k_sleep(K_MSEC(5)); ret = gpio_pin_set_dt(&config->reset_gpio, 0); if (ret < 0) { return ret; } #ifndef CONFIG_INPUT_FT5336_INTERRUPT k_timer_start(&data->timer, K_MSEC(CONFIG_INPUT_FT5336_PERIOD), K_MSEC(CONFIG_INPUT_FT5336_PERIOD)); #endif break; default: return -ENOTSUP; } return 0; } #endif #define FT5336_INIT(index) \ PM_DEVICE_DT_INST_DEFINE(n, ft5336_pm_action); \ static const struct ft5336_config ft5336_config_##index = { \ .bus = I2C_DT_SPEC_INST_GET(index), \ .reset_gpio = GPIO_DT_SPEC_INST_GET_OR(index, reset_gpios, {0}), \ IF_ENABLED(CONFIG_INPUT_FT5336_INTERRUPT, \ (.int_gpio = GPIO_DT_SPEC_INST_GET(index, int_gpios),)) \ }; \ static struct ft5336_data ft5336_data_##index; \ DEVICE_DT_INST_DEFINE(index, ft5336_init, PM_DEVICE_DT_INST_GET(n), \ &ft5336_data_##index, &ft5336_config_##index, \ POST_KERNEL, CONFIG_INPUT_INIT_PRIORITY, NULL); DT_INST_FOREACH_STATUS_OKAY(FT5336_INIT) ```
/content/code_sandbox/drivers/input/input_ft5336.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,232
```unknown config INPUT_PMW3610 bool "PMW3610 low power laser mouse sensor input driver" default y depends on DT_HAS_PIXART_PMW3610_ENABLED select SPI help PMW3610 low power laser mouse sensor input driver ```
/content/code_sandbox/drivers/input/Kconfig.pmw3610
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
57
```c /* * */ #define DT_DRV_COMPAT microchip_cap1203 #include <zephyr/drivers/i2c.h> #include <zephyr/drivers/gpio.h> #include <zephyr/input/input.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(cap1203, CONFIG_INPUT_LOG_LEVEL); #define REG_MAIN_CONTROL 0x0 #define CONTROL_INT 0x1 #define REG_INPUT_STATUS 0x03 #define REG_INTERRUPT_ENABLE 0x27 #define INTERRUPT_ENABLE 0x7 #define INTERRUPT_DISABLE 0x0 #define TOUCH_INPUT_COUNT 3 struct cap1203_config { struct i2c_dt_spec i2c; struct gpio_dt_spec int_gpio; const uint16_t *input_codes; }; struct cap1203_data { const struct device *dev; struct k_work work; /* Interrupt GPIO callback. */ struct gpio_callback int_gpio_cb; uint8_t prev_input_state; #ifdef CONFIG_INPUT_CAP1203_POLL /* Timer (polling mode). */ struct k_timer timer; #endif }; static int cap1203_clear_interrupt(const struct i2c_dt_spec *i2c) { uint8_t ctrl; int r; r = i2c_reg_read_byte_dt(i2c, REG_MAIN_CONTROL, &ctrl); if (r < 0) { return r; } ctrl = ctrl & ~CONTROL_INT; return i2c_reg_write_byte_dt(i2c, REG_MAIN_CONTROL, ctrl); } static int cap1203_enable_interrupt(const struct i2c_dt_spec *i2c, bool enable) { uint8_t intr = enable ? INTERRUPT_ENABLE : INTERRUPT_DISABLE; return i2c_reg_write_byte_dt(i2c, REG_INTERRUPT_ENABLE, intr); } static int cap1203_process(const struct device *dev) { const struct cap1203_config *config = dev->config; struct cap1203_data *data = dev->data; int r; uint8_t input; uint8_t single_input_state; r = i2c_reg_read_byte_dt(&config->i2c, REG_INPUT_STATUS, &input); if (r < 0) { return r; } for (uint8_t i = 0; i < TOUCH_INPUT_COUNT; i++) { single_input_state = input & BIT(i); if (single_input_state != (data->prev_input_state & BIT(i))) { input_report_key(dev, config->input_codes[i], single_input_state, true, K_FOREVER); } } data->prev_input_state = input; LOG_DBG("event: input: %d\n", input); /* * Clear INT bit to clear SENSOR INPUT STATUS bits. * Note that this is also required in polling mode. 
*/ r = cap1203_clear_interrupt(&config->i2c); if (r < 0) { return r; } return 0; } static void cap1203_work_handler(struct k_work *work) { struct cap1203_data *data = CONTAINER_OF(work, struct cap1203_data, work); cap1203_process(data->dev); } static void cap1203_isr_handler(const struct device *dev, struct gpio_callback *cb, uint32_t pins) { struct cap1203_data *data = CONTAINER_OF(cb, struct cap1203_data, int_gpio_cb); k_work_submit(&data->work); } #ifdef CONFIG_INPUT_CAP1203_POLL static void cap1203_timer_handler(struct k_timer *timer) { struct cap1203_data *data = CONTAINER_OF(timer, struct cap1203_data, timer); k_work_submit(&data->work); } #endif static int cap1203_init(const struct device *dev) { const struct cap1203_config *config = dev->config; struct cap1203_data *data = dev->data; int r; if (!device_is_ready(config->i2c.bus)) { LOG_ERR("I2C controller device not ready"); return -ENODEV; } data->dev = dev; k_work_init(&data->work, cap1203_work_handler); if (config->int_gpio.port != NULL) { if (!gpio_is_ready_dt(&config->int_gpio)) { LOG_ERR("Interrupt GPIO controller device not ready"); return -ENODEV; } r = gpio_pin_configure_dt(&config->int_gpio, GPIO_INPUT); if (r < 0) { LOG_ERR("Could not confighure interrupt GPIO pin"); return r; } r = gpio_pin_interrupt_configure_dt(&config->int_gpio, GPIO_INT_EDGE_TO_ACTIVE); if (r < 0) { LOG_ERR("Could not configure interrupt GPIO interrupt"); return r; } gpio_init_callback(&data->int_gpio_cb, cap1203_isr_handler, BIT(config->int_gpio.pin)); r = gpio_add_callback(config->int_gpio.port, &data->int_gpio_cb); if (r < 0) { LOG_ERR("Could not set gpio callback"); return r; } r = cap1203_clear_interrupt(&config->i2c); if (r < 0) { LOG_ERR("Could not clear interrupt"); return r; } r = cap1203_enable_interrupt(&config->i2c, true); if (r < 0) { LOG_ERR("Could not configure interrupt"); return r; } } #ifdef CONFIG_INPUT_CAP1203_POLL else { k_timer_init(&data->timer, cap1203_timer_handler, NULL); r = 
cap1203_enable_interrupt(&config->i2c, false); if (r < 0) { LOG_ERR("Could not configure interrupt"); return r; } k_timer_start(&data->timer, K_MSEC(CONFIG_INPUT_CAP1203_PERIOD), K_MSEC(CONFIG_INPUT_CAP1203_PERIOD)); } #endif return 0; } #define CAP1203_INIT(index) \ static const uint16_t cap1203_input_codes_##inst[] = DT_INST_PROP(index, input_codes); \ BUILD_ASSERT(DT_INST_PROP_LEN(index, input_codes) == TOUCH_INPUT_COUNT); \ static const struct cap1203_config cap1203_config_##index = { \ .i2c = I2C_DT_SPEC_INST_GET(index), \ .int_gpio = GPIO_DT_SPEC_INST_GET_OR(index, int_gpios, {0}), \ .input_codes = cap1203_input_codes_##inst, \ }; \ static struct cap1203_data cap1203_data_##index; \ DEVICE_DT_INST_DEFINE(index, cap1203_init, NULL, &cap1203_data_##index, \ &cap1203_config_##index, POST_KERNEL, CONFIG_INPUT_INIT_PRIORITY, \ NULL); DT_INST_FOREACH_STATUS_OKAY(CAP1203_INIT) ```
/content/code_sandbox/drivers/input/input_cap1203.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,468
```unknown if INPUT menu "Input drivers" # zephyr-keep-sorted-start source "drivers/input/Kconfig.adc_keys" source "drivers/input/Kconfig.analog_axis" source "drivers/input/Kconfig.cap1203" source "drivers/input/Kconfig.cf1133" source "drivers/input/Kconfig.chsc6x" source "drivers/input/Kconfig.cst816s" source "drivers/input/Kconfig.esp32" source "drivers/input/Kconfig.evdev" source "drivers/input/Kconfig.ft5336" source "drivers/input/Kconfig.gpio_kbd_matrix" source "drivers/input/Kconfig.gpio_keys" source "drivers/input/Kconfig.gpio_qdec" source "drivers/input/Kconfig.gt911" source "drivers/input/Kconfig.it8xxx2" source "drivers/input/Kconfig.kbd_matrix" source "drivers/input/Kconfig.npcx" source "drivers/input/Kconfig.pat912x" source "drivers/input/Kconfig.paw32xx" source "drivers/input/Kconfig.pinnacle" source "drivers/input/Kconfig.pmw3610" source "drivers/input/Kconfig.sbus" source "drivers/input/Kconfig.sdl" source "drivers/input/Kconfig.stmpe811" source "drivers/input/Kconfig.xec" source "drivers/input/Kconfig.xpt2046" # zephyr-keep-sorted-stop endmenu # Input Drivers endif # INPUT ```
/content/code_sandbox/drivers/input/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
293
```unknown config NATIVE_LINUX_EVDEV bool "Native Linux evdev based input device" default y depends on DT_HAS_ZEPHYR_NATIVE_LINUX_EVDEV_ENABLED depends on ARCH_POSIX depends on MULTITHREADING help Enable reading input from a Linux evdev device, requires specifying an evdev device path in the --evdev command line argument. if NATIVE_LINUX_EVDEV config NATIVE_LINUX_EVDEV_THREAD_PRIORITY int "Priority for the Linux evdev thread" default 0 help Priority level of the internal thread handling Linux input events. config NATIVE_LINUX_THREAD_SLEEP_MS int "Sleep period for the Linux evdev thread" default 10 help How long to sleep between checking for new events in the Linux input events thread. endif ```
/content/code_sandbox/drivers/input/Kconfig.evdev
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
172
```unknown config INPUT_GPIO_KEYS bool "GPIO Keys input driver" default y depends on DT_HAS_GPIO_KEYS_ENABLED depends on GPIO help Enable support for GPIO Keys input driver. ```
/content/code_sandbox/drivers/input/Kconfig.gpio_keys
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
42
```unknown config INPUT_SBUS bool "SBUS driver" default y depends on DT_HAS_FUTABA_SBUS_ENABLED depends on UART_INTERRUPT_DRIVEN select UART_USE_RUNTIME_CONFIGURE help Enable driver for SBUS Remote controller. if INPUT_SBUS config INPUT_SBUS_THREAD_STACK_SIZE int "Stack size for the sbus thread" default 1024 help Size of the stack used for the sbus thread. config INPUT_SBUS_THREAD_PRIORITY int "Priority for the sbus thread" default 0 help Priority level of the sbus thread. config INPUT_SBUS_REPORT_FILTER int "Minimal change in signal to report" default 1 help SBUS tends to be a bit noisy you can increase the threshold to lower the amounts of input events. Set to 0 for no filtering config INPUT_SBUS_SEND_SYNC bool "Send Sync to input subsys on each SBUS frame" default y help Sends sync message to input subsys with sync bit. config INPUT_SBUS_CHANNEL_VALUE_ONE int "Threshold value > for INPUT_EV_KEY value 1" default 1800 help SBUS sends analogue values for digital switches. This config value sets the threshold to interperted the analogue value as an logic 1 config INPUT_SBUS_CHANNEL_VALUE_ZERO int "Threshold value < for INPUT_EV_KEY value 0" default 1200 help SBUS sends analogue values for digital switches. This config value sets the threshold to interperted the analogue value as an logic 0 endif # INPUT_SBUS ```
/content/code_sandbox/drivers/input/Kconfig.sbus
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
343
```unknown config INPUT_ANALOG_AXIS bool "ADC based analog axis input driver" default y depends on DT_HAS_ANALOG_AXIS_ENABLED depends on MULTITHREADING select ADC help ADC based analog axis input driver if INPUT_ANALOG_AXIS config INPUT_ANALOG_AXIS_THREAD_STACK_SIZE int "Stack size for the analog axis thread" default 762 help Size of the stack used for the analog axis thread. config INPUT_ANALOG_AXIS_THREAD_PRIORITY int "Priority for the analog axis thread" default 0 help Priority level of the analog axis thread. config INPUT_ANALOG_AXIS_SETTINGS bool "Analog axis settings support" default y depends on SETTINGS help Settings support for the analog axis driver, exposes a analog_axis_calibration_save() function to save the calibration into settings and load them automatically on startup. config INPUT_ANALOG_AXIS_SETTINGS_MAX_AXES int "Maximum number of axes supported in the settings." default 8 help Maximum number of axes that can have calibration value saved in settings. endif ```
/content/code_sandbox/drivers/input/Kconfig.analog_axis
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
229
```unknown config INPUT_KBD_MATRIX bool depends on MULTITHREADING help Enable library used for keyboard matrix drivers. if INPUT_KBD_MATRIX config INPUT_KBD_MATRIX_THREAD_STACK_SIZE int "Stack size for the keyboard matrix thread" default 1024 help Size of the stack used for the keyboard matrix thread. config INPUT_KBD_MATRIX_THREAD_PRIORITY int "Priority for the keyboard matrix thread" default 0 help Priority level of the keyboard matrix thread. config INPUT_KBD_MATRIX_16_BIT_ROW bool "16 bit row size support" help Use a 16 bit type for the internal structure, allow using a matrix with up to 16 rows if the driver supports it. config INPUT_KBD_ACTUAL_KEY_MASK_DYNAMIC bool "Allow runtime changes to the actual key mask" help If enabled, the actual-key-mask devicetree property data is stored in RAM, and a input_kbd_matrix_actual_key_mask_set() function is available to change the content at runtime. config INPUT_SHELL_KBD_MATRIX_STATE bool "Input kbd_matrix_state shell command" depends on INPUT_SHELL help Enable an input kbd_matrix_state shell command to log the state of a keyboard matrix device. config INPUT_SHELL_KBD_MATRIX_STATE_MAX_COLS int "Maximum column count for the kbd_matrix_state command" default 32 depends on INPUT_SHELL_KBD_MATRIX_STATE help Maximum column count for a device processed by the input kbd_matrix_state shell command. config INPUT_KBD_DRIVE_COLUMN_HOOK bool help Call an application specific hook after the driver specific drive_column implementation. The application must implement the input_kbd_matrix_drive_column_hook function. endif # INPUT_KBD_MATRIX ```
/content/code_sandbox/drivers/input/Kconfig.kbd_matrix
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
383
```unknown config INPUT_PAT912X bool "PAT912X miniature optical navigation chip input driver" default y depends on DT_HAS_PIXART_PAT912X_ENABLED select I2C help PAT912X miniature optical navigation chip input driver ```
/content/code_sandbox/drivers/input/Kconfig.pat912x
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
55
```c /* * */ #include <errno.h> #include <fcntl.h> #include <linux/input.h> #include <nsi_tracing.h> #include <string.h> #include <unistd.h> #include "linux_evdev_bottom.h" int linux_evdev_read(int fd, uint16_t *type, uint16_t *code, int32_t *value) { struct input_event ev; int ret; ret = read(fd, &ev, sizeof(ev)); if (ret < 0) { if (errno == EAGAIN || errno == EINTR) { return NATIVE_LINUX_EVDEV_NO_DATA; } nsi_print_warning("Read error: %s", strerror(errno)); return -EIO; } else if (ret < sizeof(ev)) { nsi_print_warning("Unexpected read size: %d, expecting %d", ret, sizeof(ev)); return -EIO; } *type = ev.type; *code = ev.code; *value = ev.value; return 0; } int linux_evdev_open(const char *path) { int fd; fd = open(path, O_RDONLY | O_NONBLOCK); if (fd < 0) { nsi_print_error_and_exit( "Failed to open the evdev device %s: %s\n", path, strerror(errno)); } return fd; } ```
/content/code_sandbox/drivers/input/linux_evdev_bottom.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
287
```c /* * */ #define DT_DRV_COMPAT gpio_qdec #include <stdint.h> #include <stdlib.h> #include <zephyr/device.h> #include <zephyr/drivers/gpio.h> #include <zephyr/input/input.h> #include <zephyr/kernel.h> #include <zephyr/pm/device.h> #include <zephyr/pm/device_runtime.h> #include <zephyr/sys/atomic.h> #include <zephyr/sys/util.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(input_gpio_qdec, CONFIG_INPUT_LOG_LEVEL); #define GPIO_QDEC_GPIO_NUM 2 struct gpio_qdec_config { struct gpio_dt_spec ab_gpio[GPIO_QDEC_GPIO_NUM]; const struct gpio_dt_spec *led_gpio; uint8_t led_gpio_count; uint32_t led_pre_us; uint32_t sample_time_us; uint32_t idle_poll_time_us; uint32_t idle_timeout_ms; uint16_t axis; uint8_t steps_per_period; }; struct gpio_qdec_data { const struct device *dev; struct k_timer sample_timer; uint8_t prev_step; int32_t acc; struct k_work event_work; struct k_work_delayable idle_work; struct gpio_callback gpio_cb; atomic_t polling; #ifdef CONFIG_PM_DEVICE atomic_t suspended; #endif }; /* Positive transitions */ #define QDEC_LL_LH 0x01 #define QDEC_LH_HH 0x13 #define QDEC_HH_HL 0x32 #define QDEC_HL_LL 0x20 /* Negative transitions */ #define QDEC_LL_HL 0x02 #define QDEC_LH_LL 0x10 #define QDEC_HH_LH 0x31 #define QDEC_HL_HH 0x23 static void gpio_qdec_irq_setup(const struct device *dev, bool enable) { const struct gpio_qdec_config *cfg = dev->config; gpio_flags_t flags = enable ? 
GPIO_INT_EDGE_BOTH : GPIO_INT_DISABLE; int ret; for (int i = 0; i < GPIO_QDEC_GPIO_NUM; i++) { const struct gpio_dt_spec *gpio = &cfg->ab_gpio[i]; ret = gpio_pin_interrupt_configure_dt(gpio, flags); if (ret != 0) { LOG_ERR("Pin %d interrupt configuration failed: %d", i, ret); return; } } } static bool gpio_qdec_idle_polling_mode(const struct device *dev) { const struct gpio_qdec_config *cfg = dev->config; if (cfg->idle_poll_time_us > 0) { return true; } return false; } static void gpio_qdec_poll_mode(const struct device *dev) { const struct gpio_qdec_config *cfg = dev->config; struct gpio_qdec_data *data = dev->data; if (!gpio_qdec_idle_polling_mode(dev)) { gpio_qdec_irq_setup(dev, false); } k_timer_start(&data->sample_timer, K_NO_WAIT, K_USEC(cfg->sample_time_us)); atomic_set(&data->polling, 1); LOG_DBG("polling start"); } static void gpio_qdec_idle_mode(const struct device *dev) { const struct gpio_qdec_config *cfg = dev->config; struct gpio_qdec_data *data = dev->data; if (gpio_qdec_idle_polling_mode(dev)) { k_timer_start(&data->sample_timer, K_NO_WAIT, K_USEC(cfg->idle_poll_time_us)); } else { k_timer_stop(&data->sample_timer); gpio_qdec_irq_setup(dev, true); } atomic_set(&data->polling, 0); LOG_DBG("polling stop"); } static uint8_t gpio_qdec_get_step(const struct device *dev) { const struct gpio_qdec_config *cfg = dev->config; uint8_t step = 0x00; if (gpio_qdec_idle_polling_mode(dev)) { for (int i = 0; i < cfg->led_gpio_count; i++) { gpio_pin_set_dt(&cfg->led_gpio[i], 1); } k_busy_wait(cfg->led_pre_us); } if (gpio_pin_get_dt(&cfg->ab_gpio[0])) { step |= 0x01; } if (gpio_pin_get_dt(&cfg->ab_gpio[1])) { step |= 0x02; } if (gpio_qdec_idle_polling_mode(dev)) { for (int i = 0; i < cfg->led_gpio_count; i++) { gpio_pin_set_dt(&cfg->led_gpio[i], 0); } } return step; } static void gpio_qdec_sample_timer_timeout(struct k_timer *timer) { const struct device *dev = k_timer_user_data_get(timer); const struct gpio_qdec_config *cfg = dev->config; struct gpio_qdec_data *data 
= dev->data; int8_t delta = 0; unsigned int key; uint8_t step; #ifdef CONFIG_PM_DEVICE if (atomic_get(&data->suspended) == 1) { return; } #endif step = gpio_qdec_get_step(dev); if (data->prev_step == step) { return; } if (gpio_qdec_idle_polling_mode(dev) && atomic_get(&data->polling) == 0) { gpio_qdec_poll_mode(dev); } switch ((data->prev_step << 4U) | step) { case QDEC_LL_LH: case QDEC_LH_HH: case QDEC_HH_HL: case QDEC_HL_LL: delta = 1; break; case QDEC_LL_HL: case QDEC_LH_LL: case QDEC_HH_LH: case QDEC_HL_HH: delta = -1; break; default: LOG_WRN("%s: lost steps", dev->name); } data->prev_step = step; key = irq_lock(); data->acc += delta; irq_unlock(key); if (abs(data->acc) >= cfg->steps_per_period) { k_work_submit(&data->event_work); } k_work_reschedule(&data->idle_work, K_MSEC(cfg->idle_timeout_ms)); } static void gpio_qdec_event_worker(struct k_work *work) { struct gpio_qdec_data *data = CONTAINER_OF( work, struct gpio_qdec_data, event_work); const struct device *dev = data->dev; const struct gpio_qdec_config *cfg = dev->config; unsigned int key; int32_t acc; key = irq_lock(); acc = data->acc / cfg->steps_per_period; data->acc -= acc * cfg->steps_per_period; irq_unlock(key); if (acc != 0) { input_report_rel(data->dev, cfg->axis, acc, true, K_FOREVER); } } static void gpio_qdec_idle_worker(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct gpio_qdec_data *data = CONTAINER_OF( dwork, struct gpio_qdec_data, idle_work); const struct device *dev = data->dev; gpio_qdec_idle_mode(dev); } static void gpio_qdec_cb(const struct device *gpio_dev, struct gpio_callback *cb, uint32_t pins) { struct gpio_qdec_data *data = CONTAINER_OF( cb, struct gpio_qdec_data, gpio_cb); const struct device *dev = data->dev; gpio_qdec_poll_mode(dev); } static int gpio_qdec_init(const struct device *dev) { const struct gpio_qdec_config *cfg = dev->config; struct gpio_qdec_data *data = dev->data; int ret; data->dev = dev; 
k_work_init(&data->event_work, gpio_qdec_event_worker); k_work_init_delayable(&data->idle_work, gpio_qdec_idle_worker); k_timer_init(&data->sample_timer, gpio_qdec_sample_timer_timeout, NULL); k_timer_user_data_set(&data->sample_timer, (void *)dev); gpio_init_callback(&data->gpio_cb, gpio_qdec_cb, BIT(cfg->ab_gpio[0].pin) | BIT(cfg->ab_gpio[1].pin)); for (int i = 0; i < GPIO_QDEC_GPIO_NUM; i++) { const struct gpio_dt_spec *gpio = &cfg->ab_gpio[i]; if (!gpio_is_ready_dt(gpio)) { LOG_ERR("%s is not ready", gpio->port->name); return -ENODEV; } ret = gpio_pin_configure_dt(gpio, GPIO_INPUT); if (ret != 0) { LOG_ERR("Pin %d configuration failed: %d", i, ret); return ret; } if (gpio_qdec_idle_polling_mode(dev)) { continue; } ret = gpio_add_callback_dt(gpio, &data->gpio_cb); if (ret < 0) { LOG_ERR("Could not set gpio callback"); return ret; } } for (int i = 0; i < cfg->led_gpio_count; i++) { const struct gpio_dt_spec *gpio = &cfg->led_gpio[i]; gpio_flags_t mode; if (!gpio_is_ready_dt(gpio)) { LOG_ERR("%s is not ready", gpio->port->name); return -ENODEV; } mode = gpio_qdec_idle_polling_mode(dev) ? GPIO_OUTPUT_INACTIVE : GPIO_OUTPUT_ACTIVE; ret = gpio_pin_configure_dt(gpio, mode); if (ret != 0) { LOG_ERR("Pin %d configuration failed: %d", i, ret); return ret; } } data->prev_step = gpio_qdec_get_step(dev); gpio_qdec_idle_mode(dev); ret = pm_device_runtime_enable(dev); if (ret < 0) { LOG_ERR("Failed to enable runtime power management"); return ret; } LOG_DBG("Device %s initialized", dev->name); return 0; } #ifdef CONFIG_PM_DEVICE static void gpio_qdec_pin_suspend(const struct device *dev, bool suspend) { const struct gpio_qdec_config *cfg = dev->config; gpio_flags_t mode = suspend ? 
GPIO_DISCONNECTED : GPIO_INPUT; int ret; for (int i = 0; i < GPIO_QDEC_GPIO_NUM; i++) { const struct gpio_dt_spec *gpio = &cfg->ab_gpio[i]; ret = gpio_pin_configure_dt(gpio, mode); if (ret != 0) { LOG_ERR("Pin %d configuration failed: %d", i, ret); return; } } for (int i = 0; i < cfg->led_gpio_count; i++) { if (suspend) { gpio_pin_set_dt(&cfg->led_gpio[i], 0); } else if (!gpio_qdec_idle_polling_mode(dev)) { gpio_pin_set_dt(&cfg->led_gpio[i], 1); } } } static int gpio_qdec_pm_action(const struct device *dev, enum pm_device_action action) { struct gpio_qdec_data *data = dev->data; switch (action) { case PM_DEVICE_ACTION_SUSPEND: struct k_work_sync sync; atomic_set(&data->suspended, 1); k_work_cancel_delayable_sync(&data->idle_work, &sync); if (!gpio_qdec_idle_polling_mode(dev)) { gpio_qdec_irq_setup(dev, false); } k_timer_stop(&data->sample_timer); gpio_qdec_pin_suspend(dev, true); break; case PM_DEVICE_ACTION_RESUME: atomic_set(&data->suspended, 0); gpio_qdec_pin_suspend(dev, false); data->prev_step = gpio_qdec_get_step(dev); data->acc = 0; gpio_qdec_idle_mode(dev); break; default: return -ENOTSUP; } return 0; } #endif #define QDEC_GPIO_INIT(n) \ BUILD_ASSERT(DT_INST_PROP_LEN(n, gpios) == GPIO_QDEC_GPIO_NUM, \ "input_gpio_qdec: gpios must have exactly two entries"); \ \ BUILD_ASSERT(!(DT_INST_NODE_HAS_PROP(n, led_gpios) && \ DT_INST_NODE_HAS_PROP(n, idle_poll_time_us)) || \ DT_INST_NODE_HAS_PROP(n, led_pre_us), \ "led-pre-us must be specified when setting led-gpios and " \ "idle-poll-time-us"); \ \ IF_ENABLED(DT_INST_NODE_HAS_PROP(n, led_gpios), ( \ static const struct gpio_dt_spec gpio_qdec_led_gpio_##n[] = { \ DT_INST_FOREACH_PROP_ELEM_SEP(n, led_gpios, \ GPIO_DT_SPEC_GET_BY_IDX, (,)) \ }; \ )) \ \ static const struct gpio_qdec_config gpio_qdec_cfg_##n = { \ .ab_gpio = { \ GPIO_DT_SPEC_INST_GET_BY_IDX(n, gpios, 0), \ GPIO_DT_SPEC_INST_GET_BY_IDX(n, gpios, 1), \ }, \ IF_ENABLED(DT_INST_NODE_HAS_PROP(n, led_gpios), ( \ .led_gpio = gpio_qdec_led_gpio_##n, \ 
.led_gpio_count = ARRAY_SIZE(gpio_qdec_led_gpio_##n), \ .led_pre_us = DT_INST_PROP_OR(n, led_pre_us, 0), \ )) \ .sample_time_us = DT_INST_PROP(n, sample_time_us), \ .idle_poll_time_us = DT_INST_PROP_OR(n, idle_poll_time_us, 0), \ .idle_timeout_ms = DT_INST_PROP(n, idle_timeout_ms), \ .steps_per_period = DT_INST_PROP(n, steps_per_period), \ .axis = DT_INST_PROP(n, zephyr_axis), \ }; \ \ static struct gpio_qdec_data gpio_qdec_data_##n; \ \ PM_DEVICE_DT_INST_DEFINE(n, gpio_qdec_pm_action); \ \ DEVICE_DT_INST_DEFINE(n, gpio_qdec_init, PM_DEVICE_DT_INST_GET(n), \ &gpio_qdec_data_##n, \ &gpio_qdec_cfg_##n, \ POST_KERNEL, CONFIG_INPUT_INIT_PRIORITY, \ NULL); DT_INST_FOREACH_STATUS_OKAY(QDEC_GPIO_INIT) ```
/content/code_sandbox/drivers/input/input_gpio_qdec.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,150
```c /*
 *
 */

/* Native-sim input driver: forwards Linux evdev events into Zephyr input. */

#define DT_DRV_COMPAT zephyr_native_linux_evdev

#include <cmdline.h>
#include <nsi_host_trampolines.h>
#include <posix_native_task.h>
#include <zephyr/device.h>
#include <zephyr/input/input.h>
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/util.h>

#include "linux_evdev_bottom.h"

LOG_MODULE_REGISTER(linux_evdev, CONFIG_INPUT_LOG_LEVEL);

/* Host-side evdev fd; -1 until linux_evdev_init() opens it. */
static int linux_evdev_fd = -1;
/* Device node path, filled from the mandatory --evdev command-line option. */
static const char *linux_evdev_path;

static struct k_thread linux_evdev_thread;
static K_KERNEL_STACK_DEFINE(linux_evdev_thread_stack,
			     CONFIG_ARCH_POSIX_RECOMMENDED_STACK_SIZE);

/* Register the --evdev=<path> command-line option with the native runner. */
static void linux_evdev_options(void)
{
	static struct args_struct_t linux_evdev_options[] = {
		{
			.is_mandatory = true,
			.option = "evdev",
			.name = "path",
			.type = 's',
			.dest = (void *)&linux_evdev_path,
			.descript = "Path of the evdev device to use",
		},
		ARG_TABLE_ENDMARKER,
	};

	native_add_command_line_opts(linux_evdev_options);
}

/* Abort startup with a helpful message when --evdev was not provided. */
static void linux_evdev_check_arg(void)
{
	if (linux_evdev_path == NULL) {
		posix_print_error_and_exit(
				"Error: evdev device missing.\n"
				"Please specify an evdev device with the --evdev "
				"argument when using CONFIG_NATIVE_LINUX_EVDEV=y\n");
	}
}

/* Exit hook: close the host fd if it was opened. */
static void linux_evdev_cleanup(void)
{
	if (linux_evdev_fd >= 0) {
		nsi_host_close(linux_evdev_fd);
	}
}

NATIVE_TASK(linux_evdev_options, PRE_BOOT_1, 10);
NATIVE_TASK(linux_evdev_check_arg, PRE_BOOT_2, 10);
NATIVE_TASK(linux_evdev_cleanup, ON_EXIT, 10);

/*
 * Reader thread: polls the non-blocking evdev fd, sleeping between empty
 * reads, and reports each event through the Zephyr input subsystem.
 * Terminates on a read error.
 */
static void linux_evdev_thread_fn(void *p1, void *p2, void *p3)
{
	const struct device *dev = p1;
	uint16_t type;
	uint16_t code;
	int32_t value;
	int ret;

	while (true) {
		ret = linux_evdev_read(linux_evdev_fd, &type, &code, &value);
		if (ret == NATIVE_LINUX_EVDEV_NO_DATA) {
			/* Let other threads run. */
			k_sleep(K_MSEC(CONFIG_NATIVE_LINUX_THREAD_SLEEP_MS));
			continue;
		} else if (ret < 0) {
			return;
		}

		LOG_DBG("evdev event: type=%d code=%d val=%d",
			type, code, value);

		if (type == 0) { /* EV_SYN */
			/* Sync marker: flag the report as a sync event. */
			input_report(dev, 0, 0, 0, true, K_FOREVER);
		} else if (type == INPUT_EV_KEY && value == 2) {
			/* nothing, ignore key repeats */
		} else {
			input_report(dev, type, code, value, false, K_FOREVER);
		}
	}
}

/* Open the evdev device and start the reader thread. Always returns 0. */
static int linux_evdev_init(const struct device *dev)
{
	linux_evdev_fd = linux_evdev_open(linux_evdev_path);

	k_thread_create(&linux_evdev_thread, linux_evdev_thread_stack,
			K_KERNEL_STACK_SIZEOF(linux_evdev_thread_stack),
			linux_evdev_thread_fn, (void *)dev, NULL, NULL,
			CONFIG_NATIVE_LINUX_EVDEV_THREAD_PRIORITY,
			0, K_NO_WAIT);

	k_thread_name_set(&linux_evdev_thread, dev->name);

	return 0;
}

BUILD_ASSERT(DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) == 1,
	     "Only one zephyr,native-linux-evdev compatible node is supported");

DEVICE_DT_INST_DEFINE(0, linux_evdev_init, NULL, NULL, NULL,
		      POST_KERNEL, CONFIG_INPUT_INIT_PRIORITY, NULL); ```
/content/code_sandbox/drivers/input/linux_evdev.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
839
```unknown # USB-C configuration options source "drivers/usb_c/tcpc/Kconfig" source "drivers/usb_c/vbus/Kconfig" source "drivers/usb_c/ppc/Kconfig" ```
/content/code_sandbox/drivers/usb_c/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
41
```c /* * */ #define DT_DRV_COMPAT nuvoton_numaker_ppc #include <zephyr/kernel.h> #include <zephyr/drivers/usb_c/usbc_ppc.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/clock_control_numaker.h> #include <zephyr/drivers/reset.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(ppc_numaker, CONFIG_USBC_LOG_LEVEL); #include <soc.h> #include <NuMicro.h> #include "../tcpc/ucpd_numaker.h" /* Implementation notes on NuMaker TCPC/PPC/VBUS * * PPC and VBUS rely on TCPC/UTCPD and are just pseudo. They are completely * implemented in TCPC/UTCPD. */ /** * @brief Immutable device context */ struct numaker_ppc_config { const struct device *tcpc_dev; }; /** * @brief Initializes the usb-c ppc driver * * @retval 0 on success * @retval -ENODEV if dependent TCPC device is not ready */ static int numaker_ppc_init(const struct device *dev) { const struct numaker_ppc_config *const config = dev->config; const struct device *tcpc_dev = config->tcpc_dev; /* Rely on TCPC */ if (!device_is_ready(tcpc_dev)) { LOG_ERR("TCPC device not ready"); return -ENODEV; } return 0; } /** * @brief Check if PPC is in the dead battery mode * * @retval 1 if PPC is in the dead battery mode * @retval 0 if PPC is not in the dead battery mode * @retval -EIO if on failure */ static int numaker_ppc_is_dead_battery_mode(const struct device *dev) { const struct numaker_ppc_config *const config = dev->config; const struct device *tcpc_dev = config->tcpc_dev; return numaker_tcpc_ppc_is_dead_battery_mode(tcpc_dev); } /** * @brief Request the PPC to exit from the dead battery mode * * @retval 0 if request was successfully sent * @retval -EIO if on failure */ static int numaker_ppc_exit_dead_battery_mode(const struct device *dev) { const struct numaker_ppc_config *const config = dev->config; const struct device *tcpc_dev = config->tcpc_dev; return numaker_tcpc_ppc_exit_dead_battery_mode(tcpc_dev); } /** * @brief Check if the PPC is sourcing the 
VBUS * * @retval 1 if the PPC is sourcing the VBUS * @retval 0 if the PPC is not sourcing the VBUS * @retval -EIO on failure */ static int numaker_ppc_is_vbus_source(const struct device *dev) { const struct numaker_ppc_config *const config = dev->config; const struct device *tcpc_dev = config->tcpc_dev; return numaker_tcpc_ppc_is_vbus_source(tcpc_dev); } /** * @brief Check if the PPC is sinking the VBUS * * @retval 1 if the PPC is sinking the VBUS * @retval 0 if the PPC is not sinking the VBUS * @retval -EIO on failure */ static int numaker_ppc_is_vbus_sink(const struct device *dev) { const struct numaker_ppc_config *const config = dev->config; const struct device *tcpc_dev = config->tcpc_dev; return numaker_tcpc_ppc_is_vbus_sink(tcpc_dev); } /** * @brief Set the state of VBUS sinking * * @retval 0 if success * @retval -EIO on failure */ static int numaker_ppc_set_snk_ctrl(const struct device *dev, bool enable) { const struct numaker_ppc_config *const config = dev->config; const struct device *tcpc_dev = config->tcpc_dev; return numaker_tcpc_ppc_set_snk_ctrl(tcpc_dev, enable); } /** * @brief Set the state of VBUS sourcing * * @retval 0 if success * @retval -EIO on failure */ static int numaker_ppc_set_src_ctrl(const struct device *dev, bool enable) { const struct numaker_ppc_config *const config = dev->config; const struct device *tcpc_dev = config->tcpc_dev; return numaker_tcpc_ppc_set_src_ctrl(tcpc_dev, enable); } /** * @brief Set the state of VBUS discharging * * @retval 0 if success * @retval -EIO on failure */ static int numaker_ppc_set_vbus_discharge(const struct device *dev, bool enable) { const struct numaker_ppc_config *const config = dev->config; const struct device *tcpc_dev = config->tcpc_dev; return numaker_tcpc_ppc_set_vbus_discharge(tcpc_dev, enable); } /** * @brief Check if VBUS is present * * @retval 1 if VBUS voltage is present * @retval 0 if no VBUS voltage is detected * @retval -EIO on failure */ static int numaker_ppc_is_vbus_present(const 
struct device *dev) { const struct numaker_ppc_config *const config = dev->config; const struct device *tcpc_dev = config->tcpc_dev; return numaker_tcpc_ppc_is_vbus_present(tcpc_dev); } /** * @brief Set the callback used to notify about PPC events * * @retval 0 if success */ static int numaker_ppc_set_event_handler(const struct device *dev, usbc_ppc_event_cb_t handler, void *data) { const struct numaker_ppc_config *const config = dev->config; const struct device *tcpc_dev = config->tcpc_dev; return numaker_tcpc_ppc_set_event_handler(tcpc_dev, handler, data); } /** * @brief Print the values or PPC registers * * @retval 0 if success * @retval -EIO on failure */ static int numaker_ppc_dump_regs(const struct device *dev) { const struct numaker_ppc_config *const config = dev->config; const struct device *tcpc_dev = config->tcpc_dev; return numaker_tcpc_ppc_dump_regs(tcpc_dev); } static const struct usbc_ppc_driver_api numaker_ppc_driver_api = { .is_dead_battery_mode = numaker_ppc_is_dead_battery_mode, .exit_dead_battery_mode = numaker_ppc_exit_dead_battery_mode, .is_vbus_source = numaker_ppc_is_vbus_source, .is_vbus_sink = numaker_ppc_is_vbus_sink, .set_snk_ctrl = numaker_ppc_set_snk_ctrl, .set_src_ctrl = numaker_ppc_set_src_ctrl, .set_vbus_discharge = numaker_ppc_set_vbus_discharge, .is_vbus_present = numaker_ppc_is_vbus_present, .set_event_handler = numaker_ppc_set_event_handler, .dump_regs = numaker_ppc_dump_regs, }; #define NUMAKER_TCPC(inst) DT_INST_PARENT(inst) #define PPC_NUMAKER_INIT(inst) \ static const struct numaker_ppc_config numaker_ppc_config_##inst = { \ .tcpc_dev = DEVICE_DT_GET(NUMAKER_TCPC(inst)), \ }; \ \ DEVICE_DT_INST_DEFINE(inst, numaker_ppc_init, NULL, NULL, &numaker_ppc_config_##inst, \ POST_KERNEL, CONFIG_USBC_PPC_INIT_PRIORITY, \ &numaker_ppc_driver_api); DT_INST_FOREACH_STATUS_OKAY(PPC_NUMAKER_INIT); ```
/content/code_sandbox/drivers/usb_c/ppc/usbc_ppc_numaker.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,659
```unknown # NXP NX20P3483 Configuration menu config USBC_PPC_NX20P3483 bool "NXP NX20P3483 support" default y depends on DT_HAS_NXP_NX20P3483_ENABLED help Enable USB-C PPC support for NXP nx20p3483 chip if USBC_PPC_NX20P3483 config USBC_PPC_NX20P3483_DUMP_FULL_REG_NAMES bool "Dump full register names" help Dump human-readable names instead of offsets of registers endif ```
/content/code_sandbox/drivers/usb_c/ppc/Kconfig.nxp
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
121
```c /* * */ #define DT_DRV_COMPAT gpio_kbd_matrix #include <stdint.h> #include <stdlib.h> #include <zephyr/device.h> #include <zephyr/drivers/gpio.h> #include <zephyr/input/input_kbd_matrix.h> #include <zephyr/kernel.h> #include <zephyr/sys/util.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(input_gpio_kbd_matrix, CONFIG_INPUT_LOG_LEVEL); struct gpio_kbd_matrix_config { struct input_kbd_matrix_common_config common; const struct gpio_dt_spec *row_gpio; const struct gpio_dt_spec *col_gpio; struct gpio_callback *gpio_cb; gpio_callback_handler_t gpio_cb_handler; struct k_work_delayable *idle_poll_dwork; k_work_handler_t idle_poll_handler; bool col_drive_inactive; }; struct gpio_kbd_matrix_data { struct input_kbd_matrix_common_data common; uint32_t last_col_state; bool direct_read; bool direct_write; }; INPUT_KBD_STRUCT_CHECK(struct gpio_kbd_matrix_config, struct gpio_kbd_matrix_data); static void gpio_kbd_matrix_drive_column(const struct device *dev, int col) { const struct gpio_kbd_matrix_config *cfg = dev->config; const struct input_kbd_matrix_common_config *common = &cfg->common; struct gpio_kbd_matrix_data *data = dev->data; uint32_t state; if (col == INPUT_KBD_MATRIX_COLUMN_DRIVE_NONE) { state = 0; } else if (col == INPUT_KBD_MATRIX_COLUMN_DRIVE_ALL) { state = BIT_MASK(common->col_size); } else { state = BIT(col); } if (data->direct_write) { const struct gpio_dt_spec *gpio0 = &cfg->col_gpio[0]; gpio_port_pins_t gpio_mask; gpio_port_value_t gpio_val; gpio_mask = BIT_MASK(common->col_size) << gpio0->pin; gpio_val = state << gpio0->pin; gpio_port_set_masked(gpio0->port, gpio_mask, gpio_val); return; } for (int i = 0; i < common->col_size; i++) { const struct gpio_dt_spec *gpio = &cfg->col_gpio[i]; if ((data->last_col_state ^ state) & BIT(i)) { if (cfg->col_drive_inactive) { gpio_pin_set_dt(gpio, state & BIT(i)); } else if (state & BIT(i)) { gpio_pin_configure_dt(gpio, GPIO_OUTPUT_ACTIVE); } else { gpio_pin_configure_dt(gpio, GPIO_INPUT); } } } 
data->last_col_state = state; } static kbd_row_t gpio_kbd_matrix_read_row(const struct device *dev) { const struct gpio_kbd_matrix_config *cfg = dev->config; const struct input_kbd_matrix_common_config *common = &cfg->common; struct gpio_kbd_matrix_data *data = dev->data; kbd_row_t val = 0; if (data->direct_read) { const struct gpio_dt_spec *gpio0 = &cfg->row_gpio[0]; gpio_port_value_t gpio_val; gpio_port_get(gpio0->port, &gpio_val); return (gpio_val >> gpio0->pin) & BIT_MASK(common->row_size); } for (int i = 0; i < common->row_size; i++) { const struct gpio_dt_spec *gpio = &cfg->row_gpio[i]; if (gpio_pin_get_dt(gpio)) { val |= BIT(i); } } return val; } static __maybe_unused void gpio_kbd_matrix_idle_poll_handler(const struct device *dev) { const struct gpio_kbd_matrix_config *cfg = dev->config; const struct input_kbd_matrix_common_config *common = &cfg->common; if (gpio_kbd_matrix_read_row(dev) == 0) { k_work_reschedule(cfg->idle_poll_dwork, K_USEC(common->poll_period_us)); return; } input_kbd_matrix_poll_start(dev); } static void gpio_kbd_matrix_set_detect_mode(const struct device *dev, bool enabled) { const struct gpio_kbd_matrix_config *cfg = dev->config; const struct input_kbd_matrix_common_config *common = &cfg->common; int ret; if (cfg->idle_poll_dwork != NULL) { if (enabled) { k_work_reschedule(cfg->idle_poll_dwork, K_USEC(common->poll_period_us)); } return; } if (cfg->gpio_cb == NULL) { return; } for (int i = 0; i < common->row_size; i++) { const struct gpio_dt_spec *gpio = &cfg->row_gpio[i]; gpio_flags_t flags = enabled ? 
GPIO_INT_EDGE_TO_ACTIVE : GPIO_INT_DISABLE; ret = gpio_pin_interrupt_configure_dt(gpio, flags); if (ret != 0) { LOG_ERR("Pin %d interrupt configuration failed: %d", i, ret); return; } } } static bool gpio_kbd_matrix_is_gpio_coherent( const struct gpio_dt_spec *gpio, int gpio_count) { const struct gpio_dt_spec *gpio0 = &gpio[0]; for (int i = 1; i < gpio_count; i++) { if (gpio[i].port != gpio0->port || gpio[i].dt_flags != gpio0->dt_flags || gpio[i].pin != gpio0->pin + i) { return false; } } return true; } static bool gpio_kbd_continuous_scan_mode(const struct device *dev) { const struct gpio_kbd_matrix_config *cfg = dev->config; if (cfg->gpio_cb == NULL && cfg->idle_poll_dwork == NULL) { return true; } return false; } static int gpio_kbd_matrix_init(const struct device *dev) { const struct gpio_kbd_matrix_config *cfg = dev->config; const struct input_kbd_matrix_common_config *common = &cfg->common; struct gpio_kbd_matrix_data *data = dev->data; int ret; int i; for (i = 0; i < common->col_size; i++) { const struct gpio_dt_spec *gpio = &cfg->col_gpio[i]; if (!gpio_is_ready_dt(gpio)) { LOG_ERR("%s is not ready", gpio->port->name); return -ENODEV; } if (cfg->col_drive_inactive) { ret = gpio_pin_configure_dt(gpio, GPIO_OUTPUT_INACTIVE); } else { ret = gpio_pin_configure_dt(gpio, GPIO_INPUT); } if (ret != 0) { LOG_ERR("Pin %d configuration failed: %d", i, ret); return ret; } } for (i = 0; i < common->row_size; i++) { const struct gpio_dt_spec *gpio = &cfg->row_gpio[i]; struct gpio_callback *gpio_cb; if (!gpio_is_ready_dt(gpio)) { LOG_ERR("%s is not ready", gpio->port->name); return -ENODEV; } ret = gpio_pin_configure_dt(gpio, GPIO_INPUT); if (ret != 0) { LOG_ERR("Pin %d configuration failed: %d", i, ret); return ret; } if (cfg->gpio_cb == NULL) { continue; } gpio_cb = &cfg->gpio_cb[i]; gpio_init_callback(gpio_cb, cfg->gpio_cb_handler, BIT(gpio->pin)); ret = gpio_add_callback_dt(gpio, gpio_cb); if (ret < 0) { LOG_ERR("Could not set gpio callback"); return ret; } } if 
(cfg->idle_poll_dwork != NULL) { k_work_init_delayable(cfg->idle_poll_dwork, cfg->idle_poll_handler); } data->direct_read = gpio_kbd_matrix_is_gpio_coherent( cfg->row_gpio, common->row_size); if (cfg->col_drive_inactive) { data->direct_write = gpio_kbd_matrix_is_gpio_coherent( cfg->col_gpio, common->col_size); } LOG_DBG("direct_read: %d direct_write: %d", data->direct_read, data->direct_write); ret = input_kbd_matrix_common_init(dev); if (ret != 0) { return ret; } if (gpio_kbd_continuous_scan_mode(dev)) { input_kbd_matrix_poll_start(dev); } return 0; } static const struct input_kbd_matrix_api gpio_kbd_matrix_api = { .drive_column = gpio_kbd_matrix_drive_column, .read_row = gpio_kbd_matrix_read_row, .set_detect_mode = gpio_kbd_matrix_set_detect_mode, }; #define INPUT_GPIO_KBD_MATRIX_INIT(n) \ BUILD_ASSERT(DT_INST_PROP_LEN(n, col_gpios) <= 32, "invalid col-size"); \ \ INPUT_KBD_MATRIX_DT_INST_DEFINE_ROW_COL( \ n, DT_INST_PROP_LEN(n, row_gpios), DT_INST_PROP_LEN(n, col_gpios)); \ \ static const struct gpio_dt_spec gpio_kbd_matrix_row_gpio_##n[DT_INST_PROP_LEN( \ n, row_gpios)] = { \ DT_INST_FOREACH_PROP_ELEM_SEP(n, row_gpios, GPIO_DT_SPEC_GET_BY_IDX, (,)) \ }; \ static const struct gpio_dt_spec gpio_kbd_matrix_col_gpio_##n[DT_INST_PROP_LEN( \ n, col_gpios)] = { \ DT_INST_FOREACH_PROP_ELEM_SEP(n, col_gpios, GPIO_DT_SPEC_GET_BY_IDX, (,)) \ }; \ \ IF_ENABLED(DT_INST_ENUM_HAS_VALUE(n, idle_mode, interrupt), ( \ static struct gpio_callback gpio_kbd_matrix_gpio_cb_##n[DT_INST_PROP_LEN(n, row_gpios)];\ static void gpio_kbd_matrix_cb_##n(const struct device *gpio_dev, \ struct gpio_callback *cb, uint32_t pins) \ { \ input_kbd_matrix_poll_start(DEVICE_DT_INST_GET(n)); \ } \ )) \ IF_ENABLED(DT_INST_ENUM_HAS_VALUE(n, idle_mode, poll), ( \ static struct k_work_delayable gpio_kbd_matrix_idle_poll_dwork_##n; \ static void gpio_kbd_matrix_idle_poll_handler_##n(struct k_work *work) \ { \ gpio_kbd_matrix_idle_poll_handler(DEVICE_DT_INST_GET(n)); \ } \ )) \ 
IF_ENABLED(DT_INST_ENUM_HAS_VALUE(n, idle_mode, scan), ( \ BUILD_ASSERT(DT_INST_PROP(n, poll_timeout_ms) == 0, \ "poll-timeout-ms must be set to 0 for scan mode to work correctly"); \ )) \ \ static const struct gpio_kbd_matrix_config gpio_kbd_matrix_cfg_##n = { \ .common = INPUT_KBD_MATRIX_DT_INST_COMMON_CONFIG_INIT_ROW_COL( \ n, &gpio_kbd_matrix_api, \ DT_INST_PROP_LEN(n, row_gpios), DT_INST_PROP_LEN(n, col_gpios)), \ .row_gpio = gpio_kbd_matrix_row_gpio_##n, \ .col_gpio = gpio_kbd_matrix_col_gpio_##n, \ IF_ENABLED(DT_INST_ENUM_HAS_VALUE(n, idle_mode, interrupt), ( \ .gpio_cb = gpio_kbd_matrix_gpio_cb_##n, \ .gpio_cb_handler = gpio_kbd_matrix_cb_##n, \ )) \ IF_ENABLED(DT_INST_ENUM_HAS_VALUE(n, idle_mode, poll), ( \ .idle_poll_dwork = &gpio_kbd_matrix_idle_poll_dwork_##n, \ .idle_poll_handler = gpio_kbd_matrix_idle_poll_handler_##n, \ )) \ .col_drive_inactive = DT_INST_PROP(n, col_drive_inactive), \ }; \ \ static struct gpio_kbd_matrix_data gpio_kbd_matrix_data_##n; \ \ DEVICE_DT_INST_DEFINE(n, gpio_kbd_matrix_init, NULL, \ &gpio_kbd_matrix_data_##n, &gpio_kbd_matrix_cfg_##n, \ POST_KERNEL, CONFIG_INPUT_INIT_PRIORITY, \ NULL); DT_INST_FOREACH_STATUS_OKAY(INPUT_GPIO_KBD_MATRIX_INIT) ```
/content/code_sandbox/drivers/input/input_gpio_kbd_matrix.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,741
```c /* */ #include <zephyr/device.h> #include <zephyr/shell/shell.h> #include <zephyr/drivers/usb_c/usbc_ppc.h> /** Macro used to iterate over USB-C connector and call a function if the node has PPC property */ #define CALL_IF_HAS_PPC(usb_node, func) \ COND_CODE_1(DT_NODE_HAS_PROP(usb_node, ppc), \ (ret |= func(DEVICE_DT_GET(DT_PHANDLE_BY_IDX(usb_node, ppc, 0)));), ()) /** * @brief Command that dumps registers of one or all of the PPCs * * @param sh Shell structure * @param argc Arguments count * @param argv Device name * @return int ORed return values of all the functions executed, 0 in case of success */ static int cmd_ppc_dump(const struct shell *sh, size_t argc, char **argv) { int ret = 0; if (argc <= 1) { DT_FOREACH_STATUS_OKAY_VARGS(usb_c_connector, CALL_IF_HAS_PPC, ppc_dump_regs); } else { const struct device *dev = device_get_binding(argv[1]); ret = ppc_dump_regs(dev); } return ret; } /** * @brief Function used to pretty print status of the PPC * * @param dev Pointer to the PPC device structure */ static int print_status(const struct device *dev) { printk("PPC %s:\n", dev->name); printk(" Dead battery: %d\n", ppc_is_dead_battery_mode(dev)); printk(" Is sourcing: %d\n", ppc_is_vbus_source(dev)); printk(" Is sinking: %d\n", ppc_is_vbus_sink(dev)); printk(" Is VBUS present: %d\n", ppc_is_vbus_present(dev)); return 0; } /** * @brief Command that prints the status of one or all of the PPCs * * @param sh Shell structure * @param argc Arguments count * @param argv Device name * @return int ORed return values of all the functions executed, 0 in case of success */ static int cmd_ppc_status(const struct shell *sh, size_t argc, char **argv) { int ret = 0; if (argc <= 1) { DT_FOREACH_STATUS_OKAY_VARGS(usb_c_connector, CALL_IF_HAS_PPC, print_status); } else { const struct device *dev = device_get_binding(argv[1]); ret = print_status(dev); } return ret; } /** * @brief Command that requests one or all of the PPCs to try exiting the dead battery mode * * @param sh Shell 
structure * @param argc Arguments count * @param argv Device name * @return int ORed return values of all the functions executed, 0 in case of success */ static int cmd_ppc_exit_db(const struct shell *sh, size_t argc, char **argv) { int ret = 0; if (argc <= 1) { DT_FOREACH_STATUS_OKAY_VARGS(usb_c_connector, CALL_IF_HAS_PPC, ppc_exit_dead_battery_mode); } else { const struct device *dev = device_get_binding(argv[1]); ret = ppc_exit_dead_battery_mode(dev); } return ret; } /** * @brief Function used to create subcommands with devices names * * @param idx counter of devices * @param entry shell structure that will be filled */ static void device_name_get(size_t idx, struct shell_static_entry *entry) { const struct device *dev = shell_device_lookup(idx, NULL); entry->syntax = (dev != NULL) ? dev->name : NULL; entry->handler = NULL; entry->help = NULL; entry->subcmd = NULL; } SHELL_DYNAMIC_CMD_CREATE(list_device_names, device_name_get); SHELL_STATIC_SUBCMD_SET_CREATE(sub_ppc_cmds, SHELL_CMD_ARG(dump, &list_device_names, "Dump PPC registers\n" "Usage: ppc dump [<ppc device>]", cmd_ppc_dump, 1, 1), SHELL_CMD_ARG(status, &list_device_names, "Write PPC power status\n" "Usage: ppc statuc [<ppc device>]", cmd_ppc_status, 1, 1), SHELL_CMD_ARG(exitdb, &list_device_names, "Exit from the dead battery mode\n" "Usage: ppc exitdb [<ppc device>]", cmd_ppc_exit_db, 1, 1), SHELL_SUBCMD_SET_END); SHELL_CMD_REGISTER(ppc, &sub_ppc_cmds, "PPC (USB-C PD) diagnostics", NULL); ```
/content/code_sandbox/drivers/usb_c/ppc/shell.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,021
```unknown # Nuvoton NuMaker USB-C PPC device configuration options config USBC_PPC_NUMAKER bool "Nuvoton NuMaker USB-C PPC" default y depends on DT_HAS_NUVOTON_NUMAKER_PPC_ENABLED && USBC_TCPC_NUMAKER help Enable USB-C PPC support for Nuvoton NuMaker chip with UTCPD. ```
/content/code_sandbox/drivers/usb_c/ppc/Kconfig.numaker
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
78
```unknown # Power path controllers configuration options menuconfig USBC_PPC_DRIVER bool "USB-C PPC drivers" help Enable USB-C Power Path Controllers support if USBC_PPC_DRIVER config USBC_PPC_INIT_PRIORITY int "USBC PPC driver init priority" default 82 help Initialization priority of the USB-C PPC drivers in POST_KERNEL. config USBC_PPC_SHELL bool "Shell commands for PPC" help Add useful shell commands to manipulate and debug the PPCs source "drivers/usb_c/ppc/Kconfig.nxp" source "drivers/usb_c/ppc/Kconfig.numaker" module = USBC_PPC module-str = usbc-ppc source "subsys/logging/Kconfig.template.log_config" endif # USBC_PPC_DRIVER ```
/content/code_sandbox/drivers/usb_c/ppc/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
168
```objective-c /* */ /** * @file * @brief NX20P3483 PPC registers definitions */ #ifndef ZEPHYR_DRIVERS_USBC_PPC_NXP_NX20P3483_PRIV_H_ #define ZEPHYR_DRIVERS_USBC_PPC_NXP_NX20P3483_PRIV_H_ #include<zephyr/dt-bindings/usb-c/nxp_nx20p3483.h> /** Register address - device id */ #define NX20P3483_REG_DEVICE_ID 0x00 /** Bit mask for vendor id */ #define NX20P3483_REG_DEVICE_ID_VENDOR_MASK GENMASK(7, 3) /** Bit mask for version id */ #define NX20P3483_REG_DEVICE_ID_REVISION_MASK GENMASK(2, 0) /** Register address - device status */ #define NX20P3483_REG_DEVICE_STATUS 0x01 /** Bit mask for device mode */ #define NX20P3483_REG_DEVICE_STATUS_MODE_MASK GENMASK(2, 0) /** Value for dead battery mode */ #define NX20P3483_MODE_DEAD_BATTERY 0 /** Value for high-voltage sink mode */ #define NX20P3483_MODE_HV_SNK 1 /** Value for 5V source mode */ #define NX20P3483_MODE_5V_SRC 2 /** Value for high-voltage source mode */ #define NX20P3483_MODE_HV_SRC 3 /** Value for standby mode */ #define NX20P3483_MODE_STANDBY 4 /** Register address - switch control */ #define NX20P3483_REG_SWITCH_CTRL 0x02 /** Bit field for source path selection. If set, HV source path is selected, 5V otherwise. 
*/ #define NX20P3483_REG_SWITCH_CTRL_SRC BIT(7) /** Register address - switch status */ #define NX20P3483_REG_SWITCH_STATUS 0x03 /** Bit field for 5V source switch enabled */ #define NX20P3483_REG_SWITCH_STATUS_5VSRC BIT(2) /** Bit field for HV source switch enabled */ #define NX20P3483_REG_SWITCH_STATUS_HVSRC BIT(1) /** Bit field for HV sink switch enabled */ #define NX20P3483_REG_SWITCH_STATUS_HVSNK BIT(0) /** Register address - interrupt1 */ #define NX20P3483_REG_INT1 0x04 /** Bit field for exit dead battery error */ #define NX20P3483_REG_INT1_DBEXIT_ERR BIT(7) /** Bit field for overvoltage fault triggered on 5V source path */ #define NX20P3483_REG_INT1_OV_5VSRC BIT(4) /** Bit field for reverse current fault triggered on 5V source path */ #define NX20P3483_REG_INT1_RCP_5VSRC BIT(3) /** Bit field for short circuit fault triggered on 5V source path */ #define NX20P3483_REG_INT1_SC_5VSRC BIT(2) /** Bit field for overcurrent fault triggered on 5V source path */ #define NX20P3483_REG_INT1_OC_5VSRC BIT(1) /** Bit field for over temperature protection fault triggered */ #define NX20P3483_REG_INT1_OTP BIT(0) /** Register address - interrupt2*/ #define NX20P3483_REG_INT2 0x05 /** Bit field for sink and source routes enabled fault */ #define NX20P3483_REG_INT2_EN_ERR BIT(7) /** Bit field for reverse current fault triggered on HV sink path */ #define NX20P3483_REG_INT2_RCP_HVSNK BIT(6) /** Bit field for short circuit fault triggered on HV sink path */ #define NX20P3483_REG_INT2_SC_HVSNK BIT(5) /** Bit field for overvoltage fault triggered on HV sink path */ #define NX20P3483_REG_INT2_OV_HVSNK BIT(4) /** Bit field for reverse current fault triggered on HV source path */ #define NX20P3483_REG_INT2_RCP_HVSRC BIT(3) /** Bit field for short circuit fault triggered on HV source path */ #define NX20P3483_REG_INT2_SC_HVSRC BIT(2) /** Bit field for overcurrent fault triggered on HV source path */ #define NX20P3483_REG_INT2_OC_HVSRC BIT(1) /** Bit field for overvoltage fault 
triggered on HV source path */ #define NX20P3483_REG_INT2_OV_HVSRC BIT(0) /** Register address - interrupt1 mask */ #define NX20P3483_REG_INT1_MASK 0x06 /** Register address - interrupt2 mask*/ #define NX20P3483_REG_INT2_MASK 0x07 /** Register address - OVLO threshold (overvoltage threshold) */ #define NX20P3483_REG_OVLO_THRESHOLD 0x08 /** * Bit mask for overvoltage threshold value * Values used in this register are defined as NX20P3483_U_THRESHOLD_* */ #define NX20P3483_REG_OVLO_THRESHOLD_MASK GENMASK(2, 0) /* Internal 5V VBUS Switch Current Limit Settings (min) */ #define NX20P3483_ILIM_MASK 0xF /** * Register address - HV source switch OCP threshold * Values used in this register are defined as NX20P3483_I_THRESHOLD_* */ #define NX20P3483_REG_HV_SRC_OCP_THRESHOLD 0x09 /** * Register address - 5V source switch OCP threshold * Values used in this register are defined as NX20P3483_I_THRESHOLD_* */ #define NX20P3483_REG_5V_SRC_OCP_THRESHOLD 0x0A /** Register address - device control */ #define NX20P3483_REG_DEVICE_CTRL 0x0B /** Bit field for fast role swap capability activated */ #define NX20P3483_REG_DEVICE_CTRL_FRS_AT BIT(3) /** Bit field for exit dead battery mode */ #define NX20P3483_REG_DEVICE_CTRL_DB_EXIT BIT(2) /** Bit field for VBUS discharge circuit enabled */ #define NX20P3483_REG_DEVICE_CTRL_VBUSDIS_EN BIT(1) /** Bit field for LDO shutdown */ #define NX20P3483_REG_DEVICE_CTRL_LDO_SD BIT(0) #endif /* ZEPHYR_DRIVERS_USBC_PPC_NXP_NX20P3483_PRIV_H_ */ ```
/content/code_sandbox/drivers/usb_c/ppc/nxp_nx20p3483_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,377
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_USBC_DEVICE_UCPD_STM32_PRIV_H_ #define ZEPHYR_DRIVERS_USBC_DEVICE_UCPD_STM32_PRIV_H_ #include <zephyr/kernel.h> #include <zephyr/sys/util.h> #include <zephyr/drivers/usb_c/usbc_tcpc.h> #include <zephyr/drivers/pinctrl.h> #include <stm32_ll_ucpd.h> /** * @brief The packet type(SOP*) consists of 2-bytes */ #define PACKET_TYPE_SIZE 2 /** * @brief The message header consists of 2-bytes */ #define MSG_HEADER_SIZE 2 /** * @brief USB PD message buffer length. */ #define UCPD_BUF_LEN (PD_MAX_EXTENDED_MSG_LEN + \ PACKET_TYPE_SIZE + \ MSG_HEADER_SIZE) /** * @brief UCPD alert mask used for enabling alerts * used to receive a message */ #define UCPD_IMR_RX_INT_MASK (UCPD_IMR_RXNEIE | \ UCPD_IMR_RXORDDETIE | \ UCPD_IMR_RXHRSTDETIE | \ UCPD_IMR_RXOVRIE | \ UCPD_IMR_RXMSGENDIE) /** * @brief UCPD alert mask used for clearing alerts * used to receive a message */ #define UCPD_ICR_RX_INT_MASK (UCPD_ICR_RXORDDETCF | \ UCPD_ICR_RXHRSTDETCF | \ UCPD_ICR_RXOVRCF | \ UCPD_ICR_RXMSGENDCF) /** * @brief UCPD alert mask used for enabling alerts * used to transmit a message */ #define UCPD_IMR_TX_INT_MASK (UCPD_IMR_TXISIE | \ UCPD_IMR_TXMSGDISCIE | \ UCPD_IMR_TXMSGSENTIE | \ UCPD_IMR_TXMSGABTIE | \ UCPD_IMR_TXUNDIE) /** * @brief UCPD alert mask used for clearing alerts * used to transmit a message */ #define UCPD_ICR_TX_INT_MASK (UCPD_ICR_TXMSGDISCCF | \ UCPD_ICR_TXMSGSENTCF | \ UCPD_ICR_TXMSGABTCF | \ UCPD_ICR_TXUNDCF) /** * @brief UCPD alert mask for all alerts */ #define UCPD_ICR_ALL_INT_MASK (UCPD_ICR_FRSEVTCF | \ UCPD_ICR_TYPECEVT2CF | \ UCPD_ICR_TYPECEVT1CF | \ UCPD_ICR_RXMSGENDCF | \ UCPD_ICR_RXOVRCF | \ UCPD_ICR_RXHRSTDETCF | \ UCPD_ICR_RXORDDETCF | \ UCPD_ICR_TXUNDCF | \ UCPD_ICR_HRSTSENTCF | \ UCPD_ICR_HRSTDISCCF | \ UCPD_ICR_TXMSGABTCF | \ UCPD_ICR_TXMSGSENTCF | \ UCPD_ICR_TXMSGDISCCF) /** * @brief For STM32G0X devices, this macro enables * Dead Battery functionality */ #define UCPD_CR_DBATTEN BIT(15) /** * @brief Map UCPD ANASUB value 
to TCPC RP value * * @param r UCPD ANASUB value */ #define UCPD_ANASUB_TO_RP(r) ((r - 1) & 0x3) /** * @brief Map TCPC RP value to UCPD ANASUB value * * @param r TCPC RP value */ #define UCPD_RP_TO_ANASUB(r) ((r + 1) & 0x3) /** * @brief Create value for writing to UCPD CR ANASUBMOD */ #define STM32_UCPD_CR_ANASUBMODE_VAL(x) ((x) << UCPD_CR_ANASUBMODE_Pos) /** * @brief UCPD VSTATE CCx open value when in source mode */ #define STM32_UCPD_SR_VSTATE_OPEN 3 /** * @brief UCPD VSTATE CCx RA value when in source mode */ #define STM32_UCPD_SR_VSTATE_RA 0 /** * @brief PD message send retry count for Rev 2.0 */ #define UCPD_N_RETRY_COUNT_REV20 3 /** * @brief PD message send retry count for Rev 3.0 */ #define UCPD_N_RETRY_COUNT_REV30 2 /** * @brief Events for ucpd_alert_handler */ enum { /* Request to send a goodCRC message */ UCPD_EVT_GOOD_CRC_REQ, /* Request to send a power delivery message */ UCPD_EVT_TCPM_MSG_REQ, /* Request to send a Hard Reset message */ UCPD_EVT_HR_REQ, /* Transmission of power delivery message failed */ UCPD_EVT_TX_MSG_FAIL, /* Transmission of power delivery message was discarded */ UCPD_EVT_TX_MSG_DISC, /* Transmission of power delivery message was successful */ UCPD_EVT_TX_MSG_SUCCESS, /* Transmission of Hard Reset message was successful */ UCPD_EVT_HR_DONE, /* Transmission of Hard Reset message failed */ UCPD_EVT_HR_FAIL, /* A goodCRC message was received */ UCPD_EVT_RX_GOOD_CRC, /* A power delivery message was received */ UCPD_EVT_RX_MSG, /* A CC event occurred */ UCPD_EVT_EVENT_CC, /* A Hard Reset message was received */ UCPD_EVT_HARD_RESET_RECEIVED, }; /** * @brief GoodCRC message header roles */ struct msg_header_info { /* Power Role */ enum tc_power_role pr; /* Data Role */ enum tc_data_role dr; }; /** * @brief States for managing TX messages */ enum ucpd_state { /* Idle state */ STATE_IDLE, /* Transmitting a message state */ STATE_ACTIVE_TCPM, /* Transmitting a goodCRC message state */ STATE_ACTIVE_CRC, /* Transmitting a Hard Reset message 
state */ STATE_HARD_RESET, /* Waiting for a goodCRC message state */ STATE_WAIT_CRC_ACK }; /** * @brief Tx messages are initiated either by the application or from * the driver when a GoodCRC ack message needs to be sent. */ enum ucpd_tx_msg { /* Default value for not sending a message */ TX_MSG_NONE = -1, /* Message initiated from the application */ TX_MSG_TCPM = 0, /* Message initiated from the driver */ TX_MSG_GOOD_CRC = 1, /* Total number sources that can initiate a message */ TX_MSG_TOTAL = 2 }; /** * @brief Message from application mask */ #define MSG_TCPM_MASK BIT(TX_MSG_TCPM) /** * @brief Message from driver mask */ #define MSG_GOOD_CRC_MASK BIT(TX_MSG_GOOD_CRC) /** * @brief Buffer for holding the received message */ union pd_buffer { /* Power Delivery message header */ uint16_t header; /* Power Deliver message data including the message header */ uint8_t msg[UCPD_BUF_LEN]; }; /** * @brief Struct used for transmitting a power delivery message */ struct ucpd_tx_desc { /* Type of the message */ enum pd_packet_type type; /* Length of the message */ int msg_len; /* Index of the current byte to transmit */ int msg_index; /* Power Delivery message to transmit */ union pd_buffer data; }; /** * @brief Alert handler information */ struct alert_info { /* Runtime device structure */ const struct device *dev; /* Application supplied data that's passed to the * application's alert handler callback **/ void *data; /* Application's alert handler callback */ tcpc_alert_handler_cb_t handler; /* * Kernel worker used to call the application's * alert handler callback */ struct k_work work; /* Event flags used in the kernel worker */ atomic_t evt; }; /** * @brief Driver config */ struct tcpc_config { /* STM32 UCPC CC pin control */ const struct pinctrl_dev_config *ucpd_pcfg; /* STM32 UCPD port */ UCPD_TypeDef *ucpd_port; /* STM32 UCPD parameters */ LL_UCPD_InitTypeDef ucpd_params; /* STM32 UCPD dead battery support */ bool ucpd_dead_battery; }; /** * @brief Driver data */ 
struct tcpc_data { /* VCONN callback function */ tcpc_vconn_control_cb_t vconn_cb; /* VCONN Discharge callback function */ tcpc_vconn_discharge_cb_t vconn_discharge_cb; /* Alert information */ struct alert_info alert_info; /* CC Rp value */ enum tc_rp_value rp; /* PD Rx variables */ /* Number of RX bytes received */ int ucpd_rx_byte_count; /* Buffer to hold the received bytes */ uint8_t ucpd_rx_buffer[UCPD_BUF_LEN]; /* GoodCRC message ID */ int ucpd_crc_id; /* Flag to receive or ignore SOP Prime messages */ bool ucpd_rx_sop_prime_enabled; /* Flag set to true when receiving a message */ bool ucpd_rx_msg_active; /* Flag set to true when in RX BIST Mode */ bool ucpd_rx_bist_mode; /* Tx message variables */ /* Buffers to hold messages ready for transmission */ struct ucpd_tx_desc ucpd_tx_buffers[TX_MSG_TOTAL]; /* Current buffer being transmitted */ struct ucpd_tx_desc *ucpd_tx_active_buffer; /* Request to send a transmission message */ int ucpd_tx_request; /* State of the TX state machine */ enum ucpd_state ucpd_tx_state; /* Transmission message ID */ int msg_id_match; /* Retry count on failure to transmit a message */ int tx_retry_count; /* Max number of reties before giving up */ int tx_retry_max; /* GoodCRC message Header */ struct msg_header_info msg_header; /* Track VCONN on/off state */ bool ucpd_vconn_enable; /* Track CC line that VCONN was active on */ enum tc_cc_polarity ucpd_vconn_cc; /* Dead Battery active */ bool dead_battery_active; /* Timer for amount of time to wait for receiving a GoodCRC */ struct k_timer goodcrc_rx_timer; }; #endif /* ZEPHYR_DRIVERS_USBC_DEVICE_UCPD_STM32_PRIV_H_ */ ```
/content/code_sandbox/drivers/usb_c/tcpc/ucpd_stm32_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,301
```c /* */ #include <zephyr/device.h> #include <zephyr/logging/log.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/i2c.h> #include <zephyr/shell/shell.h> #include <zephyr/drivers/usb_c/usbc_ppc.h> #include "nxp_nx20p3483_priv.h" #define DT_DRV_COMPAT nxp_nx20p3483 LOG_MODULE_REGISTER(nxp_nx20p3483, CONFIG_USBC_PPC_LOG_LEVEL); #ifdef CONFIG_USBC_PPC_NX20P3483_DUMP_FULL_REG_NAMES static const char *const nx20p3483_reg_names[] = { "Device ID ", "Device Status ", "Switch Control ", "Switch Status ", "Interrupt 1 ", "Interrupt 2 ", "Interrupt 1 Mask ", "Interrupt 2 Mask ", "OVLO Threshold ", "HV SRC OCP Threshold", "5V SRC OCP Threshold", "Device Control ", }; #endif /* Driver structures */ struct nx20p3483_cfg { /** Device address on I2C bus */ const struct i2c_dt_spec bus; /** GPIO used as interrupt request */ const struct gpio_dt_spec irq_gpio; /** Overvoltage protection threshold for sink role */ int snk_ovp_thresh; /** Boolean value whether to use high-voltage source if true or 5V source if false */ bool src_use_hv; /** Overcurrent protection threshold for 5V source role */ int src_5v_ocp_thresh; /** Overcurrent protection threshold for HV source role */ int src_hv_ocp_thresh; }; struct nx20p3483_data { /** Device structure to get from data structure */ const struct device *dev; /** Interrupt request callback object */ struct gpio_callback irq_cb; /** Workqueue object for handling interrupts */ struct k_work irq_work; /** Callback used to notify about PPC events, like overcurrent or short */ usbc_ppc_event_cb_t event_cb; /** Data sent as parameter to the callback */ void *event_cb_data; }; /* Helper functions */ static int read_reg(const struct device *dev, uint8_t reg, uint8_t *value) { const struct nx20p3483_cfg *cfg = dev->config; int ret; ret = i2c_reg_read_byte(cfg->bus.bus, cfg->bus.addr, reg, value); if (ret != 0) { LOG_ERR("Error reading reg %02x: %d", reg, ret); return ret; } return 0; } static int write_reg(const struct device *dev, uint8_t 
reg, uint8_t value) { const struct nx20p3483_cfg *cfg = dev->config; int ret; ret = i2c_reg_write_byte(cfg->bus.bus, cfg->bus.addr, reg, value); if (ret != 0) { LOG_ERR("Error writing reg %02x: %d", reg, ret); return ret; } return 0; } static int nx20p3483_set_snk_ovp_limit(const struct device *dev, uint8_t u_thresh) { int ret; if (u_thresh < NX20P3483_I_THRESHOLD_0_400 || u_thresh > NX20P3483_I_THRESHOLD_3_400) { return -EINVAL; } ret = write_reg(dev, NX20P3483_REG_OVLO_THRESHOLD, u_thresh); if (ret != 0) { LOG_ERR("Couldn't set SNK OVP: %d", ret); return ret; } LOG_DBG("Set SNK OVP: %d", u_thresh); return 0; } /* API functions */ int nx20p3483_is_dead_battery_mode(const struct device *dev) { uint8_t sts_reg; int ret; ret = read_reg(dev, NX20P3483_REG_DEVICE_STATUS, &sts_reg); if (ret != 0) { return ret; } return ((sts_reg & NX20P3483_REG_DEVICE_STATUS_MODE_MASK) == NX20P3483_MODE_DEAD_BATTERY); } int nx20p3483_exit_dead_battery_mode(const struct device *dev) { uint8_t ctrl_reg; int ret; ret = read_reg(dev, NX20P3483_REG_DEVICE_CTRL, &ctrl_reg); if (ret != 0) { return ret; } ctrl_reg |= NX20P3483_REG_DEVICE_CTRL_DB_EXIT; ret = write_reg(dev, NX20P3483_REG_DEVICE_CTRL, ctrl_reg); if (ret != 0) { return ret; } return 0; } static int nx20p3483_is_vbus_source(const struct device *dev) { uint8_t sts_reg; int ret; ret = read_reg(dev, NX20P3483_REG_SWITCH_STATUS, &sts_reg); if (ret != 0) { return ret; } return !!(sts_reg & (NX20P3483_REG_SWITCH_STATUS_5VSRC | NX20P3483_REG_SWITCH_STATUS_HVSRC)); } static int nx20p3483_is_vbus_sink(const struct device *dev) { uint8_t sts_reg; int ret; ret = read_reg(dev, NX20P3483_REG_SWITCH_STATUS, &sts_reg); if (ret != 0) { return ret; } return !!(sts_reg & NX20P3483_REG_SWITCH_STATUS_HVSNK); } static int nx20p3483_set_vbus_sink(const struct device *dev, bool enable) { const struct nx20p3483_cfg *cfg = dev->config; /* * The nx20p3483 is enabled by external GPIO signal, however enabling it sets the * overvoltage threshold to the highest 
possible value. Due to that, the threshold has * to be set here again. Must be called after enabling the path by the external signal. */ return nx20p3483_set_snk_ovp_limit(dev, cfg->snk_ovp_thresh); } static int nx20p3483_set_vbus_discharge(const struct device *dev, bool enable) { uint8_t ctrl_reg; int ret; ret = read_reg(dev, NX20P3483_REG_DEVICE_CTRL, &ctrl_reg); if (ret != 0) { return ret; } if (enable) { ctrl_reg |= NX20P3483_REG_DEVICE_CTRL_VBUSDIS_EN; } else { ctrl_reg &= ~NX20P3483_REG_DEVICE_CTRL_VBUSDIS_EN; } ret = write_reg(dev, NX20P3483_REG_DEVICE_CTRL, ctrl_reg); return ret; } static int nx20p3483_set_event_handler(const struct device *dev, usbc_ppc_event_cb_t handler, void *handler_data) { struct nx20p3483_data *data = dev->data; data->event_cb = handler; data->event_cb_data = handler_data; return 0; } static int nx20p3483_dump_regs(const struct device *dev) { const struct nx20p3483_cfg *cfg = dev->config; uint8_t val; LOG_INF("NX20P alert: %d", gpio_pin_get(cfg->irq_gpio.port, cfg->irq_gpio.pin)); LOG_INF("PPC %s:%s registers:", cfg->bus.bus->name, dev->name); for (int a = 0; a <= NX20P3483_REG_DEVICE_CTRL; a++) { i2c_reg_read_byte(cfg->bus.bus, cfg->bus.addr, a, &val); #ifdef CONFIG_USBC_PPC_NX20P3483_DUMP_FULL_REG_NAMES LOG_INF("- [%s] = 0x%02x", nx20p3483_reg_names[a], val); #else LOG_INF("- [%02x] = 0x%02x", a, val); #endif } return 0; } static struct usbc_ppc_driver_api nx20p3483_driver_api = { .is_dead_battery_mode = nx20p3483_is_dead_battery_mode, .exit_dead_battery_mode = nx20p3483_exit_dead_battery_mode, .is_vbus_source = nx20p3483_is_vbus_source, .is_vbus_sink = nx20p3483_is_vbus_sink, .set_snk_ctrl = nx20p3483_set_vbus_sink, .set_vbus_discharge = nx20p3483_set_vbus_discharge, .set_event_handler = nx20p3483_set_event_handler, .dump_regs = nx20p3483_dump_regs, }; static int nx20p3483_set_src_ovc_limit(const struct device *dev, uint8_t i_thresh_5v, uint8_t i_thresh_hv) { int ret; if (i_thresh_5v < NX20P3483_I_THRESHOLD_0_400 || i_thresh_5v > 
NX20P3483_I_THRESHOLD_3_400) { LOG_ERR("Invalid SRC 5V ovc threshold: %d", i_thresh_5v); return -EINVAL; } if (i_thresh_hv < NX20P3483_I_THRESHOLD_0_400 || i_thresh_hv > NX20P3483_I_THRESHOLD_3_400) { LOG_ERR("Invalid SRC HV ovc threshold: %d", i_thresh_hv); return -EINVAL; } ret = write_reg(dev, NX20P3483_REG_5V_SRC_OCP_THRESHOLD, i_thresh_5v); if (ret != 0) { return ret; } ret = write_reg(dev, NX20P3483_REG_HV_SRC_OCP_THRESHOLD, i_thresh_hv); if (ret != 0) { return ret; } LOG_DBG("Set SRC OVC 5V: %d, HV: %d", i_thresh_5v, i_thresh_hv); return 0; } static void nx20p3483_send_event(const struct device *dev, enum usbc_ppc_event ev) { struct nx20p3483_data *data = dev->data; if (data->event_cb != NULL) { data->event_cb(dev, data->event_cb_data, ev); } } static void nx20p3483_irq_handler(const struct device *port, struct gpio_callback *cb, gpio_port_pins_t pins) { struct nx20p3483_data *data = CONTAINER_OF(cb, struct nx20p3483_data, irq_cb); k_work_submit(&data->irq_work); } static void nx20p3483_irq_worker(struct k_work *work) { struct nx20p3483_data *data = CONTAINER_OF(work, struct nx20p3483_data, irq_work); const struct device *dev = data->dev; uint8_t irq1, irq2; int ret; ret = read_reg(dev, NX20P3483_REG_INT1, &irq1); if (ret != 0) { LOG_ERR("Couldn't read irq1"); return; } ret = read_reg(dev, NX20P3483_REG_INT2, &irq2); if (ret != 0) { LOG_ERR("Couldn't read irq2"); return; } if (data->event_cb == NULL) { LOG_DBG("No callback set: %02x %02x", irq1, irq1); } /* Generic alerts */ if (irq1 & NX20P3483_REG_INT1_DBEXIT_ERR) { LOG_INF("PPC dead battery exit failed"); nx20p3483_send_event(dev, USBC_PPC_EVENT_DEAD_BATTERY_ERROR); } if (irq1 & NX20P3483_REG_INT1_OTP) { LOG_INF("PPC over temperature"); nx20p3483_send_event(dev, USBC_PPC_EVENT_OVER_TEMPERATURE); } if (irq1 & NX20P3483_REG_INT2_EN_ERR) { LOG_INF("PPC source and sink enabled"); nx20p3483_send_event(dev, USBC_PPC_EVENT_BOTH_SNKSRC_ENABLED); } /* Source */ if (irq1 & NX20P3483_REG_INT1_OV_5VSRC || irq2 & 
NX20P3483_REG_INT2_OV_HVSRC) { LOG_INF("PPC source overvoltage"); nx20p3483_send_event(dev, USBC_PPC_EVENT_SRC_OVERVOLTAGE); } if (irq1 & NX20P3483_REG_INT1_RCP_5VSRC || irq2 & NX20P3483_REG_INT2_RCP_HVSRC) { LOG_INF("PPC source reverse current"); nx20p3483_send_event(dev, USBC_PPC_EVENT_SRC_REVERSE_CURRENT); } if (irq1 & NX20P3483_REG_INT1_OC_5VSRC || irq2 & NX20P3483_REG_INT2_OC_HVSRC) { LOG_INF("PPC source overcurrent"); nx20p3483_send_event(dev, USBC_PPC_EVENT_SRC_OVERCURRENT); } if (irq1 & NX20P3483_REG_INT1_SC_5VSRC || irq2 & NX20P3483_REG_INT2_SC_HVSRC) { LOG_INF("PPC source short"); nx20p3483_send_event(dev, USBC_PPC_EVENT_SRC_SHORT); } /* Sink */ if (irq2 & NX20P3483_REG_INT2_RCP_HVSNK) { LOG_INF("PPC sink reverse current"); nx20p3483_send_event(dev, USBC_PPC_EVENT_SNK_REVERSE_CURRENT); } if (irq2 & NX20P3483_REG_INT2_SC_HVSNK) { LOG_INF("PPC sink short"); nx20p3483_send_event(dev, USBC_PPC_EVENT_SNK_SHORT); } if (irq2 & NX20P3483_REG_INT2_OV_HVSNK) { LOG_INF("PPC sink overvoltage"); nx20p3483_send_event(dev, USBC_PPC_EVENT_SNK_OVERVOLTAGE); } } static int nx20p3483_dev_init(const struct device *dev) { const struct nx20p3483_cfg *cfg = dev->config; struct nx20p3483_data *data = dev->data; uint8_t reg; int ret; LOG_INF("Initializing PPC"); /* Initialize irq */ ret = gpio_pin_configure(cfg->irq_gpio.port, cfg->irq_gpio.pin, GPIO_INPUT | GPIO_PULL_UP); if (ret != 0) { return ret; } ret = gpio_pin_interrupt_configure(cfg->irq_gpio.port, cfg->irq_gpio.pin, GPIO_INT_EDGE_FALLING); if (ret != 0) { return ret; } gpio_init_callback(&data->irq_cb, nx20p3483_irq_handler, BIT(cfg->irq_gpio.pin)); ret = gpio_add_callback(cfg->irq_gpio.port, &data->irq_cb); if (ret != 0) { return ret; } /* Initialize work_q */ k_work_init(&data->irq_work, nx20p3483_irq_worker); k_work_submit(&data->irq_work); /* If src_use_hv, select the HV src path but do not enable it yet */ read_reg(dev, NX20P3483_REG_SWITCH_CTRL, &reg); if (cfg->src_use_hv) { reg |= NX20P3483_REG_SWITCH_CTRL_SRC; } 
else { reg &= ~NX20P3483_REG_SWITCH_CTRL_SRC; } write_reg(dev, NX20P3483_REG_SWITCH_CTRL, reg); /* Set limits */ ret = nx20p3483_set_snk_ovp_limit(dev, cfg->snk_ovp_thresh); if (ret != 0) { return ret; } ret = nx20p3483_set_src_ovc_limit(dev, cfg->src_5v_ocp_thresh, cfg->src_hv_ocp_thresh); if (ret != 0) { return ret; } return 0; } #define NX20P3483_DRIVER_CFG_INIT(node) \ { \ .bus = I2C_DT_SPEC_GET(node), .irq_gpio = GPIO_DT_SPEC_GET(node, irq_gpios), \ .snk_ovp_thresh = DT_PROP(node, snk_ovp), .src_use_hv = DT_PROP(node, src_hv), \ .src_5v_ocp_thresh = DT_PROP(node, src_5v_ocp), \ .src_hv_ocp_thresh = DT_PROP(node, src_hv_ocp), \ } #define NX20P3483_DRIVER_CFG_ASSERTS(node) \ BUILD_ASSERT(DT_PROP(node, snk_ovp) >= NX20P3483_U_THRESHOLD_6_0 && \ DT_PROP(node, snk_ovp) <= NX20P3483_U_THRESHOLD_23_0, \ "Invalid overvoltage threshold"); \ BUILD_ASSERT(DT_PROP(node, src_5v_ocp) >= NX20P3483_I_THRESHOLD_0_400 && \ DT_PROP(node, src_5v_ocp) <= NX20P3483_I_THRESHOLD_3_400, \ "Invalid overcurrent threshold"); \ BUILD_ASSERT(DT_PROP(node, src_hv_ocp) >= NX20P3483_I_THRESHOLD_0_400 && \ DT_PROP(node, src_hv_ocp) <= NX20P3483_I_THRESHOLD_3_400, \ "Invalid overcurrent threshold"); #define NX20P3483_DRIVER_DATA_INIT(node) \ { \ .dev = DEVICE_DT_GET(node), \ } #define NX20P3483_DRIVER_INIT(inst) \ static struct nx20p3483_data drv_data_nx20p3483##inst = \ NX20P3483_DRIVER_DATA_INIT(DT_DRV_INST(inst)); \ NX20P3483_DRIVER_CFG_ASSERTS(DT_DRV_INST(inst)); \ static struct nx20p3483_cfg drv_cfg_nx20p3483##inst = \ NX20P3483_DRIVER_CFG_INIT(DT_DRV_INST(inst)); \ DEVICE_DT_INST_DEFINE(inst, &nx20p3483_dev_init, NULL, &drv_data_nx20p3483##inst, \ &drv_cfg_nx20p3483##inst, POST_KERNEL, \ CONFIG_USBC_PPC_INIT_PRIORITY, &nx20p3483_driver_api); DT_INST_FOREACH_STATUS_OKAY(NX20P3483_DRIVER_INIT) ```
/content/code_sandbox/drivers/usb_c/ppc/nxp_nx20p3483.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,008
```c /* */ #include <zephyr/device.h> #include <zephyr/logging/log.h> #include <zephyr/usb_c/usbc.h> #include <zephyr/shell/shell.h> /** Macro used to call the dump_std_reg function from the TCPC device pointer */ #define TCPC_DUMP_DEV(dev) ret |= tcpc_dump_std_reg(dev); /** Macro used to call the dump_std_reg function from the USB-C connector node */ #define TCPC_DUMP_CONN_NODE(node) TCPC_DUMP_DEV(DEVICE_DT_GET(DT_PROP(node, tcpc))) /** Macro used to call the vbus_measure function from the VBUS device pointer */ #define TCPC_VBUS_DEV(dev) \ { \ int val; \ ret |= usbc_vbus_measure(dev, &val); \ shell_print(sh, "%s vbus: %d mV", dev->name, val); \ } /** Macro used to call the vbus_measure function from the USB-C connector node */ #define TCPC_VBUS_CONN_NODE(node) TCPC_VBUS_DEV(DEVICE_DT_GET(DT_PROP(node, vbus))) /** Macro used to call the get_chip function from the TCPC device pointer */ #define TCPC_GET_CHIP_DEV(dev) \ { \ ret |= tcpc_get_chip_info(dev, &chip_info); \ shell_print(sh, "Chip: %s", dev->name); \ shell_print(sh, "\tVendor: %04x", chip_info.vendor_id); \ shell_print(sh, "\tProduct: %04x", chip_info.product_id); \ shell_print(sh, "\tDevice: %04x", chip_info.device_id); \ shell_print(sh, "\tFirmware: %llx", chip_info.fw_version_number); \ } /** Macro used to call the get_chip function from the USB-C connector node */ #define TCPC_GET_CHIP_CONN_NODE(node) TCPC_GET_CHIP_DEV(DEVICE_DT_GET(DT_PROP(node, tcpc))) /** * @brief Shell command that dumps standard registers of TCPCs for all available USB-C ports * * @param sh Shell structure * @param argc Arguments count * @param argv Device name * @return int ORed return values of all the functions executed, 0 in case of success */ static int cmd_tcpc_dump(const struct shell *sh, size_t argc, char **argv) { int ret = 0; if (argc <= 1) { DT_FOREACH_STATUS_OKAY(usb_c_connector, TCPC_DUMP_CONN_NODE); } else { const struct device *dev = device_get_binding(argv[1]); if (dev != NULL) { TCPC_DUMP_DEV(dev); } else { ret = 
-ENODEV; } } return ret; } /** * @brief Shell command that prints the vbus measures for all available USB-C ports * * @param sh Shell structure * @param argc Arguments count * @param argv Device name * @return int ORed return values of all the functions executed, 0 in case of success */ static int cmd_tcpc_vbus(const struct shell *sh, size_t argc, char **argv) { int ret = 0; if (argc <= 1) { DT_FOREACH_STATUS_OKAY(usb_c_connector, TCPC_VBUS_CONN_NODE); } else { const struct device *dev = device_get_binding(argv[1]); if (dev != NULL) { TCPC_VBUS_DEV(dev); } else { ret = -ENODEV; } } return ret; } /** * @brief Shell command that prints the TCPCs chips information for all available USB-C ports * * @param sh Shell structure * @param argc Arguments count * @param argv Device name * @return int ORed return values of all the functions executed, 0 in case of success */ static int cmd_tcpc_chip_info(const struct shell *sh, size_t argc, char **argv) { struct tcpc_chip_info chip_info; int ret = 0; if (argc <= 1) { DT_FOREACH_STATUS_OKAY(usb_c_connector, TCPC_GET_CHIP_CONN_NODE); } else { const struct device *dev = device_get_binding(argv[1]); if (dev != NULL) { TCPC_GET_CHIP_DEV(dev); } else { ret = -ENODEV; } } return ret; } /** * @brief Function used to create subcommands with devices names * * @param idx counter of devices * @param entry shell structure that will be filled */ static void device_name_get(size_t idx, struct shell_static_entry *entry) { const struct device *dev = shell_device_lookup(idx, NULL); entry->syntax = (dev != NULL) ? 
dev->name : NULL; entry->handler = NULL; entry->help = NULL; entry->subcmd = NULL; } SHELL_DYNAMIC_CMD_CREATE(list_device_names, device_name_get); SHELL_STATIC_SUBCMD_SET_CREATE(sub_tcpc_cmds, SHELL_CMD_ARG(dump, &list_device_names, "Dump TCPC registers\n" "Usage: tcpc dump [<tcpc device>]", cmd_tcpc_dump, 1, 1), SHELL_CMD_ARG(vbus, &list_device_names, "Display VBUS voltage\n" "Usage: tcpc vbus [<vbus device>]", cmd_tcpc_vbus, 1, 1), SHELL_CMD_ARG(chip, &list_device_names, "Display chip information\n" "Usage: tcpc chip [<tcpc device>]", cmd_tcpc_chip_info, 1, 1), SHELL_SUBCMD_SET_END); SHELL_CMD_REGISTER(tcpc, &sub_tcpc_cmds, "TCPC (USB-C PD) diagnostics", NULL); ```
/content/code_sandbox/drivers/usb_c/tcpc/shell.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,266
```c /* * */ #define DT_DRV_COMPAT st_stm32_ucpd #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(ucpd_stm32, CONFIG_USBC_LOG_LEVEL); #include <zephyr/device.h> #include <zephyr/sys/util.h> #include <zephyr/kernel.h> #include <soc.h> #include <stddef.h> #include <zephyr/math/ilog2.h> #include <stm32_ll_system.h> #include <zephyr/irq.h> #include "ucpd_stm32_priv.h" static void config_tcpc_irq(void); /** * @brief UCPD TX ORDSET values */ static int ucpd_txorderset[] = { /* SOP ORDSET */ LL_UCPD_ORDERED_SET_SOP, /* SOP PRIME ORDSET */ LL_UCPD_ORDERED_SET_SOP1, /* SOP PRIME PRIME ORDSET */ LL_UCPD_ORDERED_SET_SOP2, /* SOP PRIME DEBUG ORDSET */ LL_UCPD_ORDERED_SET_SOP1_DEBUG, /* SOP PRIME PRIME DEBUG ORDSET */ LL_UCPD_ORDERED_SET_SOP2_DEBUG, /* HARD RESET ORDSET */ LL_UCPD_ORDERED_SET_HARD_RESET, /* CABLE RESET ORDSET */ LL_UCPD_ORDERED_SET_CABLE_RESET, }; /** * @brief Test for a goodCRC message * * @retval true if message is goodCRC, else false */ static bool ucpd_msg_is_good_crc(union pd_header header) { /* * Good CRC is a control message (no data objects) with GOOD_CRC * message type in the header. */ return (header.number_of_data_objects == 0 && header.extended == 0 && header.message_type == PD_CTRL_GOOD_CRC); } #ifdef CONFIG_SOC_SERIES_STM32G0X /** * @brief Apply the UCPD CC1 and CC2 pin configurations. * * UCPDx_STROBE: UCPDx pull-down configuration strobe: * when UCPDx is enabled, with CC1 and CC2 pin UCPD * control bits configured: apply that configuration. 
*/ static void update_stm32g0x_cc_line(UCPD_TypeDef *ucpd_port) { if ((uint32_t)(ucpd_port) == UCPD1_BASE) { SYSCFG->CFGR1 |= SYSCFG_CFGR1_UCPD1_STROBE_Msk; } else { SYSCFG->CFGR1 |= SYSCFG_CFGR1_UCPD2_STROBE_Msk; } } #endif /** * @brief Transmits a data byte from the TX data buffer */ static void ucpd_tx_data_byte(const struct device *dev) { struct tcpc_data *data = dev->data; const struct tcpc_config *const config = dev->config; int index = data->ucpd_tx_active_buffer->msg_index++; LL_UCPD_WriteData(config->ucpd_port, data->ucpd_tx_active_buffer->data.msg[index]); } /** * @brief Receives a data byte and store it in the RX data buffer */ static void ucpd_rx_data_byte(const struct device *dev) { struct tcpc_data *data = dev->data; const struct tcpc_config *const config = dev->config; if (data->ucpd_rx_byte_count < UCPD_BUF_LEN) { data->ucpd_rx_buffer[data->ucpd_rx_byte_count++] = LL_UCPD_ReadData(config->ucpd_port); } } /** * @brief Enables or Disables TX interrupts */ static void ucpd_tx_interrupts_enable(const struct device *dev, bool enable) { const struct tcpc_config *const config = dev->config; uint32_t imr; imr = LL_UCPD_ReadReg(config->ucpd_port, IMR); if (enable) { LL_UCPD_WriteReg(config->ucpd_port, ICR, UCPD_ICR_TX_INT_MASK); LL_UCPD_WriteReg(config->ucpd_port, IMR, imr | UCPD_IMR_TX_INT_MASK); } else { LL_UCPD_WriteReg(config->ucpd_port, IMR, imr & ~UCPD_IMR_TX_INT_MASK); } } /** * @brief Initializes the RX and TX state machine variables */ static void stm32_ucpd_state_init(const struct device *dev) { struct tcpc_data *data = dev->data; /* Init variables used to manage tx process */ data->ucpd_tx_request = 0; data->tx_retry_count = 0; data->ucpd_tx_state = STATE_IDLE; /* Init variables used to manage rx */ data->ucpd_rx_sop_prime_enabled = false; data->ucpd_rx_msg_active = false; data->ucpd_rx_bist_mode = false; /* Vconn tracking variable */ data->ucpd_vconn_enable = false; } /** * @brief Get the CC enable mask. 
The mask indicates which CC line * is enabled. * * @retval CC Enable mask (bit 0: CC1, bit 1: CC2) */ static uint32_t ucpd_get_cc_enable_mask(const struct device *dev) { struct tcpc_data *data = dev->data; const struct tcpc_config *const config = dev->config; uint32_t mask = UCPD_CR_CCENABLE_Msk; /* * When VCONN is enabled, it is supplied on the CC line that's * not being used for Power Delivery messages. */ if (data->ucpd_vconn_enable) { uint32_t cr = LL_UCPD_ReadReg(config->ucpd_port, CR); int pol = (cr & UCPD_CR_PHYCCSEL); /* Dissable CC line that's used for VCONN */ mask &= ~BIT(UCPD_CR_CCENABLE_Pos + !pol); } return mask; } /** * @brief Get the state of the CC1 and CC2 lines * * @retval 0 on success * @retval -EIO on failure */ static int ucpd_get_cc(const struct device *dev, enum tc_cc_voltage_state *cc1, enum tc_cc_voltage_state *cc2) { const struct tcpc_config *const config = dev->config; int vstate_cc1; int vstate_cc2; int anamode; uint32_t sr; uint32_t cc_msk; /* * cc_voltage_state is determined from vstate_cc bit field in the * status register. The meaning of the value vstate_cc depends on * current value of ANAMODE (src/snk). * * vstate_cc maps directly to cc_state from tcpci spec when * ANAMODE(snk) = 1, but needs to be modified slightly for case * ANAMODE(src) = 0. 
* * If presenting Rp (source), then need to do a circular shift of * vstate_ccx value: * vstate_cc | cc_state * ------------------ * 0 -> 1 * 1 -> 2 * 2 -> 0 */ /* Get vstate_ccx values and power role */ sr = LL_UCPD_ReadReg(config->ucpd_port, SR); /* Get Rp or Rd active */ anamode = LL_UCPD_GetRole(config->ucpd_port); vstate_cc1 = (sr & UCPD_SR_TYPEC_VSTATE_CC1_Msk) >> UCPD_SR_TYPEC_VSTATE_CC1_Pos; vstate_cc2 = (sr & UCPD_SR_TYPEC_VSTATE_CC2_Msk) >> UCPD_SR_TYPEC_VSTATE_CC2_Pos; /* Do circular shift if port == source */ if (anamode) { if (vstate_cc1 != STM32_UCPD_SR_VSTATE_RA) { vstate_cc1 += 4; } if (vstate_cc2 != STM32_UCPD_SR_VSTATE_RA) { vstate_cc2 += 4; } } else { if (vstate_cc1 != STM32_UCPD_SR_VSTATE_OPEN) { vstate_cc1 = (vstate_cc1 + 1) % 3; } if (vstate_cc2 != STM32_UCPD_SR_VSTATE_OPEN) { vstate_cc2 = (vstate_cc2 + 1) % 3; } } /* CC connection detection */ cc_msk = ucpd_get_cc_enable_mask(dev); /* CC1 connection detection */ if (cc_msk & UCPD_CR_CCENABLE_0) { *cc1 = vstate_cc1; } else { *cc1 = TC_CC_VOLT_OPEN; } /* CC2 connection detection */ if (cc_msk & UCPD_CR_CCENABLE_1) { *cc2 = vstate_cc2; } else { *cc2 = TC_CC_VOLT_OPEN; } return 0; } /** * @brief Enable or Disable VCONN * * @retval 0 on success * @retval -EIO on failure * @retval -ENOTSUP if not supported */ static int ucpd_set_vconn(const struct device *dev, bool enable) { struct tcpc_data *data = dev->data; const struct tcpc_config *const config = dev->config; int cr; int ret; if (data->vconn_cb == NULL) { return -ENOTSUP; } /* Update VCONN on/off status. Do this before getting cc enable mask */ data->ucpd_vconn_enable = enable; cr = LL_UCPD_ReadReg(config->ucpd_port, CR); cr &= ~UCPD_CR_CCENABLE_Msk; cr |= ucpd_get_cc_enable_mask(dev); /* Apply cc pull resistor change */ LL_UCPD_WriteReg(config->ucpd_port, CR, cr); #ifdef CONFIG_SOC_SERIES_STM32G0X update_stm32g0x_cc_line(config->ucpd_port); #endif /* Get CC line that VCONN is active on */ data->ucpd_vconn_cc = (cr & UCPD_CR_CCENABLE_0) ? 
TC_POLARITY_CC2 : TC_POLARITY_CC1; /* Call user supplied callback to set vconn */ ret = data->vconn_cb(dev, data->ucpd_vconn_cc, enable); return ret; } /** * @brief Discharge VCONN * * @retval 0 on success * @retval -EIO on failure * @retval -ENOTSUP if not supported */ static int ucpd_vconn_discharge(const struct device *dev, bool enable) { struct tcpc_data *data = dev->data; const struct tcpc_config *const config = dev->config; /* VCONN should not be discharged while it's enabled */ if (data->ucpd_vconn_enable) { return -EIO; } if (data->vconn_discharge_cb) { /* Use DPM supplied VCONN Discharge */ return data->vconn_discharge_cb(dev, data->ucpd_vconn_cc, enable); } /* Use TCPC VCONN Discharge */ if (enable) { LL_UCPD_VconnDischargeEnable(config->ucpd_port); } else { LL_UCPD_VconnDischargeDisable(config->ucpd_port); } #ifdef CONFIG_SOC_SERIES_STM32G0X update_stm32g0x_cc_line(config->ucpd_port); #endif return 0; } /** * @brief Sets the value of the CC pull up resistor used when operating as a Source * * @retval 0 on success */ static int ucpd_select_rp_value(const struct device *dev, enum tc_rp_value rp) { struct tcpc_data *data = dev->data; data->rp = rp; return 0; } /** * @brief Gets the value of the CC pull up resistor used when operating as a Source * * @retval 0 on success */ static int ucpd_get_rp_value(const struct device *dev, enum tc_rp_value *rp) { struct tcpc_data *data = dev->data; *rp = data->rp; return 0; } /** * @brief Enable or disable Dead Battery resistors */ static void dead_battery(const struct device *dev, bool en) { struct tcpc_data *data = dev->data; #ifdef CONFIG_SOC_SERIES_STM32G0X const struct tcpc_config *const config = dev->config; uint32_t cr; cr = LL_UCPD_ReadReg(config->ucpd_port, CR); if (en) { cr |= UCPD_CR_DBATTEN; } else { cr &= ~UCPD_CR_DBATTEN; } LL_UCPD_WriteReg(config->ucpd_port, CR, cr); update_stm32g0x_cc_line(config->ucpd_port); #else if (en) { CLEAR_BIT(PWR->CR3, PWR_CR3_UCPD_DBDIS); } else { SET_BIT(PWR->CR3, 
PWR_CR3_UCPD_DBDIS); } #endif data->dead_battery_active = en; } /** * @brief Set the CC pull up or pull down resistors * * @retval 0 on success * @retval -EIO on failure */ static int ucpd_set_cc(const struct device *dev, enum tc_cc_pull cc_pull) { const struct tcpc_config *const config = dev->config; struct tcpc_data *data = dev->data; uint32_t cr; /* Disable dead battery if it's active */ if (data->dead_battery_active) { dead_battery(dev, false); } cr = LL_UCPD_ReadReg(config->ucpd_port, CR); /* * Always set ANASUBMODE to match desired Rp. TCPM layer has a valid * range of 0, 1, or 2. This range maps to 1, 2, or 3 in ucpd for * ANASUBMODE. */ cr &= ~UCPD_CR_ANASUBMODE_Msk; cr |= STM32_UCPD_CR_ANASUBMODE_VAL(UCPD_RP_TO_ANASUB(data->rp)); /* Disconnect both pull from both CC lines for R_open case */ cr &= ~UCPD_CR_CCENABLE_Msk; /* Set ANAMODE if cc_pull is Rd */ if (cc_pull == TC_CC_RD) { cr |= (UCPD_CR_ANAMODE | UCPD_CR_CCENABLE_Msk); /* Clear ANAMODE if cc_pull is Rp */ } else if (cc_pull == TC_CC_RP) { cr &= ~(UCPD_CR_ANAMODE); cr |= ucpd_get_cc_enable_mask(dev); } /* Update pull values */ LL_UCPD_WriteReg(config->ucpd_port, CR, cr); #ifdef CONFIG_SOC_SERIES_STM32G0X update_stm32g0x_cc_line(config->ucpd_port); #endif return 0; } /** * @brief Set the polarity of the CC line, which is the active CC line * used for power delivery. * * @retval 0 on success * @retval -EIO on failure * @retval -ENOTSUP if polarity is not supported */ static int ucpd_cc_set_polarity(const struct device *dev, enum tc_cc_polarity polarity) { const struct tcpc_config *const config = dev->config; uint32_t cr; cr = LL_UCPD_ReadReg(config->ucpd_port, CR); /* * Polarity impacts the PHYCCSEL, CCENABLE, and CCxTCDIS fields. This * function is called when polarity is updated at TCPM layer. STM32Gx * only supports POLARITY_CC1 or POLARITY_CC2 and this is stored in the * PHYCCSEL bit in the CR register. 
*/ if (polarity == TC_POLARITY_CC1) { cr &= ~UCPD_CR_PHYCCSEL; } else if (polarity == TC_POLARITY_CC2) { cr |= UCPD_CR_PHYCCSEL; } else { return -ENOTSUP; } /* Update polarity */ LL_UCPD_WriteReg(config->ucpd_port, CR, cr); return 0; } /** * @brief Enable or Disable Power Delivery * * @retval 0 on success * @retval -EIO on failure */ static int ucpd_set_rx_enable(const struct device *dev, bool enable) { const struct tcpc_config *const config = dev->config; uint32_t imr; uint32_t cr; imr = LL_UCPD_ReadReg(config->ucpd_port, IMR); cr = LL_UCPD_ReadReg(config->ucpd_port, CR); /* * USB PD receiver enable is controlled by the bit PHYRXEN in * UCPD_CR. Enable Rx interrupts when RX PD decoder is active. */ if (enable) { /* Clear the RX alerts bits */ LL_UCPD_WriteReg(config->ucpd_port, ICR, UCPD_ICR_RX_INT_MASK); imr |= UCPD_IMR_RX_INT_MASK; cr |= UCPD_CR_PHYRXEN; LL_UCPD_WriteReg(config->ucpd_port, IMR, imr); LL_UCPD_WriteReg(config->ucpd_port, CR, cr); } else { imr &= ~UCPD_IMR_RX_INT_MASK; cr &= ~UCPD_CR_PHYRXEN; LL_UCPD_WriteReg(config->ucpd_port, CR, cr); LL_UCPD_WriteReg(config->ucpd_port, IMR, imr); } return 0; } /** * @brief Set the Power and Data role used when sending goodCRC messages * * @retval 0 on success * @retval -EIO on failure */ static int ucpd_set_roles(const struct device *dev, enum tc_power_role power_role, enum tc_data_role data_role) { struct tcpc_data *data = dev->data; data->msg_header.pr = power_role; data->msg_header.dr = data_role; return 0; } /** * @brief Enable the reception of SOP Prime messages * * @retval 0 on success * @retval -EIO on failure */ static int ucpd_sop_prime_enable(const struct device *dev, bool enable) { struct tcpc_data *data = dev->data; /* Update static variable used to filter SOP//SOP'' messages */ data->ucpd_rx_sop_prime_enabled = enable; return 0; } /** * @brief State transmitting a message */ static void ucpd_start_transmit(const struct device *dev, enum ucpd_tx_msg msg_type) { struct tcpc_data *data = dev->data; 
const struct tcpc_config *const config = dev->config; enum pd_packet_type type; uint32_t cr; uint32_t imr; cr = LL_UCPD_ReadReg(config->ucpd_port, CR); /* Select the correct tx descriptor */ data->ucpd_tx_active_buffer = &data->ucpd_tx_buffers[msg_type]; type = data->ucpd_tx_active_buffer->type; if (type == PD_PACKET_TX_HARD_RESET) { /* * From RM0440 45.4.4: * In order to facilitate generation of a Hard Reset, a special * code of TXMODE field is used. No other fields need to be * written. On writing the correct code, the hardware forces * Hard Reset Tx under the correct (optimal) timings with * respect to an on-going Tx message, which (if still in * progress) is cleanly terminated by truncating the current * sequence and directly appending an EOP K-code sequence. No * specific interrupt is generated relating to this truncation * event. * * Because Hard Reset can interrupt ongoing Tx operations, it is * started differently than all other tx messages. Only need to * enable hard reset interrupts, and then set a bit in the CR * register to initiate. */ /* Enable interrupt for Hard Reset sent/discarded */ LL_UCPD_WriteReg(config->ucpd_port, ICR, UCPD_ICR_HRSTDISCCF | UCPD_ICR_HRSTSENTCF); imr = LL_UCPD_ReadReg(config->ucpd_port, IMR); imr |= UCPD_IMR_HRSTDISCIE | UCPD_IMR_HRSTSENTIE; LL_UCPD_WriteReg(config->ucpd_port, IMR, imr); /* Initiate Hard Reset */ cr |= UCPD_CR_TXHRST; LL_UCPD_WriteReg(config->ucpd_port, CR, cr); } else if (type != PD_PACKET_MSG_INVALID) { int msg_len = 0; int mode; /* * These types are normal transmission, TXMODE = 0. To transmit * regular message, control or data, requires the following: * 1. Set TXMODE: * Normal -> 0 * Cable Reset -> 1 * Bist -> 2 * 2. Set TX_ORDSETR based on message type * 3. Set TX_PAYSZR which must account for 2 bytes of header * 4. Configure DMA (optional if DMA is desired) * 5. Enable transmit interrupts * 6. Start TX by setting TXSEND in CR * */ /* * Set tx length parameter (in bytes). 
Note the count field in * the header is number of 32 bit objects. Also, the length * field must account for the 2 header bytes. */ if (type == PD_PACKET_TX_BIST_MODE_2) { mode = LL_UCPD_TXMODE_BIST_CARRIER2; } else if (type == PD_PACKET_CABLE_RESET) { mode = LL_UCPD_TXMODE_CABLE_RESET; } else { mode = LL_UCPD_TXMODE_NORMAL; msg_len = data->ucpd_tx_active_buffer->msg_len; } LL_UCPD_WriteTxPaySize(config->ucpd_port, msg_len); /* Set tx mode */ cr &= ~UCPD_CR_TXMODE_Msk; cr |= mode; LL_UCPD_WriteReg(config->ucpd_port, CR, cr); /* Index into ordset enum for start of packet */ if (type <= PD_PACKET_CABLE_RESET) { LL_UCPD_WriteTxOrderSet(config->ucpd_port, ucpd_txorderset[type]); } /* Reset msg byte index */ data->ucpd_tx_active_buffer->msg_index = 0; /* Enable interrupts */ ucpd_tx_interrupts_enable(dev, 1); /* Trigger ucpd peripheral to start pd message transmit */ LL_UCPD_SendMessage(config->ucpd_port); } } /** * @brief Set the current state of the TX state machine */ static void ucpd_set_tx_state(const struct device *dev, enum ucpd_state state) { struct tcpc_data *data = dev->data; data->ucpd_tx_state = state; } /** * @brief Wrapper function for calling alert handler */ static void ucpd_notify_handler(struct alert_info *info, enum tcpc_alert alert) { if (info->handler) { info->handler(info->dev, info->data, alert); } } /** * @brief This is the TX state machine */ static void ucpd_manage_tx(struct alert_info *info) { struct tcpc_data *data = info->dev->data; enum ucpd_tx_msg msg_src = TX_MSG_NONE; union pd_header hdr; if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_HR_REQ)) { /* * Hard reset control messages are treated as a priority. The * control message will already be set up as it comes from the * PRL layer like any other PD ctrl/data message. So just need * to indicate the correct message source and set the state to * hard reset here. 
*/ ucpd_set_tx_state(info->dev, STATE_HARD_RESET); msg_src = TX_MSG_TCPM; data->ucpd_tx_request &= ~BIT(msg_src); } switch (data->ucpd_tx_state) { case STATE_IDLE: if (data->ucpd_tx_request & MSG_GOOD_CRC_MASK) { ucpd_set_tx_state(info->dev, STATE_ACTIVE_CRC); msg_src = TX_MSG_GOOD_CRC; } else if (data->ucpd_tx_request & MSG_TCPM_MASK) { if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_RX_MSG)) { /* * USB-PD Specification rev 3.0, section 6.10 * On receiving a received message, the protocol * layer shall discard any pending message. * * Since the pending message from the PRL has * not been sent yet, it needs to be discarded * based on the received message event. */ ucpd_notify_handler(info, TCPC_ALERT_TRANSMIT_MSG_DISCARDED); data->ucpd_tx_request &= ~MSG_TCPM_MASK; } else if (!data->ucpd_rx_msg_active) { ucpd_set_tx_state(info->dev, STATE_ACTIVE_TCPM); msg_src = TX_MSG_TCPM; /* Save msgID required for GoodCRC check */ hdr.raw_value = data->ucpd_tx_buffers[TX_MSG_TCPM].data.header; data->msg_id_match = hdr.message_id; data->tx_retry_max = hdr.specification_revision == PD_REV30 ? UCPD_N_RETRY_COUNT_REV30 : UCPD_N_RETRY_COUNT_REV20; } } /* If state is not idle, then start tx message */ if (data->ucpd_tx_state != STATE_IDLE) { data->ucpd_tx_request &= ~BIT(msg_src); data->tx_retry_count = 0; } break; case STATE_ACTIVE_TCPM: /* * Check if tx msg has finished. For TCPM messages * transmit is not complete until a GoodCRC message * matching the msgID just sent is received. But, a tx * message can fail due to collision or underrun, * etc. If that failure occurs, dont' wait for GoodCrc * and just go to failure path. 
*/ if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_TX_MSG_SUCCESS)) { ucpd_set_tx_state(info->dev, STATE_WAIT_CRC_ACK); /* Start the GoodCRC RX Timer */ k_timer_start(&data->goodcrc_rx_timer, K_USEC(1000), K_NO_WAIT); } else if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_TX_MSG_DISC) || atomic_test_and_clear_bit(&info->evt, UCPD_EVT_TX_MSG_FAIL)) { if (data->tx_retry_count < data->tx_retry_max) { if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_RX_MSG)) { /* * A message was received so there is no * need to retry this tx message which * had failed to send previously. * Likely, due to the wire * being active from the message that * was just received. */ ucpd_set_tx_state(info->dev, STATE_IDLE); ucpd_notify_handler(info, TCPC_ALERT_TRANSMIT_MSG_DISCARDED); ucpd_set_tx_state(info->dev, STATE_IDLE); } else { /* * Tx attempt failed. Remain in this * state, but trigger new tx attempt. */ msg_src = TX_MSG_TCPM; data->tx_retry_count++; } } else { enum tcpc_alert status; status = (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_TX_MSG_FAIL)) ? 
TCPC_ALERT_TRANSMIT_MSG_FAILED : TCPC_ALERT_TRANSMIT_MSG_DISCARDED; ucpd_set_tx_state(info->dev, STATE_IDLE); ucpd_notify_handler(info, status); } } break; case STATE_ACTIVE_CRC: if (atomic_test_bit(&info->evt, UCPD_EVT_TX_MSG_SUCCESS) || atomic_test_bit(&info->evt, UCPD_EVT_TX_MSG_FAIL) || atomic_test_bit(&info->evt, UCPD_EVT_TX_MSG_DISC)) { atomic_clear_bit(&info->evt, UCPD_EVT_TX_MSG_SUCCESS); atomic_clear_bit(&info->evt, UCPD_EVT_TX_MSG_FAIL); atomic_clear_bit(&info->evt, UCPD_EVT_TX_MSG_DISC); ucpd_set_tx_state(info->dev, STATE_IDLE); if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_TX_MSG_FAIL)) { LOG_INF("ucpd: Failed to send GoodCRC!"); } else if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_TX_MSG_DISC)) { LOG_INF("ucpd: GoodCRC message discarded!"); } } break; case STATE_WAIT_CRC_ACK: if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_RX_GOOD_CRC) && data->ucpd_crc_id == data->msg_id_match) { /* GoodCRC with matching ID was received */ ucpd_notify_handler(info, TCPC_ALERT_TRANSMIT_MSG_SUCCESS); ucpd_set_tx_state(info->dev, STATE_IDLE); } else if (k_timer_status_get(&data->goodcrc_rx_timer)) { /* Stop the GoodCRC RX Timer */ k_timer_stop(&data->goodcrc_rx_timer); /* GoodCRC w/out match or timeout waiting */ if (data->tx_retry_count < data->tx_retry_max) { ucpd_set_tx_state(info->dev, STATE_ACTIVE_TCPM); msg_src = TX_MSG_TCPM; data->tx_retry_count++; } else { ucpd_set_tx_state(info->dev, STATE_IDLE); ucpd_notify_handler(info, TCPC_ALERT_TRANSMIT_MSG_FAILED); } } else if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_RX_MSG)) { /* * In the case of a collision, it's possible the port * partner may not send a GoodCRC and instead send the * message that was colliding. If a message is received * in this state, then treat it as a discard from an * incoming message. 
*/ ucpd_notify_handler(info, TCPC_ALERT_TRANSMIT_MSG_DISCARDED); ucpd_set_tx_state(info->dev, STATE_IDLE); } break; case STATE_HARD_RESET: if (atomic_test_bit(&info->evt, UCPD_EVT_HR_DONE) || atomic_test_bit(&info->evt, UCPD_EVT_HR_FAIL)) { atomic_clear_bit(&info->evt, UCPD_EVT_HR_DONE); atomic_clear_bit(&info->evt, UCPD_EVT_HR_FAIL); /* HR complete, reset tx state values */ ucpd_set_tx_state(info->dev, STATE_IDLE); data->ucpd_tx_request = 0; data->tx_retry_count = 0; } break; } /* * NOTE: TX_MSG_GOOD_CRC messages are sent from the ISR to reduce latency * when sending those messages, so don't resend them here. * * If msg_src is valid and not a TX_MSG_GOOD_CRC, then start transmit. */ if (msg_src != TX_MSG_GOOD_CRC && msg_src > TX_MSG_NONE) { ucpd_start_transmit(info->dev, msg_src); } } /** * @brief Alert handler */ static void ucpd_alert_handler(struct k_work *item) { struct alert_info *info = CONTAINER_OF(item, struct alert_info, work); struct tcpc_data *data = info->dev->data; if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_EVENT_CC)) { ucpd_notify_handler(info, TCPC_ALERT_CC_STATUS); } if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_HARD_RESET_RECEIVED)) { ucpd_notify_handler(info, TCPC_ALERT_HARD_RESET_RECEIVED); } if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_RX_MSG)) { ucpd_notify_handler(info, TCPC_ALERT_MSG_STATUS); } /* * USB-PD messages are initiated in TCPM stack (PRL * layer). However, GoodCRC messages are initiated within the * UCPD driver based on USB-PD rx messages. These 2 types of * transmit paths are managed via events. * * UCPD generated GoodCRC messages, are the priority path as * they must be sent immediately following a successful USB-PD * rx message. As long as a transmit operation is not underway, * then a transmit message will be started upon request. The ISR * routine sets the event to indicate that the transmit * operation is complete. 
* * Hard reset requests are sent as a TCPM message, but in terms * of the ucpd transmitter, they are treated as a 3rd tx msg * source since they can interrupt an ongoing tx msg, and there * is no requirement to wait for a GoodCRC reply message. */ if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_GOOD_CRC_REQ)) { data->ucpd_tx_request |= MSG_GOOD_CRC_MASK; } if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_TCPM_MSG_REQ)) { data->ucpd_tx_request |= MSG_TCPM_MASK; } /* * Manage PD tx messages. The state machine may need to be * called more than once. For instance, if * the task is woken at the completion of sending a GoodCRC, * there may be a TCPM message request pending and just changing * the state back to idle would not trigger start of transmit. */ do { ucpd_manage_tx(info); } while (data->ucpd_tx_state != STATE_IDLE); } /** * @brief Sends a goodCRC message */ static void ucpd_send_good_crc(const struct device *dev, union pd_header rx_header) { struct tcpc_data *data = dev->data; const struct tcpc_config *const config = dev->config; union pd_header tx_header; enum pd_packet_type tx_type; struct alert_info *info = &data->alert_info; /* * A GoodCRC message shall be sent by receiver to ack that the previous * message was correctly received. The GoodCRC message shall return the * rx message's msg_id field. The one exception is for GoodCRC messages, * which do not generate a GoodCRC response */ if (ucpd_msg_is_good_crc(rx_header)) { return; } /* * Get the rx ordered set code just detected. SOP -> SOP''_Debug are in * the same order as enum tcpc_packet_type and so can be used * directly. 
*/ tx_type = LL_UCPD_ReadRxOrderSet(config->ucpd_port); /* * PD Header(SOP): * Extended b15 -> set to 0 for control messages * Count b14:12 -> number of 32 bit data objects = 0 for ctrl msg * MsgID b11:9 -> running byte counter (extracted from rx msg) * Power Role b8 -> stored in static, from set_msg_header() * Spec Rev b7:b6 -> PD spec revision (extracted from rx msg) * Data Role b5 -> stored in static, from set_msg_header * Msg Type b4:b0 -> data or ctrl type = PD_CTRL_GOOD_CRC */ /* construct header message */ tx_header.message_type = PD_CTRL_GOOD_CRC; if (tx_type == PD_PACKET_SOP) { tx_header.port_power_role = data->msg_header.pr; tx_header.port_data_role = data->msg_header.dr; } else { tx_header.port_power_role = 0; tx_header.port_data_role = 0; } tx_header.message_id = rx_header.message_id; tx_header.number_of_data_objects = 0; tx_header.specification_revision = rx_header.specification_revision; tx_header.extended = 0; /* Good CRC is header with no other objects */ data->ucpd_tx_buffers[TX_MSG_GOOD_CRC].msg_len = MSG_HEADER_SIZE; data->ucpd_tx_buffers[TX_MSG_GOOD_CRC].data.header = tx_header.raw_value; data->ucpd_tx_buffers[TX_MSG_GOOD_CRC].type = tx_type; /* Notify ucpd task that a GoodCRC message tx request is pending */ atomic_set_bit(&info->evt, UCPD_EVT_GOOD_CRC_REQ); /* Send TX_MSG_GOOD_CRC message here to reduce latency */ ucpd_start_transmit(dev, TX_MSG_GOOD_CRC); } /** * @brief Transmit a power delivery message * * @retval 0 on success * @retval -EFAULT on failure */ static int ucpd_transmit_data(const struct device *dev, struct pd_msg *msg) { struct tcpc_data *data = dev->data; /* Length in bytes = (4 * object len) + 2 header bytes */ int len = PD_CONVERT_PD_HEADER_COUNT_TO_BYTES(msg->header.number_of_data_objects) + 2; if (len > UCPD_BUF_LEN) { return -EFAULT; } /* Store tx msg info in TCPM msg descriptor */ data->ucpd_tx_buffers[TX_MSG_TCPM].msg_len = len; data->ucpd_tx_buffers[TX_MSG_TCPM].type = msg->type; 
data->ucpd_tx_buffers[TX_MSG_TCPM].data.header = msg->header.raw_value; /* Copy msg objects to ucpd data buffer, after 2 header bytes */ memcpy(data->ucpd_tx_buffers[TX_MSG_TCPM].data.msg + 2, (uint8_t *)msg->data, len - 2); /* * Check for hard reset message here. A different event is used for hard * resets as they are able to interrupt ongoing transmit, and should * have priority over any pending message. */ if (msg->type == PD_PACKET_TX_HARD_RESET) { atomic_set_bit(&data->alert_info.evt, UCPD_EVT_HR_REQ); } else { atomic_set_bit(&data->alert_info.evt, UCPD_EVT_TCPM_MSG_REQ); } /* Start transmission */ k_work_submit(&data->alert_info.work); return 0; } /** * @brief Retrieves the Power Delivery message from the TCPC * * @retval number of bytes received if msg parameter is provided * @retval 0 if there is a message pending and the msg parameter is NULL * @retval -ENODATA if there is no pending message */ static int ucpd_get_rx_pending_msg(const struct device *dev, struct pd_msg *msg) { struct tcpc_data *data = dev->data; int ret = 0; /* Make sure we have a message to retrieve */ if (*(uint32_t *)data->ucpd_rx_buffer == 0) { return -ENODATA; } if (msg == NULL) { return 0; } msg->type = *(uint16_t *)data->ucpd_rx_buffer; msg->header.raw_value = *((uint16_t *)data->ucpd_rx_buffer + 1); msg->len = PD_CONVERT_PD_HEADER_COUNT_TO_BYTES(msg->header.number_of_data_objects); memcpy(msg->data, (data->ucpd_rx_buffer + PACKET_TYPE_SIZE + MSG_HEADER_SIZE), msg->len); ret = msg->len + MSG_HEADER_SIZE; /* All done. 
Clear type and header */ *(uint32_t *)data->ucpd_rx_buffer = 0; return ret; } /** * @brief Enable or Disable BIST Test mode * * return 0 on success * return -EIO on failure */ static int ucpd_set_bist_test_mode(const struct device *dev, bool enable) { struct tcpc_data *data = dev->data; data->ucpd_rx_bist_mode = enable; LOG_INF("ucpd: Bist test mode = %d", enable); return 0; } /** * @brief UCPD interrupt handler */ static void ucpd_isr(const struct device *dev_inst[]) { const struct device *dev; const struct tcpc_config *config; struct tcpc_data *data; uint32_t sr; struct alert_info *info; uint32_t tx_done_mask = UCPD_SR_TXUND | UCPD_SR_TXMSGSENT | UCPD_SR_TXMSGABT | UCPD_SR_TXMSGDISC | UCPD_SR_HRSTSENT | UCPD_SR_HRSTDISC; #if DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) > 1 /* * Multiple UCPD ports are available */ uint32_t sr0; uint32_t sr1; /* * Since the UCPD peripherals share the same interrupt line, determine * which one generated the interrupt. */ /* Read UCPD1 and UCPD2 Status Registers */ sr0 = LL_UCPD_ReadReg(((const struct tcpc_config *)dev_inst[0]->config)->ucpd_port, SR); sr1 = LL_UCPD_ReadReg(((const struct tcpc_config *)dev_inst[1]->config)->ucpd_port, SR); if (sr0) { dev = dev_inst[0]; } else if (sr1) { dev = dev_inst[1]; } else { /* * The interrupt was triggered by some other device sharing this * interrupt line. */ return; } #else /* * Only one UCPD port available */ dev = dev_inst[0]; #endif /* Get the UCPD port that initiated that interrupt */ config = dev->config; data = dev->data; info = &data->alert_info; /* Read the status register */ sr = LL_UCPD_ReadReg(config->ucpd_port, SR); /* Check for CC events, set event to wake PD task */ if (sr & (UCPD_SR_TYPECEVT1 | UCPD_SR_TYPECEVT2)) { /* Set CC event bit */ atomic_set_bit(&info->evt, UCPD_EVT_EVENT_CC); } /* * Check for Tx events. tx_mask includes all status bits related to the * end of a USB-PD tx message. If any of these bits are set, the * transmit attempt is completed. 
Set an event to notify ucpd tx state * machine that transmit operation is complete. */ if (sr & tx_done_mask) { /* Check for tx message complete */ if (sr & UCPD_SR_TXMSGSENT) { atomic_set_bit(&info->evt, UCPD_EVT_TX_MSG_SUCCESS); } else if (sr & (UCPD_SR_TXMSGABT | UCPD_SR_TXUND)) { atomic_set_bit(&info->evt, UCPD_EVT_TX_MSG_FAIL); } else if (sr & (UCPD_SR_TXMSGDISC | UCPD_SR_HRSTDISC)) { atomic_set_bit(&info->evt, UCPD_EVT_TX_MSG_DISC); } else if (sr & UCPD_SR_HRSTSENT) { atomic_set_bit(&info->evt, UCPD_EVT_HR_DONE); } else if (sr & UCPD_SR_HRSTDISC) { atomic_set_bit(&info->evt, UCPD_EVT_HR_FAIL); } /* Disable Tx interrupts */ ucpd_tx_interrupts_enable(dev, 0); } /* Check for data register empty */ if (sr & UCPD_SR_TXIS) { ucpd_tx_data_byte(dev); } /* Check for Rx Events */ /* Check first for start of new message */ if (sr & UCPD_SR_RXORDDET) { /* Add message type to pd message buffer */ *(uint16_t *)data->ucpd_rx_buffer = LL_UCPD_ReadRxOrderSet(config->ucpd_port); data->ucpd_rx_byte_count = 2; data->ucpd_rx_msg_active = true; } /* Check for byte received */ if (sr & UCPD_SR_RXNE) { ucpd_rx_data_byte(dev); } /* Check for end of message */ if (sr & UCPD_SR_RXMSGEND) { data->ucpd_rx_msg_active = false; /* Check for errors */ if (!(sr & UCPD_SR_RXERR)) { enum pd_packet_type type; union pd_header rx_header; int good_crc; type = *(uint16_t *)data->ucpd_rx_buffer; rx_header.raw_value = *((uint16_t *)data->ucpd_rx_buffer + 1); good_crc = ucpd_msg_is_good_crc(rx_header); /* * Don't pass GoodCRC control messages to the TCPM * layer. In addition, need to filter for SOP'/SOP'' * packets if those are not enabled. SOP'/SOP'' * reception is controlled by a static variable. The * hardware orderset detection pattern can't be changed * without disabling the ucpd peripheral. 
*/ if (!good_crc && (data->ucpd_rx_sop_prime_enabled || type == PD_PACKET_SOP)) { /* * If BIST test mode is active, then still need * to send GoodCRC reply, but there is no need * to send the message up to the tcpm layer. */ if (!data->ucpd_rx_bist_mode) { atomic_set_bit(&info->evt, UCPD_EVT_RX_MSG); } /* Send GoodCRC message (if required) */ ucpd_send_good_crc(dev, rx_header); } else if (good_crc) { atomic_set_bit(&info->evt, UCPD_EVT_RX_GOOD_CRC); data->ucpd_crc_id = rx_header.message_id; } } else { /* Rx message is complete, but there were bit errors */ LOG_ERR("ucpd: rx message error"); } } /* Check for fault conditions */ if (sr & UCPD_SR_RXHRSTDET) { /* hard reset received */ atomic_set_bit(&info->evt, UCPD_EVT_HARD_RESET_RECEIVED); } /* Clear interrupts now that PD events have been set */ LL_UCPD_WriteReg(config->ucpd_port, ICR, sr & UCPD_ICR_ALL_INT_MASK); /* Notify application of events */ k_work_submit(&info->work); } /** * @brief Dump a set of TCPC registers * * @retval 0 on success * @retval -EIO on failure */ static int ucpd_dump_std_reg(const struct device *dev) { const struct tcpc_config *const config = dev->config; LOG_INF("CFGR1: %08x", LL_UCPD_ReadReg(config->ucpd_port, CFG1)); LOG_INF("CFGR2: %08x", LL_UCPD_ReadReg(config->ucpd_port, CFG2)); LOG_INF("CR: %08x", LL_UCPD_ReadReg(config->ucpd_port, CR)); LOG_INF("IMR: %08x", LL_UCPD_ReadReg(config->ucpd_port, IMR)); LOG_INF("SR: %08x", LL_UCPD_ReadReg(config->ucpd_port, SR)); LOG_INF("ICR: %08x\n", LL_UCPD_ReadReg(config->ucpd_port, ICR)); return 0; } /** * @brief Sets the alert function that's called when an interrupt is triggered * due to a TCPC alert * * @retval 0 on success * @retval -EINVAL on failure */ static int ucpd_set_alert_handler_cb(const struct device *dev, tcpc_alert_handler_cb_t handler, void *alert_data) { struct tcpc_data *data = dev->data; data->alert_info.handler = handler; data->alert_info.data = alert_data; return 0; } /** * @brief Sets a callback that can enable or disable 
VCONN if the TCPC is * unable to or the system is configured in a way that does not use * the VCONN control capabilities of the TCPC * */ static void ucpd_set_vconn_cb(const struct device *dev, tcpc_vconn_control_cb_t vconn_cb) { struct tcpc_data *data = dev->data; data->vconn_cb = vconn_cb; } /** * @brief Sets a callback that can discharge VCONN if the TCPC is * unable to or the system is configured in a way that does not use * the VCONN discharge capabilities of the TCPC * */ static void ucpd_set_vconn_discharge_cb(const struct device *dev, tcpc_vconn_discharge_cb_t cb) { struct tcpc_data *data = dev->data; data->vconn_discharge_cb = cb; } /** * @brief UCPD interrupt init */ static void ucpd_isr_init(const struct device *dev) { const struct tcpc_config *const config = dev->config; struct tcpc_data *data = dev->data; struct alert_info *info = &data->alert_info; /* Init GoodCRC Receive timer */ k_timer_init(&data->goodcrc_rx_timer, NULL, NULL); /* Disable all alert bits */ LL_UCPD_WriteReg(config->ucpd_port, IMR, 0); /* Clear all alert handler */ ucpd_set_alert_handler_cb(dev, NULL, NULL); /* Save device structure for use in the alert handlers */ info->dev = dev; /* Initialize the work handler */ k_work_init(&info->work, ucpd_alert_handler); /* Configure CC change alerts */ LL_UCPD_WriteReg(config->ucpd_port, IMR, UCPD_IMR_TYPECEVT1IE | UCPD_IMR_TYPECEVT2IE); LL_UCPD_WriteReg(config->ucpd_port, ICR, UCPD_ICR_TYPECEVT1CF | UCPD_ICR_TYPECEVT2CF); /* SOP'/SOP'' must be enabled via TCPCI call */ data->ucpd_rx_sop_prime_enabled = false; stm32_ucpd_state_init(dev); /* Configure and enable the IRQ */ config_tcpc_irq(); } /** * @brief Initializes the TCPC * * @retval 0 on success * @retval -EIO on failure */ static int ucpd_init(const struct device *dev) { const struct tcpc_config *const config = dev->config; struct tcpc_data *data = dev->data; uint32_t cfg1; int ret; LOG_DBG("Pinctrl signals configuration"); ret = pinctrl_apply_state(config->ucpd_pcfg, 
PINCTRL_STATE_DEFAULT); if (ret != 0) { LOG_ERR("USB pinctrl setup failed (%d)", ret); return ret; } /* * The UCPD port is disabled in the LL_UCPD_Init function * * NOTE: For proper Power Management operation, this function * should not be used because it circumvents the zephyr * clock API. Instead, DTS clock settings and the zephyr * clock API should be used to enable clocks. */ ret = LL_UCPD_Init(config->ucpd_port, (LL_UCPD_InitTypeDef *)&config->ucpd_params); if (ret == SUCCESS) { /* Init Rp to USB */ data->rp = TC_RP_USB; /* * Set RXORDSETEN field to control which types of ordered sets the PD * receiver must receive. */ cfg1 = LL_UCPD_ReadReg(config->ucpd_port, CFG1); cfg1 |= LL_UCPD_ORDERSET_SOP | LL_UCPD_ORDERSET_SOP1 | LL_UCPD_ORDERSET_SOP2 | LL_UCPD_ORDERSET_HARDRST; LL_UCPD_WriteReg(config->ucpd_port, CFG1, cfg1); /* Enable UCPD port */ LL_UCPD_Enable(config->ucpd_port); /* Enable Dead Battery Support */ if (config->ucpd_dead_battery) { dead_battery(dev, true); } else { /* * Some devices have dead battery enabled by default * after power up, so disable it */ dead_battery(dev, false); } /* Initialize the isr */ ucpd_isr_init(dev); } else { return -EIO; } return 0; } static const struct tcpc_driver_api driver_api = { .init = ucpd_init, .set_alert_handler_cb = ucpd_set_alert_handler_cb, .get_cc = ucpd_get_cc, .set_rx_enable = ucpd_set_rx_enable, .get_rx_pending_msg = ucpd_get_rx_pending_msg, .transmit_data = ucpd_transmit_data, .select_rp_value = ucpd_select_rp_value, .get_rp_value = ucpd_get_rp_value, .set_cc = ucpd_set_cc, .set_roles = ucpd_set_roles, .set_vconn_cb = ucpd_set_vconn_cb, .set_vconn_discharge_cb = ucpd_set_vconn_discharge_cb, .set_vconn = ucpd_set_vconn, .vconn_discharge = ucpd_vconn_discharge, .set_cc_polarity = ucpd_cc_set_polarity, .dump_std_reg = ucpd_dump_std_reg, .set_bist_test_mode = ucpd_set_bist_test_mode, .sop_prime_enable = ucpd_sop_prime_enable, }; #define DEV_INST_INIT(n) dev_inst[n] = DEVICE_DT_INST_GET(n); static void 
config_tcpc_irq(void) { static int inst_num; static const struct device *dev_inst[DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT)]; /* Initialize and enable shared irq on last instance */ if (++inst_num == DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT)) { DT_INST_FOREACH_STATUS_OKAY(DEV_INST_INIT) IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), ucpd_isr, dev_inst, 0); irq_enable(DT_INST_IRQN(0)); } } BUILD_ASSERT(DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) > 0, "No compatible STM32 TCPC instance found"); #define TCPC_DRIVER_INIT(inst) \ PINCTRL_DT_INST_DEFINE(inst); \ static struct tcpc_data drv_data_##inst; \ static const struct tcpc_config drv_config_##inst = { \ .ucpd_pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst), \ .ucpd_port = (UCPD_TypeDef *)DT_INST_REG_ADDR(inst), \ .ucpd_params.psc_ucpdclk = ilog2(DT_INST_PROP(inst, psc_ucpdclk)), \ .ucpd_params.transwin = DT_INST_PROP(inst, transwin) - 1, \ .ucpd_params.IfrGap = DT_INST_PROP(inst, ifrgap) - 1, \ .ucpd_params.HbitClockDiv = DT_INST_PROP(inst, hbitclkdiv) - 1, \ .ucpd_dead_battery = DT_INST_PROP(inst, dead_battery), \ }; \ DEVICE_DT_INST_DEFINE(inst, \ &ucpd_init, \ NULL, \ &drv_data_##inst, \ &drv_config_##inst, \ POST_KERNEL, \ CONFIG_USBC_TCPC_INIT_PRIORITY, \ &driver_api); DT_INST_FOREACH_STATUS_OKAY(TCPC_DRIVER_INIT) ```
/content/code_sandbox/drivers/usb_c/tcpc/ucpd_stm32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
12,358
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_USBC_TCPC_UCPD_NUMAKER_H_ #define ZEPHYR_DRIVERS_USBC_TCPC_UCPD_NUMAKER_H_ #include <zephyr/drivers/usb_c/usbc_ppc.h> #include <zephyr/drivers/usb_c/usbc_vbus.h> /* TCPC exported for PPC */ int numaker_tcpc_ppc_is_dead_battery_mode(const struct device *dev); int numaker_tcpc_ppc_exit_dead_battery_mode(const struct device *dev); int numaker_tcpc_ppc_is_vbus_source(const struct device *dev); int numaker_tcpc_ppc_is_vbus_sink(const struct device *dev); int numaker_tcpc_ppc_set_snk_ctrl(const struct device *dev, bool enable); int numaker_tcpc_ppc_set_src_ctrl(const struct device *dev, bool enable); int numaker_tcpc_ppc_set_vbus_discharge(const struct device *dev, bool enable); int numaker_tcpc_ppc_is_vbus_present(const struct device *dev); int numaker_tcpc_ppc_set_event_handler(const struct device *dev, usbc_ppc_event_cb_t handler, void *data); int numaker_tcpc_ppc_dump_regs(const struct device *dev); /* TCPC exported for VBUS */ bool numaker_tcpc_vbus_check_level(const struct device *dev, enum tc_vbus_level level); int numaker_tcpc_vbus_measure(const struct device *dev, int *vbus_meas); int numaker_tcpc_vbus_discharge(const struct device *dev, bool enable); int numaker_tcpc_vbus_enable(const struct device *dev, bool enable); #endif /* ZEPHYR_DRIVERS_USBC_TCPC_UCPD_NUMAKER_H_ */ ```
/content/code_sandbox/drivers/usb_c/tcpc/ucpd_numaker.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
369
```unknown # USB-C STM32 TCPC device configuration options config USBC_TCPC_STM32 bool "USB-C TCPC device controller driver" default y depends on DT_HAS_ST_STM32_UCPD_ENABLED select USE_STM32_LL_UCPD help Enable USB-C TCPC support on the STM32 G0, G4, L5, and U5 family of processors. ```
/content/code_sandbox/drivers/usb_c/tcpc/Kconfig.tcpc_stm32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
87
```unknown # Nuvoton NuMaker USB-C TCPC device configuration options config USBC_TCPC_NUMAKER bool "Nuvoton NuMaker USB-C TCPC device controller driver" default y select HAS_NUMAKER_ADC select HAS_NUMAKER_TMR depends on DT_HAS_NUVOTON_NUMAKER_TCPC_ENABLED help Enable USB-C TCPC support for Nuvoton NuMaker chip with UTCPD. ```
/content/code_sandbox/drivers/usb_c/tcpc/Kconfig.tcpc_numaker
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
90
```unknown # USBC TCPC configuration options menuconfig USBC_TCPC_DRIVER bool "USB-C TCPC drivers" help Enable USB TypeC Port Controller (TCPC) drivers if USBC_TCPC_DRIVER config USBC_TCPC_INIT_PRIORITY int "USB-C TCPC driver init priority" default 80 help USB-C device driver initialization priority. Do not mess with it unless you know what you are doing. Note that the priority needs to be lower than the USBC stack so that it can start before the USBC sub-system. config USBC_TCPC_SHELL bool "Shell commands for TCPC subsystem" help Enable support for TCPC shell commands that helps with USB-C diagnostics. Example functions are printing vbus, chip information and dumping registers. source "drivers/usb_c/tcpc/Kconfig.tcpc_stm32" source "drivers/usb_c/tcpc/Kconfig.tcpc_numaker" module = USBC module-str = usbc source "subsys/logging/Kconfig.template.log_config" endif # USBC_TCPC_DRIVER ```
/content/code_sandbox/drivers/usb_c/tcpc/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
232
```c /* * */ #define DT_DRV_COMPAT nuvoton_numaker_vbus #include <zephyr/kernel.h> #include <zephyr/drivers/usb_c/usbc_vbus.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/clock_control_numaker.h> #include <zephyr/drivers/reset.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(vbus_numaker, CONFIG_USBC_LOG_LEVEL); #include <soc.h> #include <NuMicro.h> #include "../tcpc/ucpd_numaker.h" /* Implementation notes on NuMaker TCPC/PPC/VBUS * * PPC and VBUS rely on TCPC/UTCPD and are just pseudo. They are completely * implemented in TCPC/UTCPD. */ /** * @brief Immutable device context */ struct numaker_vbus_config { const struct device *tcpc_dev; }; /** * @brief Initializes the usb-c vbus driver * * @retval 0 on success * @retval -ENODEV if dependent TCPC device is not ready */ static int numaker_vbus_init(const struct device *dev) { const struct numaker_vbus_config *const config = dev->config; const struct device *tcpc_dev = config->tcpc_dev; /* Rely on TCPC */ if (!device_is_ready(tcpc_dev)) { LOG_ERR("TCPC device not ready"); return -ENODEV; } return 0; } /** * @brief Checks if VBUS is at a particular level * * @retval true if VBUS is at the level voltage * @retval false if VBUS is not at that level voltage */ static bool numaker_vbus_check_level(const struct device *dev, enum tc_vbus_level level) { const struct numaker_vbus_config *const config = dev->config; const struct device *tcpc_dev = config->tcpc_dev; return numaker_tcpc_vbus_check_level(tcpc_dev, level); } /** * @brief Reads and returns VBUS measured in mV * * @retval 0 on success * @retval -EIO on failure */ static int numaker_vbus_measure(const struct device *dev, int *vbus_meas) { const struct numaker_vbus_config *const config = dev->config; const struct device *tcpc_dev = config->tcpc_dev; return numaker_tcpc_vbus_measure(tcpc_dev, vbus_meas); } /** * @brief Controls a pin that discharges VBUS * * @retval 0 on success * @retval 
-EIO on failure */ static int numaker_vbus_discharge(const struct device *dev, bool enable) { const struct numaker_vbus_config *const config = dev->config; const struct device *tcpc_dev = config->tcpc_dev; return numaker_tcpc_vbus_discharge(tcpc_dev, enable); } /** * @brief Controls a pin that enables VBUS measurments * * @retval 0 on success * @retval -EIO on failure */ static int numaker_vbus_enable(const struct device *dev, bool enable) { const struct numaker_vbus_config *const config = dev->config; const struct device *tcpc_dev = config->tcpc_dev; return numaker_tcpc_vbus_enable(tcpc_dev, enable); } static const struct usbc_vbus_driver_api numaker_vbus_driver_api = { .check_level = numaker_vbus_check_level, .measure = numaker_vbus_measure, .discharge = numaker_vbus_discharge, .enable = numaker_vbus_enable, }; #define NUMAKER_TCPC(inst) DT_INST_PARENT(inst) #define VBUS_NUMAKER_INIT(inst) \ static const struct numaker_vbus_config numaker_vbus_config_##inst = { \ .tcpc_dev = DEVICE_DT_GET(NUMAKER_TCPC(inst)), \ }; \ \ DEVICE_DT_INST_DEFINE(inst, numaker_vbus_init, NULL, NULL, &numaker_vbus_config_##inst, \ POST_KERNEL, CONFIG_USBC_VBUS_INIT_PRIORITY, \ &numaker_vbus_driver_api); DT_INST_FOREACH_STATUS_OKAY(VBUS_NUMAKER_INIT); ```
/content/code_sandbox/drivers/usb_c/vbus/usbc_vbus_numaker.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
911
```objective-c
/*
 *
 */

#ifndef ZEPHYR_DRIVERS_USBC_VBUS_ADC_PRIV_H_
#define ZEPHYR_DRIVERS_USBC_VBUS_ADC_PRIV_H_

#include <zephyr/drivers/gpio.h>
#include <zephyr/drivers/adc.h>

/**
 * @brief Driver config
 *
 * Immutable, devicetree-derived configuration for the ADC-based USB-C
 * VBUS measurement driver.
 */
struct usbc_vbus_config {
	/* NOTE(review): presumably the low-side and total resistance (in ohm) of
	 * the VBUS voltage divider feeding the ADC — confirm against the driver
	 * source that consumes this header.
	 */
	uint32_t output_ohm;
	uint32_t full_ohm;
	/* ADC channel used to sample the (divided) VBUS voltage */
	struct adc_dt_spec adc_channel;
	/* Optional control GPIOs; presumably enable measurement / discharge VBUS
	 * — confirm polarity and usage in the driver source.
	 */
	const struct gpio_dt_spec power_gpios;
	const struct gpio_dt_spec discharge_gpios;
};

/**
 * @brief Driver data
 *
 * Mutable per-instance state.
 */
struct usbc_vbus_data {
	/* Raw ADC sample buffer for the read sequence */
	int sample;
	/* ADC sequence descriptor reused across reads */
	struct adc_sequence sequence;
};

#endif /* ZEPHYR_DRIVERS_USBC_VBUS_ADC_PRIV_H_ */
```
/content/code_sandbox/drivers/usb_c/vbus/usbc_vbus_adc_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
151
```unknown # Nuvoton NuMaker USB-C VBUS device configuration options config USBC_VBUS_NUMAKER bool "Nuvoton NuMaker USB-C VBUS" default y depends on DT_HAS_NUVOTON_NUMAKER_VBUS_ENABLED && USBC_TCPC_NUMAKER help Enable USB-C VBUS support for Nuvoton NuMaker chip with UTCPD. ```
/content/code_sandbox/drivers/usb_c/vbus/Kconfig.numaker
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
81
```c /* * */ #define DT_DRV_COMPAT nuvoton_numaker_tcpc #include <zephyr/kernel.h> #include <zephyr/drivers/usb_c/usbc_tcpc.h> #include <zephyr/drivers/usb_c/usbc_ppc.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/clock_control_numaker.h> #include <zephyr/drivers/reset.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/adc.h> #include <zephyr/sys/byteorder.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(tcpc_numaker, CONFIG_USBC_LOG_LEVEL); #include <soc.h> #include <NuMicro.h> #include "ucpd_numaker.h" /* Implementation notes on NuMaker TCPC/PPC/VBUS * * 1. UTCPD, interfacing to external circuit on VBUS/VCONN voltage measurement, * VBUS/VCONN overcurrent protection, VBUS overvoltage protection, etc., * can implement all functions defined in TCPC, PPC, and VBUS. For this, * TCPC is implemented in UTCPD majorly; PPC and VBUS rely on TCPC for * their implementation. * 2. For VBUS/VCONN voltage measurement, UTCPD is updated periodically * by Timer-trigger EADC. To implement this interconnection, TCPC node_id * will cover UTCPD, EADC, and Timer H/W characteristics of registers, * interrupts, resets, and clocks. * NOTE: EADC and Timer interrupts needn't enable for Timer-triggered EADC. * In BSP sample, they are enabled just for development/debug purpose. * 3. About VCONN per PCB * (1) Support only VCONN source, no VCONN sink (like Plug Cable) * (2) Separate pins for VCONN enable on CC1/CC2 (VCNEN1/VCNEN2) * (3) Single pin for VCONN discharge (DISCHG) * 4. VBUS discharge precedence * (1) GPIO * (2) UTCPD * 5. VCONN discharge precedence * (1) DPM-supplied callback * (2) GPIO * (3) UTCPD */ /** * @brief Invalid or missing value */ #define NUMAKER_INVALID_VALUE UINT32_MAX /** * @brief UTCPD VBUS threshold default in mV * * These are default values of UTCPD VBUS threshold registers. They need * to be reconfigured by taking the following factors into consideration: * 1. Analog Vref * 2. 
UTCPD VBVOL.VBSCALE */ #define NUMAKER_UTCPD_VBUS_THRESHOLD_OVERVOLTAGE_MV 25000 #define NUMAKER_UTCPD_VBUS_THRESHOLD_VSAFE5V_MV 5000 #define NUMAKER_UTCPD_VBUS_THRESHOLD_VSAFE0V_MV 0 #define NUMAKER_UTCPD_VBUS_THRESHOLD_STOP_FORCE_DISCHARGE_MV 800 #define NUMAKER_UTCPD_VBUS_THRESHOLD_SINK_DISCONNECT_MV 3500 /** * @brief SYS register dump */ #define NUMAKER_SYS_REG_DUMP(dev, reg_name) LOG_INF("SYS: %8s: 0x%08x", #reg_name, SYS->reg_name); /** * @brief GPIO register dump */ #define NUMAKER_GPIO_REG_DUMP(dev, port, reg_name) \ LOG_INF("%s: %8s: 0x%08x", #port, #reg_name, port->reg_name); /** * @brief UTCPD register write timeout in microseconds */ #define NUMAKER_UTCPD_REG_WRITE_BY_NAME_TIMEOUT_US 20000 /** * @brief UTCPD register write by name */ #define NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, reg_name, val) \ ({ \ int rc_intern = numaker_utcpd_reg_write_wait_ready(dev); \ if (rc_intern < 0) { \ LOG_ERR("UTCPD register (%s) write timeout", #reg_name); \ } else { \ utcpd_base->reg_name = (val); \ } \ rc_intern; \ }) /** * @brief UTCPD register force write by name */ #define NUMAKER_UTCPD_REG_FORCE_WRITE_BY_NAME(dev, reg_name, val) \ ({ \ int rc_intern = numaker_utcpd_reg_write_wait_ready(dev); \ if (rc_intern < 0) { \ LOG_ERR("UTCPD register (%s) write timeout, force-write", #reg_name); \ } \ utcpd_base->reg_name = (val); \ rc_intern; \ }) /** * @brief UTCPD register write by offset */ #define NUMAKER_UTCPD_REG_WRITE_BY_OFFSET(dev, reg_offset, val) \ ({ \ int rc_intern = numaker_utcpd_reg_write_wait_ready(dev); \ if (rc_intern < 0) { \ LOG_ERR("UTCPD register (0x%04x) write timeout", reg_offset); \ } else { \ sys_write32((val), ((uintptr_t)utcpd_base) + reg_offset); \ } \ rc_intern; \ }) /** * @brief UTCPD register force write by offset */ #define NUMAKER_UTCPD_REG_FORCE_WRITE_BY_OFFSET(dev, reg_offset, val) \ ({ \ int rc_intern = numaker_utcpd_reg_write_wait_ready(dev); \ if (rc_intern < 0) { \ LOG_ERR("UTCPD register (0x%04x) write timeout, force-write", reg_offset); 
\ } \ sys_write32((val), ((uintptr_t)utcpd_base) + reg_offset); \ rc_intern; \ }) /** * @brief UTCPD register read by name */ #define NUMAKER_UTCPD_REG_READ_BY_NAME(dev, reg_name) ({ utcpd_base->reg_name; }) /** * @brief UTCPD register read by offset */ #define NUMAKER_UTCPD_REG_READ_BY_OFFSET(dev, reg_offset) \ ({ sys_read32(((uintptr_t)utcpd_base) + reg_offset); }) /** * @brief UTCPD register dump */ #define NUMAKER_UTCPD_REG_DUMP(dev, reg_name) \ LOG_INF("UTCPD: %8s: 0x%08x", #reg_name, NUMAKER_UTCPD_REG_READ_BY_NAME(dev, reg_name)); /** * @brief Helper to write UTCPD VBUS threshold */ #define NUMAKER_UTCPD_VBUS_THRESHOLD_WRITE(dev, reg_name, mv_norm) \ ({ \ uint32_t mv_bit; \ mv_bit = numaker_utcpd_vbus_volt_mv2bit(dev, mv_norm); \ mv_bit <<= UTCPD_##reg_name##_##reg_name##_Pos; \ mv_bit &= UTCPD_##reg_name##_##reg_name##_Msk; \ NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, reg_name, mv_bit); \ }) /** * @brief Helper to read UTCPD VBUS threshold */ #define NUMAKER_UTCPD_VBUS_THRESHOLD_READ(dev, reg_name) \ ({ \ uint32_t mv_bit; \ mv_bit = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, reg_name); \ mv_bit &= UTCPD_##reg_name##_##reg_name##_Msk; \ mv_bit >>= UTCPD_##reg_name##_##reg_name##_Pos; \ numaker_utcpd_vbus_volt_bit2mv(dev, mv_bit); \ }) /** * @brief Immutable device context */ struct numaker_tcpc_config { UTCPD_T *utcpd_base; EADC_T *eadc_base; TIMER_T *timer_base; const struct device *clkctrl_dev; struct numaker_scc_subsys pcc_utcpd; struct numaker_scc_subsys pcc_timer; struct reset_dt_spec reset_utcpd; struct reset_dt_spec reset_timer; void (*irq_config_func_utcpd)(const struct device *dev); void (*irq_unconfig_func_utcpd)(const struct device *dev); const struct pinctrl_dev_config *pincfg; struct { struct { struct gpio_dt_spec vbus_detect; struct gpio_dt_spec vbus_discharge; struct gpio_dt_spec vconn_discharge; } gpios; bool dead_battery; struct { uint32_t bit; } pinpl; struct { struct { uint32_t bit; uint32_t value; } vbscale; } vbvol; } utcpd; struct { const struct 
adc_dt_spec *spec_vbus; const struct adc_dt_spec *spec_vconn; /* Rate of timer-triggered voltage measurement (Hz) */ uint32_t timer_trigger_rate; /* Trigger source for measuring VBUS/VCONN voltage */ uint32_t trgsel_vbus; uint32_t trgsel_vconn; } eadc; }; /** * @brief Mutable device context */ struct numaker_tcpc_data { enum tc_rp_value rp; bool rx_sop_prime_enabled; /* One-slot Rx FIFO */ bool rx_msg_ready; struct pd_msg rx_msg; /* The fields below must persist across tcpc_init(). */ uint32_t vref_mv; /* TCPC alert */ struct { tcpc_alert_handler_cb_t handler; void *data; } tcpc_alert; /* PPC event */ struct { usbc_ppc_event_cb_t handler; void *data; } ppc_event; /* DPM supplied */ struct { /* VCONN callback function */ tcpc_vconn_control_cb_t vconn_cb; /* VCONN discharge callback function */ tcpc_vconn_discharge_cb_t vconn_discharge_cb; } dpm; }; /** * @brief Wait ready for next write access to UTCPD register * * @retval 0 on success * @retval -EIO on failure */ static int numaker_utcpd_reg_write_wait_ready(const struct device *dev) { const struct numaker_tcpc_config *const config = dev->config; UTCPD_T *utcpd_base = config->utcpd_base; if (!WAIT_FOR((utcpd_base->CLKINFO & UTCPD_CLKINFO_ReadyFlag_Msk), NUMAKER_UTCPD_REG_WRITE_BY_NAME_TIMEOUT_US, NULL)) { return -EIO; } return 0; } /** * @brief Convert VBUS voltage format from H/W bit to mV * * The following factors are taken into consideration: * 1. Analog Vref * 2. UTCPD VBVOL.VBSCALE * * @note UTCPD VBVOL.VBVOL = MSB 10-bit of EADC DAT.RESULT[11:0], * that is, discarding LSB 2-bit. 
*/ static uint32_t numaker_utcpd_vbus_volt_bit2mv(const struct device *dev, uint32_t bit) { const struct numaker_tcpc_config *const config = dev->config; struct numaker_tcpc_data *data = dev->data; __ASSERT_NO_MSG(data->vref_mv); return (uint32_t)(((uint64_t)bit) * data->vref_mv * config->utcpd.vbvol.vbscale.value / BIT_MASK(10)); } /** * @brief Convert VBUS voltage format from mV to H/W bit * * The following factors are taken into consideration: * 1. Analog Vref * 2. UTCPD VBVOL.VBSCALE * * @note UTCPD VBVOL.VBVOL = MSB 10-bit of EADC DAT.RESULT[11:0], * that is, discarding LSB 2-bit. */ static uint32_t numaker_utcpd_vbus_volt_mv2bit(const struct device *dev, uint32_t mv) { const struct numaker_tcpc_config *const config = dev->config; struct numaker_tcpc_data *data = dev->data; __ASSERT_NO_MSG(data->vref_mv); return mv * BIT_MASK(10) / data->vref_mv / config->utcpd.vbvol.vbscale.value; } /** * @brief UTCPD register dump * * @retval 0 on success */ static int numaker_utcpd_dump_regs(const struct device *dev) { const struct numaker_tcpc_config *const config = dev->config; UTCPD_T *utcpd_base = config->utcpd_base; /* SYS register */ NUMAKER_SYS_REG_DUMP(dev, VREFCTL); NUMAKER_SYS_REG_DUMP(dev, UTCPDCTL); /* UTCPD register */ NUMAKER_UTCPD_REG_DUMP(dev, IS); NUMAKER_UTCPD_REG_DUMP(dev, IE); NUMAKER_UTCPD_REG_DUMP(dev, PWRSTSIE); NUMAKER_UTCPD_REG_DUMP(dev, FUTSTSIE); NUMAKER_UTCPD_REG_DUMP(dev, CTL); NUMAKER_UTCPD_REG_DUMP(dev, PINPL); NUMAKER_UTCPD_REG_DUMP(dev, ROLCTL); NUMAKER_UTCPD_REG_DUMP(dev, FUTCTL); NUMAKER_UTCPD_REG_DUMP(dev, PWRCTL); NUMAKER_UTCPD_REG_DUMP(dev, CCSTS); NUMAKER_UTCPD_REG_DUMP(dev, PWRSTS); NUMAKER_UTCPD_REG_DUMP(dev, FUTSTS); NUMAKER_UTCPD_REG_DUMP(dev, DVCAP1); NUMAKER_UTCPD_REG_DUMP(dev, DVCAP2); NUMAKER_UTCPD_REG_DUMP(dev, MSHEAD); NUMAKER_UTCPD_REG_DUMP(dev, DTRXEVNT); NUMAKER_UTCPD_REG_DUMP(dev, VBVOL); NUMAKER_UTCPD_REG_DUMP(dev, SKVBDCTH); NUMAKER_UTCPD_REG_DUMP(dev, SPDGTH); NUMAKER_UTCPD_REG_DUMP(dev, VBAMH); 
NUMAKER_UTCPD_REG_DUMP(dev, VBAML); NUMAKER_UTCPD_REG_DUMP(dev, VNDIS); NUMAKER_UTCPD_REG_DUMP(dev, VNDIE); NUMAKER_UTCPD_REG_DUMP(dev, MUXSEL); NUMAKER_UTCPD_REG_DUMP(dev, VCDGCTL); NUMAKER_UTCPD_REG_DUMP(dev, ADGTM); NUMAKER_UTCPD_REG_DUMP(dev, VSAFE0V); NUMAKER_UTCPD_REG_DUMP(dev, VSAFE5V); NUMAKER_UTCPD_REG_DUMP(dev, VBOVTH); NUMAKER_UTCPD_REG_DUMP(dev, VCPSVOL); NUMAKER_UTCPD_REG_DUMP(dev, VCUV); NUMAKER_UTCPD_REG_DUMP(dev, PHYCTL); NUMAKER_UTCPD_REG_DUMP(dev, FRSRXCTL); NUMAKER_UTCPD_REG_DUMP(dev, VCVOL); NUMAKER_UTCPD_REG_DUMP(dev, CLKINFO); return 0; } /** * @brief Initializes EADC Vref * * @retval 0 on success */ static int numaker_eadc_vref_init(const struct device *dev) { const struct numaker_tcpc_config *const config = dev->config; struct numaker_tcpc_data *data = dev->data; const struct adc_dt_spec *spec; enum adc_reference reference; if (data->vref_mv) { return 0; } /* NOTE: Register protection lock will restore automatically. Unlock it again. */ SYS_UnlockReg(); /* Analog reference voltage * * NOTE: For Vref being internal, external Vref pin must be floating, * or it can disturb. */ spec = config->eadc.spec_vbus ? 
config->eadc.spec_vbus : config->eadc.spec_vconn; if (spec == NULL) { return 0; } /* ADC device ready */ if (!adc_is_ready_dt(spec)) { LOG_ERR("ADC device for VBUS/VCONN not ready"); return -ENODEV; } /* ADC channel configuration ready */ if (!spec->channel_cfg_dt_node_exists) { LOG_ERR("ADC channel configuration for VBUS/VCONN not specified"); return -ENODEV; } reference = spec->channel_cfg.reference; SYS->VREFCTL &= ~SYS_VREFCTL_VREFCTL_Msk; if (reference == ADC_REF_EXTERNAL0 || reference == ADC_REF_EXTERNAL1) { SYS->VREFCTL |= SYS_VREFCTL_VREF_PIN; } else if (reference == ADC_REF_INTERNAL) { switch (spec->vref_mv) { case 1600: SYS->VREFCTL |= SYS_VREFCTL_VREF_1_6V; break; case 2000: SYS->VREFCTL |= SYS_VREFCTL_VREF_2_0V; break; case 2500: SYS->VREFCTL |= SYS_VREFCTL_VREF_2_5V; break; case 3000: SYS->VREFCTL |= SYS_VREFCTL_VREF_3_0V; break; default: LOG_ERR("Invalid Vref voltage"); return -ENOTSUP; } } else { LOG_ERR("Invalid Vref source"); return -ENOTSUP; } data->vref_mv = spec->vref_mv; return 0; } /** * @brief Reads and returns UTCPD VBUS measured in mV * * @retval 0 on success * @retval -EIO on failure */ int numaker_utcpd_vbus_measure(const struct device *dev, uint32_t *mv) { const struct numaker_tcpc_config *const config = dev->config; UTCPD_T *utcpd_base = config->utcpd_base; int rc; if (mv == NULL) { return -EINVAL; } *mv = 0; if (config->eadc.spec_vbus == NULL) { return -ENOTSUP; } /* Vref */ rc = numaker_eadc_vref_init(dev); if (rc < 0) { return rc; } *mv = NUMAKER_UTCPD_VBUS_THRESHOLD_READ(dev, VBVOL); return 0; } /** * @brief Check if the UTCPD VBUS is present * * @retval 1 if UTCPD VBUS is present * @retval 0 if UTCPD VBUS is not present */ int numaker_utcpd_vbus_is_present(const struct device *dev) { const struct numaker_tcpc_config *const config = dev->config; UTCPD_T *utcpd_base = config->utcpd_base; uint32_t pwrsts = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, PWRSTS); if (pwrsts & UTCPD_PWRSTS_VBPS_Msk) { return 1; } else { return 0; } } /** * @brief 
Check if the UTCPD VBUS is sourcing * * @retval 1 if UTCPD VBUS is sourcing * @retval 0 if UTCPD VBUS is not sourcing */ int numaker_utcpd_vbus_is_source(const struct device *dev) { const struct numaker_tcpc_config *const config = dev->config; UTCPD_T *utcpd_base = config->utcpd_base; uint32_t pwrsts = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, PWRSTS); if (pwrsts & (UTCPD_PWRSTS_SRHV_Msk | UTCPD_PWRSTS_SRVB_Msk)) { return 1; } else { return 0; } } /** * @brief Check if the UTCPD VBUS is sinking * * @retval 1 if UTCPD VBUS is sinking * @retval 0 if UTCPD VBUS is not sinking */ int numaker_utcpd_vbus_is_sink(const struct device *dev) { const struct numaker_tcpc_config *const config = dev->config; UTCPD_T *utcpd_base = config->utcpd_base; uint32_t pwrsts = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, PWRSTS); if (pwrsts & UTCPD_PWRSTS_SKVB_Msk) { return 1; } else { return 0; } } /** * @brief Enable or disable discharge on UTCPD VBUS * * @retval 0 on success * @retval -EIO on failure */ int numaker_utcpd_vbus_set_discharge(const struct device *dev, bool enable) { const struct numaker_tcpc_config *const config = dev->config; UTCPD_T *utcpd_base = config->utcpd_base; int rc; const struct gpio_dt_spec *vbus_discharge_spec = &config->utcpd.gpios.vbus_discharge; uint32_t pwrctl = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, PWRCTL); /* Use GPIO VBUS discharge */ if (vbus_discharge_spec->port != NULL) { return gpio_pin_set_dt(vbus_discharge_spec, enable); } /* Use UTCPD VBUS discharge */ if (enable) { pwrctl |= UTCPD_PWRCTL_FDGEN_Msk; } else { pwrctl &= ~UTCPD_PWRCTL_FDGEN_Msk; } rc = NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, PWRCTL, pwrctl); if (rc < 0) { return rc; } return 0; } /** * @brief Enable or disable UTCPD BIST test mode * * @retval 0 on success * @retval -EIO on failure */ static int numaker_utcpd_bist_test_mode_set_enable(const struct device *dev, bool enable) { const struct numaker_tcpc_config *const config = dev->config; UTCPD_T *utcpd_base = config->utcpd_base; int rc; uint32_t ctl = 
NUMAKER_UTCPD_REG_READ_BY_NAME(dev, CTL); /* Enable or not BIST test mode */ if (enable) { ctl |= UTCPD_CTL_BISTEN_Msk; } else { ctl &= ~UTCPD_CTL_BISTEN_Msk; } rc = NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, CTL, ctl); if (rc < 0) { return rc; } return 0; } /** * @brief Check if UTCPD BIST test mode is enabled * * @retval 1 if UTCPD BIST test mode is enabled * @retval 0 if UTCPD BIST test mode is not enabled */ static int numaker_utcpd_bist_test_mode_is_enabled(const struct device *dev) { const struct numaker_tcpc_config *const config = dev->config; UTCPD_T *utcpd_base = config->utcpd_base; uint32_t ctl = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, CTL); if (ctl & UTCPD_CTL_BISTEN_Msk) { return 1; } else { return 0; } } /** * @brief Clears UTCPD Rx message FIFO * * @retval 0 on success */ static int numaker_utcpd_rx_fifo_clear(const struct device *dev) { struct numaker_tcpc_data *data = dev->data; data->rx_msg_ready = false; return 0; } /** * @brief Reads Rx message data from UTCPD * * @retval 0 on success * @retval -EIO on failure */ static int numaker_utcpd_rx_read_data(const struct device *dev, uint8_t *rx_data, uint32_t rx_data_size) { const struct numaker_tcpc_config *const config = dev->config; UTCPD_T *utcpd_base = config->utcpd_base; uint32_t data_rmn = rx_data_size; uint8_t *data_pos = rx_data; uintptr_t data_reg_offset = offsetof(UTCPD_T, RXDA0); uint32_t data_value; /* 32-bit aligned */ while (data_rmn >= 4) { data_value = NUMAKER_UTCPD_REG_READ_BY_OFFSET(dev, data_reg_offset); sys_put_le32(data_value, data_pos); /* Next data */ data_reg_offset += 4; data_pos += 4; data_rmn -= 4; } /* Remaining non-32-bit aligned */ __ASSERT_NO_MSG(data_rmn < 4); if (data_rmn) { data_value = NUMAKER_UTCPD_REG_READ_BY_OFFSET(dev, data_reg_offset); data_reg_offset += 4; switch (data_rmn) { case 3: sys_put_le24(data_value, data_pos); data_pos += 3; data_rmn -= 3; break; case 2: sys_put_le16(data_value, data_pos); data_pos += 2; data_rmn -= 2; break; case 1: *data_pos = data_value; 
data_pos += 1; data_rmn -= 1; break; } } __ASSERT_NO_MSG(data_rmn == 0); return 0; } /** * @brief Writes Tx message data to UTCPD * * @retval 0 on success * @retval -EIO on failure */ static int numaker_utcpd_tx_write_data(const struct device *dev, const uint8_t *tx_data, uint32_t tx_data_size) { const struct numaker_tcpc_config *const config = dev->config; UTCPD_T *utcpd_base = config->utcpd_base; int rc; uint32_t data_rmn = tx_data_size; const uint8_t *data_pos = tx_data; uint32_t data_reg_offset = offsetof(UTCPD_T, TXDA0); uint32_t data_value; /* 32-bit aligned */ while (data_rmn >= 4) { data_value = sys_get_le32(data_pos); rc = NUMAKER_UTCPD_REG_WRITE_BY_OFFSET(dev, data_reg_offset, data_value); if (rc < 0) { return rc; } /* Next data */ data_pos += 4; data_reg_offset += 4; data_rmn -= 4; } /* Remaining non-32-bit aligned */ __ASSERT_NO_MSG(data_rmn < 4); if (data_rmn) { switch (data_rmn) { case 3: data_value = sys_get_le24(data_pos); data_pos += 3; data_rmn -= 3; break; case 2: data_value = sys_get_le16(data_pos); data_pos += 2; data_rmn -= 2; break; case 1: data_value = *data_pos; data_pos += 1; data_rmn -= 1; break; } rc = NUMAKER_UTCPD_REG_WRITE_BY_OFFSET(dev, data_reg_offset, data_value); if (rc < 0) { return rc; } data_reg_offset += 4; } __ASSERT_NO_MSG(data_rmn == 0); return 0; } /** * @brief Enqueues UTCPD Rx message * * @retval 0 on success * @retval -EIO on failure */ static int numaker_utcpd_rx_fifo_enqueue(const struct device *dev) { const struct numaker_tcpc_config *const config = dev->config; struct numaker_tcpc_data *data = dev->data; UTCPD_T *utcpd_base = config->utcpd_base; int rc = 0; uint32_t rxbcnt = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, RXBCNT); uint32_t rxftype = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, RXFTYPE); uint32_t rxhead = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, RXHEAD); uint32_t is = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, IS); uint32_t rx_data_size; struct pd_msg *msg = &data->rx_msg; /* Rx message pending? 
*/ if (!(is & UTCPD_IS_RXSOPIS_Msk)) { goto cleanup; } /* rxbcnt = 1 (frame type) + 2 (Message Header) + Rx data byte count */ if (rxbcnt < 3) { LOG_ERR("Invalid UTCPD.RXBCNT: %d", rxbcnt); rc = -EIO; goto cleanup; } rx_data_size = rxbcnt - 3; /* Not support Unchunked Extended Message exceeding PD_CONVERT_PD_HEADER_COUNT_TO_BYTES */ if (rx_data_size > (PD_MAX_EXTENDED_MSG_LEGACY_LEN + 2)) { LOG_ERR("Not support Unchunked Extended Message exceeding " "PD_CONVERT_PD_HEADER_COUNT_TO_BYTES: %d", rx_data_size); rc = -EIO; goto cleanup; } /* Rx FIFO has room? */ if (data->rx_msg_ready) { LOG_WRN("Rx FIFO overflow"); } /* Rx frame type */ /* NOTE: Needn't extra cast for UTCPD_RXFTYPE.RXFTYPE aligning with pd_packet_type */ msg->type = (rxftype & UTCPD_RXFTYPE_RXFTYPE_Msk) >> UTCPD_RXFTYPE_RXFTYPE_Pos; /* Rx header */ msg->header.raw_value = (uint16_t)rxhead; /* Rx data size */ msg->len = rx_data_size; /* Rx data */ rc = numaker_utcpd_rx_read_data(dev, msg->data, rx_data_size); if (rc < 0) { goto cleanup; } /* Finish enqueue of this Rx message */ data->rx_msg_ready = true; cleanup: /* This has side effect of clearing UTCPD_RXBCNT and friends. 
*/ NUMAKER_UTCPD_REG_FORCE_WRITE_BY_NAME(dev, IS, UTCPD_IS_RXSOPIS_Msk); return rc; } /** * @brief Notify TCPC alert */ static void numaker_utcpd_notify_tcpc_alert(const struct device *dev, enum tcpc_alert alert) { struct numaker_tcpc_data *data = dev->data; tcpc_alert_handler_cb_t alert_handler = data->tcpc_alert.handler; void *alert_data = data->tcpc_alert.data; if (alert_handler) { alert_handler(dev, alert_data, alert); } } /** * @brief Notify PPC event */ static void numaker_utcpd_notify_ppc_event(const struct device *dev, enum usbc_ppc_event event) { struct numaker_tcpc_data *data = dev->data; usbc_ppc_event_cb_t event_handler = data->ppc_event.handler; void *event_data = data->ppc_event.data; if (event_handler) { event_handler(dev, event_data, event); } } /** * @brief UTCPD ISR * * @note UTCPD register write cannot be failed, or we may trap in ISR for * interrupt bits not cleared. To avoid that, we use "force-write" * version clear interrupt bits for sure. */ static void numaker_utcpd_isr(const struct device *dev) { const struct numaker_tcpc_config *const config = dev->config; UTCPD_T *utcpd_base = config->utcpd_base; uint32_t is = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, IS); uint32_t futsts = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, FUTSTS); uint32_t vndis = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, VNDIS); uint32_t ie = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, IE); uint32_t futstsie = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, FUTSTSIE); /* CC status changed */ if (is & UTCPD_IS_CCSCHIS_Msk) { numaker_utcpd_notify_tcpc_alert(dev, TCPC_ALERT_CC_STATUS); } /* Power status changed */ if (is & UTCPD_IS_PWRSCHIS_Msk) { numaker_utcpd_notify_tcpc_alert(dev, TCPC_ALERT_POWER_STATUS); } /* Received SOP Message */ if (is & UTCPD_IS_RXSOPIS_Msk) { numaker_utcpd_rx_fifo_enqueue(dev); /* Per TCPCI 4.4.5.1 TCPC_CONTROL, BIST Test Mode * Incoming messages enabled by RECEIVE_DETECT result * in GoodCRC response but may not be passed to the TCPM * via Alert. 
TCPC may temporarily store incoming messages * in the Receive Message Buffer, but this may or may not * result in a Receive SOP* Message Status or a Rx Buffer * Overflow alert. */ if (numaker_utcpd_bist_test_mode_is_enabled(dev) == 1) { numaker_utcpd_rx_fifo_clear(dev); } else { numaker_utcpd_notify_tcpc_alert(dev, TCPC_ALERT_MSG_STATUS); } } /* Rx buffer overflow */ if (is & UTCPD_IS_RXOFIS_Msk) { LOG_WRN("Rx buffer overflow"); numaker_utcpd_notify_tcpc_alert(dev, TCPC_ALERT_RX_BUFFER_OVERFLOW); } /* Received Hard Reset */ if (is & UTCPD_IS_RXHRSTIS_Msk) { numaker_utcpd_notify_tcpc_alert(dev, TCPC_ALERT_HARD_RESET_RECEIVED); } /* SOP* message transmission not successful, no GoodCRC response received on SOP* message * transmission */ if (is & UTCPD_IS_TXFALIS_Msk) { numaker_utcpd_notify_tcpc_alert(dev, TCPC_ALERT_TRANSMIT_MSG_FAILED); } /* Reset or SOP* message transmission not sent due to incoming receive message */ if (is & UTCPD_IS_TXDCUDIS_Msk) { numaker_utcpd_notify_tcpc_alert(dev, TCPC_ALERT_TRANSMIT_MSG_DISCARDED); } /* Reset or SOP* message transmission successful, GoodCRC response received on SOP* message * transmission */ if (is & UTCPD_IS_TXOKIS_Msk) { numaker_utcpd_notify_tcpc_alert(dev, TCPC_ALERT_TRANSMIT_MSG_SUCCESS); } /* VBUS voltage alarm high */ if ((is & UTCPD_IS_VBAMHIS_Msk) && (ie & UTCPD_IS_VBAMHIS_Msk)) { LOG_WRN("UTCPD VBUS voltage alarm high not addressed, disable the alert"); ie &= ~UTCPD_IS_VBAMHIS_Msk; NUMAKER_UTCPD_REG_FORCE_WRITE_BY_NAME(dev, IE, ie); numaker_utcpd_notify_tcpc_alert(dev, TCPC_ALERT_VBUS_ALARM_HI); } /* VBUS voltage alarm low */ if ((is & UTCPD_IS_VBAMLIS_Msk) && (ie & UTCPD_IS_VBAMLIS_Msk)) { LOG_WRN("UTCPD VBUS voltage alarm low not addressed, disable the alert"); ie &= ~UTCPD_IS_VBAMLIS_Msk; NUMAKER_UTCPD_REG_FORCE_WRITE_BY_NAME(dev, IE, ie); numaker_utcpd_notify_tcpc_alert(dev, TCPC_ALERT_VBUS_ALARM_LO); } /* Fault */ if ((is & UTCPD_IS_FUTIS_Msk) && (futstsie & futsts)) { LOG_ERR("UTCPD fault (FUTSTS=0x%08x)", 
futsts); NUMAKER_UTCPD_REG_FORCE_WRITE_BY_OFFSET(dev, offsetof(UTCPD_T, FUTSTS), futsts); /* NOTE: FUTSTSIE will restore to default on Hard Reset. We may re-enter * here and redo mask. */ LOG_WRN("UTCPD fault (FUTSTS=0x%08x) not addressed, disable fault alert (FUTSTSIE)", futsts); futstsie &= ~futsts; NUMAKER_UTCPD_REG_FORCE_WRITE_BY_NAME(dev, FUTSTSIE, futstsie); numaker_utcpd_notify_tcpc_alert(dev, TCPC_ALERT_FAULT_STATUS); /* VBUS overvoltage */ if (futsts & UTCPD_FUTSTS_VBOVFUT_Msk) { if (numaker_utcpd_vbus_is_source(dev)) { numaker_utcpd_notify_ppc_event(dev, USBC_PPC_EVENT_SRC_OVERVOLTAGE); } if (numaker_utcpd_vbus_is_sink(dev)) { numaker_utcpd_notify_ppc_event(dev, USBC_PPC_EVENT_SNK_OVERVOLTAGE); } } /* VBUS overcurrent */ if (futsts & UTCPD_FUTSTS_VBOCFUT_Msk) { if (numaker_utcpd_vbus_is_source(dev)) { numaker_utcpd_notify_ppc_event(dev, USBC_PPC_EVENT_SRC_OVERCURRENT); } } } /* VBUS Sink disconnect threshold crossing has been detected */ if (is & UTCPD_IS_SKDCDTIS_Msk) { numaker_utcpd_notify_tcpc_alert(dev, TCPC_ALERT_VBUS_SNK_DISCONNECT); } /* Vendor defined event detected */ if (is & UTCPD_IS_VNDIS_Msk) { NUMAKER_UTCPD_REG_FORCE_WRITE_BY_NAME(dev, VNDIS, vndis); numaker_utcpd_notify_tcpc_alert(dev, TCPC_ALERT_VENDOR_DEFINED); } NUMAKER_UTCPD_REG_FORCE_WRITE_BY_NAME(dev, IS, is); } /** * @brief Configures EADC sample module with trigger source, channel, etc. 
*/ static int numaker_eadc_smplmod_init(const struct device *dev, const struct adc_dt_spec *spec, uint32_t trgsel) { const struct numaker_tcpc_config *const config = dev->config; EADC_T *eadc_base = config->eadc_base; uint16_t acquisition_time; uint16_t acq_time_unit; uint16_t acq_time_value; __ASSERT_NO_MSG(spec); /* ADC device ready */ if (!adc_is_ready_dt(spec)) { LOG_ERR("ADC device for VBUS/VCONN not ready"); return -ENODEV; } /* ADC channel configuration ready */ if (!spec->channel_cfg_dt_node_exists) { LOG_ERR("ADC channel configuration for VBUS/VCONN not specified"); return -ENODEV; } acquisition_time = spec->channel_cfg.acquisition_time; acq_time_unit = ADC_ACQ_TIME_UNIT(acquisition_time); acq_time_value = ADC_ACQ_TIME_VALUE(acquisition_time); if (acq_time_unit != ADC_ACQ_TIME_TICKS) { LOG_ERR("Invalid acquisition time unit for VBUS/VCONN"); return -ENOTSUP; } /* Bind sample module with trigger source and channel */ EADC_ConfigSampleModule(eadc_base, spec->channel_id, trgsel, spec->channel_id); /* Extend sampling time */ EADC_SetExtendSampleTime(eadc_base, spec->channel_id, acq_time_value); return 0; } /** * @brief Initializes VBUS threshold and monitor * * @retval 0 on success * @retval -EIO on failure */ static int numaker_utcpd_vbus_init(const struct device *dev) { const struct numaker_tcpc_config *const config = dev->config; UTCPD_T *utcpd_base = config->utcpd_base; int rc; uint32_t vbvol = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, VBVOL); uint32_t pwrctl = 0; /* UTCPD VBUS scale factor */ vbvol &= ~UTCPD_VBVOL_VBSCALE_Msk; vbvol |= config->utcpd.vbvol.vbscale.bit; rc = NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, VBVOL, vbvol); if (rc < 0) { return rc; } if (config->eadc.spec_vbus != NULL) { /* Vref */ rc = numaker_eadc_vref_init(dev); if (rc < 0) { return rc; } /* UTCPD VBUS overvoltage threshold */ rc = NUMAKER_UTCPD_VBUS_THRESHOLD_WRITE( dev, VBOVTH, NUMAKER_UTCPD_VBUS_THRESHOLD_OVERVOLTAGE_MV); if (rc < 0) { return rc; } /* UTCPD VBUS vSafe5V threshold */ rc = 
NUMAKER_UTCPD_VBUS_THRESHOLD_WRITE(dev, VSAFE5V, NUMAKER_UTCPD_VBUS_THRESHOLD_VSAFE5V_MV); if (rc < 0) { return rc; } /* UTCPD VBUS vSafe0V threshold */ rc = NUMAKER_UTCPD_VBUS_THRESHOLD_WRITE(dev, VSAFE0V, NUMAKER_UTCPD_VBUS_THRESHOLD_VSAFE0V_MV); if (rc < 0) { return rc; } /* UTCPD VBUS stop force discharge threshold */ rc = NUMAKER_UTCPD_VBUS_THRESHOLD_WRITE( dev, SPDGTH, NUMAKER_UTCPD_VBUS_THRESHOLD_STOP_FORCE_DISCHARGE_MV); if (rc < 0) { return rc; } /* UTCPD VBUS sink disconnect threshold */ rc = NUMAKER_UTCPD_VBUS_THRESHOLD_WRITE( dev, SKVBDCTH, NUMAKER_UTCPD_VBUS_THRESHOLD_SINK_DISCONNECT_MV); if (rc < 0) { return rc; } } /* Enable UTCPD VBUS voltage monitor so that UTCPD.VBVOL is available */ if (config->eadc.spec_vbus != NULL) { pwrctl &= ~UTCPD_PWRCTL_VBMONI_DIS; } else { pwrctl |= UTCPD_PWRCTL_VBMONI_DIS; } /* Disable UTCPD VBUS voltage alarms */ pwrctl |= UTCPD_PWRCTL_DSVBAM_DIS; /* Disable UTCPD VBUS auto-discharge on disconnect * NOTE: UTCPD may not integrate with discharge, so this feature is * disabled and discharge is handled separately. 
*/ pwrctl &= ~UTCPD_PWRCTL_ADGDC; return NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, PWRCTL, pwrctl); } /** * @brief Initializes UTCPD GPIO pins * * @retval 0 on success * @retval -EIO on failure */ static int numaker_utcpd_gpios_init(const struct device *dev) { const struct numaker_tcpc_config *const config = dev->config; int rc; const struct gpio_dt_spec *spec; /* Configure VBUS detect pin to INPUT to avoid intervening its power measurement */ spec = &config->utcpd.gpios.vbus_detect; if (spec->port == NULL) { LOG_ERR("VBUS detect pin not specified"); return -ENODEV; } if (!gpio_is_ready_dt(spec)) { LOG_ERR("VBUS detect pin port device not ready"); return -ENODEV; } rc = gpio_pin_configure_dt(spec, GPIO_INPUT); if (rc < 0) { LOG_ERR("VBUS detect pin configured to INPUT failed: %d", rc); return rc; } /* Configure VBUS discharge pin to OUTPUT INACTIVE */ spec = &config->utcpd.gpios.vbus_discharge; if (spec->port != NULL) { if (!gpio_is_ready_dt(spec)) { LOG_ERR("VBUS discharge pin port device not ready"); return -ENODEV; } rc = gpio_pin_configure_dt(spec, GPIO_OUTPUT_INACTIVE); if (rc < 0) { LOG_ERR("VBUS discharge pin configured to OUTPUT INACTIVE failed: %d", rc); return rc; } } /* Configure VCONN discharge pin to OUTPUT INACTIVE */ spec = &config->utcpd.gpios.vconn_discharge; if (spec->port != NULL) { if (!gpio_is_ready_dt(spec)) { LOG_ERR("VCONN discharge pin port device not ready"); return -ENODEV; } rc = gpio_pin_configure_dt(spec, GPIO_OUTPUT_INACTIVE); if (rc < 0) { LOG_ERR("VCONN discharge pin configured to OUTPUT INACTIVE failed: %d", rc); return rc; } } return 0; } /** * @brief Initializes UTCPD PHY * * @retval 0 on success * @retval -EIO on failure */ static int numaker_utcpd_phy_init(const struct device *dev) { const struct numaker_tcpc_config *const config = dev->config; UTCPD_T *utcpd_base = config->utcpd_base; uint32_t phyctl = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, PHYCTL); /* Enable PHY * * NOTE: Only UTCPD0 is supported. 
*/ SYS->UTCPDCTL |= SYS_UTCPDCTL_POREN0_Msk; phyctl |= UTCPD_PHYCTL_PHYPWR_Msk; return NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, PHYCTL, phyctl); } /** * @brief Checks if UTCPD Dead Battery mode is enabled * * @retval true Dead Battery mode is enabled * @retval false Dead Battery mode is not enabled */ static bool numaker_utcpd_deadbattery_query_enable(const struct device *dev) { const struct numaker_tcpc_config *const config = dev->config; UTCPD_T *utcpd_base = config->utcpd_base; uint32_t phyctl = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, PHYCTL); /* 0 = Dead Battery circuit controls internal Rd/Rp. * 1 = Role Control Register controls internal Rd/ */ return !(phyctl & UTCPD_PHYCTL_DBCTL_Msk); } /** * @brief Enables or disables UTCPD Dead Battery mode * * @retval 0 on success * @retval -EIO on failure */ static int numaker_utcpd_deadbattery_set_enable(const struct device *dev, bool enable) { const struct numaker_tcpc_config *const config = dev->config; UTCPD_T *utcpd_base = config->utcpd_base; uint32_t phyctl = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, PHYCTL); if (enable) { /* Dead Battery circuit controls internal Rd/Rp */ phyctl &= ~UTCPD_PHYCTL_DBCTL_Msk; } else { /* UTCPD.ROLCTL controls internal Rd/Rp */ phyctl |= UTCPD_PHYCTL_DBCTL_Msk; } return NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, PHYCTL, phyctl); } /** * @brief Initializes UTCPD Dead Battery mode * * @retval 0 on success * @retval -EIO on failure */ static int numaker_utcpd_deadbattery_init(const struct device *dev) { const struct numaker_tcpc_config *const config = dev->config; return numaker_utcpd_deadbattery_set_enable(dev, config->utcpd.dead_battery); } /** * @brief Initializes UTCPD interrupts * * @retval 0 on success * @retval -EIO on failure */ static int numaker_utcpd_interrupts_init(const struct device *dev) { const struct numaker_tcpc_config *const config = dev->config; UTCPD_T *utcpd_base = config->utcpd_base; int rc; uint32_t ie; uint32_t pwrstsie; uint32_t futstsie; uint32_t vndie; ie = UTCPD_IE_VNDIE_Msk | 
	     UTCPD_IE_SKDCDTIE_Msk | UTCPD_IE_RXOFIE_Msk | UTCPD_IE_FUTIE_Msk |
	     UTCPD_IE_VBAMLIE_Msk | UTCPD_IE_VBAMHIE_Msk | UTCPD_IE_TXOKIE_Msk |
	     UTCPD_IE_TXDCUDIE_Msk | UTCPD_IE_TXFAILIE_Msk | UTCPD_IE_RXHRSTIE_Msk |
	     UTCPD_IE_RXSOPIE_Msk | UTCPD_IE_PWRSCHIE_Msk | UTCPD_IE_CCSCHIE_Msk;
	rc = NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, IE, ie);
	if (rc < 0) {
		return rc;
	}

	/* Power-status interrupt enables (PWRSTSIE) */
	pwrstsie = UTCPD_PWRSTSIE_DACONIE_Msk | UTCPD_PWRSTSIE_SRHVIE_Msk |
		   UTCPD_PWRSTSIE_SRVBIE_Msk | UTCPD_PWRSTSIE_VBDTDGIE_Msk |
		   UTCPD_PWRSTSIE_VBPSIE_Msk | UTCPD_PWRSTSIE_VCPSIE_Msk |
		   UTCPD_PWRSTSIE_SKVBIE_Msk;
	rc = NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, PWRSTSIE, pwrstsie);
	if (rc < 0) {
		return rc;
	}

	/* Fault-status interrupt enables (FUTSTSIE) */
	futstsie = UTCPD_FUTSTSIE_FOFFVBIE_Msk | UTCPD_FUTSTSIE_ADGFALIE_Msk |
		   UTCPD_FUTSTSIE_FDGFALIE_Msk | UTCPD_FUTSTSIE_VBOCIE_Msk |
		   UTCPD_FUTSTSIE_VBOVIE_Msk | UTCPD_FUTSTSIE_VCOCIE_Msk;
	rc = NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, FUTSTSIE, futstsie);
	if (rc < 0) {
		return rc;
	}

	/* Vendor-defined interrupt enables (VNDIE) */
	vndie = UTCPD_VNDIE_VCDGIE_Msk | UTCPD_VNDIE_CRCERRIE_Msk | UTCPD_VNDIE_TXFRSIE_Msk |
		UTCPD_VNDIE_RXFRSIE_Msk;
	rc = NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, VNDIE, vndie);
	if (rc < 0) {
		return rc;
	}

	return 0;
}

/**
 * @brief Initializes UTCPD at stack recycle
 *
 * Restores a safe default port configuration (Rd on both CC lines,
 * VCONN source off, Rx event detection off) each time the TC/PD
 * stack re-initializes the port.
 *
 * @retval 0 on success
 * @retval -EIO on failure
 */
static int numaker_utcpd_init_recycle(const struct device *dev)
{
	const struct numaker_tcpc_config *const config = dev->config;
	UTCPD_T *utcpd_base = config->utcpd_base;
	int rc;
	uint32_t value;

	/* Disable BIST, CC1/CC2 for CC/VCONN */
	value = 0;
	rc = NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, CTL, value);
	if (rc < 0) {
		return rc;
	}

	/* Rp default, CC1/CC2 Rd */
	value = UTCPD_ROLECTL_RPVALUE_DEF | UTCPD_ROLECTL_CC1_RD | UTCPD_ROLECTL_CC2_RD;
	rc = NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, ROLCTL, value);
	if (rc < 0) {
		return rc;
	}

	/* Disable VCONN source */
	value = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, PWRCTL);
	value &= ~UTCPD_PWRCTL_VCEN_Msk;
	rc = NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, PWRCTL, value);
	if (rc < 0) {
		return rc;
	}

	/* Disable detecting Rx events */
	value =
	 0;
	rc = NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, DTRXEVNT, value);
	if (rc < 0) {
		return rc;
	}

	return 0;
}

/**
 * @brief Initializes UTCPD at device startup
 *
 * One-time hardware initialization: GPIO, PHY, Dead Battery mode,
 * pin polarity, VBUS monitor, fault detects, interconnection mux,
 * interrupt enables, and IRQ hookup.
 *
 * @retval 0 on success
 * @retval -EIO on failure
 */
static int numaker_utcpd_init_startup(const struct device *dev)
{
	const struct numaker_tcpc_config *const config = dev->config;
	UTCPD_T *utcpd_base = config->utcpd_base;
	int rc;
	uint32_t pinpl;
	uint32_t futctl;
	uint32_t muxsel;

	/* UTCPD GPIO */
	rc = numaker_utcpd_gpios_init(dev);
	if (rc < 0) {
		return rc;
	}

	/* UTCPD PHY */
	rc = numaker_utcpd_phy_init(dev);
	if (rc < 0) {
		return rc;
	}

	/* UTCPD Dead Battery */
	rc = numaker_utcpd_deadbattery_init(dev);
	if (rc < 0) {
		return rc;
	}

	/* UTCPD pin polarity */
	pinpl = config->utcpd.pinpl.bit;
	rc = NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, PINPL, pinpl);
	if (rc < 0) {
		return rc;
	}

	/* VBUS voltage and monitor */
	rc = numaker_utcpd_vbus_init(dev);
	if (rc < 0) {
		return rc;
	}

	/* UTCPD fault
	 *
	 * Disable the following fault detects which rely on external circuit:
	 * 1. VBUS force-off
	 * 2. VBUS overcurrent protection
	 * 3. VCONN overcurrent protection
	 */
	futctl = UTCPD_FUTCTL_FOFFVBDS_Msk | UTCPD_FUTCTL_VBOCDTDS_Msk | UTCPD_FUTCTL_VCOCDTDS_Msk;
	rc = NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, FUTCTL, futctl);
	if (rc < 0) {
		return rc;
	}

	/* UTCPD interconnection select
	 *
	 * NOTE: Just configure CC2FRSS/CC2VCENS/CC1FRSS/CC1VCENS to non-merged
	 * to follow TCPCI
	 */
	muxsel = UTCPD_MUXSEL_CC2FRSS_Msk | UTCPD_MUXSEL_CC2VCENS_Msk | UTCPD_MUXSEL_CC1FRSS_Msk |
		 UTCPD_MUXSEL_CC1VCENS_Msk;

	/* NOTE: For absence of EADC channel measurement for VCONN, we configure with all-one which
	 * is supposed to be invalid EADC channel number so that UTCPD won't get updated
	 * on VCONN by accident.
	 */
	if (config->eadc.spec_vbus != NULL) {
		muxsel |= (config->eadc.spec_vbus->channel_id << UTCPD_MUXSEL_ADCSELVB_Pos);
	} else {
		/* No EADC channel for VBUS: use all-one (invalid channel number) */
		muxsel |= UTCPD_MUXSEL_ADCSELVB_Msk;
	}
	if (config->eadc.spec_vconn != NULL) {
		muxsel |= (config->eadc.spec_vconn->channel_id << UTCPD_MUXSEL_ADCSELVC_Pos);
	} else {
		/* No EADC channel for VCONN: use all-one (invalid channel number) */
		muxsel |= UTCPD_MUXSEL_ADCSELVC_Msk;
	}
	rc = NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, MUXSEL, muxsel);
	if (rc < 0) {
		return rc;
	}

	/* Interrupts */
	rc = numaker_utcpd_interrupts_init(dev);
	if (rc < 0) {
		return rc;
	}

	/* IRQ */
	config->irq_config_func_utcpd(dev);

	return 0;
}

/**
 * @brief Initializes EADC to be timer-triggered for measuring
 *        VBUS/VCONN voltage at device startup
 *
 * @retval 0 on success
 * @retval -EIO on failure
 */
static int numaker_eadc_init_startup(const struct device *dev)
{
	const struct numaker_tcpc_config *const config = dev->config;
	EADC_T *eadc_base = config->eadc_base;
	int rc;
	const struct adc_dt_spec *spec;

	/* Vref */
	rc = numaker_eadc_vref_init(dev);
	if (rc < 0) {
		return rc;
	}

	/* Set input mode as single-end and enable the A/D converter */
	EADC_Open(eadc_base, EADC_CTL_DIFFEN_SINGLE_END);

	/* Configure sample module for measuring VBUS voltage
	 *
	 * NOTE: Make sample module number the same as channel number for
	 * easy implementation.
	 * NOTE: EADC measurement channel for VBUS can be absent with PWRSTS.VBPS as fallback
	 */
	spec = config->eadc.spec_vbus;
	if (spec) {
		rc = numaker_eadc_smplmod_init(dev, spec, config->eadc.trgsel_vbus);
		if (rc < 0) {
			return rc;
		}
	}

	/* Configure sample module for measuring VCONN voltage
	 *
	 * NOTE: Make sample module number the same as channel number for
	 * easy implementation.
	 * NOTE: EADC measurement channel for VCONN can be absent for VCONN unsupported
	 */
	spec = config->eadc.spec_vconn;
	if (spec) {
		rc = numaker_eadc_smplmod_init(dev, spec, config->eadc.trgsel_vconn);
		if (rc < 0) {
			return rc;
		}
	}

	return 0;
}

/**
 * @brief Initializes Timer to trigger EADC for measuring VBUS/VCONN
 *        voltage at device startup
 *
 * @retval 0 on success
 */
static int numaker_timer_init_startup(const struct device *dev)
{
	const struct numaker_tcpc_config *const config = dev->config;
	TIMER_T *timer_base = config->timer_base;

	/* Configure Timer to trigger EADC periodically */
	TIMER_Open(timer_base, TIMER_PERIODIC_MODE, config->eadc.timer_trigger_rate);
	TIMER_SetTriggerSource(timer_base, TIMER_TRGSRC_TIMEOUT_EVENT);
	TIMER_SetTriggerTarget(timer_base, TIMER_TRG_TO_EADC);
	TIMER_Start(timer_base);

	return 0;
}

/**
 * @brief Initializes TCPC at stack recycle
 *
 * Re-runs the UTCPD recycle sequence and resets the software state
 * the TC/PD stack expects to start from.
 *
 * @retval 0 on success
 * @retval -EIO on failure
 */
static int numaker_tcpc_init_recycle(const struct device *dev)
{
	struct numaker_tcpc_data *data = dev->data;
	int rc;

	/* Initialize UTCPD for attach/detach recycle */
	rc = numaker_utcpd_init_recycle(dev);
	if (rc < 0) {
		return rc;
	}

	/* The fields below must (re-)initialize for tcpc_init().
	 */
	data->rp = TC_RP_USB;
	data->rx_sop_prime_enabled = false;
	data->rx_msg_ready = false;
	memset(&data->rx_msg, 0x00, sizeof(data->rx_msg));

	return 0;
}

/**
 * @brief Initializes TCPC at device startup
 *
 * Applies pinmux, enables and configures module clocks, resets the
 * UTCPD and Timer modules, initializes UTCPD, and, when EADC channels
 * are configured, initializes EADC and Timer; finally runs the
 * recycle sequence.
 *
 * @retval 0 on success
 * @retval -EIO on failure
 */
static int numaker_tcpc_init_startup(const struct device *dev)
{
	const struct numaker_tcpc_config *const config = dev->config;
	int rc;

	/* NOTE(review): no matching SYS_LockReg() in this function — confirm
	 * protected-register access is intentionally left unlocked here.
	 */
	SYS_UnlockReg();

	/* Configure pinmux (NuMaker's SYS MFP) */
	rc = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
	if (rc < 0) {
		return rc;
	}

	/* Invoke Clock controller to enable module clock */

	/* Equivalent to CLK_EnableModuleClock() */
	rc = clock_control_on(config->clkctrl_dev, (clock_control_subsys_t)&config->pcc_utcpd);
	if (rc < 0) {
		return rc;
	}
	rc = clock_control_on(config->clkctrl_dev, (clock_control_subsys_t)&config->pcc_timer);
	if (rc < 0) {
		return rc;
	}

	/* Equivalent to CLK_SetModuleClock() */
	rc = clock_control_configure(config->clkctrl_dev, (clock_control_subsys_t)&config->pcc_utcpd,
				     NULL);
	if (rc < 0) {
		return rc;
	}
	rc = clock_control_configure(config->clkctrl_dev, (clock_control_subsys_t)&config->pcc_timer,
				     NULL);
	if (rc < 0) {
		return rc;
	}

	/* Invoke Reset controller to reset module to default state */

	/* Equivalent to SYS_ResetModule() */
	rc = reset_line_toggle_dt(&config->reset_utcpd);
	if (rc < 0) {
		return rc;
	}
	rc = reset_line_toggle_dt(&config->reset_timer);
	if (rc < 0) {
		return rc;
	}

	/* Initialize UTCPD */
	rc = numaker_utcpd_init_startup(dev);
	if (rc < 0) {
		return rc;
	}

	if (config->eadc.spec_vbus != NULL || config->eadc.spec_vconn != NULL) {
		/* Initialize EADC */
		rc = numaker_eadc_init_startup(dev);
		if (rc < 0) {
			return rc;
		}

		/* Initialize Timer */
		rc = numaker_timer_init_startup(dev);
		if (rc < 0) {
			return rc;
		}
	}

	return numaker_tcpc_init_recycle(dev);
}

/**
 * @brief Reads the status of the CC lines
 *
 * @retval 0 on success
 * @retval -EIO on failure
 */
static int numaker_tcpc_get_cc(const struct device *dev, enum tc_cc_voltage_state *cc1, enum
	       tc_cc_voltage_state *cc2)
{
	const struct numaker_tcpc_config *const config = dev->config;
	UTCPD_T *utcpd_base = config->utcpd_base;
	uint32_t rolctl = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, ROLCTL);
	uint32_t rolctl_cc1 = rolctl & UTCPD_ROLCTL_CC1_Msk;
	uint32_t rolctl_cc2 = rolctl & UTCPD_ROLCTL_CC2_Msk;
	uint32_t ccsts = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, CCSTS);
	uint32_t ccsts_cc1state = ccsts & UTCPD_CCSTS_CC1STATE_Msk;
	uint32_t ccsts_cc2state = ccsts & UTCPD_CCSTS_CC2STATE_Msk;
	uint32_t ccsts_conrlt = ccsts & UTCPD_CCSTS_CONRLT_Msk;

	/* CC1: decode CC1STATE per the pull presented (Rp as source, Rd as sink) */
	if (rolctl_cc1 == UTCPD_ROLECTL_CC1_RP || ccsts_conrlt == UTCPD_CONN_RESULT_RP) {
		switch (ccsts_cc1state) {
		case UTCPD_CCSTS_CC1STATE_SRC_RA:
			*cc1 = TC_CC_VOLT_RA;
			break;
		case UTCPD_CCSTS_CC1STATE_SRC_RD:
			*cc1 = TC_CC_VOLT_RD;
			break;
		default:
			*cc1 = TC_CC_VOLT_OPEN;
		}
	} else if (rolctl_cc1 == UTCPD_ROLECTL_CC1_RD || ccsts_conrlt == UTCPD_CONN_RESULT_RD) {
		switch (ccsts_cc1state) {
		case UTCPD_CCSTS_CC1STATE_SNK_DEF:
			*cc1 = TC_CC_VOLT_RP_DEF;
			break;
		case UTCPD_CCSTS_CC1STATE_SNK_1P5A:
			*cc1 = TC_CC_VOLT_RP_1A5;
			break;
		case UTCPD_CCSTS_CC1STATE_SNK_3A:
			*cc1 = TC_CC_VOLT_RP_3A0;
			break;
		default:
			*cc1 = TC_CC_VOLT_OPEN;
		}
	} else {
		*cc1 = TC_CC_VOLT_OPEN;
	}

	/* CC2: same decode as CC1 */
	if (rolctl_cc2 == UTCPD_ROLECTL_CC2_RP || ccsts_conrlt == UTCPD_CONN_RESULT_RP) {
		switch (ccsts_cc2state) {
		case UTCPD_CCSTS_CC2STATE_SRC_RA:
			*cc2 = TC_CC_VOLT_RA;
			break;
		case UTCPD_CCSTS_CC2STATE_SRC_RD:
			*cc2 = TC_CC_VOLT_RD;
			break;
		default:
			*cc2 = TC_CC_VOLT_OPEN;
		}
	} else if (rolctl_cc2 == UTCPD_ROLECTL_CC2_RD || ccsts_conrlt == UTCPD_CONN_RESULT_RD) {
		switch (ccsts_cc2state) {
		case UTCPD_CCSTS_CC2STATE_SNK_DEF:
			*cc2 = TC_CC_VOLT_RP_DEF;
			break;
		case UTCPD_CCSTS_CC2STATE_SNK_1P5A:
			*cc2 = TC_CC_VOLT_RP_1A5;
			break;
		case UTCPD_CCSTS_CC2STATE_SNK_3A:
			*cc2 = TC_CC_VOLT_RP_3A0;
			break;
		default:
			*cc2 = TC_CC_VOLT_OPEN;
		}
	} else {
		*cc2 = TC_CC_VOLT_OPEN;
	}

	return 0;
}

/**
 * @brief Sets the value of CC pull up resistor used when operating as a Source
 *
 * @retval 0 on success
 *
@retval -EIO on failure */ static int numaker_tcpc_select_rp_value(const struct device *dev, enum tc_rp_value rp) { struct numaker_tcpc_data *data = dev->data; data->rp = rp; return 0; } /** * @brief Gets the value of the CC pull up resistor used when operating as a Source * * @retval 0 on success * @retval -EIO on failure */ static int numaker_tcpc_get_rp_value(const struct device *dev, enum tc_rp_value *rp) { struct numaker_tcpc_data *data = dev->data; *rp = data->rp; return 0; } /** * @brief Sets the CC pull resistor and sets the role as either Source or Sink * * @retval 0 on success * @retval -EIO on failure */ static int numaker_tcpc_set_cc(const struct device *dev, enum tc_cc_pull pull) { const struct numaker_tcpc_config *const config = dev->config; struct numaker_tcpc_data *data = dev->data; UTCPD_T *utcpd_base = config->utcpd_base; int rc; uint32_t rolctl = 0; /* Disable Dead Battery mode if it is active, so that * internal Rd/Rp gets controlled by to UTCPD.ROLCTL * from Dead Battery circuit. 
*/ if (numaker_utcpd_deadbattery_query_enable(dev)) { rc = numaker_utcpd_deadbattery_set_enable(dev, false); if (rc < 0) { return rc; } } /* Rp value: default, 1.5A, or 3.0A */ switch (data->rp) { case TC_RP_USB: rolctl |= UTCPD_ROLECTL_RPVALUE_DEF; break; case TC_RP_1A5: rolctl |= UTCPD_ROLECTL_RPVALUE_1P5A; break; case TC_RP_3A0: rolctl |= UTCPD_ROLECTL_RPVALUE_3A; break; default: LOG_ERR("Invalid Rp value: %d", data->rp); return -EINVAL; } /* Pull on both CC1/CC2, determining source/sink role */ switch (pull) { case TC_CC_RA: rolctl |= (UTCPD_ROLECTL_CC1_RA | UTCPD_ROLECTL_CC2_RA); break; case TC_CC_RP: rolctl |= (UTCPD_ROLECTL_CC1_RP | UTCPD_ROLECTL_CC2_RP); break; case TC_CC_RD: rolctl |= (UTCPD_ROLECTL_CC1_RD | UTCPD_ROLECTL_CC2_RD); break; case TC_CC_OPEN: rolctl |= (UTCPD_ROLECTL_CC1_OPEN | UTCPD_ROLECTL_CC2_OPEN); break; default: LOG_ERR("Invalid pull: %d", pull); return -EINVAL; } /* Update CC1/CC2 pull values */ rc = NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, ROLCTL, rolctl); if (rc < 0) { return rc; } return 0; } /** * @brief Sets a callback that can enable or discharge VCONN if the TCPC is * unable to or the system is configured in a way that does not use * the VCONN control capabilities of the TCPC */ static void numaker_tcpc_set_vconn_discharge_cb(const struct device *dev, tcpc_vconn_discharge_cb_t cb) { struct numaker_tcpc_data *data = dev->data; data->dpm.vconn_discharge_cb = cb; } /** * @brief Sets a callback that can enable or disable VCONN if the TCPC is * unable to or the system is configured in a way that does not use * the VCONN control capabilities of the TCPC */ static void numaker_tcpc_set_vconn_cb(const struct device *dev, tcpc_vconn_control_cb_t vconn_cb) { struct numaker_tcpc_data *data = dev->data; data->dpm.vconn_cb = vconn_cb; } /** * @brief Discharges VCONN * * @retval 0 on success * @retval -EIO on failure */ static int numaker_tcpc_vconn_discharge(const struct device *dev, bool enable) { const struct numaker_tcpc_config *const config = 
dev->config; struct numaker_tcpc_data *data = dev->data; UTCPD_T *utcpd_base = config->utcpd_base; const struct gpio_dt_spec *vconn_discharge_spec = &config->utcpd.gpios.vconn_discharge; uint32_t ctl = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, CTL); uint32_t vcdgctl = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, VCDGCTL); enum tc_cc_polarity polarity = (ctl & UTCPD_CTL_ORIENT_Msk) ? TC_POLARITY_CC2 : TC_POLARITY_CC1; /* Use DPM supplied VCONN discharge */ if (data->dpm.vconn_discharge_cb) { return data->dpm.vconn_discharge_cb(dev, polarity, enable); } /* Use GPIO VCONN discharge */ if (vconn_discharge_spec->port != NULL) { return gpio_pin_set_dt(vconn_discharge_spec, enable); } /* Use UTCPD VCONN discharge */ if (enable) { vcdgctl |= UTCPD_VCDGCTL_VCDGEN_Msk; } else { vcdgctl &= ~UTCPD_VCDGCTL_VCDGEN_Msk; } return NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, VCDGCTL, vcdgctl); } /** * @brief Enables or disables VCONN * * @retval 0 on success * @retval -EIO on failure */ static int numaker_tcpc_set_vconn(const struct device *dev, bool enable) { const struct numaker_tcpc_config *const config = dev->config; struct numaker_tcpc_data *data = dev->data; UTCPD_T *utcpd_base = config->utcpd_base; uint32_t pwrctl = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, PWRCTL); uint32_t ctl = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, CTL); enum tc_cc_polarity polarity = (ctl & UTCPD_CTL_ORIENT_Msk) ? 
TC_POLARITY_CC2 : TC_POLARITY_CC1; /* Use DPM supplied VCONN */ if (data->dpm.vconn_cb) { return data->dpm.vconn_cb(dev, polarity, enable); } /* Use UTCPD VCONN */ if (enable) { pwrctl |= UTCPD_PWRCTL_VCEN_Msk; } else { pwrctl &= ~UTCPD_PWRCTL_VCEN_Msk; } return NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, PWRCTL, pwrctl); } /** * @brief Sets the Power and Data Role of the PD message header * * @retval 0 on success * @retval -EIO on failure */ static int numaker_tcpc_set_roles(const struct device *dev, enum tc_power_role power_role, enum tc_data_role data_role) { const struct numaker_tcpc_config *const config = dev->config; UTCPD_T *utcpd_base = config->utcpd_base; uint32_t mshead = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, MSHEAD); /* Power role for auto-reply GoodCRC */ mshead &= ~UTCPD_MSHEAD_PWRROL_Msk; if (power_role == TC_ROLE_SOURCE) { mshead |= UTCPD_MHINFO_PROLE_SRC; } else { mshead |= UTCPD_MHINFO_PROLE_SNK; } /* Data role for auto-reply GoodCRC */ mshead &= ~UTCPD_MSHEAD_DAROL_Msk; if (data_role == TC_ROLE_DFP) { mshead |= UTCPD_MHINFO_DROLE_DFP; } else { mshead |= UTCPD_MHINFO_DROLE_UFP; } /* Message Header for auto-reply GoodCRC */ return NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, MSHEAD, mshead); } /** * @brief Retrieves the Power Delivery message from the TCPC. * If buf is NULL, then only the status is returned, where 0 means there is a message pending and * -ENODATA means there is no pending message. * * @retval Greater or equal to 0 is the number of bytes received if buf parameter is provided * @retval 0 if there is a message pending and buf parameter is NULL * @retval -EIO on failure * @retval -ENODATA if no message is pending */ static int numaker_tcpc_get_rx_pending_msg(const struct device *dev, struct pd_msg *msg) { struct numaker_tcpc_data *data = dev->data; /* Rx message pending? */ if (!data->rx_msg_ready) { return -ENODATA; } /* Query status only? 
*/ if (msg == NULL) { return 0; } /* Dequeue Rx FIFO */ *msg = data->rx_msg; data->rx_msg_ready = false; /* Indicate Rx message returned */ return 1; } /** * @brief Enables the reception of SOP* message types * * @retval 0 on success * @retval -EIO on failure */ static int numaker_tcpc_set_rx_enable(const struct device *dev, bool enable) { const struct numaker_tcpc_config *const config = dev->config; struct numaker_tcpc_data *data = dev->data; UTCPD_T *utcpd_base = config->utcpd_base; uint32_t dtrxevnt = 0; /* Enable receive */ if (enable) { /* Enable receive of SOP messages */ dtrxevnt |= UTCPD_DTRXEVNT_SOPEN_Msk; /* Enable receive of SOP'/SOP'' messages */ if (data->rx_sop_prime_enabled) { dtrxevnt |= UTCPD_DTRXEVNT_SOPPEN_Msk | UTCPD_DTRXEVNT_SOPPPEN_Msk; } /* Enable receive of Hard Reset */ dtrxevnt |= UTCPD_DTRXEVNT_HRSTEN_Msk; /* Don't enable receive of Cable Reset for not being Cable Plug */ } return NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, DTRXEVNT, dtrxevnt); } /** * @brief Sets the polarity of the CC lines * * @retval 0 on success * @retval -EIO on failure */ static int numaker_tcpc_set_cc_polarity(const struct device *dev, enum tc_cc_polarity polarity) { const struct numaker_tcpc_config *const config = dev->config; UTCPD_T *utcpd_base = config->utcpd_base; uint32_t ctl = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, CTL); /* Update CC polarity */ switch (polarity) { case TC_POLARITY_CC1: ctl &= ~UTCPD_CTL_ORIENT_Msk; break; case TC_POLARITY_CC2: ctl |= UTCPD_CTL_ORIENT_Msk; break; default: LOG_ERR("Invalid CC polarity: %d", polarity); return -EINVAL; } return NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, CTL, ctl); } /** * @brief Transmits a Power Delivery message * * @retval 0 on success * @retval -EIO on failure */ static int numaker_tcpc_transmit_data(const struct device *dev, struct pd_msg *msg) { const struct numaker_tcpc_config *const config = dev->config; UTCPD_T *utcpd_base = config->utcpd_base; int rc; uint32_t txctl; uint32_t txctl_retrycnt; uint32_t txctl_txstype; 
/* Not support Unchunked Extended Message exceeding PD_CONVERT_PD_HEADER_COUNT_TO_BYTES */ if (msg->len > (PD_MAX_EXTENDED_MSG_LEGACY_LEN + 2)) { LOG_ERR("Not support Unchunked Extended Message exceeding " "PD_CONVERT_PD_HEADER_COUNT_TO_BYTES: %d", msg->len); return -EIO; } /* txbcnt = 2 (Message Header) + Tx data byte count */ rc = NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, TXBCNT, msg->len + 2); if (rc < 0) { return rc; } /* Tx header */ rc = NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, TXHEAD, msg->header.raw_value); if (rc < 0) { return rc; } /* Tx data */ rc = numaker_utcpd_tx_write_data(dev, msg->data, msg->len); if (rc < 0) { return rc; } /* Tx control */ if (msg->type < PD_PACKET_TX_HARD_RESET) { /* nRetryCount = 2 for PD REV 3.0 */ txctl_retrycnt = 2 << UTCPD_TXCTL_RETRYCNT_Pos; } else if (msg->type <= PD_PACKET_TX_BIST_MODE_2) { /* Per TCPCI spec, no retry for non-SOP* transmission */ txctl_retrycnt = 0; } else { LOG_ERR("Invalid PD packet type: %d", msg->type); return -EINVAL; } /* NOTE: Needn't extra cast for UTCPD_TXCTL.TXSTYPE aligning with pd_packet_type */ txctl_txstype = ((uint32_t)msg->type) << UTCPD_TXCTL_TXSTYPE_Pos; txctl = txctl_retrycnt | txctl_txstype; return NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, TXCTL, txctl); } /** * @brief Dump a set of TCPC registers * * @retval 0 on success * @retval -EIO on failure */ static int numaker_tcpc_dump_std_reg(const struct device *dev) { return numaker_utcpd_dump_regs(dev); } /** * @brief Queries the current sinking state of the TCPC * * @retval true if sinking power * @retval false if not sinking power */ static int numaker_tcpc_get_snk_ctrl(const struct device *dev) { const struct numaker_tcpc_config *const config = dev->config; UTCPD_T *utcpd_base = config->utcpd_base; uint32_t pwrsts = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, PWRSTS); return (pwrsts & UTCPD_PWRSTS_SKVB_Msk) ? 
true : false; } /** * @brief Queries the current sourcing state of the TCPC * * @retval true if sourcing power * @retval false if not sourcing power */ static int numaker_tcpc_get_src_ctrl(const struct device *dev) { const struct numaker_tcpc_config *const config = dev->config; UTCPD_T *utcpd_base = config->utcpd_base; uint32_t pwrsts = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, PWRSTS); return (pwrsts & (UTCPD_PWRSTS_SRVB_Msk | UTCPD_PWRSTS_SRHV_Msk)) ? true : false; } /** * @brief Enables the reception of SOP Prime messages * * @retval 0 on success * @retval -EIO on failure */ static int numaker_tcpc_sop_prime_enable(const struct device *dev, bool enable) { struct numaker_tcpc_data *data = dev->data; data->rx_sop_prime_enabled = enable; return 0; } /** * @brief Controls the BIST Mode of the TCPC. It disables RX alerts while the * mode is active. * * @retval 0 on success * @retval -EIO on failure */ static int numaker_tcpc_set_bist_test_mode(const struct device *dev, bool enable) { return numaker_utcpd_bist_test_mode_set_enable(dev, enable); } /** * @brief Sets the alert function that's called when an interrupt is triggered * due to an alert bit * * @retval 0 on success */ static int numaker_tcpc_set_alert_handler_cb(const struct device *dev, tcpc_alert_handler_cb_t alert_handler, void *alert_data) { struct numaker_tcpc_data *data = dev->data; data->tcpc_alert.handler = alert_handler; data->tcpc_alert.data = alert_data; return 0; } /* Functions below with name pattern "*_tcpc_ppc_*" are to invoke by NuMaker PPC driver */ int numaker_tcpc_ppc_is_dead_battery_mode(const struct device *dev) { return numaker_utcpd_deadbattery_query_enable(dev); } int numaker_tcpc_ppc_exit_dead_battery_mode(const struct device *dev) { return numaker_utcpd_deadbattery_set_enable(dev, false); } int numaker_tcpc_ppc_is_vbus_source(const struct device *dev) { return numaker_utcpd_vbus_is_source(dev); } int numaker_tcpc_ppc_is_vbus_sink(const struct device *dev) { return 
numaker_utcpd_vbus_is_sink(dev); } int numaker_tcpc_ppc_set_snk_ctrl(const struct device *dev, bool enable) { const struct numaker_tcpc_config *const config = dev->config; UTCPD_T *utcpd_base = config->utcpd_base; uint32_t cmd; if (enable) { cmd = UTCPD_CMD_SINK_VBUS; } else { cmd = UTCPD_CMD_DISABLE_SINK_VBUS; } return NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, CMD, cmd); } int numaker_tcpc_ppc_set_src_ctrl(const struct device *dev, bool enable) { const struct numaker_tcpc_config *const config = dev->config; UTCPD_T *utcpd_base = config->utcpd_base; uint32_t cmd; if (enable) { /* NOTE: Source VBUS high voltage (UTCPD_CMD_SRC_VBUS_NONDEFAULT) N/A */ cmd = UTCPD_CMD_SRC_VBUS_DEFAULT; } else { cmd = UTCPD_CMD_DISABLE_SRC_VBUS; } return NUMAKER_UTCPD_REG_WRITE_BY_NAME(dev, CMD, cmd); } int numaker_tcpc_ppc_set_vbus_discharge(const struct device *dev, bool enable) { return numaker_utcpd_vbus_set_discharge(dev, enable); } int numaker_tcpc_ppc_is_vbus_present(const struct device *dev) { return numaker_utcpd_vbus_is_present(dev); } int numaker_tcpc_ppc_set_event_handler(const struct device *dev, usbc_ppc_event_cb_t event_handler, void *event_data) { struct numaker_tcpc_data *data = dev->data; data->ppc_event.handler = event_handler; data->ppc_event.data = event_data; return 0; } int numaker_tcpc_ppc_dump_regs(const struct device *dev) { return numaker_utcpd_dump_regs(dev); } /* End of "*_tcpc_ppc_*" functions */ /* Functions below with name pattern "*_tcpc_vbus_*" are to invoke by NuMaker VBUS driver */ bool numaker_tcpc_vbus_check_level(const struct device *dev, enum tc_vbus_level level) { const struct numaker_tcpc_config *const config = dev->config; UTCPD_T *utcpd_base = config->utcpd_base; uint32_t mv_norm; int rc = numaker_utcpd_vbus_measure(dev, &mv_norm); uint32_t pwrsts = NUMAKER_UTCPD_REG_READ_BY_NAME(dev, PWRSTS); /* Fall back to PWRSTS.VBPS if VBUS measurement by EADC is not available */ switch (level) { case TC_VBUS_SAFE0V: return (rc == 0) ? 
(mv_norm < PD_V_SAFE_0V_MAX_MV) : !(pwrsts & UTCPD_PWRSTS_VBPS_Msk); case TC_VBUS_PRESENT: return (rc == 0) ? (mv_norm >= PD_V_SAFE_5V_MIN_MV) : (pwrsts & UTCPD_PWRSTS_VBPS_Msk); case TC_VBUS_REMOVED: return (rc == 0) ? (mv_norm < TC_V_SINK_DISCONNECT_MAX_MV) : !(pwrsts & UTCPD_PWRSTS_VBPS_Msk); } return false; } int numaker_tcpc_vbus_measure(const struct device *dev, int *vbus_meas) { int rc; uint32_t mv; if (vbus_meas == NULL) { return -EINVAL; } *vbus_meas = 0; rc = numaker_utcpd_vbus_measure(dev, &mv); if (rc < 0) { return rc; } *vbus_meas = mv; return 0; } int numaker_tcpc_vbus_discharge(const struct device *dev, bool enable) { return numaker_utcpd_vbus_set_discharge(dev, enable); } int numaker_tcpc_vbus_enable(const struct device *dev, bool enable) { /* VBUS measurement is made automatic through Timer-triggered EADC. */ return 0; } /* End of "*_tcpc_vbus_*" functions */ static const struct tcpc_driver_api numaker_tcpc_driver_api = { .init = numaker_tcpc_init_recycle, .get_cc = numaker_tcpc_get_cc, .select_rp_value = numaker_tcpc_select_rp_value, .get_rp_value = numaker_tcpc_get_rp_value, .set_cc = numaker_tcpc_set_cc, .set_vconn_discharge_cb = numaker_tcpc_set_vconn_discharge_cb, .set_vconn_cb = numaker_tcpc_set_vconn_cb, .vconn_discharge = numaker_tcpc_vconn_discharge, .set_vconn = numaker_tcpc_set_vconn, .set_roles = numaker_tcpc_set_roles, .get_rx_pending_msg = numaker_tcpc_get_rx_pending_msg, .set_rx_enable = numaker_tcpc_set_rx_enable, .set_cc_polarity = numaker_tcpc_set_cc_polarity, .transmit_data = numaker_tcpc_transmit_data, .dump_std_reg = numaker_tcpc_dump_std_reg, .get_snk_ctrl = numaker_tcpc_get_snk_ctrl, .get_src_ctrl = numaker_tcpc_get_src_ctrl, .sop_prime_enable = numaker_tcpc_sop_prime_enable, .set_bist_test_mode = numaker_tcpc_set_bist_test_mode, .set_alert_handler_cb = numaker_tcpc_set_alert_handler_cb, }; /* Same as RESET_DT_SPEC_INST_GET_BY_IDX, except by name */ #define NUMAKER_RESET_DT_SPEC_INST_GET_BY_NAME(inst, name) \ { \ .dev = 
DEVICE_DT_GET(DT_INST_RESET_CTLR_BY_NAME(inst, name)), \ .id = DT_INST_RESET_CELL_BY_NAME(inst, name, id), \ } /* Same as GPIO_DT_SPEC_GET_BY_IDX, except by name */ #define NUMAKER_GPIO_DT_SPEC_GET_BY_NAME(node_id, prop, name) \ { \ .port = DEVICE_DT_GET(DT_PHANDLE_BY_NAME(node_id, prop, name)), \ .pin = DT_PHA_BY_NAME(node_id, prop, name, pin), \ .dt_flags = DT_PHA_BY_NAME(node_id, prop, name, flags), \ } /* Same as GPIO_DT_SPEC_INST_GET_BY_IDX_OR, except by name */ #define NUMAKER_GPIO_DT_SPEC_INST_GET_BY_NAME_OR(inst, prop, name, default_value) \ COND_CODE_1(DT_INST_PROP_HAS_NAME(inst, prop, name), \ (NUMAKER_GPIO_DT_SPEC_GET_BY_NAME(DT_DRV_INST(inst), prop, name)), \ (default_value)) /* Peripheral Clock Control by name */ #define NUMAKER_PCC_INST_GET_BY_NAME(inst, name) \ { \ .subsys_id = NUMAKER_SCC_SUBSYS_ID_PCC, \ .pcc.clk_modidx = DT_INST_CLOCKS_CELL_BY_NAME(inst, name, clock_module_index), \ .pcc.clk_src = DT_INST_CLOCKS_CELL_BY_NAME(inst, name, clock_source), \ .pcc.clk_div = DT_INST_CLOCKS_CELL_BY_NAME(inst, name, clock_divider), \ } /* UTCPD GPIOs */ #define NUMAKER_UTCPD_GPIOS_INIT(inst) \ { \ .vbus_detect = \ NUMAKER_GPIO_DT_SPEC_GET_BY_NAME(DT_DRV_INST(inst), gpios, vbus_detect), \ .vbus_discharge = NUMAKER_GPIO_DT_SPEC_INST_GET_BY_NAME_OR(inst, gpios, \ vbus_discharge, {0}), \ .vconn_discharge = NUMAKER_GPIO_DT_SPEC_INST_GET_BY_NAME_OR(inst, gpios, \ vconn_discharge, {0}), \ } /* UTCPD.PINPL.<PIN> cast */ #define NUMAKER_UTCPD_PINPOL_CAST(inst, pin_dt, pin_utcpd) \ (DT_ENUM_HAS_VALUE(DT_DRV_INST(inst), pin_dt, high_active) ? 
UTCPD_PINPL_##pin_utcpd##_Msk \ : 0) /* UTCPD.VBVOL.VBSCALE cast */ #define NUMAKER_UTCPD_VBUS_DIVIDE_CAST(inst) NUMAKER_UTCPD_VBUS_DIVIDE_CAST_NO_DIVIDE(inst) /* no_divide */ #define NUMAKER_UTCPD_VBUS_DIVIDE_CAST_NO_DIVIDE(inst) \ COND_CODE_1(DT_ENUM_HAS_VALUE(DT_DRV_INST(inst), vbus_divide, no_divice), \ ({.bit = (0 << UTCPD_VBVOL_VBSCALE_Pos), .value = 1}), \ (NUMAKER_UTCPD_VBUS_DIVIDE_CAST_DIVIDE_10(inst))) /* divide_10 */ #define NUMAKER_UTCPD_VBUS_DIVIDE_CAST_DIVIDE_10(inst) \ COND_CODE_1(DT_ENUM_HAS_VALUE(DT_DRV_INST(inst), vbus_divide, divide_10), \ ({.bit = (1 << UTCPD_VBVOL_VBSCALE_Pos), .value = 10}), \ (NUMAKER_UTCPD_VBUS_DIVIDE_CAST_DIVIDE_20(inst))) /* divide_20 */ #define NUMAKER_UTCPD_VBUS_DIVIDE_CAST_DIVIDE_20(inst) \ COND_CODE_1(DT_ENUM_HAS_VALUE(DT_DRV_INST(inst), vbus_divide, divide_20), \ ({.bit = (2 << UTCPD_VBVOL_VBSCALE_Pos), .value = 20}), (no_divide error)) /* UTCPD.PINPL */ #define NUMAKER_UTCPD_PINPL_INIT(inst) \ { \ .bit = NUMAKER_UTCPD_PINPOL_CAST(inst, vconn_overcurrent_event_polarity, VCOCPL) | \ NUMAKER_UTCPD_PINPOL_CAST(inst, vconn_discharge_polarity, VCDGENPL) | \ NUMAKER_UTCPD_PINPOL_CAST(inst, vconn_enable_polarity, VCENPL) | \ NUMAKER_UTCPD_PINPOL_CAST(inst, vbus_overcurrent_event_polarity, VBOCPL) | \ NUMAKER_UTCPD_PINPOL_CAST(inst, vbus_forceoff_event_polarity, FOFFVBPL) | \ NUMAKER_UTCPD_PINPOL_CAST(inst, frs_tx_polarity, TXFRSPL) | \ NUMAKER_UTCPD_PINPOL_CAST(inst, vbus_discharge_enable_polarity, VBDGENPL) | \ NUMAKER_UTCPD_PINPOL_CAST(inst, vbus_sink_enable_polarity, VBSKENPL) | \ NUMAKER_UTCPD_PINPOL_CAST(inst, vbus_source_enable_polarity, VBSRENPL) \ } /* UTCPD.VBVOL */ #define NUMAKER_UTCPD_VBVOL_INIT(inst) \ { \ .vbscale = NUMAKER_UTCPD_VBUS_DIVIDE_CAST(inst), \ } #define NUMAKER_UTCPD_INIT(inst) \ { \ .gpios = NUMAKER_UTCPD_GPIOS_INIT(inst), \ .dead_battery = DT_INST_PROP(inst, dead_battery), \ .pinpl = NUMAKER_UTCPD_PINPL_INIT(inst), .vbvol = NUMAKER_UTCPD_VBVOL_INIT(inst), \ } /* EADC register address is duplicated 
for easy implementation. * They must be the same. */ #define BUILD_ASSERT_NUMAKER_EADC_REG(inst) \ IF_ENABLED(DT_NODE_HAS_PROP(DT_DRV_INST(inst), io_channels), \ (BUILD_ASSERT(DT_INST_REG_ADDR_BY_NAME(inst, eadc) == \ DT_REG_ADDR(DT_INST_IO_CHANNELS_CTLR(inst)));)) #define NUMAKER_EADC_TRGSRC_CAST(inst) \ ((DT_INST_REG_ADDR_BY_NAME(inst, timer) == TIMER0_BASE) ? EADC_TIMER0_TRIGGER \ : (DT_INST_REG_ADDR_BY_NAME(inst, timer) == TIMER1_BASE) ? EADC_TIMER1_TRIGGER \ : (DT_INST_REG_ADDR_BY_NAME(inst, timer) == TIMER2_BASE) ? EADC_TIMER2_TRIGGER \ : (DT_INST_REG_ADDR_BY_NAME(inst, timer) == TIMER3_BASE) ? EADC_TIMER3_TRIGGER \ : NUMAKER_INVALID_VALUE) #define BUILD_ASSERT_NUMAKER_EADC_TRGSRC_CAST(inst) \ BUILD_ASSERT(NUMAKER_EADC_TRGSRC_CAST(inst) != NUMAKER_INVALID_VALUE, \ "NUMAKER_EADC_TRGSRC_CAST error"); /* Notes on specifying EADC channels * * 1. Must be in order of chn_vbus, chn_vconn, etc. * 2. The front channel can be absent, e.g. only chn_vconn. * 3. Build assert will check the above rules. */ #define NUMAKER_EADC_SPEC_GET_BY_IDX_COMMA(node_id, prop, idx) ADC_DT_SPEC_GET_BY_IDX(node_id, idx), #define NUMAKER_EADC_SPEC_DEFINE(inst) \ IF_ENABLED( \ DT_NODE_HAS_PROP(DT_DRV_INST(inst), io_channels), \ (static const struct adc_dt_spec eadc_specs##inst[] = {DT_FOREACH_PROP_ELEM( \ DT_DRV_INST(inst), io_channels, NUMAKER_EADC_SPEC_GET_BY_IDX_COMMA)};)) /* Note on EADC spec index * * These indexes must be integer literal, or meet macro expansion error. * However, macro expansion just does text replacement, no evaluation. * To overcome this, UTIL_INC() and friends are invoked to do evaluation * at preprocess time. 
*/ #define NUMAKER_EADC_SPEC_IDX_VBUS(inst) 0 #define NUMAKER_EADC_SPEC_IDX_VCONN(inst) \ COND_CODE_1(DT_INST_PROP_HAS_NAME(inst, io_channels, chn_vbus), \ (UTIL_INC(NUMAKER_EADC_SPEC_IDX_VBUS(inst))), \ (NUMAKER_EADC_SPEC_IDX_VBUS(inst))) #define NUMAKER_EADC_SPEC_PTR_VBUS(inst) \ COND_CODE_1(DT_INST_PROP_HAS_NAME(inst, io_channels, chn_vbus), \ (&eadc_specs##inst[NUMAKER_EADC_SPEC_IDX_VBUS(inst)]), (NULL)) #define NUMAKER_EADC_SPEC_PTR_VCONN(inst) \ COND_CODE_1(DT_INST_PROP_HAS_NAME(inst, io_channels, chn_vconn), \ (&eadc_specs##inst[NUMAKER_EADC_SPEC_IDX_VCONN(inst)]), (NULL)) #define NUMAKER_EADC_DEVICE_BY_NAME(inst, name) \ DEVICE_DT_GET(DT_IO_CHANNELS_CTLR_BY_NAME(DT_DRV_INST(inst), name)) #define NUMAKER_EADC_DEVICE_BY_IDX(inst, idx) \ DEVICE_DT_GET(DT_IO_CHANNELS_CTLR_BY_IDX(DT_DRV_INST(inst), idx)) #define NUMAKER_EADC_INPUT_BY_NAME(inst, name) DT_IO_CHANNELS_INPUT_BY_NAME(DT_DRV_INST(inst), name) #define NUMAKER_EADC_INPUT_BY_IDX(inst, idx) DT_IO_CHANNELS_INPUT_BY_IDX(DT_DRV_INST(inst), idx) #define BUILD_ASSERT_NUMAKER_EADC_SPEC_VBUS(inst) \ IF_ENABLED(DT_INST_PROP_HAS_NAME(inst, io_channels, chn_vbus), \ (BUILD_ASSERT(NUMAKER_EADC_DEVICE_BY_NAME(inst, chn_vbus) == \ NUMAKER_EADC_DEVICE_BY_IDX( \ inst, NUMAKER_EADC_SPEC_IDX_VBUS(inst)), \ "EADC device for VBUS error"); \ BUILD_ASSERT(NUMAKER_EADC_INPUT_BY_NAME(inst, chn_vbus) == \ NUMAKER_EADC_INPUT_BY_IDX( \ inst, NUMAKER_EADC_SPEC_IDX_VBUS(inst)), \ "EADC channel for VBUS error");)) #define BUILD_ASSERT_NUMAKER_EADC_SPEC_VCONN(inst) \ IF_ENABLED(DT_INST_PROP_HAS_NAME(inst, io_channels, chn_vconn), \ (BUILD_ASSERT(NUMAKER_EADC_DEVICE_BY_NAME(inst, chn_vconn) == \ NUMAKER_EADC_DEVICE_BY_IDX( \ inst, NUMAKER_EADC_SPEC_IDX_VCONN(inst)), \ "EADC device for VCONN error"); \ BUILD_ASSERT(NUMAKER_EADC_INPUT_BY_NAME(inst, chn_vconn) == \ NUMAKER_EADC_INPUT_BY_IDX( \ inst, NUMAKER_EADC_SPEC_IDX_VCONN(inst)), \ "EADC channel for VCONN error");)) #define NUMAKER_EADC_INIT(inst) \ { \ .spec_vbus = 
NUMAKER_EADC_SPEC_PTR_VBUS(inst), \ .spec_vconn = NUMAKER_EADC_SPEC_PTR_VCONN(inst), \ .timer_trigger_rate = DT_INST_PROP(inst, adc_measure_timer_trigger_rate), \ .trgsel_vbus = NUMAKER_EADC_TRGSRC_CAST(inst), \ .trgsel_vconn = NUMAKER_EADC_TRGSRC_CAST(inst), \ } #define NUMAKER_TCPC_INIT(inst) \ PINCTRL_DT_INST_DEFINE(inst); \ \ NUMAKER_EADC_SPEC_DEFINE(inst); \ \ static void numaker_utcpd_irq_config_func_##inst(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQ_BY_NAME(inst, utcpd, irq), \ DT_INST_IRQ_BY_NAME(inst, utcpd, priority), numaker_utcpd_isr, \ DEVICE_DT_INST_GET(inst), 0); \ \ irq_enable(DT_INST_IRQ_BY_NAME(inst, utcpd, irq)); \ } \ \ static void numaker_utcpd_irq_unconfig_func_##inst(const struct device *dev) \ { \ irq_disable(DT_INST_IRQ_BY_NAME(inst, utcpd, irq)); \ } \ \ static const struct numaker_tcpc_config numaker_tcpc_config_##inst = { \ .utcpd_base = (UTCPD_T *)DT_INST_REG_ADDR_BY_NAME(inst, utcpd), \ .eadc_base = (EADC_T *)DT_INST_REG_ADDR_BY_NAME(inst, eadc), \ .timer_base = (TIMER_T *)DT_INST_REG_ADDR_BY_NAME(inst, timer), \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst), \ .clkctrl_dev = DEVICE_DT_GET(DT_PARENT(DT_INST_CLOCKS_CTLR(inst))), \ .pcc_utcpd = NUMAKER_PCC_INST_GET_BY_NAME(inst, utcpd), \ .pcc_timer = NUMAKER_PCC_INST_GET_BY_NAME(inst, timer), \ .reset_utcpd = NUMAKER_RESET_DT_SPEC_INST_GET_BY_NAME(inst, utcpd), \ .reset_timer = NUMAKER_RESET_DT_SPEC_INST_GET_BY_NAME(inst, timer), \ .irq_config_func_utcpd = numaker_utcpd_irq_config_func_##inst, \ .irq_unconfig_func_utcpd = numaker_utcpd_irq_unconfig_func_##inst, \ .utcpd = NUMAKER_UTCPD_INIT(inst), \ .eadc = NUMAKER_EADC_INIT(inst), \ }; \ \ BUILD_ASSERT_NUMAKER_EADC_REG(inst); \ BUILD_ASSERT_NUMAKER_EADC_TRGSRC_CAST(inst); \ BUILD_ASSERT_NUMAKER_EADC_SPEC_VBUS(inst); \ BUILD_ASSERT_NUMAKER_EADC_SPEC_VCONN(inst); \ \ static struct numaker_tcpc_data numaker_tcpc_data_##inst; \ \ DEVICE_DT_INST_DEFINE(inst, numaker_tcpc_init_startup, NULL, &numaker_tcpc_data_##inst, \ 
&numaker_tcpc_config_##inst, POST_KERNEL, \ CONFIG_USBC_TCPC_INIT_PRIORITY, &numaker_tcpc_driver_api); DT_INST_FOREACH_STATUS_OKAY(NUMAKER_TCPC_INIT); ```
/content/code_sandbox/drivers/usb_c/tcpc/ucpd_numaker.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
22,540
```c /* * */ #define DT_DRV_COMPAT zephyr_usb_c_vbus_adc #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(usbc_vbus_adc, CONFIG_USBC_LOG_LEVEL); #include <zephyr/device.h> #include <zephyr/sys/util.h> #include <zephyr/kernel.h> #include <zephyr/drivers/adc.h> #include <zephyr/drivers/usb_c/usbc_pd.h> #include <zephyr/drivers/usb_c/usbc_vbus.h> #include <soc.h> #include <stddef.h> #include "usbc_vbus_adc_priv.h" /** * @brief Reads and returns VBUS measured in mV * * @retval 0 on success * @retval -EIO on failure */ static int adc_vbus_measure(const struct device *dev, int *meas) { const struct usbc_vbus_config *const config = dev->config; struct usbc_vbus_data *data = dev->data; int value; int ret; __ASSERT(meas != NULL, "ADC VBUS meas must not be NULL"); ret = adc_read(config->adc_channel.dev, &data->sequence); if (ret != 0) { LOG_INF("ADC reading failed with error %d.", ret); return ret; } value = data->sample; ret = adc_raw_to_millivolts_dt(&config->adc_channel, &value); if (ret != 0) { LOG_INF("Scaling ADC failed with error %d.", ret); return ret; } if (config->full_ohm > 0) { /* VBUS is scaled down though a voltage divider */ value = (value * 1000) / ((config->output_ohm * 1000) / config->full_ohm); } *meas = value; return 0; } /** * @brief Checks if VBUS is at a particular level * * @retval true if VBUS is at the level voltage, else false */ static bool adc_vbus_check_level(const struct device *dev, enum tc_vbus_level level) { int meas; int ret; ret = adc_vbus_measure(dev, &meas); if (ret) { return false; } switch (level) { case TC_VBUS_SAFE0V: return (meas < PD_V_SAFE_0V_MAX_MV); case TC_VBUS_PRESENT: return (meas >= PD_V_SAFE_5V_MIN_MV); case TC_VBUS_REMOVED: return (meas < TC_V_SINK_DISCONNECT_MAX_MV); } return false; } /** * @brief Sets pin to discharge VBUS * * @retval 0 on success * @retval -EIO on failure * @retval -ENOENT if enable pin isn't defined */ static int adc_vbus_discharge(const struct device *dev, bool enable) { const struct 
usbc_vbus_config *const config = dev->config; const struct gpio_dt_spec *gcd = &config->discharge_gpios; int ret = -ENOENT; if (gcd->port) { ret = gpio_pin_set_dt(gcd, enable); } return ret; } /** * @brief Sets pin to enable VBUS measurments * * @retval 0 on success * @retval -EIO on failure * @retval -ENOENT if enable pin isn't defined */ static int adc_vbus_enable(const struct device *dev, bool enable) { const struct usbc_vbus_config *const config = dev->config; const struct gpio_dt_spec *gcp = &config->power_gpios; int ret = -ENOENT; if (gcp->port) { ret = gpio_pin_set_dt(gcp, enable); } return ret; } /** * @brief Initializes the ADC VBUS Driver * * @retval 0 on success * @retval -EIO on failure */ static int adc_vbus_init(const struct device *dev) { const struct usbc_vbus_config *const config = dev->config; struct usbc_vbus_data *data = dev->data; const struct gpio_dt_spec *gcp = &config->power_gpios; const struct gpio_dt_spec *gcd = &config->discharge_gpios; int ret; if (!adc_is_ready_dt(&config->adc_channel)) { LOG_ERR("ADC controller device is not ready"); return -ENODEV; } /* Configure VBUS Measurement enable pin if defined */ if (gcp->port) { if (!device_is_ready(gcp->port)) { LOG_ERR("%s: device not ready", gcp->port->name); return -EIO; } ret = gpio_pin_configure_dt(gcp, GPIO_OUTPUT_INACTIVE); if (ret != 0) { LOG_ERR("Failed to control feed %s.%u: %d", gcp->port->name, gcp->pin, ret); return ret; } } /* Configure VBUS Discharge pin if defined */ if (gcd->port) { if (!device_is_ready(gcd->port)) { LOG_ERR("%s: device not ready", gcd->port->name); return -EIO; } ret = gpio_pin_configure_dt(gcd, GPIO_OUTPUT_INACTIVE); if (ret != 0) { LOG_ERR("Failed to control feed %s.%u: %d", gcd->port->name, gcd->pin, ret); return ret; } } data->sequence.buffer = &data->sample; data->sequence.buffer_size = sizeof(data->sample); ret = adc_channel_setup_dt(&config->adc_channel); if (ret != 0) { LOG_INF("Could not setup channel (%d)\n", ret); return ret; } ret = 
adc_sequence_init_dt(&config->adc_channel, &data->sequence); if (ret != 0) { LOG_INF("Could not init sequence (%d)\n", ret); return ret; } return 0; } static const struct usbc_vbus_driver_api driver_api = { .measure = adc_vbus_measure, .check_level = adc_vbus_check_level, .discharge = adc_vbus_discharge, .enable = adc_vbus_enable }; BUILD_ASSERT(DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) > 0, "No compatible USB-C VBUS Measurement instance found"); #define DRIVER_INIT(inst) \ static struct usbc_vbus_data drv_data_##inst; \ static const struct usbc_vbus_config drv_config_##inst = { \ .output_ohm = DT_INST_PROP(inst, output_ohms), \ .full_ohm = DT_INST_PROP_OR(inst, full_ohms, 0), \ .adc_channel = ADC_DT_SPEC_INST_GET(inst), \ .discharge_gpios = GPIO_DT_SPEC_INST_GET_OR(inst, discharge_gpios, {}), \ .power_gpios = GPIO_DT_SPEC_INST_GET_OR(inst, power_gpios, {}), \ }; \ DEVICE_DT_INST_DEFINE(inst, \ &adc_vbus_init, \ NULL, \ &drv_data_##inst, \ &drv_config_##inst, \ POST_KERNEL, \ CONFIG_USBC_VBUS_INIT_PRIORITY, \ &driver_api); DT_INST_FOREACH_STATUS_OKAY(DRIVER_INIT) ```
/content/code_sandbox/drivers/usb_c/vbus/usbc_vbus_adc.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,609
```unknown
# USB-C VBUS device configuration options

config USBC_VBUS_ADC
	bool "USB-C VBUS ADC"
	default y
	depends on DT_HAS_ZEPHYR_USB_C_VBUS_ADC_ENABLED
	help
	  Measure VBUS with an ADC through a voltage divider
```
/content/code_sandbox/drivers/usb_c/vbus/Kconfig.usbc_vbus_adc
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
58
```unknown
# USB-C VBUS Measurement configuration options

menuconfig USBC_VBUS_DRIVER
	bool "USB-C VBUS drivers"
	help
	  Enable USB-C drivers

if USBC_VBUS_DRIVER

config USBC_VBUS_INIT_PRIORITY
	int "USB-C VBUS driver init priority"
	default 85
	help
	  Initialization priority of the USB-C VBUS measurement drivers
	  in POST_KERNEL.

source "drivers/usb_c/vbus/Kconfig.usbc_vbus_adc"
source "drivers/usb_c/vbus/Kconfig.numaker"

endif # USBC_VBUS_DRIVER

module = USBC
module-str = usbc
source "subsys/logging/Kconfig.template.log_config"
```
/content/code_sandbox/drivers/usb_c/vbus/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
139
```unknown
config MIPI_DBI_NXP_LCDIC
	bool "NXP MIPI DBI LCDIC driver"
	default y
	depends on DT_HAS_NXP_LCDIC_ENABLED
	depends on CLOCK_CONTROL
	select PINCTRL
	help
	  Enable support for NXP SPI LCDIC display controller driver

if MIPI_DBI_NXP_LCDIC

config MIPI_DBI_NXP_LCDIC_DMA
	bool "Use DMA for transfers with LCDIC driver"
	select DMA
	help
	  Use DMA for transfers when sending data with the LCDIC driver.
	  Commands will still be sent in polling mode.

endif # MIPI_DBI_NXP_LCDIC
```
/content/code_sandbox/drivers/mipi_dbi/Kconfig.nxp_lcdic
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
135
```c /* * */ #define DT_DRV_COMPAT zephyr_mipi_dbi_spi #include <zephyr/drivers/mipi_dbi.h> #include <zephyr/drivers/spi.h> #include <zephyr/drivers/gpio.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(mipi_dbi_spi, CONFIG_MIPI_DBI_LOG_LEVEL); struct mipi_dbi_spi_config { /* SPI hardware used to send data */ const struct device *spi_dev; /* Command/Data gpio */ const struct gpio_dt_spec cmd_data; /* Reset GPIO */ const struct gpio_dt_spec reset; }; struct mipi_dbi_spi_data { struct k_mutex lock; /* Used for 3 wire mode */ uint16_t spi_byte; }; /* Expands to 1 if the node does not have the `write-only` property */ #define _WRITE_ONLY_ABSENT(n) (!DT_INST_PROP(n, write_only)) | /* This macro will evaluate to 1 if any of the nodes with zephyr,mipi-dbi-spi * lack a `write-only` property. The intention here is to allow the entire * command_read function to be optimized out when it is not needed. */ #define MIPI_DBI_SPI_READ_REQUIRED DT_INST_FOREACH_STATUS_OKAY(_WRITE_ONLY_ABSENT) 0 uint32_t var = MIPI_DBI_SPI_READ_REQUIRED; /* In Type C mode 1 MIPI BIT communication, the 9th bit of the word * (first bit sent in each word) indicates if the word is a command or * data. Typically 0 indicates a command and 1 indicates data, but some * displays may vary. * Index starts from 0 so that BIT(8) means 9th bit. */ #define MIPI_DBI_DC_BIT BIT(8) static int mipi_dbi_spi_write_helper(const struct device *dev, const struct mipi_dbi_config *dbi_config, bool cmd_present, uint8_t cmd, const uint8_t *data_buf, size_t len) { const struct mipi_dbi_spi_config *config = dev->config; struct mipi_dbi_spi_data *data = dev->data; struct spi_buf buffer; struct spi_buf_set buf_set = { .buffers = &buffer, .count = 1, }; int ret = 0; ret = k_mutex_lock(&data->lock, K_FOREVER); if (ret < 0) { return ret; } if (dbi_config->mode == MIPI_DBI_MODE_SPI_3WIRE && IS_ENABLED(CONFIG_MIPI_DBI_SPI_3WIRE)) { /* 9 bit word mode must be used, as the command/data bit * is stored before the data word. 
*/ if ((dbi_config->config.operation & SPI_WORD_SIZE_MASK) != SPI_WORD_SET(9)) { return -ENOTSUP; } buffer.buf = &data->spi_byte; buffer.len = 2; /* Send command */ if (cmd_present) { data->spi_byte = cmd; ret = spi_write(config->spi_dev, &dbi_config->config, &buf_set); if (ret < 0) { goto out; } } /* Write data, byte by byte */ for (size_t i = 0; i < len; i++) { data->spi_byte = MIPI_DBI_DC_BIT | data_buf[i]; ret = spi_write(config->spi_dev, &dbi_config->config, &buf_set); if (ret < 0) { goto out; } } } else if (dbi_config->mode == MIPI_DBI_MODE_SPI_4WIRE) { /* 4 wire mode is much simpler. We just toggle the * command/data GPIO to indicate if we are sending * a command or data */ buffer.buf = &cmd; buffer.len = sizeof(cmd); if (cmd_present) { /* Set CD pin low for command */ gpio_pin_set_dt(&config->cmd_data, 0); ret = spi_write(config->spi_dev, &dbi_config->config, &buf_set); if (ret < 0) { goto out; } } if (len > 0) { buffer.buf = (void *)data_buf; buffer.len = len; /* Set CD pin high for data */ gpio_pin_set_dt(&config->cmd_data, 1); ret = spi_write(config->spi_dev, &dbi_config->config, &buf_set); if (ret < 0) { goto out; } } } else { /* Otherwise, unsupported mode */ ret = -ENOTSUP; } out: k_mutex_unlock(&data->lock); return ret; } static int mipi_dbi_spi_command_write(const struct device *dev, const struct mipi_dbi_config *dbi_config, uint8_t cmd, const uint8_t *data_buf, size_t len) { return mipi_dbi_spi_write_helper(dev, dbi_config, true, cmd, data_buf, len); } static int mipi_dbi_spi_write_display(const struct device *dev, const struct mipi_dbi_config *dbi_config, const uint8_t *framebuf, struct display_buffer_descriptor *desc, enum display_pixel_format pixfmt) { ARG_UNUSED(pixfmt); return mipi_dbi_spi_write_helper(dev, dbi_config, false, 0x0, framebuf, desc->buf_size); } #if MIPI_DBI_SPI_READ_REQUIRED static int mipi_dbi_spi_command_read(const struct device *dev, const struct mipi_dbi_config *dbi_config, uint8_t *cmds, size_t num_cmds, uint8_t *response, 
size_t len) { const struct mipi_dbi_spi_config *config = dev->config; struct mipi_dbi_spi_data *data = dev->data; struct spi_buf buffer; struct spi_buf_set buf_set = { .buffers = &buffer, .count = 1, }; int ret = 0; struct spi_config tmp_config; ret = k_mutex_lock(&data->lock, K_FOREVER); if (ret < 0) { return ret; } memcpy(&tmp_config, &dbi_config->config, sizeof(tmp_config)); if (dbi_config->mode == MIPI_DBI_MODE_SPI_3WIRE && IS_ENABLED(CONFIG_MIPI_DBI_SPI_3WIRE)) { /* We have to emulate 3 wire mode by packing the data/command * bit into the upper bit of the SPI transfer. * switch SPI to 9 bit mode, and write the transfer */ tmp_config.operation &= ~SPI_WORD_SIZE_MASK; tmp_config.operation |= SPI_WORD_SET(9); buffer.buf = &data->spi_byte; buffer.len = 1; /* Send each command */ for (size_t i = 0; i < num_cmds; i++) { data->spi_byte = cmds[i]; ret = spi_write(config->spi_dev, &tmp_config, &buf_set); if (ret < 0) { goto out; } } /* Now, we can switch to 8 bit mode, and read data */ buffer.buf = (void *)response; buffer.len = len; ret = spi_read(config->spi_dev, &dbi_config->config, &buf_set); } else if (dbi_config->mode == MIPI_DBI_MODE_SPI_4WIRE) { /* 4 wire mode is much simpler. We just toggle the * command/data GPIO to indicate if we are sending * a command or data. 
Note that since some SPI displays * require CS to be held low for the entire read sequence, * we set SPI_HOLD_ON_CS */ tmp_config.operation |= SPI_HOLD_ON_CS; if (num_cmds > 0) { buffer.buf = cmds; buffer.len = num_cmds; /* Set CD pin low for command */ gpio_pin_set_dt(&config->cmd_data, 0); ret = spi_write(config->spi_dev, &tmp_config, &buf_set); if (ret < 0) { goto out; } } if (len > 0) { /* Set CD pin high for data */ gpio_pin_set_dt(&config->cmd_data, 1); buffer.buf = (void *)response; buffer.len = len; ret = spi_read(config->spi_dev, &tmp_config, &buf_set); if (ret < 0) { goto out; } } } else { /* Otherwise, unsupported mode */ ret = -ENOTSUP; } out: spi_release(config->spi_dev, &tmp_config); k_mutex_unlock(&data->lock); return ret; } #endif /* MIPI_DBI_SPI_READ_REQUIRED */ static inline bool mipi_dbi_has_pin(const struct gpio_dt_spec *spec) { return spec->port != NULL; } static int mipi_dbi_spi_reset(const struct device *dev, k_timeout_t delay) { const struct mipi_dbi_spi_config *config = dev->config; int ret; if (!mipi_dbi_has_pin(&config->reset)) { return -ENOTSUP; } ret = gpio_pin_set_dt(&config->reset, 1); if (ret < 0) { return ret; } k_sleep(delay); return gpio_pin_set_dt(&config->reset, 0); } static int mipi_dbi_spi_release(const struct device *dev, const struct mipi_dbi_config *dbi_config) { const struct mipi_dbi_spi_config *config = dev->config; return spi_release(config->spi_dev, &dbi_config->config); } static int mipi_dbi_spi_init(const struct device *dev) { const struct mipi_dbi_spi_config *config = dev->config; struct mipi_dbi_spi_data *data = dev->data; int ret; if (!device_is_ready(config->spi_dev)) { LOG_ERR("SPI device is not ready"); return -ENODEV; } if (mipi_dbi_has_pin(&config->cmd_data)) { if (!gpio_is_ready_dt(&config->cmd_data)) { return -ENODEV; } ret = gpio_pin_configure_dt(&config->cmd_data, GPIO_OUTPUT); if (ret < 0) { LOG_ERR("Could not configure command/data GPIO (%d)", ret); return ret; } } if (mipi_dbi_has_pin(&config->reset)) { 
if (!gpio_is_ready_dt(&config->reset)) { return -ENODEV; } ret = gpio_pin_configure_dt(&config->reset, GPIO_OUTPUT_INACTIVE); if (ret < 0) { LOG_ERR("Could not configure reset GPIO (%d)", ret); return ret; } } k_mutex_init(&data->lock); return 0; } static const struct mipi_dbi_driver_api mipi_dbi_spi_driver_api = { .reset = mipi_dbi_spi_reset, .command_write = mipi_dbi_spi_command_write, .write_display = mipi_dbi_spi_write_display, .release = mipi_dbi_spi_release, #if MIPI_DBI_SPI_READ_REQUIRED .command_read = mipi_dbi_spi_command_read, #endif }; #define MIPI_DBI_SPI_INIT(n) \ static const struct mipi_dbi_spi_config \ mipi_dbi_spi_config_##n = { \ .spi_dev = DEVICE_DT_GET( \ DT_INST_PHANDLE(n, spi_dev)), \ .cmd_data = GPIO_DT_SPEC_INST_GET_OR(n, dc_gpios, {}), \ .reset = GPIO_DT_SPEC_INST_GET_OR(n, reset_gpios, {}), \ }; \ static struct mipi_dbi_spi_data mipi_dbi_spi_data_##n; \ \ DEVICE_DT_INST_DEFINE(n, mipi_dbi_spi_init, NULL, \ &mipi_dbi_spi_data_##n, \ &mipi_dbi_spi_config_##n, \ POST_KERNEL, \ CONFIG_MIPI_DBI_INIT_PRIORITY, \ &mipi_dbi_spi_driver_api); DT_INST_FOREACH_STATUS_OKAY(MIPI_DBI_SPI_INIT) ```
/content/code_sandbox/drivers/mipi_dbi/mipi_dbi_spi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,736
```c /* * */ #define DT_DRV_COMPAT st_stm32_fmc_mipi_dbi #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/mipi_dbi.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> #include <zephyr/sys/barrier.h> #include <zephyr/sys/sys_io.h> #include <zephyr/sys/byteorder.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(mipi_dbi_stm32_fmc, CONFIG_MIPI_DBI_LOG_LEVEL); struct mipi_dbi_stm32_fmc_config { /* Reset GPIO */ const struct gpio_dt_spec reset; /* Power GPIO */ const struct gpio_dt_spec power; mem_addr_t register_addr; mem_addr_t data_addr; uint32_t fmc_address_setup_time; uint32_t fmc_data_setup_time; uint32_t fmc_memory_width; }; struct mipi_dbi_stm32_fmc_data { const struct mipi_dbi_config *dbi_config; }; int mipi_dbi_stm32_fmc_check_config(const struct device *dev, const struct mipi_dbi_config *dbi_config) { const struct mipi_dbi_stm32_fmc_config *config = dev->config; struct mipi_dbi_stm32_fmc_data *data = dev->data; uint32_t fmc_write_cycles; if (data->dbi_config == dbi_config) { return 0; } if (dbi_config->mode != MIPI_DBI_MODE_8080_BUS_16_BIT) { LOG_ERR("Only support Intel 8080 16-bits"); return -ENOTSUP; } if (config->fmc_memory_width != FMC_NORSRAM_MEM_BUS_WIDTH_16) { LOG_ERR("Only supports 16-bit bus width"); return -EINVAL; } uint32_t hclk_freq = STM32_AHB_PRESCALER * DT_PROP(STM32_CLOCK_CONTROL_NODE, clock_frequency); /* According to the FMC documentation*/ fmc_write_cycles = ((config->fmc_address_setup_time + 1) + (config->fmc_data_setup_time + 1)) * 1; if (hclk_freq / fmc_write_cycles > dbi_config->config.frequency) { LOG_ERR("Frequency is too high for the display controller"); return -EINVAL; } data->dbi_config = dbi_config; return 0; } int mipi_dbi_stm32_fmc_command_write(const struct device *dev, const struct mipi_dbi_config *dbi_config, uint8_t cmd, const uint8_t *data_buf, size_t len) { const struct mipi_dbi_stm32_fmc_config *config = dev->config; int ret; size_t i; ret = mipi_dbi_stm32_fmc_check_config(dev, dbi_config); if 
(ret < 0) { return ret; } sys_write16(cmd, config->register_addr); if (IS_ENABLED(CONFIG_MIPI_DBI_STM32_FMC_MEM_BARRIER)) { barrier_dsync_fence_full(); } for (i = 0U; i < len; i++) { sys_write16((uint16_t)data_buf[i], config->data_addr); if (IS_ENABLED(CONFIG_MIPI_DBI_STM32_FMC_MEM_BARRIER)) { barrier_dsync_fence_full(); } } return 0; } static int mipi_dbi_stm32_fmc_write_display(const struct device *dev, const struct mipi_dbi_config *dbi_config, const uint8_t *framebuf, struct display_buffer_descriptor *desc, enum display_pixel_format pixfmt) { const struct mipi_dbi_stm32_fmc_config *config = dev->config; size_t i; int ret; ret = mipi_dbi_stm32_fmc_check_config(dev, dbi_config); if (ret < 0) { return ret; } for (i = 0U; i < desc->buf_size; i += 2) { sys_write16(sys_get_le16(&framebuf[i]), config->data_addr); if (IS_ENABLED(CONFIG_MIPI_DBI_STM32_FMC_MEM_BARRIER)) { barrier_dsync_fence_full(); } } return 0; } static int mipi_dbi_stm32_fmc_reset(const struct device *dev, uint32_t delay) { const struct mipi_dbi_stm32_fmc_config *config = dev->config; int ret; if (config->reset.port == NULL) { return -ENOTSUP; } ret = gpio_pin_set_dt(&config->reset, 1); if (ret < 0) { return ret; } k_msleep(delay); return gpio_pin_set_dt(&config->reset, 0); } static int mipi_dbi_stm32_fmc_init(const struct device *dev) { const struct mipi_dbi_stm32_fmc_config *config = dev->config; if (config->reset.port) { if (!gpio_is_ready_dt(&config->reset)) { LOG_ERR("Reset GPIO device not ready"); return -ENODEV; } if (gpio_pin_configure_dt(&config->reset, GPIO_OUTPUT_INACTIVE)) { LOG_ERR("Couldn't configure reset pin"); return -EIO; } } if (config->power.port) { if (!gpio_is_ready_dt(&config->power)) { LOG_ERR("Power GPIO device not ready"); return -ENODEV; } if (gpio_pin_configure_dt(&config->power, GPIO_OUTPUT)) { LOG_ERR("Couldn't configure power pin"); return -EIO; } } return 0; } static struct mipi_dbi_driver_api mipi_dbi_stm32_fmc_driver_api = { .reset = mipi_dbi_stm32_fmc_reset, 
.command_write = mipi_dbi_stm32_fmc_command_write, .write_display = mipi_dbi_stm32_fmc_write_display, }; #define MIPI_DBI_FMC_GET_ADDRESS(n) _CONCAT(FMC_BANK1_, UTIL_INC(DT_REG_ADDR(DT_INST_PARENT(n)))) #define MIPI_DBI_FMC_GET_DATA_ADDRESS(n) \ MIPI_DBI_FMC_GET_ADDRESS(n) + (1 << (DT_INST_PROP(n, register_select_pin) + 1)) #define MIPI_DBI_STM32_FMC_INIT(n) \ static const struct mipi_dbi_stm32_fmc_config mipi_dbi_stm32_fmc_config_##n = { \ .reset = GPIO_DT_SPEC_INST_GET_OR(n, reset_gpios, {}), \ .power = GPIO_DT_SPEC_INST_GET_OR(n, power_gpios, {}), \ .register_addr = MIPI_DBI_FMC_GET_ADDRESS(n), \ .data_addr = MIPI_DBI_FMC_GET_DATA_ADDRESS(n), \ .fmc_address_setup_time = DT_PROP_BY_IDX(DT_INST_PARENT(n), st_timing, 0), \ .fmc_data_setup_time = DT_PROP_BY_IDX(DT_INST_PARENT(n), st_timing, 2), \ .fmc_memory_width = DT_PROP_BY_IDX(DT_INST_PARENT(n), st_control, 2), \ }; \ \ static struct mipi_dbi_stm32_fmc_data mipi_dbi_stm32_fmc_data_##n; \ \ DEVICE_DT_INST_DEFINE(n, mipi_dbi_stm32_fmc_init, NULL, &mipi_dbi_stm32_fmc_data_##n, \ &mipi_dbi_stm32_fmc_config_##n, POST_KERNEL, \ CONFIG_MIPI_DBI_INIT_PRIORITY, &mipi_dbi_stm32_fmc_driver_api); DT_INST_FOREACH_STATUS_OKAY(MIPI_DBI_STM32_FMC_INIT) ```
/content/code_sandbox/drivers/mipi_dbi/mipi_dbi_stm32_fmc.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,722
```unknown config MIPI_DBI_SPI bool "MIPI DBI SPI driver" default y depends on DT_HAS_ZEPHYR_MIPI_DBI_SPI_ENABLED select SPI help Enable support for MIPI DBI SPI driver. This driver implements a MIPI-DBI mode C compatible controller using a SPI device, as well as GPIO outputs for the reset and D/C signals if MIPI_DBI_SPI config MIPI_DBI_SPI_3WIRE bool "Emulated 3 wire SPI support" help Support 3 wire MIPI DBI (Mode C option 2) in MIPI DBI SPI driver. This requires manually packing each byte with a data/command bit, and may slow down display data transmission. endif # MIPI_DBI_SPI ```
/content/code_sandbox/drivers/mipi_dbi/Kconfig.spi
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
174
```unknown # Smartbond MIPI DBI host configuration options config MIPI_DBI_SMARTBOND bool "Smartbond MIPI DBI host controller driver" depends on DT_HAS_RENESAS_SMARTBOND_MIPI_DBI_ENABLED default y help Enable Smartbond MIPI DBI host controller. ```
/content/code_sandbox/drivers/mipi_dbi/Kconfig.smartbond
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
68
```unknown config MIPI_DBI_STM32_FMC bool "MIPI DBI driver for STM32 FMC" default y depends on DT_HAS_ST_STM32_FMC_MIPI_DBI_ENABLED select MEMC help Enable support for MIPI DBI driver for controller based on the stm32 FMC. if MIPI_DBI_STM32_FMC config MIPI_DBI_STM32_FMC_MEM_BARRIER bool "Adds memory barrier after every address and data register access" default y endif # MIPI_DBI_STM32_FMC ```
/content/code_sandbox/drivers/mipi_dbi/Kconfig.stm32_fmc
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
124
```c /* * */ #define DT_DRV_COMPAT nxp_lcdic #include <zephyr/drivers/mipi_dbi.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/spi.h> #include <zephyr/drivers/dma.h> #include <zephyr/logging/log.h> #include <soc.h> #include <zephyr/drivers/dma/dma_mcux_lpc.h> LOG_MODULE_REGISTER(mipi_dbi_lcdic, CONFIG_MIPI_DBI_LOG_LEVEL); #include <fsl_inputmux.h> enum lcdic_data_fmt { LCDIC_DATA_FMT_BYTE = 0, LCDIC_DATA_FMT_HALFWORD = 1, /* 2 byte */ LCDIC_DATA_FMT_WORD = 2, /* 4 byte */ }; enum lcdic_cmd_dc { LCDIC_COMMAND = 0, LCDIC_DATA = 1, }; enum lcdic_cmd_type { LCDIC_RX = 0, LCDIC_TX = 1, }; /* Limit imposed by size of data length field in LCDIC command */ #define LCDIC_MAX_XFER 0x40000 /* Max reset width (in terms of Timer0_Period, see RST_CTRL register) */ #define LCDIC_MAX_RST_WIDTH 0x3F /* Descriptor for LCDIC command */ union lcdic_trx_cmd { struct { /* Data length in bytes. LCDIC transfers data_len + 1 */ uint32_t data_len: 18; /* Dummy SCLK cycles between TX and RX (for SPI mode) */ uint32_t dummy_count: 3; uint32_t rsvd: 2; /* Use auto repeat mode */ uint32_t auto_repeat: 1; /* Tearing enable sync mode */ uint32_t te_sync_mode: 2; /* TRX command timeout mode */ uint32_t trx_timeout_mode: 1; /* Data format, see lcdic_data_fmt */ uint32_t data_format: 2; /* Enable command done interrupt */ uint32_t cmd_done_int: 1; /* LCD command or LCD data, see lcdic_cmd_dc */ uint32_t cmd_data: 1; /* TX or RX command, see lcdic_cmd_type */ uint32_t trx: 1; } bits; uint32_t u32; }; struct mipi_dbi_lcdic_config { LCDIC_Type *base; void (*irq_config_func)(const struct device *dev); const struct pinctrl_dev_config *pincfg; const struct device *clock_dev; clock_control_subsys_t clock_subsys; bool swap_bytes; }; #ifdef CONFIG_MIPI_DBI_NXP_LCDIC_DMA struct stream { const struct device *dma_dev; uint32_t channel; struct dma_config dma_cfg; struct dma_block_config blk_cfg[2]; }; #endif struct mipi_dbi_lcdic_data { /* Tracks number 
of bytes remaining in command */ uint32_t cmd_bytes; /* Tracks number of bytes remaining in transfer */ uint32_t xfer_bytes; /* Tracks start of transfer buffer */ const uint8_t *xfer_buf; /* When sending data that does not evenly fit into 4 byte chunks, * this is used to store the last unaligned segment of the data. */ uint32_t unaligned_word __aligned(4); /* Tracks lcdic_data_fmt value we should use for pixel data */ uint8_t pixel_fmt; const struct mipi_dbi_config *active_cfg; struct k_sem xfer_sem; struct k_sem lock; #ifdef CONFIG_MIPI_DBI_NXP_LCDIC_DMA struct stream dma_stream; #endif }; #define LCDIC_ALL_INTERRUPTS \ (LCDIC_ICR_RFIFO_THRES_INTR_CLR_MASK | \ LCDIC_ICR_RFIFO_UNDERFLOW_INTR_CLR_MASK | \ LCDIC_ICR_TFIFO_THRES_INTR_CLR_MASK | \ LCDIC_ICR_TFIFO_OVERFLOW_INTR_CLR_MASK | \ LCDIC_ICR_TE_TO_INTR_CLR_MASK | \ LCDIC_ICR_CMD_TO_INTR_CLR_MASK | \ LCDIC_ICR_CMD_DONE_INTR_CLR_MASK | \ LCDIC_ICR_RST_DONE_INTR_CLR_MASK) /* RX and TX FIFO thresholds */ #ifdef CONFIG_MIPI_DBI_NXP_LCDIC_DMA #define LCDIC_RX_FIFO_THRESH 0x0 #define LCDIC_TX_FIFO_THRESH 0x0 #else #define LCDIC_RX_FIFO_THRESH 0x0 #define LCDIC_TX_FIFO_THRESH 0x3 #endif /* Timer0 and Timer1 bases. 
We choose a longer timer0 base to enable * long reset periods */ #define LCDIC_TIMER0_RATIO 0xF #define LCDIC_TIMER1_RATIO 0x9 /* After LCDIC is enabled or disabled, there should be a wait longer than * 5x the module clock before other registers are read */ static inline void mipi_dbi_lcdic_reset_delay(void) { k_busy_wait(1); } /* Resets state of the LCDIC TX/RX FIFO */ static inline void mipi_dbi_lcdic_reset_state(const struct device *dev) { const struct mipi_dbi_lcdic_config *config = dev->config; LCDIC_Type *base = config->base; base->CTRL &= ~LCDIC_CTRL_LCDIC_EN_MASK; mipi_dbi_lcdic_reset_delay(); base->CTRL |= LCDIC_CTRL_LCDIC_EN_MASK; mipi_dbi_lcdic_reset_delay(); } #ifdef CONFIG_MIPI_DBI_NXP_LCDIC_DMA /* Start DMA to send data using LCDIC TX FIFO */ static int mipi_dbi_lcdic_start_dma(const struct device *dev) { const struct mipi_dbi_lcdic_config *config = dev->config; struct mipi_dbi_lcdic_data *data = dev->data; struct stream *stream = &data->dma_stream; uint32_t aligned_len = data->cmd_bytes & (~0x3); uint32_t unaligned_len = data->cmd_bytes & 0x3; int ret; stream->dma_cfg.head_block = &stream->blk_cfg[0]; if (aligned_len == 0) { /* Only unaligned data exists, send it in the first block */ /* First DMA block configuration is used to send aligned data */ stream->blk_cfg[0].source_address = (uint32_t)&data->unaligned_word; stream->blk_cfg[0].dest_address = (uint32_t)&config->base->TFIFO_WDATA; /* Block size should be the aligned portion of the transfer */ stream->blk_cfg[0].block_size = sizeof(uint32_t); stream->dma_cfg.block_count = 1; stream->blk_cfg[0].next_block = NULL; } else { /* First DMA block configuration is used to send aligned data */ stream->blk_cfg[0].source_address = (uint32_t)data->xfer_buf; stream->blk_cfg[0].dest_address = (uint32_t)&config->base->TFIFO_WDATA; /* Block size should be the aligned portion of the transfer */ stream->blk_cfg[0].block_size = aligned_len; /* Second DMA block configuration sends unaligned block */ if 
(unaligned_len) { stream->dma_cfg.block_count = 2; stream->blk_cfg[0].next_block = &stream->blk_cfg[1]; stream->blk_cfg[1].source_address = (uint32_t)&data->unaligned_word; stream->blk_cfg[1].dest_address = (uint32_t)&config->base->TFIFO_WDATA; stream->blk_cfg[1].block_size = sizeof(uint32_t); } else { stream->dma_cfg.block_count = 1; stream->blk_cfg[0].next_block = NULL; } } ret = dma_config(stream->dma_dev, stream->channel, &stream->dma_cfg); if (ret) { return ret; } /* Enable DMA channel before we set up DMA request. This way, * the hardware DMA trigger does not fire until the DMA * start function has initialized the DMA. */ ret = dma_start(stream->dma_dev, stream->channel); if (ret) { return ret; } /* Enable DMA request */ config->base->CTRL |= LCDIC_CTRL_DMA_EN_MASK; return ret; } /* DMA completion callback */ static void mipi_dbi_lcdic_dma_callback(const struct device *dma_dev, void *user_data, uint32_t channel, int status) { if (status < 0) { LOG_ERR("DMA callback with error %d", status); } } #endif /* CONFIG_MIPI_DBI_NXP_LCDIC_DMA */ /* Configure LCDIC */ static int mipi_dbi_lcdic_configure(const struct device *dev, const struct mipi_dbi_config *dbi_config) { const struct mipi_dbi_lcdic_config *config = dev->config; struct mipi_dbi_lcdic_data *data = dev->data; const struct spi_config *spi_cfg = &dbi_config->config; LCDIC_Type *base = config->base; int ret; uint32_t reg; if (dbi_config == data->active_cfg) { return 0; } /* Clear all interrupt flags */ base->ICR = LCDIC_ALL_INTERRUPTS; /* Mask all interrupts */ base->IMR = LCDIC_ALL_INTERRUPTS; /* Set LCDIC clock frequency */ ret = clock_control_set_rate(config->clock_dev, config->clock_subsys, (clock_control_subsys_rate_t)spi_cfg->frequency); if (ret) { LOG_ERR("Invalid clock frequency %d", spi_cfg->frequency); return ret; } if (!(spi_cfg->operation & SPI_HALF_DUPLEX)) { LOG_ERR("LCDIC only supports half duplex operation"); return -ENOTSUP; } if (spi_cfg->slave != 0) { /* Only one slave select line */ 
return -ENOTSUP; } if (SPI_WORD_SIZE_GET(spi_cfg->operation) > 8) { LOG_ERR("Unsupported word size"); return -ENOTSUP; } reg = base->CTRL; /* Disable LCD module during configuration */ reg &= ~LCDIC_CTRL_LCDIC_EN_MASK; /* Select SPI mode */ reg &= ~LCDIC_CTRL_LCDIC_MD_MASK; /* Select 3 or 4 wire mode based on config selection */ if (dbi_config->mode == MIPI_DBI_MODE_SPI_4WIRE) { reg |= LCDIC_CTRL_SPI_MD_MASK; } else { reg &= ~LCDIC_CTRL_SPI_MD_MASK; } /* Enable byte swapping if user requested it */ reg = (reg & ~LCDIC_CTRL_DAT_ENDIAN_MASK) | LCDIC_CTRL_DAT_ENDIAN(!config->swap_bytes); /* Disable DMA */ reg &= ~LCDIC_CTRL_DMA_EN_MASK; base->CTRL = reg; mipi_dbi_lcdic_reset_delay(); /* Setup SPI CPOL and CPHA selections */ reg = base->SPI_CTRL; reg = (reg & ~LCDIC_SPI_CTRL_SDAT_ENDIAN_MASK) | LCDIC_SPI_CTRL_SDAT_ENDIAN((spi_cfg->operation & SPI_TRANSFER_LSB) ? 1 : 0); reg = (reg & ~LCDIC_SPI_CTRL_CPHA_MASK) | LCDIC_SPI_CTRL_CPHA((spi_cfg->operation & SPI_MODE_CPHA) ? 1 : 0); reg = (reg & ~LCDIC_SPI_CTRL_CPOL_MASK) | LCDIC_SPI_CTRL_CPOL((spi_cfg->operation & SPI_MODE_CPOL) ? 1 : 0); base->SPI_CTRL = reg; /* Enable the module */ base->CTRL |= LCDIC_CTRL_LCDIC_EN_MASK; mipi_dbi_lcdic_reset_delay(); data->active_cfg = dbi_config; return 0; } /* Gets unaligned word data from array. Return value will be a 4 byte * value containing the last unaligned section of the array data */ static uint32_t mipi_dbi_lcdic_get_unaligned(const uint8_t *bytes, uint32_t buf_len) { uint32_t word = 0U; uint8_t unaligned_len = buf_len & 0x3; uint32_t aligned_len = buf_len - unaligned_len; while ((unaligned_len--)) { word <<= 8U; word |= bytes[aligned_len + unaligned_len]; } return word; } /* Fills the TX fifo with data. Returns number of bytes written. 
*/ static int mipi_dbi_lcdic_fill_tx(LCDIC_Type *base, const uint8_t *buf, uint32_t buf_len, uint32_t last_word) { uint32_t *word_buf = (uint32_t *)buf; uint32_t bytes_written = 0U; uint32_t write_len; /* TX FIFO consumes 4 bytes on each write, so we can write up * to buf_len / 4 times before we send all data. * Write to FIFO it overflows or we send entire buffer. */ while (buf_len) { if (buf_len < 4) { /* Send last bytes */ base->TFIFO_WDATA = last_word; write_len = buf_len; } else { /* Otherwise, write one word */ base->TFIFO_WDATA = word_buf[bytes_written >> 2]; write_len = 4; } if (base->IRSR & LCDIC_IRSR_TFIFO_OVERFLOW_RAW_INTR_MASK) { /* TX FIFO has overflowed, last word write did not * complete. Return current number of bytes written. */ base->ICR |= LCDIC_ICR_TFIFO_OVERFLOW_INTR_CLR_MASK; return bytes_written; } bytes_written += write_len; buf_len -= write_len; } return bytes_written; } /* Writes command word */ static void mipi_dbi_lcdic_set_cmd(LCDIC_Type *base, enum lcdic_cmd_type dir, enum lcdic_cmd_dc dc, enum lcdic_data_fmt data_fmt, uint32_t buf_len) { union lcdic_trx_cmd cmd = {0}; /* TX FIFO will be clear, write command word */ cmd.bits.data_len = buf_len - 1; cmd.bits.cmd_data = dc; cmd.bits.trx = dir; cmd.bits.cmd_done_int = true; cmd.bits.data_format = data_fmt; /* Write command */ base->TFIFO_WDATA = cmd.u32; } static int mipi_dbi_lcdic_write_display(const struct device *dev, const struct mipi_dbi_config *dbi_config, const uint8_t *framebuf, struct display_buffer_descriptor *desc, enum display_pixel_format pixfmt) { const struct mipi_dbi_lcdic_config *config = dev->config; struct mipi_dbi_lcdic_data *dev_data = dev->data; LCDIC_Type *base = config->base; int ret; uint32_t interrupts = 0U; ret = k_sem_take(&dev_data->lock, K_FOREVER); if (ret) { goto out; } ret = mipi_dbi_lcdic_configure(dev, dbi_config); if (ret) { goto out; } /* State reset is required before transfer */ mipi_dbi_lcdic_reset_state(dev); if (desc->buf_size != 0) { 
dev_data->xfer_bytes = desc->buf_size; /* Cap command to max transfer size */ dev_data->cmd_bytes = MIN(desc->buf_size, LCDIC_MAX_XFER); dev_data->xfer_buf = framebuf; /* If the length of the transfer is not divisible by * 4, save the unaligned portion of the transfer into * a temporary buffer */ if (dev_data->cmd_bytes & 0x3) { dev_data->unaligned_word = mipi_dbi_lcdic_get_unaligned( dev_data->xfer_buf, dev_data->cmd_bytes); } /* Save pixel format */ if (DISPLAY_BITS_PER_PIXEL(pixfmt) == 32) { dev_data->pixel_fmt = LCDIC_DATA_FMT_WORD; } else if (DISPLAY_BITS_PER_PIXEL(pixfmt) == 16) { dev_data->pixel_fmt = LCDIC_DATA_FMT_HALFWORD; } else if (DISPLAY_BITS_PER_PIXEL(pixfmt) == 8) { dev_data->pixel_fmt = LCDIC_DATA_FMT_BYTE; } else { if (config->swap_bytes) { LOG_WRN("Unsupported pixel format, byte swapping disabled"); } } /* Use pixel format data width, so we can byte swap * if needed */ mipi_dbi_lcdic_set_cmd(base, LCDIC_TX, LCDIC_DATA, dev_data->pixel_fmt, dev_data->cmd_bytes); #ifdef CONFIG_MIPI_DBI_NXP_LCDIC_DMA /* Enable command complete interrupt */ interrupts |= LCDIC_IMR_CMD_DONE_INTR_MSK_MASK; /* Write interrupt mask */ base->IMR &= ~interrupts; /* Configure DMA to send data */ ret = mipi_dbi_lcdic_start_dma(dev); if (ret) { LOG_ERR("Could not start DMA (%d)", ret); goto out; } #else /* Enable TX FIFO threshold interrupt. 
This interrupt * should fire once enabled, which will kick off * the transfer */ interrupts |= LCDIC_IMR_TFIFO_THRES_INTR_MSK_MASK; /* Enable command complete interrupt */ interrupts |= LCDIC_IMR_CMD_DONE_INTR_MSK_MASK; /* Write interrupt mask */ base->IMR &= ~interrupts; #endif ret = k_sem_take(&dev_data->xfer_sem, K_FOREVER); } out: k_sem_give(&dev_data->lock); return ret; } static int mipi_dbi_lcdic_write_cmd(const struct device *dev, const struct mipi_dbi_config *dbi_config, uint8_t cmd, const uint8_t *data, size_t data_len) { const struct mipi_dbi_lcdic_config *config = dev->config; struct mipi_dbi_lcdic_data *dev_data = dev->data; LCDIC_Type *base = config->base; int ret; uint32_t interrupts = 0U; ret = k_sem_take(&dev_data->lock, K_FOREVER); if (ret) { goto out; } ret = mipi_dbi_lcdic_configure(dev, dbi_config); if (ret) { goto out; } /* State reset is required before transfer */ mipi_dbi_lcdic_reset_state(dev); /* Write command */ mipi_dbi_lcdic_set_cmd(base, LCDIC_TX, LCDIC_COMMAND, LCDIC_DATA_FMT_BYTE, 1); /* Use standard byte writes */ dev_data->pixel_fmt = LCDIC_DATA_FMT_BYTE; base->TFIFO_WDATA = cmd; /* Wait for command completion */ while ((base->IRSR & LCDIC_IRSR_CMD_DONE_RAW_INTR_MASK) == 0) { /* Spin */ } base->ICR |= LCDIC_ICR_CMD_DONE_INTR_CLR_MASK; if (data_len != 0) { dev_data->xfer_bytes = data_len; /* Cap command to max transfer size */ dev_data->cmd_bytes = MIN(data_len, LCDIC_MAX_XFER); dev_data->xfer_buf = data; /* If the length of the transfer is not divisible by * 4, save the unaligned portion of the transfer into * a temporary buffer */ if (dev_data->cmd_bytes & 0x3) { dev_data->unaligned_word = mipi_dbi_lcdic_get_unaligned( dev_data->xfer_buf, dev_data->cmd_bytes); } if (cmd == MIPI_DCS_WRITE_MEMORY_START) { /* Use pixel format data width, so we can byte swap * if needed */ mipi_dbi_lcdic_set_cmd(base, LCDIC_TX, LCDIC_DATA, dev_data->pixel_fmt, dev_data->cmd_bytes); } else { mipi_dbi_lcdic_set_cmd(base, LCDIC_TX, LCDIC_DATA, 
LCDIC_DATA_FMT_BYTE, dev_data->cmd_bytes); } #ifdef CONFIG_MIPI_DBI_NXP_LCDIC_DMA if (((((uint32_t)dev_data->xfer_buf) & 0x3) == 0) || (dev_data->cmd_bytes < 4)) { /* Data is aligned, we can use DMA */ /* Enable command complete interrupt */ interrupts |= LCDIC_IMR_CMD_DONE_INTR_MSK_MASK; /* Write interrupt mask */ base->IMR &= ~interrupts; /* Configure DMA to send data */ ret = mipi_dbi_lcdic_start_dma(dev); if (ret) { LOG_ERR("Could not start DMA (%d)", ret); goto out; } } else /* Data is not aligned */ #endif { /* Enable TX FIFO threshold interrupt. This interrupt * should fire once enabled, which will kick off * the transfer */ interrupts |= LCDIC_IMR_TFIFO_THRES_INTR_MSK_MASK; /* Enable command complete interrupt */ interrupts |= LCDIC_IMR_CMD_DONE_INTR_MSK_MASK; /* Write interrupt mask */ base->IMR &= ~interrupts; } ret = k_sem_take(&dev_data->xfer_sem, K_FOREVER); } out: k_sem_give(&dev_data->lock); return ret; } static int mipi_dbi_lcdic_reset(const struct device *dev, k_timeout_t delay) { const struct mipi_dbi_lcdic_config *config = dev->config; LCDIC_Type *base = config->base; uint32_t lcdic_freq; uint8_t rst_width, pulse_cnt; uint32_t delay_ms = k_ticks_to_ms_ceil32(delay); /* Calculate delay based off timer0 ratio. 
Formula given * by RM is as follows: * Reset pulse width = (RST_WIDTH + 1) * Timer0_Period * Timer0_Period = 2^(TIMER_RATIO0) / LCDIC_Clock_Freq */ if (clock_control_get_rate(config->clock_dev, config->clock_subsys, &lcdic_freq)) { return -EIO; } rst_width = (delay_ms * (lcdic_freq)) / ((1 << LCDIC_TIMER0_RATIO) * MSEC_PER_SEC); /* If rst_width is larger than max value supported by hardware, * increase the pulse count (rounding up) */ pulse_cnt = ((rst_width + (LCDIC_MAX_RST_WIDTH - 1)) / LCDIC_MAX_RST_WIDTH); rst_width = MIN(LCDIC_MAX_RST_WIDTH, rst_width); /* Start the reset signal */ base->RST_CTRL = LCDIC_RST_CTRL_RST_WIDTH(rst_width - 1) | LCDIC_RST_CTRL_RST_SEQ_NUM(pulse_cnt - 1) | LCDIC_RST_CTRL_RST_START_MASK; /* Wait for reset to complete */ while ((base->IRSR & LCDIC_IRSR_RST_DONE_RAW_INTR_MASK) == 0) { /* Spin */ } base->ICR |= LCDIC_ICR_RST_DONE_INTR_CLR_MASK; return 0; } /* Initializes LCDIC peripheral */ static int mipi_dbi_lcdic_init(const struct device *dev) { const struct mipi_dbi_lcdic_config *config = dev->config; struct mipi_dbi_lcdic_data *data = dev->data; LCDIC_Type *base = config->base; int ret; ret = clock_control_on(config->clock_dev, config->clock_subsys); if (ret) { return ret; } /* Set initial clock rate of 10 MHz */ ret = clock_control_set_rate(config->clock_dev, config->clock_subsys, (clock_control_subsys_rate_t)MHZ(10)); if (ret) { return ret; } ret = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (ret) { return ret; } ret = k_sem_init(&data->xfer_sem, 0, 1); if (ret) { return ret; } ret = k_sem_init(&data->lock, 1, 1); if (ret) { return ret; } /* Clear all interrupt flags */ base->ICR = LCDIC_ALL_INTERRUPTS; /* Mask all interrupts */ base->IMR = LCDIC_ALL_INTERRUPTS; /* Enable interrupts */ config->irq_config_func(dev); /* Setup RX and TX fifo thresholds */ base->FIFO_CTRL = LCDIC_FIFO_CTRL_RFIFO_THRES(LCDIC_RX_FIFO_THRESH) | LCDIC_FIFO_CTRL_TFIFO_THRES(LCDIC_TX_FIFO_THRESH); /* Disable command timeouts */ 
base->TO_CTRL &= ~(LCDIC_TO_CTRL_CMD_LONG_TO_MASK | LCDIC_TO_CTRL_CMD_SHORT_TO_MASK); /* Ensure LCDIC timer ratios are at reset values */ base->TIMER_CTRL = LCDIC_TIMER_CTRL_TIMER_RATIO1(LCDIC_TIMER1_RATIO) | LCDIC_TIMER_CTRL_TIMER_RATIO0(LCDIC_TIMER0_RATIO); #ifdef CONFIG_MIPI_DBI_NXP_LCDIC_DMA /* Attach the LCDIC DMA request signal to the DMA channel we will * use with hardware triggering. */ INPUTMUX_AttachSignal(INPUTMUX, data->dma_stream.channel, kINPUTMUX_LcdTxRegToDmaSingleToDma0); INPUTMUX_EnableSignal(INPUTMUX, kINPUTMUX_Dmac0InputTriggerLcdTxRegToDmaSingleEna, true); #endif return 0; } static const struct mipi_dbi_driver_api mipi_dbi_lcdic_driver_api = { .command_write = mipi_dbi_lcdic_write_cmd, .write_display = mipi_dbi_lcdic_write_display, .reset = mipi_dbi_lcdic_reset, }; static void mipi_dbi_lcdic_isr(const struct device *dev) { const struct mipi_dbi_lcdic_config *config = dev->config; struct mipi_dbi_lcdic_data *data = dev->data; LCDIC_Type *base = config->base; uint32_t bytes_written, isr_status; isr_status = base->ISR; /* Clear pending interrupts */ base->ICR |= isr_status; if (isr_status & LCDIC_ISR_CMD_DONE_INTR_MASK) { if (config->base->CTRL & LCDIC_CTRL_DMA_EN_MASK) { /* DMA completed. Update buffer tracking data */ data->xfer_bytes -= data->cmd_bytes; data->xfer_buf += data->cmd_bytes; /* Disable DMA request */ config->base->CTRL &= ~LCDIC_CTRL_DMA_EN_MASK; } if (data->xfer_bytes == 0) { /* Disable interrupts */ base->IMR |= LCDIC_ALL_INTERRUPTS; /* All data has been sent. */ k_sem_give(&data->xfer_sem); } else { /* Command done. 
Queue next command */ data->cmd_bytes = MIN(data->xfer_bytes, LCDIC_MAX_XFER); mipi_dbi_lcdic_set_cmd(base, LCDIC_TX, LCDIC_DATA, LCDIC_DATA_FMT_BYTE, data->cmd_bytes); if (data->cmd_bytes & 0x3) { /* Save unaligned portion of transfer into * a temporary buffer */ data->unaligned_word = mipi_dbi_lcdic_get_unaligned( data->xfer_buf, data->cmd_bytes); } #ifdef CONFIG_MIPI_DBI_NXP_LCDIC_DMA if (((((uint32_t)data->xfer_buf) & 0x3) == 0) || (data->cmd_bytes < 4)) { /* Data is aligned. We can use DMA */ mipi_dbi_lcdic_start_dma(dev); } else #endif { /* We must refill the FIFO here in order to continue * the next transfer, since the TX FIFO threshold * interrupt may have already fired. */ bytes_written = mipi_dbi_lcdic_fill_tx(base, data->xfer_buf, data->cmd_bytes, data->unaligned_word); if (bytes_written > 0) { data->xfer_buf += bytes_written; data->cmd_bytes -= bytes_written; data->xfer_bytes -= bytes_written; } } } } else if (isr_status & LCDIC_ISR_TFIFO_THRES_INTR_MASK) { /* If command is not done, continue filling TX FIFO from * current transfer buffer */ bytes_written = mipi_dbi_lcdic_fill_tx(base, data->xfer_buf, data->cmd_bytes, data->unaligned_word); if (bytes_written > 0) { data->xfer_buf += bytes_written; data->cmd_bytes -= bytes_written; data->xfer_bytes -= bytes_written; } } } #ifdef CONFIG_MIPI_DBI_NXP_LCDIC_DMA #define LCDIC_DMA_CHANNELS(n) \ .dma_stream = { \ .dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR(n)), \ .channel = DT_INST_DMAS_CELL_BY_IDX(n, 0, channel), \ .dma_cfg = { \ .dma_slot = LPC_DMA_HWTRIG_EN | \ LPC_DMA_TRIGPOL_HIGH_RISING | \ LPC_DMA_TRIGBURST, \ .channel_direction = MEMORY_TO_PERIPHERAL, \ .dma_callback = mipi_dbi_lcdic_dma_callback, \ .source_data_size = 4, \ .dest_data_size = 4, \ .user_data = (void *)DEVICE_DT_INST_GET(n), \ }, \ }, #else #define LCDIC_DMA_CHANNELS(n) #endif #define MIPI_DBI_LCDIC_INIT(n) \ static void mipi_dbi_lcdic_config_func_##n( \ const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), 
\ mipi_dbi_lcdic_isr, \ DEVICE_DT_INST_GET(n), 0); \ \ irq_enable(DT_INST_IRQN(n)); \ } \ \ PINCTRL_DT_INST_DEFINE(n); \ static const struct mipi_dbi_lcdic_config \ mipi_dbi_lcdic_config_##n = { \ .base = (LCDIC_Type *)DT_INST_REG_ADDR(n), \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \ .clock_subsys = (clock_control_subsys_t) \ DT_INST_CLOCKS_CELL(n, name), \ .irq_config_func = mipi_dbi_lcdic_config_func_##n, \ .swap_bytes = DT_INST_PROP(n, nxp_swap_bytes), \ }; \ static struct mipi_dbi_lcdic_data mipi_dbi_lcdic_data_##n = { \ LCDIC_DMA_CHANNELS(n) \ }; \ DEVICE_DT_INST_DEFINE(n, mipi_dbi_lcdic_init, NULL, \ &mipi_dbi_lcdic_data_##n, \ &mipi_dbi_lcdic_config_##n, \ POST_KERNEL, \ CONFIG_MIPI_DBI_INIT_PRIORITY, \ &mipi_dbi_lcdic_driver_api); DT_INST_FOREACH_STATUS_OKAY(MIPI_DBI_LCDIC_INIT) ```
/content/code_sandbox/drivers/mipi_dbi/mipi_dbi_nxp_lcdic.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,954
```unknown # MIPI DBI controller options menuconfig MIPI_DBI bool "MIPI-DBI Host Controller drivers [EXPERIMENTAL]" select EXPERIMENTAL help Add support for MIPI-DBI compliant host controllers if MIPI_DBI module = MIPI_DBI module-str = mipi_dbi source "subsys/logging/Kconfig.template.log_config" config MIPI_DBI_INIT_PRIORITY int "Initialization priority" default 80 help MIPI-DBI Host Controllers initialization priority. source "drivers/mipi_dbi/Kconfig.spi" source "drivers/mipi_dbi/Kconfig.smartbond" source "drivers/mipi_dbi/Kconfig.nxp_lcdic" source "drivers/mipi_dbi/Kconfig.nxp_flexio_lcdif" source "drivers/mipi_dbi/Kconfig.stm32_fmc" endif ```
/content/code_sandbox/drivers/mipi_dbi/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
185
```c /* * */ #define DT_DRV_COMPAT nxp_mipi_dbi_flexio_lcdif #include <zephyr/drivers/dma.h> #include <zephyr/drivers/display.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/mipi_dbi.h> #include <zephyr/drivers/misc/nxp_flexio/nxp_flexio.h> #include <zephyr/kernel.h> #include <zephyr/logging/log.h> #include <fsl_edma.h> #include <fsl_flexio_mculcd.h> LOG_MODULE_REGISTER(display_mcux_flexio_lcdif, CONFIG_DISPLAY_LOG_LEVEL); struct stream { const struct device *dma_dev; uint32_t channel; /* stores the channel for dma */ struct dma_config dma_cfg; struct dma_block_config dma_blk_cfg; }; struct mcux_flexio_lcdif_config { FLEXIO_MCULCD_Type *flexio_lcd_dev; const struct device *flexio_dev; const struct pinctrl_dev_config *pincfg; const struct nxp_flexio_child *child; /* Reset GPIO */ const struct gpio_dt_spec reset; const struct gpio_dt_spec cs_gpio; const struct gpio_dt_spec rs_gpio; const struct gpio_dt_spec rdwr_gpio; }; struct mcux_flexio_lcdif_data { struct stream dma_tx; struct k_sem transfer_done; const struct mipi_dbi_config *active_cfg; uint8_t data_bus_width; }; static void flexio_lcdif_dma_callback(const struct device *dev, void *arg, uint32_t channel, int status) { const struct device *flexio_dev = (struct device *)arg; struct mcux_flexio_lcdif_data *lcdif_data = flexio_dev->data; const struct mcux_flexio_lcdif_config *config = flexio_dev->config; FLEXIO_MCULCD_Type *flexio_lcd = config->flexio_lcd_dev; FLEXIO_MCULCD_EnableTxDMA(flexio_lcd, false); /* Now the data are in shifter, wait for the data send out from the shifter. */ FLEXIO_MCULCD_WaitTransmitComplete(); /* Disable the TX shifter and the timer. */ FLEXIO_MCULCD_ClearMultiBeatsWriteConfig(flexio_lcd); /* De-assert nCS. 
*/ FLEXIO_MCULCD_StopTransfer(flexio_lcd); k_sem_give(&lcdif_data->transfer_done); } static void flexio_lcdif_set_cs(bool set, void *param) { const struct device *flexio_dev = (struct device *)param; const struct mcux_flexio_lcdif_config *config = flexio_dev->config; gpio_pin_set_dt(&config->cs_gpio, (int)set); } static void flexio_lcdif_set_rs(bool set, void *param) { const struct device *flexio_dev = (struct device *)param; const struct mcux_flexio_lcdif_config *config = flexio_dev->config; gpio_pin_set_dt(&config->rs_gpio, (int)set); } static void flexio_lcdif_set_rd_wr(bool set, void *param) { const struct device *flexio_dev = (struct device *)param; const struct mcux_flexio_lcdif_config *config = flexio_dev->config; gpio_pin_set_dt(&config->rdwr_gpio, (int)set); } static edma_modulo_t flexio_lcdif_get_edma_modulo(uint8_t shifterNum) { edma_modulo_t ret = kEDMA_ModuloDisable; switch (shifterNum) { case 1U: ret = kEDMA_Modulo4bytes; break; case 2U: ret = kEDMA_Modulo8bytes; break; case 4U: ret = kEDMA_Modulo16bytes; break; case 8U: ret = kEDMA_Modulo32bytes; break; default: ret = kEDMA_ModuloDisable; break; } return ret; } static void flexio_lcdif_write_data_array(FLEXIO_MCULCD_Type *base, const void *data, size_t size) { assert(size > 0U); uint32_t i; const uint8_t *data8Bit; FLEXIO_Type *flexioBase = base->flexioBase; /* Assert the RS pin. */ base->setRSPin(true, base->userData); /* For 6800, de-assert the RDWR pin. */ if (kFLEXIO_MCULCD_6800 == base->busType) { base->setRDWRPin(false, base->userData); } /* Configure the timer and TX shifter. */ FLEXIO_MCULCD_SetSingleBeatWriteConfig(base); data8Bit = (const uint8_t *)data; for (i = 0; i < size; i++) { flexioBase->SHIFTBUF[base->txShifterStartIndex] = data8Bit[i]; /* Wait for the data send out. */ while (0U == ((1UL << base->timerIndex) & flexioBase->TIMSTAT)) { } /* Clear the timer stat. */ flexioBase->TIMSTAT = 1UL << base->timerIndex; } /* Stop the timer and TX shifter. 
*/ FLEXIO_MCULCD_ClearSingleBeatWriteConfig(base); } static int mipi_dbi_flexio_lcdif_configure(const struct device *dev, const struct mipi_dbi_config *dbi_config) { const struct mcux_flexio_lcdif_config *config = dev->config; struct mcux_flexio_lcdif_data *lcdif_data = dev->data; flexio_mculcd_config_t flexioMcuLcdConfig; int err; uint32_t clock_freq; uint32_t mipi_mode = dbi_config->mode; status_t status; /* 9-bit mode is not supported by the SDK driver */ if ((mipi_mode == MIPI_DBI_MODE_6800_BUS_9_BIT) || (mipi_mode == MIPI_DBI_MODE_8080_BUS_9_BIT)) { return -EINVAL; } if (dbi_config == lcdif_data->active_cfg) { return 0; } err = gpio_pin_configure_dt(&config->cs_gpio, GPIO_OUTPUT_HIGH); if (err) { return err; } err = gpio_pin_configure_dt(&config->rs_gpio, GPIO_OUTPUT_HIGH); if (err) { return err; } if ((mipi_mode == MIPI_DBI_MODE_6800_BUS_16_BIT) || (mipi_mode == MIPI_DBI_MODE_6800_BUS_8_BIT)) { /* RDWR GPIO is only used in 68K mode */ err = gpio_pin_configure_dt(&config->rdwr_gpio, GPIO_OUTPUT_HIGH); if (err) { return err; } config->flexio_lcd_dev->busType = kFLEXIO_MCULCD_6800; } else { config->flexio_lcd_dev->busType = kFLEXIO_MCULCD_8080; } if ((mipi_mode == MIPI_DBI_MODE_6800_BUS_8_BIT) || (mipi_mode == MIPI_DBI_MODE_8080_BUS_8_BIT)) { lcdif_data->data_bus_width = 8; } else { lcdif_data->data_bus_width = 16; } FLEXIO_MCULCD_GetDefaultConfig(&flexioMcuLcdConfig); flexioMcuLcdConfig.baudRate_Bps = dbi_config->config.frequency * lcdif_data->data_bus_width; if (nxp_flexio_get_rate(config->flexio_dev, &clock_freq)) { return -EINVAL; } nxp_flexio_lock(config->flexio_dev); /* Resets the FlexIO module, then configures FlexIO MCULCD */ status = FLEXIO_MCULCD_Init(config->flexio_lcd_dev, &flexioMcuLcdConfig, clock_freq); nxp_flexio_unlock(config->flexio_dev); if (kStatus_Success != status) { return -EINVAL; } lcdif_data->active_cfg = dbi_config; return 0; } static int mipi_dbi_flexio_ldcif_write_display(const struct device *dev, const struct mipi_dbi_config 
*dbi_config, const uint8_t *framebuf, struct display_buffer_descriptor *desc, enum display_pixel_format pixfmt) { const struct mcux_flexio_lcdif_config *config = dev->config; struct mcux_flexio_lcdif_data *lcdif_data = dev->data; FLEXIO_MCULCD_Type *flexio_lcd = config->flexio_lcd_dev; struct dma_block_config *blk_cfg; struct stream *stream = &lcdif_data->dma_tx; uint8_t num_of_shifters = 0; int ret; ARG_UNUSED(pixfmt); ret = mipi_dbi_flexio_lcdif_configure(dev, dbi_config); if (ret) { return ret; } num_of_shifters = (flexio_lcd->txShifterEndIndex - flexio_lcd->txShifterStartIndex + 1); blk_cfg = &stream->dma_blk_cfg; /* Assert the nCS. */ FLEXIO_MCULCD_StartTransfer(config->flexio_lcd_dev); /* prepare the block for this TX DMA channel */ memset(blk_cfg, 0, sizeof(struct dma_block_config)); /* tx direction has memory as source and periph as dest. */ blk_cfg->source_address = (uint32_t)framebuf; /* Destination is FLEXIO Shifters */ blk_cfg->dest_address = FLEXIO_MCULCD_GetTxDataRegisterAddress(flexio_lcd); blk_cfg->block_size = desc->buf_size; /* Transfer in each DMA loop is based on the number of shifters used */ stream->dma_cfg.source_burst_length = num_of_shifters * 4; stream->dma_cfg.head_block = &stream->dma_blk_cfg; /* Give the client dev as arg, as the callback comes from the dma */ stream->dma_cfg.user_data = (struct device *)dev; /* Set the source size in bytes */ stream->dma_cfg.source_data_size = lcdif_data->data_bus_width / 8; /* Configure the DMA */ dma_config(lcdif_data->dma_tx.dma_dev, lcdif_data->dma_tx.channel, &stream->dma_cfg); /* The DMA driver does not support setting this Modulo value which is required * in case of the flexio module to form a circular chain between the Shift buffer * in the FLEXIO module. */ EDMA_SetModulo(DMA0, lcdif_data->dma_tx.channel, kEDMA_ModuloDisable, flexio_lcdif_get_edma_modulo(num_of_shifters)); /* For 6800, de-assert the RDWR pin. 
*/ if (kFLEXIO_MCULCD_6800 == flexio_lcd->busType) { flexio_lcdif_set_rd_wr(false, (void *)dev); } nxp_flexio_lock(config->flexio_dev); FLEXIO_MCULCD_SetMultiBeatsWriteConfig(flexio_lcd); FLEXIO_MCULCD_EnableTxDMA(flexio_lcd, true); nxp_flexio_unlock(config->flexio_dev); /* Start the data transfer */ dma_start(lcdif_data->dma_tx.dma_dev, lcdif_data->dma_tx.channel); /* Wait for transfer done. */ k_sem_take(&lcdif_data->transfer_done, K_FOREVER); return 0; } static int mipi_dbi_flexio_lcdif_command_write(const struct device *dev, const struct mipi_dbi_config *dbi_config, uint8_t cmd, const uint8_t *data_buf, size_t len) { const struct mcux_flexio_lcdif_config *config = dev->config; FLEXIO_MCULCD_Type *flexio_lcd = config->flexio_lcd_dev; int ret; ARG_UNUSED(dbi_config); ret = mipi_dbi_flexio_lcdif_configure(dev, dbi_config); if (ret) { return ret; } FLEXIO_MCULCD_StartTransfer(flexio_lcd); nxp_flexio_lock(config->flexio_dev); FLEXIO_MCULCD_WriteCommandBlocking(flexio_lcd, cmd); if ((data_buf != NULL) && (len != 0)) { flexio_lcdif_write_data_array(flexio_lcd, data_buf, len); } nxp_flexio_unlock(config->flexio_dev); FLEXIO_MCULCD_StopTransfer(flexio_lcd); return kStatus_Success; } static int mipi_dbi_flexio_lcdif_reset(const struct device *dev, k_timeout_t delay) { int err; const struct mcux_flexio_lcdif_config *config = dev->config; /* Check if a reset port is provided to reset the LCD controller */ if (config->reset.port == NULL) { return 0; } /* Reset the LCD controller. 
*/ err = gpio_pin_configure_dt(&config->reset, GPIO_OUTPUT_HIGH); if (err) { return err; } err = gpio_pin_set_dt(&config->reset, 0); if (err < 0) { return err; } k_sleep(delay); err = gpio_pin_set_dt(&config->reset, 1); if (err < 0) { return err; } LOG_DBG("%s device reset complete", dev->name); return 0; } static int flexio_lcdif_init(const struct device *dev) { const struct mcux_flexio_lcdif_config *config = dev->config; struct mcux_flexio_lcdif_data *lcdif_data = dev->data; int err; uint8_t shifter_end = config->child->res.shifter_count - 1; if (!device_is_ready(lcdif_data->dma_tx.dma_dev)) { LOG_ERR("%s device is not ready", lcdif_data->dma_tx.dma_dev->name); return -ENODEV; } err = nxp_flexio_child_attach(config->flexio_dev, config->child); if (err < 0) { return err; } config->flexio_lcd_dev->txShifterStartIndex = config->child->res.shifter_index[0]; config->flexio_lcd_dev->txShifterEndIndex = config->child->res.shifter_index[shifter_end]; config->flexio_lcd_dev->rxShifterStartIndex = config->flexio_lcd_dev->txShifterStartIndex; config->flexio_lcd_dev->rxShifterEndIndex = config->flexio_lcd_dev->txShifterEndIndex; config->flexio_lcd_dev->timerIndex = config->child->res.timer_index[0]; if (config->flexio_lcd_dev->txShifterEndIndex != config->flexio_lcd_dev->txShifterStartIndex + shifter_end) { LOG_ERR("Shifters should be continuous"); return -ENODEV; } err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (err) { return err; } /* Pass the FlexIO LCD device as parameter to the function * callbacks for setting GPIO signals. 
*/ config->flexio_lcd_dev->userData = (void *)dev; k_sem_init(&lcdif_data->transfer_done, 0, 1); LOG_DBG("%s device init complete", dev->name); return 0; } static struct mipi_dbi_driver_api mipi_dbi_lcdif_driver_api = { .reset = mipi_dbi_flexio_lcdif_reset, .command_write = mipi_dbi_flexio_lcdif_command_write, .write_display = mipi_dbi_flexio_ldcif_write_display, }; #define MCUX_FLEXIO_LCDIF_DEVICE_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ \ static FLEXIO_MCULCD_Type flexio_mculcd_##n = { \ .flexioBase = (FLEXIO_Type *)DT_REG_ADDR(DT_INST_PARENT(n)), \ .dataPinStartIndex = DT_INST_PROP(n, data_pin_start), \ .ENWRPinIndex = DT_INST_PROP(n, enwr_pin), \ .RDPinIndex = DT_INST_PROP_OR(n, rd_pin, 0), \ .setCSPin = flexio_lcdif_set_cs, \ .setRSPin = flexio_lcdif_set_rs, \ .setRDWRPin = flexio_lcdif_set_rd_wr, \ }; \ \ static uint8_t mcux_flexio_lcdif_shifters_##n[DT_INST_PROP(n, shifters_count)]; \ static uint8_t mcux_flexio_lcdif_timers_##n[DT_INST_PROP(n, timers_count)]; \ \ static const struct nxp_flexio_child lcdif_child_##n = { \ .isr = NULL, \ .user_data = (void *)DEVICE_DT_INST_GET(n), \ .res = { \ .shifter_index = mcux_flexio_lcdif_shifters_##n, \ .shifter_count = ARRAY_SIZE(mcux_flexio_lcdif_shifters_##n), \ .timer_index = mcux_flexio_lcdif_timers_##n, \ .timer_count = ARRAY_SIZE(mcux_flexio_lcdif_timers_##n), \ } \ }; \ \ struct mcux_flexio_lcdif_config mcux_flexio_lcdif_config_##n = { \ .flexio_lcd_dev = &flexio_mculcd_##n, \ .flexio_dev = DEVICE_DT_GET(DT_INST_PARENT(n)), \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .child = &lcdif_child_##n, \ .reset = GPIO_DT_SPEC_INST_GET(n, reset_gpios), \ .cs_gpio = GPIO_DT_SPEC_INST_GET(n, cs_gpios), \ .rs_gpio = GPIO_DT_SPEC_INST_GET(n, rs_gpios), \ .rdwr_gpio = GPIO_DT_SPEC_INST_GET_OR(n, rdwr_gpios, {0}), \ }; \ struct mcux_flexio_lcdif_data mcux_flexio_lcdif_data_##n = { \ .dma_tx = { \ .dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, tx)), \ .channel = DT_INST_DMAS_CELL_BY_NAME(n, tx, mux), \ .dma_cfg = { \ 
.channel_direction = MEMORY_TO_MEMORY, \ .dma_callback = flexio_lcdif_dma_callback, \ .dest_data_size = 4, \ .block_count = 1, \ .dma_slot = DT_INST_DMAS_CELL_BY_NAME(n, tx, source) \ } \ }, \ }; \ DEVICE_DT_INST_DEFINE(n, \ &flexio_lcdif_init, \ NULL, \ &mcux_flexio_lcdif_data_##n, \ &mcux_flexio_lcdif_config_##n, \ POST_KERNEL, \ CONFIG_MIPI_DBI_INIT_PRIORITY, \ &mipi_dbi_lcdif_driver_api); DT_INST_FOREACH_STATUS_OKAY(MCUX_FLEXIO_LCDIF_DEVICE_INIT) ```
/content/code_sandbox/drivers/mipi_dbi/mipi_dbi_nxp_flexio_lcdif.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,312
```unknown config MIPI_DBI_NXP_FLEXIO_LCDIF bool "MIPI DBI driver for NXP Flexio LCDIF" default y depends on DT_HAS_NXP_MIPI_DBI_FLEXIO_LCDIF_ENABLED depends on CLOCK_CONTROL select MCUX_FLEXIO select DMA help Enable support for MIPI DBI driver for NXP FlexIO based LCDIF controller. ```
/content/code_sandbox/drivers/mipi_dbi/Kconfig.nxp_flexio_lcdif
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
88
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_SMBUS_SMBUS_UTILS_H_ #define ZEPHYR_DRIVERS_SMBUS_SMBUS_UTILS_H_ #include <stdint.h> #include <zephyr/device.h> #include <zephyr/drivers/smbus.h> #include <zephyr/sys/slist.h> /** * @brief Generic function to insert a callback to a callback list * * @param callbacks A pointer to the original list of callbacks (can be NULL) * @param callback A pointer of the callback to insert to the list * * @return 0 on success, negative errno otherwise. */ static inline int smbus_callback_set(sys_slist_t *callbacks, struct smbus_callback *callback) { __ASSERT(callback, "No callback!"); __ASSERT(callback->handler, "No callback handler!"); if (!sys_slist_is_empty(callbacks)) { sys_slist_find_and_remove(callbacks, &callback->node); } sys_slist_prepend(callbacks, &callback->node); return 0; } /** * @brief Generic function to remove a callback from a callback list * * @param callbacks A pointer to the original list of callbacks (can be NULL) * @param callback A pointer of the callback to remove from the list * * @return 0 on success, negative errno otherwise. */ static inline int smbus_callback_remove(sys_slist_t *callbacks, struct smbus_callback *callback) { __ASSERT(callback, "No callback!"); __ASSERT(callback->handler, "No callback handler!"); if (sys_slist_is_empty(callbacks) || !sys_slist_find_and_remove(callbacks, &callback->node)) { return -ENOENT; } return 0; } /** * @brief Generic function to go through and fire callback from a callback list * * @param list A pointer on the SMBus callback list * @param dev A pointer on the SMBus device instance * @param addr A SMBus peripheral device address. 
*/ static inline void smbus_fire_callbacks(sys_slist_t *list, const struct device *dev, uint8_t addr) { struct smbus_callback *cb, *tmp; SYS_SLIST_FOR_EACH_CONTAINER_SAFE(list, cb, tmp, node) { if (cb->addr == addr) { __ASSERT(cb->handler, "No callback handler!"); cb->handler(dev, cb, addr); } } } /** * @brief Helper to initialize a struct smbus_callback properly * * @param callback A valid Application's callback structure pointer. * @param handler A valid handler function pointer. * @param addr A SMBus peripheral device address. */ static inline void smbus_init_callback(struct smbus_callback *callback, smbus_callback_handler_t handler, uint8_t addr) { __ASSERT(callback, "Callback pointer should not be NULL"); __ASSERT(handler, "Callback handler pointer should not be NULL"); callback->handler = handler; callback->addr = addr; } /** * @brief Helper for handling an SMB alert * * This loops through all devices which triggered the SMB alert and * fires the callbacks. * * @param dev SMBus device * @param callbacks list of SMB alert callbacks */ void smbus_loop_alert_devices(const struct device *dev, sys_slist_t *callbacks); #endif /* ZEPHYR_DRIVERS_SMBUS_SMBUS_UTILS_H_ */ ```
/content/code_sandbox/drivers/smbus/smbus_utils.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
726
```c /* * */ #define DT_DRV_COMPAT renesas_smartbond_mipi_dbi #include <zephyr/drivers/mipi_dbi.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/gpio.h> #include <zephyr/irq.h> #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/pm/device.h> #include <zephyr/pm/policy.h> #include <DA1469xAB.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/smartbond_clock_control.h> #include <zephyr/drivers/display.h> #include <zephyr/sys/util.h> #include <zephyr/drivers/spi.h> #include <da1469x_lcdc.h> #include <da1469x_pd.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(smartbond_mipi_dbi, CONFIG_MIPI_DBI_LOG_LEVEL); #define SMARTBOND_IRQN DT_INST_IRQN(0) #define SMARTBOND_IRQ_PRIO DT_INST_IRQ(0, priority) #define PINCTRL_STATE_READ PINCTRL_STATE_PRIV_START #define MIPI_DBI_SMARTBOND_IS_READ_SUPPORTED \ DT_INST_NODE_HAS_PROP(0, spi_dev) #define LCDC_SMARTBOND_CLK_DIV(_freq) \ ((32000000U % (_freq)) ? (96000000U / (_freq)) : (32000000U / (_freq))) #define MIPI_DBI_SMARTBOND_IS_PLL_REQUIRED \ !!(32000000U % DT_PROP(DT_CHOSEN(zephyr_display), mipi_max_frequency)) #define MIPI_DBI_SMARTBOND_IS_TE_ENABLED \ DT_INST_PROP_OR(0, te_enable, 0) #define MIPI_DBI_SMARTBOND_IS_DMA_PREFETCH_ENABLED \ DT_INST_ENUM_IDX_OR(0, dma_prefetch, 0) #define MIPI_DBI_SMARTBOND_IS_RESET_AVAILABLE \ DT_INST_NODE_HAS_PROP(0, reset_gpios) #define LCDC_LAYER0_OFFSETX_REG_SET_FIELD(_field, _var, _val) \ ((_var)) = \ ((_var) & ~(LCDC_LCDC_LAYER0_OFFSETX_REG_ ## _field ## _Msk)) | \ (((_var) << LCDC_LCDC_LAYER0_OFFSETX_REG_ ## _field ## _Pos) & \ LCDC_LCDC_LAYER0_OFFSETX_REG_ ## _field ## _Msk) struct mipi_dbi_smartbond_data { /* Provide mutual exclusion when a display operation is requested. 
*/ struct k_sem device_sem; /* Provide synchronization between task return and ISR firing */ struct k_sem sync_sem; /* Flag indicating whether or not an underflow took place */ volatile bool underflow_flag; /* Layer settings */ lcdc_smartbond_layer_cfg layer; }; struct mipi_dbi_smartbond_config { /* Reference to device instance's pinctrl configurations */ const struct pinctrl_dev_config *pcfg; /* Reset GPIO */ const struct gpio_dt_spec reset; /* Host controller's timing settings */ lcdc_smartbond_timing_cfg timing_cfg; /* Background default color configuration */ lcdc_smartbond_bgcolor_cfg bgcolor_cfg; }; /* Mark the device is progress and so it's not allowed to enter the sleep state. */ static inline void mipi_dbi_smartbond_pm_policy_state_lock_get(void) { /* * Prevent the SoC from etering the normal sleep state as PDC does not support * waking up the application core following LCDC events. */ pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES); } /* Mark that device is inactive and so it's allowed to enter the sleep state */ static inline void mipi_dbi_smartbond_pm_policy_state_lock_put(void) { /* Allow the SoC to enter the nornmal sleep state once LCDC is inactive */ pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES); } /* Helper function to trigger the LCDC fetching data from frame buffer to the connected display */ static void mipi_dbi_smartbond_send_single_frame(const struct device *dev) { struct mipi_dbi_smartbond_data *data = dev->data; #if MIPI_DBI_SMARTBOND_IS_TE_ENABLED da1469x_lcdc_te_set_status(true, DT_INST_PROP_OR(0, te_polarity, false)); /* * Wait for the TE signal to be asserted so display's refresh status can be synchronized * with the current frame update. 
*/ k_sem_take(&data->sync_sem, K_FOREVER); #endif LCDC->LCDC_INTERRUPT_REG |= LCDC_LCDC_INTERRUPT_REG_LCDC_VSYNC_IRQ_EN_Msk; /* Setting this bit will enable the host to start outputing pixel data */ LCDC->LCDC_MODE_REG |= LCDC_LCDC_MODE_REG_LCDC_SFRAME_UPD_Msk; /* Wait for frame update to complete */ k_sem_take(&data->sync_sem, K_FOREVER); if (data->underflow_flag) { LOG_WRN("Underflow took place"); data->underflow_flag = false; } } #if MIPI_DBI_SMARTBOND_IS_RESET_AVAILABLE static int mipi_dbi_smartbond_reset(const struct device *dev, k_timeout_t delay) { const struct mipi_dbi_smartbond_config *config = dev->config; int ret; if (!gpio_is_ready_dt(&config->reset)) { LOG_ERR("Reset signal not available"); return -ENODEV; } ret = gpio_pin_set_dt(&config->reset, 1); if (ret < 0) { LOG_ERR("Cannot drive reset signal"); return ret; } k_sleep(delay); return gpio_pin_set_dt(&config->reset, 0); } #endif /* Display pixel to output color format translation */ static inline uint8_t lcdc_smartbond_pixel_to_ocm(enum display_pixel_format pixfmt) { switch (pixfmt) { case PIXEL_FORMAT_RGB_565: return (uint8_t)LCDC_SMARTBOND_OCM_RGB565; case PIXEL_FORMAT_RGB_888: return (uint8_t)LCDC_SMARTBOND_OCM_RGB888; case PIXEL_FORMAT_MONO10: return (uint8_t)LCDC_SMARTBOND_L0_L1; default: LOG_ERR("Unsupported pixel format"); return 0; }; } static inline uint8_t lcdc_smartbond_line_mode_translation(uint8_t mode) { switch (mode) { case MIPI_DBI_MODE_SPI_3WIRE: return (uint8_t)LCDC_SMARTBOND_MODE_SPI3; case MIPI_DBI_MODE_SPI_4WIRE: return (uint8_t)LCDC_SMARTBOND_MODE_SPI4; default: LOG_ERR("Unsupported SPI mode"); return 0; } } static inline uint8_t lcdc_smartbond_pixel_to_lcm(enum display_pixel_format pixfmt) { switch (pixfmt) { case PIXEL_FORMAT_RGB_565: return (uint8_t)LCDC_SMARTBOND_L0_RGB565; case PIXEL_FORMAT_ARGB_8888: return (uint8_t)LCDC_SMARTBOND_L0_ARGB8888; default: LOG_ERR("Unsupported pixel format"); return 0; }; } static void lcdc_smartbond_mipi_dbi_translation(const struct 
mipi_dbi_config *dbi_config, lcdc_smartbond_mipi_dbi_cfg *mipi_dbi_cfg, enum display_pixel_format pixfmt) { mipi_dbi_cfg->cpha = dbi_config->config.operation & SPI_MODE_CPHA; mipi_dbi_cfg->cpol = dbi_config->config.operation & SPI_MODE_CPOL; mipi_dbi_cfg->cs_active_high = dbi_config->config.operation & SPI_CS_ACTIVE_HIGH; mipi_dbi_cfg->line_mode = lcdc_smartbond_line_mode_translation(dbi_config->mode); mipi_dbi_cfg->color_mode = lcdc_smartbond_pixel_to_ocm(pixfmt); } #if MIPI_DBI_SMARTBOND_IS_READ_SUPPORTED static int mipi_dbi_smartbond_command_read(const struct device *dev, const struct mipi_dbi_config *dbi_config, uint8_t *cmd, size_t num_cmds, uint8_t *response, size_t len) { struct mipi_dbi_smartbond_data *data = dev->data; const struct mipi_dbi_smartbond_config *config = dev->config; int ret = 0; lcdc_smartbond_mipi_dbi_cfg mipi_dbi_cfg; k_sem_take(&data->device_sem, K_FOREVER); mipi_dbi_smartbond_pm_policy_state_lock_get(); /* * Add an arbitrary valid color format to satisfy subroutine. The MIPI DBI command/data * engine should not be affected. */ lcdc_smartbond_mipi_dbi_translation(dbi_config, &mipi_dbi_cfg, PIXEL_FORMAT_RGB_565); ret = da1469x_lcdc_mipi_dbi_interface_configure(&mipi_dbi_cfg); if (ret < 0) { goto _mipi_dbi_read_exit; } /* Check if the cmd/data engine is busy since the #CS line will be overruled. */ if (da1469x_lcdc_is_busy()) { LOG_WRN("MIPI DBI host is busy"); ret = -EBUSY; goto _mipi_dbi_read_exit; } /* Force CS line to low. 
Typically, command and data are bound in the same #CS assertion */ da1469x_lcdc_force_cs_line(true, mipi_dbi_cfg.cs_active_high); da1469x_lcdc_send_cmd_data(true, cmd, num_cmds); if (len) { const struct device *spi_dev = DEVICE_DT_GET(DT_INST_PHANDLE(0, spi_dev)); struct spi_buf buffer = { .buf = (void *)response, .len = len, }; struct spi_buf_set buf_set = { .buffers = &buffer, .count = 1, }; if (!device_is_ready(spi_dev)) { LOG_ERR("SPI device is not ready"); ret = -ENODEV; goto _mipi_dbi_read_exit; } /* Overwrite CLK and enable DI lines. CS is driven forcefully. */ ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_READ); if (ret < 0) { LOG_ERR("Could not apply MIPI DBI pins' SPI read state (%d)", ret); goto _mipi_dbi_read_exit; } /* Get response */ ret = spi_read(spi_dev, &dbi_config->config, &buf_set); if (ret < 0) { LOG_ERR("Could not read data from SPI"); goto _mipi_dbi_read_exit; } } _mipi_dbi_read_exit: /* Restore #CS line */ da1469x_lcdc_force_cs_line(false, mipi_dbi_cfg.cs_active_high); /* Make sure default LCDC pins are applied upon exit */ ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { LOG_ERR("Could not apply MIPI DBI pins' default state (%d)", ret); } mipi_dbi_smartbond_pm_policy_state_lock_put(); k_sem_give(&data->device_sem); return ret; } #endif static int mipi_dbi_smartbond_command_write(const struct device *dev, const struct mipi_dbi_config *dbi_config, uint8_t cmd, const uint8_t *data_buf, size_t len) { struct mipi_dbi_smartbond_data *data = dev->data; int ret = 0; lcdc_smartbond_mipi_dbi_cfg mipi_dbi_cfg; k_sem_take(&data->device_sem, K_FOREVER); mipi_dbi_smartbond_pm_policy_state_lock_get(); /* * Add an arbitrary valid color format to satisfy subroutine. The MIPI DBI command/data * engine should not be affected. 
*/ lcdc_smartbond_mipi_dbi_translation(dbi_config, &mipi_dbi_cfg, PIXEL_FORMAT_RGB_565); ret = da1469x_lcdc_mipi_dbi_interface_configure(&mipi_dbi_cfg); if (ret < 0) { goto finish; } /* Command and accompanied data should be transmitted via the DBIB interface */ da1469x_lcdc_send_cmd_data(true, &cmd, 1); if (len) { /* Data should be transmitted via the DBIB interface */ da1469x_lcdc_send_cmd_data(false, data_buf, len); } finish: mipi_dbi_smartbond_pm_policy_state_lock_put(); k_sem_give(&data->device_sem); return ret; } static int mipi_dbi_smartbond_write_display(const struct device *dev, const struct mipi_dbi_config *dbi_config, const uint8_t *framebuf, struct display_buffer_descriptor *desc, enum display_pixel_format pixfmt) { struct mipi_dbi_smartbond_data *data = dev->data; const struct mipi_dbi_smartbond_config *config = dev->config; lcdc_smartbond_layer_cfg *layer = &data->layer; int ret = 0; lcdc_smartbond_mipi_dbi_cfg mipi_dbi_cfg; uint8_t layer_color = lcdc_smartbond_pixel_to_lcm(pixfmt); if (desc->width * desc->height * (DISPLAY_BITS_PER_PIXEL(pixfmt) / 8) != desc->buf_size) { LOG_ERR("Incorrect buffer size for given width and height"); return -EINVAL; } k_sem_take(&data->device_sem, K_FOREVER); mipi_dbi_smartbond_pm_policy_state_lock_get(); /* * Mainly check if the frame generator is busy with a pending frame update (might happen * when two frame updates take place one after the other and the display interface is * quite slow). VSYNC interrupt line should be asserted when the last line is being * outputed. 
*/ if (da1469x_lcdc_is_busy()) { LOG_WRN("MIPI DBI host is busy"); ret = -EBUSY; goto _mipi_dbi_write_exit; } lcdc_smartbond_mipi_dbi_translation(dbi_config, &mipi_dbi_cfg, pixfmt); ret = da1469x_lcdc_mipi_dbi_interface_configure(&mipi_dbi_cfg); if (ret < 0) { goto _mipi_dbi_write_exit; } ret = da1469x_lcdc_timings_configure(desc->width, desc->height, (lcdc_smartbond_timing_cfg *)&config->timing_cfg); if (ret < 0) { goto _mipi_dbi_write_exit; } LCDC_SMARTBOND_LAYER_CONFIG(layer, framebuf, 0, 0, desc->width, desc->height, layer_color, da1469x_lcdc_stride_calculation(layer_color, desc->width)); ret = da1469x_lcdc_layer_configure(layer); if (ret < 0) { goto _mipi_dbi_write_exit; } /* Trigger single frame update via the LCDC-DMA engine */ mipi_dbi_smartbond_send_single_frame(dev); _mipi_dbi_write_exit: mipi_dbi_smartbond_pm_policy_state_lock_put(); k_sem_give(&data->device_sem); return ret; } static int mipi_dbi_smartbond_configure(const struct device *dev) { uint8_t clk_div = LCDC_SMARTBOND_CLK_DIV(DT_PROP(DT_CHOSEN(zephyr_display), mipi_max_frequency)); const struct mipi_dbi_smartbond_config *config = dev->config; /* * First enable the controller so registers can be written. In serial interfaces * clock divider is further divided by 2. */ da1469x_lcdc_set_status(true, MIPI_DBI_SMARTBOND_IS_PLL_REQUIRED, (clk_div >= 2 ? 
clk_div / 2 : clk_div)); if (!da1469x_lcdc_check_id()) { LOG_ERR("Mismatching LCDC ID"); da1469x_lcdc_set_status(false, 0, 0); return -EINVAL; } da1469x_lcdc_te_set_status(false, DT_INST_PROP_OR(0, te_polarity, false)); da1469x_lcdc_bgcolor_configure((lcdc_smartbond_bgcolor_cfg *)&config->bgcolor_cfg); LCDC_LAYER0_OFFSETX_REG_SET_FIELD(LCDC_L0_DMA_PREFETCH, LCDC->LCDC_LAYER0_OFFSETX_REG, MIPI_DBI_SMARTBOND_IS_DMA_PREFETCH_ENABLED); return 0; } static void smartbond_mipi_dbi_isr(const void *arg) { struct mipi_dbi_smartbond_data *data = ((const struct device *)arg)->data; /* * Underflow sticky bit will remain high until cleared by writing * any value to LCDC_INTERRUPT_REG. */ data->underflow_flag = LCDC_STATUS_REG_GET_FIELD(LCDC_STICKY_UNDERFLOW); /* Default interrupt mode is level triggering so interrupt should be cleared */ da1469x_lcdc_te_set_status(false, DT_INST_PROP_OR(0, te_polarity, false)); k_sem_give(&data->sync_sem); } static int mipi_dbi_smartbond_resume(const struct device *dev) { const struct mipi_dbi_smartbond_config *config = dev->config; int ret; /* Select default state */ ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { LOG_ERR("Could not apply LCDC pins' default state (%d)", ret); return -EIO; } #if MIPI_DBI_SMARTBOND_IS_PLL_REQUIRED const struct device *clock_dev = DEVICE_DT_GET(DT_NODELABEL(osc)); if (!device_is_ready(clock_dev)) { LOG_WRN("Clock device is not available; PLL cannot be used"); } else { ret = z_smartbond_select_sys_clk(SMARTBOND_CLK_PLL96M); if (ret < 0) { LOG_WRN("Could not switch to PLL. Requested speed should not be achieved."); } } #endif return mipi_dbi_smartbond_configure(dev); } #if defined(CONFIG_PM_DEVICE) static int mipi_dbi_smartbond_suspend(const struct device *dev) { const struct mipi_dbi_smartbond_config *config = dev->config; int ret; /* Select sleep state; it's OK if settings fails for any reason. 
*/ ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_SLEEP); if (ret < 0) { LOG_WRN("Could not apply MIPI DBI pins' sleep state"); } /* Disable host controller to minimize power consumption. */ da1469x_lcdc_set_status(false, false, 0); return 0; } static int mipi_dbi_smartbond_pm_action(const struct device *dev, enum pm_device_action action) { int ret = 0; switch (action) { case PM_DEVICE_ACTION_SUSPEND: /* A non-zero value should not affect sleep */ (void)mipi_dbi_smartbond_suspend(dev); break; case PM_DEVICE_ACTION_RESUME: /* * The resume error code should not be taken into consideration * by the PM subsystem. */ ret = mipi_dbi_smartbond_resume(dev); break; default: return -ENOTSUP; } return ret; } #endif static int mipi_dbi_smartbond_init(const struct device *dev) { __unused const struct mipi_dbi_smartbond_config *config = dev->config; struct mipi_dbi_smartbond_data *data = dev->data; int ret; /* Device should be ready to be acquired */ k_sem_init(&data->device_sem, 1, 1); /* Event should be signaled by LCDC ISR */ k_sem_init(&data->sync_sem, 0, 1); #if MIPI_DBI_SMARTBOND_IS_RESET_AVAILABLE if (gpio_is_ready_dt(&config->reset)) { ret = gpio_pin_configure_dt(&config->reset, GPIO_OUTPUT_INACTIVE); if (ret < 0) { LOG_ERR("Could not configure reset line (%d)", ret); return -EIO; } } #endif IRQ_CONNECT(SMARTBOND_IRQN, SMARTBOND_IRQ_PRIO, smartbond_mipi_dbi_isr, DEVICE_DT_INST_GET(0), 0); ret = mipi_dbi_smartbond_resume(dev); return ret; } static const struct mipi_dbi_driver_api mipi_dbi_smartbond_driver_api = { #if MIPI_DBI_SMARTBOND_IS_RESET_AVAILABLE .reset = mipi_dbi_smartbond_reset, #endif .command_write = mipi_dbi_smartbond_command_write, .write_display = mipi_dbi_smartbond_write_display, #if MIPI_DBI_SMARTBOND_IS_READ_SUPPORTED .command_read = mipi_dbi_smartbond_command_read, #endif }; #define SMARTBOND_MIPI_DBI_INIT(inst) \ PINCTRL_DT_INST_DEFINE(inst); \ \ static const struct mipi_dbi_smartbond_config mipi_dbi_smartbond_config_## inst = { \ .pcfg = 
PINCTRL_DT_INST_DEV_CONFIG_GET(inst), \ .reset = GPIO_DT_SPEC_INST_GET_OR(inst, reset_gpios, {}), \ .timing_cfg = { 0 }, \ .bgcolor_cfg = { 0xFF, 0xFF, 0xFF, 0 }, \ }; \ \ static struct mipi_dbi_smartbond_data mipi_dbi_smartbond_data_## inst; \ \ PM_DEVICE_DT_INST_DEFINE(inst, mipi_dbi_smartbond_pm_action); \ \ DEVICE_DT_INST_DEFINE(inst, mipi_dbi_smartbond_init, \ PM_DEVICE_DT_INST_GET(inst), \ &mipi_dbi_smartbond_data_## inst, \ &mipi_dbi_smartbond_config_## inst, \ POST_KERNEL, \ CONFIG_MIPI_DBI_INIT_PRIORITY, \ &mipi_dbi_smartbond_driver_api); SMARTBOND_MIPI_DBI_INIT(0); ```
/content/code_sandbox/drivers/mipi_dbi/mipi_dbi_smartbond.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,876
```c /* */ #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/drivers/i2c.h> #include <zephyr/drivers/i2c/stm32.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/smbus.h> #include <zephyr/logging/log.h> #include <zephyr/sys/byteorder.h> #include <soc.h> #include "smbus_utils.h" LOG_MODULE_REGISTER(stm32_smbus, CONFIG_SMBUS_LOG_LEVEL); struct smbus_stm32_config { const struct pinctrl_dev_config *pcfg; const struct device *i2c_dev; }; struct smbus_stm32_data { uint32_t config; const struct device *dev; #ifdef CONFIG_SMBUS_STM32_SMBALERT sys_slist_t smbalert_callbacks; struct k_work smbalert_work; #endif /* CONFIG_SMBUS_STM32_SMBALERT */ }; #ifdef CONFIG_SMBUS_STM32_SMBALERT static void smbus_stm32_smbalert_isr(const struct device *dev) { struct smbus_stm32_data *data = dev->data; k_work_submit(&data->smbalert_work); } static void smbus_stm32_smbalert_work(struct k_work *work) { struct smbus_stm32_data *data = CONTAINER_OF(work, struct smbus_stm32_data, smbalert_work); const struct device *dev = data->dev; LOG_DBG("%s: got SMB alert", dev->name); smbus_loop_alert_devices(dev, &data->smbalert_callbacks); } static int smbus_stm32_smbalert_set_cb(const struct device *dev, struct smbus_callback *cb) { struct smbus_stm32_data *data = dev->data; return smbus_callback_set(&data->smbalert_callbacks, cb); } static int smbus_stm32_smbalert_remove_cb(const struct device *dev, struct smbus_callback *cb) { struct smbus_stm32_data *data = dev->data; return smbus_callback_remove(&data->smbalert_callbacks, cb); } #endif /* CONFIG_SMBUS_STM32_SMBALERT */ static int smbus_stm32_init(const struct device *dev) { const struct smbus_stm32_config *config = dev->config; struct smbus_stm32_data *data = dev->data; int result; data->dev = dev; if (!device_is_ready(config->i2c_dev)) { LOG_ERR("%s: I2C device is not ready", dev->name); return -ENODEV; } result = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (result < 0) { LOG_ERR("%s: pinctrl setup 
failed (%d)", dev->name, result); return result; } #ifdef CONFIG_SMBUS_STM32_SMBALERT k_work_init(&data->smbalert_work, smbus_stm32_smbalert_work); i2c_stm32_smbalert_set_callback(config->i2c_dev, smbus_stm32_smbalert_isr, dev); #endif /* CONFIG_SMBUS_STM32_SMBALERT */ return 0; } static int smbus_stm32_configure(const struct device *dev, uint32_t config_value) { const struct smbus_stm32_config *config = dev->config; struct smbus_stm32_data *data = dev->data; if (config_value & SMBUS_MODE_PEC) { LOG_ERR("%s: not implemented", dev->name); return -EINVAL; } if (config_value & SMBUS_MODE_HOST_NOTIFY) { LOG_ERR("%s: not available", dev->name); return -EINVAL; } if (config_value & SMBUS_MODE_CONTROLLER) { LOG_DBG("%s: configuring SMB in host mode", dev->name); i2c_stm32_set_smbus_mode(config->i2c_dev, I2CSTM32MODE_SMBUSHOST); } else { LOG_DBG("%s: configuring SMB in device mode", dev->name); i2c_stm32_set_smbus_mode(config->i2c_dev, I2CSTM32MODE_SMBUSDEVICE); } if (config_value & SMBUS_MODE_SMBALERT) { LOG_DBG("%s: activating SMB alert", dev->name); i2c_stm32_smbalert_enable(config->i2c_dev); } else { LOG_DBG("%s: deactivating SMB alert", dev->name); i2c_stm32_smbalert_disable(config->i2c_dev); } data->config = config_value; return 0; } static int smbus_stm32_get_config(const struct device *dev, uint32_t *config) { struct smbus_stm32_data *data = dev->data; *config = data->config; return 0; } static int smbus_stm32_quick(const struct device *dev, uint16_t periph_addr, enum smbus_direction rw) { const struct smbus_stm32_config *config = dev->config; switch (rw) { case SMBUS_MSG_WRITE: return i2c_write(config->i2c_dev, NULL, 0, periph_addr); case SMBUS_MSG_READ: return i2c_read(config->i2c_dev, NULL, 0, periph_addr); default: LOG_ERR("%s: invalid smbus direction %i", dev->name, rw); return -EINVAL; } } static int smbus_stm32_byte_write(const struct device *dev, uint16_t periph_addr, uint8_t command) { const struct smbus_stm32_config *config = dev->config; return 
i2c_write(config->i2c_dev, &command, sizeof(command), periph_addr); } static int smbus_stm32_byte_read(const struct device *dev, uint16_t periph_addr, uint8_t *byte) { const struct smbus_stm32_config *config = dev->config; return i2c_read(config->i2c_dev, byte, sizeof(*byte), periph_addr); } static int smbus_stm32_byte_data_write(const struct device *dev, uint16_t periph_addr, uint8_t command, uint8_t byte) { const struct smbus_stm32_config *config = dev->config; uint8_t buffer[] = { command, byte, }; return i2c_write(config->i2c_dev, buffer, ARRAY_SIZE(buffer), periph_addr); } static int smbus_stm32_byte_data_read(const struct device *dev, uint16_t periph_addr, uint8_t command, uint8_t *byte) { const struct smbus_stm32_config *config = dev->config; return i2c_write_read(config->i2c_dev, periph_addr, &command, sizeof(command), byte, sizeof(*byte)); } static int smbus_stm32_word_data_write(const struct device *dev, uint16_t periph_addr, uint8_t command, uint16_t word) { const struct smbus_stm32_config *config = dev->config; uint8_t buffer[sizeof(command) + sizeof(word)]; buffer[0] = command; sys_put_le16(word, buffer + 1); return i2c_write(config->i2c_dev, buffer, ARRAY_SIZE(buffer), periph_addr); } static int smbus_stm32_word_data_read(const struct device *dev, uint16_t periph_addr, uint8_t command, uint16_t *word) { const struct smbus_stm32_config *config = dev->config; int result; result = i2c_write_read(config->i2c_dev, periph_addr, &command, sizeof(command), word, sizeof(*word)); *word = sys_le16_to_cpu(*word); return result; } static int smbus_stm32_pcall(const struct device *dev, uint16_t periph_addr, uint8_t command, uint16_t send_word, uint16_t *recv_word) { const struct smbus_stm32_config *config = dev->config; uint8_t buffer[sizeof(command) + sizeof(send_word)]; int result; buffer[0] = command; sys_put_le16(send_word, buffer + 1); result = i2c_write_read(config->i2c_dev, periph_addr, buffer, ARRAY_SIZE(buffer), recv_word, sizeof(*recv_word)); *recv_word = 
sys_le16_to_cpu(*recv_word); return result; } static int smbus_stm32_block_write(const struct device *dev, uint16_t periph_addr, uint8_t command, uint8_t count, uint8_t *buf) { const struct smbus_stm32_config *config = dev->config; struct i2c_msg messages[] = { { .buf = &command, .len = sizeof(command), .flags = 0, }, { .buf = buf, .len = count, .flags = 0, }, }; return i2c_transfer(config->i2c_dev, messages, ARRAY_SIZE(messages), periph_addr); } static const struct smbus_driver_api smbus_stm32_api = { .configure = smbus_stm32_configure, .get_config = smbus_stm32_get_config, .smbus_quick = smbus_stm32_quick, .smbus_byte_write = smbus_stm32_byte_write, .smbus_byte_read = smbus_stm32_byte_read, .smbus_byte_data_write = smbus_stm32_byte_data_write, .smbus_byte_data_read = smbus_stm32_byte_data_read, .smbus_word_data_write = smbus_stm32_word_data_write, .smbus_word_data_read = smbus_stm32_word_data_read, .smbus_pcall = smbus_stm32_pcall, .smbus_block_write = smbus_stm32_block_write, #ifdef CONFIG_SMBUS_STM32_SMBALERT .smbus_smbalert_set_cb = smbus_stm32_smbalert_set_cb, .smbus_smbalert_remove_cb = smbus_stm32_smbalert_remove_cb, #else .smbus_smbalert_set_cb = NULL, .smbus_smbalert_remove_cb = NULL, #endif /* CONFIG_SMBUS_STM32_SMBALERT */ .smbus_block_read = NULL, .smbus_block_pcall = NULL, .smbus_host_notify_set_cb = NULL, .smbus_host_notify_remove_cb = NULL, }; #define DT_DRV_COMPAT st_stm32_smbus #define SMBUS_STM32_DEVICE_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ static struct smbus_stm32_config smbus_stm32_config_##n = { \ .i2c_dev = DEVICE_DT_GET(DT_INST_PROP(n, i2c)), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ }; \ \ static struct smbus_stm32_data smbus_stm32_data_##n; \ \ SMBUS_DEVICE_DT_INST_DEFINE(n, smbus_stm32_init, NULL, &smbus_stm32_data_##n, \ &smbus_stm32_config_##n, POST_KERNEL, \ CONFIG_SMBUS_INIT_PRIORITY, &smbus_stm32_api); DT_INST_FOREACH_STATUS_OKAY(SMBUS_STM32_DEVICE_INIT) ```
/content/code_sandbox/drivers/smbus/smbus_stm32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,511
```c /* */ #include "smbus_utils.h" #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(smbus_utils, CONFIG_SMBUS_LOG_LEVEL); void smbus_loop_alert_devices(const struct device *dev, sys_slist_t *callbacks) { int result; uint8_t address; /** * There might be several peripheral devices which could have triggered the alert and * the one with the highest priority (lowest address) device wins the arbitration. In * any case, we will have to loop through all of them. * * The format of the transaction is: * * 0 1 2 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |S| Alert Addr |R|A| Address |X|N|P| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ while (true) { result = smbus_byte_read(dev, SMBUS_ADDRESS_ARA, &address); if (result != 0) { LOG_DBG("%s: no more peripheral devices left which triggered an alert", dev->name); return; } LOG_DBG("%s: address 0x%02X triggered an alert", dev->name, address); smbus_fire_callbacks(callbacks, dev, address); } } ```
/content/code_sandbox/drivers/smbus/smbus_utils.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
328
```c /* * */ #include <zephyr/sys/slist.h> #include <zephyr/internal/syscall_handler.h> #include <zephyr/drivers/smbus.h> static inline int z_vrfy_smbus_configure(const struct device *dev, uint32_t dev_config) { K_OOPS(K_SYSCALL_DRIVER_SMBUS(dev, configure)); return z_impl_smbus_configure(dev, dev_config); } #include <zephyr/syscalls/smbus_configure_mrsh.c> static inline int z_vrfy_smbus_get_config(const struct device *dev, uint32_t *dev_config) { K_OOPS(K_SYSCALL_DRIVER_SMBUS(dev, get_config)); K_OOPS(K_SYSCALL_MEMORY_WRITE(dev_config, sizeof(uint32_t))); return z_impl_smbus_get_config(dev, dev_config); } #include <zephyr/syscalls/smbus_get_config_mrsh.c> static inline int z_vrfy_smbus_quick(const struct device *dev, uint16_t addr, enum smbus_direction rw) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_SMBUS)); return z_impl_smbus_quick(dev, addr, rw); } #include <zephyr/syscalls/smbus_quick_mrsh.c> static inline int z_vrfy_smbus_byte_write(const struct device *dev, uint16_t addr, uint8_t byte) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_SMBUS)); return z_impl_smbus_byte_write(dev, addr, byte); } #include <zephyr/syscalls/smbus_byte_write_mrsh.c> static inline int z_vrfy_smbus_byte_read(const struct device *dev, uint16_t addr, uint8_t *byte) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_SMBUS)); K_OOPS(K_SYSCALL_MEMORY_WRITE(byte, sizeof(uint8_t))); return z_impl_smbus_byte_read(dev, addr, byte); } #include <zephyr/syscalls/smbus_byte_read_mrsh.c> static inline int z_vrfy_smbus_byte_data_write(const struct device *dev, uint16_t addr, uint8_t cmd, uint8_t byte) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_SMBUS)); return z_impl_smbus_byte_data_write(dev, addr, cmd, byte); } #include <zephyr/syscalls/smbus_byte_data_write_mrsh.c> static inline int z_vrfy_smbus_byte_data_read(const struct device *dev, uint16_t addr, uint8_t cmd, uint8_t *byte) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_SMBUS)); K_OOPS(K_SYSCALL_MEMORY_WRITE(byte, sizeof(uint8_t))); return 
z_impl_smbus_byte_data_read(dev, addr, cmd, byte); } #include <zephyr/syscalls/smbus_byte_data_read_mrsh.c> static inline int z_vrfy_smbus_word_data_write(const struct device *dev, uint16_t addr, uint8_t cmd, uint16_t word) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_SMBUS)); return z_impl_smbus_word_data_write(dev, addr, cmd, word); } #include <zephyr/syscalls/smbus_word_data_write_mrsh.c> static inline int z_vrfy_smbus_word_data_read(const struct device *dev, uint16_t addr, uint8_t cmd, uint16_t *word) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_SMBUS)); K_OOPS(K_SYSCALL_MEMORY_WRITE(word, sizeof(uint16_t))); return z_impl_smbus_word_data_read(dev, addr, cmd, word); } #include <zephyr/syscalls/smbus_word_data_read_mrsh.c> static inline int z_vrfy_smbus_pcall(const struct device *dev, uint16_t addr, uint8_t cmd, uint16_t send_word, uint16_t *recv_word) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_SMBUS)); K_OOPS(K_SYSCALL_MEMORY_WRITE(recv_word, sizeof(uint16_t))); return z_impl_smbus_pcall(dev, addr, cmd, send_word, recv_word); } #include <zephyr/syscalls/smbus_pcall_mrsh.c> static inline int z_vrfy_smbus_block_write(const struct device *dev, uint16_t addr, uint8_t cmd, uint8_t count, uint8_t *buf) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_SMBUS)); K_OOPS(K_SYSCALL_MEMORY_READ(buf, count)); return z_impl_smbus_block_write(dev, addr, cmd, count, buf); } #include <zephyr/syscalls/smbus_block_write_mrsh.c> static inline int z_vrfy_smbus_block_read(const struct device *dev, uint16_t addr, uint8_t cmd, uint8_t *count, uint8_t *buf) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_SMBUS)); K_OOPS(K_SYSCALL_MEMORY_WRITE(count, sizeof(uint8_t))); return z_impl_smbus_block_read(dev, addr, cmd, count, buf); } #include <zephyr/syscalls/smbus_block_read_mrsh.c> static inline int z_vrfy_smbus_block_pcall(const struct device *dev, uint16_t addr, uint8_t cmd, uint8_t snd_count, uint8_t *snd_buf, uint8_t *rcv_count, uint8_t *rcv_buf) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_SMBUS)); 
K_OOPS(K_SYSCALL_MEMORY_READ(snd_buf, snd_count)); K_OOPS(K_SYSCALL_MEMORY_WRITE(rcv_count, sizeof(uint8_t))); return z_impl_smbus_block_pcall(dev, addr, cmd, snd_count, snd_buf, rcv_count, rcv_buf); } #include <zephyr/syscalls/smbus_block_pcall_mrsh.c> static inline int z_vrfy_smbus_smbalert_remove_cb(const struct device *dev, struct smbus_callback *cb) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_SMBUS)); return z_impl_smbus_smbalert_remove_cb(dev, cb); } #include <zephyr/syscalls/smbus_smbalert_remove_cb_mrsh.c> static inline int z_vrfy_smbus_host_notify_remove_cb(const struct device *dev, struct smbus_callback *cb) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_SMBUS)); return z_impl_smbus_host_notify_remove_cb(dev, cb); } #include <zephyr/syscalls/smbus_host_notify_remove_cb_mrsh.c> ```
/content/code_sandbox/drivers/smbus/smbus_handlers.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,464
```objective-c /* * * Intel I/O Controller Hub (ICH) later renamed to Intel Platform Controller * Hub (PCH) SMBus driver. * * PCH provides SMBus 2.0 - compliant Host Controller. * */ #ifndef ZEPHYR_DRIVERS_SMBUS_PCH_H_ #define ZEPHYR_DRIVERS_SMBUS_PCH_H_ /* PCI Configuration Space registers */ /* Host Configuration (HCFG) - Offset 40h, 8 bits */ #define PCH_SMBUS_HCFG 0x10 #define PCH_SMBUS_HCFG_HST_EN BIT(0) /* Enable SMBus controller */ /* PCH SMBus I/O and Memory Mapped registers */ /* Host Status Register Address (HSTS) */ #define PCH_SMBUS_HSTS 0x00 #define PCH_SMBUS_HSTS_HOST_BUSY BIT(0) /* Host Busy */ #define PCH_SMBUS_HSTS_INTERRUPT BIT(1) /* Interrupt */ #define PCH_SMBUS_HSTS_DEV_ERROR BIT(2) /* Device Error */ #define PCH_SMBUS_HSTS_BUS_ERROR BIT(3) /* Bus Error */ #define PCH_SMBUS_HSTS_FAILED BIT(4) /* Failed */ #define PCH_SMBUS_HSTS_SMB_ALERT BIT(5) /* SMB Alert */ #define PCH_SMBUS_HSTS_INUSE BIT(6) /* In Use */ #define PCH_SMBUS_HSTS_BYTE_DONE BIT(7) /* Byte Done */ #define PCH_SMBUS_HSTS_ERROR (PCH_SMBUS_HSTS_DEV_ERROR | \ PCH_SMBUS_HSTS_BUS_ERROR | \ PCH_SMBUS_HSTS_FAILED) /* Host Control Register (HCTL) */ #define PCH_SMBUS_HCTL 0x02 /* Host Control */ #define PCH_SMBUS_HCTL_INTR_EN BIT(0) /* INT Enable */ #define PCH_SMBUS_HCTL_KILL BIT(1) /* Kill current trans */ #define PCH_SMBUS_HCTL_CMD GENMASK(4, 2) /* Command */ /* SMBUS Commands */ #define PCH_SMBUS_HCTL_CMD_QUICK (0 << 2) /* Quick cmd*/ #define PCH_SMBUS_HCTL_CMD_BYTE (1 << 2) /* Byte cmd */ #define PCH_SMBUS_HCTL_CMD_BYTE_DATA (2 << 2) /* Byte Data cmd */ #define PCH_SMBUS_HCTL_CMD_WORD_DATA (3 << 2) /* Word Data cmd */ #define PCH_SMBUS_HCTL_CMD_PROC_CALL (4 << 2) /* Process Call cmd */ #define PCH_SMBUS_HCTL_CMD_BLOCK (5 << 2) /* Block cmd */ #define PCH_SMBUS_HCTL_CMD_I2C_READ (6 << 2) /* I2C Read cmd */ #define PCH_SMBUS_HCTL_CMD_BLOCK_PROC (7 << 2) /* Block Process cmd */ #define PCH_SMBUS_HCTL_CMD_SET(cmd) (cmd << 2) #define PCH_SMBUS_HCTL_CMD_GET(val) (val & PCH_SMBUS_HCTL_CMD) 
#define PCH_SMBUS_HCTL_LAST_BYTE BIT(5) /* Last byte block op */ #define PCH_SMBUS_HCTL_START BIT(6) /* Start SMBUS cmd */ #define PCH_SMBUS_HCTL_PEC_EN BIT(7) /* Enable PEC */ /* Host Command Register (HCMD) */ #define PCH_SMBUS_HCMD 0x03 /* Transmit Slave Address Register (TSA) */ #define PCH_SMBUS_TSA 0x04 #define PCH_SMBUS_TSA_RW BIT(0) /* Direction */ #define PCH_SMBUS_TSA_ADDR_MASK GENMASK(7, 1) /* Address mask */ /* Set 7-bit address */ #define PCH_SMBUS_TSA_ADDR_SET(addr) (((addr) & BIT_MASK(7)) << 1) /* Get Peripheral address from register value */ #define PCH_SMBUS_TSA_ADDR_GET(reg) ((reg >> 1) & BIT_MASK(7)) /* Data 0 Register (HD0) */ #define PCH_SMBUS_HD0 0x05 /* Data 0 / Count */ /* Data 1 Register (HD1) */ #define PCH_SMBUS_HD1 0x06 /* Data 1 */ /* Host Block Data (HBD) */ #define PCH_SMBUS_HBD 0x07 /* Host block data */ /* Packet Error Check Data Register (PEC) */ #define PCH_SMBUS_PEC 0x08 /* PEC data */ /* Receive Slave Address Register (RSA) */ #define PCH_SMBUS_RSA 0x09 /* Receive slave addr */ /* Slave Data Register (SD) (16 bits) */ #define PCH_SMBUS_SD 0x0a /* Slave data */ /* Auxiliary Status (AUXS) */ #define PCH_SMBUS_AUXS 0x0c /* Auxiliary Status */ #define PCH_SMBUS_AUXS_CRC_ERROR BIT(0) /* CRC error */ /* Auxiliary Control (AUXC) */ #define PCH_SMBUS_AUXC 0x0d /* Auxiliary Control */ #define PCH_SMBUS_AUXC_AAC BIT(0) /* Auto append CRC */ #define PCH_SMBUS_AUXC_EN_32BUF BIT(1) /* Enable 32-byte buf */ /* SMLink Pin Control Register (SMLC) */ #define PCH_SMBUS_SMLC 0x0e /* SMLink pin control */ /* SMBus Pin control Register (SMBC) */ #define PCH_SMBUS_SMBC 0x0f /* SMBus pin control */ #define PCH_SMBUS_SMBC_CLK_CUR_STS BIT(0) /* SMBCLK pin status */ #define PCH_SMBUS_SMBC_DATA_CUR_STS BIT(1) /* SMBDATA pin status */ #define PCH_SMBUS_SMBC_CLK_CTL BIT(2) /* SMBCLK pin CTL */ /* Slave Status Register (SSTS) */ #define PCH_SMBUS_SSTS 0x10 /* Slave Status */ #define PCH_SMBUS_SSTS_HNS BIT(0) /* Host Notify Status */ /* Slave Command Register 
(SCMD) */ #define PCH_SMBUS_SCMD 0x11 /* Slave Command */ #define PCH_SMBUS_SCMD_HNI_EN BIT(0) /* Host Notify INT En */ #define PCH_SMBUS_SCMD_HNW_EN BIT(1) /* Host Notify Wake */ #define PCH_SMBUS_SCMD_SMBALERT_DIS BIT(2) /* Disable Smbalert */ /* Notify Device Address Register (NDA) */ #define PCH_SMBUS_NDA 0x14 /* Notify Device addr */ /* Notify Data Low Byte Register (NDLB) */ #define PCH_SMBUS_NDLB 0x16 /* Notify Data low */ /* Notify Data High Byte Register (NDHB) */ #define PCH_SMBUS_NDHB 0x17 /* Notify Data high */ /* Debug helpers */ #if CONFIG_SMBUS_LOG_LEVEL >= LOG_LEVEL_DBG /* Dump HSTS register using define to show calling function */ #define pch_dump_register_hsts(reg) \ LOG_DBG("HSTS: 0x%02x: %s%s%s%s%s%s%s%s", reg, \ reg & PCH_SMBUS_HSTS_HOST_BUSY ? "[Host Busy] " : "", \ reg & PCH_SMBUS_HSTS_INTERRUPT ? "[Interrupt] " : "", \ reg & PCH_SMBUS_HSTS_DEV_ERROR ? "[Device Error] " : "",\ reg & PCH_SMBUS_HSTS_BUS_ERROR ? "[Bus Error] " : "", \ reg & PCH_SMBUS_HSTS_FAILED ? "[Failed] " : "", \ reg & PCH_SMBUS_HSTS_SMB_ALERT ? "[SMBALERT] " : "", \ reg & PCH_SMBUS_HSTS_BYTE_DONE ? "[Byte Done] " : "", \ reg & PCH_SMBUS_HSTS_INUSE ? "[In USE] " : ""); #else #define pch_dump_register_hsts(reg) #endif #endif /* ZEPHYR_DRIVERS_SMBUS_PCH_H_ */ ```
/content/code_sandbox/drivers/smbus/intel_pch_smbus.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,892
```c /* * * Intel I/O Controller Hub (ICH) later renamed to Intel Platform Controller * Hub (PCH) SMBus driver. * * PCH provides SMBus 2.0 - compliant Host Controller. * */ #include <zephyr/types.h> #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/drivers/smbus.h> #include <zephyr/drivers/pcie/pcie.h> #define DT_DRV_COMPAT intel_pch_smbus #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(intel_pch, CONFIG_SMBUS_LOG_LEVEL); #include "smbus_utils.h" #include "intel_pch_smbus.h" /** * @note Following notions are used: * * periph_addr - Peripheral address (Slave address mentioned in the Specs) * * command - First byte to send in the SMBus protocol operations except for * Quick and Byte Read. Also known as register. */ /** * Intel PCH configuration acquired from DTS during device initialization */ struct pch_config { /* IRQ configuration function */ void (*config_func)(const struct device *dev); struct pcie_dev *pcie; }; /** * Intel PCH internal driver data */ struct pch_data { DEVICE_MMIO_RAM; io_port_t sba; uint32_t config; uint8_t status; struct k_mutex mutex; struct k_sem completion_sync; const struct device *dev; #if defined(CONFIG_SMBUS_INTEL_PCH_SMBALERT) /* smbalert callback list */ sys_slist_t smbalert_cbs; /* smbalert work */ struct k_work smb_alert_work; #endif /* CONFIG_SMBUS_INTEL_PCH_SMBALERT */ #if defined(CONFIG_SMBUS_INTEL_PCH_HOST_NOTIFY) /* Host Notify callback list */ sys_slist_t host_notify_cbs; /* Host Notify work */ struct k_work host_notify_work; /* Host Notify peripheral device address */ uint8_t notify_addr; /* Host Notify data received */ uint16_t notify_data; #endif /* CONFIG_SMBUS_INTEL_PCH_HOST_NOTIFY */ }; /** * Helpers for accessing Intel PCH SMBus registers. Depending on * configuration option MMIO or IO method will be used. 
*/ #if defined(CONFIG_SMBUS_INTEL_PCH_ACCESS_MMIO) static uint8_t pch_reg_read(const struct device *dev, uint8_t reg) { return sys_read8(DEVICE_MMIO_GET(dev) + reg); } static void pch_reg_write(const struct device *dev, uint8_t reg, uint8_t val) { sys_write8(val, DEVICE_MMIO_GET(dev) + reg); } #elif defined(CONFIG_SMBUS_INTEL_PCH_ACCESS_IO) static uint8_t pch_reg_read(const struct device *dev, uint8_t reg) { struct pch_data *data = dev->data; return sys_in8(data->sba + reg); } static void pch_reg_write(const struct device *dev, uint8_t reg, uint8_t val) { struct pch_data *data = dev->data; sys_out8(val, data->sba + reg); } #else #error Wrong PCH Register Access Mode #endif #if defined(CONFIG_SMBUS_INTEL_PCH_HOST_NOTIFY) static void host_notify_work(struct k_work *work) { struct pch_data *data = CONTAINER_OF(work, struct pch_data, host_notify_work); const struct device *dev = data->dev; uint8_t addr = data->notify_addr; smbus_fire_callbacks(&data->host_notify_cbs, dev, addr); } static int pch_smbus_host_notify_set_cb(const struct device *dev, struct smbus_callback *cb) { struct pch_data *data = dev->data; LOG_DBG("dev %p cb %p", dev, cb); return smbus_callback_set(&data->host_notify_cbs, cb); } static int pch_smbus_host_notify_remove_cb(const struct device *dev, struct smbus_callback *cb) { struct pch_data *data = dev->data; LOG_DBG("dev %p cb %p", dev, cb); return smbus_callback_remove(&data->host_notify_cbs, cb); } #endif /* CONFIG_SMBUS_INTEL_PCH_HOST_NOTIFY */ #if defined(CONFIG_SMBUS_INTEL_PCH_SMBALERT) static void smbalert_work(struct k_work *work) { struct pch_data *data = CONTAINER_OF(work, struct pch_data, smb_alert_work); const struct device *dev = data->dev; smbus_loop_alert_devices(dev, &data->smbalert_cbs); } static int pch_smbus_smbalert_set_sb(const struct device *dev, struct smbus_callback *cb) { struct pch_data *data = dev->data; LOG_DBG("dev %p cb %p", dev, cb); return smbus_callback_set(&data->smbalert_cbs, cb); } static int 
pch_smbus_smbalert_remove_sb(const struct device *dev, struct smbus_callback *cb) { struct pch_data *data = dev->data; LOG_DBG("dev %p cb %p", dev, cb); return smbus_callback_remove(&data->smbalert_cbs, cb); } #endif /* CONFIG_SMBUS_INTEL_PCH_SMBALERT */ static int pch_configure(const struct device *dev, uint32_t config) { struct pch_data *data = dev->data; LOG_DBG("dev %p config 0x%x", dev, config); if (config & SMBUS_MODE_HOST_NOTIFY) { uint8_t status; if (!IS_ENABLED(CONFIG_SMBUS_INTEL_PCH_HOST_NOTIFY)) { LOG_ERR("Error configuring Host Notify"); return -EINVAL; } /* Enable Host Notify interrupts */ status = pch_reg_read(dev, PCH_SMBUS_SCMD); status |= PCH_SMBUS_SCMD_HNI_EN; pch_reg_write(dev, PCH_SMBUS_SCMD, status); } if (config & SMBUS_MODE_SMBALERT) { uint8_t status; if (!IS_ENABLED(CONFIG_SMBUS_INTEL_PCH_SMBALERT)) { LOG_ERR("Error configuring SMBALERT"); return -EINVAL; } /* Disable SMBALERT_DIS */ status = pch_reg_read(dev, PCH_SMBUS_SCMD); status &= ~PCH_SMBUS_SCMD_SMBALERT_DIS; pch_reg_write(dev, PCH_SMBUS_SCMD, status); } /* Keep config for a moment */ data->config = config; return 0; } static int pch_get_config(const struct device *dev, uint32_t *config) { struct pch_data *data = dev->data; *config = data->config; return 0; } /* Device initialization function */ static int pch_smbus_init(const struct device *dev) { const struct pch_config * const config = dev->config; struct pch_data *data = dev->data; struct pcie_bar mbar; uint32_t val; if (config->pcie->bdf == PCIE_BDF_NONE) { LOG_ERR("Cannot probe PCI device"); return -ENODEV; } val = pcie_conf_read(config->pcie->bdf, PCIE_CONF_CMDSTAT); if (val & PCIE_CONF_CMDSTAT_INTERRUPT) { LOG_WRN("Pending interrupt, continuing"); } if (IS_ENABLED(CONFIG_SMBUS_INTEL_PCH_ACCESS_MMIO)) { pcie_probe_mbar(config->pcie->bdf, 0, &mbar); pcie_set_cmd(config->pcie->bdf, PCIE_CONF_CMDSTAT_MEM, true); device_map(DEVICE_MMIO_RAM_PTR(dev), mbar.phys_addr, mbar.size, K_MEM_CACHE_NONE); LOG_DBG("Mapped 0x%lx size 0x%lx to 
0x%lx", mbar.phys_addr, mbar.size, DEVICE_MMIO_GET(dev)); } else { pcie_set_cmd(config->pcie->bdf, PCIE_CONF_CMDSTAT_IO, true); val = pcie_conf_read(config->pcie->bdf, PCIE_CONF_BAR4); if (!PCIE_CONF_BAR_IO(val)) { LOG_ERR("Cannot read IO BAR"); return -EINVAL; } data->sba = PCIE_CONF_BAR_ADDR(val); LOG_DBG("Using I/O address 0x%x", data->sba); } val = pcie_conf_read(config->pcie->bdf, PCH_SMBUS_HCFG); if ((val & PCH_SMBUS_HCFG_HST_EN) == 0) { LOG_ERR("SMBus Host Controller is disabled"); return -EINVAL; } /* Initialize mutex and semaphore */ k_mutex_init(&data->mutex); k_sem_init(&data->completion_sync, 0, 1); data->dev = dev; /* Initialize work structures */ #if defined(CONFIG_SMBUS_INTEL_PCH_SMBALERT) k_work_init(&data->smb_alert_work, smbalert_work); #endif /* CONFIG_SMBUS_INTEL_PCH_SMBALERT */ #if defined(CONFIG_SMBUS_INTEL_PCH_HOST_NOTIFY) k_work_init(&data->host_notify_work, host_notify_work); #endif /* CONFIG_SMBUS_INTEL_PCH_HOST_NOTIFY */ config->config_func(dev); if (pch_configure(dev, SMBUS_MODE_CONTROLLER)) { LOG_ERR("SMBus: Cannot set default configuration"); return -EIO; } return 0; } static int pch_prepare_transfer(const struct device *dev) { uint8_t hsts; hsts = pch_reg_read(dev, PCH_SMBUS_HSTS); pch_dump_register_hsts(hsts); if (hsts & PCH_SMBUS_HSTS_HOST_BUSY) { LOG_ERR("Return BUSY status"); return -EBUSY; } /* Check and clear HSTS status bits */ hsts &= PCH_SMBUS_HSTS_ERROR | PCH_SMBUS_HSTS_BYTE_DONE | PCH_SMBUS_HSTS_INTERRUPT; if (hsts) { pch_reg_write(dev, PCH_SMBUS_HSTS, hsts); } /* TODO: Clear also CRC check bits */ return 0; } static int pch_check_status(const struct device *dev) { struct pch_data *data = dev->data; uint8_t status = data->status; /** * Device Error means following: * - unsupported Command Field Unclaimed Cycle * - Host Device timeout * - CRC Error */ if (status & PCH_SMBUS_HSTS_DEV_ERROR) { uint8_t auxs = pch_reg_read(dev, PCH_SMBUS_AUXS); LOG_WRN("Device Error (DERR) received"); if (auxs & PCH_SMBUS_AUXS_CRC_ERROR) { 
LOG_DBG("AUXS register 0x%02x", auxs); /* Clear CRC error status bit */ pch_reg_write(dev, PCH_SMBUS_AUXS, PCH_SMBUS_AUXS_CRC_ERROR); } return -EIO; } /** * Transaction collision, several masters are trying to access * the bus and PCH detects arbitration lost. */ if (status & PCH_SMBUS_HSTS_BUS_ERROR) { LOG_WRN("Bus Error (BERR) received"); return -EAGAIN; } /** * Source of interrupt is failed bus transaction. This is set in * response to KILL set to terminate the host transaction */ if (status & PCH_SMBUS_HSTS_FAILED) { LOG_WRN("Failed (FAIL) received"); return -EIO; } return 0; } static int pch_smbus_block_start(const struct device *dev, uint16_t periph_addr, uint8_t rw, uint8_t command, uint8_t count, uint8_t *buf, uint8_t protocol) { uint8_t reg; int ret; LOG_DBG("addr %x rw %d command %x", periph_addr, rw, command); /* Set TSA register */ reg = PCH_SMBUS_TSA_ADDR_SET(periph_addr); reg |= rw & SMBUS_MSG_RW_MASK; pch_reg_write(dev, PCH_SMBUS_TSA, reg); /* Set HCMD register */ pch_reg_write(dev, PCH_SMBUS_HCMD, command); /* Enable 32-byte buffer mode (E32b) to send block of data */ reg = pch_reg_read(dev, PCH_SMBUS_AUXC); reg |= PCH_SMBUS_AUXC_EN_32BUF; pch_reg_write(dev, PCH_SMBUS_AUXC, reg); /* In E32B mode read and write to PCH_SMBUS_HBD translates to * read and write to 32 byte storage array, index needs to be * cleared by reading HCTL */ reg = pch_reg_read(dev, PCH_SMBUS_HCTL); ARG_UNUSED(reg); /* Avoid 'Dead assignment' warning */ if (rw == SMBUS_MSG_WRITE) { /* Write count */ pch_reg_write(dev, PCH_SMBUS_HD0, count); /* Write data to send */ for (int i = 0; i < count; i++) { pch_reg_write(dev, PCH_SMBUS_HBD, buf[i]); } } ret = pch_prepare_transfer(dev); if (ret < 0) { return ret; } /* Set HCTL register */ reg = PCH_SMBUS_HCTL_CMD_SET(protocol); reg |= PCH_SMBUS_HCTL_START; reg |= PCH_SMBUS_HCTL_INTR_EN; pch_reg_write(dev, PCH_SMBUS_HCTL, reg); return 0; } /* Start PCH SMBus operation */ static int pch_smbus_start(const struct device *dev, uint16_t 
periph_addr, enum smbus_direction rw, uint8_t command, uint8_t *buf, uint8_t protocol) { uint8_t reg; int ret; LOG_DBG("addr 0x%02x rw %d command %x", periph_addr, rw, command); /* Set TSA register */ reg = PCH_SMBUS_TSA_ADDR_SET(periph_addr); reg |= rw & SMBUS_MSG_RW_MASK; pch_reg_write(dev, PCH_SMBUS_TSA, reg); /* Write command for every but QUICK op */ if (protocol != SMBUS_CMD_QUICK) { /* Set HCMD register */ pch_reg_write(dev, PCH_SMBUS_HCMD, command); /* Set Host Data 0 (HD0) register */ if (rw == SMBUS_MSG_WRITE && protocol != SMBUS_CMD_BYTE) { pch_reg_write(dev, PCH_SMBUS_HD0, buf[0]); /* If we need to write second byte */ if (protocol == SMBUS_CMD_WORD_DATA || protocol == SMBUS_CMD_PROC_CALL) { pch_reg_write(dev, PCH_SMBUS_HD1, buf[1]); } } } ret = pch_prepare_transfer(dev); if (ret < 0) { return ret; } /* Set HCTL register */ reg = PCH_SMBUS_HCTL_CMD_SET(protocol); reg |= PCH_SMBUS_HCTL_START; reg |= PCH_SMBUS_HCTL_INTR_EN; pch_reg_write(dev, PCH_SMBUS_HCTL, reg); return 0; } /* Implementation of PCH SMBus API */ /* Implementation of SMBus Quick */ static int pch_smbus_quick(const struct device *dev, uint16_t periph_addr, enum smbus_direction rw) { struct pch_data *data = dev->data; int ret; LOG_DBG("dev %p addr 0x%02x direction %x", dev, periph_addr, rw); k_mutex_lock(&data->mutex, K_FOREVER); ret = pch_smbus_start(dev, periph_addr, rw, 0, NULL, SMBUS_CMD_QUICK); if (ret < 0) { goto unlock; } /* Wait for completion from ISR */ ret = k_sem_take(&data->completion_sync, K_MSEC(30)); if (ret != 0) { LOG_ERR("SMBus Quick timed out"); ret = -ETIMEDOUT; goto unlock; } ret = pch_check_status(dev); unlock: k_mutex_unlock(&data->mutex); return ret; } /* Implementation of SMBus Byte Write */ static int pch_smbus_byte_write(const struct device *dev, uint16_t periph_addr, uint8_t command) { struct pch_data *data = dev->data; int ret; LOG_DBG("dev %p addr 0x%02x command 0x%02x", dev, periph_addr, command); k_mutex_lock(&data->mutex, K_FOREVER); ret = 
pch_smbus_start(dev, periph_addr, SMBUS_MSG_WRITE, command, NULL, SMBUS_CMD_BYTE); if (ret < 0) { goto unlock; } /* Wait for completion from ISR */ ret = k_sem_take(&data->completion_sync, K_MSEC(30)); if (ret != 0) { LOG_ERR("SMBus Byte Write timed out"); ret = -ETIMEDOUT; goto unlock; } ret = pch_check_status(dev); unlock: k_mutex_unlock(&data->mutex); return ret; } /* Implementation of SMBus Byte Read */ static int pch_smbus_byte_read(const struct device *dev, uint16_t periph_addr, uint8_t *byte) { struct pch_data *data = dev->data; int ret; LOG_DBG("dev %p addr 0x%02x", dev, periph_addr); k_mutex_lock(&data->mutex, K_FOREVER); ret = pch_smbus_start(dev, periph_addr, SMBUS_MSG_READ, 0, NULL, SMBUS_CMD_BYTE); if (ret < 0) { goto unlock; } /* Wait for completion from ISR */ ret = k_sem_take(&data->completion_sync, K_MSEC(30)); if (ret != 0) { LOG_ERR("SMBus Byte Read timed out"); ret = -ETIMEDOUT; goto unlock; } ret = pch_check_status(dev); if (ret < 0) { goto unlock; } *byte = pch_reg_read(dev, PCH_SMBUS_HD0); unlock: k_mutex_unlock(&data->mutex); return ret; } /* Implementation of SMBus Byte Data Write */ static int pch_smbus_byte_data_write(const struct device *dev, uint16_t periph_addr, uint8_t command, uint8_t byte) { struct pch_data *data = dev->data; int ret; LOG_DBG("dev %p addr 0x%02x command 0x%02x", dev, periph_addr, command); k_mutex_lock(&data->mutex, K_FOREVER); ret = pch_smbus_start(dev, periph_addr, SMBUS_MSG_WRITE, command, &byte, SMBUS_CMD_BYTE_DATA); if (ret < 0) { goto unlock; } /* Wait for completion from ISR */ ret = k_sem_take(&data->completion_sync, K_MSEC(30)); if (ret != 0) { LOG_ERR("SMBus Byte Data Write timed out"); ret = -ETIMEDOUT; goto unlock; } ret = pch_check_status(dev); unlock: k_mutex_unlock(&data->mutex); return ret; } /* Implementation of SMBus Byte Data Read */ static int pch_smbus_byte_data_read(const struct device *dev, uint16_t periph_addr, uint8_t command, uint8_t *byte) { struct pch_data *data = dev->data; int ret; 
LOG_DBG("dev %p addr 0x%02x command 0x%02x", dev, periph_addr, command); k_mutex_lock(&data->mutex, K_FOREVER); ret = pch_smbus_start(dev, periph_addr, SMBUS_MSG_READ, command, NULL, SMBUS_CMD_BYTE_DATA); if (ret < 0) { goto unlock; } /* Wait for completion from ISR */ ret = k_sem_take(&data->completion_sync, K_MSEC(30)); if (ret != 0) { LOG_ERR("SMBus Byte Data Read timed out"); ret = -ETIMEDOUT; goto unlock; } ret = pch_check_status(dev); if (ret < 0) { goto unlock; } *byte = pch_reg_read(dev, PCH_SMBUS_HD0); unlock: k_mutex_unlock(&data->mutex); return ret; } /* Implementation of SMBus Word Data Write */ static int pch_smbus_word_data_write(const struct device *dev, uint16_t periph_addr, uint8_t command, uint16_t word) { struct pch_data *data = dev->data; int ret; LOG_DBG("dev %p addr 0x%02x command 0x%02x", dev, periph_addr, command); k_mutex_lock(&data->mutex, K_FOREVER); ret = pch_smbus_start(dev, periph_addr, SMBUS_MSG_WRITE, command, (uint8_t *)&word, SMBUS_CMD_WORD_DATA); if (ret < 0) { goto unlock; } /* Wait for completion from ISR */ ret = k_sem_take(&data->completion_sync, K_MSEC(30)); if (ret != 0) { LOG_ERR("SMBus Word Data Write timed out"); ret = -ETIMEDOUT; goto unlock; } ret = pch_check_status(dev); unlock: k_mutex_unlock(&data->mutex); return ret; } /* Implementation of SMBus Word Data Read */ static int pch_smbus_word_data_read(const struct device *dev, uint16_t periph_addr, uint8_t command, uint16_t *word) { struct pch_data *data = dev->data; uint8_t *p = (uint8_t *)word; int ret; LOG_DBG("dev %p addr 0x%02x command 0x%02x", dev, periph_addr, command); k_mutex_lock(&data->mutex, K_FOREVER); ret = pch_smbus_start(dev, periph_addr, SMBUS_MSG_READ, command, NULL, SMBUS_CMD_WORD_DATA); if (ret < 0) { goto unlock; } /* Wait for completion from ISR */ ret = k_sem_take(&data->completion_sync, K_MSEC(30)); if (ret != 0) { LOG_ERR("SMBus Word Data Read timed out"); ret = -ETIMEDOUT; goto unlock; } ret = pch_check_status(dev); if (ret < 0) { goto unlock; 
} p[0] = pch_reg_read(dev, PCH_SMBUS_HD0); p[1] = pch_reg_read(dev, PCH_SMBUS_HD1); unlock: k_mutex_unlock(&data->mutex); return ret; } /* Implementation of SMBus Process Call */ static int pch_smbus_pcall(const struct device *dev, uint16_t periph_addr, uint8_t command, uint16_t send_word, uint16_t *recv_word) { struct pch_data *data = dev->data; uint8_t *p = (uint8_t *)recv_word; int ret; LOG_DBG("dev %p addr 0x%02x command 0x%02x", dev, periph_addr, command); k_mutex_lock(&data->mutex, K_FOREVER); ret = pch_smbus_start(dev, periph_addr, SMBUS_MSG_WRITE, command, (uint8_t *)&send_word, SMBUS_CMD_PROC_CALL); if (ret < 0) { goto unlock; } /* Wait for completion from ISR */ ret = k_sem_take(&data->completion_sync, K_MSEC(30)); if (ret != 0) { LOG_ERR("SMBus Proc Call timed out"); ret = -ETIMEDOUT; goto unlock; } ret = pch_check_status(dev); if (ret < 0) { goto unlock; } p[0] = pch_reg_read(dev, PCH_SMBUS_HD0); p[1] = pch_reg_read(dev, PCH_SMBUS_HD1); unlock: k_mutex_unlock(&data->mutex); return ret; } /* Implementation of SMBus Block Write */ static int pch_smbus_block_write(const struct device *dev, uint16_t periph_addr, uint8_t command, uint8_t count, uint8_t *buf) { struct pch_data *data = dev->data; int ret; LOG_DBG("dev %p addr 0x%02x command 0x%02x count %u", dev, periph_addr, command, count); k_mutex_lock(&data->mutex, K_FOREVER); ret = pch_smbus_block_start(dev, periph_addr, SMBUS_MSG_WRITE, command, count, buf, SMBUS_CMD_BLOCK); if (ret < 0) { goto unlock; } /* Wait for completion from ISR */ ret = k_sem_take(&data->completion_sync, K_MSEC(30)); if (ret != 0) { LOG_ERR("SMBus Block Write timed out"); ret = -ETIMEDOUT; goto unlock; } ret = pch_check_status(dev); unlock: k_mutex_unlock(&data->mutex); return ret; } /* Implementation of SMBus Block Read */ static int pch_smbus_block_read(const struct device *dev, uint16_t periph_addr, uint8_t command, uint8_t *count, uint8_t *buf) { struct pch_data *data = dev->data; int ret; LOG_DBG("dev %p addr 0x%02x command 
0x%02x", dev, periph_addr, command); k_mutex_lock(&data->mutex, K_FOREVER); ret = pch_smbus_block_start(dev, periph_addr, SMBUS_MSG_READ, command, 0, buf, SMBUS_CMD_BLOCK); if (ret < 0) { goto unlock; } /* Wait for completion from ISR */ ret = k_sem_take(&data->completion_sync, K_MSEC(30)); if (ret != 0) { LOG_ERR("SMBus Block Read timed out"); ret = -ETIMEDOUT; goto unlock; } ret = pch_check_status(dev); if (ret < 0) { goto unlock; } *count = pch_reg_read(dev, PCH_SMBUS_HD0); if (*count == 0 || *count > SMBUS_BLOCK_BYTES_MAX) { ret = -ENODATA; goto unlock; } for (int i = 0; i < *count; i++) { buf[i] = pch_reg_read(dev, PCH_SMBUS_HBD); } unlock: k_mutex_unlock(&data->mutex); return ret; } /* Implementation of SMBus Block Process Call */ static int pch_smbus_block_pcall(const struct device *dev, uint16_t periph_addr, uint8_t command, uint8_t send_count, uint8_t *send_buf, uint8_t *recv_count, uint8_t *recv_buf) { struct pch_data *data = dev->data; int ret; LOG_DBG("dev %p addr 0x%02x command 0x%02x", dev, periph_addr, command); k_mutex_lock(&data->mutex, K_FOREVER); ret = pch_smbus_block_start(dev, periph_addr, SMBUS_MSG_WRITE, command, send_count, send_buf, SMBUS_CMD_BLOCK_PROC); if (ret < 0) { goto unlock; } /* Wait for completion from ISR */ ret = k_sem_take(&data->completion_sync, K_MSEC(30)); if (ret != 0) { LOG_ERR("SMBus Block Process Call timed out"); ret = -ETIMEDOUT; goto unlock; } ret = pch_check_status(dev); if (ret < 0) { goto unlock; } *recv_count = pch_reg_read(dev, PCH_SMBUS_HD0); if (*recv_count == 0 || *recv_count + send_count > SMBUS_BLOCK_BYTES_MAX) { ret = -ENODATA; goto unlock; } for (int i = 0; i < *recv_count; i++) { recv_buf[i] = pch_reg_read(dev, PCH_SMBUS_HBD); } unlock: k_mutex_unlock(&data->mutex); return ret; } static const struct smbus_driver_api funcs = { .configure = pch_configure, .get_config = pch_get_config, .smbus_quick = pch_smbus_quick, .smbus_byte_write = pch_smbus_byte_write, .smbus_byte_read = pch_smbus_byte_read, 
.smbus_byte_data_write = pch_smbus_byte_data_write, .smbus_byte_data_read = pch_smbus_byte_data_read, .smbus_word_data_write = pch_smbus_word_data_write, .smbus_word_data_read = pch_smbus_word_data_read, .smbus_pcall = pch_smbus_pcall, .smbus_block_write = pch_smbus_block_write, .smbus_block_read = pch_smbus_block_read, .smbus_block_pcall = pch_smbus_block_pcall, #if defined(CONFIG_SMBUS_INTEL_PCH_SMBALERT) .smbus_smbalert_set_cb = pch_smbus_smbalert_set_sb, .smbus_smbalert_remove_cb = pch_smbus_smbalert_remove_sb, #endif /* CONFIG_SMBUS_INTEL_PCH_SMBALERT */ #if defined(CONFIG_SMBUS_INTEL_PCH_HOST_NOTIFY) .smbus_host_notify_set_cb = pch_smbus_host_notify_set_cb, .smbus_host_notify_remove_cb = pch_smbus_host_notify_remove_cb, #endif /* CONFIG_SMBUS_INTEL_PCH_HOST_NOTIFY */ }; static void smbus_isr(const struct device *dev) { const struct pch_config * const config = dev->config; struct pch_data *data = dev->data; uint32_t sts; uint8_t status; sts = pcie_conf_read(config->pcie->bdf, PCIE_CONF_CMDSTAT); if (!(sts & PCIE_CONF_CMDSTAT_INTERRUPT)) { LOG_ERR("Not our interrupt"); return; } /** * Handle first Host Notify since for that we need to read SSTS * register and for all other sources HSTS. * * Intel PCH implements Host Notify protocol in hardware. 
*/ #if defined(CONFIG_SMBUS_INTEL_PCH_HOST_NOTIFY) if (data->config & SMBUS_MODE_HOST_NOTIFY) { status = pch_reg_read(dev, PCH_SMBUS_SSTS); if (status & PCH_SMBUS_SSTS_HNS) { /* Notify address */ data->notify_addr = pch_reg_read(dev, PCH_SMBUS_NDA) >> 1; /* Notify data */ data->notify_data = pch_reg_read(dev, PCH_SMBUS_NDLB); data->notify_data |= pch_reg_read(dev, PCH_SMBUS_NDHB) << 8; k_work_submit(&data->host_notify_work); /* Clear Host Notify */ pch_reg_write(dev, PCH_SMBUS_SSTS, PCH_SMBUS_SSTS_HNS); return; } } #endif /* CONFIG_SMBUS_INTEL_PCH_HOST_NOTIFY */ status = pch_reg_read(dev, PCH_SMBUS_HSTS); /* HSTS dump if logging is enabled */ pch_dump_register_hsts(status); if (status & PCH_SMBUS_HSTS_BYTE_DONE) { LOG_WRN("BYTE_DONE interrupt is not used"); } /* Handle SMBALERT# signal */ #if defined(CONFIG_SMBUS_INTEL_PCH_SMBALERT) if (data->config & SMBUS_MODE_SMBALERT && status & PCH_SMBUS_HSTS_SMB_ALERT) { k_work_submit(&data->smb_alert_work); } #endif /* CONFIG_SMBUS_INTEL_PCH_SMBALERT */ /* Clear IRQ sources */ pch_reg_write(dev, PCH_SMBUS_HSTS, status); data->status = status; k_sem_give(&data->completion_sync); } /* Device macro initialization / DTS hackery */ #define SMBUS_PCH_IRQ_FLAGS(n) \ COND_CODE_1(DT_INST_IRQ_HAS_CELL(n, sense), \ (DT_INST_IRQ(n, sense)), \ (0)) #define SMBUS_IRQ_CONFIG(n) \ BUILD_ASSERT(IS_ENABLED(CONFIG_DYNAMIC_INTERRUPTS), \ "SMBus PCIe requires dynamic interrupts"); \ static void pch_config_##n(const struct device *dev) \ { \ const struct pch_config * const config = dev->config; \ unsigned int irq; \ if (DT_INST_IRQN(n) == PCIE_IRQ_DETECT) { \ irq = pcie_alloc_irq(config->pcie->bdf); \ if (irq == PCIE_CONF_INTR_IRQ_NONE) { \ return; \ } \ } else { \ irq = DT_INST_IRQN(n); \ pcie_conf_write(config->pcie->bdf, \ PCIE_CONF_INTR, irq); \ } \ pcie_connect_dynamic_irq(config->pcie->bdf, irq, \ DT_INST_IRQ(n, priority), \ (void (*)(const void *))smbus_isr, \ DEVICE_DT_INST_GET(n), \ SMBUS_PCH_IRQ_FLAGS(n)); \ 
pcie_irq_enable(config->pcie->bdf, irq); \ LOG_DBG("Configure irq %d", irq); \ } #define SMBUS_DEVICE_INIT(n) \ DEVICE_PCIE_INST_DECLARE(n); \ static void pch_config_##n(const struct device *dev); \ static const struct pch_config pch_config_data_##n = { \ DEVICE_PCIE_INST_INIT(n, pcie), \ .config_func = pch_config_##n, \ }; \ static struct pch_data smbus_##n##_data; \ SMBUS_DEVICE_DT_INST_DEFINE(n, pch_smbus_init, NULL, \ &smbus_##n##_data, &pch_config_data_##n, \ POST_KERNEL, CONFIG_SMBUS_INIT_PRIORITY, \ &funcs); \ SMBUS_IRQ_CONFIG(n); DT_INST_FOREACH_STATUS_OKAY(SMBUS_DEVICE_INIT) ```
/content/code_sandbox/drivers/smbus/intel_pch_smbus.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
7,711
```unknown # SMBus configuration options menuconfig SMBUS bool "System Management Bus (SMBus) drivers" help Enable SMBus Driver Configuration if SMBUS config SMBUS_SHELL bool "SMBus Shell" depends on SHELL help Enable SMBus Shell. config SMBUS_STATS bool "SMBus device Stats" depends on STATS help Enable SMBus Stats. config SMBUS_INIT_PRIORITY int "Init priority" default KERNEL_INIT_PRIORITY_DEFAULT help SMBus device driver initialization priority. module = SMBUS module-str = smbus source "subsys/logging/Kconfig.template.log_config" config SMBUS_INTEL_PCH bool "SMBus Intel PCH driver" default y depends on DT_HAS_INTEL_PCH_SMBUS_ENABLED select PCIE select DYNAMIC_INTERRUPTS help Enable Intel Platform Controller Hub (PCH) SMBus driver. if SMBUS_INTEL_PCH choice SMBUS_INTEL_PCH_ACCESS bool "SMBus register access mode" default SMBUS_INTEL_PCH_ACCESS_IO help Default PCH register access mode. Set default access IO so that both Qemu Q35 and Intel hardware are supported. config SMBUS_INTEL_PCH_ACCESS_IO bool "I/O PCH SMBus Register Access Mode" help Access PCH SMBus registers through I/O space. config SMBUS_INTEL_PCH_ACCESS_MMIO bool "MMIO PCH SMBus Register Access Mode" help Access PCH SMBus registers though MMIO space. endchoice config SMBUS_INTEL_PCH_HOST_NOTIFY bool "SMBus Intel PCH Host Notify support" default y help Support Host Notify from peripheral devices. config SMBUS_INTEL_PCH_SMBALERT bool "SMBus Intel PCH SMBALERT signal support" default y help Support SMBALERT signal from peripheral devices. endif # SMBUS_INTEL_PCH config SMBUS_STM32 bool "STM32 SMBus driver" default y depends on DT_HAS_ST_STM32_SMBUS_ENABLED depends on I2C_STM32 help Enable STM32 SMBus driver. if SMBUS_STM32 config SMBUS_STM32_SMBALERT bool "SMBus STM32 SMBALERT signal support" default y help Support SMBALERT signal from peripheral devices. endif # SMBUS_STM32 endif # SMBUS ```
/content/code_sandbox/drivers/smbus/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
537
```c /* * */ #include <stdlib.h> #include <errno.h> #include <zephyr/sys/slist.h> #include <zephyr/drivers/smbus.h> #include <zephyr/shell/shell.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(smbus_shell, CONFIG_LOG_DEFAULT_LEVEL); /** * smbus_shell is a highly modified version from i2c_shell. Basically only scan * logic remains from i2c_shell */ /** * Simplify argument parsing, smbus arguments always go in this order: * smbus <shell command> <device> <peripheral address> <command byte> */ #define ARGV_DEV 1 #define ARGV_ADDR 2 #define ARGV_CMD 3 /** * This sends SMBUS messages without any data (i.e. stop condition after * sending just the address). If there is an ACK for the address, it * is assumed there is a device present. * * WARNING: As there is no standard SMBUS detection command, this code * uses arbitrary SMBus commands (namely SMBus quick write to probe for * devices. * This operation can confuse your SMBUS bus, cause data loss, and is * known to corrupt the Atmel AT24RF08 EEPROM found on many IBM * Thinkpad laptops. 
* * See the i2cdetect(8) man page for more details on these probing risks. */
size_t argc, char **argv) { const struct device *dev; uint8_t addr; uint8_t value; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(sh, "SMBus: Device %s not found", argv[ARGV_DEV]); return -ENODEV; } addr = strtol(argv[ARGV_ADDR], NULL, 16); /* First byte is command */ value = strtol(argv[ARGV_CMD], NULL, 16); ret = smbus_byte_write(dev, addr, value); if (ret < 0) { shell_error(sh, "SMBus: Failed to write to periph: 0x%02x", addr); return -EIO; } return 0; } /* smbus byte_data_read <device> <dev_addr> <cmd> */ static int cmd_smbus_byte_data_read(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; uint8_t addr, command; uint8_t out; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(sh, "SMBus: Device %s not found", argv[ARGV_DEV]); return -ENODEV; } addr = strtol(argv[ARGV_ADDR], NULL, 16); command = strtol(argv[ARGV_CMD], NULL, 16); ret = smbus_byte_data_read(dev, addr, command, &out); if (ret < 0) { shell_error(sh, "SMBus: Failed to read from periph: 0x%02x", addr); return -EIO; } shell_print(sh, "Output: 0x%x", out); return 0; } /* smbus byte_data_write <device> <dev_addr> <cmd> <value> */ static int cmd_smbus_byte_data_write(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; uint8_t addr, command; uint8_t value; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(sh, "SMBus: Device %s not found", argv[ARGV_DEV]); return -ENODEV; } addr = strtol(argv[ARGV_ADDR], NULL, 16); command = strtol(argv[ARGV_CMD], NULL, 16); value = strtol(argv[4], NULL, 16); ret = smbus_byte_data_write(dev, addr, command, value); if (ret < 0) { shell_error(sh, "SMBus: Failed to write to periph: 0x%02x", addr); return -EIO; } return 0; } /* smbus word_data_read <device> <dev_addr> <cmd> */ static int cmd_smbus_word_data_read(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; uint8_t addr, command; uint16_t out; int ret; dev = 
device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(sh, "SMBus: Device %s not found", argv[ARGV_DEV]); return -ENODEV; } addr = strtol(argv[ARGV_ADDR], NULL, 16); command = strtol(argv[ARGV_CMD], NULL, 16); ret = smbus_word_data_read(dev, addr, command, &out); if (ret < 0) { shell_error(sh, "SMBus: Failed to read from periph: 0x%02x", addr); return -EIO; } shell_print(sh, "Output: 0x%04x", out); return 0; } /* smbus word_data_write <device> <dev_addr> <cmd> <value> */ static int cmd_smbus_word_data_write(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; uint8_t addr, command; uint16_t value; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(sh, "SMBus: Device %s not found", argv[ARGV_DEV]); return -ENODEV; } addr = strtol(argv[ARGV_ADDR], NULL, 16); command = strtol(argv[ARGV_CMD], NULL, 16); value = strtol(argv[4], NULL, 16); ret = smbus_word_data_write(dev, addr, command, value); if (ret < 0) { shell_error(sh, "SMBus: Failed to write to periph: 0x%02x", addr); return -EIO; } return 0; } /* smbus block_write <device> <dev_addr> <cmd> <bytes ... 
> */ static int cmd_smbus_block_write(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; uint8_t addr, command; uint8_t count = argc - 4; char **p = &argv[4]; /* start data bytes */ uint8_t buf[32]; /* max block count */ int ret; if (count == 0 || count > sizeof(buf)) { return -EINVAL; } dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(sh, "SMBus: Device %s not found", argv[ARGV_DEV]); return -ENODEV; } addr = strtol(argv[ARGV_ADDR], NULL, 16); command = strtol(argv[ARGV_CMD], NULL, 16); for (int i = 0; i < count; i++) { buf[i] = (uint8_t)strtoul(p[i], NULL, 16); } LOG_HEXDUMP_DBG(buf, count, "Constructed block buffer"); ret = smbus_block_write(dev, addr, command, count, buf); if (ret < 0) { shell_error(sh, "Failed block write to periph: 0x%02x", addr); return ret; } return 0; } /* smbus block_read <device> <dev_addr> <cmd> */ static int cmd_smbus_block_read(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; uint8_t addr, command; uint8_t buf[32]; /* max block count */ uint8_t count; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(sh, "SMBus: Device %s not found", argv[ARGV_DEV]); return -ENODEV; } addr = strtol(argv[ARGV_ADDR], NULL, 16); command = strtol(argv[ARGV_CMD], NULL, 16); ret = smbus_block_read(dev, addr, command, &count, buf); if (ret < 0) { shell_error(sh, "Failed block read from periph: 0x%02x", addr); return ret; } if (count == 0 || count > sizeof(buf)) { shell_error(sh, "Returned count %u", count); return -ENODATA; } shell_hexdump(sh, buf, count); return 0; } /* Device name autocompletion support */ static void device_name_get(size_t idx, struct shell_static_entry *entry) { const struct device *dev = shell_device_lookup(idx, "smbus"); entry->syntax = (dev != NULL) ? 
dev->name : NULL; entry->handler = NULL; entry->help = NULL; entry->subcmd = NULL; } SHELL_DYNAMIC_CMD_CREATE(dsub_device_name, device_name_get); SHELL_STATIC_SUBCMD_SET_CREATE(sub_smbus_cmds, SHELL_CMD_ARG(quick, &dsub_device_name, "SMBus Quick command\n" "Usage: quick <device> <addr>", cmd_smbus_quick, 3, 0), SHELL_CMD_ARG(scan, &dsub_device_name, "Scan SMBus peripheral devices command\n" "Usage: scan <device>", cmd_smbus_scan, 2, 0), SHELL_CMD_ARG(byte_read, &dsub_device_name, "SMBus: byte read command\n" "Usage: byte_read <device> <addr>", cmd_smbus_byte_read, 3, 0), SHELL_CMD_ARG(byte_write, &dsub_device_name, "SMBus: byte write command\n" "Usage: byte_write <device> <addr> <value>", cmd_smbus_byte_write, 4, 0), SHELL_CMD_ARG(byte_data_read, &dsub_device_name, "SMBus: byte data read command\n" "Usage: byte_data_read <device> <addr> <cmd>", cmd_smbus_byte_data_read, 4, 0), SHELL_CMD_ARG(byte_data_write, &dsub_device_name, "SMBus: byte data write command\n" "Usage: byte_data_write <device> <addr> <cmd> <value>", cmd_smbus_byte_data_write, 5, 0), SHELL_CMD_ARG(word_data_read, &dsub_device_name, "SMBus: word data read command\n" "Usage: word_data_read <device> <addr> <cmd>", cmd_smbus_word_data_read, 4, 0), SHELL_CMD_ARG(word_data_write, &dsub_device_name, "SMBus: word data write command\n" "Usage: word_data_write <device> <addr> <cmd> <value>", cmd_smbus_word_data_write, 5, 0), SHELL_CMD_ARG(block_write, &dsub_device_name, "SMBus: Block Write command\n" "Usage: block_write <device> <addr> <cmd> [<byte1>, ...]", cmd_smbus_block_write, 4, 32), SHELL_CMD_ARG(block_read, &dsub_device_name, "SMBus: Block Read command\n" "Usage: block_read <device> <addr> <cmd>", cmd_smbus_block_read, 4, 0), SHELL_SUBCMD_SET_END /* Array terminated. */ ); SHELL_CMD_REGISTER(smbus, &sub_smbus_cmds, "smbus commands", NULL); ```
/content/code_sandbox/drivers/smbus/smbus_shell.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,262
```c /* * */ #define DT_DRV_COMPAT nuvoton_npcx_ps2_ctrl /** * @file * @brief Nuvoton NPCX PS/2 module (controller) driver * * This file contains the driver of PS/2 module (controller) which provides a * hardware accelerator mechanism to handle both incoming and outgoing data. * The hardware accelerator mechanism is shared by four PS/2 channels. */ #include <zephyr/kernel.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/ps2.h> #include <zephyr/dt-bindings/clock/npcx_clock.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(ps2_npcx_ctrl, CONFIG_PS2_LOG_LEVEL); #define NPCX_PS2_CH_COUNT 4 /* * Set WDAT3-0 and clear CLK3-0 in the PSOSIG register to * reset the shift mechanism. */ #define NPCX_PS2_SHIFT_MECH_RESET (uint8_t)~NPCX_PSOSIG_CLK_MASK_ALL /* in 50us units */ #define PS2_RETRY_COUNT 10000 /* * The max duration of a PS/2 clock is about 100 micro-seconds. * A PS/2 transaction needs 11 clock cycles. It will take about 1.1 ms for a * complete transaction. */ #define PS2_TRANSACTION_TIMEOUT K_MSEC(2) /* Device config */ struct ps2_npcx_ctrl_config { uintptr_t base; struct npcx_clk_cfg clk_cfg; }; /* Driver data */ struct ps2_npcx_ctrl_data { /* * Bit mask to record the enabled PS/2 channels. * Only bit[7] and bit[5:3] are used * (i.e. 
the bit position of CLK3-0 in the PS2_PSOSIG register) */ uint8_t channel_enabled_mask; /* The mutex of the PS/2 controller */ struct k_sem lock; /* The semaphore to synchronize the Tx transaction */ struct k_sem tx_sync_sem; /* * The callback function to handle the data received from PS/2 device */ ps2_callback_t callback_isr[NPCX_PS2_CH_COUNT]; }; /* Driver convenience defines */ #define HAL_PS2_INSTANCE(dev) \ ((struct ps2_reg *)((const struct ps2_npcx_ctrl_config *)(dev)->config)->base) static uint8_t ps2_npcx_ctrl_get_ch_clk_mask(uint8_t channel_id) { return BIT(NPCX_PSOSIG_CLK(channel_id)); } int ps2_npcx_ctrl_configure(const struct device *dev, uint8_t channel_id, ps2_callback_t callback_isr) { struct ps2_npcx_ctrl_data *const data = dev->data; if (channel_id >= NPCX_PS2_CH_COUNT) { LOG_ERR("unexpected channel ID: %d", channel_id); return -EINVAL; } if (callback_isr == NULL) { return -EINVAL; } k_sem_take(&data->lock, K_FOREVER); data->callback_isr[channel_id] = callback_isr; k_sem_give(&data->lock); return 0; } int ps2_npcx_ctrl_enable_interface(const struct device *dev, uint8_t channel_id, bool enable) { struct ps2_npcx_ctrl_data *const data = dev->data; struct ps2_reg *const inst = HAL_PS2_INSTANCE(dev); uint8_t ch_clk_mask; k_sem_take(&data->lock, K_FOREVER); /* * Disable the interrupt during changing the enabled channel mask to * prevent from preemption. 
*/
	/*
	 * Critical section: IRQ off and controller lock held (taken by the
	 * caller-visible prologue above this chunk).
	 */
	irq_disable(DT_INST_IRQN(0));
	if (channel_id >= NPCX_PS2_CH_COUNT) {
		LOG_ERR("unexpected channel ID: %d", channel_id);
		/* Undo IRQ/lock state before bailing out */
		irq_enable(DT_INST_IRQN(0));
		k_sem_give(&data->lock);
		return -EINVAL;
	}

	ch_clk_mask = ps2_npcx_ctrl_get_ch_clk_mask(channel_id);
	if (enable) {
		data->channel_enabled_mask |= ch_clk_mask;
		/* Enable the relevant channel clock */
		inst->PSOSIG |= ch_clk_mask;
	} else {
		data->channel_enabled_mask &= ~ch_clk_mask;
		/* Disable the relevant channel clock */
		inst->PSOSIG &= ~ch_clk_mask;
	}

	irq_enable(DT_INST_IRQN(0));
	k_sem_give(&data->lock);

	return 0;
}

/*
 * Return -EBUSY while a PS/2 transaction is in flight, 0 when the bus is
 * idle, based on the SOT/EOT bits of the PSTAT register.
 */
static int ps2_npcx_ctrl_bus_busy(const struct device *dev)
{
	struct ps2_reg *const inst = HAL_PS2_INSTANCE(dev);

	/*
	 * The driver pulls the CLK for non-active channels to low when Start
	 * bit is detected and pull the CLK of the active channel low after
	 * Stop bit detected. The EOT bit is set when Stop bit is detected,
	 * but both SOT and EOT are cleared when all CLKs are pull low
	 * (due to Shift Mechanism is reset)
	 */
	return (IS_BIT_SET(inst->PSTAT, NPCX_PSTAT_SOT) ||
		IS_BIT_SET(inst->PSTAT, NPCX_PSTAT_EOT)) ?
		       -EBUSY :
		       0;
}

/*
 * Transmit one byte on the given PS/2 channel (public API, declared in
 * ps2_npcx_controller.h).
 *
 * Returns 0 on success, -EINVAL for a bad/disabled channel, -ETIMEDOUT when
 * the bus stays busy or the ISR never signals tx_sync_sem.  Holds data->lock
 * for the whole transaction; both exit paths release it.
 */
int ps2_npcx_ctrl_write(const struct device *dev, uint8_t channel_id,
			uint8_t value)
{
	struct ps2_npcx_ctrl_data *const data = dev->data;
	struct ps2_reg *const inst = HAL_PS2_INSTANCE(dev);
	int i = 0;

	if (channel_id >= NPCX_PS2_CH_COUNT) {
		LOG_ERR("unexpected channel ID: %d", channel_id);
		return -EINVAL;
	}

	if (!(ps2_npcx_ctrl_get_ch_clk_mask(channel_id) &
	      data->channel_enabled_mask)) {
		LOG_ERR("channel %d is not enabled", channel_id);
		return -EINVAL;
	}

	k_sem_take(&data->lock, K_FOREVER);

	/* Poll (50 us steps) until the bus goes idle or we give up */
	while (ps2_npcx_ctrl_bus_busy(dev) && (i < PS2_RETRY_COUNT)) {
		k_busy_wait(50);
		i++;
	}

	if (unlikely(i == PS2_RETRY_COUNT)) {
		LOG_ERR("PS2 write attempt timed out");
		goto timeout_invalid;
	}

	/* Set PS/2 in transmit mode */
	inst->PSCON |= BIT(NPCX_PSCON_XMT);
	/* Enable Start Of Transaction interrupt */
	inst->PSIEN |= BIT(NPCX_PSIEN_SOTIE);

	/* Reset the shift mechanism */
	inst->PSOSIG = NPCX_PS2_SHIFT_MECH_RESET;
	/* Inhibit communication should last at least 100 micro-seconds */
	k_busy_wait(100);

	/* Write the data to be transmitted */
	inst->PSDAT = value;
	/* Apply the Request-to-send */
	inst->PSOSIG &= ~BIT(NPCX_PSOSIG_WDAT(channel_id));
	inst->PSOSIG |= ps2_npcx_ctrl_get_ch_clk_mask(channel_id);
	/* Wait for the ISR (EOT path) to signal TX completion */
	if (k_sem_take(&data->tx_sync_sem, PS2_TRANSACTION_TIMEOUT) != 0) {
		irq_disable(DT_INST_IRQN(0));
		LOG_ERR("PS/2 Tx timeout");
		/* Reset the shift mechanism */
		inst->PSOSIG = NPCX_PS2_SHIFT_MECH_RESET;
		/* Change the PS/2 module to receive mode */
		inst->PSCON &= ~BIT(NPCX_PSCON_XMT);
		/*
		 * Restore the channel back to enable according to
		 * channel_enabled_mask.
		 */
		inst->PSOSIG |= data->channel_enabled_mask;
		irq_enable(DT_INST_IRQN(0));
		goto timeout_invalid;
	}

	k_sem_give(&data->lock);
	return 0;

timeout_invalid:
	k_sem_give(&data->lock);
	return -ETIMEDOUT;
}

/*
 * Check PSTAT for receive errors (parity / frame).  Returns 0 when clean,
 * -EIO (after logging) when either error bit is set.
 */
static int ps2_npcx_ctrl_is_rx_error(const struct device *dev)
{
	struct ps2_reg *const inst = HAL_PS2_INSTANCE(dev);
	uint8_t status;

	status = inst->PSTAT & (BIT(NPCX_PSTAT_PERR) | BIT(NPCX_PSTAT_RFERR));
	if (status) {
		if (status & BIT(NPCX_PSTAT_PERR)) {
			LOG_ERR("RX parity error");
		}
		if (status & BIT(NPCX_PSTAT_RFERR)) {
			LOG_ERR("RX Frame error");
		}
		return -EIO;
	}

	return 0;
}

/*
 * Shared ISR for all four PS/2 channels.  Dispatches on SOT (start) and EOT
 * (end of transaction); on EOT either completes a TX (signals tx_sync_sem)
 * or delivers a received byte to the registered per-channel callback.
 */
static void ps2_npcx_ctrl_isr(const struct device *dev)
{
	uint8_t active_ch, mask;
	struct ps2_reg *const inst = HAL_PS2_INSTANCE(dev);
	struct ps2_npcx_ctrl_data *const data = dev->data;

	/*
	 * Map the hardware ACH field to a zero-based channel index:
	 * ACH = 1 : Channel 0
	 * ACH = 2 : Channel 1
	 * ACH = 4 : Channel 2
	 * ACH = 5 : Channel 3
	 */
	active_ch = GET_FIELD(inst->PSTAT, NPCX_PSTAT_ACH);
	active_ch = active_ch > 2 ? (active_ch - 2) : (active_ch - 1);
	LOG_DBG("ACH: %d\n", active_ch);

	/*
	 * Inhibit PS/2 transaction of the other non-active channels by
	 * pulling down the clock signal
	 */
	mask = ~NPCX_PSOSIG_CLK_MASK_ALL | BIT(NPCX_PSOSIG_CLK(active_ch));
	inst->PSOSIG &= mask;

	/* PS/2 Start of Transaction */
	if (IS_BIT_SET(inst->PSTAT, NPCX_PSTAT_SOT) &&
	    IS_BIT_SET(inst->PSIEN, NPCX_PSIEN_SOTIE)) {
		/*
		 * Once set, SOT is not cleared until the shift mechanism
		 * is reset. Therefore, SOTIE should be cleared on the
		 * first occurrence of an SOT interrupt.
		 */
		inst->PSIEN &= ~BIT(NPCX_PSIEN_SOTIE);
		LOG_DBG("SOT");
		/* PS/2 End of Transaction */
	} else if (IS_BIT_SET(inst->PSTAT, NPCX_PSTAT_EOT)) {
		inst->PSIEN &= ~BIT(NPCX_PSIEN_EOTIE);

		/*
		 * Clear the CLK of active channel to reset
		 * the shift mechanism
		 */
		inst->PSOSIG &= ~BIT(NPCX_PSOSIG_CLK(active_ch));

		/* Tx is done */
		if (IS_BIT_SET(inst->PSCON, NPCX_PSCON_XMT)) {
			/* Change the PS/2 module to receive mode */
			inst->PSCON &= ~BIT(NPCX_PSCON_XMT);
			k_sem_give(&data->tx_sync_sem);
		} else {
			/* RX path: forward the byte unless an error occurred */
			if (ps2_npcx_ctrl_is_rx_error(dev) == 0) {
				ps2_callback_t callback;
				uint8_t data_in = inst->PSDAT;

				LOG_DBG("Recv:0x%02x", data_in);
				callback = data->callback_isr[active_ch];
				if (callback != NULL) {
					callback(dev, data_in);
				}
			}
		}

		/* Restore the enabled channel */
		inst->PSOSIG |= data->channel_enabled_mask;
		/*
		 * Re-enable the Start Of Transaction interrupt when
		 * the shift mechanism is reset
		 */
		inst->PSIEN |= BIT(NPCX_PSIEN_SOTIE);
		inst->PSIEN |= BIT(NPCX_PSIEN_EOTIE);
		LOG_DBG("EOT");
	}
}

/* PS/2 driver registration */
static int ps2_npcx_ctrl_init(const struct device *dev);
static struct ps2_npcx_ctrl_data ps2_npcx_ctrl_data_0;
static const struct ps2_npcx_ctrl_config ps2_npcx_ctrl_config_0 = {
	.base = DT_INST_REG_ADDR(0),
	.clk_cfg = NPCX_DT_CLK_CFG_ITEM(0),
};

DEVICE_DT_INST_DEFINE(0, &ps2_npcx_ctrl_init, NULL, &ps2_npcx_ctrl_data_0,
		      &ps2_npcx_ctrl_config_0, POST_KERNEL,
		      CONFIG_PS2_INIT_PRIORITY, NULL);

/*
 * One-time controller bring-up: enable the module clock, reset the shift
 * mechanism, program the interrupt-enable register, init the kernel objects
 * and hook the shared ISR.
 */
static int ps2_npcx_ctrl_init(const struct device *dev)
{
	const struct ps2_npcx_ctrl_config *const config = dev->config;
	struct ps2_npcx_ctrl_data *const data = dev->data;
	struct ps2_reg *const inst = HAL_PS2_INSTANCE(dev);
	const struct device *const clk_dev = DEVICE_DT_GET(NPCX_CLK_CTRL_NODE);
	int ret;

	if (!device_is_ready(clk_dev)) {
		LOG_ERR("%s device not ready", clk_dev->name);
		return -ENODEV;
	}

	/* Turn on PS/2 controller device clock */
	ret = clock_control_on(clk_dev,
			       (clock_control_subsys_t)&config->clk_cfg);
	if (ret < 0) {
		LOG_ERR("Turn on PS/2 clock fail %d", ret);
		return ret;
	}

	/* Disable shift mechanism and configure PS/2 to received mode. */
	inst->PSCON = 0x0;
	/* Set WDAT3-0 and clear CLK3-0 before enabling shift mechanism */
	inst->PSOSIG = NPCX_PS2_SHIFT_MECH_RESET;
	/*
	 * PS/2 interrupt enable register
	 * [0] - : SOTIE   = 1: Start Of Transaction Interrupt Enable
	 * [1] - : EOTIE   = 1: End Of Transaction Interrupt Enable
	 * [4] - : WUE     = 1: Wake-Up Enable
	 * [7] - : CLK_SEL = 1: Select Free-Run clock as the basic clock
	 *                   0: Select APB1 clock as the basic clock
	 */
	inst->PSIEN = BIT(NPCX_PSIEN_SOTIE) | BIT(NPCX_PSIEN_EOTIE) |
		      BIT(NPCX_PSIEN_PS2_WUE);
	if (config->clk_cfg.bus == NPCX_CLOCK_BUS_FREERUN)
		inst->PSIEN |= BIT(NPCX_PSIEN_PS2_CLK_SEL);
	/* Enable weak internal pull-up */
	inst->PSCON |= BIT(NPCX_PSCON_WPUED);
	/* Enable shift mechanism */
	inst->PSCON |= BIT(NPCX_PSCON_EN);

	/* lock: controller mutex (initially free); tx_sync_sem: ISR signal */
	k_sem_init(&data->lock, 1, 1);
	k_sem_init(&data->tx_sync_sem, 0, 1);

	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority),
		    ps2_npcx_ctrl_isr, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQN(0));

	return 0;
}
```
/content/code_sandbox/drivers/ps2/ps2_npcx_controller.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,166
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_PS2_PS2_NPCX_CONTROLLER_H_ #define ZEPHYR_DRIVERS_PS2_PS2_NPCX_CONTROLLER_H_ #include <zephyr/device.h> #ifdef __cplusplus extern "C" { #endif /** * @brief Write @p value to a PS/2 device via the PS/2 controller. * * @param dev Pointer to the device structure for PS/2 controller instance. * @param channel_id Channel ID of the PS/2 to write data. * @param value the data write to the PS/2 device. * * @retval 0 If successful. * @retval -EINVAL Channel ID is invalid. * @retval -ETIMEDOUT Timeout occurred for a PS/2 write transaction. */ int ps2_npcx_ctrl_write(const struct device *dev, uint8_t channel_id, uint8_t value); /** * @brief Set the PS/2 controller to turn on/off the PS/2 channel. * * @param dev Pointer to the device structure for PS/2 controller instance. * @param channel_id Channel ID of the PS/2 to enable or disable. * @param enable True to enable channel, false to disable channel. * * @retval 0 If successful. * @retval -EINVAL Channel ID is invalid. */ int ps2_npcx_ctrl_enable_interface(const struct device *dev, uint8_t channel, bool enable); /** * @brief Record the callback_isr function pointer for the given PS/2 channel. * * @param dev Pointer to the device structure for PS/2 controller instance. * @param channel_id Channel ID of the PS/2 to configure the callback_isr. * @param callback_isr Pointer to the callback_isr. * * @retval 0 If successful. * @retval -EINVAL callback_isr is NULL. */ int ps2_npcx_ctrl_configure(const struct device *dev, uint8_t channel_id, ps2_callback_t callback_isr); #ifdef __cplusplus } #endif #endif /* ZEPHYR_DRIVERS_PS2_PS2_NPCX_CONTROLLER_H_ */ ```
/content/code_sandbox/drivers/ps2/ps2_npcx_controller.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
433
```unknown # Microchip XEC PS2 configuration options config PS2_XEC bool "XEC Microchip PS2 driver" depends on SOC_FAMILY_MICROCHIP_MEC && ESPI_PERIPHERAL_8042_KBC select PINCTRL help Enable the Microchip XEC PS2 IO driver. The driver also depends on the KBC 8042 keyboard controller. Note, MEC15xx series has two controllers and MEC172x series has one. ```
/content/code_sandbox/drivers/ps2/Kconfig.xec
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
104
```unknown
# NPCX PS2 configuration options

menuconfig PS2_NPCX
	bool "Nuvoton NPCX embedded controller (EC) PS2 driver"
	depends on DT_HAS_NUVOTON_NPCX_PS2_CHANNEL_ENABLED
	default y
	help
	  Enable the NPCX family PS2 driver. It provides four PS/2 channels.
	  Each channel has two quasi-bidirectional signals that serve as direct
	  interfaces to an external keyboard, mouse or any other PS/2-compatible
	  pointing device. The driver also depends on the KBC 8042 keyboard
	  controller.

if PS2_NPCX

config PS2_CHANNEL_INIT_PRIORITY
	int "PS/2 channel driver init priority"
	default 41
	help
	  PS/2 channel device driver initialization priority. This value must
	  be larger than PS2_INIT_PRIORITY, because the NPCX PS/2 controller
	  device driver has to initialize before the channel device drivers.

endif # PS2_NPCX
```
/content/code_sandbox/drivers/ps2/Kconfig.npcx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
199
```c /*
 *
 */

#define DT_DRV_COMPAT nuvoton_npcx_ps2_channel

/**
 * @file
 * @brief Nuvoton NPCX PS/2 driver
 *
 * This file contains the driver of PS/2 buses (channels) which provides the
 * connection between Zephyr PS/2 API functions and NPCX PS/2 controller driver
 * to support PS/2 transactions.
 *
 */

#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/ps2.h>
#include <soc.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(ps2_npcx_channel, CONFIG_PS2_LOG_LEVEL);

#include "ps2_npcx_controller.h"

/* Device config */
struct ps2_npcx_ch_config {
	/* Indicate the channel's number of the PS/2 channel device */
	uint8_t channel_id;
	/* The shared PS/2 controller this channel belongs to (DT parent) */
	const struct device *ps2_ctrl;
	/* pinmux configuration */
	const struct pinctrl_dev_config *pcfg;
};

/* PS/2 api functions */

/* Register the RX callback on the controller, then enable this channel. */
static int ps2_npcx_ch_configure(const struct device *dev,
				 ps2_callback_t callback_isr)
{
	const struct ps2_npcx_ch_config *const config = dev->config;
	int ret;

	ret = ps2_npcx_ctrl_configure(config->ps2_ctrl, config->channel_id,
				      callback_isr);
	if (ret != 0) {
		return ret;
	}

	return ps2_npcx_ctrl_enable_interface(config->ps2_ctrl,
					      config->channel_id, 1);
}

/* Forward a single-byte write to the controller for this channel. */
static int ps2_npcx_ch_write(const struct device *dev, uint8_t value)
{
	const struct ps2_npcx_ch_config *const config = dev->config;

	return ps2_npcx_ctrl_write(config->ps2_ctrl, config->channel_id,
				   value);
}

/* Turn the channel clock on (enable_callback API). */
static int ps2_npcx_ch_enable_interface(const struct device *dev)
{
	const struct ps2_npcx_ch_config *const config = dev->config;

	return ps2_npcx_ctrl_enable_interface(config->ps2_ctrl,
					      config->channel_id, 1);
}

/* Turn the channel clock off (disable_callback API). */
static int ps2_npcx_ch_inhibit_interface(const struct device *dev)
{
	const struct ps2_npcx_ch_config *const config = dev->config;

	return ps2_npcx_ctrl_enable_interface(config->ps2_ctrl,
					      config->channel_id, 0);
}

/* PS/2 driver registration */

/* Verify the parent controller is up, then apply this channel's pinmux. */
static int ps2_npcx_channel_init(const struct device *dev)
{
	const struct ps2_npcx_ch_config *const config = dev->config;
	int ret;

	if (!device_is_ready(config->ps2_ctrl)) {
		LOG_ERR("%s device not ready", config->ps2_ctrl->name);
		return -ENODEV;
	}

	/* Configure pin-mux for PS/2 device */
	ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret < 0) {
		LOG_ERR("PS2 pinctrl setup failed (%d)", ret);
		return ret;
	}

	return 0;
}

static const struct ps2_driver_api ps2_channel_npcx_driver_api = {
	.config = ps2_npcx_ch_configure,
	.read = NULL,
	.write = ps2_npcx_ch_write,
	.disable_callback = ps2_npcx_ch_inhibit_interface,
	.enable_callback = ps2_npcx_ch_enable_interface,
};

/* PS/2 channel initialization macro functions */
#define NPCX_PS2_CHANNEL_INIT(inst)                                           \
									      \
	PINCTRL_DT_INST_DEFINE(inst);                                         \
									      \
	static const struct ps2_npcx_ch_config ps2_npcx_ch_cfg_##inst = {     \
		.channel_id = DT_INST_PROP(inst, channel),                    \
		.ps2_ctrl = DEVICE_DT_GET(DT_INST_PARENT(inst)),              \
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst),                 \
	};                                                                    \
									      \
	DEVICE_DT_INST_DEFINE(inst, ps2_npcx_channel_init, NULL, NULL,        \
			      &ps2_npcx_ch_cfg_##inst, POST_KERNEL,           \
			      CONFIG_PS2_CHANNEL_INIT_PRIORITY,               \
			      &ps2_channel_npcx_driver_api);

DT_INST_FOREACH_STATUS_OKAY(NPCX_PS2_CHANNEL_INIT)

/* PS/2 channel driver must be initialized after PS/2 controller driver */
BUILD_ASSERT(CONFIG_PS2_CHANNEL_INIT_PRIORITY > CONFIG_PS2_INIT_PRIORITY);
```
/content/code_sandbox/drivers/ps2/ps2_npcx_channel.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
916
```unknown # PS/2 configuration options menuconfig PS2 bool "PS/2 drivers" help Include PS/2 drivers in system config. if PS2 source "drivers/ps2/Kconfig.xec" source "drivers/ps2/Kconfig.npcx" module = PS2 module-str = ps2 source "subsys/logging/Kconfig.template.log_config" config PS2_INIT_PRIORITY int "PS/2 driver init priority" default 40 help PS/2 device driver initialization priority. There isn't any critical component relying on this priority at the moment. endif # PS2 ```
/content/code_sandbox/drivers/ps2/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
133
```c /*
 *
 */

/*
 * Userspace syscall verification shims for the PS/2 API.  Each z_vrfy_*
 * validates the device and any user-supplied memory, then forwards to the
 * corresponding z_impl_* kernel implementation.
 */

#include <zephyr/drivers/ps2.h>
#include <zephyr/internal/syscall_handler.h>

static inline int z_vrfy_ps2_config(const struct device *dev,
				    ps2_callback_t callback_isr)
{
	K_OOPS(K_SYSCALL_DRIVER_PS2(dev, config));
	/* User mode may not install kernel-mode function pointers */
	K_OOPS(K_SYSCALL_VERIFY_MSG(callback_isr == NULL,
				    "callback not be set from user mode"));
	return z_impl_ps2_config(dev, callback_isr);
}
#include <zephyr/syscalls/ps2_config_mrsh.c>

static inline int z_vrfy_ps2_write(const struct device *dev, uint8_t value)
{
	K_OOPS(K_SYSCALL_DRIVER_PS2(dev, write));
	return z_impl_ps2_write(dev, value);
}
#include <zephyr/syscalls/ps2_write_mrsh.c>

static inline int z_vrfy_ps2_read(const struct device *dev, uint8_t *value)
{
	K_OOPS(K_SYSCALL_DRIVER_PS2(dev, read));
	/* The output byte lands in user memory; verify it is writable */
	K_OOPS(K_SYSCALL_MEMORY_WRITE(value, sizeof(uint8_t)));
	return z_impl_ps2_read(dev, value);
}
#include <zephyr/syscalls/ps2_read_mrsh.c>

static inline int z_vrfy_ps2_enable_callback(const struct device *dev)
{
	K_OOPS(K_SYSCALL_DRIVER_PS2(dev, enable_callback));
	return z_impl_ps2_enable_callback(dev);
}
#include <zephyr/syscalls/ps2_enable_callback_mrsh.c>

static inline int z_vrfy_ps2_disable_callback(const struct device *dev)
{
	K_OOPS(K_SYSCALL_DRIVER_PS2(dev, disable_callback));
	return z_impl_ps2_disable_callback(dev);
}
#include <zephyr/syscalls/ps2_disable_callback_mrsh.c>
```
/content/code_sandbox/drivers/ps2/ps2_handlers.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
368
```objective-c /**
 *
 */

/*
 * Middle-layer (NSOS_MID_) getaddrinfo error codes shared between the Zephyr
 * side and the Linux host side of the native-sim offloaded sockets driver.
 * Values are translated to/from each libc's EAI_* constants by
 * eai_to_nsos_mid()/eai_from_nsos_mid() in nsos_netdb.c.
 */

#ifndef __DRIVERS_NET_NSOS_NETDB_H__
#define __DRIVERS_NET_NSOS_NETDB_H__

enum nsos_resolve_status {
	/** Invalid value for `ai_flags' field */
	NSOS_MID_EAI_BADFLAGS = -1,
	/** NAME or SERVICE is unknown */
	NSOS_MID_EAI_NONAME = -2,
	/** Temporary failure in name resolution */
	NSOS_MID_EAI_AGAIN = -3,
	/** Non-recoverable failure in name res */
	NSOS_MID_EAI_FAIL = -4,
	/** `ai_family' not supported */
	NSOS_MID_EAI_FAMILY = -6,
	/** `ai_socktype' not supported */
	NSOS_MID_EAI_SOCKTYPE = -7,
	/** SRV not supported for `ai_socktype' */
	NSOS_MID_EAI_SERVICE = -8,
	/** Memory allocation failure */
	NSOS_MID_EAI_MEMORY = -10,
	/** System error returned in `errno' */
	NSOS_MID_EAI_SYSTEM = -11,
	/** Argument buffer overflow */
	NSOS_MID_EAI_OVERFLOW = -12,
};

int eai_to_nsos_mid(int err);
int eai_from_nsos_mid(int err);

#endif /* __DRIVERS_NET_NSOS_NETDB_H__ */
```
/content/code_sandbox/drivers/net/nsos_netdb.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
283
```c /* * */ #define DT_DRV_COMPAT microchip_xec_ps2 #include <cmsis_core.h> #include <errno.h> #include <zephyr/device.h> #include <zephyr/kernel.h> #ifdef CONFIG_SOC_SERIES_MEC172X #include <zephyr/drivers/clock_control/mchp_xec_clock_control.h> #include <zephyr/drivers/interrupt_controller/intc_mchp_xec_ecia.h> #endif #include <zephyr/drivers/pinctrl.h> #include <zephyr/pm/device.h> #include <zephyr/pm/policy.h> #include <zephyr/drivers/ps2.h> #include <soc.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> #include <zephyr/drivers/gpio.h> #define LOG_LEVEL CONFIG_PS2_LOG_LEVEL LOG_MODULE_REGISTER(ps2_mchp_xec); /* in 50us units */ #define PS2_TIMEOUT 10000 struct ps2_xec_config { struct ps2_regs * const regs; int isr_nvic; uint8_t girq_id; uint8_t girq_bit; uint8_t girq_id_wk; uint8_t girq_bit_wk; uint8_t pcr_idx; uint8_t pcr_pos; void (*irq_config_func)(void); const struct pinctrl_dev_config *pcfg; #ifdef CONFIG_PM_DEVICE struct gpio_dt_spec wakerx_gpio; bool wakeup_source; #endif }; struct ps2_xec_data { ps2_callback_t callback_isr; struct k_sem tx_lock; }; #ifdef CONFIG_SOC_SERIES_MEC172X static inline void ps2_xec_slp_en_clr(const struct device *dev) { const struct ps2_xec_config * const cfg = dev->config; z_mchp_xec_pcr_periph_sleep(cfg->pcr_idx, cfg->pcr_pos, 0); } static inline void ps2_xec_girq_clr(uint8_t girq_idx, uint8_t girq_posn) { mchp_soc_ecia_girq_src_clr(girq_idx, girq_posn); } static inline void ps2_xec_girq_en(uint8_t girq_idx, uint8_t girq_posn) { mchp_xec_ecia_girq_src_en(girq_idx, girq_posn); } static inline void ps2_xec_girq_dis(uint8_t girq_idx, uint8_t girq_posn) { mchp_xec_ecia_girq_src_dis(girq_idx, girq_posn); } #else static inline void ps2_xec_slp_en_clr(const struct device *dev) { const struct ps2_xec_config * const cfg = dev->config; if (cfg->pcr_pos == MCHP_PCR3_PS2_0_POS) { mchp_pcr_periph_slp_ctrl(PCR_PS2_0, 0); } else { mchp_pcr_periph_slp_ctrl(PCR_PS2_1, 0); } } static inline void ps2_xec_girq_clr(uint8_t girq_idx, 
uint8_t girq_posn) { MCHP_GIRQ_SRC(girq_idx) = BIT(girq_posn); } static inline void ps2_xec_girq_en(uint8_t girq_idx, uint8_t girq_posn) { MCHP_GIRQ_ENSET(girq_idx) = BIT(girq_posn); } static inline void ps2_xec_girq_dis(uint8_t girq_idx, uint8_t girq_posn) { MCHP_GIRQ_ENCLR(girq_idx) = MCHP_KBC_IBF_GIRQ; } #endif /* CONFIG_SOC_SERIES_MEC172X */ static int ps2_xec_configure(const struct device *dev, ps2_callback_t callback_isr) { const struct ps2_xec_config * const config = dev->config; struct ps2_xec_data * const data = dev->data; struct ps2_regs * const regs = config->regs; uint8_t __attribute__((unused)) temp; if (!callback_isr) { return -EINVAL; } data->callback_isr = callback_isr; /* In case the self test for a PS2 device already finished and * set the SOURCE bit to 1 we clear it before enabling the * interrupts. Instances must be allocated before the BAT * (Basic Assurance Test) or the host may time out. */ temp = regs->TRX_BUFF; regs->STATUS = MCHP_PS2_STATUS_RW1C_MASK; /* clear next higher level */ ps2_xec_girq_clr(config->girq_id, config->girq_bit); /* Enable FSM and init instance in rx mode*/ regs->CTRL = MCHP_PS2_CTRL_EN_POS; /* We enable the interrupts in the EC aggregator so that the * result can be forwarded to the ARM NVIC */ ps2_xec_girq_en(config->girq_id, config->girq_bit); k_sem_give(&data->tx_lock); return 0; } static int ps2_xec_write(const struct device *dev, uint8_t value) { const struct ps2_xec_config * const config = dev->config; struct ps2_xec_data * const data = dev->data; struct ps2_regs * const regs = config->regs; int i = 0; uint8_t __attribute__((unused)) temp; if (k_sem_take(&data->tx_lock, K_NO_WAIT)) { return -EACCES; } /* Allow the PS2 controller to complete a RX transaction. This * is because the channel may be actively receiving data. * In addition, it is necessary to wait for a previous TX * transaction to complete. The PS2 block has a single * FSM. 
*/ while (((regs->STATUS & (MCHP_PS2_STATUS_RX_BUSY | MCHP_PS2_STATUS_TX_IDLE)) != MCHP_PS2_STATUS_TX_IDLE) && (i < PS2_TIMEOUT)) { k_busy_wait(50); i++; } if (unlikely(i == PS2_TIMEOUT)) { LOG_DBG("PS2 write timed out"); return -ETIMEDOUT; } pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES); /* Inhibit ps2 controller and clear status register */ regs->CTRL = 0x00; /* Read to clear data ready bit in the status register*/ temp = regs->TRX_BUFF; k_sleep(K_MSEC(1)); regs->STATUS = MCHP_PS2_STATUS_RW1C_MASK; /* Switch the interface to TX mode and enable state machine */ regs->CTRL = MCHP_PS2_CTRL_TR_TX | MCHP_PS2_CTRL_EN; /* Write value to TX/RX register */ regs->TRX_BUFF = value; k_sem_give(&data->tx_lock); return 0; } static int ps2_xec_inhibit_interface(const struct device *dev) { const struct ps2_xec_config * const config = dev->config; struct ps2_xec_data * const data = dev->data; struct ps2_regs * const regs = config->regs; if (k_sem_take(&data->tx_lock, K_MSEC(10)) != 0) { return -EACCES; } regs->CTRL = 0x00; regs->STATUS = MCHP_PS2_STATUS_RW1C_MASK; ps2_xec_girq_clr(config->girq_id, config->girq_bit); NVIC_ClearPendingIRQ(config->isr_nvic); k_sem_give(&data->tx_lock); return 0; } static int ps2_xec_enable_interface(const struct device *dev) { const struct ps2_xec_config * const config = dev->config; struct ps2_xec_data * const data = dev->data; struct ps2_regs * const regs = config->regs; ps2_xec_girq_clr(config->girq_id, config->girq_bit); regs->CTRL = MCHP_PS2_CTRL_EN; k_sem_give(&data->tx_lock); return 0; } #ifdef CONFIG_PM_DEVICE static int ps2_xec_pm_action(const struct device *dev, enum pm_device_action action) { const struct ps2_xec_config *const devcfg = dev->config; struct ps2_regs * const regs = devcfg->regs; int ret = 0; switch (action) { case PM_DEVICE_ACTION_RESUME: if (devcfg->wakeup_source) { /* Disable PS2 wake interrupt * Disable interrupt on PS2DAT pin */ if (devcfg->wakerx_gpio.port != NULL) { ret = 
gpio_pin_interrupt_configure_dt( &devcfg->wakerx_gpio, GPIO_INT_DISABLE); if (ret < 0) { LOG_ERR("Fail to disable PS2 wake interrupt (ret %d)", ret); return ret; } } ps2_xec_girq_dis(devcfg->girq_id_wk, devcfg->girq_bit_wk); ps2_xec_girq_clr(devcfg->girq_id_wk, devcfg->girq_bit_wk); } else { ret = pinctrl_apply_state(devcfg->pcfg, PINCTRL_STATE_DEFAULT); regs->CTRL |= MCHP_PS2_CTRL_EN; } break; case PM_DEVICE_ACTION_SUSPEND: if (devcfg->wakeup_source) { /* Enable PS2 wake interrupt * Configure Falling Edge Trigger interrupt on PS2DAT pin */ ps2_xec_girq_clr(devcfg->girq_id_wk, devcfg->girq_bit_wk); ps2_xec_girq_en(devcfg->girq_id_wk, devcfg->girq_bit_wk); if (devcfg->wakerx_gpio.port != NULL) { ret = gpio_pin_interrupt_configure_dt( &devcfg->wakerx_gpio, GPIO_INT_MODE_EDGE | GPIO_INT_TRIG_LOW); if (ret < 0) { LOG_ERR("Fail to enable PS2 wake interrupt(ret %d)", ret); return ret; } } } else { regs->CTRL &= ~MCHP_PS2_CTRL_EN; /* If application does not want to turn off PS2 pins it will * not define pinctrl-1 for this node. */ ret = pinctrl_apply_state(devcfg->pcfg, PINCTRL_STATE_SLEEP); if (ret == -ENOENT) { /* pinctrl-1 does not exist. 
*/ ret = 0; } } break; default: ret = -ENOTSUP; } return ret; } #endif /* CONFIG_PM_DEVICE */ static void ps2_xec_isr(const struct device *dev) { const struct ps2_xec_config * const config = dev->config; struct ps2_xec_data * const data = dev->data; struct ps2_regs * const regs = config->regs; uint32_t status; /* Read and clear status */ status = regs->STATUS; /* clear next higher level the GIRQ */ ps2_xec_girq_clr(config->girq_id, config->girq_bit); if (status & MCHP_PS2_STATUS_RXD_RDY) { pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES); regs->CTRL = 0x00; if (data->callback_isr) { data->callback_isr(dev, regs->TRX_BUFF); } pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES); } else if (status & (MCHP_PS2_STATUS_TX_TMOUT | MCHP_PS2_STATUS_TX_ST_TMOUT)) { /* Clear sticky bits and go to read mode */ regs->STATUS = MCHP_PS2_STATUS_RW1C_MASK; LOG_ERR("TX time out: %0x", status); pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES); } else if (status & (MCHP_PS2_STATUS_RX_TMOUT | MCHP_PS2_STATUS_PE | MCHP_PS2_STATUS_FE)) { /* catch and clear rx error if any */ regs->STATUS = MCHP_PS2_STATUS_RW1C_MASK; } else if (status & MCHP_PS2_STATUS_TX_IDLE) { /* Transfer completed, release the lock to enter low per mode */ pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES); } /* The control register reverts to RX automatically after * transmitting the data */ regs->CTRL = MCHP_PS2_CTRL_EN; } static const struct ps2_driver_api ps2_xec_driver_api = { .config = ps2_xec_configure, .read = NULL, .write = ps2_xec_write, .disable_callback = ps2_xec_inhibit_interface, .enable_callback = ps2_xec_enable_interface, }; static int ps2_xec_init(const struct device *dev) { const struct ps2_xec_config * const cfg = dev->config; struct ps2_xec_data * const data = dev->data; int ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); if (ret != 0) { LOG_ERR("XEC PS2 pinctrl init failed (%d)", ret); return ret; } 
ps2_xec_slp_en_clr(dev); k_sem_init(&data->tx_lock, 0, 1); cfg->irq_config_func(); return 0; } /* To enable wakeup on the PS2, the DTS needs to have two entries defined * in the corresponding PS2 node in the DTS specifying it as a wake source * and specifying the PS2DAT GPIO; example as below * * wakerx-gpios = <MCHP_GPIO_DECODE_115 GPIO_ACTIVE_HIGH> * wakeup-source; */ #ifdef CONFIG_PM_DEVICE #define XEC_PS2_PM_WAKEUP(n) \ .wakeup_source = (uint8_t)DT_INST_PROP_OR(n, wakeup_source, 0), \ .wakerx_gpio = GPIO_DT_SPEC_INST_GET_OR(n, wakerx_gpios, {0}), #else #define XEC_PS2_PM_WAKEUP(index) /* Not used */ #endif #define XEC_PS2_PINCTRL_CFG(inst) PINCTRL_DT_INST_DEFINE(inst) #define XEC_PS2_CONFIG(inst) \ static const struct ps2_xec_config ps2_xec_config_##inst = { \ .regs = (struct ps2_regs * const)(DT_INST_REG_ADDR(inst)), \ .isr_nvic = DT_INST_IRQN(inst), \ .girq_id = (uint8_t)(DT_INST_PROP_BY_IDX(inst, girqs, 0)), \ .girq_bit = (uint8_t)(DT_INST_PROP_BY_IDX(inst, girqs, 1)), \ .girq_id_wk = (uint8_t)(DT_INST_PROP_BY_IDX(inst, girqs, 2)), \ .girq_bit_wk = (uint8_t)(DT_INST_PROP_BY_IDX(inst, girqs, 3)), \ .pcr_idx = (uint8_t)(DT_INST_PROP_BY_IDX(inst, pcrs, 0)), \ .pcr_pos = (uint8_t)(DT_INST_PROP_BY_IDX(inst, pcrs, 1)), \ .irq_config_func = ps2_xec_irq_config_func_##inst, \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst), \ XEC_PS2_PM_WAKEUP(inst) \ } #define PS2_XEC_DEVICE(i) \ \ static void ps2_xec_irq_config_func_##i(void) \ { \ IRQ_CONNECT(DT_INST_IRQN(i), \ DT_INST_IRQ(i, priority), \ ps2_xec_isr, \ DEVICE_DT_INST_GET(i), 0); \ irq_enable(DT_INST_IRQN(i)); \ } \ \ static struct ps2_xec_data ps2_xec_port_data_##i; \ \ XEC_PS2_PINCTRL_CFG(i); \ \ XEC_PS2_CONFIG(i); \ \ PM_DEVICE_DT_INST_DEFINE(i, ps2_xec_pm_action); \ \ DEVICE_DT_INST_DEFINE(i, &ps2_xec_init, \ PM_DEVICE_DT_INST_GET(i), \ &ps2_xec_port_data_##i, &ps2_xec_config_##i, \ POST_KERNEL, CONFIG_PS2_INIT_PRIORITY, \ &ps2_xec_driver_api); DT_INST_FOREACH_STATUS_OKAY(PS2_XEC_DEVICE) ```
/content/code_sandbox/drivers/ps2/ps2_mchp_xec.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,700
```c /**
 *
 */

/**
 * @file
 *
 * fcntl.h related code common to Zephyr (top: nsos_sockets.c) and Linux
 * (bottom: nsos_adapt.c).
 *
 * It is needed by both sides to share the same macro definitions/values
 * (prefixed with NSOS_MID_), which is not possible to achieve with two separate
 * standard libc libraries, since they use different values for the same
 * symbols.
 */

/*
 * When building for Zephyr, use Zephyr specific fcntl definitions.
 */
#ifdef __ZEPHYR__
#include <zephyr/posix/fcntl.h>
#else
#include <fcntl.h>
#endif

#include "nsos_errno.h"
#include "nsos_fcntl.h"

#include <stdbool.h>

/*
 * Translate native O_* file-status flags to their NSOS_MID_ equivalents.
 * Each recognized flag is removed from 'flags'; with strict=true, any
 * leftover (untranslatable) flag makes the call fail with -NSOS_MID_EINVAL.
 */
static int fl_to_nsos_mid_(int flags, bool strict)
{
	int flags_mid = 0;

#define TO_NSOS_MID(_flag)				\
	if (flags & (_flag)) {				\
		flags &= ~(_flag);			\
		flags_mid |= NSOS_MID_ ## _flag;	\
	}

	TO_NSOS_MID(O_RDONLY);
	TO_NSOS_MID(O_WRONLY);
	TO_NSOS_MID(O_RDWR);

	TO_NSOS_MID(O_APPEND);
	TO_NSOS_MID(O_EXCL);
	TO_NSOS_MID(O_NONBLOCK);

#undef TO_NSOS_MID

	if (strict && flags != 0) {
		return -NSOS_MID_EINVAL;
	}

	return flags_mid;
}

/* Lenient conversion: unknown flags are silently dropped. */
int fl_to_nsos_mid(int flags)
{
	return fl_to_nsos_mid_(flags, false);
}

/* Strict conversion: unknown flags yield -NSOS_MID_EINVAL. */
int fl_to_nsos_mid_strict(int flags)
{
	return fl_to_nsos_mid_(flags, true);
}

/* Reverse mapping: NSOS_MID_ flags back to native O_* flags. */
int fl_from_nsos_mid(int flags_mid)
{
	int flags = 0;

#define FROM_NSOS_MID(_flag)				\
	if (flags_mid & NSOS_MID_ ## _flag) {		\
		flags_mid &= ~NSOS_MID_ ## _flag;	\
		flags |= _flag;				\
	}

	FROM_NSOS_MID(O_RDONLY);
	FROM_NSOS_MID(O_WRONLY);
	FROM_NSOS_MID(O_RDWR);

	FROM_NSOS_MID(O_APPEND);
	FROM_NSOS_MID(O_EXCL);
	FROM_NSOS_MID(O_NONBLOCK);

#undef FROM_NSOS_MID

	return flags;
}
```
/content/code_sandbox/drivers/net/nsos_fcntl.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
506
```objective-c /**
 *
 */

/*
 * Middle-layer ("MID") types and constants shared between the Zephyr side
 * and the Linux host side of the native-sim offloaded sockets driver.
 * They mirror the usual BSD socket definitions with stable NSOS_MID_ values
 * so both libcs agree on the wire format; nsos_adapt_* functions are the
 * host-side entry points implemented in nsos_adapt.c.
 */

#ifndef __DRIVERS_NET_NSOS_H__
#define __DRIVERS_NET_NSOS_H__

#include <stddef.h>
#include <stdint.h>

/* Protocol families. */
#define NSOS_MID_PF_UNSPEC       0          /**< Unspecified protocol family.  */
#define NSOS_MID_PF_INET         1          /**< IP protocol family version 4. */
#define NSOS_MID_PF_INET6        2          /**< IP protocol family version 6. */

/* Address families. */
#define NSOS_MID_AF_UNSPEC      NSOS_MID_PF_UNSPEC   /**< Unspecified address family. */
#define NSOS_MID_AF_INET        NSOS_MID_PF_INET     /**< IP protocol family version 4. */
#define NSOS_MID_AF_INET6       NSOS_MID_PF_INET6    /**< IP protocol family version 6. */

/** Protocol numbers from IANA/BSD */
enum nsos_mid_net_ip_protocol {
	NSOS_MID_IPPROTO_IP = 0,            /**< IP protocol (pseudo-val for setsockopt() */
	NSOS_MID_IPPROTO_ICMP = 1,          /**< ICMP protocol   */
	NSOS_MID_IPPROTO_IGMP = 2,          /**< IGMP protocol   */
	NSOS_MID_IPPROTO_IPIP = 4,          /**< IPIP tunnels    */
	NSOS_MID_IPPROTO_TCP = 6,           /**< TCP protocol    */
	NSOS_MID_IPPROTO_UDP = 17,          /**< UDP protocol    */
	NSOS_MID_IPPROTO_IPV6 = 41,         /**< IPv6 protocol   */
	NSOS_MID_IPPROTO_ICMPV6 = 58,       /**< ICMPv6 protocol */
	NSOS_MID_IPPROTO_RAW = 255,         /**< RAW IP packets  */
};

/** Socket type */
enum nsos_mid_net_sock_type {
	NSOS_MID_SOCK_STREAM = 1,           /**< Stream socket type   */
	NSOS_MID_SOCK_DGRAM,                /**< Datagram socket type */
	NSOS_MID_SOCK_RAW                   /**< RAW socket type      */
};

#define NSOS_MID_MSG_PEEK 0x02
#define NSOS_MID_MSG_TRUNC 0x20
#define NSOS_MID_MSG_DONTWAIT 0x40
#define NSOS_MID_MSG_WAITALL 0x100

struct nsos_mid_sockaddr {
	uint16_t sa_family;      /* Address family */
	char     sa_data[];      /* Socket address */
};

struct nsos_mid_sockaddr_in {
	uint16_t sin_family;     /* AF_INET */
	uint16_t sin_port;       /* Port number */
	uint32_t sin_addr;       /* IPv4 address */
};

struct nsos_mid_sockaddr_in6 {
	uint16_t sin6_family;    /* AF_INET6 */
	uint16_t sin6_port;      /* Port number */
	uint8_t  sin6_addr[16];
	uint32_t sin6_scope_id;  /* Set of interfaces for a scope */
};

/* Large enough to hold either address family (cf. sockaddr_storage) */
struct nsos_mid_sockaddr_storage {
	union {
		struct nsos_mid_sockaddr_in sockaddr_in;
		struct nsos_mid_sockaddr_in6 sockaddr_in6;
	};
};

struct nsos_mid_pollfd {
	int fd;
	short events;
	short revents;

	/* Completion callback invoked by the adapt layer on poll events */
	void (*cb)(struct nsos_mid_pollfd *pollfd_mid);
};

struct nsos_mid_addrinfo {
	int ai_flags;
	int ai_family;
	int ai_socktype;
	int ai_protocol;
	size_t ai_addrlen;
	struct nsos_mid_sockaddr *ai_addr;
	char *ai_canonname;
	struct nsos_mid_addrinfo *ai_next;
};

struct nsos_mid_iovec {
	void  *iov_base;
	size_t iov_len;
};

struct nsos_mid_msghdr {
	void *msg_name;          /* optional socket address, big endian */
	size_t msg_namelen;      /* size of socket address */
	struct nsos_mid_iovec *msg_iov; /* scatter/gather array */
	size_t msg_iovlen;       /* number of elements in msg_iov */
	void *msg_control;       /* ancillary data */
	size_t msg_controllen;   /* ancillary data buffer len */
	int msg_flags;           /* flags on received message */
};

/* Move one flag bit from *flags_a (flag_a) to *flags_b (flag_b), if set. */
static inline void nsos_socket_flag_convert(int *flags_a, int flag_a,
					    int *flags_b, int flag_b)
{
	if ((*flags_a) & flag_a) {
		*flags_a &= ~flag_a;
		*flags_b |= flag_b;
	}
}

int nsos_adapt_get_errno(void);

int nsos_adapt_socket(int family, int type, int proto);

int nsos_adapt_bind(int fd, const struct nsos_mid_sockaddr *addr, size_t addrlen);
int nsos_adapt_connect(int fd, const struct nsos_mid_sockaddr *addr, size_t addrlen);
int nsos_adapt_listen(int fd, int backlog);
int nsos_adapt_accept(int fd, struct nsos_mid_sockaddr *addr, size_t *addrlen);
int nsos_adapt_sendto(int fd, const void *buf, size_t len, int flags,
		      const struct nsos_mid_sockaddr *addr, size_t addrlen);
int nsos_adapt_sendmsg(int fd, const struct nsos_mid_msghdr *msg_mid, int flags);
int nsos_adapt_recvfrom(int fd, void *buf, size_t len, int flags,
			struct nsos_mid_sockaddr *addr, size_t *addrlen);
int nsos_adapt_getsockopt(int fd, int level, int optname,
			  void *optval, size_t *optlen);
int nsos_adapt_setsockopt(int fd, int level, int optname,
			  const void *optval, size_t optlen);

void nsos_adapt_poll_add(struct nsos_mid_pollfd *pollfd);
void nsos_adapt_poll_remove(struct nsos_mid_pollfd *pollfd);
void nsos_adapt_poll_update(struct nsos_mid_pollfd *pollfd);

int nsos_adapt_fcntl_getfl(int fd);
int nsos_adapt_fcntl_setfl(int fd, int flags);
int nsos_adapt_fionread(int fd, int *avail);

int nsos_adapt_dup(int oldfd);

int nsos_adapt_getaddrinfo(const char *node, const char *service,
			   const struct nsos_mid_addrinfo *hints,
			   struct nsos_mid_addrinfo **res,
			   int *system_errno);
void nsos_adapt_freeaddrinfo(struct nsos_mid_addrinfo *res);

#endif /* __DRIVERS_NET_NSOS_H__ */
```
/content/code_sandbox/drivers/net/nsos.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,383
```c /**
 *
 *
 * netdb.h related code common to Zephyr (top: nsos_sockets.c) and Linux
 * (bottom: nsos_adapt.c).
 *
 * It is needed by both sides to share the same macro definitions/values
 * (prefixed with NSOS_MID_), which is not possible to achieve with two separate
 * standard libc libraries, since they use different values for the same
 * symbols.
 */

#include "nsos_netdb.h"

#ifdef __ZEPHYR__

#include <zephyr/net/socket.h>
/* Zephyr spells the getaddrinfo errors DNS_EAI_* */
#define ERR(_name)				\
	{ DNS_ ## _name, NSOS_MID_ ## _name }

#else

#include <netdb.h>
/* Host libc uses the plain POSIX EAI_* names */
#define ERR(_name)				\
	{ _name, NSOS_MID_ ## _name }

#endif

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(array) (sizeof(array) / sizeof((array)[0]))
#endif

/* One native-to-middle-layer error-code pair */
struct nsos_eai_map {
	int err;
	int mid_err;
};

static const struct nsos_eai_map map[] = {
	ERR(EAI_BADFLAGS),
	ERR(EAI_NONAME),
	ERR(EAI_AGAIN),
	ERR(EAI_FAIL),
	ERR(EAI_FAMILY),
	ERR(EAI_SOCKTYPE),
	ERR(EAI_SERVICE),
	ERR(EAI_MEMORY),
	ERR(EAI_SYSTEM),
	ERR(EAI_OVERFLOW),
};

/*
 * Map a native getaddrinfo error to its NSOS_MID_EAI_* value.
 * Unknown codes pass through unchanged.
 */
int eai_to_nsos_mid(int err)
{
	for (int i = 0; i < ARRAY_SIZE(map); i++) {
		if (map[i].err == err) {
			return map[i].mid_err;
		}
	}

	return err;
}

/*
 * Map an NSOS_MID_EAI_* value back to the native getaddrinfo error.
 * Unknown codes pass through unchanged.
 */
int eai_from_nsos_mid(int err)
{
	for (int i = 0; i < ARRAY_SIZE(map); i++) {
		if (map[i].mid_err == err) {
			return map[i].err;
		}
	}

	return err;
}
```
/content/code_sandbox/drivers/net/nsos_netdb.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
389
```objective-c
/**
 * Middle-ground errno encoding shared between the Zephyr (top) and host
 * (bottom) sides of NSOS. Both sides link against different libc's whose
 * errno values differ, so every error crossing the boundary is carried as
 * one of the NSOS_MID_* constants below and converted at each edge via
 * errno_to_nsos_mid()/errno_from_nsos_mid().
 *
 * NOTE(review): the numeric values and descriptions appear to mirror
 * Zephyr's errno numbering — confirm against zephyr/lib/libc errno.h
 * before adding new entries.
 */

#ifndef __DRIVERS_NET_NSOS_ERRNO_H__
#define __DRIVERS_NET_NSOS_ERRNO_H__

#include <errno.h>

#define NSOS_MID_EPERM 1         /**< Not owner */
#define NSOS_MID_ENOENT 2        /**< No such file or directory */
#define NSOS_MID_ESRCH 3         /**< No such context */
#define NSOS_MID_EINTR 4         /**< Interrupted system call */
#define NSOS_MID_EIO 5           /**< I/O error */
#define NSOS_MID_ENXIO 6         /**< No such device or address */
#define NSOS_MID_E2BIG 7         /**< Arg list too long */
#define NSOS_MID_ENOEXEC 8       /**< Exec format error */
#define NSOS_MID_EBADF 9         /**< Bad file number */
#define NSOS_MID_ECHILD 10       /**< No children */
#define NSOS_MID_EAGAIN 11       /**< No more contexts */
#define NSOS_MID_ENOMEM 12       /**< Not enough core */
#define NSOS_MID_EACCES 13       /**< Permission denied */
#define NSOS_MID_EFAULT 14       /**< Bad address */
#define NSOS_MID_ENOTBLK 15      /**< Block device required */
#define NSOS_MID_EBUSY 16        /**< Mount device busy */
#define NSOS_MID_EEXIST 17       /**< File exists */
#define NSOS_MID_EXDEV 18        /**< Cross-device link */
#define NSOS_MID_ENODEV 19       /**< No such device */
#define NSOS_MID_ENOTDIR 20      /**< Not a directory */
#define NSOS_MID_EISDIR 21       /**< Is a directory */
#define NSOS_MID_EINVAL 22       /**< Invalid argument */
#define NSOS_MID_ENFILE 23       /**< File table overflow */
#define NSOS_MID_EMFILE 24       /**< Too many open files */
#define NSOS_MID_ENOTTY 25       /**< Not a typewriter */
#define NSOS_MID_ETXTBSY 26      /**< Text file busy */
#define NSOS_MID_EFBIG 27        /**< File too large */
#define NSOS_MID_ENOSPC 28       /**< No space left on device */
#define NSOS_MID_ESPIPE 29       /**< Illegal seek */
#define NSOS_MID_EROFS 30        /**< Read-only file system */
#define NSOS_MID_EMLINK 31       /**< Too many links */
#define NSOS_MID_EPIPE 32        /**< Broken pipe */
#define NSOS_MID_EDOM 33         /**< Argument too large */
#define NSOS_MID_ERANGE 34       /**< Result too large */
#define NSOS_MID_ENOMSG 35       /**< Unexpected message type */
#define NSOS_MID_EDEADLK 45      /**< Resource deadlock avoided */
#define NSOS_MID_ENOLCK 46       /**< No locks available */
#define NSOS_MID_ENOSTR 60       /**< STREAMS device required */
#define NSOS_MID_ENODATA 61      /**< Missing expected message data */
#define NSOS_MID_ETIME 62        /**< STREAMS timeout occurred */
#define NSOS_MID_ENOSR 63        /**< Insufficient memory */
#define NSOS_MID_EPROTO 71       /**< Generic STREAMS error */
#define NSOS_MID_EBADMSG 77      /**< Invalid STREAMS message */
#define NSOS_MID_ENOSYS 88       /**< Function not implemented */
#define NSOS_MID_ENOTEMPTY 90    /**< Directory not empty */
#define NSOS_MID_ENAMETOOLONG 91 /**< File name too long */
#define NSOS_MID_ELOOP 92        /**< Too many levels of symbolic links */
#define NSOS_MID_EOPNOTSUPP 95   /**< Operation not supported on socket */
#define NSOS_MID_EPFNOSUPPORT 96 /**< Protocol family not supported */
#define NSOS_MID_ECONNRESET 104  /**< Connection reset by peer */
#define NSOS_MID_ENOBUFS 105     /**< No buffer space available */
#define NSOS_MID_EAFNOSUPPORT 106 /**< Addr family not supported */
#define NSOS_MID_EPROTOTYPE 107  /**< Protocol wrong type for socket */
#define NSOS_MID_ENOTSOCK 108    /**< Socket operation on non-socket */
#define NSOS_MID_ENOPROTOOPT 109 /**< Protocol not available */
#define NSOS_MID_ESHUTDOWN 110   /**< Can't send after socket shutdown */
#define NSOS_MID_ECONNREFUSED 111 /**< Connection refused */
#define NSOS_MID_EADDRINUSE 112  /**< Address already in use */
#define NSOS_MID_ECONNABORTED 113 /**< Software caused connection abort */
#define NSOS_MID_ENETUNREACH 114 /**< Network is unreachable */
#define NSOS_MID_ENETDOWN 115    /**< Network is down */
#define NSOS_MID_ETIMEDOUT 116   /**< Connection timed out */
#define NSOS_MID_EHOSTDOWN 117   /**< Host is down */
#define NSOS_MID_EHOSTUNREACH 118 /**< No route to host */
#define NSOS_MID_EINPROGRESS 119 /**< Operation now in progress */
#define NSOS_MID_EALREADY 120    /**< Operation already in progress */
#define NSOS_MID_EDESTADDRREQ 121 /**< Destination address required */
#define NSOS_MID_EMSGSIZE 122    /**< Message size */
#define NSOS_MID_EPROTONOSUPPORT 123 /**< Protocol not supported */
#define NSOS_MID_ESOCKTNOSUPPORT 124 /**< Socket type not supported */
#define NSOS_MID_EADDRNOTAVAIL 125 /**< Can't assign requested address */
#define NSOS_MID_ENETRESET 126   /**< Network dropped connection on reset */
#define NSOS_MID_EISCONN 127     /**< Socket is already connected */
#define NSOS_MID_ENOTCONN 128    /**< Socket is not connected */
#define NSOS_MID_ETOOMANYREFS 129 /**< Too many references: can't splice */
#define NSOS_MID_ENOTSUP 134     /**< Unsupported value */
#define NSOS_MID_EILSEQ 138      /**< Illegal byte sequence */
#define NSOS_MID_EOVERFLOW 139   /**< Value overflow */
#define NSOS_MID_ECANCELED 140   /**< Operation canceled */

/* Convert the local libc errno to/from the shared NSOS_MID_* encoding. */
int errno_to_nsos_mid(int err);
int errno_from_nsos_mid(int err);

#endif /* __DRIVERS_NET_NSOS_ERRNO_H__ */
```
/content/code_sandbox/drivers/net/nsos_errno.h
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,367
```c /* * * */ #include <zephyr/net/net_pkt.h> #include <zephyr/net/canbus.h> #include <zephyr/net/socketcan.h> #include <zephyr/drivers/can.h> #include <zephyr/devicetree.h> #include <zephyr/device.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(net_canbus, CONFIG_NET_CANBUS_LOG_LEVEL); #define SEND_TIMEOUT K_MSEC(100) struct net_canbus_context { struct net_if *iface; }; struct net_canbus_config { const struct device *can_dev; }; static void net_canbus_recv(const struct device *dev, struct can_frame *frame, void *user_data) { struct net_canbus_context *ctx = user_data; struct net_pkt *pkt; int ret; ARG_UNUSED(dev); LOG_DBG("pkt on interface %p", ctx->iface); pkt = net_pkt_rx_alloc_with_buffer(ctx->iface, sizeof(struct can_frame), AF_CAN, 0, K_NO_WAIT); if (pkt == NULL) { LOG_ERR("Failed to obtain net_pkt"); return; } if (net_pkt_write(pkt, frame, sizeof(struct can_frame))) { LOG_ERR("Failed to append RX data"); net_pkt_unref(pkt); return; } ret = net_recv_data(ctx->iface, pkt); if (ret < 0) { LOG_DBG("net_recv_data failed [%d]", ret); net_pkt_unref(pkt); } } static int net_canbus_setsockopt(const struct device *dev, void *obj, int level, int optname, const void *optval, socklen_t optlen) { const struct net_canbus_config *cfg = dev->config; struct net_canbus_context *context = dev->data; struct net_context *ctx = obj; int ret; if (level != SOL_CAN_RAW && optname != CAN_RAW_FILTER) { errno = EINVAL; return -1; } __ASSERT_NO_MSG(optlen == sizeof(struct can_filter)); ret = can_add_rx_filter(cfg->can_dev, net_canbus_recv, context, optval); if (ret == -ENOSPC) { errno = ENOSPC; return -1; } net_context_set_can_filter_id(ctx, ret); return 0; } static void net_canbus_close(const struct device *dev, int filter_id) { const struct net_canbus_config *cfg = dev->config; can_remove_rx_filter(cfg->can_dev, filter_id); } static void net_canbus_send_tx_callback(const struct device *dev, int error, void *user_data) { ARG_UNUSED(dev); ARG_UNUSED(user_data); if (error != 0) { 
LOG_DBG("CAN bus TX error [%d]", error); } } static int net_canbus_send(const struct device *dev, struct net_pkt *pkt) { const struct net_canbus_config *cfg = dev->config; int ret; if (net_pkt_family(pkt) != AF_CAN) { return -EPFNOSUPPORT; } ret = can_send(cfg->can_dev, (struct can_frame *)pkt->frags->data, SEND_TIMEOUT, net_canbus_send_tx_callback, NULL); if (ret == 0) { net_pkt_unref(pkt); } else { LOG_DBG("Cannot send CAN msg (%d)", ret); } /* If something went wrong, then we need to return negative value to * net_if.c:net_if_tx() so that the net_pkt will get released. */ return ret; } static void net_canbus_iface_init(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); struct net_canbus_context *context = dev->data; context->iface = iface; LOG_DBG("Init CAN interface %p dev %p", iface, dev); } static int net_canbus_init(const struct device *dev) { const struct net_canbus_config *cfg = dev->config; if (!device_is_ready(cfg->can_dev)) { LOG_ERR("CAN device not ready"); return -ENODEV; } return 0; } static struct canbus_api net_canbus_api = { .iface_api.init = net_canbus_iface_init, .send = net_canbus_send, .close = net_canbus_close, .setsockopt = net_canbus_setsockopt, }; static struct net_canbus_context net_canbus_ctx; static const struct net_canbus_config net_canbus_cfg = { .can_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_canbus)) }; NET_DEVICE_INIT(net_canbus, "NET_CANBUS", net_canbus_init, NULL, &net_canbus_ctx, &net_canbus_cfg, CONFIG_NET_CANBUS_INIT_PRIORITY, &net_canbus_api, CANBUS_RAW_L2, NET_L2_GET_CTX_TYPE(CANBUS_RAW_L2), CAN_MTU); ```
/content/code_sandbox/drivers/net/canbus.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,041
```c /** * */ /** * @file * * Zephyr (top) side of NSOS (Native Simulator Offloaded Sockets). */ #undef _POSIX_C_SOURCE #define _POSIX_C_SOURCE 200809L #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(nsos_sockets); #include <soc.h> #include <string.h> #include <zephyr/net/ethernet.h> #include <zephyr/net/net_ip.h> #include <zephyr/net/offloaded_netdev.h> #include <zephyr/net/socket_offload.h> #include <zephyr/posix/fcntl.h> #include <zephyr/sys/fdtable.h> #include <zephyr/sys/dlist.h> #include "sockets_internal.h" #include "nsos.h" #include "nsos_errno.h" #include "nsos_fcntl.h" #include "nsos_netdb.h" #include "nsos_socket.h" #include "nsi_host_trampolines.h" BUILD_ASSERT(CONFIG_HEAP_MEM_POOL_SIZE > 0); #define NSOS_IRQ_FLAGS (0) #define NSOS_IRQ_PRIORITY (2) struct nsos_socket; struct nsos_socket_poll { struct nsos_mid_pollfd mid; struct k_poll_signal signal; sys_dnode_t node; }; struct nsos_socket { int fd; k_timeout_t recv_timeout; k_timeout_t send_timeout; struct nsos_socket_poll poll; }; static sys_dlist_t nsos_polls = SYS_DLIST_STATIC_INIT(&nsos_polls); static int socket_family_to_nsos_mid(int family, int *family_mid) { switch (family) { case AF_UNSPEC: *family_mid = NSOS_MID_AF_UNSPEC; break; case AF_INET: *family_mid = NSOS_MID_AF_INET; break; case AF_INET6: *family_mid = NSOS_MID_AF_INET6; break; default: return -NSOS_MID_EAFNOSUPPORT; } return 0; } static int socket_proto_to_nsos_mid(int proto, int *proto_mid) { switch (proto) { case IPPROTO_IP: *proto_mid = NSOS_MID_IPPROTO_IP; break; case IPPROTO_ICMP: *proto_mid = NSOS_MID_IPPROTO_ICMP; break; case IPPROTO_IGMP: *proto_mid = NSOS_MID_IPPROTO_IGMP; break; case IPPROTO_IPIP: *proto_mid = NSOS_MID_IPPROTO_IPIP; break; case IPPROTO_TCP: *proto_mid = NSOS_MID_IPPROTO_TCP; break; case IPPROTO_UDP: *proto_mid = NSOS_MID_IPPROTO_UDP; break; case IPPROTO_IPV6: *proto_mid = NSOS_MID_IPPROTO_IPV6; break; case IPPROTO_RAW: *proto_mid = NSOS_MID_IPPROTO_RAW; break; default: return -NSOS_MID_EPROTONOSUPPORT; } 
return 0; } static int socket_type_to_nsos_mid(int type, int *type_mid) { switch (type) { case SOCK_STREAM: *type_mid = NSOS_MID_SOCK_STREAM; break; case SOCK_DGRAM: *type_mid = NSOS_MID_SOCK_DGRAM; break; case SOCK_RAW: *type_mid = NSOS_MID_SOCK_RAW; break; default: return -NSOS_MID_ESOCKTNOSUPPORT; } return 0; } static int socket_flags_to_nsos_mid(int flags) { int flags_mid = 0; nsos_socket_flag_convert(&flags, ZSOCK_MSG_PEEK, &flags_mid, NSOS_MID_MSG_PEEK); nsos_socket_flag_convert(&flags, ZSOCK_MSG_TRUNC, &flags_mid, NSOS_MID_MSG_TRUNC); nsos_socket_flag_convert(&flags, ZSOCK_MSG_DONTWAIT, &flags_mid, NSOS_MID_MSG_DONTWAIT); nsos_socket_flag_convert(&flags, ZSOCK_MSG_WAITALL, &flags_mid, NSOS_MID_MSG_WAITALL); if (flags != 0) { return -NSOS_MID_EINVAL; } return flags_mid; } static const struct socket_op_vtable nsos_socket_fd_op_vtable; static int nsos_socket_create(int family, int type, int proto) { int fd; struct nsos_socket *sock; int family_mid; int type_mid; int proto_mid; int err; err = socket_family_to_nsos_mid(family, &family_mid); if (err) { errno = errno_from_nsos_mid(-err); return -1; } err = socket_type_to_nsos_mid(type, &type_mid); if (err) { errno = errno_from_nsos_mid(-err); return -1; } err = socket_proto_to_nsos_mid(proto, &proto_mid); if (err) { errno = errno_from_nsos_mid(-err); return -1; } fd = zvfs_reserve_fd(); if (fd < 0) { return -1; } sock = k_malloc(sizeof(*sock)); if (!sock) { errno = ENOMEM; goto free_fd; } sock->fd = fd; sock->recv_timeout = K_FOREVER; sock->send_timeout = K_FOREVER; sock->poll.mid.fd = nsos_adapt_socket(family_mid, type_mid, proto_mid); if (sock->poll.mid.fd < 0) { errno = errno_from_nsos_mid(-sock->poll.mid.fd); goto free_sock; } zvfs_finalize_typed_fd(fd, sock, &nsos_socket_fd_op_vtable.fd_vtable, ZVFS_MODE_IFSOCK); return fd; free_sock: k_free(sock); free_fd: zvfs_free_fd(fd); return -1; } static int nsos_adapt_get_zephyr_errno(void) { return errno_from_nsos_mid(nsos_adapt_get_errno()); } static ssize_t 
nsos_read(void *obj, void *buf, size_t sz) { struct nsos_socket *sock = obj; int ret; ret = nsi_host_read(sock->poll.mid.fd, buf, sz); if (ret < 0) { errno = nsos_adapt_get_zephyr_errno(); } return ret; } static ssize_t nsos_write(void *obj, const void *buf, size_t sz) { struct nsos_socket *sock = obj; int ret; ret = nsi_host_write(sock->poll.mid.fd, buf, sz); if (ret < 0) { errno = nsos_adapt_get_zephyr_errno(); } return ret; } static int nsos_close(void *obj) { struct nsos_socket *sock = obj; int ret; ret = nsi_host_close(sock->poll.mid.fd); if (ret < 0) { errno = nsos_adapt_get_zephyr_errno(); } return ret; } static void pollcb(struct nsos_mid_pollfd *mid) { struct nsos_socket_poll *poll = CONTAINER_OF(mid, struct nsos_socket_poll, mid); k_poll_signal_raise(&poll->signal, poll->mid.revents); } static int nsos_poll_prepare(struct nsos_socket *sock, struct zsock_pollfd *pfd, struct k_poll_event **pev, struct k_poll_event *pev_end, struct nsos_socket_poll *poll) { unsigned int signaled; int flags; poll->mid.events = pfd->events; poll->mid.revents = 0; poll->mid.cb = pollcb; if (*pev == pev_end) { return -ENOMEM; } k_poll_signal_init(&poll->signal); k_poll_event_init(*pev, K_POLL_TYPE_SIGNAL, K_POLL_MODE_NOTIFY_ONLY, &poll->signal); sys_dlist_append(&nsos_polls, &poll->node); nsos_adapt_poll_add(&poll->mid); /* Let other sockets use another k_poll_event */ (*pev)++; signaled = 0; flags = 0; k_poll_signal_check(&poll->signal, &signaled, &flags); if (!signaled) { return 0; } /* Events are ready, don't wait */ return -EALREADY; } static int nsos_poll_update(struct nsos_socket *sock, struct zsock_pollfd *pfd, struct k_poll_event **pev, struct nsos_socket_poll *poll) { unsigned int signaled; int flags; (*pev)++; signaled = 0; flags = 0; if (!sys_dnode_is_linked(&poll->node)) { nsos_adapt_poll_update(&poll->mid); return 0; } nsos_adapt_poll_remove(&poll->mid); sys_dlist_remove(&poll->node); k_poll_signal_check(&poll->signal, &signaled, &flags); if (!signaled) { return 0; 
} pfd->revents = flags; return 0; } static int nsos_ioctl(void *obj, unsigned int request, va_list args) { struct nsos_socket *sock = obj; switch (request) { case ZFD_IOCTL_POLL_PREPARE: { struct zsock_pollfd *pfd; struct k_poll_event **pev; struct k_poll_event *pev_end; pfd = va_arg(args, struct zsock_pollfd *); pev = va_arg(args, struct k_poll_event **); pev_end = va_arg(args, struct k_poll_event *); return nsos_poll_prepare(obj, pfd, pev, pev_end, &sock->poll); } case ZFD_IOCTL_POLL_UPDATE: { struct zsock_pollfd *pfd; struct k_poll_event **pev; pfd = va_arg(args, struct zsock_pollfd *); pev = va_arg(args, struct k_poll_event **); return nsos_poll_update(obj, pfd, pev, &sock->poll); } case ZFD_IOCTL_POLL_OFFLOAD: return -EOPNOTSUPP; case F_GETFL: { int flags; flags = nsos_adapt_fcntl_getfl(sock->poll.mid.fd); return fl_from_nsos_mid(flags); } case F_SETFL: { int flags = va_arg(args, int); int ret; ret = fl_to_nsos_mid_strict(flags); if (ret < 0) { return -errno_from_nsos_mid(-ret); } ret = nsos_adapt_fcntl_setfl(sock->poll.mid.fd, flags); return -errno_from_nsos_mid(-ret); } case ZFD_IOCTL_FIONREAD: { int *avail = va_arg(args, int *); int ret; ret = nsos_adapt_fionread(sock->poll.mid.fd, avail); return -errno_from_nsos_mid(-ret); } } return -EINVAL; } static int sockaddr_to_nsos_mid(const struct sockaddr *addr, socklen_t addrlen, struct nsos_mid_sockaddr **addr_mid, size_t *addrlen_mid) { if (!addr || !addrlen) { *addr_mid = NULL; *addrlen_mid = 0; return 0; } switch (addr->sa_family) { case AF_INET: { const struct sockaddr_in *addr_in = (const struct sockaddr_in *)addr; struct nsos_mid_sockaddr_in *addr_in_mid = (struct nsos_mid_sockaddr_in *)*addr_mid; if (addrlen < sizeof(*addr_in)) { return -NSOS_MID_EINVAL; } addr_in_mid->sin_family = NSOS_MID_AF_INET; addr_in_mid->sin_port = addr_in->sin_port; addr_in_mid->sin_addr = addr_in->sin_addr.s_addr; *addrlen_mid = sizeof(*addr_in_mid); return 0; } case AF_INET6: { const struct sockaddr_in6 *addr_in = (const struct 
sockaddr_in6 *)addr; struct nsos_mid_sockaddr_in6 *addr_in_mid = (struct nsos_mid_sockaddr_in6 *)*addr_mid; if (addrlen < sizeof(*addr_in)) { return -NSOS_MID_EINVAL; } addr_in_mid->sin6_family = NSOS_MID_AF_INET6; addr_in_mid->sin6_port = addr_in->sin6_port; memcpy(addr_in_mid->sin6_addr, addr_in->sin6_addr.s6_addr, sizeof(addr_in_mid->sin6_addr)); addr_in_mid->sin6_scope_id = addr_in->sin6_scope_id; *addrlen_mid = sizeof(*addr_in_mid); return 0; } } return -NSOS_MID_EINVAL; } static int sockaddr_from_nsos_mid(struct sockaddr *addr, socklen_t *addrlen, const struct nsos_mid_sockaddr *addr_mid, size_t addrlen_mid) { if (!addr || !addrlen) { return 0; } switch (addr_mid->sa_family) { case NSOS_MID_AF_INET: { const struct nsos_mid_sockaddr_in *addr_in_mid = (const struct nsos_mid_sockaddr_in *)addr_mid; struct sockaddr_in addr_in; addr_in.sin_family = AF_INET; addr_in.sin_port = addr_in_mid->sin_port; addr_in.sin_addr.s_addr = addr_in_mid->sin_addr; memcpy(addr, &addr_in, MIN(*addrlen, sizeof(addr_in))); *addrlen = sizeof(addr_in); return 0; } case NSOS_MID_AF_INET6: { const struct nsos_mid_sockaddr_in6 *addr_in_mid = (const struct nsos_mid_sockaddr_in6 *)addr_mid; struct sockaddr_in6 addr_in; addr_in.sin6_family = AF_INET6; addr_in.sin6_port = addr_in_mid->sin6_port; memcpy(addr_in.sin6_addr.s6_addr, addr_in_mid->sin6_addr, sizeof(addr_in.sin6_addr.s6_addr)); addr_in.sin6_scope_id = addr_in_mid->sin6_scope_id; memcpy(addr, &addr_in, MIN(*addrlen, sizeof(addr_in))); *addrlen = sizeof(addr_in); return 0; } } return -NSOS_MID_EINVAL; } static int nsos_wait_for_poll(struct nsos_socket *sock, int events, k_timeout_t timeout) { struct zsock_pollfd pfd = { .fd = sock->fd, .events = events, }; struct k_poll_event poll_events[1]; struct k_poll_event *pev = poll_events; struct k_poll_event *pev_end = poll_events + ARRAY_SIZE(poll_events); struct nsos_socket_poll socket_poll = {}; int ret; ret = nsos_adapt_dup(sock->poll.mid.fd); if (ret < 0) { goto return_ret; } 
socket_poll.mid.fd = ret; ret = nsos_poll_prepare(sock, &pfd, &pev, pev_end, &socket_poll); if (ret == -EALREADY) { ret = 0; goto poll_update; } else if (ret < 0) { goto close_dup; } ret = k_poll(poll_events, ARRAY_SIZE(poll_events), timeout); if (ret != 0 && ret != -EAGAIN && ret != -EINTR) { goto poll_update; } ret = 0; poll_update: pev = poll_events; nsos_poll_update(sock, &pfd, &pev, &socket_poll); close_dup: nsi_host_close(socket_poll.mid.fd); return_ret: if (ret < 0) { return -errno_to_nsos_mid(-ret); } return 0; } static int nsos_poll_if_blocking(struct nsos_socket *sock, int events, k_timeout_t timeout, int flags) { int sock_flags; bool non_blocking; if (flags & ZSOCK_MSG_DONTWAIT) { non_blocking = true; } else { sock_flags = nsos_adapt_fcntl_getfl(sock->poll.mid.fd); non_blocking = sock_flags & NSOS_MID_O_NONBLOCK; } if (!non_blocking) { return nsos_wait_for_poll(sock, events, timeout); } return 0; } static int nsos_bind(void *obj, const struct sockaddr *addr, socklen_t addrlen) { struct nsos_socket *sock = obj; struct nsos_mid_sockaddr_storage addr_storage_mid; struct nsos_mid_sockaddr *addr_mid = (struct nsos_mid_sockaddr *)&addr_storage_mid; size_t addrlen_mid; int ret; ret = sockaddr_to_nsos_mid(addr, addrlen, &addr_mid, &addrlen_mid); if (ret < 0) { goto return_ret; } ret = nsos_adapt_bind(sock->poll.mid.fd, addr_mid, addrlen_mid); return_ret: if (ret < 0) { errno = errno_from_nsos_mid(-ret); return -1; } return ret; } static int nsos_connect_blocking(struct nsos_socket *sock, struct nsos_mid_sockaddr *addr_mid, size_t addrlen_mid, int fcntl_flags) { int clear_nonblock_ret; int ret; ret = nsos_adapt_fcntl_setfl(sock->poll.mid.fd, fcntl_flags | NSOS_MID_O_NONBLOCK); if (ret < 0) { return ret; } ret = nsos_adapt_connect(sock->poll.mid.fd, addr_mid, addrlen_mid); if (ret == -NSOS_MID_EINPROGRESS) { int so_err; size_t so_err_len = sizeof(so_err); ret = nsos_wait_for_poll(sock, ZSOCK_POLLOUT, sock->send_timeout); if (ret < 0) { goto clear_nonblock; } ret = 
nsos_adapt_getsockopt(sock->poll.mid.fd, NSOS_MID_SOL_SOCKET, NSOS_MID_SO_ERROR, &so_err, &so_err_len); if (ret < 0) { goto clear_nonblock; } ret = so_err; } clear_nonblock: clear_nonblock_ret = nsos_adapt_fcntl_setfl(sock->poll.mid.fd, fcntl_flags); if (clear_nonblock_ret < 0) { LOG_ERR("Failed to clear O_NONBLOCK: %d", clear_nonblock_ret); } return ret; } static int nsos_connect(void *obj, const struct sockaddr *addr, socklen_t addrlen) { struct nsos_socket *sock = obj; struct nsos_mid_sockaddr_storage addr_storage_mid; struct nsos_mid_sockaddr *addr_mid = (struct nsos_mid_sockaddr *)&addr_storage_mid; size_t addrlen_mid; int flags; int ret; ret = sockaddr_to_nsos_mid(addr, addrlen, &addr_mid, &addrlen_mid); if (ret < 0) { goto return_ret; } flags = nsos_adapt_fcntl_getfl(sock->poll.mid.fd); if (flags & NSOS_MID_O_NONBLOCK) { ret = nsos_adapt_connect(sock->poll.mid.fd, addr_mid, addrlen_mid); } else { ret = nsos_connect_blocking(sock, addr_mid, addrlen_mid, flags); } return_ret: if (ret < 0) { errno = errno_from_nsos_mid(-ret); return -1; } return ret; } static int nsos_listen(void *obj, int backlog) { struct nsos_socket *sock = obj; int ret; ret = nsos_adapt_listen(sock->poll.mid.fd, backlog); if (ret < 0) { errno = errno_from_nsos_mid(-ret); return -1; } return ret; } static int nsos_accept(void *obj, struct sockaddr *addr, socklen_t *addrlen) { struct nsos_socket *accept_sock = obj; struct nsos_mid_sockaddr_storage addr_storage_mid; struct nsos_mid_sockaddr *addr_mid = (struct nsos_mid_sockaddr *)&addr_storage_mid; size_t addrlen_mid = sizeof(addr_storage_mid); int adapt_fd; int zephyr_fd; struct nsos_socket *conn_sock; int ret; ret = nsos_poll_if_blocking(accept_sock, ZSOCK_POLLIN, accept_sock->recv_timeout, 0); if (ret < 0) { goto return_ret; } ret = nsos_adapt_accept(accept_sock->poll.mid.fd, addr_mid, &addrlen_mid); if (ret < 0) { goto return_ret; } adapt_fd = ret; ret = sockaddr_from_nsos_mid(addr, addrlen, addr_mid, addrlen_mid); if (ret < 0) { goto 
close_adapt_fd; } zephyr_fd = zvfs_reserve_fd(); if (zephyr_fd < 0) { ret = -errno_to_nsos_mid(-zephyr_fd); goto close_adapt_fd; } conn_sock = k_malloc(sizeof(*conn_sock)); if (!conn_sock) { ret = -NSOS_MID_ENOMEM; goto free_zephyr_fd; } conn_sock->fd = zephyr_fd; conn_sock->poll.mid.fd = adapt_fd; zvfs_finalize_typed_fd(zephyr_fd, conn_sock, &nsos_socket_fd_op_vtable.fd_vtable, ZVFS_MODE_IFSOCK); return zephyr_fd; free_zephyr_fd: zvfs_free_fd(zephyr_fd); close_adapt_fd: nsi_host_close(adapt_fd); return_ret: errno = errno_from_nsos_mid(-ret); return -1; } static ssize_t nsos_sendto(void *obj, const void *buf, size_t len, int flags, const struct sockaddr *addr, socklen_t addrlen) { struct nsos_socket *sock = obj; struct nsos_mid_sockaddr_storage addr_storage_mid; struct nsos_mid_sockaddr *addr_mid = (struct nsos_mid_sockaddr *)&addr_storage_mid; size_t addrlen_mid = sizeof(addr_storage_mid); int flags_mid; int ret; ret = socket_flags_to_nsos_mid(flags); if (ret < 0) { goto return_ret; } flags_mid = ret; ret = sockaddr_to_nsos_mid(addr, addrlen, &addr_mid, &addrlen_mid); if (ret < 0) { goto return_ret; } ret = nsos_poll_if_blocking(sock, ZSOCK_POLLOUT, sock->send_timeout, flags); if (ret < 0) { goto return_ret; } ret = nsos_adapt_sendto(sock->poll.mid.fd, buf, len, flags_mid, addr_mid, addrlen_mid); return_ret: if (ret < 0) { errno = errno_from_nsos_mid(-ret); return -1; } return ret; } static ssize_t nsos_sendmsg(void *obj, const struct msghdr *msg, int flags) { struct nsos_socket *sock = obj; struct nsos_mid_sockaddr_storage addr_storage_mid; struct nsos_mid_sockaddr *addr_mid = (struct nsos_mid_sockaddr *)&addr_storage_mid; size_t addrlen_mid = sizeof(addr_storage_mid); struct nsos_mid_msghdr msg_mid; struct nsos_mid_iovec *msg_iov; int flags_mid; int ret; ret = socket_flags_to_nsos_mid(flags); if (ret < 0) { goto return_ret; } flags_mid = ret; ret = sockaddr_to_nsos_mid(msg->msg_name, msg->msg_namelen, &addr_mid, &addrlen_mid); if (ret < 0) { goto return_ret; } 
msg_iov = k_calloc(msg->msg_iovlen, sizeof(*msg_iov)); if (!msg_iov) { ret = -NSOS_MID_ENOMEM; goto return_ret; } for (size_t i = 0; i < msg->msg_iovlen; i++) { msg_iov[i].iov_base = msg->msg_iov[i].iov_base; msg_iov[i].iov_len = msg->msg_iov[i].iov_len; } msg_mid.msg_name = addr_mid; msg_mid.msg_namelen = addrlen_mid; msg_mid.msg_iov = msg_iov; msg_mid.msg_iovlen = msg->msg_iovlen; msg_mid.msg_control = NULL; msg_mid.msg_controllen = 0; msg_mid.msg_flags = 0; ret = nsos_poll_if_blocking(sock, ZSOCK_POLLOUT, sock->send_timeout, flags); if (ret < 0) { goto free_msg_iov; } ret = nsos_adapt_sendmsg(sock->poll.mid.fd, &msg_mid, flags_mid); free_msg_iov: k_free(msg_iov); return_ret: if (ret < 0) { errno = errno_from_nsos_mid(-ret); return -1; } return ret; } static ssize_t nsos_recvfrom(void *obj, void *buf, size_t len, int flags, struct sockaddr *addr, socklen_t *addrlen) { struct nsos_socket *sock = obj; struct nsos_mid_sockaddr_storage addr_storage_mid; struct nsos_mid_sockaddr *addr_mid = (struct nsos_mid_sockaddr *)&addr_storage_mid; size_t addrlen_mid = sizeof(addr_storage_mid); int flags_mid; int ret; ret = socket_flags_to_nsos_mid(flags); if (ret < 0) { goto return_ret; } flags_mid = ret; ret = nsos_poll_if_blocking(sock, ZSOCK_POLLIN, sock->recv_timeout, flags); if (ret < 0) { goto return_ret; } ret = nsos_adapt_recvfrom(sock->poll.mid.fd, buf, len, flags_mid, addr_mid, &addrlen_mid); if (ret < 0) { goto return_ret; } sockaddr_from_nsos_mid(addr, addrlen, addr_mid, addrlen_mid); return_ret: if (ret < 0) { errno = errno_from_nsos_mid(-ret); return -1; } return ret; } static ssize_t nsos_recvmsg(void *obj, struct msghdr *msg, int flags) { errno = ENOTSUP; return -1; } static int socket_type_from_nsos_mid(int type_mid, int *type) { switch (type_mid) { case NSOS_MID_SOCK_STREAM: *type = SOCK_STREAM; break; case NSOS_MID_SOCK_DGRAM: *type = SOCK_DGRAM; break; case NSOS_MID_SOCK_RAW: *type = SOCK_RAW; break; default: return -NSOS_MID_ESOCKTNOSUPPORT; } return 0; } 
static int socket_proto_from_nsos_mid(int proto_mid, int *proto) { switch (proto_mid) { case NSOS_MID_IPPROTO_IP: *proto = IPPROTO_IP; break; case NSOS_MID_IPPROTO_ICMP: *proto = IPPROTO_ICMP; break; case NSOS_MID_IPPROTO_IGMP: *proto = IPPROTO_IGMP; break; case NSOS_MID_IPPROTO_IPIP: *proto = IPPROTO_IPIP; break; case NSOS_MID_IPPROTO_TCP: *proto = IPPROTO_TCP; break; case NSOS_MID_IPPROTO_UDP: *proto = IPPROTO_UDP; break; case NSOS_MID_IPPROTO_IPV6: *proto = IPPROTO_IPV6; break; case NSOS_MID_IPPROTO_RAW: *proto = IPPROTO_RAW; break; default: return -NSOS_MID_EPROTONOSUPPORT; } return 0; } static int socket_family_from_nsos_mid(int family_mid, int *family) { switch (family_mid) { case NSOS_MID_AF_UNSPEC: *family = AF_UNSPEC; break; case NSOS_MID_AF_INET: *family = AF_INET; break; case NSOS_MID_AF_INET6: *family = AF_INET6; break; default: return -NSOS_MID_EAFNOSUPPORT; } return 0; } static int nsos_getsockopt_int(struct nsos_socket *sock, int nsos_mid_level, int nsos_mid_optname, void *optval, socklen_t *optlen) { size_t nsos_mid_optlen = sizeof(int); int err; if (*optlen != sizeof(int)) { errno = EINVAL; return -1; } err = nsos_adapt_getsockopt(sock->poll.mid.fd, NSOS_MID_SOL_SOCKET, NSOS_MID_SO_KEEPALIVE, optval, &nsos_mid_optlen); if (err) { errno = errno_from_nsos_mid(-err); return -1; } *optlen = nsos_mid_optlen; return 0; } static int nsos_getsockopt(void *obj, int level, int optname, void *optval, socklen_t *optlen) { struct nsos_socket *sock = obj; switch (level) { case SOL_SOCKET: switch (optname) { case SO_ERROR: { int nsos_mid_err; int err; if (*optlen != sizeof(int)) { errno = EINVAL; return -1; } err = nsos_adapt_getsockopt(sock->poll.mid.fd, NSOS_MID_SOL_SOCKET, NSOS_MID_SO_ERROR, &nsos_mid_err, NULL); if (err) { errno = errno_from_nsos_mid(-err); return -1; } *(int *)optval = errno_from_nsos_mid(nsos_mid_err); return 0; } case SO_TYPE: { int nsos_mid_type; int err; if (*optlen != sizeof(nsos_mid_type)) { errno = EINVAL; return -1; } err = 
nsos_adapt_getsockopt(sock->poll.mid.fd, NSOS_MID_SOL_SOCKET, NSOS_MID_SO_TYPE, &nsos_mid_type, NULL); if (err) { errno = errno_from_nsos_mid(-err); return -1; } err = socket_type_from_nsos_mid(nsos_mid_type, optval); if (err) { errno = errno_from_nsos_mid(-err); return -1; } return 0; } case SO_PROTOCOL: { int nsos_mid_proto; int err; if (*optlen != sizeof(nsos_mid_proto)) { errno = EINVAL; return -1; } err = nsos_adapt_getsockopt(sock->poll.mid.fd, NSOS_MID_SOL_SOCKET, NSOS_MID_SO_PROTOCOL, &nsos_mid_proto, NULL); if (err) { errno = errno_from_nsos_mid(-err); return -1; } err = socket_proto_from_nsos_mid(nsos_mid_proto, optval); if (err) { errno = errno_from_nsos_mid(-err); return -1; } return 0; } case SO_DOMAIN: { int nsos_mid_family; int err; if (*optlen != sizeof(nsos_mid_family)) { errno = EINVAL; return -1; } err = nsos_adapt_getsockopt(sock->poll.mid.fd, NSOS_MID_SOL_SOCKET, NSOS_MID_SO_DOMAIN, &nsos_mid_family, NULL); if (err) { errno = errno_from_nsos_mid(-err); return -1; } err = socket_family_from_nsos_mid(nsos_mid_family, optval); if (err) { errno = errno_from_nsos_mid(-err); return -1; } return 0; } case SO_RCVBUF: return nsos_getsockopt_int(sock, NSOS_MID_SOL_SOCKET, NSOS_MID_SO_RCVBUF, optval, optlen); case SO_SNDBUF: return nsos_getsockopt_int(sock, NSOS_MID_SOL_SOCKET, NSOS_MID_SO_SNDBUF, optval, optlen); case SO_REUSEADDR: return nsos_getsockopt_int(sock, NSOS_MID_SOL_SOCKET, NSOS_MID_SO_REUSEADDR, optval, optlen); case SO_REUSEPORT: return nsos_getsockopt_int(sock, NSOS_MID_SOL_SOCKET, NSOS_MID_SO_REUSEPORT, optval, optlen); case SO_KEEPALIVE: return nsos_getsockopt_int(sock, NSOS_MID_SOL_SOCKET, NSOS_MID_SO_KEEPALIVE, optval, optlen); } break; case IPPROTO_TCP: switch (optname) { case TCP_NODELAY: return nsos_getsockopt_int(sock, NSOS_MID_IPPROTO_TCP, NSOS_MID_TCP_NODELAY, optval, optlen); case TCP_KEEPIDLE: return nsos_getsockopt_int(sock, NSOS_MID_IPPROTO_TCP, NSOS_MID_TCP_KEEPIDLE, optval, optlen); case TCP_KEEPINTVL: return 
nsos_getsockopt_int(sock, NSOS_MID_IPPROTO_TCP, NSOS_MID_TCP_KEEPINTVL, optval, optlen); case TCP_KEEPCNT: return nsos_getsockopt_int(sock, NSOS_MID_IPPROTO_TCP, NSOS_MID_TCP_KEEPCNT, optval, optlen); } break; case IPPROTO_IPV6: switch (optname) { case IPV6_V6ONLY: return nsos_getsockopt_int(sock, NSOS_MID_IPPROTO_IPV6, NSOS_MID_IPV6_V6ONLY, optval, optlen); } break; } errno = EOPNOTSUPP; return -1; } static int nsos_setsockopt_int(struct nsos_socket *sock, int nsos_mid_level, int nsos_mid_optname, const void *optval, socklen_t optlen) { int err; if (optlen != sizeof(int)) { errno = EINVAL; return -1; } err = nsos_adapt_setsockopt(sock->poll.mid.fd, nsos_mid_level, nsos_mid_optname, optval, optlen); if (err) { errno = errno_from_nsos_mid(-err); return -1; } return 0; } static int nsos_setsockopt(void *obj, int level, int optname, const void *optval, socklen_t optlen) { struct nsos_socket *sock = obj; switch (level) { case SOL_SOCKET: switch (optname) { case SO_PRIORITY: { int nsos_mid_priority; int err; if (optlen != sizeof(uint8_t)) { errno = EINVAL; return -1; } nsos_mid_priority = *(uint8_t *)optval; err = nsos_adapt_setsockopt(sock->poll.mid.fd, NSOS_MID_SOL_SOCKET, NSOS_MID_SO_PRIORITY, &nsos_mid_priority, sizeof(nsos_mid_priority)); if (err) { errno = errno_from_nsos_mid(-err); return -1; } return 0; } case SO_RCVTIMEO: { const struct zsock_timeval *tv = optval; struct nsos_mid_timeval nsos_mid_tv; int err; if (optlen != sizeof(struct zsock_timeval)) { errno = EINVAL; return -1; } nsos_mid_tv.tv_sec = tv->tv_sec; nsos_mid_tv.tv_usec = tv->tv_usec; err = nsos_adapt_setsockopt(sock->poll.mid.fd, NSOS_MID_SOL_SOCKET, NSOS_MID_SO_RCVTIMEO, &nsos_mid_tv, sizeof(nsos_mid_tv)); if (err) { errno = errno_from_nsos_mid(-err); return -1; } if (tv->tv_sec == 0 && tv->tv_usec == 0) { sock->recv_timeout = K_FOREVER; } else { sock->recv_timeout = K_USEC(tv->tv_sec * 1000000LL + tv->tv_usec); } return 0; } case SO_SNDTIMEO: { const struct zsock_timeval *tv = optval; struct 
nsos_mid_timeval nsos_mid_tv; int err; if (optlen != sizeof(struct zsock_timeval)) { errno = EINVAL; return -1; } nsos_mid_tv.tv_sec = tv->tv_sec; nsos_mid_tv.tv_usec = tv->tv_usec; err = nsos_adapt_setsockopt(sock->poll.mid.fd, NSOS_MID_SOL_SOCKET, NSOS_MID_SO_SNDTIMEO, &nsos_mid_tv, sizeof(nsos_mid_tv)); if (err) { errno = errno_from_nsos_mid(-err); return -1; } if (tv->tv_sec == 0 && tv->tv_usec == 0) { sock->send_timeout = K_FOREVER; } else { sock->send_timeout = K_USEC(tv->tv_sec * 1000000LL + tv->tv_usec); } return 0; } case SO_RCVBUF: return nsos_setsockopt_int(sock, NSOS_MID_SOL_SOCKET, NSOS_MID_SO_RCVBUF, optval, optlen); case SO_SNDBUF: return nsos_setsockopt_int(sock, NSOS_MID_SOL_SOCKET, NSOS_MID_SO_SNDBUF, optval, optlen); case SO_REUSEADDR: return nsos_setsockopt_int(sock, NSOS_MID_SOL_SOCKET, NSOS_MID_SO_REUSEADDR, optval, optlen); case SO_REUSEPORT: return nsos_setsockopt_int(sock, NSOS_MID_SOL_SOCKET, NSOS_MID_SO_REUSEPORT, optval, optlen); case SO_LINGER: return nsos_setsockopt_int(sock, NSOS_MID_SOL_SOCKET, NSOS_MID_SO_LINGER, optval, optlen); case SO_KEEPALIVE: return nsos_setsockopt_int(sock, NSOS_MID_SOL_SOCKET, NSOS_MID_SO_KEEPALIVE, optval, optlen); } break; case IPPROTO_TCP: switch (optname) { case TCP_NODELAY: return nsos_setsockopt_int(sock, NSOS_MID_IPPROTO_TCP, NSOS_MID_TCP_NODELAY, optval, optlen); case TCP_KEEPIDLE: return nsos_setsockopt_int(sock, NSOS_MID_IPPROTO_TCP, NSOS_MID_TCP_KEEPIDLE, optval, optlen); case TCP_KEEPINTVL: return nsos_setsockopt_int(sock, NSOS_MID_IPPROTO_TCP, NSOS_MID_TCP_KEEPINTVL, optval, optlen); case TCP_KEEPCNT: return nsos_setsockopt_int(sock, NSOS_MID_IPPROTO_TCP, NSOS_MID_TCP_KEEPCNT, optval, optlen); } break; case IPPROTO_IPV6: switch (optname) { case IPV6_V6ONLY: return nsos_setsockopt_int(sock, NSOS_MID_IPPROTO_IPV6, NSOS_MID_IPV6_V6ONLY, optval, optlen); } break; } errno = EOPNOTSUPP; return -1; } static const struct socket_op_vtable nsos_socket_fd_op_vtable = { .fd_vtable = { .read = nsos_read, 
.write = nsos_write, .close = nsos_close, .ioctl = nsos_ioctl, }, .bind = nsos_bind, .connect = nsos_connect, .listen = nsos_listen, .accept = nsos_accept, .sendto = nsos_sendto, .sendmsg = nsos_sendmsg, .recvfrom = nsos_recvfrom, .recvmsg = nsos_recvmsg, .getsockopt = nsos_getsockopt, .setsockopt = nsos_setsockopt, }; static bool nsos_is_supported(int family, int type, int proto) { int dummy; int err; err = socket_family_to_nsos_mid(family, &dummy); if (err) { return false; } err = socket_type_to_nsos_mid(type, &dummy); if (err) { return false; } err = socket_proto_to_nsos_mid(proto, &dummy); if (err) { return false; } return true; } NET_SOCKET_OFFLOAD_REGISTER(nsos, CONFIG_NET_SOCKETS_OFFLOAD_PRIORITY, AF_UNSPEC, nsos_is_supported, nsos_socket_create); struct zsock_addrinfo_wrap { struct zsock_addrinfo addrinfo; struct sockaddr_storage addr_storage; struct nsos_mid_addrinfo *addrinfo_mid; }; /* * (Zephyr) * zsock_addrinfo_wrap * ----------------------- * | zsock_addrinfo | * ----------------------- (trampoline) * | sockaddr_storage | nsos_addrinfo_wrap * ----------------------- ----------------------------- * | nsos_mid_addrinfo * | -> | nsos_mid_addrinfo | * ----------------------- ----------------------------- * | nsos_mid_sockaddr_storage | * ----------------------------- (Linux host) * | addrinfo * | -> addrinfo * ----------------------------- */ static int addrinfo_from_nsos_mid(struct nsos_mid_addrinfo *nsos_res, struct zsock_addrinfo **res) { struct zsock_addrinfo_wrap *res_wraps; size_t idx_res = 0; size_t n_res = 0; for (struct nsos_mid_addrinfo *res_p = nsos_res; res_p; res_p = res_p->ai_next) { n_res++; } if (n_res == 0) { return 0; } res_wraps = k_calloc(n_res, sizeof(*res_wraps)); if (!res_wraps) { return -ENOMEM; } for (struct nsos_mid_addrinfo *res_p = nsos_res; res_p; res_p = res_p->ai_next, idx_res++) { struct zsock_addrinfo_wrap *wrap = &res_wraps[idx_res]; wrap->addrinfo_mid = res_p; wrap->addrinfo.ai_flags = res_p->ai_flags; 
wrap->addrinfo.ai_family = res_p->ai_family; wrap->addrinfo.ai_socktype = res_p->ai_socktype; wrap->addrinfo.ai_protocol = res_p->ai_protocol; wrap->addrinfo.ai_addr = (struct sockaddr *)&wrap->addr_storage; wrap->addrinfo.ai_addrlen = sizeof(wrap->addr_storage); sockaddr_from_nsos_mid(wrap->addrinfo.ai_addr, &wrap->addrinfo.ai_addrlen, res_p->ai_addr, res_p->ai_addrlen); wrap->addrinfo.ai_canonname = res_p->ai_canonname ? strdup(res_p->ai_canonname) : NULL; wrap->addrinfo.ai_next = &wrap[1].addrinfo; } res_wraps[n_res - 1].addrinfo.ai_next = NULL; *res = &res_wraps->addrinfo; return 0; } static int nsos_getaddrinfo(const char *node, const char *service, const struct zsock_addrinfo *hints, struct zsock_addrinfo **res) { struct nsos_mid_addrinfo hints_mid; struct nsos_mid_addrinfo *res_mid; int system_errno; int ret; if (!res) { return -EINVAL; } if (hints) { hints_mid.ai_flags = hints->ai_flags; hints_mid.ai_family = hints->ai_family; hints_mid.ai_socktype = hints->ai_socktype; hints_mid.ai_protocol = hints->ai_protocol; } ret = nsos_adapt_getaddrinfo(node, service, hints ? 
&hints_mid : NULL, &res_mid, &system_errno); if (ret < 0) { if (ret == NSOS_MID_EAI_SYSTEM) { errno = errno_from_nsos_mid(system_errno); } return eai_from_nsos_mid(ret); } ret = addrinfo_from_nsos_mid(res_mid, res); if (ret < 0) { errno = -ret; return DNS_EAI_SYSTEM; } return ret; } static void nsos_freeaddrinfo(struct zsock_addrinfo *res) { struct zsock_addrinfo_wrap *wrap = CONTAINER_OF(res, struct zsock_addrinfo_wrap, addrinfo); for (struct zsock_addrinfo *res_p = res; res_p; res_p = res_p->ai_next) { free(res_p->ai_canonname); } nsos_adapt_freeaddrinfo(wrap->addrinfo_mid); k_free(wrap); } static const struct socket_dns_offload nsos_dns_ops = { .getaddrinfo = nsos_getaddrinfo, .freeaddrinfo = nsos_freeaddrinfo, }; static void nsos_isr(const void *obj) { struct nsos_socket_poll *poll; SYS_DLIST_FOR_EACH_CONTAINER(&nsos_polls, poll, node) { if (poll->mid.revents) { poll->mid.cb(&poll->mid); } } } static int nsos_socket_offload_init(const struct device *arg) { ARG_UNUSED(arg); IRQ_CONNECT(NSOS_IRQ, NSOS_IRQ_PRIORITY, nsos_isr, NULL, NSOS_IRQ_FLAGS); irq_enable(NSOS_IRQ); return 0; } static void nsos_iface_api_init(struct net_if *iface) { iface->if_dev->socket_offload = nsos_socket_create; socket_offload_dns_register(&nsos_dns_ops); } static int nsos_iface_enable(const struct net_if *iface, bool enabled) { ARG_UNUSED(iface); ARG_UNUSED(enabled); return 0; } static struct offloaded_if_api nsos_iface_offload_api = { .iface_api.init = nsos_iface_api_init, .enable = nsos_iface_enable, }; NET_DEVICE_OFFLOAD_INIT(nsos_socket, "nsos_socket", nsos_socket_offload_init, NULL, NULL, NULL, 0, &nsos_iface_offload_api, NET_ETH_MTU); ```
/content/code_sandbox/drivers/net/nsos_sockets.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
10,244
```objective-c /* * */ #include <stdbool.h> #include <zephyr/device.h> #include <zephyr/net/buf.h> #include <zephyr/net/net_pkt.h> #include <zephyr/net/net_if.h> #if defined(CONFIG_SLIP_TAP) #define _SLIP_MTU 1500 #else #define _SLIP_MTU 576 #endif /* CONFIG_SLIP_TAP */ struct slip_context { bool init_done; bool first; /* SLIP received it's byte or not after * driver initialization or SLIP_END byte. */ uint8_t buf[1]; /* SLIP data is read into this buf */ struct net_pkt *rx; /* and then placed into this net_pkt */ struct net_buf *last; /* Pointer to last buffer in the list */ uint8_t *ptr; /* Where in net_pkt to add data */ struct net_if *iface; uint8_t state; uint8_t mac_addr[6]; struct net_linkaddr ll_addr; #if defined(CONFIG_SLIP_STATISTICS) #define SLIP_STATS(statement) #else uint16_t garbage; #define SLIP_STATS(statement) statement #endif }; void slip_iface_init(struct net_if *iface); int slip_init(const struct device *dev); int slip_send(const struct device *dev, struct net_pkt *pkt); ```
/content/code_sandbox/drivers/net/slip.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
280
```c /* * */ /** * @file * * SLIP driver using uart_pipe. This is meant for network connectivity between * host and qemu. The host will need to run tunslip process. */ #define LOG_MODULE_NAME slip #define LOG_LEVEL CONFIG_SLIP_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(LOG_MODULE_NAME); #include <stdio.h> #include <zephyr/kernel.h> #include <errno.h> #include <stddef.h> #include <zephyr/sys/util.h> #include <zephyr/net/ethernet.h> #include <zephyr/net/net_core.h> #include <zephyr/net/dummy.h> #include <zephyr/drivers/uart_pipe.h> #include <zephyr/random/random.h> #include "slip.h" #define SLIP_END 0300 #define SLIP_ESC 0333 #define SLIP_ESC_END 0334 #define SLIP_ESC_ESC 0335 enum slip_state { STATE_GARBAGE, STATE_OK, STATE_ESC, }; #if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE) #define SLIP_FRAG_LEN CONFIG_NET_BUF_DATA_SIZE #else #define SLIP_FRAG_LEN _SLIP_MTU #endif /* CONFIG_NET_BUF_FIXED_DATA_SIZE */ static inline void slip_writeb(unsigned char c) { uint8_t buf[1] = { c }; uart_pipe_send(&buf[0], 1); } /** * @brief Write byte to SLIP, escape if it is END or ESC character * * @param c a byte to write */ static void slip_writeb_esc(unsigned char c) { switch (c) { case SLIP_END: /* If it's the same code as an END character, * we send a special two character code so as * not to make the receiver think we sent * an END. */ slip_writeb(SLIP_ESC); slip_writeb(SLIP_ESC_END); break; case SLIP_ESC: /* If it's the same code as an ESC character, * we send a special two character code so as * not to make the receiver think we sent * an ESC. */ slip_writeb(SLIP_ESC); slip_writeb(SLIP_ESC_ESC); break; default: slip_writeb(c); } } int slip_send(const struct device *dev, struct net_pkt *pkt) { struct net_buf *buf; uint8_t *ptr; uint16_t i; uint8_t c; ARG_UNUSED(dev); if (!pkt->buffer) { /* No data? 
*/ return -ENODATA; } slip_writeb(SLIP_END); for (buf = pkt->buffer; buf; buf = buf->frags) { ptr = buf->data; for (i = 0U; i < buf->len; ++i) { c = *ptr++; slip_writeb_esc(c); } if (LOG_LEVEL >= LOG_LEVEL_DBG) { LOG_DBG("sent data %d bytes", buf->len); if (buf->len) { LOG_HEXDUMP_DBG(buf->data, buf->len, "<slip "); } } } slip_writeb(SLIP_END); return 0; } static struct net_pkt *slip_poll_handler(struct slip_context *slip) { if (slip->last && slip->last->len) { return slip->rx; } return NULL; } static inline struct net_if *get_iface(struct slip_context *context, uint16_t vlan_tag) { #if defined(CONFIG_NET_VLAN) struct net_if *iface; iface = net_eth_get_vlan_iface(context->iface, vlan_tag); if (!iface) { return context->iface; } return iface; #else ARG_UNUSED(vlan_tag); return context->iface; #endif } static void process_msg(struct slip_context *slip) { uint16_t vlan_tag = NET_VLAN_TAG_UNSPEC; struct net_pkt *pkt; pkt = slip_poll_handler(slip); if (!pkt || !pkt->buffer) { return; } #if defined(CONFIG_NET_VLAN) { struct net_eth_hdr *hdr = NET_ETH_HDR(pkt); if (ntohs(hdr->type) == NET_ETH_PTYPE_VLAN) { struct net_eth_vlan_hdr *hdr_vlan = (struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt); net_pkt_set_vlan_tci(pkt, ntohs(hdr_vlan->vlan.tci)); vlan_tag = net_pkt_vlan_tag(pkt); } } #endif if (net_recv_data(get_iface(slip, vlan_tag), pkt) < 0) { net_pkt_unref(pkt); } slip->rx = NULL; slip->last = NULL; } static inline int slip_input_byte(struct slip_context *slip, unsigned char c) { switch (slip->state) { case STATE_GARBAGE: if (c == SLIP_END) { slip->state = STATE_OK; } return 0; case STATE_ESC: if (c == SLIP_ESC_END) { c = SLIP_END; } else if (c == SLIP_ESC_ESC) { c = SLIP_ESC; } else { slip->state = STATE_GARBAGE; SLIP_STATS(slip->garbage++); return 0; } slip->state = STATE_OK; break; case STATE_OK: if (c == SLIP_ESC) { slip->state = STATE_ESC; return 0; } if (c == SLIP_END) { slip->state = STATE_OK; slip->first = false; if (slip->rx) { return 1; } return 0; } if (slip->first 
&& !slip->rx) { /* Must have missed buffer allocation on first byte. */ return 0; } if (!slip->first) { slip->first = true; slip->rx = net_pkt_rx_alloc_on_iface(slip->iface, K_NO_WAIT); if (!slip->rx) { LOG_ERR("[%p] cannot allocate pkt", slip); return 0; } slip->last = net_pkt_get_frag(slip->rx, SLIP_FRAG_LEN, K_NO_WAIT); if (!slip->last) { LOG_ERR("[%p] cannot allocate 1st data buffer", slip); net_pkt_unref(slip->rx); slip->rx = NULL; return 0; } net_pkt_append_buffer(slip->rx, slip->last); slip->ptr = net_pkt_ip_data(slip->rx); } break; } /* It is possible that slip->last is not set during the startup * of the device. If this happens do not continue and overwrite * some random memory. */ if (!slip->last) { return 0; } if (!net_buf_tailroom(slip->last)) { /* We need to allocate a new buffer */ struct net_buf *buf; buf = net_pkt_get_reserve_rx_data(SLIP_FRAG_LEN, K_NO_WAIT); if (!buf) { LOG_ERR("[%p] cannot allocate next data buf", slip); net_pkt_unref(slip->rx); slip->rx = NULL; slip->last = NULL; return 0; } net_buf_frag_insert(slip->last, buf); slip->last = buf; slip->ptr = slip->last->data; } /* The net_buf_add_u8() cannot add data to ll header so we need * a way to do it. 
*/ if (slip->ptr < slip->last->data) { *slip->ptr = c; } else { slip->ptr = net_buf_add_u8(slip->last, c); } slip->ptr++; return 0; } static uint8_t *recv_cb(uint8_t *buf, size_t *off) { struct slip_context *slip = CONTAINER_OF(buf, struct slip_context, buf[0]); size_t i; if (!slip->init_done) { *off = 0; return buf; } for (i = 0; i < *off; i++) { if (slip_input_byte(slip, buf[i])) { if (LOG_LEVEL >= LOG_LEVEL_DBG) { struct net_buf *rx_buf = slip->rx->buffer; int bytes = net_buf_frags_len(rx_buf); int count = 0; while (bytes && rx_buf) { char msg[6 + 10 + 1]; snprintk(msg, sizeof(msg), ">slip %2d", count); LOG_HEXDUMP_DBG(rx_buf->data, rx_buf->len, msg); rx_buf = rx_buf->frags; count++; } LOG_DBG("[%p] received data %d bytes", slip, bytes); } process_msg(slip); break; } } *off = 0; return buf; } int slip_init(const struct device *dev) { struct slip_context *slip = dev->data; LOG_DBG("[%p] dev %p", slip, dev); slip->state = STATE_OK; slip->rx = NULL; slip->first = false; #if defined(CONFIG_SLIP_TAP) && defined(CONFIG_NET_IPV4) LOG_DBG("ARP enabled"); #endif uart_pipe_register(slip->buf, sizeof(slip->buf), recv_cb); return 0; } static inline struct net_linkaddr *slip_get_mac(struct slip_context *slip) { slip->ll_addr.addr = slip->mac_addr; slip->ll_addr.len = sizeof(slip->mac_addr); return &slip->ll_addr; } void slip_iface_init(struct net_if *iface) { struct slip_context *slip = net_if_get_device(iface)->data; struct net_linkaddr *ll_addr; #if defined(CONFIG_SLIP_TAP) && defined(CONFIG_NET_L2_ETHERNET) ethernet_init(iface); #endif #if defined(CONFIG_NET_LLDP) net_lldp_set_lldpdu(iface); #endif if (slip->init_done) { return; } ll_addr = slip_get_mac(slip); slip->init_done = true; slip->iface = iface; if (CONFIG_SLIP_MAC_ADDR[0] != 0) { if (net_bytes_from_str(slip->mac_addr, sizeof(slip->mac_addr), CONFIG_SLIP_MAC_ADDR) < 0) { goto use_random_mac; } } else { use_random_mac: /* 00-00-5E-00-53-xx Documentation RFC 7042 */ slip->mac_addr[0] = 0x00; slip->mac_addr[1] = 
0x00; slip->mac_addr[2] = 0x5E; slip->mac_addr[3] = 0x00; slip->mac_addr[4] = 0x53; slip->mac_addr[5] = sys_rand8_get(); } net_if_set_link_addr(iface, ll_addr->addr, ll_addr->len, NET_LINK_ETHERNET); } #if !defined(CONFIG_SLIP_TAP) static struct slip_context slip_context_data; static const struct dummy_api slip_if_api = { .iface_api.init = slip_iface_init, .send = slip_send, }; #define _SLIP_L2_LAYER DUMMY_L2 #define _SLIP_L2_CTX_TYPE NET_L2_GET_CTX_TYPE(DUMMY_L2) NET_DEVICE_INIT(slip, CONFIG_SLIP_DRV_NAME, slip_init, NULL, &slip_context_data, NULL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, &slip_if_api, _SLIP_L2_LAYER, _SLIP_L2_CTX_TYPE, _SLIP_MTU); #endif ```
/content/code_sandbox/drivers/net/slip.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,666
```c /** * */ /** * @file * * Linux (bottom) side of NSOS (Native Simulator Offloaded Sockets). */ #define _DEFAULT_SOURCE #include <errno.h> #include <fcntl.h> #include <netdb.h> #include <netinet/in.h> #include <netinet/tcp.h> #include <poll.h> #include <stdlib.h> #include <string.h> #include <sys/epoll.h> #include <sys/ioctl.h> #include <sys/socket.h> #include <unistd.h> #include "nsos.h" #include "nsos_errno.h" #include "nsos_fcntl.h" #include "nsos_netdb.h" #include "nsos_socket.h" #include "board_soc.h" #include "irq_ctrl.h" #include "nsi_hws_models_if.h" #include "nsi_tasks.h" #include "nsi_tracing.h" #include <stdio.h> static int nsos_epoll_fd; static int nsos_adapt_nfds; #ifndef ARRAY_SIZE #define ARRAY_SIZE(array) (sizeof(array) / sizeof((array)[0])) #endif #ifndef CONTAINER_OF #define CONTAINER_OF(ptr, type, field) \ ((type *)(((char *)(ptr)) - offsetof(type, field))) #endif int nsos_adapt_get_errno(void) { return errno_to_nsos_mid(errno); } static int socket_family_from_nsos_mid(int family_mid, int *family) { switch (family_mid) { case NSOS_MID_AF_UNSPEC: *family = AF_UNSPEC; break; case NSOS_MID_AF_INET: *family = AF_INET; break; case NSOS_MID_AF_INET6: *family = AF_INET6; break; default: nsi_print_warning("%s: socket family %d not supported\n", __func__, family_mid); return -NSOS_MID_EAFNOSUPPORT; } return 0; } static int socket_family_to_nsos_mid(int family, int *family_mid) { switch (family) { case AF_UNSPEC: *family_mid = NSOS_MID_AF_UNSPEC; break; case AF_INET: *family_mid = NSOS_MID_AF_INET; break; case AF_INET6: *family_mid = NSOS_MID_AF_INET6; break; default: nsi_print_warning("%s: socket family %d not supported\n", __func__, family); return -NSOS_MID_EAFNOSUPPORT; } return 0; } static int socket_proto_from_nsos_mid(int proto_mid, int *proto) { switch (proto_mid) { case NSOS_MID_IPPROTO_IP: *proto = IPPROTO_IP; break; case NSOS_MID_IPPROTO_ICMP: *proto = IPPROTO_ICMP; break; case NSOS_MID_IPPROTO_IGMP: *proto = IPPROTO_IGMP; break; case 
NSOS_MID_IPPROTO_IPIP: *proto = IPPROTO_IPIP; break; case NSOS_MID_IPPROTO_TCP: *proto = IPPROTO_TCP; break; case NSOS_MID_IPPROTO_UDP: *proto = IPPROTO_UDP; break; case NSOS_MID_IPPROTO_IPV6: *proto = IPPROTO_IPV6; break; case NSOS_MID_IPPROTO_RAW: *proto = IPPROTO_RAW; break; default: nsi_print_warning("%s: socket protocol %d not supported\n", __func__, proto_mid); return -NSOS_MID_EPROTONOSUPPORT; } return 0; } static int socket_proto_to_nsos_mid(int proto, int *proto_mid) { switch (proto) { case IPPROTO_IP: *proto_mid = NSOS_MID_IPPROTO_IP; break; case IPPROTO_ICMP: *proto_mid = NSOS_MID_IPPROTO_ICMP; break; case IPPROTO_IGMP: *proto_mid = NSOS_MID_IPPROTO_IGMP; break; case IPPROTO_IPIP: *proto_mid = NSOS_MID_IPPROTO_IPIP; break; case IPPROTO_TCP: *proto_mid = NSOS_MID_IPPROTO_TCP; break; case IPPROTO_UDP: *proto_mid = NSOS_MID_IPPROTO_UDP; break; case IPPROTO_IPV6: *proto_mid = NSOS_MID_IPPROTO_IPV6; break; case IPPROTO_RAW: *proto_mid = NSOS_MID_IPPROTO_RAW; break; default: nsi_print_warning("%s: socket protocol %d not supported\n", __func__, proto); return -NSOS_MID_EPROTONOSUPPORT; } return 0; } static int socket_type_from_nsos_mid(int type_mid, int *type) { switch (type_mid) { case NSOS_MID_SOCK_STREAM: *type = SOCK_STREAM; break; case NSOS_MID_SOCK_DGRAM: *type = SOCK_DGRAM; break; case NSOS_MID_SOCK_RAW: *type = SOCK_RAW; break; default: nsi_print_warning("%s: socket type %d not supported\n", __func__, type_mid); return -NSOS_MID_ESOCKTNOSUPPORT; } return 0; } static int socket_type_to_nsos_mid(int type, int *type_mid) { switch (type) { case SOCK_STREAM: *type_mid = NSOS_MID_SOCK_STREAM; break; case SOCK_DGRAM: *type_mid = NSOS_MID_SOCK_DGRAM; break; case SOCK_RAW: *type_mid = NSOS_MID_SOCK_RAW; break; default: nsi_print_warning("%s: socket type %d not supported\n", __func__, type); return -NSOS_MID_ESOCKTNOSUPPORT; } return 0; } static int socket_flags_from_nsos_mid(int flags_mid) { int flags = 0; nsos_socket_flag_convert(&flags_mid, NSOS_MID_MSG_PEEK, 
&flags, MSG_PEEK); nsos_socket_flag_convert(&flags_mid, NSOS_MID_MSG_TRUNC, &flags, MSG_TRUNC); nsos_socket_flag_convert(&flags_mid, NSOS_MID_MSG_DONTWAIT, &flags, MSG_DONTWAIT); nsos_socket_flag_convert(&flags_mid, NSOS_MID_MSG_WAITALL, &flags, MSG_WAITALL); if (flags_mid != 0) { return -NSOS_MID_EINVAL; } return flags; } int nsos_adapt_socket(int family_mid, int type_mid, int proto_mid) { int family; int type; int proto; int ret; ret = socket_family_from_nsos_mid(family_mid, &family); if (ret < 0) { return ret; } ret = socket_type_from_nsos_mid(type_mid, &type); if (ret < 0) { return ret; } ret = socket_proto_from_nsos_mid(proto_mid, &proto); if (ret < 0) { return ret; } ret = socket(family, type, proto); if (ret < 0) { return -errno_to_nsos_mid(errno); } return ret; } static int sockaddr_from_nsos_mid(struct sockaddr **addr, socklen_t *addrlen, const struct nsos_mid_sockaddr *addr_mid, size_t addrlen_mid) { if (!addr_mid || addrlen_mid == 0) { *addr = NULL; *addrlen = 0; return 0; } switch (addr_mid->sa_family) { case NSOS_MID_AF_INET: { const struct nsos_mid_sockaddr_in *addr_in_mid = (const struct nsos_mid_sockaddr_in *)addr_mid; struct sockaddr_in *addr_in = (struct sockaddr_in *)*addr; addr_in->sin_family = AF_INET; addr_in->sin_port = addr_in_mid->sin_port; addr_in->sin_addr.s_addr = addr_in_mid->sin_addr; *addrlen = sizeof(*addr_in); return 0; } case NSOS_MID_AF_INET6: { const struct nsos_mid_sockaddr_in6 *addr_in_mid = (const struct nsos_mid_sockaddr_in6 *)addr_mid; struct sockaddr_in6 *addr_in = (struct sockaddr_in6 *)*addr; addr_in->sin6_family = AF_INET6; addr_in->sin6_port = addr_in_mid->sin6_port; addr_in->sin6_flowinfo = 0; memcpy(addr_in->sin6_addr.s6_addr, addr_in_mid->sin6_addr, sizeof(addr_in->sin6_addr.s6_addr)); addr_in->sin6_scope_id = addr_in_mid->sin6_scope_id; *addrlen = sizeof(*addr_in); return 0; } } return -NSOS_MID_EINVAL; } static int sockaddr_to_nsos_mid(const struct sockaddr *addr, socklen_t addrlen, struct nsos_mid_sockaddr 
*addr_mid, size_t *addrlen_mid) { if (!addr || addrlen == 0) { *addrlen_mid = 0; return 0; } switch (addr->sa_family) { case AF_INET: { struct nsos_mid_sockaddr_in *addr_in_mid = (struct nsos_mid_sockaddr_in *)addr_mid; const struct sockaddr_in *addr_in = (const struct sockaddr_in *)addr; if (addr_in_mid) { addr_in_mid->sin_family = NSOS_MID_AF_INET; addr_in_mid->sin_port = addr_in->sin_port; addr_in_mid->sin_addr = addr_in->sin_addr.s_addr; } if (addrlen_mid) { *addrlen_mid = sizeof(*addr_in); } return 0; } case AF_INET6: { struct nsos_mid_sockaddr_in6 *addr_in_mid = (struct nsos_mid_sockaddr_in6 *)addr_mid; const struct sockaddr_in6 *addr_in = (const struct sockaddr_in6 *)addr; if (addr_in_mid) { addr_in_mid->sin6_family = NSOS_MID_AF_INET6; addr_in_mid->sin6_port = addr_in->sin6_port; memcpy(addr_in_mid->sin6_addr, addr_in->sin6_addr.s6_addr, sizeof(addr_in_mid->sin6_addr)); addr_in_mid->sin6_scope_id = addr_in->sin6_scope_id; } if (addrlen_mid) { *addrlen_mid = sizeof(*addr_in); } return 0; } } nsi_print_warning("%s: socket family %d not supported\n", __func__, addr->sa_family); return -NSOS_MID_EINVAL; } int nsos_adapt_bind(int fd, const struct nsos_mid_sockaddr *addr_mid, size_t addrlen_mid) { struct sockaddr_storage addr_storage; struct sockaddr *addr = (struct sockaddr *)&addr_storage; socklen_t addrlen; int ret; ret = sockaddr_from_nsos_mid(&addr, &addrlen, addr_mid, addrlen_mid); if (ret < 0) { return ret; } ret = bind(fd, addr, addrlen); if (ret < 0) { return -errno_to_nsos_mid(errno); } return ret; } int nsos_adapt_connect(int fd, const struct nsos_mid_sockaddr *addr_mid, size_t addrlen_mid) { struct sockaddr_storage addr_storage; struct sockaddr *addr = (struct sockaddr *)&addr_storage; socklen_t addrlen; int ret; ret = sockaddr_from_nsos_mid(&addr, &addrlen, addr_mid, addrlen_mid); if (ret < 0) { return ret; } ret = connect(fd, addr, addrlen); if (ret < 0) { return -errno_to_nsos_mid(errno); } return ret; } int nsos_adapt_listen(int fd, int backlog) { 
int ret; ret = listen(fd, backlog); if (ret < 0) { return -errno_to_nsos_mid(errno); } return ret; } int nsos_adapt_accept(int fd, struct nsos_mid_sockaddr *addr_mid, size_t *addrlen_mid) { struct sockaddr_storage addr_storage; struct sockaddr *addr = (struct sockaddr *)&addr_storage; socklen_t addrlen = sizeof(addr_storage); int ret; int err; ret = accept(fd, addr, &addrlen); if (ret < 0) { return -errno_to_nsos_mid(errno); } err = sockaddr_to_nsos_mid(addr, addrlen, addr_mid, addrlen_mid); if (err) { close(ret); return err; } return ret; } int nsos_adapt_sendto(int fd, const void *buf, size_t len, int flags, const struct nsos_mid_sockaddr *addr_mid, size_t addrlen_mid) { struct sockaddr_storage addr_storage; struct sockaddr *addr = (struct sockaddr *)&addr_storage; socklen_t addrlen; int ret; ret = sockaddr_from_nsos_mid(&addr, &addrlen, addr_mid, addrlen_mid); if (ret < 0) { return ret; } ret = sendto(fd, buf, len, socket_flags_from_nsos_mid(flags) | MSG_NOSIGNAL, addr, addrlen); if (ret < 0) { return -errno_to_nsos_mid(errno); } return ret; } int nsos_adapt_sendmsg(int fd, const struct nsos_mid_msghdr *msg_mid, int flags) { struct sockaddr_storage addr_storage; struct sockaddr *addr = (struct sockaddr *)&addr_storage; struct msghdr msg; struct iovec *msg_iov; socklen_t addrlen; int ret; ret = sockaddr_from_nsos_mid(&addr, &addrlen, msg_mid->msg_name, msg_mid->msg_namelen); if (ret < 0) { return ret; } msg_iov = calloc(msg_mid->msg_iovlen, sizeof(*msg_iov)); if (!msg_iov) { ret = -ENOMEM; return ret; } for (size_t i = 0; i < msg_mid->msg_iovlen; i++) { msg_iov[i].iov_base = msg_mid->msg_iov[i].iov_base; msg_iov[i].iov_len = msg_mid->msg_iov[i].iov_len; } msg.msg_name = addr; msg.msg_namelen = addrlen; msg.msg_iov = msg_iov; msg.msg_iovlen = msg_mid->msg_iovlen; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; ret = sendmsg(fd, &msg, socket_flags_from_nsos_mid(flags) | MSG_NOSIGNAL); if (ret < 0) { ret = -errno_to_nsos_mid(errno); } 
free(msg_iov); return ret; } int nsos_adapt_recvfrom(int fd, void *buf, size_t len, int flags, struct nsos_mid_sockaddr *addr_mid, size_t *addrlen_mid) { struct sockaddr_storage addr_storage; struct sockaddr *addr = (struct sockaddr *)&addr_storage; socklen_t addrlen = sizeof(addr_storage); int ret; int err; ret = recvfrom(fd, buf, len, socket_flags_from_nsos_mid(flags), addr, &addrlen); if (ret < 0) { return -errno_to_nsos_mid(errno); } err = sockaddr_to_nsos_mid(addr, addrlen, addr_mid, addrlen_mid); if (err) { return err; } return ret; } static int nsos_adapt_getsockopt_int(int fd, int level, int optname, void *optval, size_t *nsos_mid_optlen) { socklen_t optlen = *nsos_mid_optlen; int ret; ret = getsockopt(fd, level, optname, optval, &optlen); if (ret < 0) { return -errno_to_nsos_mid(errno); } *nsos_mid_optlen = optlen; return 0; } int nsos_adapt_getsockopt(int fd, int nsos_mid_level, int nsos_mid_optname, void *nsos_mid_optval, size_t *nsos_mid_optlen) { switch (nsos_mid_level) { case NSOS_MID_SOL_SOCKET: switch (nsos_mid_optname) { case NSOS_MID_SO_ERROR: { int err; socklen_t optlen = sizeof(err); int ret; ret = getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &optlen); if (ret < 0) { return -errno_to_nsos_mid(errno); } *(int *)nsos_mid_optval = errno_to_nsos_mid(err); return 0; } case NSOS_MID_SO_TYPE: { int type; socklen_t optlen = sizeof(type); int ret; int err; ret = getsockopt(fd, SOL_SOCKET, SO_TYPE, &type, &optlen); if (ret < 0) { return -errno_to_nsos_mid(errno); } err = socket_type_to_nsos_mid(type, nsos_mid_optval); if (err) { return err; } return 0; } case NSOS_MID_SO_PROTOCOL: { int proto; socklen_t optlen = sizeof(proto); int ret; int err; ret = getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &proto, &optlen); if (ret < 0) { return -errno_to_nsos_mid(errno); } err = socket_proto_to_nsos_mid(proto, nsos_mid_optval); if (err) { return err; } return 0; } case NSOS_MID_SO_DOMAIN: { int family; socklen_t optlen = sizeof(family); int ret; int err; ret = 
getsockopt(fd, SOL_SOCKET, SO_DOMAIN, &family, &optlen); if (ret < 0) { return -errno_to_nsos_mid(errno); } err = socket_family_to_nsos_mid(family, nsos_mid_optval); if (err) { return err; } return 0; } case NSOS_MID_SO_RCVBUF: return nsos_adapt_getsockopt_int(fd, SOL_SOCKET, SO_RCVBUF, nsos_mid_optval, nsos_mid_optlen); case NSOS_MID_SO_SNDBUF: return nsos_adapt_getsockopt_int(fd, SOL_SOCKET, SO_SNDBUF, nsos_mid_optval, nsos_mid_optlen); case NSOS_MID_SO_REUSEADDR: return nsos_adapt_getsockopt_int(fd, SOL_SOCKET, SO_REUSEADDR, nsos_mid_optval, nsos_mid_optlen); case NSOS_MID_SO_REUSEPORT: return nsos_adapt_getsockopt_int(fd, SOL_SOCKET, SO_REUSEPORT, nsos_mid_optval, nsos_mid_optlen); case NSOS_MID_SO_LINGER: return nsos_adapt_getsockopt_int(fd, SOL_SOCKET, SO_LINGER, nsos_mid_optval, nsos_mid_optlen); case NSOS_MID_SO_KEEPALIVE: return nsos_adapt_getsockopt_int(fd, SOL_SOCKET, SO_KEEPALIVE, nsos_mid_optval, nsos_mid_optlen); } break; case NSOS_MID_IPPROTO_TCP: switch (nsos_mid_optname) { case NSOS_MID_TCP_NODELAY: return nsos_adapt_getsockopt_int(fd, IPPROTO_TCP, TCP_NODELAY, nsos_mid_optval, nsos_mid_optlen); case NSOS_MID_TCP_KEEPIDLE: return nsos_adapt_getsockopt_int(fd, IPPROTO_TCP, TCP_KEEPIDLE, nsos_mid_optval, nsos_mid_optlen); case NSOS_MID_TCP_KEEPINTVL: return nsos_adapt_getsockopt_int(fd, IPPROTO_TCP, TCP_KEEPINTVL, nsos_mid_optval, nsos_mid_optlen); case NSOS_MID_TCP_KEEPCNT: return nsos_adapt_getsockopt_int(fd, IPPROTO_TCP, TCP_KEEPCNT, nsos_mid_optval, nsos_mid_optlen); } break; case NSOS_MID_IPPROTO_IPV6: switch (nsos_mid_optname) { case NSOS_MID_IPV6_V6ONLY: return nsos_adapt_getsockopt_int(fd, IPPROTO_IPV6, IPV6_V6ONLY, nsos_mid_optval, nsos_mid_optlen); } break; } return -NSOS_MID_EOPNOTSUPP; } static int nsos_adapt_setsockopt_int(int fd, int level, int optname, const void *optval, size_t optlen) { int ret; ret = setsockopt(fd, level, optname, optval, optlen); if (ret < 0) { return -errno_to_nsos_mid(errno); } return 0; } int 
nsos_adapt_setsockopt(int fd, int nsos_mid_level, int nsos_mid_optname, const void *nsos_mid_optval, size_t nsos_mid_optlen) { switch (nsos_mid_level) { case NSOS_MID_SOL_SOCKET: switch (nsos_mid_optname) { case NSOS_MID_SO_PRIORITY: return nsos_adapt_setsockopt_int(fd, SOL_SOCKET, SO_PRIORITY, nsos_mid_optval, nsos_mid_optlen); case NSOS_MID_SO_RCVTIMEO: { const struct nsos_mid_timeval *nsos_mid_tv = nsos_mid_optval; struct timeval tv = { .tv_sec = nsos_mid_tv->tv_sec, .tv_usec = nsos_mid_tv->tv_usec, }; int ret; ret = setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)); if (ret < 0) { return -errno_to_nsos_mid(errno); } return 0; } case NSOS_MID_SO_SNDTIMEO: { const struct nsos_mid_timeval *nsos_mid_tv = nsos_mid_optval; struct timeval tv = { .tv_sec = nsos_mid_tv->tv_sec, .tv_usec = nsos_mid_tv->tv_usec, }; int ret; ret = setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)); if (ret < 0) { return -errno_to_nsos_mid(errno); } return 0; } case NSOS_MID_SO_RCVBUF: return nsos_adapt_setsockopt_int(fd, SOL_SOCKET, SO_RCVBUF, nsos_mid_optval, nsos_mid_optlen); case NSOS_MID_SO_SNDBUF: return nsos_adapt_setsockopt_int(fd, SOL_SOCKET, SO_SNDBUF, nsos_mid_optval, nsos_mid_optlen); case NSOS_MID_SO_REUSEADDR: return nsos_adapt_setsockopt_int(fd, SOL_SOCKET, SO_REUSEADDR, nsos_mid_optval, nsos_mid_optlen); case NSOS_MID_SO_REUSEPORT: return nsos_adapt_setsockopt_int(fd, SOL_SOCKET, SO_REUSEPORT, nsos_mid_optval, nsos_mid_optlen); case NSOS_MID_SO_LINGER: return nsos_adapt_setsockopt_int(fd, SOL_SOCKET, SO_LINGER, nsos_mid_optval, nsos_mid_optlen); case NSOS_MID_SO_KEEPALIVE: return nsos_adapt_setsockopt_int(fd, SOL_SOCKET, SO_KEEPALIVE, nsos_mid_optval, nsos_mid_optlen); } break; case NSOS_MID_IPPROTO_TCP: switch (nsos_mid_optname) { case NSOS_MID_TCP_NODELAY: return nsos_adapt_setsockopt_int(fd, IPPROTO_TCP, TCP_NODELAY, nsos_mid_optval, nsos_mid_optlen); case NSOS_MID_TCP_KEEPIDLE: return nsos_adapt_setsockopt_int(fd, IPPROTO_TCP, TCP_KEEPIDLE, nsos_mid_optval, 
nsos_mid_optlen); case NSOS_MID_TCP_KEEPINTVL: return nsos_adapt_setsockopt_int(fd, IPPROTO_TCP, TCP_KEEPINTVL, nsos_mid_optval, nsos_mid_optlen); case NSOS_MID_TCP_KEEPCNT: return nsos_adapt_setsockopt_int(fd, IPPROTO_TCP, TCP_KEEPCNT, nsos_mid_optval, nsos_mid_optlen); } break; case NSOS_MID_IPPROTO_IPV6: switch (nsos_mid_optname) { case NSOS_MID_IPV6_V6ONLY: return nsos_adapt_setsockopt_int(fd, IPPROTO_IPV6, IPV6_V6ONLY, nsos_mid_optval, nsos_mid_optlen); } break; } return -NSOS_MID_EOPNOTSUPP; } #define MAP_POLL_EPOLL(_event_from, _event_to) \ if (events_from & (_event_from)) { \ events_from &= ~(_event_from); \ events_to |= _event_to; \ } static int nsos_poll_to_epoll_events(int events_from) { int events_to = 0; MAP_POLL_EPOLL(POLLIN, EPOLLIN); MAP_POLL_EPOLL(POLLOUT, EPOLLOUT); MAP_POLL_EPOLL(POLLERR, EPOLLERR); MAP_POLL_EPOLL(POLLHUP, EPOLLHUP); return events_to; } static int nsos_epoll_to_poll_events(int events_from) { int events_to = 0; MAP_POLL_EPOLL(EPOLLIN, POLLIN); MAP_POLL_EPOLL(EPOLLOUT, POLLOUT); MAP_POLL_EPOLL(EPOLLERR, POLLERR); MAP_POLL_EPOLL(EPOLLHUP, POLLHUP); return events_to; } #undef MAP_POLL_EPOLL static uint64_t nsos_adapt_poll_time = NSI_NEVER; void nsos_adapt_poll_add(struct nsos_mid_pollfd *pollfd) { struct epoll_event ev = { .data.ptr = pollfd, .events = nsos_poll_to_epoll_events(pollfd->events), }; int err; nsos_adapt_nfds++; err = epoll_ctl(nsos_epoll_fd, EPOLL_CTL_ADD, pollfd->fd, &ev); if (err) { nsi_print_error_and_exit("error in EPOLL_CTL_ADD: errno=%d\n", errno); return; } nsos_adapt_poll_time = nsi_hws_get_time() + 1; nsi_hws_find_next_event(); } void nsos_adapt_poll_remove(struct nsos_mid_pollfd *pollfd) { int err; err = epoll_ctl(nsos_epoll_fd, EPOLL_CTL_DEL, pollfd->fd, NULL); if (err) { nsi_print_error_and_exit("error in EPOLL_CTL_DEL: errno=%d\n", errno); return; } nsos_adapt_nfds--; } void nsos_adapt_poll_update(struct nsos_mid_pollfd *pollfd) { struct pollfd fds = { .fd = pollfd->fd, .events = pollfd->events, }; int ret; 
ret = poll(&fds, 1, 0); if (ret < 0) { nsi_print_error_and_exit("error in poll(): errno=%d\n", errno); return; } if (ret > 0) { pollfd->revents = fds.revents; } } struct nsos_addrinfo_wrap { struct nsos_mid_addrinfo addrinfo_mid; struct nsos_mid_sockaddr_storage addr_storage; struct addrinfo *addrinfo; }; static int addrinfo_to_nsos_mid(struct addrinfo *res, struct nsos_mid_addrinfo **mid_res) { struct nsos_addrinfo_wrap *nsos_res_wraps; size_t idx_res = 0; size_t n_res = 0; int ret; for (struct addrinfo *res_p = res; res_p; res_p = res_p->ai_next) { n_res++; } if (n_res == 0) { return 0; } nsos_res_wraps = calloc(n_res, sizeof(*nsos_res_wraps)); if (!nsos_res_wraps) { return -NSOS_MID_ENOMEM; } for (struct addrinfo *res_p = res; res_p; res_p = res_p->ai_next, idx_res++) { struct nsos_addrinfo_wrap *wrap = &nsos_res_wraps[idx_res]; wrap->addrinfo = res_p; wrap->addrinfo_mid.ai_flags = res_p->ai_flags; ret = socket_family_to_nsos_mid(res_p->ai_family, &wrap->addrinfo_mid.ai_family); if (ret < 0) { goto free_wraps; } ret = socket_type_to_nsos_mid(res_p->ai_socktype, &wrap->addrinfo_mid.ai_socktype); if (ret < 0) { goto free_wraps; } ret = socket_proto_to_nsos_mid(res_p->ai_protocol, &wrap->addrinfo_mid.ai_protocol); if (ret < 0) { goto free_wraps; } wrap->addrinfo_mid.ai_addr = (struct nsos_mid_sockaddr *)&wrap->addr_storage; wrap->addrinfo_mid.ai_addrlen = sizeof(wrap->addr_storage); ret = sockaddr_to_nsos_mid(res_p->ai_addr, res_p->ai_addrlen, wrap->addrinfo_mid.ai_addr, &wrap->addrinfo_mid.ai_addrlen); if (ret < 0) { goto free_wraps; } wrap->addrinfo_mid.ai_canonname = res_p->ai_canonname ? 
strdup(res_p->ai_canonname) : NULL; wrap->addrinfo_mid.ai_next = &wrap[1].addrinfo_mid; } nsos_res_wraps[n_res - 1].addrinfo_mid.ai_next = NULL; *mid_res = &nsos_res_wraps->addrinfo_mid; return 0; free_wraps: for (struct nsos_mid_addrinfo *res_p = &nsos_res_wraps[0].addrinfo_mid; res_p; res_p = res_p->ai_next) { free(res_p->ai_canonname); } free(nsos_res_wraps); return ret; } int nsos_adapt_getaddrinfo(const char *node, const char *service, const struct nsos_mid_addrinfo *hints_mid, struct nsos_mid_addrinfo **res_mid, int *system_errno) { struct addrinfo hints; struct addrinfo *res = NULL; int ret; if (hints_mid) { hints.ai_flags = hints_mid->ai_flags; ret = socket_family_from_nsos_mid(hints_mid->ai_family, &hints.ai_family); if (ret < 0) { *system_errno = -ret; return NSOS_MID_EAI_SYSTEM; } ret = socket_type_from_nsos_mid(hints_mid->ai_socktype, &hints.ai_socktype); if (ret < 0) { *system_errno = -ret; return NSOS_MID_EAI_SYSTEM; } ret = socket_proto_from_nsos_mid(hints_mid->ai_protocol, &hints.ai_protocol); if (ret < 0) { *system_errno = -ret; return NSOS_MID_EAI_SYSTEM; } } ret = getaddrinfo(node, service, hints_mid ? 
&hints : NULL, &res); if (ret < 0) { return ret; } ret = addrinfo_to_nsos_mid(res, res_mid); if (ret < 0) { *system_errno = -ret; return NSOS_MID_EAI_SYSTEM; } return ret; } void nsos_adapt_freeaddrinfo(struct nsos_mid_addrinfo *res_mid) { struct nsos_addrinfo_wrap *wrap = CONTAINER_OF(res_mid, struct nsos_addrinfo_wrap, addrinfo_mid); for (struct nsos_mid_addrinfo *res_p = res_mid; res_p; res_p = res_p->ai_next) { free(res_p->ai_canonname); } freeaddrinfo(wrap->addrinfo); free(wrap); } int nsos_adapt_fcntl_getfl(int fd) { int flags; flags = fcntl(fd, F_GETFL); return fl_to_nsos_mid(flags); } int nsos_adapt_fcntl_setfl(int fd, int flags) { int ret; ret = fcntl(fd, F_SETFL, fl_from_nsos_mid(flags)); if (ret < 0) { return -errno_to_nsos_mid(errno); } return 0; } int nsos_adapt_fionread(int fd, int *avail) { int ret; ret = ioctl(fd, FIONREAD, avail); if (ret < 0) { return -errno_to_nsos_mid(errno); } return 0; } int nsos_adapt_dup(int oldfd) { int ret; ret = dup(oldfd); if (ret < 0) { return -errno_to_nsos_mid(errno); } return ret; } static void nsos_adapt_init(void) { nsos_epoll_fd = epoll_create(1); if (nsos_epoll_fd < 0) { nsi_print_error_and_exit("error from epoll_create(): errno=%d\n", errno); return; } } NSI_TASK(nsos_adapt_init, HW_INIT, 500); static void nsos_adapt_poll_triggered(void) { static struct epoll_event events[1024]; int ret; if (nsos_adapt_nfds == 0) { nsos_adapt_poll_time = NSI_NEVER; return; } ret = epoll_wait(nsos_epoll_fd, events, ARRAY_SIZE(events), 0); if (ret < 0) { if (errno == EINTR) { nsi_print_warning("interrupted epoll_wait()\n"); nsos_adapt_poll_time = nsi_hws_get_time() + 1; return; } nsi_print_error_and_exit("error in nsos_adapt poll(): errno=%d\n", errno); nsos_adapt_poll_time = NSI_NEVER; return; } for (int i = 0; i < ret; i++) { struct nsos_mid_pollfd *pollfd = events[i].data.ptr; pollfd->revents = nsos_epoll_to_poll_events(events[i].events); } if (ret > 0) { hw_irq_ctrl_set_irq(NSOS_IRQ); nsos_adapt_poll_time = nsi_hws_get_time() 
+ 1; } else { nsos_adapt_poll_time = nsi_hws_get_time() + NSOS_EPOLL_WAIT_INTERVAL; } } NSI_HW_EVENT(nsos_adapt_poll_time, nsos_adapt_poll_triggered, 500); ```
/content/code_sandbox/drivers/net/nsos_adapt.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
7,759
```c
/*
 * Network loopback driver: a dummy-L2 interface that reflects every sent
 * packet back into the RX path of the same interface.
 */

/**
 * @file
 *
 * Network loopback interface implementation.
 */

#define LOG_MODULE_NAME netlo
#define LOG_LEVEL CONFIG_NET_LOOPBACK_LOG_LEVEL

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_MODULE_NAME);

#include <zephyr/net/net_pkt.h>
#include <zephyr/net/buf.h>
#include <zephyr/net/net_ip.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/loopback.h>
#include <zephyr/net/dummy.h>

/* Device init hook required by NET_DEVICE_INIT(); nothing to set up. */
int loopback_dev_init(const struct device *dev)
{
	ARG_UNUSED(dev);

	return 0;
}

/* Interface init: assign a documentation MAC address and register the
 * IPv4/IPv6 loopback addresses (as enabled in Kconfig).
 */
static void loopback_init(struct net_if *iface)
{
	struct net_if_addr *ifaddr;

	/* RFC 7042, s.2.1.1. address to use in documentation */
	net_if_set_link_addr(iface, "\x00\x00\x5e\x00\x53\xff", 6,
			     NET_LINK_DUMMY);

	if (IS_ENABLED(CONFIG_NET_IPV4)) {
		struct in_addr ipv4_loopback = INADDR_LOOPBACK_INIT;
		struct in_addr netmask = { { { 255, 0, 0, 0 } } };

		ifaddr = net_if_ipv4_addr_add(iface, &ipv4_loopback,
					      NET_ADDR_AUTOCONF, 0);
		if (!ifaddr) {
			LOG_ERR("Failed to register IPv4 loopback address");
		}

		/* NOTE(review): the netmask is set even when the address add
		 * above failed — presumably harmless, but worth confirming.
		 */
		net_if_ipv4_set_netmask_by_addr(iface, &ipv4_loopback, &netmask);
	}

	if (IS_ENABLED(CONFIG_NET_IPV6)) {
		struct in6_addr ipv6_loopback = IN6ADDR_LOOPBACK_INIT;

		ifaddr = net_if_ipv6_addr_add(iface, &ipv6_loopback,
					      NET_ADDR_AUTOCONF, 0);
		if (!ifaddr) {
			LOG_ERR("Failed to register IPv6 loopback address");
		}
	}
}

#ifdef CONFIG_NET_LOOPBACK_SIMULATE_PACKET_DROP
/* Configured drop ratio in [0.0, 1.0]. */
static float loopback_packet_drop_ratio = 0.0f;
/* Fractional accumulator: a packet is dropped each time it reaches 1.0. */
static float loopback_packet_drop_state = 0.0f;
/* Running count of packets dropped so far (test observability). */
static int loopback_packet_dropped_count;

/* Set the simulated drop ratio; rejects values outside [0.0, 1.0]. */
int loopback_set_packet_drop_ratio(float ratio)
{
	if (ratio < 0.0f || ratio > 1.0f) {
		return -EINVAL;
	}
	loopback_packet_drop_ratio = ratio;
	return 0;
}

/* Number of packets dropped since boot. */
int loopback_get_num_dropped_packets(void)
{
	return loopback_packet_dropped_count;
}
#endif

/* Send callback: clone the packet into the RX path with source/destination
 * addresses swapped, optionally dropping it first when packet-drop
 * simulation is enabled.
 */
static int loopback_send(const struct device *dev, struct net_pkt *pkt)
{
	struct net_pkt *cloned;
	int res;

	ARG_UNUSED(dev);

#ifdef CONFIG_NET_LOOPBACK_SIMULATE_PACKET_DROP
	/* Drop packets based on the loopback_packet_drop_ratio
	 * a ratio of 0.2 will drop one every 5 packets
	 */
	loopback_packet_drop_state += loopback_packet_drop_ratio;
	if (loopback_packet_drop_state >= 1.0f) {
		/* Administrate we dropped a packet */
		loopback_packet_drop_state -= 1.0f;
		loopback_packet_dropped_count++;
		/* Returning 0 reports "sent" to the caller, so the drop is
		 * invisible to the sender — intentional for simulation.
		 */
		return 0;
	}
#endif

	if (!pkt->frags) {
		LOG_ERR("No data to send");
		return -ENODATA;
	}

	/* We should simulate normal driver meaning that if the packet is
	 * properly sent (which is always in this driver), then the packet
	 * must be dropped. This is very much needed for TCP packets where
	 * the packet is reference counted in various stages of sending.
	 */
	cloned = net_pkt_rx_clone(pkt, K_MSEC(100));
	if (!cloned) {
		res = -ENOMEM;
		goto out;
	}

	/* We need to swap the IP addresses because otherwise
	 * the packet will be dropped.
	 */
	if (net_pkt_family(pkt) == AF_INET6) {
		net_ipv6_addr_copy_raw(NET_IPV6_HDR(cloned)->src,
				       NET_IPV6_HDR(pkt)->dst);
		net_ipv6_addr_copy_raw(NET_IPV6_HDR(cloned)->dst,
				       NET_IPV6_HDR(pkt)->src);
	} else {
		net_ipv4_addr_copy_raw(NET_IPV4_HDR(cloned)->src,
				       NET_IPV4_HDR(pkt)->dst);
		net_ipv4_addr_copy_raw(NET_IPV4_HDR(cloned)->dst,
				       NET_IPV4_HDR(pkt)->src);
	}

	res = net_recv_data(net_pkt_iface(cloned), cloned);
	if (res < 0) {
		LOG_ERR("Data receive failed.");
	}

out:
	/* Let the receiving thread run now */
	k_yield();

	return res;
}

static struct dummy_api loopback_api = {
	.iface_api.init = loopback_init,

	.send = loopback_send,
};

NET_DEVICE_INIT(loopback, "lo",
		loopback_dev_init, NULL, NULL, NULL,
		CONFIG_KERNEL_INIT_PRIORITY_DEFAULT,
		&loopback_api, DUMMY_L2,
		NET_L2_GET_CTX_TYPE(DUMMY_L2), CONFIG_NET_LOOPBACK_MTU);
```
/content/code_sandbox/drivers/net/loopback.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,075
```objective-c
/**
 * Socket option identifiers shared between the Zephyr side and the host
 * side of the NSOS (native simulator offloaded sockets) driver. The
 * NSOS_MID_* "middleground" namespace is independent of both the Zephyr
 * and the host numbering, so it is what crosses the boundary.
 */

#ifndef __DRIVERS_NET_NSOS_SOCKET_H__
#define __DRIVERS_NET_NSOS_SOCKET_H__

#include <stdint.h>

/**
 * @name Socket level options (NSOS_MID_SOL_SOCKET)
 * @{
 */

/** Socket-level option */
#define NSOS_MID_SOL_SOCKET 1

/* Socket options for NSOS_MID_SOL_SOCKET level */

/** Recording debugging information (ignored, for compatibility) */
#define NSOS_MID_SO_DEBUG 1

/** address reuse */
#define NSOS_MID_SO_REUSEADDR 2

/** Type of the socket */
#define NSOS_MID_SO_TYPE 3

/** Async error */
#define NSOS_MID_SO_ERROR 4

/** Bypass normal routing and send directly to host (ignored, for compatibility) */
#define NSOS_MID_SO_DONTROUTE 5

/** Transmission of broadcast messages is supported (ignored, for compatibility) */
#define NSOS_MID_SO_BROADCAST 6

/** Size of socket send buffer */
#define NSOS_MID_SO_SNDBUF 7

/** Size of socket recv buffer */
#define NSOS_MID_SO_RCVBUF 8

/** Enable sending keep-alive messages on connections */
#define NSOS_MID_SO_KEEPALIVE 9

/** Place out-of-band data into receive stream (ignored, for compatibility) */
#define NSOS_MID_SO_OOBINLINE 10

/** Socket priority */
#define NSOS_MID_SO_PRIORITY 12

/** Socket lingers on close (ignored, for compatibility) */
#define NSOS_MID_SO_LINGER 13

/** Allow multiple sockets to reuse a single port */
#define NSOS_MID_SO_REUSEPORT 15

/** Receive low watermark (ignored, for compatibility) */
#define NSOS_MID_SO_RCVLOWAT 18

/** Send low watermark (ignored, for compatibility) */
#define NSOS_MID_SO_SNDLOWAT 19

/**
 * Receive timeout
 * Applies to receive functions like recv(), but not to connect()
 */
#define NSOS_MID_SO_RCVTIMEO 20

/** Send timeout */
#define NSOS_MID_SO_SNDTIMEO 21

/** Bind a socket to an interface */
#define NSOS_MID_SO_BINDTODEVICE 25

/** Socket accepts incoming connections (ignored, for compatibility) */
#define NSOS_MID_SO_ACCEPTCONN 30

/** Timestamp TX packets */
#define NSOS_MID_SO_TIMESTAMPING 37

/** Protocol used with the socket */
#define NSOS_MID_SO_PROTOCOL 38

/** Domain used with SOCKET */
#define NSOS_MID_SO_DOMAIN 39

/** Enable SOCKS5 for Socket */
#define NSOS_MID_SO_SOCKS5 60

/** Socket TX time (when the data should be sent) */
#define NSOS_MID_SO_TXTIME 61

/* 64-bit timeval used for SO_RCVTIMEO/SO_SNDTIMEO across the mid boundary,
 * independent of the host's time_t width.
 */
struct nsos_mid_timeval {
	int64_t tv_sec;
	int64_t tv_usec;
};

/** @} */

/**
 * @name TCP level options (NSOS_MID_IPPROTO_TCP)
 * @{
 */

/* Socket options for NSOS_MID_IPPROTO_TCP level */

/** Disable TCP buffering (ignored, for compatibility) */
#define NSOS_MID_TCP_NODELAY 1

/** Start keepalives after this period (seconds) */
#define NSOS_MID_TCP_KEEPIDLE 2

/** Interval between keepalives (seconds) */
#define NSOS_MID_TCP_KEEPINTVL 3

/** Number of keepalives before dropping connection */
#define NSOS_MID_TCP_KEEPCNT 4

/** @} */

/**
 * @name IPv6 level options (NSOS_MID_IPPROTO_IPV6)
 * @{
 */

/* Socket options for NSOS_MID_IPPROTO_IPV6 level */

/** Set the unicast hop limit for the socket. */
#define NSOS_MID_IPV6_UNICAST_HOPS 16

/** Set the multicast hop limit for the socket. */
#define NSOS_MID_IPV6_MULTICAST_HOPS 18

/** Join IPv6 multicast group. */
#define NSOS_MID_IPV6_ADD_MEMBERSHIP 20

/** Leave IPv6 multicast group. */
#define NSOS_MID_IPV6_DROP_MEMBERSHIP 21

/** Don't support IPv4 access */
#define NSOS_MID_IPV6_V6ONLY 26

/** Pass an IPV6_RECVPKTINFO ancillary message that contains a
 *  in6_pktinfo structure that supplies some information about the
 *  incoming packet. See RFC 3542.
 */
#define NSOS_MID_IPV6_RECVPKTINFO 49

/** @} */

#endif /* __DRIVERS_NET_NSOS_SOCKET_H__ */
```
/content/code_sandbox/drivers/net/nsos_socket.h
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
946
```unknown # Misc network drivers configuration options menuconfig NET_DRIVERS bool "Network drivers" if NET_DRIVERS # # PPP options # menuconfig NET_PPP bool "Point-to-point (PPP) UART based driver" depends on NET_L2_PPP depends on NET_NATIVE select RING_BUFFER select CRC if NET_PPP config NET_PPP_ASYNC_UART bool "Asynchronous UART API is used" depends on UART_ASYNC_API config NET_PPP_DRV_NAME string "PPP Driver name" default "ppp" help This option sets the driver name config NET_PPP_MTU_MRU int "PPP MTU and MRU" default 1500 help This options sets MTU and MRU for PPP link. config NET_PPP_UART_BUF_LEN int "Buffer length when reading from UART" default 8 help This options sets the size of the UART buffer where data is being read to. config NET_PPP_RINGBUF_SIZE int "PPP ring buffer size" default 256 help PPP ring buffer size when passing data from RX ISR to worker thread that will pass the data to IP stack. config NET_PPP_RX_STACK_SIZE int "Size of the stack allocated for receiving data from modem" default 768 help Sets the stack size which will be used by the PPP RX workqueue. config NET_PPP_RX_PRIORITY int "RX workqueue thread priority" default 7 help Sets the priority of the RX workqueue thread. config NET_PPP_VERIFY_FCS bool "Verify that received FCS is valid" default y help If you have a reliable link, then it might make sense to disable this as it takes some time to verify the received packet. config NET_PPP_CAPTURE bool "Capture received PPP packets" depends on NET_CAPTURE_COOKED_MODE help This enables PPP packet capture. One needs to configure the packet capturing in core network stack to send the captured packets to outside system. This requires a non-PPP network connection where the captured packets are sent for processing. Note that you cannot use the PPP connection to sending packets as that would lead recursion. 
config NET_PPP_CAPTURE_BUF_SIZE int "Capture buffer for storing full PPP packets" depends on NET_PPP_CAPTURE default 1500 help The captured PPP frames are temporarily stored into this buffer. config PPP_MAC_ADDR string "MAC address for the interface" help Specify a MAC address for the PPP interface in the form of six hex 8-bit chars separated by colons (e.g.: aa:33:cc:22:e2:c0). The default is an empty string, which means the code will make 00:00:5E:00:53:XX, where XX will be random. config PPP_CLIENT_CLIENTSERVER bool "Reply to the request CLIENT with CLIENTSERVER" help This is only necessary if a ppp connection should be established with a Microsoft Windows PC. config PPP_NET_IF_NO_AUTO_START bool "Disable PPP interface auto-start" help This option allows user to disable autostarting of the PPP interface immediately after initialization. if NET_PPP_ASYNC_UART config NET_PPP_ASYNC_UART_TX_BUF_LEN int "Length of the UART TX buffer to which data is written." default 2048 config NET_PPP_ASYNC_UART_TX_TIMEOUT int "The timeout for UART transfers in milliseconds, or 0 for no timeout." default 0 config NET_PPP_ASYNC_UART_RX_RECOVERY_TIMEOUT int "UART RX recovery timeout in milliseconds" default 200 help The time that UART RX is in disabled state in case when we cannot receive more data from UART. config NET_PPP_ASYNC_UART_RX_ENABLE_TIMEOUT int "A timeout for uart_rx_enable() in milliseconds" default 100 endif # NET_PPP_ASYNC_UART module = NET_PPP module-dep = LOG module-str = Log level for ppp driver module-help = Sets log level for ppp driver. source "subsys/net/Kconfig.template.log_config.net" endif # NET_PPP # # SLIP options # menuconfig SLIP bool "SLIP driver" if !QEMU_TARGET depends on NET_NATIVE select UART_PIPE select UART_INTERRUPT_DRIVEN if SLIP config SLIP_DRV_NAME string "SLIP Driver name" default "slip" help This option sets the driver name module = SLIP module-dep = LOG module-str = Log level for slip driver module-help = Sets log level for slip driver. 
source "subsys/net/Kconfig.template.log_config.net"

config SLIP_STATISTICS
	bool "SLIP network connection statistics"
	help
	  This option enables statistics support for SLIP driver.

config SLIP_TAP
	bool "Use TAP interface to host"
	default y
	help
	  In TAP the Ethernet frames are transferred over SLIP.

config SLIP_MAC_ADDR
	string "MAC address for the interface"
	help
	  Specify a MAC address for the SLIP interface in the form of
	  six hex 8-bit chars separated by colons (e.g.:
	  aa:33:cc:22:e2:c0). The default is an empty string, which
	  means the code will make 00:00:5E:00:53:XX, where XX will be
	  random.

endif

#
# Net loopback options
#
menuconfig NET_LOOPBACK
	bool "Net loopback driver"
	select NET_L2_DUMMY

if NET_LOOPBACK

config NET_LOOPBACK_SIMULATE_PACKET_DROP
	bool "Controllable packet drop"
	help
	  Enable interface to have a controllable packet drop rate, only
	  for testing, should not be enabled for normal applications

config NET_LOOPBACK_MTU
	int "MTU for loopback interface"
	default 576
	help
	  This option sets the MTU for loopback interface.

module = NET_LOOPBACK
module-dep = LOG
module-str = Log level for network loopback driver
module-help = Sets log level for network loopback driver.
source "subsys/net/Kconfig.template.log_config.net"

endif

#
# CAN bus network driver options
#
menuconfig NET_CANBUS
	bool "Controller Area Network (CAN) bus network driver"
	help
	  Enable the CAN bus network driver. This driver provides a network
	  interface on top of the CAN controller driver API.

if NET_CANBUS

module = NET_CANBUS
module-dep = LOG
module-str = Log level for CAN bus network driver
module-help = Sets log level for CAN bus network driver.
source "subsys/net/Kconfig.template.log_config.net"

config NET_CANBUS_INIT_PRIORITY
	int "CAN bus network device init priority"
	default 81
	help
	  CAN bus network device initialization priority.
	  The priority needs to be lower than the network stack
	  and higher than the CAN controller driver.
endif # NET_CANBUS

#
# Native simulator offloaded sockets
#
menuconfig NET_NATIVE_OFFLOADED_SOCKETS
	bool "Native Simulator offloaded sockets"
	depends on ARCH_POSIX
	depends on NATIVE_LIBRARY
	depends on NET_SOCKETS_OFFLOAD
	help
	  Offloaded sockets for Native Simulator utilize host BSD sockets API
	  (like socket(), connect(), send(), recvfrom(), etc.) in order to
	  provide networking capability.

	  This driver's main advantage is that it is possible to use this
	  driver without any additional setup on the host side, unlike with
	  the native TAP Ethernet driver.

if NET_NATIVE_OFFLOADED_SOCKETS

config NET_NATIVE_OFFLOADED_SOCKETS_EPOLL_WAIT_INTERVAL
	int "Interval between epoll_wait() calls (in simulated microseconds)"
	default 1000
	help
	  Number of simulated microseconds before next epoll_wait() call, when
	  there were no pending events detected. Decrease that value when lower
	  network traffic latency is expected, at the expense of more CPU
	  processing overhead.

endif # NET_NATIVE_OFFLOADED_SOCKETS

endif # NET_DRIVERS
```
/content/code_sandbox/drivers/net/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,716
```c /** * */ #include "nsos_errno.h" #ifndef ARRAY_SIZE #define ARRAY_SIZE(array) (sizeof(array) / sizeof((array)[0])) #endif struct nsos_mid_errno_map { /** Zephyr/host error code */ int err; /** NSOS middleground error code */ int mid_err; }; #define ERR(_name) \ { _name, NSOS_MID_ ## _name } static const struct nsos_mid_errno_map map[] = { ERR(EPERM), ERR(ENOENT), ERR(ESRCH), ERR(EINTR), ERR(EIO), ERR(ENXIO), ERR(E2BIG), ERR(ENOEXEC), ERR(EBADF), ERR(ECHILD), ERR(EAGAIN), ERR(ENOMEM), ERR(EACCES), ERR(EFAULT), ERR(ENOTBLK), ERR(EBUSY), ERR(EEXIST), ERR(EXDEV), ERR(ENODEV), ERR(ENOTDIR), ERR(EISDIR), ERR(EINVAL), ERR(ENFILE), ERR(EMFILE), ERR(ENOTTY), ERR(ETXTBSY), ERR(EFBIG), ERR(ENOSPC), ERR(ESPIPE), ERR(EROFS), ERR(EMLINK), ERR(EPIPE), ERR(EDOM), ERR(ERANGE), ERR(ENOMSG), ERR(EDEADLK), ERR(ENOLCK), ERR(ENOSTR), ERR(ENODATA), ERR(ETIME), ERR(ENOSR), ERR(EPROTO), ERR(EBADMSG), ERR(ENOSYS), ERR(ENOTEMPTY), ERR(ENAMETOOLONG), ERR(ELOOP), ERR(EOPNOTSUPP), ERR(EPFNOSUPPORT), ERR(ECONNRESET), ERR(ENOBUFS), ERR(EAFNOSUPPORT), ERR(EPROTOTYPE), ERR(ENOTSOCK), ERR(ENOPROTOOPT), ERR(ESHUTDOWN), ERR(ECONNREFUSED), ERR(EADDRINUSE), ERR(ECONNABORTED), ERR(ENETUNREACH), ERR(ENETDOWN), ERR(ETIMEDOUT), ERR(EHOSTDOWN), ERR(EHOSTUNREACH), ERR(EINPROGRESS), ERR(EALREADY), ERR(EDESTADDRREQ), ERR(EMSGSIZE), ERR(EPROTONOSUPPORT), ERR(ESOCKTNOSUPPORT), ERR(EADDRNOTAVAIL), ERR(ENETRESET), ERR(EISCONN), ERR(ENOTCONN), ERR(ETOOMANYREFS), ERR(ENOTSUP), ERR(EILSEQ), ERR(EOVERFLOW), ERR(ECANCELED), }; int errno_to_nsos_mid(int err) { for (int i = 0; i < ARRAY_SIZE(map); i++) { if (map[i].err == err) { return map[i].mid_err; } } return err; } int errno_from_nsos_mid(int err) { for (int i = 0; i < ARRAY_SIZE(map); i++) { if (map[i].mid_err == err) { return map[i].err; } } return err; } ```
/content/code_sandbox/drivers/net/nsos_errno.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
654
```objective-c
/**
 * fcntl() file-status flags in the NSOS "middleground" namespace, plus the
 * conversion helpers between host flags and mid flags.
 */

#ifndef __DRIVERS_NET_NSOS_FCNTL_H__
#define __DRIVERS_NET_NSOS_FCNTL_H__

/* Access-mode bits (mutually exclusive, occupy the two low bits). */
#define NSOS_MID_O_RDONLY	00
#define NSOS_MID_O_WRONLY	01
#define NSOS_MID_O_RDWR		02

/* File-status flag bits. */
#define NSOS_MID_O_APPEND	0x0400
#define NSOS_MID_O_EXCL		0x0800
#define NSOS_MID_O_NONBLOCK	0x4000

/* Convert host fcntl flags to the mid namespace, ignoring unknown bits. */
int fl_to_nsos_mid(int flags);
/* As fl_to_nsos_mid(), but failing on flags with no mid equivalent
 * (exact contract defined in nsos_fcntl.c — confirm there).
 */
int fl_to_nsos_mid_strict(int flags);
/* Convert mid flags back to the host fcntl namespace. */
int fl_from_nsos_mid(int flags);

#endif /* __DRIVERS_NET_NSOS_FCNTL_H__ */
```
/content/code_sandbox/drivers/net/nsos_fcntl.h
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
144
```unknown # FPGA Microchip PolarFire SOC driver configuration options config MPFS_FPGA bool "Microchip PolarFire SOC FPGA driver" depends on SPI help Enable support for the Microchip PolarFire SOC FPGA driver. ```
/content/code_sandbox/drivers/fpga/Kconfig.mpfs
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
48
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_FPGA_EOS_S3_H_ #define ZEPHYR_DRIVERS_FPGA_EOS_S3_H_ #include <eoss3_dev.h> struct PIF_struct { /* Fabric Configuration Control Register, offset: 0x000 */ __IO uint32_t CFG_CTL; /* Maximum Bit Length Count, offset: 0x004 */ __IO uint32_t MAX_BL_CNT; /* Maximum Word Length Count, offset: 0x008 */ __IO uint32_t MAX_WL_CNT; uint32_t reserved[1020]; /* Configuration Data, offset: 0xFFC */ __IO uint32_t CFG_DATA; }; #define PIF ((struct PIF_struct *)PIF_CTRL_BASE) #define FB_CFG_ENABLE ((uint32_t)(0x00000200)) #define FB_CFG_DISABLE ((uint32_t)(0x00000000)) #define CFG_CTL_APB_CFG_WR ((uint32_t)(0x00008000)) #define CFG_CTL_APB_CFG_RD ((uint32_t)(0x00004000)) #define CFG_CTL_APB_WL_DIN ((uint32_t)(0x00003C00)) #define CFG_CTL_APB_PARTIAL_LOAD ((uint32_t)(0x00000200)) #define CFG_CTL_APB_BL_SEL ((uint32_t)(0x00000100)) #define CFG_CTL_APB_BLM_SEL ((uint32_t)(0x00000080)) #define CFG_CTL_APB_BR_SEL ((uint32_t)(0x00000040)) #define CFG_CTL_APB_BRM_SEL ((uint32_t)(0x00000020)) #define CFG_CTL_APB_TL_SEL ((uint32_t)(0x00000010)) #define CFG_CTL_APB_TLM_SEL ((uint32_t)(0x00000008)) #define CFG_CTL_APB_TR_SEL ((uint32_t)(0x00000004)) #define CFG_CTL_APB_TRM_SEL ((uint32_t)(0x00000002)) #define CFG_CTL_APB_SEL_CFG ((uint32_t)(0x00000001)) #define FB_ISOLATION_ENABLE ((uint32_t)(0x00000001)) #define FB_ISOLATION_DISABLE ((uint32_t)(0x00000000)) #define PMU_FFE_FB_PF_SW_PD_FB_PD ((uint32_t)(0x00000002)) #define PMU_FB_PWR_MODE_CFG_FB_SD ((uint32_t)(0x00000002)) #define PMU_FB_PWR_MODE_CFG_FB_DP ((uint32_t)(0x00000001)) #define FPGA_INFO \ "eos_s3 eFPGA features:\n" \ "891 Logic Cells\n" \ "8 FIFO Controllers\n" \ "32 Configurable Interfaces\n" \ "2x32x32(or 4x16x16) Multiplier\n" \ "64Kbit SRAM\n" #define PAD_ENABLE \ (PAD_E_4MA | PAD_P_PULLDOWN | PAD_OEN_NORMAL | PAD_SMT_DISABLE | PAD_REN_DISABLE | \ PAD_SR_SLOW | PAD_CTRL_SEL_AO_REG) #define PAD_DISABLE \ (PAD_SMT_DISABLE | PAD_REN_DISABLE | PAD_SR_SLOW | PAD_E_4MA | PAD_P_PULLDOWN 
| \ PAD_OEN_NORMAL | PAD_CTRL_SEL_AO_REG) #define CFG_CTL_LOAD_ENABLE \ (CFG_CTL_APB_CFG_WR | CFG_CTL_APB_WL_DIN | CFG_CTL_APB_BL_SEL | CFG_CTL_APB_BLM_SEL | \ CFG_CTL_APB_BR_SEL | CFG_CTL_APB_BRM_SEL | CFG_CTL_APB_TL_SEL | CFG_CTL_APB_TLM_SEL | \ CFG_CTL_APB_TR_SEL | CFG_CTL_APB_TRM_SEL | CFG_CTL_APB_SEL_CFG) #define CFG_CTL_LOAD_DISABLE 0 #endif /* ZEPHYR_DRIVERS_FPGA_EOS_S3_H_ */ ```
/content/code_sandbox/drivers/fpga/fpga_eos_s3.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
845
```c /* * */ #include <zephyr/sys/printk.h> #include <zephyr/shell/shell.h> #include <zephyr/version.h> #include <stdlib.h> #include <zephyr/drivers/fpga.h> static int parse_common_args(const struct shell *sh, char **argv, const struct device **dev) { *dev = device_get_binding(argv[1]); if (!*dev) { shell_error(sh, "FPGA device %s not found", argv[1]); return -ENODEV; } return 0; } static int cmd_on(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; int err; err = parse_common_args(sh, argv, &dev); if (err < 0) { return err; } shell_print(sh, "%s: turning on", dev->name); err = fpga_on(dev); if (err) { shell_error(sh, "Error: %d", err); } return err; } static int cmd_off(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; int err; err = parse_common_args(sh, argv, &dev); if (err < 0) { return err; } shell_print(sh, "%s: turning off", dev->name); err = fpga_off(dev); if (err) { shell_error(sh, "Error: %d", err); } return err; } static int cmd_reset(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; int err; err = parse_common_args(sh, argv, &dev); if (err < 0) { return err; } shell_print(sh, "%s: resetting FPGA", dev->name); err = fpga_reset(dev); if (err) { shell_error(sh, "Error: %d", err); } return err; } static int cmd_load(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; int err; err = parse_common_args(sh, argv, &dev); if (err < 0) { return err; } shell_print(sh, "%s: loading bitstream", dev->name); err = fpga_load(dev, (uint32_t *)strtol(argv[2], NULL, 0), (uint32_t)atoi(argv[3])); if (err) { shell_error(sh, "Error: %d", err); } return err; } static int cmd_get_status(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; int err; err = parse_common_args(sh, argv, &dev); if (err < 0) { return err; } shell_print(sh, "%s status: %d", dev->name, fpga_get_status(dev)); return err; } static int cmd_get_info(const struct shell *sh, 
size_t argc, char **argv) { const struct device *dev; int err; err = parse_common_args(sh, argv, &dev); if (err < 0) { return err; } shell_print(sh, "%s", fpga_get_info(dev)); return err; } SHELL_STATIC_SUBCMD_SET_CREATE( sub_fpga, SHELL_CMD_ARG(off, NULL, "<device>", cmd_off, 2, 0), SHELL_CMD_ARG(on, NULL, "<device>", cmd_on, 2, 0), SHELL_CMD_ARG(reset, NULL, "<device>", cmd_reset, 2, 0), SHELL_CMD_ARG(load, NULL, "<device> <address> <size in bytes>", cmd_load, 4, 0), SHELL_CMD_ARG(get_status, NULL, "<device>", cmd_get_status, 2, 0), SHELL_CMD_ARG(get_info, NULL, "<device>", cmd_get_info, 2, 0), SHELL_SUBCMD_SET_END); SHELL_CMD_REGISTER(fpga, &sub_fpga, "FPGA commands", NULL); ```
/content/code_sandbox/drivers/fpga/fpga_shell.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
878
```c /* * */ #define DT_DRV_COMPAT altr_socfpga_agilex_bridge #include <errno.h> #include <zephyr/device.h> #include <zephyr/sip_svc/sip_svc.h> #include <zephyr/drivers/sip_svc/sip_svc_agilex_smc.h> #include <zephyr/drivers/fpga.h> #include <zephyr/logging/log.h> #include "fpga_altera_agilex_bridge.h" LOG_MODULE_REGISTER(fpga_altera); struct fpga_bridge_dev_data { /* SiP SVC controller */ struct sip_svc_controller *mailbox_smc_dev; /* SiP SVC client token id */ uint32_t mailbox_client_token; }; #define MAX_TIMEOUT_MSECS (1 * 1000UL) /** * @brief Open SiP SVC client session * * @return 0 on success or negative value on failure */ static int32_t svc_client_open(const struct device *dev) { if (!dev) { LOG_ERR("No such device found"); return -ENODEV; } struct fpga_bridge_dev_data *const data = (struct fpga_bridge_dev_data *const)(dev->data); if ((!data->mailbox_smc_dev) || (data->mailbox_client_token == 0)) { LOG_ERR("Mailbox client is not registered"); return -ENODEV; } if (sip_svc_open(data->mailbox_smc_dev, data->mailbox_client_token, K_MSEC(MAX_TIMEOUT_MSECS))) { LOG_ERR("Mailbox client open fail"); return -ENODEV; } return 0; } /** * @brief Close the svc client * * @return 0 on success or negative value on fail */ static int32_t svc_client_close(const struct device *dev) { int32_t err; uint32_t cmd_size = sizeof(uint32_t); struct sip_svc_request request; if (!dev) { LOG_ERR("No such device found"); return -ENODEV; } struct fpga_bridge_dev_data *const data = (struct fpga_bridge_dev_data *const)(dev->data); uint32_t *cmd_addr = (uint32_t *)k_malloc(cmd_size); if (!cmd_addr) { return -ENOMEM; } /* Fill the SiP SVC buffer with CANCEL request */ *cmd_addr = MAILBOX_CANCEL_COMMAND; request.header = SIP_SVC_PROTO_HEADER(SIP_SVC_PROTO_CMD_ASYNC, 0); request.a0 = SMC_FUNC_ID_MAILBOX_SEND_COMMAND; request.a1 = 0; request.a2 = (uint64_t)cmd_addr; request.a3 = (uint64_t)cmd_size; request.a4 = 0; request.a5 = 0; request.a6 = 0; request.a7 = 0; request.resp_data_addr = 
(uint64_t)NULL; request.resp_data_size = 0; request.priv_data = NULL; err = sip_svc_close(data->mailbox_smc_dev, data->mailbox_client_token, &request); if (err) { k_free(cmd_addr); LOG_ERR("Mailbox client close fail (%d)", err); } return err; } /** * @brief Call back function which we receive when we send the data * based on the current stage it will collect the data * * @param[in] c_token Token id for our svc services * @param[in] response Buffer will contain the response * * @return void */ static void smc_callback(uint32_t c_token, struct sip_svc_response *response) { if (response == NULL) { return; } uint32_t *resp_data = NULL; uint32_t resp_len = 0; uint32_t mbox_idx = 0; struct sip_svc_private_data *private_data = (struct sip_svc_private_data *)response->priv_data; union mailbox_response_header response_header; LOG_DBG("SiP SVC callback"); LOG_DBG("\tresponse data below:"); LOG_DBG("\theader=%08x", response->header); LOG_DBG("\ta0=%016lx", response->a0); LOG_DBG("\ta1=%016lx", response->a1); LOG_DBG("\ta2=%016lx", response->a2); LOG_DBG("\ta3=%016lx", response->a3); private_data->response.header = response->header; private_data->response.a0 = response->a0; private_data->response.a1 = response->a1; private_data->response.a2 = response->a2; private_data->response.a3 = response->a3; private_data->response.resp_data_size = response->resp_data_size; /* Condition to check only for the mailbox command not for the non-mailbox command */ if (response->resp_data_size) { resp_data = (uint32_t *)response->resp_data_addr; resp_len = response->resp_data_size / 4; private_data->mbox_response_len = resp_len; if (resp_data && resp_len) { response_header = (union mailbox_response_header)resp_data[0]; private_data->mbox_response_data = (uint32_t *)k_malloc(sizeof(uint32_t) * resp_len); for (mbox_idx = 0; mbox_idx < resp_len; mbox_idx++) { LOG_DBG("\t\t[%4d] %08x", mbox_idx, resp_data[mbox_idx]); private_data->mbox_response_data[mbox_idx] = resp_data[mbox_idx]; } } else { 
LOG_ERR("\t\tInvalid addr (%p) or len (%d)", resp_data, resp_len); } } /* Condition for non-mailbox command*/ else { LOG_DBG("Response Data size is zero !!"); } /* Client only responsible to free the response data memory space, * the command data memory space had been freed by SiP SVC service. */ if (response->resp_data_addr) { LOG_DBG("\tFree response memory %p", (char *)response->resp_data_addr); k_free((char *)response->resp_data_addr); } k_sem_give(&(private_data->smc_sem)); } /** * @brief Send the data to SiP SVC service layer * based on the command type further data will be send to SDM using mailbox * * @param[in] cmd_type Command type (Mailbox or Non-Mailbox) * @param[in] function_identifier Function identifier for each command type * @param[in] cmd_request * @param[in] private_data * * @return 0 on success or negative value on fail */ static int32_t smc_send(const struct device *dev, uint32_t cmd_type, uint64_t function_identifier, uint32_t *cmd_request, struct sip_svc_private_data *private_data) { int32_t trans_id = 0; uint32_t *cmd_addr = NULL; uint32_t *resp_addr = NULL; struct sip_svc_request request; if (!dev) { LOG_ERR("No such device found"); return -ENODEV; } struct fpga_bridge_dev_data *const data = (struct fpga_bridge_dev_data *const)(dev->data); if (!data->mailbox_smc_dev) { LOG_ERR("Mailbox client is not registered"); return -ENODEV; } if (cmd_type == SIP_SVC_PROTO_CMD_ASYNC) { cmd_addr = (uint32_t *)k_malloc(FPGA_MB_CMD_ADDR_MEM_SIZE); if (!cmd_addr) { LOG_ERR("Failed to allocate command memory"); return -ENOMEM; } cmd_addr[MBOX_CMD_HEADER_INDEX] = MBOX_REQUEST_HEADER(cmd_request[SMC_REQUEST_A2_INDEX], 0, 0); resp_addr = (uint32_t *)k_malloc(FPGA_MB_RESPONSE_MEM_SIZE); if (!resp_addr) { k_free(cmd_addr); return -ENOMEM; } request.a2 = (uint64_t)cmd_addr; request.a3 = sizeof(uint32_t); request.resp_data_addr = (uint64_t)resp_addr; request.resp_data_size = FPGA_MB_RESPONSE_MEM_SIZE; #if defined(CONFIG_LOG) for (int32_t mbox_idx = 0; mbox_idx < 
request.a3 / 4; mbox_idx++) { LOG_DBG("\t [%d ] %08x", mbox_idx, cmd_addr[mbox_idx]); } #endif } else { request.a2 = cmd_request[SMC_REQUEST_A2_INDEX]; request.a3 = cmd_request[SMC_REQUEST_A3_INDEX]; request.resp_data_addr = 0; request.resp_data_size = 0; } /* Fill SiP SVC request buffer */ request.header = SIP_SVC_PROTO_HEADER(cmd_type, 0); request.a0 = function_identifier; request.a1 = 0; request.a4 = 0; request.a5 = 0; request.a6 = 0; request.a7 = 0; request.priv_data = (void *)private_data; /* Send SiP SVC request */ trans_id = sip_svc_send(data->mailbox_smc_dev, data->mailbox_client_token, &request, smc_callback); if (trans_id == SIP_SVC_ID_INVALID) { LOG_ERR("SiP SVC send request fail"); return -EBUSY; } return 0; } /* Validate the Reconfig status response */ static int32_t fpga_reconfig_status_validate(struct fpga_config_status *reconfig_status_resp) { uint32_t ret = 0; /* Check for any error */ ret = reconfig_status_resp->state; if (ret == MBOX_CFGSTAT_VAB_BS_PREAUTH) { return MBOX_CONFIG_STATUS_STATE_CONFIG; } if (ret && ret != MBOX_CONFIG_STATUS_STATE_CONFIG) { return ret; } /* Make sure nStatus is not 0 */ ret = reconfig_status_resp->pin_status.pin_status; if (!(ret & RECONFIG_PIN_STATUS_NSTATUS)) { return MBOX_CFGSTAT_STATE_ERROR_HARDWARE; } ret = reconfig_status_resp->soft_function_status; if ((ret & RECONFIG_SOFTFUNC_STATUS_CONF_DONE) && (ret & RECONFIG_SOFTFUNC_STATUS_INIT_DONE) && !reconfig_status_resp->state) { return 0; /* Configuration success */ } return MBOX_CONFIG_STATUS_STATE_CONFIG; } /* Will send the SMC command to check the status of the FPGA */ static int32_t fpga_config_ready_check(const struct device *dev) { uint32_t smc_cmd[2] = {0}; int32_t ret = 0; struct sip_svc_private_data priv_data; /* Initialize the semaphore */ k_sem_init(&(priv_data.smc_sem), 0, 1); smc_cmd[SMC_REQUEST_A2_INDEX] = FPGA_CONFIG_STATUS; smc_cmd[SMC_REQUEST_A3_INDEX] = 0; /* Sending the FPGA config status mailbox command */ ret = smc_send(dev, 
SIP_SVC_PROTO_CMD_ASYNC, SMC_FUNC_ID_MAILBOX_SEND_COMMAND, smc_cmd, &priv_data); if (ret) { LOG_ERR("Failed to Send the Mailbox Command !!"); return -ECANCELED; } k_sem_take(&(priv_data.smc_sem), K_FOREVER); /* Verify the SMC response */ if (!priv_data.response.resp_data_size && priv_data.mbox_response_len != FPGA_CONFIG_STATUS_RESPONSE_LEN) { return -EINVAL; } /* Verify the FPGA config status response */ ret = fpga_reconfig_status_validate( (struct fpga_config_status *)priv_data.mbox_response_data); k_free(priv_data.mbox_response_data); return ret; } static int32_t socfpga_bridges_reset(const struct device *dev, uint32_t enable) { uint32_t smc_cmd[2] = {0}; int ret = 0; struct sip_svc_private_data priv_data; if (!dev) { LOG_ERR("No such device found"); return -ENODEV; } /* Initialize the semaphore */ k_sem_init(&(priv_data.smc_sem), 0, 1); smc_cmd[SMC_REQUEST_A2_INDEX] = FIELD_GET(BIT(0), enable); smc_cmd[SMC_REQUEST_A2_INDEX] |= BIT(1); smc_cmd[SMC_REQUEST_A3_INDEX] = BRIDGE_MASK; ret = smc_send(dev, SIP_SVC_PROTO_CMD_SYNC, SMC_FUNC_ID_SET_HPS_BRIDGES, smc_cmd, &priv_data); if (ret) { LOG_ERR("Failed to send the smc Command !!"); return ret; } /* Wait for the SiP SVC callback */ k_sem_take(&(priv_data.smc_sem), K_FOREVER); /* Check error code */ if (priv_data.response.a0) { ret = -ENOMSG; } return ret; } static int altera_fpga_on(const struct device *dev) { int32_t ret = 0; if (!dev) { LOG_ERR("No such device found"); return -ENODEV; } /* Opening SIP SVC session */ ret = svc_client_open(dev); if (ret) { LOG_ERR("Client open Failed!"); return ret; } /* Check FPGA status before bridge enable/disable */ ret = fpga_config_ready_check(dev); if (ret) { LOG_ERR("FPGA not ready. 
Bridge reset aborted!"); svc_client_close(dev); return -EIO; } /* Bridge reset */ ret = socfpga_bridges_reset(dev, 0x01); if (ret) { LOG_ERR("Bridge reset failed"); } /* Ignoring the return value to return bridge reset status */ if (svc_client_close(dev)) { LOG_ERR("Unregistering & Closing failed"); } return ret; } static int altera_fpga_off(const struct device *dev) { int32_t ret = 0; if (!dev) { LOG_ERR("No such device found"); return -ENODEV; } /* Opening SIP SVC session */ ret = svc_client_open(dev); if (ret) { LOG_ERR("Client open Failed!"); return ret; } /* Check FPGA status before bridge enable/disable */ ret = fpga_config_ready_check(dev); if (ret) { LOG_ERR("FPGA not ready. Bridge reset aborted!"); svc_client_close(dev); return -EIO; } /* Bridge reset */ ret = socfpga_bridges_reset(dev, 0x00); if (ret) { LOG_ERR("Bridge reset failed"); } /* Ignoring the return value to return bridge reset status */ if (svc_client_close(dev)) { LOG_ERR("Unregistering & Closing failed"); } return ret; } static int altera_fpga_init(const struct device *dev) { if (!dev) { LOG_ERR("No such device found"); return -ENODEV; } struct fpga_bridge_dev_data *const data = (struct fpga_bridge_dev_data *const)(dev->data); data->mailbox_smc_dev = sip_svc_get_controller("smc"); if (!data->mailbox_smc_dev) { LOG_ERR("Arm SiP service not found"); return -ENODEV; } data->mailbox_client_token = sip_svc_register(data->mailbox_smc_dev, NULL); if (data->mailbox_client_token == SIP_SVC_ID_INVALID) { data->mailbox_smc_dev = NULL; LOG_ERR("Mailbox client register fail"); return -EINVAL; } return 0; } static const struct fpga_driver_api altera_fpga_api = { .on = altera_fpga_on, .off = altera_fpga_off, }; #define CREATE_ALTERA_FPGA_BRIDGE_DEV(inst) \ static struct fpga_bridge_dev_data fpga_bridge_data_##inst; \ DEVICE_DT_INST_DEFINE(inst, \ altera_fpga_init, \ NULL, &fpga_bridge_data_##inst, \ NULL, POST_KERNEL, \ CONFIG_FPGA_INIT_PRIORITY, \ &altera_fpga_api); \ 
DT_INST_FOREACH_STATUS_OKAY(CREATE_ALTERA_FPGA_BRIDGE_DEV); ```
/content/code_sandbox/drivers/fpga/fpga_altera_agilex_bridge.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,453
```unknown config ICE40_FPGA bool "Lattice iCE40 fpga driver [EXPERIMENTAL]" select EXPERIMENTAL imply CRC depends on SPI help Enable support for the Lattice iCE40 fpga driver. ```
/content/code_sandbox/drivers/fpga/Kconfig.ice40
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
54
```unknown # FPGA ALTERA driver configuration options config ALTERA_AGILEX_BRIDGE_FPGA bool "ALTERA fpga driver" depends on ARM_SIP_SVC_SUBSYS help Enable ALTERA FPGA driver. ```
/content/code_sandbox/drivers/fpga/Kconfig.altera_agilex_bridge
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
48
```c /* * */ /** * @file * * PPP driver using uart_pipe. This is meant for network connectivity between * two network end points. */ #define LOG_LEVEL CONFIG_NET_PPP_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(net_ppp, LOG_LEVEL); #include <stdio.h> #include <zephyr/kernel.h> #include <stdbool.h> #include <errno.h> #include <stddef.h> #include <zephyr/net/ppp.h> #include <zephyr/net/buf.h> #include <zephyr/net/net_pkt.h> #include <zephyr/net/net_if.h> #include <zephyr/net/net_core.h> #include <zephyr/sys/ring_buffer.h> #include <zephyr/sys/crc.h> #include <zephyr/drivers/uart.h> #include <zephyr/random/random.h> #include <zephyr/posix/net/if_arp.h> #include <zephyr/net/ethernet.h> #include <zephyr/net/capture.h> #include "../../subsys/net/ip/net_stats.h" #include "../../subsys/net/ip/net_private.h" #define UART_BUF_LEN CONFIG_NET_PPP_UART_BUF_LEN #define UART_TX_BUF_LEN CONFIG_NET_PPP_ASYNC_UART_TX_BUF_LEN enum ppp_driver_state { STATE_HDLC_FRAME_START, STATE_HDLC_FRAME_ADDRESS, STATE_HDLC_FRAME_DATA, }; #define PPP_WORKQ_PRIORITY CONFIG_NET_PPP_RX_PRIORITY #define PPP_WORKQ_STACK_SIZE CONFIG_NET_PPP_RX_STACK_SIZE K_KERNEL_STACK_DEFINE(ppp_workq, PPP_WORKQ_STACK_SIZE); #if defined(CONFIG_NET_PPP_CAPTURE) #define MAX_CAPTURE_BUF_LEN CONFIG_NET_PPP_CAPTURE_BUF_SIZE #else #define MAX_CAPTURE_BUF_LEN 1 #endif struct net_ppp_capture_ctx { struct net_capture_cooked cooked; uint8_t capture_buf[MAX_CAPTURE_BUF_LEN]; }; #if defined(CONFIG_NET_PPP_CAPTURE) static struct net_ppp_capture_ctx _ppp_capture_ctx; static struct net_ppp_capture_ctx *ppp_capture_ctx = &_ppp_capture_ctx; #else static struct net_ppp_capture_ctx *ppp_capture_ctx; #endif struct ppp_driver_context { const struct device *dev; struct net_if *iface; /* This net_pkt contains pkt that is being read */ struct net_pkt *pkt; /* How much free space we have in the net_pkt */ size_t available; /* ppp data is read into this buf */ uint8_t buf[UART_BUF_LEN]; #if defined(CONFIG_NET_PPP_ASYNC_UART) /* 
with async we use 2 rx buffers */ uint8_t buf2[UART_BUF_LEN]; struct k_work_delayable uart_recovery_work; /* ppp buf use when sending data */ uint8_t send_buf[UART_TX_BUF_LEN]; #else /* ppp buf use when sending data */ uint8_t send_buf[UART_BUF_LEN]; #endif uint8_t mac_addr[6]; struct net_linkaddr ll_addr; /* Flag that tells whether this instance is initialized or not */ atomic_t modem_init_done; /* Incoming data is routed via ring buffer */ struct ring_buf rx_ringbuf; uint8_t rx_buf[CONFIG_NET_PPP_RINGBUF_SIZE]; /* ISR function callback worker */ struct k_work cb_work; struct k_work_q cb_workq; #if defined(CONFIG_NET_STATISTICS_PPP) struct net_stats_ppp stats; #endif enum ppp_driver_state state; #if defined(CONFIG_PPP_CLIENT_CLIENTSERVER) /* correctly received CLIENT bytes */ uint8_t client_index; #endif uint8_t init_done : 1; uint8_t next_escaped : 1; }; static struct ppp_driver_context ppp_driver_context_data; #if defined(CONFIG_NET_PPP_ASYNC_UART) static bool rx_retry_pending; static bool uart_recovery_pending; static uint8_t *next_buf; static K_SEM_DEFINE(uarte_tx_finished, 0, 1); static void uart_callback(const struct device *dev, struct uart_event *evt, void *user_data) { struct ppp_driver_context *context = user_data; uint8_t *p; int err, ret, len, space_left; switch (evt->type) { case UART_TX_DONE: LOG_DBG("UART_TX_DONE: sent %zu bytes", evt->data.tx.len); k_sem_give(&uarte_tx_finished); break; case UART_TX_ABORTED: { k_sem_give(&uarte_tx_finished); if (CONFIG_NET_PPP_ASYNC_UART_TX_TIMEOUT == 0) { LOG_WRN("UART TX aborted."); break; } struct uart_config uart_conf; err = uart_config_get(dev, &uart_conf); if (err) { LOG_ERR("uart_config_get() err: %d", err); } else if (uart_conf.baudrate / 10 * CONFIG_NET_PPP_ASYNC_UART_TX_TIMEOUT / MSEC_PER_SEC > evt->data.tx.len * 2) { /* The abort likely did not happen because of missing bandwidth. */ LOG_DBG("UART_TX_ABORTED"); } else { LOG_WRN("UART TX aborted: Only %zu bytes were sent. 
You may want" " to change either CONFIG_NET_PPP_ASYNC_UART_TX_TIMEOUT" " (%d ms) or the UART baud rate (%u).", evt->data.tx.len, CONFIG_NET_PPP_ASYNC_UART_TX_TIMEOUT, uart_conf.baudrate); } break; } case UART_RX_RDY: len = evt->data.rx.len; p = evt->data.rx.buf + evt->data.rx.offset; LOG_DBG("Received data %d bytes", len); ret = ring_buf_put(&context->rx_ringbuf, p, len); if (ret < evt->data.rx.len) { LOG_WRN("Rx buffer doesn't have enough space. " "Bytes pending: %d, written only: %d. " "Disabling RX for now.", evt->data.rx.len, ret); /* No possibility to set flow ctrl ON towards PC, * thus workrounding this lack in async API by turning * rx off for now and re-enabling that later. */ if (!rx_retry_pending) { uart_rx_disable(dev); rx_retry_pending = true; } } space_left = ring_buf_space_get(&context->rx_ringbuf); if (!rx_retry_pending && space_left < (sizeof(context->rx_buf) / 8)) { /* Not much room left in buffer after a write to ring buffer. * We submit a work, but enable flow ctrl also * in this case to avoid packet losses. */ uart_rx_disable(dev); rx_retry_pending = true; LOG_WRN("%d written to RX buf, but after that only %d space left. 
" "Disabling RX for now.", ret, space_left); } k_work_submit_to_queue(&context->cb_workq, &context->cb_work); break; case UART_RX_BUF_REQUEST: { LOG_DBG("UART_RX_BUF_REQUEST: buf %p", (void *)next_buf); if (next_buf) { err = uart_rx_buf_rsp(dev, next_buf, sizeof(context->buf)); if (err) { LOG_ERR("uart_rx_buf_rsp() err: %d", err); } } break; } case UART_RX_BUF_RELEASED: next_buf = evt->data.rx_buf.buf; LOG_DBG("UART_RX_BUF_RELEASED: buf %p", (void *)next_buf); break; case UART_RX_DISABLED: LOG_DBG("UART_RX_DISABLED - re-enabling in a while"); if (rx_retry_pending && !uart_recovery_pending) { k_work_schedule(&context->uart_recovery_work, K_MSEC(CONFIG_NET_PPP_ASYNC_UART_RX_RECOVERY_TIMEOUT)); rx_retry_pending = false; uart_recovery_pending = true; } break; case UART_RX_STOPPED: LOG_DBG("UART_RX_STOPPED: stop reason %d", evt->data.rx_stop.reason); if (evt->data.rx_stop.reason != 0) { rx_retry_pending = true; } break; } } static int ppp_async_uart_rx_enable(struct ppp_driver_context *context) { int err; next_buf = context->buf2; err = uart_callback_set(context->dev, uart_callback, (void *)context); if (err) { LOG_ERR("Failed to set uart callback, err %d", err); } err = uart_rx_enable(context->dev, context->buf, sizeof(context->buf), CONFIG_NET_PPP_ASYNC_UART_RX_ENABLE_TIMEOUT * USEC_PER_MSEC); if (err) { LOG_ERR("uart_rx_enable() failed, err %d", err); } else { LOG_DBG("RX enabled"); } rx_retry_pending = false; return err; } static void uart_recovery(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct ppp_driver_context *ppp = CONTAINER_OF(dwork, struct ppp_driver_context, uart_recovery_work); int ret; ret = ring_buf_space_get(&ppp->rx_ringbuf); if (ret >= (sizeof(ppp->rx_buf) / 2)) { ret = ppp_async_uart_rx_enable(ppp); if (ret) { LOG_ERR("ppp_async_uart_rx_enable() failed, err %d", ret); } else { LOG_DBG("UART RX recovered."); } uart_recovery_pending = false; } else { LOG_ERR("Rx buffer still doesn't have enough room %d 
to be re-enabled", ret); k_work_schedule(&ppp->uart_recovery_work, K_MSEC(CONFIG_NET_PPP_ASYNC_UART_RX_RECOVERY_TIMEOUT)); } } #endif static int ppp_save_byte(struct ppp_driver_context *ppp, uint8_t byte) { int ret; if (!ppp->pkt) { ppp->pkt = net_pkt_rx_alloc_with_buffer( ppp->iface, CONFIG_NET_BUF_DATA_SIZE, AF_UNSPEC, 0, K_NO_WAIT); if (!ppp->pkt) { LOG_ERR("[%p] cannot allocate pkt", ppp); return -ENOMEM; } net_pkt_cursor_init(ppp->pkt); ppp->available = net_pkt_available_buffer(ppp->pkt); } /* Extra debugging can be enabled separately if really * needed. Normally it would just print too much data. */ if (0) { LOG_DBG("Saving byte %02x", byte); } /* This is not very intuitive but we must allocate new buffer * before we write a byte to last available cursor position. */ if (ppp->available == 1) { ret = net_pkt_alloc_buffer(ppp->pkt, CONFIG_NET_BUF_DATA_SIZE + ppp->available, AF_UNSPEC, K_NO_WAIT); if (ret < 0) { LOG_ERR("[%p] cannot allocate new data buffer", ppp); goto out_of_mem; } ppp->available = net_pkt_available_buffer(ppp->pkt); } if (ppp->available) { ret = net_pkt_write_u8(ppp->pkt, byte); if (ret < 0) { LOG_ERR("[%p] Cannot write to pkt %p (%d)", ppp, ppp->pkt, ret); goto out_of_mem; } ppp->available--; } return 0; out_of_mem: net_pkt_unref(ppp->pkt); ppp->pkt = NULL; return -ENOMEM; } static const char *ppp_driver_state_str(enum ppp_driver_state state) { #if (CONFIG_NET_PPP_LOG_LEVEL >= LOG_LEVEL_DBG) switch (state) { case STATE_HDLC_FRAME_START: return "START"; case STATE_HDLC_FRAME_ADDRESS: return "ADDRESS"; case STATE_HDLC_FRAME_DATA: return "DATA"; } #else ARG_UNUSED(state); #endif return ""; } static void ppp_change_state(struct ppp_driver_context *ctx, enum ppp_driver_state new_state) { NET_ASSERT(ctx); if (ctx->state == new_state) { return; } NET_ASSERT(new_state >= STATE_HDLC_FRAME_START && new_state <= STATE_HDLC_FRAME_DATA); NET_DBG("[%p] state %s (%d) => %s (%d)", ctx, ppp_driver_state_str(ctx->state), ctx->state, 
ppp_driver_state_str(new_state), new_state); ctx->state = new_state; } static int ppp_send_flush(struct ppp_driver_context *ppp, int off) { if (IS_ENABLED(CONFIG_NET_TEST)) { return 0; } uint8_t *buf = ppp->send_buf; if (IS_ENABLED(CONFIG_NET_PPP_CAPTURE) && net_capture_is_enabled(NULL) && ppp_capture_ctx) { size_t len = off; uint8_t *start = &buf[0]; /* Do not capture HDLC frame start and stop bytes (0x7e) */ if (buf[0] == 0x7e) { len--; start++; } if (buf[off] == 0x7e) { len--; } net_capture_data(&ppp_capture_ctx->cooked, start, len, NET_CAPTURE_OUTGOING, NET_ETH_PTYPE_HDLC); } #if defined(CONFIG_NET_PPP_ASYNC_UART) int ret; const int32_t timeout = CONFIG_NET_PPP_ASYNC_UART_TX_TIMEOUT ? CONFIG_NET_PPP_ASYNC_UART_TX_TIMEOUT * USEC_PER_MSEC : SYS_FOREVER_US; k_sem_take(&uarte_tx_finished, K_FOREVER); ret = uart_tx(ppp->dev, buf, off, timeout); if (ret) { LOG_ERR("uart_tx() failed, err %d", ret); k_sem_give(&uarte_tx_finished); } #else while (off--) { uart_poll_out(ppp->dev, *buf++); } #endif return 0; } static int ppp_send_bytes(struct ppp_driver_context *ppp, const uint8_t *data, int len, int off) { int i; for (i = 0; i < len; i++) { ppp->send_buf[off++] = data[i]; if (off >= sizeof(ppp->send_buf)) { off = ppp_send_flush(ppp, off); } } return off; } #if defined(CONFIG_PPP_CLIENT_CLIENTSERVER) #define CLIENT "CLIENT" #define CLIENTSERVER "CLIENTSERVER" static void ppp_handle_client(struct ppp_driver_context *ppp, uint8_t byte) { static const char *client = CLIENT; static const char *clientserver = CLIENTSERVER; int offset; if (ppp->client_index >= (sizeof(CLIENT) - 1)) { ppp->client_index = 0; } if (byte != client[ppp->client_index]) { ppp->client_index = 0; if (byte != client[ppp->client_index]) { return; } } ++ppp->client_index; if (ppp->client_index >= (sizeof(CLIENT) - 1)) { LOG_DBG("Received complete CLIENT string"); offset = ppp_send_bytes(ppp, clientserver, sizeof(CLIENTSERVER) - 1, 0); (void)ppp_send_flush(ppp, offset); ppp->client_index = 0; } } #endif 
static int ppp_input_byte(struct ppp_driver_context *ppp, uint8_t byte) { int ret = -EAGAIN; switch (ppp->state) { case STATE_HDLC_FRAME_START: /* Synchronizing the flow with HDLC flag field */ if (byte == 0x7e) { /* Note that we do not save the sync flag */ LOG_DBG("Sync byte (0x%02x) start", byte); ppp_change_state(ppp, STATE_HDLC_FRAME_ADDRESS); #if defined(CONFIG_PPP_CLIENT_CLIENTSERVER) } else { ppp_handle_client(ppp, byte); #endif } break; case STATE_HDLC_FRAME_ADDRESS: if (byte != 0xff) { /* Check if we need to sync again */ if (byte == 0x7e) { /* Just skip to the start of the pkt byte */ return -EAGAIN; } LOG_DBG("Invalid (0x%02x) byte, expecting Address", byte); /* If address is != 0xff, then ignore this * frame. RFC 1662 ch 3.1 */ ppp_change_state(ppp, STATE_HDLC_FRAME_START); } else { LOG_DBG("Address byte (0x%02x) start", byte); ppp_change_state(ppp, STATE_HDLC_FRAME_DATA); /* Save the address field so that we can calculate * the FCS. The address field will not be passed * to upper stack. */ ret = ppp_save_byte(ppp, byte); if (ret < 0) { ppp_change_state(ppp, STATE_HDLC_FRAME_START); } ret = -EAGAIN; } break; case STATE_HDLC_FRAME_DATA: /* If the next frame starts, then send this one * up in the network stack. */ if (byte == 0x7e) { LOG_DBG("End of pkt (0x%02x)", byte); ppp_change_state(ppp, STATE_HDLC_FRAME_ADDRESS); ret = 0; } else { if (byte == 0x7d) { /* RFC 1662, ch. 4.2 */ ppp->next_escaped = true; break; } if (ppp->next_escaped) { /* RFC 1662, ch. 
4.2 */ byte ^= 0x20; ppp->next_escaped = false; } ret = ppp_save_byte(ppp, byte); if (ret < 0) { ppp_change_state(ppp, STATE_HDLC_FRAME_START); } ret = -EAGAIN; } break; default: LOG_ERR("[%p] Invalid state %d", ppp, ppp->state); break; } return ret; } static bool ppp_check_fcs(struct ppp_driver_context *ppp) { struct net_buf *buf; uint16_t crc; buf = ppp->pkt->buffer; if (!buf) { return false; } crc = crc16_ccitt(0xffff, buf->data, buf->len); buf = buf->frags; while (buf) { crc = crc16_ccitt(crc, buf->data, buf->len); buf = buf->frags; } if (crc != 0xf0b8) { LOG_DBG("Invalid FCS (0x%x)", crc); #if defined(CONFIG_NET_STATISTICS_PPP) ppp->stats.chkerr++; #endif return false; } return true; } static void ppp_process_msg(struct ppp_driver_context *ppp) { if (LOG_LEVEL >= LOG_LEVEL_DBG) { net_pkt_hexdump(ppp->pkt, "recv ppp"); } if (IS_ENABLED(CONFIG_NET_PPP_VERIFY_FCS) && !ppp_check_fcs(ppp)) { #if defined(CONFIG_NET_STATISTICS_PPP) ppp->stats.drop++; ppp->stats.pkts.rx++; #endif net_pkt_unref(ppp->pkt); } else { /* If PPP packet capturing is enabled, then send the * full packet with PPP headers for processing. Currently this * captures only valid frames. If we would need to receive also * invalid frames, the if-block would need to be moved before * fcs check above. */ if (IS_ENABLED(CONFIG_NET_PPP_CAPTURE) && net_capture_is_enabled(NULL) && ppp_capture_ctx) { size_t copied; /* Linearize the packet data. We cannot use the * capture API that deals with net_pkt as we work * in cooked mode and want to capture also the * HDLC frame data. */ copied = net_buf_linearize(ppp_capture_ctx->capture_buf, sizeof(ppp_capture_ctx->capture_buf), ppp->pkt->buffer, 0U, net_pkt_get_len(ppp->pkt)); net_capture_data(&ppp_capture_ctx->cooked, ppp_capture_ctx->capture_buf, copied, NET_CAPTURE_HOST, NET_ETH_PTYPE_HDLC); } /* Remove the Address (0xff), Control (0x03) and * FCS fields (16-bit) as the PPP L2 layer does not need * those bytes. 
*/ uint16_t addr_and_ctrl = net_buf_pull_be16(ppp->pkt->buffer); /* Currently we do not support compressed Address and Control * fields so they must always be present. */ if (addr_and_ctrl != (0xff << 8 | 0x03)) { #if defined(CONFIG_NET_STATISTICS_PPP) ppp->stats.drop++; ppp->stats.pkts.rx++; #endif net_pkt_unref(ppp->pkt); } else { /* Remove FCS bytes (2) */ net_pkt_remove_tail(ppp->pkt, 2); /* Make sure we now start reading from PPP header in * PPP L2 recv() */ net_pkt_cursor_init(ppp->pkt); net_pkt_set_overwrite(ppp->pkt, true); if (net_recv_data(ppp->iface, ppp->pkt) < 0) { net_pkt_unref(ppp->pkt); } } } ppp->pkt = NULL; } #if defined(CONFIG_NET_TEST) static uint8_t *ppp_recv_cb(uint8_t *buf, size_t *off) { struct ppp_driver_context *ppp = CONTAINER_OF(buf, struct ppp_driver_context, buf[0]); size_t i, len = *off; for (i = 0; i < *off; i++) { if (0) { /* Extra debugging can be enabled separately if really * needed. Normally it would just print too much data. */ LOG_DBG("[%zd] %02x", i, buf[i]); } if (ppp_input_byte(ppp, buf[i]) == 0) { /* Ignore empty or too short frames */ if (ppp->pkt && net_pkt_get_len(ppp->pkt) > 3) { ppp_process_msg(ppp); break; } } } if (i == *off) { *off = 0; } else { *off = len - i - 1; memmove(&buf[0], &buf[i + 1], *off); } return buf; } void ppp_driver_feed_data(uint8_t *data, int data_len) { struct ppp_driver_context *ppp = &ppp_driver_context_data; size_t recv_off = 0; /* We are expecting that the tests are feeding data in large * chunks so we can reset the uart buffer here. 
*/ memset(ppp->buf, 0, UART_BUF_LEN); ppp_change_state(ppp, STATE_HDLC_FRAME_START); while (data_len > 0) { int data_to_copy = MIN(data_len, UART_BUF_LEN); int remaining; LOG_DBG("Feeding %d bytes", data_to_copy); memcpy(ppp->buf, data, data_to_copy); recv_off = data_to_copy; (void)ppp_recv_cb(ppp->buf, &recv_off); remaining = data_to_copy - recv_off; LOG_DBG("We copied %d bytes", remaining); data_len -= remaining; data += remaining; } } #endif static bool calc_fcs(struct net_pkt *pkt, uint16_t *fcs, uint16_t protocol) { struct net_buf *buf; uint16_t crc; uint16_t c; buf = pkt->buffer; if (!buf) { return false; } /* HDLC Address and Control fields */ c = sys_cpu_to_be16(0xff << 8 | 0x03); crc = crc16_ccitt(0xffff, (const uint8_t *)&c, sizeof(c)); if (protocol > 0) { crc = crc16_ccitt(crc, (const uint8_t *)&protocol, sizeof(protocol)); } while (buf) { crc = crc16_ccitt(crc, buf->data, buf->len); buf = buf->frags; } crc ^= 0xffff; *fcs = crc; return true; } static uint16_t ppp_escape_byte(uint8_t byte, int *offset) { if (byte == 0x7e || byte == 0x7d || byte < 0x20) { *offset = 0; return (0x7d << 8) | (byte ^ 0x20); } *offset = 1; return byte; } static int ppp_send(const struct device *dev, struct net_pkt *pkt) { struct ppp_driver_context *ppp = dev->data; struct net_buf *buf = pkt->buffer; uint16_t protocol = 0; int send_off = 0; uint32_t sync_addr_ctrl; uint16_t fcs, escaped; uint8_t byte; int i, offset; #if defined(CONFIG_NET_TEST) return 0; #endif ARG_UNUSED(dev); if (!buf) { /* No data? */ return -ENODATA; } /* If the packet is a normal network packet, we must add the protocol * value here. 
*/ if (!net_pkt_is_ppp(pkt)) { if (net_pkt_family(pkt) == AF_INET) { protocol = htons(PPP_IP); } else if (net_pkt_family(pkt) == AF_INET6) { protocol = htons(PPP_IPV6); } else { return -EPROTONOSUPPORT; } } if (!calc_fcs(pkt, &fcs, protocol)) { return -ENOMEM; } /* Sync, Address & Control fields */ sync_addr_ctrl = sys_cpu_to_be32(0x7e << 24 | 0xff << 16 | 0x7d << 8 | 0x23); send_off = ppp_send_bytes(ppp, (const uint8_t *)&sync_addr_ctrl, sizeof(sync_addr_ctrl), send_off); if (protocol > 0) { escaped = htons(ppp_escape_byte(protocol, &offset)); send_off = ppp_send_bytes(ppp, (uint8_t *)&escaped + offset, offset ? 1 : 2, send_off); escaped = htons(ppp_escape_byte(protocol >> 8, &offset)); send_off = ppp_send_bytes(ppp, (uint8_t *)&escaped + offset, offset ? 1 : 2, send_off); } /* Note that we do not print the first four bytes and FCS bytes at the * end so that we do not need to allocate separate net_buf just for * that purpose. */ if (LOG_LEVEL >= LOG_LEVEL_DBG) { net_pkt_hexdump(pkt, "send ppp"); } while (buf) { for (i = 0; i < buf->len; i++) { /* Escape illegal bytes */ escaped = htons(ppp_escape_byte(buf->data[i], &offset)); send_off = ppp_send_bytes(ppp, (uint8_t *)&escaped + offset, offset ? 1 : 2, send_off); } buf = buf->frags; } escaped = htons(ppp_escape_byte(fcs, &offset)); send_off = ppp_send_bytes(ppp, (uint8_t *)&escaped + offset, offset ? 1 : 2, send_off); escaped = htons(ppp_escape_byte(fcs >> 8, &offset)); send_off = ppp_send_bytes(ppp, (uint8_t *)&escaped + offset, offset ? 
1 : 2, send_off); byte = 0x7e; send_off = ppp_send_bytes(ppp, &byte, 1, send_off); (void)ppp_send_flush(ppp, send_off); return 0; } #if !defined(CONFIG_NET_TEST) static int ppp_consume_ringbuf(struct ppp_driver_context *ppp) { uint8_t *data; size_t len, tmp; int ret; len = ring_buf_get_claim(&ppp->rx_ringbuf, &data, CONFIG_NET_PPP_RINGBUF_SIZE); if (len == 0) { LOG_DBG("Ringbuf %p is empty!", &ppp->rx_ringbuf); return 0; } /* This will print too much data, enable only if really needed */ if (0) { LOG_HEXDUMP_DBG(data, len, ppp->dev->name); } tmp = len; do { if (ppp_input_byte(ppp, *data++) == 0) { /* Ignore empty or too short frames */ if (ppp->pkt && net_pkt_get_len(ppp->pkt) > 3) { ppp_process_msg(ppp); } } } while (--tmp); ret = ring_buf_get_finish(&ppp->rx_ringbuf, len); if (ret < 0) { LOG_DBG("Cannot flush ring buffer (%d)", ret); } return -EAGAIN; } static void ppp_isr_cb_work(struct k_work *work) { struct ppp_driver_context *ppp = CONTAINER_OF(work, struct ppp_driver_context, cb_work); int ret = -EAGAIN; while (ret == -EAGAIN) { ret = ppp_consume_ringbuf(ppp); } } #endif /* !CONFIG_NET_TEST */ static int ppp_driver_init(const struct device *dev) { struct ppp_driver_context *ppp = dev->data; LOG_DBG("[%p] dev %p", ppp, dev); #if !defined(CONFIG_NET_TEST) ring_buf_init(&ppp->rx_ringbuf, sizeof(ppp->rx_buf), ppp->rx_buf); k_work_init(&ppp->cb_work, ppp_isr_cb_work); k_work_queue_start(&ppp->cb_workq, ppp_workq, K_KERNEL_STACK_SIZEOF(ppp_workq), K_PRIO_COOP(PPP_WORKQ_PRIORITY), NULL); k_thread_name_set(&ppp->cb_workq.thread, "ppp_workq"); #if defined(CONFIG_NET_PPP_ASYNC_UART) k_work_init_delayable(&ppp->uart_recovery_work, uart_recovery); #endif #endif ppp->pkt = NULL; ppp_change_state(ppp, STATE_HDLC_FRAME_START); #if defined(CONFIG_PPP_CLIENT_CLIENTSERVER) ppp->client_index = 0; #endif return 0; } static inline struct net_linkaddr *ppp_get_mac(struct ppp_driver_context *ppp) { ppp->ll_addr.addr = ppp->mac_addr; ppp->ll_addr.len = sizeof(ppp->mac_addr); return 
&ppp->ll_addr; } static void ppp_iface_init(struct net_if *iface) { struct ppp_driver_context *ppp = net_if_get_device(iface)->data; struct net_linkaddr *ll_addr; LOG_DBG("[%p] iface %p", ppp, iface); net_ppp_init(iface); if (ppp->init_done) { return; } ppp->init_done = true; ppp->iface = iface; /* The mac address is not really used but network interface expects * to find one. */ ll_addr = ppp_get_mac(ppp); if (CONFIG_PPP_MAC_ADDR[0] != 0) { if (net_bytes_from_str(ppp->mac_addr, sizeof(ppp->mac_addr), CONFIG_PPP_MAC_ADDR) < 0) { goto use_random_mac; } } else { use_random_mac: /* 00-00-5E-00-53-xx Documentation RFC 7042 */ ppp->mac_addr[0] = 0x00; ppp->mac_addr[1] = 0x00; ppp->mac_addr[2] = 0x5E; ppp->mac_addr[3] = 0x00; ppp->mac_addr[4] = 0x53; ppp->mac_addr[5] = sys_rand8_get(); } net_if_set_link_addr(iface, ll_addr->addr, ll_addr->len, NET_LINK_ETHERNET); if (IS_ENABLED(CONFIG_NET_PPP_CAPTURE)) { static bool capture_setup_done; if (!capture_setup_done) { int ret; ret = net_capture_cooked_setup(&ppp_capture_ctx->cooked, ARPHRD_PPP, sizeof(ppp->mac_addr), ppp->mac_addr); if (ret < 0) { LOG_DBG("Cannot setup capture (%d)", ret); } else { capture_setup_done = true; } } } memset(ppp->buf, 0, sizeof(ppp->buf)); #if defined(CONFIG_PPP_NET_IF_NO_AUTO_START) /* * If interface autostart is disabled from Kconfig, then do not start the * interface automatically but only when manually started. 
*/ net_if_flag_set(iface, NET_IF_NO_AUTO_START); #endif } #if defined(CONFIG_NET_STATISTICS_PPP) static struct net_stats_ppp *ppp_get_stats(const struct device *dev) { struct ppp_driver_context *context = dev->data; return &context->stats; } #endif #if !defined(CONFIG_NET_TEST) && !defined(CONFIG_NET_PPP_ASYNC_UART) static void ppp_uart_flush(const struct device *dev) { uint8_t c; while (uart_fifo_read(dev, &c, 1) > 0) { continue; } } static void ppp_uart_isr(const struct device *uart, void *user_data) { struct ppp_driver_context *context = user_data; int rx = 0, ret; /* get all of the data off UART as fast as we can */ while (uart_irq_update(uart) && uart_irq_rx_ready(uart)) { rx = uart_fifo_read(uart, context->buf, sizeof(context->buf)); if (rx <= 0) { continue; } ret = ring_buf_put(&context->rx_ringbuf, context->buf, rx); if (ret < rx) { LOG_ERR("Rx buffer doesn't have enough space. " "Bytes pending: %d, written: %d", rx, ret); break; } k_work_submit_to_queue(&context->cb_workq, &context->cb_work); } } #endif /* !CONFIG_NET_TEST && !CONFIG_NET_PPP_ASYNC_UART */ static int ppp_start(const struct device *dev) { struct ppp_driver_context *context = dev->data; /* Init the PPP UART. This should only be called once. 
*/ #if !defined(CONFIG_NET_TEST) if (atomic_cas(&context->modem_init_done, false, true)) { context->dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_ppp_uart)); LOG_DBG("Initializing PPP to use %s", context->dev->name); if (!device_is_ready(context->dev)) { LOG_ERR("Device %s is not ready", context->dev->name); return -ENODEV; } #if defined(CONFIG_NET_PPP_ASYNC_UART) k_sem_give(&uarte_tx_finished); ppp_async_uart_rx_enable(context); #else uart_irq_rx_disable(context->dev); uart_irq_tx_disable(context->dev); ppp_uart_flush(context->dev); uart_irq_callback_user_data_set(context->dev, ppp_uart_isr, context); uart_irq_rx_enable(context->dev); #endif } #endif /* !CONFIG_NET_TEST */ net_if_carrier_on(context->iface); return 0; } static int ppp_stop(const struct device *dev) { struct ppp_driver_context *context = dev->data; net_if_carrier_off(context->iface); #if defined(CONFIG_NET_PPP_ASYNC_UART) uart_rx_disable(context->dev); #endif context->modem_init_done = false; return 0; } static const struct ppp_api ppp_if_api = { .iface_api.init = ppp_iface_init, .send = ppp_send, .start = ppp_start, .stop = ppp_stop, #if defined(CONFIG_NET_STATISTICS_PPP) .get_stats = ppp_get_stats, #endif }; NET_DEVICE_INIT(ppp, CONFIG_NET_PPP_DRV_NAME, ppp_driver_init, NULL, &ppp_driver_context_data, NULL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, &ppp_if_api, PPP_L2, NET_L2_GET_CTX_TYPE(PPP_L2), PPP_MTU); ```
/content/code_sandbox/drivers/net/ppp.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
8,181
```c /* * */ #define DT_DRV_COMPAT xlnx_fpga #include <zephyr/device.h> #include <zephyr/drivers/fpga.h> #include "fpga_zynqmp.h" #include <errno.h> #include <string.h> #include <zephyr/sys/byteorder.h> #include <stdlib.h> #include <stdio.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(fpga_zynqmp); static void power_up_fpga(void) { PMU_GLOBAL_PWRUP_EN = PWR_PL_MASK; PMU_REQ_PWRUP_TRIG = PWR_PL_MASK; while (PWR_STATUS & PWR_PL_MASK) { }; } struct zynqmp_fpga_data { char FPGA_info[16]; }; static void update_part_name(const struct device *dev) { struct zynqmp_fpga_data *data = dev->data; int zu_number = 0; switch (IDCODE & IDCODE_MASK) { case ZU2_IDCODE: zu_number = 2; break; case ZU3_IDCODE: zu_number = 3; break; case ZU4_IDCODE: zu_number = 4; break; case ZU5_IDCODE: zu_number = 5; break; case ZU6_IDCODE: zu_number = 6; break; case ZU7_IDCODE: zu_number = 7; break; case ZU9_IDCODE: zu_number = 9; break; case ZU11_IDCODE: zu_number = 11; break; case ZU15_IDCODE: zu_number = 15; break; case ZU17_IDCODE: zu_number = 17; break; case ZU19_IDCODE: zu_number = 19; break; case ZU21_IDCODE: zu_number = 21; break; case ZU25_IDCODE: zu_number = 25; break; case ZU27_IDCODE: zu_number = 27; break; case ZU28_IDCODE: zu_number = 28; break; case ZU29_IDCODE: zu_number = 29; break; case ZU39_IDCODE: zu_number = 39; break; case ZU43_IDCODE: zu_number = 43; break; case ZU46_IDCODE: zu_number = 46; break; case ZU47_IDCODE: zu_number = 47; break; case ZU48_IDCODE: zu_number = 48; break; case ZU49_IDCODE: zu_number = 49; break; } if (zu_number == 0) { snprintf(data->FPGA_info, sizeof(data->FPGA_info), "unknown"); } else { snprintf(data->FPGA_info, sizeof(data->FPGA_info), "Part name: ZU%d", zu_number); } } /* * This function is responsible for shifting the bitstream * by its header and extracting information from this header. * The bitstream header has 5 sections starting with the letters a,b,c... 
* Each section has the following structure: * [key][length of data][data] */ static uint32_t *parse_header(const struct device *dev, uint32_t *image_ptr, uint32_t *img_size) { unsigned char *header = (unsigned char *)image_ptr; uint32_t length = XLNX_BITSTREAM_SECTION_LENGTH(header); /* shift to the next section*/ header += 0x4U + length; if (*header++ != 'a') { LOG_ERR("Incorrect bitstream format"); return NULL; } length = XLNX_BITSTREAM_SECTION_LENGTH(header); /* shift to the data section*/ header += 0x2U; LOG_DBG("Design name = %s", header); header += length; if (*header++ != 'b') { LOG_ERR("Incorrect bitstream format"); return NULL; } length = XLNX_BITSTREAM_SECTION_LENGTH(header); /* shift to the data section*/ header += 0x2U; LOG_DBG("Part name = %s", header); header += length; if (*header++ != 'c') { LOG_ERR("Incorrect bitstream format"); return NULL; } length = XLNX_BITSTREAM_SECTION_LENGTH(header); /* shift to the data section*/ header += 0x2U; LOG_DBG("Date = %s", header); header += length; if (*header++ != 'd') { LOG_ERR("Incorrect bitstream format"); return NULL; } length = XLNX_BITSTREAM_SECTION_LENGTH(header); /* shift to the data section*/ header += 0x2U; LOG_DBG("Time = %s", header); header += length; if (*header++ != 'e') { LOG_ERR("Incorrect bitstream format"); return NULL; } /* * The last section is the raw bitstream. * It is preceded by its size, which is needed for DMA transfer. 
*/ *img_size = ((uint32_t)*header << 24) | ((uint32_t) *(header + 1) << 16) | ((uint32_t) *(header + 2) << 8) | ((uint32_t) *(header + 3)); return (uint32_t *)header; } static int csudma_transfer(uint32_t size) { /* setup the source DMA channel */ CSUDMA_SRC_ADDR = (uint32_t)BITSTREAM & CSUDMA_SRC_ADDR_MASK; CSUDMA_SRC_ADDR_MSB = 0; CSUDMA_SRC_SIZE = size << CSUDMA_SRC_SIZE_SHIFT; /* wait for the SRC_DMA to complete */ while ((CSUDMA_SRC_I_STS & CSUDMA_I_STS_DONE_MASK) != CSUDMA_I_STS_DONE_MASK) { }; /* acknowledge the transfer has completed */ CSUDMA_SRC_I_STS = CSUDMA_I_STS_DONE_MASK; return 0; } static int wait_for_done(void) { /* wait for PCAP PL_DONE */ while ((PCAP_STATUS & PCAP_PL_DONE_MASK) != PCAP_PL_DONE_MASK) { }; PCAP_RESET = PCAP_RESET_MASK; power_up_fpga(); return 0; } static enum FPGA_status zynqmp_fpga_get_status(const struct device *dev) { ARG_UNUSED(dev); if ((PCAP_STATUS & PCAP_PL_INIT_MASK) && (PCAP_STATUS & PCAP_PL_DONE_MASK)) { return FPGA_STATUS_ACTIVE; } else { return FPGA_STATUS_INACTIVE; } } static const char *zynqmp_fpga_get_info(const struct device *dev) { struct zynqmp_fpga_data *data = dev->data; return data->FPGA_info; } static int zynqmp_fpga_reset(const struct device *dev) { ARG_UNUSED(dev); /* Reset PL */ PCAP_PROG = PCAP_PROG_RESET_MASK; PCAP_PROG = ~PCAP_PROG_RESET_MASK; while ((PCAP_STATUS & PCAP_CFG_RESET) != PCAP_CFG_RESET) { }; return 0; } static int init_pcap(const struct device *dev) { /* take PCAP out of Reset */ PCAP_RESET = ~PCAP_RESET_MASK; /* select PCAP mode and change PCAP to write mode */ PCAP_CTRL = PCAP_PR_MASK; PCAP_RDWR = PCAP_WRITE_MASK; power_up_fpga(); /* setup the SSS */ CSU_SSS_CFG = PCAP_PCAP_SSS_MASK; zynqmp_fpga_reset(dev); /* wait for pl init */ while ((PCAP_STATUS & PCAP_PL_INIT_MASK) != PCAP_PL_INIT_MASK) { }; return 0; } static int zynqmp_fpga_load(const struct device *dev, uint32_t *image_ptr, uint32_t img_size) { uint32_t *addr = parse_header(dev, image_ptr, &img_size); if (addr == NULL) { 
LOG_ERR("Failed to read bitstream"); return -EINVAL; } for (int i = 0; i < (img_size / 4); i++) { *(BITSTREAM + i) = BSWAP_32(*(addr + i)); } init_pcap(dev); csudma_transfer(img_size); wait_for_done(); return 0; } static int zynqmp_fpga_init(const struct device *dev) { /* turn on PCAP CLK */ PCAP_CLK_CTRL = PCAP_CLK_CTRL | PCAP_CLKACT_MASK; update_part_name(dev); return 0; } static struct zynqmp_fpga_data fpga_data; static const struct fpga_driver_api zynqmp_api = { .reset = zynqmp_fpga_reset, .load = zynqmp_fpga_load, .get_status = zynqmp_fpga_get_status, .get_info = zynqmp_fpga_get_info }; DEVICE_DT_INST_DEFINE(0, &zynqmp_fpga_init, NULL, &fpga_data, NULL, POST_KERNEL, CONFIG_FPGA_INIT_PRIORITY, &zynqmp_api); ```
/content/code_sandbox/drivers/fpga/fpga_zynqmp.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,003
```unknown # FPGA ZYNQMP driver configuration options config ZYNQMP_FPGA bool "ZYNQMP fpga driver" help Enable ZYNQMP FPGA driver. ```
/content/code_sandbox/drivers/fpga/Kconfig.zynqmp
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
40
```unknown # FPGA EOS S3 driver configuration options config EOS_S3_FPGA bool "EOS S3 fpga driver" help Enable EOS S3 FPGA driver. ```
/content/code_sandbox/drivers/fpga/Kconfig.eos_s3
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
36
```unknown # FPGA driver configuration options menuconfig FPGA bool "Field-Programmable Gate Array (FPGA) drivers" help Enable support for FPGA drivers. if FPGA module = fpga module-str = fpga source "subsys/logging/Kconfig.template.log_config" config FPGA_INIT_PRIORITY int "Init priority" default 75 help FPGA device drivers initialization priority config FPGA_SHELL bool "FPGA Shell" depends on SHELL && FPGA help Enable FPGA Shell support. source "drivers/fpga/Kconfig.altera_agilex_bridge" source "drivers/fpga/Kconfig.eos_s3" source "drivers/fpga/Kconfig.ice40" source "drivers/fpga/Kconfig.mpfs" source "drivers/fpga/Kconfig.zynqmp" endif # FPGA ```
/content/code_sandbox/drivers/fpga/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
173
```objective-c /* * */ #ifndef ZEPHYR_SUBSYS_FPGA_BRIDGE_INTEL_H_ #define ZEPHYR_SUBSYS_FPGA_BRIDGE_INTEL_H_ #include <zephyr/kernel.h> /* Mask for FPGA-HPS bridges */ #define BRIDGE_MASK 0x0F /* Mailbox command header index */ #define MBOX_CMD_HEADER_INDEX 0x00 /* Mailbox command memory size */ #define FPGA_MB_CMD_ADDR_MEM_SIZE 20 /* Mailbox command response memory size */ #define FPGA_MB_RESPONSE_MEM_SIZE 20 /* Config status response length */ #define FPGA_CONFIG_STATUS_RESPONSE_LEN 0x07 #define MBOX_CMD_CODE_OFFSET 0x00 #define MBOX_CMD_ID_MASK 0x7FF #define MBOX_CMD_MODE_OFFSET 0x0B #define MBOX_CMD_MODE_MASK 0x800 #define MBOX_DATA_LEN_OFFSET 0x0C #define MBOX_DATA_LEN_MASK 0xFFF000 #define RECONFIG_DIRECT_COUNT_OFFSET 0x00 #define RECONFIG_DIRECT_COUNT_MASK 0xFF #define RECONFIG_INDIRECT_ARG_OFFSET 0x08 #define RECONFIG_INDIRECT_COUNT_MASK 0xFF00 #define RECONFIG_INDIRECT_RESPONSE_OFFSET 0x10 #define RECONFIG_RESPONSE_COUNT_MASK 0xFF0000 #define RECONFIG_DATA_MB_CMD_SIZE 0x10 #define RECONFIG_DATA_MB_CMD_INDIRECT_MODE 0x01 #define RECONFIG_DATA_MB_CMD_LENGTH 0x03 #define RECONFIG_DATA_MB_CMD_DIRECT_COUNT 0x00 #define RECONFIG_DATA_MB_CMD_INDIRECT_ARG 0x01 #define RECONFIG_DATA_MB_CMD_INDIRECT_RESPONSE 0x00 #define RECONFIG_STATUS_INTERVAL_DELAY_US 1000 #define RECONFIG_STATUS_RETRY_COUNT 20 #define MBOX_CONFIG_STATUS_STATE_CONFIG 0x10000000 #define MBOX_CFGSTAT_VAB_BS_PREAUTH 0x20000000 #define FPGA_NOT_CONFIGURED_ERROR 0x02000004 #define MBOX_CFGSTAT_STATE_ERROR_HARDWARE 0xF0000005 #define RECONFIG_SOFTFUNC_STATUS_CONF_DONE BIT(0) #define RECONFIG_SOFTFUNC_STATUS_INIT_DONE BIT(1) #define RECONFIG_SOFTFUNC_STATUS_SEU_ERROR BIT(3) #define RECONFIG_PIN_STATUS_NSTATUS BIT(31) #define MBOX_REQUEST_HEADER(cmd_id, cmd_mode, len) \ ((cmd_id << MBOX_CMD_CODE_OFFSET) & (MBOX_CMD_ID_MASK)) | \ ((cmd_mode << MBOX_CMD_MODE_OFFSET) & (MBOX_CMD_MODE_MASK)) | \ ((len << MBOX_DATA_LEN_OFFSET) & (MBOX_DATA_LEN_MASK)) #define MBOX_RECONFIG_REQUEST_DATA_FORMAT(direct_count, 
indirect_arg_count, response_arg_count) \ ((direct_count << RECONFIG_DIRECT_COUNT_OFFSET) & (RECONFIG_DIRECT_COUNT_MASK)) | \ ((indirect_arg_count << RECONFIG_INDIRECT_ARG_OFFSET) & \ (RECONFIG_INDIRECT_COUNT_MASK)) | \ ((response_arg_count << RECONFIG_INDIRECT_RESPONSE_OFFSET) & \ (RECONFIG_RESPONSE_COUNT_MASK)) union mailbox_response_header { /* Header of the config status response */ uint32_t header; struct { /* error_code Field provides a basic description of whether the command * succeeded or not. A successful response returns an error code of 0x0, * non-zero values indicate failure */ uint32_t error_code : 11; /* indirect_bit - Field indicates an indirect command */ uint32_t indirect_bit : 1; /* data_length - Field counts the number of word arguments which follow the * response header word. The meaning of these words depends on the command * code. Units are words */ uint32_t data_length : 11; /* reserve bit */ uint32_t reserved_bit : 1; /* id - Field is returned unchanged from the matching command header and is * useful for matching responses to commands along with the CLIENT */ uint32_t id : 4; /* client_id - Field is returned unchanged from the matching command header and * is useful for matching responses to commands along with the ID */ uint32_t client_id : 4; } mailbox_resp_header; }; union config_status_version { /* Version of the config status response */ uint32_t version; struct { /* update number bits */ uint32_t update_number : 8; /* minor acds release number bits */ uint32_t minor_acds_release_number : 8; /* major acds release number bits */ uint32_t major_acds_release_number : 8; /* qspi flash index bits */ uint32_t qspi_flash_index : 8; } response_version_member; }; union config_status_pin_status { uint32_t pin_status; struct { /* msel bits */ uint32_t msel : 4; /* pmf data bits */ uint32_t pmf_data : 4; /* reserve bits */ uint32_t reserved_bit : 22; /* nconfig bits */ uint32_t nconfig : 1; /* nconfig_status bits */ uint32_t nconfig_status : 1; } 
pin_status_member; }; /* Struct to store the fpga_config_status */ struct fpga_config_status { /* Response header */ union mailbox_response_header header; /* Config state idle or config mode */ uint32_t state; /* Version number */ union config_status_version version; /* Pin status */ union config_status_pin_status pin_status; /* Soft function status details */ uint32_t soft_function_status; /* Location in the bitstream where the error occurred */ uint32_t error_location; /* Data is non-zero only for certain errors. The contents are highly dependent * on which error was reported. The meaning of this data will not be made available to * customers and can only be interpreted by investigating the source code directly */ uint32_t error_details; }; enum smc_cmd_code { /* SMC COMMAND ID to disable all the bridges */ FPGA_ALL_BRIDGE_DISABLE = 0x00, /* SMC COMMAND ID to enable all the bridges */ FPGA_ALL_BRIDGE_ENABLE = 0x01, /* SMC Cancel Command */ FPGA_CANCEL = 0x03, /* SMC COMMAND ID to check Reconfig status to SDM via mailbox */ FPGA_CONFIG_STATUS = 0x04, /* SMC COMMAND ID to check Reconfig status to SDM via mailbox */ FPGA_RECONFIG_STATUS = 0x09 }; enum mbox_reconfig_status_resp { /* Mailbox reconfig status header */ MBOX_RECONFIG_STATUS_HEADER, /* Mailbox reconfig status state */ MBOX_RECONFIG_STATUS_STATE, /* Mailbox reconfig status version */ MBOX_RECONFIG_STATUS_VERSION, /* Mailbox reconfig status pin status */ MBOX_RECONFIG_STATUS_PIN_STATUS, /* Mailbox reconfig status soft function */ MBOX_RECONFIG_STATUS_SOFT_FUNCTION, /* Mailbox reconfig status error location */ MBOX_RECONFIG_STATUS_ERROR_LOCATION, /* Mailbox reconfig status error details */ MBOX_RECONFIG_STATUS_ERROR_DETAILS }; enum smc_request { /* SMC request parameter a2 index*/ SMC_REQUEST_A2_INDEX = 0x00, /* SMC request parameter a3 index */ SMC_REQUEST_A3_INDEX = 0x01 }; /* SIP SVC response private data */ struct sip_svc_private_data { struct sip_svc_response response; uint32_t *mbox_response_data; 
uint32_t mbox_response_len; struct k_sem smc_sem; struct fpga_config_status config_status; }; #endif ```
/content/code_sandbox/drivers/fpga/fpga_altera_agilex_bridge.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,668