text
stringlengths
9
39.2M
dir
stringlengths
25
226
lang
stringclasses
163 values
created_date
timestamp[s]
updated_date
timestamp[s]
repo_name
stringclasses
751 values
repo_full_name
stringclasses
752 values
star
int64
1.01k
183k
len_tokens
int64
1
18.5M
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_FPGA_ZYNQMP_H #define ZEPHYR_DRIVERS_FPGA_ZYNQMP_H #define PCAP_STATUS (*(volatile uint32_t *) (0xFFCA3010)) #define PCAP_RESET (*(volatile uint32_t *) (0xFFCA300C)) #define PCAP_CTRL (*(volatile uint32_t *) (0xFFCA3008)) #define PCAP_RDWR (*(volatile uint32_t *) (0xFFCA3004)) #define PMU_REQ_PWRUP_TRIG (*(volatile uint32_t *) (0xFFD80120)) #define PCAP_PROG (*(volatile uint32_t *) (0xFFCA3000)) #define CSU_SSS_CFG (*(volatile uint32_t *) (0xFFCA0008)) #define CSUDMA_SRC_ADDR (*(volatile uint32_t *) (0xFFC80000)) #define CSUDMA_SRC_SIZE (*(volatile uint32_t *) (0xFFC80004)) #define CSUDMA_SRC_I_STS (*(volatile uint32_t *) (0xFFC80014)) #define CSUDMA_SRC_ADDR_MSB (*(volatile uint32_t *) (0xFFC80028)) #define PWR_STATUS (*(volatile uint32_t *) (0xFFD80110)) #define PMU_GLOBAL_ISO_STATUS (*(volatile uint32_t *) (0xFFD80310)) #define PMU_GLOBAL_PWRUP_EN (*(volatile uint32_t *) (0xFFD80118)) #define PCAP_CLK_CTRL (*(volatile uint32_t *) (0xFF5E00A4)) #define PMU_GLOBAL_ISO_INT_EN (*(volatile uint32_t *) (0xFFD80318)) #define PMU_GLOBAL_ISO_TRIG (*(volatile uint32_t *) (0xFFD80320)) #define IDCODE (*(volatile uint32_t *) (0xFFCA0040)) #define BITSTREAM ((volatile uint32_t *) (0x01000000)) #define PWR_PL_MASK 0x800000U #define ISO_MASK 0x4U #define PCAP_RESET_MASK 0x1U #define PCAP_PROG_RESET_MASK 0x0U #define PCAP_PR_MASK 0x1U #define PCAP_WRITE_MASK 0x0U #define PCAP_PL_INIT_MASK 0x4U #define PCAP_CLKACT_MASK 0x1000000U #define PCAP_PCAP_SSS_MASK 0x5U #define PCAP_PL_DONE_MASK 0x8U #define PCAP_CFG_RESET 0x40U #define CSUDMA_I_STS_DONE_MASK 0x2U #define CSUDMA_SRC_ADDR_MASK 0xFFFFFFFCU #define CSUDMA_SRC_SIZE_SHIFT 0x2U #define IDCODE_MASK 0xFFFFFFF #define ZU2_IDCODE 0x4711093 #define ZU3_IDCODE 0x4710093 #define ZU4_IDCODE 0x4721093 #define ZU5_IDCODE 0x4720093 #define ZU6_IDCODE 0x4739093 #define ZU7_IDCODE 0x4730093 #define ZU9_IDCODE 0x4738093 #define ZU11_IDCODE 0x4740093 #define ZU15_IDCODE 0x4750093 #define 
ZU17_IDCODE 0x4759093 #define ZU19_IDCODE 0x4758093 #define ZU21_IDCODE 0x47E1093 #define ZU25_IDCODE 0x47E5093 #define ZU27_IDCODE 0x47E4093 #define ZU28_IDCODE 0x47E0093 #define ZU29_IDCODE 0x47E2093 #define ZU39_IDCODE 0x47E6093 #define ZU43_IDCODE 0x47FD093 #define ZU46_IDCODE 0x47F8093 #define ZU47_IDCODE 0x47FF093 #define ZU48_IDCODE 0x47FB093 #define ZU49_IDCODE 0x47FE093 #define XLNX_BITSTREAM_SECTION_LENGTH(data) (*(data + 1) | *data << 0x8U); #endif /* ZEPHYR_DRIVERS_FPGA_ZYNQMP_H */ ```
/content/code_sandbox/drivers/fpga/fpga_zynqmp.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
955
```c /* * */ #define DT_DRV_COMPAT lattice_ice40_fpga #include <stdbool.h> #include <stdio.h> #include <zephyr/device.h> #include <zephyr/drivers/fpga.h> #include <zephyr/drivers/gpio.h> #ifdef CONFIG_PINCTRL #include <zephyr/drivers/pinctrl.h> #endif #include <zephyr/drivers/spi.h> #include <zephyr/kernel.h> #include <zephyr/logging/log.h> #include <zephyr/sys/crc.h> /* * Note: When loading a bitstream, the iCE40 has a 'quirk' in that the CS * polarity must be inverted during the 'leading clocks' phase and * 'trailing clocks' phase. While the bitstream is being transmitted, the * CS polarity is normal (active low). Zephyr's SPI driver model currently * does not handle these types of quirks (in contrast to e.g. Linux). * * The logical alternative would be to put the CS into GPIO mode, perform 3 * separate SPI transfers (inverting CS polarity as necessary) and then * restore the default pinctrl settings. On some higher-end microcontrollers * and microprocessors, it's possible to do that without breaking the iCE40 * timing requirements. * * However, on lower-end microcontrollers, the amount of time that elapses * between SPI transfers does break the iCE40 timing requirements. That * leaves us with the bitbanging option. Of course, on lower-end * microcontrollers, the amount of time required to execute something * like gpio_pin_configure_dt() dwarfs the 2*500 nanoseconds needed to * achieve the minimum 1 MHz clock rate for loading the iCE40 bistream. So * in order to bitbang on lower-end microcontrollers, we actually require * direct register access to the set and clear registers. 
* * With that, this driver is left with 2 possible modes of operation which * are: * - FPGA_ICE40_LOAD_MODE_SPI (for higher-end microcontrollers) * - FPGA_ICE40_LOAD_MODE_GPIO (for lower-end microcontrollers) */ #define FPGA_ICE40_LOAD_MODE_SPI 0 #define FPGA_ICE40_LOAD_MODE_GPIO 1 #ifndef BITS_PER_NIBBLE #define BITS_PER_NIBBLE 4 #endif #ifndef BITS_PER_BYTE #define BITS_PER_BYTE 8 #endif #ifndef NIBBLES_PER_BYTE #define NIBBLES_PER_BYTE (BITS_PER_BYTE / BITS_PER_NIBBLE) #endif /* * Values in Hz, intentionally to be comparable with the spi-max-frequency * property from DT bindings in spi-device.yaml. */ #define FPGA_ICE40_SPI_HZ_MIN 1000000 #define FPGA_ICE40_SPI_HZ_MAX 25000000 #define FPGA_ICE40_CRESET_DELAY_US_MIN 1 /* 200ns absolute minimum */ #define FPGA_ICE40_CONFIG_DELAY_US_MIN 1200 #define FPGA_ICE40_LEADING_CLOCKS_MIN 8 #define FPGA_ICE40_TRAILING_CLOCKS_MIN 49 LOG_MODULE_REGISTER(fpga_ice40); struct fpga_ice40_data { uint32_t crc; /* simply use crc32 as info */ char info[2 * sizeof(uint32_t) + 1]; bool on; bool loaded; struct k_spinlock lock; }; struct fpga_ice40_config { struct spi_dt_spec bus; struct gpio_dt_spec cdone; struct gpio_dt_spec creset; struct gpio_dt_spec clk; struct gpio_dt_spec pico; volatile gpio_port_pins_t *set; volatile gpio_port_pins_t *clear; uint16_t mhz_delay_count; uint16_t creset_delay_us; uint16_t config_delay_us; uint8_t leading_clocks; uint8_t trailing_clocks; fpga_api_load load; #ifdef CONFIG_PINCTRL const struct pinctrl_dev_config *pincfg; #endif }; static void fpga_ice40_crc_to_str(uint32_t crc, char *s) { char ch; uint8_t i; uint8_t nibble; const char *table = "0123456789abcdef"; for (i = 0; i < sizeof(crc) * NIBBLES_PER_BYTE; ++i, crc >>= BITS_PER_NIBBLE) { nibble = crc & GENMASK(BITS_PER_NIBBLE, 0); ch = table[nibble]; s[sizeof(crc) * NIBBLES_PER_BYTE - i - 1] = ch; } s[sizeof(crc) * NIBBLES_PER_BYTE] = '\0'; } /* * This is a calibrated delay loop used to achieve a 1 MHz SPI_CLK frequency * with 
FPGA_ICE40_LOAD_MODE_GPIO. It is used both in fpga_ice40_send_clocks() * and fpga_ice40_spi_send_data(). * * Calibration is achieved via the mhz_delay_count device tree parameter. See * lattice,ice40-fpga.yaml for details. */ static inline void fpga_ice40_delay(size_t n) { for (; n > 0; --n) { __asm__ __volatile__(""); } } static void fpga_ice40_send_clocks(size_t delay, volatile gpio_port_pins_t *set, volatile gpio_port_pins_t *clear, gpio_port_pins_t clk, size_t n) { for (; n > 0; --n) { *clear |= clk; fpga_ice40_delay(delay); *set |= clk; fpga_ice40_delay(delay); } } static void fpga_ice40_spi_send_data(size_t delay, volatile gpio_port_pins_t *set, volatile gpio_port_pins_t *clear, gpio_port_pins_t cs, gpio_port_pins_t clk, gpio_port_pins_t pico, uint8_t *z, size_t n) { bool hi; /* assert chip-select (active low) */ *clear |= cs; for (; n > 0; --n, ++z) { /* msb down to lsb */ for (int b = 7; b >= 0; --b) { /* Data is shifted out on the falling edge (CPOL=0) */ *clear |= clk; fpga_ice40_delay(delay); hi = !!(BIT(b) & *z); if (hi) { *set |= pico; } else { *clear |= pico; } /* Data is sampled on the rising edge (CPHA=0) */ *set |= clk; fpga_ice40_delay(delay); } } /* de-assert chip-select (active low) */ *set |= cs; } static enum FPGA_status fpga_ice40_get_status(const struct device *dev) { enum FPGA_status st; k_spinlock_key_t key; struct fpga_ice40_data *data = dev->data; key = k_spin_lock(&data->lock); if (data->loaded && data->on) { st = FPGA_STATUS_ACTIVE; } else { st = FPGA_STATUS_INACTIVE; } k_spin_unlock(&data->lock, key); return st; } /* * See iCE40 Family Handbook, Appendix A. SPI Slave Configuration Procedure, * pp 15-21. 
* * path_to_url~/media/LatticeSemi/Documents/Handbooks/iCE40FamilyHandbook.pdf */ static int fpga_ice40_load_gpio(const struct device *dev, uint32_t *image_ptr, uint32_t img_size) { int ret; uint32_t crc; gpio_port_pins_t cs; gpio_port_pins_t clk; k_spinlock_key_t key; gpio_port_pins_t pico; gpio_port_pins_t creset; struct fpga_ice40_data *data = dev->data; const struct fpga_ice40_config *config = dev->config; /* prepare masks */ cs = BIT(config->bus.config.cs.gpio.pin); clk = BIT(config->clk.pin); pico = BIT(config->pico.pin); creset = BIT(config->creset.pin); /* crc check */ crc = crc32_ieee((uint8_t *)image_ptr, img_size); if (data->loaded && crc == data->crc) { LOG_WRN("already loaded with image CRC32c: 0x%08x", data->crc); } key = k_spin_lock(&data->lock); /* clear crc */ data->crc = 0; data->loaded = false; fpga_ice40_crc_to_str(0, data->info); LOG_DBG("Initializing GPIO"); ret = gpio_pin_configure_dt(&config->cdone, GPIO_INPUT) || gpio_pin_configure_dt(&config->creset, GPIO_OUTPUT_HIGH) || gpio_pin_configure_dt(&config->bus.config.cs.gpio, GPIO_OUTPUT_HIGH) || gpio_pin_configure_dt(&config->clk, GPIO_OUTPUT_HIGH) || gpio_pin_configure_dt(&config->pico, GPIO_OUTPUT_HIGH); __ASSERT(ret == 0, "Failed to initialize GPIO: %d", ret); LOG_DBG("Set CRESET low"); LOG_DBG("Set SPI_CS low"); *config->clear |= (creset | cs); /* Wait a minimum of 200ns */ LOG_DBG("Delay %u us", config->creset_delay_us); fpga_ice40_delay(2 * config->mhz_delay_count * config->creset_delay_us); __ASSERT(gpio_pin_get_dt(&config->cdone) == 0, "CDONE was not high"); LOG_DBG("Set CRESET high"); *config->set |= creset; LOG_DBG("Delay %u us", config->config_delay_us); k_busy_wait(config->config_delay_us); LOG_DBG("Set SPI_CS high"); *config->set |= cs; LOG_DBG("Send %u clocks", config->leading_clocks); fpga_ice40_send_clocks(config->mhz_delay_count, config->set, config->clear, clk, config->leading_clocks); LOG_DBG("Set SPI_CS low"); LOG_DBG("Send bin file"); LOG_DBG("Set SPI_CS high"); 
fpga_ice40_spi_send_data(config->mhz_delay_count, config->set, config->clear, cs, clk, pico, (uint8_t *)image_ptr, img_size); LOG_DBG("Send %u clocks", config->trailing_clocks); fpga_ice40_send_clocks(config->mhz_delay_count, config->set, config->clear, clk, config->trailing_clocks); LOG_DBG("checking CDONE"); ret = gpio_pin_get_dt(&config->cdone); if (ret < 0) { LOG_ERR("failed to read CDONE: %d", ret); goto unlock; } else if (ret != 1) { ret = -EIO; LOG_ERR("CDONE did not go high"); goto unlock; } ret = 0; data->loaded = true; fpga_ice40_crc_to_str(crc, data->info); LOG_INF("Loaded image with CRC32 0x%08x", crc); unlock: (void)gpio_pin_configure_dt(&config->creset, GPIO_OUTPUT_HIGH); (void)gpio_pin_configure_dt(&config->bus.config.cs.gpio, GPIO_OUTPUT_HIGH); (void)gpio_pin_configure_dt(&config->clk, GPIO_DISCONNECTED); (void)gpio_pin_configure_dt(&config->pico, GPIO_DISCONNECTED); #ifdef CONFIG_PINCTRL (void)pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); #endif k_spin_unlock(&data->lock, key); return ret; } static int fpga_ice40_load_spi(const struct device *dev, uint32_t *image_ptr, uint32_t img_size) { int ret; uint32_t crc; k_spinlock_key_t key; struct spi_buf tx_buf; const struct spi_buf_set tx_bufs = { .buffers = &tx_buf, .count = 1, }; struct fpga_ice40_data *data = dev->data; uint8_t clock_buf[(UINT8_MAX + 1) / BITS_PER_BYTE]; const struct fpga_ice40_config *config = dev->config; /* crc check */ crc = crc32_ieee((uint8_t *)image_ptr, img_size); if (data->loaded && crc == data->crc) { LOG_WRN("already loaded with image CRC32c: 0x%08x", data->crc); } key = k_spin_lock(&data->lock); /* clear crc */ data->crc = 0; data->loaded = false; fpga_ice40_crc_to_str(0, data->info); LOG_DBG("Initializing GPIO"); ret = gpio_pin_configure_dt(&config->cdone, GPIO_INPUT) || gpio_pin_configure_dt(&config->creset, GPIO_OUTPUT_HIGH) || gpio_pin_configure_dt(&config->bus.config.cs.gpio, GPIO_OUTPUT_HIGH); __ASSERT(ret == 0, "Failed to initialize GPIO: %d", ret); 
LOG_DBG("Set CRESET low"); ret = gpio_pin_configure_dt(&config->creset, GPIO_OUTPUT_LOW); if (ret < 0) { LOG_ERR("failed to set CRESET low: %d", ret); goto unlock; } LOG_DBG("Set SPI_CS low"); ret = gpio_pin_configure_dt(&config->bus.config.cs.gpio, GPIO_OUTPUT_LOW); if (ret < 0) { LOG_ERR("failed to set SPI_CS low: %d", ret); goto unlock; } /* Wait a minimum of 200ns */ LOG_DBG("Delay %u us", config->creset_delay_us); k_usleep(config->creset_delay_us); __ASSERT(gpio_pin_get_dt(&config->cdone) == 0, "CDONE was not high"); LOG_DBG("Set CRESET high"); ret = gpio_pin_configure_dt(&config->creset, GPIO_OUTPUT_HIGH); if (ret < 0) { LOG_ERR("failed to set CRESET high: %d", ret); goto unlock; } LOG_DBG("Delay %u us", config->config_delay_us); k_busy_wait(config->config_delay_us); LOG_DBG("Set SPI_CS high"); ret = gpio_pin_configure_dt(&config->bus.config.cs.gpio, GPIO_OUTPUT_HIGH); if (ret < 0) { LOG_ERR("failed to set SPI_CS high: %d", ret); goto unlock; } LOG_DBG("Send %u clocks", config->leading_clocks); tx_buf.buf = clock_buf; tx_buf.len = DIV_ROUND_UP(config->leading_clocks, BITS_PER_BYTE); ret = spi_write_dt(&config->bus, &tx_bufs); if (ret < 0) { LOG_ERR("Failed to send leading %u clocks: %d", config->leading_clocks, ret); goto unlock; } LOG_DBG("Set SPI_CS low"); ret = gpio_pin_configure_dt(&config->bus.config.cs.gpio, GPIO_OUTPUT_LOW); if (ret < 0) { LOG_ERR("failed to set SPI_CS low: %d", ret); goto unlock; } LOG_DBG("Send bin file"); tx_buf.buf = image_ptr; tx_buf.len = img_size; ret = spi_write_dt(&config->bus, &tx_bufs); if (ret < 0) { LOG_ERR("Failed to send bin file: %d", ret); goto unlock; } LOG_DBG("Set SPI_CS high"); ret = gpio_pin_configure_dt(&config->bus.config.cs.gpio, GPIO_OUTPUT_HIGH); if (ret < 0) { LOG_ERR("failed to set SPI_CS high: %d", ret); goto unlock; } LOG_DBG("Send %u clocks", config->trailing_clocks); tx_buf.buf = clock_buf; tx_buf.len = DIV_ROUND_UP(config->trailing_clocks, BITS_PER_BYTE); ret = spi_write_dt(&config->bus, &tx_bufs); if 
(ret < 0) { LOG_ERR("Failed to send trailing %u clocks: %d", config->trailing_clocks, ret); goto unlock; } LOG_DBG("checking CDONE"); ret = gpio_pin_get_dt(&config->cdone); if (ret < 0) { LOG_ERR("failed to read CDONE: %d", ret); goto unlock; } else if (ret != 1) { ret = -EIO; LOG_ERR("CDONE did not go high"); goto unlock; } ret = 0; data->loaded = true; fpga_ice40_crc_to_str(crc, data->info); LOG_INF("Loaded image with CRC32 0x%08x", crc); unlock: (void)gpio_pin_configure_dt(&config->creset, GPIO_OUTPUT_HIGH); (void)gpio_pin_configure_dt(&config->bus.config.cs.gpio, GPIO_OUTPUT_HIGH); #ifdef CONFIG_PINCTRL (void)pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); #endif k_spin_unlock(&data->lock, key); return ret; } static int fpga_ice40_load(const struct device *dev, uint32_t *image_ptr, uint32_t img_size) { const struct fpga_ice40_config *config = dev->config; return config->load(dev, image_ptr, img_size); } static int fpga_ice40_on_off(const struct device *dev, bool on) { int ret; k_spinlock_key_t key; struct fpga_ice40_data *data = dev->data; const struct fpga_ice40_config *config = dev->config; key = k_spin_lock(&data->lock); ret = gpio_pin_configure_dt(&config->creset, on ? 
GPIO_OUTPUT_HIGH : GPIO_OUTPUT_LOW); if (ret < 0) { goto unlock; } data->on = on; ret = 0; unlock: k_spin_unlock(&data->lock, key); return ret; } static int fpga_ice40_on(const struct device *dev) { return fpga_ice40_on_off(dev, true); } static int fpga_ice40_off(const struct device *dev) { return fpga_ice40_on_off(dev, false); } static int fpga_ice40_reset(const struct device *dev) { return fpga_ice40_off(dev) || fpga_ice40_on(dev); } static const char *fpga_ice40_get_info(const struct device *dev) { struct fpga_ice40_data *data = dev->data; return data->info; } static const struct fpga_driver_api fpga_ice40_api = { .get_status = fpga_ice40_get_status, .reset = fpga_ice40_reset, .load = fpga_ice40_load, .on = fpga_ice40_on, .off = fpga_ice40_off, .get_info = fpga_ice40_get_info, }; static int fpga_ice40_init(const struct device *dev) { int ret; const struct fpga_ice40_config *config = dev->config; ret = gpio_pin_configure_dt(&config->creset, GPIO_OUTPUT_HIGH); if (ret < 0) { LOG_ERR("failed to configure CRESET: %d", ret); return ret; } ret = gpio_pin_configure_dt(&config->cdone, 0); if (ret < 0) { LOG_ERR("Failed to initialize CDONE: %d", ret); return ret; } return 0; } #define FPGA_ICE40_BUS_FREQ(inst) DT_INST_PROP(inst, spi_max_frequency) #define FPGA_ICE40_CONFIG_DELAY_US(inst) \ DT_INST_PROP_OR(inst, config_delay_us, FPGA_ICE40_CONFIG_DELAY_US_MIN) #define FPGA_ICE40_CRESET_DELAY_US(inst) \ DT_INST_PROP_OR(inst, creset_delay_us, FPGA_ICE40_CRESET_DELAY_US_MIN) #define FPGA_ICE40_LEADING_CLOCKS(inst) \ DT_INST_PROP_OR(inst, leading_clocks, FPGA_ICE40_LEADING_CLOCKS_MIN) #define FPGA_ICE40_TRAILING_CLOCKS(inst) \ DT_INST_PROP_OR(inst, trailing_clocks, FPGA_ICE40_TRAILING_CLOCKS_MIN) #define FPGA_ICE40_MHZ_DELAY_COUNT(inst) DT_INST_PROP_OR(inst, mhz_delay_count, 0) #define FPGA_ICE40_GPIO_PINS(inst, name) (volatile gpio_port_pins_t *)DT_INST_PROP_OR(inst, name, 0) #define FPGA_ICE40_LOAD_MODE(inst) DT_INST_PROP(inst, load_mode) #define FPGA_ICE40_LOAD_FUNC(inst) 
\ (FPGA_ICE40_LOAD_MODE(inst) == FPGA_ICE40_LOAD_MODE_SPI \ ? fpga_ice40_load_spi \ : (FPGA_ICE40_LOAD_MODE(inst) == FPGA_ICE40_LOAD_MODE_GPIO ? fpga_ice40_load_gpio \ : NULL)) #ifdef CONFIG_PINCTRL #define FPGA_ICE40_PINCTRL_CONFIG(inst) .pincfg = PINCTRL_DT_DEV_CONFIG_GET(DT_INST_PARENT(inst)), #define FPGA_ICE40_PINCTRL_DEFINE(inst) PINCTRL_DT_DEFINE(DT_INST_PARENT(inst)) #else #define FPGA_ICE40_PINCTRL_CONFIG(inst) #define FPGA_ICE40_PINCTRL_DEFINE(inst) #endif #define FPGA_ICE40_DEFINE(inst) \ BUILD_ASSERT(FPGA_ICE40_LOAD_MODE(inst) == FPGA_ICE40_LOAD_MODE_SPI || \ FPGA_ICE40_LOAD_MODE(inst) == FPGA_ICE40_LOAD_MODE_GPIO); \ BUILD_ASSERT(FPGA_ICE40_BUS_FREQ(inst) >= FPGA_ICE40_SPI_HZ_MIN); \ BUILD_ASSERT(FPGA_ICE40_BUS_FREQ(inst) <= FPGA_ICE40_SPI_HZ_MAX); \ BUILD_ASSERT(FPGA_ICE40_CONFIG_DELAY_US(inst) >= FPGA_ICE40_CONFIG_DELAY_US_MIN); \ BUILD_ASSERT(FPGA_ICE40_CONFIG_DELAY_US(inst) <= UINT16_MAX); \ BUILD_ASSERT(FPGA_ICE40_CRESET_DELAY_US(inst) >= FPGA_ICE40_CRESET_DELAY_US_MIN); \ BUILD_ASSERT(FPGA_ICE40_CRESET_DELAY_US(inst) <= UINT16_MAX); \ BUILD_ASSERT(FPGA_ICE40_LEADING_CLOCKS(inst) >= FPGA_ICE40_LEADING_CLOCKS_MIN); \ BUILD_ASSERT(FPGA_ICE40_LEADING_CLOCKS(inst) <= UINT8_MAX); \ BUILD_ASSERT(FPGA_ICE40_TRAILING_CLOCKS(inst) >= FPGA_ICE40_TRAILING_CLOCKS_MIN); \ BUILD_ASSERT(FPGA_ICE40_TRAILING_CLOCKS(inst) <= UINT8_MAX); \ BUILD_ASSERT(FPGA_ICE40_MHZ_DELAY_COUNT(inst) >= 0); \ \ FPGA_ICE40_PINCTRL_DEFINE(inst); \ static struct fpga_ice40_data fpga_ice40_data_##inst; \ \ static const struct fpga_ice40_config fpga_ice40_config_##inst = { \ .bus = SPI_DT_SPEC_INST_GET(inst, SPI_WORD_SET(8) | SPI_TRANSFER_MSB, 0), \ .creset = GPIO_DT_SPEC_INST_GET(inst, creset_gpios), \ .cdone = GPIO_DT_SPEC_INST_GET(inst, cdone_gpios), \ .clk = GPIO_DT_SPEC_INST_GET_OR(inst, clk_gpios, {0}), \ .pico = GPIO_DT_SPEC_INST_GET_OR(inst, pico_gpios, {0}), \ .set = FPGA_ICE40_GPIO_PINS(inst, gpios_set_reg), \ .clear = FPGA_ICE40_GPIO_PINS(inst, gpios_clear_reg), \ 
.mhz_delay_count = FPGA_ICE40_MHZ_DELAY_COUNT(inst), \ .config_delay_us = FPGA_ICE40_CONFIG_DELAY_US(inst), \ .creset_delay_us = FPGA_ICE40_CRESET_DELAY_US(inst), \ .leading_clocks = FPGA_ICE40_LEADING_CLOCKS(inst), \ .trailing_clocks = FPGA_ICE40_TRAILING_CLOCKS(inst), \ .load = FPGA_ICE40_LOAD_FUNC(inst), \ FPGA_ICE40_PINCTRL_CONFIG(inst)}; \ \ DEVICE_DT_INST_DEFINE(inst, fpga_ice40_init, NULL, &fpga_ice40_data_##inst, \ &fpga_ice40_config_##inst, POST_KERNEL, CONFIG_FPGA_INIT_PRIORITY, \ &fpga_ice40_api); DT_INST_FOREACH_STATUS_OKAY(FPGA_ICE40_DEFINE) ```
/content/code_sandbox/drivers/fpga/fpga_ice40.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,282
```c
/*
 *
 */

#include <errno.h>
#include <string.h>
#include <zephyr/device.h>
#include <zephyr/drivers/fpga.h>

#include "fpga_eos_s3.h"

/* Ungate all CRU clock paths feeding the FPGA fabric. */
void eos_s3_fpga_enable_clk(void)
{
	CRU->C16_CLK_GATE = C16_CLK_GATE_PATH_0_ON;
	CRU->C21_CLK_GATE = C21_CLK_GATE_PATH_0_ON;
	CRU->C09_CLK_GATE = C09_CLK_GATE_PATH_1_ON | C09_CLK_GATE_PATH_2_ON;
	CRU->C02_CLK_GATE = C02_CLK_GATE_PATH_1_ON;
}

/* Gate all CRU clock paths feeding the FPGA fabric. */
void eos_s3_fpga_disable_clk(void)
{
	CRU->C16_CLK_GATE = C16_CLK_GATE_PATH_0_OFF;
	CRU->C21_CLK_GATE = C21_CLK_GATE_PATH_0_OFF;
	CRU->C09_CLK_GATE = C09_CLK_GATE_PATH_1_OFF | C09_CLK_GATE_PATH_2_OFF;
	CRU->C02_CLK_GATE = C02_CLK_GATE_PATH_1_OFF;
}

/* Driver state: only a pointer to the static info string. */
struct quickfeather_fpga_data {
	char *FPGA_info;
};

/* Read the fabric power status straight from the PMU register. */
static enum FPGA_status eos_s3_fpga_get_status(const struct device *dev)
{
	ARG_UNUSED(dev);

	if (PMU->FB_STATUS == FPGA_STATUS_ACTIVE) {
		return FPGA_STATUS_ACTIVE;
	} else {
		return FPGA_STATUS_INACTIVE;
	}
}

static const char *eos_s3_fpga_get_info(const struct device *dev)
{
	struct quickfeather_fpga_data *data = dev->data;

	return data->FPGA_info;
}

/*
 * Power up the fabric domain, enable its clocks and open the
 * configuration interface. No-op if the fabric is already active.
 */
static int eos_s3_fpga_on(const struct device *dev)
{
	if (eos_s3_fpga_get_status(dev) == FPGA_STATUS_ACTIVE) {
		return 0;
	}

	/* wake up the FPGA power domain */
	PMU->FFE_FB_PF_SW_WU = PMU_FFE_FB_PF_SW_WU_FB_WU;

	while (PMU->FFE_FB_PF_SW_WU == PMU_FFE_FB_PF_SW_WU_FB_WU) {
		/* The register will clear itself if the FPGA starts */
	};

	eos_s3_fpga_enable_clk();

	/* enable FPGA programming */
	PMU->GEN_PURPOSE_0 = FB_CFG_ENABLE;
	PIF->CFG_CTL = CFG_CTL_LOAD_ENABLE;

	return 0;
}

/*
 * Shut the fabric domain down and gate its clocks. No-op if the fabric
 * is already inactive.
 */
static int eos_s3_fpga_off(const struct device *dev)
{
	if (eos_s3_fpga_get_status(dev) == FPGA_STATUS_INACTIVE) {
		return 0;
	}

	PMU->FB_PWR_MODE_CFG = PMU_FB_PWR_MODE_CFG_FB_SD;
	PMU->FFE_FB_PF_SW_PD = PMU_FFE_FB_PF_SW_PD_FB_PD;

	eos_s3_fpga_disable_clk();

	return 0;
}

/* Power-cycle the fabric; -EAGAIN if it fails to come back up. */
static int eos_s3_fpga_reset(const struct device *dev)
{
	if (eos_s3_fpga_get_status(dev) == FPGA_STATUS_ACTIVE) {
		eos_s3_fpga_off(dev);
	}

	eos_s3_fpga_on(dev);

	if (eos_s3_fpga_get_status(dev) == FPGA_STATUS_INACTIVE) {
		return -EAGAIN;
	}

	return 0;
}

/*
 * Stream the bitstream into the configuration port one 32-bit word at a
 * time, then close programming mode and release the fabric resets.
 * Requires the fabric to be active (-EINVAL otherwise). Any trailing
 * bytes beyond a multiple of 4 are not written.
 */
static int eos_s3_fpga_load(const struct device *dev, uint32_t *image_ptr, uint32_t img_size)
{
	if (eos_s3_fpga_get_status(dev) == FPGA_STATUS_INACTIVE) {
		return -EINVAL;
	}

	volatile uint32_t *bitstream = (volatile uint32_t *)image_ptr;

	for (uint32_t chunk_cnt = 0; chunk_cnt < (img_size / 4); chunk_cnt++) {
		PIF->CFG_DATA = *bitstream;
		bitstream++;
	}

	/* disable FPGA programming */
	PMU->GEN_PURPOSE_0 = FB_CFG_DISABLE;
	PIF->CFG_CTL = CFG_CTL_LOAD_DISABLE;
	PMU->FB_ISOLATION = FB_ISOLATION_DISABLE;

	/* disable software resets */
	CRU->FB_SW_RESET &= ~(FB_C21_DOMAIN_SW_RESET | FB_C16_DOMAIN_SW_RESET |
			      FB_C02_DOMAIN_SW_RESET | FB_C09_DOMAIN_SW_RESET);

	return 0;
}

/* Route the clock pad to the fabric and bring the fabric up once at boot. */
static int eos_s3_fpga_init(const struct device *dev)
{
	IO_MUX->PAD_19_CTRL = PAD_ENABLE;

	struct quickfeather_fpga_data *data = dev->data;

	data->FPGA_info = FPGA_INFO;

	/* NOTE(review): return value ignored; a failed power-up is not reported. */
	eos_s3_fpga_reset(dev);

	return 0;
}

static struct quickfeather_fpga_data fpga_data;

static const struct fpga_driver_api eos_s3_api = {
	.reset = eos_s3_fpga_reset,
	.load = eos_s3_fpga_load,
	.get_status = eos_s3_fpga_get_status,
	.on = eos_s3_fpga_on,
	.off = eos_s3_fpga_off,
	.get_info = eos_s3_fpga_get_info
};

DEVICE_DT_DEFINE(DT_NODELABEL(fpga0), &eos_s3_fpga_init, NULL, &fpga_data, NULL, POST_KERNEL,
		 CONFIG_FPGA_INIT_PRIORITY, &eos_s3_api);
```
/content/code_sandbox/drivers/fpga/fpga_eos_s3.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,078
```c /* * */ #define DT_DRV_COMPAT microchip_mpfs_mailbox #include <stdio.h> #include <string.h> #include <zephyr/device.h> #include <zephyr/devicetree.h> #include <zephyr/drivers/flash.h> #include <zephyr/drivers/fpga.h> #include <zephyr/kernel.h> #include <zephyr/logging/log.h> #include <zephyr/sys/sys_io.h> #include <zephyr/sys/util.h> LOG_MODULE_REGISTER(fpga_mpfs); #define SPI_FLASH_DIRECTORY_OFFSET 0x00000000 #define SPI_FLASH_GOLDEN_IMAGE_OFFSET 0x00100400 #define SPI_FLASH_NEW_IMAGE_OFFSET 0x01500400 #define SPI_FLASH_SECTOR_SIZE 4096 #define SPI_FLASH_PAGE_SIZE 256 #define SERVICES_CR_OFFSET 0x50u #define SERVICES_SR_OFFSET 0x54u #define SCBCTRL_SERVICESCR_REQ (0u) #define SCBCTRL_SERVICESCR_REQ_MASK BIT(SCBCTRL_SERVICESCR_REQ) #define SCBCTRL_SERVICESSR_BUSY (1u) #define SCBCTRL_SERVICESSR_BUSY_MASK BIT(SCBCTRL_SERVICESSR_BUSY) #define SCBCTRL_SERVICESSR_STATUS (16u) #define SCBCTRL_SERVICESSR_STATUS_MASK_WIDTH (16u) #define SCBCTRL_SERVICESSR_STATUS_MASK \ GENMASK(SCBCTRL_SERVICESSR_STATUS + SCBCTRL_SERVICESSR_STATUS_MASK_WIDTH - 1, \ SCBCTRL_SERVICESSR_STATUS) #define MSS_DESIGN_INFO_CMD (0x02) #define MSS_SYS_BITSTREAM_AUTHENTICATE_CMD 0x23u #define MSS_SYS_IAP_PROGRAM_BY_SPIIDX_CMD 0x42u struct mpfs_fpga_config { mm_reg_t base; mm_reg_t mailbox; }; struct mpfs_fpga_data { char FPGA_design_ver[30]; }; static inline uint32_t scb_read(mm_reg_t add, mm_reg_t offset) { return sys_read32(add + offset); } static inline void scb_write(mm_reg_t add, mm_reg_t offset, uint32_t val) { return sys_write32(val, add + offset); } /*This function add the index of new image into the spi directory at offset 0x004. * Note: In the Flash directory first four pages(each page of 256 Bytes) have either * a valid image address or zeros. The other remaining 12 pages are all filled with 0xFFs. 
* * |------------------------------| 0x000 * | Golden Image Address: | * | 0x0100400 | * |------------------------------| 0x004 * | Update Image Address | * | 0x1500400 | * |------------------------------| 0x008 * | Empty | * | 0x000000 | * |------------------------------| 0x00C * | Unused for re-programming | * | | * |------------------------------| 0x400 */ static uint8_t update_spi_flash_directory(const struct device *flash_dev) { size_t len = SPI_FLASH_PAGE_SIZE; uint8_t buf[SPI_FLASH_PAGE_SIZE]; uint8_t rc, k; memset(buf, 0, len); rc = flash_read(flash_dev, SPI_FLASH_DIRECTORY_OFFSET, buf, len); if (rc != 0) { LOG_ERR("Flash read failed! %d", rc); return rc; } /* New image address(0x1500400) entry at offset 0x004 */ buf[4] = 0x00; buf[5] = 0x04; buf[6] = 0x50; buf[7] = 0x01; /* Erase SPI flash directory */ rc = flash_erase(flash_dev, SPI_FLASH_DIRECTORY_OFFSET, SPI_FLASH_SECTOR_SIZE); if (rc != 0) { LOG_ERR("erase failed! %d", rc); } /* Write the first page with updated address entry */ rc = flash_write(flash_dev, SPI_FLASH_DIRECTORY_OFFSET, buf, len); if (rc != 0) { LOG_ERR("Flash write failed! %d", rc); return rc; } /* Fill page number second, third and fourth with zeros */ memset(buf, 0, len); k = 1; while (k < 4) { rc = flash_write(flash_dev, (SPI_FLASH_DIRECTORY_OFFSET + k * 0x100), buf, len); if (rc != 0) { LOG_ERR("Flash write failed! %d", rc); return rc; } k++; } return rc; } /* This function Program a new FPGA design image into the SPI Flash at location * 0x1500400. * Note: The source location of new image is _bin_start symbol value and the size of * new image is _bim_size symbol value. 
*/ static uint8_t program_new_image(const struct device *flash_dev, uint8_t *image_start, uint32_t image_size) { size_t len = SPI_FLASH_PAGE_SIZE; uint8_t buf[SPI_FLASH_PAGE_SIZE]; uint8_t rc; uint32_t i, count, k; uint8_t *temp; temp = image_start; if (image_size > 0x1400000) { LOG_ERR("Image is larger than 20Mb"); return 1; } /* Find the sectors to erase */ count = (uint32_t)(image_size / SPI_FLASH_SECTOR_SIZE) + 1; LOG_INF("Erasing."); i = 0; while (i < count) { rc = flash_erase( flash_dev, ((SPI_FLASH_NEW_IMAGE_OFFSET - 0x400) + (i * SPI_FLASH_SECTOR_SIZE)), SPI_FLASH_SECTOR_SIZE); if (rc != 0) { LOG_ERR("erase failed! %d", rc); } if (i % 0x100 == 0) { LOG_DBG("."); } i++; } /* Erase completed and ready to program new image */ /* Find the pages to program */ count = (uint32_t)(image_size / SPI_FLASH_PAGE_SIZE) + 1; LOG_INF("Programming."); i = 0; while (i < count) { temp = (image_start + i * SPI_FLASH_PAGE_SIZE); memset(buf, 0, len); for (k = 0; k < 256; k++) { buf[k] = *temp; temp = temp + 1; } rc = flash_write(flash_dev, (SPI_FLASH_NEW_IMAGE_OFFSET + i * SPI_FLASH_PAGE_SIZE), buf, len); if (rc != 0) { LOG_ERR("Flash write failed! %d", rc); return rc; } if (i % 0x100 == 0) { LOG_DBG("."); } i++; } LOG_INF("Programming completed."); return rc; } static int8_t verify_image(const struct device *dev) { const struct mpfs_fpga_config *cfg = dev->config; int8_t status = EINVAL; uint32_t value = 0; LOG_INF("Image verification started..."); /* Once system controller starts processing command The busy bit will * go 1. Make sure that service is complete i.e. BUSY bit is gone 0 */ while (scb_read(cfg->base, SERVICES_SR_OFFSET) & SCBCTRL_SERVICESSR_BUSY_MASK) { ; } /* Form the SS command: bit 0 to 6 is the opcode, bit 7 to 15 is the Mailbox * offset For some services this field has another meaning. * (e.g. for IAP bit-stream auth. 
it means spi_idx) */ scb_write(cfg->mailbox, 0, 0x1500400); value = (MSS_SYS_BITSTREAM_AUTHENTICATE_CMD << 16) | 0x1; scb_write(cfg->base, SERVICES_CR_OFFSET, value); /* REQ bit will remain set till the system controller starts * processing command. Since DRI is slow interface, we are waiting * here to make sure System controller has started processing * command */ while (scb_read(cfg->base, SERVICES_CR_OFFSET) & SCBCTRL_SERVICESCR_REQ_MASK) { ; } /* Once system controller starts processing command The busy bit will * go 1. Make sure that service is complete i.e. BUSY bit is gone 0 */ while (scb_read(cfg->base, SERVICES_SR_OFFSET) & SCBCTRL_SERVICESSR_BUSY_MASK) { ; } /* Read the status returned by System Controller */ status = ((scb_read(cfg->base, SERVICES_SR_OFFSET) & SCBCTRL_SERVICESSR_STATUS_MASK) >> SCBCTRL_SERVICESSR_STATUS); LOG_INF("Image verification status : %x ", status); return status; } static void activate_image(const struct device *dev) { const struct mpfs_fpga_config *cfg = dev->config; int8_t status = EINVAL; uint32_t value = 0; LOG_INF("Image activation started..."); /* Once system controller starts processing command The busy bit will * go 1. Make sure that service is complete i.e. BUSY bit is gone 0 */ while (scb_read(cfg->base, SERVICES_SR_OFFSET) & SCBCTRL_SERVICESSR_BUSY_MASK) { ; } /* Form the SS command: bit 0 to 6 is the opcode, bit 7 to 15 is the Mailbox * offset For some services this field has another meaning. * (e.g. for IAP bit-stream auth. it means spi_idx) */ value = (MSS_SYS_IAP_PROGRAM_BY_SPIIDX_CMD << 16) | BIT(23) | 0x1; scb_write(cfg->base, SERVICES_CR_OFFSET, value); /* REQ bit will remain set till the system controller starts * processing command. Since DRI is slow interface, we are waiting * here to make sure System controller has started processing * command */ while (scb_read(cfg->base, SERVICES_CR_OFFSET) & SCBCTRL_SERVICESCR_REQ_MASK) { ; } /* Once system controller starts processing command The busy bit will * go 1. 
Make sure that service is complete i.e. BUSY bit is gone 0 */ while (scb_read(cfg->base, SERVICES_SR_OFFSET) & SCBCTRL_SERVICESSR_BUSY_MASK) { ; } /* Read the status returned by System Controller */ status = ((scb_read(cfg->base, SERVICES_SR_OFFSET) & SCBCTRL_SERVICESSR_STATUS_MASK) >> SCBCTRL_SERVICESSR_STATUS); LOG_INF("Image activation status : %x ", status); } static int mpfs_fpga_reset(const struct device *dev) { int8_t status = EINVAL; status = verify_image(dev); if (status == 0) { activate_image(dev); } return 0; } static int mpfs_fpga_load(const struct device *dev, uint32_t *image_ptr, uint32_t img_size) { const struct device *flash_dev = DEVICE_DT_GET_OR_NULL(DT_ALIAS(bitstream_flash)); if (flash_dev == NULL) { LOG_ERR("Device not found"); return -ENOENT; } if (!device_is_ready(flash_dev)) { LOG_ERR("%s: device not ready.", flash_dev->name); return 1; } if (img_size == 0) { LOG_ERR("Image size is zero."); return -EINVAL; } if (image_ptr == NULL) { LOG_ERR("Failed to read FPGA image"); return -EINVAL; } update_spi_flash_directory(flash_dev); program_new_image(flash_dev, (uint8_t *)image_ptr, img_size); return 0; } static const char *mpfs_fpga_get_info(const struct device *dev) { struct mpfs_fpga_data *data = dev->data; const struct mpfs_fpga_config *cfg = dev->config; uint32_t value = 0; uint16_t design_version = 0; /* Once system controller starts processing command The busy bit will * go 1. Make sure that service is complete i.e. BUSY bit is gone 0 */ while (scb_read(cfg->base, SERVICES_SR_OFFSET) & SCBCTRL_SERVICESSR_BUSY_MASK) { ; } /* Form the SS command: bit 0 to 6 is the opcode, bit 7 to 15 is the Mailbox * offset For some services this field has another meaning. * (e.g. for IAP bit-stream auth. it means spi_idx) */ value = (MSS_DESIGN_INFO_CMD << 16) | 0x1; scb_write(cfg->base, SERVICES_CR_OFFSET, value); /* REQ bit will remain set till the system controller starts * processing command. 
Since DRI is slow interface, we are waiting * here to make sure System controller has started processing * command */ while (scb_read(cfg->base, SERVICES_CR_OFFSET) & SCBCTRL_SERVICESCR_REQ_MASK) { ; } /* Once system controller starts processing command The busy bit will * go 1. Make sure that service is complete i.e. BUSY bit is gone 0 */ while (scb_read(cfg->base, SERVICES_SR_OFFSET) & SCBCTRL_SERVICESSR_BUSY_MASK) { ; } design_version = scb_read(cfg->mailbox, 32); sprintf(data->FPGA_design_ver, (uint8_t *)"Design Version : 0x%x", design_version); return data->FPGA_design_ver; } static enum FPGA_status mpfs_fpga_get_status(const struct device *dev) { const struct mpfs_fpga_config *cfg = dev->config; if (scb_read(cfg->base, SERVICES_SR_OFFSET) & SCBCTRL_SERVICESSR_BUSY_MASK) { return FPGA_STATUS_INACTIVE; } else { return FPGA_STATUS_ACTIVE; } } static int mpfs_fpga_init(const struct device *dev) { return 0; } static struct mpfs_fpga_data fpga_data; static struct mpfs_fpga_config fpga_config = { .base = DT_INST_REG_ADDR_BY_IDX(0, 0), .mailbox = DT_INST_REG_ADDR_BY_IDX(0, 2), }; static const struct fpga_driver_api mpfs_fpga_api = { .reset = mpfs_fpga_reset, .load = mpfs_fpga_load, .get_info = mpfs_fpga_get_info, .get_status = mpfs_fpga_get_status, }; DEVICE_DT_INST_DEFINE(0, &mpfs_fpga_init, NULL, &fpga_data, &fpga_config, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &mpfs_fpga_api); ```
/content/code_sandbox/drivers/fpga/fpga_mpfs.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,242
```objective-c /* * * */ #ifndef ZEPHYR_DRIVERS_CRYPTO_CRYPTO_ATAES132A_PRIV_H_ #define ZEPHYR_DRIVERS_CRYPTO_CRYPTO_ATAES132A_PRIV_H_ #include <zephyr/drivers/i2c.h> #include <zephyr/kernel.h> #include <zephyr/sys/util.h> /* Configuration Read Only Registers */ #define ATAES_SERIALNUM_REG 0xF000 #define ATAES_LOTHISTORY_REG 0xF008 #define ATAES_JEDEC_REG 0xF010 #define ATAES_ALGORITHM_REG 0xF015 #define ATAES_EEPAGESIZE_REG 0xF017 #define ATAES_ENCREADSIZE_REG 0xF018 #define ATAES_ENCWRITESIZE_REG 0xF019 #define ATAES_DEVICENUM_REG 0xF01A #define ATAES_MANUFACTID_REG 0xF02B #define ATAES_PERMCONFIG_REG 0xF02D /* Configuration Pre-Lock Writable Registers */ #define ATAES_I2CADDR_REG 0xF040 #define ATAES_CHIPCONFIG_REG 0xF042 #define ATAES_FREESPACE_ADDR 0xF180 /** * Counter Config Memory Map * ctrid valid entries are [0x0-0xF] */ #define ATAES_CTRCFG_REG(ctrid) (0xF060 + (ctrid < 1)) /** * Key Config Memory Map * keyid valid entries are [0x0-0xF] */ #define ATAES_KEYCFG_REG(keyid) (0xF080 + (keyid < 2)) /** * Zone Config Memory Map * zoneid valid entries are [0x0-0xF] */ #define ATAES_ZONECFG_REG(zoneid) (0xF0C0 + (zoneid < 2)) /** * Counter Memory Map * crtid valid entries are [0x0-0xF] characters */ #define ATAES_COUNTER_REG(ctrid) (0xF100 + (ctrid < 3)) /** * Small Zone Memory Address * Pre-Small Zone Lock Writable */ #define ATAES_SMALLZONE_ADDR 0xF1E0 /** * Key Memory Map * keynum valid entries are [0-F] characters */ #define ATAES_KEYMEMMAP_REG(keyid) (0xF2##keyid##0) #define ATAES_COMMAND_MEM_ADDR 0xFE00 #define ATAES_COMMAND_ADDRR_RESET 0xFFE0 #define ATAES_STATUS_REG 0xFFF0 #define ATAES_STATUS_WIP BIT(0) #define ATAES_STATUS_WEN BIT(1) #define ATAES_STATUS_WAK BIT(2) #define ATAES_STATUS_CRC BIT(4) #define ATAES_STATUS_RDY BIT(6) #define ATAES_STATUS_ERR BIT(7) #define ATAES_VOLATILE_KEYID 0xFF #define ATAES_VOLATILE_AUTHOK BIT(0) #define ATAES_VOLATILE_ENCOK (BIT(1) & BIT(2)) #define ATAES_VOLATILE_DECOK BIT(3) #define ATAES_VOLATILE_RNDNNC BIT(4) 
#define ATAES_VOLATILE_AUTHCO BIT(5) #define ATAES_VOLATILE_LEGACYOK BIT(6) #define ATAES_KEYCONFIG_EXTERNAL BIT(0) #define ATAES_KEYCONFIG_RAND_NONCE BIT(2) #define ATAES_KEYCONFIG_LEGACYOK BIT(3) #define ATAES_KEYCONFIG_AUTHKEY BIT(4) #define ATAES_CHIPCONFIG_LEGACYE BIT(0) #define ATAES_NONCE_OP 0x01 #define ATAES_ENCRYPT_OP 0x06 #define ATAES_DECRYPT_OP 0x07 #define ATAES_INFO_OP 0x0C #define ATAES_LEGACY_OP 0x0F #define ATAES_BLOCKRD_OP 0x10 #define ATAES_MAC_MODE_COUNTER BIT(5) #define ATAES_MAC_MODE_SERIAL BIT(6) #define ATAES_MAC_MODE_SMALLZONE BIT(7) #if defined(CONFIG_CRYPTO_ATAES132A_I2C_SPEED_STANDARD) #define ATAES132A_BUS_SPEED I2C_SPEED_STANDARD #else #define ATAES132A_BUS_SPEED I2C_SPEED_FAST #endif #define CRC16_POLY 0x8005 void ataes132a_atmel_crc(uint8_t *input, uint8_t length, uint8_t *output) { int i, j; uint8_t bit; uint16_t crc; uint16_t double_carry; uint8_t higher_crc_bit; for (i = 0, crc = 0U; i < length; i++) { for (j = 7; j >= 0; j--) { bit = !!(input[i] & BIT(j)); higher_crc_bit = crc >> 15; double_carry = (crc & BIT(8)) << 1; crc <<= 1; crc |= double_carry; if ((bit ^ higher_crc_bit)) { crc ^= CRC16_POLY; } } } *(uint16_t *)output = crc << 8 | crc >> 8; } static inline int burst_write_i2c(const struct i2c_dt_spec *spec, uint16_t start_addr, uint8_t *buf, uint8_t num_bytes) { uint8_t addr_buffer[2]; struct i2c_msg msg[2]; addr_buffer[1] = start_addr & 0xFF; addr_buffer[0] = start_addr >> 8; msg[0].buf = addr_buffer; msg[0].len = 2U; msg[0].flags = I2C_MSG_WRITE; msg[1].buf = buf; msg[1].len = num_bytes; msg[1].flags = I2C_MSG_WRITE | I2C_MSG_STOP; return i2c_transfer_dt(spec, msg, 2); } static inline int burst_read_i2c(const struct i2c_dt_spec *spec, uint16_t start_addr, uint8_t *buf, uint8_t num_bytes) { uint8_t addr_buffer[2]; struct i2c_msg msg[2]; addr_buffer[1] = start_addr & 0xFF; addr_buffer[0] = start_addr >> 8; msg[0].buf = addr_buffer; msg[0].len = 2U; msg[0].flags = I2C_MSG_WRITE; msg[1].buf = buf; msg[1].len = num_bytes; 
msg[1].flags = I2C_MSG_RESTART | I2C_MSG_READ | I2C_MSG_STOP; return i2c_transfer_dt(spec, msg, 2); } static inline int read_reg_i2c(const struct i2c_dt_spec *spec, uint16_t reg_addr, uint8_t *value) { return burst_read_i2c(spec, reg_addr, value, 1); } static inline int write_reg_i2c(const struct i2c_dt_spec *spec, uint16_t reg_addr, uint8_t value) { return burst_write_i2c(spec, reg_addr, &value, 1); } struct ataes132a_device_config { struct i2c_dt_spec i2c; }; struct ataes132a_device_data { const struct device *i2c; uint8_t command_buffer[64]; struct k_sem device_sem; }; struct ataes132a_driver_state { bool in_use; uint8_t key_id; uint8_t key_config; uint8_t chip_config; }; /** * @brief Data structure that describes the ATAES132A device external items * used in the CCM MAC generation and authorization processes. */ struct ataes132a_mac_packet { /** Key storage id used on CCM encryption */ uint8_t encryption_key_id; /** MAC Count value */ uint8_t encryption_mac_count; }; /** * @brief Data structure that describes the ATAES132A device internal items * used in the CCM MAC generation and authorization processes. */ struct ataes132a_mac_mode { /** Indicates to include the counter value * in the MAC calculation */ bool include_counter; /** Indicates to include the device serial * number in the MAC calculation */ bool include_serial; /** Indicates to include the small zone number * in the MAC calculation */ bool include_smallzone; }; /** * @brief ATAES132A device initialize function * * This function receives a reference to the i2c port * where the ATES132A device is attached. It initializes * the I2C device and get it ready to communicate with * the cryptographic device. * * @param i2c_dev reference to the I2C device where ATES132A is attached. * * @return Returns 0 in case of success and an error code otherwise. 
*/ int ataes132a_init(const struct device *i2c_dev); /** * @brief ATAES132A CCM decrypt function * * This function performs a CCM decrypt and authorization operation on the * input and MAC buffer. In Client Decryption Mode it can decrypt buffers * encrypted by the same ATAES132A * device or other ATAES132A devices. * In User Decryption Mode it can decrypt buffers encrypted by the Host. * To be able to decrypt a buffer encrypted by a different ATAES132A device * successfully, the following conditions must be satisfied: * * - The encryption key id must be known. * - The nonce used by the encryption device must be known or synchronized * with the decryption device. * - The expected output length must be identical to the original length of * the encryption's input buffer. * - The MAC Count of the encryption device must be known. * - The MAC Mode must be identical between encrypt and decrypt calls. * - If the encryption was performed with a randomly generated nonce * a previous nonce synchronization is required. * - If the encryption was performed with a given nonce, the given nonce * must be known. * * @param i2c_dev Reference to the I2C device where ATES132A is attached. * * @param key_id Key ID from the ATAS132A key storage. This will be the used * to decrypt and authenticate the buffer and MAC. * * @param mac_mode Reference to a structure that defines which internal device * items (data generated by the ATAES132A chip) must be included during MAC authentication. The values * must be identical to the ones used during encryption. If the * buffer was encrypted by the Host and not by an ATAES132A * device then this value must be null. * * @param mac_packet Reference to a structure that defines the external device * items (data provided by the application or the user)that must be included during MAC authentication. The * values must be identical to those used during encryption. 
* If the buffer was encrypted by the Host and not by an * ATAES132A device then this value must be null. * * @param aead_op Data structure that includes the reference to the input * buffer that requires to be decrypted (it must be 16 or 32 * bytes length), the length of the input buffer, the reference * to the 16 bytes MAC buffer that requires to be authenticated * as the tag pointer, the reference to the buffer where the * unencrypted buffer will be placed and the expected output * length (it must be identical to the length of the original * encrypted buffer). * * @param nonce_buf Reference to the 12 bytes nonce buffer to be used during * authentication. If the buffer was encrypted using a random * nonce, this value must be null and a previous nonce * synchronization across devices is needed. * * @return Returns 0 in case of success and an error code otherwise. */ int ataes132a_aes_ccm_decrypt(const struct device *i2c_dev, uint8_t key_id, struct ataes132a_mac_mode *mac_mode, struct ataes132a_mac_packet *mac_packet, struct cipher_aead_pkt *aead_op, uint8_t *nonce_buf); /** * @brief ATAES132A CCM encrypt function * * This function performs a CCM encrypt operation on the input buffer. * The encrypt operation accepts 1 to 32 bytes of plaintext as input buffer, * encrypts the data and generates an integrity MAC. * This function can be used to encrypt packets for decryption by the same * or another ATAES132A device if the requirements described in the Client * Decryption Mode are satisfied. * * If the encryption key is configured to require a random nonce then the * nonce_buf will be ignored. It preferably must be null. * * @param i2c_dev Reference to the I2C device where ATES132A is attached. * * @param key_id Key ID from the ATAS132A key storage. This will be the used * to encrypt and generate the buffer and MAC. * * @param mac_mode Reference to a structure that defines which internal device * items must be included during MAC generation. 
The values * must be known by the decrypt operation. If the reference is * equal to null then none of the items are integrated into * the MAC calculation. * @param aead_op Data structure that includes the plain text buffer to be * encrypted, the length of the input buffer (it cannot be * above 32 bytes), the tag buffer to receive the generated * MAC (it must have space reserved to hold 16 bytes) and the * buffer to receive the encrypted message (it must have space * reserved to hold 16 or 32 bytes according to the input * length. * * @param non_buf 12 bytes nonce buffer. If encryption key requires random * nonce the parameter will be ignored. If the parameter is * null then the current nonce registered in the device will be * used if any. * * @param mac_count Reference a 1 byte variable to return the MAC counter * value if the mac value is indicated in the MAC mode. * * @return Returns 0 in case of success and an error code otherwise. */ int ataes132a_aes_ccm_encrypt(const struct device *i2c_dev, uint8_t key_id, struct ataes132a_mac_mode *mac_mode, struct cipher_aead_pkt *aead_op, uint8_t *nonce_buf, uint8_t *mac_count); /** * @brief ATAES132A ECM block function * * This function performs an ECM encrypt operation on the input buffer. * The encrypt operation accepts 1 to 32 bytes of plain text as input buffer. * The encryption key must be enabled to perform legacy ECM operation. * Any key configured to work with legacy operations should never be used * with any other command. The ECM operation can be used to exhaustively * attack the key. * * @param i2c_dev Reference to the I2C device where ATES132A is attached. * * @param key_id Key ID from the ATAS132A key storage. * * @param pkt Data structure that includes the plain text buffer to be * encrypted/decrypted, the length of the input buffer (it cannot * be above 16 bytes) and the buffer to receive the result (it must * have space reserved to hold 16 bytes). 
* * @return Returns 0 in case of success and an error code otherwise. */ int ataes132a_aes_ecb_block(const struct device *i2c_dev, uint8_t key_id, struct cipher_pkt *pkt); #endif /* ZEPHYR_DRIVERS_CRYPTO_CRYPTO_ATAES132A_PRIV_H_ */ ```
/content/code_sandbox/drivers/crypto/crypto_ataes132a_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,419
```unknown
# STM32 Cryptographic Accelerator configuration options

menuconfig CRYPTO_STM32
	bool "STM32 Cryptographic Accelerator driver"
	default y
	depends on DT_HAS_ST_STM32_AES_ENABLED || DT_HAS_ST_STM32_CRYP_ENABLED
	select USE_STM32_HAL_CRYP
	select USE_STM32_HAL_CRYP_EX
	help
	  Enable STM32 HAL-based Cryptographic Accelerator driver.

config CRYPTO_STM32_MAX_SESSION
	int "Maximum of sessions STM32 crypto driver can handle"
	default 2
	depends on CRYPTO_STM32
	help
	  This can be used to tweak the amount of sessions the driver
	  can handle in parallel.
```
/content/code_sandbox/drivers/crypto/Kconfig.stm32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
143
```c
/*
 * SHA-256 driver for the ITE IT8xxx2 hardware hash accelerator.
 * Feeds 64-byte blocks to the HW module through a shared RAM context
 * and implements the Zephyr crypto hash API for SHA-256 only.
 */
#define DT_DRV_COMPAT ite_it8xxx2_sha

#include <zephyr/kernel.h>
#include <zephyr/crypto/crypto.h>
#include <zephyr/sys/byteorder.h>
#include <chip_chipregs.h>
#include <errno.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(sha_it8xxx2, CONFIG_CRYPTO_LOG_LEVEL);

/* Only a single accelerator instance is supported. */
BUILD_ASSERT(DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) == 1,
	     "unsupported sha instance");

#define IT8XXX2_SHA_REGS_BASE DT_REG_ADDR(DT_NODELABEL(sha0))
/* 0x00: Hash Control Register */
#define IT8XXX2_REG_HASHCTRLR (0)
/* 0x01: SHA256 Hash Base Address 1 Register */
#define IT8XXX2_REG_SHA_HBADDR (1)
/* 0x02: SHA256 Hash Base Address 2 Register */
#define IT8XXX2_REG_SHA_HBADDR2 (2)
#define IT8XXX2_SHA_START_SHA256 BIT(1)

#define SHA_SHA256_HASH_LEN 32
#define SHA_SHA256_BLOCK_LEN 64
#define SHA_SHA256_K_LEN 256
#define SHA_SHA256_HASH_LEN_WORDS (SHA_SHA256_HASH_LEN / sizeof(uint32_t))
#define SHA_SHA256_BLOCK_LEN_WORDS (SHA_SHA256_BLOCK_LEN / sizeof(uint32_t))
#define SHA_SHA256_K_LEN_WORDS (SHA_SHA256_K_LEN / sizeof(uint32_t))

/*
 * This struct is used by the hardware and must be stored in RAM first 4k-byte
 * and aligned on a 256-byte boundary.
 */
struct chip_sha256_ctx {
	union {
		/* W[0] ~ W[15]: current message-schedule/input block */
		uint32_t w_sha[SHA_SHA256_BLOCK_LEN_WORDS];
		uint8_t w_input[SHA_SHA256_BLOCK_LEN];
	};
	/* reserved */
	uint32_t reserved1[8];
	/* H[0] ~ H[7]: running hash state */
	uint32_t h[SHA_SHA256_HASH_LEN_WORDS];
	/* reserved */
	uint32_t reserved2[30];
	/* Number of bytes currently buffered in w_input */
	uint32_t w_input_index;
	/* Total message length in bytes (used for length padding) */
	uint32_t total_len;
	/* K[0] ~ K[63]: round constants consumed by the HW */
	uint32_t k[SHA_SHA256_K_LEN_WORDS];
} __aligned(256);

Z_GENERIC_SECTION(.__sha256_ram_block) struct chip_sha256_ctx chip_ctx;

/* SHA-256 initial hash values H0..H7 (FIPS 180-4). */
static const uint32_t sha256_h0[SHA_SHA256_HASH_LEN_WORDS] = {
	0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
	0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
};

/*
 * References of K of SHA-256:
 * path_to_url#Pseudocode
 * (SHA-256 round constants K0..K63, FIPS 180-4.)
 */
static const uint32_t sha256_k[SHA_SHA256_K_LEN_WORDS] = {
	0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
	0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
	0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
	0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
	0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
	0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
	0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
	0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
	0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
	0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
	0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
	0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
	0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
	0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
	0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
	0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};

/* Reset hash state to H0 and clear the input buffer; optionally
 * (re)load the round-constant table consumed by the hardware.
 */
static void it8xxx2_sha256_init(bool init_k)
{
	int i;

	chip_ctx.total_len = 0;
	chip_ctx.w_input_index = 0;

	/* Initialize hash values */
	for (i = 0; i < ARRAY_SIZE(sha256_h0); i++) {
		chip_ctx.h[i] = sha256_h0[i];
	}

	/* Initialize array of round constants */
	if (init_k) {
		for (i = 0; i < ARRAY_SIZE(sha256_k); i++) {
			chip_ctx.k[i] = sha256_k[i];
		}
	}
}

/* Run one 64-byte block through the HW engine; updates chip_ctx.h
 * in place and resets w_input_index for the next block.
 */
static void it8xxx2_sha256_module_calculation(void)
{
	uint32_t key;
	uint8_t hash_ctrl;

	/*
	 * Since W field on it8xxx2 requires big-endian format, change byte
	 * order before computing hash.
	 */
	for (int i = 0; i < SHA_SHA256_BLOCK_LEN_WORDS; i++) {
		chip_ctx.w_sha[i] = sys_cpu_to_be32(chip_ctx.w_sha[i]);
	}
	/*
	 * Global interrupt is disabled because the CPU cannot access memory
	 * via the DLM (Data Local Memory) bus while HW module is computing
	 * hash.
	 */
	key = irq_lock();
	hash_ctrl = sys_read8(IT8XXX2_SHA_REGS_BASE + IT8XXX2_REG_HASHCTRLR);
	sys_write8(hash_ctrl | IT8XXX2_SHA_START_SHA256,
		   IT8XXX2_SHA_REGS_BASE + IT8XXX2_REG_HASHCTRLR);
	/* NOTE(review): this read-back presumably stalls until the HW is
	 * done — confirm against the IT8xxx2 register documentation.
	 */
	hash_ctrl = sys_read8(IT8XXX2_SHA_REGS_BASE + IT8XXX2_REG_HASHCTRLR);
	irq_unlock(key);

	chip_ctx.w_input_index = 0;
}

/* Hash-update handler: buffer input bytes, hashing each full 64-byte
 * block; on finish, apply FIPS 180-4 padding (0x80, zeros, 64-bit bit
 * length), emit the digest and re-init for the next session.
 */
static int it8xxx2_hash_handler(struct hash_ctx *ctx, struct hash_pkt *pkt,
				bool finish)
{
	uint32_t rem_len = pkt->in_len;
	uint32_t in_buf_idx = 0;

	while (rem_len--) {
		chip_ctx.w_input[chip_ctx.w_input_index++] =
					pkt->in_buf[in_buf_idx++];
		if (chip_ctx.w_input_index >= SHA_SHA256_BLOCK_LEN) {
			it8xxx2_sha256_module_calculation();
		}
	}

	chip_ctx.total_len += pkt->in_len;

	if (finish) {
		uint32_t *ob_ptr = (uint32_t *)pkt->out_buf;

		/* Pre-processing (Padding) */
		memset(&chip_ctx.w_input[chip_ctx.w_input_index], 0,
		       SHA_SHA256_BLOCK_LEN - chip_ctx.w_input_index);
		chip_ctx.w_input[chip_ctx.w_input_index] = 0x80;

		/* No room for the 8-byte length field in this block:
		 * hash it, then zero a fresh block (w_input_index is 0
		 * after the module calculation).
		 */
		if (chip_ctx.w_input_index >= 56) {
			it8xxx2_sha256_module_calculation();
			memset(&chip_ctx.w_input[chip_ctx.w_input_index], 0,
			       SHA_SHA256_BLOCK_LEN - chip_ctx.w_input_index);
		}

		/* Message length in bits, big-endian, in the last word. */
		chip_ctx.w_sha[15] = sys_cpu_to_be32(chip_ctx.total_len * 8);
		it8xxx2_sha256_module_calculation();

		for (int i = 0; i < SHA_SHA256_HASH_LEN_WORDS; i++) {
			ob_ptr[i] = sys_be32_to_cpu(chip_ctx.h[i]);
		}

		it8xxx2_sha256_init(false);
	}

	return 0;
}

/* Free a hash session: just reset the shared context state. */
static int it8xxx2_hash_session_free(const struct device *dev,
				     struct hash_ctx *ctx)
{
	it8xxx2_sha256_init(false);

	return 0;
}

/* Capabilities: separate in/out buffers, synchronous operation only. */
static inline int it8xxx2_query_hw_caps(const struct device *dev)
{
	return (CAP_SEPARATE_IO_BUFS | CAP_SYNC_OPS);
}

/* Begin a hash session; only SHA-256 and the advertised capability
 * flags are accepted.
 */
static int it8xxx2_hash_begin_session(const struct device *dev,
				      struct hash_ctx *ctx,
				      enum hash_algo algo)
{
	if (algo != CRYPTO_HASH_ALGO_SHA256) {
		LOG_ERR("Unsupported algo");
		return -EINVAL;
	}

	if (ctx->flags & ~(it8xxx2_query_hw_caps(dev))) {
		LOG_ERR("Unsupported flag");
		return -EINVAL;
	}

	it8xxx2_sha256_init(false);
	ctx->hash_hndlr = it8xxx2_hash_handler;

	return 0;
}

/* Device init: load the K table once and point the HW base-address
 * registers at the 256-byte-aligned context (address >> 6, per the
 * register format).
 */
static int it8xxx2_sha_init(const struct device *dev)
{
	it8xxx2_sha256_init(true);

	/* Configure base address register for W and H */
	sys_write8(((uint32_t)&chip_ctx >> 6) & 0xfc,
		   IT8XXX2_SHA_REGS_BASE + IT8XXX2_REG_SHA_HBADDR);
	/* Configure base address register for K */
	sys_write8(((uint32_t)&chip_ctx.k >> 6) & 0xfc,
		   IT8XXX2_SHA_REGS_BASE + IT8XXX2_REG_SHA_HBADDR2);

	return 0;
}

static const struct crypto_driver_api it8xxx2_crypto_api = {
	.hash_begin_session = it8xxx2_hash_begin_session,
	.hash_free_session = it8xxx2_hash_session_free,
	.query_hw_caps = it8xxx2_query_hw_caps,
};

DEVICE_DT_INST_DEFINE(0, &it8xxx2_sha_init, NULL, NULL, NULL, POST_KERNEL,
		      CONFIG_CRYPTO_INIT_PRIORITY, &it8xxx2_crypto_api);
```
/content/code_sandbox/drivers/crypto/crypto_it8xxx2_sha.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,294
```c
/*
 * Driver for the Atmel/Microchip ATAES132A I2C crypto device:
 * command framing with CRC-16, nonce handling, and CCM/legacy-ECB
 * operations built on the helpers in crypto_ataes132a_priv.h.
 */
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <string.h>
#include <zephyr/device.h>
#include <zephyr/drivers/i2c.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/crypto/crypto.h>

#define DT_DRV_COMPAT atmel_ataes132a

#include "crypto_ataes132a_priv.h"

/* Poll interval passed to k_busy_wait() while the device is busy
 * (presumably microseconds, per k_busy_wait's contract — the name's
 * meaning is not documented here).
 */
#define D10D24S 11
#define MAX_RETRIES 3
#define ATAES132A_AES_KEY_SIZE 16

/* ATAES132A can store up to 16 different crypto keys */
#define CRYPTO_MAX_SESSION 16

#define LOG_LEVEL CONFIG_CRYPTO_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(ataes132a);

static struct ataes132a_driver_state ataes132a_state[CRYPTO_MAX_SESSION];

/* Mark every key-slot state entry free and record its key id.
 * NOTE(review): the loop bound is ATAES132A_AES_KEY_SIZE, which happens
 * to equal CRYPTO_MAX_SESSION (the array's size) — the latter looks
 * like the intended bound.
 */
static void ataes132a_init_states(void)
{
	int i;

	for (i = 0; i < ATAES132A_AES_KEY_SIZE; i++) {
		ataes132a_state[i].in_use = false;
		ataes132a_state[i].key_id = i;
	}
}

/* Frame and send one command (count|opcode|mode|params|CRC16) through
 * the device command buffer, wait for completion, CRC-validate the
 * response (retrying reads up to MAX_RETRIES), and copy the response
 * payload into response/nresponse. Returns 0 or a negative errno.
 */
static int ataes132a_send_command(const struct device *dev, uint8_t opcode,
				  uint8_t mode, uint8_t *params,
				  uint8_t nparams, uint8_t *response,
				  uint8_t *nresponse)
{
	int retry_count = 0;
	struct ataes132a_device_data *data = dev->data;
	const struct ataes132a_device_config *cfg = dev->config;
	uint8_t count;
	uint8_t status;
	uint8_t crc[2];
	int i, i2c_return;

	/* Packet = count byte + opcode + mode + params + 2 CRC bytes. */
	count = nparams + 5;
	if (count > 64) {
		LOG_ERR("command too large for command buffer");
		return -EDOM;
	}

	/* If there is a command in progress, idle wait until it is available.
	 * If there is concurrency protection around the driver, this should
	 * never happen.
	 */
	read_reg_i2c(&cfg->i2c, ATAES_STATUS_REG, &status);
	while (status & ATAES_STATUS_WIP) {
		k_busy_wait(D10D24S);
		read_reg_i2c(&cfg->i2c, ATAES_STATUS_REG, &status);
	}

	data->command_buffer[0] = count;
	data->command_buffer[1] = opcode;
	data->command_buffer[2] = mode;
	for (i = 0; i < nparams; i++) {
		data->command_buffer[i + 3] = params[i];
	}

	/*Calculate command CRC*/
	ataes132a_atmel_crc(data->command_buffer, nparams + 3, crc);
	data->command_buffer[nparams + 3] = crc[0];
	data->command_buffer[nparams + 4] = crc[1];

	/*Reset i/O address start before sending a command*/
	write_reg_i2c(&cfg->i2c, ATAES_COMMAND_ADDRR_RESET, 0x0);

	/*Send a command through the command buffer*/
	i2c_return = burst_write_i2c(&cfg->i2c, ATAES_COMMAND_MEM_ADDR,
				     data->command_buffer, count);

	LOG_DBG("BURST WRITE RETURN: %d", i2c_return);

	/* Idle-waiting for the command completion*/
	do {
		k_busy_wait(D10D24S);
		read_reg_i2c(&cfg->i2c, ATAES_STATUS_REG, &status);
	} while (status & ATAES_STATUS_WIP);

	if (status & ATAES_STATUS_CRC) {
		LOG_ERR("incorrect CRC command");
		return -EINVAL;
	}

	if (!(status & ATAES_STATUS_RDY)) {
		LOG_ERR("expected response is not in place");
		return -EINVAL;
	}

	/* Read the response */
	burst_read_i2c(&cfg->i2c, ATAES_COMMAND_MEM_ADDR,
		       data->command_buffer, 64);

	count = data->command_buffer[0];

	/* Calculate and validate response CRC */
	ataes132a_atmel_crc(data->command_buffer, count - 2, crc);

	LOG_DBG("COMMAND CRC %x%x", data->command_buffer[count - 2],
		data->command_buffer[count - 1]);
	LOG_DBG("CALCULATED CRC %x%x", crc[0], crc[1]);

	/* If CRC fails retry reading MAX RETRIES times */
	while (crc[0] != data->command_buffer[count - 2] ||
	       crc[1] != data->command_buffer[count - 1]) {
		if (retry_count > MAX_RETRIES - 1) {
			LOG_ERR("response crc validation rebase"
				" max retries");
			return -EINVAL;
		}

		burst_read_i2c(&cfg->i2c, ATAES_COMMAND_MEM_ADDR,
			       data->command_buffer, 64);

		count = data->command_buffer[0];

		ataes132a_atmel_crc(data->command_buffer, count -  2, crc);
		retry_count++;

		LOG_DBG("COMMAND RETRY %d", retry_count);
		LOG_DBG("COMMAND CRC %x%x",
			data->command_buffer[count - 2],
			data->command_buffer[count - 1]);
		LOG_DBG("CALCULATED CRC %x%x", crc[0], crc[1]);
	}

	if ((status & ATAES_STATUS_ERR) || data->command_buffer[1] != 0x00) {
		LOG_ERR("command execution error %x",
			data->command_buffer[1]);
		return -EIO;
	}

	LOG_DBG("Read the response count: %d", count);

	for (i = 0; i < count - 3; i++) {
		response[i] = data->command_buffer[i + 1];
	}

	*nresponse = count - 3;

	return 0;
}

/* Device init: verify the bus, configure I2C speed, create the access
 * semaphore and reset the per-key state table.
 */
int ataes132a_init(const struct device *dev)
{
	struct ataes132a_device_data *ataes132a = dev->data;
	const struct ataes132a_device_config *cfg = dev->config;
	uint32_t i2c_cfg;

	LOG_DBG("ATAES132A INIT");

	if (!device_is_ready(cfg->i2c.bus)) {
		LOG_ERR("Bus device is not ready");
		return -ENODEV;
	}

	i2c_cfg = I2C_MODE_CONTROLLER | I2C_SPEED_SET(ATAES132A_BUS_SPEED);

	i2c_configure(cfg->i2c.bus, i2c_cfg);

	k_sem_init(&ataes132a->device_sem, 1, K_SEM_MAX_LIMIT);

	ataes132a_init_states();

	return 0;
}

/* CCM decrypt + MAC authentication; see the header's doxygen for the
 * full contract. Serializes on device_sem around the NONCE/DECRYPT
 * command sequence.
 */
int ataes132a_aes_ccm_decrypt(const struct device *dev,
			      uint8_t key_id,
			      struct ataes132a_mac_mode *mac_mode,
			      struct ataes132a_mac_packet *mac_packet,
			      struct cipher_aead_pkt *aead_op,
			      uint8_t *nonce_buf)
{
	uint8_t command_mode = 0x0;
	struct ataes132a_device_data *data = dev->data;
	uint8_t out_len;
	uint8_t in_buf_len;
	uint8_t return_code;
	uint8_t expected_out_len;
	uint8_t param_buffer[52];

	if (!aead_op) {
		LOG_ERR("Parameter cannot be null");
		return -EINVAL;
	}

	if (!aead_op->pkt) {
		LOG_ERR("Parameter cannot be null");
		return -EINVAL;
	}

	in_buf_len = aead_op->pkt->in_len;
	expected_out_len = aead_op->pkt->out_len;

	/*The KeyConfig[EKeyID].ExternalCrypto bit must be 1b.*/
	if (!(ataes132a_state[key_id].key_config & ATAES_KEYCONFIG_EXTERNAL)) {
		LOG_ERR("key %x external mode disabled", key_id);
		return -EINVAL;
	}

	if (in_buf_len != 16U && in_buf_len != 32U) {
		LOG_ERR("ccm mode only accepts input blocks of 16"
			" and 32 bytes");
		return -EINVAL;
	}

	if (expected_out_len > 32) {
		LOG_ERR("ccm mode cannot generate more than"
			" 32 output bytes");
		return -EINVAL;
	}

	/* If KeyConfig[key_id].AuthKey is set, then prior authentication
	 * is required
	 */
	if (!(ataes132a_state[key_id].key_config & ATAES_KEYCONFIG_AUTHKEY)) {
		LOG_DBG("keep in mind key %x will require"
			" previous authentication", key_id);
	}

	if (!aead_op->pkt->in_buf || !aead_op->pkt->out_buf) {
		return 0;
	}

	/* If the KeyConfig[EKeyID].RandomNonce bit is set
	 * the current nonce register content will be used.
	 * If there is an invalid random nonce or if there
	 * is no nonce synchronization between device
	 * the decrypt operation will fail accordingly.
	 */
	if (ataes132a_state[key_id].key_config & ATAES_KEYCONFIG_RAND_NONCE) {
		LOG_DBG("key %x requires random nonce,"
			" nonce_buf will be ignored", key_id);

		LOG_DBG("current nonce register will be used");

	}

	k_sem_take(&data->device_sem, K_FOREVER);

	/* If the KeyConfig[EKeyID].RandomNonce bit is not set
	 * then the nonce send as parameter will be loaded into
	 * the nonce register.
	 */
	if (!(ataes132a_state[key_id].key_config & ATAES_KEYCONFIG_RAND_NONCE)
	    && nonce_buf) {
		param_buffer[0] = 0x0;
		param_buffer[1] = 0x0;
		param_buffer[2] = 0x0;
		param_buffer[3] = 0x0;
		memcpy(param_buffer + 4, nonce_buf, 12);

		return_code = ataes132a_send_command(dev, ATAES_NONCE_OP,
						     0x0, param_buffer, 16,
						     param_buffer, &out_len);

		if (return_code != 0U) {
			LOG_ERR("nonce command ended with code %d",
				return_code);
			k_sem_give(&data->device_sem);
			return -EINVAL;
		}

		if (param_buffer[0] != 0U) {
			LOG_ERR("nonce command failed with error"
				" code %d", param_buffer[0]);
			k_sem_give(&data->device_sem);
			return -EIO;
		}
	}

	/* If the KeyConfig[EKeyID].RandomNonce bit is not set
	 * and the nonce send as parameter is a null value,
	 * the command will use the current nonce register value.
	 */
	if (!(ataes132a_state[key_id].key_config & ATAES_KEYCONFIG_RAND_NONCE)
	    && !nonce_buf) {
		LOG_DBG("current nonce register will be used");
	}

	/* Client decryption mode requires a MAC packet to specify the
	 * encryption key id and the MAC count of the encryption device
	 * to synchronize MAC generation
	 */
	if (mac_packet) {
		param_buffer[0] = mac_packet->encryption_key_id;
		param_buffer[2] = mac_packet->encryption_mac_count;
	} else {
		param_buffer[0] = 0x0;
		param_buffer[2] = 0x0;
		LOG_DBG("normal decryption mode"
			" ignores mac_packet parameter");
	}

	/* Client decryption mode requires a MAC packet to specify
	 * if MAC counter, serial number and small zone number are
	 * included in MAC generation.
	 */
	if (mac_mode) {
		if (mac_mode->include_counter) {
			LOG_DBG("including usage counter in the MAC: "
				"decrypt and encrypt dev must be the same");
			command_mode = command_mode | ATAES_MAC_MODE_COUNTER;
		}

		if (mac_mode->include_serial) {
			LOG_DBG("including serial number in the MAC: "
				"decrypt and encrypt dev must be the same");
			command_mode = command_mode | ATAES_MAC_MODE_SERIAL;
		}

		if (mac_mode->include_smallzone) {
			LOG_DBG("including small zone in the MAC: "
				"decrypt and encrypt dev share the "
				"first four bytes of their small zone");
			command_mode = command_mode | ATAES_MAC_MODE_SMALLZONE;
		}
	}

	param_buffer[1] = key_id;
	param_buffer[3] = expected_out_len;
	if (aead_op->tag) {
		memcpy(param_buffer + 4, aead_op->tag, 16);
	}
	memcpy(param_buffer + 20, aead_op->pkt->in_buf, in_buf_len);

	return_code = ataes132a_send_command(dev, ATAES_DECRYPT_OP,
					     command_mode, param_buffer,
					     in_buf_len + 4, param_buffer,
					     &out_len);

	if (return_code != 0U) {
		LOG_ERR("decrypt command ended with code %d", return_code);
		k_sem_give(&data->device_sem);
		return -EINVAL;
	}

	if (out_len < 2 || out_len > 33) {
		LOG_ERR("decrypt command response has invalid"
			" size %d", out_len);
		k_sem_give(&data->device_sem);
		return -EINVAL;
	}

	if (param_buffer[0] != 0U) {
		LOG_ERR("legacy command failed with error"
			" code %d", param_buffer[0]);
		k_sem_give(&data->device_sem);
		return -param_buffer[0];
	}

	if (expected_out_len != out_len - 1) {
		LOG_ERR("decrypted output data size %d and expected data"
			" size %d are different", out_len - 1,
			expected_out_len);
		k_sem_give(&data->device_sem);
		return -EINVAL;
	}

	memcpy(aead_op->pkt->out_buf, param_buffer + 1, out_len - 1);

	k_sem_give(&data->device_sem);

	return 0;
}

/* CCM encrypt + MAC generation; see the header's doxygen for the full
 * contract. Optionally reads back the MAC counter via an INFO command.
 */
int ataes132a_aes_ccm_encrypt(const struct device *dev,
			      uint8_t key_id,
			      struct ataes132a_mac_mode *mac_mode,
			      struct cipher_aead_pkt *aead_op,
			      uint8_t *nonce_buf,
			      uint8_t *mac_count)
{
	uint8_t command_mode = 0x0;
	struct ataes132a_device_data *data = dev->data;
	uint8_t buf_len;
	uint8_t out_len;
	uint8_t return_code;
	uint8_t param_buffer[40];

	if (!aead_op) {
		LOG_ERR("Parameter cannot be null");
		return -EINVAL;
	}

	if (!aead_op->pkt) {
		LOG_ERR("Parameter cannot be null");
		return -EINVAL;
	}

	buf_len = aead_op->pkt->in_len;

	/*The KeyConfig[EKeyID].ExternalCrypto bit must be 1b.*/
	if (!(ataes132a_state[key_id].key_config & ATAES_KEYCONFIG_EXTERNAL)) {
		LOG_ERR("key %x external mode disabled", key_id);
		return -EINVAL;
	}

	if (buf_len > 32) {
		LOG_ERR("only up to 32 bytes accepted for ccm mode");
		return -EINVAL;
	}

	/* If KeyConfig[key_id].AuthKey is set, then prior authentication
	 * is required
	 */
	if (!(ataes132a_state[key_id].key_config & ATAES_KEYCONFIG_AUTHKEY)) {
		LOG_DBG("keep in mind key %x will require"
			" previous authentication", key_id);
	}

	if (!aead_op->pkt->in_buf || !aead_op->pkt->out_buf) {
		return 0;
	}

	/* If the KeyConfig[EKeyID].RandomNonce bit is set
	 * the current nonce register content will be used.
	 * If there is an invalid random nonce or if there
	 * is no nonce synchronization between device
	 * the decrypt operation will fail accordingly.
	 */
	if (ataes132a_state[key_id].key_config & ATAES_KEYCONFIG_RAND_NONCE) {
		LOG_DBG("key %x requires random nonce,"
			" nonce_buf will be ignored", key_id);

		LOG_DBG("current nonce register will be used");

	}

	k_sem_take(&data->device_sem, K_FOREVER);

	/* If the KeyConfig[EKeyID].RandomNonce bit is not set
	 * then the nonce send as parameter will be loaded into
	 * the nonce register.
	 */
	if (!(ataes132a_state[key_id].key_config & ATAES_KEYCONFIG_RAND_NONCE)
	    && nonce_buf) {
		param_buffer[0] = 0x0;
		param_buffer[1] = 0x0;
		param_buffer[2] = 0x0;
		param_buffer[3] = 0x0;
		memcpy(param_buffer + 4, nonce_buf, 12);

		return_code = ataes132a_send_command(dev, ATAES_NONCE_OP,
						     0x0, param_buffer, 16,
						     param_buffer, &out_len);

		if (return_code != 0U) {
			LOG_ERR("nonce command ended with code %d",
				return_code);
			k_sem_give(&data->device_sem);
			return -EINVAL;
		}

		if (param_buffer[0] != 0U) {
			LOG_ERR("nonce command failed with error"
				" code %d", param_buffer[0]);
			k_sem_give(&data->device_sem);
			return -EIO;
		}
	}

	/* If the KeyConfig[EKeyID].RandomNonce bit is not set
	 * and the nonce send as parameter is a null value,
	 * the command will use the current nonce register value.
	 */
	if (!(ataes132a_state[key_id].key_config & ATAES_KEYCONFIG_RAND_NONCE)
	    && !nonce_buf) {
		LOG_DBG("current nonce register will be used");
	}

	/* MAC packet to specify if MAC counter, serial number and small zone
	 * number are included in MAC generation.
	 */
	if (mac_mode) {
		if (mac_mode->include_counter) {
			LOG_DBG("including usage counter in the MAC: "
				"decrypt and encrypt dev must be the same");
			command_mode = command_mode | ATAES_MAC_MODE_COUNTER;
		}

		if (mac_mode->include_serial) {
			LOG_DBG("including serial number in the MAC: "
				"decrypt and encrypt dev must be the same");
			command_mode = command_mode | ATAES_MAC_MODE_SERIAL;
		}

		if (mac_mode->include_smallzone) {
			LOG_DBG("including small zone in the MAC: "
				"decrypt and encrypt dev share the "
				"first four bytes of their small zone");
			command_mode = command_mode | ATAES_MAC_MODE_SMALLZONE;
		}
	}

	param_buffer[0] = key_id;
	param_buffer[1] = buf_len;
	memcpy(param_buffer + 2, aead_op->pkt->in_buf, buf_len);

	return_code = ataes132a_send_command(dev, ATAES_ENCRYPT_OP,
					     command_mode, param_buffer,
					     buf_len + 2, param_buffer,
					     &out_len);

	if (return_code != 0U) {
		LOG_ERR("encrypt command ended with code %d", return_code);
		k_sem_give(&data->device_sem);
		return -EINVAL;
	}

	/* Response = status byte + 16-byte MAC + 16/32-byte ciphertext. */
	if (out_len < 33 || out_len > 49) {
		LOG_ERR("encrypt command response has invalid"
			" size %d", out_len);
		k_sem_give(&data->device_sem);
		return -EINVAL;
	}

	if (param_buffer[0] != 0U) {
		LOG_ERR("encrypt command failed with error"
			" code %d", param_buffer[0]);
		k_sem_give(&data->device_sem);
		return -EIO;
	}

	if (aead_op->tag) {
		memcpy(aead_op->tag, param_buffer + 1, 16);
	}
	memcpy(aead_op->pkt->out_buf, param_buffer + 17, out_len - 17U);

	if (mac_mode) {
		if (mac_mode->include_counter) {
			param_buffer[0] = 0x0;
			param_buffer[1] = 0x0;
			param_buffer[2] = 0x0;
			param_buffer[3] = 0x0;

			ataes132a_send_command(dev, ATAES_INFO_OP, 0x0,
					       param_buffer,	4,
					       param_buffer, &out_len);
			if (param_buffer[0] != 0U) {
				LOG_ERR("info command failed with error"
					" code %d", param_buffer[0]);
				k_sem_give(&data->device_sem);
				return -EIO;
			}
			if (mac_count) {
				*mac_count = param_buffer[2];
			}
		}
	}

	k_sem_give(&data->device_sem);

	return 0;
}

/* Legacy ECB single-block operation; input is zero-padded to 16 bytes.
 * See the header's doxygen for the contract and security caveats.
 */
int ataes132a_aes_ecb_block(const struct device *dev,
			    uint8_t key_id,
			    struct cipher_pkt *pkt)
{
	struct ataes132a_device_data *data = dev->data;
	uint8_t buf_len;
	uint8_t out_len;
	uint8_t return_code;
	uint8_t param_buffer[19];

	if (!pkt) {
		LOG_ERR("Parameter cannot be null");
		return -EINVAL;
	}

	buf_len = pkt->in_len;
	if (buf_len > 16) {
		LOG_ERR("input block cannot be above 16 bytes");
		return -EINVAL;
	}

	/* AES ECB can only be executed if the ChipConfig.LegacyE configuration
	 * is set to 1 and if KeyConfig[key_id].LegacyOK is set to 1.
	 */
	if (!(ataes132a_state[key_id].chip_config & ATAES_CHIPCONFIG_LEGACYE)) {
		LOG_ERR("legacy mode disabled");
		return -EINVAL;
	}

	if (!(ataes132a_state[key_id].key_config & ATAES_KEYCONFIG_LEGACYOK)) {
		LOG_ERR("key %x legacy mode disabled", key_id);
		return -EINVAL;
	}

	LOG_DBG("Chip config: %x", ataes132a_state[key_id].chip_config);
	LOG_DBG("Key ID: %d", key_id);
	LOG_DBG("Key config: %x", ataes132a_state[key_id].key_config);

	/* If KeyConfig[key_id].AuthKey is set, then prior authentication
	 * is required
	 */
	if (!(ataes132a_state[key_id].key_config & ATAES_KEYCONFIG_AUTHKEY)) {
		LOG_DBG("keep in mind key %x will require"
			" previous authentication", key_id);
	}

	if (!pkt->in_buf || !pkt->out_buf) {
		return 0;
	}

	k_sem_take(&data->device_sem, K_FOREVER);

	param_buffer[0] = 0x0;
	param_buffer[1] = key_id;
	param_buffer[2] = 0x0;
	memcpy(param_buffer + 3, pkt->in_buf, buf_len);
	(void)memset(param_buffer + 3 + buf_len, 0x0, 16 - buf_len);

	return_code = ataes132a_send_command(dev, ATAES_LEGACY_OP, 0x00,
					     param_buffer, buf_len + 3,
					     param_buffer, &out_len);

	if (return_code != 0U) {
		LOG_ERR("legacy command ended with code %d", return_code);
		k_sem_give(&data->device_sem);
		return -EINVAL;
	}

	if (out_len != 17U) {
		LOG_ERR("legacy command response has invalid"
			" size %d", out_len);
		k_sem_give(&data->device_sem);
		return -EINVAL;
	}
	if (param_buffer[0] != 0U) {
		LOG_ERR("legacy command failed with error"
			" code %d", param_buffer[0]);
		k_sem_give(&data->device_sem);
		return -EIO;
	}

	memcpy(pkt->out_buf, param_buffer + 1, 16);

	k_sem_give(&data->device_sem);

	return 0;
}

static int
do_ccm_encrypt_mac(struct cipher_ctx *ctx, struct cipher_aead_pkt *aead_op, uint8_t *nonce) { const struct device *dev = ctx->device; struct ataes132a_driver_state *state = ctx->drv_sessn_state; struct ataes132a_mac_mode mac_mode; uint8_t key_id; key_id = state->key_id; __ASSERT_NO_MSG(*(uint8_t *)ctx->key.handle == key_id); /* Removing all this salt from the MAC reduces the protection * but allows any other crypto implementations to authorize * the message. */ mac_mode.include_counter = false; mac_mode.include_serial = false; mac_mode.include_smallzone = false; if (aead_op->pkt->in_len <= 16 && aead_op->pkt->out_buf_max < 16) { LOG_ERR("Not enough space available in out buffer."); return -EINVAL; } if (aead_op->pkt->in_len > 16 && aead_op->pkt->out_buf_max < 32) { LOG_ERR("Not enough space available in out buffer."); return -EINVAL; } if (aead_op->pkt->in_len <= 16) { aead_op->pkt->out_len = 16; } else if (aead_op->pkt->in_len > 16) { aead_op->pkt->out_len = 32; } if (aead_op->ad != NULL || aead_op->ad_len != 0U) { LOG_ERR("Associated data is not supported."); return -EINVAL; } ataes132a_aes_ccm_encrypt(dev, key_id, &mac_mode, aead_op, nonce, NULL); return 0; } static int do_ccm_decrypt_auth(struct cipher_ctx *ctx, struct cipher_aead_pkt *aead_op, uint8_t *nonce) { const struct device *dev = ctx->device; struct ataes132a_driver_state *state = ctx->drv_sessn_state; struct ataes132a_mac_mode mac_mode; uint8_t key_id; key_id = state->key_id; __ASSERT_NO_MSG(*(uint8_t *)ctx->key.handle == key_id); /* Removing all this salt from the MAC reduces the protection * but allows any other crypto implementations to authorize * the message. 
*/ mac_mode.include_counter = false; mac_mode.include_serial = false; mac_mode.include_smallzone = false; if (aead_op->pkt->in_len <= 16 && aead_op->pkt->out_buf_max < 16) { LOG_ERR("Not enough space available in out buffer."); return -EINVAL; } if (aead_op->pkt->in_len > 16 && aead_op->pkt->out_buf_max < 32) { LOG_ERR("Not enough space available in out buffer."); return -EINVAL; } aead_op->pkt->ctx = ctx; if (aead_op->ad != NULL || aead_op->ad_len != 0U) { LOG_ERR("Associated data is not supported."); return -EINVAL; } /* Normal Decryption Mode will only decrypt host generated packets */ ataes132a_aes_ccm_decrypt(dev, key_id, &mac_mode, NULL, aead_op, nonce); return 0; } static int do_block(struct cipher_ctx *ctx, struct cipher_pkt *pkt) { const struct device *dev = ctx->device; struct ataes132a_driver_state *state = ctx->drv_sessn_state; uint8_t key_id; key_id = state->key_id; __ASSERT_NO_MSG(*(uint8_t *)ctx->key.handle == key_id); if (pkt->out_buf_max < 16) { LOG_ERR("Not enough space available in out buffer."); return -EINVAL; } pkt->out_len = 16; return ataes132a_aes_ecb_block(dev, key_id, pkt); } static int ataes132a_session_free(const struct device *dev, struct cipher_ctx *session) { struct ataes132a_driver_state *state = session->drv_sessn_state; ARG_UNUSED(dev); state->in_use = false; return 0; } static int ataes132a_session_setup(const struct device *dev, struct cipher_ctx *ctx, enum cipher_algo algo, enum cipher_mode mode, enum cipher_op op_type) { uint8_t key_id = *((uint8_t *)ctx->key.handle); const struct ataes132a_device_config *cfg = dev->config; uint8_t config; if (ataes132a_state[key_id].in_use) { LOG_ERR("Session in progress"); return -EINVAL; } if (mode == CRYPTO_CIPHER_MODE_CCM && ctx->mode_params.ccm_info.tag_len != 16U) { LOG_ERR("ATAES132A support 16 byte tag only."); return -EINVAL; } if (mode == CRYPTO_CIPHER_MODE_CCM && ctx->mode_params.ccm_info.nonce_len != 12U) { LOG_ERR("ATAES132A support 12 byte nonce only."); return -EINVAL; } 
ataes132a_state[key_id].in_use = true; read_reg_i2c(&cfg->i2c, ATAES_KEYCFG_REG(key_id), &config); ataes132a_state[key_id].key_config = config; read_reg_i2c(&cfg->i2c, ATAES_CHIPCONFIG_REG, &config); ataes132a_state[key_id].chip_config = config; ctx->drv_sessn_state = &ataes132a_state[key_id]; ctx->device = dev; if (algo != CRYPTO_CIPHER_ALGO_AES) { LOG_ERR("ATAES132A unsupported algorithm"); return -EINVAL; } /*ATAES132A support I2C polling only*/ if (!(ctx->flags & CAP_SYNC_OPS)) { LOG_ERR("Async not supported by this driver"); return -EINVAL; } if (ctx->keylen != ATAES132A_AES_KEY_SIZE) { LOG_ERR("ATAES132A unsupported key size"); return -EINVAL; } if (op_type == CRYPTO_CIPHER_OP_ENCRYPT) { switch (mode) { case CRYPTO_CIPHER_MODE_ECB: ctx->ops.block_crypt_hndlr = do_block; break; case CRYPTO_CIPHER_MODE_CCM: ctx->ops.ccm_crypt_hndlr = do_ccm_encrypt_mac; break; default: LOG_ERR("ATAES132A unsupported mode"); return -EINVAL; } } else { switch (mode) { case CRYPTO_CIPHER_MODE_ECB: ctx->ops.block_crypt_hndlr = do_block; break; case CRYPTO_CIPHER_MODE_CCM: ctx->ops.ccm_crypt_hndlr = do_ccm_decrypt_auth; break; default: LOG_ERR("ATAES132A unsupported mode"); return -EINVAL; } } ctx->ops.cipher_mode = mode; return 0; } static int ataes132a_query_caps(const struct device *dev) { return (CAP_OPAQUE_KEY_HNDL | CAP_SEPARATE_IO_BUFS | CAP_SYNC_OPS | CAP_AUTONONCE); } static const struct ataes132a_device_config ataes132a_config = { .i2c = I2C_DT_SPEC_INST_GET(0), }; static struct crypto_driver_api crypto_enc_funcs = { .cipher_begin_session = ataes132a_session_setup, .cipher_free_session = ataes132a_session_free, .cipher_async_callback_set = NULL, .query_hw_caps = ataes132a_query_caps, }; struct ataes132a_device_data ataes132a_data; DEVICE_DT_INST_DEFINE(0, ataes132a_init, NULL, &ataes132a_data, &ataes132a_config, POST_KERNEL, CONFIG_CRYPTO_INIT_PRIORITY, (void *)&crypto_enc_funcs); ```
/content/code_sandbox/drivers/crypto/crypto_ataes132a.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,966
```c /* * */ #include <zephyr/init.h> #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/sys/__assert.h> #include <zephyr/crypto/crypto.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/reset.h> #include <zephyr/sys/byteorder.h> #include <soc.h> #include "crypto_stm32_priv.h" #define LOG_LEVEL CONFIG_CRYPTO_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(crypto_stm32); #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_cryp) #define DT_DRV_COMPAT st_stm32_cryp #elif DT_HAS_COMPAT_STATUS_OKAY(st_stm32_aes) #define DT_DRV_COMPAT st_stm32_aes #else #error No STM32 HW Crypto Accelerator in device tree #endif #define CRYP_SUPPORT (CAP_RAW_KEY | CAP_SEPARATE_IO_BUFS | CAP_SYNC_OPS | \ CAP_NO_IV_PREFIX) #define BLOCK_LEN_BYTES 16 #define BLOCK_LEN_WORDS (BLOCK_LEN_BYTES / sizeof(uint32_t)) #define CRYPTO_MAX_SESSION CONFIG_CRYPTO_STM32_MAX_SESSION #if defined(CRYP_KEYSIZE_192B) #define STM32_CRYPTO_KEYSIZE_192B_SUPPORT #endif #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_cryp) #define STM32_CRYPTO_TYPEDEF CRYP_TypeDef #elif DT_HAS_COMPAT_STATUS_OKAY(st_stm32_aes) #define STM32_CRYPTO_TYPEDEF AES_TypeDef #endif struct crypto_stm32_session crypto_stm32_sessions[CRYPTO_MAX_SESSION]; static int copy_reverse_words(uint8_t *dst_buf, int dst_len, const uint8_t *src_buf, int src_len) { int i; if ((dst_len < src_len) || ((dst_len % 4) != 0)) { LOG_ERR("Buffer length error"); return -EINVAL; } memcpy(dst_buf, src_buf, src_len); for (i = 0; i < dst_len; i += sizeof(uint32_t)) { sys_mem_swap(&dst_buf[i], sizeof(uint32_t)); } return 0; } static int do_encrypt(struct cipher_ctx *ctx, uint8_t *in_buf, int in_len, uint8_t *out_buf) { HAL_StatusTypeDef status; struct crypto_stm32_data *data = CRYPTO_STM32_DATA(ctx->device); struct crypto_stm32_session *session = CRYPTO_STM32_SESSN(ctx); k_sem_take(&data->device_sem, K_FOREVER); status = HAL_CRYP_SetConfig(&data->hcryp, &session->config); if 
(status != HAL_OK) { LOG_ERR("Configuration error"); k_sem_give(&data->device_sem); return -EIO; } status = HAL_CRYP_Encrypt(&data->hcryp, (uint32_t *)in_buf, in_len, (uint32_t *)out_buf, HAL_MAX_DELAY); if (status != HAL_OK) { LOG_ERR("Encryption error"); k_sem_give(&data->device_sem); return -EIO; } k_sem_give(&data->device_sem); return 0; } static int do_decrypt(struct cipher_ctx *ctx, uint8_t *in_buf, int in_len, uint8_t *out_buf) { HAL_StatusTypeDef status; struct crypto_stm32_data *data = CRYPTO_STM32_DATA(ctx->device); struct crypto_stm32_session *session = CRYPTO_STM32_SESSN(ctx); k_sem_take(&data->device_sem, K_FOREVER); status = HAL_CRYP_SetConfig(&data->hcryp, &session->config); if (status != HAL_OK) { LOG_ERR("Configuration error"); k_sem_give(&data->device_sem); return -EIO; } status = HAL_CRYP_Decrypt(&data->hcryp, (uint32_t *)in_buf, in_len, (uint32_t *)out_buf, HAL_MAX_DELAY); if (status != HAL_OK) { LOG_ERR("Decryption error"); k_sem_give(&data->device_sem); return -EIO; } k_sem_give(&data->device_sem); return 0; } static int crypto_stm32_ecb_encrypt(struct cipher_ctx *ctx, struct cipher_pkt *pkt) { int ret; /* For security reasons, ECB mode should not be used to encrypt * more than one block. Use CBC mode instead. */ if (pkt->in_len > 16) { LOG_ERR("Cannot encrypt more than 1 block"); return -EINVAL; } ret = do_encrypt(ctx, pkt->in_buf, pkt->in_len, pkt->out_buf); if (ret == 0) { pkt->out_len = 16; } return ret; } static int crypto_stm32_ecb_decrypt(struct cipher_ctx *ctx, struct cipher_pkt *pkt) { int ret; /* For security reasons, ECB mode should not be used to encrypt * more than one block. Use CBC mode instead. 
*/ if (pkt->in_len > 16) { LOG_ERR("Cannot encrypt more than 1 block"); return -EINVAL; } ret = do_decrypt(ctx, pkt->in_buf, pkt->in_len, pkt->out_buf); if (ret == 0) { pkt->out_len = 16; } return ret; } static int crypto_stm32_cbc_encrypt(struct cipher_ctx *ctx, struct cipher_pkt *pkt, uint8_t *iv) { int ret; uint32_t vec[BLOCK_LEN_WORDS]; int out_offset = 0; struct crypto_stm32_session *session = CRYPTO_STM32_SESSN(ctx); (void)copy_reverse_words((uint8_t *)vec, sizeof(vec), iv, BLOCK_LEN_BYTES); session->config.pInitVect = vec; if ((ctx->flags & CAP_NO_IV_PREFIX) == 0U) { /* Prefix IV to ciphertext unless CAP_NO_IV_PREFIX is set. */ memcpy(pkt->out_buf, iv, 16); out_offset = 16; } ret = do_encrypt(ctx, pkt->in_buf, pkt->in_len, pkt->out_buf + out_offset); if (ret == 0) { pkt->out_len = pkt->in_len + out_offset; } return ret; } static int crypto_stm32_cbc_decrypt(struct cipher_ctx *ctx, struct cipher_pkt *pkt, uint8_t *iv) { int ret; uint32_t vec[BLOCK_LEN_WORDS]; int in_offset = 0; struct crypto_stm32_session *session = CRYPTO_STM32_SESSN(ctx); (void)copy_reverse_words((uint8_t *)vec, sizeof(vec), iv, BLOCK_LEN_BYTES); session->config.pInitVect = vec; if ((ctx->flags & CAP_NO_IV_PREFIX) == 0U) { in_offset = 16; } ret = do_decrypt(ctx, pkt->in_buf + in_offset, pkt->in_len, pkt->out_buf); if (ret == 0) { pkt->out_len = pkt->in_len - in_offset; } return ret; } static int crypto_stm32_ctr_encrypt(struct cipher_ctx *ctx, struct cipher_pkt *pkt, uint8_t *iv) { int ret; uint32_t ctr[BLOCK_LEN_WORDS] = {0}; int ivlen = BLOCK_LEN_BYTES - (ctx->mode_params.ctr_info.ctr_len >> 3); struct crypto_stm32_session *session = CRYPTO_STM32_SESSN(ctx); if (copy_reverse_words((uint8_t *)ctr, sizeof(ctr), iv, ivlen) != 0) { return -EIO; } session->config.pInitVect = ctr; ret = do_encrypt(ctx, pkt->in_buf, pkt->in_len, pkt->out_buf); if (ret == 0) { pkt->out_len = pkt->in_len; } return ret; } static int crypto_stm32_ctr_decrypt(struct cipher_ctx *ctx, struct cipher_pkt *pkt, uint8_t 
*iv) { int ret; uint32_t ctr[BLOCK_LEN_WORDS] = {0}; int ivlen = BLOCK_LEN_BYTES - (ctx->mode_params.ctr_info.ctr_len >> 3); struct crypto_stm32_session *session = CRYPTO_STM32_SESSN(ctx); if (copy_reverse_words((uint8_t *)ctr, sizeof(ctr), iv, ivlen) != 0) { return -EIO; } session->config.pInitVect = ctr; ret = do_decrypt(ctx, pkt->in_buf, pkt->in_len, pkt->out_buf); if (ret == 0) { pkt->out_len = pkt->in_len; } return ret; } static int crypto_stm32_get_unused_session_index(const struct device *dev) { int i; struct crypto_stm32_data *data = CRYPTO_STM32_DATA(dev); k_sem_take(&data->session_sem, K_FOREVER); for (i = 0; i < CRYPTO_MAX_SESSION; i++) { if (!crypto_stm32_sessions[i].in_use) { crypto_stm32_sessions[i].in_use = true; k_sem_give(&data->session_sem); return i; } } k_sem_give(&data->session_sem); return -1; } static int crypto_stm32_session_setup(const struct device *dev, struct cipher_ctx *ctx, enum cipher_algo algo, enum cipher_mode mode, enum cipher_op op_type) { int ctx_idx, ret; struct crypto_stm32_session *session; struct crypto_stm32_data *data = CRYPTO_STM32_DATA(dev); if (ctx->flags & ~(CRYP_SUPPORT)) { LOG_ERR("Unsupported flag"); return -EINVAL; } if (algo != CRYPTO_CIPHER_ALGO_AES) { LOG_ERR("Unsupported algo"); return -EINVAL; } /* The CRYP peripheral supports the AES ECB, CBC, CTR, CCM and GCM * modes of operation, of which ECB, CBC, CTR and CCM are supported * through the crypto API. However, in CCM mode, although the STM32Cube * HAL driver follows the documentation (cf. RM0090, par. 23.3) by * padding incomplete input data blocks in software prior encryption, * incorrect authentication tags are returned for input data which is * not a multiple of 128 bits. Therefore, CCM mode is not supported by * this driver. 
*/ if ((mode != CRYPTO_CIPHER_MODE_ECB) && (mode != CRYPTO_CIPHER_MODE_CBC) && (mode != CRYPTO_CIPHER_MODE_CTR)) { LOG_ERR("Unsupported mode"); return -EINVAL; } /* The STM32F4 CRYP peripheral supports key sizes of 128, 192 and 256 * bits. */ if ((ctx->keylen != 16U) && #if defined(STM32_CRYPTO_KEYSIZE_192B_SUPPORT) (ctx->keylen != 24U) && #endif (ctx->keylen != 32U)) { LOG_ERR("%u key size is not supported", ctx->keylen); return -EINVAL; } ctx_idx = crypto_stm32_get_unused_session_index(dev); if (ctx_idx < 0) { LOG_ERR("No free session for now"); return -ENOSPC; } session = &crypto_stm32_sessions[ctx_idx]; memset(&session->config, 0, sizeof(session->config)); if (data->hcryp.State == HAL_CRYP_STATE_RESET) { if (HAL_CRYP_Init(&data->hcryp) != HAL_OK) { LOG_ERR("Initialization error"); session->in_use = false; return -EIO; } } switch (ctx->keylen) { case 16U: session->config.KeySize = CRYP_KEYSIZE_128B; break; #if defined(STM32_CRYPTO_KEYSIZE_192B_SUPPORT) case 24U: session->config.KeySize = CRYP_KEYSIZE_192B; break; #endif case 32U: session->config.KeySize = CRYP_KEYSIZE_256B; break; } if (op_type == CRYPTO_CIPHER_OP_ENCRYPT) { switch (mode) { case CRYPTO_CIPHER_MODE_ECB: session->config.Algorithm = CRYP_AES_ECB; ctx->ops.block_crypt_hndlr = crypto_stm32_ecb_encrypt; break; case CRYPTO_CIPHER_MODE_CBC: session->config.Algorithm = CRYP_AES_CBC; ctx->ops.cbc_crypt_hndlr = crypto_stm32_cbc_encrypt; break; case CRYPTO_CIPHER_MODE_CTR: session->config.Algorithm = CRYP_AES_CTR; ctx->ops.ctr_crypt_hndlr = crypto_stm32_ctr_encrypt; break; default: break; } } else { switch (mode) { case CRYPTO_CIPHER_MODE_ECB: session->config.Algorithm = CRYP_AES_ECB; ctx->ops.block_crypt_hndlr = crypto_stm32_ecb_decrypt; break; case CRYPTO_CIPHER_MODE_CBC: session->config.Algorithm = CRYP_AES_CBC; ctx->ops.cbc_crypt_hndlr = crypto_stm32_cbc_decrypt; break; case CRYPTO_CIPHER_MODE_CTR: session->config.Algorithm = CRYP_AES_CTR; ctx->ops.ctr_crypt_hndlr = crypto_stm32_ctr_decrypt; break; 
default: break; } } ret = copy_reverse_words((uint8_t *)session->key, CRYPTO_STM32_AES_MAX_KEY_LEN, ctx->key.bit_stream, ctx->keylen); if (ret != 0) { return -EIO; } session->config.pKey = session->key; session->config.DataType = CRYP_DATATYPE_8B; session->config.DataWidthUnit = CRYP_DATAWIDTHUNIT_BYTE; ctx->drv_sessn_state = session; ctx->device = dev; return 0; } static int crypto_stm32_session_free(const struct device *dev, struct cipher_ctx *ctx) { int i; struct crypto_stm32_data *data = CRYPTO_STM32_DATA(dev); const struct crypto_stm32_config *cfg = CRYPTO_STM32_CFG(dev); struct crypto_stm32_session *session = CRYPTO_STM32_SESSN(ctx); session->in_use = false; k_sem_take(&data->session_sem, K_FOREVER); /* Disable peripheral only if there are no more active sessions. */ for (i = 0; i < CRYPTO_MAX_SESSION; i++) { if (crypto_stm32_sessions[i].in_use) { k_sem_give(&data->session_sem); return 0; } } /* Deinitialize and reset peripheral. */ if (HAL_CRYP_DeInit(&data->hcryp) != HAL_OK) { LOG_ERR("Deinitialization error"); k_sem_give(&data->session_sem); return -EIO; } (void)reset_line_toggle_dt(&cfg->reset); k_sem_give(&data->session_sem); return 0; } static int crypto_stm32_query_caps(const struct device *dev) { return CRYP_SUPPORT; } static int crypto_stm32_init(const struct device *dev) { const struct device *const clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); struct crypto_stm32_data *data = CRYPTO_STM32_DATA(dev); const struct crypto_stm32_config *cfg = CRYPTO_STM32_CFG(dev); if (!device_is_ready(clk)) { LOG_ERR("clock control device not ready"); return -ENODEV; } if (clock_control_on(clk, (clock_control_subsys_t)&cfg->pclken) != 0) { LOG_ERR("clock op failed\n"); return -EIO; } k_sem_init(&data->device_sem, 1, 1); k_sem_init(&data->session_sem, 1, 1); if (HAL_CRYP_DeInit(&data->hcryp) != HAL_OK) { LOG_ERR("Peripheral reset error"); return -EIO; } return 0; } static struct crypto_driver_api crypto_enc_funcs = { .cipher_begin_session = crypto_stm32_session_setup, 
.cipher_free_session = crypto_stm32_session_free, .cipher_async_callback_set = NULL, .query_hw_caps = crypto_stm32_query_caps, }; static struct crypto_stm32_data crypto_stm32_dev_data = { .hcryp = { .Instance = (STM32_CRYPTO_TYPEDEF *)DT_INST_REG_ADDR(0), } }; static const struct crypto_stm32_config crypto_stm32_dev_config = { .reset = RESET_DT_SPEC_INST_GET(0), .pclken = { .enr = DT_INST_CLOCKS_CELL(0, bits), .bus = DT_INST_CLOCKS_CELL(0, bus) } }; DEVICE_DT_INST_DEFINE(0, crypto_stm32_init, NULL, &crypto_stm32_dev_data, &crypto_stm32_dev_config, POST_KERNEL, CONFIG_CRYPTO_INIT_PRIORITY, (void *)&crypto_enc_funcs); ```
/content/code_sandbox/drivers/crypto/crypto_stm32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,795
```c /* * */ /** * @file Shim layer for TinyCrypt, making it complaint to crypto API. */ #include <tinycrypt/cbc_mode.h> #include <tinycrypt/ctr_mode.h> #include <tinycrypt/ccm_mode.h> #include <tinycrypt/constants.h> #include <tinycrypt/utils.h> #include <string.h> #include <zephyr/crypto/crypto.h> #include "crypto_tc_shim_priv.h" #define LOG_LEVEL CONFIG_CRYPTO_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(tinycrypt); #define CRYPTO_MAX_SESSION CONFIG_CRYPTO_TINYCRYPT_SHIM_MAX_SESSION static struct tc_shim_drv_state tc_driver_state[CRYPTO_MAX_SESSION]; static int do_cbc_encrypt(struct cipher_ctx *ctx, struct cipher_pkt *op, uint8_t *iv) { struct tc_shim_drv_state *data = ctx->drv_sessn_state; if (tc_cbc_mode_encrypt(op->out_buf, op->out_buf_max, op->in_buf, op->in_len, iv, &data->session_key) == TC_CRYPTO_FAIL) { LOG_ERR("TC internal error during CBC encryption"); return -EIO; } /* out_len is the same as in_len in CBC mode */ op->out_len = op->in_len; return 0; } static int do_cbc_decrypt(struct cipher_ctx *ctx, struct cipher_pkt *op, uint8_t *iv) { struct tc_shim_drv_state *data = ctx->drv_sessn_state; /* TinyCrypt expects the IV and cipher text to be in a contiguous * buffer for efficiency */ if (iv != op->in_buf) { LOG_ERR("TC needs contiguous iv and ciphertext"); return -EIO; } if (tc_cbc_mode_decrypt(op->out_buf, op->out_buf_max, op->in_buf + TC_AES_BLOCK_SIZE, op->in_len - TC_AES_BLOCK_SIZE, op->in_buf, &data->session_key) == TC_CRYPTO_FAIL) { LOG_ERR("Func TC internal error during CBC decryption"); return -EIO; } /* out_len is the same as in_len in CBC mode */ op->out_len = op->in_len; return 0; } static int do_ctr_op(struct cipher_ctx *ctx, struct cipher_pkt *op, uint8_t *iv) { struct tc_shim_drv_state *data = ctx->drv_sessn_state; uint8_t ctr[16] = {0}; /* CTR mode Counter = iv:ctr */ int ivlen = ctx->keylen - (ctx->mode_params.ctr_info.ctr_len >> 3); /* Tinycrypt takes the last 4 bytes of the counter parameter as the * true counter 
start. IV forms the first 12 bytes of the split counter. */ memcpy(ctr, iv, ivlen); if (tc_ctr_mode(op->out_buf, op->out_buf_max, op->in_buf, op->in_len, ctr, &data->session_key) == TC_CRYPTO_FAIL) { LOG_ERR("TC internal error during CTR OP"); return -EIO; } /* out_len is the same as in_len in CTR mode */ op->out_len = op->in_len; return 0; } static int do_ccm_encrypt_mac(struct cipher_ctx *ctx, struct cipher_aead_pkt *aead_op, uint8_t *nonce) { struct tc_ccm_mode_struct ccm; struct tc_shim_drv_state *data = ctx->drv_sessn_state; struct ccm_params *ccm_param = &ctx->mode_params.ccm_info; struct cipher_pkt *op = aead_op->pkt; if (tc_ccm_config(&ccm, &data->session_key, nonce, ccm_param->nonce_len, ccm_param->tag_len) == TC_CRYPTO_FAIL) { LOG_ERR("TC internal error during CCM encryption config"); return -EIO; } if (tc_ccm_generation_encryption(op->out_buf, op->out_buf_max, aead_op->ad, aead_op->ad_len, op->in_buf, op->in_len, &ccm) == TC_CRYPTO_FAIL) { LOG_ERR("TC internal error during CCM Encryption OP"); return -EIO; } /* Looks like TinyCrypt appends the MAC to the end of out_buf as it * does not give a separate hash parameter. The user needs to be aware * of this and provide sufficient buffer space in output buffer to hold * both encrypted output and hash */ if (aead_op->tag) { memcpy(aead_op->tag, op->out_buf + op->in_len, ccm.mlen); } /* Before returning TC_CRYPTO_SUCCESS, tc_ccm_generation_encryption() * will advance the output buffer pointer by op->in_len bytes, * and then increment it ccm.mlen times (while writing to it). 
*/ op->out_len = op->in_len + ccm.mlen; return 0; } static int do_ccm_decrypt_auth(struct cipher_ctx *ctx, struct cipher_aead_pkt *aead_op, uint8_t *nonce) { struct tc_ccm_mode_struct ccm; struct tc_shim_drv_state *data = ctx->drv_sessn_state; struct ccm_params *ccm_param = &ctx->mode_params.ccm_info; struct cipher_pkt *op = aead_op->pkt; if (tc_ccm_config(&ccm, &data->session_key, nonce, ccm_param->nonce_len, ccm_param->tag_len) == TC_CRYPTO_FAIL) { LOG_ERR("TC internal error during CCM decryption config"); return -EIO; } /* TinyCrypt expects the hash/MAC to be present at the end of in_buf * as it doesnt take a separate hash parameter. Ideally this should * be moved to a ctx.flag check during session_setup, later. */ if (aead_op->tag != op->in_buf + op->in_len) { LOG_ERR("TC needs contiguous hash at the end of inbuf"); return -EIO; } if (tc_ccm_decryption_verification(op->out_buf, op->out_buf_max, aead_op->ad, aead_op->ad_len, op->in_buf, op->in_len + ccm_param->tag_len, &ccm) == TC_CRYPTO_FAIL) { LOG_ERR("TC internal error during CCM decryption OP"); return -EIO; } op->out_len = op->in_len + ccm_param->tag_len; return 0; } static int get_unused_session(void) { int i; for (i = 0; i < CRYPTO_MAX_SESSION; i++) { if (tc_driver_state[i].in_use == 0) { tc_driver_state[i].in_use = 1; break; } } return i; } static int tc_session_setup(const struct device *dev, struct cipher_ctx *ctx, enum cipher_algo algo, enum cipher_mode mode, enum cipher_op op_type) { struct tc_shim_drv_state *data; int idx; ARG_UNUSED(dev); /* The shim currently supports only CBC or CTR mode for AES */ if (algo != CRYPTO_CIPHER_ALGO_AES) { LOG_ERR("TC Shim Unsupported algo"); return -EINVAL; } /* TinyCrypt being a software library, only synchronous operations * make sense. 
*/ if (!(ctx->flags & CAP_SYNC_OPS)) { LOG_ERR("Async not supported by this driver"); return -EINVAL; } if (ctx->keylen != TC_AES_KEY_SIZE) { /* TinyCrypt supports only 128 bits */ LOG_ERR("TC Shim Unsupported key size"); return -EINVAL; } if (op_type == CRYPTO_CIPHER_OP_ENCRYPT) { switch (mode) { case CRYPTO_CIPHER_MODE_CBC: ctx->ops.cbc_crypt_hndlr = do_cbc_encrypt; break; case CRYPTO_CIPHER_MODE_CTR: if (ctx->mode_params.ctr_info.ctr_len != 32U) { LOG_ERR("Tinycrypt supports only 32 bit " "counter"); return -EINVAL; } ctx->ops.ctr_crypt_hndlr = do_ctr_op; break; case CRYPTO_CIPHER_MODE_CCM: ctx->ops.ccm_crypt_hndlr = do_ccm_encrypt_mac; break; default: LOG_ERR("TC Shim Unsupported mode"); return -EINVAL; } } else { switch (mode) { case CRYPTO_CIPHER_MODE_CBC: ctx->ops.cbc_crypt_hndlr = do_cbc_decrypt; break; case CRYPTO_CIPHER_MODE_CTR: /* Maybe validate CTR length */ if (ctx->mode_params.ctr_info.ctr_len != 32U) { LOG_ERR("Tinycrypt supports only 32 bit " "counter"); return -EINVAL; } ctx->ops.ctr_crypt_hndlr = do_ctr_op; break; case CRYPTO_CIPHER_MODE_CCM: ctx->ops.ccm_crypt_hndlr = do_ccm_decrypt_auth; break; default: LOG_ERR("TC Shim Unsupported mode"); return -EINVAL; } } ctx->ops.cipher_mode = mode; idx = get_unused_session(); if (idx == CRYPTO_MAX_SESSION) { LOG_ERR("Max sessions in progress"); return -ENOSPC; } data = &tc_driver_state[idx]; if (tc_aes128_set_encrypt_key(&data->session_key, ctx->key.bit_stream) == TC_CRYPTO_FAIL) { LOG_ERR("TC internal error in setting key"); tc_driver_state[idx].in_use = 0; return -EIO; } ctx->drv_sessn_state = data; return 0; } static int tc_query_caps(const struct device *dev) { return (CAP_RAW_KEY | CAP_SEPARATE_IO_BUFS | CAP_SYNC_OPS); } static int tc_session_free(const struct device *dev, struct cipher_ctx *sessn) { struct tc_shim_drv_state *data = sessn->drv_sessn_state; ARG_UNUSED(dev); (void)memset(data, 0, sizeof(struct tc_shim_drv_state)); data->in_use = 0; return 0; } static int tc_shim_init(const struct 
device *dev) { int i; ARG_UNUSED(dev); for (i = 0; i < CRYPTO_MAX_SESSION; i++) { tc_driver_state[i].in_use = 0; } return 0; } static struct crypto_driver_api crypto_enc_funcs = { .cipher_begin_session = tc_session_setup, .cipher_free_session = tc_session_free, .cipher_async_callback_set = NULL, .query_hw_caps = tc_query_caps, }; DEVICE_DEFINE(crypto_tinycrypt, CONFIG_CRYPTO_TINYCRYPT_SHIM_DRV_NAME, &tc_shim_init, NULL, NULL, NULL, POST_KERNEL, CONFIG_CRYPTO_INIT_PRIORITY, (void *)&crypto_enc_funcs); ```
/content/code_sandbox/drivers/crypto/crypto_tc_shim.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,432
```c /* * */ /** * @file Shim layer for mbedTLS, crypto API compliant. */ #include <zephyr/kernel.h> #include <zephyr/init.h> #include <errno.h> #include <zephyr/crypto/crypto.h> #if !defined(CONFIG_MBEDTLS_CFG_FILE) #include "mbedtls/config.h" #else #include CONFIG_MBEDTLS_CFG_FILE #endif /* CONFIG_MBEDTLS_CFG_FILE */ #include <mbedtls/ccm.h> #ifdef CONFIG_MBEDTLS_CIPHER_GCM_ENABLED #include <mbedtls/gcm.h> #endif #include <mbedtls/aes.h> #include <mbedtls/sha256.h> #include <mbedtls/sha512.h> #define MTLS_SUPPORT (CAP_RAW_KEY | CAP_SEPARATE_IO_BUFS | CAP_SYNC_OPS | \ CAP_NO_IV_PREFIX) #define LOG_LEVEL CONFIG_CRYPTO_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(mbedtls); struct mtls_shim_session { union { mbedtls_ccm_context mtls_ccm; #ifdef CONFIG_MBEDTLS_CIPHER_GCM_ENABLED mbedtls_gcm_context mtls_gcm; #endif mbedtls_aes_context mtls_aes; mbedtls_sha256_context mtls_sha256; mbedtls_sha512_context mtls_sha512; }; bool in_use; union { enum cipher_mode mode; enum hash_algo algo; }; }; #define CRYPTO_MAX_SESSION CONFIG_CRYPTO_MBEDTLS_SHIM_MAX_SESSION struct mtls_shim_session mtls_sessions[CRYPTO_MAX_SESSION]; #if defined(MBEDTLS_MEMORY_BUFFER_ALLOC_C) #include "mbedtls/memory_buffer_alloc.h" #else #error "You need to define MBEDTLS_MEMORY_BUFFER_ALLOC_C" #endif /* MBEDTLS_MEMORY_BUFFER_ALLOC_C */ #define MTLS_GET_CTX(c, m) \ (&((struct mtls_shim_session *)c->drv_sessn_state)->mtls_ ## m) #define MTLS_GET_ALGO(c) \ (((struct mtls_shim_session *)c->drv_sessn_state)->algo) int mtls_ecb_encrypt(struct cipher_ctx *ctx, struct cipher_pkt *pkt) { int ret; mbedtls_aes_context *ecb_ctx = MTLS_GET_CTX(ctx, aes); /* For security reasons, ECB mode should not be used to encrypt * more than one block. Use CBC mode instead. 
*/ if (pkt->in_len > 16) { LOG_ERR("Cannot encrypt more than 1 block"); return -EINVAL; } ret = mbedtls_aes_crypt_ecb(ecb_ctx, MBEDTLS_AES_ENCRYPT, pkt->in_buf, pkt->out_buf); if (ret) { LOG_ERR("Could not encrypt (%d)", ret); return -EINVAL; } pkt->out_len = 16; return 0; } int mtls_ecb_decrypt(struct cipher_ctx *ctx, struct cipher_pkt *pkt) { int ret; mbedtls_aes_context *ecb_ctx = MTLS_GET_CTX(ctx, aes); /* For security reasons, ECB mode should not be used to decrypt * more than one block. Use CBC mode instead. */ if (pkt->in_len > 16) { LOG_ERR("Cannot decrypt more than 1 block"); return -EINVAL; } ret = mbedtls_aes_crypt_ecb(ecb_ctx, MBEDTLS_AES_DECRYPT, pkt->in_buf, pkt->out_buf); if (ret) { LOG_ERR("Could not encrypt (%d)", ret); return -EINVAL; } pkt->out_len = 16; return 0; } int mtls_cbc_encrypt(struct cipher_ctx *ctx, struct cipher_pkt *pkt, uint8_t *iv) { int ret, iv_bytes; uint8_t *p_iv, iv_loc[16]; mbedtls_aes_context *cbc_ctx = MTLS_GET_CTX(ctx, aes); if ((ctx->flags & CAP_NO_IV_PREFIX) == 0U) { /* Prefix IV to ciphertext, which is default behavior of Zephyr * crypto API, unless CAP_NO_IV_PREFIX is requested. 
*/ iv_bytes = 16; memcpy(pkt->out_buf, iv, 16); p_iv = iv; } else { iv_bytes = 0; memcpy(iv_loc, iv, 16); p_iv = iv_loc; } ret = mbedtls_aes_crypt_cbc(cbc_ctx, MBEDTLS_AES_ENCRYPT, pkt->in_len, p_iv, pkt->in_buf, pkt->out_buf + iv_bytes); if (ret) { LOG_ERR("Could not encrypt (%d)", ret); return -EINVAL; } pkt->out_len = pkt->in_len + iv_bytes; return 0; } int mtls_cbc_decrypt(struct cipher_ctx *ctx, struct cipher_pkt *pkt, uint8_t *iv) { int ret, iv_bytes; uint8_t *p_iv, iv_loc[16]; mbedtls_aes_context *cbc_ctx = MTLS_GET_CTX(ctx, aes); if ((ctx->flags & CAP_NO_IV_PREFIX) == 0U) { iv_bytes = 16; p_iv = iv; } else { iv_bytes = 0; memcpy(iv_loc, iv, 16); p_iv = iv_loc; } ret = mbedtls_aes_crypt_cbc(cbc_ctx, MBEDTLS_AES_DECRYPT, pkt->in_len, p_iv, pkt->in_buf + iv_bytes, pkt->out_buf); if (ret) { LOG_ERR("Could not encrypt (%d)", ret); return -EINVAL; } pkt->out_len = pkt->in_len - iv_bytes; return 0; } static int mtls_ccm_encrypt_auth(struct cipher_ctx *ctx, struct cipher_aead_pkt *apkt, uint8_t *nonce) { mbedtls_ccm_context *mtls_ctx = MTLS_GET_CTX(ctx, ccm); int ret; ret = mbedtls_ccm_encrypt_and_tag(mtls_ctx, apkt->pkt->in_len, nonce, ctx->mode_params.ccm_info.nonce_len, apkt->ad, apkt->ad_len, apkt->pkt->in_buf, apkt->pkt->out_buf, apkt->tag, ctx->mode_params.ccm_info.tag_len); if (ret) { LOG_ERR("Could not encrypt/auth (%d)", ret); /*ToDo: try to return relevant code depending on ret? */ return -EINVAL; } /* This is equivalent to what the TinyCrypt shim does in * do_ccm_encrypt_mac(). 
*/ apkt->pkt->out_len = apkt->pkt->in_len; apkt->pkt->out_len += ctx->mode_params.ccm_info.tag_len; return 0; } static int mtls_ccm_decrypt_auth(struct cipher_ctx *ctx, struct cipher_aead_pkt *apkt, uint8_t *nonce) { mbedtls_ccm_context *mtls_ctx = MTLS_GET_CTX(ctx, ccm); int ret; ret = mbedtls_ccm_auth_decrypt(mtls_ctx, apkt->pkt->in_len, nonce, ctx->mode_params.ccm_info.nonce_len, apkt->ad, apkt->ad_len, apkt->pkt->in_buf, apkt->pkt->out_buf, apkt->tag, ctx->mode_params.ccm_info.tag_len); if (ret) { if (ret == MBEDTLS_ERR_CCM_AUTH_FAILED) { LOG_ERR("Message authentication failed"); return -EFAULT; } LOG_ERR("Could not decrypt/auth (%d)", ret); /*ToDo: try to return relevant code depending on ret? */ return -EINVAL; } apkt->pkt->out_len = apkt->pkt->in_len; apkt->pkt->out_len += ctx->mode_params.ccm_info.tag_len; return 0; } #ifdef CONFIG_MBEDTLS_CIPHER_GCM_ENABLED static int mtls_gcm_encrypt_auth(struct cipher_ctx *ctx, struct cipher_aead_pkt *apkt, uint8_t *nonce) { mbedtls_gcm_context *mtls_ctx = MTLS_GET_CTX(ctx, gcm); int ret; ret = mbedtls_gcm_crypt_and_tag(mtls_ctx, MBEDTLS_GCM_ENCRYPT, apkt->pkt->in_len, nonce, ctx->mode_params.gcm_info.nonce_len, apkt->ad, apkt->ad_len, apkt->pkt->in_buf, apkt->pkt->out_buf, ctx->mode_params.gcm_info.tag_len, apkt->tag); if (ret) { LOG_ERR("Could not encrypt/auth (%d)", ret); return -EINVAL; } /* This is equivalent to what is done in mtls_ccm_encrypt_auth(). 
*/ apkt->pkt->out_len = apkt->pkt->in_len; apkt->pkt->out_len += ctx->mode_params.gcm_info.tag_len; return 0; } static int mtls_gcm_decrypt_auth(struct cipher_ctx *ctx, struct cipher_aead_pkt *apkt, uint8_t *nonce) { mbedtls_gcm_context *mtls_ctx = MTLS_GET_CTX(ctx, gcm); int ret; ret = mbedtls_gcm_auth_decrypt(mtls_ctx, apkt->pkt->in_len, nonce, ctx->mode_params.gcm_info.nonce_len, apkt->ad, apkt->ad_len, apkt->tag, ctx->mode_params.gcm_info.tag_len, apkt->pkt->in_buf, apkt->pkt->out_buf); if (ret) { if (ret == MBEDTLS_ERR_GCM_AUTH_FAILED) { LOG_ERR("Message authentication failed"); return -EFAULT; } LOG_ERR("Could not decrypt/auth (%d)", ret); return -EINVAL; } apkt->pkt->out_len = apkt->pkt->in_len; apkt->pkt->out_len += ctx->mode_params.gcm_info.tag_len; return 0; } #endif /* CONFIG_MBEDTLS_CIPHER_GCM_ENABLED */ static int mtls_get_unused_session_index(void) { int i; for (i = 0; i < CRYPTO_MAX_SESSION; i++) { if (!mtls_sessions[i].in_use) { mtls_sessions[i].in_use = true; return i; } } return -1; } static int mtls_session_setup(const struct device *dev, struct cipher_ctx *ctx, enum cipher_algo algo, enum cipher_mode mode, enum cipher_op op_type) { mbedtls_aes_context *aes_ctx; mbedtls_ccm_context *ccm_ctx; #ifdef CONFIG_MBEDTLS_CIPHER_GCM_ENABLED mbedtls_gcm_context *gcm_ctx; #endif int ctx_idx; int ret; if (ctx->flags & ~(MTLS_SUPPORT)) { LOG_ERR("Unsupported flag"); return -EINVAL; } if (algo != CRYPTO_CIPHER_ALGO_AES) { LOG_ERR("Unsupported algo"); return -EINVAL; } if (mode != CRYPTO_CIPHER_MODE_CCM && mode != CRYPTO_CIPHER_MODE_CBC && #ifdef CONFIG_MBEDTLS_CIPHER_GCM_ENABLED mode != CRYPTO_CIPHER_MODE_GCM && #endif mode != CRYPTO_CIPHER_MODE_ECB) { LOG_ERR("Unsupported mode"); return -EINVAL; } if (ctx->keylen != 16U) { LOG_ERR("%u key size is not supported", ctx->keylen); return -EINVAL; } ctx_idx = mtls_get_unused_session_index(); if (ctx_idx < 0) { LOG_ERR("No free session for now"); return -ENOSPC; } switch (mode) { case CRYPTO_CIPHER_MODE_ECB: aes_ctx 
= &mtls_sessions[ctx_idx].mtls_aes; mbedtls_aes_init(aes_ctx); if (op_type == CRYPTO_CIPHER_OP_ENCRYPT) { ret = mbedtls_aes_setkey_enc(aes_ctx, ctx->key.bit_stream, ctx->keylen * 8U); ctx->ops.block_crypt_hndlr = mtls_ecb_encrypt; } else { ret = mbedtls_aes_setkey_dec(aes_ctx, ctx->key.bit_stream, ctx->keylen * 8U); ctx->ops.block_crypt_hndlr = mtls_ecb_decrypt; } if (ret) { LOG_ERR("AES_ECB: failed at setkey (%d)", ret); ctx->ops.block_crypt_hndlr = NULL; mtls_sessions[ctx_idx].in_use = false; return -EINVAL; } break; case CRYPTO_CIPHER_MODE_CBC: aes_ctx = &mtls_sessions[ctx_idx].mtls_aes; mbedtls_aes_init(aes_ctx); if (op_type == CRYPTO_CIPHER_OP_ENCRYPT) { ret = mbedtls_aes_setkey_enc(aes_ctx, ctx->key.bit_stream, ctx->keylen * 8U); ctx->ops.cbc_crypt_hndlr = mtls_cbc_encrypt; } else { ret = mbedtls_aes_setkey_dec(aes_ctx, ctx->key.bit_stream, ctx->keylen * 8U); ctx->ops.cbc_crypt_hndlr = mtls_cbc_decrypt; } if (ret) { LOG_ERR("AES_CBC: failed at setkey (%d)", ret); ctx->ops.cbc_crypt_hndlr = NULL; mtls_sessions[ctx_idx].in_use = false; return -EINVAL; } break; case CRYPTO_CIPHER_MODE_CCM: ccm_ctx = &mtls_sessions[ctx_idx].mtls_ccm; mbedtls_ccm_init(ccm_ctx); ret = mbedtls_ccm_setkey(ccm_ctx, MBEDTLS_CIPHER_ID_AES, ctx->key.bit_stream, ctx->keylen * 8U); if (ret) { LOG_ERR("AES_CCM: failed at setkey (%d)", ret); mtls_sessions[ctx_idx].in_use = false; return -EINVAL; } if (op_type == CRYPTO_CIPHER_OP_ENCRYPT) { ctx->ops.ccm_crypt_hndlr = mtls_ccm_encrypt_auth; } else { ctx->ops.ccm_crypt_hndlr = mtls_ccm_decrypt_auth; } break; #ifdef CONFIG_MBEDTLS_CIPHER_GCM_ENABLED case CRYPTO_CIPHER_MODE_GCM: gcm_ctx = &mtls_sessions[ctx_idx].mtls_gcm; mbedtls_gcm_init(gcm_ctx); ret = mbedtls_gcm_setkey(gcm_ctx, MBEDTLS_CIPHER_ID_AES, ctx->key.bit_stream, ctx->keylen * 8U); if (ret) { LOG_ERR("AES_GCM: failed at setkey (%d)", ret); mtls_sessions[ctx_idx].in_use = false; return -EINVAL; } if (op_type == CRYPTO_CIPHER_OP_ENCRYPT) { ctx->ops.gcm_crypt_hndlr = 
mtls_gcm_encrypt_auth; } else { ctx->ops.gcm_crypt_hndlr = mtls_gcm_decrypt_auth; } break; #endif /* CONFIG_MBEDTLS_CIPHER_GCM_ENABLED */ default: LOG_ERR("Unhandled mode"); mtls_sessions[ctx_idx].in_use = false; return -EINVAL; } mtls_sessions[ctx_idx].mode = mode; ctx->drv_sessn_state = &mtls_sessions[ctx_idx]; return ret; } static int mtls_session_free(const struct device *dev, struct cipher_ctx *ctx) { struct mtls_shim_session *mtls_session = (struct mtls_shim_session *)ctx->drv_sessn_state; if (mtls_session->mode == CRYPTO_CIPHER_MODE_CCM) { mbedtls_ccm_free(&mtls_session->mtls_ccm); #ifdef CONFIG_MBEDTLS_CIPHER_GCM_ENABLED } else if (mtls_session->mode == CRYPTO_CIPHER_MODE_GCM) { mbedtls_gcm_free(&mtls_session->mtls_gcm); #endif } else { mbedtls_aes_free(&mtls_session->mtls_aes); } mtls_session->in_use = false; return 0; } static int mtls_sha256_compute(struct hash_ctx *ctx, struct hash_pkt *pkt, bool finish) { int ret; mbedtls_sha256_context *sha256_ctx = MTLS_GET_CTX(ctx, sha256); if (!ctx->started) { ret = mbedtls_sha256_starts(sha256_ctx, MTLS_GET_ALGO(ctx) == CRYPTO_HASH_ALGO_SHA224); if (ret != 0) { LOG_ERR("Could not compute the hash"); return -EINVAL; } ctx->started = true; } ret = mbedtls_sha256_update(sha256_ctx, pkt->in_buf, pkt->in_len); if (ret != 0) { LOG_ERR("Could not update the hash"); ctx->started = false; return -EINVAL; } if (finish) { ctx->started = false; ret = mbedtls_sha256_finish(sha256_ctx, pkt->out_buf); if (ret != 0) { LOG_ERR("Could not compute the hash"); return -EINVAL; } } return 0; } static int mtls_sha512_compute(struct hash_ctx *ctx, struct hash_pkt *pkt, bool finish) { int ret; mbedtls_sha512_context *sha512_ctx = MTLS_GET_CTX(ctx, sha512); if (!ctx->started) { ret = mbedtls_sha512_starts(sha512_ctx, MTLS_GET_ALGO(ctx) == CRYPTO_HASH_ALGO_SHA384); if (ret != 0) { LOG_ERR("Could not compute the hash"); return -EINVAL; } ctx->started = true; } ret = mbedtls_sha512_update(sha512_ctx, pkt->in_buf, pkt->in_len); if (ret != 0) { 
LOG_ERR("Could not update the hash"); ctx->started = false; return -EINVAL; } if (finish) { ctx->started = false; ret = mbedtls_sha512_finish(sha512_ctx, pkt->out_buf); if (ret != 0) { LOG_ERR("Could not compute the hash"); return -EINVAL; } } return 0; } static int mtls_hash_session_setup(const struct device *dev, struct hash_ctx *ctx, enum hash_algo algo) { int ctx_idx; if (ctx->flags & ~(MTLS_SUPPORT)) { LOG_ERR("Unsupported flag"); return -EINVAL; } if ((algo != CRYPTO_HASH_ALGO_SHA224) && (algo != CRYPTO_HASH_ALGO_SHA256) && (algo != CRYPTO_HASH_ALGO_SHA384) && (algo != CRYPTO_HASH_ALGO_SHA512)) { LOG_ERR("Unsupported algo: %d", algo); return -EINVAL; } ctx_idx = mtls_get_unused_session_index(); if (ctx_idx < 0) { LOG_ERR("No free session for now"); return -ENOSPC; } mtls_sessions[ctx_idx].algo = algo; ctx->drv_sessn_state = &mtls_sessions[ctx_idx]; ctx->started = false; if ((algo == CRYPTO_HASH_ALGO_SHA224) || (algo == CRYPTO_HASH_ALGO_SHA256)) { mbedtls_sha256_context *sha256_ctx = &mtls_sessions[ctx_idx].mtls_sha256; mbedtls_sha256_init(sha256_ctx); ctx->hash_hndlr = mtls_sha256_compute; } else { mbedtls_sha512_context *sha512_ctx = &mtls_sessions[ctx_idx].mtls_sha512; mbedtls_sha512_init(sha512_ctx); ctx->hash_hndlr = mtls_sha512_compute; } return 0; } static int mtls_hash_session_free(const struct device *dev, struct hash_ctx *ctx) { struct mtls_shim_session *mtls_session = (struct mtls_shim_session *)ctx->drv_sessn_state; if (mtls_session->algo == CRYPTO_HASH_ALGO_SHA256) { mbedtls_sha256_free(&mtls_session->mtls_sha256); } else { mbedtls_sha512_free(&mtls_session->mtls_sha512); } mtls_session->in_use = false; return 0; } static int mtls_query_caps(const struct device *dev) { return MTLS_SUPPORT; } static struct crypto_driver_api mtls_crypto_funcs = { .cipher_begin_session = mtls_session_setup, .cipher_free_session = mtls_session_free, .cipher_async_callback_set = NULL, .hash_begin_session = mtls_hash_session_setup, .hash_free_session = 
mtls_hash_session_free, .query_hw_caps = mtls_query_caps, }; DEVICE_DEFINE(crypto_mtls, CONFIG_CRYPTO_MBEDTLS_SHIM_DRV_NAME, NULL, NULL, NULL, NULL, POST_KERNEL, CONFIG_CRYPTO_INIT_PRIORITY, (void *)&mtls_crypto_funcs); ```
/content/code_sandbox/drivers/crypto/crypto_mtls_shim.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,583
```unknown config CRYPTO_INTEL_SHA bool "Intel SHA driver" default y depends on DT_HAS_INTEL_ADSP_SHA_ENABLED help Enable Intel SHA driver. To be used on ACE and CAVS platforms. ```
/content/code_sandbox/drivers/crypto/Kconfig.intel
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
49
```c
/*
 *
 */

/* NXP MCUX DCP (Data Co-Processor) crypto driver: AES-CBC/ECB ciphers
 * and SHA-256 hashing via the vendor fsl_dcp HAL, exposed through the
 * Zephyr crypto API. Data cache is disabled around DCP DMA operations.
 */

#define DT_DRV_COMPAT nxp_mcux_dcp

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(mcux_dcp, CONFIG_CRYPTO_LOG_LEVEL);

#include <errno.h>
#include <string.h>
#include <zephyr/kernel.h>
#include <zephyr/cache.h>
#include <zephyr/crypto/crypto.h>
#include <zephyr/sys/util.h>

#include <fsl_dcp.h>

/* Cipher sessions use raw keys, separate buffers, sync ops, optional
 * no-IV-prefix; hash sessions are a subset.
 */
#define CRYPTO_DCP_CIPHER_CAPS (CAP_RAW_KEY | CAP_SEPARATE_IO_BUFS |\
				CAP_SYNC_OPS | CAP_NO_IV_PREFIX)
#define CRYPTO_DCP_HASH_CAPS (CAP_SEPARATE_IO_BUFS | CAP_SYNC_OPS)

/* One DCP channel/key-slot pair per session (assigned in init). */
struct crypto_dcp_session {
	dcp_handle_t handle;
	dcp_hash_ctx_t hash_ctx;
	bool in_use;
};

struct crypto_dcp_config {
	DCP_Type *base;
};

struct crypto_dcp_data {
	struct crypto_dcp_session sessions[CONFIG_CRYPTO_MCUX_DCP_MAX_SESSION];
};

/* Helper function to convert common FSL error status codes to errno codes */
static inline int fsl_to_errno(status_t status)
{
	switch (status) {
	case kStatus_Success:
		return 0;
	case kStatus_InvalidArgument:
		return -EINVAL;
	case kStatus_Timeout:
		return -EAGAIN;
	}

	return -1;
}

/* Claim the first free session slot; NULL when the pool is exhausted. */
static struct crypto_dcp_session *get_session(const struct device *dev)
{
	struct crypto_dcp_data *data = dev->data;

	for (size_t i = 0; i < CONFIG_CRYPTO_MCUX_DCP_MAX_SESSION; ++i) {
		if (!data->sessions[i].in_use) {
			data->sessions[i].in_use = true;
			return &data->sessions[i];
		}
	}

	return NULL;
}

/* Return a session slot to the pool. */
static inline void free_session(struct crypto_dcp_session *session)
{
	session->in_use = false;
}

/* Report supported capability flags. */
static int crypto_dcp_query_hw_caps(const struct device *dev)
{
	ARG_UNUSED(dev);
	return CRYPTO_DCP_CIPHER_CAPS | CRYPTO_DCP_HASH_CAPS;
}

/* AES-CBC encrypt; IV is prepended to the output unless
 * CAP_NO_IV_PREFIX was requested at session setup.
 */
static int crypto_dcp_aes_cbc_encrypt(struct cipher_ctx *ctx,
				      struct cipher_pkt *pkt, uint8_t *iv)
{
	const struct crypto_dcp_config *cfg = ctx->device->config;
	struct crypto_dcp_session *session = ctx->drv_sessn_state;
	status_t status;
	size_t iv_bytes;
	uint8_t *p_iv, iv_loc[16];

	if ((ctx->flags & CAP_NO_IV_PREFIX) == 0U) {
		/* Prefix IV to ciphertext, which is default behavior of Zephyr
		 * crypto API, unless CAP_NO_IV_PREFIX is requested.
		 */
		iv_bytes = 16U;
		memcpy(pkt->out_buf, iv, 16U);
		p_iv = iv;
	} else {
		/* Work on a local copy so the caller's IV is untouched. */
		iv_bytes = 0U;
		memcpy(iv_loc, iv, 16U);
		p_iv = iv_loc;
	}

	/* DCP uses DMA; disable data cache for coherent transfers. */
	sys_cache_data_disable();
	status = DCP_AES_EncryptCbc(cfg->base, &session->handle, pkt->in_buf,
				    pkt->out_buf + iv_bytes, pkt->in_len, p_iv);
	sys_cache_data_enable();

	if (status != kStatus_Success) {
		return fsl_to_errno(status);
	}

	pkt->out_len = pkt->in_len + iv_bytes;

	return 0;
}

/* AES-CBC decrypt; expects an IV prefix on the input unless
 * CAP_NO_IV_PREFIX was requested.
 * NOTE(review): pkt->in_len is passed unmodified as the cipher length
 * even when iv_bytes == 16 — presumably in_len excludes the prefix
 * here; confirm against the crypto API callers.
 */
static int crypto_dcp_aes_cbc_decrypt(struct cipher_ctx *ctx,
				      struct cipher_pkt *pkt, uint8_t *iv)
{
	const struct crypto_dcp_config *cfg = ctx->device->config;
	struct crypto_dcp_session *session = ctx->drv_sessn_state;
	status_t status;
	size_t iv_bytes;
	uint8_t *p_iv, iv_loc[16];

	if ((ctx->flags & CAP_NO_IV_PREFIX) == 0U) {
		iv_bytes = 16U;
		p_iv = iv;
	} else {
		iv_bytes = 0U;
		memcpy(iv_loc, iv, 16U);
		p_iv = iv_loc;
	}

	sys_cache_data_disable();
	status = DCP_AES_DecryptCbc(cfg->base, &session->handle,
				    pkt->in_buf + iv_bytes, pkt->out_buf,
				    pkt->in_len, p_iv);
	sys_cache_data_enable();

	if (status != kStatus_Success) {
		return fsl_to_errno(status);
	}

	pkt->out_len = pkt->in_len - iv_bytes;

	return 0;
}

/* AES-ECB encrypt of pkt->in_len bytes (no length restriction here). */
static int crypto_dcp_aes_ecb_encrypt(struct cipher_ctx *ctx,
				      struct cipher_pkt *pkt)
{
	const struct crypto_dcp_config *cfg = ctx->device->config;
	struct crypto_dcp_session *session = ctx->drv_sessn_state;
	status_t status;

	sys_cache_data_disable();
	status = DCP_AES_EncryptEcb(cfg->base, &session->handle, pkt->in_buf,
				    pkt->out_buf, pkt->in_len);
	sys_cache_data_enable();

	if (status != kStatus_Success) {
		return fsl_to_errno(status);
	}

	pkt->out_len = pkt->in_len;

	return 0;
}

/* AES-ECB decrypt; mirror of crypto_dcp_aes_ecb_encrypt(). */
static int crypto_dcp_aes_ecb_decrypt(struct cipher_ctx *ctx,
				      struct cipher_pkt *pkt)
{
	const struct crypto_dcp_config *cfg = ctx->device->config;
	struct crypto_dcp_session *session = ctx->drv_sessn_state;
	status_t status;

	sys_cache_data_disable();
	status = DCP_AES_DecryptEcb(cfg->base, &session->handle, pkt->in_buf,
				    pkt->out_buf, pkt->in_len);
	sys_cache_data_enable();

	if (status != kStatus_Success) {
		return fsl_to_errno(status);
	}

	pkt->out_len = pkt->in_len;

	return 0;
}

/* Begin an AES cipher session (CBC or ECB): validate, claim a slot,
 * install the handler, and program the key into the DCP key slot.
 */
static int crypto_dcp_cipher_begin_session(const struct device *dev,
					   struct cipher_ctx *ctx,
					   enum cipher_algo algo,
					   enum cipher_mode mode,
					   enum cipher_op op_type)
{
	const struct crypto_dcp_config *cfg = dev->config;
	struct crypto_dcp_session *session;
	status_t status;

	if (algo != CRYPTO_CIPHER_ALGO_AES ||
	    (mode != CRYPTO_CIPHER_MODE_CBC && mode != CRYPTO_CIPHER_MODE_ECB)) {
		return -ENOTSUP;
	}

	if (ctx->flags & ~(CRYPTO_DCP_CIPHER_CAPS)) {
		return -ENOTSUP;
	}

	session = get_session(dev);
	if (session == NULL) {
		return -ENOSPC;
	}

	if (mode == CRYPTO_CIPHER_MODE_CBC) {
		if (op_type == CRYPTO_CIPHER_OP_DECRYPT) {
			ctx->ops.cbc_crypt_hndlr = crypto_dcp_aes_cbc_decrypt;
		} else {
			ctx->ops.cbc_crypt_hndlr = crypto_dcp_aes_cbc_encrypt;
		}
	} else {
		if (op_type == CRYPTO_CIPHER_OP_DECRYPT) {
			ctx->ops.block_crypt_hndlr = crypto_dcp_aes_ecb_decrypt;
		} else {
			ctx->ops.block_crypt_hndlr = crypto_dcp_aes_ecb_encrypt;
		}
	}

	ctx->drv_sessn_state = session;

	status = DCP_AES_SetKey(cfg->base, &session->handle,
				ctx->key.bit_stream, ctx->keylen);
	if (status != kStatus_Success) {
		free_session(session);
		return fsl_to_errno(status);
	}

	return 0;
}

/* Release a cipher session slot. */
static int crypto_dcp_cipher_free_session(const struct device *dev,
					  struct cipher_ctx *ctx)
{
	struct crypto_dcp_session *session;

	ARG_UNUSED(dev);

	session = ctx->drv_sessn_state;
	free_session(session);

	return 0;
}

/* Incremental SHA-256: update on every call, finish when requested. */
static int crypto_dcp_sha256(struct hash_ctx *ctx, struct hash_pkt *pkt,
			     bool finish)
{
	const struct crypto_dcp_config *cfg = ctx->device->config;
	struct crypto_dcp_session *session = ctx->drv_sessn_state;
	status_t status;

	sys_cache_data_disable();
	status = DCP_HASH_Update(cfg->base, &session->hash_ctx, pkt->in_buf,
				 pkt->in_len);
	sys_cache_data_enable();

	if (status != kStatus_Success) {
		return fsl_to_errno(status);
	}

	if (finish) {
		sys_cache_data_disable();
		/* NULL: let the HAL assume the full digest size. */
		status = DCP_HASH_Finish(cfg->base, &session->hash_ctx,
					 pkt->out_buf, NULL);
		sys_cache_data_enable();
	}

	return fsl_to_errno(status);
}

/* Begin a SHA-256 hash session (only SHA-256 is supported). */
static int crypto_dcp_hash_begin_session(const struct device *dev,
					 struct hash_ctx *ctx,
					 enum hash_algo algo)
{
	const struct crypto_dcp_config *cfg = dev->config;
	struct crypto_dcp_session *session;
	status_t status;

	if (algo != CRYPTO_HASH_ALGO_SHA256) {
		return -ENOTSUP;
	}

	if (ctx->flags & ~(CRYPTO_DCP_HASH_CAPS)) {
		return -ENOTSUP;
	}

	session = get_session(dev);
	if (session == NULL) {
		return -ENOSPC;
	}

	status = DCP_HASH_Init(cfg->base, &session->handle, &session->hash_ctx,
			       kDCP_Sha256);
	if (status != kStatus_Success) {
		free_session(session);
		return fsl_to_errno(status);
	}

	ctx->drv_sessn_state = session;
	ctx->hash_hndlr = crypto_dcp_sha256;

	return 0;
}

/* Release a hash session slot. */
static int crypto_dcp_hash_free_session(const struct device *dev,
					struct hash_ctx *ctx)
{
	struct crypto_dcp_session *session;

	ARG_UNUSED(dev);

	session = ctx->drv_sessn_state;
	free_session(session);

	return 0;
}

/* Device init: configure the DCP block and pre-assign a unique
 * channel and key slot to each session slot.
 */
static int crypto_dcp_init(const struct device *dev)
{
	const struct crypto_dcp_config *cfg = dev->config;
	struct crypto_dcp_data *data = dev->data;
	dcp_config_t hal_cfg;

	DCP_GetDefaultConfig(&hal_cfg);
	DCP_Init(cfg->base, &hal_cfg);

	/* Assign unique channels/key slots to each session */
	for (size_t i = 0; i < CONFIG_CRYPTO_MCUX_DCP_MAX_SESSION; ++i) {
		data->sessions[i].in_use = false;
		/* Channels are one-hot bit flags; slots are consecutive. */
		data->sessions[i].handle.channel = kDCP_Channel0 << i;
		data->sessions[i].handle.keySlot = kDCP_KeySlot0 + i;
		data->sessions[i].handle.swapConfig = kDCP_NoSwap;
	}

	return 0;
}

static const struct crypto_driver_api crypto_dcp_api = {
	.query_hw_caps = crypto_dcp_query_hw_caps,
	.cipher_begin_session = crypto_dcp_cipher_begin_session,
	.cipher_free_session = crypto_dcp_cipher_free_session,
	.hash_begin_session = crypto_dcp_hash_begin_session,
	.hash_free_session = crypto_dcp_hash_free_session,
};

#define CRYPTO_DCP_DEFINE(inst)						      \
	static const struct crypto_dcp_config crypto_dcp_config_##inst = {   \
		.base = (DCP_Type *)DT_INST_REG_ADDR(inst),		      \
	};								      \
	static struct crypto_dcp_data crypto_dcp_data_##inst;		      \
	DEVICE_DT_INST_DEFINE(inst, crypto_dcp_init, NULL,		      \
			      &crypto_dcp_data_##inst,			      \
			      &crypto_dcp_config_##inst,		      \
			      POST_KERNEL, CONFIG_CRYPTO_INIT_PRIORITY,	      \
			      &crypto_dcp_api);

DT_INST_FOREACH_STATUS_OKAY(CRYPTO_DCP_DEFINE)
```
/content/code_sandbox/drivers/crypto/crypto_mcux_dcp.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,417
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_CRYPTO_CRYPTO_INTEL_SHA_PRIV_H_ #define ZEPHYR_DRIVERS_CRYPTO_CRYPTO_INTEL_SHA_PRIV_H_ #include <zephyr/kernel.h> #include <zephyr/sys/util.h> #include "crypto_intel_sha_registers.h" #define SHA_HASH_DATA_BLOCK_LEN (64) #define SHA_API_MAX_FRAG_LEN (64 * 1024 - 256) #define SHA_REQUIRED_BLOCK_ALIGNMENT (512) /* Possible SHA states */ #define SHA_FIRST (2) #define SHA_MIDLE (3) #define SHA_LAST (0) /* SHA resume flag */ #define SHA_HRSM_ENABLE (1) #define SHA_HRSM_DISABLE (0) #define SHA1_ALGORITHM_HASH_SIZEOF (160 / 8) #define SHA224_ALGORITHM_HASH_SIZEOF (224 / 8) #define SHA256_ALGORITHM_HASH_SIZEOF (256 / 8) #define SHA384_ALGORITHM_HASH_SIZEOF (384 / 8) #define SHA512_ALGORITHM_HASH_SIZEOF (512 / 8) #define SHA_MAX_SESSIONS 8 #define BYTE_SWAP32(x) \ (((x >> 24) & 0x000000FF) | ((x << 24) & 0xFF000000) | ((x >> 8) & 0x0000FF00) | \ ((x << 8) & 0x00FF0000)) struct sha_hw_regs { union PIBCS pibcs; union PIBBA pibba; union PIBS pibs; union PIBFPI pibfpi; union PIBRP pibrp; union PIBWP pibwp; union PIBSP pibsp; uint32_t not_used1[5]; union SHARLDW0 sharldw0; union SHARLDW1 sharldw1; union SHAALDW0 shaaldw0; union SHAALDW1 shaaldw1; union SHACTL shactl; union SHASTS shasts; uint32_t not_used12[2]; uint8_t initial_vector[64]; uint8_t sha_result[64]; }; union sha_state { uint32_t full; struct { /* Hash state: SHA_FIRST, SHA_MIDLE or SHA_LAST */ uint32_t state : 3; /* Hash resume bit */ uint32_t hrsm : 1; uint32_t rsvd : 28; } part; }; struct sha_context { union SHAALDW0 shaaldw0; union SHAALDW1 shaaldw1; uint8_t initial_vector[SHA_HASH_DATA_BLOCK_LEN]; uint8_t sha_result[SHA_HASH_DATA_BLOCK_LEN]; }; struct sha_session { struct sha_context sha_ctx; union sha_state state; uint32_t algo; bool in_use; }; struct sha_container { /* pointer to DSP SHA Registers */ volatile struct sha_hw_regs *dfsha; }; #endif ```
/content/code_sandbox/drivers/crypto/crypto_intel_sha_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
614
```unknown config CRYPTO_IT8XXX2_SHA bool "ITE IT8XXX2 SHA driver" default y depends on DT_HAS_ITE_IT8XXX2_SHA_ENABLED select SOC_IT8XXX2_SHA256_HW_ACCELERATE help Enable ITE IT8XXX2 SHA driver. This driver supports SHA256 hardware accelerator of the it8xxx2 series. It requires 256 + 256 bytes in the RAM's first 4k-bytes to calculate SHA256 hash. config CRYPTO_IT8XXX2_SHA_V2 bool "ITE IT8XXX2 SHA V2 driver" default y depends on DT_HAS_ITE_IT8XXX2_SHA_V2_ENABLED select SOC_IT8XXX2_SHA256_HW_ACCELERATE help Enable ITE IT8XXX2 SHA V2 driver. This driver supports SHA256 hardware accelerator of the it82xx2 series. It requires 1024 + 256 bytes in the RAM's first 4k-bytes to calculate SHA256 hash. ```
/content/code_sandbox/drivers/crypto/Kconfig.it8xxx2
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
219
```c
/*
 *
 */

/* Intel ADSP SHA accelerator driver. Programs the memory-mapped SHA
 * engine (see crypto_intel_sha_priv.h) to hash buffers incrementally,
 * saving/restoring per-session hardware context between fragments.
 */

#include <zephyr/kernel.h>
#include <zephyr/init.h>
#include <errno.h>
#include <zephyr/crypto/crypto.h>
#include "crypto_intel_sha_priv.h"
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(SHA);

#define DT_DRV_COMPAT intel_adsp_sha

static struct sha_session sha_sessions[SHA_MAX_SESSIONS];

/* Claim the first free session slot; returns its index or -1. */
static int intel_sha_get_unused_session_idx(void)
{
	int i;

	for (i = 0; i < SHA_MAX_SESSIONS; i++) {
		if (!sha_sessions[i].in_use) {
			sha_sessions[i].in_use = true;
			return i;
		}
	}
	return -1;
}

/* Set the engine enable bit; drains any in-flight work before
 * disabling. Always returns 0.
 */
static int intel_sha_set_ctl_enable(struct sha_container *sha, int status)
{
	/* wait until not busy when turning off */
	if (status == 0 && sha->dfsha->shactl.part.en == 1) {
		while (sha->dfsha->shasts.part.busy) {
		}
	}

	sha->dfsha->shactl.part.en = status;
	return 0;
}

/* Write the low resume-length word; rejects unaligned values. */
static int intel_sha_set_resume_length_dw0(struct sha_container *sha, uint32_t lower_length)
{
	int err = -EINVAL;

	if (IS_ALIGNED(lower_length, SHA_REQUIRED_BLOCK_ALIGNMENT)) {
		sha->dfsha->sharldw0.full = lower_length;
		err = 0;
	}
	return err;
}

/* Write the high resume-length word (no alignment constraint). */
static int intel_sha_set_resume_length_dw1(struct sha_container *sha, uint32_t upper_length)
{
	sha->dfsha->sharldw1.full = upper_length;
	return 0;
}

/* Word-by-word copy between register buffers; requires 4-byte aligned
 * pointers and length (MMIO regions must be accessed word-wise).
 */
static int intel_sha_regs_cpy(void *dst, const void *src, size_t len)
{
	uint32_t counter;
	int err = -EINVAL;

	if ((IS_ALIGNED(len, sizeof(uint32_t))) && (IS_ALIGNED(dst, sizeof(uint32_t))) &&
	    (IS_ALIGNED(src, sizeof(uint32_t)))) {
		len /= sizeof(uint32_t);
		for (counter = 0; counter != len; ++counter) {
			((uint32_t *)dst)[counter] = ((uint32_t *)src)[counter];
		}
		err = 0;
	}
	return err;
}

/* ! Perform SHA computation over requested region.
 * Programs input base/size, restores resume state when hrsm is set,
 * then runs the engine to completion (enable, then disable drains).
 */
static int intel_sha_device_run(const struct device *dev, const void *buf_in, size_t buf_in_size,
				size_t max_buff_len, uint32_t state)
{
	int err;
	struct sha_container *const self = dev->data;
	union sha_state state_u = { .full = state };

	/* align to OWORD */
	const size_t aligned_buff_size = ROUND_UP(buf_in_size, 0x10);

	err = intel_sha_set_ctl_enable(self, 0);
	if (err) {
		return err;
	}

	/* set processing element disable */
	self->dfsha->pibcs.part.peen = 0;
	/* set pib base addr */
	self->dfsha->pibba.full = (uint32_t)buf_in;

	if (max_buff_len < aligned_buff_size) {
		return -EINVAL;
	}
	self->dfsha->pibs.full = aligned_buff_size;

	/* enable interrupt */
	self->dfsha->pibcs.part.bscie = 1;
	self->dfsha->pibcs.part.teie = 0;
	/* set processing element enable */
	self->dfsha->pibcs.part.peen = 1;

	if (self->dfsha->shactl.part.en) {
		return -EINVAL; /* already enabled */
	}
	self->dfsha->shactl.part.hrsm = state_u.part.hrsm;

	/* set initial values if resuming */
	if (state_u.part.hrsm) {
		err = intel_sha_set_resume_length_dw0(self, self->dfsha->shaaldw0.full);
		if (err) {
			return err;
		}
		err = intel_sha_set_resume_length_dw1(self, self->dfsha->shaaldw1.full);
		if (err) {
			return err;
		}
		/* Resume from the previous digest as the new IV. */
		err = intel_sha_regs_cpy((void *)self->dfsha->initial_vector,
					 (void *)self->dfsha->sha_result,
					 sizeof(self->dfsha->initial_vector));
		if (err) {
			return err;
		}
	}

	/* set ctl hash first middle */
	if (self->dfsha->shactl.part.en) {
		return -EINVAL; /* already enabled */
	}
	self->dfsha->shactl.part.hfm = state_u.part.state;

	/* increment pointer */
	self->dfsha->pibfpi.full = buf_in_size;

	err = intel_sha_set_ctl_enable(self, 1);
	if (err) {
		return err;
	}
	/* Disabling waits for busy to clear, i.e. run to completion. */
	err = intel_sha_set_ctl_enable(self, 0);

	return err;
}

/* Copy the digest out of the result registers into dst. */
static int intel_sha_copy_hash(struct sha_container *const self, void *dst, size_t len)
{
	/* NOTE: generated hash value should be read from the end */
	int err = -EINVAL;
	uint32_t counter = 0;
	uint32_t last_idx = 0;

	if ((IS_ALIGNED(len, sizeof(uint32_t))) && (IS_ALIGNED(dst, sizeof(uint32_t)))) {
		len /= sizeof(uint32_t);
		counter = 0;
		/* The index of a last element in the sha result buffer. */
		last_idx = (sizeof(self->dfsha->sha_result) / sizeof(uint32_t)) - 1;
		/* Words are stored back-to-front; read in reverse. */
		for (counter = 0; counter != len; counter++) {
			((uint32_t *)dst)[counter] =
				((uint32_t *)self->dfsha->sha_result)[last_idx - counter];
		}
		err = 0;
	}
	return err;
}

/* Wait for the engine to idle, then fetch the digest into buf_out. */
static int intel_sha_device_get_hash(const struct device *dev, void *buf_out, size_t buf_out_size)
{
	int err;
	struct sha_container *const self = dev->data;

	if (buf_out == NULL) {
		return -EINVAL;
	}

	/* wait until not busy */
	while (self->dfsha->shasts.part.busy) {
	}

	err = intel_sha_copy_hash(self, buf_out, buf_out_size);
	return err;
}

/* Crypto API hash handler: restores session context, feeds the input
 * in SHA_API_MAX_FRAG_LEN fragments, and on 'finish' reads back and
 * byte-swaps the digest into pkt->out_buf.
 */
static int intel_sha_compute(struct hash_ctx *ctx, struct hash_pkt *pkt, bool finish)
{
	int ret;
	struct sha_container *self = (struct sha_container *const)(ctx->device)->data;
	struct sha_session *session = (struct sha_session *)ctx->drv_sessn_state;
	size_t frag_length;
	size_t output_size;
	uint32_t *hash_int_ptr = (uint32_t *)(pkt->out_buf);

	/* set algo */
	self->dfsha->shactl.full = 0x0;
	self->dfsha->shactl.part.algo = session->algo;

	/* restore ctx */
	self->dfsha->shaaldw0 = session->sha_ctx.shaaldw0;
	self->dfsha->shaaldw1 = session->sha_ctx.shaaldw1;

	ret = intel_sha_regs_cpy((void *)self->dfsha->initial_vector,
				 (void *)session->sha_ctx.initial_vector,
				 sizeof(self->dfsha->initial_vector));
	if (ret) {
		return ret;
	}
	ret = intel_sha_regs_cpy((void *)self->dfsha->sha_result,
				 (void *)session->sha_ctx.sha_result,
				 sizeof(self->dfsha->sha_result));
	if (ret) {
		return ret;
	}

	/* compute hash */
	do {
		frag_length = pkt->in_len > SHA_API_MAX_FRAG_LEN ? SHA_API_MAX_FRAG_LEN :
								   pkt->in_len;
		/* Mark the final fragment so the engine pads/closes. */
		if ((frag_length == pkt->in_len) && finish) {
			session->state.part.state = SHA_LAST;
		}

		ret = intel_sha_device_run(ctx->device, pkt->in_buf, frag_length, frag_length,
					   session->state.full);
		if (ret) {
			return ret;
		}

		/* set state for next iteration */
		session->state.part.hrsm = SHA_HRSM_ENABLE;
		session->state.part.state = SHA_MIDLE;

		pkt->in_len -= frag_length;
		pkt->in_buf += frag_length;
	} while (pkt->in_len > 0);

	if (finish) {
		switch (self->dfsha->shactl.part.algo) {
		case CRYPTO_HASH_ALGO_SHA224:
			output_size = SHA224_ALGORITHM_HASH_SIZEOF;
			break;
		case CRYPTO_HASH_ALGO_SHA256:
			output_size = SHA256_ALGORITHM_HASH_SIZEOF;
			break;
		case CRYPTO_HASH_ALGO_SHA384:
			output_size = SHA384_ALGORITHM_HASH_SIZEOF;
			break;
		case CRYPTO_HASH_ALGO_SHA512:
			output_size = SHA512_ALGORITHM_HASH_SIZEOF;
			break;
		default:
			return -ENOTSUP;
		}

		ret = intel_sha_device_get_hash(ctx->device, pkt->out_buf, output_size);
		if (ret) {
			return ret;
		}

		/* Fix byte ordering to match common hash representation.
		 */
		for (size_t i = 0; i != output_size / sizeof(uint32_t); i++) {
			hash_int_ptr[i] = BYTE_SWAP32(hash_int_ptr[i]);
		}
	}

	return ret;
}

/* Crypto API: begin a hash session for the requested algorithm. */
static int intel_sha_device_set_hash_type(const struct device *dev, struct hash_ctx *ctx,
					  enum hash_algo algo)
{
	int ctx_idx;
	struct sha_container *self = (struct sha_container *const)(dev)->data;

	ctx_idx = intel_sha_get_unused_session_idx();

	if (ctx_idx < 0) {
		LOG_ERR("All sessions in use!");
		return -ENOSPC;
	}

	ctx->drv_sessn_state = &sha_sessions[ctx_idx];

	/* set processing element disable */
	self->dfsha->pibcs.part.peen = 0;

	/* populate sha session data */
	sha_sessions[ctx_idx].state.part.state = SHA_FIRST;
	sha_sessions[ctx_idx].state.part.hrsm = SHA_HRSM_DISABLE;
	sha_sessions[ctx_idx].algo = algo;

	ctx->hash_hndlr = intel_sha_compute;

	return 0;
}

/* Crypto API: free a session, scrubbing hardware and session state. */
static int intel_sha_device_free(const struct device *dev, struct hash_ctx *ctx)
{
	struct sha_container *self = (struct sha_container *const)(dev)->data;
	struct sha_session *session = (struct sha_session *)ctx->drv_sessn_state;

	/* Clear the whole register block and the saved context. */
	(void)memset((void *)self->dfsha, 0, sizeof(struct sha_hw_regs));
	(void)memset(&session->sha_ctx, 0, sizeof(struct sha_context));
	(void)memset(&session->state, 0, sizeof(union sha_state));
	session->in_use = 0;
	session->algo = 0;

	return 0;
}

/* Report capability flags: separate buffers, synchronous only. */
static int intel_sha_device_hw_caps(const struct device *dev)
{
	return (CAP_SEPARATE_IO_BUFS | CAP_SYNC_OPS);
}

static struct crypto_driver_api hash_enc_funcs = {
	.hash_begin_session = intel_sha_device_set_hash_type,
	.hash_free_session = intel_sha_device_free,
	.hash_async_callback_set = NULL,
	.query_hw_caps = intel_sha_device_hw_caps,
};

#define INTEL_SHA_DEVICE_INIT(inst)                                                                \
	static struct sha_container sha_data_##inst = {                                            \
		.dfsha = (volatile struct sha_hw_regs *)DT_INST_REG_ADDR_BY_IDX(inst, 0)           \
	};                                                                                         \
	DEVICE_DT_INST_DEFINE(inst, NULL, NULL, &sha_data_##inst, NULL, POST_KERNEL,               \
			      CONFIG_CRYPTO_INIT_PRIORITY, (void *)&hash_enc_funcs);

DT_INST_FOREACH_STATUS_OKAY(INTEL_SHA_DEVICE_INIT)
```
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,497
```unknown # Atmel ATAES132A configuration options menuconfig CRYPTO_ATAES132A bool "Atmel ATAES132A 32k AES Serial EEPROM support" default y depends on DT_HAS_ATMEL_ATAES132A_ENABLED depends on I2C help Enable Atmel ATAES132A 32k AES Serial EEPROM support. if CRYPTO_ATAES132A choice prompt "ATAES132A I2C bus speed" default CRYPTO_ATAES132A_I2C_SPEED_STANDARD config CRYPTO_ATAES132A_I2C_SPEED_STANDARD bool "Standard" help Standard bis speed of up to 100KHz. config CRYPTO_ATAES132A_I2C_SPEED_FAST bool "Fast" help Fast bus speed of up to 400KHz. endchoice endif # CRYPTO_ATAES132A ```
/content/code_sandbox/drivers/crypto/Kconfig.ataes132a
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
195
```objective-c /* * * */ #ifndef ZEPHYR_DRIVERS_CRYPTO_CRYPTO_STM32_PRIV_H_ #define ZEPHYR_DRIVERS_CRYPTO_CRYPTO_STM32_PRIV_H_ /* Maximum supported key length is 256 bits */ #define CRYPTO_STM32_AES_MAX_KEY_LEN (256 / 8) struct crypto_stm32_config { const struct reset_dt_spec reset; struct stm32_pclken pclken; }; struct crypto_stm32_data { CRYP_HandleTypeDef hcryp; struct k_sem device_sem; struct k_sem session_sem; }; struct crypto_stm32_session { CRYP_ConfigTypeDef config; uint32_t key[CRYPTO_STM32_AES_MAX_KEY_LEN / sizeof(uint32_t)]; bool in_use; }; #define CRYPTO_STM32_CFG(dev) \ ((const struct crypto_stm32_config *const)(dev)->config) #define CRYPTO_STM32_DATA(dev) \ ((struct crypto_stm32_data *const)(dev)->data) #define CRYPTO_STM32_SESSN(ctx) \ ((struct crypto_stm32_session *const)(ctx)->drv_sessn_state) #endif /* ZEPHYR_DRIVERS_CRYPTO_CRYPTO_STM32_PRIV_H_ */ ```
/content/code_sandbox/drivers/crypto/crypto_stm32_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
250
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/crypto/crypto.h> #include <zephyr/irq.h> #include <DA1469xAB.h> #include <da1469x_config.h> #include <da1469x_otp.h> #include <system_DA1469x.h> #include <da1469x_pd.h> #include <zephyr/sys/byteorder.h> #include <zephyr/pm/device.h> #include <zephyr/pm/policy.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(crypto_smartbond_crypto, CONFIG_CRYPTO_LOG_LEVEL); #define DT_DRV_COMPAT renesas_smartbond_crypto #define SMARTBOND_IRQN DT_INST_IRQN(0) #define SMARTBOND_IRQ_PRIO DT_INST_IRQ(0, priority) #if defined(CONFIG_CRYPTO_ASYNC) #define CRYPTO_HW_CAPS (CAP_RAW_KEY | CAP_SEPARATE_IO_BUFS | CAP_ASYNC_OPS | CAP_NO_IV_PREFIX) #else #define CRYPTO_HW_CAPS (CAP_RAW_KEY | CAP_SEPARATE_IO_BUFS | CAP_SYNC_OPS | CAP_NO_IV_PREFIX) #endif #define SWAP32(_w) __REV(_w) #define CRYPTO_CTRL_REG_SET(_field, _val) \ AES_HASH->CRYPTO_CTRL_REG = \ (AES_HASH->CRYPTO_CTRL_REG & ~AES_HASH_CRYPTO_CTRL_REG_ ## _field ## _Msk) | \ ((_val) << AES_HASH_CRYPTO_CTRL_REG_ ## _field ## _Pos) #define CRYPTO_CTRL_REG_GET(_field) \ ((AES_HASH->CRYPTO_CTRL_REG & AES_HASH_CRYPTO_CTRL_REG_ ## _field ## _Msk) >> \ AES_HASH_CRYPTO_CTRL_REG_ ## _field ## _Pos) struct crypto_smartbond_data { /* * Semaphore to provide mutual exlusion when a crypto session is requested. */ struct k_sem session_sem; /* * Semaphore to provide mutual exlusion when a cryptographic task is requested. * (a session should be requested at this point). */ struct k_sem device_sem; #if defined(CONFIG_CRYPTO_ASYNC) /* * User-defined callbacks to be called upon completion of asynchronous * cryptographic operations. Note that the AES and HASH modes can work * complementary to each other. */ union { cipher_completion_cb cipher_user_cb; hash_completion_cb hash_user_cb; }; /* * Packet context should be stored during a session so that can be rertieved * from within the crypto engine ISR context. 
*/ union { struct cipher_pkt *cipher_pkt; struct hash_pkt *hash_pkt; }; #else /* * Semaphore used to block for as long as a synchronous cryptographic operation * is in progress. */ struct k_sem sync_sem; #endif }; /* * Status flag to indicate if the crypto engine resources have been granted. Note that the * device integrates a single crypto engine instance. */ static bool in_use; static void crypto_smartbond_set_status(bool enable); static void smartbond_crypto_isr(const void *arg) { struct crypto_smartbond_data *data = ((const struct device *)arg)->data; uint32_t status = AES_HASH->CRYPTO_STATUS_REG; if (status & AES_HASH_CRYPTO_STATUS_REG_CRYPTO_IRQ_ST_Msk) { /* Clear interrupt source. Otherwise the handler will be fire constantly! */ AES_HASH->CRYPTO_CLRIRQ_REG = 0x1; #if defined(CONFIG_CRYPTO_ASYNC) /* Define the slected crypto mode (AES/HASH). */ if (AES_HASH->CRYPTO_CTRL_REG & AES_HASH_CRYPTO_CTRL_REG_CRYPTO_HASH_SEL_Msk) { if (data->hash_user_cb) { data->hash_user_cb(data->hash_pkt, status); } } else { if (data->cipher_user_cb) { data->cipher_user_cb(data->cipher_pkt, status); } } #else /* Designate the requested cryptographic tasks is finished. */ k_sem_give(&data->sync_sem); #endif } } static inline void crypto_smartbond_pm_policy_state_lock_get(const struct device *dev) { /* * Prevent the SoC from entering the normal sleep state as PDC does not support * waking up the application core following AES/HASH events. */ pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES); } static inline void crypto_smartbond_pm_policy_state_lock_put(const struct device *dev) { /* Allow the SoC to enter the normal sleep state once AES/HASH operations are done. 
*/ pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES); } static bool crypto_smartbond_lock_session(const struct device *dev) { bool lock = false; struct crypto_smartbond_data *data = dev->data; k_sem_take(&data->session_sem, K_FOREVER); if (!in_use) { in_use = true; /* Prevent sleep as long as a cryptographic session is in place */ da1469x_pd_acquire(MCU_PD_DOMAIN_SYS); crypto_smartbond_pm_policy_state_lock_get(dev); crypto_smartbond_set_status(true); lock = true; } k_sem_give(&data->session_sem); return lock; } static void crypto_smartbond_unlock_session(const struct device *dev) { struct crypto_smartbond_data *data = dev->data; k_sem_take(&data->session_sem, K_FOREVER); if (in_use) { in_use = false; crypto_smartbond_set_status(false); crypto_smartbond_pm_policy_state_lock_put(dev); da1469x_pd_release_nowait(MCU_PD_DOMAIN_SYS); } k_sem_give(&data->session_sem); } /* * Input vector should comply with the following restrictions: * * mode | CRYPTO_MORE_IN = true | CRYPTO_MORE_IN = false * ------------| -----------------------| ---------------------- * ECB | multiple of 16 (bytes) | multiple of 16 (bytes) * CBC | multiple of 16 | no restrictions * CTR | multiple of 16 | no restrictions * MD5 | multiple of 8 | no restrictions * SHA_1 | multiple of 8 | no restrictions * SHA_256_224 | multiple of 8 | no restrictions * SHA_256 | multiple of 8 | no restrictions * SHA_384 | multiple of 8 | no restrictions * SHA_512 | multiple of 8 | no restrictions * SHA_512_224 | multiple of 8 | no restrictions * SHA_512_256 | multiple of 8 | no restrictions */ static int crypto_smartbond_check_in_restrictions(uint16_t in_len) { #define CRYPTO_ALG_MD_ECB_MAGIC_0 0x00 #define CRYPTO_ALG_MD_ECB_MAGIC_1 0x01 bool not_last_in_block = !!(AES_HASH->CRYPTO_CTRL_REG & AES_HASH_CRYPTO_CTRL_REG_CRYPTO_MORE_IN_Msk); /* Define the slected crypto mode (AES/HASH). 
*/ if (AES_HASH->CRYPTO_CTRL_REG & AES_HASH_CRYPTO_CTRL_REG_CRYPTO_HASH_SEL_Msk) { if (not_last_in_block && (in_len & 0x7)) { return -EINVAL; } } else { if (in_len & 0xF) { if (not_last_in_block) { return -EINVAL; } uint32_t crypto_mode = CRYPTO_CTRL_REG_GET(CRYPTO_ALG_MD); /* Check if AES mode is ECB */ if (crypto_mode == CRYPTO_ALG_MD_ECB_MAGIC_0 || crypto_mode == CRYPTO_ALG_MD_ECB_MAGIC_1) { return -EINVAL; } } } return 0; } /* * The driver model does not define the max. output length. As such, the max supported length * per mode is applied. */ static int crypto_smartbond_hash_set_out_len(void) { uint32_t hash_algo = (AES_HASH->CRYPTO_CTRL_REG & AES_HASH_CRYPTO_CTRL_REG_CRYPTO_ALG_Msk); if (AES_HASH->CRYPTO_CTRL_REG & AES_HASH_CRYPTO_CTRL_REG_CRYPTO_ALG_MD_Msk) { /* 64-bit HASH operations */ switch (hash_algo) { case 0x0: /* SHA-384: 0..47 --> 1..48 bytes */ CRYPTO_CTRL_REG_SET(CRYPTO_HASH_OUT_LEN, 47); break; case 0x1: /* SHA-512: 0..63 --> 1..64 bytes */ CRYPTO_CTRL_REG_SET(CRYPTO_HASH_OUT_LEN, 63); break; case 0x2: /* SHA-512/224: 0..27 --> 1..28 bytes */ CRYPTO_CTRL_REG_SET(CRYPTO_HASH_OUT_LEN, 27); break; case 0x3: /* SHA-512/256: 0..31 --> 1..32 bytes */ CRYPTO_CTRL_REG_SET(CRYPTO_HASH_OUT_LEN, 31); break; default: break; } } else { /* 32-bit HASH operations */ switch (hash_algo) { case 0x0: /* MD5: 0..15 --> 1..16 bytes */ CRYPTO_CTRL_REG_SET(CRYPTO_HASH_OUT_LEN, 15); break; case 0x1: /* SHA-1: 0..19 --> 1..20 bytes */ CRYPTO_CTRL_REG_SET(CRYPTO_HASH_OUT_LEN, 19); break; case 0x2: /* SHA-256/224: 0..27 --> 1..28 bytes */ CRYPTO_CTRL_REG_SET(CRYPTO_HASH_OUT_LEN, 27); break; case 0x3: /* SHA-256: 0..31 --> 1..32 bytes */ CRYPTO_CTRL_REG_SET(CRYPTO_HASH_OUT_LEN, 31); break; default: break; } } /* Return the OUT size applied. 
*/ return CRYPTO_CTRL_REG_GET(CRYPTO_HASH_OUT_LEN) + 1; } static uint32_t crypto_smartbond_swap_word(uint8_t *data) { /* Check word boundaries of given address and if possible accellerate swapping */ if ((uint32_t)data & 0x3) { return SWAP32(sys_get_le32(data)); } else { return SWAP32(*(uint32_t *)data); } } static int crypto_smartbond_cipher_key_load(uint8_t *key, uint16_t key_len) { if (key == NULL) { return -EIO; } AES_HASH->CRYPTO_CTRL_REG &= ~(AES_HASH_CRYPTO_CTRL_REG_CRYPTO_AES_KEY_SZ_Msk); if (key_len == 32) { AES_HASH->CRYPTO_CTRL_REG |= (0x2 << AES_HASH_CRYPTO_CTRL_REG_CRYPTO_AES_KEY_SZ_Pos); } else if (key_len == 24) { AES_HASH->CRYPTO_CTRL_REG |= (0x1 << AES_HASH_CRYPTO_CTRL_REG_CRYPTO_AES_KEY_SZ_Pos); } else if (key_len == 16) { /* Nothing to do */ } else { return -EINVAL; } /* Key expansion is performed by the crypto engine */ AES_HASH->CRYPTO_CTRL_REG |= AES_HASH_CRYPTO_CTRL_REG_CRYPTO_AES_KEXP_Msk; /* Check whether the cipher key is located in OTP (user keys segment) */ if (IS_ADDRESS_USER_DATA_KEYS_SEGMENT((uint32_t)key)) { /* User keys segmnet can be accessed if not locked (stick bits are not set) */ if (CRG_TOP->SECURE_BOOT_REG & CRG_TOP_SECURE_BOOT_REG_PROT_AES_KEY_READ_Msk) { return -EIO; } uint32_t cell_offset = da1469x_otp_address_to_cell_offset((uint32_t)key); da1469x_otp_read(cell_offset, (void *)&AES_HASH->CRYPTO_KEYS_START, (uint32_t)key_len); } else { volatile uint32_t *kmem_ptr = &AES_HASH->CRYPTO_KEYS_START; do { *(kmem_ptr++) = crypto_smartbond_swap_word(key); key += 4; key_len -= 4; } while (key_len); } return 0; } static int crypto_smartbond_cipher_set_mode(enum cipher_mode mode) { /* Select AES mode */ AES_HASH->CRYPTO_CTRL_REG &= ~(AES_HASH_CRYPTO_CTRL_REG_CRYPTO_ALG_MD_Msk | AES_HASH_CRYPTO_CTRL_REG_CRYPTO_ALG_Msk | AES_HASH_CRYPTO_CTRL_REG_CRYPTO_HASH_SEL_Msk); switch (mode) { case CRYPTO_CIPHER_MODE_ECB: /* Already done; CRYPTO_ALG_MD = 0x0 or 0x1 defines ECB. 
*/ break; case CRYPTO_CIPHER_MODE_CTR: AES_HASH->CRYPTO_CTRL_REG |= (0x2 << AES_HASH_CRYPTO_CTRL_REG_CRYPTO_ALG_MD_Pos); break; case CRYPTO_CIPHER_MODE_CBC: AES_HASH->CRYPTO_CTRL_REG |= (0x3 << AES_HASH_CRYPTO_CTRL_REG_CRYPTO_ALG_MD_Pos); break; default: return -EINVAL; } return 0; } static int crypto_smartbond_hash_set_algo(enum hash_algo algo) { /* Select HASH mode and reset to 32-bit mode */ AES_HASH->CRYPTO_CTRL_REG = (AES_HASH->CRYPTO_CTRL_REG & ~(AES_HASH_CRYPTO_CTRL_REG_CRYPTO_ALG_Msk | AES_HASH_CRYPTO_CTRL_REG_CRYPTO_ALG_MD_Msk)) | AES_HASH_CRYPTO_CTRL_REG_CRYPTO_HASH_SEL_Msk; switch (algo) { case CRYPTO_HASH_ALGO_SHA224: /* CRYPTO_ALG_MD = 0x0 defines 32-bit operations */ AES_HASH->CRYPTO_CTRL_REG |= (0x2 << AES_HASH_CRYPTO_CTRL_REG_CRYPTO_ALG_Pos); break; case CRYPTO_HASH_ALGO_SHA256: /* CRYPTO_ALG_MD = 0x0 defines 32-bit operations */ AES_HASH->CRYPTO_CTRL_REG |= (0x3 << AES_HASH_CRYPTO_CTRL_REG_CRYPTO_ALG_Pos); break; case CRYPTO_HASH_ALGO_SHA384: /* CRYPTO_ALG_MD = 0x1 defines 64-bit operations */ AES_HASH->CRYPTO_CLRIRQ_REG |= AES_HASH_CRYPTO_CTRL_REG_CRYPTO_ALG_MD_Msk; break; case CRYPTO_HASH_ALGO_SHA512: /* CRYPTO_ALG_MD = 0x1 defines 64-bit operations */ AES_HASH->CRYPTO_CTRL_REG |= (AES_HASH_CRYPTO_CTRL_REG_CRYPTO_ALG_MD_Msk | (0x1 << AES_HASH_CRYPTO_CTRL_REG_CRYPTO_ALG_Pos)); break; default: return -EINVAL; } return 0; } static int crypto_smartbond_set_in_out_buf(uint8_t *in_buf, uint8_t *out_buf, int len) { if (in_buf == NULL) { return -EIO; } /* * Input data can reside in any address space. Cryto DMA can only access physical addresses * (not remapped). */ uint32_t phy_addr = black_orca_phy_addr((uint32_t)in_buf); if (IS_QSPIF_CACHED_ADDRESS(phy_addr)) { /* * To achiebe max. perfomance, peripherals should not access the Flash memory * through the instruction cache controller (avoid cache misses). 
*/ phy_addr += (MCU_QSPIF_M_BASE - MCU_QSPIF_M_CACHED_BASE); } else if (IS_OTP_ADDRESS(phy_addr)) { /* Peripherals should access the OTP memory through its peripheral address space. */ phy_addr += (MCU_OTP_M_P_BASE - MCU_OTP_M_BASE); } AES_HASH->CRYPTO_FETCH_ADDR_REG = phy_addr; /* * OUT buffer can be NULL in case of fregmented data processing. CRYPTO_DEST_ADDR and * CRYPTO_FETCH_ADDR are being updated as calculations prceed and OUT data are written * into memory. */ if (out_buf) { uint32_t remap_adr0 = CRG_TOP->SYS_CTRL_REG & CRG_TOP_SYS_CTRL_REG_REMAP_ADR0_Msk; /* * OUT data can only be written in SYSRAM, non-cached remapped SYSRAM and * cached non-remapped SYSRAM. */ if (IS_SYSRAM_ADDRESS(out_buf) || (IS_REMAPPED_ADDRESS(out_buf) && remap_adr0 == 3)) { AES_HASH->CRYPTO_DEST_ADDR_REG = black_orca_phy_addr((uint32_t)out_buf); } else { return -EIO; } } AES_HASH->CRYPTO_LEN_REG = len; return 0; } static inline void crypto_smartbond_cipher_store_dep_data(uint32_t *words, uint32_t len_words) { volatile uint32_t *mreg3 = &AES_HASH->CRYPTO_MREG3_REG; for (int i = 0; i < len_words; i++) { *(mreg3--) = crypto_smartbond_swap_word((uint8_t *)(words++)); } } static int crypto_smartbond_cipher_set_mreg(uint8_t *mreg, uint32_t len_words) { if (mreg == NULL || len_words == 0 || len_words > 4) { return -EINVAL; } AES_HASH->CRYPTO_MREG0_REG = 0; AES_HASH->CRYPTO_MREG1_REG = 0; AES_HASH->CRYPTO_MREG2_REG = 0; AES_HASH->CRYPTO_MREG3_REG = 0; crypto_smartbond_cipher_store_dep_data((uint32_t *)mreg, len_words); return 0; } static void crypto_smartbond_set_status(bool enable) { unsigned int key; key = irq_lock(); if (enable) { CRG_TOP->CLK_AMBA_REG |= (CRG_TOP_CLK_AMBA_REG_AES_CLK_ENABLE_Msk); AES_HASH->CRYPTO_CLRIRQ_REG = 0x1; AES_HASH->CRYPTO_CTRL_REG |= (AES_HASH_CRYPTO_CTRL_REG_CRYPTO_IRQ_EN_Msk); irq_enable(SMARTBOND_IRQN); } else { AES_HASH->CRYPTO_CTRL_REG &= ~(AES_HASH_CRYPTO_CTRL_REG_CRYPTO_IRQ_EN_Msk); AES_HASH->CRYPTO_CLRIRQ_REG = 0x1; irq_disable(SMARTBOND_IRQN); 
CRG_TOP->CLK_AMBA_REG &= ~(CRG_TOP_CLK_AMBA_REG_AES_CLK_ENABLE_Msk); } irq_unlock(key); } static int crypto_smartbond_query_hw_caps(const struct device *dev) { return CRYPTO_HW_CAPS; } static int crypto_smartbond_cipher_ecb_handler(struct cipher_ctx *ctx, struct cipher_pkt *pkt) { int ret; struct crypto_smartbond_data *data = ctx->device->data; if ((AES_HASH->CRYPTO_STATUS_REG & AES_HASH_CRYPTO_STATUS_REG_CRYPTO_INACTIVE_Msk) == 0) { LOG_ERR("Crypto engine is already employed"); return -EINVAL; } if (pkt->out_buf_max < pkt->in_len) { LOG_ERR("OUT buffer cannot be less that IN buffer"); return -EINVAL; } if (pkt->in_buf == NULL || pkt->out_buf == NULL) { LOG_ERR("Missing IN or OUT buffer declaration"); return -EIO; } if (pkt->in_len > 16) { LOG_ERR("For security reasons, do not operate on more than 16 bytes"); return -EINVAL; } k_sem_take(&data->device_sem, K_FOREVER); ret = crypto_smartbond_check_in_restrictions(pkt->in_len); if (ret < 0) { LOG_ERR("Unsupported IN buffer size"); k_sem_give(&data->device_sem); return ret; } ret = crypto_smartbond_set_in_out_buf(pkt->in_buf, pkt->out_buf, pkt->in_len); if (ret < 0) { LOG_ERR("Unsupported IN or OUT buffer location"); k_sem_give(&data->device_sem); return ret; } #if defined(CONFIG_CRYPTO_ASYNC) data->cipher_pkt = pkt; #endif /* Start crypto processing */ AES_HASH->CRYPTO_START_REG = 1; #if !defined(CONFIG_CRYPTO_ASYNC) /* Wait for crypto to finish its task */ k_sem_take(&data->sync_sem, K_FOREVER); #endif /* Report that number of bytes operated upon. 
*/ pkt->out_len = pkt->in_len; k_sem_give(&data->device_sem); return 0; } static int crypto_smartbond_cipher_cbc_handler(struct cipher_ctx *ctx, struct cipher_pkt *pkt, uint8_t *iv) { int ret; int offset = 0; struct crypto_smartbond_data *data = ctx->device->data; bool is_op_encryption = !!(AES_HASH->CRYPTO_CTRL_REG & AES_HASH_CRYPTO_CTRL_REG_CRYPTO_ENCDEC_Msk); if ((AES_HASH->CRYPTO_STATUS_REG & AES_HASH_CRYPTO_STATUS_REG_CRYPTO_INACTIVE_Msk) == 0) { LOG_ERR("Crypto engine is already employed"); return -EINVAL; } if ((is_op_encryption && pkt->out_buf_max < (pkt->in_len + 16)) || pkt->out_buf_max < (pkt->in_len - 16)) { LOG_ERR("Invalid OUT buffer size"); return -EINVAL; } if (pkt->in_buf == NULL || pkt->out_buf == NULL) { LOG_ERR("Missing IN or OUT buffer declaration"); return -EIO; } if ((ctx->flags & CAP_NO_IV_PREFIX) == 0) { offset = 16; if (is_op_encryption) { /* Prefix IV to ciphertet unless CAP_NO_IV_PREFIX is set. */ memcpy(pkt->out_buf, iv, offset); } } k_sem_take(&data->device_sem, K_FOREVER); ret = crypto_smartbond_check_in_restrictions(pkt->in_len); if (ret < 0) { LOG_ERR("Unsupported IN buffer size"); k_sem_give(&data->device_sem); return ret; } ret = crypto_smartbond_cipher_set_mreg(iv, 4); if (ret < 0) { LOG_ERR("Missing Initialization Vector (IV)"); k_sem_give(&data->device_sem); return ret; } if (is_op_encryption) { ret = crypto_smartbond_set_in_out_buf(pkt->in_buf, pkt->out_buf + offset, pkt->in_len); } else { ret = crypto_smartbond_set_in_out_buf(pkt->in_buf + offset, pkt->out_buf, pkt->in_len - offset); } if (ret < 0) { LOG_ERR("Unsupported IN or OUT buffer location"); k_sem_give(&data->device_sem); return ret; } #if defined(CONFIG_CRYPTO_ASYNC) data->cipher_pkt = pkt; #endif /* Start crypto processing */ AES_HASH->CRYPTO_START_REG = 1; #if !defined(CONFIG_CRYPTO_ASYNC) /* Wait for crypto to finish its task */ k_sem_take(&data->sync_sem, K_FOREVER); #endif /* Report that number of bytes operated upon. 
*/ if (is_op_encryption) { pkt->out_len = pkt->in_len + offset; } else { pkt->out_len = pkt->in_len - offset; } k_sem_give(&data->device_sem); return 0; } static int crypto_smartbond_cipher_ctr_handler(struct cipher_ctx *ctx, struct cipher_pkt *pkt, uint8_t *ic) { int ret; /* ivlen + ctrlen = keylen, ctrl_len is expressed in bits */ uint32_t iv_len = ctx->keylen - (ctx->mode_params.ctr_info.ctr_len >> 3); struct crypto_smartbond_data *data = ctx->device->data; if ((AES_HASH->CRYPTO_STATUS_REG & AES_HASH_CRYPTO_STATUS_REG_CRYPTO_INACTIVE_Msk) == 0) { LOG_ERR("Crypto engine is already employed"); return -EINVAL; } if (pkt->out_buf_max < pkt->in_len) { LOG_ERR("OUT buffer cannot be less that IN buffer"); return -EINVAL; } if (pkt->in_buf == NULL || pkt->out_buf == NULL) { LOG_ERR("Missing IN or OUT buffer declaration"); return -EIO; } k_sem_take(&data->device_sem, K_FOREVER); ret = crypto_smartbond_check_in_restrictions(pkt->in_len); if (ret < 0) { LOG_ERR("Unsupported IN buffer size"); k_sem_give(&data->device_sem); return ret; } ret = crypto_smartbond_cipher_set_mreg(ic, iv_len >> 2); if (ret < 0) { LOG_ERR("Missing Initialization Counter (IC)"); k_sem_give(&data->device_sem); return ret; } ret = crypto_smartbond_set_in_out_buf(pkt->in_buf, pkt->out_buf, pkt->in_len); if (ret < 0) { LOG_ERR("Unsupported IN or OUT buffer location"); k_sem_give(&data->device_sem); return ret; } #if defined(CONFIG_CRYPTO_ASYNC) data->cipher_pkt = pkt; #endif /* Start crypto processing */ AES_HASH->CRYPTO_START_REG = 1; #if !defined(CONFIG_CRYPTO_ASYNC) /* Wait for crypto to finish its task */ k_sem_take(&data->sync_sem, K_FOREVER); #endif /* Report that number of bytes operated upon. 
*/ pkt->out_len = pkt->in_len; k_sem_give(&data->device_sem); return 0; } static int crypto_smartbond_hash_handler(struct hash_ctx *ctx, struct hash_pkt *pkt, bool finish) { int ret; struct crypto_smartbond_data *data = ctx->device->data; /* * In case of framgemented data processing crypto status should be visible as busy for * as long as the last block is to be processed. */ bool is_multipart_started = (AES_HASH->CRYPTO_STATUS_REG & AES_HASH_CRYPTO_STATUS_REG_CRYPTO_WAIT_FOR_IN_Msk) && !(AES_HASH->CRYPTO_STATUS_REG & AES_HASH_CRYPTO_STATUS_REG_CRYPTO_INACTIVE_Msk); if (pkt->in_buf == NULL || (pkt->out_buf == NULL)) { LOG_ERR("Missing IN or OUT buffer declaration"); return -EIO; } k_sem_take(&data->device_sem, K_FOREVER); /* Check if this is the last block to process or more blocks will follow */ if (finish) { AES_HASH->CRYPTO_CTRL_REG &= ~(AES_HASH_CRYPTO_CTRL_REG_CRYPTO_MORE_IN_Msk); } else { AES_HASH->CRYPTO_CTRL_REG |= AES_HASH_CRYPTO_CTRL_REG_CRYPTO_MORE_IN_Msk; } /* CRYPTO_MORE_IN should be updated prior to checking for IN restrictions! */ ret = crypto_smartbond_check_in_restrictions(pkt->in_len); if (ret < 0) { LOG_ERR("Unsupported IN buffer size"); k_sem_give(&data->device_sem); return ret; } if (!is_multipart_started) { ret = crypto_smartbond_hash_set_out_len(); if (ret < 0) { LOG_ERR("Invalid OUT buffer size"); k_sem_give(&data->device_sem); return ret; } } if (!is_multipart_started) { ret = crypto_smartbond_set_in_out_buf(pkt->in_buf, pkt->out_buf, pkt->in_len); } else { /* Destination buffer is being updated as fragmented input is being processed. 
*/ ret = crypto_smartbond_set_in_out_buf(pkt->in_buf, NULL, pkt->in_len); } if (ret < 0) { LOG_ERR("Unsupported IN or OUT buffer location"); k_sem_give(&data->device_sem); return ret; } #if defined(CONFIG_CRYPTO_ASYNC) data->hash_pkt = pkt; #endif /* Start hash processing */ AES_HASH->CRYPTO_START_REG = 1; #if !defined(CONFIG_CRYPTO_ASYNC) k_sem_take(&data->sync_sem, K_FOREVER); #endif k_sem_give(&data->device_sem); return 0; } static int crypto_smartbond_cipher_begin_session(const struct device *dev, struct cipher_ctx *ctx, enum cipher_algo algo, enum cipher_mode mode, enum cipher_op op_type) { int ret; if (ctx->flags & ~(CRYPTO_HW_CAPS)) { LOG_ERR("Unsupported flag"); return -EINVAL; } if (algo != CRYPTO_CIPHER_ALGO_AES) { LOG_ERR("Unsupported cipher algo"); return -EINVAL; } if (!crypto_smartbond_lock_session(dev)) { LOG_ERR("No free session for now"); return -ENOSPC; } /* First check if the requested cryptographic algo is supported */ ret = crypto_smartbond_cipher_set_mode(mode); if (ret < 0) { LOG_ERR("Unsupported cipher mode"); crypto_smartbond_unlock_session(dev); return ret; } ret = crypto_smartbond_cipher_key_load((uint8_t *)ctx->key.bit_stream, ctx->keylen); if (ret < 0) { LOG_ERR("Invalid key length or key cannot be accessed"); crypto_smartbond_unlock_session(dev); return ret; } if (op_type == CRYPTO_CIPHER_OP_ENCRYPT) { AES_HASH->CRYPTO_CTRL_REG |= AES_HASH_CRYPTO_CTRL_REG_CRYPTO_ENCDEC_Msk; } else { AES_HASH->CRYPTO_CTRL_REG &= ~(AES_HASH_CRYPTO_CTRL_REG_CRYPTO_ENCDEC_Msk); } /* IN buffer fragmentation is not supported by the driver model */ AES_HASH->CRYPTO_CTRL_REG &= ~(AES_HASH_CRYPTO_CTRL_REG_CRYPTO_MORE_IN_Msk); switch (mode) { case CRYPTO_CIPHER_MODE_ECB: ctx->ops.block_crypt_hndlr = crypto_smartbond_cipher_ecb_handler; break; case CRYPTO_CIPHER_MODE_CBC: ctx->ops.cbc_crypt_hndlr = crypto_smartbond_cipher_cbc_handler; break; case CRYPTO_CIPHER_MODE_CTR: ctx->ops.ctr_crypt_hndlr = crypto_smartbond_cipher_ctr_handler; break; default: break; } 
ctx->drv_sessn_state = NULL; return 0; } static int crypto_smartbond_cipher_free_session(const struct device *dev, struct cipher_ctx *ctx) { ARG_UNUSED(ctx); crypto_smartbond_unlock_session(dev); return 0; } #if defined(CONFIG_CRYPTO_ASYNC) static int crypto_smartbond_cipher_set_async_callback(const struct device *dev, cipher_completion_cb cb) { struct crypto_smartbond_data *data = dev->data; data->cipher_user_cb = cb; return 0; } #endif static int crypto_smartbond_hash_begin_session(const struct device *dev, struct hash_ctx *ctx, enum hash_algo algo) { int ret; if (ctx->flags & ~(CRYPTO_HW_CAPS)) { LOG_ERR("Unsupported flag"); return -EINVAL; } if (!crypto_smartbond_lock_session(dev)) { LOG_ERR("No free session for now"); return -ENOSPC; } /* * Crypto should be disabled only if not used in other sessions. In case of failure, * developer should next free the current session. */ crypto_smartbond_set_status(true); ret = crypto_smartbond_hash_set_algo(algo); if (ret < 0) { LOG_ERR("Unsupported HASH algo"); crypto_smartbond_unlock_session(dev); return ret; } ctx->hash_hndlr = crypto_smartbond_hash_handler; ctx->drv_sessn_state = NULL; return 0; } static int crypto_smartbond_hash_free_session(const struct device *dev, struct hash_ctx *ctx) { ARG_UNUSED(ctx); crypto_smartbond_unlock_session(dev); return 0; } #if defined(CONFIG_CRYPTO_ASYNC) static int crypto_smartbond_hash_set_async_callback(const struct device *dev, hash_completion_cb cb) { struct crypto_smartbond_data *data = dev->data; data->hash_user_cb = cb; return 0; } #endif static const struct crypto_driver_api crypto_smartbond_driver_api = { .cipher_begin_session = crypto_smartbond_cipher_begin_session, .cipher_free_session = crypto_smartbond_cipher_free_session, #if defined(CONFIG_CRYPTO_ASYNC) .cipher_async_callback_set = crypto_smartbond_cipher_set_async_callback, #endif .hash_begin_session = crypto_smartbond_hash_begin_session, .hash_free_session = crypto_smartbond_hash_free_session, #if 
defined(CONFIG_CRYPTO_ASYNC) .hash_async_callback_set = crypto_smartbond_hash_set_async_callback, #endif .query_hw_caps = crypto_smartbond_query_hw_caps }; #if defined(CONFIG_PM_DEVICE) static int crypto_smartbond_pm_action(const struct device *dev, enum pm_device_action action) { int ret = 0; switch (action) { case PM_DEVICE_ACTION_SUSPEND: /* * No need to perform any actions here as the AES/HASH controller * should already be turned off. */ break; case PM_DEVICE_ACTION_RESUME: /* * No need to perform any actions here as the AES/HASH controller * will be initialized upon acquiring a cryptographic session. */ break; default: return -ENOTSUP; } return ret; } #endif static int crypto_smartbond_init(const struct device *dev) { struct crypto_smartbond_data *data = dev->data; /* Semaphore used during sessions (begin/free) */ k_sem_init(&data->session_sem, 1, 1); /* Semaphore used to employ the crypto device */ k_sem_init(&data->device_sem, 1, 1); #if !defined(CONFIG_CRYPTO_ASYNC) /* Sempahore used when sync operations are enabled */ k_sem_init(&data->sync_sem, 0, 1); #endif IRQ_CONNECT(SMARTBOND_IRQN, SMARTBOND_IRQ_PRIO, smartbond_crypto_isr, DEVICE_DT_INST_GET(0), 0); /* Controller should be initialized once a crypyographic session is requested */ crypto_smartbond_set_status(false); return 0; } /* * There is only one instance integrated on the SoC. Just in case that assumption becomes invalid * in the future, we use a BUILD_ASSERT(). */ #define SMARTBOND_CRYPTO_INIT(inst) \ BUILD_ASSERT((inst) == 0, \ "multiple instances are not supported"); \ \ PM_DEVICE_DT_INST_DEFINE(inst, crypto_smartbond_pm_action); \ \ static struct crypto_smartbond_data crypto_smartbond_data_##inst; \ \ DEVICE_DT_INST_DEFINE(0, \ crypto_smartbond_init, \ PM_DEVICE_DT_INST_GET(inst), \ &crypto_smartbond_data_##inst, NULL, \ POST_KERNEL, \ CONFIG_CRYPTO_INIT_PRIORITY, \ &crypto_smartbond_driver_api); DT_INST_FOREACH_STATUS_OKAY(SMARTBOND_CRYPTO_INIT) ```
/content/code_sandbox/drivers/crypto/crypto_smartbond.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
7,608
```unknown # Smartbond Cryptographic Accelerator configuration options menuconfig CRYPTO_SMARTBOND bool "Smartbond Cryptographic Accelerator driver" depends on DT_HAS_RENESAS_SMARTBOND_CRYPTO_ENABLED default y help Enable Smartbond Cryptographic Accelerator driver. config CRYPTO_ASYNC bool "Support ASYNC crypto operations." depends on CRYPTO_SMARTBOND help Enable ASYNC crypto operations. ```
/content/code_sandbox/drivers/crypto/Kconfig.smartbond
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
92
```c /* * */ #define DT_DRV_COMPAT nuvoton_npcx_sha #include <errno.h> #include <zephyr/kernel.h> #include <zephyr/crypto/crypto.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(sha_npcx, CONFIG_CRYPTO_LOG_LEVEL); #include "soc_ncl.h" #define NPCX_HASH_CAPS_SUPPORT (CAP_SEPARATE_IO_BUFS | CAP_SYNC_OPS) #define NPCX_SHA256_HANDLE_SIZE DT_INST_PROP(0, context_buffer_size) #define NPCX_SHA_MAX_SESSION 1 /* The following table holds the function pointer for each SHA API in NPCX ROM. */ struct npcx_ncl_sha { /* Get the SHA context size required by SHA APIs. */ uint32_t (*get_context_size)(void); /* Initial SHA context. */ enum ncl_status (*init_context)(void *ctx); /* Finalize SHA context. */ enum ncl_status (*finalize_context)(void *ctx); /* Initiate the SHA hardware module and setups needed parameters. */ enum ncl_status (*init)(void *ctx); /* * Prepare the context buffer for a SHA calculation - by loading the * initial SHA-256/384/512 parameters. */ enum ncl_status (*start)(void *ctx, enum ncl_sha_type type); /* * Updates the SHA calculation with the additional data. When the * function returns, the hardware and memory buffer shall be ready to * accept new data * buffers for SHA calculation and changes to the data * in data buffer should no longer effect the SHA calculation. */ enum ncl_status (*update)(void *ctx, const uint8_t *data, uint32_t Len); /* Return the SHA result (digest.) */ enum ncl_status (*finish)(void *ctx, uint8_t *hashDigest); /* Perform a complete SHA calculation */ enum ncl_status (*calc)(void *ctx, enum ncl_sha_type type, const uint8_t *data, uint32_t Len, uint8_t *hashDigest); /* Power on/off the SHA module. */ enum ncl_status (*power)(void *ctx, uint8_t enable); /* Reset the SHA hardware and terminate any in-progress operations. */ enum ncl_status (*reset)(void *ctx); }; /* The start address of the SHA API table. 
*/ #define NPCX_NCL_SHA ((const struct npcx_ncl_sha *)DT_INST_REG_ADDR(0)) struct npcx_sha_context { uint8_t handle[NPCX_SHA256_HANDLE_SIZE]; } __aligned(4); struct npcx_sha_session { struct npcx_sha_context npcx_sha_ctx; enum hash_algo algo; bool in_use; }; struct npcx_sha_session npcx_sessions[NPCX_SHA_MAX_SESSION]; static int npcx_get_unused_session_index(void) { int i; for (i = 0; i < NPCX_SHA_MAX_SESSION; i++) { if (!npcx_sessions[i].in_use) { npcx_sessions[i].in_use = true; return i; } } return -1; } static int npcx_sha_compute(struct hash_ctx *ctx, struct hash_pkt *pkt, bool finish) { enum ncl_status ret; struct npcx_sha_session *npcx_session = ctx->drv_sessn_state; struct npcx_sha_context *npcx_ctx = &npcx_session->npcx_sha_ctx; enum ncl_sha_type sha_type; switch (npcx_session->algo) { case CRYPTO_HASH_ALGO_SHA256: sha_type = NCL_SHA_TYPE_2_256; break; case CRYPTO_HASH_ALGO_SHA384: sha_type = NCL_SHA_TYPE_2_384; break; case CRYPTO_HASH_ALGO_SHA512: sha_type = NCL_SHA_TYPE_2_512; break; default: LOG_ERR("Unexpected algo: %d", npcx_session->algo); return -EINVAL; } if (!ctx->started) { ret = NPCX_NCL_SHA->start(npcx_ctx->handle, sha_type); if (ret != NCL_STATUS_OK) { LOG_ERR("Could not compute the hash, err:%d", ret); return -EINVAL; } ctx->started = true; } if (pkt->in_len != 0) { ret = NPCX_NCL_SHA->update(npcx_ctx->handle, pkt->in_buf, pkt->in_len); if (ret != NCL_STATUS_OK) { LOG_ERR("Could not update the hash, err:%d", ret); ctx->started = false; return -EINVAL; } } if (finish) { ctx->started = false; ret = NPCX_NCL_SHA->finish(npcx_ctx->handle, pkt->out_buf); if (ret != NCL_STATUS_OK) { LOG_ERR("Could not compute the hash, err:%d", ret); return -EINVAL; } } return 0; } static int npcx_hash_session_setup(const struct device *dev, struct hash_ctx *ctx, enum hash_algo algo) { int ctx_idx; struct npcx_sha_context *npcx_ctx; if (ctx->flags & ~(NPCX_HASH_CAPS_SUPPORT)) { LOG_ERR("Unsupported flag"); return -EINVAL; } if ((algo != CRYPTO_HASH_ALGO_SHA256) && 
(algo != CRYPTO_HASH_ALGO_SHA384) && (algo != CRYPTO_HASH_ALGO_SHA512)) { LOG_ERR("Unsupported algo: %d", algo); return -EINVAL; } ctx_idx = npcx_get_unused_session_index(); if (ctx_idx < 0) { LOG_ERR("No free session for now"); return -ENOSPC; } npcx_sessions[ctx_idx].algo = algo; ctx->drv_sessn_state = &npcx_sessions[ctx_idx]; ctx->started = false; ctx->hash_hndlr = npcx_sha_compute; npcx_ctx = &npcx_sessions[ctx_idx].npcx_sha_ctx; NPCX_NCL_SHA->init_context(npcx_ctx->handle); NPCX_NCL_SHA->power(npcx_ctx->handle, 1); NPCX_NCL_SHA->init(npcx_ctx->handle); NPCX_NCL_SHA->reset(npcx_ctx->handle); return 0; } static int npcx_hash_session_free(const struct device *dev, struct hash_ctx *ctx) { struct npcx_sha_session *npcx_session = ctx->drv_sessn_state; struct npcx_sha_context *npcx_ctx = &npcx_session->npcx_sha_ctx; NPCX_NCL_SHA->reset(npcx_ctx->handle); NPCX_NCL_SHA->power(npcx_ctx->handle, 0); NPCX_NCL_SHA->finalize_context(npcx_ctx->handle); npcx_session->in_use = false; return 0; } static int npcx_query_caps(const struct device *dev) { return NPCX_HASH_CAPS_SUPPORT; } static int npcx_hash_init(const struct device *dev) { uint32_t handle_size_required; handle_size_required = NPCX_NCL_SHA->get_context_size(); if (handle_size_required != NPCX_SHA256_HANDLE_SIZE) { LOG_ERR("Pre-alloc buf size doesn't match required buf size (%d)", handle_size_required); return -ENOSR; } return 0; } static const struct crypto_driver_api npcx_crypto_api = { .hash_begin_session = npcx_hash_session_setup, .hash_free_session = npcx_hash_session_free, .query_hw_caps = npcx_query_caps, }; DEVICE_DT_INST_DEFINE(0, npcx_hash_init, NULL, NULL, NULL, POST_KERNEL, CONFIG_CRYPTO_INIT_PRIORITY, &npcx_crypto_api); BUILD_ASSERT(DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) == 1, "only one 'nuvoton,npcx-sha' compatible node can be supported"); ```
/content/code_sandbox/drivers/crypto/crypto_npcx_sha.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,710
```unknown config CRYPTO_MCHP_XEC_SYMCR bool "Microchip XEC symmetric crypto (AES/Hash) driver" default y depends on DT_HAS_MICROCHIP_XEC_SYMCR_ENABLED help Enable Microchip XEC symmetric crypto (AES/Hash) driver. Symmetric crypto provides a single hardware interface to AES and hash engines. ```
/content/code_sandbox/drivers/crypto/Kconfig.xec
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
80
```unknown # NPCX SHA driver configuration options config CRYPTO_NPCX_SHA bool "NPCX SHA driver" default y depends on DT_HAS_NUVOTON_NPCX_SHA_ENABLED help Enable NPCX SHA driver. ```
/content/code_sandbox/drivers/crypto/Kconfig.npcx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
49
```c /*
 *
 */

#define DT_DRV_COMPAT ite_it8xxx2_sha_v2

#include <zephyr/kernel.h>
#include <zephyr/crypto/crypto.h>
#include <zephyr/sys/byteorder.h>
#include <chip_chipregs.h>
#include <errno.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(sha_it8xxx2, CONFIG_CRYPTO_LOG_LEVEL);

BUILD_ASSERT(DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) == 1,
	     "support only one sha compatible node");

#define IT8XXX2_SHA_REGS_BASE DT_REG_ADDR(DT_NODELABEL(sha0))

/* 0x00: SHA Control Register */
#define IT8XXX2_REG_SHACR (0x00)
#define IT8XXX2_SEL1SHA1 BIT(6)
#define IT8XXX2_SELSHA2ALL (BIT(5) | BIT(4))
#define IT8XXX2_SHAWB BIT(2)
#define IT8XXX2_SHAINI BIT(1)
#define IT8XXX2_SHAEXE BIT(0)
/* 0x01: SHA Status Register */
#define IT8XXX2_REG_SHASR (0x01)
#define IT8XXX2_SHAIE BIT(3)
#define IT8XXX2_SHAIS BIT(2)
#define IT8XXX2_SHABUSY BIT(0)
/* 0x02: SHA Execution Counter Register */
#define IT8XXX2_REG_SHAECR (0x02)
#define IT8XXX2_SHAEXEC_64Byte 0x0
#define IT8XXX2_SHAEXEC_512Byte 0x7
#define IT8XXX2_SHAEXEC_1KByte 0xf
/* 0x03: SHA DLM Base Address 0 Register */
#define IT8XXX2_REG_SHADBA0R (0x03)
/* 0x04: SHA DLM Base Address 1 Register */
#define IT8XXX2_REG_SHADBA1R (0x04)

#define SHA_SHA256_HASH_LEN 32
#define SHA_SHA256_BLOCK_LEN 64
#define SHA_SHA256_SRAM_BUF 1024
#define SHA_SHA256_HASH_LEN_WORDS (SHA_SHA256_HASH_LEN / sizeof(uint32_t))
#define SHA_SHA256_BLOCK_LEN_WORDS (SHA_SHA256_BLOCK_LEN / sizeof(uint32_t))
#define SHA_SHA256_SRAM_BUF_WORDS (SHA_SHA256_SRAM_BUF / sizeof(uint32_t))
#define SHA_SHA256_CALCULATE_TIMEOUT_US 150
#define SHA_SHA256_WRITE_BACK_TIMEOUT_US 45
#define SHA_SHA256_WAIT_NEXT_CLOCK_TIME_US 15

/*
 * This struct is used by the hardware and must be stored in RAM first 4k-byte
 * and aligned on a 256-byte boundary.
 */
struct chip_sha256_ctx {
	union {
		/* SHA data buffer */
		uint32_t w_sha[SHA_SHA256_SRAM_BUF_WORDS];
		uint8_t w_input[SHA_SHA256_SRAM_BUF];
	};
	/* H[0] ~ H[7] */
	uint32_t h[SHA_SHA256_HASH_LEN_WORDS];
	/* Nonzero: next engine run performs the SHA-256 initial round. */
	uint32_t sha_init;
	/* Fill level (bytes) of w_input awaiting hardware processing. */
	uint32_t w_input_index;
	/* Total message length fed so far, for the final length padding. */
	uint32_t total_len;
} __aligned(256);

/* Single hardware context, placed in the dedicated SHA RAM section. */
Z_GENERIC_SECTION(.__sha256_ram_block) struct chip_sha256_ctx chip_ctx;

/*
 * Reset the software context and point the SHA engine's DLM base address
 * registers at chip_ctx. @init_k requests the initial-round on next run.
 */
static void it8xxx2_sha256_init(bool init_k)
{
	chip_ctx.sha_init = init_k;
	chip_ctx.w_input_index = 0;
	chip_ctx.total_len = 0;

	/* Set DLM address for input data */
	sys_write8(((uint32_t)&chip_ctx) & 0xc0,
		   IT8XXX2_SHA_REGS_BASE + IT8XXX2_REG_SHADBA0R);
	sys_write8(((uint32_t)&chip_ctx) >> 8,
		   IT8XXX2_SHA_REGS_BASE + IT8XXX2_REG_SHADBA1R);
}

/*
 * Kick the SHA engine over the data staged in chip_ctx and poll for
 * completion. Returns 0 on success, -ETIMEDOUT (after re-initializing the
 * context) if the hardware never signals done.
 */
static int it8xxx2_sha256_module_calculation(void)
{
	struct gctrl_it8xxx2_regs *const gctrl_regs = GCTRL_IT8XXX2_REGS_BASE;
	uint32_t key, count;
	uint8_t sha_ctrl;
	bool timeout = true;

	sha_ctrl = sys_read8(IT8XXX2_SHA_REGS_BASE + IT8XXX2_REG_SHACR);

	if (chip_ctx.sha_init) {
		sha_ctrl |= (IT8XXX2_SHAINI | IT8XXX2_SHAEXE);
		chip_ctx.sha_init = 0;
	} else {
		sha_ctrl |= IT8XXX2_SHAEXE;
	}

	/*
	 * Global interrupt is disabled because the CPU cannot access memory
	 * via the DLM (Data Local Memory) bus while HW module is computing
	 * hash.
	 */
	key = irq_lock();
	/* Crypto use SRAM */
	gctrl_regs->GCTRL_PMER3 |= IT8XXX2_GCTRL_SRAM_CRYPTO_USED;
	sys_write8(sha_ctrl, IT8XXX2_SHA_REGS_BASE + IT8XXX2_REG_SHACR);
	/*
	 * HW 64 bytes data calculation ~= 4us;
	 * HW 1024 bytes data calculation ~= 66us.
	 */
	for (count = 0; count <= (SHA_SHA256_CALCULATE_TIMEOUT_US /
				  SHA_SHA256_WAIT_NEXT_CLOCK_TIME_US); count++) {
		/* Delay 15us */
		gctrl_regs->GCTRL_WNCKR = IT8XXX2_GCTRL_WN65K;

		if ((sys_read8(IT8XXX2_SHA_REGS_BASE + IT8XXX2_REG_SHASR) &
		     IT8XXX2_SHAIS)) {
			timeout = 0;
			break;
		}
	}

	/* Acknowledge completion status. */
	sys_write8(IT8XXX2_SHAIS, IT8XXX2_SHA_REGS_BASE + IT8XXX2_REG_SHASR);
	/* CPU use SRAM */
	gctrl_regs->GCTRL_PMER3 &= ~IT8XXX2_GCTRL_SRAM_CRYPTO_USED;
	/* NOTE(review): bare read presumably flushes the write — confirm. */
	gctrl_regs->GCTRL_PMER3;
	irq_unlock(key);

	if (timeout) {
		LOG_ERR("HW execute sha256 calculation timeout");
		it8xxx2_sha256_init(true);
		return -ETIMEDOUT;
	}

	chip_ctx.w_input_index = 0;

	return 0;
}

/*
 * hash_ctx handler: stream pkt->in_buf through the engine in 1 KiB / 64 B
 * chunks, buffering any tail; when @finish is set, apply SHA-256 padding,
 * run the final rounds, and copy the digest into pkt->out_buf.
 */
static int it8xxx2_hash_handler(struct hash_ctx *ctx, struct hash_pkt *pkt,
				bool finish)
{
	struct gctrl_it8xxx2_regs *const gctrl_regs = GCTRL_IT8XXX2_REGS_BASE;
	uint32_t rem_len = pkt->in_len;
	uint32_t in_buf_idx = 0;
	uint32_t i, key, count;
	uint8_t sha_ctrl;
	bool timeout = true;
	int ret;

	while (rem_len) {
		/* Data length >= 1KB */
		if (rem_len >= SHA_SHA256_SRAM_BUF) {
			rem_len = rem_len - SHA_SHA256_SRAM_BUF;

			for (i = 0; i < SHA_SHA256_SRAM_BUF; i++) {
				chip_ctx.w_input[chip_ctx.w_input_index++] =
					pkt->in_buf[in_buf_idx++];
			}

			/* HW automatically load 1KB data from DLM */
			sys_write8(IT8XXX2_SHAEXEC_1KByte,
				   IT8XXX2_SHA_REGS_BASE + IT8XXX2_REG_SHAECR);
			ret = it8xxx2_sha256_module_calculation();

			if (ret) {
				return ret;
			}
		} else {
			/* 0 <= Data length < 1KB */
			while (rem_len) {
				rem_len--;
				chip_ctx.w_input[chip_ctx.w_input_index++] =
					pkt->in_buf[in_buf_idx++];

				/*
				 * If fill full 64byte then execute HW calculation.
				 * If not, will execute in later finish block.
				 */
				if (chip_ctx.w_input_index >= SHA_SHA256_BLOCK_LEN) {
					/* HW automatically load 64Bytes data from DLM */
					sys_write8(IT8XXX2_SHAEXEC_64Byte,
						   IT8XXX2_SHA_REGS_BASE + IT8XXX2_REG_SHAECR);
					ret = it8xxx2_sha256_module_calculation();

					if (ret) {
						return ret;
					}
				}
			}
		}
	}

	chip_ctx.total_len += pkt->in_len;

	if (finish) {
		uint32_t *ob_ptr = (uint32_t *)pkt->out_buf;

		/* Pre-processing (Padding) */
		memset(&chip_ctx.w_input[chip_ctx.w_input_index],
		       0, SHA_SHA256_BLOCK_LEN - chip_ctx.w_input_index);
		chip_ctx.w_input[chip_ctx.w_input_index] = 0x80;

		/*
		 * Handles the boundary case of rest data:
		 * Because the last eight bytes are bit length field of sha256 rule.
		 * If the data index >= 56, it needs to trigger HW to calculate,
		 * then fill 0 data and the last eight bytes bit length, and calculate again.
		 */
		if (chip_ctx.w_input_index >= 56) {
			/* HW automatically load 64Bytes data from DLM */
			sys_write8(IT8XXX2_SHAEXEC_64Byte,
				   IT8XXX2_SHA_REGS_BASE + IT8XXX2_REG_SHAECR);
			ret = it8xxx2_sha256_module_calculation();

			if (ret) {
				return ret;
			}

			memset(&chip_ctx.w_input[chip_ctx.w_input_index],
			       0, SHA_SHA256_BLOCK_LEN - chip_ctx.w_input_index);
		}

		/*
		 * Since input data (big-endian) are copied 1byte by 1byte to
		 * it8xxx2 memory (little-endian), so the bit length needs to
		 * be transformed into big-endian format and then write to memory.
		 */
		chip_ctx.w_sha[15] = sys_cpu_to_be32(chip_ctx.total_len * 8);

		/* HW automatically load 64Bytes data from DLM */
		sys_write8(IT8XXX2_SHAEXEC_64Byte,
			   IT8XXX2_SHA_REGS_BASE + IT8XXX2_REG_SHAECR);
		ret = it8xxx2_sha256_module_calculation();

		if (ret) {
			return ret;
		}

		/* HW write back the hash result to DLM */
		/* Set DLM address for input data */
		sys_write8(((uint32_t)&chip_ctx.h) & 0xc0,
			   IT8XXX2_SHA_REGS_BASE + IT8XXX2_REG_SHADBA0R);
		sys_write8(((uint32_t)&chip_ctx.h) >> 8,
			   IT8XXX2_SHA_REGS_BASE + IT8XXX2_REG_SHADBA1R);

		key = irq_lock();
		/* Crypto use SRAM */
		gctrl_regs->GCTRL_PMER3 |= IT8XXX2_GCTRL_SRAM_CRYPTO_USED;
		sha_ctrl = sys_read8(IT8XXX2_SHA_REGS_BASE + IT8XXX2_REG_SHACR);
		sys_write8(sha_ctrl | IT8XXX2_SHAWB,
			   IT8XXX2_SHA_REGS_BASE + IT8XXX2_REG_SHACR);

		/* HW write back the hash result to DLM ~= 1us */
		for (count = 0; count <= (SHA_SHA256_WRITE_BACK_TIMEOUT_US /
					  SHA_SHA256_WAIT_NEXT_CLOCK_TIME_US); count++) {
			/* Delay 15us */
			gctrl_regs->GCTRL_WNCKR = IT8XXX2_GCTRL_WN65K;

			if ((sys_read8(IT8XXX2_SHA_REGS_BASE + IT8XXX2_REG_SHASR) &
			     IT8XXX2_SHAIS)) {
				timeout = 0;
				break;
			}
		}

		sys_write8(IT8XXX2_SHAIS, IT8XXX2_SHA_REGS_BASE + IT8XXX2_REG_SHASR);
		/* CPU use SRAM */
		gctrl_regs->GCTRL_PMER3 &= ~IT8XXX2_GCTRL_SRAM_CRYPTO_USED;
		gctrl_regs->GCTRL_PMER3;
		irq_unlock(key);

		if (timeout) {
			LOG_ERR("HW write back hash timeout");
			it8xxx2_sha256_init(true);
			return -ETIMEDOUT;
		}

		/* Copy the digest words out of the hardware context. */
		for (i = 0; i < SHA_SHA256_HASH_LEN_WORDS; i++) {
			ob_ptr[i] = chip_ctx.h[i];
		}

		it8xxx2_sha256_init(true);
	}

	return 0;
}

/* Free a session by resetting the shared hardware context. */
static int it8xxx2_hash_session_free(const struct device *dev,
				     struct hash_ctx *ctx)
{
	it8xxx2_sha256_init(true);

	return 0;
}

/* Capability flags supported by this driver. */
static inline int it8xxx2_query_hw_caps(const struct device *dev)
{
	return (CAP_SEPARATE_IO_BUFS | CAP_SYNC_OPS);
}

/*
 * Begin a hash session: only SHA-256 and the advertised capability flags
 * are accepted; installs it8xxx2_hash_handler as the session handler.
 */
static int it8xxx2_hash_begin_session(const struct device *dev,
				      struct hash_ctx *ctx, enum hash_algo algo)
{
	if (algo != CRYPTO_HASH_ALGO_SHA256) {
		LOG_ERR("Unsupported algorithm");
		return -EINVAL;
	}

	if (ctx->flags & ~(it8xxx2_query_hw_caps(dev))) {
		LOG_ERR("Unsupported flag");
		return -EINVAL;
	}

	it8xxx2_sha256_init(true);

	ctx->hash_hndlr = it8xxx2_hash_handler;

	return 0;
}

/* Driver init: give SRAM to the CPU, reset context, select SHA-256, mask IRQ. */
static int it8xxx2_sha_init(const struct device *dev)
{
	struct gctrl_it8xxx2_regs *const gctrl_regs = GCTRL_IT8XXX2_REGS_BASE;

	/* CPU use SRAM */
	gctrl_regs->GCTRL_PMER3 &= ~IT8XXX2_GCTRL_SRAM_CRYPTO_USED;
	gctrl_regs->GCTRL_PMER3;

	it8xxx2_sha256_init(true);

	/* Select SHA-2 Family, SHA-256 */
	sys_write8(0, IT8XXX2_SHA_REGS_BASE + IT8XXX2_REG_SHACR);

	/* SHA interrupt disable */
	sys_write8(0, IT8XXX2_SHA_REGS_BASE + IT8XXX2_REG_SHASR);

	return 0;
}

static const struct crypto_driver_api it8xxx2_crypto_api = {
	.hash_begin_session = it8xxx2_hash_begin_session,
	.hash_free_session = it8xxx2_hash_session_free,
	.query_hw_caps = it8xxx2_query_hw_caps,
};

DEVICE_DT_INST_DEFINE(0, &it8xxx2_sha_init, NULL, NULL, NULL, POST_KERNEL,
		      CONFIG_CRYPTO_INIT_PRIORITY, &it8xxx2_crypto_api);
```
/content/code_sandbox/drivers/crypto/crypto_it8xxx2_sha_v2.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,121
```c /*
 *
 */

#include <string.h>
#include <zephyr/crypto/crypto.h>
#include <zephyr/logging/log.h>
#include <hal/nrf_ecb.h>

#define DT_DRV_COMPAT nordic_nrf_ecb

#define ECB_AES_KEY_SIZE 16
#define ECB_AES_BLOCK_SIZE 16

LOG_MODULE_REGISTER(crypto_nrf_ecb, CONFIG_CRYPTO_LOG_LEVEL);

/*
 * DMA descriptor consumed by the ECB peripheral: key, plaintext and
 * ciphertext blocks, in the order the hardware expects.
 */
struct ecb_data {
	uint8_t key[ECB_AES_KEY_SIZE];
	uint8_t cleartext[ECB_AES_BLOCK_SIZE];
	uint8_t ciphertext[ECB_AES_BLOCK_SIZE];
};

/* Single-session driver state (the peripheral has one context). */
struct nrf_ecb_drv_state {
	struct ecb_data data;
	bool in_use;
};

static struct nrf_ecb_drv_state drv_state;

/*
 * Encrypt exactly one 16-byte block with the hardware ECB engine.
 * Busy-waits for END/ERROR events; returns 0 on success, -EINVAL for bad
 * buffer sizes, -EIO on a hardware error event.
 */
static int do_ecb_encrypt(struct cipher_ctx *ctx, struct cipher_pkt *pkt)
{
	ARG_UNUSED(ctx);

	if (pkt->in_len != ECB_AES_BLOCK_SIZE) {
		LOG_ERR("only 16-byte blocks are supported");
		return -EINVAL;
	}
	if (pkt->out_buf_max < pkt->in_len) {
		LOG_ERR("output buffer too small");
		return -EINVAL;
	}

	/* Copy only when the caller isn't already using the DMA buffer. */
	if (pkt->in_buf != drv_state.data.cleartext) {
		memcpy(drv_state.data.cleartext, pkt->in_buf,
		       ECB_AES_BLOCK_SIZE);
	}

	nrf_ecb_event_clear(NRF_ECB, NRF_ECB_EVENT_ENDECB);
	nrf_ecb_event_clear(NRF_ECB, NRF_ECB_EVENT_ERRORECB);

	nrf_ecb_task_trigger(NRF_ECB, NRF_ECB_TASK_STARTECB);

	/* Poll until the hardware reports either completion or error. */
	while (!(nrf_ecb_event_check(NRF_ECB, NRF_ECB_EVENT_ENDECB) ||
		 nrf_ecb_event_check(NRF_ECB, NRF_ECB_EVENT_ERRORECB))) {
	}

	if (nrf_ecb_event_check(NRF_ECB, NRF_ECB_EVENT_ERRORECB)) {
		LOG_ERR("ECB operation error");
		return -EIO;
	}

	if (pkt->out_buf != drv_state.data.ciphertext) {
		memcpy(pkt->out_buf, drv_state.data.ciphertext,
		       ECB_AES_BLOCK_SIZE);
	}

	pkt->out_len = pkt->in_len;

	return 0;
}

/* Driver init: point the peripheral at the DMA descriptor, mark it free. */
static int nrf_ecb_driver_init(const struct device *dev)
{
	ARG_UNUSED(dev);

	nrf_ecb_data_pointer_set(NRF_ECB, &drv_state.data);

	drv_state.in_use = false;

	return 0;
}

/* Capability flags supported by this driver. */
static int nrf_ecb_query_caps(const struct device *dev)
{
	ARG_UNUSED(dev);

	return (CAP_RAW_KEY | CAP_SEPARATE_IO_BUFS | CAP_SYNC_OPS);
}

/*
 * Begin a cipher session. Only synchronous 128-bit AES-ECB encryption is
 * supported; the key is copied into the hardware DMA descriptor here.
 * Returns 0, -EINVAL on unsupported parameters, -EBUSY if already in use.
 */
static int nrf_ecb_session_setup(const struct device *dev,
				 struct cipher_ctx *ctx,
				 enum cipher_algo algo, enum cipher_mode mode,
				 enum cipher_op op_type)
{
	ARG_UNUSED(dev);

	if ((algo != CRYPTO_CIPHER_ALGO_AES) ||
	    !(ctx->flags & CAP_SYNC_OPS) ||
	    (ctx->keylen != ECB_AES_KEY_SIZE) ||
	    (op_type != CRYPTO_CIPHER_OP_ENCRYPT) ||
	    (mode != CRYPTO_CIPHER_MODE_ECB)) {
		LOG_ERR("This driver only supports 128-bit AES ECB encryption"
			" in synchronous mode");
		return -EINVAL;
	}

	if (ctx->key.bit_stream == NULL) {
		LOG_ERR("No key provided");
		return -EINVAL;
	}

	if (drv_state.in_use) {
		LOG_ERR("Peripheral in use");
		return -EBUSY;
	}

	drv_state.in_use = true;

	ctx->ops.block_crypt_hndlr = do_ecb_encrypt;
	ctx->ops.cipher_mode = mode;

	if (ctx->key.bit_stream != drv_state.data.key) {
		memcpy(drv_state.data.key, ctx->key.bit_stream,
		       ECB_AES_KEY_SIZE);
	}

	return 0;
}

/* Release the single hardware session. */
static int nrf_ecb_session_free(const struct device *dev,
				struct cipher_ctx *sessn)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(sessn);

	drv_state.in_use = false;

	return 0;
}

static const struct crypto_driver_api crypto_enc_funcs = {
	.cipher_begin_session = nrf_ecb_session_setup,
	.cipher_free_session = nrf_ecb_session_free,
	.cipher_async_callback_set = NULL,
	.query_hw_caps = nrf_ecb_query_caps,
};

DEVICE_DT_INST_DEFINE(0, nrf_ecb_driver_init, NULL, NULL, NULL, POST_KERNEL,
		      CONFIG_CRYPTO_INIT_PRIORITY, &crypto_enc_funcs);
```
/content/code_sandbox/drivers/crypto/crypto_nrf_ecb.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
963
```objective-c /*
 *
 */

/**
 * @file
 * @brief tinycrypt driver context info
 *
 * The file defines the structure which is used to store per session context
 * by the driver. Placed in common location so that crypto applications
 * can allocate memory for the required number of sessions, to free driver
 * from dynamic memory allocation.
 */

#ifndef ZEPHYR_DRIVERS_CRYPTO_CRYPTO_TC_SHIM_PRIV_H_
#define ZEPHYR_DRIVERS_CRYPTO_CRYPTO_TC_SHIM_PRIV_H_

#include <tinycrypt/aes.h>

/* Per-session driver state for the TinyCrypt shim. */
struct tc_shim_drv_state {
	/* Nonzero while this session slot is allocated to a caller. */
	int in_use;
	/* Expanded AES key schedule kept for the lifetime of the session. */
	struct tc_aes_key_sched_struct session_key;
};

#endif  /* ZEPHYR_DRIVERS_CRYPTO_CRYPTO_TC_SHIM_PRIV_H_ */ ```
/content/code_sandbox/drivers/crypto/crypto_tc_shim_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
150
```unknown # NRF AES ECB configuration options config CRYPTO_NRF_ECB bool "nRF AES electronic codebook mode encryption" default y depends on DT_HAS_NORDIC_NRF_ECB_ENABLED # Bluetooth controller uses the ECB peripheral directly # (see subsys/bluetooth/controller/ll_sw/nordic/hal/nrf5/ecb.c), # hence this driver cannot be enabled together with it. depends on !BT_CTLR help Enable nRF HAL-based AES ECB encryption driver ```
/content/code_sandbox/drivers/crypto/Kconfig.nrf_ecb
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
111
```unknown config CRYPTO_MCUX_DCP bool "NXP Data Co-Processor (DCP) driver" default y depends on HAS_MCUX_CACHE depends on DT_HAS_NXP_MCUX_DCP_ENABLED select NOCACHE_MEMORY if ARCH_HAS_NOCACHE_MEMORY_SUPPORT select CACHE_MANAGEMENT if DCACHE help Enable NXP Data Co-Processor (DCP) driver. config CRYPTO_MCUX_DCP_MAX_SESSION int "Maximum number of sessions NXP DCP crypto driver can handle" range 1 4 default 2 depends on CRYPTO_MCUX_DCP help This can be used to tweak the amount of sessions the driver can handle in parallel. ```
/content/code_sandbox/drivers/crypto/Kconfig.mcux_dcp
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
152
```unknown # Crypto configuration options # # CRYPTO options # menuconfig CRYPTO bool "Crypto drivers" if CRYPTO config CRYPTO_INIT_PRIORITY int "Crypto devices init priority" default 90 help Crypto devices initialization priority. module = CRYPTO module-str = CRYPTO source "subsys/logging/Kconfig.template.log_config" config CRYPTO_TINYCRYPT_SHIM bool "TinyCrypt shim driver [EXPERIMENTAL]" select TINYCRYPT select TINYCRYPT_AES select TINYCRYPT_AES_CBC select TINYCRYPT_AES_CTR select TINYCRYPT_AES_CCM select TINYCRYPT_AES_CMAC select EXPERIMENTAL help Enable TinyCrypt shim layer compliant with crypto APIs. config CRYPTO_TINYCRYPT_SHIM_MAX_SESSION int "Maximum of sessions TinyCrypt shim driver can handle" default 2 depends on CRYPTO_TINYCRYPT_SHIM help This can be used to tweak the amount of sessions the driver can handle in parallel. config CRYPTO_TINYCRYPT_SHIM_DRV_NAME string "Device name for TinyCrypt Pseudo device" default "CRYPTO_TC" depends on CRYPTO_TINYCRYPT_SHIM help Device name for TinyCrypt Pseudo device. config CRYPTO_MBEDTLS_SHIM bool "MbedTLS shim driver [EXPERIMENTAL]" select MBEDTLS select MBEDTLS_ENABLE_HEAP select MBEDTLS_SHA512 select MBEDTLS_CIPHER_AES_ENABLED select EXPERIMENTAL help Enable mbedTLS shim layer compliant with crypto APIs. You will need to fill in a relevant value to CONFIG_MBEDTLS_HEAP_SIZE. config CRYPTO_MBEDTLS_SHIM_DRV_NAME string "Device name for mbedTLS Pseudo device" default "CRYPTO_MTLS" depends on CRYPTO_MBEDTLS_SHIM help Device name for mbedTLS Pseudo device. config CRYPTO_MBEDTLS_SHIM_MAX_SESSION int "Maximum of sessions mbedTLS shim driver can handle" default 2 depends on CRYPTO_MBEDTLS_SHIM help This can be used to tweak the amount of sessions the driver can handle in parallel. 
source "drivers/crypto/Kconfig.ataes132a" source "drivers/crypto/Kconfig.stm32" source "drivers/crypto/Kconfig.nrf_ecb" source "drivers/crypto/Kconfig.intel" source "drivers/crypto/Kconfig.npcx" source "drivers/crypto/Kconfig.xec" source "drivers/crypto/Kconfig.it8xxx2" source "drivers/crypto/Kconfig.mcux_dcp" source "drivers/crypto/Kconfig.smartbond" endif # CRYPTO ```
/content/code_sandbox/drivers/crypto/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
565
```objective-c /*
 *
 */

#ifndef ZEPHYR_DRIVERS_CRYPTO_CRYPTO_INTEL_SHA_REGISTERS_PRIV_H_
#define ZEPHYR_DRIVERS_CRYPTO_CRYPTO_INTEL_SHA_REGISTERS_PRIV_H_

#include <stdio.h>

/* Register layouts below are word/bitfield overlays: 'full' gives raw
 * 32-bit access, 'part' names the individual fields. Bitfield order must
 * not be changed — it mirrors the hardware register layout.
 */

/* Processing-in-buffer control/status register. */
union PIBCS {
	uint32_t full;
	struct {
		uint32_t rsvd2 : 3;
		uint32_t bscie : 1;
		uint32_t rsvd4 : 1;
		uint32_t rsvd5 : 1;
		uint32_t teie : 1;
		uint32_t rsvd7 : 1;
		uint32_t bne : 1;
		uint32_t bf : 1;
		uint32_t rsvd10 : 1;
		uint32_t bsc : 1;
		uint32_t rsvd13 : 2;
		uint32_t te : 1;
		uint32_t rsvd15 : 1;
		uint32_t cs : 7;
		uint32_t fwcb : 1;
		uint32_t rsvd25 : 2;
		uint32_t peen : 1;
		uint32_t rsvd31 : 5;
	} part;
};

/* Input buffer base address. */
union PIBBA {
	uint32_t full;
	struct {
		uint32_t rsvd6 : 7;
		uint32_t ba : 17;
		uint32_t rsvd31 : 8;
	} part;
};

/* Input buffer size. */
union PIBS {
	uint32_t full;
	struct {
		uint32_t rsvd3 : 4;
		uint32_t bs : 20;
		uint32_t rsvd31 : 8;
	} part;
};

/* Input buffer first-page index / write pointer. */
union PIBFPI {
	uint32_t full;
	struct {
		uint32_t wp : 24;
		uint32_t rsvd31 : 8;
	} part;
};

/* Input buffer read pointer. */
union PIBRP {
	uint32_t full;
	struct {
		uint32_t rp : 24;
		uint32_t rsvd31 : 8;
	} part;
};

/* Input buffer write pointer. */
union PIBWP {
	uint32_t full;
	struct {
		uint32_t wp : 24;
		uint32_t rsvd31 : 8;
	} part;
};

/* Input buffer saved pointer. */
union PIBSP {
	uint32_t full;
	struct {
		uint32_t rp : 24;
		uint32_t rsvd31 : 8;
	} part;
};

/* Resume message length, low dword. */
union SHARLDW0 {
	uint32_t full;
	struct {
		uint32_t rsvd8 : 9;
		uint32_t lower_length : 23;
	} part;
};

/* Resume message length, high dword. */
union SHARLDW1 {
	uint32_t full;
	struct {
		uint32_t upper_length : 32;
	} part;
};

/* Accumulated message length, low dword. */
union SHAALDW0 {
	uint32_t full;
	struct {
		uint32_t rsvd8 : 9;
		uint32_t lower_length : 23;
	} part;
};

/* Accumulated message length, high dword. */
union SHAALDW1 {
	uint32_t full;
	struct {
		uint32_t upper_length : 32;
	} part;
};

/* SHA engine control: enable, resume mode, frame mode, algorithm select. */
union SHACTL {
	uint32_t full;
	struct {
		uint32_t en : 1;
		uint32_t rsvd1 : 1;
		uint32_t hrsm : 1;
		uint32_t hfm : 2;
		uint32_t rsvd15 : 11;
		uint32_t algo : 3;
		uint32_t rsvd31 : 13;
	} part;
};

/* SHA engine status: busy flag only. */
union SHASTS {
	uint32_t full;
	struct {
		uint32_t busy : 1;
		uint32_t rsvd31 : 31;
	} part;
};

/* Initial-vector dword register. */
union SHAIVDWx {
	uint32_t full;
	struct {
		uint32_t dwx : 32;
	} part;
};

/* Result digest dword register. */
union SHARDWx {
	uint32_t full;
	struct {
		uint32_t dwx : 32;
	} part;
};

#endif ```
/content/code_sandbox/drivers/crypto/crypto_intel_sha_registers.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
836
```unknown # Atmel SAM TRNG configuration config ENTROPY_SAM_RNG bool "Atmel SAM MCU Family True Random Number Generator (TRNG) Driver" default y depends on DT_HAS_ATMEL_SAM_TRNG_ENABLED select ENTROPY_HAS_DRIVER help Enable True Random Number Generator (TRNG) driver for Atmel SAM MCUs. ```
/content/code_sandbox/drivers/entropy/Kconfig.sam
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
79
```unknown # mcux entropy generator driver configuration config ENTROPY_MCUX_RNGA bool "MCUX RNGA driver" default y depends on DT_HAS_NXP_KINETIS_RNGA_ENABLED select ENTROPY_HAS_DRIVER help This option enables the random number generator accelerator (RNGA) driver based on the MCUX RNGA driver. config ENTROPY_MCUX_TRNG bool "MCUX TRNG driver" default y depends on DT_HAS_NXP_KINETIS_TRNG_ENABLED select ENTROPY_HAS_DRIVER help This option enables the true random number generator (TRNG) driver based on the MCUX TRNG driver. config ENTROPY_MCUX_RNG bool "MCUX RNG driver" default y depends on DT_HAS_NXP_LPC_RNG_ENABLED select ENTROPY_HAS_DRIVER help This option enables the true random number generator (TRNG) driver based on the MCUX RNG driver on LPC Family. config ENTROPY_MCUX_CAAM bool "MCUX CAAM driver" default y depends on DT_HAS_NXP_IMX_CAAM_ENABLED select ENTROPY_HAS_DRIVER select NOCACHE_MEMORY if ARCH_HAS_NOCACHE_MEMORY_SUPPORT help This option enables the CAAM driver based on the MCUX CAAM driver. # # Don't use the MCUX TRNG as a random source as it is not designed # to supply a continuous random stream. Instead, it is used to provide # a seed to RNG generator. # # Use the software implemented xoroshiro RNG. # Use CSPRNG for cryptographically secure RNG source. # choice RNG_GENERATOR_CHOICE default XOSHIRO_RANDOM_GENERATOR if ENTROPY_MCUX_TRNG endchoice choice CSPRNG_GENERATOR_CHOICE default CTR_DRBG_CSPRNG_GENERATOR if ENTROPY_MCUX_TRNG endchoice if ENTROPY_MCUX_CAAM config ENTRY_MCUX_CAAM_POOL_SIZE int "CAAM random number pool size" range 4 1024 default 256 help Buffer length in bytes used to store random bytes generated by CAAM hardware. Please note, that size of the pool must be a power of 2. endif # ENTROPY_MCUX_CAAM ```
/content/code_sandbox/drivers/entropy/Kconfig.mcux
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
500
```c /*
 *
 */

#include <zephyr/drivers/entropy.h>
#include <zephyr/internal/syscall_handler.h>

/*
 * Syscall verification shim for entropy_get_entropy(): validates that the
 * device implements get_entropy and that the caller may write 'len' bytes
 * at 'buffer' before forwarding to the kernel-side implementation.
 */
static inline int z_vrfy_entropy_get_entropy(const struct device *dev,
					     uint8_t *buffer, uint16_t len)
{
	K_OOPS(K_SYSCALL_DRIVER_ENTROPY(dev, get_entropy));
	K_OOPS(K_SYSCALL_MEMORY_WRITE(buffer, len));
	return z_impl_entropy_get_entropy((const struct device *)dev,
					  (uint8_t *)buffer, len);
}
/* Generated marshalling stub for the syscall above. */
#include <zephyr/syscalls/entropy_get_entropy_mrsh.c> ```
/content/code_sandbox/drivers/entropy/entropy_handlers.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
122
```c /*
 *
 *
 * Pseudo-random entropy generator for the ARCH_POSIX architecture:
 * Following the principle of reproducibility of the native_posix board
 * this entropy device will always generate the same random sequence when
 * initialized with the same seed
 *
 * This entropy source should only be used for testing.
 */

#define DT_DRV_COMPAT zephyr_native_posix_rng

#include <zephyr/device.h>
#include <zephyr/drivers/entropy.h>
#include <zephyr/init.h>
#include <zephyr/sys/util.h>
#include <stdlib.h>
#include <string.h>
#include <zephyr/arch/posix/posix_trace.h>
#include "soc.h"
#include "cmdline.h" /* native_posix command line options header */
#include "nsi_host_trampolines.h"
#include "fake_entropy_native_bottom.h"

/* Default PRNG seed; may be overridden by the --seed command line option. */
static unsigned int seed = 0x5678;
/* Set by --seed-random: seed from /dev/urandom instead (non-reproducible). */
static bool seed_random;

/*
 * Fill 'buffer' with 'length' pseudo-random bytes from the host PRNG,
 * sizeof(long) at a time. Always succeeds (returns 0).
 */
static int entropy_native_posix_get_entropy(const struct device *dev,
					    uint8_t *buffer, uint16_t length)
{
	ARG_UNUSED(dev);

	while (length) {
		/*
		 * Note that only 1 thread (Zephyr thread or HW models), runs at
		 * a time, therefore there is no need to use random_r()
		 */
		long value = nsi_host_random();

		size_t to_copy = MIN(length, sizeof(long int));

		memcpy(buffer, &value, to_copy);
		buffer += to_copy;
		length -= to_copy;
	}

	return 0;
}

/*
 * ISR-safe variant; returns the number of bytes produced (always 'len').
 */
static int entropy_native_posix_get_entropy_isr(const struct device *dev,
						uint8_t *buf, uint16_t len,
						uint32_t flags)
{
	ARG_UNUSED(flags);

	/*
	 * entropy_native_posix_get_entropy() is also safe for ISRs
	 * and always produces data.
	 */
	entropy_native_posix_get_entropy(dev, buf, len);

	return len;
}

/* Device init: seed the host PRNG and warn that this source is not secure. */
static int entropy_native_posix_init(const struct device *dev)
{
	ARG_UNUSED(dev);
	entropy_native_seed(seed, seed_random);
	posix_print_warning("WARNING: "
			    "Using a test - not safe - entropy source\n");
	return 0;
}

static const struct entropy_driver_api entropy_native_posix_api_funcs = {
	.get_entropy = entropy_native_posix_get_entropy,
	.get_entropy_isr = entropy_native_posix_get_entropy_isr
};

DEVICE_DT_INST_DEFINE(0, entropy_native_posix_init,
		      NULL, NULL, NULL,
		      PRE_KERNEL_1, CONFIG_ENTROPY_INIT_PRIORITY,
		      &entropy_native_posix_api_funcs);

/* Register the --seed / --seed-random command line options. */
static void add_fake_entropy_option(void)
{
	static struct args_struct_t entropy_options[] = {
		{
			.option = "seed",
			.name = "r_seed",
			.type = 'u',
			.dest = (void *)&seed,
			.descript = "A 32-bit integer seed value for the entropy device, such as "
				    "97229 (decimal), 0x17BCD (hex), or 0275715 (octal)"
		},
		{
			.is_switch = true,
			.option = "seed-random",
			.type = 'b',
			.dest = (void *)&seed_random,
			.descript = "Seed the random generator from /dev/urandom. "
				    "Note your test may not be reproducible if you set this option"
		},
		ARG_TABLE_ENDMARKER
	};

	native_add_command_line_opts(entropy_options);
}

NATIVE_TASK(add_fake_entropy_option, PRE_BOOT_1, 10); ```
/content/code_sandbox/drivers/entropy/fake_entropy_native_posix.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
750
```unknown # Organisation (CSIRO) ABN 41 687 119 230. config ENTROPY_BT_HCI bool "Bluetooth HCI RNG driver" default y depends on DT_HAS_ZEPHYR_BT_HCI_ENTROPY_ENABLED depends on BT_HCI_HOST select ENTROPY_HAS_DRIVER help Enable Random Number Generator from a Bluetooth HCI device. # Don't use Bluetooth HCI as a random source since it will be slow. # Instead, use the software implemented xoshiro RNG. choice RNG_GENERATOR_CHOICE default XOSHIRO_RANDOM_GENERATOR if ENTROPY_BT_HCI endchoice ```
/content/code_sandbox/drivers/entropy/Kconfig.bt_hci
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
136
```unknown # LiteX PRBS entropy generator driver configuration config ENTROPY_LITEX_RNG bool "PRBS RNG driver" default y depends on DT_HAS_LITEX_PRBS_ENABLED select ENTROPY_HAS_DRIVER help This option enables the RNG module, which is an entropy number generator, based on Pseudo-Random Binary Sequences (PRBS) for LiteX SoC builder ```
/content/code_sandbox/drivers/entropy/Kconfig.litex
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
90
```unknown # gecko entropy generator driver configuration config ENTROPY_GECKO_TRNG bool "GECKO TRNG driver" default y depends on DT_HAS_SILABS_GECKO_TRNG_ENABLED select ENTROPY_HAS_DRIVER select CRYPTO_ACC_GECKO_TRNG if SOC_SERIES_EFR32BG22 select CRYPTO_ACC_GECKO_TRNG if SOC_SERIES_EFR32BG27 help This option enables the true random number generator driver based on the TRNG. config ENTROPY_GECKO_SE bool "GECKO SE driver" default y depends on DT_HAS_SILABS_GECKO_SEMAILBOX_ENABLED select ENTROPY_HAS_DRIVER help This option enables the true random number generator driver based on the Secure Element (SE) module. ```
/content/code_sandbox/drivers/entropy/Kconfig.gecko
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
178
```c /* * */ #define DT_DRV_COMPAT microchip_xec_symcr #include <errno.h> #include <string.h> #include <zephyr/kernel.h> #include <zephyr/crypto/crypto.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/mchp_xec_clock_control.h> #include <zephyr/sys/util.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(xec_symcr, CONFIG_CRYPTO_LOG_LEVEL); #include <soc.h> /* ROM API for Hash without using external files */ enum mchp_rom_hash_alg_id { MCHP_ROM_HASH_ALG_NONE = 0, MCHP_ROM_HASH_ALG_SHA1, MCHP_ROM_HASH_ALG_SHA224, MCHP_ROM_HASH_ALG_SHA256, MCHP_ROM_HASH_ALG_SHA384, MCHP_ROM_HASH_ALG_SHA512, MCHP_ROM_HASH_ALG_SM3, MCHP_ROM_HASH_ALG_MAX }; #define MCHP_XEC_STRUCT_HASH_STATE_STRUCT_SIZE 8 #define MCHP_XEC_STRUCT_HASH_STRUCT_SIZE 240 struct mchphashstate { uint32_t v[MCHP_XEC_STRUCT_HASH_STATE_STRUCT_SIZE / 4]; }; struct mchphash { uint32_t v[MCHP_XEC_STRUCT_HASH_STRUCT_SIZE / 4]; }; #define MCHP_XEC_ROM_API_BASE DT_REG_ADDR(DT_NODELABEL(rom_api)) #define MCHP_XEC_ROM_API_ADDR(n) \ (((uint32_t)(MCHP_XEC_ROM_API_BASE) + ((uint32_t)(n) * 4u)) | BIT(0)) #define MCHP_XEC_ROM_HASH_CREATE_SHA224_ID 95 #define mchp_xec_rom_hash_create_sha224 \ ((int (*)(struct mchphash *)) MCHP_XEC_ROM_API_ADDR(MCHP_XEC_ROM_HASH_CREATE_SHA224_ID)) #define MCHP_XEC_ROM_HASH_CREATE_SHA256_ID 96 #define mchp_xec_rom_hash_create_sha256 \ ((int (*)(struct mchphash *)) MCHP_XEC_ROM_API_ADDR(MCHP_XEC_ROM_HASH_CREATE_SHA256_ID)) #define MCHP_XEC_ROM_HASH_CREATE_SHA384_ID 97 #define mchp_xec_rom_hash_create_sha384 \ ((int (*)(struct mchphash *)) MCHP_XEC_ROM_API_ADDR(MCHP_XEC_ROM_HASH_CREATE_SHA384_ID)) #define MCHP_XEC_ROM_HASH_CREATE_SHA512_ID 98 #define mchp_xec_rom_hash_create_sha512 \ ((int (*)(struct mchphash *)) MCHP_XEC_ROM_API_ADDR(MCHP_XEC_ROM_HASH_CREATE_SHA512_ID)) #define MCHP_XEC_ROM_HASH_INIT_STATE_ID 100 #define mec172x_rom_hash_init_state \ ((void (*)(struct mchphash *, struct mchphashstate *, char *)) \ 
MCHP_XEC_ROM_API_ADDR(MCHP_XEC_ROM_HASH_INIT_STATE_ID)) #define MCHP_XEC_ROM_HASH_RESUME_STATE_ID 101 #define mchp_xec_rom_hash_resume_state \ ((void (*)(struct mchphash *, struct mchphashstate *)) \ MCHP_XEC_ROM_API_ADDR(MCHP_XEC_ROM_HASH_RESUME_STATE_ID)) #define MCHP_XEC_ROM_HASH_SAVE_STATE_ID 102 #define mchp_xec_rom_hash_save_state \ ((int (*)(struct mchphash *)) MCHP_XEC_ROM_API_ADDR(MCHP_XEC_ROM_HASH_SAVE_STATE_ID)) #define MCHP_XEC_ROM_HASH_FEED_ID 103 #define mchp_xec_rom_hash_feed \ ((int (*)(struct mchphash *, const uint8_t *, size_t)) \ MCHP_XEC_ROM_API_ADDR(MCHP_XEC_ROM_HASH_FEED_ID)) #define MCHP_XEC_ROM_HASH_DIGEST_ID 104 #define mchp_xec_rom_hash_digest \ ((int (*)(struct mchphash *, char *)) MCHP_XEC_ROM_API_ADDR(MCHP_XEC_ROM_HASH_DIGEST_ID)) #define MCHP_XEC_ROM_HASH_WAIT_ID 105 #define mec172x_rom_hash_wait \ ((int (*)(struct mchphash *)) MCHP_XEC_ROM_API_ADDR(MCHP_XEC_ROM_HASH_WAIT_ID)) #define MCHP_XEC_ROM_AH_DMA_INIT_ID 144 #define mchp_xec_rom_ah_dma_init \ ((int (*)(uint8_t)) MCHP_XEC_ROM_API_ADDR(MCHP_XEC_ROM_AH_DMA_INIT_ID)) #define MCHP_ROM_AH_DMA_INIT_NO_RESET 0 #define MCHP_ROM_AH_DMA_INIT_WITH_RESET 1 #define MCHP_XEC_SYMCR_CAPS_SUPPORT \ (CAP_RAW_KEY | CAP_SEPARATE_IO_BUFS | CAP_SYNC_OPS | CAP_NO_IV_PREFIX) #define MCHP_XEC_SYMCR_MAX_SESSION 1 #define MCHP_XEC_STATE_BUF_SIZE 256 #define MCHP_XEC_BLOCK_BUF_SIZE 128 struct xec_symcr_hash_session { struct mchphash mhctx; struct mchphashstate mhstate; enum hash_algo algo; enum mchp_rom_hash_alg_id rom_algo; bool open; size_t blksz; size_t blklen; uint8_t blockbuf[MCHP_XEC_BLOCK_BUF_SIZE] __aligned(4); uint8_t statebuf[MCHP_XEC_STATE_BUF_SIZE] __aligned(4); }; struct xec_symcr_config { uint32_t regbase; const struct device *clk_dev; struct mchp_xec_pcr_clk_ctrl clk_ctrl; uint8_t irq_num; uint8_t girq; uint8_t girq_pos; uint8_t rsvd1; }; struct xec_symcr_data { struct xec_symcr_hash_session hash_sessions[MCHP_XEC_SYMCR_MAX_SESSION]; }; static int mchp_xec_get_unused_session_index(struct 
xec_symcr_data *data) { int i; for (i = 0; i < MCHP_XEC_SYMCR_MAX_SESSION; i++) { if (!data->hash_sessions[i].open) { data->hash_sessions[i].open = true; return i; } } return -EPERM; } struct hash_alg_to_rom { enum hash_algo algo; enum mchp_rom_hash_alg_id rom_algo; }; const struct hash_alg_to_rom hash_alg_tbl[] = { { CRYPTO_HASH_ALGO_SHA224, MCHP_ROM_HASH_ALG_SHA224 }, { CRYPTO_HASH_ALGO_SHA256, MCHP_ROM_HASH_ALG_SHA256 }, { CRYPTO_HASH_ALGO_SHA384, MCHP_ROM_HASH_ALG_SHA384 }, { CRYPTO_HASH_ALGO_SHA512, MCHP_ROM_HASH_ALG_SHA512 }, }; static enum mchp_rom_hash_alg_id lookup_hash_alg(enum hash_algo algo) { for (size_t n = 0; n < ARRAY_SIZE(hash_alg_tbl); n++) { if (hash_alg_tbl[n].algo == algo) { return hash_alg_tbl[n].rom_algo; } } return MCHP_ROM_HASH_ALG_NONE; } /* SHA-1, 224, and 256 use block size of 64 bytes * SHA-384 and 512 use 128 bytes. */ static size_t hash_block_size(enum hash_algo algo) { switch (algo) { case CRYPTO_HASH_ALGO_SHA384: case CRYPTO_HASH_ALGO_SHA512: return 128u; default: return 64u; } } static int init_rom_hash_context(enum mchp_rom_hash_alg_id rom_algo, struct mchphash *c) { int ret = 0; if (!c) { return -EINVAL; } switch (rom_algo) { case MCHP_ROM_HASH_ALG_SHA224: ret = mchp_xec_rom_hash_create_sha224(c); break; case MCHP_ROM_HASH_ALG_SHA256: ret = mchp_xec_rom_hash_create_sha256(c); break; case MCHP_ROM_HASH_ALG_SHA384: ret = mchp_xec_rom_hash_create_sha384(c); break; case MCHP_ROM_HASH_ALG_SHA512: ret = mchp_xec_rom_hash_create_sha512(c); break; default: return -EINVAL; } if (ret) { /* use zephyr return value */ ret = -EIO; } return ret; } /* use zephyr return values */ int mchp_xec_rom_hash_init_state_wrapper(struct mchphash *c, struct mchphashstate *h, uint8_t *dmamem) { if (!c || !h || !dmamem) { return -EINVAL; } mec172x_rom_hash_init_state(c, h, (char *)dmamem); return 0; } int mchp_xec_rom_hash_resume_state_wrapper(struct mchphash *c, struct mchphashstate *h) { if (!c || !h) { return -EINVAL; } mchp_xec_rom_hash_resume_state(c, 
h); return 0; } int mchp_xec_rom_hash_save_state_wrapper(struct mchphash *c) { if (!c) { return -EINVAL; } if (mchp_xec_rom_hash_save_state(c) != 0) { return -EIO; } return 0; } int mchp_xec_rom_hash_feed_wrapper(struct mchphash *c, const uint8_t *msg, size_t sz) { if ((!c) || (!msg && sz)) { return -EINVAL; } if (mchp_xec_rom_hash_feed(c, (const char *)msg, sz) != 0) { return -EIO; } return 0; } int mchp_xec_rom_hash_digest_wrapper(struct mchphash *c, uint8_t *digest) { if (!c || !digest) { return -EINVAL; } if (mchp_xec_rom_hash_digest(c, (char *)digest)) { return -EIO; } return 0; } /* Wait for hardware to finish. * returns 0 if hardware finished with no errors * returns -EIO if hardware stopped due to error * returns -EINVAL if parameter is bad, hardware may still be running! */ int mchp_xec_rom_hash_wait_wrapper(struct mchphash *c) { if (!c) { return -EINVAL; } if (mec172x_rom_hash_wait(c) != 0) { return -EIO; } return 0; } /* Called by application for update(finish==false) * and compute final hash digest(finish==true) */ static int xec_symcr_do_hash(struct hash_ctx *ctx, struct hash_pkt *pkt, bool finish) { struct xec_symcr_hash_session *hs = NULL; struct mchphash *c = NULL; struct mchphashstate *cstate = NULL; size_t fill_len = 0, rem_len = 0; int ret = 0; if (!ctx || !pkt) { return -EINVAL; } hs = (struct xec_symcr_hash_session *)ctx->drv_sessn_state; c = &hs->mhctx; cstate = &hs->mhstate; if (!hs->open) { LOG_ERR("Session not open"); return -EIO; } if (!finish && !pkt->in_len) { return 0; /* nothing to do */ } /* Not final digest computation and not enough data to run engine */ if (!finish && ((hs->blklen + pkt->in_len) < hs->blksz)) { memcpy(&hs->blockbuf[hs->blklen], pkt->in_buf, pkt->in_len); hs->blklen += pkt->in_len; return 0; } ret = init_rom_hash_context(hs->rom_algo, c); if (ret) { LOG_ERR("ROM context init error %d", ret); return ret; } ret = mchp_xec_rom_hash_resume_state_wrapper(c, cstate); if (ret) { LOG_ERR("Resume state error %d", ret); 
return ret; } fill_len = pkt->in_len; rem_len = 0; if (!finish) { rem_len = pkt->in_len & (hs->blksz - 1u); fill_len = pkt->in_len & ~(hs->blksz - 1u); if (hs->blklen) { fill_len = ((hs->blklen + pkt->in_len) & ~(hs->blksz - 1u)) - hs->blklen; rem_len = pkt->in_len - fill_len; } } if (hs->blklen) { ret = mchp_xec_rom_hash_feed_wrapper(c, (const uint8_t *)hs->blockbuf, hs->blklen); if (ret) { LOG_ERR("ROM hash feed error %d", ret); return ret; } hs->blklen = 0; /* consumed */ } ret = mchp_xec_rom_hash_feed_wrapper(c, (const uint8_t *)pkt->in_buf, fill_len); if (ret) { LOG_ERR("ROM hash feed error %d", ret); return ret; } if (finish) { ret = mchp_xec_rom_hash_digest_wrapper(c, pkt->out_buf); if (ret) { LOG_ERR("ROM Hash final error %d", ret); return ret; } } else { ret = mchp_xec_rom_hash_save_state(c); if (ret) { LOG_ERR("ROM hash save state error %d", ret); return ret; } } ret = mchp_xec_rom_hash_wait_wrapper(c); if (ret) { LOG_ERR("ROM hash wait error %d", ret); return ret; } if (finish) { hs->blklen = 0; } else { memcpy(hs->blockbuf, &pkt->in_buf[fill_len], rem_len); hs->blklen = rem_len; } return 0; } static int xec_symcr_hash_session_begin(const struct device *dev, struct hash_ctx *ctx, enum hash_algo algo) { struct xec_symcr_data *data = dev->data; struct xec_symcr_hash_session *hs = NULL; struct mchphash *c = NULL; struct mchphashstate *cstate = NULL; enum mchp_rom_hash_alg_id rom_algo = MCHP_ROM_HASH_ALG_NONE; int session_idx = 0; int ret = 0; if (ctx->flags & ~(MCHP_XEC_SYMCR_CAPS_SUPPORT)) { LOG_ERR("Unsupported flag"); return -EINVAL; } rom_algo = lookup_hash_alg(algo); if (rom_algo == MCHP_ROM_HASH_ALG_NONE) { LOG_ERR("Unsupported algo %d", algo); return -EINVAL; } session_idx = mchp_xec_get_unused_session_index(data); if (session_idx < 0) { LOG_ERR("No session available"); return -ENOSPC; } hs = &data->hash_sessions[session_idx]; hs->algo = algo; hs->rom_algo = rom_algo; hs->open = false; hs->blklen = 0; hs->blksz = hash_block_size(algo); 
ctx->drv_sessn_state = hs; ctx->started = false; ctx->hash_hndlr = xec_symcr_do_hash; /* reset HW at beginning of session */ ret = mchp_xec_rom_ah_dma_init(MCHP_ROM_AH_DMA_INIT_WITH_RESET); if (ret) { LOG_ERR("ROM HW init error %d", ret); return -EIO; } c = &hs->mhctx; cstate = &hs->mhstate; ret = init_rom_hash_context(hs->rom_algo, c); if (ret) { LOG_ERR("ROM HW context init error %d", ret); return ret; } ret = mchp_xec_rom_hash_init_state_wrapper(c, cstate, hs->statebuf); if (ret) { LOG_ERR("ROM HW init state error %d", ret); } hs->open = true; return ret; } /* * struct hash_ctx { * const struct device *device; this device driver's instance structure * void *drv_sessn_state; pointer to driver instance struct session state. Defined by driver * hash_op hash_hndlr; pointer to this driver function. App calls via pointer to do operations * bool started; true if multipart hash has been started * uint16_t flags; app populates this before calling hash_begin_session * } */ static int xec_symcr_hash_session_free(const struct device *dev, struct hash_ctx *ctx) { struct xec_symcr_hash_session *hs = NULL; int ret = 0; ret = mchp_xec_rom_ah_dma_init(MCHP_ROM_AH_DMA_INIT_WITH_RESET); if (ret) { ret = -EIO; LOG_ERR("ROM HW reset error %d", ret); } hs = (struct xec_symcr_hash_session *)ctx->drv_sessn_state; memset(hs, 0, sizeof(struct xec_symcr_hash_session)); return ret; } static int xec_symcr_query_hw_caps(const struct device *dev) { return MCHP_XEC_SYMCR_CAPS_SUPPORT; } static int xec_symcr_init(const struct device *dev) { const struct xec_symcr_config *cfg = dev->config; int ret; if (!device_is_ready(cfg->clk_dev)) { LOG_ERR("clock device not ready"); return -ENODEV; } ret = clock_control_on(cfg->clk_dev, (clock_control_subsys_t *)&cfg->clk_ctrl); if (ret < 0) { LOG_ERR("clock on error %d", ret); return ret; } ret = mchp_xec_rom_ah_dma_init(MCHP_ROM_AH_DMA_INIT_WITH_RESET); if (ret) { ret = -EIO; } return ret; } static const struct crypto_driver_api xec_symcr_api = { 
.query_hw_caps = xec_symcr_query_hw_caps, .hash_begin_session = xec_symcr_hash_session_begin, .hash_free_session = xec_symcr_hash_session_free, }; #define XEC_SYMCR_PCR_INFO(i) \ MCHP_XEC_PCR_SCR_ENCODE(DT_INST_CLOCKS_CELL(i, regidx), \ DT_INST_CLOCKS_CELL(i, bitpos), \ DT_INST_CLOCKS_CELL(i, domain)) #define XEC_SYMCR_INIT(inst) \ \ static struct xec_symcr_data xec_symcr_data_##inst; \ \ static const struct xec_symcr_config xec_symcr_cfg_##inst = { \ .regbase = DT_INST_REG_ADDR(inst), \ .clk_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(inst)), \ .clk_ctrl = { \ .pcr_info = XEC_SYMCR_PCR_INFO(inst), \ }, \ .irq_num = DT_INST_IRQN(inst), \ .girq = DT_INST_PROP_BY_IDX(inst, girqs, 0), \ .girq_pos = DT_INST_PROP_BY_IDX(inst, girqs, 1), \ }; \ \ DEVICE_DT_INST_DEFINE(inst, &xec_symcr_init, NULL, \ &xec_symcr_data_##inst, &xec_symcr_cfg_##inst, \ POST_KERNEL, CONFIG_CRYPTO_INIT_PRIORITY, \ &xec_symcr_api); DT_INST_FOREACH_STATUS_OKAY(XEC_SYMCR_INIT) ```
/content/code_sandbox/drivers/crypto/crypto_mchp_xec_symcr.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,368
```unknown # TI CC13xx / CC26xx entropy driver configuration config ENTROPY_CC13XX_CC26XX_RNG bool "TI SimpleLink CC13xx / CC26xx True Random Number Generator (TRNG)" default y depends on DT_HAS_TI_CC13XX_CC26XX_TRNG_ENABLED select ENTROPY_HAS_DRIVER select RING_BUFFER help This option enables the driver for the True Random Number Generator (TRNG) for TI SimpleLink CC13xx / CC26xx SoCs. if ENTROPY_CC13XX_CC26XX_RNG config ENTROPY_CC13XX_CC26XX_POOL_SIZE int "Number of bytes in the entropy pool" default 512 help The size in bytes of the buffer used to store entropy generated by the hardware. Should be a power of two for high performance. config ENTROPY_CC13XX_CC26XX_SAMPLES_PER_CYCLE int "Number of samples to generate entropy" range 256 16777216 default 240000 help The number of samples used to generate entropy. The time required to generate 64 bits of entropy is determined by the number of FROs enabled, the sampling (system) clock frequency, and this value. config ENTROPY_CC13XX_CC26XX_ALARM_THRESHOLD int "Threshold for detected repeated patterns" range 0 $(UINT8_MAX) default $(UINT8_MAX) help The number of samples detected with repeating patterns before an alarm event is triggered. The associated FRO is automatically shut down. config ENTROPY_CC13XX_CC26XX_SHUTDOWN_THRESHOLD int "Threshold for the number of FROs automatically shut down" range 0 24 default 0 help The number of FROs allowed to be shutdown before the driver attempts to take corrective action. endif # ENTROPY_CC13XX_CC26XX_RNG ```
/content/code_sandbox/drivers/entropy/Kconfig.cc13xx_cc26xx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
406
```unknown # STM32 entropy generator driver configuration menuconfig ENTROPY_STM32_RNG bool "STM32 RNG driver" default y depends on DT_HAS_ST_STM32_RNG_ENABLED select ENTROPY_HAS_DRIVER select USE_STM32_LL_RNG help This option enables the RNG processor, which is a entropy number generator, based on a continuous analog noise, that provides a entropy 32-bit value to the host when read. It is available for F4 (except STM32F401 & STM32F411), L4, F7, H7 and G4 series. if ENTROPY_STM32_RNG config ENTROPY_STM32_THR_POOL_SIZE int "Thread-mode random number pool size" range ENTROPY_STM32_THR_THRESHOLD 256 default 8 help Buffer length in bytes used to store entropy bytes generated by the hardware to make them ready for thread mode consumers. Please note, that size of the pool must be a power of 2. config ENTROPY_STM32_THR_THRESHOLD int "Thread-mode random number pool low-water threshold" range 4 $(UINT8_MAX) help Low water-mark threshold in bytes to trigger entropy generation for thread mode consumers. As soon as the number of available bytes in the buffer goes below this number hardware entropy generation will be started. config ENTROPY_STM32_ISR_POOL_SIZE int "ISR-mode random number pool size" range ENTROPY_STM32_ISR_THRESHOLD 256 default 16 help Buffer length in bytes used to store entropy bytes generated by the hardware to make them ready for ISR consumers. Please note, that size of the pool must be a power of 2. config ENTROPY_STM32_ISR_THRESHOLD int "ISR-mode random number pool low-water threshold" range 12 $(UINT8_MAX) help Low water-mark threshold in bytes to trigger entropy generation for ISR consumers. As soon as the number of available bytes in the buffer goes below this number hardware entropy generation will be started. config ENTROPY_STM32_CLK_CHECK bool "Runtime clock configuration check" default y help Enables a check on RNG clock configuration. Correct clock configuration depends on STM32 series. Check reference manual if an error is reported. 
This check assumes CED (Clock Error Detected) bit is enabled (when available, CED is enabled by default). Disable this check if CED is disabled. endif # ENTROPY_STM32_RNG ```
/content/code_sandbox/drivers/entropy/Kconfig.stm32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
559
```c /* * */ #define DT_DRV_COMPAT nxp_imx_caam #include <zephyr/device.h> #include <zephyr/drivers/entropy.h> #include <zephyr/random/random.h> #include <zephyr/init.h> #include <zephyr/kernel.h> #include "fsl_caam.h" struct mcux_entropy_config { CAAM_Type *base; }; static caam_job_ring_interface_t jrif0 __attribute__((__section__(".nocache"))); static uint8_t rng_buff_pool[CONFIG_ENTRY_MCUX_CAAM_POOL_SIZE] __attribute__((__section__(".nocache"))); /* This semaphore is needed to prevent race condition to static variables in the HAL driver */ K_SEM_DEFINE(mcux_caam_sem, 1, 1) static int entropy_mcux_caam_get_entropy(const struct device *dev, uint8_t *buffer, uint16_t length) { const struct mcux_entropy_config *config = dev->config; status_t status; caam_handle_t handle; uint16_t read_length = 0; uint16_t insert_idx = 0; int ret = 0; k_timeout_t sem_timeout = K_MSEC(10); handle.jobRing = kCAAM_JobRing0; /* * The buffer passed to the CAAM RNG function needs to be in non-cache * memory. Use an intermediate buffer to stage the data to the user * buffer. 
*/ while (insert_idx < length) { read_length = MIN(sizeof(rng_buff_pool), (length - insert_idx)); ret = k_sem_take(&mcux_caam_sem, sem_timeout); if (ret) { return ret; } status = CAAM_RNG_GetRandomData( config->base, &handle, kCAAM_RngStateHandle0, &rng_buff_pool[0], read_length, kCAAM_RngDataAny, NULL); k_sem_give(&mcux_caam_sem); memcpy(&buffer[insert_idx], &rng_buff_pool[0], read_length); insert_idx += read_length; } return 0; } static const struct entropy_driver_api entropy_mcux_caam_api_funcs = { .get_entropy = entropy_mcux_caam_get_entropy }; static const struct mcux_entropy_config entropy_mcux_config = { .base = (CAAM_Type *)DT_INST_REG_ADDR(0) }; static int entropy_mcux_caam_init(const struct device *dev) { const struct mcux_entropy_config *config = dev->config; caam_config_t conf; status_t status; CAAM_GetDefaultConfig(&conf); conf.jobRingInterface[0] = &jrif0; status = CAAM_Init(config->base, &conf); __ASSERT_NO_MSG(!status); if (status != 0) { return -ENODEV; } return 0; } DEVICE_DT_INST_DEFINE(0, entropy_mcux_caam_init, NULL, NULL, &entropy_mcux_config, PRE_KERNEL_1, CONFIG_ENTROPY_INIT_PRIORITY, &entropy_mcux_caam_api_funcs); ```
/content/code_sandbox/drivers/entropy/entropy_mcux_caam.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
661
```c
/*
 * Entropy driver for the Atmel/Microchip SAM TRNG. Supports both the
 * SAM (TRNG_CR/TRNG_ODATA register layout) and SAM0/D5x (CTRLA/INTFLAG
 * layout) variants via #ifdef on the vendor header macros.
 */

#define DT_DRV_COMPAT atmel_sam_trng

#include <zephyr/device.h>
#include <zephyr/drivers/entropy.h>
#include <zephyr/drivers/clock_control/atmel_sam_pmc.h>
#include <errno.h>
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <soc.h>
#include <string.h>

struct trng_sam_dev_cfg {
	Trng *regs;
};

/* True when a fresh 32-bit random word is ready to be read. */
static inline bool _ready(Trng * const trng)
{
#ifdef TRNG_ISR_DATRDY
	/* SAM variant: data-ready flag in the interrupt status register */
	return trng->TRNG_ISR & TRNG_ISR_DATRDY;
#else
	/* SAM0/D5x variant */
	return trng->INTFLAG.bit.DATARDY;
#endif
}

/* Read the current 32-bit random word; call only after _ready() is true. */
static inline uint32_t _data(Trng * const trng)
{
#ifdef REG_TRNG_DATA
	(void) trng;
	return TRNG->DATA.reg;
#else
	return trng->TRNG_ODATA;
#endif
}

/*
 * Busy-wait until the TRNG has a word available.
 * Returns 0 on success, -ETIMEDOUT if the loop budget is exhausted.
 */
static int entropy_sam_wait_ready(Trng * const trng)
{
	/* According to the reference manual, the generator provides
	 * one 32-bit random value every 84 peripheral clock cycles.
	 * MCK may not be smaller than HCLK/4, so it should not take
	 * more than 336 HCLK ticks. Assuming the CPU can do 1
	 * instruction per HCLK the number of times to loop before
	 * the TRNG is ready is less than 1000. And that is when
	 * assuming the loop only takes 1 instruction. So looping a
	 * million times should be more than enough.
	 */
	int timeout = 1000000;

	while (!_ready(trng)) {
		if (timeout-- == 0) {
			return -ETIMEDOUT;
		}
	}

	return 0;
}

/*
 * Blocking fill of @buffer with @length random bytes, one 32-bit word
 * at a time. Returns 0, or -ETIMEDOUT from entropy_sam_wait_ready().
 */
static int entropy_sam_get_entropy_internal(const struct device *dev, uint8_t *buffer,
					    uint16_t length)
{
	const struct trng_sam_dev_cfg *config = dev->config;
	Trng *const trng = config->regs;

	while (length > 0) {
		size_t to_copy;
		uint32_t value;
		int res;

		res = entropy_sam_wait_ready(trng);
		if (res < 0) {
			return res;
		}

		value = _data(trng);
		/* Tail may need fewer than 4 bytes of the word */
		to_copy = MIN(length, sizeof(value));

		memcpy(buffer, &value, to_copy);
		buffer += to_copy;
		length -= to_copy;
	}

	return 0;
}

/* entropy_driver_api.get_entropy: thread-context blocking read. */
static int entropy_sam_get_entropy(const struct device *dev, uint8_t *buffer,
				   uint16_t length)
{
	return entropy_sam_get_entropy_internal(dev, buffer, length);
}

/*
 * entropy_driver_api.get_entropy_isr: without ENTROPY_BUSYWAIT, drain
 * whatever words are immediately available and return the byte count;
 * with ENTROPY_BUSYWAIT, block until @length bytes are delivered
 * (returns @length) or propagate the error.
 */
static int entropy_sam_get_entropy_isr(const struct device *dev, uint8_t *buffer,
				       uint16_t length, uint32_t flags)
{
	uint16_t cnt = length;

	if ((flags & ENTROPY_BUSYWAIT) == 0U) {
		const struct trng_sam_dev_cfg *config = dev->config;
		/* No busy wait; return whatever data is available. */
		Trng * const trng = config->regs;

		do {
			size_t to_copy;
			uint32_t value;

			if (!_ready(trng)) {
				/* Data not ready */
				break;
			}

			value = _data(trng);
			to_copy = MIN(length, sizeof(value));

			memcpy(buffer, &value, to_copy);
			buffer += to_copy;
			length -= to_copy;
		} while (length > 0);

		/* Number of bytes actually produced */
		return cnt - length;
	} else {
		/* Allowed to busy-wait */
		int ret = entropy_sam_get_entropy_internal(dev, buffer, length);

		if (ret == 0) {
			/* Data retrieved successfully. */
			return cnt;
		}

		return ret;
	}
}

/* Enable the TRNG clock and the generator itself; always succeeds. */
static int entropy_sam_init(const struct device *dev)
{
	const struct trng_sam_dev_cfg *config = dev->config;
	Trng *const trng = config->regs;

#ifdef MCLK
	/* Enable the MCLK */
	MCLK->APBCMASK.bit.TRNG_ = 1;

	/* Enable the TRNG */
	trng->CTRLA.bit.ENABLE = 1;
#else
	/* Enable TRNG in PMC */
	const struct atmel_sam_pmc_config clock_cfg = SAM_DT_INST_CLOCK_PMC_CFG(0);

	(void)clock_control_on(SAM_DT_PMC_CONTROLLER,
			       (clock_control_subsys_t)&clock_cfg);

	/* Enable the TRNG */
	trng->TRNG_CR = TRNG_CR_KEY_PASSWD | TRNG_CR_ENABLE;
#endif
	return 0;
}

static const struct entropy_driver_api entropy_sam_api = {
	.get_entropy = entropy_sam_get_entropy,
	.get_entropy_isr = entropy_sam_get_entropy_isr
};

static const struct trng_sam_dev_cfg trng_sam_cfg = {
	.regs = (Trng *)DT_INST_REG_ADDR(0),
};

DEVICE_DT_INST_DEFINE(0, entropy_sam_init, NULL, NULL, &trng_sam_cfg,
		      PRE_KERNEL_1, CONFIG_ENTROPY_INIT_PRIORITY,
		      &entropy_sam_api);
```
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,115
```unknown config ENTROPY_MAX32_TRNG bool "ADI MAX32XXX MCU Family True Random Number Generator (TRNG) Driver" default y depends on DT_HAS_ADI_MAX32_TRNG_ENABLED select ENTROPY_HAS_DRIVER help Enable True Random Number Generator (TRNG) driver for ADI MAX32XXX MCUs. ```
/content/code_sandbox/drivers/entropy/Kconfig.max32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
74
```unknown # esp32 entropy generator driver configuration config ENTROPY_ESP32_RNG bool "ESP32 entropy number generator driver" default y depends on DT_HAS_ESPRESSIF_ESP32_TRNG_ENABLED select ENTROPY_HAS_DRIVER help This option enables the entropy number generator for ESP32 SoCs. With Wi-Fi and Bluetooth disabled, this will produce pseudo-entropy numbers: noise from these radios are used to feed entropy in this generator. ```
/content/code_sandbox/drivers/entropy/Kconfig.esp32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
102
```objective-c /* * */ #ifndef DRIVERS_ENTROPY_FAKE_ENTROPY_NATIVE_BOTTOM_H #define DRIVERS_ENTROPY_FAKE_ENTROPY_NATIVE_BOTTOM_H #ifdef __cplusplus extern "C" { #endif void entropy_native_seed(unsigned int seed, bool seed_random); #ifdef __cplusplus } #endif #endif /* DRIVERS_ENTROPY_FAKE_ENTROPY_NATIVE_BOTTOM_H */ ```
/content/code_sandbox/drivers/entropy/fake_entropy_native_bottom.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
80
```c /* * */ #define DT_DRV_COMPAT nuvoton_npcx_drbg #include <errno.h> #include <zephyr/kernel.h> #include <zephyr/drivers/entropy.h> #include <zephyr/pm/device.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(entropy_npcx_drbg, CONFIG_ENTROPY_LOG_LEVEL); #include "soc_ncl.h" /* Reseed after 100 number generations */ #define NPCX_DRBG_SECURITY_STRENGTH \ ((enum ncl_drbg_security_strength)CONFIG_ENTROPY_NPCX_DRBG_SECURITY_STRENGTH) #define NPCX_DRBG_RESEED_INTERVAL CONFIG_ENTROPY_NPCX_DRBG_RESEED_INTERVAL #define NPCX_DRBG_HANDLE_SIZE DT_INST_PROP(0, context_buffer_size) struct entropy_npcx_drbg_dev_data { struct k_sem sem_lock; uint8_t handle[NPCX_DRBG_HANDLE_SIZE] __aligned(4); }; /* * The base address of the table that holds the function pointer for each * DRBG API in ROM. */ #define NPCX_NCL_DRBG_BASE_ADDR ((const struct npcx_ncl_drbg *)DT_INST_REG_ADDR_BY_IDX(0, 0)) /* The following table holds the function pointer for each DRBG API in NPCX ROM. */ struct npcx_ncl_drbg { /* Get the DRBG context size required by DRBG APIs. */ uint32_t (*get_context_size)(void); /* Initialize DRBG context. */ enum ncl_status (*init_context)(void *ctx); /* Power on/off DRBG module. */ enum ncl_status (*power)(void *ctx, uint8_t enable); /* Finalize DRBG context. */ enum ncl_status (*finalize_context)(void *ctx); /* Initialize the DRBG hardware module and enable interrupts. */ enum ncl_status (*init)(void *ctx, bool int_enable); /* * Configure DRBG, pres_resistance enables/disables (1/0) prediction * resistance */ enum ncl_status (*config)(void *ctx, uint32_t reseed_interval, uint8_t pred_resistance); /* * This routine creates a first instantiation of the DRBG mechanism * parameters. The routine pulls an initial seed from the HW RNG module * and resets the reseed counter. DRBG and SHA modules should be * activated prior to the this operation. 
*/ enum ncl_status (*instantiate)(void *ctx, enum ncl_drbg_security_strength sec_strength, const uint8_t *pers_string, uint32_t pers_string_len); /* Uninstantiate DRBG module */ enum ncl_status (*uninstantiate)(void *ctx); /* Reseeds the internal state of the given instantce */ enum ncl_status (*reseed)(void *ctc, uint8_t *add_data, uint32_t add_data_len); /* Generates a random number from the current internal state. */ enum ncl_status (*generate)(void *ctx, const uint8_t *add_data, uint32_t add_data_len, uint8_t *out_buff, uint32_t out_buff_len); /* Clear all DRBG SSPs (Sensitive Security Parameters) in HW & driver */ enum ncl_status (*clear)(void *ctx); }; #define NPCX_NCL_DRBG ((const struct npcx_ncl_drbg *)NPCX_NCL_DRBG_BASE_ADDR) /* The 2nd index of the reg property holds the address of NCL_SHA_Power ROM API */ #define NPCX_NCL_SHA_POWER_ADDR ((const struct npcx_ncl_drbg *)DT_INST_REG_ADDR_BY_IDX(0, 1)) struct npcx_ncl_sha { /* Power on/off SHA module. */ enum ncl_status (*power)(void *ctx, uint8_t on); }; #define NPCX_NCL_SHA_POWER ((const struct npcx_ncl_sha *)NPCX_NCL_SHA_POWER_ADDR) static int entropy_npcx_drbg_enable_sha_power(void *ctx, bool enable) { enum ncl_status ncl_ret; ncl_ret = NPCX_NCL_SHA_POWER->power(ctx, enable); if (ncl_ret != NCL_STATUS_OK) { LOG_ERR("Fail to %s SHA power: err 0x%02x", enable ? "enable" : "disable", ncl_ret); return -EIO; } return 0; } static int entropy_npcx_drbg_enable_drbg_power(void *ctx, bool enable) { enum ncl_status ncl_ret; ncl_ret = NPCX_NCL_DRBG->power(ctx, enable); if (ncl_ret != NCL_STATUS_OK) { LOG_ERR("Fail to %s DRBG power: err 0x%02x", enable ? 
"enable" : "disable", ncl_ret); return -EIO; } return 0; } static int entropy_npcx_drbg_get_entropy(const struct device *dev, uint8_t *buf, uint16_t len) { struct entropy_npcx_drbg_dev_data *const data = dev->data; enum ncl_status ncl_ret; void *ctx = data->handle; int ret = 0; k_sem_take(&data->sem_lock, K_FOREVER); ret = entropy_npcx_drbg_enable_sha_power(ctx, true); if (ret != 0) { goto err_exit; } ncl_ret = NPCX_NCL_DRBG->generate(ctx, NULL, 0, buf, len); if (ncl_ret != NCL_STATUS_OK) { LOG_ERR("Fail to generate: err 0x%02x", ncl_ret); ret = -EIO; goto err_exit; } ret = entropy_npcx_drbg_enable_sha_power(ctx, false); err_exit: k_sem_give(&data->sem_lock); return ret; } static int entropy_npcx_drbg_init(const struct device *dev) { struct entropy_npcx_drbg_dev_data *const data = dev->data; uint32_t handle_size_required; enum ncl_status ncl_ret; void *ctx = data->handle; int ret; handle_size_required = NPCX_NCL_DRBG->get_context_size(); if (handle_size_required != NPCX_DRBG_HANDLE_SIZE) { LOG_ERR("Unexpected NCL DRBG context_size = %d", handle_size_required); return -ENOSR; } ret = entropy_npcx_drbg_enable_sha_power(ctx, true); if (ret != 0) { return ret; } ret = entropy_npcx_drbg_enable_drbg_power(ctx, true); if (ret != 0) { return ret; } ncl_ret = NPCX_NCL_DRBG->init_context(ctx); if (ncl_ret != NCL_STATUS_OK) { LOG_ERR("Fail to init ctx: err 0x%02x", ncl_ret); return -EIO; } ncl_ret = NPCX_NCL_DRBG->init(ctx, false); if (ncl_ret != NCL_STATUS_OK) { LOG_ERR("Fail to init: err 0x%02x", ncl_ret); return -EIO; } ncl_ret = NPCX_NCL_DRBG->config(ctx, NPCX_DRBG_RESEED_INTERVAL, false); if (ncl_ret != NCL_STATUS_OK) { LOG_ERR("Fail to config: err 0x%02x", ncl_ret); return -EIO; } ncl_ret = NPCX_NCL_DRBG->instantiate(ctx, NPCX_DRBG_SECURITY_STRENGTH, NULL, 0); if (ncl_ret != NCL_STATUS_OK) { LOG_ERR("Fail to config: err 0x%02x", ncl_ret); return -EIO; } ret = entropy_npcx_drbg_enable_sha_power(ctx, false); if (ret != 0) { return ret; } /* Locking semaphore initialized 
to 1 (unlocked) */ k_sem_init(&data->sem_lock, 1, 1); return 0; } #ifdef CONFIG_PM_DEVICE static int entropy_npcx_drbg_suspend(const struct device *dev) { struct entropy_npcx_drbg_dev_data *const data = dev->data; void *ctx = data->handle; return entropy_npcx_drbg_enable_drbg_power(ctx, false); } static int entropy_npcx_drbg_resume(const struct device *dev) { struct entropy_npcx_drbg_dev_data *const data = dev->data; void *ctx = data->handle; return entropy_npcx_drbg_enable_drbg_power(ctx, true); } static int entropy_npcx_drbg_pm_action(const struct device *dev, enum pm_device_action action) { switch (action) { case PM_DEVICE_ACTION_SUSPEND: return entropy_npcx_drbg_suspend(dev); case PM_DEVICE_ACTION_RESUME: return entropy_npcx_drbg_resume(dev); default: return -ENOTSUP; } } #endif /* CONFIG_PM_DEVICE */ static const struct entropy_driver_api entropy_npcx_drbg_api = { .get_entropy = entropy_npcx_drbg_get_entropy, }; static struct entropy_npcx_drbg_dev_data entropy_npcx_drbg_data; PM_DEVICE_DT_INST_DEFINE(0, entropy_npcx_drbg_pm_action); DEVICE_DT_INST_DEFINE(0, entropy_npcx_drbg_init, PM_DEVICE_DT_INST_GET(0), &entropy_npcx_drbg_data, NULL, PRE_KERNEL_1, CONFIG_ENTROPY_INIT_PRIORITY, &entropy_npcx_drbg_api); ```
/content/code_sandbox/drivers/entropy/entropy_npcx_drbg.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,062
```c /* * * * Bottom/Linux side of the pseudo-random entropy generator for * ARCH_POSIX architecture */ #undef _XOPEN_SOURCE #define _XOPEN_SOURCE 700 #include <stdbool.h> #include <stdlib.h> #include <errno.h> #include <string.h> #include <sys/random.h> #include "nsi_tracing.h" void entropy_native_seed(unsigned int seed, bool seed_random) { if (seed_random == false) { srandom(seed); } else { unsigned int buf; int err = getrandom(&buf, sizeof(buf), 0); if (err != sizeof(buf)) { nsi_print_error_and_exit("Could not get random number (%i, %s)\n", err, strerror(errno)); } srandom(buf); /* Let's print the seed so users can still reproduce the run if they need to */ nsi_print_trace("Random generator seeded with 0x%X\n", buf); } } ```
/content/code_sandbox/drivers/entropy/fake_entropy_native_bottom.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
206
```c /* * */ #define DT_DRV_COMPAT nxp_kinetis_rnga #include <zephyr/device.h> #include <zephyr/drivers/entropy.h> #include <zephyr/random/random.h> #include <zephyr/init.h> #include "fsl_rnga.h" static uint8_t entropy_mcux_rnga_get_uint8(void) { uint32_t random; uint8_t output = 0U; int i; RNGA_SetMode(RNG, kRNGA_ModeNormal); /* The Reference manual states that back to back reads from * the RNGA deliver one or two bits of entropy per 32-bit * word, therefore to deliver 8 bits of entropy we need * between 4 and 8 samples. Conservatively, we take 8. */ for (i = 0; i < 8; i++) { status_t status; status = RNGA_GetRandomData(RNG, &random, sizeof(random)); __ASSERT_NO_MSG(!status); output ^= random; } RNGA_SetMode(RNG, kRNGA_ModeSleep); return output; } static int entropy_mcux_rnga_get_entropy(const struct device *dev, uint8_t *buffer, uint16_t length) { int i; ARG_UNUSED(dev); for (i = 0; i < length; i++) { buffer[i] = entropy_mcux_rnga_get_uint8(); } return 0; } static const struct entropy_driver_api entropy_mcux_rnga_api_funcs = { .get_entropy = entropy_mcux_rnga_get_entropy }; static int entropy_mcux_rnga_init(const struct device *dev) { uint32_t seed = k_cycle_get_32(); ARG_UNUSED(dev); RNGA_Init(RNG); /* The range of seed values acquired by this method is likely * to be relatively small. The RNGA hardware uses two free * running oscillators to add entropy to the seed value, we * take care below to ensure the read rate is lower than the * rate at which the hardware can add entropy. */ RNGA_Seed(RNG, seed); RNGA_SetMode(RNG, kRNGA_ModeSleep); return 0; } DEVICE_DT_INST_DEFINE(0, entropy_mcux_rnga_init, NULL, NULL, NULL, PRE_KERNEL_1, CONFIG_ENTROPY_INIT_PRIORITY, &entropy_mcux_rnga_api_funcs); ```
/content/code_sandbox/drivers/entropy/entropy_mcux_rnga.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
529
```c /* * */ #define DT_DRV_COMPAT openisa_rv32m1_trng #include <zephyr/device.h> #include <zephyr/drivers/entropy.h> #include <zephyr/random/random.h> #include <zephyr/init.h> #include "fsl_trng.h" struct rv32m1_entropy_config { TRNG_Type *base; }; static int entropy_rv32m1_trng_get_entropy(const struct device *dev, uint8_t *buffer, uint16_t length) { const struct rv32m1_entropy_config *config = dev->config; status_t status; ARG_UNUSED(dev); status = TRNG_GetRandomData(config->base, buffer, length); __ASSERT_NO_MSG(!status); return 0; } static const struct entropy_driver_api entropy_rv32m1_trng_api_funcs = { .get_entropy = entropy_rv32m1_trng_get_entropy }; static struct rv32m1_entropy_config entropy_rv32m1_config = { .base = (TRNG_Type *)DT_INST_REG_ADDR(0) }; static int entropy_rv32m1_trng_init(const struct device *); DEVICE_DT_INST_DEFINE(0, entropy_rv32m1_trng_init, NULL, NULL, &entropy_rv32m1_config, PRE_KERNEL_1, CONFIG_ENTROPY_INIT_PRIORITY, &entropy_rv32m1_trng_api_funcs); static int entropy_rv32m1_trng_init(const struct device *dev) { const struct rv32m1_entropy_config *config = dev->config; trng_config_t conf; status_t status; ARG_UNUSED(dev); status = TRNG_GetDefaultConfig(&conf); __ASSERT_NO_MSG(!status); status = TRNG_Init(config->base, &conf); __ASSERT_NO_MSG(!status); return 0; } ```
/content/code_sandbox/drivers/entropy/entropy_rv32m1_trng.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
391
```c /* * */ #define DT_DRV_COMPAT espressif_esp32_trng #include <string.h> #include <soc/rtc.h> #include <soc/wdev_reg.h> #include <esp_system.h> #include <soc.h> #include <esp_cpu.h> #include <zephyr/drivers/entropy.h> static inline uint32_t entropy_esp32_get_u32(void) { /* The PRNG which implements WDEV_RANDOM register gets 2 bits * of extra entropy from a hardware randomness source every APB clock cycle * (provided WiFi or BT are enabled). To make sure entropy is not drained * faster than it is added, this function needs to wait for at least 16 APB * clock cycles after reading previous word. This implementation may actually * wait a bit longer due to extra time spent in arithmetic and branch statements. */ uint32_t cpu_to_apb_freq_ratio = esp_clk_cpu_freq() / esp_clk_apb_freq(); static uint32_t last_ccount; uint32_t ccount; do { ccount = esp_cpu_get_cycle_count(); } while (ccount - last_ccount < cpu_to_apb_freq_ratio * 16); last_ccount = ccount; return REG_READ(WDEV_RND_REG); } static int entropy_esp32_get_entropy(const struct device *dev, uint8_t *buf, uint16_t len) { assert(buf != NULL); uint8_t *buf_bytes = buf; while (len > 0) { uint32_t word = entropy_esp32_get_u32(); uint32_t to_copy = MIN(sizeof(word), len); memcpy(buf_bytes, &word, to_copy); buf_bytes += to_copy; len -= to_copy; } return 0; } static int entropy_esp32_init(const struct device *dev) { /* clock initialization handled by clock manager */ return 0; } static const struct entropy_driver_api entropy_esp32_api_funcs = { .get_entropy = entropy_esp32_get_entropy }; DEVICE_DT_INST_DEFINE(0, entropy_esp32_init, NULL, NULL, NULL, PRE_KERNEL_1, CONFIG_ENTROPY_INIT_PRIORITY, &entropy_esp32_api_funcs); ```
/content/code_sandbox/drivers/entropy/entropy_esp32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
473
```unknown # PSA Crypto Random source configuration options config ENTROPY_PSA_CRYPTO_RNG bool "PSA Crypto Random source Entropy driver" depends on DT_HAS_ZEPHYR_PSA_CRYPTO_RNG_ENABLED select ENTROPY_HAS_DRIVER default y help Enable the PSA Crypto source Entropy driver. ```
/content/code_sandbox/drivers/entropy/Kconfig.psa_crypto
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
69
```c /* * */ #include <zephyr/drivers/entropy.h> #include <zephyr/kernel.h> #include <soc.h> #include <zephyr/irq.h> #include <zephyr/sys/barrier.h> #include <DA1469xAB.h> #include <zephyr/pm/device.h> #include <zephyr/pm/policy.h> #include <zephyr/sys/util.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(smartbond_entropy, CONFIG_ENTROPY_LOG_LEVEL); #define DT_DRV_COMPAT renesas_smartbond_trng #define IRQN DT_INST_IRQN(0) #define IRQ_PRIO DT_INST_IRQ(0, priority) struct rng_pool { uint8_t first_alloc; uint8_t first_read; uint8_t last; uint8_t mask; uint8_t threshold; FLEXIBLE_ARRAY_DECLARE(uint8_t, buffer); }; #define RNG_POOL_DEFINE(name, len) uint8_t name[sizeof(struct rng_pool) + (len)] BUILD_ASSERT((CONFIG_ENTROPY_SMARTBOND_ISR_POOL_SIZE & (CONFIG_ENTROPY_SMARTBOND_ISR_POOL_SIZE - 1)) == 0, "The CONFIG_ENTROPY_SMARTBOND_ISR_POOL_SIZE must be a power of 2!"); BUILD_ASSERT((CONFIG_ENTROPY_SMARTBOND_THR_POOL_SIZE & (CONFIG_ENTROPY_SMARTBOND_THR_POOL_SIZE - 1)) == 0, "The CONFIG_ENTROPY_SMARTBOND_THR_POOL_SIZE must be a power of 2!"); struct entropy_smartbond_dev_data { struct k_sem sem_lock; struct k_sem sem_sync; RNG_POOL_DEFINE(isr, CONFIG_ENTROPY_SMARTBOND_ISR_POOL_SIZE); RNG_POOL_DEFINE(thr, CONFIG_ENTROPY_SMARTBOND_THR_POOL_SIZE); }; static struct entropy_smartbond_dev_data entropy_smartbond_data; /* TRNG FIFO definitions are not in DA1469x.h */ #define DA1469X_TRNG_FIFO_SIZE (32 * sizeof(uint32_t)) #define DA1469X_TRNG_FIFO_ADDR (0x30050000UL) #define FIFO_COUNT_MASK \ (TRNG_TRNG_FIFOLVL_REG_TRNG_FIFOFULL_Msk | TRNG_TRNG_FIFOLVL_REG_TRNG_FIFOLVL_Msk) static inline void entropy_smartbond_pm_policy_state_lock_get(void) { #if defined(CONFIG_PM_DEVICE) /* * Prevent the SoC from etering the normal sleep state as PDC does not support * waking up the application core following TRNG events. 
*/ pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES); #endif } static inline void entropy_smartbond_pm_policy_state_lock_put(void) { #if defined(CONFIG_PM_DEVICE) /* Allow the SoC to enter the nornmal sleep state once TRNG is inactive */ pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES); #endif } static void trng_enable(bool enable) { unsigned int key; key = irq_lock(); if (enable) { CRG_TOP->CLK_AMBA_REG |= CRG_TOP_CLK_AMBA_REG_TRNG_CLK_ENABLE_Msk; TRNG->TRNG_CTRL_REG = TRNG_TRNG_CTRL_REG_TRNG_ENABLE_Msk; /* * Sleep is not allowed as long as the ISR and thread SW FIFOs * are being filled with random numbers. */ entropy_smartbond_pm_policy_state_lock_get(); } else { CRG_TOP->CLK_AMBA_REG &= ~CRG_TOP_CLK_AMBA_REG_TRNG_CLK_ENABLE_Msk; TRNG->TRNG_CTRL_REG = 0; NVIC_ClearPendingIRQ(IRQN); entropy_smartbond_pm_policy_state_lock_put(); } irq_unlock(key); } static int trng_available(void) { return TRNG->TRNG_FIFOLVL_REG & FIFO_COUNT_MASK; } static inline uint32_t trng_fifo_read(void) { return *(uint32_t *)DA1469X_TRNG_FIFO_ADDR; } static int random_word_get(uint8_t buf[4]) { uint32_t word = 0; int retval = -EAGAIN; unsigned int key; key = irq_lock(); if (trng_available()) { word = trng_fifo_read(); retval = 0; } irq_unlock(key); buf[0] = (uint8_t)word; buf[1] = (uint8_t)(word >> 8); buf[2] = (uint8_t)(word >> 16); buf[3] = (uint8_t)(word >> 24); return retval; } static uint16_t rng_pool_get(struct rng_pool *rngp, uint8_t *buf, uint16_t len) { uint32_t last = rngp->last; uint32_t mask = rngp->mask; uint8_t *dst = buf; uint32_t first, available; uint32_t other_read_in_progress; unsigned int key; key = irq_lock(); first = rngp->first_alloc; /* * The other_read_in_progress is non-zero if rngp->first_read != first, * which means that lower-priority code (which was interrupted by this * call) already allocated area for read. 
*/ other_read_in_progress = (rngp->first_read ^ first); available = (last - first) & mask; if (available < len) { len = available; } /* * Move alloc index forward to signal, that part of the buffer is * now reserved for this call. */ rngp->first_alloc = (first + len) & mask; irq_unlock(key); while (likely(len--)) { *dst++ = rngp->buffer[first]; first = (first + 1) & mask; } /* * If this call is the last one accessing the pool, move read index * to signal that all allocated regions are now read and could be * overwritten. */ if (likely(!other_read_in_progress)) { key = irq_lock(); rngp->first_read = rngp->first_alloc; irq_unlock(key); } len = dst - buf; available = available - len; if (available <= rngp->threshold) { trng_enable(true); } return len; } static int rng_pool_put(struct rng_pool *rngp, uint8_t byte) { uint8_t first = rngp->first_read; uint8_t last = rngp->last; uint8_t mask = rngp->mask; /* Signal error if the pool is full. */ if (((last - first) & mask) == mask) { return -ENOBUFS; } rngp->buffer[last] = byte; rngp->last = (last + 1) & mask; return 0; } static const uint8_t *rng_pool_put_bytes(struct rng_pool *rngp, const uint8_t *bytes, const uint8_t *limit) { unsigned int key; key = irq_lock(); for (; bytes < limit; ++bytes) { if (rng_pool_put(rngp, *bytes) < 0) { break; } } irq_unlock(key); return bytes; } static void rng_pool_init(struct rng_pool *rngp, uint16_t size, uint8_t threshold) { rngp->first_alloc = 0U; rngp->first_read = 0U; rngp->last = 0U; rngp->mask = size - 1; rngp->threshold = threshold; } static void smartbond_trng_isr(const void *arg) { uint8_t word[4]; const uint8_t *const limit = word + 4; const uint8_t *ptr; bool thread_signaled = false; ARG_UNUSED(arg); while (true) { if (random_word_get(word) < 0) { /* Nothing in FIFO -> nothing to do */ break; } ptr = word; /* Put bytes in ISR FIFO first */ ptr = rng_pool_put_bytes((struct rng_pool *)(entropy_smartbond_data.isr), ptr, limit); if (ptr < limit) { /* Put leftovers in thread FIFO 
*/ if (!thread_signaled) { thread_signaled = true; k_sem_give(&entropy_smartbond_data.sem_sync); } ptr = rng_pool_put_bytes((struct rng_pool *)(entropy_smartbond_data.thr), ptr, limit); } /* Bytes did not fit in isr nor thread FIFO, disable TRNG for now */ if (ptr < limit) { trng_enable(false); break; } } } static int entropy_smartbond_get_entropy(const struct device *dev, uint8_t *buf, uint16_t len) { ARG_UNUSED(dev); /* Check if this API is called on correct driver instance. */ __ASSERT_NO_MSG(&entropy_smartbond_data == dev->data); while (len) { uint16_t bytes; k_sem_take(&entropy_smartbond_data.sem_lock, K_FOREVER); bytes = rng_pool_get((struct rng_pool *)(entropy_smartbond_data.thr), buf, len); k_sem_give(&entropy_smartbond_data.sem_lock); if (bytes == 0U) { /* Pool is empty: Sleep until next interrupt. */ k_sem_take(&entropy_smartbond_data.sem_sync, K_FOREVER); continue; } len -= bytes; buf += bytes; } return 0; } static int entropy_smartbond_get_entropy_isr(const struct device *dev, uint8_t *buf, uint16_t len, uint32_t flags) { ARG_UNUSED(dev); uint16_t cnt = len; /* Check if this API is called on correct driver instance. */ __ASSERT_NO_MSG(&entropy_smartbond_data == dev->data); if (likely((flags & ENTROPY_BUSYWAIT) == 0U)) { return rng_pool_get((struct rng_pool *)(entropy_smartbond_data.isr), buf, len); } if (len) { unsigned int key; int irq_enabled; key = irq_lock(); irq_enabled = irq_is_enabled(IRQN); irq_disable(IRQN); irq_unlock(key); trng_enable(true); /* Clear NVIC pending bit. This ensures that a subsequent * RNG event will set the Cortex-M single-bit event register * to 1 (the bit is set when NVIC pending IRQ status is * changed from 0 to 1) */ NVIC_ClearPendingIRQ(IRQN); do { uint8_t bytes[4]; const uint8_t *ptr = bytes; const uint8_t *const limit = bytes + 4; while (!trng_available()) { /* * To guarantee waking up from the event, the * SEV-On-Pend feature must be enabled (enabled * during ARCH initialization). 
* * DSB is recommended by spec before WFE (to * guarantee completion of memory transactions) */ barrier_dsync_fence_full(); __WFE(); __SEV(); __WFE(); } NVIC_ClearPendingIRQ(IRQN); if (random_word_get(bytes) != 0) { continue; } while (ptr < limit && len) { buf[--len] = *ptr++; } /* Store remaining data for later use */ if (unlikely(ptr < limit)) { rng_pool_put_bytes((struct rng_pool *)(entropy_smartbond_data.isr), ptr, limit); } } while (len); if (irq_enabled) { irq_enable(IRQN); } } return cnt; } #if defined(CONFIG_PM_DEVICE) static int entropy_smartbond_pm_action(const struct device *dev, enum pm_device_action action) { int ret = 0; switch (action) { case PM_DEVICE_ACTION_RESUME: /* * No need to turn on TRNG. It should be done when we the space in the FIFOs * are below the defined ISR and thread FIFO's thresholds. * * \sa CONFIG_ENTROPY_SMARTBOND_THR_THRESHOLD * \sa CONFIG_ENTROPY_SMARTBOND_ISR_THRESHOLD * */ break; case PM_DEVICE_ACTION_SUSPEND: /* At this point TRNG should be disabled; no need to turn it off. */ break; default: ret = -ENOTSUP; } return ret; } #endif static const struct entropy_driver_api entropy_smartbond_api_funcs = { .get_entropy = entropy_smartbond_get_entropy, .get_entropy_isr = entropy_smartbond_get_entropy_isr}; static int entropy_smartbond_init(const struct device *dev) { /* Check if this API is called on correct driver instance. 
*/ __ASSERT_NO_MSG(&entropy_smartbond_data == dev->data); /* Locking semaphore initialized to 1 (unlocked) */ k_sem_init(&entropy_smartbond_data.sem_lock, 1, 1); /* Syncing semaphore */ k_sem_init(&entropy_smartbond_data.sem_sync, 0, 1); rng_pool_init((struct rng_pool *)(entropy_smartbond_data.thr), CONFIG_ENTROPY_SMARTBOND_THR_POOL_SIZE, CONFIG_ENTROPY_SMARTBOND_THR_THRESHOLD); rng_pool_init((struct rng_pool *)(entropy_smartbond_data.isr), CONFIG_ENTROPY_SMARTBOND_ISR_POOL_SIZE, CONFIG_ENTROPY_SMARTBOND_ISR_THRESHOLD); IRQ_CONNECT(IRQN, IRQ_PRIO, smartbond_trng_isr, &entropy_smartbond_data, 0); irq_enable(IRQN); trng_enable(true); return 0; } PM_DEVICE_DT_INST_DEFINE(0, entropy_smartbond_pm_action); DEVICE_DT_INST_DEFINE(0, entropy_smartbond_init, PM_DEVICE_DT_INST_GET(0), &entropy_smartbond_data, NULL, PRE_KERNEL_1, CONFIG_ENTROPY_INIT_PRIORITY, &entropy_smartbond_api_funcs); ```
/content/code_sandbox/drivers/entropy/entropy_smartbond.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,037
```c /* * */ #define DT_DRV_COMPAT silabs_gecko_trng #include <zephyr/drivers/entropy.h> #include <string.h> #include "soc.h" #include "em_cmu.h" #if defined(CONFIG_CRYPTO_ACC_GECKO_TRNG) /* * Select the correct Crypto ACC FIFO memory base address. * * Problem: Gecko SDK doesn't provide macros that check if SL_TRUSTZONE is used or not for Crypto * ACC RNGOUT FIFO memory base address, like it does for register address definitions. * * Solution: Check which register base address is used for the Crypto ACC peripheral and select an * appropriate FIFO memory base address. */ #if (CRYPTOACC_BASE == CRYPTOACC_S_BASE) #define S2_FIFO_BASE CRYPTOACC_RNGOUT_FIFO_S_MEM_BASE #else #define S2_FIFO_BASE CRYPTOACC_RNGOUT_FIFO_MEM_BASE #endif /** * Series 2 SoCs have different TRNG register definitions */ #if defined(_SILICON_LABS_32B_SERIES_2_CONFIG_2) /* xG22 */ #define S2_FIFO_LEVEL (CRYPTOACC_RNGCTRL->FIFOLEVEL) #define S2_CTRL (CRYPTOACC_RNGCTRL->RNGCTRL) #define S2_CTRL_ENABLE (CRYPTOACC_RNGCTRL_ENABLE) #elif defined(_SILICON_LABS_32B_SERIES_2_CONFIG_7) /* xG27 */ #define S2_FIFO_LEVEL (CRYPTOACC->NDRNG_FIFOLEVEL) #define S2_CTRL (CRYPTOACC->NDRNG_CONTROL) #define S2_CTRL_ENABLE (CRYPTOACC_NDRNG_CONTROL_ENABLE) #else /* _SILICON_LABS_32B_SERIES_2_CONFIG_* */ #error "Building for unsupported Series 2 SoC" #endif /* _SILICON_LABS_32B_SERIES_2_CONFIG_* */ #endif /* CONFIG_CRYPTO_ACC_GECKO_TRNG */ static void entropy_gecko_trng_read(uint8_t *output, size_t len) { #ifndef CONFIG_CRYPTO_ACC_GECKO_TRNG uint32_t tmp; uint32_t *data = (uint32_t *) output; /* Read known good available data. */ while (len >= 4) { *data++ = TRNG0->FIFO; len -= 4; } if (len > 0) { /* Handle the case where len is not a multiple of 4 * and FIFO data is available. 
*/ tmp = TRNG0->FIFO; memcpy(data, (const uint8_t *) &tmp, len); } #else memcpy(output, ((const uint8_t *) S2_FIFO_BASE), len); #endif } static int entropy_gecko_trng_get_entropy(const struct device *dev, uint8_t *buffer, uint16_t length) { size_t count = 0; size_t available; ARG_UNUSED(dev); while (length) { #ifndef CONFIG_CRYPTO_ACC_GECKO_TRNG available = TRNG0->FIFOLEVEL * 4; #else available = S2_FIFO_LEVEL * 4; #endif if (available == 0) { return -EINVAL; } count = SL_MIN(length, available); entropy_gecko_trng_read(buffer, count); buffer += count; length -= count; } return 0; } static int entropy_gecko_trng_get_entropy_isr(const struct device *dev, uint8_t *buf, uint16_t len, uint32_t flags) { if ((flags & ENTROPY_BUSYWAIT) == 0U) { /* No busy wait; return whatever data is available. */ size_t count; #ifndef CONFIG_CRYPTO_ACC_GECKO_TRNG size_t available = TRNG0->FIFOLEVEL * 4; #else size_t available = S2_FIFO_LEVEL * 4; #endif if (available == 0) { return -ENODATA; } count = SL_MIN(len, available); entropy_gecko_trng_read(buf, count); return count; } else { /* Allowed to busy-wait */ int ret = entropy_gecko_trng_get_entropy(dev, buf, len); if (ret == 0) { /* Data retrieved successfully. */ return len; } return ret; } } static int entropy_gecko_trng_init(const struct device *dev) { /* Enable the TRNG0 clock. */ #ifndef CONFIG_CRYPTO_ACC_GECKO_TRNG CMU_ClockEnable(cmuClock_TRNG0, true); /* Enable TRNG0. */ TRNG0->CONTROL = TRNG_CONTROL_ENABLE; #else /* Enable the CRYPTO ACC clock. */ CMU_ClockEnable(cmuClock_CRYPTOACC, true); /* Enable TRNG */ S2_CTRL |= S2_CTRL_ENABLE; #endif return 0; } static struct entropy_driver_api entropy_gecko_trng_api_funcs = { .get_entropy = entropy_gecko_trng_get_entropy, .get_entropy_isr = entropy_gecko_trng_get_entropy_isr }; DEVICE_DT_INST_DEFINE(0, entropy_gecko_trng_init, NULL, NULL, NULL, PRE_KERNEL_1, CONFIG_ENTROPY_INIT_PRIORITY, &entropy_gecko_trng_api_funcs); ```
/content/code_sandbox/drivers/entropy/entropy_gecko_trng.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,146
```unknown # nRF5 entropy generator driver configuration config ENTROPY_NRF_FORCE_ALT bool depends on SOC_COMPATIBLE_NRF help This option can be enabled to force an alternative implementation of the entropy driver. menuconfig ENTROPY_NRF5_RNG bool "nRF5 RNG driver" default y depends on !ENTROPY_NRF_FORCE_ALT depends on DT_HAS_NORDIC_NRF_RNG_ENABLED select ENTROPY_HAS_DRIVER help This option enables the RNG peripheral, which is a random number generator, based on internal thermal noise, that provides a random 8-bit value to the host when read. if ENTROPY_NRF5_RNG config ENTROPY_NRF5_BIAS_CORRECTION bool "Bias correction (uniform distribution)" help This option enables the RNG bias correction, which guarantees a uniform distribution of 0 and 1. When this option is enabled, the time to generate a byte cannot be guaranteed. config ENTROPY_NRF5_THR_POOL_SIZE int "Thread-mode random number pool size" range ENTROPY_NRF5_THR_THRESHOLD 256 default 8 help Buffer length in bytes used to store entropy bytes generated by the hardware to make them ready for thread mode consumers. Please note, that size of the pool must be a power of 2. config ENTROPY_NRF5_THR_THRESHOLD int "Thread-mode random number pool low-water threshold" range 4 $(UINT8_MAX) help Low water-mark threshold in bytes to trigger entropy generation for thread mode consumers. As soon as the number of available bytes in the buffer goes below this number hardware entropy generation will be started. config ENTROPY_NRF5_ISR_POOL_SIZE int "ISR-mode random number pool size" range ENTROPY_NRF5_ISR_THRESHOLD 256 default 16 help Buffer length in bytes used to store entropy bytes generated by the hardware to make them ready for ISR consumers. Please note, that size of the pool must be a power of 2. config ENTROPY_NRF5_ISR_THRESHOLD int "ISR-mode random number pool low-water threshold" range 12 $(UINT8_MAX) help Low water-mark threshold in bytes to trigger entropy generation for ISR consumers. 
As soon as the number of available bytes in the buffer goes below this number hardware entropy generation will be started. endif # ENTROPY_NRF5_RNG ```
/content/code_sandbox/drivers/entropy/Kconfig.nrf5
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
542
```c /* * */ #define DT_DRV_COMPAT nxp_kinetis_trng #include <zephyr/device.h> #include <zephyr/drivers/entropy.h> #include <zephyr/random/random.h> #include <zephyr/init.h> #include "fsl_trng.h" struct mcux_entropy_config { TRNG_Type *base; }; static int entropy_mcux_trng_get_entropy(const struct device *dev, uint8_t *buffer, uint16_t length) { const struct mcux_entropy_config *config = dev->config; status_t status; status = TRNG_GetRandomData(config->base, buffer, length); __ASSERT_NO_MSG(!status); return 0; } static const struct entropy_driver_api entropy_mcux_trng_api_funcs = { .get_entropy = entropy_mcux_trng_get_entropy }; static struct mcux_entropy_config entropy_mcux_config = { .base = (TRNG_Type *)DT_INST_REG_ADDR(0) }; static int entropy_mcux_trng_init(const struct device *dev) { const struct mcux_entropy_config *config = dev->config; trng_config_t conf; status_t status; status = TRNG_GetDefaultConfig(&conf); __ASSERT_NO_MSG(!status); status = TRNG_Init(config->base, &conf); __ASSERT_NO_MSG(!status); return 0; } DEVICE_DT_INST_DEFINE(0, entropy_mcux_trng_init, NULL, NULL, &entropy_mcux_config, PRE_KERNEL_1, CONFIG_ENTROPY_INIT_PRIORITY, &entropy_mcux_trng_api_funcs); ```
/content/code_sandbox/drivers/entropy/entropy_mcux_trng.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
332
```unknown # Smartbond TRNG configuration config ENTROPY_SMARTBOND_TRNG bool "Renesas Smartbond MCU Family True Random Number Generator (TRNG) Driver" default y depends on DT_HAS_RENESAS_SMARTBOND_TRNG_ENABLED select ENTROPY_HAS_DRIVER help Enable True Random Number Generator (TRNG) driver for Renesas Smartbond MCUs. if ENTROPY_SMARTBOND_TRNG config ENTROPY_SMARTBOND_THR_POOL_SIZE int "Thread-mode random number pool size" range ENTROPY_SMARTBOND_THR_THRESHOLD 256 default 8 help Buffer length in bytes used to store entropy bytes generated by the hardware to make them ready for thread mode consumers. Please note, that size of the pool must be a power of 2. config ENTROPY_SMARTBOND_THR_THRESHOLD int "Thread-mode random number pool low-water threshold" range 4 $(UINT8_MAX) help Low water-mark threshold in bytes to trigger entropy generation for thread mode consumers. As soon as the number of available bytes in the buffer goes below this number hardware entropy generation will be started. config ENTROPY_SMARTBOND_ISR_POOL_SIZE int "ISR-mode random number pool size" range ENTROPY_SMARTBOND_ISR_THRESHOLD 256 default 16 help Buffer length in bytes used to store entropy bytes generated by the hardware to make them ready for ISR consumers. Please note, that size of the pool must be a power of 2. config ENTROPY_SMARTBOND_ISR_THRESHOLD int "ISR-mode random number pool low-water threshold" range 12 $(UINT8_MAX) help Low water-mark threshold in bytes to trigger entropy generation for ISR consumers. As soon as the number of available bytes in the buffer goes below this number hardware entropy generation will be started. endif # ENTROPY_SMARTBOND_TRNG ```
/content/code_sandbox/drivers/entropy/Kconfig.smartbond
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
426
```unknown # RV32M1 entropy generator driver configuration config ENTROPY_RV32M1_TRNG bool "RV32M1 TRNG driver" default y depends on DT_HAS_OPENISA_RV32M1_TRNG_ENABLED select ENTROPY_HAS_DRIVER help This option enables the true random number generator (TRNG) driver based on the RV32M1 TRNG driver. # Don't use use the RV32M1 TRNG as a random source since it can be quite slow. # Instead, use the software implemented xoshiro RNG. choice RNG_GENERATOR_CHOICE default XOSHIRO_RANDOM_GENERATOR if ENTROPY_RV32M1_TRNG endchoice ```
/content/code_sandbox/drivers/entropy/Kconfig.rv32m1
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
152
```unknown # NPCX DRBG driver configuration options menuconfig ENTROPY_NPCX_DRBG bool "NPCX DRBG driver" default y depends on DT_HAS_NUVOTON_NPCX_DRBG_ENABLED && SOC_NPCX9MFP select ENTROPY_HAS_DRIVER help This option enables the deterministic random bit generator (DRBG) driver for NPCX family of processors. if ENTROPY_NPCX_DRBG choice prompt "DRBG Security Strength Selection" default ENTROPY_NPCX_DRBG_SECURITY_STRENGTH_128B help The chosen security strength defines the amount of entropy bits generated internally and passed to the conditioning component. config ENTROPY_NPCX_DRBG_SECURITY_STRENGTH_112B bool "DRBG security strength 112 bits" config ENTROPY_NPCX_DRBG_SECURITY_STRENGTH_128B bool "DRBG security strength 128 bits" config ENTROPY_NPCX_DRBG_SECURITY_STRENGTH_192B bool "DRBG security strength 192 bits" config ENTROPY_NPCX_DRBG_SECURITY_STRENGTH_256B bool "DRBG security strength 256 bits" config ENTROPY_NPCX_DRBG_SECURITY_STRENGTH_128B_TEST bool "DRBG security strength 12b bits test" config ENTROPY_NPCX_DRBG_SECURITY_STRENGTH_256B_TEST bool "DRBG security strength 256 bits test" endchoice config ENTROPY_NPCX_DRBG_SECURITY_STRENGTH int default 0 if ENTROPY_NPCX_DRBG_SECURITY_STRENGTH_112B default 1 if ENTROPY_NPCX_DRBG_SECURITY_STRENGTH_128B default 2 if ENTROPY_NPCX_DRBG_SECURITY_STRENGTH_192B default 3 if ENTROPY_NPCX_DRBG_SECURITY_STRENGTH_256B default 4 if ENTROPY_NPCX_DRBG_SECURITY_STRENGTH_128B_TEST default 5 if ENTROPY_NPCX_DRBG_SECURITY_STRENGTH_256B_TEST config ENTROPY_NPCX_DRBG_RESEED_INTERVAL int "DRBG Reseed Interval" default 100 help Number of gererations allowed until next reseeding. endif ```
/content/code_sandbox/drivers/entropy/Kconfig.npcx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
481
```c /* * */ #include <string.h> #include <zephyr/sys/util.h> #define DT_DRV_COMPAT adi_max32_trng #include <zephyr/device.h> #include <zephyr/drivers/entropy.h> #include <zephyr/drivers/clock_control/adi_max32_clock_control.h> #include <wrap_max32_trng.h> struct max32_trng_config { const struct device *clock; struct max32_perclk perclk; }; static int api_get_entropy(const struct device *dev, uint8_t *buf, uint16_t len) { return MXC_TRNG_Random(buf, len); } static int api_get_entropy_isr(const struct device *dev, uint8_t *buf, uint16_t len, uint32_t flags) { int ret = 0; if ((flags & ENTROPY_BUSYWAIT) == 0) { uint32_t temp; uint32_t copy_len; uint32_t count = 0; while (len) { ret = Wrap_MXC_TRNG_RandomInt_NonBlocking(&temp); if (ret != 0) { break; /* Data not ready do not wait */ } copy_len = MIN(len, 4); memcpy(buf, (uint8_t *)&temp, copy_len); len -= copy_len; buf += copy_len; count += copy_len; } /* User would like to read len bytes but in non-blocking mode * the function might read less, in that case return value will be * number of bytes read, if its 0 that means no data reads function * will return -ENODATA */ ret = count ? count : -ENODATA; } else { /* Allowed to busy-wait */ ret = api_get_entropy(dev, buf, len); if (ret == 0) { ret = len; /* Data retrieved successfully. 
*/ } } return ret; } static const struct entropy_driver_api entropy_max32_api = {.get_entropy = api_get_entropy, .get_entropy_isr = api_get_entropy_isr}; static int entropy_max32_init(const struct device *dev) { int ret; const struct max32_trng_config *cfg = dev->config; /* Enable clock */ ret = clock_control_on(cfg->clock, (clock_control_subsys_t)&cfg->perclk); return ret; } static const struct max32_trng_config max32_trng_cfg = { .clock = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(0)), .perclk.bus = DT_INST_CLOCKS_CELL(0, offset), .perclk.bit = DT_INST_CLOCKS_CELL(0, bit), }; DEVICE_DT_INST_DEFINE(0, entropy_max32_init, NULL, NULL, &max32_trng_cfg, PRE_KERNEL_1, CONFIG_ENTROPY_INIT_PRIORITY, &entropy_max32_api); ```
/content/code_sandbox/drivers/entropy/entropy_max32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
617
```c /* * */ #define DT_DRV_COMPAT nxp_lpc_rng #include <zephyr/device.h> #include <zephyr/drivers/entropy.h> #include <zephyr/random/random.h> #include <zephyr/init.h> #include "fsl_rng.h" struct mcux_entropy_config { RNG_Type *base; }; static int entropy_mcux_rng_get_entropy(const struct device *dev, uint8_t *buffer, uint16_t length) { const struct mcux_entropy_config *config = dev->config; status_t status; status = RNG_GetRandomData(config->base, buffer, length); __ASSERT_NO_MSG(!status); return 0; } static const struct entropy_driver_api entropy_mcux_rng_api_funcs = { .get_entropy = entropy_mcux_rng_get_entropy }; static const struct mcux_entropy_config entropy_mcux_config = { .base = (RNG_Type *)DT_INST_REG_ADDR(0) }; static int entropy_mcux_rng_init(const struct device *dev) { ARG_UNUSED(dev); RNG_Init(entropy_mcux_config.base); return 0; } DEVICE_DT_INST_DEFINE(0, entropy_mcux_rng_init, NULL, NULL, &entropy_mcux_config, PRE_KERNEL_1, CONFIG_ENTROPY_INIT_PRIORITY, &entropy_mcux_rng_api_funcs); ```
/content/code_sandbox/drivers/entropy/entropy_mcux_rng.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
277
```unknown # Entropy generator driver configuration options menuconfig ENTROPY_GENERATOR bool "Entropy drivers" help Include entropy drivers in system config. if ENTROPY_GENERATOR module = ENTROPY module-str = entropy source "subsys/logging/Kconfig.template.log_config" config ENTROPY_INIT_PRIORITY int "Entropy driver init priority" default KERNEL_INIT_PRIORITY_DEVICE help Entropy driver device initialization priority. source "drivers/entropy/Kconfig.b91" source "drivers/entropy/Kconfig.cc13xx_cc26xx" source "drivers/entropy/Kconfig.mcux" source "drivers/entropy/Kconfig.stm32" source "drivers/entropy/Kconfig.esp32" source "drivers/entropy/Kconfig.nrf5" source "drivers/entropy/Kconfig.sam" source "drivers/entropy/Kconfig.smartbond" source "drivers/entropy/Kconfig.native_posix" source "drivers/entropy/Kconfig.rv32m1" source "drivers/entropy/Kconfig.litex" source "drivers/entropy/Kconfig.gecko" source "drivers/entropy/Kconfig.neorv32" source "drivers/entropy/Kconfig.bt_hci" source "drivers/entropy/Kconfig.psa_crypto" source "drivers/entropy/Kconfig.npcx" source "drivers/entropy/Kconfig.max32" config ENTROPY_HAS_DRIVER bool help This is an option to be enabled by individual entropy driver to signal that there is a true entropy driver. endif ```
/content/code_sandbox/drivers/entropy/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
313
```c /* * */ #define DT_DRV_COMPAT ti_cc13xx_cc26xx_trng #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/drivers/entropy.h> #include <zephyr/irq.h> #include <zephyr/pm/policy.h> #include <zephyr/pm/device.h> #include <zephyr/sys/ring_buffer.h> #include <zephyr/sys/sys_io.h> #include <driverlib/prcm.h> #include <driverlib/trng.h> #include <ti/drivers/Power.h> #include <ti/drivers/power/PowerCC26X2.h> #define CPU_FREQ DT_PROP(DT_PATH(cpus, cpu_0), clock_frequency) #define US_PER_SAMPLE (1000000ULL * \ CONFIG_ENTROPY_CC13XX_CC26XX_SAMPLES_PER_CYCLE / CPU_FREQ + 1ULL) struct entropy_cc13xx_cc26xx_data { struct k_sem lock; struct k_sem sync; struct ring_buf pool; uint8_t data[CONFIG_ENTROPY_CC13XX_CC26XX_POOL_SIZE]; #ifdef CONFIG_PM Power_NotifyObj post_notify; bool constrained; #endif }; static void start_trng(struct entropy_cc13xx_cc26xx_data *data) { /* Initialization as described in TRM section 18.6.1.2 */ TRNGReset(); while (sys_read32(TRNG_BASE + TRNG_O_SWRESET)) { continue; } /* Set samples per cycle */ TRNGConfigure(0, CONFIG_ENTROPY_CC13XX_CC26XX_SAMPLES_PER_CYCLE, 0); /* De-tune FROs */ sys_write32(TRNG_FRODETUNE_FRO_MASK_M, TRNG_BASE + TRNG_O_FRODETUNE); /* Enable FROs */ sys_write32(TRNG_FROEN_FRO_MASK_M, TRNG_BASE + TRNG_O_FROEN); /* Set shutdown and alarm thresholds */ sys_write32((CONFIG_ENTROPY_CC13XX_CC26XX_SHUTDOWN_THRESHOLD << 16) | CONFIG_ENTROPY_CC13XX_CC26XX_ALARM_THRESHOLD, TRNG_BASE + TRNG_O_ALARMCNT); TRNGEnable(); TRNGIntEnable(TRNG_NUMBER_READY | TRNG_FRO_SHUTDOWN); } #ifdef CONFIG_PM_DEVICE static void stop_trng(struct entropy_cc13xx_cc26xx_data *data) { TRNGDisable(); TRNGIntClear(TRNG_NUMBER_READY | TRNG_FRO_SHUTDOWN); TRNGIntDisable(TRNG_NUMBER_READY | TRNG_FRO_SHUTDOWN); } #endif static void handle_shutdown_ovf(void) { uint32_t off; /* Clear shutdown */ TRNGIntClear(TRNG_FRO_SHUTDOWN); /* Disabled FROs */ off = sys_read32(TRNG_BASE + TRNG_O_ALARMSTOP); /* Clear alarms */ sys_write32(0, TRNG_BASE + TRNG_O_ALARMMASK); 
sys_write32(0, TRNG_BASE + TRNG_O_ALARMSTOP); /* De-tune FROs */ sys_write32(off, TRNG_BASE + TRNG_O_FRODETUNE); /* Re-enable FROs */ sys_write32(off, TRNG_BASE + TRNG_O_FROEN); } static int entropy_cc13xx_cc26xx_get_entropy(const struct device *dev, uint8_t *buf, uint16_t len) { struct entropy_cc13xx_cc26xx_data *data = dev->data; uint32_t cnt; #ifdef CONFIG_PM unsigned int key = irq_lock(); if (!data->constrained) { pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES); data->constrained = true; } irq_unlock(key); #endif TRNGIntEnable(TRNG_NUMBER_READY); while (len) { k_sem_take(&data->lock, K_FOREVER); cnt = ring_buf_get(&data->pool, buf, len); k_sem_give(&data->lock); if (cnt) { buf += cnt; len -= cnt; } else { k_sem_take(&data->sync, K_FOREVER); } } return 0; } static void entropy_cc13xx_cc26xx_isr(const struct device *dev) { struct entropy_cc13xx_cc26xx_data *data = dev->data; uint32_t src = 0; uint32_t cnt; uint32_t num[2]; /* Interrupt service routine as described in TRM section 18.6.1.3.2 */ src = TRNGStatusGet(); if (src & TRNG_NUMBER_READY) { /* This function acknowledges the ready status */ num[1] = TRNGNumberGet(TRNG_HI_WORD); num[0] = TRNGNumberGet(TRNG_LOW_WORD); cnt = ring_buf_put(&data->pool, (uint8_t *)num, sizeof(num)); /* When pool is full disable interrupt and stop reading numbers */ if (cnt != sizeof(num)) { #ifdef CONFIG_PM if (data->constrained) { pm_policy_state_lock_put( PM_STATE_STANDBY, PM_ALL_SUBSTATES); data->constrained = false; } #endif TRNGIntDisable(TRNG_NUMBER_READY); } k_sem_give(&data->sync); } /* Change the shutdown FROs' oscillating frequency in an attempt to * prevent further locking on to the sampling clock frequency. 
*/ if (src & TRNG_FRO_SHUTDOWN) { handle_shutdown_ovf(); } } static int entropy_cc13xx_cc26xx_get_entropy_isr(const struct device *dev, uint8_t *buf, uint16_t len, uint32_t flags) { struct entropy_cc13xx_cc26xx_data *data = dev->data; uint16_t cnt; uint16_t read = len; uint32_t src; uint32_t num[2]; unsigned int key; key = irq_lock(); cnt = ring_buf_get(&data->pool, buf, len); irq_unlock(key); if ((cnt == len) || ((flags & ENTROPY_BUSYWAIT) == 0U)) { read = cnt; } else { buf += cnt; len -= cnt; /* Allowed to busy-wait. We should use a polling approach */ while (len) { key = irq_lock(); src = TRNGStatusGet(); if (src & TRNG_NUMBER_READY) { /* * This function acknowledges the ready * status */ num[1] = TRNGNumberGet(TRNG_HI_WORD); num[0] = TRNGNumberGet(TRNG_LOW_WORD); ring_buf_put(&data->pool, (uint8_t *)num, sizeof(num)); } /* * If interrupts were enabled during busy wait, this * would allow us to pick up anything that has been put * in by the ISR as well. */ cnt = ring_buf_get(&data->pool, buf, len); if (src & TRNG_FRO_SHUTDOWN) { handle_shutdown_ovf(); } irq_unlock(key); if (cnt) { buf += cnt; len -= cnt; } else { k_busy_wait(US_PER_SAMPLE); } } } return read; } #ifdef CONFIG_PM /* * ======== post_notify_fxn ======== * Called by Power module when waking up the CPU from Standby. The TRNG needs * to be reconfigured afterwards, unless Zephyr's device PM turned it off, in * which case it'd be responsible for turning it back on and reconfiguring it. 
*/ static int post_notify_fxn(unsigned int eventType, uintptr_t eventArg, uintptr_t clientArg) { const struct device *dev = (const struct device *)clientArg; int ret = Power_NOTIFYDONE; int16_t res_id; /* Reconfigure the hardware if returning from sleep */ if (eventType == PowerCC26XX_AWAKE_STANDBY) { res_id = PowerCC26XX_PERIPH_TRNG; if (Power_getDependencyCount(res_id) != 0) { /* Reconfigure and enable TRNG only if powered */ start_trng(dev->data); } } return (ret); } #endif #ifdef CONFIG_PM_DEVICE static int entropy_cc13xx_cc26xx_pm_action(const struct device *dev, enum pm_device_action action) { struct entropy_cc13xx_cc26xx_data *data = dev->data; switch (action) { case PM_DEVICE_ACTION_RESUME: Power_setDependency(PowerCC26XX_PERIPH_TRNG); start_trng(data); break; case PM_DEVICE_ACTION_SUSPEND: stop_trng(data); Power_releaseDependency(PowerCC26XX_PERIPH_TRNG); break; default: return -ENOTSUP; } return 0; } #endif /* CONFIG_PM_DEVICE */ static int entropy_cc13xx_cc26xx_init(const struct device *dev) { struct entropy_cc13xx_cc26xx_data *data = dev->data; /* Initialize driver data */ ring_buf_init(&data->pool, sizeof(data->data), data->data); #if defined(CONFIG_PM) Power_setDependency(PowerCC26XX_PERIPH_TRNG); /* Stay out of standby until buffer is filled with entropy */ pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES); data->constrained = true; /* Register notification function */ Power_registerNotify(&data->post_notify, PowerCC26XX_AWAKE_STANDBY, post_notify_fxn, (uintptr_t)dev); #else /* Power TRNG domain */ PRCMPowerDomainOn(PRCM_DOMAIN_PERIPH); /* Enable TRNG peripheral clocks */ PRCMPeripheralRunEnable(PRCM_PERIPH_TRNG); /* Enabled the TRNG while in sleep mode to keep the entropy pool full. After * the pool is full the TRNG will enter idle mode when random numbers are no * longer being read. 
*/ PRCMPeripheralSleepEnable(PRCM_PERIPH_TRNG); PRCMPeripheralDeepSleepEnable(PRCM_PERIPH_TRNG); /* Load PRCM settings */ PRCMLoadSet(); while (!PRCMLoadGet()) { continue; } /* Peripherals should not be accessed until power domain is on. */ while (PRCMPowerDomainsAllOn(PRCM_DOMAIN_PERIPH) != PRCM_DOMAIN_POWER_ON) { continue; } #endif start_trng(data); IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), entropy_cc13xx_cc26xx_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQN(0)); return 0; } static const struct entropy_driver_api entropy_cc13xx_cc26xx_driver_api = { .get_entropy = entropy_cc13xx_cc26xx_get_entropy, .get_entropy_isr = entropy_cc13xx_cc26xx_get_entropy_isr, }; static struct entropy_cc13xx_cc26xx_data entropy_cc13xx_cc26xx_data = { .lock = Z_SEM_INITIALIZER(entropy_cc13xx_cc26xx_data.lock, 1, 1), .sync = Z_SEM_INITIALIZER(entropy_cc13xx_cc26xx_data.sync, 0, 1), }; PM_DEVICE_DT_INST_DEFINE(0, entropy_cc13xx_cc26xx_pm_action); DEVICE_DT_INST_DEFINE(0, entropy_cc13xx_cc26xx_init, PM_DEVICE_DT_INST_GET(0), &entropy_cc13xx_cc26xx_data, NULL, PRE_KERNEL_1, CONFIG_ENTROPY_INIT_PRIORITY, &entropy_cc13xx_cc26xx_driver_api); ```
/content/code_sandbox/drivers/entropy/entropy_cc13xx_cc26xx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,548
```c /* * */ #define DT_DRV_COMPAT neorv32_trng #include <zephyr/device.h> #include <zephyr/drivers/syscon.h> #include <zephyr/drivers/entropy.h> #include <zephyr/logging/log.h> #include <zephyr/pm/device.h> #include <zephyr/sys/sys_io.h> #include <soc.h> LOG_MODULE_REGISTER(neorv32_trng, CONFIG_ENTROPY_LOG_LEVEL); /* TRNG CTRL register bits */ #define NEORV32_TRNG_CTRL_DATA_MASK BIT_MASK(8) #define NEORV32_TRNG_CTRL_EN BIT(30) #define NEORV32_TRNG_CTRL_VALID BIT(31) struct neorv32_trng_config { const struct device *syscon; mm_reg_t base; }; static inline uint32_t neorv32_trng_read_ctrl(const struct device *dev) { const struct neorv32_trng_config *config = dev->config; return sys_read32(config->base); } static inline void neorv32_trng_write_ctrl(const struct device *dev, uint32_t ctrl) { const struct neorv32_trng_config *config = dev->config; sys_write32(ctrl, config->base); } static int neorv32_trng_get_entropy(const struct device *dev, uint8_t *buffer, uint16_t len) { uint32_t ctrl; while (len > 0) { ctrl = neorv32_trng_read_ctrl(dev); if ((ctrl & NEORV32_TRNG_CTRL_VALID) != 0) { *buffer++ = ctrl & NEORV32_TRNG_CTRL_DATA_MASK; len--; } } return 0; } static int neorv32_trng_get_entropy_isr(const struct device *dev, uint8_t *buffer, uint16_t len, uint32_t flags) { uint32_t ctrl; int err; if ((flags & ENTROPY_BUSYWAIT) == 0) { ctrl = neorv32_trng_read_ctrl(dev); if ((ctrl & NEORV32_TRNG_CTRL_VALID) != 0) { *buffer = ctrl & NEORV32_TRNG_CTRL_DATA_MASK; return 1; } /* No entropy available */ return -ENODATA; } err = neorv32_trng_get_entropy(dev, buffer, len); if (err < 0) { return err; } return len; } static int neorv32_trng_init(const struct device *dev) { const struct neorv32_trng_config *config = dev->config; uint32_t features; int err; if (!device_is_ready(config->syscon)) { LOG_ERR("syscon device not ready"); return -EINVAL; } err = syscon_read_reg(config->syscon, NEORV32_SYSINFO_FEATURES, &features); if (err < 0) { LOG_ERR("failed to determine implemented 
features (err %d)", err); return err; } if ((features & NEORV32_SYSINFO_FEATURES_IO_TRNG) == 0) { LOG_ERR("neorv32 trng not supported"); return -ENODEV; } neorv32_trng_write_ctrl(dev, NEORV32_TRNG_CTRL_EN); return 0; } #ifdef CONFIG_PM_DEVICE static int neorv32_trng_pm_action(const struct device *dev, enum pm_device_action action) { switch (action) { case PM_DEVICE_ACTION_SUSPEND: neorv32_trng_write_ctrl(dev, 0); break; case PM_DEVICE_ACTION_RESUME: neorv32_trng_write_ctrl(dev, NEORV32_TRNG_CTRL_EN); break; default: return -ENOTSUP; } return 0; } #endif /* CONFIG_PM_DEVICE */ static const struct entropy_driver_api neorv32_trng_driver_api = { .get_entropy = neorv32_trng_get_entropy, .get_entropy_isr = neorv32_trng_get_entropy_isr, }; #define NEORV32_TRNG_INIT(n) \ static const struct neorv32_trng_config neorv32_trng_##n##_config = { \ .syscon = DEVICE_DT_GET(DT_INST_PHANDLE(n, syscon)), \ .base = DT_INST_REG_ADDR(n), \ }; \ \ PM_DEVICE_DT_INST_DEFINE(n, neorv32_trng_pm_action); \ \ DEVICE_DT_INST_DEFINE(n, &neorv32_trng_init, \ PM_DEVICE_DT_INST_GET(n), \ NULL, \ &neorv32_trng_##n##_config, \ PRE_KERNEL_1, \ CONFIG_ENTROPY_INIT_PRIORITY, \ &neorv32_trng_driver_api); DT_INST_FOREACH_STATUS_OKAY(NEORV32_TRNG_INIT) ```
/content/code_sandbox/drivers/entropy/entropy_neorv32_trng.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,071
```c /* * */ #define DT_DRV_COMPAT telink_b91_trng #include <trng.h> #include <zephyr/drivers/entropy.h> #include <string.h> /* API implementation: driver initialization */ static int entropy_b91_trng_init(const struct device *dev) { ARG_UNUSED(dev); trng_init(); return 0; } /* API implementation: get_entropy */ static int entropy_b91_trng_get_entropy(const struct device *dev, uint8_t *buffer, uint16_t length) { ARG_UNUSED(dev); uint32_t value = 0; while (length) { value = trng_rand(); if (length >= sizeof(value)) { memcpy(buffer, &value, sizeof(value)); buffer += sizeof(value); length -= sizeof(value); } else { memcpy(buffer, &value, length); break; } } return 0; } /* API implementation: get_entropy_isr */ static int entropy_b91_trng_get_entropy_isr(const struct device *dev, uint8_t *buffer, uint16_t length, uint32_t flags) { ARG_UNUSED(flags); /* No specific handling in case of running from ISR, just call standard API */ entropy_b91_trng_get_entropy(dev, buffer, length); return length; } /* Entropy driver APIs structure */ static const struct entropy_driver_api entropy_b91_trng_api = { .get_entropy = entropy_b91_trng_get_entropy, .get_entropy_isr = entropy_b91_trng_get_entropy_isr }; /* Entropy driver registration */ DEVICE_DT_INST_DEFINE(0, entropy_b91_trng_init, NULL, NULL, NULL, PRE_KERNEL_1, CONFIG_ENTROPY_INIT_PRIORITY, &entropy_b91_trng_api); ```
/content/code_sandbox/drivers/entropy/entropy_b91_trng.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
370
```c
/*
 *
 */

#include <zephyr/drivers/entropy.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/util.h>
#include <soc.h>
#include <hal/nrf_rng.h>
#include <zephyr/irq.h>

#define DT_DRV_COMPAT nordic_nrf_rng

#define IRQN DT_INST_IRQN(0)
#define IRQ_PRIO DT_INST_IRQ(0, priority)

/*
 * The nRF5 RNG HW has several characteristics that need to be taken
 * into account by the driver to achieve energy efficient generation
 * of entropy.
 *
 * The RNG does not support continuously DMA'ing entropy into RAM,
 * values must be read out by the CPU byte-by-byte. But once started,
 * it will continue to generate bytes until stopped.
 *
 * The generation time for byte 0 after starting generation (with BIAS
 * correction) is:
 *
 * nRF51822 - 677us
 * nRF52810 - 248us
 * nRF52840 - 248us
 *
 * The generation time for byte N >= 1 after starting generation (with
 * BIAS correction) is:
 *
 * nRF51822 - 677us
 * nRF52810 - 120us
 * nRF52840 - 120us
 *
 * Due to the first byte in a stream of bytes being more costly on
 * some platforms a "water system" inspired algorithm is used to
 * amortize the cost of the first byte.
 *
 * The algorithm will delay generation of entropy until the amount of
 * bytes goes below THRESHOLD, at which point it will generate entropy
 * until the BUF_LEN limit is reached.
 *
 * The entropy level is checked at the end of every consumption of
 * entropy.
 *
 * The algorithm and HW together has these characteristics:
 *
 * Setting a low threshold will highly amortize the extra 120us cost
 * of the first byte on nRF52.
 *
 * Setting a high threshold will minimize the time spent waiting for
 * entropy.
 *
 * To minimize power consumption the threshold should either be set
 * low or high depending on the HFCLK-usage pattern of other
 * components.
 *
 * If the threshold is set close to the BUF_LEN, and the system
 * happens to anyway be using the HFCLK for several hundred us after
 * entropy is requested there will be no extra current-consumption for
 * keeping clocks running for entropy generation.
 *
 */

/*
 * Lock-free ring buffer of entropy bytes. Indices are single bytes and
 * wrap via `mask` (size must be a power of two). `first_alloc` marks
 * bytes reserved by an in-progress read, `first_read` marks bytes fully
 * consumed; the gap between them detects nested (preempting) readers.
 */
struct rng_pool {
	uint8_t first_alloc;
	uint8_t first_read;
	uint8_t last;
	uint8_t mask;
	uint8_t threshold;
	FLEXIBLE_ARRAY_DECLARE(uint8_t, buffer);
};

/* Raw storage for a pool: header plus `len` data bytes. */
#define RNG_POOL_DEFINE(name, len) uint8_t name[sizeof(struct rng_pool) + (len)]

BUILD_ASSERT((CONFIG_ENTROPY_NRF5_ISR_POOL_SIZE &
	      (CONFIG_ENTROPY_NRF5_ISR_POOL_SIZE - 1)) == 0,
	     "The CONFIG_ENTROPY_NRF5_ISR_POOL_SIZE must be a power of 2!");

BUILD_ASSERT((CONFIG_ENTROPY_NRF5_THR_POOL_SIZE &
	      (CONFIG_ENTROPY_NRF5_THR_POOL_SIZE - 1)) == 0,
	     "The CONFIG_ENTROPY_NRF5_THR_POOL_SIZE must be a power of 2!");

/* Driver state: one pool served to ISR callers, one to thread callers. */
struct entropy_nrf5_dev_data {
	struct k_sem sem_lock;	/* serializes thread-context readers */
	struct k_sem sem_sync;	/* signaled by ISR when pools are full */

	RNG_POOL_DEFINE(isr, CONFIG_ENTROPY_NRF5_ISR_POOL_SIZE);
	RNG_POOL_DEFINE(thr, CONFIG_ENTROPY_NRF5_THR_POOL_SIZE);
};

static struct entropy_nrf5_dev_data entropy_nrf5_data;

/*
 * Fetch one byte from the RNG if VALRDY is set; -EAGAIN otherwise.
 * Runs with interrupts locked so event-check and read are atomic.
 */
static int random_byte_get(void)
{
	int retval = -EAGAIN;
	unsigned int key;

	key = irq_lock();

	if (nrf_rng_event_check(NRF_RNG, NRF_RNG_EVENT_VALRDY)) {
		retval = nrf_rng_random_value_get(NRF_RNG);
		nrf_rng_event_clear(NRF_RNG, NRF_RNG_EVENT_VALRDY);
	}

	irq_unlock(key);

	return retval;
}

/*
 * Pop up to `len` bytes from a pool into `buf`; returns the count
 * actually copied. Safe against preemption by a higher-priority reader
 * of the same pool. Restarts RNG generation when the pool drains to
 * its threshold.
 */
static uint16_t rng_pool_get(struct rng_pool *rngp, uint8_t *buf, uint16_t len)
{
	uint32_t last  = rngp->last;
	uint32_t mask  = rngp->mask;
	uint8_t *dst   = buf;
	uint32_t first, available;
	uint32_t other_read_in_progress;
	unsigned int key;

	key = irq_lock();
	first = rngp->first_alloc;

	/*
	 * The other_read_in_progress is non-zero if rngp->first_read != first,
	 * which means that lower-priority code (which was interrupted by this
	 * call) already allocated area for read.
	 */
	other_read_in_progress = (rngp->first_read ^ first);

	available = (last - first) & mask;
	if (available < len) {
		len = available;
	}

	/*
	 * Move alloc index forward to signal, that part of the buffer is
	 * now reserved for this call.
	 */
	rngp->first_alloc = (first + len) & mask;
	irq_unlock(key);

	while (likely(len--)) {
		*dst++ = rngp->buffer[first];
		first = (first + 1) & mask;
	}

	/*
	 * If this call is the last one accessing the pool, move read index
	 * to signal that all allocated regions are now read and could be
	 * overwritten.
	 */
	if (likely(!other_read_in_progress)) {
		key = irq_lock();
		rngp->first_read = rngp->first_alloc;
		irq_unlock(key);
	}

	len = dst - buf;
	available = available - len;
	if (available <= rngp->threshold) {
		nrf_rng_task_trigger(NRF_RNG, NRF_RNG_TASK_START);
	}

	return len;
}

/* Push one byte into a pool; -ENOBUFS when the pool is full. */
static int rng_pool_put(struct rng_pool *rngp, uint8_t byte)
{
	uint8_t first = rngp->first_read;
	uint8_t last  = rngp->last;
	uint8_t mask  = rngp->mask;

	/* Signal error if the pool is full. */
	if (((last - first) & mask) == mask) {
		return -ENOBUFS;
	}

	rngp->buffer[last] = byte;
	rngp->last = (last + 1) & mask;

	return 0;
}

/* Reset pool indices; `size` must be a power of two. */
static void rng_pool_init(struct rng_pool *rngp, uint16_t size, uint8_t threshold)
{
	rngp->first_alloc = 0U;
	rngp->first_read  = 0U;
	rngp->last	  = 0U;
	rngp->mask	  = size - 1;
	rngp->threshold	  = threshold;
}

/*
 * RNG VALRDY ISR: route each fresh byte to the ISR pool first, then the
 * thread pool; stop the RNG once both are full and wake any waiter.
 */
static void isr(const void *arg)
{
	int byte, ret;

	ARG_UNUSED(arg);

	byte = random_byte_get();
	if (byte < 0) {
		return;
	}

	ret = rng_pool_put((struct rng_pool *)(entropy_nrf5_data.isr), byte);
	if (ret < 0) {
		ret = rng_pool_put((struct rng_pool *)(entropy_nrf5_data.thr),
				   byte);
		if (ret < 0) {
			nrf_rng_task_trigger(NRF_RNG, NRF_RNG_TASK_STOP);
		}

		k_sem_give(&entropy_nrf5_data.sem_sync);
	}
}

/*
 * Thread-context entropy API: drain the thread pool, sleeping on
 * sem_sync whenever it is empty, until `len` bytes are delivered.
 */
static int entropy_nrf5_get_entropy(const struct device *dev, uint8_t *buf,
				    uint16_t len)
{
	/* Check if this API is called on correct driver instance. */
	__ASSERT_NO_MSG(&entropy_nrf5_data == dev->data);

	while (len) {
		uint16_t bytes;

		k_sem_take(&entropy_nrf5_data.sem_lock, K_FOREVER);
		bytes = rng_pool_get((struct rng_pool *)(entropy_nrf5_data.thr),
				     buf, len);
		k_sem_give(&entropy_nrf5_data.sem_lock);

		if (bytes == 0U) {
			/* Pool is empty: Sleep until next interrupt. */
			k_sem_take(&entropy_nrf5_data.sem_sync, K_FOREVER);
			continue;
		}

		len -= bytes;
		buf += bytes;
	}

	return 0;
}

/*
 * ISR-context entropy API: without ENTROPY_BUSYWAIT, return whatever
 * the ISR pool holds; with it, busy-poll the RNG directly (IRQ masked)
 * until `len` bytes are produced.
 */
static int entropy_nrf5_get_entropy_isr(const struct device *dev,
					uint8_t *buf, uint16_t len,
					uint32_t flags)
{
	uint16_t cnt = len;

	/* Check if this API is called on correct driver instance. */
	__ASSERT_NO_MSG(&entropy_nrf5_data == dev->data);

	if (likely((flags & ENTROPY_BUSYWAIT) == 0U)) {
		return rng_pool_get((struct rng_pool *)(entropy_nrf5_data.isr),
				    buf, len);
	}

	if (len) {
		unsigned int key;
		int irq_enabled;

		key = irq_lock();
		irq_enabled = irq_is_enabled(IRQN);
		irq_disable(IRQN);
		irq_unlock(key);

		nrf_rng_event_clear(NRF_RNG, NRF_RNG_EVENT_VALRDY);
		nrf_rng_task_trigger(NRF_RNG, NRF_RNG_TASK_START);

		/* Clear NVIC pending bit. This ensures that a subsequent
		 * RNG event will set the Cortex-M single-bit event register
		 * to 1 (the bit is set when NVIC pending IRQ status is
		 * changed from 0 to 1)
		 */
		NVIC_ClearPendingIRQ(IRQN);

		do {
			int byte;

			while (!nrf_rng_event_check(NRF_RNG,
						    NRF_RNG_EVENT_VALRDY)) {
				k_cpu_atomic_idle(irq_lock());
			}

			byte = random_byte_get();
			NVIC_ClearPendingIRQ(IRQN);

			if (byte < 0) {
				continue;
			}

			buf[--len] = byte;
		} while (len);

		if (irq_enabled) {
			irq_enable(IRQN);
		}
	}

	return cnt;
}

static int entropy_nrf5_init(const struct device *dev);

static const struct entropy_driver_api entropy_nrf5_api_funcs = {
	.get_entropy = entropy_nrf5_get_entropy,
	.get_entropy_isr = entropy_nrf5_get_entropy_isr
};

DEVICE_DT_INST_DEFINE(0, entropy_nrf5_init,
		      NULL, &entropy_nrf5_data, NULL,
		      PRE_KERNEL_1, CONFIG_ENTROPY_INIT_PRIORITY,
		      &entropy_nrf5_api_funcs);

/*
 * Driver init: set up semaphores and both pools, configure bias
 * correction per Kconfig, then start the RNG and enable its IRQ so the
 * pools begin filling immediately.
 */
static int entropy_nrf5_init(const struct device *dev)
{
	/* Check if this API is called on correct driver instance. */
	__ASSERT_NO_MSG(&entropy_nrf5_data == dev->data);

	/* Locking semaphore initialized to 1 (unlocked) */
	k_sem_init(&entropy_nrf5_data.sem_lock, 1, 1);

	/* Synching semaphore */
	k_sem_init(&entropy_nrf5_data.sem_sync, 0, 1);

	rng_pool_init((struct rng_pool *)(entropy_nrf5_data.thr),
		      CONFIG_ENTROPY_NRF5_THR_POOL_SIZE,
		      CONFIG_ENTROPY_NRF5_THR_THRESHOLD);
	rng_pool_init((struct rng_pool *)(entropy_nrf5_data.isr),
		      CONFIG_ENTROPY_NRF5_ISR_POOL_SIZE,
		      CONFIG_ENTROPY_NRF5_ISR_THRESHOLD);

	/* Enable or disable bias correction */
	if (IS_ENABLED(CONFIG_ENTROPY_NRF5_BIAS_CORRECTION)) {
		nrf_rng_error_correction_enable(NRF_RNG);
	} else {
		nrf_rng_error_correction_disable(NRF_RNG);
	}

	nrf_rng_event_clear(NRF_RNG, NRF_RNG_EVENT_VALRDY);
	nrf_rng_int_enable(NRF_RNG, NRF_RNG_INT_VALRDY_MASK);
	nrf_rng_task_trigger(NRF_RNG, NRF_RNG_TASK_START);

	IRQ_CONNECT(IRQN, IRQ_PRIO, isr, &entropy_nrf5_data, 0);
	irq_enable(IRQN);

	return 0;
}
```
/content/code_sandbox/drivers/entropy/entropy_nrf5.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,560
```c /* * */ #define DT_DRV_COMPAT zephyr_psa_crypto_rng #include <zephyr/drivers/entropy.h> #include <psa/crypto.h> /* API implementation: PSA Crypto initialization */ static int entropy_psa_crypto_rng_init(const struct device *dev) { psa_status_t status = PSA_ERROR_CORRUPTION_DETECTED; ARG_UNUSED(dev); status = psa_crypto_init(); if (status != PSA_SUCCESS) { return -EIO; } return 0; } /* API implementation: get_entropy */ static int entropy_psa_crypto_rng_get_entropy(const struct device *dev, uint8_t *buffer, uint16_t length) { psa_status_t status = PSA_ERROR_CORRUPTION_DETECTED; ARG_UNUSED(dev); status = psa_generate_random(buffer, length); if (status != PSA_SUCCESS) { return -EIO; } return 0; } /* Entropy driver APIs structure */ static const struct entropy_driver_api entropy_psa_crypto_rng_api = { .get_entropy = entropy_psa_crypto_rng_get_entropy, }; /* Entropy driver registration */ DEVICE_DT_INST_DEFINE(0, entropy_psa_crypto_rng_init, NULL, NULL, NULL, PRE_KERNEL_1, CONFIG_ENTROPY_INIT_PRIORITY, &entropy_psa_crypto_rng_api); ```
/content/code_sandbox/drivers/entropy/entropy_psa_crypto.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
270
```c /* * */ #define DT_DRV_COMPAT silabs_gecko_semailbox #include <zephyr/drivers/entropy.h> #include <soc.h> #include "em_cmu.h" #include "sl_se_manager.h" #include "sl_se_manager_entropy.h" static int entropy_gecko_se_get_entropy(const struct device *dev, uint8_t *buffer, uint16_t length) { ARG_UNUSED(dev); int err = 0; sl_status_t status; sl_se_command_context_t cmd_ctx; status = sl_se_init_command_context(&cmd_ctx); if (status == SL_STATUS_OK) { status = sl_se_get_random(&cmd_ctx, buffer, length); if (status != SL_STATUS_OK) { err = -EIO; } sl_se_deinit_command_context(&cmd_ctx); } else { err = -EIO; } return err; } static int entropy_gecko_se_init(const struct device *dev) { if (sl_se_init()) { return -EIO; } return 0; } static const struct entropy_driver_api entropy_gecko_se_api_funcs = { .get_entropy = entropy_gecko_se_get_entropy, }; #define GECKO_SE_INIT(n) \ DEVICE_DT_INST_DEFINE(n, \ entropy_gecko_se_init, NULL, \ NULL, NULL, \ PRE_KERNEL_1, CONFIG_ENTROPY_INIT_PRIORITY, \ &entropy_gecko_se_api_funcs); \ DT_INST_FOREACH_STATUS_OKAY(GECKO_SE_INIT) ```
/content/code_sandbox/drivers/entropy/entropy_gecko_se.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
321
```c /* * Organisation (CSIRO) ABN 41 687 119 230. * */ #define DT_DRV_COMPAT zephyr_bt_hci_entropy #include <zephyr/drivers/entropy.h> #include <zephyr/bluetooth/hci.h> #include <string.h> static int entropy_bt_init(const struct device *dev) { /* Nothing to do */ return 0; } static int entropy_bt_get_entropy(const struct device *dev, uint8_t *buffer, uint16_t length) { if (!bt_is_ready()) { return -EAGAIN; } return bt_hci_le_rand(buffer, length); } /* HCI commands cannot be run from an interrupt context */ static const struct entropy_driver_api entropy_bt_api = { .get_entropy = entropy_bt_get_entropy, .get_entropy_isr = NULL }; #define ENTROPY_BT_HCI_INIT(inst) \ DEVICE_DT_INST_DEFINE(inst, entropy_bt_init, \ NULL, NULL, NULL, \ PRE_KERNEL_1, \ CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \ &entropy_bt_api); DT_INST_FOREACH_STATUS_OKAY(ENTROPY_BT_HCI_INIT) ```
/content/code_sandbox/drivers/entropy/entropy_bt_hci.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
243
```c /* * */ #define DT_DRV_COMPAT litex_prbs #include <zephyr/device.h> #include <zephyr/drivers/entropy.h> #include <errno.h> #include <zephyr/init.h> #include <soc.h> #include <string.h> #include <zephyr/kernel.h> #define PRBS_STATUS DT_INST_REG_ADDR(0) #define PRBS_WIDTH DT_INST_REG_SIZE(0) static int entropy_prbs_get_entropy(const struct device *dev, uint8_t *buffer, uint16_t length) { while (length > 0) { size_t to_copy; uint32_t value; value = litex_read(PRBS_STATUS, PRBS_WIDTH); to_copy = MIN(length, sizeof(value)); memcpy(buffer, &value, to_copy); buffer += to_copy; length -= to_copy; } return 0; } static const struct entropy_driver_api entropy_prbs_api = { .get_entropy = entropy_prbs_get_entropy }; DEVICE_DT_INST_DEFINE(0, NULL, NULL, NULL, NULL, PRE_KERNEL_1, CONFIG_ENTROPY_INIT_PRIORITY, &entropy_prbs_api); ```
/content/code_sandbox/drivers/entropy/entropy_litex.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
242
```unknown config FAKE_ENTROPY_NATIVE_POSIX bool "Native posix entropy driver" default y depends on DT_HAS_ZEPHYR_NATIVE_POSIX_RNG_ENABLED select ENTROPY_HAS_DRIVER help This option enables the test random number generator for the native_posix board (ARCH_POSIX). This is based on the host random() API. Note that this entropy generator is only meant for test purposes and does not generate real entropy. It actually generates always the same sequence of random numbers if initialized with the same seed. ```
/content/code_sandbox/drivers/entropy/Kconfig.native_posix
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
120
```unknown # NEORV32 TRNG configuration config ENTROPY_NEORV32_TRNG bool "NEORV32 TRNG" default y depends on DT_HAS_NEORV32_TRNG_ENABLED depends on SYSCON select ENTROPY_HAS_DRIVER help This option enables the True Random Number Generator (TRNG) driver for the NEORV32. ```
/content/code_sandbox/drivers/entropy/Kconfig.neorv32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
85
```unknown # Telink B91 entropy (TRNG) configuration options config ENTROPY_TELINK_B91_TRNG bool "Telink B91 Entropy driver" default y depends on DT_HAS_TELINK_B91_TRNG_ENABLED select ENTROPY_HAS_DRIVER help Enable the B91 Entropy driver. ```
/content/code_sandbox/drivers/entropy/Kconfig.b91
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
68
```unknown config REGULATOR_DA1469X bool "DA1469X regulators driver" default y depends on DT_HAS_RENESAS_SMARTBOND_REGULATOR_ENABLED help Enable support for the Smartbond DA1469x regulators. config REGULATOR_DA1469X_INIT_PRIORITY int "Renesas DA1469x regulators driver init priority" default 20 depends on REGULATOR_DA1469X help Init priority for the Renesas DA1469x regulators driver. ```
/content/code_sandbox/drivers/regulator/Kconfig.da1469x
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
108
```unknown config REGULATOR_NPM6001 bool "nPM6001 PMIC regulator driver" default y depends on DT_HAS_NORDIC_NPM6001_REGULATOR_ENABLED select I2C select MFD help Enable the Nordic nPM6001 PMIC regulator driver config REGULATOR_NPM6001_INIT_PRIORITY int "nPM6001 regulator driver init priority" default 86 depends on REGULATOR_NPM6001 help Init priority for the Nordic nPM6001 regulator driver. ```
/content/code_sandbox/drivers/regulator/Kconfig.npm6001
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
114
```c /* * */ #define DT_DRV_COMPAT st_stm32_rng #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/drivers/entropy.h> #include <zephyr/random/random.h> #include <zephyr/init.h> #include <zephyr/sys/__assert.h> #include <zephyr/sys/util.h> #include <errno.h> #include <soc.h> #include <zephyr/pm/policy.h> #include <stm32_ll_bus.h> #include <stm32_ll_rcc.h> #include <stm32_ll_rng.h> #include <stm32_ll_pka.h> #include <stm32_ll_system.h> #include <zephyr/sys/printk.h> #include <zephyr/pm/device.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> #include <zephyr/irq.h> #include <zephyr/sys/barrier.h> #include "stm32_hsem.h" #define IRQN DT_INST_IRQN(0) #define IRQ_PRIO DT_INST_IRQ(0, priority) #if defined(RNG_CR_CONDRST) #define STM32_CONDRST_SUPPORT #endif /* * This driver need to take into account all STM32 family: * - simple rng without hardware fifo and no DMA. * - Variable delay between two consecutive random numbers * (depending on family and clock settings) * * * Due to the first byte in a stream of bytes being more costly on * some platforms a "water system" inspired algorithm is used to * amortize the cost of the first byte. * * The algorithm will delay generation of entropy until the amount of * bytes goes below THRESHOLD, at which point it will generate entropy * until the BUF_LEN limit is reached. * * The entropy level is checked at the end of every consumption of * entropy. 
* */ struct rng_pool { uint8_t first_alloc; uint8_t first_read; uint8_t last; uint8_t mask; uint8_t threshold; FLEXIBLE_ARRAY_DECLARE(uint8_t, buffer); }; #define RNG_POOL_DEFINE(name, len) uint8_t name[sizeof(struct rng_pool) + (len)] BUILD_ASSERT((CONFIG_ENTROPY_STM32_ISR_POOL_SIZE & (CONFIG_ENTROPY_STM32_ISR_POOL_SIZE - 1)) == 0, "The CONFIG_ENTROPY_STM32_ISR_POOL_SIZE must be a power of 2!"); BUILD_ASSERT((CONFIG_ENTROPY_STM32_THR_POOL_SIZE & (CONFIG_ENTROPY_STM32_THR_POOL_SIZE - 1)) == 0, "The CONFIG_ENTROPY_STM32_THR_POOL_SIZE must be a power of 2!"); struct entropy_stm32_rng_dev_cfg { struct stm32_pclken *pclken; }; struct entropy_stm32_rng_dev_data { RNG_TypeDef *rng; const struct device *clock; struct k_sem sem_lock; struct k_sem sem_sync; struct k_work filling_work; bool filling_pools; RNG_POOL_DEFINE(isr, CONFIG_ENTROPY_STM32_ISR_POOL_SIZE); RNG_POOL_DEFINE(thr, CONFIG_ENTROPY_STM32_THR_POOL_SIZE); }; static struct stm32_pclken pclken_rng[] = STM32_DT_INST_CLOCKS(0); static struct entropy_stm32_rng_dev_cfg entropy_stm32_rng_config = { .pclken = pclken_rng }; static struct entropy_stm32_rng_dev_data entropy_stm32_rng_data = { .rng = (RNG_TypeDef *)DT_INST_REG_ADDR(0), }; static int entropy_stm32_suspend(void) { const struct device *dev = DEVICE_DT_GET(DT_DRV_INST(0)); struct entropy_stm32_rng_dev_data *dev_data = dev->data; const struct entropy_stm32_rng_dev_cfg *dev_cfg = dev->config; RNG_TypeDef *rng = dev_data->rng; int res; #if defined(CONFIG_SOC_SERIES_STM32WBX) || defined(CONFIG_STM32H7_DUAL_CORE) /* Prevent concurrent access with PM */ z_stm32_hsem_lock(CFG_HW_RNG_SEMID, HSEM_LOCK_WAIT_FOREVER); #endif /* CONFIG_SOC_SERIES_STM32WBX || CONFIG_STM32H7_DUAL_CORE */ LL_RNG_Disable(rng); #ifdef CONFIG_SOC_SERIES_STM32WBAX uint32_t wait_cycles, rng_rate; if (LL_PKA_IsEnabled(PKA)) { return 0; } if (clock_control_get_rate(dev_data->clock, (clock_control_subsys_t) &dev_cfg->pclken[0], &rng_rate) < 0) { return -EIO; } wait_cycles = SystemCoreClock / 
rng_rate * 2; for (int i = wait_cycles; i >= 0; i--) { } #endif /* CONFIG_SOC_SERIES_STM32WBAX */ res = clock_control_off(dev_data->clock, (clock_control_subsys_t)&dev_cfg->pclken[0]); #if defined(CONFIG_SOC_SERIES_STM32WBX) || defined(CONFIG_STM32H7_DUAL_CORE) z_stm32_hsem_unlock(CFG_HW_RNG_SEMID); #endif /* CONFIG_SOC_SERIES_STM32WBX || CONFIG_STM32H7_DUAL_CORE */ return res; } static int entropy_stm32_resume(void) { const struct device *dev = DEVICE_DT_GET(DT_DRV_INST(0)); struct entropy_stm32_rng_dev_data *dev_data = dev->data; const struct entropy_stm32_rng_dev_cfg *dev_cfg = dev->config; RNG_TypeDef *rng = dev_data->rng; int res; res = clock_control_on(dev_data->clock, (clock_control_subsys_t)&dev_cfg->pclken[0]); LL_RNG_Enable(rng); LL_RNG_EnableIT(rng); return res; } static void configure_rng(void) { RNG_TypeDef *rng = entropy_stm32_rng_data.rng; #ifdef STM32_CONDRST_SUPPORT uint32_t desired_nist_cfg = DT_INST_PROP_OR(0, nist_config, 0U); uint32_t desired_htcr = DT_INST_PROP_OR(0, health_test_config, 0U); uint32_t cur_nist_cfg = 0U; uint32_t cur_htcr = 0U; #if DT_INST_NODE_HAS_PROP(0, nist_config) /* * Configure the RNG_CR in compliance with the NIST SP800. * The nist-config is direclty copied from the DTS. * The RNG clock must be 48MHz else the clock DIV is not adpated. 
* The RNG_CR_CONDRST is set to 1 at the same time the RNG_CR is written */ cur_nist_cfg = READ_BIT(rng->CR, (RNG_CR_NISTC | RNG_CR_CLKDIV | RNG_CR_RNG_CONFIG1 | RNG_CR_RNG_CONFIG2 | RNG_CR_RNG_CONFIG3 #if defined(RNG_CR_ARDIS) | RNG_CR_ARDIS /* For STM32U5 series, the ARDIS bit7 is considered in the nist-config */ #endif /* RNG_CR_ARDIS */ )); #endif /* nist_config */ #if DT_INST_NODE_HAS_PROP(0, health_test_config) cur_htcr = LL_RNG_GetHealthConfig(rng); #endif /* health_test_config */ if (cur_nist_cfg != desired_nist_cfg || cur_htcr != desired_htcr) { MODIFY_REG(rng->CR, cur_nist_cfg, (desired_nist_cfg | RNG_CR_CONDRST)); #if DT_INST_NODE_HAS_PROP(0, health_test_config) #if DT_INST_NODE_HAS_PROP(0, health_test_magic) LL_RNG_SetHealthConfig(rng, DT_INST_PROP(0, health_test_magic)); #endif /* health_test_magic */ LL_RNG_SetHealthConfig(rng, desired_htcr); #endif /* health_test_config */ LL_RNG_DisableCondReset(rng); /* Wait for conditioning reset process to be completed */ while (LL_RNG_IsEnabledCondReset(rng) == 1) { } } #endif /* STM32_CONDRST_SUPPORT */ LL_RNG_Enable(rng); LL_RNG_EnableIT(rng); } static void acquire_rng(void) { entropy_stm32_resume(); #if defined(CONFIG_SOC_SERIES_STM32WBX) || defined(CONFIG_STM32H7_DUAL_CORE) /* Lock the RNG to prevent concurrent access */ z_stm32_hsem_lock(CFG_HW_RNG_SEMID, HSEM_LOCK_WAIT_FOREVER); /* RNG configuration could have been changed by the other core */ configure_rng(); #endif /* CONFIG_SOC_SERIES_STM32WBX || CONFIG_STM32H7_DUAL_CORE */ } static void release_rng(void) { entropy_stm32_suspend(); #if defined(CONFIG_SOC_SERIES_STM32WBX) || defined(CONFIG_STM32H7_DUAL_CORE) z_stm32_hsem_unlock(CFG_HW_RNG_SEMID); #endif /* CONFIG_SOC_SERIES_STM32WBX || CONFIG_STM32H7_DUAL_CORE */ } static int entropy_stm32_got_error(RNG_TypeDef *rng) { __ASSERT_NO_MSG(rng != NULL); if (LL_RNG_IsActiveFlag_CECS(rng)) { return 1; } if (LL_RNG_IsActiveFlag_SEIS(rng)) { return 1; } return 0; } #if defined(STM32_CONDRST_SUPPORT) /* SOCS w/ 
soft-reset support: execute the reset */ static int recover_seed_error(RNG_TypeDef *rng) { uint32_t count_timeout = 0; LL_RNG_EnableCondReset(rng); LL_RNG_DisableCondReset(rng); /* When reset process is done cond reset bit is read 0 * This typically takes: 2 AHB clock cycles + 2 RNG clock cycles. */ while (LL_RNG_IsEnabledCondReset(rng) || LL_RNG_IsActiveFlag_SEIS(rng) || LL_RNG_IsActiveFlag_SECS(rng)) { count_timeout++; if (count_timeout == 10) { return -ETIMEDOUT; } } return 0; } #else /* !STM32_CONDRST_SUPPORT */ /* SOCS w/o soft-reset support: flush pipeline */ static int recover_seed_error(RNG_TypeDef *rng) { LL_RNG_ClearFlag_SEIS(rng); for (int i = 0; i < 12; ++i) { LL_RNG_ReadRandData32(rng); } if (LL_RNG_IsActiveFlag_SEIS(rng) != 0) { return -EIO; } return 0; } #endif /* !STM32_CONDRST_SUPPORT */ static int random_byte_get(void) { int retval = -EAGAIN; unsigned int key; RNG_TypeDef *rng = entropy_stm32_rng_data.rng; key = irq_lock(); if (IS_ENABLED(CONFIG_ENTROPY_STM32_CLK_CHECK) && !k_is_pre_kernel()) { /* CECS bit signals that a clock configuration issue is detected, * which may lead to generation of non truly random data. */ __ASSERT(LL_RNG_IsActiveFlag_CECS(rng) == 0, "CECS = 1: RNG domain clock is too slow.\n" "\tSee ref man and update target clock configuration."); } if (LL_RNG_IsActiveFlag_SEIS(rng) && (recover_seed_error(rng) < 0)) { retval = -EIO; goto out; } if ((LL_RNG_IsActiveFlag_DRDY(rng) == 1)) { if (entropy_stm32_got_error(rng)) { retval = -EIO; goto out; } retval = LL_RNG_ReadRandData32(rng); if (retval == 0) { /* A seed error could have occurred between RNG_SR * polling and RND_DR output reading. 
*/
			retval = -EAGAIN;
			goto out;
		}

		retval &= 0xFF;
	}

	/* NOTE(review): the lines above are the tail of a function whose head
	 * is outside this view (appears to finish with irq_unlock + return;
	 * confirm against the full file).
	 */
out:
	irq_unlock(key);

	return retval;
}

/*
 * Fill @buf with @len random bytes by busy-polling the RNG with the RNG
 * IRQ disabled (asserted on entry; on WB/H7 dual-core parts the caller
 * must already own the RNG hardware semaphore).
 *
 * Returns @len on success, or 0 when a seed error is pending (the call
 * to random_byte_get() below starts recovery; no data is available yet).
 */
static uint16_t generate_from_isr(uint8_t *buf, uint16_t len)
{
	uint16_t remaining_len = len;

	__ASSERT_NO_MSG(!irq_is_enabled(IRQN));

#if defined(CONFIG_SOC_SERIES_STM32WBX) || defined(CONFIG_STM32H7_DUAL_CORE)
	__ASSERT_NO_MSG(z_stm32_hsem_is_owned(CFG_HW_RNG_SEMID));
#endif /* CONFIG_SOC_SERIES_STM32WBX || CONFIG_STM32H7_DUAL_CORE */

	/* do not proceed if a Seed error occurred */
	if (LL_RNG_IsActiveFlag_SECS(entropy_stm32_rng_data.rng) ||
	    LL_RNG_IsActiveFlag_SEIS(entropy_stm32_rng_data.rng)) {
		(void)random_byte_get(); /* this will recover the error */

		return 0; /* return cnt is null : no random data available */
	}

	/* Clear NVIC pending bit. This ensures that a subsequent
	 * RNG event will set the Cortex-M single-bit event register
	 * to 1 (the bit is set when NVIC pending IRQ status is
	 * changed from 0 to 1)
	 */
	NVIC_ClearPendingIRQ(IRQN);

	do {
		int byte;

		/* Wait for a data-ready flag using WFE so the core can
		 * sleep between RNG words instead of spinning.
		 */
		while (LL_RNG_IsActiveFlag_DRDY(
				entropy_stm32_rng_data.rng) != 1) {
			/*
			 * To guarantee waking up from the event, the
			 * SEV-On-Pend feature must be enabled (enabled
			 * during ARCH initialization).
			 *
			 * DSB is recommended by spec before WFE (to
			 * guarantee completion of memory transactions)
			 */
			barrier_dsync_fence_full();
			__WFE();
			__SEV();
			__WFE();
		}

		byte = random_byte_get();
		NVIC_ClearPendingIRQ(IRQN);

		/* Negative return means the byte was not usable (e.g. a
		 * transient error); retry without consuming output space.
		 */
		if (byte < 0) {
			continue;
		}

		/* Fill the buffer back-to-front. */
		buf[--remaining_len] = byte;
	} while (remaining_len);

	return len;
}

/*
 * Start filling the entropy pools from the RNG interrupt.
 *
 * @wait: on dual-core parts, whether to block on the RNG hardware
 *        semaphore; when false, returns -EAGAIN if the RNG is busy.
 *
 * Idempotent: if filling is already in progress this returns 0 without
 * re-acquiring the RNG or taking additional PM locks.  Returns 0 on
 * success, -EAGAIN if the RNG could not be acquired in non-blocking mode.
 */
static int start_pool_filling(bool wait)
{
	unsigned int key;
	bool already_filling;

	key = irq_lock();
#if defined(CONFIG_SOC_SERIES_STM32WBX) || defined(CONFIG_STM32H7_DUAL_CORE)
	/* In non-blocking mode, return immediately if the RNG is not available */
	if (!wait && z_stm32_hsem_try_lock(CFG_HW_RNG_SEMID) != 0) {
		irq_unlock(key);
		return -EAGAIN;
	}
#else
	ARG_UNUSED(wait);
#endif /* CONFIG_SOC_SERIES_STM32WBX || CONFIG_STM32H7_DUAL_CORE */

	/* Test-and-set of filling_pools must happen under irq_lock so that
	 * concurrent callers see a consistent state.
	 */
	already_filling = entropy_stm32_rng_data.filling_pools;
	entropy_stm32_rng_data.filling_pools = true;
	irq_unlock(key);

	if (unlikely(already_filling)) {
		return 0;
	}

	/* Prevent the clocks to be stopped during the duration the rng pool is
	 * being populated. The ISR will release the constraint again when the
	 * rng pool is filled.
	 */
	pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
	if (IS_ENABLED(CONFIG_PM_S2RAM)) {
		pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_RAM, PM_ALL_SUBSTATES);
	}

	acquire_rng();
	irq_enable(IRQN);

	return 0;
}

/*
 * Work-queue handler used to (re)start pool filling from thread context;
 * resubmits itself while the RNG cannot be acquired without blocking.
 */
static void pool_filling_work_handler(struct k_work *work)
{
	if (start_pool_filling(false) != 0) {
		/* RNG could not be acquired, try again */
		k_work_submit(work);
	}
}

/*
 * Pop up to @len bytes from ring-pool @rngp into @buf.
 *
 * Safe against preemption by a higher-priority context calling the same
 * function: a two-index scheme (first_alloc / first_read) reserves a
 * region under irq_lock before copying outside the lock.
 *
 * Returns the number of bytes actually copied (may be < @len, including
 * 0 when the pool is empty).  Kicks off a refill when the remaining
 * level drops to the configured threshold.
 */
static uint16_t rng_pool_get(struct rng_pool *rngp, uint8_t *buf, uint16_t len)
{
	uint32_t last = rngp->last;
	uint32_t mask = rngp->mask;
	uint8_t *dst = buf;
	uint32_t first, available;
	uint32_t other_read_in_progress;
	unsigned int key;

	key = irq_lock();
	first = rngp->first_alloc;

	/*
	 * The other_read_in_progress is non-zero if rngp->first_read != first,
	 * which means that lower-priority code (which was interrupted by this
	 * call) already allocated area for read.
	 */
	other_read_in_progress = (rngp->first_read ^ first);

	available = (last - first) & mask;
	if (available < len) {
		len = available;
	}

	/*
	 * Move alloc index forward to signal, that part of the buffer is
	 * now reserved for this call.
	 */
	rngp->first_alloc = (first + len) & mask;
	irq_unlock(key);

	while (likely(len--)) {
		*dst++ = rngp->buffer[first];
		first = (first + 1) & mask;
	}

	/*
	 * If this call is the last one accessing the pool, move read index
	 * to signal that all allocated regions are now read and could be
	 * overwritten.
	 */
	if (likely(!other_read_in_progress)) {
		key = irq_lock();
		rngp->first_read = rngp->first_alloc;
		irq_unlock(key);
	}

	len = dst - buf;
	available = available - len;
	if (available <= rngp->threshold) {
		/*
		 * Avoid starting pool filling from ISR as it might require
		 * blocking if RNG is not available and a race condition could
		 * also occur if this ISR has interrupted the RNG ISR.
		 */
		if (k_is_in_isr()) {
			k_work_submit(&entropy_stm32_rng_data.filling_work);
		} else {
			start_pool_filling(true);
		}
	}

	return len;
}

/*
 * Push one byte into ring-pool @rngp (producer side, called from the
 * RNG ISR).  Returns 0 on success, -ENOBUFS when the pool is full.
 */
static int rng_pool_put(struct rng_pool *rngp, uint8_t byte)
{
	uint8_t first = rngp->first_read;
	uint8_t last = rngp->last;
	uint8_t mask = rngp->mask;

	/* Signal error if the pool is full. */
	if (((last - first) & mask) == mask) {
		return -ENOBUFS;
	}

	rngp->buffer[last] = byte;
	rngp->last = (last + 1) & mask;

	return 0;
}

/*
 * Initialise an empty ring pool of @size bytes (size is assumed to be a
 * power of two so that mask = size - 1 works as an index wrap mask) with
 * the given refill @threshold.
 */
static void rng_pool_init(struct rng_pool *rngp, uint16_t size,
			  uint8_t threshold)
{
	rngp->first_alloc = 0U;
	rngp->first_read = 0U;
	rngp->last = 0U;
	rngp->mask = size - 1;
	rngp->threshold = threshold;
}

/*
 * RNG interrupt handler: drains one byte into the ISR pool, overflowing
 * into the thread pool.  When both pools are full the RNG is released,
 * the PM locks taken in start_pool_filling() are dropped, and the IRQ is
 * disabled until the next refill request.
 */
static void stm32_rng_isr(const void *arg)
{
	int byte, ret;

	ARG_UNUSED(arg);

	byte = random_byte_get();
	if (byte < 0) {
		return;
	}

	ret = rng_pool_put((struct rng_pool *)(entropy_stm32_rng_data.isr),
			   byte);
	if (ret < 0) {
		ret = rng_pool_put(
			(struct rng_pool *)(entropy_stm32_rng_data.thr),
			byte);
		if (ret < 0) {
			/* Both pools full: stop producing. */
			irq_disable(IRQN);
			release_rng();
			pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE,
						 PM_ALL_SUBSTATES);
			if (IS_ENABLED(CONFIG_PM_S2RAM)) {
				pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_RAM,
							 PM_ALL_SUBSTATES);
			}
			entropy_stm32_rng_data.filling_pools = false;
		}

		/* Wake any thread blocked in entropy_stm32_rng_get_entropy(). */
		k_sem_give(&entropy_stm32_rng_data.sem_sync);
	}
}

/*
 * Entropy API (thread context): fill @buf with @len bytes, sleeping on
 * sem_sync whenever the thread pool runs dry.  Always returns 0.
 */
static int entropy_stm32_rng_get_entropy(const struct device *dev,
					 uint8_t *buf, uint16_t len)
{
	/* Check if this API is called on correct driver instance. */
	__ASSERT_NO_MSG(&entropy_stm32_rng_data == dev->data);

	while (len) {
		uint16_t bytes;

		k_sem_take(&entropy_stm32_rng_data.sem_lock, K_FOREVER);
		bytes = rng_pool_get(
			(struct rng_pool *)(entropy_stm32_rng_data.thr),
			buf, len);

		if (bytes == 0U) {
			/* Pool is empty: Sleep until next interrupt. */
			k_sem_take(&entropy_stm32_rng_data.sem_sync, K_FOREVER);
		}

		k_sem_give(&entropy_stm32_rng_data.sem_lock);

		len -= bytes;
		buf += bytes;
	}

	return 0;
}

/*
 * Entropy API (ISR-safe): without ENTROPY_BUSYWAIT this only drains the
 * ISR pool (may return fewer than @len bytes); with ENTROPY_BUSYWAIT it
 * polls the hardware directly via generate_from_isr(), temporarily
 * taking over the RNG and restoring IRQ/ownership state afterwards.
 * Returns the number of bytes written to @buf.
 */
static int entropy_stm32_rng_get_entropy_isr(const struct device *dev,
					     uint8_t *buf, uint16_t len,
					     uint32_t flags)
{
	uint16_t cnt = len;

	/* Check if this API is called on correct driver instance. */
	__ASSERT_NO_MSG(&entropy_stm32_rng_data == dev->data);

	if (likely((flags & ENTROPY_BUSYWAIT) == 0U)) {
		return rng_pool_get(
			(struct rng_pool *)(entropy_stm32_rng_data.isr),
			buf, len);
	}

	if (len) {
		unsigned int key;
		int irq_enabled;
		bool rng_already_acquired;

		key = irq_lock();
		irq_enabled = irq_is_enabled(IRQN);
		irq_disable(IRQN);
		irq_unlock(key);

		/* Do not release if IRQ is enabled. RNG will be released in ISR
		 * when the pools are full.
		 */
		rng_already_acquired =
			z_stm32_hsem_is_owned(CFG_HW_RNG_SEMID) || irq_enabled;
		acquire_rng();

		cnt = generate_from_isr(buf, len);

		/* Restore the state of the RNG lock and IRQ */
		if (!rng_already_acquired) {
			release_rng();
		}

		if (irq_enabled) {
			irq_enable(IRQN);
		}
	}

	return cnt;
}

/*
 * Driver init: enables the RNG clock (and optional domain clock),
 * initialises semaphores, work item and both entropy pools, connects the
 * IRQ, configures the RNG on single-owner parts, and starts the first
 * pool fill.  Returns 0 on success or -ENODEV if the clock controller is
 * not ready.
 */
static int entropy_stm32_rng_init(const struct device *dev)
{
	struct entropy_stm32_rng_dev_data *dev_data;
	const struct entropy_stm32_rng_dev_cfg *dev_cfg;
	int res;

	__ASSERT_NO_MSG(dev != NULL);

	dev_data = dev->data;
	dev_cfg = dev->config;

	__ASSERT_NO_MSG(dev_data != NULL);
	__ASSERT_NO_MSG(dev_cfg != NULL);

	dev_data->clock = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE);

	if (!device_is_ready(dev_data->clock)) {
		return -ENODEV;
	}

	res = clock_control_on(dev_data->clock,
			       (clock_control_subsys_t)&dev_cfg->pclken[0]);
	__ASSERT_NO_MSG(res == 0);

	/* Configure domain clock if any */
	if (DT_INST_NUM_CLOCKS(0) > 1) {
		res = clock_control_configure(dev_data->clock,
					      (clock_control_subsys_t)&dev_cfg->pclken[1],
					      NULL);
		__ASSERT(res == 0, "Could not select RNG domain clock");
	}

	/* Locking semaphore initialized to 1 (unlocked) */
	k_sem_init(&dev_data->sem_lock, 1, 1);

	/* Synching semaphore */
	k_sem_init(&dev_data->sem_sync, 0, 1);

	k_work_init(&dev_data->filling_work, pool_filling_work_handler);

	rng_pool_init((struct rng_pool *)(dev_data->thr),
		      CONFIG_ENTROPY_STM32_THR_POOL_SIZE,
		      CONFIG_ENTROPY_STM32_THR_THRESHOLD);
	rng_pool_init((struct rng_pool *)(dev_data->isr),
		      CONFIG_ENTROPY_STM32_ISR_POOL_SIZE,
		      CONFIG_ENTROPY_STM32_ISR_THRESHOLD);

	IRQ_CONNECT(IRQN, IRQ_PRIO, stm32_rng_isr, &entropy_stm32_rng_data, 0);

#if !defined(CONFIG_SOC_SERIES_STM32WBX) && !defined(CONFIG_STM32H7_DUAL_CORE)
	/* For multi-core MCUs, RNG configuration is automatically performed
	 * after acquiring the RNG in start_pool_filling()
	 */
	configure_rng();
#endif /* !CONFIG_SOC_SERIES_STM32WBX && !CONFIG_STM32H7_DUAL_CORE */

	start_pool_filling(true);

	return 0;
}

#ifdef CONFIG_PM_DEVICE
/*
 * PM hook: SUSPEND releases the RNG; RESUME either re-runs full init (if
 * the health-test config was lost across suspend-to-RAM) or resumes/
 * releases the RNG depending on whether a pool fill was in progress.
 */
static int entropy_stm32_rng_pm_action(const struct device *dev,
				       enum pm_device_action action)
{
	struct entropy_stm32_rng_dev_data *dev_data = dev->data;

	int res = 0;

	/* Remove warning on some platforms */
	ARG_UNUSED(dev_data);

	switch (action) {
	case PM_DEVICE_ACTION_SUSPEND:
#if defined(CONFIG_SOC_SERIES_STM32WBX) || defined(CONFIG_STM32H7_DUAL_CORE)
		/* Lock to Prevent concurrent access with PM */
		z_stm32_hsem_lock(CFG_HW_RNG_SEMID, HSEM_LOCK_WAIT_FOREVER);
		/* Call release_rng instead of entropy_stm32_suspend to avoid double hsem_unlock */
#endif /* CONFIG_SOC_SERIES_STM32WBX || CONFIG_STM32H7_DUAL_CORE */
		release_rng();
		break;
	case PM_DEVICE_ACTION_RESUME:
		if (IS_ENABLED(CONFIG_PM_S2RAM)) {
#if DT_INST_NODE_HAS_PROP(0, health_test_config)
			entropy_stm32_resume();
#if DT_INST_NODE_HAS_PROP(0, health_test_magic)
			LL_RNG_SetHealthConfig(dev_data->rng,
					       DT_INST_PROP(0, health_test_magic));
#endif /* health_test_magic */
			/* A mismatching health config means the RNG lost its
			 * state across suspend: redo the full init.
			 */
			if (LL_RNG_GetHealthConfig(dev_data->rng) !=
			    DT_INST_PROP_OR(0, health_test_config, 0U)) {
				entropy_stm32_rng_init(dev);
			} else if (!entropy_stm32_rng_data.filling_pools) {
				/* Resume RNG only if it was suspended during filling pool */
#if defined(CONFIG_SOC_SERIES_STM32WBX) || defined(CONFIG_STM32H7_DUAL_CORE)
				/* Lock to Prevent concurrent access with PM */
				z_stm32_hsem_lock(CFG_HW_RNG_SEMID,
						  HSEM_LOCK_WAIT_FOREVER);
				/*
				 * Call release_rng instead of entropy_stm32_suspend
				 * to avoid double hsem_unlock
				 */
#endif /* CONFIG_SOC_SERIES_STM32WBX || CONFIG_STM32H7_DUAL_CORE */
				release_rng();
			}
#endif /* health_test_config */
		} else {
			/* Resume RNG only if it was suspended during filling pool */
			if (entropy_stm32_rng_data.filling_pools) {
				res = entropy_stm32_resume();
			}
		}
		break;
	default:
		return -ENOTSUP;
	}

	return res;
}
#endif /* CONFIG_PM_DEVICE */

static const struct entropy_driver_api entropy_stm32_rng_api = {
	.get_entropy = entropy_stm32_rng_get_entropy,
	.get_entropy_isr = entropy_stm32_rng_get_entropy_isr
};

PM_DEVICE_DT_INST_DEFINE(0, entropy_stm32_rng_pm_action);

DEVICE_DT_INST_DEFINE(0,
		      entropy_stm32_rng_init,
		      PM_DEVICE_DT_INST_GET(0),
		      &entropy_stm32_rng_data, &entropy_stm32_rng_config,
		      PRE_KERNEL_1, CONFIG_ENTROPY_INIT_PRIORITY,
		      &entropy_stm32_rng_api);
```
/content/code_sandbox/drivers/entropy/entropy_stm32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,871
```unknown config REGULATOR_FIXED bool "GPIO-controlled regulators" default y depends on DT_HAS_REGULATOR_FIXED_ENABLED depends on GPIO help Enable the driver for GPIO-controlled regulators if REGULATOR_FIXED config REGULATOR_FIXED_INIT_PRIORITY int "Init priority" default 75 help Device driver initialization priority endif # REGULATOR_FIXED ```
/content/code_sandbox/drivers/regulator/Kconfig.fixed
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
77
```c
/*
 * Regulator driver for the Nordic nPM1300 PMIC: two buck converters and
 * two LDO/load-switch outputs, controlled over the npm1300 MFD bus.
 */

#define DT_DRV_COMPAT nordic_npm1300_regulator

#include <errno.h>
#include <string.h>

#include <zephyr/drivers/gpio.h>
#include <zephyr/drivers/regulator.h>
#include <zephyr/drivers/mfd/npm1300.h>
#include <zephyr/dt-bindings/regulator/npm1300.h>
#include <zephyr/sys/linear_range.h>
#include <zephyr/sys/util.h>

/* nPM1300 voltage sources */
enum npm1300_sources {
	NPM1300_SOURCE_BUCK1,
	NPM1300_SOURCE_BUCK2,
	NPM1300_SOURCE_LDO1,
	NPM1300_SOURCE_LDO2,
};

/* nPM1300 gpio control channels */
enum npm1300_gpio_type {
	NPM1300_GPIO_TYPE_ENABLE,
	NPM1300_GPIO_TYPE_RETENTION,
	NPM1300_GPIO_TYPE_PWM
};

/* nPM1300 regulator base addresses */
#define BUCK_BASE 0x04U
#define LDSW_BASE 0x08U
#define SHIP_BASE 0x0BU

/* nPM1300 regulator register offsets (relative to BUCK_BASE) */
#define BUCK_OFFSET_EN_SET    0x00U
#define BUCK_OFFSET_EN_CLR    0x01U
#define BUCK_OFFSET_PWM_SET   0x04U
#define BUCK_OFFSET_PWM_CLR   0x05U
#define BUCK_OFFSET_VOUT_NORM 0x08U
#define BUCK_OFFSET_VOUT_RET  0x09U
#define BUCK_OFFSET_EN_CTRL   0x0CU
#define BUCK_OFFSET_VRET_CTRL 0x0DU
#define BUCK_OFFSET_PWM_CTRL  0x0EU
#define BUCK_OFFSET_SW_CTRL   0x0FU
#define BUCK_OFFSET_VOUT_STAT 0x10U
#define BUCK_OFFSET_CTRL0     0x15U
#define BUCK_OFFSET_STATUS    0x34U

/* nPM1300 ldsw register offsets (relative to LDSW_BASE) */
#define LDSW_OFFSET_EN_SET  0x00U
#define LDSW_OFFSET_EN_CLR  0x01U
#define LDSW_OFFSET_STATUS  0x04U
#define LDSW_OFFSET_GPISEL  0x05U
#define LDSW_OFFSET_CONFIG  0x07U
#define LDSW_OFFSET_LDOSEL  0x08U
#define LDSW_OFFSET_VOUTSEL 0x0CU

/* nPM1300 ship register offsets */
#define SHIP_OFFSET_SHIP 0x02U

/* Status/config bit masks */
#define BUCK1_ON_MASK 0x04U
#define BUCK2_ON_MASK 0x40U
#define LDSW1_ON_MASK 0x03U
#define LDSW2_ON_MASK 0x0CU

#define LDSW1_SOFTSTART_MASK  0x0CU
#define LDSW1_SOFTSTART_SHIFT 2U
#define LDSW2_SOFTSTART_MASK  0x30U
#define LDSW2_SOFTSTART_SHIFT 4U

/* Config of the parent (common) device: MFD handle and DVS-state pins. */
struct regulator_npm1300_pconfig {
	const struct device *mfd;
	struct gpio_dt_spec dvs_state_pins[5];
};

/* Per-output config: which source this instance drives plus DT options. */
struct regulator_npm1300_config {
	struct regulator_common_config common;
	const struct device *mfd;
	uint8_t source;            /* one of enum npm1300_sources */
	int32_t retention_uv;      /* 0 means "not configured" */
	struct gpio_dt_spec enable_gpios;
	struct gpio_dt_spec retention_gpios;
	struct gpio_dt_spec pwm_gpios;
	uint8_t soft_start;        /* UINT8_MAX means "not configured" */
};

struct regulator_npm1300_data {
	struct regulator_common_data data;
};

/* Linear range for output voltage, common for all bucks and LDOs on this device */
static const struct linear_range buckldo_range =
	LINEAR_RANGE_INIT(1000000, 100000, 0U, 23U);

/* regulator API: number of selectable voltages for this output. */
unsigned int regulator_npm1300_count_voltages(const struct device *dev)
{
	const struct regulator_npm1300_config *config = dev->config;

	switch (config->source) {
	case NPM1300_SOURCE_BUCK1:
	case NPM1300_SOURCE_BUCK2:
	case NPM1300_SOURCE_LDO1:
	case NPM1300_SOURCE_LDO2:
		return linear_range_values_count(&buckldo_range);
	default:
		return 0;
	}
}

/* regulator API: map selector index @idx to microvolts. */
int regulator_npm1300_list_voltage(const struct device *dev, unsigned int idx,
				   int32_t *volt_uv)
{
	const struct regulator_npm1300_config *config = dev->config;

	switch (config->source) {
	case NPM1300_SOURCE_BUCK1:
	case NPM1300_SOURCE_BUCK2:
	case NPM1300_SOURCE_LDO1:
	case NPM1300_SOURCE_LDO2:
		return linear_range_get_value(&buckldo_range, idx, volt_uv);
	default:
		return -EINVAL;
	}
}

/*
 * Program the retention voltage of a buck (bucks only; -ENOTSUP for
 * LDOs).  The VOUT_RET registers of the two bucks are spaced 2 bytes
 * apart, hence the chan * 2U addressing.
 */
static int retention_set_voltage(const struct device *dev, int32_t retention_uv)
{
	const struct regulator_npm1300_config *config = dev->config;
	uint16_t idx;
	uint8_t chan;
	int ret;

	switch (config->source) {
	case NPM1300_SOURCE_BUCK1:
		chan = 0U;
		break;
	case NPM1300_SOURCE_BUCK2:
		chan = 1U;
		break;
	default:
		return -ENOTSUP;
	}

	ret = linear_range_get_win_index(&buckldo_range, retention_uv,
					 retention_uv, &idx);

	if (ret == -EINVAL) {
		return ret;
	}

	return mfd_npm1300_reg_write(config->mfd, BUCK_BASE,
				     BUCK_OFFSET_VOUT_RET + (chan * 2U), idx);
}

/*
 * Read the active voltage selector of buck @chan.  If the buck is under
 * software control the programmed VOUT_NORM value is returned, otherwise
 * the VSET-pin-derived status register is read.
 */
static int buck_get_voltage_index(const struct device *dev, uint8_t chan,
				  uint8_t *idx)
{
	const struct regulator_npm1300_config *config = dev->config;
	uint8_t sel;
	int ret;

	ret = mfd_npm1300_reg_read(config->mfd, BUCK_BASE, BUCK_OFFSET_SW_CTRL,
				   &sel);

	if (ret < 0) {
		return ret;
	}

	if ((sel >> chan) & 1U) {
		/* SW control */
		return mfd_npm1300_reg_read(config->mfd, BUCK_BASE,
					    BUCK_OFFSET_VOUT_NORM + (chan * 2U),
					    idx);
	}

	/* VSET pin control */
	return mfd_npm1300_reg_read(config->mfd, BUCK_BASE,
				    BUCK_OFFSET_VOUT_STAT + chan, idx);
}

/*
 * Set buck @chan output into [min_uv, max_uv]; skips the write if the
 * selector is already correct, then forces SW control of the output.
 */
static int buck_set_voltage(const struct device *dev, uint8_t chan,
			    int32_t min_uv, int32_t max_uv)
{
	const struct regulator_npm1300_config *config = dev->config;
	uint8_t mask;
	uint8_t curr_idx;
	uint16_t idx;
	int ret;

	ret = linear_range_get_win_index(&buckldo_range, min_uv, max_uv, &idx);

	if (ret == -EINVAL) {
		return ret;
	}

	/* Get current setting, and return if current and new index match */
	ret = buck_get_voltage_index(dev, chan, &curr_idx);

	if ((ret < 0) || (idx == curr_idx)) {
		return ret;
	}

	ret = mfd_npm1300_reg_write(config->mfd, BUCK_BASE,
				    BUCK_OFFSET_VOUT_NORM + (chan * 2U), idx);

	if (ret < 0) {
		return ret;
	}

	/* Enable SW control of buck output */
	mask = BIT(chan);
	return mfd_npm1300_reg_update(config->mfd, BUCK_BASE,
				      BUCK_OFFSET_SW_CTRL, mask, mask);
}

/* Set LDO @chan output voltage into [min_uv, max_uv]. */
static int ldo_set_voltage(const struct device *dev, uint8_t chan,
			   int32_t min_uv, int32_t max_uv)
{
	const struct regulator_npm1300_config *config = dev->config;
	uint16_t idx;
	int ret;

	ret = linear_range_get_win_index(&buckldo_range, min_uv, max_uv, &idx);

	if (ret == -EINVAL) {
		return ret;
	}

	return mfd_npm1300_reg_write(config->mfd, LDSW_BASE,
				     LDSW_OFFSET_VOUTSEL + chan, idx);
}

/* regulator API: dispatch set_voltage to the instance's source. */
int regulator_npm1300_set_voltage(const struct device *dev, int32_t min_uv,
				  int32_t max_uv)
{
	const struct regulator_npm1300_config *config = dev->config;

	switch (config->source) {
	case NPM1300_SOURCE_BUCK1:
		return buck_set_voltage(dev, 0, min_uv, max_uv);
	case NPM1300_SOURCE_BUCK2:
		return buck_set_voltage(dev, 1, min_uv, max_uv);
	case NPM1300_SOURCE_LDO1:
		return ldo_set_voltage(dev, 0, min_uv, max_uv);
	case NPM1300_SOURCE_LDO2:
		return ldo_set_voltage(dev, 1, min_uv, max_uv);
	default:
		return -ENODEV;
	}
}

/* Read buck @chan output voltage in microvolts. */
static int buck_get_voltage(const struct device *dev, uint8_t chan,
			    int32_t *volt_uv)
{
	uint8_t idx;
	int ret;

	ret = buck_get_voltage_index(dev, chan, &idx);

	if (ret < 0) {
		return ret;
	}

	return linear_range_get_value(&buckldo_range, idx, volt_uv);
}

/* Read LDO @chan output voltage in microvolts. */
static int ldo_get_voltage(const struct device *dev, uint8_t chan,
			   int32_t *volt_uv)
{
	const struct regulator_npm1300_config *config = dev->config;
	uint8_t idx;
	int ret;

	ret = mfd_npm1300_reg_read(config->mfd, LDSW_BASE,
				   LDSW_OFFSET_VOUTSEL + chan, &idx);

	if (ret < 0) {
		return ret;
	}

	return linear_range_get_value(&buckldo_range, idx, volt_uv);
}

/* regulator API: dispatch get_voltage to the instance's source. */
int regulator_npm1300_get_voltage(const struct device *dev, int32_t *volt_uv)
{
	const struct regulator_npm1300_config *config = dev->config;

	switch (config->source) {
	case NPM1300_SOURCE_BUCK1:
		return buck_get_voltage(dev, 0, volt_uv);
	case NPM1300_SOURCE_BUCK2:
		return buck_get_voltage(dev, 1, volt_uv);
	case NPM1300_SOURCE_LDO1:
		return ldo_get_voltage(dev, 0, volt_uv);
	case NPM1300_SOURCE_LDO2:
		return ldo_get_voltage(dev, 1, volt_uv);
	default:
		return -ENODEV;
	}
}

/*
 * Select buck @chan operating mode: forced-PWM, automatic, or PFM.  PFM
 * is controlled by a per-channel bit in CTRL0; PWM is forced/released
 * via the write-to-set / write-to-clear register pair.
 */
static int set_buck_mode(const struct device *dev, uint8_t chan,
			 regulator_mode_t mode)
{
	const struct regulator_npm1300_config *config = dev->config;
	uint8_t pfm_mask = BIT(chan);
	uint8_t pfm_data;
	uint8_t pwm_reg;
	int ret;

	switch (mode) {
	case NPM1300_BUCK_MODE_PWM:
		pfm_data = 0U;
		pwm_reg = BUCK_OFFSET_PWM_SET;
		break;
	case NPM1300_BUCK_MODE_AUTO:
		pfm_data = 0U;
		pwm_reg = BUCK_OFFSET_PWM_CLR;
		break;
	case NPM1300_BUCK_MODE_PFM:
		pfm_data = pfm_mask;
		pwm_reg = BUCK_OFFSET_PWM_CLR;
		break;
	default:
		return -ENOTSUP;
	}

	ret = mfd_npm1300_reg_update(config->mfd, BUCK_BASE, BUCK_OFFSET_CTRL0,
				     pfm_data, pfm_mask);

	if (ret < 0) {
		return ret;
	}

	return mfd_npm1300_reg_write(config->mfd, BUCK_BASE,
				     pwm_reg + (chan * 2U), 1U);
}

/* Select LDO (1) or load-switch (0) mode for channel @chan. */
static int set_ldsw_mode(const struct device *dev, uint8_t chan,
			 regulator_mode_t mode)
{
	const struct regulator_npm1300_config *config = dev->config;

	switch (mode) {
	case NPM1300_LDSW_MODE_LDO:
		return mfd_npm1300_reg_write(config->mfd, LDSW_BASE,
					     LDSW_OFFSET_LDOSEL + chan, 1U);
	case NPM1300_LDSW_MODE_LDSW:
		return mfd_npm1300_reg_write(config->mfd, LDSW_BASE,
					     LDSW_OFFSET_LDOSEL + chan, 0U);
	default:
		return -ENOTSUP;
	}
}

/* regulator API: dispatch set_mode to the instance's source. */
int regulator_npm1300_set_mode(const struct device *dev, regulator_mode_t mode)
{
	const struct regulator_npm1300_config *config = dev->config;

	switch (config->source) {
	case NPM1300_SOURCE_BUCK1:
		return set_buck_mode(dev, 0, mode);
	case NPM1300_SOURCE_BUCK2:
		return set_buck_mode(dev, 1, mode);
	case NPM1300_SOURCE_LDO1:
		return set_ldsw_mode(dev, 0, mode);
	case NPM1300_SOURCE_LDO2:
		return set_ldsw_mode(dev, 1, mode);
	default:
		return -ENOTSUP;
	}
}

/*
 * regulator API: enable the output.  Each source's EN_SET register pair
 * sits 2 bytes after the previous one's, hence the + 2U for the second
 * buck/LDO (presumed from the register layout; confirm against the
 * nPM1300 product specification).
 */
int regulator_npm1300_enable(const struct device *dev)
{
	const struct regulator_npm1300_config *config = dev->config;

	switch (config->source) {
	case NPM1300_SOURCE_BUCK1:
		return mfd_npm1300_reg_write(config->mfd, BUCK_BASE,
					     BUCK_OFFSET_EN_SET, 1U);
	case NPM1300_SOURCE_BUCK2:
		return mfd_npm1300_reg_write(config->mfd, BUCK_BASE,
					     BUCK_OFFSET_EN_SET + 2U, 1U);
	case NPM1300_SOURCE_LDO1:
		return mfd_npm1300_reg_write(config->mfd, LDSW_BASE,
					     LDSW_OFFSET_EN_SET, 1U);
	case NPM1300_SOURCE_LDO2:
		return mfd_npm1300_reg_write(config->mfd, LDSW_BASE,
					     LDSW_OFFSET_EN_SET + 2U, 1U);
	default:
		return 0;
	}
}

/* regulator API: disable the output via the write-to-clear registers. */
int regulator_npm1300_disable(const struct device *dev)
{
	const struct regulator_npm1300_config *config = dev->config;

	switch (config->source) {
	case NPM1300_SOURCE_BUCK1:
		return mfd_npm1300_reg_write(config->mfd, BUCK_BASE,
					     BUCK_OFFSET_EN_CLR, 1U);
	case NPM1300_SOURCE_BUCK2:
		return mfd_npm1300_reg_write(config->mfd, BUCK_BASE,
					     BUCK_OFFSET_EN_CLR + 2U, 1U);
	case NPM1300_SOURCE_LDO1:
		return mfd_npm1300_reg_write(config->mfd, LDSW_BASE,
					     LDSW_OFFSET_EN_CLR, 1U);
	case NPM1300_SOURCE_LDO2:
		return mfd_npm1300_reg_write(config->mfd, LDSW_BASE,
					     LDSW_OFFSET_EN_CLR + 2U, 1U);
	default:
		return 0;
	}
}

/*
 * Route a GPIO (@pin, active-low if @inv) to the enable/PWM/retention
 * control of buck @chan.  Pin select value 0 means "not used", so the
 * hardware field stores pin + 1.
 */
static int regulator_npm1300_set_buck_pin_ctrl(const struct device *dev,
					       uint8_t chan, uint8_t pin,
					       uint8_t inv,
					       enum npm1300_gpio_type type)
{
	const struct regulator_npm1300_config *config = dev->config;
	uint8_t ctrl;
	uint8_t mask;

	switch (chan) {
	case 0:
		/* Invert control in bit 6, pin control in bits 2-0 */
		ctrl = (inv << 6U) | (pin + 1U);
		mask = BIT(6U) | BIT_MASK(3U);
		break;
	case 1:
		/* Invert control in bit 7, pin control in bits 5-3 */
		ctrl = (inv << 7U) | ((pin + 1U) << 3U);
		mask = BIT(7U) | (BIT_MASK(3U) << 3U);
		break;
	default:
		return -EINVAL;
	}

	switch (type) {
	case NPM1300_GPIO_TYPE_ENABLE:
		return mfd_npm1300_reg_update(config->mfd, BUCK_BASE,
					      BUCK_OFFSET_EN_CTRL, ctrl, mask);
	case NPM1300_GPIO_TYPE_PWM:
		return mfd_npm1300_reg_update(config->mfd, BUCK_BASE,
					      BUCK_OFFSET_PWM_CTRL, ctrl, mask);
	case NPM1300_GPIO_TYPE_RETENTION:
		return mfd_npm1300_reg_update(config->mfd, BUCK_BASE,
					      BUCK_OFFSET_VRET_CTRL, ctrl, mask);
	default:
		return -ENOTSUP;
	}
}

/* Route a GPIO to the enable control of LDO/load-switch @chan. */
static int regulator_npm1300_set_ldsw_pin_ctrl(const struct device *dev,
					       uint8_t chan, uint8_t pin,
					       uint8_t inv,
					       enum npm1300_gpio_type type)
{
	const struct regulator_npm1300_config *config = dev->config;
	uint8_t ctrl;

	/* LDO/load-switch outputs only support GPIO enable control. */
	if (type != NPM1300_GPIO_TYPE_ENABLE) {
		return -ENOTSUP;
	}

	ctrl = (pin + 1U) | (inv << 3U);

	return mfd_npm1300_reg_write(config->mfd, LDSW_BASE,
				     LDSW_OFFSET_GPISEL + chan, ctrl);
}

/*
 * Configure GPIO control of type @type from devicetree spec @spec; a
 * NULL port means the property was absent, which is not an error.
 */
int regulator_npm1300_set_pin_ctrl(const struct device *dev,
				   const struct gpio_dt_spec *spec,
				   enum npm1300_gpio_type type)
{
	const struct regulator_npm1300_config *config = dev->config;
	uint8_t inv;

	if (spec->port == NULL) {
		return 0;
	}

	inv = (spec->dt_flags & GPIO_ACTIVE_LOW) != 0U;

	switch (config->source) {
	case NPM1300_SOURCE_BUCK1:
		return regulator_npm1300_set_buck_pin_ctrl(dev, 0, spec->pin,
							   inv, type);
	case NPM1300_SOURCE_BUCK2:
		return regulator_npm1300_set_buck_pin_ctrl(dev, 1, spec->pin,
							   inv, type);
	case NPM1300_SOURCE_LDO1:
		return regulator_npm1300_set_ldsw_pin_ctrl(dev, 0, spec->pin,
							   inv, type);
	case NPM1300_SOURCE_LDO2:
		return regulator_npm1300_set_ldsw_pin_ctrl(dev, 1, spec->pin,
							   inv, type);
	default:
		return -ENODEV;
	}
}

/*
 * Parent-device API: drive the configured DVS GPIO pins so that pin i
 * carries bit i of @state.  Unpopulated pins are skipped.
 */
int regulator_npm1300_dvs_state_set(const struct device *dev,
				    regulator_dvs_state_t state)
{
	const struct regulator_npm1300_pconfig *pconfig = dev->config;
	const struct gpio_dt_spec *spec;
	int ret;

	for (size_t idx = 0U; idx < 5U; idx++) {
		spec = &pconfig->dvs_state_pins[idx];

		if (spec->port != NULL) {
			ret = gpio_pin_set_dt(spec, ((state >> idx) & 1U) != 0U);

			if (ret != 0) {
				return ret;
			}
		}
	}

	return 0;
}

/* Parent-device API: put the PMIC into ship (lowest-power) mode. */
int regulator_npm1300_ship_mode(const struct device *dev)
{
	const struct regulator_npm1300_pconfig *pconfig = dev->config;

	return mfd_npm1300_reg_write(pconfig->mfd, SHIP_BASE, SHIP_OFFSET_SHIP,
				     1U);
}

static const struct regulator_parent_driver_api parent_api = {
	.dvs_state_set = regulator_npm1300_dvs_state_set,
	.ship_mode = regulator_npm1300_ship_mode,
};

/* Init for the common parent device: configure the DVS pins as outputs. */
int regulator_npm1300_common_init(const struct device *dev)
{
	const struct regulator_npm1300_pconfig *pconfig = dev->config;
	const struct gpio_dt_spec *spec;
	int ret;

	for (size_t idx = 0U; idx < 5U; idx++) {
		spec = &pconfig->dvs_state_pins[idx];

		if (spec->port != NULL) {
			if (!gpio_is_ready_dt(spec)) {
				return -ENODEV;
			}

			ret = gpio_pin_configure_dt(spec, GPIO_OUTPUT);

			if (ret != 0) {
				return ret;
			}
		}
	}

	return 0;
}

/* Read a status register and report whether any bit in @mask is set. */
static int get_enabled_reg(const struct device *dev, uint8_t base,
			   uint8_t offset, uint8_t mask, bool *enabled)
{
	const struct regulator_npm1300_config *config = dev->config;
	uint8_t data;

	int ret = mfd_npm1300_reg_read(config->mfd, base, offset, &data);

	if (ret != 0) {
		return ret;
	}

	*enabled = (data & mask) != 0U;

	return 0;
}

/* Query whether this instance's output is currently enabled in hardware. */
static int get_enabled(const struct device *dev, bool *enabled)
{
	const struct regulator_npm1300_config *config = dev->config;

	switch (config->source) {
	case NPM1300_SOURCE_BUCK1:
		return get_enabled_reg(dev, BUCK_BASE, BUCK_OFFSET_STATUS,
				       BUCK1_ON_MASK, enabled);
	case NPM1300_SOURCE_BUCK2:
		return get_enabled_reg(dev, BUCK_BASE, BUCK_OFFSET_STATUS,
				       BUCK2_ON_MASK, enabled);
	case NPM1300_SOURCE_LDO1:
		return get_enabled_reg(dev, LDSW_BASE, LDSW_OFFSET_STATUS,
				       LDSW1_ON_MASK, enabled);
	case NPM1300_SOURCE_LDO2:
		return get_enabled_reg(dev, LDSW_BASE, LDSW_OFFSET_STATUS,
				       LDSW2_ON_MASK, enabled);
	default:
		return -ENODEV;
	}
}
static int soft_start_set(const struct device *dev, uint8_t soft_start) { const struct regulator_npm1300_config *config = dev->config; switch (config->source) { case NPM1300_SOURCE_LDO1: return mfd_npm1300_reg_update(config->mfd, LDSW_BASE, LDSW_OFFSET_CONFIG, soft_start << LDSW1_SOFTSTART_SHIFT, LDSW1_SOFTSTART_MASK); case NPM1300_SOURCE_LDO2: return mfd_npm1300_reg_update(config->mfd, LDSW_BASE, LDSW_OFFSET_CONFIG, soft_start << LDSW2_SOFTSTART_SHIFT, LDSW2_SOFTSTART_MASK); default: return -ENOTSUP; } } int regulator_npm1300_init(const struct device *dev) { const struct regulator_npm1300_config *config = dev->config; bool enabled; int ret = 0; if (!device_is_ready(config->mfd)) { return -ENODEV; } ret = get_enabled(dev, &enabled); if (ret < 0) { return ret; } ret = regulator_common_init(dev, enabled); if (ret < 0) { return ret; } /* Configure retention voltage */ if (config->retention_uv != 0) { ret = retention_set_voltage(dev, config->retention_uv); if (ret != 0) { return ret; } } /* Configure soft start */ if (config->soft_start != UINT8_MAX) { ret = soft_start_set(dev, config->soft_start); if (ret != 0) { return ret; } } /* Configure GPIO pin control */ ret = regulator_npm1300_set_pin_ctrl(dev, &config->enable_gpios, NPM1300_GPIO_TYPE_ENABLE); if (ret != 0) { return ret; } ret = regulator_npm1300_set_pin_ctrl(dev, &config->retention_gpios, NPM1300_GPIO_TYPE_RETENTION); if (ret != 0) { return ret; } ret = regulator_npm1300_set_pin_ctrl(dev, &config->pwm_gpios, NPM1300_GPIO_TYPE_PWM); if (ret != 0) { return ret; } return ret; } static const struct regulator_driver_api api = {.enable = regulator_npm1300_enable, .disable = regulator_npm1300_disable, .count_voltages = regulator_npm1300_count_voltages, .list_voltage = regulator_npm1300_list_voltage, .set_voltage = regulator_npm1300_set_voltage, .get_voltage = regulator_npm1300_get_voltage, .set_mode = regulator_npm1300_set_mode}; #define REGULATOR_NPM1300_DEFINE(node_id, id, _source) \ static struct 
regulator_npm1300_data data_##id; \ \ static const struct regulator_npm1300_config config_##id = { \ .common = REGULATOR_DT_COMMON_CONFIG_INIT(node_id), \ .mfd = DEVICE_DT_GET(DT_GPARENT(node_id)), \ .source = _source, \ .retention_uv = DT_PROP_OR(node_id, retention_microvolt, 0), \ .soft_start = DT_ENUM_IDX_OR(node_id, soft_start_microamp, UINT8_MAX), \ .enable_gpios = GPIO_DT_SPEC_GET_OR(node_id, enable_gpios, {0}), \ .retention_gpios = GPIO_DT_SPEC_GET_OR(node_id, retention_gpios, {0}), \ .pwm_gpios = GPIO_DT_SPEC_GET_OR(node_id, pwm_gpios, {0})}; \ \ DEVICE_DT_DEFINE(node_id, regulator_npm1300_init, NULL, &data_##id, &config_##id, \ POST_KERNEL, CONFIG_REGULATOR_NPM1300_INIT_PRIORITY, &api); #define REGULATOR_NPM1300_DEFINE_COND(inst, child, source) \ COND_CODE_1(DT_NODE_EXISTS(DT_INST_CHILD(inst, child)), \ (REGULATOR_NPM1300_DEFINE(DT_INST_CHILD(inst, child), child##inst, source)), \ ()) #define REGULATOR_NPM1300_DEFINE_ALL(inst) \ static const struct regulator_npm1300_pconfig config_##inst = { \ .mfd = DEVICE_DT_GET(DT_INST_PARENT(inst)), \ .dvs_state_pins = {GPIO_DT_SPEC_INST_GET_BY_IDX_OR(inst, dvs_gpios, 0, {0}), \ GPIO_DT_SPEC_INST_GET_BY_IDX_OR(inst, dvs_gpios, 1, {0}), \ GPIO_DT_SPEC_INST_GET_BY_IDX_OR(inst, dvs_gpios, 2, {0}), \ GPIO_DT_SPEC_INST_GET_BY_IDX_OR(inst, dvs_gpios, 3, {0}), \ GPIO_DT_SPEC_INST_GET_BY_IDX_OR(inst, dvs_gpios, 4, {0})}}; \ \ DEVICE_DT_INST_DEFINE(inst, regulator_npm1300_common_init, NULL, NULL, &config_##inst, \ POST_KERNEL, CONFIG_REGULATOR_NPM1300_COMMON_INIT_PRIORITY, \ &parent_api); \ \ REGULATOR_NPM1300_DEFINE_COND(inst, buck1, NPM1300_SOURCE_BUCK1) \ REGULATOR_NPM1300_DEFINE_COND(inst, buck2, NPM1300_SOURCE_BUCK2) \ REGULATOR_NPM1300_DEFINE_COND(inst, ldo1, NPM1300_SOURCE_LDO1) \ REGULATOR_NPM1300_DEFINE_COND(inst, ldo2, NPM1300_SOURCE_LDO2) DT_INST_FOREACH_STATUS_OKAY(REGULATOR_NPM1300_DEFINE_ALL) ```
/content/code_sandbox/drivers/regulator/regulator_npm1300.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,862
```c /* */ #define DT_DRV_COMPAT nordic_npm6001_regulator #include <errno.h> #include <zephyr/drivers/i2c.h> #include <zephyr/drivers/regulator.h> #include <zephyr/dt-bindings/regulator/npm6001.h> #include <zephyr/sys/__assert.h> #include <zephyr/sys/linear_range.h> #include <zephyr/sys/util.h> /* nPM6001 voltage sources */ enum npm6001_sources { NPM6001_SOURCE_BUCK0, NPM6001_SOURCE_BUCK1, NPM6001_SOURCE_BUCK2, NPM6001_SOURCE_BUCK3, NPM6001_SOURCE_LDO0, NPM6001_SOURCE_LDO1, }; /* nPM6001 regulator related registers */ #define NPM6001_TASKS_START_BUCK3 0x02U #define NPM6001_TASKS_START_LDO0 0x03U #define NPM6001_TASKS_START_LDO1 0x04U #define NPM6001_TASKS_STOP_BUCK3 0x08U #define NPM6001_TASKS_STOP_LDO0 0x09U #define NPM6001_TASKS_STOP_LDO1 0x0AU #define NPM6001_TASKS_UPDATE_VOUTPWM 0x0EU #define NPM6001_EVENTS_THWARN 0x1EU #define NPM6001_EVENTS_BUCK0OC 0x1FU #define NPM6001_EVENTS_BUCK1OC 0x20U #define NPM6001_EVENTS_BUCK2OC 0x21U #define NPM6001_EVENTS_BUCK3OC 0x22U #define NPM6001_BUCK0VOUTULP 0x3AU #define NPM6001_BUCK1VOUTULP 0x3CU #define NPM6001_BUCK2VOUTULP 0x40U #define NPM6001_BUCK3VOUT 0x45U #define NPM6001_LDO0VOUT 0x46U #define NPM6001_BUCK0CONFPWMMODE 0x4AU #define NPM6001_BUCK1CONFPWMMODE 0x4BU #define NPM6001_BUCK2CONFPWMMODE 0x4CU #define NPM6001_BUCK3CONFPWMMODE 0x4DU #define NPM6001_BUCKMODEPADCONF 0x4EU #define NPM6001_PADDRIVESTRENGTH 0x53U #define NPM6001_OVERRIDEPWRUPBUCK 0xABU /* nPM6001 LDO0VOUT values */ #define NPM6001_LDO0VOUT_SET1V8 0x06U #define NPM6001_LDO0VOUT_SET2V1 0x0BU #define NPM6001_LDO0VOUT_SET2V41 0x10U #define NPM6001_LDO0VOUT_SET2V7 0x15U #define NPM6001_LDO0VOUT_SET3V0 0x1AU #define NPM6001_LDO0VOUT_SET3V3 0x1EU /* nPM6001 BUCKXCONFPWMMODE fields */ #define NPM6001_BUCKXCONFPWMMODE_SETFORCEPWM_MSK 0x8U #define NPM6001_BUCKXCONFPWMMODE_SETFORCEPWM_POS 3 #define NPM6001_BUCKXCONFPWMMODE_SETFORCEPWM BIT(NPM6001_BUCKXCONFPWMMODE_SETFORCEPWM_POS) /* nPM6001 OVERRIDEPWRUPBUCK fields */ #define 
NPM6001_OVERRIDEPWRUPBUCK_BUCK1DISABLE_MSK 0x22U #define NPM6001_OVERRIDEPWRUPBUCK_BUCK2DISABLE_MSK 0x44U #define NPM6001_OVERRIDEPWRUPBUCK_BUCK1DISABLE BIT(1) #define NPM6001_OVERRIDEPWRUPBUCK_BUCK2DISABLE BIT(2) struct regulator_npm6001_config { struct regulator_common_config common; struct i2c_dt_spec i2c; uint8_t source; }; struct regulator_npm6001_data { struct regulator_common_data data; }; struct regulator_npm6001_vmap { uint8_t reg_val; int32_t volt_uv; }; static const struct linear_range buck0_range = LINEAR_RANGE_INIT(1800000, 100000U, 0x0U, 0xFU); static const struct linear_range buck1_range = LINEAR_RANGE_INIT(700000, 50000U, 0x0U, 0xEU); static const struct linear_range buck2_range = LINEAR_RANGE_INIT(1200000, 50000U, 0xAU, 0xEU); static const struct linear_range buck3_range = LINEAR_RANGE_INIT(500000, 25000U, 0x0U, 0x70U); static const struct regulator_npm6001_vmap ldo0_voltages[] = { {NPM6001_LDO0VOUT_SET1V8, 1800000}, {NPM6001_LDO0VOUT_SET2V1, 2100000}, {NPM6001_LDO0VOUT_SET2V41, 2410000}, {NPM6001_LDO0VOUT_SET2V7, 2700000}, {NPM6001_LDO0VOUT_SET3V0, 3000000}, {NPM6001_LDO0VOUT_SET3V3, 3300000}, }; static int regulator_npm6001_ldo0_list_voltage(const struct device *dev, unsigned int idx, int32_t *volt_uv) { if (idx >= ARRAY_SIZE(ldo0_voltages)) { return -EINVAL; } *volt_uv = ldo0_voltages[idx].volt_uv; return 0; } static int regulator_npm6001_buck012_set_voltage(const struct device *dev, int32_t min_uv, int32_t max_uv, const struct linear_range *range, uint8_t vout_reg, uint8_t conf_reg) { const struct regulator_npm6001_config *config = dev->config; uint8_t conf, buf[3]; uint16_t idx; int ret; ret = linear_range_get_win_index(range, min_uv, max_uv, &idx); if (ret == -EINVAL) { return ret; } /* force PWM mode when updating voltage */ ret = i2c_reg_read_byte_dt(&config->i2c, conf_reg, &conf); if (ret < 0) { return ret; } if ((conf & NPM6001_BUCKXCONFPWMMODE_SETFORCEPWM) == 0U) { ret = i2c_reg_write_byte_dt(&config->i2c, conf_reg, conf | 
NPM6001_BUCKXCONFPWMMODE_SETFORCEPWM); if (ret < 0) { return ret; } } /* write voltage in both ULP/PWM registers */ buf[0] = vout_reg; buf[1] = (uint8_t)idx; buf[2] = (uint8_t)idx; ret = i2c_write_dt(&config->i2c, buf, sizeof(buf)); if (ret < 0) { return ret; } ret = i2c_reg_write_byte_dt(&config->i2c, NPM6001_TASKS_UPDATE_VOUTPWM, 1U); if (ret < 0) { return ret; } /* restore HYS mode if enabled before */ if ((conf & NPM6001_BUCKXCONFPWMMODE_SETFORCEPWM) == 0U) { ret = i2c_reg_write_byte_dt(&config->i2c, conf_reg, conf); if (ret < 0) { return ret; } } return 0; } static int regulator_npm6001_buck3_set_voltage(const struct device *dev, int32_t min_uv, int32_t max_uv) { const struct regulator_npm6001_config *config = dev->config; uint16_t idx; uint8_t conf; int ret; ret = linear_range_get_win_index(&buck3_range, min_uv, max_uv, &idx); if (ret == -EINVAL) { return ret; } /* force PWM mode when updating voltage */ ret = i2c_reg_read_byte_dt(&config->i2c, NPM6001_BUCK3CONFPWMMODE, &conf); if (ret < 0) { return ret; } if ((conf & NPM6001_BUCKXCONFPWMMODE_SETFORCEPWM) == 0U) { ret = i2c_reg_write_byte_dt(&config->i2c, NPM6001_BUCK3CONFPWMMODE, conf | NPM6001_BUCKXCONFPWMMODE_SETFORCEPWM); if (ret < 0) { return ret; } } ret = i2c_reg_write_byte_dt(&config->i2c, NPM6001_BUCK3VOUT, (uint8_t)idx); if (ret < 0) { return ret; } /* restore HYS mode if enabled before */ if ((conf & NPM6001_BUCKXCONFPWMMODE_SETFORCEPWM) == 0U) { ret = i2c_reg_write_byte_dt(&config->i2c, NPM6001_BUCK3CONFPWMMODE, conf); if (ret < 0) { return ret; } } return 0; } static int regulator_npm6001_ldo0_set_voltage(const struct device *dev, int32_t min_uv, int32_t max_uv) { const struct regulator_npm6001_config *config = dev->config; uint8_t val = 0U; size_t i; for (i = 0U; i < ARRAY_SIZE(ldo0_voltages); i++) { if ((min_uv <= ldo0_voltages[i].volt_uv) && (max_uv >= ldo0_voltages[i].volt_uv)) { val = ldo0_voltages[i].reg_val; break; } } if (i == ARRAY_SIZE(ldo0_voltages)) { return -EINVAL; } return 
i2c_reg_write_byte_dt(&config->i2c, NPM6001_LDO0VOUT, val); } static int regulator_npm6001_buck0123_get_voltage(const struct device *dev, const struct linear_range *range, uint8_t vout_reg, int32_t *volt_uv) { const struct regulator_npm6001_config *config = dev->config; uint8_t idx; int ret; ret = i2c_reg_read_byte_dt(&config->i2c, vout_reg, &idx); if (ret < 0) { return ret; } return linear_range_get_value(range, idx, volt_uv); } static int regulator_npm6001_ldo0_get_voltage(const struct device *dev, int32_t *volt_uv) { const struct regulator_npm6001_config *config = dev->config; uint8_t val; int ret; ret = i2c_reg_read_byte_dt(&config->i2c, NPM6001_LDO0VOUT, &val); if (ret < 0) { return ret; } for (size_t i = 0U; i < ARRAY_SIZE(ldo0_voltages); i++) { if (val == ldo0_voltages[i].reg_val) { *volt_uv = ldo0_voltages[i].volt_uv; return 0; } } __ASSERT(NULL, "Unexpected voltage"); return -EINVAL; } static unsigned int regulator_npm6001_count_voltages(const struct device *dev) { const struct regulator_npm6001_config *config = dev->config; switch (config->source) { case NPM6001_SOURCE_BUCK0: return linear_range_values_count(&buck0_range); case NPM6001_SOURCE_BUCK1: return linear_range_values_count(&buck1_range); case NPM6001_SOURCE_BUCK2: return linear_range_values_count(&buck2_range); case NPM6001_SOURCE_BUCK3: return linear_range_values_count(&buck3_range); case NPM6001_SOURCE_LDO0: return 6U; case NPM6001_SOURCE_LDO1: return 1U; default: __ASSERT(NULL, "Unexpected source"); } return 0; } static int regulator_npm6001_list_voltage(const struct device *dev, unsigned int idx, int32_t *volt_uv) { const struct regulator_npm6001_config *config = dev->config; switch (config->source) { case NPM6001_SOURCE_BUCK0: return linear_range_get_value(&buck0_range, idx, volt_uv); case NPM6001_SOURCE_BUCK1: return linear_range_get_value(&buck1_range, idx, volt_uv); case NPM6001_SOURCE_BUCK2: return linear_range_get_value(&buck2_range, idx + buck2_range.min_idx, volt_uv); case 
NPM6001_SOURCE_BUCK3: return linear_range_get_value(&buck3_range, idx, volt_uv); case NPM6001_SOURCE_LDO0: return regulator_npm6001_ldo0_list_voltage(dev, idx, volt_uv); case NPM6001_SOURCE_LDO1: *volt_uv = 1800000; break; default: __ASSERT(NULL, "Unexpected source"); } return 0; } static int regulator_npm6001_set_voltage(const struct device *dev, int32_t min_uv, int32_t max_uv) { const struct regulator_npm6001_config *config = dev->config; switch (config->source) { case NPM6001_SOURCE_BUCK0: return regulator_npm6001_buck012_set_voltage(dev, min_uv, max_uv, &buck0_range, NPM6001_BUCK0VOUTULP, NPM6001_BUCK0CONFPWMMODE); case NPM6001_SOURCE_BUCK1: return regulator_npm6001_buck012_set_voltage(dev, min_uv, max_uv, &buck1_range, NPM6001_BUCK1VOUTULP, NPM6001_BUCK1CONFPWMMODE); case NPM6001_SOURCE_BUCK2: return regulator_npm6001_buck012_set_voltage(dev, min_uv, max_uv, &buck2_range, NPM6001_BUCK2VOUTULP, NPM6001_BUCK2CONFPWMMODE); case NPM6001_SOURCE_BUCK3: return regulator_npm6001_buck3_set_voltage(dev, min_uv, max_uv); case NPM6001_SOURCE_LDO0: return regulator_npm6001_ldo0_set_voltage(dev, min_uv, max_uv); case NPM6001_SOURCE_LDO1: if ((min_uv != 1800000) && (max_uv != 1800000)) { return -EINVAL; } break; default: __ASSERT(NULL, "Unexpected source"); } return 0; } static int regulator_npm6001_get_voltage(const struct device *dev, int32_t *volt_uv) { const struct regulator_npm6001_config *config = dev->config; switch (config->source) { case NPM6001_SOURCE_BUCK0: return regulator_npm6001_buck0123_get_voltage(dev, &buck0_range, NPM6001_BUCK0VOUTULP, volt_uv); case NPM6001_SOURCE_BUCK1: return regulator_npm6001_buck0123_get_voltage(dev, &buck1_range, NPM6001_BUCK1VOUTULP, volt_uv); case NPM6001_SOURCE_BUCK2: return regulator_npm6001_buck0123_get_voltage(dev, &buck2_range, NPM6001_BUCK2VOUTULP, volt_uv); case NPM6001_SOURCE_BUCK3: return regulator_npm6001_buck0123_get_voltage(dev, &buck3_range, NPM6001_BUCK3VOUT, volt_uv); case NPM6001_SOURCE_LDO0: return 
regulator_npm6001_ldo0_get_voltage(dev, volt_uv); case NPM6001_SOURCE_LDO1: *volt_uv = 1800000U; break; default: __ASSERT(NULL, "Unexpected source"); } return 0; } static int regulator_npm6001_set_mode(const struct device *dev, regulator_mode_t mode) { const struct regulator_npm6001_config *config = dev->config; uint8_t conf_reg; if (mode > NPM6001_MODE_PWM) { return -ENOTSUP; } switch (config->source) { case NPM6001_SOURCE_BUCK0: conf_reg = NPM6001_BUCK0CONFPWMMODE; break; case NPM6001_SOURCE_BUCK1: conf_reg = NPM6001_BUCK1CONFPWMMODE; break; case NPM6001_SOURCE_BUCK2: conf_reg = NPM6001_BUCK2CONFPWMMODE; break; case NPM6001_SOURCE_BUCK3: conf_reg = NPM6001_BUCK3CONFPWMMODE; break; default: return -ENOTSUP; } return i2c_reg_update_byte_dt(&config->i2c, conf_reg, NPM6001_BUCKXCONFPWMMODE_SETFORCEPWM_MSK, mode << NPM6001_BUCKXCONFPWMMODE_SETFORCEPWM_POS); } static int regulator_npm6001_get_mode(const struct device *dev, regulator_mode_t *mode) { const struct regulator_npm6001_config *config = dev->config; uint8_t conf_reg, conf; int ret; switch (config->source) { case NPM6001_SOURCE_BUCK0: conf_reg = NPM6001_BUCK0CONFPWMMODE; break; case NPM6001_SOURCE_BUCK1: conf_reg = NPM6001_BUCK1CONFPWMMODE; break; case NPM6001_SOURCE_BUCK2: conf_reg = NPM6001_BUCK2CONFPWMMODE; break; case NPM6001_SOURCE_BUCK3: conf_reg = NPM6001_BUCK3CONFPWMMODE; break; default: return -ENOTSUP; } ret = i2c_reg_read_byte_dt(&config->i2c, conf_reg, &conf); if (ret < 0) { return ret; } *mode = (conf & NPM6001_BUCKXCONFPWMMODE_SETFORCEPWM_MSK) >> NPM6001_BUCKXCONFPWMMODE_SETFORCEPWM_POS; return 0; } static int regulator_npm6001_enable(const struct device *dev) { const struct regulator_npm6001_config *config = dev->config; switch (config->source) { case NPM6001_SOURCE_BUCK1: return i2c_reg_update_byte_dt(&config->i2c, NPM6001_OVERRIDEPWRUPBUCK, NPM6001_OVERRIDEPWRUPBUCK_BUCK1DISABLE_MSK, 0U); case NPM6001_SOURCE_BUCK2: return i2c_reg_update_byte_dt(&config->i2c, NPM6001_OVERRIDEPWRUPBUCK, 
NPM6001_OVERRIDEPWRUPBUCK_BUCK2DISABLE_MSK, 0U); case NPM6001_SOURCE_BUCK3: return i2c_reg_write_byte_dt(&config->i2c, NPM6001_TASKS_START_BUCK3, 1U); case NPM6001_SOURCE_LDO0: return i2c_reg_write_byte_dt(&config->i2c, NPM6001_TASKS_START_LDO0, 1U); case NPM6001_SOURCE_LDO1: return i2c_reg_write_byte_dt(&config->i2c, NPM6001_TASKS_START_LDO1, 1U); default: return 0; } } static int regulator_npm6001_disable(const struct device *dev) { const struct regulator_npm6001_config *config = dev->config; switch (config->source) { case NPM6001_SOURCE_BUCK1: return i2c_reg_update_byte_dt(&config->i2c, NPM6001_OVERRIDEPWRUPBUCK, NPM6001_OVERRIDEPWRUPBUCK_BUCK1DISABLE_MSK, NPM6001_OVERRIDEPWRUPBUCK_BUCK1DISABLE); case NPM6001_SOURCE_BUCK2: return i2c_reg_update_byte_dt(&config->i2c, NPM6001_OVERRIDEPWRUPBUCK, NPM6001_OVERRIDEPWRUPBUCK_BUCK2DISABLE_MSK, NPM6001_OVERRIDEPWRUPBUCK_BUCK2DISABLE); case NPM6001_SOURCE_BUCK3: return i2c_reg_write_byte_dt(&config->i2c, NPM6001_TASKS_STOP_BUCK3, 1U); case NPM6001_SOURCE_LDO0: return i2c_reg_write_byte_dt(&config->i2c, NPM6001_TASKS_STOP_LDO0, 1U); case NPM6001_SOURCE_LDO1: return i2c_reg_write_byte_dt(&config->i2c, NPM6001_TASKS_STOP_LDO1, 1U); default: return 0; } } static int regulator_npm6001_get_error_flags(const struct device *dev, regulator_error_flags_t *flags) { const struct regulator_npm6001_config *config = dev->config; uint8_t oc_reg, val; int ret; *flags = 0U; /* read thermal warning */ ret = i2c_reg_read_byte_dt(&config->i2c, NPM6001_EVENTS_THWARN, &val); if (ret < 0) { return ret; } if (val != 0U) { /* clear thermal warning */ ret = i2c_reg_write_byte_dt(&config->i2c, NPM6001_EVENTS_THWARN, 0U); if (ret < 0) { return ret; } *flags |= REGULATOR_ERROR_OVER_TEMP; } /* read overcurrent event */ switch (config->source) { case NPM6001_SOURCE_BUCK0: oc_reg = NPM6001_EVENTS_BUCK0OC; break; case NPM6001_SOURCE_BUCK1: oc_reg = NPM6001_EVENTS_BUCK1OC; break; case NPM6001_SOURCE_BUCK2: oc_reg = NPM6001_EVENTS_BUCK2OC; break; case 
NPM6001_SOURCE_BUCK3: oc_reg = NPM6001_EVENTS_BUCK3OC; break; default: return 0; } ret = i2c_reg_read_byte_dt(&config->i2c, oc_reg, &val); if (ret < 0) { return ret; } if (val != 0U) { /* clear overcurrent event */ ret = i2c_reg_write_byte_dt(&config->i2c, oc_reg, 0U); if (ret < 0) { return ret; } *flags |= REGULATOR_ERROR_OVER_CURRENT; } return 0; } static int regulator_npm6001_init(const struct device *dev) { const struct regulator_npm6001_config *config = dev->config; bool is_enabled; regulator_common_data_init(dev); if (!i2c_is_ready_dt(&config->i2c)) { return -ENODEV; } /* BUCK1/2 are ON by default */ is_enabled = (config->source == NPM6001_SOURCE_BUCK1) || (config->source == NPM6001_SOURCE_BUCK2); return regulator_common_init(dev, is_enabled); } static const struct regulator_driver_api api = { .enable = regulator_npm6001_enable, .disable = regulator_npm6001_disable, .count_voltages = regulator_npm6001_count_voltages, .list_voltage = regulator_npm6001_list_voltage, .set_voltage = regulator_npm6001_set_voltage, .get_voltage = regulator_npm6001_get_voltage, .set_mode = regulator_npm6001_set_mode, .get_mode = regulator_npm6001_get_mode, .get_error_flags = regulator_npm6001_get_error_flags, }; #define REGULATOR_NPM6001_DEFINE(node_id, id, _source) \ static struct regulator_npm6001_data data_##id; \ \ static const struct regulator_npm6001_config config_##id = { \ .common = REGULATOR_DT_COMMON_CONFIG_INIT(node_id), \ .i2c = I2C_DT_SPEC_GET(DT_GPARENT(node_id)), \ .source = _source, \ }; \ \ DEVICE_DT_DEFINE(node_id, regulator_npm6001_init, NULL, &data_##id, &config_##id, \ POST_KERNEL, CONFIG_REGULATOR_NPM6001_INIT_PRIORITY, &api); #define REGULATOR_NPM6001_DEFINE_COND(inst, child, source) \ COND_CODE_1(DT_NODE_EXISTS(DT_INST_CHILD(inst, child)), \ (REGULATOR_NPM6001_DEFINE(DT_INST_CHILD(inst, child), child##inst, source)), \ ()) #define REGULATOR_NPM6001_DEFINE_ALL(inst) \ REGULATOR_NPM6001_DEFINE_COND(inst, buck0, NPM6001_SOURCE_BUCK0) \ 
REGULATOR_NPM6001_DEFINE_COND(inst, buck1, NPM6001_SOURCE_BUCK1) \ REGULATOR_NPM6001_DEFINE_COND(inst, buck2, NPM6001_SOURCE_BUCK2) \ REGULATOR_NPM6001_DEFINE_COND(inst, buck3, NPM6001_SOURCE_BUCK3) \ REGULATOR_NPM6001_DEFINE_COND(inst, ldo0, NPM6001_SOURCE_LDO0) \ REGULATOR_NPM6001_DEFINE_COND(inst, ldo1, NPM6001_SOURCE_LDO1) DT_INST_FOREACH_STATUS_OKAY(REGULATOR_NPM6001_DEFINE_ALL) ```
/content/code_sandbox/drivers/regulator/regulator_npm6001.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,944
```unknown config REGULATOR_MAX20335 bool "MAX20335 PMIC regulator driver" default y depends on DT_HAS_MAXIM_MAX20335_REGULATOR_ENABLED select I2C select MFD help Enable the Maxim MAX20335 PMIC regulator driver if REGULATOR_MAX20335 config REGULATOR_MAXIM_MAX20335_COMMON_INIT_PRIORITY int "MAX20335 regulator driver init priority (common part)" default 86 help Init priority for the Maxim MAX20335 regulator driver (common part). It must be greater than I2C and MFD init priority. config REGULATOR_MAXIM_MAX20335_INIT_PRIORITY int "MAX20335 regulator driver init priority" default 87 help Init priority for the Maxim MAX20335 regulator driver. It must be greater than REGULATOR_MAXIM_MAX20335_COMMON_INIT_PRIORITY endif ```
/content/code_sandbox/drivers/regulator/Kconfig.max20335
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
187
```unknown config REGULATOR_GPIO bool "GPIO-controlled regulators" default y depends on DT_HAS_REGULATOR_GPIO_ENABLED select GPIO help Enable the driver for GPIO-controlled regulators if REGULATOR_GPIO config REGULATOR_GPIO_INIT_PRIORITY int "Init priority" default 75 help Device driver initialization priority endif # REGULATOR_GPIO ```
/content/code_sandbox/drivers/regulator/Kconfig.gpio
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
75
```c /* */ #define DT_DRV_COMPAT nxp_pca9420 #include <errno.h> #include <zephyr/kernel.h> #include <zephyr/drivers/i2c.h> #include <zephyr/drivers/regulator.h> #include <zephyr/drivers/regulator/pca9420.h> #include <zephyr/sys/linear_range.h> #include <zephyr/sys/util.h> /** Register memory map. See datasheet for more details. */ /** General purpose registers */ /** @brief Top level system ctrl 0 */ #define PCA9420_TOP_CNTL0 0x09U /** @brief Top level system ctrl 2 */ #define PCA9420_TOP_CNTL2 0x0BU /** @brief Top level system ctrl 3 */ #define PCA9420_TOP_CNTL3 0x0CU /** Regulator status indication registers */ /** @brief Active Discharge configuration for mode 0_0 */ #define PCA9420_ACT_DISCHARGE_CNTL 0x21U /** @brief Mode configuration for mode 0_0 */ #define PCA9420_MODECFG_0_0 0x22U /** @brief Mode configuration for mode 0_1 */ #define PCA9420_MODECFG_0_1 0x23U /** @brief Mode configuration for mode 0_2 */ #define PCA9420_MODECFG_0_2 0x24U /** @brief Mode configuration for mode 0_3 */ #define PCA9420_MODECFG_0_3 0x25U /** @brief VIN input current limit selection */ #define PCA9420_TOP_CNTL0_VIN_ILIM_SEL_POS 5U #define PCA9420_TOP_CNTL0_VIN_ILIM_SEL_MASK 0xE0U #define PCA9420_TOP_CNTL0_VIN_ILIM_SEL_DISABLED 0x7U /** @brief ASYS UVLO threshold selection */ #define PCA9420_TOP_CNTL2_ASYS_UVLO_SEL_POS 6U #define PCA9420_TOP_CNTL2_ASYS_UVLO_SEL_MASK 0xC0U /** @brief I2C Mode control mask */ #define PCA9420_TOP_CNTL3_MODE_I2C_POS 3U #define PCA9420_TOP_CNTL3_MODE_I2C_MASK 0x18U /* * @brief Mode control selection mask. When this bit is set, the external * PMIC pins MODESEL0 and MODESEL1 can be used to select the active mode */ #define PCA9420_MODECFG_0_X_EN_MODE_SEL_BY_PIN 0x40U /* * @brief Mode configuration upon falling edge applied to ON pin. If set, * the device will switch to mode 0 when a valid falling edge is applied. 
* to the ON pin */ /** @brief Mode output voltage mask */ #define PCA9420_MODECFG_0_SW1_OUT_MASK 0x3FU #define PCA9420_MODECFG_0_SW1_OUT_POS 0U /** @brief SW2_OUT offset and voltage level mask */ #define PCA9420_MODECFG_1_SW2_OUT_MASK 0x3FU #define PCA9420_MODECFG_1_SW2_OUT_POS 0U /** @brief LDO1_OUT voltage level mask */ #define PCA9420_MODECFG_2_LDO1_OUT_MASK 0xF0U #define PCA9420_MODECFG_2_LDO1_OUT_POS 4U /** @brief SW1 Enable */ #define PCA9420_MODECFG_2_SW1_EN_MASK 0x08U #define PCA9420_MODECFG_2_SW1_EN_VAL 0x08U /** @brief SW2 Enable */ #define PCA9420_MODECFG_2_SW2_EN_MASK 0x04U #define PCA9420_MODECFG_2_SW2_EN_VAL 0x04U /** @brief LDO1 Enable */ #define PCA9420_MODECFG_2_LDO1_EN_MASK 0x02U #define PCA9420_MODECFG_2_LDO1_EN_VAL 0x02U /** @brief LDO2 Enable */ #define PCA9420_MODECFG_2_LDO2_EN_MASK 0x01U #define PCA9420_MODECFG_2_LDO2_EN_VAL 0x01U /** @brief LDO2_OUT offset and voltage level mask */ #define PCA9420_MODECFG_3_LDO2_OUT_MASK 0x3FU #define PCA9420_MODECFG_3_LDO2_OUT_POS 0U /** @brief SW1 active discharge control */ #define PCA9420_ACT_DISCHARGE_CNTL_SW1_MASK 0x08U #define PCA9420_ACT_DISCHARGE_CNTL_SW1_POS 4U /** @brief SW2 active discharge control */ #define PCA9420_ACT_DISCHARGE_CNTL_SW2_MASK 0x04U #define PCA9420_ACT_DISCHARGE_CNTL_SW2_POS 3U /** @brief LDO1 active discharge control */ #define PCA9420_ACT_DISCHARGE_CNTL_LDO1_MASK 0x02U #define PCA9420_ACT_DISCHARGE_CNTL_LDO1_POS 2U /** @brief LDO2 active discharge control */ #define PCA9420_ACT_DISCHARGE_CNTL_LDO2_MASK 0x01U #define PCA9420_ACT_DISCHARGE_CNTL_LDO2_POS 1U /** VIN ILIM resolution, uA/LSB */ #define PCA9420_VIN_ILIM_UA_LSB 170000 /** VIN ILIM minimum value, uA */ #define PCA9420_VIN_ILIM_MIN_UA 85000 /** Number of modes */ #define PCA9420_NUM_MODES 4U /** Offset applied to MODECFG* registers for a given mode */ #define PCA9420_MODECFG_OFFSET(mode) ((mode) * 4U) struct regulator_pca9420_desc { uint8_t enable_reg; uint8_t enable_mask; uint8_t enable_val; uint8_t vsel_reg; uint8_t 
vsel_mask; uint8_t vsel_pos; uint8_t ad_mask; uint8_t ad_pos; int32_t max_ua; uint8_t num_ranges; const struct linear_range *ranges; }; struct regulator_pca9420_common_config { struct i2c_dt_spec i2c; int32_t vin_ilim_ua; bool enable_modesel_pins; uint8_t asys_uvlo_sel_mv; }; struct regulator_pca9420_common_data { regulator_dvs_state_t dvs_state; }; struct regulator_pca9420_config { struct regulator_common_config common; bool enable_inverted; int32_t modes_uv[4]; const struct regulator_pca9420_desc *desc; const struct device *parent; }; struct regulator_pca9420_data { struct regulator_common_data data; }; static const struct linear_range buck1_ranges[] = { LINEAR_RANGE_INIT(500000, 25000U, 0x0U, 0x28U), LINEAR_RANGE_INIT(1500000, 0U, 0x29U, 0x3E), LINEAR_RANGE_INIT(1800000, 0U, 0x3FU, 0x3FU), }; static const struct linear_range buck2_ranges[] = { LINEAR_RANGE_INIT(1500000, 25000U, 0x0U, 0x18U), LINEAR_RANGE_INIT(2100000, 0U, 0x19U, 0x1F), LINEAR_RANGE_INIT(2700000, 25000U, 0x20U, 0x38U), LINEAR_RANGE_INIT(3300000, 0U, 0x39U, 0x3F), }; static const struct linear_range ldo1_ranges[] = { LINEAR_RANGE_INIT(1700000, 25000U, 0x0U, 0x9U), LINEAR_RANGE_INIT(1900000, 0U, 0x9U, 0xFU), }; static const struct linear_range ldo2_ranges[] = { LINEAR_RANGE_INIT(1500000, 25000U, 0x0U, 0x18U), LINEAR_RANGE_INIT(2100000, 0U, 0x19U, 0x1FU), LINEAR_RANGE_INIT(2700000, 25000U, 0x20U, 0x38U), LINEAR_RANGE_INIT(3300000, 0U, 0x39U, 0x3FU), }; static const struct regulator_pca9420_desc buck1_desc = { .enable_reg = PCA9420_MODECFG_0_2, .enable_mask = PCA9420_MODECFG_2_SW1_EN_MASK, .enable_val = PCA9420_MODECFG_2_SW1_EN_VAL, .vsel_mask = PCA9420_MODECFG_0_SW1_OUT_MASK, .vsel_pos = PCA9420_MODECFG_0_SW1_OUT_POS, .vsel_reg = PCA9420_MODECFG_0_0, .ad_mask = PCA9420_ACT_DISCHARGE_CNTL_SW1_MASK, .ad_pos = PCA9420_ACT_DISCHARGE_CNTL_SW1_POS, .max_ua = 250000, .ranges = buck1_ranges, .num_ranges = ARRAY_SIZE(buck1_ranges), }; static const struct regulator_pca9420_desc buck2_desc = { .enable_reg = 
PCA9420_MODECFG_0_2, .enable_mask = PCA9420_MODECFG_2_SW2_EN_MASK, .enable_val = PCA9420_MODECFG_2_SW2_EN_VAL, .vsel_mask = PCA9420_MODECFG_1_SW2_OUT_MASK, .vsel_pos = PCA9420_MODECFG_1_SW2_OUT_POS, .vsel_reg = PCA9420_MODECFG_0_1, .ad_mask = PCA9420_ACT_DISCHARGE_CNTL_SW2_MASK, .ad_pos = PCA9420_ACT_DISCHARGE_CNTL_SW2_POS, .max_ua = 500000, .ranges = buck2_ranges, .num_ranges = ARRAY_SIZE(buck2_ranges), }; static const struct regulator_pca9420_desc ldo1_desc = { .enable_reg = PCA9420_MODECFG_0_2, .enable_mask = PCA9420_MODECFG_2_LDO1_EN_MASK, .enable_val = PCA9420_MODECFG_2_LDO1_EN_VAL, .vsel_mask = PCA9420_MODECFG_2_LDO1_OUT_MASK, .vsel_pos = PCA9420_MODECFG_2_LDO1_OUT_POS, .vsel_reg = PCA9420_MODECFG_0_2, .ad_mask = PCA9420_ACT_DISCHARGE_CNTL_LDO1_MASK, .ad_pos = PCA9420_ACT_DISCHARGE_CNTL_LDO1_POS, .max_ua = 1000, .ranges = ldo1_ranges, .num_ranges = ARRAY_SIZE(ldo1_ranges), }; static const struct regulator_pca9420_desc ldo2_desc = { .enable_reg = PCA9420_MODECFG_0_2, .enable_mask = PCA9420_MODECFG_2_LDO2_EN_MASK, .enable_val = PCA9420_MODECFG_2_LDO2_EN_VAL, .vsel_reg = PCA9420_MODECFG_0_3, .vsel_mask = PCA9420_MODECFG_3_LDO2_OUT_MASK, .vsel_pos = PCA9420_MODECFG_3_LDO2_OUT_POS, .ad_mask = PCA9420_ACT_DISCHARGE_CNTL_LDO2_MASK, .ad_pos = PCA9420_ACT_DISCHARGE_CNTL_LDO2_POS, .max_ua = 250000, .ranges = ldo2_ranges, .num_ranges = ARRAY_SIZE(ldo2_ranges), }; static unsigned int regulator_pca9420_count_voltages(const struct device *dev) { const struct regulator_pca9420_config *config = dev->config; return linear_range_group_values_count(config->desc->ranges, config->desc->num_ranges); } static int regulator_pca9420_list_voltage(const struct device *dev, unsigned int idx, int32_t *volt_uv) { const struct regulator_pca9420_config *config = dev->config; return linear_range_group_get_value(config->desc->ranges, config->desc->num_ranges, idx, volt_uv); } static int regulator_pca9420_set_voltage(const struct device *dev, int32_t min_uv, int32_t max_uv) { const struct 
regulator_pca9420_config *config = dev->config; const struct regulator_pca9420_common_config *cconfig = config->parent->config; struct regulator_pca9420_common_data *cdata = config->parent->data; uint16_t idx; int ret; ret = linear_range_group_get_win_index(config->desc->ranges, config->desc->num_ranges, min_uv, max_uv, &idx); if (ret == -EINVAL) { return ret; } idx <<= config->desc->vsel_pos; return i2c_reg_update_byte_dt(&cconfig->i2c, config->desc->vsel_reg + PCA9420_MODECFG_OFFSET(cdata->dvs_state), config->desc->vsel_mask, (uint8_t)idx); } static int regulator_pca9420_get_voltage(const struct device *dev, int32_t *volt_uv) { const struct regulator_pca9420_config *config = dev->config; const struct regulator_pca9420_common_config *cconfig = config->parent->config; struct regulator_pca9420_common_data *cdata = config->parent->data; int ret; uint8_t raw_reg; ret = i2c_reg_read_byte_dt(&cconfig->i2c, config->desc->vsel_reg + PCA9420_MODECFG_OFFSET(cdata->dvs_state), &raw_reg); if (ret < 0) { return ret; } raw_reg = (raw_reg & config->desc->vsel_mask) >> config->desc->vsel_pos; return linear_range_group_get_value(config->desc->ranges, config->desc->num_ranges, raw_reg, volt_uv); } static int regulator_pca9420_get_current_limit(const struct device *dev, int32_t *curr_ua) { const struct regulator_pca9420_config *config = dev->config; const struct regulator_pca9420_common_config *cconfig = config->parent->config; if (cconfig->vin_ilim_ua == 0U) { *curr_ua = config->desc->max_ua; } else { *curr_ua = MIN(config->desc->max_ua, cconfig->vin_ilim_ua); } return 0; } static int regulator_pca9420_set_active_discharge(const struct device *dev, bool active_discharge) { const struct regulator_pca9420_config *config = dev->config; const struct regulator_pca9420_common_config *cconfig = config->parent->config; uint8_t dis_val; dis_val = (!active_discharge) << config->desc->ad_pos; return i2c_reg_update_byte_dt(&cconfig->i2c, PCA9420_ACT_DISCHARGE_CNTL, config->desc->ad_mask, 
dis_val); } static int regulator_pca9420_get_active_discharge(const struct device *dev, bool *active_discharge) { const struct regulator_pca9420_config *config = dev->config; const struct regulator_pca9420_common_config *cconfig = config->parent->config; uint8_t raw_reg; int ret; ret = i2c_reg_read_byte_dt(&cconfig->i2c, PCA9420_ACT_DISCHARGE_CNTL, &raw_reg); if (ret < 0) { return ret; } *active_discharge = !((raw_reg & config->desc->ad_mask) >> config->desc->ad_pos); return 0; } static int regulator_pca9420_enable(const struct device *dev) { const struct regulator_pca9420_config *config = dev->config; const struct regulator_pca9420_common_config *cconfig = config->parent->config; struct regulator_pca9420_common_data *cdata = config->parent->data; uint8_t en_val; en_val = config->enable_inverted ? 0 : config->desc->enable_val; return i2c_reg_update_byte_dt(&cconfig->i2c, config->desc->enable_reg + PCA9420_MODECFG_OFFSET(cdata->dvs_state), config->desc->enable_mask, en_val); } static int regulator_pca9420_disable(const struct device *dev) { const struct regulator_pca9420_config *config = dev->config; const struct regulator_pca9420_common_config *cconfig = config->parent->config; struct regulator_pca9420_common_data *cdata = config->parent->data; uint8_t dis_val; dis_val = config->enable_inverted ? 
config->desc->enable_val : 0; return i2c_reg_update_byte_dt(&cconfig->i2c, config->desc->enable_reg + PCA9420_MODECFG_OFFSET(cdata->dvs_state), config->desc->enable_mask, dis_val); } static const struct regulator_driver_api api = { .enable = regulator_pca9420_enable, .disable = regulator_pca9420_disable, .count_voltages = regulator_pca9420_count_voltages, .list_voltage = regulator_pca9420_list_voltage, .set_voltage = regulator_pca9420_set_voltage, .get_voltage = regulator_pca9420_get_voltage, .get_current_limit = regulator_pca9420_get_current_limit, .set_active_discharge = regulator_pca9420_set_active_discharge, .get_active_discharge = regulator_pca9420_get_active_discharge, }; static int regulator_pca9420_init(const struct device *dev) { const struct regulator_pca9420_config *config = dev->config; const struct regulator_pca9420_common_config *cconfig = config->parent->config; regulator_common_data_init(dev); if (!device_is_ready(config->parent)) { return -ENODEV; } /* configure mode voltages */ for (uint8_t i = 0U; i < ARRAY_SIZE(config->modes_uv); i++) { int ret; if (config->modes_uv[i] == 0) { /* disable mode if voltage is 0 */ ret = i2c_reg_update_byte_dt( &cconfig->i2c, config->desc->enable_reg + PCA9420_MODECFG_OFFSET(i), config->desc->enable_mask, 0U); if (ret < 0) { return ret; } } else if (config->modes_uv[i] > 0) { uint16_t idx; /* program mode voltage */ ret = linear_range_group_get_win_index( config->desc->ranges, config->desc->num_ranges, config->modes_uv[i], config->modes_uv[i], &idx); if (ret == -EINVAL) { return ret; } idx <<= config->desc->vsel_pos; ret = i2c_reg_update_byte_dt( &cconfig->i2c, config->desc->vsel_reg + PCA9420_MODECFG_OFFSET(i), config->desc->vsel_mask, (uint8_t)idx); if (ret < 0) { return ret; } } } return regulator_common_init(dev, false); } int regulator_pca9420_dvs_state_set(const struct device *dev, regulator_dvs_state_t state) { const struct regulator_pca9420_common_config *config = dev->config; struct 
regulator_pca9420_common_data *data = dev->data; int ret; if (state >= PCA9420_NUM_MODES) { return -ENOTSUP; } if (config->enable_modesel_pins) { /* * The user cannot set DVS state via this API, * but they may want to query/set voltages for another mode. * Return -EPERM to indicate change failed, but change the * dvs_state variable so the user can access the alternative * dvs mode settings. */ data->dvs_state = state; return -EPERM; } ret = i2c_reg_update_byte_dt(&config->i2c, PCA9420_TOP_CNTL3, PCA9420_TOP_CNTL3_MODE_I2C_MASK, state << PCA9420_TOP_CNTL3_MODE_I2C_POS); if (ret < 0) { return ret; } /* Record new DVS state */ data->dvs_state = state; return 0; } static const struct regulator_parent_driver_api parent_api = { .dvs_state_set = regulator_pca9420_dvs_state_set, }; static int regulator_pca9420_common_init(const struct device *dev) { const struct regulator_pca9420_common_config *config = dev->config; uint8_t reg_val = PCA9420_TOP_CNTL0_VIN_ILIM_SEL_DISABLED; int ret; if (!device_is_ready(config->i2c.bus)) { return -ENODEV; } if (config->enable_modesel_pins) { /* enable MODESEL0/1 pins for each mode */ for (uint8_t i = 0U; i < PCA9420_NUM_MODES; i++) { ret = i2c_reg_update_byte_dt( &config->i2c, PCA9420_MODECFG_0_0 + PCA9420_MODECFG_OFFSET(i), PCA9420_MODECFG_0_X_EN_MODE_SEL_BY_PIN, PCA9420_MODECFG_0_X_EN_MODE_SEL_BY_PIN); if (ret < 0) { return ret; } } } /* configure VIN current limit */ if (config->vin_ilim_ua != 0U) { reg_val = (config->vin_ilim_ua - PCA9420_VIN_ILIM_MIN_UA) / PCA9420_VIN_ILIM_UA_LSB; } ret = i2c_reg_update_byte_dt( &config->i2c, PCA9420_TOP_CNTL0, PCA9420_TOP_CNTL0_VIN_ILIM_SEL_MASK, reg_val << PCA9420_TOP_CNTL0_VIN_ILIM_SEL_POS); if (ret != 0) { return ret; } /* configure ASYS UVLO threshold */ return i2c_reg_update_byte_dt(&config->i2c, PCA9420_TOP_CNTL2, PCA9420_TOP_CNTL2_ASYS_UVLO_SEL_MASK, config->asys_uvlo_sel_mv << PCA9420_TOP_CNTL2_ASYS_UVLO_SEL_POS); } #define REGULATOR_PCA9420_DEFINE(node_id, id, name, _parent) \ static struct 
regulator_pca9420_data data_##id; \ \ static const struct regulator_pca9420_config config_##id = { \ .common = REGULATOR_DT_COMMON_CONFIG_INIT(node_id), \ .enable_inverted = DT_PROP(node_id, enable_inverted), \ .modes_uv = { \ DT_PROP_OR(node_id, nxp_mode0_microvolt, -1), \ DT_PROP_OR(node_id, nxp_mode1_microvolt, -1), \ DT_PROP_OR(node_id, nxp_mode2_microvolt, -1), \ DT_PROP_OR(node_id, nxp_mode3_microvolt, -1), \ }, \ .desc = &name ## _desc, \ .parent = _parent, \ }; \ \ DEVICE_DT_DEFINE(node_id, regulator_pca9420_init, NULL, &data_##id, \ &config_##id, POST_KERNEL, \ CONFIG_REGULATOR_PCA9420_INIT_PRIORITY, &api); #define REGULATOR_PCA9420_DEFINE_COND(inst, child, parent) \ COND_CODE_1(DT_NODE_EXISTS(DT_INST_CHILD(inst, child)), \ (REGULATOR_PCA9420_DEFINE(DT_INST_CHILD(inst, child), \ child ## inst, child, parent)), \ ()) #define REGULATOR_PCA9420_DEFINE_ALL(inst) \ static const struct regulator_pca9420_common_config config_##inst = { \ .i2c = I2C_DT_SPEC_INST_GET(inst), \ .vin_ilim_ua = DT_INST_PROP(inst, nxp_vin_ilim_microamp), \ .enable_modesel_pins = \ DT_INST_PROP(inst, nxp_enable_modesel_pins), \ .asys_uvlo_sel_mv = \ DT_INST_ENUM_IDX(inst, nxp_asys_uvlo_sel_millivolt), \ }; \ \ static struct regulator_pca9420_common_data data_##inst; \ \ DEVICE_DT_INST_DEFINE(inst, regulator_pca9420_common_init, NULL, \ &data_##inst, \ &config_##inst, POST_KERNEL, \ CONFIG_REGULATOR_PCA9420_COMMON_INIT_PRIORITY, \ &parent_api); \ \ REGULATOR_PCA9420_DEFINE_COND(inst, buck1, DEVICE_DT_INST_GET(inst)) \ REGULATOR_PCA9420_DEFINE_COND(inst, buck2, DEVICE_DT_INST_GET(inst)) \ REGULATOR_PCA9420_DEFINE_COND(inst, ldo1, DEVICE_DT_INST_GET(inst)) \ REGULATOR_PCA9420_DEFINE_COND(inst, ldo2, DEVICE_DT_INST_GET(inst)) DT_INST_FOREACH_STATUS_OKAY(REGULATOR_PCA9420_DEFINE_ALL) ```
/content/code_sandbox/drivers/regulator/regulator_pca9420.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,583
```c /* */ #define DT_DRV_COMPAT regulator_fixed #include <stdint.h> #include <zephyr/drivers/regulator.h> #include <zephyr/drivers/gpio.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(regulator_fixed, CONFIG_REGULATOR_LOG_LEVEL); struct regulator_fixed_config { struct regulator_common_config common; struct gpio_dt_spec enable; }; struct regulator_fixed_data { struct regulator_common_data common; }; static int regulator_fixed_enable(const struct device *dev) { const struct regulator_fixed_config *cfg = dev->config; int ret; if (!cfg->enable.port) { return -ENOTSUP; } ret = gpio_pin_set_dt(&cfg->enable, 1); if (ret < 0) { return ret; } return 0; } static int regulator_fixed_disable(const struct device *dev) { const struct regulator_fixed_config *cfg = dev->config; if (!cfg->enable.port) { return -ENOTSUP; } return gpio_pin_set_dt(&cfg->enable, 0); } static unsigned int regulator_fixed_count_voltages(const struct device *dev) { int32_t min_uv; return (regulator_common_get_min_voltage(dev, &min_uv) < 0) ? 
0U : 1U; } static int regulator_fixed_list_voltage(const struct device *dev, unsigned int idx, int32_t *volt_uv) { if (idx != 0) { return -EINVAL; } if (regulator_common_get_min_voltage(dev, volt_uv) < 0) { return -EINVAL; } return 0; } static const struct regulator_driver_api regulator_fixed_api = { .enable = regulator_fixed_enable, .disable = regulator_fixed_disable, .count_voltages = regulator_fixed_count_voltages, .list_voltage = regulator_fixed_list_voltage, }; static int regulator_fixed_init(const struct device *dev) { const struct regulator_fixed_config *cfg = dev->config; regulator_common_data_init(dev); if (cfg->enable.port != NULL) { if (!gpio_is_ready_dt(&cfg->enable)) { LOG_ERR("GPIO port: %s not ready", cfg->enable.port->name); return -ENODEV; } int ret = gpio_pin_configure_dt(&cfg->enable, GPIO_OUTPUT); if (ret < 0) { return ret; } } return regulator_common_init(dev, false); } #define REGULATOR_FIXED_DEFINE(inst) \ BUILD_ASSERT(DT_INST_PROP_OR(inst, regulator_min_microvolt, 0) == \ DT_INST_PROP_OR(inst, regulator_max_microvolt, 0), \ "Regulator requires fixed voltages"); \ static struct regulator_fixed_data data##inst; \ \ static const struct regulator_fixed_config config##inst = { \ .common = REGULATOR_DT_INST_COMMON_CONFIG_INIT(inst), \ .enable = GPIO_DT_SPEC_INST_GET_OR(inst, enable_gpios, {0}), \ }; \ \ DEVICE_DT_INST_DEFINE(inst, regulator_fixed_init, NULL, &data##inst, \ &config##inst, POST_KERNEL, \ CONFIG_REGULATOR_FIXED_INIT_PRIORITY, \ &regulator_fixed_api); DT_INST_FOREACH_STATUS_OKAY(REGULATOR_FIXED_DEFINE) ```
/content/code_sandbox/drivers/regulator/regulator_fixed.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
697
```c /* */ #define DT_DRV_COMPAT zephyr_fake_regulator #include <zephyr/devicetree.h> #include <zephyr/drivers/regulator.h> #include <zephyr/drivers/regulator/fake.h> #include <zephyr/fff.h> #include <zephyr/toolchain.h> /* regulator */ struct regulator_fake_config { struct regulator_common_config common; bool is_enabled; }; struct regulator_fake_data { struct regulator_common_data data; }; DEFINE_FAKE_VALUE_FUNC(int, regulator_fake_enable, const struct device *); DEFINE_FAKE_VALUE_FUNC(int, regulator_fake_disable, const struct device *); DEFINE_FAKE_VALUE_FUNC(unsigned int, regulator_fake_count_voltages, const struct device *); DEFINE_FAKE_VALUE_FUNC(int, regulator_fake_list_voltage, const struct device *, unsigned int, int32_t *); DEFINE_FAKE_VALUE_FUNC(int, regulator_fake_set_voltage, const struct device *, int32_t, int32_t); DEFINE_FAKE_VALUE_FUNC(int, regulator_fake_get_voltage, const struct device *, int32_t *); DEFINE_FAKE_VALUE_FUNC(int, regulator_fake_set_current_limit, const struct device *, int32_t, int32_t); DEFINE_FAKE_VALUE_FUNC(int, regulator_fake_get_current_limit, const struct device *, int32_t *); DEFINE_FAKE_VALUE_FUNC(int, regulator_fake_set_mode, const struct device *, regulator_mode_t); DEFINE_FAKE_VALUE_FUNC(int, regulator_fake_get_mode, const struct device *, regulator_mode_t *); DEFINE_FAKE_VALUE_FUNC(int, regulator_fake_set_active_discharge, const struct device *, bool); DEFINE_FAKE_VALUE_FUNC(int, regulator_fake_get_active_discharge, const struct device *, bool *); DEFINE_FAKE_VALUE_FUNC(int, regulator_fake_get_error_flags, const struct device *, regulator_error_flags_t *); static struct regulator_driver_api api = { .enable = regulator_fake_enable, .disable = regulator_fake_disable, .count_voltages = regulator_fake_count_voltages, .list_voltage = regulator_fake_list_voltage, .set_voltage = regulator_fake_set_voltage, .get_voltage = regulator_fake_get_voltage, .set_current_limit = regulator_fake_set_current_limit, .get_current_limit = 
regulator_fake_get_current_limit, .set_mode = regulator_fake_set_mode, .get_mode = regulator_fake_get_mode, .set_active_discharge = regulator_fake_set_active_discharge, .get_active_discharge = regulator_fake_get_active_discharge, .get_error_flags = regulator_fake_get_error_flags, }; static int regulator_fake_init(const struct device *dev) { const struct regulator_fake_config *config = dev->config; regulator_common_data_init(dev); return regulator_common_init(dev, config->is_enabled); } /* parent regulator */ DEFINE_FAKE_VALUE_FUNC(int, regulator_parent_fake_dvs_state_set, const struct device *, regulator_dvs_state_t); DEFINE_FAKE_VALUE_FUNC(int, regulator_parent_fake_ship_mode, const struct device *); static struct regulator_parent_driver_api parent_api = { .dvs_state_set = regulator_parent_fake_dvs_state_set, .ship_mode = regulator_parent_fake_ship_mode, }; #define FAKE_DATA_NAME(node_id) _CONCAT(data_, DT_DEP_ORD(node_id)) #define FAKE_CONF_NAME(node_id) _CONCAT(config_, DT_DEP_ORD(node_id)) #define REGULATOR_FAKE_DEFINE(node_id) \ static struct regulator_fake_data FAKE_DATA_NAME(node_id); \ \ static const struct regulator_fake_config FAKE_CONF_NAME(node_id) = { \ .common = REGULATOR_DT_COMMON_CONFIG_INIT(node_id), \ .is_enabled = DT_PROP(node_id, fake_is_enabled_in_hardware), \ }; \ \ DEVICE_DT_DEFINE(node_id, regulator_fake_init, NULL, \ &FAKE_DATA_NAME(node_id), &FAKE_CONF_NAME(node_id), \ POST_KERNEL, CONFIG_REGULATOR_FAKE_INIT_PRIORITY, \ &api); #define REGULATOR_FAKE_DEFINE_ALL(inst) \ DEVICE_DT_INST_DEFINE(inst, NULL, NULL, NULL, NULL, POST_KERNEL, \ CONFIG_REGULATOR_FAKE_COMMON_INIT_PRIORITY, \ &parent_api); \ \ DT_INST_FOREACH_CHILD(inst, REGULATOR_FAKE_DEFINE) DT_INST_FOREACH_STATUS_OKAY(REGULATOR_FAKE_DEFINE_ALL) ```
/content/code_sandbox/drivers/regulator/regulator_fake.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
891
```c /* */ #define DT_DRV_COMPAT maxim_max20335_regulator #include <zephyr/drivers/i2c.h> #include <zephyr/drivers/regulator.h> #include <zephyr/dt-bindings/regulator/max20335.h> #include <zephyr/kernel.h> #include <zephyr/sys/linear_range.h> #define MAX20335_BUCK1_CFG 0x0DU #define MAX20335_BUCK1_VSET 0x0EU #define MAX20335_BUCK2_CFG 0x0FU #define MAX20335_BUCK2_VSET 0x10U #define MAX20335_BUCK12_CSET 0x11U #define MAX20335_PWR_CMD 0x1FU #define MAX20335_BUCK1_CSET_MASK 0xF0U #define MAX20335_BUCK2_CSET_MASK 0x0FU #define MAX20335_BUCK2_CSET_SHIFT 4 #define MAX20335_BUCK_EN BIT(3) #define MAX20335_BUCK_EN_MASK GENMASK(4, 3) #define MAX20335_LDO1_CFG 0x12U #define MAX20335_LDO1_VSET 0x13U #define MAX20335_LDO2_CFG 0x14U #define MAX20335_LDO2_VSET 0x15U #define MAX20335_LDO3_CFG 0x16U #define MAX20335_LDO3_VSET 0x17U #define MAX20335_LDO_MODE_MASK BIT(0) #define MAX20335_LDO_EN BIT(1) #define MAX20335_LDO_EN_MASK GENMASK(2, 1) #define MAX20335_OFF_MODE 0xB2U enum max20335_pmic_sources { MAX20335_PMIC_SOURCE_BUCK1, MAX20335_PMIC_SOURCE_BUCK2, MAX20335_PMIC_SOURCE_LDO1, MAX20335_PMIC_SOURCE_LDO2, MAX20335_PMIC_SOURCE_LDO3, }; struct regulator_max20335_desc { uint8_t vsel_reg; uint8_t enable_mask; uint8_t enable_val; uint8_t cfg_reg; const struct linear_range *uv_range; const struct linear_range *ua_range; }; struct regulator_max20335_common_config { struct i2c_dt_spec bus; }; struct regulator_max20335_config { struct regulator_common_config common; struct i2c_dt_spec bus; const struct regulator_max20335_desc *desc; uint8_t source; }; struct regulator_max20335_data { struct regulator_common_data common; }; static const struct linear_range buck1_range = LINEAR_RANGE_INIT(700000, 25000U, 0x0U, 0x3FU); static const struct linear_range buck2_range = LINEAR_RANGE_INIT(700000, 50000U, 0x0U, 0x3FU); static const struct linear_range buck12_current_limit_range = LINEAR_RANGE_INIT(50000, 25000U, 0x02U, 0x0FU); static const struct linear_range ldo1_range = 
LINEAR_RANGE_INIT(800000, 100000U, 0x0U, 0x1CU); static const struct linear_range ldo23_range = LINEAR_RANGE_INIT(900000, 100000U, 0x0U, 0x1FU); static const struct regulator_max20335_desc __maybe_unused buck1_desc = { .vsel_reg = MAX20335_BUCK1_VSET, .enable_mask = MAX20335_BUCK_EN_MASK, .enable_val = MAX20335_BUCK_EN, .cfg_reg = MAX20335_BUCK1_CFG, .uv_range = &buck1_range, .ua_range = &buck12_current_limit_range, }; static const struct regulator_max20335_desc __maybe_unused buck2_desc = { .vsel_reg = MAX20335_BUCK2_VSET, .enable_mask = MAX20335_BUCK_EN_MASK, .enable_val = MAX20335_BUCK_EN, .cfg_reg = MAX20335_BUCK2_CFG, .uv_range = &buck2_range, .ua_range = &buck12_current_limit_range, }; static const struct regulator_max20335_desc __maybe_unused ldo1_desc = { .vsel_reg = MAX20335_LDO1_VSET, .enable_mask = MAX20335_LDO_EN_MASK, .enable_val = MAX20335_LDO_EN, .cfg_reg = MAX20335_LDO1_CFG, .uv_range = &ldo1_range, }; static const struct regulator_max20335_desc __maybe_unused ldo2_desc = { .vsel_reg = MAX20335_LDO2_VSET, .enable_mask = MAX20335_LDO_EN_MASK, .enable_val = MAX20335_LDO_EN, .cfg_reg = MAX20335_LDO2_CFG, .uv_range = &ldo23_range, }; static const struct regulator_max20335_desc __maybe_unused ldo3_desc = { .vsel_reg = MAX20335_LDO3_VSET, .enable_mask = MAX20335_LDO_EN_MASK, .enable_val = MAX20335_LDO_EN, .cfg_reg = MAX20335_LDO3_CFG, .uv_range = &ldo23_range, }; static int regulator_max20335_set_enable(const struct device *dev, bool enable) { const struct regulator_max20335_config *config = dev->config; return i2c_reg_update_byte_dt(&config->bus, config->desc->cfg_reg, config->desc->enable_mask, enable ? 
config->desc->enable_val : 0); } static int regulator_max20335_enable(const struct device *dev) { return regulator_max20335_set_enable(dev, true); } static int regulator_max20335_disable(const struct device *dev) { return regulator_max20335_set_enable(dev, false); } static int regulator_max20335_set_mode(const struct device *dev, regulator_mode_t mode) { const struct regulator_max20335_config *config = dev->config; if (mode > MAX20335_LOAD_SWITCH_MODE) { return -ENOTSUP; } switch (config->source) { case MAX20335_PMIC_SOURCE_LDO1: __fallthrough; case MAX20335_PMIC_SOURCE_LDO2: __fallthrough; case MAX20335_PMIC_SOURCE_LDO3: return i2c_reg_update_byte_dt(&config->bus, config->desc->cfg_reg, MAX20335_LDO_MODE_MASK, mode); default: return -ENOTSUP; } } static unsigned int regulator_max20335_count_voltages(const struct device *dev) { const struct regulator_max20335_config *config = dev->config; return linear_range_values_count(config->desc->uv_range); } static int regulator_max20335_list_voltage(const struct device *dev, unsigned int idx, int32_t *volt_uv) { const struct regulator_max20335_config *config = dev->config; return linear_range_get_value(config->desc->uv_range, idx, volt_uv); } static int regulator_max20335_set_buck_ldo_voltage(const struct device *dev, int32_t min_uv, int32_t max_uv, const struct linear_range *range, uint8_t vout_reg) { const struct regulator_max20335_config *config = dev->config; uint16_t idx; int ret; ret = linear_range_get_win_index(range, min_uv, max_uv, &idx); if (ret == -EINVAL) { return ret; } return i2c_reg_write_byte_dt(&config->bus, vout_reg, (uint8_t)idx); } static int regulator_max20335_buck12_ldo123_get_voltage(const struct device *dev, const struct linear_range *range, uint8_t vout_reg, int32_t *volt_uv) { const struct regulator_max20335_config *config = dev->config; uint8_t idx; int ret; ret = i2c_reg_read_byte_dt(&config->bus, vout_reg, &idx); if (ret < 0) { return ret; } return linear_range_get_value(range, idx, volt_uv); } 
static int regulator_max20335_get_voltage(const struct device *dev, int32_t *volt_uv) { const struct regulator_max20335_config *config = dev->config; return regulator_max20335_buck12_ldo123_get_voltage(dev, config->desc->uv_range, config->desc->vsel_reg, volt_uv); } static int regulator_max20335_set_voltage(const struct device *dev, int32_t min_uv, int32_t max_uv) { const struct regulator_max20335_config *config = dev->config; return regulator_max20335_set_buck_ldo_voltage(dev, min_uv, max_uv, config->desc->uv_range, config->desc->vsel_reg); } static unsigned int regulator_max20335_count_current_limits(const struct device *dev) { const struct regulator_max20335_config *config = dev->config; if (config->source != MAX20335_PMIC_SOURCE_BUCK1 && config->source != MAX20335_PMIC_SOURCE_BUCK2) { return -ENOTSUP; } return linear_range_values_count(config->desc->ua_range); } static int regulator_max20335_list_current_limit(const struct device *dev, unsigned int idx, int32_t *current_ua) { const struct regulator_max20335_config *config = dev->config; if (config->source != MAX20335_PMIC_SOURCE_BUCK1 && config->source != MAX20335_PMIC_SOURCE_BUCK2) { return -ENOTSUP; } return linear_range_get_value(config->desc->ua_range, idx, current_ua); } static int regulator_max20335_set_current_limit(const struct device *dev, int32_t min_ua, int32_t max_ua) { const struct regulator_max20335_config *config = dev->config; uint8_t val; uint16_t idx; int ret; if (config->source != MAX20335_PMIC_SOURCE_BUCK1 && config->source != MAX20335_PMIC_SOURCE_BUCK2) { return -ENOTSUP; } ret = i2c_reg_read_byte_dt(&config->bus, MAX20335_BUCK12_CSET, &val); if (ret < 0) { return ret; } ret = linear_range_get_win_index(config->desc->ua_range, min_ua, max_ua, &idx); if (ret == -EINVAL) { return ret; } switch (config->source) { case MAX20335_PMIC_SOURCE_BUCK1: val = idx | (val & MAX20335_BUCK1_CSET_MASK); break; case MAX20335_PMIC_SOURCE_BUCK2: val = (idx << MAX20335_BUCK2_CSET_SHIFT) | (val & 
MAX20335_BUCK2_CSET_MASK); break; default: return -ENOTSUP; } return i2c_reg_write_byte_dt(&config->bus, MAX20335_BUCK12_CSET, val); } static int regulator_max20335_power_off(const struct device *dev) { const struct regulator_max20335_common_config *common_config = dev->config; return i2c_reg_write_byte_dt(&common_config->bus, MAX20335_PWR_CMD, MAX20335_OFF_MODE); } static int regulator_max20335_init(const struct device *dev) { const struct regulator_max20335_config *config = dev->config; if (!i2c_is_ready_dt(&config->bus)) { return -ENODEV; } regulator_common_data_init(dev); return regulator_common_init(dev, false); } static int regulator_max20335_common_init(const struct device *dev) { const struct regulator_max20335_common_config *common_config = dev->config; if (!i2c_is_ready_dt(&common_config->bus)) { return -ENODEV; } return 0; } static const struct regulator_parent_driver_api parent_api = { .ship_mode = regulator_max20335_power_off, }; static const struct regulator_driver_api api = { .enable = regulator_max20335_enable, .disable = regulator_max20335_disable, .set_mode = regulator_max20335_set_mode, .count_voltages = regulator_max20335_count_voltages, .list_voltage = regulator_max20335_list_voltage, .set_voltage = regulator_max20335_set_voltage, .get_voltage = regulator_max20335_get_voltage, .count_current_limits = regulator_max20335_count_current_limits, .list_current_limit = regulator_max20335_list_current_limit, .set_current_limit = regulator_max20335_set_current_limit, }; #define REGULATOR_MAX20335_DEFINE(node_id, id, child_name, _source) \ static const struct regulator_max20335_config regulator_max20335_config_##id = { \ .common = REGULATOR_DT_COMMON_CONFIG_INIT(node_id), \ .bus = I2C_DT_SPEC_GET(DT_GPARENT(node_id)), \ .desc = &child_name##_desc, \ .source = _source, \ }; \ \ static struct regulator_max20335_data regulator_max20335_data_##id; \ DEVICE_DT_DEFINE(node_id, regulator_max20335_init, NULL, \ &regulator_max20335_data_##id, \ 
&regulator_max20335_config_##id, \ POST_KERNEL, \ CONFIG_REGULATOR_MAXIM_MAX20335_INIT_PRIORITY, \ &api); #define REGULATOR_MAX20335_DEFINE_COND(inst, child, source) \ COND_CODE_1(DT_NODE_EXISTS(DT_INST_CHILD(inst, child)), \ (REGULATOR_MAX20335_DEFINE(DT_INST_CHILD(inst, child), \ child##inst, child, source)), \ ()) #define REGULATOR_MAX20335_DEFINE_ALL(inst) \ static const struct regulator_max20335_common_config common_config_##inst = { \ .bus = I2C_DT_SPEC_GET(DT_INST_PARENT(inst)), \ }; \ \ DEVICE_DT_INST_DEFINE(inst, regulator_max20335_common_init, \ NULL, NULL, &common_config_##inst, POST_KERNEL, \ CONFIG_REGULATOR_MAXIM_MAX20335_COMMON_INIT_PRIORITY, \ &parent_api); \ \ REGULATOR_MAX20335_DEFINE_COND(inst, buck1, MAX20335_PMIC_SOURCE_BUCK1) \ REGULATOR_MAX20335_DEFINE_COND(inst, buck2, MAX20335_PMIC_SOURCE_BUCK2) \ REGULATOR_MAX20335_DEFINE_COND(inst, ldo1, MAX20335_PMIC_SOURCE_LDO1) \ REGULATOR_MAX20335_DEFINE_COND(inst, ldo2, MAX20335_PMIC_SOURCE_LDO2) \ REGULATOR_MAX20335_DEFINE_COND(inst, ldo3, MAX20335_PMIC_SOURCE_LDO3) DT_INST_FOREACH_STATUS_OKAY(REGULATOR_MAX20335_DEFINE_ALL) ```
/content/code_sandbox/drivers/regulator/regulator_max20335.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,274
```c /* */ #define DT_DRV_COMPAT cirrus_cp9314 #include <zephyr/device.h> #include <zephyr/devicetree.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/i2c.h> #include <zephyr/kernel.h> #include <zephyr/logging/log.h> #include <zephyr/drivers/regulator.h> #include <zephyr/sys_clock.h> #include <zephyr/sys/util.h> LOG_MODULE_REGISTER(CP9314, CONFIG_REGULATOR_LOG_LEVEL); #define CP9314_REG_DEVICE_ID 0x0 #define CP9314_DEV_ID 0xA4 #define CP9314_REG_VOUT_UVP 0x2 #define CP9314_VOUT_UVP_DIS_0 BIT(7) #define CP9314_VOUT_UVP_DIS_1 BIT(3) #define CP9314_VOUT_UVP_DIS CP9314_VOUT_UVP_DIS_0 | CP9314_VOUT_UVP_DIS_1 #define CP9314_VOUT_UVP GENMASK(1, 0) #define CP9314_REG_OPTION_REG_1 0x3 #define CP9314_LB1_DELAY_CFG GENMASK(5, 4) #define CP9314_LB1_DELTA_CFG_0 GENMASK(3, 0) #define CP9314_REG_OPTION_REG_2 0x4 #define CP9314_LB2_DELTA_CFG_0 GENMASK(7, 5) #define CP9314_MODE_CTRL_MIN_FREQ_0 GENMASK(2, 0) #define CP9314_REG_IIN_OCP 0x5 #define CP9314_IIN_OCP_DIS BIT(7) #define CP9314_TM_IIN_OC_CFG GENMASK(2, 0) #define CP9314_REG_IIN_PEAK_OCP 0x6 #define CP9314_IIN_PEAK_OCP_DIS BIT(7) #define CP9314_IIN_PEAK_OCP GENMASK(2, 0) #define CP9314_REG_VIN2OUT_OVP 0x7 #define CP9314_VIN2OUT_OVP GENMASK(1, 0) #define CP9314_REG_VIN2OUT_UVP 0x8 #define CP9314_VIN2OUT_UVP GENMASK(1, 0) #define CP9314_REG_CONVERTER 0x9 #define CP9314_FASTSHDN_PIN_STS BIT(6) #define CP9314_PGOOD_PIN_STS BIT(5) #define CP9314_ACTIVE_STS BIT(1) #define CP9314_REG_CTRL1 0xA #define CP9314_CP_EN BIT(7) #define CP9314_MODE_CTRL_EN BIT(3) #define CP9314_REG_CTRL4 0xD #define CP9314_SYNC_FUNCTION_EN BIT(7) #define CP9314_SYNC_HOST_EN BIT(6) #define CP9314_FRC_SYNC_MODE BIT(5) #define CP9314_FRC_OP_MODE BIT(3) #define CP9314_MODE_MASK GENMASK(2, 0) #define CP9314_MODE_2TO1 1 #define CP9314_MODE_3TO1 2 #define CP9314_REG_FLT_FLAG 0x12 #define CP9314_VIN_OVP_FLAG BIT(1) #define CP9314_VOUT_OVP_FLAG BIT(0) #define CP9314_REG_COMP_FLAG0 0x2A #define CP9314_IIN_OCP_FLAG BIT(4) #define CP9314_REG_COMP_FLAG1 
0x2C #define CP9314_VIN2OUT_OVP_FLAG BIT(0) #define CP9314_REG_LION_CFG_1 0x31 #define CP9314_LB2_DELTA_CFG_1 GENMASK(7, 5) #define CP9314_REG_LION_INT_MASK_2 0x32 #define CP9314_CLEAR_INT BIT(6) #define CP9314_REG_LION_CFG_3 0x34 #define CP9314_LB_MIN_FREQ_SEL_0 GENMASK(7, 6) #define CP9314_MODE_CTRL_UPDATE_BW_1 GENMASK(5, 3) #define CP9314_REG_LB_CTRL 0x38 #define CP9314_LB1_DELTA_CFG_1 GENMASK(6, 3) #define CP9314_LB_MIN_FREQ_SEL_1 GENMASK(2, 1) #define CP9314_REG_CRUS_CTRL 0x40 #define CP9314_CRUS_KEY_LOCK 0x0 #define CP9314_CRUS_KEY_UNLOCK 0xAA #define CP9314_CRUS_KEY_SOFT_RESET 0xC6 #define CP9314_REG_TRIM_5 0x46 #define CP9314_CSI_CHOP_EN BIT(2) #define CP9314_REG_TRIM_8 0x49 #define CP9314_MODE_CTRL_UPDATE_BW_0 GENMASK(2, 0) #define CP9314_REG_TRIM_9 0x4A #define CP9314_FORCE_KEY_POLARITY BIT(2) #define CP9314_TM_KEY_POLARITY BIT(1) #define CP9314_KEY_ACTIVE_LOW 0 #define CP9314_KEY_ACTIVE_HIGH CP9314_TM_KEY_POLARITY #define CP9314_REG_BST_CP_PD_CFG 0x58 #define CP9314_LB1_BLANK_CFG BIT(5) #define CP9314_REG_CFG_9 0x59 #define CP9314_VOUT_PCHG_TIME_CFG_0 GENMASK(2, 1) #define CP9314_REG_CFG_10 0x5A #define CP9314_VOUT_PCHG_TIME_CFG_1 GENMASK(1, 0) #define CP9314_REG_BC_STS_C 0x62 #define CP9314_CHIP_REV_MASK GENMASK(7, 4) #define CP9314_CHIP_REV_B0 0x10 #define CP9314_REG_FORCE_SC_MISC 0x69 #define CP9314_FORCE_CSI_EN BIT(0) #define CP9314_REG_TSBAT_CTRL 0x72 #define CP9314_LB1_STOP_PHASE_SEL BIT(4) #define CP9314_REG_TEST_MODE_CTRL 0x66 #define CP9314_SOFT_RESET_REQ BIT(0) #define CP9314_REG_LION_COMP_CTRL_1 0x79 #define CP9314_VIN_SWITCH_OK_DIS_0 BIT(3) #define CP9314_VOUT_OV_CFG_0 GENMASK(5, 4) #define CP9314_VIN_SWITCH_OK_CFG GENMASK(1, 0) #define CP9314_REG_LION_COMP_CTRL_2 0x7A #define CP9314_VOUT_OV_CFG_1 GENMASK(3, 2) #define CP9314_REG_LION_COMP_CTRL_3 0x7B #define CP9314_VIN_OV_CFG_0 GENMASK(4, 3) #define CP9314_VIN_OV_CFG_1 GENMASK(1, 0) #define CP9314_VIN_OV_CFG CP9314_VIN_OV_CFG_0 | CP9314_VIN_OV_CFG_1 #define CP9314_REG_LION_COMP_CTRL_4 0x7C 
#define CP9314_FORCE_IIN_OC_CFG BIT(1) #define CP9314_VIN_SWITCH_OK_DIS_1 BIT(5) #define CP9314_REG_PTE_REG_2 0x8B #define CP9314_PTE_2_MASK GENMASK(7, 5) #define CP9314_PTE_2_OTP_1 0x0 #define CP9314_PTE_2_OTP_2 0x1 #define CP9314_FAULT1_STS 0x9A #define CP9314_VIN_OV_STS BIT(4) #define CP9314_SYS_STS 0x98 #define CP9314_VIN_UV_STS BIT(7) #define CP9314_REG_TM_SEQ_CTRL_1 0xAA #define CP9314_TM_CSI_EN BIT(5) #define CP9314_REG_STS_PIN_ADC_0 0xB4 #define CP9314_STS_PROG_LVL GENMASK(7, 4) #define CP9314_STS_ADDR_LVL GENMASK(3, 0) #define CP9314_SOFT_RESET_DELAY_MSEC 200 #define CP9314_EN_DEBOUNCE_USEC 3000 #define CP9314_T_STARTUP_MSEC 120 #define CP9314_DEVICE_MODE_HOST_4GANG_0x78 0x0 #define CP9314_DEVICE_MODE_HOST_4GANG_0x72 0x1 #define CP9314_DEVICE_MODE_HOST_3GANG_0x78 0x2 #define CP9314_DEVICE_MODE_HOST_3GANG_0x72 0x3 #define CP9314_DEVICE_MODE_HOST_2GANG_0x78 0x4 #define CP9314_DEVICE_MODE_HOST_2GANG_0x72 0x5 #define CP9314_DEVICE_MODE_HOST_STANDALONE_0x78 0x6 #define CP9314_DEVICE_MODE_HOST_STANDALONE_0x72 0x7 #define CP9314_DEVICE_MODE_DEVICE_4_0x68 0x8 #define CP9314_DEVICE_MODE_DEVICE_4_0x54 0x9 #define CP9314_DEVICE_MODE_DEVICE_3_0x56 0xA #define CP9314_DEVICE_MODE_DEVICE_3_0x53 0xB #define CP9314_DEVICE_MODE_DEVICE_2_0x79 0xC #define CP9314_DEVICE_MODE_DEVICE_2_0x73 0xD enum cp9314_sync_roles { CP9314_ROLE_HOST, CP9314_ROLE_DEV2, CP9314_ROLE_DEV3, CP9314_ROLE_DEV4, CP9314_ROLE_STANDALONE, }; struct regulator_cp9314_config { struct regulator_common_config common; struct i2c_dt_spec i2c; struct gpio_dt_spec en_pin; struct gpio_dt_spec pgood_pin; uint8_t initial_op_mode_idx; }; struct regulator_cp9314_data { struct regulator_common_data data; enum cp9314_sync_roles sync_role; }; struct cp9314_reg_patch { uint8_t reg_addr; uint8_t mask; uint8_t value; }; /* * HW errata patch for B0 silicon. Intended to correct POR configuration values for protection * comparators, disable OCP comparators, and enable the output undervoltage comparator. 
*/ static struct cp9314_reg_patch b0_reg_patch[18] = { {CP9314_REG_CRUS_CTRL, GENMASK(7, 0), CP9314_CRUS_KEY_UNLOCK}, {CP9314_REG_LION_COMP_CTRL_3, CP9314_VIN_OV_CFG, 0x1B}, {CP9314_REG_LION_COMP_CTRL_1, CP9314_VOUT_OV_CFG_0, 0x30}, {CP9314_REG_LION_COMP_CTRL_2, CP9314_VOUT_OV_CFG_1, 0xC}, {CP9314_REG_VIN2OUT_OVP, CP9314_VIN2OUT_OVP, 0x2}, {CP9314_REG_VIN2OUT_UVP, CP9314_VIN2OUT_UVP, 0x1}, {CP9314_REG_VOUT_UVP, CP9314_VOUT_UVP_DIS, 0}, {CP9314_REG_VOUT_UVP, CP9314_VOUT_UVP, 0}, {CP9314_REG_LION_COMP_CTRL_1, CP9314_VIN_SWITCH_OK_DIS_0, 0}, {CP9314_REG_LION_COMP_CTRL_4, CP9314_VIN_SWITCH_OK_DIS_1, 0}, {CP9314_REG_LION_COMP_CTRL_1, CP9314_VIN_SWITCH_OK_CFG, 0}, {CP9314_REG_LION_CFG_3, CP9314_LB_MIN_FREQ_SEL_0, 0x80}, {CP9314_REG_LB_CTRL, CP9314_LB_MIN_FREQ_SEL_1, 0x4}, {CP9314_REG_TRIM_8, CP9314_MODE_CTRL_UPDATE_BW_0, 0x2}, {CP9314_REG_LION_CFG_3, CP9314_MODE_CTRL_UPDATE_BW_1, 0x2}, {CP9314_REG_IIN_OCP, CP9314_IIN_OCP_DIS, CP9314_IIN_OCP_DIS}, {CP9314_REG_IIN_PEAK_OCP, CP9314_IIN_PEAK_OCP_DIS, CP9314_IIN_PEAK_OCP_DIS}, {CP9314_REG_CRUS_CTRL, GENMASK(7, 0), CP9314_CRUS_KEY_LOCK}, }; /* OTP memory errata patch for OTP v1. Corrects trim errata. 
*/ static struct cp9314_reg_patch otp_1_patch[3] = { {CP9314_REG_OPTION_REG_1, CP9314_LB1_DELAY_CFG, 0}, {CP9314_REG_BST_CP_PD_CFG, CP9314_LB1_BLANK_CFG, CP9314_LB1_BLANK_CFG}, {CP9314_REG_TSBAT_CTRL, CP9314_LB1_STOP_PHASE_SEL, CP9314_LB1_STOP_PHASE_SEL}, }; static int regulator_cp9314_get_error_flags(const struct device *dev, regulator_error_flags_t *flags) { const struct regulator_cp9314_config *config = dev->config; uint8_t val[3]; int ret; *flags = 0U; ret = i2c_reg_read_byte_dt(&config->i2c, CP9314_REG_FLT_FLAG, &val[0]); if (ret < 0) { return ret; } if (FIELD_GET(CP9314_VIN_OVP_FLAG, val[0]) || FIELD_GET(CP9314_VOUT_OVP_FLAG, val[0])) { *flags |= REGULATOR_ERROR_OVER_VOLTAGE; } ret = i2c_reg_read_byte_dt(&config->i2c, CP9314_REG_COMP_FLAG0, &val[1]); if (ret < 0) { return ret; } if (FIELD_GET(CP9314_IIN_OCP_FLAG, val[1])) { *flags |= REGULATOR_ERROR_OVER_CURRENT; } ret = i2c_reg_read_byte_dt(&config->i2c, CP9314_REG_COMP_FLAG1, &val[2]); if (ret < 0) { return ret; } if (FIELD_GET(CP9314_VIN2OUT_OVP_FLAG, val[2])) { *flags |= REGULATOR_ERROR_OVER_VOLTAGE; } LOG_DBG("FLT_FLAG = 0x%x, COMP_FLAG0 = 0x%x, COMP_FLAG1 = 0x%x", val[0], val[1], val[2]); return 0; } static int regulator_cp9314_disable(const struct device *dev) { const struct regulator_cp9314_config *config = dev->config; if (config->en_pin.port != NULL) { return gpio_pin_set_dt(&config->en_pin, 0); } return i2c_reg_update_byte_dt(&config->i2c, CP9314_REG_CTRL1, CP9314_CP_EN, 0); } static int regulator_cp9314_enable(const struct device *dev) { const struct regulator_cp9314_config *config = dev->config; uint8_t value; int ret; ret = i2c_reg_read_byte_dt(&config->i2c, CP9314_REG_CONVERTER, &value); if (ret < 0) { return ret; } if (value & CP9314_ACTIVE_STS) { return 0; } ret = i2c_reg_update_byte_dt(&config->i2c, CP9314_REG_LION_INT_MASK_2, CP9314_CLEAR_INT, CP9314_CLEAR_INT); if (ret < 0) { return ret; } ret = i2c_reg_update_byte_dt(&config->i2c, CP9314_REG_LION_INT_MASK_2, CP9314_CLEAR_INT, 0); if (ret 
< 0) { return ret; } if (config->en_pin.port != NULL) { ret = gpio_pin_set_dt(&config->en_pin, 1); if (ret < 0) { return ret; } } else { ret = i2c_reg_update_byte_dt(&config->i2c, CP9314_REG_CTRL1, CP9314_CP_EN, CP9314_CP_EN); if (ret < 0) { LOG_ERR("Unable to set CP_EN"); return ret; } } k_msleep(CP9314_T_STARTUP_MSEC); if (config->pgood_pin.port != NULL) { ret = gpio_pin_get_dt(&config->pgood_pin); if (ret < 0) { return ret; } else if (ret == 0) { return -EINVAL; } } else { ret = i2c_reg_read_byte_dt(&config->i2c, CP9314_REG_CONVERTER, &value); if (ret < 0) { return ret; } else if (FIELD_GET(CP9314_PGOOD_PIN_STS, value) == 0U) { return -EINVAL; } } return 0; } static int cp9314_cfg_sync(const struct device *dev) { const struct regulator_cp9314_config *config = dev->config; struct regulator_cp9314_data *data = dev->data; uint8_t value = 0; int ret; if (data->sync_role == CP9314_ROLE_HOST) { value = CP9314_SYNC_HOST_EN; } ret = i2c_reg_update_byte_dt(&config->i2c, CP9314_REG_CTRL4, CP9314_SYNC_HOST_EN, value); if (ret < 0) { return ret; } ret = i2c_reg_update_byte_dt(&config->i2c, CP9314_REG_CTRL4, CP9314_SYNC_FUNCTION_EN, CP9314_SYNC_FUNCTION_EN); if (ret < 0) { return ret; } return i2c_reg_update_byte_dt(&config->i2c, CP9314_REG_CTRL4, CP9314_FRC_SYNC_MODE, CP9314_FRC_SYNC_MODE); } static int regulator_cp9314_b0_init(const struct device *dev) { const struct regulator_cp9314_config *config = dev->config; int ret; for (size_t i = 0U; i < ARRAY_SIZE(b0_reg_patch); i++) { ret = i2c_reg_update_byte_dt(&config->i2c, b0_reg_patch[i].reg_addr, b0_reg_patch[i].mask, b0_reg_patch[i].value); if (ret < 0) { return ret; } } return 0; } static int cp9314_do_soft_reset(const struct device *dev) { const struct regulator_cp9314_config *config = dev->config; int ret; ret = i2c_reg_write_byte_dt(&config->i2c, CP9314_REG_CRUS_CTRL, CP9314_CRUS_KEY_SOFT_RESET); if (ret < 0) { return ret; } ret = i2c_reg_update_byte_dt(&config->i2c, CP9314_REG_TEST_MODE_CTRL, CP9314_SOFT_RESET_REQ, 
CP9314_SOFT_RESET_REQ); if (ret < 0) { return ret; } k_msleep(CP9314_SOFT_RESET_DELAY_MSEC); return 0; } static int regulator_cp9314_otp_init(const struct device *dev) { const struct regulator_cp9314_config *config = dev->config; uint8_t value; int i, ret; /* * The PTE_2 field in the PTE_REG_2 register contains the value representing the OTP * burned on the CP9314 device. The PTE_2 values in relation to the OTP table names * are shown below. * * OTP-1 = 0x0, OTP-2 = 0x1, OTP-3 = 0x3, OTP-4 = 0x4 */ ret = i2c_reg_read_byte_dt(&config->i2c, CP9314_REG_PTE_REG_2, &value); if (ret < 0) { return ret; } value = FIELD_GET(CP9314_PTE_2_MASK, value); ret = i2c_reg_write_byte_dt(&config->i2c, CP9314_REG_CRUS_CTRL, CP9314_CRUS_KEY_UNLOCK); if (ret < 0) { return ret; } if (value == CP9314_PTE_2_OTP_1) { for (i = 0; i < ARRAY_SIZE(otp_1_patch); i++) { i2c_reg_update_byte_dt(&config->i2c, otp_1_patch[i].reg_addr, otp_1_patch[i].mask, otp_1_patch[i].value); } } if (value <= CP9314_PTE_2_OTP_2) { i2c_reg_update_byte_dt(&config->i2c, CP9314_REG_CFG_9, CP9314_VOUT_PCHG_TIME_CFG_0, 0); } return i2c_reg_write_byte_dt(&config->i2c, CP9314_REG_CRUS_CTRL, CP9314_CRUS_KEY_LOCK); } static int regulator_cp9314_init(const struct device *dev) { const struct regulator_cp9314_config *config = dev->config; struct regulator_cp9314_data *data = dev->data; uint8_t value; int ret; if (!i2c_is_ready_dt(&config->i2c)) { LOG_ERR("Bus device is not ready"); return -ENODEV; } ret = i2c_reg_read_byte_dt(&config->i2c, CP9314_REG_DEVICE_ID, &value); if (ret < 0) { LOG_ERR("No device found:%d\n", ret); return ret; } if (value != CP9314_DEV_ID) { LOG_ERR("Invalid device ID found:0x%x!\n", value); return -ENOTSUP; } if (config->pgood_pin.port != NULL) { if (!gpio_is_ready_dt(&config->pgood_pin)) { return -ENODEV; } ret = gpio_pin_configure_dt(&config->pgood_pin, GPIO_INPUT); if (ret < 0) { return ret; } } if (config->en_pin.port != NULL) { if (!gpio_is_ready_dt(&config->en_pin)) { return -ENODEV; } ret = 
gpio_pin_configure_dt(&config->en_pin, GPIO_OUTPUT_INACTIVE); if (ret < 0) { return ret; } k_usleep(CP9314_EN_DEBOUNCE_USEC); } ret = cp9314_do_soft_reset(dev); if (ret < 0) { return ret; } ret = i2c_reg_read_byte_dt(&config->i2c, CP9314_REG_BC_STS_C, &value); if (ret < 0) { return ret; } value &= CP9314_CHIP_REV_MASK; switch (value) { case CP9314_CHIP_REV_B0: LOG_INF("Found CP9314 REV:0x%x\n", value); ret = regulator_cp9314_b0_init(dev); if (ret < 0) { return ret; } break; default: LOG_ERR("Invalid CP9314 REV:0x%x\n", value); return -ENOTSUP; } ret = regulator_cp9314_otp_init(dev); if (ret < 0) { return ret; } ret = i2c_reg_update_byte_dt(&config->i2c, CP9314_REG_CTRL4, CP9314_FRC_OP_MODE, CP9314_FRC_OP_MODE); if (ret < 0) { return ret; } ret = i2c_reg_read_byte_dt(&config->i2c, CP9314_REG_STS_PIN_ADC_0, &value); if (ret < 0) { return ret; } value = FIELD_PREP(CP9314_STS_ADDR_LVL, value); switch (value) { case CP9314_DEVICE_MODE_HOST_4GANG_0x78: case CP9314_DEVICE_MODE_HOST_4GANG_0x72: case CP9314_DEVICE_MODE_HOST_3GANG_0x78: case CP9314_DEVICE_MODE_HOST_3GANG_0x72: case CP9314_DEVICE_MODE_HOST_2GANG_0x78: case CP9314_DEVICE_MODE_HOST_2GANG_0x72: data->sync_role = CP9314_ROLE_HOST; break; case CP9314_DEVICE_MODE_HOST_STANDALONE_0x78: case CP9314_DEVICE_MODE_HOST_STANDALONE_0x72: data->sync_role = CP9314_ROLE_STANDALONE; break; case CP9314_DEVICE_MODE_DEVICE_4_0x68: case CP9314_DEVICE_MODE_DEVICE_4_0x54: data->sync_role = CP9314_ROLE_DEV4; break; case CP9314_DEVICE_MODE_DEVICE_3_0x56: case CP9314_DEVICE_MODE_DEVICE_3_0x53: data->sync_role = CP9314_ROLE_DEV3; break; case CP9314_DEVICE_MODE_DEVICE_2_0x79: case CP9314_DEVICE_MODE_DEVICE_2_0x73: data->sync_role = CP9314_ROLE_DEV2; break; default: return -EINVAL; } if (data->sync_role != CP9314_ROLE_STANDALONE) { ret = cp9314_cfg_sync(dev); if (ret < 0) { return ret; } } if (config->initial_op_mode_idx != 0) { ret = i2c_reg_update_byte_dt(&config->i2c, CP9314_REG_CTRL4, CP9314_MODE_MASK, config->initial_op_mode_idx); if 
(ret < 0) { return ret; } } regulator_common_data_init(dev); return regulator_common_init(dev, false); } static const struct regulator_driver_api api = { .enable = regulator_cp9314_enable, .disable = regulator_cp9314_disable, .get_error_flags = regulator_cp9314_get_error_flags, }; #define REGULATOR_CP9314_DEFINE(inst) \ static struct regulator_cp9314_data data_##inst; \ \ static const struct regulator_cp9314_config config_##inst = { \ .common = REGULATOR_DT_INST_COMMON_CONFIG_INIT(inst), \ .i2c = I2C_DT_SPEC_INST_GET(inst), \ .en_pin = GPIO_DT_SPEC_INST_GET_OR(inst, cirrus_en_gpios, {}), \ .pgood_pin = GPIO_DT_SPEC_INST_GET_OR(inst, cirrus_pgood_gpios, {}), \ .initial_op_mode_idx = \ DT_INST_ENUM_IDX_OR(inst, cirrus_initial_switched_capacitor_mode, -1) + 1, \ }; \ \ DEVICE_DT_INST_DEFINE(inst, regulator_cp9314_init, NULL, &data_##inst, &config_##inst, \ POST_KERNEL, CONFIG_REGULATOR_CP9314_INIT_PRIORITY, &api); DT_INST_FOREACH_STATUS_OKAY(REGULATOR_CP9314_DEFINE) ```
/content/code_sandbox/drivers/regulator/regulator_cp9314.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,072
```c /* */ #define DT_DRV_COMPAT regulator_gpio #include <stdint.h> #include <zephyr/drivers/regulator.h> #include <zephyr/drivers/gpio.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(regulator_gpio, CONFIG_REGULATOR_LOG_LEVEL); struct regulator_gpio_config { struct regulator_common_config common; const struct gpio_dt_spec *gpios; uint8_t num_gpios; const int32_t *states; uint8_t states_cnt; const struct gpio_dt_spec enable; }; struct regulator_gpio_data { struct regulator_common_data common; int32_t current_volt_uv; }; static int regulator_gpio_apply_state(const struct device *dev, uint32_t state) { const struct regulator_gpio_config *cfg = dev->config; for (unsigned int gpio_idx = 0; gpio_idx < cfg->num_gpios; gpio_idx++) { int ret; int new_state_of_gpio = (state >> gpio_idx) & 0x1; ret = gpio_pin_get_dt(&cfg->gpios[gpio_idx]); if (ret < 0) { LOG_ERR("%s: can't get pin state", dev->name); return ret; } if (ret != new_state_of_gpio) { ret = gpio_pin_set_dt(&cfg->gpios[gpio_idx], new_state_of_gpio); if (ret < 0) { LOG_ERR("%s: can't set pin state", dev->name); return ret; } } } return 0; } static int regulator_gpio_enable(const struct device *dev) { const struct regulator_gpio_config *cfg = dev->config; int ret; if (cfg->enable.port == NULL) { return 0; } ret = gpio_pin_set_dt(&cfg->enable, 1); if (ret < 0) { LOG_ERR("%s: can't enable regulator!", dev->name); return ret; } return 0; } static int regulator_gpio_disable(const struct device *dev) { const struct regulator_gpio_config *cfg = dev->config; if (cfg->enable.port == NULL) { return 0; } return gpio_pin_set_dt(&cfg->enable, 0); } static unsigned int regulator_gpio_count_voltages(const struct device *dev) { const struct regulator_gpio_config *cfg = dev->config; return cfg->states_cnt; } static int regulator_gpio_list_voltage(const struct device *dev, unsigned int idx, int32_t *volt_uv) { const struct regulator_gpio_config *cfg = dev->config; if (idx >= cfg->states_cnt) { LOG_ERR("%s: can't get list 
voltage for idx %u", dev->name, idx); return -EINVAL; } *volt_uv = cfg->states[idx * 2]; return 0; } static int regulator_gpio_set_voltage(const struct device *dev, int32_t min_uv, int32_t max_uv) { const struct regulator_gpio_config *cfg = dev->config; struct regulator_gpio_data *data = dev->data; int32_t best_voltage = INT32_MAX; unsigned int best_state; int ret = 0; /* choose minimum possible voltage in range provided by a caller */ for (unsigned int state_idx = 0; state_idx < cfg->states_cnt; state_idx++) { if (!IN_RANGE(cfg->states[state_idx * 2], min_uv, max_uv) || cfg->states[state_idx * 2] >= best_voltage) { continue; } best_voltage = cfg->states[state_idx * 2]; best_state = cfg->states[state_idx * 2 + 1]; } if (best_voltage == INT32_MAX) { LOG_ERR("%s: can't find voltage is states", dev->name); return -EINVAL; } if (best_voltage == data->current_volt_uv) { return 0; } ret = regulator_gpio_apply_state(dev, best_state); if (ret) { return ret; } data->current_volt_uv = best_voltage; return 0; } static int regulator_gpio_get_voltage(const struct device *dev, int32_t *volt_uv) { const struct regulator_gpio_data *data = dev->data; *volt_uv = data->current_volt_uv; return 0; } static const struct regulator_driver_api regulator_gpio_api = { .enable = regulator_gpio_enable, .disable = regulator_gpio_disable, .set_voltage = regulator_gpio_set_voltage, .get_voltage = regulator_gpio_get_voltage, .count_voltages = regulator_gpio_count_voltages, .list_voltage = regulator_gpio_list_voltage, }; static int regulator_gpio_init(const struct device *dev) { const struct regulator_gpio_config *cfg = dev->config; int ret; regulator_common_data_init(dev); for (unsigned int gpio_idx = 0; gpio_idx < cfg->num_gpios; gpio_idx++) { if (!gpio_is_ready_dt(&cfg->gpios[gpio_idx])) { LOG_ERR("%s: gpio pin: %s not ready", dev->name, cfg->gpios[gpio_idx].port ? 
cfg->gpios[gpio_idx].port->name : "null"); return -ENODEV; } ret = gpio_pin_configure_dt(&cfg->gpios[gpio_idx], GPIO_OUTPUT); if (ret < 0) { LOG_ERR("%s: can't configure pin (%d) as output", dev->name, cfg->gpios[gpio_idx].pin); return ret; } } if (cfg->enable.port != NULL) { if (!gpio_is_ready_dt(&cfg->enable)) { LOG_ERR("%s: gpio pin: %s not ready", dev->name, cfg->enable.port->name); return -ENODEV; } ret = gpio_pin_configure_dt(&cfg->enable, GPIO_OUTPUT | GPIO_OUTPUT_INIT_LOW); if (ret < 0) { LOG_ERR("%s: can't configure enable pin (%d) as output", dev->name, cfg->enable.pin); return ret; } } return regulator_common_init(dev, false); } #define REG_GPIO_CONTEXT_GPIOS_SPEC_ELEM(_node_id, _prop, _idx) \ GPIO_DT_SPEC_GET_BY_IDX(_node_id, _prop, _idx), #define REG_GPIO_CONTEXT_GPIOS_FOREACH_ELEM(inst) \ DT_FOREACH_PROP_ELEM(DT_DRV_INST(inst), gpios, REG_GPIO_CONTEXT_GPIOS_SPEC_ELEM) #define REG_GPIO_CONTEXT_GPIOS_INITIALIZE(inst) \ .gpios = (const struct gpio_dt_spec[]){REG_GPIO_CONTEXT_GPIOS_FOREACH_ELEM(inst)}, \ .num_gpios = DT_INST_PROP_LEN(inst, gpios) #define REGULATOR_GPIO_DEFINE(inst) \ static struct regulator_gpio_data data##inst = { \ .current_volt_uv = INT32_MAX, \ }; \ BUILD_ASSERT(!(DT_INST_PROP_LEN(inst, states) & 0x1), \ "Number of regulator states should be even"); \ static const struct regulator_gpio_config config##inst = { \ .common = REGULATOR_DT_INST_COMMON_CONFIG_INIT(inst), \ REG_GPIO_CONTEXT_GPIOS_INITIALIZE(inst), \ .enable = GPIO_DT_SPEC_INST_GET_OR(inst, enable_gpios, {0}), \ .states = ((const int[])DT_INST_PROP(inst, states)), \ .states_cnt = DT_INST_PROP_LEN(inst, states) / 2, \ }; \ DEVICE_DT_INST_DEFINE(inst, regulator_gpio_init, NULL, &data##inst, &config##inst, \ POST_KERNEL, CONFIG_REGULATOR_GPIO_INIT_PRIORITY, \ &regulator_gpio_api); DT_INST_FOREACH_STATUS_OKAY(REGULATOR_GPIO_DEFINE) ```
/content/code_sandbox/drivers/regulator/regulator_gpio.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,662
```unknown config REGULATOR_FAKE bool "Fake regulator driver" default y depends on DT_HAS_ZEPHYR_FAKE_REGULATOR_ENABLED help Enable support for the FFF-based fake regulator driver. if REGULATOR_FAKE config REGULATOR_FAKE_COMMON_INIT_PRIORITY int "Fake regulator driver init priority (common part)" default 75 help Init priority for the fake regulator driver (common part). config REGULATOR_FAKE_INIT_PRIORITY int "Fake regulator driver init priority" default 76 help Init priority for the fake regulator driver. It must be greater than REGULATOR_FAKE_COMMON_INIT_PRIORITY. endif ```
/content/code_sandbox/drivers/regulator/Kconfig.fake
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
138
```c
/* */
#include <zephyr/kernel.h>
#include <zephyr/drivers/regulator.h>

/* Sleep for the configured delay; no-op for a zero delay. */
static void regulator_delay(uint32_t delay_us)
{
	if (delay_us > 0U) {
		k_sleep(K_USEC(delay_us));
	}
}

/* Initialize the common per-instance state (refcount and, when enabled in
 * Kconfig, the mutex protecting it). Called by each driver before
 * regulator_common_init().
 */
void regulator_common_data_init(const struct device *dev)
{
	struct regulator_common_data *data = dev->data;

#ifdef CONFIG_REGULATOR_THREAD_SAFE_REFCNT
	(void)k_mutex_init(&data->lock);
#endif
	data->refcnt = 0;
}

/* Apply devicetree-sourced configuration (initial mode, active discharge,
 * initial voltage/current, allowed voltage window) and seed the refcount.
 *
 * @param is_enabled true when the hardware is already switching at boot; the
 *                   refcount then starts at 1 unless boot-off forces a disable.
 */
int regulator_common_init(const struct device *dev, bool is_enabled)
{
	const struct regulator_driver_api *api = dev->api;
	const struct regulator_common_config *config = dev->config;
	struct regulator_common_data *data = dev->data;
	int32_t current_uv;
	int ret;

	if (config->initial_mode != REGULATOR_INITIAL_MODE_UNKNOWN) {
		ret = regulator_set_mode(dev, config->initial_mode);
		if (ret < 0) {
			return ret;
		}
	}

	if (REGULATOR_ACTIVE_DISCHARGE_GET_BITS(config->flags) !=
	    REGULATOR_ACTIVE_DISCHARGE_DEFAULT) {
		ret = regulator_set_active_discharge(
			dev, (bool)REGULATOR_ACTIVE_DISCHARGE_GET_BITS(config->flags));
		if (ret < 0) {
			return ret;
		}
	}

	/* INT32_MIN marks "not configured" for init_uv/init_ua */
	if (config->init_uv > INT32_MIN) {
		ret = regulator_set_voltage(dev, config->init_uv, config->init_uv);
		if (ret < 0) {
			return ret;
		}
	}

	if (config->init_ua > INT32_MIN) {
		ret = regulator_set_current_limit(dev, config->init_ua, config->init_ua);
		if (ret < 0) {
			return ret;
		}
	}

	/* If we have valid range values, we try to match them before enabling */
	if ((config->min_uv > INT32_MIN) || (config->max_uv < INT32_MAX)) {
		ret = regulator_get_voltage(dev, &current_uv);
		if (ret < 0) {
			return ret;
		}

		/* Snap to closest interval value if out of range */
		if (current_uv < config->min_uv) {
			ret = regulator_set_voltage(dev, config->min_uv, config->min_uv);
			if (ret < 0) {
				return ret;
			}
		} else if (current_uv > config->max_uv) {
			ret = regulator_set_voltage(dev, config->max_uv, config->max_uv);
			if (ret < 0) {
				return ret;
			}
		}
	}

	if (is_enabled) {
		data->refcnt++;
		if ((config->flags & REGULATOR_BOOT_OFF) != 0U) {
			/* refcnt drops back to 0 inside regulator_disable() */
			return regulator_disable(dev);
		}
	} else if ((config->flags & REGULATOR_INIT_ENABLED) != 0U) {
		ret = api->enable(dev);
		if (ret < 0) {
			return ret;
		}
		regulator_delay(config->startup_delay_us);
		data->refcnt++;
	}

	return 0;
}

/* Reference-counted enable: only the 0 -> 1 transition touches hardware; the
 * refcount is rolled back if the driver's enable fails.
 */
int regulator_enable(const struct device *dev)
{
	const struct regulator_driver_api *api = dev->api;
	const struct regulator_common_config *config = dev->config;
	struct regulator_common_data *data = dev->data;
	int ret = 0;

	/* enable not supported (always on) */
	if (api->enable == NULL) {
		return 0;
	}

	/* regulator must stay always on */
	if ((config->flags & REGULATOR_ALWAYS_ON) != 0U) {
		return 0;
	}

#ifdef CONFIG_REGULATOR_THREAD_SAFE_REFCNT
	(void)k_mutex_lock(&data->lock, K_FOREVER);
#endif

	data->refcnt++;

	if (data->refcnt == 1) {
		ret = api->enable(dev);
		if (ret < 0) {
			data->refcnt--;
		} else {
			regulator_delay(config->off_on_delay_us);
		}
	}

#ifdef CONFIG_REGULATOR_THREAD_SAFE_REFCNT
	k_mutex_unlock(&data->lock);
#endif

	return ret;
}

/* An always-on regulator reports enabled unconditionally; otherwise the
 * refcount decides.
 */
bool regulator_is_enabled(const struct device *dev)
{
	const struct regulator_common_config *config = dev->config;
	struct regulator_common_data *data = dev->data;
	bool enabled;

	if ((config->flags & REGULATOR_ALWAYS_ON) != 0U) {
		enabled = true;
	} else {
#ifdef CONFIG_REGULATOR_THREAD_SAFE_REFCNT
		(void)k_mutex_lock(&data->lock, K_FOREVER);
#endif
		enabled = data->refcnt != 0;
#ifdef CONFIG_REGULATOR_THREAD_SAFE_REFCNT
		k_mutex_unlock(&data->lock);
#endif
	}

	return enabled;
}

/* Reference-counted disable: only the 1 -> 0 transition touches hardware; the
 * refcount is restored if the driver's disable fails. Extra disables (refcnt
 * already 0) are silently ignored.
 */
int regulator_disable(const struct device *dev)
{
	const struct regulator_driver_api *api = dev->api;
	const struct regulator_common_config *config = dev->config;
	struct regulator_common_data *data = dev->data;
	int ret = 0;

	/* disable not supported (always on) */
	if (api->disable == NULL) {
		return 0;
	}

	/* regulator must stay always on */
	if ((config->flags & REGULATOR_ALWAYS_ON) != 0U) {
		return 0;
	}

#ifdef CONFIG_REGULATOR_THREAD_SAFE_REFCNT
	(void)k_mutex_lock(&data->lock, K_FOREVER);
#endif

	if (data->refcnt > 0) {
		data->refcnt--;

		if (data->refcnt == 0) {
			ret = api->disable(dev);
			if (ret < 0) {
				data->refcnt++;
			}
		}
	}

#ifdef CONFIG_REGULATOR_THREAD_SAFE_REFCNT
	k_mutex_unlock(&data->lock);
#endif

	return ret;
}

/* True when at least one supported voltage step falls inside [min_uv, max_uv]
 * and the range overlaps the devicetree-allowed window.
 */
bool regulator_is_supported_voltage(const struct device *dev, int32_t min_uv, int32_t max_uv)
{
	const struct regulator_common_config *config = dev->config;
	unsigned int volt_cnt;

	/* voltage may not be allowed, even if supported */
	if ((min_uv > config->max_uv) || (max_uv < config->min_uv)) {
		return false;
	}

	volt_cnt = regulator_count_voltages(dev);

	for (unsigned int idx = 0U; idx < volt_cnt; idx++) {
		int32_t volt_uv;

		(void)regulator_list_voltage(dev, idx, &volt_uv);

		if ((volt_uv >= min_uv) && (volt_uv <= max_uv)) {
			return true;
		}
	}

	return false;
}

/* Forward to the driver after clamping against the devicetree-allowed window. */
int regulator_set_voltage(const struct device *dev, int32_t min_uv, int32_t max_uv)
{
	const struct regulator_common_config *config = dev->config;
	const struct regulator_driver_api *api = dev->api;

	if (api->set_voltage == NULL) {
		return -ENOSYS;
	}

	/* voltage may not be allowed, even if supported */
	if ((min_uv > config->max_uv) || (max_uv < config->min_uv)) {
		return -EINVAL;
	}

	return api->set_voltage(dev, min_uv, max_uv);
}

/* Forward to the driver after clamping against the devicetree-allowed window. */
int regulator_set_current_limit(const struct device *dev, int32_t min_ua, int32_t max_ua)
{
	const struct regulator_common_config *config = dev->config;
	const struct regulator_driver_api *api = dev->api;

	if (api->set_current_limit == NULL) {
		return -ENOSYS;
	}

	/* current limit may not be allowed, even if supported */
	if ((min_ua > config->max_ua) || (max_ua < config->min_ua)) {
		return -EINVAL;
	}

	return api->set_current_limit(dev, min_ua, max_ua);
}

/* Set the operating mode, honoring the devicetree allowed-modes list when one
 * is configured (an empty list means any mode is acceptable).
 */
int regulator_set_mode(const struct device *dev, regulator_mode_t mode)
{
	const struct regulator_common_config *config = dev->config;
	const struct regulator_driver_api *api = dev->api;

	if (api->set_mode == NULL) {
		return -ENOSYS;
	}

	/* no mode restrictions */
	if (config->allowed_modes_cnt == 0U) {
		return api->set_mode(dev, mode);
	}

	/* check if mode is allowed, apply if it is */
	for (uint8_t i = 0U; i < config->allowed_modes_cnt; i++) {
		if (mode == config->allowed_modes[i]) {
			return api->set_mode(dev, mode);
		}
	}

	return -ENOTSUP;
}
```
/content/code_sandbox/drivers/regulator/regulator_common.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,819
```unknown config REGULATOR_NXP_VREF bool "NXP VREF peripheral driver" default y depends on DT_HAS_NXP_VREF_ENABLED help Enable the NXP VREF driver config REGULATOR_NXP_VREF_INIT_PRIORITY int "NXP VREF peripheral driver init priority" default 45 depends on DT_HAS_NXP_VREF_ENABLED help Init priority for the NXP VREF peripheral. ```
/content/code_sandbox/drivers/regulator/Kconfig.nxp_vref
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
93
```unknown config REGULATOR_NPM1100 bool "nPM1100 PMIC regulator driver" default y depends on DT_HAS_NORDIC_NPM1100_ENABLED help Enable the Nordic nPM1100 PMIC regulator driver config REGULATOR_NPM1100_INIT_PRIORITY int "nPM1100 regulator driver init priority" default KERNEL_INIT_PRIORITY_DEVICE depends on REGULATOR_NPM1100 help Init priority for the Nordic nPM1100 regulator driver. ```
/content/code_sandbox/drivers/regulator/Kconfig.npm1100
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
105
```unknown config REGULATOR_CP9314 bool "CP9314 Switched Cap Converter regulator driver" default y depends on DT_HAS_CIRRUS_CP9314_ENABLED help Enable the Cirrus Logic CP9314 Switched Cap Converter regulator driver config REGULATOR_CP9314_INIT_PRIORITY int "CP9314 regulator driver init priority" default KERNEL_INIT_PRIORITY_DEVICE depends on REGULATOR_CP9314 help Init priority for the Cirrus Logic CP9314 Switched Cap Converter regulator driver. It must be greater than I2C init priority. ```
/content/code_sandbox/drivers/regulator/Kconfig.cp9314
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
126
```unknown config REGULATOR_AXP192 bool "X-Power AXP192 PMIC regulator driver" default y depends on DT_HAS_X_POWERS_AXP192_REGULATOR_ENABLED depends on DT_HAS_X_POWERS_AXP192_ENABLED select I2C select MFD help Enable the AXP PMIC regulator driver if REGULATOR_AXP192 config REGULATOR_AXP192_INIT_PRIORITY int "AXP192 regulator driver init priority" default 86 help Init priority for the axp192 regulator driver. It must be greater than MFD_INIT_PRIORITY. endif ```
/content/code_sandbox/drivers/regulator/Kconfig.axp192
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
133
```c /* * */ #include <ctype.h> #include <stdlib.h> #include <string.h> #include <zephyr/device.h> #include <zephyr/shell/shell.h> #include <zephyr/drivers/regulator.h> #include <zephyr/toolchain.h> static int strtomicro(char *inp, char units, int32_t *val) { size_t len, start, end; int32_t mult, decdiv = 1; len = strlen(inp); if (len < 2) { return -EINVAL; } /* suffix */ if (tolower(inp[len - 1]) != units) { return -EINVAL; } if ((len > 2) && (inp[len - 2] == 'u')) { mult = 1; end = len - 3; } else if ((len > 2) && (inp[len - 2] == 'm')) { mult = 1000; end = len - 3; } else if (isdigit((unsigned char)inp[len - 2]) > 0) { mult = 1000000; end = len - 2; } else { return -EINVAL; } /* optional prefix (sign) */ if (inp[0] == '-') { mult *= -1; start = 1; } else if (inp[0] == '+') { start = 1; } else { start = 0; } /* numeric part */ *val = 0; for (size_t i = start; (i <= end) && (decdiv <= mult); i++) { if (isdigit((unsigned char)inp[i]) > 0) { *val = *val * 10 / decdiv + (int32_t)(inp[i] - '0') * mult / decdiv; if (decdiv > 1) { mult /= 10; } } else if (inp[i] == '.') { decdiv = 10; } else { return -EINVAL; } } return 0; } static void microtoshell(const struct shell *sh, char unit, int32_t val) { if (val > 100000) { shell_print(sh, "%d.%06d %c", val / 1000000, val % 1000000, unit); } else if (val > 1000) { shell_print(sh, "%d.%03d m%c", val / 1000, val % 1000, unit); } else { shell_print(sh, "%d u%c", val, unit); } } static int cmd_enable(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; int ret; ARG_UNUSED(argc); dev = device_get_binding(argv[1]); if (dev == NULL) { shell_error(sh, "Regulator device %s not available", argv[1]); return -ENODEV; } ret = regulator_enable(dev); if (ret < 0) { shell_error(sh, "Could not enable regulator (%d)", ret); return ret; } return 0; } static int cmd_disable(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; int ret; ARG_UNUSED(argc); dev = device_get_binding(argv[1]); if (dev 
== NULL) { shell_error(sh, "Regulator device %s not available", argv[1]); return -ENODEV; } ret = regulator_disable(dev); if (ret < 0) { shell_error(sh, "Could not disable regulator (%d)", ret); return ret; } return 0; } static int cmd_is_enabled(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; ARG_UNUSED(argc); dev = device_get_binding(argv[1]); if (dev == NULL) { shell_error(sh, "Regulator device %s not available", argv[1]); return -ENODEV; } if (regulator_is_enabled(dev)) { shell_print(sh, "Regulator is enabled"); } else { shell_print(sh, "Regulator is disabled"); } return 0; } static int cmd_vlist(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; unsigned int volt_cnt; int32_t last_volt_uv = 0; ARG_UNUSED(argc); dev = device_get_binding(argv[1]); if (dev == NULL) { shell_error(sh, "Regulator device %s not available", argv[1]); return -ENODEV; } volt_cnt = regulator_count_voltages(dev); for (unsigned int i = 0U; i < volt_cnt; i++) { int32_t volt_uv; (void)regulator_list_voltage(dev, i, &volt_uv); /* do not print repeated voltages */ if ((i == 0U) || (last_volt_uv != volt_uv)) { microtoshell(sh, 'V', volt_uv); } last_volt_uv = volt_uv; } return 0; } static int cmd_vset(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; int32_t min_uv, max_uv; int ret; dev = device_get_binding(argv[1]); if (dev == NULL) { shell_error(sh, "Regulator device %s not available", argv[1]); return -ENODEV; } ret = strtomicro(argv[2], 'v', &min_uv); if (ret < 0) { shell_error(sh, "Invalid min. voltage: %s", argv[2]); return ret; } if (argc == 4) { ret = strtomicro(argv[3], 'v', &max_uv); if (ret < 0) { shell_error(sh, "Invalid max. 
voltage: %s", argv[3]); return ret; } } else { max_uv = min_uv; } ret = regulator_set_voltage(dev, min_uv, max_uv); if (ret < 0) { shell_error(sh, "Could not set voltage (%d)", ret); return ret; } return 0; } static int cmd_vget(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; int32_t volt_uv; int ret; ARG_UNUSED(argc); dev = device_get_binding(argv[1]); if (dev == NULL) { shell_error(sh, "Regulator device %s not available", argv[1]); return -ENODEV; } ret = regulator_get_voltage(dev, &volt_uv); if (ret < 0) { shell_error(sh, "Could not get voltage (%d)", ret); return ret; } microtoshell(sh, 'V', volt_uv); return 0; } static int cmd_clist(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; unsigned int current_cnt; int32_t last_current_ua; ARG_UNUSED(argc); dev = device_get_binding(argv[1]); if (dev == NULL) { shell_error(sh, "Regulator device %s not available", argv[1]); return -ENODEV; } current_cnt = regulator_count_current_limits(dev); for (unsigned int i = 0U; i < current_cnt; i++) { int32_t current_ua; (void)regulator_list_current_limit(dev, i, &current_ua); /* do not print repeated current limits */ if ((i == 0U) || (last_current_ua != current_ua)) { microtoshell(sh, 'A', current_ua); } last_current_ua = current_ua; } return 0; } static int cmd_iset(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; int32_t min_ua, max_ua; int ret; dev = device_get_binding(argv[1]); if (dev == NULL) { shell_error(sh, "Regulator device %s not available", argv[1]); return -ENODEV; } ret = strtomicro(argv[2], 'a', &min_ua); if (ret < 0) { shell_error(sh, "Invalid min. current: %s", argv[2]); return ret; } if (argc == 4) { ret = strtomicro(argv[3], 'a', &max_ua); if (ret < 0) { shell_error(sh, "Invalid max. 
current: %s", argv[3]); return ret; } } else { max_ua = min_ua; } ret = regulator_set_current_limit(dev, min_ua, max_ua); if (ret < 0) { shell_error(sh, "Could not set current limit (%d)", ret); return ret; } return 0; } static int cmd_iget(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; int32_t curr_ua; int ret; ARG_UNUSED(argc); dev = device_get_binding(argv[1]); if (dev == NULL) { shell_error(sh, "Regulator device %s not available", argv[1]); return -ENODEV; } ret = regulator_get_current_limit(dev, &curr_ua); if (ret < 0) { shell_error(sh, "Could not get current limit (%d)", ret); return ret; } microtoshell(sh, 'A', curr_ua); return 0; } static int cmd_modeset(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; regulator_mode_t mode; int ret; ARG_UNUSED(argc); dev = device_get_binding(argv[1]); if (dev == NULL) { shell_error(sh, "Regulator device %s not available", argv[1]); return -ENODEV; } mode = (regulator_mode_t)strtoul(argv[2], NULL, 10); ret = regulator_set_mode(dev, mode); if (ret < 0) { shell_error(sh, "Could not set mode (%d)", ret); return ret; } return 0; } static int cmd_modeget(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; regulator_mode_t mode; int ret; ARG_UNUSED(argc); dev = device_get_binding(argv[1]); if (dev == NULL) { shell_error(sh, "Regulator device %s not available", argv[1]); return -ENODEV; } ret = regulator_get_mode(dev, &mode); if (ret < 0) { shell_error(sh, "Could not get mode (%d)", ret); return ret; } shell_print(sh, "Mode: %u", (unsigned int)mode); return 0; } static int cmd_adset(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; bool ad; int ret; ARG_UNUSED(argc); dev = device_get_binding(argv[1]); if (dev == NULL) { shell_error(sh, "Regulator device %s not available", argv[1]); return -ENODEV; } if (strcmp(argv[2], "enable") == 0) { ad = true; } else if (strcmp(argv[2], "disable") == 0) { ad = false; } else { 
shell_error(sh, "Invalid parameter"); return -EINVAL; } ret = regulator_set_active_discharge(dev, ad); if (ret < 0) { shell_error(sh, "Could not set active discharge (%d)", ret); return ret; } return 0; } static int cmd_adget(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; bool ad; int ret; ARG_UNUSED(argc); dev = device_get_binding(argv[1]); if (dev == NULL) { shell_error(sh, "Regulator device %s not available", argv[1]); return -ENODEV; } ret = regulator_get_active_discharge(dev, &ad); if (ret < 0) { shell_error(sh, "Could not get active discharge (%d)", ret); return ret; } shell_print(sh, "Active Discharge: %s", ad ? "enabled" : "disabled"); return 0; } static int cmd_errors(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; regulator_error_flags_t errors; int ret; ARG_UNUSED(argc); dev = device_get_binding(argv[1]); if (dev == NULL) { shell_error(sh, "Regulator device %s not available", argv[1]); return -ENODEV; } ret = regulator_get_error_flags(dev, &errors); if (ret < 0) { shell_error(sh, "Could not get error flags (%d)", ret); return ret; } shell_print(sh, "Overvoltage:\t[%s]", ((errors & REGULATOR_ERROR_OVER_VOLTAGE) != 0U) ? "X" : " "); shell_print(sh, "Overcurrent:\t[%s]", ((errors & REGULATOR_ERROR_OVER_CURRENT) != 0U) ? "X" : " "); shell_print(sh, "Overtemp.:\t[%s]", ((errors & REGULATOR_ERROR_OVER_TEMP) != 0U) ? 
"X" : " "); return 0; } static int cmd_dvsset(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; int ret = 0; regulator_dvs_state_t state; dev = device_get_binding(argv[1]); if (dev == NULL) { shell_error(sh, "Regulator device %s not available", argv[1]); return -ENODEV; } state = shell_strtoul(argv[2], 10, &ret); if (ret < 0) { shell_error(sh, "Could not parse state (%d)", ret); return ret; } ret = regulator_parent_dvs_state_set(dev, state); if (ret < 0) { shell_error(sh, "Could not set DVS state (%d)", ret); return ret; } return 0; } static int cmd_shipmode(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; int ret; ARG_UNUSED(argc); dev = device_get_binding(argv[1]); if (dev == NULL) { shell_error(sh, "Regulator device %s not available", argv[1]); return -ENODEV; } ret = regulator_parent_ship_mode(dev); if (ret < 0) { shell_error(sh, "Could not enable ship mode (%d)", ret); return ret; } return 0; } static void device_name_get(size_t idx, struct shell_static_entry *entry) { const struct device *dev = shell_device_lookup(idx, NULL); entry->syntax = (dev != NULL) ? dev->name : NULL; entry->handler = NULL; entry->help = NULL; entry->subcmd = NULL; } SHELL_DYNAMIC_CMD_CREATE(dsub_device_name, device_name_get); SHELL_STATIC_SUBCMD_SET_CREATE( sub_regulator_cmds, SHELL_CMD_ARG(enable, &dsub_device_name, "Enable regulator\n" "Usage: enable <device>", cmd_enable, 2, 0), SHELL_CMD_ARG(disable, &dsub_device_name, "Disable regulator\n" "Usage: disable <device>", cmd_disable, 2, 0), SHELL_CMD_ARG(is_enabled, &dsub_device_name, "Report whether regulator is enabled or disabled\n" "Usage: is_enabled <device>", cmd_is_enabled, 2, 0), SHELL_CMD_ARG(vlist, &dsub_device_name, "List all supported voltages\n" "Usage: vlist <device>", cmd_vlist, 2, 0), SHELL_CMD_ARG(vset, &dsub_device_name, "Set voltage\n" "Input requires units, e.g. 
200mv, 20.5mv, 10uv, 1v...\n" "Usage: vset <device> <minimum> [<maximum>]\n" "If maximum is not set, exact voltage will be requested", cmd_vset, 3, 1), SHELL_CMD_ARG(vget, &dsub_device_name, "Get voltage\n" "Usage: vget <device>", cmd_vget, 2, 0), SHELL_CMD_ARG(clist, &dsub_device_name, "List all supported current limits\n" "Usage: clist <device>", cmd_clist, 2, 0), SHELL_CMD_ARG(iset, &dsub_device_name, "Set current limit\n" "Input requires units, e.g. 200ma, 20.5ma, 10ua, 1a...\n" "Usage: iset <device> <minimum> [<maximum>]" "If maximum is not set, exact current will be requested", cmd_iset, 3, 1), SHELL_CMD_ARG(iget, &dsub_device_name, "Get current limit\n" "Usage: iget <device>", cmd_iget, 2, 0), SHELL_CMD_ARG(modeset, &dsub_device_name, "Set regulator mode\n" "Usage: modeset <device> <mode identifier>", cmd_modeset, 3, 0), SHELL_CMD_ARG(modeget, &dsub_device_name, "Get regulator mode\n" "Usage: modeget <device>", cmd_modeget, 2, 0), SHELL_CMD_ARG(adset, NULL, "Set active discharge\n" "Usage: adset <device> <enable/disable>", cmd_adset, 3, 0), SHELL_CMD_ARG(adget, NULL, "Get active discharge\n" "Usage: adget <device>", cmd_adget, 2, 0), SHELL_CMD_ARG(errors, &dsub_device_name, "Get errors\n" "Usage: errors <device>", cmd_errors, 2, 0), SHELL_CMD_ARG(dvsset, &dsub_device_name, "Set regulator dynamic voltage scaling state\n" "Usage: dvsset <device> <state identifier>", cmd_dvsset, 3, 0), SHELL_CMD_ARG(shipmode, &dsub_device_name, "Enable regulator ship mode\n" "Usage: shipmode <device>", cmd_shipmode, 2, 0), SHELL_SUBCMD_SET_END); SHELL_CMD_REGISTER(regulator, &sub_regulator_cmds, "Regulator playground", NULL); ```
/content/code_sandbox/drivers/regulator/regulator_shell.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,200
```c /* */ #define DT_DRV_COMPAT nordic_npm1100 #include <errno.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/regulator.h> #include <zephyr/dt-bindings/regulator/npm1100.h> #include <zephyr/toolchain.h> struct regulator_npm1100_pconfig { struct gpio_dt_spec iset; }; struct regulator_npm1100_config { struct regulator_common_config common; struct gpio_dt_spec mode; }; struct regulator_npm1100_data { struct regulator_common_data data; }; static int regulator_npm1100_set_mode(const struct device *dev, regulator_mode_t mode) { const struct regulator_npm1100_config *config = dev->config; if ((config->mode.port == NULL) || (mode > NPM1100_MODE_PWM)) { return -ENOTSUP; } return gpio_pin_set_dt(&config->mode, mode == NPM1100_MODE_AUTO ? 0 : 1); } static int regulator_npm1100_get_mode(const struct device *dev, regulator_mode_t *mode) { const struct regulator_npm1100_config *config = dev->config; int ret; if (config->mode.port == NULL) { return -ENOTSUP; } ret = gpio_pin_get_dt(&config->mode); if (ret < 0) { return ret; } *mode = (ret == 0) ? 
NPM1100_MODE_AUTO : NPM1100_MODE_PWM; return 0; } static __unused int regulator_npm1100_init(const struct device *dev) { const struct regulator_npm1100_config *config = dev->config; int ret; if (config->mode.port != NULL) { if (!gpio_is_ready_dt(&config->mode)) { return -ENODEV; } ret = gpio_pin_configure_dt(&config->mode, GPIO_INPUT | GPIO_OUTPUT_INACTIVE); if (ret < 0) { return ret; } } regulator_common_data_init(dev); return regulator_common_init(dev, true); } static int regulator_npm1100_common_init(const struct device *dev) { const struct regulator_npm1100_pconfig *config = dev->config; if (config->iset.port != NULL) { int ret; if (!gpio_is_ready_dt(&config->iset)) { return -ENODEV; } ret = gpio_pin_configure_dt(&config->iset, GPIO_OUTPUT_INACTIVE); if (ret < 0) { return ret; } } return 0; } static const __unused struct regulator_driver_api api = { .set_mode = regulator_npm1100_set_mode, .get_mode = regulator_npm1100_get_mode, }; #define REGULATOR_NPM1100_DEFINE_BUCK(node_id, id) \ static struct regulator_npm1100_data data_##id; \ \ static const struct regulator_npm1100_config config_##id = { \ .common = REGULATOR_DT_COMMON_CONFIG_INIT(node_id), \ .mode = GPIO_DT_SPEC_GET_OR(node_id, nordic_mode_gpios, {}), \ }; \ \ DEVICE_DT_DEFINE(node_id, regulator_npm1100_init, NULL, &data_##id, \ &config_##id, POST_KERNEL, \ CONFIG_REGULATOR_NPM1100_INIT_PRIORITY, &api); #define REGULATOR_NPM1100_DEFINE_BUCK_COND(inst) \ COND_CODE_1(DT_NODE_EXISTS(DT_INST_CHILD(inst, buck)), \ (REGULATOR_NPM1100_DEFINE_BUCK(DT_INST_CHILD(inst, buck), \ buck##inst)), \ ()) #define REGULATOR_NPM1100_DEFINE_ALL(inst) \ static const struct regulator_npm1100_pconfig config_##inst = { \ .iset = GPIO_DT_SPEC_INST_GET_OR(inst, nordic_iset_gpios, {}), \ }; \ \ DEVICE_DT_INST_DEFINE(inst, regulator_npm1100_common_init, NULL, NULL, \ &config_##inst, POST_KERNEL, \ CONFIG_REGULATOR_NPM1100_INIT_PRIORITY, NULL); \ \ REGULATOR_NPM1100_DEFINE_BUCK_COND(inst) 
DT_INST_FOREACH_STATUS_OKAY(REGULATOR_NPM1100_DEFINE_ALL) ```
/content/code_sandbox/drivers/regulator/regulator_npm1100.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
931
```unknown config REGULATOR_ADP5360 bool "ADP5360 PMIC regulator driver" default y depends on DT_HAS_ADI_ADP5360_REGULATOR_ENABLED select I2C help Enable the Analog Devices ADP5360 PMIC regulator driver. config REGULATOR_ADP5360_INIT_PRIORITY int "ADP5360 regulator driver init priority" default 76 depends on REGULATOR_ADP5360 help Init priority for the Analog Devices ADP5360 regulator driver. ```
/content/code_sandbox/drivers/regulator/Kconfig.adp5360
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
111
```c /* */ #define DT_DRV_COMPAT x_powers_axp192_regulator #include <errno.h> #include <zephyr/kernel.h> #include <zephyr/drivers/i2c.h> #include <zephyr/drivers/regulator.h> #include <zephyr/sys/linear_range.h> #include <zephyr/sys/util.h> #include <zephyr/dt-bindings/regulator/axp192.h> #include <zephyr/logging/log.h> #include <zephyr/logging/log_instance.h> #include <zephyr/drivers/mfd/axp192.h> LOG_MODULE_REGISTER(regulator_axp192, CONFIG_REGULATOR_LOG_LEVEL); /* Output control registers */ #define AXP192_REG_EXTEN_DCDC2_CONTROL 0x10U #define AXP192_REG_DCDC123_LDO23_CONTROL 0x12U #define AXP192_REG_DCDC2_VOLTAGE 0x23U #define AXP192_REG_DCDC2_SLOPE 0x25U #define AXP192_REG_DCDC1_VOLTAGE 0x26U #define AXP192_REG_DCDC3_VOLTAGE 0x27U #define AXP192_REG_LDO23_VOLTAGE 0x28U #define AXP192_REG_DCDC123_WORKMODE 0x80U #define AXP192_REG_GPIO0_CONTROL 0x90U #define AXP192_REG_LDOIO0_VOLTAGE 0x91U struct regulator_axp192_desc { const uint8_t enable_reg; const uint8_t enable_mask; const uint8_t enable_val; const uint8_t vsel_reg; const uint8_t vsel_mask; const uint8_t vsel_bitpos; const int32_t max_ua; const uint8_t workmode_reg; const uint8_t workmode_mask; const uint8_t workmode_pwm_val; const uint8_t num_ranges; const struct linear_range *ranges; }; struct regulator_axp192_data { struct regulator_common_data data; }; struct regulator_axp192_config { struct regulator_common_config common; const struct regulator_axp192_desc *desc; const struct device *mfd; const struct i2c_dt_spec i2c; LOG_INSTANCE_PTR_DECLARE(log); }; static const struct linear_range dcdc1_ranges[] = { LINEAR_RANGE_INIT(700000U, 25000U, 0x00U, 0x7FU), }; static const struct regulator_axp192_desc dcdc1_desc = { .enable_reg = AXP192_REG_DCDC123_LDO23_CONTROL, .enable_mask = 0x01U, .enable_val = 0x01U, .vsel_reg = AXP192_REG_DCDC1_VOLTAGE, .vsel_mask = 0x7FU, .vsel_bitpos = 0U, .max_ua = 1200000U, .workmode_reg = AXP192_REG_DCDC123_WORKMODE, .workmode_mask = 0x08U, .workmode_pwm_val = 0x08U, .ranges = 
dcdc1_ranges, .num_ranges = ARRAY_SIZE(dcdc1_ranges), }; static const struct linear_range dcdc2_ranges[] = { LINEAR_RANGE_INIT(700000U, 25000U, 0x00U, 0x3FU), }; static const struct regulator_axp192_desc dcdc2_desc = { .enable_reg = AXP192_REG_EXTEN_DCDC2_CONTROL, .enable_mask = 0x01U, .enable_val = 0x01U, .vsel_reg = AXP192_REG_DCDC2_VOLTAGE, .vsel_mask = 0x3FU, .vsel_bitpos = 0U, .max_ua = 1600000U, .workmode_reg = AXP192_REG_DCDC123_WORKMODE, .workmode_mask = 0x04U, .workmode_pwm_val = 0x04U, .ranges = dcdc2_ranges, .num_ranges = ARRAY_SIZE(dcdc2_ranges), }; static const struct linear_range dcdc3_ranges[] = { LINEAR_RANGE_INIT(700000U, 25000U, 0x00U, 0x7FU), }; static const struct regulator_axp192_desc dcdc3_desc = { .enable_reg = AXP192_REG_DCDC123_LDO23_CONTROL, .enable_mask = 0x02U, .enable_val = 0x02U, .vsel_reg = AXP192_REG_DCDC3_VOLTAGE, .vsel_mask = 0x7FU, .vsel_bitpos = 0U, .max_ua = 700000U, .workmode_reg = AXP192_REG_DCDC123_WORKMODE, .workmode_mask = 0x02U, .workmode_pwm_val = 0x02U, .ranges = dcdc3_ranges, .num_ranges = ARRAY_SIZE(dcdc3_ranges), }; static const struct linear_range ldoio0_ranges[] = { LINEAR_RANGE_INIT(1800000u, 100000u, 0x00u, 0x0Fu), }; static const struct regulator_axp192_desc ldoio0_desc = { .enable_reg = AXP192_REG_GPIO0_CONTROL, .enable_mask = 0x07u, .enable_val = 0x03u, .vsel_reg = AXP192_REG_LDOIO0_VOLTAGE, .vsel_mask = 0xF0u, .vsel_bitpos = 4u, .max_ua = 50000u, .workmode_reg = 0u, .workmode_mask = 0u, .ranges = ldoio0_ranges, .num_ranges = ARRAY_SIZE(ldoio0_ranges), }; static const struct linear_range ldo2_ranges[] = { LINEAR_RANGE_INIT(1800000U, 100000U, 0x00U, 0x0FU), }; static const struct regulator_axp192_desc ldo2_desc = { .enable_reg = AXP192_REG_DCDC123_LDO23_CONTROL, .enable_mask = 0x04U, .enable_val = 0x04U, .vsel_reg = AXP192_REG_LDO23_VOLTAGE, .vsel_mask = 0xF0U, .vsel_bitpos = 4U, .max_ua = 200000U, .workmode_reg = 0U, .workmode_mask = 0U, .ranges = ldo2_ranges, .num_ranges = ARRAY_SIZE(ldo2_ranges), }; static 
const struct linear_range ldo3_ranges[] = { LINEAR_RANGE_INIT(1800000U, 100000U, 0x00U, 0x0FU), }; static const struct regulator_axp192_desc ldo3_desc = { .enable_reg = AXP192_REG_DCDC123_LDO23_CONTROL, .enable_mask = 0x08U, .enable_val = 0x08U, .vsel_reg = AXP192_REG_LDO23_VOLTAGE, .vsel_mask = 0x0FU, .vsel_bitpos = 0U, .max_ua = 200000U, .workmode_reg = 0U, .workmode_mask = 0U, .ranges = ldo3_ranges, .num_ranges = ARRAY_SIZE(ldo3_ranges), }; static int axp192_enable(const struct device *dev) { const struct regulator_axp192_config *config = dev->config; int ret; LOG_INST_DBG(config->log, "Enabling regulator"); LOG_INST_DBG(config->log, "[0x%02x]=0x%02x mask=0x%02x", config->desc->enable_reg, config->desc->enable_val, config->desc->enable_mask); /* special case for LDOIO0, which is multiplexed with GPIO0 */ if (config->desc->enable_reg == AXP192_REG_GPIO0_CONTROL) { ret = mfd_axp192_gpio_func_ctrl(config->mfd, dev, 0, AXP192_GPIO_FUNC_LDO); } else { ret = i2c_reg_update_byte_dt(&config->i2c, config->desc->enable_reg, config->desc->enable_mask, config->desc->enable_val); } if (ret != 0) { LOG_INST_ERR(config->log, "Failed to enable regulator"); } return ret; } static int axp192_disable(const struct device *dev) { const struct regulator_axp192_config *config = dev->config; int ret; LOG_INST_DBG(config->log, "Disabling regulator"); LOG_INST_DBG(config->log, "[0x%02x]=0 mask=0x%x", config->desc->enable_reg, config->desc->enable_mask); /* special case for LDOIO0, which is multiplexed with GPIO0 */ if (config->desc->enable_reg == AXP192_REG_GPIO0_CONTROL) { ret = mfd_axp192_gpio_func_ctrl(config->mfd, dev, 0, AXP192_GPIO_FUNC_OUTPUT_LOW); } else { ret = i2c_reg_update_byte_dt(&config->i2c, config->desc->enable_reg, config->desc->enable_mask, 0u); } if (ret != 0) { LOG_INST_ERR(config->log, "Failed to disable regulator"); } return ret; } static unsigned int axp192_count_voltages(const struct device *dev) { const struct regulator_axp192_config *config = dev->config; return 
linear_range_group_values_count(config->desc->ranges, config->desc->num_ranges); } static int axp192_list_voltage(const struct device *dev, unsigned int idx, int32_t *volt_uv) { const struct regulator_axp192_config *config = dev->config; return linear_range_group_get_value(config->desc->ranges, config->desc->num_ranges, idx, volt_uv); } static int axp192_set_voltage(const struct device *dev, int32_t min_uv, int32_t max_uv) { const struct regulator_axp192_config *config = dev->config; uint16_t idx; int ret; LOG_INST_DBG(config->log, "voltage = [min=%d, max=%d]", min_uv, max_uv); /* set voltage */ ret = linear_range_group_get_win_index(config->desc->ranges, config->desc->num_ranges, min_uv, max_uv, &idx); if (ret != 0) { LOG_INST_ERR(config->log, "No voltage range window could be detected"); return ret; } idx <<= config->desc->vsel_bitpos; LOG_INST_DBG(config->log, "[0x%x]=0x%x mask=0x%x", config->desc->vsel_reg, idx, config->desc->vsel_mask); ret = i2c_reg_update_byte_dt(&config->i2c, config->desc->vsel_reg, config->desc->vsel_mask, (uint8_t)idx); if (ret != 0) { LOG_INST_ERR(config->log, "Failed to set regulator voltage"); } return ret; } static int axp192_get_voltage(const struct device *dev, int32_t *volt_uv) { const struct regulator_axp192_config *config = dev->config; int ret; uint8_t raw_reg; /* read voltage */ ret = i2c_reg_read_byte_dt(&config->i2c, config->desc->vsel_reg, &raw_reg); if (ret != 0) { return ret; } raw_reg = (raw_reg & config->desc->vsel_mask) >> config->desc->vsel_bitpos; ret = linear_range_group_get_value(config->desc->ranges, config->desc->num_ranges, raw_reg, volt_uv); return ret; } static int axp192_set_mode(const struct device *dev, regulator_mode_t mode) { const struct regulator_axp192_config *config = dev->config; int ret; /* setting workmode is only possible for DCDC1-3 */ if ((mode == AXP192_DCDC_MODE_PWM) && (config->desc->workmode_reg != 0)) { /* configure PWM mode */ LOG_INST_DBG(config->log, "PWM mode enabled"); ret = 
i2c_reg_update_byte_dt(&config->i2c, config->desc->workmode_reg, config->desc->workmode_mask, config->desc->workmode_pwm_val); if (ret != 0) { return ret; } } else if (mode == AXP192_DCDC_MODE_AUTO) { /* configure AUTO mode (default) */ if (config->desc->workmode_reg != 0) { ret = i2c_reg_update_byte_dt(&config->i2c, config->desc->workmode_reg, config->desc->workmode_mask, 0u); if (ret != 0) { return ret; } } else { /* AUTO is default mode for LDOs that cannot be configured */ return 0; } } else { LOG_INST_ERR(config->log, "Setting DCDC workmode failed"); return -ENOTSUP; } return 0; } static int axp192_get_current_limit(const struct device *dev, int32_t *curr_ua) { const struct regulator_axp192_config *config = dev->config; *curr_ua = config->desc->max_ua; return 0; } static struct regulator_driver_api api = { .enable = axp192_enable, .disable = axp192_disable, .count_voltages = axp192_count_voltages, .list_voltage = axp192_list_voltage, .set_voltage = axp192_set_voltage, .get_voltage = axp192_get_voltage, .set_mode = axp192_set_mode, .get_current_limit = axp192_get_current_limit, }; static int regulator_axp192_init(const struct device *dev) { const struct regulator_axp192_config *config = dev->config; uint8_t enabled_val; bool is_enabled; int ret = 0; regulator_common_data_init(dev); if (!device_is_ready(config->mfd)) { LOG_INST_ERR(config->log, "Parent instance not ready!"); return -ENODEV; } /* read regulator state */ ret = i2c_reg_read_byte_dt(&config->i2c, config->desc->enable_reg, &enabled_val); if (ret != 0) { LOG_INST_ERR(config->log, "Reading enable status failed!"); return ret; } is_enabled = ((enabled_val & config->desc->enable_mask) == config->desc->enable_val); LOG_INST_DBG(config->log, "is_enabled: %d", is_enabled); return regulator_common_init(dev, is_enabled); } #define REGULATOR_AXP192_DEFINE(node_id, id, name) \ static struct regulator_axp192_data data_##id; \ LOG_INSTANCE_REGISTER(name, node_id, CONFIG_REGULATOR_LOG_LEVEL); \ static const struct 
regulator_axp192_config config_##id = { \ .common = REGULATOR_DT_COMMON_CONFIG_INIT(node_id), \ .desc = &name##_desc, \ .mfd = DEVICE_DT_GET(DT_GPARENT(node_id)), \ .i2c = I2C_DT_SPEC_GET(DT_GPARENT(node_id)), \ LOG_INSTANCE_PTR_INIT(log, name, node_id)}; \ DEVICE_DT_DEFINE(node_id, regulator_axp192_init, NULL, &data_##id, &config_##id, \ POST_KERNEL, CONFIG_REGULATOR_AXP192_INIT_PRIORITY, &api); #define REGULATOR_AXP192_DEFINE_COND(inst, child) \ COND_CODE_1(DT_NODE_EXISTS(DT_INST_CHILD(inst, child)), \ (REGULATOR_AXP192_DEFINE(DT_INST_CHILD(inst, child), child##inst, child)), ()) #define REGULATOR_AXP192_DEFINE_ALL(inst) \ REGULATOR_AXP192_DEFINE_COND(inst, dcdc1) \ REGULATOR_AXP192_DEFINE_COND(inst, dcdc2) \ REGULATOR_AXP192_DEFINE_COND(inst, dcdc3) \ REGULATOR_AXP192_DEFINE_COND(inst, ldoio0) \ REGULATOR_AXP192_DEFINE_COND(inst, ldo2) \ REGULATOR_AXP192_DEFINE_COND(inst, ldo3) DT_INST_FOREACH_STATUS_OKAY(REGULATOR_AXP192_DEFINE_ALL) ```
/content/code_sandbox/drivers/regulator/regulator_axp192.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,568
```c /* */ #define DT_DRV_COMPAT renesas_smartbond_regulator #include <stdint.h> #include <zephyr/drivers/regulator.h> #include <zephyr/logging/log.h> #include <zephyr/sys/linear_range.h> #include <zephyr/pm/pm.h> #include <zephyr/pm/device.h> #include <DA1469xAB.h> #include <zephyr/devicetree.h> LOG_MODULE_REGISTER(regulator_da1469x, CONFIG_REGULATOR_LOG_LEVEL); #define DCDC_REQUESTED (DCDC_DCDC_VDD_REG_DCDC_VDD_ENABLE_HV_Msk |\ DCDC_DCDC_VDD_REG_DCDC_VDD_ENABLE_LV_Msk) #define DA1469X_LDO_3V0_MODE_VBAT BIT(8) #define DA1469X_LDO_3V0_MODE_VBUS BIT(9) #define PLL_FREQ DT_PROP(DT_NODELABEL(pll), clock_frequency) #define PLL_VDD_UV 1200000 static const struct linear_range curren_ranges[] = { LINEAR_RANGE_INIT(30000, 30000, 0, 31), }; static const struct linear_range vdd_clamp_ranges[] = { LINEAR_RANGE_INIT(706000, 0, 15, 15), LINEAR_RANGE_INIT(798000, 0, 14, 14), LINEAR_RANGE_INIT(828000, 0, 13, 13), LINEAR_RANGE_INIT(861000, 0, 11, 11), LINEAR_RANGE_INIT(862000, 0, 12, 12), LINEAR_RANGE_INIT(889000, 0, 10, 10), LINEAR_RANGE_INIT(918000, 0, 9, 9), LINEAR_RANGE_INIT(946000, 0, 3, 3), LINEAR_RANGE_INIT(952000, 0, 8, 8), LINEAR_RANGE_INIT(978000, 0, 2, 2), LINEAR_RANGE_INIT(1005000, 0, 1, 1), LINEAR_RANGE_INIT(1030000, 0, 7, 7), LINEAR_RANGE_INIT(1037000, 0, 0, 0), LINEAR_RANGE_INIT(1058000, 0, 6, 6), LINEAR_RANGE_INIT(1089000, 0, 5, 5), LINEAR_RANGE_INIT(1120000, 0, 4, 4), }; static const struct linear_range vdd_ranges[] = { LINEAR_RANGE_INIT(900000, 100000, 0, 3), }; static const struct linear_range vdd_sleep_ranges[] = { LINEAR_RANGE_INIT(750000, 50000, 0, 3), }; static const struct linear_range v14_ranges[] = { LINEAR_RANGE_INIT(1200000, 50000, 0, 7), }; static const struct linear_range v30_ranges[] = { LINEAR_RANGE_INIT(3000000, 300000, 0, 1), }; static const struct linear_range v18_ranges[] = { LINEAR_RANGE_INIT(1200000, 600000, 0, 1), }; static const struct linear_range v18p_ranges[] = { LINEAR_RANGE_INIT(1800000, 0, 0, 0), }; enum da1469x_rail { VDD_CLAMP, 
VDD_SLEEP, VDD, V14, V18, V18P, V30, }; struct dcdc_regs { uint32_t v18; uint32_t v18p; uint32_t vdd; uint32_t v14; uint32_t ctrl1; }; static struct dcdc_regs dcdc_state; struct regulator_da1469x_desc { const struct linear_range *voltage_ranges; const struct linear_range *current_ranges; uint8_t voltage_range_count; /* Bit from POWER_CTRL_REG that can be used for enabling rail */ uint32_t enable_mask; uint32_t voltage_idx_mask; volatile uint32_t *dcdc_register; uint32_t *dcdc_register_shadow; }; static const struct regulator_da1469x_desc vdd_desc = { .voltage_ranges = vdd_ranges, .current_ranges = curren_ranges, .voltage_range_count = ARRAY_SIZE(vdd_ranges), .enable_mask = CRG_TOP_POWER_CTRL_REG_LDO_CORE_ENABLE_Msk, .voltage_idx_mask = CRG_TOP_POWER_CTRL_REG_VDD_LEVEL_Msk, .dcdc_register = &DCDC->DCDC_VDD_REG, .dcdc_register_shadow = &dcdc_state.vdd, }; static const struct regulator_da1469x_desc vdd_sleep_desc = { .voltage_ranges = vdd_sleep_ranges, .voltage_range_count = ARRAY_SIZE(vdd_sleep_ranges), .enable_mask = CRG_TOP_POWER_CTRL_REG_LDO_CORE_RET_ENABLE_SLEEP_Msk, .voltage_idx_mask = CRG_TOP_POWER_CTRL_REG_VDD_SLEEP_LEVEL_Msk, }; static const struct regulator_da1469x_desc vdd_clamp_desc = { .voltage_ranges = vdd_clamp_ranges, .voltage_range_count = ARRAY_SIZE(vdd_clamp_ranges), .enable_mask = 0, .voltage_idx_mask = CRG_TOP_POWER_CTRL_REG_VDD_CLAMP_LEVEL_Msk, }; static const struct regulator_da1469x_desc v14_desc = { .voltage_ranges = v14_ranges, .current_ranges = curren_ranges, .voltage_range_count = ARRAY_SIZE(v14_ranges), .enable_mask = CRG_TOP_POWER_CTRL_REG_LDO_RADIO_ENABLE_Msk, .voltage_idx_mask = CRG_TOP_POWER_CTRL_REG_V14_LEVEL_Msk, .dcdc_register = &DCDC->DCDC_V14_REG, .dcdc_register_shadow = &dcdc_state.v14, }; static const struct regulator_da1469x_desc v18_desc = { .voltage_ranges = v18_ranges, .current_ranges = curren_ranges, .voltage_range_count = ARRAY_SIZE(v18_ranges), .enable_mask = CRG_TOP_POWER_CTRL_REG_LDO_1V8_ENABLE_Msk | 
/*
 * NOTE(review): this chunk begins inside a rail-descriptor initializer whose
 * opening lines (and the regulator_da1469x_desc / dcdc_state definitions it
 * references) are above this view.
 */
CRG_TOP_POWER_CTRL_REG_LDO_1V8_RET_ENABLE_SLEEP_Msk,
	/* POWER_CTRL_REG field selecting the 1.8 V output level. */
	.voltage_idx_mask = CRG_TOP_POWER_CTRL_REG_V18_LEVEL_Msk,
	.dcdc_register = &DCDC->DCDC_V18_REG,
	.dcdc_register_shadow = &dcdc_state.v18,
};

/* V18P rail: no voltage-select field (voltage_idx_mask == 0), has a current range. */
static const struct regulator_da1469x_desc v18p_desc = {
	.voltage_ranges = v18p_ranges,
	/*
	 * NOTE(review): spelled "curren_ranges" here; the defining declaration is
	 * above this chunk - presumably a typo for "current_ranges" that would
	 * have to be fixed at the definition site as well.
	 */
	.current_ranges = curren_ranges,
	.voltage_range_count = ARRAY_SIZE(v18p_ranges),
	.enable_mask = CRG_TOP_POWER_CTRL_REG_LDO_1V8P_ENABLE_Msk |
		       CRG_TOP_POWER_CTRL_REG_LDO_1V8P_RET_ENABLE_SLEEP_Msk,
	.voltage_idx_mask = 0,
	.dcdc_register = &DCDC->DCDC_V18P_REG,
	.dcdc_register_shadow = &dcdc_state.v18p,
};

/* V30 rail: LDO only - no DCDC register and no current-limit range. */
static const struct regulator_da1469x_desc v30_desc = {
	.voltage_ranges = v30_ranges,
	.voltage_range_count = ARRAY_SIZE(v30_ranges),
	.enable_mask = CRG_TOP_POWER_CTRL_REG_LDO_3V0_RET_ENABLE_SLEEP_Msk |
		       CRG_TOP_POWER_CTRL_REG_LDO_3V0_MODE_Msk,
	.voltage_idx_mask = CRG_TOP_POWER_CTRL_REG_V30_LEVEL_Msk,
};

/*
 * Per-rail sleep/retention LDO enable bit, applied when the
 * renesas,regulator-sleep-ldo DT property is set (see
 * REGULATOR_DA1469X_DEFINE below). Rails without a retention LDO map to 0,
 * so the property has no effect for them.
 */
#define DA1469X_LDO_VDD_CLAMP_RET 0
#define DA1469X_LDO_VDD_SLEEP_RET 0
#define DA1469X_LDO_VDD_RET CRG_TOP_POWER_CTRL_REG_LDO_CORE_RET_ENABLE_SLEEP_Msk
#define DA1469X_LDO_V14_RET 0
#define DA1469X_LDO_V18_RET CRG_TOP_POWER_CTRL_REG_LDO_1V8_RET_ENABLE_SLEEP_Msk
#define DA1469X_LDO_V18P_RET CRG_TOP_POWER_CTRL_REG_LDO_1V8P_RET_ENABLE_SLEEP_Msk
#define DA1469X_LDO_V30_RET CRG_TOP_POWER_CTRL_REG_LDO_3V0_RET_ENABLE_SLEEP_Msk

/* Per-instance (per-rail) configuration, built from devicetree. */
struct regulator_da1469x_config {
	struct regulator_common_config common;
	enum da1469x_rail rail;			/* which rail this instance drives */
	const struct regulator_da1469x_desc *desc;
	uint32_t power_bits;			/* bits applied to CRG_TOP->POWER_CTRL_REG */
	uint32_t dcdc_bits;			/* bits applied to the rail's DCDC register */
};

struct regulator_da1469x_data {
	struct regulator_common_data common;
};

/*
 * Enable the rail: program its LDO enable bits in POWER_CTRL_REG and/or its
 * DCDC channel register, then switch the DCDC converter on when this rail
 * requested it and VBAT is high enough. Always returns 0.
 */
static int regulator_da1469x_enable(const struct device *dev)
{
	const struct regulator_da1469x_config *config = dev->config;
	uint32_t reg_val;

	if (config->desc->enable_mask & config->power_bits) {
		reg_val = CRG_TOP->POWER_CTRL_REG & ~(config->desc->enable_mask);
		reg_val |= config->power_bits & config->desc->enable_mask;
		/*
		 * NOTE(review): OR-ing reg_val back in means the bits cleared
		 * via ~enable_mask above are never actually cleared in the
		 * register; a plain assignment would give true
		 * read-modify-write semantics. Confirm whether this is intended.
		 */
		CRG_TOP->POWER_CTRL_REG |= reg_val;
	}
	if (config->desc->dcdc_register) {
		/* All DCDC channel registers share the V14 bit layout. */
		reg_val = *config->desc->dcdc_register &
			  ~(DCDC_DCDC_V14_REG_DCDC_V14_ENABLE_HV_Msk |
			    DCDC_DCDC_V14_REG_DCDC_V14_ENABLE_LV_Msk);
		reg_val |= config->dcdc_bits;
		/* Shadow copy is what PM resume restores (see pm_action). */
		*config->desc->dcdc_register_shadow = reg_val;
		*config->desc->dcdc_register = reg_val;
	}

	/*
	 * Enable DCDC if:
	 * 1. it was not already enabled, and
	 * 2. VBAT is above minimal value
	 * 3. Just turned on rail requested DCDC
	 */
	if (((DCDC->DCDC_CTRL1_REG & DCDC_DCDC_CTRL1_REG_DCDC_ENABLE_Msk) == 0) &&
	    (CRG_TOP->ANA_STATUS_REG & CRG_TOP_ANA_STATUS_REG_COMP_VBAT_HIGH_Msk) &&
	    config->dcdc_bits & DCDC_REQUESTED) {
		DCDC->DCDC_CTRL1_REG |= DCDC_DCDC_CTRL1_REG_DCDC_ENABLE_Msk;
		dcdc_state.ctrl1 = DCDC->DCDC_CTRL1_REG;
	}

	return 0;
}

/*
 * Disable the rail's LDO/DCDC enable bits, then turn the DCDC converter off
 * once no channel register has DCDC_REQUESTED set any more. Always returns 0.
 */
static int regulator_da1469x_disable(const struct device *dev)
{
	const struct regulator_da1469x_config *config = dev->config;
	uint32_t reg_val;

	if (config->desc->enable_mask & config->power_bits) {
		CRG_TOP->POWER_CTRL_REG &= ~(config->desc->enable_mask &
					     config->power_bits);
	}
	if (config->desc->dcdc_register) {
		reg_val = *config->desc->dcdc_register &
			  ~(DCDC_DCDC_V14_REG_DCDC_V14_ENABLE_HV_Msk |
			    DCDC_DCDC_V14_REG_DCDC_V14_ENABLE_LV_Msk);
		*config->desc->dcdc_register_shadow = reg_val;
		*config->desc->dcdc_register = reg_val;
	}

	/* Turn off DCDC if it's no longer requested by any rail */
	if ((DCDC->DCDC_CTRL1_REG & DCDC_DCDC_CTRL1_REG_DCDC_ENABLE_Msk) &&
	    (DCDC->DCDC_VDD_REG & DCDC_REQUESTED) == 0 &&
	    (DCDC->DCDC_V14_REG & DCDC_REQUESTED) == 0 &&
	    (DCDC->DCDC_V18_REG & DCDC_REQUESTED) == 0 &&
	    (DCDC->DCDC_V18P_REG & DCDC_REQUESTED) == 0) {
		DCDC->DCDC_CTRL1_REG &= ~DCDC_DCDC_CTRL1_REG_DCDC_ENABLE_Msk;
		dcdc_state.ctrl1 = DCDC->DCDC_CTRL1_REG;
	}

	return 0;
}

/* regulator API: number of selectable voltages for this rail. */
static unsigned int regulator_da1469x_count_voltages(const struct device *dev)
{
	const struct regulator_da1469x_config *config = dev->config;

	return linear_range_group_values_count(config->desc->voltage_ranges,
					       config->desc->voltage_range_count);
}

/* regulator API: map voltage index idx to microvolts; -ENOTSUP if no ranges. */
static int regulator_da1469x_list_voltage(const struct device *dev,
					  unsigned int idx, int32_t *volt_uv)
{
	const struct regulator_da1469x_config *config = dev->config;

	if (config->desc->voltage_ranges) {
		return linear_range_group_get_value(config->desc->voltage_ranges,
						    config->desc->voltage_range_count,
						    idx, volt_uv);
	}

	return -ENOTSUP;
}

/*
 * regulator API: select a voltage within [min_uv, max_uv].
 * While the PLL drives the system clock, VDD may not be lowered below
 * PLL_VDD_UV: requests entirely below it fail with -EPERM and the window's
 * lower bound is raised to PLL_VDD_UV otherwise.
 */
static int regulator_da1469x_set_voltage(const struct device *dev, int32_t min_uv,
					 int32_t max_uv)
{
	int ret;
	const struct regulator_da1469x_config *config = dev->config;
	uint16_t idx;
	uint32_t mask;

	if ((SystemCoreClock == PLL_FREQ) && (config->rail == VDD)) {
		/* PLL requires that VDD be @1.2V */
		if (max_uv < PLL_VDD_UV) {
			return -EPERM;
		}
		/*
		 * The get index API should select the min voltage;
		 * make sure the correct voltage is applied.
		 */
		if (min_uv < PLL_VDD_UV) {
			min_uv = PLL_VDD_UV;
		}
	}

	ret = linear_range_group_get_win_index(config->desc->voltage_ranges,
					       config->desc->voltage_range_count,
					       min_uv, max_uv, &idx);
	if (ret == 0) {
		mask = config->desc->voltage_idx_mask;
		/*
		 * Mask is 0 for the fixed-voltage rail (V18P per the
		 * descriptors above): the request is accepted without a
		 * register write since the range check already passed.
		 */
		if (mask) {
			CRG_TOP->POWER_CTRL_REG = (CRG_TOP->POWER_CTRL_REG & ~mask) |
						  FIELD_PREP(mask, idx);
		}
	}

	return ret;
}

/* regulator API: read back the currently selected voltage in microvolts. */
static int regulator_da1469x_get_voltage(const struct device *dev,
					 int32_t *volt_uv)
{
	const struct regulator_da1469x_config *config = dev->config;
	uint16_t idx;

	if (config->desc->voltage_idx_mask) {
		idx = FIELD_GET(config->desc->voltage_idx_mask,
				CRG_TOP->POWER_CTRL_REG);
	} else {
		/* Fixed-voltage rail: only index 0 exists. */
		idx = 0;
	}

	return linear_range_group_get_value(config->desc->voltage_ranges,
					    config->desc->voltage_range_count,
					    idx, volt_uv);
}

/*
 * regulator API: program the DCDC current limit within [min_ua, max_ua].
 * The same index is written to the HV-max, LV-max and min fields.
 * Returns -ENOTSUP for rails without a current range.
 */
static int regulator_da1469x_set_current_limit(const struct device *dev,
					       int32_t min_ua, int32_t max_ua)
{
	const struct regulator_da1469x_config *config = dev->config;
	int ret;
	uint16_t idx;
	uint32_t reg_val;

	if (config->desc->current_ranges == NULL) {
		return -ENOTSUP;
	}
	ret = linear_range_group_get_win_index(config->desc->current_ranges,
					       1, min_ua, max_ua, &idx);
	if (ret) {
		return ret;
	}

	/* All registers have same bits layout */
	reg_val = *config->desc->dcdc_register &
		  ~(DCDC_DCDC_V14_REG_DCDC_V14_CUR_LIM_MAX_HV_Msk |
		    DCDC_DCDC_V14_REG_DCDC_V14_CUR_LIM_MAX_LV_Msk |
		    DCDC_DCDC_V14_REG_DCDC_V14_CUR_LIM_MIN_Msk);
	reg_val |= FIELD_PREP(DCDC_DCDC_V14_REG_DCDC_V14_CUR_LIM_MAX_HV_Msk, idx);
	reg_val |= FIELD_PREP(DCDC_DCDC_V14_REG_DCDC_V14_CUR_LIM_MAX_LV_Msk, idx);
	reg_val |= FIELD_PREP(DCDC_DCDC_V14_REG_DCDC_V14_CUR_LIM_MIN_Msk, idx);
	*config->desc->dcdc_register_shadow = reg_val;
	*config->desc->dcdc_register = reg_val;

	return ret;
}

/* regulator API: read the current limit back from the HV-max field. */
static int regulator_da1469x_get_current_limit(const struct device *dev,
					       int32_t *curr_ua)
{
	const struct regulator_da1469x_config *config = dev->config;
	int ret;
	uint16_t idx;

	if (config->desc->current_ranges == NULL) {
		return -ENOTSUP;
	}
	idx = FIELD_GET(DCDC_DCDC_V14_REG_DCDC_V14_CUR_LIM_MAX_HV_Msk,
			*config->desc->dcdc_register);
	ret = linear_range_group_get_value(config->desc->current_ranges, 1, idx,
					   curr_ua);

	return ret;
}

static const struct regulator_driver_api regulator_da1469x_api = {
	.enable = regulator_da1469x_enable,
	.disable = regulator_da1469x_disable,
	.count_voltages = regulator_da1469x_count_voltages,
	.list_voltage = regulator_da1469x_list_voltage,
	.set_voltage = regulator_da1469x_set_voltage,
	.get_voltage = regulator_da1469x_get_voltage,
	.set_current_limit = regulator_da1469x_set_current_limit,
	.get_current_limit = regulator_da1469x_get_current_limit,
};

/*
 * Instance init: set the optional bandgap reference for V30, then hand off
 * to the common regulator initialization (boot-on/always-on handling).
 */
static int regulator_da1469x_init(const struct device *dev)
{
	const struct regulator_da1469x_config *config = dev->config;

	regulator_common_data_init(dev);

	if ((config->rail == V30) &&
	    (config->power_bits & CRG_TOP_POWER_CTRL_REG_LDO_3V0_REF_Msk)) {
		CRG_TOP->POWER_CTRL_REG |= CRG_TOP_POWER_CTRL_REG_LDO_3V0_REF_Msk;
	}

	return regulator_common_init(dev, 0);
}

#if defined(CONFIG_PM_DEVICE)
/*
 * PM hook: on resume, restore the rail's DCDC register from its shadow and
 * re-apply the saved DCDC_CTRL1 state when the rail wants DCDC and VBAT is
 * high enough. Suspend is a no-op (shadows are kept up to date on every
 * register write in the API calls above).
 */
static int regulator_da1469x_pm_action(const struct device *dev,
				       enum pm_device_action action)
{
	const struct regulator_da1469x_config *config = dev->config;
	int ret = 0;

	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		if (config->desc->dcdc_register) {
			*config->desc->dcdc_register =
				*config->desc->dcdc_register_shadow;
			if ((CRG_TOP->ANA_STATUS_REG &
			     CRG_TOP_ANA_STATUS_REG_COMP_VBAT_HIGH_Msk) &&
			    (*config->desc->dcdc_register_shadow & DCDC_REQUESTED)) {
				DCDC->DCDC_CTRL1_REG = dcdc_state.ctrl1;
			}
		}
		break;
	case PM_DEVICE_ACTION_SUSPEND:
		/*
		 * Shadow register is saved on each regulator API call, there is no need
		 * to save it here.
		 */
		break;
	default:
		ret = -ENOTSUP;
	}

	return ret;
}
#endif

/*
 * Define one regulator device for child node `node` of the PMIC instance.
 * `id` selects the <id>_desc descriptor and names the statics; `rail_id`
 * selects the enum value and the DA1469X_LDO_<rail>_RET retention bit.
 * NOTE(review): the v30-vbus property is multiplied by the ..._MODE_VBAT
 * constant and v30-vbat by ..._MODE_VBUS - verify against the binding and
 * the POWER_CTRL_REG mode-field encoding whether this cross-mapping is
 * intentional or swapped.
 */
#define REGULATOR_DA1469X_DEFINE(node, id, rail_id) \
	static struct regulator_da1469x_data data_##id; \
	\
	static const struct regulator_da1469x_config config_##id = { \
		.common = REGULATOR_DT_COMMON_CONFIG_INIT(node), \
		.desc = &id ## _desc, \
		.power_bits = \
			(DT_PROP(node, renesas_regulator_v30_clamp) * \
			 CRG_TOP_POWER_CTRL_REG_CLAMP_3V0_VBAT_ENABLE_Msk) | \
			(DT_PROP(node, renesas_regulator_v30_vbus) * \
			 DA1469X_LDO_3V0_MODE_VBAT) | \
			(DT_PROP(node, renesas_regulator_v30_vbat) * \
			 DA1469X_LDO_3V0_MODE_VBUS) | \
			(DT_PROP(node, renesas_regulator_sleep_ldo) * \
			 (DA1469X_LDO_ ## rail_id ##_RET)) | \
			(DT_PROP(node, renesas_regulator_v30_ref_bandgap) * \
			 CRG_TOP_POWER_CTRL_REG_LDO_3V0_REF_Msk), \
		.dcdc_bits = \
			(DT_PROP(node, renesas_regulator_dcdc_vbat_high) * \
			 DCDC_DCDC_VDD_REG_DCDC_VDD_ENABLE_HV_Msk) | \
			(DT_PROP(node, renesas_regulator_dcdc_vbat_low) * \
			 DCDC_DCDC_VDD_REG_DCDC_VDD_ENABLE_LV_Msk), \
		.rail = rail_id, \
	}; \
	PM_DEVICE_DT_DEFINE(node, regulator_da1469x_pm_action); \
	DEVICE_DT_DEFINE(node, regulator_da1469x_init, \
			 PM_DEVICE_DT_GET(node), \
			 &data_##id, \
			 &config_##id, PRE_KERNEL_1, \
			 CONFIG_REGULATOR_DA1469X_INIT_PRIORITY, \
			 &regulator_da1469x_api);

/* Instantiate a rail only when its child node exists in the devicetree. */
#define REGULATOR_DA1469X_DEFINE_COND(inst, child, source) \
	COND_CODE_1(DT_NODE_EXISTS(DT_INST_CHILD(inst, child)), \
		    (REGULATOR_DA1469X_DEFINE( \
			DT_INST_CHILD(inst, child), child, source)), \
		    ())

#define REGULATOR_DA1469X_DEFINE_ALL(inst) \
	REGULATOR_DA1469X_DEFINE_COND(inst, vdd_clamp, VDD_CLAMP) \
	REGULATOR_DA1469X_DEFINE_COND(inst, vdd_sleep, VDD_SLEEP) \
	REGULATOR_DA1469X_DEFINE_COND(inst, vdd, VDD) \
	REGULATOR_DA1469X_DEFINE_COND(inst, v14, V14) \
	REGULATOR_DA1469X_DEFINE_COND(inst, v18, V18) \
	REGULATOR_DA1469X_DEFINE_COND(inst, v18p, V18P) \
	REGULATOR_DA1469X_DEFINE_COND(inst, v30, V30) \

DT_INST_FOREACH_STATUS_OKAY(REGULATOR_DA1469X_DEFINE_ALL)
```
/content/code_sandbox/drivers/regulator/regulator_da1469x.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,664
```unknown menuconfig REGULATOR bool "Regulator drivers" help Include drivers for current/voltage regulators in system config if REGULATOR config REGULATOR_THREAD_SAFE_REFCNT bool "Thread-safe reference counting" depends on MULTITHREADING default y help When enabled, regulator reference counting is thread-safe. config REGULATOR_SHELL bool "Regulator shell" depends on SHELL help Enable regulator shell framework, for interacting with regulators via the shell interface module = REGULATOR module-str = regulator source "subsys/logging/Kconfig.template.log_config" source "drivers/regulator/Kconfig.axp192" source "drivers/regulator/Kconfig.adp5360" source "drivers/regulator/Kconfig.cp9314" source "drivers/regulator/Kconfig.da1469x" source "drivers/regulator/Kconfig.fake" source "drivers/regulator/Kconfig.fixed" source "drivers/regulator/Kconfig.gpio" source "drivers/regulator/Kconfig.max20335" source "drivers/regulator/Kconfig.npm1100" source "drivers/regulator/Kconfig.npm1300" source "drivers/regulator/Kconfig.npm6001" source "drivers/regulator/Kconfig.pca9420" source "drivers/regulator/Kconfig.rpi_pico" source "drivers/regulator/Kconfig.nxp_vref" endif # REGULATOR ```
/content/code_sandbox/drivers/regulator/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
290
```unknown config REGULATOR_PCA9420 bool "NXP PCA9420 PMIC regulator driver" default y depends on DT_HAS_NXP_PCA9420_ENABLED select I2C help Enable the NXP PCA9420 PMIC regulator driver if REGULATOR_PCA9420 config REGULATOR_PCA9420_COMMON_INIT_PRIORITY int "NXP PCA9420 regulator driver init priority (common part)" default 75 help Init priority for the NXP PCA9420 regulator driver (common part). It must be greater than I2C init priority. config REGULATOR_PCA9420_INIT_PRIORITY int "NXP PCA9420 regulator driver init priority" default 76 help Init priority for the NXP PCA9420 regulator driver. It must be greater than REGULATOR_PCA9420_COMMON_INIT_PRIORITY. endif ```
/content/code_sandbox/drivers/regulator/Kconfig.pca9420
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
187
```c
/*
 * Raspberry Pi Pico (RP2040) core-supply regulator driver, controlling the
 * on-chip VREG (voltage select, enable, hi-Z mode) and the brown-out
 * detector via the vreg_and_chip_reset register block.
 */
#define DT_DRV_COMPAT raspberrypi_core_supply_regulator

#include <zephyr/devicetree.h>
#include <zephyr/drivers/regulator.h>
#include <zephyr/dt-bindings/regulator/rpi_pico.h>
#include <zephyr/sys/linear_range.h>
#include <zephyr/toolchain.h>

#include <hardware/regs/vreg_and_chip_reset.h>
#include <hardware/structs/vreg_and_chip_reset.h>

/*
 * VSEL index -> microvolts: indices 0..5 all map to 800000 uV (step 0),
 * indices 6..15 map to 850000 + 50000 * (idx - 6) uV.
 */
static const struct linear_range core_ranges[] = {
	LINEAR_RANGE_INIT(800000u, 0u, 0u, 5u),
	LINEAR_RANGE_INIT(850000u, 50000u, 6u, 15u),
};

static const size_t num_core_ranges = ARRAY_SIZE(core_ranges);

struct regulator_rpi_pico_config {
	struct regulator_common_config common;
	vreg_and_chip_reset_hw_t * const reg;	/* memory-mapped VREG block */
	const bool brown_out_detection;		/* enable BOD at init */
	const uint32_t brown_out_threshold;	/* BOD VSEL enum index from DT */
};

struct regulator_rpi_pico_data {
	struct regulator_common_data data;
};

/*
 * APIs
 */

/* regulator API: number of distinct index values across both ranges. */
static unsigned int regulator_rpi_pico_count_voltages(const struct device *dev)
{
	return linear_range_group_values_count(core_ranges, num_core_ranges);
}

/* regulator API: map voltage index idx to microvolts. */
static int regulator_rpi_pico_list_voltage(const struct device *dev,
					   unsigned int idx, int32_t *volt_uv)
{
	return linear_range_group_get_value(core_ranges, num_core_ranges, idx,
					    volt_uv);
}

/* regulator API: pick an index within [min_uv, max_uv] and write VSEL. */
static int regulator_rpi_pico_set_voltage(const struct device *dev,
					  int32_t min_uv, int32_t max_uv)
{
	const struct regulator_rpi_pico_config *config = dev->config;
	uint16_t idx;
	int ret;

	ret = linear_range_group_get_win_index(core_ranges, num_core_ranges,
					       min_uv, max_uv, &idx);
	if (ret < 0) {
		return ret;
	}

	/* Read-modify-write of only the VSEL field. */
	config->reg->vreg = ((config->reg->vreg &
			      ~VREG_AND_CHIP_RESET_VREG_VSEL_BITS) |
			     (idx << VREG_AND_CHIP_RESET_VREG_VSEL_LSB));

	return 0;
}

/* regulator API: translate the current VSEL field back to microvolts. */
static int regulator_rpi_pico_get_voltage(const struct device *dev,
					  int32_t *volt_uv)
{
	const struct regulator_rpi_pico_config *config = dev->config;

	return linear_range_group_get_value(
		core_ranges, num_core_ranges,
		((config->reg->vreg & VREG_AND_CHIP_RESET_VREG_VSEL_BITS) >>
		 VREG_AND_CHIP_RESET_VREG_VSEL_LSB),
		volt_uv);
}

/* regulator API: set the VREG enable bit. Always returns 0. */
static int regulator_rpi_pico_enable(const struct device *dev)
{
	const struct regulator_rpi_pico_config *config = dev->config;

	config->reg->vreg |= BIT(VREG_AND_CHIP_RESET_VREG_EN_LSB);

	return 0;
}

/* regulator API: clear the VREG enable bit. Always returns 0. */
static int regulator_rpi_pico_disable(const struct device *dev)
{
	const struct regulator_rpi_pico_config *config = dev->config;

	config->reg->vreg &= ~BIT(VREG_AND_CHIP_RESET_VREG_EN_LSB);

	return 0;
}

/*
 * regulator API: toggle hi-Z mode. The REGULATOR_RPI_PICO_MODE_HI_Z binding
 * constant is used directly as the register bit mask here.
 */
static int regulator_rpi_pico_set_mode(const struct device *dev,
				       regulator_mode_t mode)
{
	const struct regulator_rpi_pico_config *config = dev->config;

	if (mode & REGULATOR_RPI_PICO_MODE_HI_Z) {
		config->reg->vreg |= REGULATOR_RPI_PICO_MODE_HI_Z;
	} else {
		config->reg->vreg &= (~REGULATOR_RPI_PICO_MODE_HI_Z);
	}

	return 0;
}

/* regulator API: report whether hi-Z mode is currently set. */
static int regulator_rpi_pico_get_mode(const struct device *dev,
				       regulator_mode_t *mode)
{
	const struct regulator_rpi_pico_config *config = dev->config;

	*mode = (config->reg->vreg & REGULATOR_RPI_PICO_MODE_HI_Z);

	return 0;
}

/*
 * Instance init: program (or disable) the brown-out detector from DT, then
 * run common init with is_enabled=true - the core supply is treated as
 * already on at boot.
 */
static int regulator_rpi_pico_init(const struct device *dev)
{
	const struct regulator_rpi_pico_config *config = dev->config;

	if (config->brown_out_detection) {
		config->reg->bod = (BIT(VREG_AND_CHIP_RESET_BOD_EN_LSB) |
				    (config->brown_out_threshold
				     << VREG_AND_CHIP_RESET_BOD_VSEL_LSB));
	} else {
		config->reg->bod &= ~BIT(VREG_AND_CHIP_RESET_BOD_EN_LSB);
	}

	regulator_common_data_init(dev);

	return regulator_common_init(dev, true);
}

static const struct regulator_driver_api api = {
	.enable = regulator_rpi_pico_enable,
	.disable = regulator_rpi_pico_disable,
	.count_voltages = regulator_rpi_pico_count_voltages,
	.list_voltage = regulator_rpi_pico_list_voltage,
	.set_voltage = regulator_rpi_pico_set_voltage,
	.get_voltage = regulator_rpi_pico_get_voltage,
	.set_mode = regulator_rpi_pico_set_mode,
	.get_mode = regulator_rpi_pico_get_mode,
};

/* One device per status-okay instance; register block address from DT. */
#define REGULATOR_RPI_PICO_DEFINE_ALL(inst) \
	static struct regulator_rpi_pico_data data_##inst; \
	\
	static const struct regulator_rpi_pico_config config_##inst = { \
		.common = REGULATOR_DT_COMMON_CONFIG_INIT(inst), \
		.reg = (vreg_and_chip_reset_hw_t * const)DT_INST_REG_ADDR(inst), \
		.brown_out_detection = DT_INST_PROP(inst, raspberrypi_brown_out_detection), \
		.brown_out_threshold = DT_INST_ENUM_IDX(inst, raspberrypi_brown_out_threshold), \
	}; \
	\
	DEVICE_DT_INST_DEFINE(inst, regulator_rpi_pico_init, NULL, &data_##inst, &config_##inst, \
			      POST_KERNEL, CONFIG_REGULATOR_RPI_PICO_INIT_PRIORITY, &api);

DT_INST_FOREACH_STATUS_OKAY(REGULATOR_RPI_PICO_DEFINE_ALL)
```
/content/code_sandbox/drivers/regulator/regulator_rpi_pico.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,236