text
stringlengths
9
39.2M
dir
stringlengths
25
226
lang
stringclasses
163 values
created_date
timestamp[s]
updated_date
timestamp[s]
repo_name
stringclasses
751 values
repo_full_name
stringclasses
752 values
star
int64
1.01k
183k
len_tokens
int64
1
18.5M
```c /* * */ #define DT_DRV_COMPAT vnd_w1 /* * This is not a real 1-Wire driver. It is only used to instantiate struct * devices for the "vnd,w1" devicetree compatibe used in test code. */ #include <zephyr/drivers/w1.h> struct w1_vnd_config { /** w1 master config, common to all drivers */ struct w1_master_config master_config; }; struct w1_vnd_data { /** w1 master data, common to all drivers */ struct w1_master_data master_data; }; static int w1_vnd_reset_bus(const struct device *dev) { return -ENOTSUP; } static int w1_vnd_read_bit(const struct device *dev) { return -ENOTSUP; } static int w1_vnd_write_bit(const struct device *dev, const bool bit) { return -ENOTSUP; } static int w1_vnd_read_byte(const struct device *dev) { return -ENOTSUP; } static int w1_vnd_write_byte(const struct device *dev, const uint8_t byte) { return -ENOTSUP; } static int w1_vnd_configure(const struct device *dev, enum w1_settings_type type, uint32_t value) { return -ENOTSUP; } static const struct w1_driver_api w1_vnd_api = { .reset_bus = w1_vnd_reset_bus, .read_bit = w1_vnd_read_bit, .write_bit = w1_vnd_write_bit, .read_byte = w1_vnd_read_byte, .write_byte = w1_vnd_write_byte, .configure = w1_vnd_configure, }; #define W1_VND_INIT(n) \ static const struct w1_vnd_config w1_vnd_cfg_##inst = { \ .master_config.slave_count = W1_INST_SLAVE_COUNT(inst) \ }; \ static struct w1_vnd_data w1_vnd_data_##inst = {}; \ DEVICE_DT_INST_DEFINE(n, NULL, NULL, &w1_vnd_data_##inst, \ &w1_vnd_cfg_##inst, POST_KERNEL, \ CONFIG_W1_INIT_PRIORITY, &w1_vnd_api); DT_INST_FOREACH_STATUS_OKAY(W1_VND_INIT) ```
/content/code_sandbox/drivers/w1/w1_test.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
490
```c /* * */ /** * @brief 1-Wire network related functions. * * The following procedures wrap basic w1 syscalls, they should be callable * from user mode as well as supervisor mode, therefore _ZEPHYR_SUPERVISOR__ * is not defined for this file such that inline macros do not skip * the arch_is_user_context() check. */ #include <zephyr/logging/log.h> #include <zephyr/drivers/w1.h> LOG_MODULE_REGISTER(w1, CONFIG_W1_LOG_LEVEL); #define W1_SEARCH_DISCREPANCY_INIT 0 #define W1_SEARCH_LAST_SLAVE 65 #define W1_SEARCH_NO_SLAVE 66 /* @brief Search bus for next slave. * * This function searches the next 1-Wire slave on the bus. * It sets the found ROM and the last discrepancy in case more than one * slave took part in the search. * In case only one slave took part in the search, the discrepancy is set to * W1_SEARCH_LAST_SLAVE, and in case no slave participated in the search, * the discrepancy is set to W1_SEARCH_NO_SLAVE. * * The implementation is similar to suggested in the maxim application note 187. * The master reads the first ROM bit and its complementary value of all slaves. * Due to physical characteristics, the value received is a * logical AND of all slaves' 1st bit. Slaves only continue to * participate in the search procedure if the next bit the master sends matches * their own addresses' bit. This allows the master to branch through 64-bit * addresses in order to detect all slaves. * The 1st bit received is stored in bit 1 of rom_inv_64, the 2nd in bit 2 and so * on, until bit 64. * As a result, each byte of the ROM has the correct bit order, but the received * bytes (big-endian) stored in rom_inv_64 are in inverse byte order. * * Note: Filtering by families is currently not supported. * * @param dev Pointer to the device structure for the w1 instance. * @param command Command to chose between normal and alarm search. * @param family This parameter is currently not supported. 
* @param last_discrepancy This must be set to W1_SEARCH_DISCREPANCY_INIT before * the first call, it carries the search progress for * further calls. * @param rom_inv_64 The found ROM: It must be set to zero before first * call and carries the last found ROM for furter calls. * The ROM is stored in inverse byte order. * * @retval 0 If successful. * @retval -errno Negative error code in case of 1-wire read/write error. */ static int search_slave(const struct device *dev, uint8_t command, uint8_t family, size_t *last_discrepancy, uint64_t *rom_inv_64) { int ret; size_t next_discrepancy; bool last_id_bit; bool last_complement_id_bit; ARG_UNUSED(family); __ASSERT_NO_MSG(command == W1_CMD_SEARCH_ROM || command == W1_CMD_SEARCH_ALARM); ret = w1_reset_bus(dev); if (ret < 0) { return ret; } if (ret == 0) { *last_discrepancy = W1_SEARCH_NO_SLAVE; return 0; } ret = w1_write_byte(dev, command); if (ret < 0) { return ret; } next_discrepancy = W1_SEARCH_LAST_SLAVE; for (size_t id_bit_nr = 1; id_bit_nr < W1_SEARCH_LAST_SLAVE; id_bit_nr++) { ret = w1_read_bit(dev); if (ret < 0) { return ret; } last_id_bit = (bool)ret; ret = w1_read_bit(dev); if (ret < 0) { return ret; } last_complement_id_bit = (bool)ret; if (last_id_bit && last_complement_id_bit) { /* * No slave participating: * We can stop following the branch. */ LOG_DBG("No slave paricipating"); *last_discrepancy = W1_SEARCH_NO_SLAVE; return 0; } else if (last_id_bit != last_complement_id_bit) { /* * All slaves connected have same ROM bit value: * We can directly follow last_id_bit branch. */ } else { /* * Discrepancy detected: bit value at id_bit_nr does * not match for all slaves on the bus. 
*/ if ((id_bit_nr > *last_discrepancy) || ((id_bit_nr < *last_discrepancy) && (*rom_inv_64 & BIT64(id_bit_nr - 1)))) { /* * - id_bit_nr > last_discrepancy: * Start always w/ branch of 1s * - id_bit_nr < last_discrepancy: * Follow same branch as before */ last_id_bit = true; next_discrepancy = id_bit_nr; } else { /* * - id_bit_nr == last_discrepancy: * 1-path already done, therefore go 0 path * - id_bit_nr < last_discrepancy: * Follow same branch as before */ } } /* * Send and store the chosen bit: all not matching slaves will * no longer participate in this search until they are reset. */ ret = w1_write_bit(dev, last_id_bit); if (ret < 0) { return ret; } *rom_inv_64 &= ~BIT64(id_bit_nr - 1); *rom_inv_64 |= last_id_bit ? BIT64(id_bit_nr - 1) : 0; } *last_discrepancy = next_discrepancy; return 0; } int z_impl_w1_search_bus(const struct device *dev, uint8_t command, uint8_t family, w1_search_callback_t callback, void *user_data) { size_t last_discrepancy = W1_SEARCH_DISCREPANCY_INIT; uint64_t found_rom_inv_64 = 0; struct w1_rom found_rom = { 0 }; int found_cnt = 0; int ret; (void)w1_lock_bus(dev); do { ret = search_slave(dev, command, family, &last_discrepancy, &found_rom_inv_64); if (ret < 0) { found_cnt = ret; break; } if (last_discrepancy == W1_SEARCH_NO_SLAVE) { break; } found_cnt++; /* * ROM is stored in found_rom_inv_64 in "inverse byte order" => * Only big-endian targets need to swap, such that struct's * bytes are stored in big-endian byte order. 
*/ if (IS_ENABLED(CONFIG_BIG_ENDIAN)) { sys_memcpy_swap(&found_rom, &found_rom_inv_64, 8); } else { *(uint64_t *)&found_rom = found_rom_inv_64; } LOG_DBG("ROM found: nr %u, %016llx", found_cnt, w1_rom_to_uint64(&found_rom)); if (callback != NULL) { callback(found_rom, user_data); } } while (last_discrepancy != W1_SEARCH_LAST_SLAVE); (void)w1_unlock_bus(dev); return found_cnt; } int w1_read_rom(const struct device *dev, struct w1_rom *rom) { int ret; (void)w1_lock_bus(dev); ret = w1_reset_bus(dev); if (ret == 0) { ret = -ENODEV; goto out; } if (ret < 0) { goto out; } ret = w1_write_byte(dev, W1_CMD_READ_ROM); if (ret < 0) { goto out; } ret = w1_read_block(dev, (uint8_t *)rom, sizeof(struct w1_rom)); if (ret < 0) { goto out; } if (w1_crc8((uint8_t *)rom, sizeof(struct w1_rom)) != 0) { ret = -EIO; } out: (void)w1_unlock_bus(dev); return ret; }; static int match_rom(const struct device *dev, const struct w1_slave_config *config) { int ret; uint8_t cmd; if (!config->overdrive) { if (w1_configure(dev, W1_SETTING_SPEED, 0) < 0) { return -EIO; } } ret = w1_reset_bus(dev); if (ret == 0) { return -ENODEV; } if (ret < 0) { return ret; } cmd = config->overdrive ? 
W1_CMD_OVERDRIVE_MATCH_ROM : W1_CMD_MATCH_ROM; ret = w1_write_byte(dev, cmd); if (ret < 0) { return ret; } ret = w1_write_block(dev, (uint8_t *)&config->rom, 8); if (ret < 0) { return ret; } if (config->overdrive) { if (w1_configure(dev, W1_SETTING_SPEED, 1) < 0) { return -EIO; } } return 0; }; int w1_match_rom(const struct device *dev, const struct w1_slave_config *config) { int ret; (void)w1_lock_bus(dev); ret = match_rom(dev, config); (void)w1_unlock_bus(dev); return ret; } int w1_resume_command(const struct device *dev) { int ret; (void)w1_lock_bus(dev); ret = w1_reset_bus(dev); if (ret == 0) { ret = -ENODEV; goto out; } if (ret < 0) { goto out; } ret = w1_write_byte(dev, W1_CMD_RESUME); out: (void)w1_unlock_bus(dev); return ret; } static int skip_rom(const struct device *dev, const struct w1_slave_config *config) { int ret; uint8_t cmd; if (!config->overdrive) { if (w1_configure(dev, W1_SETTING_SPEED, 0) < 0) { return -EIO; } } ret = w1_reset_bus(dev); if (ret == 0) { return -ENODEV; } if (ret < 0) { return ret; } cmd = config->overdrive ? 
W1_CMD_OVERDRIVE_SKIP_ROM : W1_CMD_SKIP_ROM; ret = w1_write_byte(dev, cmd); if (ret < 0) { return ret; } if (config->overdrive) { if (w1_configure(dev, W1_SETTING_SPEED, 1) < 0) { return -EIO; } } return 0; } int w1_skip_rom(const struct device *dev, const struct w1_slave_config *config) { int ret; (void)w1_lock_bus(dev); ret = skip_rom(dev, config); (void)w1_unlock_bus(dev); return ret; } static int reset_select(const struct device *dev, const struct w1_slave_config *config) { if (IS_ENABLED(CONFIG_W1_NET_FORCE_MULTIDROP_ADDRESSING) || w1_get_slave_count(dev) > 1) { return match_rom(dev, config); } return skip_rom(dev, config); } int w1_reset_select(const struct device *dev, const struct w1_slave_config *config) { int ret; (void)w1_lock_bus(dev); ret = reset_select(dev, config); (void)w1_unlock_bus(dev); return ret; } static int write_read(const struct device *dev, const struct w1_slave_config *config, const uint8_t *write_buf, size_t write_len, uint8_t *read_buf, size_t read_len) { int ret; ret = reset_select(dev, config); if (ret != 0) { return ret; } ret = w1_write_block(dev, write_buf, write_len); if (ret < 0) { return ret; } if (read_buf == NULL && read_len > 0) { return -EIO; } return w1_read_block(dev, read_buf, read_len); }; int w1_write_read(const struct device *dev, const struct w1_slave_config *config, const uint8_t *write_buf, size_t write_len, uint8_t *read_buf, size_t read_len) { int ret; (void)w1_lock_bus(dev); ret = write_read(dev, config, write_buf, write_len, read_buf, read_len); (void)w1_unlock_bus(dev); return ret; }; ```
/content/code_sandbox/drivers/w1/w1_net.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,844
```objective-c
/*
 *
 */

#ifndef ZEPHYR_DRIVERS_W1_W1_DS2482_84_H_
#define ZEPHYR_DRIVERS_W1_W1_DS2482_84_H_

#include <zephyr/drivers/i2c.h>
#include <zephyr/kernel.h>

/* DS2482/DS2484 function command codes (sent as first I2C byte) */
#define CMD_1WT 0x78
#define CMD_1WSB 0x87
#define CMD_1WRB 0x96
#define CMD_1WWB 0xa5
#define CMD_1WRS 0xb4
#define CMD_CHSL 0xc3 /* DS2482-800 only */
#define CMD_ADJP 0xc3 /* DS2484 only */
#define CMD_WCFG 0xd2
#define CMD_SRP 0xe1
#define CMD_DRST 0xf0

/* Read-pointer codes for the Set Read Pointer (SRP) command */
#define REG_NONE 0x00 /* special value */
#define REG_CONFIG 0xc3
#define REG_DATA 0xe1
#define REG_STATUS 0xf0
#define REG_CHANNEL 0xd2 /* DS2482-800 only */
#define REG_PORT 0xb4 /* DS2484 only */

/*
 * Device Configuration Register
 */
#define DEVICE_APU_pos 0
#define DEVICE_APU_msk BIT(DEVICE_APU_pos)
#define DEVICE_PDN_pos 1 /* DS2484 only */
#define DEVICE_PDN_msk BIT(DEVICE_PDN_pos) /* DS2484 only */
#define DEVICE_SPU_pos 2
#define DEVICE_SPU_msk BIT(DEVICE_SPU_pos)
#define DEVICE_1WS_pos 3
#define DEVICE_1WS_msk BIT(DEVICE_1WS_pos)

/*
 * Status Register
 */
#define STATUS_1WB_pos 0
#define STATUS_1WB_msk BIT(STATUS_1WB_pos)
#define STATUS_PPD_pos 1
#define STATUS_PPD_msk BIT(STATUS_PPD_pos)
#define STATUS_SD_pos 2
#define STATUS_SD_msk BIT(STATUS_SD_pos)
#define STATUS_LL_pos 3
#define STATUS_LL_msk BIT(STATUS_LL_pos)
#define STATUS_RST_pos 4
#define STATUS_RST_msk BIT(STATUS_RST_pos)
#define STATUS_SBR_pos 5
#define STATUS_SBR_msk BIT(STATUS_SBR_pos)
#define STATUS_TSB_pos 6
#define STATUS_TSB_msk BIT(STATUS_TSB_pos)
#define STATUS_DIR_pos 7
#define STATUS_DIR_msk BIT(STATUS_DIR_pos)

/*
 * Channel Selection Codes, DS2482-800 only
 */
#define CHSL_IO0 0xf0
#define CHSL_IO1 0xe1
#define CHSL_IO2 0xd2
#define CHSL_IO3 0xc3
#define CHSL_IO4 0xb4
#define CHSL_IO5 0xa5
#define CHSL_IO6 0x96
#define CHSL_IO7 0x87

/*
 * Channel Selection Codes (read back values), DS2482-800 only
 */
#define CHSL_RB_IO0 0xb8
#define CHSL_RB_IO1 0xb1
#define CHSL_RB_IO2 0xaa
#define CHSL_RB_IO3 0xa3
#define CHSL_RB_IO4 0x9c
#define CHSL_RB_IO5 0x95
#define CHSL_RB_IO6 0x8e
#define CHSL_RB_IO7 0x87

/*
 * Port Configuration Register, DS2484 only
 */
#define PORT_VAL0_pos 0
#define PORT_VAL0_msk BIT(PORT_VAL0_pos)
#define PORT_VAL1_pos 1
#define PORT_VAL1_msk BIT(PORT_VAL1_pos)
#define PORT_VAL2_pos 2
#define PORT_VAL2_msk BIT(PORT_VAL2_pos)
#define PORT_VAL3_pos 3
#define PORT_VAL3_msk BIT(PORT_VAL3_pos)

/*
 * Bit Byte
 */
#define BIT_CLR_msk 0
#define BIT_SET_msk BIT(7)

/**
 * @brief Send a command byte, optionally followed by one data byte.
 *
 * @param spec I2C bus/address of the bridge device.
 * @param cmd  Command code (one of the CMD_* values).
 * @param data Optional pointer to a single data byte; NULL sends cmd only.
 *
 * @retval 0 on success, negative errno from the I2C transfer otherwise.
 */
static inline int ds2482_84_write(const struct i2c_dt_spec *spec, uint8_t cmd,
				  const uint8_t *data)
{
	int ret;
	const uint8_t buf[] = {cmd, data ? *data : 0};

	ret = i2c_write_dt(spec, buf, data ? 2 : 1);
	if (ret < 0) {
		return ret;
	}

	return 0;
}

/**
 * @brief Read one register byte, optionally moving the read pointer first.
 *
 * @param spec I2C bus/address of the bridge device.
 * @param rp   Register to select via SRP, or REG_NONE to read at the current
 *             read pointer (used while polling status).
 * @param reg  Output: the byte read.
 *
 * @retval 0 on success, -EINVAL for an unknown rp, negative errno on I2C error.
 */
static inline int ds2482_84_read(const struct i2c_dt_spec *spec, uint8_t rp,
				 uint8_t *reg)
{
	int ret;

	switch (rp) {
	case REG_NONE:
		/*
		 * Special value: Don't change read pointer
		 */
		break;
	case REG_PORT:
		__fallthrough;
	case REG_CONFIG:
		__fallthrough;
	case REG_CHANNEL:
		__fallthrough;
	case REG_DATA:
		__fallthrough;
	case REG_STATUS:
		ret = ds2482_84_write(spec, CMD_SRP, &rp);
		if (ret < 0) {
			return ret;
		}
		break;
	default:
		return -EINVAL;
	}

	ret = i2c_read_dt(spec, reg, 1);
	if (ret < 0) {
		return ret;
	}

	return 0;
}

/**
 * @brief Issue a 1-Wire bus reset and wait for completion.
 *
 * Busy-polls the status register until the 1WB (busy) bit clears.
 * NOTE(review): the poll loop has no timeout; a stuck device would spin
 * forever — confirm this matches the callers' expectations.
 *
 * @retval 1 if a slave presence pulse was detected, 0 if not,
 *         negative errno on I2C error.
 */
static inline int ds2482_84_reset_bus(const struct i2c_dt_spec *spec)
{
	int ret;
	uint8_t reg;

	ret = ds2482_84_write(spec, CMD_1WRS, NULL);
	if (ret < 0) {
		return ret;
	}

	do {
		ret = ds2482_84_read(spec, REG_NONE, &reg);
		if (ret < 0) {
			return ret;
		}
	} while (reg & STATUS_1WB_msk);

	return reg & STATUS_PPD_msk ? 1 : 0;
}

/**
 * @brief Reset the bridge device itself (DRST) and wait for the RST flag.
 *
 * @retval 0 on success, negative errno on I2C error.
 */
static inline int ds2482_84_reset_device(const struct i2c_dt_spec *spec)
{
	int ret;
	uint8_t reg;

	ret = ds2482_84_write(spec, CMD_DRST, NULL);
	if (ret < 0) {
		return ret;
	}

	do {
		ret = ds2482_84_read(spec, REG_NONE, &reg);
		if (ret < 0) {
			return ret;
		}
	} while (!(reg & STATUS_RST_msk));

	return 0;
}

/**
 * @brief Generate a single 1-Wire time slot and sample the bus.
 *
 * @param bit_msk BIT_SET_msk to send a 1 (also used for reading),
 *                BIT_CLR_msk to send a 0.
 *
 * @retval 1/0 sampled bus bit (SBR), negative errno on I2C error.
 */
static inline int ds2482_84_single_bit(const struct i2c_dt_spec *spec,
				       uint8_t bit_msk)
{
	int ret;
	uint8_t reg;

	ret = ds2482_84_write(spec, CMD_1WSB, &bit_msk);
	if (ret < 0) {
		return ret;
	}

	do {
		ret = ds2482_84_read(spec, REG_NONE, &reg);
		if (ret < 0) {
			return ret;
		}
	} while (reg & STATUS_1WB_msk);

	return reg & STATUS_SBR_msk ? 1 : 0;
}

/** @brief Read a single bit (write-1 time slot, sample the bus). */
static inline int ds2482_84_read_bit(const struct i2c_dt_spec *spec)
{
	return ds2482_84_single_bit(spec, BIT_SET_msk);
}

/** @brief Write a single bit. */
static inline int ds2482_84_write_bit(const struct i2c_dt_spec *spec, bool bit)
{
	return ds2482_84_single_bit(spec, bit ? BIT_SET_msk : BIT_CLR_msk);
}

/**
 * @brief Read one byte from the 1-Wire bus.
 *
 * @retval 0..255 the byte read, negative errno on I2C error.
 */
static inline int ds2482_84_read_byte(const struct i2c_dt_spec *spec)
{
	int ret;
	uint8_t reg;

	ret = ds2482_84_write(spec, CMD_1WRB, NULL);
	if (ret < 0) {
		return ret;
	}

	do {
		ret = ds2482_84_read(spec, REG_NONE, &reg);
		if (ret < 0) {
			return ret;
		}
	} while (reg & STATUS_1WB_msk);

	ret = ds2482_84_read(spec, REG_DATA, &reg);
	if (ret < 0) {
		return ret;
	}

	return reg;
}

/**
 * @brief Write one byte to the 1-Wire bus and wait until the bus is idle.
 *
 * @retval 0 on success, negative errno on I2C error.
 */
static inline int ds2482_84_write_byte(const struct i2c_dt_spec *spec,
				       uint8_t byte)
{
	int ret;
	uint8_t reg;

	ret = ds2482_84_write(spec, CMD_1WWB, &byte);
	if (ret < 0) {
		return ret;
	}

	do {
		ret = ds2482_84_read(spec, REG_NONE, &reg);
		if (ret < 0) {
			return ret;
		}
	} while (reg & STATUS_1WB_msk);

	return 0;
}

/**
 * @brief Write the device configuration register and verify the readback.
 *
 * The WCFG command expects the upper nibble to hold the bitwise inverse of
 * the lower (configuration) nibble — hence `cfg | ~cfg << 4`. The device
 * echoes back only the configuration nibble, which is compared against cfg.
 *
 * @retval 0 on success, -EINVAL for bits outside the known config mask,
 *         -EIO if the readback does not match, negative errno on I2C error.
 */
static inline int ds2482_84_write_config(const struct i2c_dt_spec *spec,
					 uint8_t cfg)
{
	int ret;
	uint8_t reg = cfg | ~cfg << 4;

	if (cfg & ~(DEVICE_APU_msk | DEVICE_PDN_msk | DEVICE_SPU_msk |
		    DEVICE_1WS_msk)) {
		return -EINVAL;
	}

	ret = ds2482_84_write(spec, CMD_WCFG, &reg);
	if (ret < 0) {
		return ret;
	}

	ret = ds2482_84_read(spec, REG_NONE, &reg);
	if (ret < 0) {
		return ret;
	}

	return (reg == cfg) ? 0 : -EIO;
}

#endif /* ZEPHYR_DRIVERS_W1_W1_DS2482_84_H_ */
```
/content/code_sandbox/drivers/w1/w1_ds2482_84_common.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,000
```c /* * */ #include "w1_ds2482-800.h" #include "w1_ds2482_84_common.h" #include <zephyr/devicetree.h> #include <zephyr/drivers/i2c.h> #include <zephyr/logging/log.h> #define DT_DRV_COMPAT maxim_ds2482_800 LOG_MODULE_REGISTER(ds2482, CONFIG_W1_LOG_LEVEL); struct ds2482_config { const struct i2c_dt_spec i2c_spec; }; struct ds2482_data { struct k_mutex lock; }; int ds2482_change_bus_lock_impl(const struct device *dev, bool lock) { struct ds2482_data *data = dev->data; return lock ? k_mutex_lock(&data->lock, K_FOREVER) : k_mutex_unlock(&data->lock); } static int ds2482_init(const struct device *dev) { int ret; const struct ds2482_config *config = dev->config; struct ds2482_data *data = dev->data; k_mutex_init(&data->lock); if (!i2c_is_ready_dt(&config->i2c_spec)) { return -ENODEV; } ret = ds2482_84_reset_device(&config->i2c_spec); if (ret < 0) { LOG_ERR("Device reset failed: %d", ret); return ret; } return 0; } #define DS2482_INIT(inst) \ static const struct ds2482_config inst_##inst##_config = { \ .i2c_spec = I2C_DT_SPEC_INST_GET(inst), \ }; \ static struct ds2482_data inst_##inst##_data; \ DEVICE_DT_INST_DEFINE(inst, ds2482_init, NULL, &inst_##inst##_data, &inst_##inst##_config, \ POST_KERNEL, CONFIG_W1_INIT_PRIORITY, NULL); DT_INST_FOREACH_STATUS_OKAY(DS2482_INIT) /* * Make sure that this driver is not initialized before the i2c bus is available */ BUILD_ASSERT(CONFIG_W1_INIT_PRIORITY > CONFIG_I2C_INIT_PRIORITY); ```
/content/code_sandbox/drivers/w1/w1_ds2482-800.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
441
```c /* * */ #include "w1_ds2482-800.h" #include "w1_ds2482_84_common.h" #include <zephyr/devicetree.h> #include <zephyr/drivers/i2c.h> #include <zephyr/drivers/w1.h> #include <zephyr/logging/log.h> #define DT_DRV_COMPAT maxim_ds2482_800_channel LOG_MODULE_DECLARE(ds2482, CONFIG_W1_LOG_LEVEL); struct ds2482_config { struct w1_master_config w1_config; const struct device *parent; const struct i2c_dt_spec i2c_spec; uint8_t reg_channel; uint8_t reg_channel_rb; uint8_t reg_config; }; struct ds2482_data { struct w1_master_data w1_data; }; static int ds2482_reset_bus(const struct device *dev) { const struct ds2482_config *config = dev->config; return ds2482_84_reset_bus(&config->i2c_spec); } static int ds2482_read_bit(const struct device *dev) { const struct ds2482_config *config = dev->config; return ds2482_84_read_bit(&config->i2c_spec); } static int ds2482_write_bit(const struct device *dev, bool bit) { const struct ds2482_config *config = dev->config; return ds2482_84_write_bit(&config->i2c_spec, bit); } static int ds2482_read_byte(const struct device *dev) { const struct ds2482_config *config = dev->config; return ds2482_84_read_byte(&config->i2c_spec); } static int ds2482_write_byte(const struct device *dev, uint8_t byte) { const struct ds2482_config *config = dev->config; return ds2482_84_write_byte(&config->i2c_spec, byte); } static int ds2482_configure(const struct device *dev, enum w1_settings_type type, uint32_t value) { const struct ds2482_config *config = dev->config; uint8_t reg_config = config->reg_config; switch (type) { case W1_SETTING_SPEED: WRITE_BIT(reg_config, DEVICE_1WS_pos, value); break; case W1_SETTING_STRONG_PULLUP: WRITE_BIT(reg_config, DEVICE_SPU_pos, value); break; default: return -EINVAL; } return ds2482_84_write_config(&config->i2c_spec, reg_config); } static int ds2482_set_channel(const struct i2c_dt_spec *spec, uint8_t channel, uint8_t channel_rb) { int ret; uint8_t reg = channel; ret = ds2482_84_write(spec, CMD_CHSL, &reg); if (ret < 0) 
{ return ret; } ret = ds2482_84_read(spec, REG_NONE, &reg); if (ret < 0) { return ret; } return (reg == channel_rb) ? 0 : -EIO; } static int ds2482_change_bus_lock(const struct device *dev, bool lock) { int ret; const struct ds2482_config *config = dev->config; ret = ds2482_change_bus_lock_impl(config->parent, lock); if (ret < 0) { LOG_ERR("Failed to acquire bus lock: %d", ret); return ret; } if (!lock) { return 0; } /* * Set channel for subsequent operations */ ret = ds2482_set_channel(&config->i2c_spec, config->reg_channel, config->reg_channel_rb); if (ret < 0) { return ret; } /* * Restore default channel configuration */ ret = ds2482_84_write_config(&config->i2c_spec, config->reg_config); if (ret < 0) { return ret; } return 0; } static int ds2482_init(const struct device *dev) { const struct ds2482_config *config = dev->config; if (!device_is_ready(config->parent)) { return -ENODEV; } return 0; } static const struct w1_driver_api ds2482_driver_api = { .reset_bus = ds2482_reset_bus, .read_bit = ds2482_read_bit, .write_bit = ds2482_write_bit, .read_byte = ds2482_read_byte, .write_byte = ds2482_write_byte, .configure = ds2482_configure, .change_bus_lock = ds2482_change_bus_lock, }; #define DS2482_CHANNEL_INIT(inst) \ static const struct ds2482_config inst_##inst##_config = { \ .w1_config.slave_count = W1_INST_SLAVE_COUNT(inst), \ .parent = DEVICE_DT_GET(DT_INST_PARENT(inst)), \ .i2c_spec = I2C_DT_SPEC_GET(DT_INST_PARENT(inst)), \ .reg_channel = UTIL_CAT(CHSL_IO, DT_INST_REG_ADDR(inst)), \ .reg_channel_rb = UTIL_CAT(CHSL_RB_IO, DT_INST_REG_ADDR(inst)), \ .reg_config = DT_INST_PROP(inst, active_pullup) << DEVICE_APU_pos, \ }; \ static struct ds2482_data inst_##inst##_data = {0}; \ DEVICE_DT_INST_DEFINE(inst, ds2482_init, NULL, &inst_##inst##_data, &inst_##inst##_config, \ POST_KERNEL, CONFIG_W1_INIT_PRIORITY, &ds2482_driver_api); DT_INST_FOREACH_STATUS_OKAY(DS2482_CHANNEL_INIT) /* * Make sure that this driver is not initialized before the i2c bus is available */ 
BUILD_ASSERT(CONFIG_W1_INIT_PRIORITY > CONFIG_I2C_INIT_PRIORITY); ```
/content/code_sandbox/drivers/w1/w1_ds2482-800_channel.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,229
```c /* * */ #include <stdlib.h> #include <zephyr/drivers/w1.h> #include <zephyr/shell/shell.h> #include <zephyr/kernel.h> #define BUF_SIZE CONFIG_W1_SHELL_BUFFER_SIZE static uint8_t msg_buf[BUF_SIZE]; #define W1DEV_X_NOT_FOUND "1-Wire device not found: \"%s\"" #define OPTION_HELP_RESET "-r Perform bus reset before executing cmd." static const char *w1_settings_name[W1_SETINGS_TYPE_COUNT] = { [W1_SETTING_SPEED] = "speed", [W1_SETTING_STRONG_PULLUP] = "spu", }; static int read_io_options(const struct shell *sh, int pos, char **argv, bool *reset) { char *arg = argv[pos]; if (arg[0] != '-') { return pos; } for (arg = &arg[1]; *arg; arg++) { switch (*arg) { case 'r': *reset = true; break; default: shell_error(sh, "Unknown option %c", *arg); return -EINVAL; } } return ++pos; } /* 1-Wire reset bus <device> */ static int cmd_w1_reset_bus(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; int ret; dev = device_get_binding(argv[1]); if (!dev) { shell_error(sh, W1DEV_X_NOT_FOUND, argv[1]); return -EINVAL; } (void)w1_lock_bus(dev); ret = w1_reset_bus(dev); if (ret < 0) { shell_error(sh, "Failed to reset bus [%d]", ret); } (void)w1_unlock_bus(dev); return ret; } /* 1-Wire read_bit <device> */ static int cmd_w1_read_bit(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; int ret; dev = device_get_binding(argv[1]); if (!dev) { shell_error(sh, W1DEV_X_NOT_FOUND, argv[1]); return -EINVAL; } (void)w1_lock_bus(dev); ret = w1_read_bit(dev); if (ret < 0) { shell_error(sh, "Failed to read bit [%d]", ret); } else { shell_print(sh, "Output: 0b%x", ret); } (void)w1_unlock_bus(dev); return ret; } /* 1-Wire read_byte <device> */ static int cmd_w1_read_byte(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; int ret; dev = device_get_binding(argv[1]); if (!dev) { shell_error(sh, W1DEV_X_NOT_FOUND, argv[1]); return -EINVAL; } (void)w1_lock_bus(dev); ret = w1_read_byte(dev); if (ret < 0) { shell_error(sh, "Failed to 
read byte [%d]", ret); } else { shell_print(sh, "Output: 0x%x", ret); } (void)w1_unlock_bus(dev); return ret; } /* 1-Wire read_block <device> [num_bytes] */ static int cmd_w1_read_block(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; char *end_ptr; size_t read_len; int ret; dev = device_get_binding(argv[1]); if (!dev) { shell_error(sh, W1DEV_X_NOT_FOUND, argv[1]); return -EINVAL; } read_len = strtoul(argv[2], &end_ptr, 0); if (*end_ptr != '\0') { shell_error(sh, "num_bytes is not a number"); return -EINVAL; } if (read_len > BUF_SIZE) { shell_error(sh, "num_bytes limited to: %u", BUF_SIZE); return -EINVAL; } (void)w1_lock_bus(dev); ret = w1_read_block(dev, msg_buf, read_len); if (ret < 0) { shell_error(sh, "Failed to read byte [%d]", ret); goto out; } shell_fprintf(sh, SHELL_NORMAL, "Output:"); for (int i = 0; i < read_len; i++) { shell_fprintf(sh, SHELL_NORMAL, " 0x%02x", msg_buf[i]); } shell_fprintf(sh, SHELL_NORMAL, "\n"); out: (void)w1_unlock_bus(dev); return ret; } /* 1-Wire write_bit <device> <bit_value> */ static int cmd_w1_write_bit(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; unsigned long input = strtoul(argv[2], NULL, 0); int ret; dev = device_get_binding(argv[1]); if (!dev) { shell_error(sh, W1DEV_X_NOT_FOUND, argv[1]); return -EINVAL; } if (input > 1UL) { shell_error(sh, "input must not be > 0b1"); return -EINVAL; } (void)w1_lock_bus(dev); ret = w1_write_byte(dev, (bool)input); if (ret < 0) { shell_error(sh, "Failed to write bit [%d]", ret); } (void)w1_unlock_bus(dev); return ret; } /* 1-Wire write_byte <device> <byte_value> */ static int cmd_w1_write_byte(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; unsigned long input; int pos = 1; bool reset = false; int ret; dev = device_get_binding(argv[pos]); if (!dev) { shell_error(sh, W1DEV_X_NOT_FOUND, argv[pos]); return -EINVAL; } pos++; pos = read_io_options(sh, pos, argv, &reset); if (pos < 0) { return -EINVAL; } 
if (argc <= pos) { shell_error(sh, "Missing data to be written."); return -EINVAL; } input = strtoul(argv[pos], NULL, 0); if (input > 0xFFUL) { shell_error(sh, "input must not be > 0xFF"); return -EINVAL; } (void)w1_lock_bus(dev); if (reset) { ret = w1_reset_bus(dev); if (ret <= 0) { shell_error(sh, "Failed to reset bus [%d]", ret); goto out; } } ret = w1_write_byte(dev, (uint8_t)input); if (ret < 0) { shell_error(sh, "Failed to write byte [%d]", ret); } out: (void)w1_unlock_bus(dev); return ret; } /* 1-Wire write_block <device> <byt1> [byte2, ...] */ static int cmd_w1_write_block(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; int i; int pos = 1; bool reset = false; int ret; dev = device_get_binding(argv[pos]); if (!dev) { shell_error(sh, W1DEV_X_NOT_FOUND, argv[1]); return -EINVAL; } pos++; pos = read_io_options(sh, pos, argv, &reset); if (pos < 0) { return -EINVAL; } if (argc <= pos) { shell_error(sh, "Missing data to be written."); return -EINVAL; } if ((argc - pos) > BUF_SIZE) { shell_error(sh, "Too much data to be written."); return -EINVAL; } (void)w1_lock_bus(dev); i = 0; do { msg_buf[i] = (uint8_t)strtoul(argv[i + pos], NULL, 16); i++; } while ((i + pos) < argc); if (reset) { ret = w1_reset_bus(dev); if (ret <= 0) { shell_error(sh, "Failed to reset bus [%d]", ret); goto out; } } ret = w1_write_block(dev, msg_buf, i); if (ret < 0) { shell_error(sh, "Failed to write block [%d]", ret); } out: (void)w1_unlock_bus(dev); return ret; } /* 1-Wire config <device> <type> <value> */ static int cmd_w1_configure(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; char *type_endptr; char *type_name = argv[2]; int ret; uint32_t type = strtoul(type_name, &type_endptr, 0); uint32_t value = strtoul(argv[3], NULL, 0); dev = device_get_binding(argv[1]); if (!dev) { shell_error(sh, W1DEV_X_NOT_FOUND, argv[1]); return -EINVAL; } /* if type is not given as number, search it via the name */ if (*type_endptr != '\0') { for 
(type = 0; type < ARRAY_SIZE(w1_settings_name); type++) { if (strcmp(type_name, w1_settings_name[type]) == 0) { break; } } if (type == ARRAY_SIZE(w1_settings_name)) { shell_error(sh, "Unknown config name (%s)", type_name); return -ENOTSUP; } } if (type > W1_SETINGS_TYPE_COUNT) { shell_error(sh, "invalid type %u", type); return -EINVAL; } (void)w1_lock_bus(dev); ret = w1_configure(dev, type, value); if (ret < 0) { shell_error(sh, "Failed to configure [%d]", ret); goto out; } shell_info(sh, "Applied config: %s = %u (0x%08x)", w1_settings_name[type], value, value); out: (void)w1_unlock_bus(dev); return ret; } static void search_callback(struct w1_rom rom, void *user_data) { const struct shell *sh = (const struct shell *)user_data; shell_print(sh, "ROM found: %016llx", w1_rom_to_uint64(&rom)); } /* 1-Wire search <device> */ static int cmd_w1_search(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; int ret; dev = device_get_binding(argv[1]); if (!dev) { shell_error(sh, W1DEV_X_NOT_FOUND, argv[1]); return -EINVAL; } (void)w1_lock_bus(dev); ret = w1_search_rom(dev, search_callback, (void *)sh); if (ret < 0) { shell_error(sh, "Failed to initiate search [%d]", ret); } else { shell_print(sh, "Found %d device(s)", ret); } (void)w1_unlock_bus(dev); return ret; } SHELL_STATIC_SUBCMD_SET_CREATE(sub_w1, SHELL_CMD_ARG(reset, NULL, "Reset 1-Wire bus.\n" "Usage: <device>", cmd_w1_reset_bus, 2, 0), SHELL_CMD_ARG(read_bit, NULL, "Read 1-Wire bit.\n" "Usage: <device>", cmd_w1_read_bit, 2, 0), SHELL_CMD_ARG(read_byte, NULL, "Read 1-Wire byte.\n" "Usage: <device>", cmd_w1_read_byte, 2, 0), SHELL_CMD_ARG(read_block, NULL, "Read 1-Wire block.\n" "Usage: <device> <num_bytes>", cmd_w1_read_block, 3, 0), SHELL_CMD_ARG(write_bit, NULL, "Write 1-Wire bit.\n" "Usage: <device> <bit>", cmd_w1_write_bit, 3, 0), SHELL_CMD_ARG(write_byte, NULL, "Write 1-Wire byte.\n" "Usage: <device> [-r] <byte>\n" OPTION_HELP_RESET, cmd_w1_write_byte, 3, 1), SHELL_CMD_ARG(write_block, 
NULL, "Write 1-Wire block.\n" "Usage: <device> [-r] <byte1> [<byte2>, ...]\n" OPTION_HELP_RESET, cmd_w1_write_block, 3, BUF_SIZE), SHELL_CMD_ARG(config, NULL, "Configure 1-Wire host.\n" "Usage: <device> <type> <value>\n" "<type> is either a name or an id.", cmd_w1_configure, 4, 0), SHELL_CMD_ARG(search, NULL, "1-Wire devices.\n" "Usage: <device>", cmd_w1_search, 2, 0), SHELL_SUBCMD_SET_END /* Array terminated. */ ); SHELL_CMD_ARG_REGISTER(w1, &sub_w1, "1-Wire commands", NULL, 2, 0); ```
/content/code_sandbox/drivers/w1/w1_shell.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,913
```unknown # 1-Wire configuration options menuconfig W1 bool prompt "1-Wire bus drivers" select CRC help Enable 1-Wire Drivers if W1 module = W1 module-str = W1 source "subsys/logging/Kconfig.template.log_config" config W1_INIT_PRIORITY int default 65 prompt "Init priority" help 1-Wire device driver initialization priority. config W1_SHELL bool "1-Wire Shell" depends on SHELL help Enable 1-Wire Shell for testing. config W1_SHELL_BUFFER_SIZE int default 16 depends on W1_SHELL prompt "1-Wire Shell buffer size" help Size of the send and receive buffer in bytes. To be able to write more than 8 bytes make sure to also increase SHELL_ARGC_MAX accordingly. rsource "Kconfig.ds2482-800" rsource "Kconfig.ds2484" rsource "Kconfig.ds2477_85" rsource "Kconfig.ds2485" rsource "Kconfig.test" rsource "Kconfig.zephyr_gpio" rsource "Kconfig.zephyr_serial" config W1_NET bool "1-Wire network layer" default y help Enable 1-wire network layer config W1_NET_FORCE_MULTIDROP_ADDRESSING bool "Force 1-Wire multidrop addressing" depends on W1_NET help This option will ignore the number of 1-Wire slave devices defined in the DTS. Specifically, this option avoids the SKIP ROM command, which is otherwise used in case of a single 1-Wire slave on the bus. In most cases, enabling this option is not needed, yet it is essential if multiple 1-Wire devices unspecified in the DTS are interfaced. endif # W1 ```
/content/code_sandbox/drivers/w1/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
416
```unknown # Configuration options for the Zephyr GPIO 1-Wire Master driver config W1_ZEPHYR_GPIO bool "1-wire GPIO" default y depends on DT_HAS_ZEPHYR_W1_GPIO_ENABLED help This option enables the Zephyr GPIO 1-Wire master driver. The bus reset, and bit read and write operations are executed via byte read and write operations on top of the Zephyr GPIO driver interface. if W1_ZEPHYR_GPIO config W1_ZEPHYR_GPIO_TIME_CRITICAL bool "Force time critical operations" default y help This option forces the 1-Wire GPIO driver to use time critical operations for bus reset, and bit read and write operations. Time critical communications operations are not interrupted while being generated. endif # W1_ZEPHYR_GPIO ```
/content/code_sandbox/drivers/w1/Kconfig.zephyr_gpio
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
187
```unknown # Configuration options for the Zephyr serial 1-Wire Master driver config W1_ZEPHYR_SERIAL bool "1-wire Serial" select SERIAL default y depends on DT_HAS_ZEPHYR_W1_SERIAL_ENABLED select UART_USE_RUNTIME_CONFIGURE help This option enables the Zephyr serial 1-Wire master driver. The bus reset, and bit read and write operations are executed via bytes read and write operations on top of the Zephyr serial driver interface. if W1_ZEPHYR_SERIAL config W1_ZEPHYR_SERIAL_BIT_TIMEOUT int "Bit read timeout in us" default 380 help The bit read timeout specifies how long the driver tries to read the uart byte (1-wire standard speed data bit). For serial drivers where poll_out does not block until the transfer is complete, it must at least be greater than 87us (10bits / 115.2 kBd). endif # W1_ZEPHYR_SERIAL ```
/content/code_sandbox/drivers/w1/Kconfig.zephyr_serial
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
222
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_EDAC_IBECC_H_ #define ZEPHYR_DRIVERS_EDAC_IBECC_H_ /* TODO: Add to include/sys/util.h */ #define BITFIELD(val, h, l) (((val) & GENMASK(h, l)) >> l) #define BITFIELD64(val, h, l) (((val) & GENMASK64(h, l)) >> l) #define PCI_VENDOR_ID_INTEL 0x8086 /* Supported SKU map */ #define PCI_DEVICE_ID_SKU5 0x4514 #define PCI_DEVICE_ID_SKU6 0x4528 #define PCI_DEVICE_ID_SKU7 0x452a #define PCI_DEVICE_ID_SKU8 0x4516 #define PCI_DEVICE_ID_SKU9 0x452c #define PCI_DEVICE_ID_SKU10 0x452e #define PCI_DEVICE_ID_SKU11 0x4532 #define PCI_DEVICE_ID_SKU12 0x4518 #define PCI_DEVICE_ID_SKU13 0x451a #define PCI_DEVICE_ID_SKU14 0x4534 #define PCI_DEVICE_ID_SKU15 0x4536 /* TODO: Move to correct place NMI registers */ /* NMI Status and Control Register (NMI_STS_CNT) */ #define NMI_STS_CNT_REG 0x61 /* Set by any source of PCH SERR (SERR_NMI_STS) */ #define NMI_STS_SRC_SERR BIT(7) /* Mask for all source bits in the NMI_STS_CNT_REG */ #define NMI_STS_SRC_MASK GENMASK(7, 6) /** * Writing 1 SERR NMI are disabled and cleared, writing 0 * SERR NMIs are enabled */ #define NMI_STS_SERR_EN BIT(2) /* Mask for all enable bits in the NMI_STS_CNT_REG */ #define NMI_STS_MASK_EN GENMASK(3, 0) /** * In-Band Error Correction Code (IBECC) protects data at a cache line * granularity (64 Bytes) with 16 bits SECDED code. 
* Reports following fields: * - CMI (Converged Memory Interface) Address * - Syndrome * - Error Type (Correctable, Uncorrectable) */ /** * PCI Configuration space registers area */ /* Top of Upper Usable DRAM, offset 0xa8, 64 bit */ #define TOUUD_REG 0x2a #define TOUUD_MASK GENMASK64(38, 20) /* Top of Low Usable DRAM, offset 0xbc, 32 bit */ #define TOLUD_REG 0x2f #define TOLUD_MASK GENMASK(31, 20) /* Total amount of physical memory, offset 0xa0, 64 bit */ #define TOM_REG 0x28 #define TOM_MASK GENMASK64(38, 20) /* Base address for the Host Memory Mapped Configuration space, * offset 0x48, 64 bit */ #define MCHBAR_REG 0x12 #define MCHBAR_MASK GENMASK64(38, 16) #define MCHBAR_ENABLE BIT64(0) /* Size of Host Memory Mapped Configuration space (64K) */ #define MCH_SIZE 0x10000 /* Capability register, offset 0xec, 32 bit */ #define CAPID0_C_REG 0x3b #define CAPID0_C_IBECC_ENABLED BIT(15) /* Register controlling reporting error SERR, offset 0xc8, 16 bit */ #define ERRSTS_REG 0x32 #define ERRSTS_IBECC_COR BIT(6) /* Correctable error */ #define ERRSTS_IBECC_UC BIT(7) /* Uncorrectable error */ /* Register controlling Host Bridge responses to system errors, * offset 0xca, 16 bit * * TODO: Fix this after PCI access is fixed, now we have to access * ERRSTS_REG with 32 bit access and get this 16 bits */ #define ERRCMD_REG 0x32 #define ERRCMD_IBECC_COR BIT(6) /* Correctable error */ #define ERRCMD_IBECC_UC BIT(7) /* Uncorrectable error */ /** * Host Memory Mapped Configuration Space (MCHBAR) registers area */ #define CHANNEL_HASH 0x5024 /* ECC Injection Registers */ #define IBECC_INJ_ADDR_BASE 0xdd88 #define INJ_ADDR_BASE_MASK GENMASK64(38, 6) #define IBECC_INJ_ADDR_MASK 0xdd80 #define INJ_ADDR_BASE_MASK_MASK GENMASK64(38, 6) #define IBECC_INJ_ADDR_CTRL 0xdd98 #define INJ_CTRL_COR 0x1 #define INJ_CTRL_UC 0x5 /* Error Logging Registers */ /* ECC Error Log register, 64 bit (ECC_ERROR_LOG) */ #define IBECC_ECC_ERROR_LOG 0xdd70 /* Uncorrectable (Multiple-bit) Error Status (MERRSTS) */ 
#define ECC_ERROR_MERRSTS BIT64(63) /* Correctable Error Status (CERRSTS) */ #define ECC_ERROR_CERRSTS BIT64(62) #define ECC_ERROR_ERRTYPE(val) BITFIELD64(val, 63, 62) /* CMI address of the address block of main memory where error happened */ #define ECC_ERROR_ERRADD(val) ((val) & GENMASK64(38, 5)) /* ECC Error Syndrome (ERRSYND) */ #define ECC_ERROR_ERRSYND(val) BITFIELD64(val, 61, 46) /* Parity Error Log (PARITY_ERR_LOG) */ #define IBECC_PARITY_ERROR_LOG 0xdd78 /* Error Status (ERRSTS) */ #define PARITY_ERROR_ERRSTS BIT64(63) /* Memory configuration registers */ #define DRAM_MAX_CHANNELS 2 #define DRAM_MAX_DIMMS 2 /* Memory channel decoding register, 32 bit */ #define MAD_INTER_CHAN 0x5000 #define INTER_CHAN_DDR_TYPE(v) BITFIELD(v, 2, 0) /* Enhanced channel mode for LPDDR4 */ #define INTER_CHAN_ECHM(v) BITFIELD(v, 3, 3) /* Channel L mapping to physical channel */ #define INTER_CHAN_CH_L_MAP(v) BITFIELD(v, 4, 4) /* Channel S size in multiples of 0.5GB */ #define INTER_CHAN_CH_S_SIZE BITFIELD(v, 19, 12) /* DRAM decode stage 2 registers, 32 bit */ #define MAD_INTRA_CH(index) (0x5004 + index * sizeof(uint32_t)) /* Virtual DIMM L mapping to physical DIMM */ #define DIMM_L_MAP(v) BITFIELD(v, 0, 0) /* DIMM channel characteristic 2 registers, 32 bit */ #define MAD_DIMM_CH(index) (0x500c + index * sizeof(uint32_t)) /* Size of DIMM L in 0.5GB multiples */ #define DIMM_L_SIZE(v) (BITFIELD(v, 6, 0) << 29) /* DIMM L width of DDR chips (DLW) */ #define DIMM_L_WIDTH(v) BITFIELD(v, 8, 7) /* Size of DIMM S in 0.5GB multiples */ #define DIMM_S_SIZE(v) (BITFIELD(v, 22, 16) << 29) /* DIMM S width of DDR chips (DSW) */ #define DIMM_S_WIDTH(v) BITFIELD(v, 25, 24) /* MC Channel Selection register, 32 bit */ #define CHANNEL_HASH 0x5024 /* MC Enhanced Channel Selection register, 32 bit */ #define CHANNEL_EHASH 0x5028 struct ibecc_error { uint32_t type; uint64_t address; uint16_t syndrome; }; #define PCI_HOST_BRIDGE PCIE_BDF(0, 0, 0) #endif /* ZEPHYR_DRIVERS_EDAC_IBECC_H_ */ ```
/content/code_sandbox/drivers/edac/ibecc.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,730
```c /* * */ #include <stdlib.h> #include <zephyr/device.h> #include <zephyr/shell/shell.h> #include <zephyr/drivers/edac.h> #include "ibecc.h" /** * EDAC Error Injection interface * * edac inject addr [value] Physical memory address base * edac inject mask [value] Physical memory address mask * edac inject error_type Show / Set EDAC error type * edac inject trigger Trigger injection * * edac inject test_default Set default injection parameters * * edac disable_nmi Experimental disable NMI * edac enable_nmi Experimental enable NMI * * EDAC Report interface * * edac info Show EDAC ECC / Parity error info * edac info ecc_error [show|clear] Show ECC Errors * edac info parity_error [show|clear] Show Parity Errors * * Physical memory access interface using devmem shell module * * devmem [width [value]] Physical memory read / write */ static void decode_ecc_error(const struct shell *sh, uint64_t ecc_error) { uint64_t erradd = ECC_ERROR_ERRADD(ecc_error); unsigned long errsynd = ECC_ERROR_ERRSYND(ecc_error); shell_fprintf(sh, SHELL_NORMAL, "CMI Error address: 0x%llx\n", erradd); shell_fprintf(sh, SHELL_NORMAL, "Error Syndrome: 0x%lx\n", errsynd); if (ecc_error & ECC_ERROR_MERRSTS) { shell_fprintf(sh, SHELL_NORMAL, "Uncorrectable Error (UE)\n"); } if (ecc_error & ECC_ERROR_CERRSTS) { shell_fprintf(sh, SHELL_NORMAL, "Correctable Error (CE)\n"); } } static int ecc_error_show(const struct shell *sh, const struct device *dev) { uint64_t error; int err; err = edac_ecc_error_log_get(dev, &error); if (err != 0 && err != -ENODATA) { shell_error(sh, "Error getting error log (err %d)", err); return err; } shell_fprintf(sh, SHELL_NORMAL, "ECC Error: 0x%llx\n", error); if (error != 0) { decode_ecc_error(sh, error); } return 0; } static int parity_error_show(const struct shell *sh, const struct device *dev) { uint64_t error; int err; err = edac_parity_error_log_get(dev, &error); if (err != 0 && err != -ENODATA) { shell_error(sh, "Error getting parity error log (err %d)", err); return 
err; } shell_fprintf(sh, SHELL_NORMAL, "Parity Error: 0x%llx\n", error); return 0; } static int cmd_edac_info(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; int err; dev = DEVICE_DT_GET(DT_NODELABEL(ibecc)); if (!device_is_ready(dev)) { shell_error(sh, "IBECC device not ready"); return -ENODEV; } shell_fprintf(sh, SHELL_NORMAL, "Show EDAC status\n"); err = ecc_error_show(sh, dev); if (err != 0) { return err; } err = parity_error_show(sh, dev); if (err != 0) { return err; } shell_fprintf(sh, SHELL_NORMAL, "Errors correctable: %d Errors uncorrectable %d\n", edac_errors_cor_get(dev), edac_errors_uc_get(dev)); return err; } #if defined(CONFIG_EDAC_ERROR_INJECT) static int cmd_inject_addr(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; int err; dev = DEVICE_DT_GET(DT_NODELABEL(ibecc)); if (!device_is_ready(dev)) { shell_error(sh, "IBECC device not ready"); return -ENODEV; } if (argc > 2) { /* Usage */ shell_fprintf(sh, SHELL_NORMAL, "Usage: edac inject %s [addr]\n", argv[0]); return -ENOTSUP; } if (argc == 1) { uint64_t addr; err = edac_inject_get_param1(dev, &addr); if (err != 0) { shell_error(sh, "Error getting address (err %d)", err); return err; } shell_fprintf(sh, SHELL_NORMAL, "Injection address base: 0x%llx\n", addr); } else { unsigned long value = strtoul(argv[1], NULL, 16); shell_fprintf(sh, SHELL_NORMAL, "Set injection address base to: %s\n", argv[1]); err = edac_inject_set_param1(dev, value); if (err != 0) { shell_error(sh, "Error setting address (err %d)", err); return err; } } return err; } static int cmd_inject_mask(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; int err; dev = DEVICE_DT_GET(DT_NODELABEL(ibecc)); if (!device_is_ready(dev)) { shell_error(sh, "IBECC device not ready"); return -ENODEV; } if (argc > 2) { /* Usage */ shell_fprintf(sh, SHELL_NORMAL, "Usage: edac inject %s [mask]\n", argv[0]); return -ENOTSUP; } if (argc == 1) { uint64_t mask; err = 
edac_inject_get_param2(dev, &mask); if (err != 0) { shell_error(sh, "Error getting mask (err %d)", err); return err; } shell_fprintf(sh, SHELL_NORMAL, "Injection address mask: 0x%llx\n", mask); } else { uint64_t value = strtoul(argv[1], NULL, 16); shell_fprintf(sh, SHELL_NORMAL, "Set injection address mask to %llx\n", value); err = edac_inject_set_param2(dev, value); if (err != 0) { shell_error(sh, "Error setting mask (err %d)", err); return err; } } return err; } static int cmd_inject_trigger(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; dev = DEVICE_DT_GET(DT_NODELABEL(ibecc)); if (!device_is_ready(dev)) { shell_error(sh, "IBECC device not ready"); return -ENODEV; } shell_fprintf(sh, SHELL_NORMAL, "Triggering injection\n"); edac_inject_error_trigger(dev); return 0; } static int cmd_inject_disable_nmi(const struct shell *sh, size_t argc, char **argv) { sys_out8((sys_in8(0x70) | 0x80), 0x70); return 0; } static int cmd_inject_enable_nmi(const struct shell *sh, size_t argc, char **argv) { sys_out8((sys_in8(0x70) & 0x7F), 0x70); return 0; } static const char *get_error_type(uint32_t type) { switch (type) { case EDAC_ERROR_TYPE_DRAM_COR: return "correctable"; case EDAC_ERROR_TYPE_DRAM_UC: return "uncorrectable"; default: return "unknown"; } } static int cmd_inject_error_type_show(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; uint32_t error_type; int err; dev = DEVICE_DT_GET(DT_NODELABEL(ibecc)); if (!device_is_ready(dev)) { shell_error(sh, "IBECC device not ready"); return -ENODEV; } err = edac_inject_get_error_type(dev, &error_type); if (err != 0) { shell_error(sh, "Error getting error type (err %d)", err); return err; } shell_fprintf(sh, SHELL_NORMAL, "Injection error type: %s\n", get_error_type(error_type)); return err; } static int set_error_type(const struct shell *sh, uint32_t error_type) { const struct device *dev; dev = DEVICE_DT_GET(DT_NODELABEL(ibecc)); if (!device_is_ready(dev)) { 
shell_error(sh, "IBECC device not ready"); return -ENODEV; } shell_fprintf(sh, SHELL_NORMAL, "Set injection error type: %s\n", get_error_type(error_type)); return edac_inject_set_error_type(dev, error_type); } static int cmd_inject_error_type_cor(const struct shell *sh, size_t argc, char **argv) { return set_error_type(sh, EDAC_ERROR_TYPE_DRAM_COR); } static int cmd_inject_error_type_uc(const struct shell *sh, size_t argc, char **argv) { return set_error_type(sh, EDAC_ERROR_TYPE_DRAM_UC); } static int cmd_inject_test(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; dev = DEVICE_DT_GET(DT_NODELABEL(ibecc)); if (!device_is_ready(dev)) { shell_error(sh, "IBECC device not ready"); return -ENODEV; } edac_inject_set_param1(dev, 0x1000); edac_inject_set_param2(dev, INJ_ADDR_BASE_MASK_MASK); edac_inject_set_error_type(dev, EDAC_ERROR_TYPE_DRAM_COR); edac_inject_error_trigger(dev); return 0; } SHELL_STATIC_SUBCMD_SET_CREATE(sub_inject_error_type_cmds, SHELL_CMD(correctable, NULL, "Set correctable error type", cmd_inject_error_type_cor), SHELL_CMD(uncorrectable, NULL, "Set uncorrectable error type", cmd_inject_error_type_uc), SHELL_SUBCMD_SET_END /* Array terminated */ ); /* EDAC Error Injection shell commands */ SHELL_STATIC_SUBCMD_SET_CREATE(sub_inject_cmds, SHELL_CMD(addr, NULL, "Get / Set physical address", cmd_inject_addr), SHELL_CMD(mask, NULL, "Get / Set address mask", cmd_inject_mask), SHELL_CMD_ARG(trigger, NULL, "Trigger injection", cmd_inject_trigger, 1, 0), SHELL_CMD(error_type, &sub_inject_error_type_cmds, "Get / Set injection error type", cmd_inject_error_type_show), SHELL_CMD(disable_nmi, NULL, "Disable NMI", cmd_inject_disable_nmi), SHELL_CMD(enable_nmi, NULL, "Enable NMI", cmd_inject_enable_nmi), SHELL_CMD_ARG(test_default, NULL, "Test default injection parameters", cmd_inject_test, 1, 0), SHELL_SUBCMD_SET_END /* Array terminated */ ); #endif /* CONFIG_EDAC_ERROR_INJECT */ static int cmd_ecc_error_show(const struct shell *sh, 
size_t argc, char **argv) { const struct device *dev; dev = DEVICE_DT_GET(DT_NODELABEL(ibecc)); if (!device_is_ready(dev)) { shell_error(sh, "IBECC device not ready"); return -ENODEV; } return ecc_error_show(sh, dev); } static int cmd_ecc_error_clear(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; int err; dev = DEVICE_DT_GET(DT_NODELABEL(ibecc)); if (!device_is_ready(dev)) { shell_error(sh, "IBECC device not ready"); return -ENODEV; } err = edac_ecc_error_log_clear(dev); if (err != 0) { shell_error(sh, "Error clear ecc error log (err %d)", err); return err; } shell_fprintf(sh, SHELL_NORMAL, "ECC Error Log cleared\n"); return 0; } SHELL_STATIC_SUBCMD_SET_CREATE(sub_ecc_error_cmds, SHELL_CMD(show, NULL, "Show ECC errors", cmd_ecc_error_show), SHELL_CMD(clear, NULL, "Clear ECC errors", cmd_ecc_error_clear), SHELL_SUBCMD_SET_END /* Array terminated */ ); static int cmd_parity_error_show(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; dev = DEVICE_DT_GET(DT_NODELABEL(ibecc)); if (!device_is_ready(dev)) { shell_error(sh, "IBECC device not ready"); return -ENODEV; } return parity_error_show(sh, dev); } static int cmd_parity_error_clear(const struct shell *sh, size_t argc, char **argv) { const struct device *dev; int err; dev = DEVICE_DT_GET(DT_NODELABEL(ibecc)); if (!device_is_ready(dev)) { shell_error(sh, "IBECC device not ready"); return -ENODEV; } err = edac_parity_error_log_clear(dev); if (err != 0) { shell_error(sh, "Error clear parity error log (err %d)", err); return err; } shell_fprintf(sh, SHELL_NORMAL, "Parity Error Log cleared\n"); return 0; } SHELL_STATIC_SUBCMD_SET_CREATE(sub_parity_error_cmds, SHELL_CMD(show, NULL, "Show Parity errors", cmd_parity_error_show), SHELL_CMD(clear, NULL, "Clear Parity errors", cmd_parity_error_clear), SHELL_SUBCMD_SET_END /* Array terminated */ ); /* EDAC Info shell commands */ SHELL_STATIC_SUBCMD_SET_CREATE(sub_info_cmds, SHELL_CMD(ecc_error, &sub_ecc_error_cmds, "ECC 
Error Show / Clear commands", cmd_ecc_error_show), SHELL_CMD(parity_error, &sub_parity_error_cmds, "Parity Error Show / Clear commands", cmd_parity_error_show), SHELL_SUBCMD_SET_END /* Array terminated */ ); SHELL_STATIC_SUBCMD_SET_CREATE(sub_edac_cmds, SHELL_CMD(info, &sub_info_cmds, "Show EDAC information\n" "edac info <subcommands>", cmd_edac_info), #if defined(CONFIG_EDAC_ERROR_INJECT) /* This does not work with SHELL_COND_CMD */ SHELL_CMD(inject, &sub_inject_cmds, "Inject ECC error commands\n" "edac inject <subcommands>", NULL), #endif SHELL_SUBCMD_SET_END /* Array terminated. */ ); SHELL_CMD_REGISTER(edac, &sub_edac_cmds, "EDAC information", cmd_edac_info); ```
/content/code_sandbox/drivers/edac/shell.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,258
```unknown # # EDAC configuration options menuconfig EDAC bool "Error Detection and Correction (EDAC) drivers" help Enable Error Detection and Correction (EDAC) driver. if EDAC config EDAC_ERROR_INJECT bool "EDAC Error Injection mechanism" help Enable Error injection capability for test error checking and reporting. Should not be enabled in production system. config EDAC_SHELL bool "EDAC Shell" depends on SHELL help Enable EDAC shell for debugging EDAC. config EDAC_IBECC bool "In-Band ECC (IBECC)" depends on X86 && PCIE help This option selects In-Band ECC (IBECC) IP support. module = EDAC module-str = edac source "subsys/logging/Kconfig.template.log_config" endif #EDAC ```
/content/code_sandbox/drivers/edac/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
182
```c /* * */ #define DT_DRV_COMPAT intel_ibecc #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/drivers/pcie/pcie.h> #include <zephyr/drivers/edac.h> #include "ibecc.h" #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(edac_ibecc, CONFIG_EDAC_LOG_LEVEL); #define DEVICE_NODE DT_NODELABEL(ibecc) struct ibecc_data { mem_addr_t mchbar; edac_notify_callback_f cb; uint32_t error_type; /* Error count */ unsigned int errors_cor; unsigned int errors_uc; }; static void ibecc_write_reg64(const struct device *dev, uint16_t reg, uint64_t value) { struct ibecc_data *data = dev->data; mem_addr_t reg_addr = data->mchbar + reg; sys_write64(value, reg_addr); } static uint64_t ibecc_read_reg64(const struct device *dev, uint16_t reg) { struct ibecc_data *data = dev->data; mem_addr_t reg_addr = data->mchbar + reg; return sys_read64(reg_addr); } #if defined(CONFIG_EDAC_ERROR_INJECT) static void ibecc_write_reg32(const struct device *dev, uint16_t reg, uint32_t value) { struct ibecc_data *data = dev->data; mem_addr_t reg_addr = data->mchbar + reg; sys_write32(value, reg_addr); } #endif static bool ibecc_enabled(const pcie_bdf_t bdf) { return !!(pcie_conf_read(bdf, CAPID0_C_REG) & CAPID0_C_IBECC_ENABLED); } static void ibecc_errcmd_setup(const pcie_bdf_t bdf, bool enable) { uint32_t errcmd; errcmd = pcie_conf_read(bdf, ERRCMD_REG); if (enable) { errcmd |= (ERRCMD_IBECC_COR | ERRCMD_IBECC_UC) << 16; } else { errcmd &= ~(ERRCMD_IBECC_COR | ERRCMD_IBECC_UC) << 16; } pcie_conf_write(bdf, ERRCMD_REG, errcmd); } static void ibecc_errsts_clear(const pcie_bdf_t bdf) { uint32_t errsts; errsts = pcie_conf_read(bdf, ERRSTS_REG); if ((errsts & (ERRSTS_IBECC_COR | ERRSTS_IBECC_UC)) == 0) { return; } pcie_conf_write(bdf, ERRSTS_REG, errsts); } static void parse_ecclog(const struct device *dev, const uint64_t ecclog, struct ibecc_error *error_data) { struct ibecc_data *data = dev->data; if (ecclog == 0) { return; } error_data->type = ECC_ERROR_ERRTYPE(ecclog); error_data->address 
= ECC_ERROR_ERRADD(ecclog); error_data->syndrome = ECC_ERROR_ERRSYND(ecclog); if ((ecclog & ECC_ERROR_MERRSTS) != 0) { data->errors_uc++; } if ((ecclog & ECC_ERROR_CERRSTS) != 0) { data->errors_cor++; } } #if defined(CONFIG_EDAC_ERROR_INJECT) static int inject_set_param1(const struct device *dev, uint64_t addr) { if ((addr & ~INJ_ADDR_BASE_MASK) != 0) { return -EINVAL; } ibecc_write_reg64(dev, IBECC_INJ_ADDR_BASE, addr); return 0; } static int inject_get_param1(const struct device *dev, uint64_t *value) { *value = ibecc_read_reg64(dev, IBECC_INJ_ADDR_BASE); return 0; } static int inject_set_param2(const struct device *dev, uint64_t mask) { if ((mask & ~INJ_ADDR_BASE_MASK_MASK) != 0) { return -EINVAL; } ibecc_write_reg64(dev, IBECC_INJ_ADDR_MASK, mask); return 0; } static int inject_get_param2(const struct device *dev, uint64_t *value) { *value = ibecc_read_reg64(dev, IBECC_INJ_ADDR_MASK); return 0; } static int inject_set_error_type(const struct device *dev, uint32_t error_type) { struct ibecc_data *data = dev->data; data->error_type = error_type; return 0; } static int inject_get_error_type(const struct device *dev, uint32_t *error_type) { struct ibecc_data *data = dev->data; *error_type = data->error_type; return 0; } static int inject_error_trigger(const struct device *dev) { struct ibecc_data *data = dev->data; uint32_t ctrl = 0; switch (data->error_type) { case EDAC_ERROR_TYPE_DRAM_COR: ctrl |= INJ_CTRL_COR; break; case EDAC_ERROR_TYPE_DRAM_UC: ctrl |= INJ_CTRL_UC; break; default: /* This would clear error injection */ break; } ibecc_write_reg32(dev, IBECC_INJ_ADDR_CTRL, ctrl); return 0; } #endif /* CONFIG_EDAC_ERROR_INJECT */ static int ecc_error_log_get(const struct device *dev, uint64_t *value) { *value = ibecc_read_reg64(dev, IBECC_ECC_ERROR_LOG); /** * The ECC Error log register is only valid when ECC_ERROR_CERRSTS * or ECC_ERROR_MERRSTS error status bits are set */ if ((*value & (ECC_ERROR_MERRSTS | ECC_ERROR_CERRSTS)) == 0) { return -ENODATA; } return 
0; } static int ecc_error_log_clear(const struct device *dev) { /* Clear all error bits */ ibecc_write_reg64(dev, IBECC_ECC_ERROR_LOG, ECC_ERROR_MERRSTS | ECC_ERROR_CERRSTS); return 0; } static int parity_error_log_get(const struct device *dev, uint64_t *value) { *value = ibecc_read_reg64(dev, IBECC_PARITY_ERROR_LOG); if (*value == 0) { return -ENODATA; } return 0; } static int parity_error_log_clear(const struct device *dev) { ibecc_write_reg64(dev, IBECC_PARITY_ERROR_LOG, PARITY_ERROR_ERRSTS); return 0; } static int errors_cor_get(const struct device *dev) { struct ibecc_data *data = dev->data; return data->errors_cor; } static int errors_uc_get(const struct device *dev) { struct ibecc_data *data = dev->data; return data->errors_uc; } static int notify_callback_set(const struct device *dev, edac_notify_callback_f cb) { struct ibecc_data *data = dev->data; unsigned int key = irq_lock(); data->cb = cb; irq_unlock(key); return 0; } static const struct edac_driver_api api = { #if defined(CONFIG_EDAC_ERROR_INJECT) /* Error Injection functions */ .inject_set_param1 = inject_set_param1, .inject_get_param1 = inject_get_param1, .inject_set_param2 = inject_set_param2, .inject_get_param2 = inject_get_param2, .inject_set_error_type = inject_set_error_type, .inject_get_error_type = inject_get_error_type, .inject_error_trigger = inject_error_trigger, #endif /* CONFIG_EDAC_ERROR_INJECT */ /* Error reporting & clearing functions */ .ecc_error_log_get = ecc_error_log_get, .ecc_error_log_clear = ecc_error_log_clear, .parity_error_log_get = parity_error_log_get, .parity_error_log_clear = parity_error_log_clear, /* Get error stats */ .errors_cor_get = errors_cor_get, .errors_uc_get = errors_uc_get, /* Notification callback set */ .notify_cb_set = notify_callback_set, }; static int edac_ibecc_init(const struct device *dev) { const pcie_bdf_t bdf = PCI_HOST_BRIDGE; struct ibecc_data *data = dev->data; uint64_t mchbar; uint32_t conf_data; conf_data = pcie_conf_read(bdf, PCIE_CONF_ID); 
switch (conf_data) { case PCIE_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_SKU5): __fallthrough; case PCIE_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_SKU6): __fallthrough; case PCIE_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_SKU7): __fallthrough; case PCIE_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_SKU8): __fallthrough; case PCIE_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_SKU9): __fallthrough; case PCIE_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_SKU10): __fallthrough; case PCIE_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_SKU11): __fallthrough; case PCIE_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_SKU12): __fallthrough; case PCIE_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_SKU13): __fallthrough; case PCIE_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_SKU14): __fallthrough; case PCIE_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_SKU15): break; default: LOG_ERR("PCI Probe failed"); /* LCOV_EXCL_BR_LINE */ return -ENODEV; } if (!ibecc_enabled(bdf)) { LOG_ERR("IBECC is not enabled"); /* LCOV_EXCL_BR_LINE */ return -ENODEV; } mchbar = pcie_conf_read(bdf, MCHBAR_REG); mchbar |= (uint64_t)pcie_conf_read(bdf, MCHBAR_REG + 1) << 32; /* Check that MCHBAR is enabled */ if ((mchbar & MCHBAR_ENABLE) == 0) { LOG_ERR("MCHBAR is not enabled"); /* LCOV_EXCL_BR_LINE */ return -ENODEV; } mchbar &= MCHBAR_MASK; device_map(&data->mchbar, mchbar, MCH_SIZE, K_MEM_CACHE_NONE); /* Enable Host Bridge generated SERR event */ ibecc_errcmd_setup(bdf, true); LOG_INF("IBECC driver initialized"); /* LCOV_EXCL_BR_LINE */ return 0; } static struct ibecc_data ibecc_data; DEVICE_DT_DEFINE(DEVICE_NODE, &edac_ibecc_init, NULL, &ibecc_data, NULL, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &api); /** * An IBECC error causes SERR_NMI_STS set and is indicated by * ERRSTS PCI registers by IBECC_UC and IBECC_COR fields. 
* Following needs to be done: * - Read ECC_ERR_LOG register * - Clear IBECC_UC and IBECC_COR fields of ERRSTS PCI * - Clear MERRSTS & CERRSTS fields of ECC_ERR_LOG register */ static struct k_spinlock nmi_lock; /* NMI handling */ static bool handle_nmi(void) { uint8_t status; status = sys_in8(NMI_STS_CNT_REG); if ((status & NMI_STS_SRC_SERR) == 0) { /* For other NMI sources return false to handle it by * Zephyr exception handler */ return false; } /* Re-enable SERR# NMI sources */ status = (status & NMI_STS_MASK_EN) | NMI_STS_SERR_EN; sys_out8(status, NMI_STS_CNT_REG); status &= ~NMI_STS_SERR_EN; sys_out8(status, NMI_STS_CNT_REG); return true; } bool z_x86_do_kernel_nmi(const struct arch_esf *esf) { const struct device *const dev = DEVICE_DT_GET(DEVICE_NODE); struct ibecc_data *data = dev->data; struct ibecc_error error_data; k_spinlock_key_t key; bool ret = true; uint64_t ecclog; key = k_spin_lock(&nmi_lock); /* Skip the same NMI handling for other cores and return handled */ if (arch_curr_cpu()->id != 0) { ret = true; goto out; } if (!handle_nmi()) { /* Indicate that we do not handle this NMI */ ret = false; goto out; } if (edac_ecc_error_log_get(dev, &ecclog) != 0) { goto out; } parse_ecclog(dev, ecclog, &error_data); if (data->cb != NULL) { data->cb(dev, &error_data); } edac_ecc_error_log_clear(dev); ibecc_errsts_clear(PCI_HOST_BRIDGE); out: k_spin_unlock(&nmi_lock, key); return ret; } ```
/content/code_sandbox/drivers/edac/edac_ibecc.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,833
```objective-c
/*
 */

#ifndef INCLUDE_ZEPHYR_DRIVERS_BBRAM_NPCX_H_
#define INCLUDE_ZEPHYR_DRIVERS_BBRAM_NPCX_H_

#include <stdint.h>

#include <zephyr/devicetree.h>

/** Device config */
struct bbram_npcx_config {
	/** BBRAM base address */
	uintptr_t base_addr;
	/** BBRAM size (Unit:bytes) */
	int size;
	/** Status register base address */
	uintptr_t status_reg_addr;
};

#ifdef CONFIG_BBRAM_NPCX_EMUL
/*
 * Emulated build: back the BBRAM with a static RAM buffer and a static
 * status byte instead of the devicetree-described hardware registers, so
 * the driver can run on native/test targets with no real BBRAM.
 */
#define BBRAM_NPCX_DECL_CONFIG(inst)                                                               \
	static uint8_t bbram_npcx_emul_buffer_##inst[DT_INST_REG_SIZE_BY_NAME(inst, memory)];      \
	static uint8_t bbram_npcx_emul_status_##inst;                                              \
	static const struct bbram_npcx_config bbram_cfg_##inst = {                                 \
		.base_addr = (uintptr_t)bbram_npcx_emul_buffer_##inst,                             \
		.size = DT_INST_REG_SIZE_BY_NAME(inst, memory),                                    \
		.status_reg_addr = (uintptr_t)&bbram_npcx_emul_status_##inst,                      \
	}
#else
/*
 * Real hardware: take the memory window and status register addresses
 * straight from the devicetree "memory" and "status" reg entries.
 */
#define BBRAM_NPCX_DECL_CONFIG(inst)                                                               \
	static const struct bbram_npcx_config bbram_cfg_##inst = {                                 \
		.base_addr = DT_INST_REG_ADDR_BY_NAME(inst, memory),                               \
		.size = DT_INST_REG_SIZE_BY_NAME(inst, memory),                                    \
		.status_reg_addr = DT_INST_REG_ADDR_BY_NAME(inst, status),                         \
	}
#endif

#endif /* INCLUDE_ZEPHYR_DRIVERS_BBRAM_NPCX_H_ */
```
/content/code_sandbox/drivers/bbram/npcx.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
357
```unknown config BBRAM_STM32 bool "ST STM32 Battery-backed RAM drivers" default y depends on DT_HAS_ST_STM32_BBRAM_ENABLED depends on COUNTER_RTC_STM32 || RTC_STM32 help This option enables the BBRAM driver for STM32 family of processors. STM32 BBRAM are 32-bit registers which can be used for storing user application data. They are implemented in the backup domain that remains powered-on by VBAT when the VDD power is switched off. They are not reset by system reset or when the device wakes up from Standby mode. They are reset by a backup domain reset. ```
/content/code_sandbox/drivers/bbram/Kconfig.stm32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
151
```c /* */ #include <zephyr/devicetree.h> #include <zephyr/drivers/emul.h> #include <zephyr/drivers/emul_bbram.h> #include "it8xxx2.h" #define DT_DRV_COMPAT ite_it8xxx2_bbram struct bbram_it8xxx2_emul_config { const struct device *dev; }; #define GET_CONFIG(target) \ ((const struct bbram_it8xxx2_config \ *)(((const struct bbram_it8xxx2_emul_config *)((target)->cfg))->dev->config)) static int it8xxx2_emul_backend_set_data(const struct emul *target, size_t offset, size_t count, const uint8_t *buffer) { const struct bbram_it8xxx2_config *config = GET_CONFIG(target); if (offset + count > config->size) { return -ERANGE; } bytecpy(((uint8_t *)config->base_addr + offset), buffer, count); return 0; } static int it8xxx2_emul_backend_get_data(const struct emul *target, size_t offset, size_t count, uint8_t *buffer) { const struct bbram_it8xxx2_config *config = GET_CONFIG(target); if (offset + count > config->size) { return -ERANGE; } bytecpy(buffer, ((uint8_t *)config->base_addr + offset), count); return 0; } static const struct emul_bbram_driver_api it8xxx2_emul_backend_api = { .set_data = it8xxx2_emul_backend_set_data, .get_data = it8xxx2_emul_backend_get_data, }; #define BBRAM_EMUL_INIT(inst) \ static struct bbram_it8xxx2_emul_config bbram_it8xxx2_emul_config_##inst = { \ .dev = DEVICE_DT_INST_GET(inst), \ }; \ EMUL_DT_INST_DEFINE(inst, NULL, NULL, &bbram_it8xxx2_emul_config_##inst, NULL, \ &it8xxx2_emul_backend_api) DT_INST_FOREACH_STATUS_OKAY(BBRAM_EMUL_INIT); ```
/content/code_sandbox/drivers/bbram/bbram_it8xxx2_emul.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
469
```c /* */ #include <zephyr/devicetree.h> #include <zephyr/drivers/emul.h> #include <zephyr/drivers/emul_bbram.h> #include "npcx.h" #define DT_DRV_COMPAT nuvoton_npcx_bbram struct bbram_npcx_emul_config { const struct device *dev; }; #define GET_CONFIG(target) \ ((const struct bbram_npcx_config \ *)(((const struct bbram_npcx_emul_config *)((target)->cfg))->dev->config)) static int npcx_emul_backend_set_data(const struct emul *target, size_t offset, size_t count, const uint8_t *buffer) { const struct bbram_npcx_config *config = GET_CONFIG(target); if (offset + count > config->size) { return -ERANGE; } bytecpy(((uint8_t *)config->base_addr + offset), buffer, count); return 0; } static int npcx_emul_backend_get_data(const struct emul *target, size_t offset, size_t count, uint8_t *buffer) { const struct bbram_npcx_config *config = GET_CONFIG(target); if (offset + count > config->size) { return -ERANGE; } bytecpy(buffer, ((uint8_t *)config->base_addr + offset), count); return 0; } static const struct emul_bbram_driver_api npcx_emul_backend_api = { .set_data = npcx_emul_backend_set_data, .get_data = npcx_emul_backend_get_data, }; #define BBRAM_EMUL_INIT(inst) \ static struct bbram_npcx_emul_config bbram_npcx_emul_config_##inst = { \ .dev = DEVICE_DT_INST_GET(inst), \ }; \ EMUL_DT_INST_DEFINE(inst, NULL, NULL, &bbram_npcx_emul_config_##inst, NULL, \ &npcx_emul_backend_api) DT_INST_FOREACH_STATUS_OKAY(BBRAM_EMUL_INIT); ```
/content/code_sandbox/drivers/bbram/bbram_npcx_emul.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
438
```c /* * */ #include <stdlib.h> #include <string.h> #include <zephyr/devicetree.h> #include <zephyr/drivers/bbram.h> #include <zephyr/kernel.h> #include <zephyr/shell/shell.h> #include <zephyr/sys/util.h> /* Buffer is only needed for bytes that follow command, device and address. */ #define BUF_ARRAY_CNT (CONFIG_SHELL_ARGC_MAX - 3) static inline int parse_ul(const char *str, unsigned long *result) { char *end; unsigned long val; val = strtoul(str, &end, 0); if (*str == '\0' || *end != '\0') { return -EINVAL; } *result = val; return 0; } static inline int parse_u32(const char *str, uint32_t *result) { unsigned long val; if (parse_ul(str, &val) || val > 0xffffffff) { return -EINVAL; } *result = (uint32_t)val; return 0; } static inline int parse_u8(const char *str, uint8_t *result) { unsigned long val; if (parse_ul(str, &val) || val > 0xff) { return -EINVAL; } *result = (uint8_t)val; return 0; } static inline int parse_device(const struct shell *sh, size_t argc, char *argv[], const struct device **bbram_dev) { if (argc < 2) { shell_error(sh, "Missing BBRAM device"); return -EINVAL; } *bbram_dev = device_get_binding(argv[1]); if (!*bbram_dev) { shell_error(sh, "Given BBRAM device was not found"); return -ENODEV; } return 0; } static int cmd_read(const struct shell *sh, size_t argc, char *argv[]) { const struct device *bbram_dev; uint32_t addr; size_t size; int part_size, ret; ret = parse_device(sh, argc, argv, &bbram_dev); if (ret) { return ret; } if (argc < 3) { /* Dump whole BBRAM if address not provided. */ addr = 0; ret = bbram_get_size(bbram_dev, &size); if (ret < 0) { shell_error(sh, "Can't get BBRAM size: %d", ret); return -EIO; } } else { /* Parse address if provided. */ ret = parse_u32(argv[2], &addr); if (ret) { return ret; } /* If size not provided read one byte. */ size = 1; if (argc >= 4) { /* Parse size if provided. 
*/ ret = parse_u32(argv[3], &size); if (ret) { return ret; } } } for (int cnt = 0; cnt < size; cnt += part_size) { uint8_t data[SHELL_HEXDUMP_BYTES_IN_LINE]; part_size = MIN(size - cnt, SHELL_HEXDUMP_BYTES_IN_LINE); ret = bbram_read(bbram_dev, addr, part_size, data); if (ret != 0) { shell_error(sh, "BBRAM read error: %d", ret); return -EIO; } shell_hexdump_line(sh, addr, data, part_size); addr += part_size; } shell_print(sh, ""); return 0; } static int cmd_write(const struct shell *sh, size_t argc, char *argv[]) { const struct device *bbram_dev; uint8_t buf[BUF_ARRAY_CNT]; uint32_t addr; size_t size = 0; int ret; ret = parse_device(sh, argc, argv, &bbram_dev); if (ret) { return ret; } if (argc < 3) { shell_error(sh, "Missing address"); return -EINVAL; } /* Parse address. */ ret = parse_u32(argv[2], &addr); if (ret) { return ret; } /* Parse bytes and place them in the buffer. */ for (int i = 3; i < argc; i++) { ret = parse_u8(argv[i], &buf[i - 3]); if (ret) { return ret; } size++; } if (size == 0) { shell_error(sh, "Missing data"); return -EINVAL; } ret = bbram_write(bbram_dev, addr, size, buf); if (ret < 0) { shell_error(sh, "BBRAM write error: %d", ret); return -EIO; } return 0; } static void device_name_get(size_t idx, struct shell_static_entry *entry) { const struct device *dev = shell_device_lookup(idx, NULL); entry->syntax = (dev != NULL) ? 
dev->name : NULL; entry->handler = NULL; entry->help = NULL; entry->subcmd = NULL; } SHELL_DYNAMIC_CMD_CREATE(dsub_device_name, device_name_get); SHELL_STATIC_SUBCMD_SET_CREATE(bbram_cmds, SHELL_CMD_ARG(read, &dsub_device_name, "<device> [<address>] [<count>]", cmd_read, 2, 2), SHELL_CMD_ARG(write, &dsub_device_name, "<device> <address> <byte> [<byte>...]", cmd_write, 4, BUF_ARRAY_CNT), SHELL_SUBCMD_SET_END); static int cmd_bbram(const struct shell *sh, size_t argc, char **argv) { shell_error(sh, "%s: unknown parameter: %s", argv[0], argv[1]); return -EINVAL; } SHELL_CMD_ARG_REGISTER(bbram, &bbram_cmds, "Battery-backed RAM shell commands", cmd_bbram, 2, 0); ```
/content/code_sandbox/drivers/bbram/bbram_shell.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,320
```c /* * */ #define DT_DRV_COMPAT microchip_mcp7940n #include <string.h> #include <zephyr/device.h> #include <zephyr/devicetree.h> #include <zephyr/drivers/i2c.h> #include <zephyr/drivers/bbram.h> #include <zephyr/kernel.h> #include <zephyr/sys/util.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(bbram_microchip_mcp7940n, CONFIG_BBRAM_LOG_LEVEL); #define MICROCHIP_MCP7940N_SRAM_OFFSET 0x20 #define MICROCHIP_MCP7940N_SRAM_SIZE 64 #define MICROCHIP_MCP7940N_RTCWKDAY_REGISTER_ADDRESS 0x03 #define MICROCHIP_MCP7940N_RTCWKDAY_VBATEN_BIT BIT(3) #define MICROCHIP_MCP7940N_RTCWKDAY_PWRFAIL_BIT BIT(4) struct microchip_mcp7940n_bbram_data { struct k_mutex lock; }; struct microchip_mcp7940n_bbram_config { struct i2c_dt_spec i2c; }; static int microchip_mcp7940n_bbram_init(const struct device *dev) { const struct microchip_mcp7940n_bbram_config *config = dev->config; struct microchip_mcp7940n_bbram_data *data = dev->data; int32_t rc = 0; uint8_t buffer; if (!device_is_ready(config->i2c.bus)) { LOG_ERR("I2C device %s is not ready", config->i2c.bus->name); return -ENODEV; } k_mutex_init(&data->lock); rc = i2c_reg_read_byte_dt(&config->i2c, MICROCHIP_MCP7940N_RTCWKDAY_REGISTER_ADDRESS, &buffer); if (rc != 0) { LOG_ERR("Failed to read RTCWKDAY register: %d", rc); } return rc; } static int microchip_mcp7940n_bbram_size(const struct device *dev, size_t *size) { *size = MICROCHIP_MCP7940N_SRAM_SIZE; return 0; } static int microchip_mcp7940n_bbram_is_invalid(const struct device *dev) { const struct microchip_mcp7940n_bbram_config *config = dev->config; struct microchip_mcp7940n_bbram_data *data = dev->data; int32_t rc = 0; uint8_t buffer; bool data_valid = true; k_mutex_lock(&data->lock, K_FOREVER); rc = i2c_reg_read_byte_dt(&config->i2c, MICROCHIP_MCP7940N_RTCWKDAY_REGISTER_ADDRESS, &buffer); if ((buffer & MICROCHIP_MCP7940N_RTCWKDAY_PWRFAIL_BIT)) { data_valid = false; buffer &= (buffer ^ MICROCHIP_MCP7940N_RTCWKDAY_PWRFAIL_BIT); rc = i2c_reg_write_byte_dt(&config->i2c, 
MICROCHIP_MCP7940N_RTCWKDAY_REGISTER_ADDRESS, buffer); if (rc != 0) { LOG_ERR("Failed to write RTCWKDAY register: %d", rc); goto finish; } } finish: k_mutex_unlock(&data->lock); if (rc == 0 && data_valid == true) { rc = 1; } return rc; } static int microchip_mcp7940n_bbram_check_standby_power(const struct device *dev) { const struct microchip_mcp7940n_bbram_config *config = dev->config; struct microchip_mcp7940n_bbram_data *data = dev->data; int32_t rc = 0; uint8_t buffer; bool power_enabled = true; k_mutex_lock(&data->lock, K_FOREVER); rc = i2c_reg_read_byte_dt(&config->i2c, MICROCHIP_MCP7940N_RTCWKDAY_REGISTER_ADDRESS, &buffer); if (!(buffer & MICROCHIP_MCP7940N_RTCWKDAY_VBATEN_BIT)) { power_enabled = false; buffer |= MICROCHIP_MCP7940N_RTCWKDAY_VBATEN_BIT; rc = i2c_reg_write_byte_dt(&config->i2c, MICROCHIP_MCP7940N_RTCWKDAY_REGISTER_ADDRESS, buffer); if (rc != 0) { LOG_ERR("Failed to write RTCWKDAY register: %d", rc); goto finish; } } finish: k_mutex_unlock(&data->lock); if (rc == 0 && power_enabled == true) { rc = 1; } return rc; } static int microchip_mcp7940n_bbram_read(const struct device *dev, size_t offset, size_t size, uint8_t *buffer) { const struct microchip_mcp7940n_bbram_config *config = dev->config; struct microchip_mcp7940n_bbram_data *data = dev->data; size_t i = 0; int32_t rc = 0; if (size == 0 || (offset + size) > MICROCHIP_MCP7940N_SRAM_SIZE) { return -EINVAL; } k_mutex_lock(&data->lock, K_FOREVER); while (i < size) { LOG_DBG("Read from 0x%x", (MICROCHIP_MCP7940N_SRAM_OFFSET + offset + i)); rc = i2c_reg_read_byte_dt(&config->i2c, (MICROCHIP_MCP7940N_SRAM_OFFSET + offset + i), &buffer[i]); if (rc != 0) { goto finish; } ++i; } finish: k_mutex_unlock(&data->lock); return rc; } static int microchip_mcp7940n_bbram_write(const struct device *dev, size_t offset, size_t size, const uint8_t *buffer) { const struct microchip_mcp7940n_bbram_config *config = dev->config; struct microchip_mcp7940n_bbram_data *data = dev->data; size_t i = 0; int32_t rc = 0; 
if (size == 0 || (offset + size) > MICROCHIP_MCP7940N_SRAM_SIZE) { return -EINVAL; } k_mutex_lock(&data->lock, K_FOREVER); while (i < size) { LOG_DBG("Write 0x%x to 0x%x", buffer[i], (MICROCHIP_MCP7940N_SRAM_OFFSET + offset + i)); rc = i2c_reg_write_byte_dt(&config->i2c, (MICROCHIP_MCP7940N_SRAM_OFFSET + offset + i), buffer[i]); if (rc != 0) { goto finish; } ++i; } finish: k_mutex_unlock(&data->lock); return rc; } static const struct bbram_driver_api microchip_mcp7940n_bbram_api = { .get_size = microchip_mcp7940n_bbram_size, .check_invalid = microchip_mcp7940n_bbram_is_invalid, .check_standby_power = microchip_mcp7940n_bbram_check_standby_power, .read = microchip_mcp7940n_bbram_read, .write = microchip_mcp7940n_bbram_write, }; #define MICROCHIP_MCP7940N_BBRAM_DEVICE(inst) \ static struct microchip_mcp7940n_bbram_data microchip_mcp7940n_bbram_data_##inst; \ static const struct microchip_mcp7940n_bbram_config \ microchip_mcp7940n_bbram_config_##inst = { \ .i2c = I2C_DT_SPEC_INST_GET(inst), \ }; \ DEVICE_DT_INST_DEFINE(inst, \ &microchip_mcp7940n_bbram_init, \ NULL, \ &microchip_mcp7940n_bbram_data_##inst, \ &microchip_mcp7940n_bbram_config_##inst, \ POST_KERNEL, \ CONFIG_BBRAM_INIT_PRIORITY, \ &microchip_mcp7940n_bbram_api); DT_INST_FOREACH_STATUS_OKAY(MICROCHIP_MCP7940N_BBRAM_DEVICE) ```
/content/code_sandbox/drivers/bbram/bbram_microchip_mcp7940n.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,854
```unknown config BBRAM_IT8XXX2 bool "ITE IT81202 Battery-backed RAM drivers" default y depends on DT_HAS_ITE_IT8XXX2_BBRAM_ENABLED help This option enables the BBRAM driver for RISCV_ITE family of processors. config BBRAM_IT8XXX2_EMUL bool "Emulator for the ITE IT81202 BBRAM driver" default y depends on BBRAM_IT8XXX2 depends on EMUL help Enable the emulator for the ITE IT81202 BBRAM. ```
/content/code_sandbox/drivers/bbram/Kconfig.it8xxx2
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
126
```c /* */ #define DT_DRV_COMPAT zephyr_bbram_emul #include <zephyr/drivers/bbram.h> #include <string.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(bbram, CONFIG_BBRAM_LOG_LEVEL); /** Device config */ struct bbram_emul_config { /** BBRAM size (Unit:bytes) */ int size; }; /** Device data */ struct bbram_emul_data { /** Memory */ uint8_t *data; /** Status register */ struct { /** True if BBRAM is in an invalid state */ uint8_t is_invalid : 1; /** True if BBRAM incurred a standby power failure */ uint8_t standby_failure : 1; /** True if BBRAM incurred a power failure */ uint8_t power_failure : 1; } status; }; int bbram_emul_set_invalid(const struct device *dev, bool is_invalid) { struct bbram_emul_data *data = dev->data; data->status.is_invalid = is_invalid; return 0; } int bbram_emul_set_standby_power_state(const struct device *dev, bool failure) { struct bbram_emul_data *data = dev->data; data->status.standby_failure = failure; return 0; } int bbram_emul_set_power_state(const struct device *dev, bool failure) { struct bbram_emul_data *data = dev->data; data->status.power_failure = failure; return 0; } static int bbram_emul_check_invalid(const struct device *dev) { struct bbram_emul_data *data = dev->data; bool is_invalid = data->status.is_invalid; data->status.is_invalid = false; return is_invalid; } static int bbram_emul_check_standby_power(const struct device *dev) { struct bbram_emul_data *data = dev->data; bool failure = data->status.standby_failure; data->status.standby_failure = false; return failure; } static int bbram_emul_check_power(const struct device *dev) { struct bbram_emul_data *data = dev->data; bool failure = data->status.power_failure; data->status.power_failure = false; return failure; } static int bbram_emul_get_size(const struct device *dev, size_t *size) { const struct bbram_emul_config *config = dev->config; *size = config->size; return 0; } static int bbram_emul_read(const struct device *dev, size_t offset, size_t size, uint8_t 
*data) { const struct bbram_emul_config *config = dev->config; struct bbram_emul_data *dev_data = dev->data; if (size < 1 || offset + size > config->size || bbram_emul_check_invalid(dev)) { return -EFAULT; } memcpy(data, dev_data->data + offset, size); return 0; } static int bbram_emul_write(const struct device *dev, size_t offset, size_t size, const uint8_t *data) { const struct bbram_emul_config *config = dev->config; struct bbram_emul_data *dev_data = dev->data; if (size < 1 || offset + size > config->size || bbram_emul_check_invalid(dev)) { return -EFAULT; } memcpy(dev_data->data + offset, data, size); return 0; } static const struct bbram_driver_api bbram_emul_driver_api = { .check_invalid = bbram_emul_check_invalid, .check_standby_power = bbram_emul_check_standby_power, .check_power = bbram_emul_check_power, .get_size = bbram_emul_get_size, .read = bbram_emul_read, .write = bbram_emul_write, }; #define BBRAM_INIT(inst) \ static uint8_t bbram_emul_mem_##inst[DT_INST_PROP(inst, size)]; \ static struct bbram_emul_data bbram_emul_data_##inst = { \ .data = bbram_emul_mem_##inst, \ }; \ static struct bbram_emul_config bbram_emul_config_##inst = { \ .size = DT_INST_PROP(inst, size), \ }; \ DEVICE_DT_INST_DEFINE(inst, NULL, NULL, &bbram_emul_data_##inst, \ &bbram_emul_config_##inst, PRE_KERNEL_1, CONFIG_BBRAM_INIT_PRIORITY, \ &bbram_emul_driver_api); DT_INST_FOREACH_STATUS_OKAY(BBRAM_INIT); ```
/content/code_sandbox/drivers/bbram/bbram_emul.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,026
```unknown config BBRAM_EMUL bool "Battery-backed RAM emulated drivers" default y depends on DT_HAS_ZEPHYR_BBRAM_EMUL_ENABLED help This option enables the BBRAM emulation driver. ```
/content/code_sandbox/drivers/bbram/Kconfig.bbram_emul
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
50
```c /* * */ #define DT_DRV_COMPAT nuvoton_npcx_bbram #include <zephyr/drivers/bbram.h> #include <errno.h> #include <zephyr/sys/util.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(npcx_bbram, CONFIG_BBRAM_LOG_LEVEL); #include "npcx.h" #define NPCX_STATUS_IBBR BIT(7) #define NPCX_STATUS_VSBY BIT(1) #define NPCX_STATUS_VCC1 BIT(0) #define DRV_STATUS(dev) \ (*((volatile uint8_t *)((const struct bbram_npcx_config *)(dev)->config)->status_reg_addr)) static int get_bit_and_reset(const struct device *dev, int mask) { int result = DRV_STATUS(dev) & mask; /* * Clear the bit(s): * For emulator, write 0 to clear status bit(s). * For real chip, write 1 to clear status bit(s). */ #ifdef CONFIG_BBRAM_NPCX_EMUL DRV_STATUS(dev) &= ~mask; #else DRV_STATUS(dev) = mask; #endif return result; } static int bbram_npcx_check_invalid(const struct device *dev) { return get_bit_and_reset(dev, NPCX_STATUS_IBBR); } static int bbram_npcx_check_standby_power(const struct device *dev) { return get_bit_and_reset(dev, NPCX_STATUS_VSBY); } static int bbram_npcx_check_power(const struct device *dev) { return get_bit_and_reset(dev, NPCX_STATUS_VCC1); } static int bbram_npcx_get_size(const struct device *dev, size_t *size) { const struct bbram_npcx_config *config = dev->config; *size = config->size; return 0; } static int bbram_npcx_read(const struct device *dev, size_t offset, size_t size, uint8_t *data) { const struct bbram_npcx_config *config = dev->config; if (size < 1 || offset + size > config->size || bbram_npcx_check_invalid(dev)) { return -EINVAL; } bytecpy(data, ((uint8_t *)config->base_addr + offset), size); return 0; } static int bbram_npcx_write(const struct device *dev, size_t offset, size_t size, const uint8_t *data) { const struct bbram_npcx_config *config = dev->config; if (size < 1 || offset + size > config->size || bbram_npcx_check_invalid(dev)) { return -EINVAL; } bytecpy(((uint8_t *)config->base_addr + offset), data, size); return 0; } static const struct bbram_driver_api 
bbram_npcx_driver_api = { .check_invalid = bbram_npcx_check_invalid, .check_standby_power = bbram_npcx_check_standby_power, .check_power = bbram_npcx_check_power, .get_size = bbram_npcx_get_size, .read = bbram_npcx_read, .write = bbram_npcx_write, }; #define BBRAM_INIT(inst) \ BBRAM_NPCX_DECL_CONFIG(inst); \ DEVICE_DT_INST_DEFINE(inst, NULL, NULL, NULL, &bbram_cfg_##inst, PRE_KERNEL_1, \ CONFIG_BBRAM_INIT_PRIORITY, &bbram_npcx_driver_api); DT_INST_FOREACH_STATUS_OKAY(BBRAM_INIT); ```
/content/code_sandbox/drivers/bbram/bbram_npcx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
750
```c /* * */ #include <zephyr/drivers/bbram.h> #include <zephyr/internal/syscall_handler.h> static inline int z_vrfy_bbram_check_invalid(const struct device *dev) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_BBRAM)); return z_impl_bbram_check_invalid(dev); } #include <zephyr/syscalls/bbram_check_invalid_mrsh.c> static inline int z_vrfy_bbram_check_standby_power(const struct device *dev) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_BBRAM)); return z_impl_bbram_check_standby_power(dev); } #include <zephyr/syscalls/bbram_check_standby_power_mrsh.c> static inline int z_vrfy_bbram_check_power(const struct device *dev) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_BBRAM)); return z_impl_bbram_check_power(dev); } #include <zephyr/syscalls/bbram_check_power_mrsh.c> static inline int z_vrfy_bbram_get_size(const struct device *dev, size_t *size) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_BBRAM)); K_OOPS(K_SYSCALL_MEMORY_WRITE(size, sizeof(size_t))); return z_impl_bbram_get_size(dev, size); } #include <zephyr/syscalls/bbram_get_size_mrsh.c> static inline int z_vrfy_bbram_read(const struct device *dev, size_t offset, size_t size, uint8_t *data) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_BBRAM)); K_OOPS(K_SYSCALL_MEMORY_WRITE(data, size)); return z_impl_bbram_read(dev, offset, size, data); } #include <zephyr/syscalls/bbram_read_mrsh.c> static inline int z_vrfy_bbram_write(const struct device *dev, size_t offset, size_t size, const uint8_t *data) { K_OOPS(K_SYSCALL_OBJ(dev, K_OBJ_DRIVER_BBRAM)); K_OOPS(K_SYSCALL_MEMORY_READ(data, size)); return z_impl_bbram_write(dev, offset, size, data); } #include <zephyr/syscalls/bbram_write_mrsh.c> ```
/content/code_sandbox/drivers/bbram/bbram_handlers.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
486
```unknown config BBRAM_XEC bool "Microchip XEC Battery-backed RAM drivers" default y depends on DT_HAS_MICROCHIP_XEC_BBRAM_ENABLED help This option enables the BBRAM driver for Microchip XEC family of processors. ```
/content/code_sandbox/drivers/bbram/Kconfig.xec
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
60
```objective-c /* */ #ifndef INCLUDE_ZEPHYR_DRIVERS_BBRAM_IT8XXX2_H_ #define INCLUDE_ZEPHYR_DRIVERS_BBRAM_IT8XXX2_H_ #include <stdint.h> #include <zephyr/devicetree.h> /** Device config */ struct bbram_it8xxx2_config { /** BBRAM base address */ uintptr_t base_addr; /** BBRAM size (Unit:bytes) */ int size; }; #ifdef CONFIG_BBRAM_IT8XXX2_EMUL #define BBRAM_IT8XXX2_DECL_CONFIG(inst) \ static uint8_t bbram_it8xxx2_emul_buffer_##inst[DT_INST_REG_SIZE(inst)]; \ static const struct bbram_it8xxx2_config bbram_cfg_##inst = { \ .base_addr = (uintptr_t)bbram_it8xxx2_emul_buffer_##inst, \ .size = DT_INST_REG_SIZE(inst), \ } #else #define BBRAM_IT8XXX2_DECL_CONFIG(inst) \ static const struct bbram_it8xxx2_config bbram_cfg_##inst = { \ .base_addr = DT_INST_REG_ADDR(inst), \ .size = DT_INST_REG_SIZE(inst), \ } #endif #endif /* INCLUDE_ZEPHYR_DRIVERS_BBRAM_IT8XXX2_H_ */ ```
/content/code_sandbox/drivers/bbram/it8xxx2.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
291
```c /* * */ #define DT_DRV_COMPAT ite_it8xxx2_bbram #include <errno.h> #include <zephyr/drivers/bbram.h> #include <zephyr/logging/log.h> #include <zephyr/sys/util.h> #ifndef CONFIG_BBRAM_IT8XXX2_EMUL #include <chip_chipregs.h> #else /* Emulation register values */ enum bram_indices { BRAM_IDX_VALID_FLAGS0, BRAM_IDX_VALID_FLAGS1, BRAM_IDX_VALID_FLAGS2, BRAM_IDX_VALID_FLAGS3, }; #endif #include "it8xxx2.h" LOG_MODULE_REGISTER(it8xxx2_bbram, CONFIG_BBRAM_LOG_LEVEL); #define BRAM_VALID_MAGIC 0x4252414D /* "BRAM" */ #define BRAM_VALID_MAGIC_FIELD0 (BRAM_VALID_MAGIC & 0xff) #define BRAM_VALID_MAGIC_FIELD1 ((BRAM_VALID_MAGIC >> 8) & 0xff) #define BRAM_VALID_MAGIC_FIELD2 ((BRAM_VALID_MAGIC >> 16) & 0xff) #define BRAM_VALID_MAGIC_FIELD3 ((BRAM_VALID_MAGIC >> 24) & 0xff) static int bbram_it8xxx2_read(const struct device *dev, size_t offset, size_t size, uint8_t *data) { const struct bbram_it8xxx2_config *config = dev->config; if (size < 1 || offset + size > config->size) { return -EINVAL; } bytecpy(data, ((uint8_t *)config->base_addr + offset), size); return 0; } static int bbram_it8xxx2_write(const struct device *dev, size_t offset, size_t size, const uint8_t *data) { const struct bbram_it8xxx2_config *config = dev->config; if (size < 1 || offset + size > config->size) { return -EINVAL; } bytecpy(((uint8_t *)config->base_addr + offset), data, size); return 0; } static int bbram_it8xxx2_size(const struct device *dev, size_t *size) { const struct bbram_it8xxx2_config *config = dev->config; *size = config->size; return 0; } static const struct bbram_driver_api bbram_it8xxx2_driver_api = { .read = bbram_it8xxx2_read, .write = bbram_it8xxx2_write, .get_size = bbram_it8xxx2_size, }; static int bbram_it8xxx2_init(const struct device *dev) { const struct bbram_it8xxx2_config *config = dev->config; uint8_t *base_addr = (uint8_t *)config->base_addr; uint8_t *bram_valid_flag0 = base_addr + BRAM_IDX_VALID_FLAGS0; uint8_t *bram_valid_flag1 = base_addr + BRAM_IDX_VALID_FLAGS1; uint8_t 
*bram_valid_flag2 = base_addr + BRAM_IDX_VALID_FLAGS2; uint8_t *bram_valid_flag3 = base_addr + BRAM_IDX_VALID_FLAGS3; int size = config->size; if ((*bram_valid_flag0 != BRAM_VALID_MAGIC_FIELD0) || (*bram_valid_flag1 != BRAM_VALID_MAGIC_FIELD1) || (*bram_valid_flag2 != BRAM_VALID_MAGIC_FIELD2) || (*bram_valid_flag3 != BRAM_VALID_MAGIC_FIELD3)) { /* * Magic does not match, so BRAM must be uninitialized. Clear * entire Bank0 and Bank1 BRAM, and set magic value. */ for (int i = 0; i < size; i++) { *(base_addr + i) = 0; } *bram_valid_flag0 = BRAM_VALID_MAGIC_FIELD0; *bram_valid_flag1 = BRAM_VALID_MAGIC_FIELD1; *bram_valid_flag2 = BRAM_VALID_MAGIC_FIELD2; *bram_valid_flag3 = BRAM_VALID_MAGIC_FIELD3; } return 0; } #define BBRAM_INIT(inst) \ BBRAM_IT8XXX2_DECL_CONFIG(inst); \ DEVICE_DT_INST_DEFINE(inst, bbram_it8xxx2_init, NULL, NULL, &bbram_cfg_##inst, \ PRE_KERNEL_1, CONFIG_BBRAM_INIT_PRIORITY, \ &bbram_it8xxx2_driver_api); DT_INST_FOREACH_STATUS_OKAY(BBRAM_INIT); ```
/content/code_sandbox/drivers/bbram/bbram_it8xxx2.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
969
```unknown config BBRAM_MICROCHIP_MCP7940N bool "Microchip MCP7940N SRAM BBRAM driver" default y depends on DT_HAS_MICROCHIP_MCP7940N_ENABLED select I2C help Enable driver for Microchip MCP7940N SRAM based battery-backed RAM. config BBRAM_MICROCHIP_MCP7940N_EMUL bool "Emulator for the Microchip MCP7940N SRAM BBRAM driver" default y depends on BBRAM_MICROCHIP_MCP7940N depends on EMUL help Enable the emulator for the Microchip MCP7940N SRAM based battery-backed RAM. ```
/content/code_sandbox/drivers/bbram/Kconfig.microchip
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
153
```unknown config BBRAM_NPCX bool "Nuvoton NPCX embedded controller (EC) Battery-backed RAM drivers" default y depends on DT_HAS_NUVOTON_NPCX_BBRAM_ENABLED help This option enables the BBRAM driver for NPCX family of processors. config BBRAM_NPCX_EMUL bool "Emulator for the NPCX BBRAM driver" default y depends on BBRAM_NPCX depends on EMUL help Enable the emulator for the NPCX BBRAM. ```
/content/code_sandbox/drivers/bbram/Kconfig.npcx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
117
```c /* */ #define DT_DRV_COMPAT microchip_mcp7940n #include <zephyr/device.h> #include <zephyr/drivers/emul.h> #include <zephyr/drivers/emul_bbram.h> #include <zephyr/drivers/i2c.h> #include <zephyr/drivers/i2c_emul.h> #include <zephyr/logging/log.h> LOG_MODULE_DECLARE(bbram_microchip_mcp7940n, CONFIG_BBRAM_LOG_LEVEL); #define MICROCHIP_MCP7940N_SRAM_OFFSET 0x20 #define MICROCHIP_MCP7940N_SRAM_SIZE 64 #define MICROCHIP_MCP7940N_RTCWKDAY_REGISTER_ADDRESS 0x03 #define MICROCHIP_MCP7940N_RTCWKDAY_VBATEN_BIT BIT(3) #define MICROCHIP_MCP7940N_RTCWKDAY_PWRFAIL_BIT BIT(4) struct mcp7940n_emul_cfg { }; struct mcp7940n_emul_data { uint8_t rtcwkday; uint8_t data[MICROCHIP_MCP7940N_SRAM_SIZE]; }; static int mcp7940n_emul_init(const struct emul *target, const struct device *parent) { ARG_UNUSED(target); ARG_UNUSED(parent); return 0; } static int mcp7940n_emul_transfer_i2c(const struct emul *target, struct i2c_msg *msgs, int num_msgs, int addr) { struct mcp7940n_emul_data *data = target->data; i2c_dump_msgs_rw(target->dev, msgs, num_msgs, addr, false); if (num_msgs < 1) { LOG_ERR("Invalid number of messages: %d", num_msgs); return -EIO; } if (FIELD_GET(I2C_MSG_READ, msgs->flags)) { LOG_ERR("Unexpected read"); return -EIO; } if (msgs->len < 1) { LOG_ERR("Unexpected msg0 length %d", msgs->len); return -EIO; } uint8_t regn = msgs->buf[0]; bool is_read = FIELD_GET(I2C_MSG_READ, msgs->flags) == 1; bool is_stop = FIELD_GET(I2C_MSG_STOP, msgs->flags) == 1; if (!is_stop && !is_read) { /* First message was a write with the register number, check next message */ msgs++; is_read = FIELD_GET(I2C_MSG_READ, msgs->flags) == 1; is_stop = FIELD_GET(I2C_MSG_STOP, msgs->flags) == 1; } if (is_read) { /* Read data */ if (regn == MICROCHIP_MCP7940N_RTCWKDAY_REGISTER_ADDRESS) { msgs->buf[0] = data->rtcwkday; return 0; } if (regn >= MICROCHIP_MCP7940N_SRAM_OFFSET && regn + msgs->len <= MICROCHIP_MCP7940N_SRAM_OFFSET + MICROCHIP_MCP7940N_SRAM_SIZE) { for (int i = 0; i < msgs->len; ++i) { msgs->buf[i] 
= data->data[regn + i - MICROCHIP_MCP7940N_SRAM_OFFSET]; } return 0; } } else { /* Write data */ if (regn == MICROCHIP_MCP7940N_RTCWKDAY_REGISTER_ADDRESS) { data->rtcwkday = msgs->buf[1]; return 0; } if (regn >= MICROCHIP_MCP7940N_SRAM_OFFSET && regn + msgs->len - 1 <= MICROCHIP_MCP7940N_SRAM_OFFSET + MICROCHIP_MCP7940N_SRAM_SIZE) { for (int i = 0; i < msgs->len; ++i) { data->data[regn + i - MICROCHIP_MCP7940N_SRAM_OFFSET] = msgs->buf[1 + i]; } return 0; } } return -EIO; } static const struct i2c_emul_api mcp7940n_emul_api_i2c = { .transfer = mcp7940n_emul_transfer_i2c, }; static int mcp7940n_emul_backend_set_data(const struct emul *target, size_t offset, size_t count, const uint8_t *buffer) { struct mcp7940n_emul_data *data = target->data; if (offset + count > MICROCHIP_MCP7940N_SRAM_SIZE) { return -ERANGE; } for (size_t i = 0; i < count; ++i) { data->data[offset + i] = buffer[i]; } return 0; } static int mcp7940n_emul_backend_get_data(const struct emul *target, size_t offset, size_t count, uint8_t *buffer) { struct mcp7940n_emul_data *data = target->data; if (offset + count > MICROCHIP_MCP7940N_SRAM_SIZE) { return -ERANGE; } for (size_t i = 0; i < count; ++i) { buffer[i] = data->data[offset + i]; } return 0; } static const struct emul_bbram_driver_api mcp7940n_emul_backend_api = { .set_data = mcp7940n_emul_backend_set_data, .get_data = mcp7940n_emul_backend_get_data, }; #define MCP7940N_EMUL(inst) \ static const struct mcp7940n_emul_cfg mcp7940n_emul_cfg_##inst; \ static struct mcp7940n_emul_data mcp7940n_emul_data_##inst; \ EMUL_DT_INST_DEFINE(inst, mcp7940n_emul_init, &mcp7940n_emul_data_##inst, \ &mcp7940n_emul_cfg_##inst, &mcp7940n_emul_api_i2c, \ &mcp7940n_emul_backend_api) DT_INST_FOREACH_STATUS_OKAY(MCP7940N_EMUL) ```
/content/code_sandbox/drivers/bbram/bbram_microchip_mcp7940n_emul.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,358
```c /* * */ #define DT_DRV_COMPAT microchip_xec_bbram #include <zephyr/drivers/bbram.h> #include <errno.h> #include <soc.h> #include <zephyr/sys/util.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(bbram, CONFIG_BBRAM_LOG_LEVEL); /** Device config */ struct bbram_xec_config { /** BBRAM base address */ uint8_t *base; /** BBRAM size (Unit:bytes) */ int size; }; static int bbram_xec_check_invalid(const struct device *dev) { struct vbatr_regs *const regs = (struct vbatr_regs *)(DT_REG_ADDR_BY_NAME( DT_NODELABEL(pcr), vbatr)); if (regs->PFRS & BIT(MCHP_VBATR_PFRS_VBAT_RST_POS)) { regs->PFRS |= BIT(MCHP_VBATR_PFRS_VBAT_RST_POS); LOG_ERR("VBAT power rail failure"); return -EFAULT; } return 0; } static int bbram_xec_get_size(const struct device *dev, size_t *size) { const struct bbram_xec_config *dcfg = dev->config; *size = dcfg->size; return 0; } static int bbram_xec_read(const struct device *dev, size_t offset, size_t size, uint8_t *data) { const struct bbram_xec_config *dcfg = dev->config; if (size < 1 || offset + size > dcfg->size) { LOG_ERR("Invalid params"); return -EFAULT; } bytecpy(data, dcfg->base + offset, size); return 0; } static int bbram_xec_write(const struct device *dev, size_t offset, size_t size, const uint8_t *data) { const struct bbram_xec_config *dcfg = dev->config; if (size < 1 || offset + size > dcfg->size) { LOG_ERR("Invalid params"); return -EFAULT; } bytecpy(dcfg->base + offset, data, size); return 0; } static const struct bbram_driver_api bbram_xec_driver_api = { .check_invalid = bbram_xec_check_invalid, .get_size = bbram_xec_get_size, .read = bbram_xec_read, .write = bbram_xec_write, }; #define BBRAM_INIT(inst) \ static const struct bbram_xec_config bbram_cfg_##inst = { \ .base = (uint8_t *)(DT_INST_REG_ADDR(inst)), \ .size = DT_INST_REG_SIZE(inst), \ }; \ DEVICE_DT_INST_DEFINE(inst, NULL, NULL, NULL, &bbram_cfg_##inst,\ PRE_KERNEL_1, CONFIG_BBRAM_INIT_PRIORITY, \ &bbram_xec_driver_api); DT_INST_FOREACH_STATUS_OKAY(BBRAM_INIT); ```
/content/code_sandbox/drivers/bbram/bbram_xec.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
655
```unknown menuconfig BBRAM bool "Battery-backed RAM (BBRAM) drivers" help Enable BBRAM (battery-backed RAM) driver configuration. if BBRAM module = BBRAM module-str = bbram source "subsys/logging/Kconfig.template.log_config" config BBRAM_SHELL bool "Battery-backed RAM shell" depends on SHELL help Enable the BBRAM shell with read and write commands. config BBRAM_INIT_PRIORITY int "Init priority" # In STM32, BBRAM is a part of RTC. In this case init priority must be # lower than COUNTER_INIT_PRIORITY. default 65 if BBRAM_STM32 # MCP7940N is an I2C device, therefore the init priority must be # greater than I2C_INIT_PRIORITY. default 55 if BBRAM_MICROCHIP_MCP7940N default 10 help BBRAM driver initialization priority source "drivers/bbram/Kconfig.npcx" source "drivers/bbram/Kconfig.it8xxx2" source "drivers/bbram/Kconfig.bbram_emul" source "drivers/bbram/Kconfig.microchip" source "drivers/bbram/Kconfig.xec" source "drivers/bbram/Kconfig.stm32" endif # BBRAM ```
/content/code_sandbox/drivers/bbram/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
292
```c
/*
 * Power-domain driver for Intel ADSP (Audio DSP) power gates, driven
 * through the ACE_PWRCTL / ACE_PWRSTS registers.
 */

#include <zephyr/kernel.h>
#include <zephyr/pm/device.h>
#include <zephyr/pm/device_runtime.h>
#include <adsp_shim.h>
#include <adsp_power.h>
#if CONFIG_SOC_INTEL_ACE15_MTPM
#include <adsp_power.h>
#endif /* CONFIG_SOC_INTEL_ACE15_MTPM */
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(power_domain_intel_adsp, LOG_LEVEL_INF);

struct pg_bits {
	/* Bit in ACE_PWRCTL used to request power for this domain. */
	uint32_t SPA_bit;
	/* Bit in ACE_PWRSTS that reports the domain's current power state. */
	uint32_t CPA_bit;
};

#ifdef CONFIG_PM_DEVICE
/*
 * Request power on/off for a domain. When enabling, sets the SPA bit in
 * ACE_PWRCTL and busy-waits (up to 10000 us, polling every 1 us) for the
 * CPA status bit; returns -EIO on timeout. When disabling on ACE15, the
 * HST domain is only released if a magic key is present in
 * adsp_pending_buffer — returns -EINVAL otherwise.
 */
static int pd_intel_adsp_set_power_enable(struct pg_bits *bits, bool power_enable)
{
	uint16_t SPA_bit_mask = BIT(bits->SPA_bit);

	if (power_enable) {
		sys_write16(sys_read16((mem_addr_t)ACE_PWRCTL) | SPA_bit_mask,
			    (mem_addr_t)ACE_PWRCTL);

		if (!WAIT_FOR(sys_read16((mem_addr_t)ACE_PWRSTS) & BIT(bits->CPA_bit), 10000,
			      k_busy_wait(1))) {
			return -EIO;
		}
	} else {
#if CONFIG_SOC_INTEL_ACE15_MTPM
		extern uint32_t adsp_pending_buffer;

		if (bits->SPA_bit == INTEL_ADSP_HST_DOMAIN_BIT) {
			/* volatile read: buffer may be written by other agents. */
			volatile uint32_t *key_read_ptr = &adsp_pending_buffer;
			uint32_t key_value = *key_read_ptr;

			if (key_value != INTEL_ADSP_ACE15_MAGIC_KEY)
				return -EINVAL;
		}
#endif
		sys_write16(sys_read16((mem_addr_t)ACE_PWRCTL) & ~(SPA_bit_mask),
			    (mem_addr_t)ACE_PWRCTL);
	}
	return 0;
}

/*
 * PM hook: power the domain on resume (then notify children TURN_ON),
 * power it off on suspend (after notifying children TURN_OFF).
 * TURN_ON/TURN_OFF on the domain itself are accepted no-ops.
 */
static int pd_intel_adsp_pm_action(const struct device *dev, enum pm_device_action action)
{
	struct pg_bits *reg_bits = (struct pg_bits *)dev->data;
	int ret = 0;

	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		ret = pd_intel_adsp_set_power_enable(reg_bits, true);
		if (ret == 0) {
			pm_device_children_action_run(dev, PM_DEVICE_ACTION_TURN_ON, NULL);
		}
		break;
	case PM_DEVICE_ACTION_SUSPEND:
		/* Children are turned off before the rail is released. */
		pm_device_children_action_run(dev, PM_DEVICE_ACTION_TURN_OFF, NULL);
		ret = pd_intel_adsp_set_power_enable(reg_bits, false);
		break;
	case PM_DEVICE_ACTION_TURN_ON:
		break;
	case PM_DEVICE_ACTION_TURN_OFF:
		break;
	default:
		return -ENOTSUP;
	}

	return ret;
}
#endif /* CONFIG_PM_DEVICE */

/* Domains start suspended; runtime PM powers them up on first use. */
static int pd_intel_adsp_init(const struct device *dev)
{
	pm_device_init_suspended(dev);
	return pm_device_runtime_enable(dev);
}

#define DT_DRV_COMPAT intel_adsp_power_domain

/*
 * NOTE(review): SPA_bit and CPA_bit are both taken from the same
 * bit-position property — the control and status registers appear to
 * share a bit layout; confirm against the ACE hardware documentation.
 */
#define POWER_DOMAIN_DEVICE(id)                                                                    \
	static struct pg_bits pd_pg_reg##id = {                                                    \
		.SPA_bit = DT_INST_PROP(id, bit_position),                                         \
		.CPA_bit = DT_INST_PROP(id, bit_position),                                         \
	};                                                                                         \
	PM_DEVICE_DT_INST_DEFINE(id, pd_intel_adsp_pm_action);                                     \
	DEVICE_DT_INST_DEFINE(id, pd_intel_adsp_init, PM_DEVICE_DT_INST_GET(id),                   \
			      &pd_pg_reg##id, NULL, POST_KERNEL,                                   \
			      CONFIG_POWER_DOMAIN_INTEL_ADSP_INIT_PRIORITY, NULL);

DT_INST_FOREACH_STATUS_OKAY(POWER_DOMAIN_DEVICE)
```
/content/code_sandbox/drivers/power_domain/power_domain_intel_adsp.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
747
```c /* * */ #define DT_DRV_COMPAT st_stm32_bbram #include <errno.h> #include <zephyr/drivers/bbram.h> #include <zephyr/logging/log.h> #include <zephyr/sys/util.h> #include <stm32_ll_pwr.h> #include <stm32_ll_rtc.h> LOG_MODULE_REGISTER(bbram, CONFIG_BBRAM_LOG_LEVEL); #define STM32_BKP_REG_BYTES 4 #ifdef TAMP /* If a SoC has a TAMP peripherals, then the backup registers are defined there, * not in the RTC. */ #define STM32_BKP_REG_OFFSET (TAMP_BASE + offsetof(TAMP_TypeDef, BKP0R) - RTC_BASE) #else #define STM32_BKP_REG_OFFSET offsetof(RTC_TypeDef, BKP0R) #endif #define STM32_BKP_REG_INDEX(offset) ((offset) >> 2) #define STM32_BKP_REG_BYTE_INDEX(offset) ((offset)&0x3UL) #define STM32_BKP_REG(i) (((volatile uint32_t *)config->base_addr)[(i)]) /** Device config */ struct bbram_stm32_config { const struct device *parent; /* BBRAM base address */ uintptr_t base_addr; /* BBRAM size in bytes. */ int size; }; static int bbram_stm32_read(const struct device *dev, size_t offset, size_t size, uint8_t *data) { const struct bbram_stm32_config *config = dev->config; uint32_t reg, begin, to_copy; if (size < 1 || offset + size > config->size) { return -EFAULT; } for (size_t read = 0; read < size; read += to_copy) { reg = STM32_BKP_REG(STM32_BKP_REG_INDEX(offset + read)); begin = STM32_BKP_REG_BYTE_INDEX(offset + read); to_copy = MIN(STM32_BKP_REG_BYTES - begin, size - read); bytecpy(data + read, (uint8_t *)&reg + begin, to_copy); } return 0; } static int bbram_stm32_write(const struct device *dev, size_t offset, size_t size, const uint8_t *data) { const struct bbram_stm32_config *config = dev->config; uint32_t reg, begin, to_copy; if (size < 1 || offset + size > config->size) { return -EFAULT; } #if defined(PWR_CR_DBP) || defined(PWR_CR1_DBP) || defined(PWR_DBPCR_DBP) || defined(PWR_DBPR_DBP) LL_PWR_EnableBkUpAccess(); #endif /* PWR_CR_DBP || PWR_CR1_DBP || PWR_DBPCR_DBP || PWR_DBPR_DBP */ for (size_t written = 0; written < size; written += to_copy) { reg = 
STM32_BKP_REG(STM32_BKP_REG_INDEX(offset + written)); begin = STM32_BKP_REG_BYTE_INDEX(offset + written); to_copy = MIN(STM32_BKP_REG_BYTES - begin, size - written); bytecpy((uint8_t *)&reg + begin, data + written, to_copy); STM32_BKP_REG(STM32_BKP_REG_INDEX(offset + written)) = reg; } #if defined(PWR_CR_DBP) || defined(PWR_CR1_DBP) || defined(PWR_DBPCR_DBP) || defined(PWR_DBPR_DBP) LL_PWR_DisableBkUpAccess(); #endif /* PWR_CR_DBP || PWR_CR1_DBP || PWR_DBPCR_DBP || PWR_DBPR_DBP */ return 0; } static int bbram_stm32_get_size(const struct device *dev, size_t *size) { const struct bbram_stm32_config *config = dev->config; *size = config->size; return 0; } static const struct bbram_driver_api bbram_stm32_driver_api = { .read = bbram_stm32_read, .write = bbram_stm32_write, .get_size = bbram_stm32_get_size, }; static int bbram_stm32_init(const struct device *dev) { const struct bbram_stm32_config *config = dev->config; if (!device_is_ready(config->parent)) { LOG_ERR("Device %s is not ready", config->parent->name); return -ENODEV; } return 0; } #define BBRAM_INIT(inst) \ static const struct bbram_stm32_config bbram_cfg_##inst = { \ .parent = DEVICE_DT_GET(DT_INST_PARENT(inst)), \ .base_addr = DT_REG_ADDR(DT_INST_PARENT(inst)) + STM32_BKP_REG_OFFSET, \ .size = DT_INST_PROP(inst, st_backup_regs) * STM32_BKP_REG_BYTES, \ }; \ DEVICE_DT_INST_DEFINE(inst, bbram_stm32_init, NULL, NULL, &bbram_cfg_##inst, PRE_KERNEL_1, \ CONFIG_BBRAM_INIT_PRIORITY, &bbram_stm32_driver_api); DT_INST_FOREACH_STATUS_OKAY(BBRAM_INIT); ```
/content/code_sandbox/drivers/bbram/bbram_stm32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,131
```unknown menuconfig POWER_DOMAIN bool "Power domain drivers" help Include drivers for power domains in system config if POWER_DOMAIN module = POWER_DOMAIN module-str = power_domain source "subsys/logging/Kconfig.template.log_config" config POWER_DOMAIN_INIT_PRIORITY int "Power domain init priority" default 75 help Power domain initialization priority. config POWER_DOMAIN_GPIO bool "GPIO controlled power domain" default y depends on DT_HAS_POWER_DOMAIN_GPIO_ENABLED depends on GPIO depends on PM_DEVICE || !PM_DEVICE_POWER_DOMAIN depends on TIMEOUT_64BIT select DEVICE_DEPS if POWER_DOMAIN_GPIO config POWER_DOMAIN_GPIO_INIT_PRIORITY int "GPIO power domain init priority" default POWER_DOMAIN_INIT_PRIORITY help GPIO power domain initialization priority. endif #POWER_DOMAIN_GPIO_MONITOR config POWER_DOMAIN_INTEL_ADSP bool "Use Intel ADSP power gating mechanisms" default y depends on DT_HAS_INTEL_ADSP_POWER_DOMAIN_ENABLED select DEVICE_DEPS help Include Intel ADSP power domain control mechanisms if POWER_DOMAIN_INTEL_ADSP config POWER_DOMAIN_INTEL_ADSP_INIT_PRIORITY int "Intel ADSP power domain init priority" default KERNEL_INIT_PRIORITY_DEFAULT help Intel ADSP power domain initialization priority. endif #POWER_DOMAIN_INTEL_ADSP config POWER_DOMAIN_GPIO_MONITOR bool "GPIO monitor for sensing power on rail" default y depends on DT_HAS_POWER_DOMAIN_GPIO_MONITOR_ENABLED depends on GPIO select DEVICE_DEPS if POWER_DOMAIN_GPIO_MONITOR config POWER_DOMAIN_GPIO_MONITOR_INIT_PRIORITY int "GPIO monitor power domain init priority" default POWER_DOMAIN_INIT_PRIORITY help GPIO monitor power domain initialization priority. endif #POWER_DOMAIN_GPIO_MONITOR endif ```
/content/code_sandbox/drivers/power_domain/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
366
```c /* * Organisation (CSIRO) ABN 41 687 119 230. * */ #define DT_DRV_COMPAT power_domain_gpio #include <zephyr/kernel.h> #include <zephyr/drivers/gpio.h> #include <zephyr/pm/device.h> #include <zephyr/pm/device_runtime.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(power_domain_gpio, CONFIG_POWER_DOMAIN_LOG_LEVEL); struct pd_gpio_config { struct gpio_dt_spec enable; uint32_t startup_delay_us; uint32_t off_on_delay_us; }; struct pd_gpio_data { k_timeout_t next_boot; }; struct pd_visitor_context { const struct device *domain; enum pm_device_action action; }; #ifdef CONFIG_PM_DEVICE_POWER_DOMAIN static int pd_on_domain_visitor(const struct device *dev, void *context) { struct pd_visitor_context *visitor_context = context; /* Only run action if the device is on the specified domain */ if (!dev->pm || (dev->pm_base->domain != visitor_context->domain)) { return 0; } (void)pm_device_action_run(dev, visitor_context->action); return 0; } #endif static int pd_gpio_pm_action(const struct device *dev, enum pm_device_action action) { #ifdef CONFIG_PM_DEVICE_POWER_DOMAIN struct pd_visitor_context context = {.domain = dev}; #endif const struct pd_gpio_config *cfg = dev->config; struct pd_gpio_data *data = dev->data; int64_t next_boot_ticks; int rc = 0; /* Validate that blocking API's can be used */ if (!k_can_yield()) { LOG_ERR("Blocking actions cannot run in this context"); return -ENOTSUP; } switch (action) { case PM_DEVICE_ACTION_RESUME: /* Wait until we can boot again */ k_sleep(data->next_boot); /* Switch power on */ gpio_pin_set_dt(&cfg->enable, 1); LOG_INF("%s is now ON", dev->name); /* Wait for domain to come up */ k_sleep(K_USEC(cfg->startup_delay_us)); #ifdef CONFIG_PM_DEVICE_POWER_DOMAIN /* Notify devices on the domain they are now powered */ context.action = PM_DEVICE_ACTION_TURN_ON; (void)device_supported_foreach(dev, pd_on_domain_visitor, &context); #endif break; case PM_DEVICE_ACTION_SUSPEND: #ifdef CONFIG_PM_DEVICE_POWER_DOMAIN /* Notify devices on the 
domain that power is going down */ context.action = PM_DEVICE_ACTION_TURN_OFF; (void)device_supported_foreach(dev, pd_on_domain_visitor, &context); #endif /* Switch power off */ gpio_pin_set_dt(&cfg->enable, 0); LOG_INF("%s is now OFF", dev->name); /* Store next time we can boot */ next_boot_ticks = k_uptime_ticks() + k_us_to_ticks_ceil32(cfg->off_on_delay_us); data->next_boot = K_TIMEOUT_ABS_TICKS(next_boot_ticks); break; case PM_DEVICE_ACTION_TURN_ON: /* Actively control the enable pin now that the device is powered */ gpio_pin_configure_dt(&cfg->enable, GPIO_OUTPUT_INACTIVE); LOG_DBG("%s is OFF and powered", dev->name); break; case PM_DEVICE_ACTION_TURN_OFF: /* Let the enable pin float while device is not powered */ gpio_pin_configure_dt(&cfg->enable, GPIO_DISCONNECTED); LOG_DBG("%s is OFF and not powered", dev->name); break; default: rc = -ENOTSUP; } return rc; } static int pd_gpio_init(const struct device *dev) { const struct pd_gpio_config *cfg = dev->config; struct pd_gpio_data *data = dev->data; if (!gpio_is_ready_dt(&cfg->enable)) { LOG_ERR("GPIO port %s is not ready", cfg->enable.port->name); return -ENODEV; } /* We can't know how long the domain has been off for before boot */ data->next_boot = K_TIMEOUT_ABS_US(cfg->off_on_delay_us); /* Boot according to state */ return pm_device_driver_init(dev, pd_gpio_pm_action); } #define POWER_DOMAIN_DEVICE(id) \ static const struct pd_gpio_config pd_gpio_##id##_cfg = { \ .enable = GPIO_DT_SPEC_INST_GET(id, enable_gpios), \ .startup_delay_us = DT_INST_PROP(id, startup_delay_us), \ .off_on_delay_us = DT_INST_PROP(id, off_on_delay_us), \ }; \ static struct pd_gpio_data pd_gpio_##id##_data; \ PM_DEVICE_DT_INST_DEFINE(id, pd_gpio_pm_action); \ DEVICE_DT_INST_DEFINE(id, pd_gpio_init, PM_DEVICE_DT_INST_GET(id), \ &pd_gpio_##id##_data, &pd_gpio_##id##_cfg, \ POST_KERNEL, CONFIG_POWER_DOMAIN_GPIO_INIT_PRIORITY, \ NULL); DT_INST_FOREACH_STATUS_OKAY(POWER_DOMAIN_DEVICE) ```
/content/code_sandbox/drivers/power_domain/power_domain_gpio.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,073
```unknown # USB configuration options source "drivers/usb/bc12/Kconfig" source "drivers/usb/udc/Kconfig" source "drivers/usb/uhc/Kconfig" source "drivers/usb/uvb/Kconfig" source "drivers/usb/device/Kconfig" source "drivers/usb/common/Kconfig" ```
/content/code_sandbox/drivers/usb/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
68
```c /* * */ #define DT_DRV_COMPAT power_domain_gpio_monitor #include <zephyr/kernel.h> #include <zephyr/drivers/gpio.h> #include <zephyr/pm/device.h> #include <zephyr/pm/device_runtime.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(power_domain_gpio_monitor, CONFIG_POWER_DOMAIN_LOG_LEVEL); struct pd_gpio_monitor_config { struct gpio_dt_spec power_good_gpio; }; struct pd_gpio_monitor_data { struct gpio_callback callback; const struct device *dev; bool is_powered; }; struct pd_visitor_context { const struct device *domain; enum pm_device_action action; }; static int pd_on_domain_visitor(const struct device *dev, void *context) { struct pd_visitor_context *visitor_context = context; /* Only run action if the device is on the specified domain */ if (!dev->pm || (dev->pm_base->domain != visitor_context->domain)) { return 0; } dev->pm->usage = 0; (void)pm_device_action_run(dev, visitor_context->action); return 0; } static void pd_gpio_monitor_callback(const struct device *port, struct gpio_callback *cb, gpio_port_pins_t pins) { struct pd_gpio_monitor_data *data = CONTAINER_OF(cb, struct pd_gpio_monitor_data, callback); const struct pd_gpio_monitor_config *config = data->dev->config; const struct device *dev = data->dev; struct pd_visitor_context context = {.domain = dev}; int rc; rc = gpio_pin_get_dt(&config->power_good_gpio); if (rc < 0) { LOG_WRN("Failed to read gpio logic level"); return; } data->is_powered = rc; if (rc == 0) { context.action = PM_DEVICE_ACTION_SUSPEND; (void)device_supported_foreach(dev, pd_on_domain_visitor, &context); context.action = PM_DEVICE_ACTION_TURN_OFF; (void)device_supported_foreach(dev, pd_on_domain_visitor, &context); return; } pm_device_children_action_run(data->dev, PM_DEVICE_ACTION_TURN_ON, NULL); } static int pd_gpio_monitor_pm_action(const struct device *dev, enum pm_device_action action) { struct pd_gpio_monitor_data *data = dev->data; switch (action) { case PM_DEVICE_ACTION_TURN_ON: case PM_DEVICE_ACTION_TURN_OFF: return 
-ENOTSUP; case PM_DEVICE_ACTION_RESUME: if (!data->is_powered) { return -EAGAIN; } break; default: break; } return 0; } static int pd_gpio_monitor_init(const struct device *dev) { const struct pd_gpio_monitor_config *config = dev->config; struct pd_gpio_monitor_data *data = dev->data; int rc; data->dev = dev; if (!gpio_is_ready_dt(&config->power_good_gpio)) { LOG_ERR("GPIO port %s is not ready", config->power_good_gpio.port->name); return -ENODEV; } rc = gpio_pin_configure_dt(&config->power_good_gpio, GPIO_INPUT); if (rc) { LOG_ERR("Failed to configure GPIO"); goto unconfigure_pin; } rc = gpio_pin_interrupt_configure_dt(&config->power_good_gpio, GPIO_INT_EDGE_BOTH); if (rc) { gpio_pin_configure_dt(&config->power_good_gpio, GPIO_DISCONNECTED); LOG_ERR("Failed to configure GPIO interrupt"); goto unconfigure_interrupt; } gpio_init_callback(&data->callback, pd_gpio_monitor_callback, BIT(config->power_good_gpio.pin)); rc = gpio_add_callback_dt(&config->power_good_gpio, &data->callback); if (rc) { LOG_ERR("Failed to add GPIO callback"); goto remove_callback; } pm_device_init_suspended(dev); return pm_device_runtime_enable(dev); remove_callback: gpio_remove_callback(config->power_good_gpio.port, &data->callback); unconfigure_interrupt: gpio_pin_interrupt_configure_dt(&config->power_good_gpio, GPIO_INT_DISABLE); unconfigure_pin: gpio_pin_configure_dt(&config->power_good_gpio, GPIO_DISCONNECTED); return rc; } #define POWER_DOMAIN_DEVICE(inst) \ static const struct pd_gpio_monitor_config pd_gpio_monitor_config_##inst = { \ .power_good_gpio = GPIO_DT_SPEC_INST_GET(inst, gpios), \ }; \ static struct pd_gpio_monitor_data pd_gpio_monitor_data_##inst; \ PM_DEVICE_DT_INST_DEFINE(inst, pd_gpio_monitor_pm_action); \ DEVICE_DT_INST_DEFINE(inst, pd_gpio_monitor_init, \ PM_DEVICE_DT_INST_GET(inst), &pd_gpio_monitor_data_##inst, \ &pd_gpio_monitor_config_##inst, POST_KERNEL, \ CONFIG_POWER_DOMAIN_GPIO_MONITOR_INIT_PRIORITY, NULL); DT_INST_FOREACH_STATUS_OKAY(POWER_DOMAIN_DEVICE) ```
/content/code_sandbox/drivers/power_domain/power_domain_gpio_monitor.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,022
```c /* * */ #define DT_DRV_COMPAT atmel_sam0_usb #define LOG_LEVEL CONFIG_USB_DRIVER_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(usb_dc_sam0); #include <zephyr/kernel.h> #include <zephyr/usb/usb_device.h> #include <zephyr/drivers/pinctrl.h> #include <soc.h> #include <string.h> #include <zephyr/irq.h> #define NVM_USB_PAD_TRANSN_POS 45 #define NVM_USB_PAD_TRANSN_SIZE 5 #define NVM_USB_PAD_TRANSP_POS 50 #define NVM_USB_PAD_TRANSP_SIZE 5 #define NVM_USB_PAD_TRIM_POS 55 #define NVM_USB_PAD_TRIM_SIZE 3 #define USB_SAM0_IN_EP 0x80 #define REGS ((Usb *)DT_INST_REG_ADDR(0)) #define USB_NUM_ENDPOINTS DT_INST_PROP(0, num_bidir_endpoints) /* The endpoint size stored in USB.PCKSIZE.SIZE */ enum usb_sam0_pcksize_size { USB_SAM0_PCKSIZE_SIZE_8 = 0, USB_SAM0_PCKSIZE_SIZE_16, USB_SAM0_PCKSIZE_SIZE_32, USB_SAM0_PCKSIZE_SIZE_64, USB_SAM0_PCKSIZE_SIZE_128, USB_SAM0_PCKSIZE_SIZE_256, USB_SAM0_PCKSIZE_SIZE_512, USB_SAM0_PCKSIZE_SIZE_1023, }; static const uint16_t usb_sam0_pcksize_bytes[] = { [USB_SAM0_PCKSIZE_SIZE_8] = 8, [USB_SAM0_PCKSIZE_SIZE_16] = 16, [USB_SAM0_PCKSIZE_SIZE_32] = 32, [USB_SAM0_PCKSIZE_SIZE_64] = 64, [USB_SAM0_PCKSIZE_SIZE_128] = 128, [USB_SAM0_PCKSIZE_SIZE_256] = 256, [USB_SAM0_PCKSIZE_SIZE_512] = 512, [USB_SAM0_PCKSIZE_SIZE_1023] = 1023, }; BUILD_ASSERT(ARRAY_SIZE(usb_sam0_pcksize_bytes) == 8); struct usb_sam0_data { UsbDeviceDescriptor descriptors[USB_NUM_ENDPOINTS]; usb_dc_status_callback cb; usb_dc_ep_callback ep_cb[2][USB_NUM_ENDPOINTS]; uint8_t addr; uint32_t out_at; }; static struct usb_sam0_data usb_sam0_data_0; PINCTRL_DT_INST_DEFINE(0); static const struct pinctrl_dev_config *pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0); static struct usb_sam0_data *usb_sam0_get_data(void) { return &usb_sam0_data_0; } /* Handles interrupts on an endpoint */ static void usb_sam0_ep_isr(uint8_t ep) { struct usb_sam0_data *data = usb_sam0_get_data(); UsbDevice *regs = &REGS->DEVICE; UsbDeviceEndpoint *endpoint = &regs->DeviceEndpoint[ep]; uint32_t intflag = 
endpoint->EPINTFLAG.reg; endpoint->EPINTFLAG.reg = intflag; if ((intflag & USB_DEVICE_EPINTFLAG_RXSTP) != 0U) { /* Setup */ data->ep_cb[0][ep](ep, USB_DC_EP_SETUP); } if ((intflag & USB_DEVICE_EPINTFLAG_TRCPT0) != 0U) { /* Out (to device) data received */ data->ep_cb[0][ep](ep, USB_DC_EP_DATA_OUT); } if ((intflag & USB_DEVICE_EPINTFLAG_TRCPT1) != 0U) { /* In (to host) transmit complete */ data->ep_cb[1][ep](ep | USB_SAM0_IN_EP, USB_DC_EP_DATA_IN); if (data->addr != 0U) { /* Commit the pending address update. This * must be done after the ack to the host * completes else the ack will get dropped. */ regs->DADD.reg = data->addr; data->addr = 0U; } } } /* Top level interrupt handler */ static void usb_sam0_isr(void) { struct usb_sam0_data *data = usb_sam0_get_data(); UsbDevice *regs = &REGS->DEVICE; uint32_t intflag = regs->INTFLAG.reg; uint32_t epint = regs->EPINTSMRY.reg; uint8_t ep; /* Acknowledge all interrupts */ regs->INTFLAG.reg = intflag; if ((intflag & USB_DEVICE_INTFLAG_EORST) != 0U) { UsbDeviceEndpoint *endpoint = &regs->DeviceEndpoint[0]; /* The device clears some of the configuration of EP0 * when it receives the EORST. Re-enable interrupts. 
*/ endpoint->EPINTENSET.reg = USB_DEVICE_EPINTENSET_TRCPT0 | USB_DEVICE_EPINTENSET_TRCPT1 | USB_DEVICE_EPINTENSET_RXSTP; data->cb(USB_DC_RESET, NULL); } /* Dispatch the endpoint interrupts */ for (ep = 0U; epint != 0U; epint >>= 1) { /* Scan bit-by-bit as the Cortex-M0 doesn't have ffs */ if ((epint & 1) != 0U) { usb_sam0_ep_isr(ep); } ep++; } } /* Wait for the device to process the last config write */ static void usb_sam0_wait_syncbusy(void) { UsbDevice *regs = &REGS->DEVICE; while (regs->SYNCBUSY.reg != 0) { } } /* Load the pad calibration from the built-in fuses */ static void usb_sam0_load_padcal(void) { UsbDevice *regs = &REGS->DEVICE; uint32_t pad_transn; uint32_t pad_transp; uint32_t pad_trim; #ifdef USB_FUSES_TRANSN_ADDR pad_transn = *(uint32_t *)USB_FUSES_TRANSN_ADDR; #else pad_transn = (*((uint32_t *)(NVMCTRL_OTP4) + (NVM_USB_PAD_TRANSN_POS / 32)) >> (NVM_USB_PAD_TRANSN_POS % 32)) & ((1 << NVM_USB_PAD_TRANSN_SIZE) - 1); if (pad_transn == 0x1F) { pad_transn = 5U; } #endif regs->PADCAL.bit.TRANSN = pad_transn; #ifdef USB_FUSES_TRANSP_ADDR pad_transp = *(uint32_t *)USB_FUSES_TRANSP_ADDR; #else pad_transp = (*((uint32_t *)(NVMCTRL_OTP4) + (NVM_USB_PAD_TRANSP_POS / 32)) >> (NVM_USB_PAD_TRANSP_POS % 32)) & ((1 << NVM_USB_PAD_TRANSP_SIZE) - 1); if (pad_transp == 0x1F) { pad_transp = 29U; } #endif regs->PADCAL.bit.TRANSP = pad_transp; #ifdef USB_FUSES_TRIM_ADDR pad_trim = *(uint32_t *)USB_FUSES_TRIM_ADDR; #else pad_trim = (*((uint32_t *)(NVMCTRL_OTP4) + (NVM_USB_PAD_TRIM_POS / 32)) >> (NVM_USB_PAD_TRIM_POS % 32)) & ((1 << NVM_USB_PAD_TRIM_SIZE) - 1); if (pad_trim == 0x7) { pad_trim = 3U; } #endif regs->PADCAL.bit.TRIM = pad_trim; } #define SAM0_USB_IRQ_CONNECT(n) \ do { \ IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, n, irq), \ DT_INST_IRQ_BY_IDX(0, n, priority), \ usb_sam0_isr, 0, 0); \ irq_enable(DT_INST_IRQ_BY_IDX(0, n, irq)); \ } while (false) /* Attach by initializing the device */ int usb_dc_attach(void) { UsbDevice *regs = &REGS->DEVICE; struct usb_sam0_data *data = 
usb_sam0_get_data(); int retval; #ifdef MCLK /* Enable the clock in MCLK */ MCLK->APBBMASK.bit.USB_ = 1; /* Enable the GCLK - use 48 MHz source */ GCLK->PCHCTRL[USB_GCLK_ID].reg = GCLK_PCHCTRL_GEN(2) | GCLK_PCHCTRL_CHEN; while (GCLK->SYNCBUSY.reg) { } #else /* Enable the clock in PM */ PM->APBBMASK.bit.USB_ = 1; /* Enable the GCLK */ GCLK->CLKCTRL.reg = GCLK_CLKCTRL_ID_USB | GCLK_CLKCTRL_GEN_GCLK0 | GCLK_CLKCTRL_CLKEN; while (GCLK->STATUS.bit.SYNCBUSY) { } #endif /* !MCLK */ /* Configure */ regs->CTRLA.bit.SWRST = 1; usb_sam0_wait_syncbusy(); /* Change QOS values to have the best performance and correct USB * behaviour */ regs->QOSCTRL.bit.CQOS = 2; regs->QOSCTRL.bit.DQOS = 2; retval = pinctrl_apply_state(pcfg, PINCTRL_STATE_DEFAULT); if (retval < 0) { return retval; } usb_sam0_load_padcal(); regs->CTRLA.reg = USB_CTRLA_MODE_DEVICE | USB_CTRLA_RUNSTDBY; regs->CTRLB.reg = USB_DEVICE_CTRLB_SPDCONF_HS; (void)memset(data->descriptors, 0, sizeof(data->descriptors)); regs->DESCADD.reg = (uintptr_t)&data->descriptors[0]; regs->INTENSET.reg = USB_DEVICE_INTENSET_EORST; /* Connect and enable the interrupt */ #if DT_INST_IRQ_HAS_CELL(0, irq) SAM0_USB_IRQ_CONNECT(0); #endif #if DT_INST_IRQ_HAS_IDX(0, 1) SAM0_USB_IRQ_CONNECT(1); #endif #if DT_INST_IRQ_HAS_IDX(0, 2) SAM0_USB_IRQ_CONNECT(2); #endif #if DT_INST_IRQ_HAS_IDX(0, 3) SAM0_USB_IRQ_CONNECT(3); #endif /* Enable and attach */ regs->CTRLA.bit.ENABLE = 1; usb_sam0_wait_syncbusy(); regs->CTRLB.bit.DETACH = 0; return 0; } static void usb_dc_release_buffers(void) { struct usb_sam0_data *data = usb_sam0_get_data(); UsbDeviceDescBank *bank; void *buf; /* release the buffers */ for (int i = 0; i < ARRAY_SIZE(data->descriptors); i++) { for (int j = 0; j < ARRAY_SIZE(data->descriptors[0].DeviceDescBank); j++) { bank = &data->descriptors[i].DeviceDescBank[j]; buf = (void *)bank->ADDR.reg; /* * We free the ep descriptor memory that was * allocated in usb_dc_ep_configure(). 
* Therefore a disabled ep must be reconfigured * before it can be enabled again. */ if (buf != NULL) { k_free(buf); bank->ADDR.reg = (uintptr_t) NULL; } } } } /* Detach from the bus */ int usb_dc_detach(void) { UsbDevice *regs = &REGS->DEVICE; regs->CTRLB.bit.DETACH = 1; usb_sam0_wait_syncbusy(); usb_dc_release_buffers(); return 0; } /* Remove the interrupt and reset the device */ int usb_dc_reset(void) { UsbDevice *regs = &REGS->DEVICE; irq_disable(DT_INST_IRQN(0)); regs->CTRLA.bit.SWRST = 1; usb_sam0_wait_syncbusy(); return 0; } /* Queue a change in address. This is processed later when the * current transfers are complete. */ int usb_dc_set_address(const uint8_t addr) { struct usb_sam0_data *data = usb_sam0_get_data(); data->addr = addr | USB_DEVICE_DADD_ADDEN; return 0; } void usb_dc_set_status_callback(const usb_dc_status_callback cb) { struct usb_sam0_data *data = usb_sam0_get_data(); data->cb = cb; } int usb_dc_ep_check_cap(const struct usb_dc_ep_cfg_data * const cfg) { uint8_t ep_idx = USB_EP_GET_IDX(cfg->ep_addr); if ((cfg->ep_type == USB_DC_EP_CONTROL) && ep_idx) { LOG_ERR("invalid endpoint configuration"); return -1; } if (ep_idx > USB_NUM_ENDPOINTS) { LOG_ERR("endpoint index/address too high"); return -1; } return 0; } int usb_dc_ep_configure(const struct usb_dc_ep_cfg_data *const cfg) { struct usb_sam0_data *data = usb_sam0_get_data(); UsbDevice *regs = &REGS->DEVICE; uint8_t ep_idx = USB_EP_GET_IDX(cfg->ep_addr); UsbDeviceEndpoint *endpoint = &regs->DeviceEndpoint[ep_idx]; UsbDeviceDescriptor *desc = &data->descriptors[ep_idx]; UsbDeviceDescBank *bank; void *buf; int type; int size = -1; int i; /* Map the type to native type */ switch (cfg->ep_type) { case USB_DC_EP_CONTROL: type = 1; break; case USB_DC_EP_ISOCHRONOUS: type = 2; break; case USB_DC_EP_BULK: type = 3; break; case USB_DC_EP_INTERRUPT: type = 4; break; default: return -EINVAL; } /* Map the endpoint size to native size */ for (i = 0; i < ARRAY_SIZE(usb_sam0_pcksize_bytes); i++) { if 
(usb_sam0_pcksize_bytes[i] == cfg->ep_mps) { size = i; break; } } if (size < 0) { return -EINVAL; } if (USB_EP_DIR_IS_IN(cfg->ep_addr)) { bank = &desc->DeviceDescBank[1]; } else { bank = &desc->DeviceDescBank[0]; } buf = (void *)bank->ADDR.reg; if (bank->PCKSIZE.bit.SIZE != size || buf == NULL) { /* Release the previous buffer, if any */ k_free(buf); buf = k_malloc(cfg->ep_mps); if (buf == NULL) { return -ENOMEM; } bank->PCKSIZE.bit.SIZE = size; bank->ADDR.reg = (uintptr_t)buf; } if (USB_EP_DIR_IS_IN(cfg->ep_addr)) { endpoint->EPCFG.bit.EPTYPE1 = type; endpoint->EPSTATUSCLR.bit.BK1RDY = 1; } else { endpoint->EPCFG.bit.EPTYPE0 = type; endpoint->EPSTATUSCLR.bit.BK0RDY = 1; } return 0; } int usb_dc_ep_set_stall(const uint8_t ep) { UsbDevice *regs = &REGS->DEVICE; uint8_t for_in = USB_EP_GET_DIR(ep); uint8_t ep_idx = USB_EP_GET_IDX(ep); UsbDeviceEndpoint *endpoint = &regs->DeviceEndpoint[ep_idx]; if (ep_idx >= USB_NUM_ENDPOINTS) { LOG_ERR("endpoint index/address out of range"); return -1; } if (for_in) { endpoint->EPSTATUSSET.bit.STALLRQ1 = 1; } else { endpoint->EPSTATUSSET.bit.STALLRQ0 = 1; } return 0; } int usb_dc_ep_clear_stall(const uint8_t ep) { UsbDevice *regs = &REGS->DEVICE; uint8_t for_in = USB_EP_GET_DIR(ep); uint8_t ep_idx = USB_EP_GET_IDX(ep); UsbDeviceEndpoint *endpoint = &regs->DeviceEndpoint[ep_idx]; if (ep_idx >= USB_NUM_ENDPOINTS) { LOG_ERR("endpoint index/address out of range"); return -1; } if (for_in) { endpoint->EPSTATUSCLR.bit.STALLRQ1 = 1; } else { endpoint->EPSTATUSCLR.bit.STALLRQ0 = 1; } return 0; } int usb_dc_ep_is_stalled(const uint8_t ep, uint8_t *stalled) { UsbDevice *regs = &REGS->DEVICE; uint8_t for_in = USB_EP_GET_DIR(ep); uint8_t ep_idx = USB_EP_GET_IDX(ep); UsbDeviceEndpoint *endpoint = &regs->DeviceEndpoint[ep_idx]; if (ep_idx >= USB_NUM_ENDPOINTS) { LOG_ERR("endpoint index/address out of range"); return -1; } if (stalled == NULL) { LOG_ERR("parameter must not be NULL"); return -1; } if (for_in) { *stalled = 
endpoint->EPSTATUS.bit.STALLRQ1; } else { *stalled = endpoint->EPSTATUS.bit.STALLRQ0; } return 0; } /* Halt the selected endpoint */ int usb_dc_ep_halt(uint8_t ep) { return usb_dc_ep_set_stall(ep); } /* Flush the selected endpoint */ int usb_dc_ep_flush(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (ep_idx >= USB_NUM_ENDPOINTS) { LOG_ERR("endpoint index/address out of range"); return -1; } /* TODO */ LOG_WRN("flush not implemented"); return 0; } /* Enable an endpoint and the endpoint interrupts */ int usb_dc_ep_enable(const uint8_t ep) { UsbDevice *regs = &REGS->DEVICE; uint8_t for_in = USB_EP_GET_DIR(ep); uint8_t ep_idx = USB_EP_GET_IDX(ep); UsbDeviceEndpoint *endpoint = &regs->DeviceEndpoint[ep_idx]; if (ep_idx >= USB_NUM_ENDPOINTS) { LOG_ERR("endpoint index/address out of range"); return -EINVAL; } if (for_in) { endpoint->EPSTATUSCLR.bit.BK1RDY = 1; } else { endpoint->EPSTATUSCLR.bit.BK0RDY = 1; } endpoint->EPINTENSET.reg = USB_DEVICE_EPINTENSET_TRCPT0 | USB_DEVICE_EPINTENSET_TRCPT1 | USB_DEVICE_EPINTENSET_RXSTP; return 0; } /* Disable the selected endpoint */ int usb_dc_ep_disable(uint8_t ep) { UsbDevice *regs = &REGS->DEVICE; uint8_t ep_idx = USB_EP_GET_IDX(ep); UsbDeviceEndpoint *endpoint = &regs->DeviceEndpoint[ep_idx]; if (ep_idx >= USB_NUM_ENDPOINTS) { LOG_ERR("endpoint index/address out of range"); return -EINVAL; } endpoint->EPINTENCLR.reg = USB_DEVICE_EPINTENCLR_TRCPT0 | USB_DEVICE_EPINTENCLR_TRCPT1 | USB_DEVICE_EPINTENCLR_RXSTP; return 0; } /* Write a single payload to the IN buffer on the endpoint */ int usb_dc_ep_write(uint8_t ep, const uint8_t *buf, uint32_t len, uint32_t *ret_bytes) { struct usb_sam0_data *data = usb_sam0_get_data(); UsbDevice *regs = &REGS->DEVICE; uint8_t ep_idx = USB_EP_GET_IDX(ep); UsbDeviceEndpoint *endpoint = &regs->DeviceEndpoint[ep_idx]; UsbDeviceDescriptor *desc = &data->descriptors[ep_idx]; uint32_t addr = desc->DeviceDescBank[1].ADDR.reg; uint32_t capacity = usb_sam0_pcksize_bytes[ 
desc->DeviceDescBank[1].PCKSIZE.bit.SIZE]; if (ep_idx >= USB_NUM_ENDPOINTS) { LOG_ERR("endpoint index/address out of range"); return -1; } if (endpoint->EPSTATUS.bit.BK1RDY) { /* Write in progress, drop */ return -EAGAIN; } len = Z_MIN(len, capacity); /* Note that this code does not use the hardware's * multi-packet and automatic zero-length packet features as * the upper layers in Zephyr implement these in code. */ memcpy((void *)addr, buf, len); desc->DeviceDescBank[1].PCKSIZE.bit.MULTI_PACKET_SIZE = 0; desc->DeviceDescBank[1].PCKSIZE.bit.BYTE_COUNT = len; endpoint->EPINTFLAG.reg = USB_DEVICE_EPINTFLAG_TRCPT1 | USB_DEVICE_EPINTFLAG_TRFAIL1; endpoint->EPSTATUSSET.bit.BK1RDY = 1; if (ret_bytes != NULL) { *ret_bytes = len; } return 0; } /* Read data from an OUT endpoint */ int usb_dc_ep_read_ex(uint8_t ep, uint8_t *buf, uint32_t max_data_len, uint32_t *read_bytes, bool wait) { struct usb_sam0_data *data = usb_sam0_get_data(); UsbDevice *regs = &REGS->DEVICE; uint8_t ep_idx = USB_EP_GET_IDX(ep); UsbDeviceEndpoint *endpoint = &regs->DeviceEndpoint[ep_idx]; UsbDeviceDescriptor *desc = &data->descriptors[ep_idx]; uint32_t addr = desc->DeviceDescBank[0].ADDR.reg; uint32_t bytes = desc->DeviceDescBank[0].PCKSIZE.bit.BYTE_COUNT; uint32_t take; int remain; if (ep_idx >= USB_NUM_ENDPOINTS) { LOG_ERR("endpoint index/address out of range"); return -1; } if (!endpoint->EPSTATUS.bit.BK0RDY) { return -EAGAIN; } /* The code below emulates the Quark FIFO which the Zephyr USB * API is based on. Reading with buf == NULL returns the * number of bytes available and starts the read. The caller * then keeps calling until all bytes are consumed which * also marks the OUT buffer as freed. 
*/ if (buf == NULL) { data->out_at = 0U; if (read_bytes != NULL) { *read_bytes = bytes; } return 0; } remain = bytes - data->out_at; take = MIN(max_data_len, remain); memcpy(buf, (uint8_t *)addr + data->out_at, take); if (read_bytes != NULL) { *read_bytes = take; } if (take == remain) { if (!wait) { endpoint->EPSTATUSCLR.bit.BK0RDY = 1; data->out_at = 0U; } } else { data->out_at += take; } return 0; } int usb_dc_ep_read(uint8_t ep, uint8_t *buf, uint32_t max_data_len, uint32_t *read_bytes) { return usb_dc_ep_read_ex(ep, buf, max_data_len, read_bytes, false); } int usb_dc_ep_read_wait(uint8_t ep, uint8_t *buf, uint32_t max_data_len, uint32_t *read_bytes) { return usb_dc_ep_read_ex(ep, buf, max_data_len, read_bytes, true); } int usb_dc_ep_read_continue(uint8_t ep) { struct usb_sam0_data *data = usb_sam0_get_data(); UsbDevice *regs = &REGS->DEVICE; uint8_t ep_idx = USB_EP_GET_IDX(ep); UsbDeviceEndpoint *endpoint = &regs->DeviceEndpoint[ep_idx]; if (ep_idx >= USB_NUM_ENDPOINTS) { LOG_ERR("endpoint index/address out of range"); return -1; } endpoint->EPSTATUSCLR.bit.BK0RDY = 1; data->out_at = 0U; return 0; } int usb_dc_ep_set_callback(const uint8_t ep, const usb_dc_ep_callback cb) { struct usb_sam0_data *data = usb_sam0_get_data(); uint8_t for_in = USB_EP_GET_DIR(ep); uint8_t ep_idx = USB_EP_GET_IDX(ep); if (ep_idx >= USB_NUM_ENDPOINTS) { LOG_ERR("endpoint index/address out of range"); return -1; } data->ep_cb[for_in ? 
1 : 0][ep_idx] = cb; return 0; } int usb_dc_ep_mps(const uint8_t ep) { struct usb_sam0_data *data = usb_sam0_get_data(); UsbDevice *regs = &REGS->DEVICE; uint8_t for_in = USB_EP_GET_DIR(ep); uint8_t ep_idx = USB_EP_GET_IDX(ep); UsbDeviceDescriptor *desc = &data->descriptors[ep_idx]; UsbDeviceEndpoint *endpoint = &regs->DeviceEndpoint[ep_idx]; if (ep_idx >= USB_NUM_ENDPOINTS) { LOG_ERR("endpoint index/address out of range"); return -1; } if (for_in) { /* if endpoint is not configured, this should return 0 */ if (endpoint->EPCFG.bit.EPTYPE1 == 0) { return 0; } return usb_sam0_pcksize_bytes[ desc->DeviceDescBank[1].PCKSIZE.bit.SIZE]; } else { /* if endpoint is not configured, this should return 0 */ if (endpoint->EPCFG.bit.EPTYPE0 == 0) { return 0; } return usb_sam0_pcksize_bytes[ desc->DeviceDescBank[0].PCKSIZE.bit.SIZE]; } } ```
/content/code_sandbox/drivers/usb/device/usb_dc_sam0.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,823
```c /* * */ #include <soc.h> #include <string.h> #include <hardware/regs/usb.h> #include <hardware/structs/usb.h> #include <hardware/resets.h> #include <pico/platform.h> #include <zephyr/init.h> #include <zephyr/kernel.h> #include <zephyr/usb/usb_device.h> #include <zephyr/sys/util.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> #include <zephyr/drivers/clock_control.h> LOG_MODULE_REGISTER(udc_rpi, CONFIG_USB_DRIVER_LOG_LEVEL); #define DT_DRV_COMPAT raspberrypi_pico_usbd #define USB_BASE_ADDRESS DT_INST_REG_ADDR(0) #define USB_IRQ DT_INST_IRQ_BY_NAME(0, usbctrl, irq) #define USB_IRQ_PRI DT_INST_IRQ_BY_NAME(0, usbctrl, priority) #define USB_NUM_BIDIR_ENDPOINTS DT_INST_PROP(0, num_bidir_endpoints) #define CLK_DRV DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(0)) #define CLK_ID (clock_control_subsys_t)DT_INST_PHA_BY_IDX(0, clocks, 0, clk_id) #define DATA_BUFFER_SIZE 64U /* Needed for pico-sdk */ #ifndef typeof #define typeof __typeof__ #endif struct udc_rpi_ep_state { uint16_t mps; enum usb_dc_ep_transfer_type type; uint8_t halted; usb_dc_ep_callback cb; uint32_t read_offset; struct k_sem write_sem; io_rw_32 *ep_ctl; io_rw_32 *buf_ctl; uint8_t *buf; uint8_t next_pid; }; #define USBD_THREAD_STACK_SIZE 1024 K_THREAD_STACK_DEFINE(thread_stack, USBD_THREAD_STACK_SIZE); static struct k_thread thread; struct udc_rpi_state { usb_dc_status_callback status_cb; struct udc_rpi_ep_state out_ep_state[USB_NUM_BIDIR_ENDPOINTS]; struct udc_rpi_ep_state in_ep_state[USB_NUM_BIDIR_ENDPOINTS]; bool abort_control_writes; bool setup_available; bool should_set_address; uint16_t control_out_ep_rcvd; uint8_t addr; bool rwu_pending; }; static struct udc_rpi_state state; struct cb_msg { bool ep_event; uint32_t type; uint8_t ep; }; K_MSGQ_DEFINE(usb_dc_msgq, sizeof(struct cb_msg), 10, 4); static struct udc_rpi_ep_state *udc_rpi_get_ep_state(uint8_t ep) { struct udc_rpi_ep_state *ep_state_base; if (USB_EP_GET_IDX(ep) >= USB_NUM_BIDIR_ENDPOINTS) { return NULL; } if (USB_EP_DIR_IS_OUT(ep)) { 
ep_state_base = state.out_ep_state; } else { ep_state_base = state.in_ep_state; } return ep_state_base + USB_EP_GET_IDX(ep); } static int udc_rpi_start_xfer(uint8_t ep, const void *data, const size_t len) { struct udc_rpi_ep_state *ep_state = udc_rpi_get_ep_state(ep); uint32_t val = len; if (*ep_state->buf_ctl & USB_BUF_CTRL_AVAIL) { LOG_WRN("ep 0x%02x was already armed", ep); } if (USB_EP_DIR_IS_IN(ep)) { if (len > DATA_BUFFER_SIZE) { return -ENOMEM; } val |= USB_BUF_CTRL_FULL; if (data) { memcpy(ep_state->buf, data, len); } } else { ep_state->read_offset = 0; } LOG_DBG("xfer ep %d len %d pid: %d", ep, len, ep_state->next_pid); val |= ep_state->next_pid ? USB_BUF_CTRL_DATA1_PID : USB_BUF_CTRL_DATA0_PID; ep_state->next_pid ^= 1u; *ep_state->buf_ctl = val; /* * By default, clk_sys runs at 125MHz, wait 3 nop instructions before * setting the AVAILABLE bit. See 4.1.2.5.1. Concurrent access. */ arch_nop(); arch_nop(); arch_nop(); *ep_state->buf_ctl = val | USB_BUF_CTRL_AVAIL; return 0; } /* * This function converts a zephyr endpoint address into a * bit mask that can be used with registers: * - BUFF_STATUS * - BUFF_CPU_SHOULD_HANDLE * - EP_ABOR * - EP_ABORT_DONE * - EP_STATUS_STALL_NAK */ static inline uint32_t udc_rpi_endpoint_mask(const uint8_t ep) { const int bit_index = (USB_EP_GET_IDX(ep) << 1) | !!USB_EP_DIR_IS_OUT(ep); return BIT(bit_index); } static void udc_rpi_cancel_endpoint(const uint8_t ep) { struct udc_rpi_ep_state *const ep_state = udc_rpi_get_ep_state(ep); if (*ep_state->buf_ctl & USB_BUF_CTRL_AVAIL) { const uint32_t mask = udc_rpi_endpoint_mask(ep); bool abort_handshake_supported = rp2040_chip_version() >= 2; if (abort_handshake_supported) { hw_set_alias(usb_hw)->abort = mask; while ((usb_hw->abort_done & mask) != mask) { } } *ep_state->buf_ctl &= ~USB_BUF_CTRL_AVAIL; if (abort_handshake_supported) { hw_clear_alias(usb_hw)->abort = mask; } if (USB_EP_DIR_IS_IN(ep)) { k_sem_give(&ep_state->write_sem); } } } static void udc_rpi_handle_setup(void) { const 
struct udc_rpi_ep_state *const ep_state = udc_rpi_get_ep_state(USB_CONTROL_EP_OUT); struct cb_msg msg; /* Normally all control transfers should complete before a new setup * transaction is sent, however in some rare cases from the perspective * of the device, a new setup transaction could arrive prematurely, in * which case the previous control transfer should be aborted, and for * this reason we're canceling both control IN and control OUT * endpoints. See section 5.5.5 of the Universal Serial Bus * Specification, version 2.0. */ udc_rpi_cancel_endpoint(USB_CONTROL_EP_IN); if (*ep_state->buf_ctl & USB_BUF_CTRL_AVAIL) { udc_rpi_cancel_endpoint(USB_CONTROL_EP_OUT); /* This warning could be triggered by the rare event described * above, but it could also be a sign of a software bug, that * can expose us to race conditions when the system is slowed * down, because it becomes impossible to determine in what * order did setup/data transactions arrive. */ LOG_WRN("EP0_OUT was armed while setup stage arrived."); } state.abort_control_writes = true; /* Set DATA1 PID for the next (data or status) stage */ udc_rpi_get_ep_state(USB_CONTROL_EP_IN)->next_pid = 1; udc_rpi_get_ep_state(USB_CONTROL_EP_OUT)->next_pid = 1; msg.ep = USB_CONTROL_EP_OUT; msg.type = USB_DC_EP_SETUP; msg.ep_event = true; k_msgq_put(&usb_dc_msgq, &msg, K_NO_WAIT); } static void udc_rpi_handle_buff_status(void) { struct udc_rpi_ep_state *ep_state; enum usb_dc_ep_cb_status_code status_code; uint32_t status = usb_hw->buf_status; unsigned int bit = 1U; struct cb_msg msg; LOG_DBG("status: %d", status); for (int i = 0; status && i < USB_NUM_BIDIR_ENDPOINTS * 2; i++) { if (status & bit) { hw_clear_alias(usb_hw)->buf_status = bit; bool in = !(i & 1U); uint8_t ep = (i >> 1U) | (in ? USB_EP_DIR_IN : USB_EP_DIR_OUT); ep_state = udc_rpi_get_ep_state(ep); status_code = in ? USB_DC_EP_DATA_IN : USB_DC_EP_DATA_OUT; LOG_DBG("buff ep %i in? 
%i", (i >> 1), in); if (i == 0 && in && state.should_set_address) { state.should_set_address = false; usb_hw->dev_addr_ctrl = state.addr; } if (in) { k_sem_give(&ep_state->write_sem); } msg.ep = ep; msg.ep_event = true; msg.type = status_code; k_msgq_put(&usb_dc_msgq, &msg, K_NO_WAIT); status &= ~bit; } bit <<= 1U; } } static void udc_rpi_handle_resume(void) { struct cb_msg msg = { .ep = 0U, .type = USB_DC_RESUME, .ep_event = false, }; LOG_DBG("Resume"); k_msgq_put(&usb_dc_msgq, &msg, K_NO_WAIT); state.rwu_pending = false; } static void udc_rpi_handle_suspended(void) { struct cb_msg msg = { .ep = 0U, .type = USB_DC_SUSPEND, .ep_event = false, }; LOG_DBG("Suspended"); k_msgq_put(&usb_dc_msgq, &msg, K_NO_WAIT); } static void udc_rpi_isr(const void *arg) { uint32_t status = usb_hw->ints; uint32_t handled = 0; struct cb_msg msg; if ((status & (USB_INTS_BUFF_STATUS_BITS | USB_INTS_SETUP_REQ_BITS)) && state.rwu_pending) { /* The rpi pico USB device does not appear to be sending * USB_INTR_DEV_RESUME_FROM_HOST interrupts when the resume is * a result of a remote wakeup request sent by us. * This will simulate a resume event if bus activity is observed */ udc_rpi_handle_resume(); } if (status & USB_INTS_BUFF_STATUS_BITS) { /* Note: we should check buffer interrupts before setup interrupts. * this may seem a little counter-intuitive, because setup irqs * sound more urgent, however in case we see an EP0_OUT buffer irq * at the same time as a setup irq, then we know the buffer irq * belongs to the previous control transfer, so we want to handle * that first. 
*/ handled |= USB_INTS_BUFF_STATUS_BITS; udc_rpi_handle_buff_status(); } if (status & USB_INTS_SETUP_REQ_BITS) { handled |= USB_INTS_SETUP_REQ_BITS; hw_clear_alias(usb_hw)->sie_status = USB_SIE_STATUS_SETUP_REC_BITS; udc_rpi_handle_setup(); } if (status & USB_INTS_DEV_CONN_DIS_BITS) { LOG_DBG("buf %u ep %u", *udc_rpi_get_ep_state(0x81)->buf_ctl, *udc_rpi_get_ep_state(0x81)->ep_ctl); handled |= USB_INTS_DEV_CONN_DIS_BITS; hw_clear_alias(usb_hw)->sie_status = USB_SIE_STATUS_CONNECTED_BITS; msg.ep = 0U; msg.ep_event = false; msg.type = usb_hw->sie_status & USB_SIE_STATUS_CONNECTED_BITS ? USB_DC_DISCONNECTED : USB_DC_CONNECTED; k_msgq_put(&usb_dc_msgq, &msg, K_NO_WAIT); } if (status & USB_INTS_BUS_RESET_BITS) { LOG_WRN("BUS RESET"); handled |= USB_INTS_BUS_RESET_BITS; hw_clear_alias(usb_hw)->sie_status = USB_SIE_STATUS_BUS_RESET_BITS; usb_hw->dev_addr_ctrl = 0; /* The DataInCallback will never be called at this point for any pending * transactions. Reset the IN semaphores to prevent perpetual locked state. 
*/ for (int i = 0; i < USB_NUM_BIDIR_ENDPOINTS; i++) { k_sem_give(&state.in_ep_state[i].write_sem); } msg.ep = 0U; msg.type = USB_DC_RESET; msg.ep_event = false; k_msgq_put(&usb_dc_msgq, &msg, K_NO_WAIT); } if (status & USB_INTS_DEV_SUSPEND_BITS) { handled |= USB_INTS_DEV_SUSPEND_BITS; hw_clear_alias(usb_hw)->sie_status = USB_SIE_STATUS_SUSPENDED_BITS; udc_rpi_handle_suspended(); } if (status & USB_INTR_DEV_RESUME_FROM_HOST_BITS) { handled |= USB_INTR_DEV_RESUME_FROM_HOST_BITS; hw_clear_alias(usb_hw)->sie_status = USB_SIE_STATUS_RESUME_BITS; udc_rpi_handle_resume(); } if (status & USB_INTS_ERROR_DATA_SEQ_BITS) { LOG_WRN("data seq"); hw_clear_alias(usb_hw)->sie_status = USB_SIE_STATUS_DATA_SEQ_ERROR_BITS; handled |= USB_INTS_ERROR_DATA_SEQ_BITS; } if (status & USB_INTS_ERROR_RX_TIMEOUT_BITS) { LOG_WRN("rx timeout"); hw_clear_alias(usb_hw)->sie_status = USB_SIE_STATUS_RX_TIMEOUT_BITS; handled |= USB_INTS_ERROR_RX_TIMEOUT_BITS; } if (status & USB_INTS_ERROR_RX_OVERFLOW_BITS) { LOG_WRN("rx overflow"); hw_clear_alias(usb_hw)->sie_status = USB_SIE_STATUS_RX_OVERFLOW_BITS; handled |= USB_INTS_ERROR_RX_OVERFLOW_BITS; } if (status & USB_INTS_ERROR_BIT_STUFF_BITS) { LOG_WRN("bit stuff error"); hw_clear_alias(usb_hw)->sie_status = USB_SIE_STATUS_BIT_STUFF_ERROR_BITS; handled |= USB_INTS_ERROR_BIT_STUFF_BITS; } if (status & USB_INTS_ERROR_CRC_BITS) { LOG_ERR("crc error"); hw_clear_alias(usb_hw)->sie_status = USB_SIE_STATUS_CRC_ERROR_BITS; handled |= USB_INTS_ERROR_CRC_BITS; } if (status ^ handled) { LOG_ERR("unhandled IRQ: 0x%x", (uint)(status ^ handled)); } } static void udc_rpi_init_endpoint(const uint8_t i) { state.out_ep_state[i].buf_ctl = &usb_dpram->ep_buf_ctrl[i].out; state.in_ep_state[i].buf_ctl = &usb_dpram->ep_buf_ctrl[i].in; if (i != 0) { state.out_ep_state[i].ep_ctl = &usb_dpram->ep_ctrl[i - 1].out; state.in_ep_state[i].ep_ctl = &usb_dpram->ep_ctrl[i - 1].in; state.out_ep_state[i].buf = &usb_dpram->epx_data[((i - 1) * 2 + 1) * DATA_BUFFER_SIZE]; 
state.in_ep_state[i].buf = &usb_dpram->epx_data[((i - 1) * 2) * DATA_BUFFER_SIZE]; } else { state.out_ep_state[i].buf = &usb_dpram->ep0_buf_a[0]; state.in_ep_state[i].buf = &usb_dpram->ep0_buf_a[0]; } k_sem_init(&state.in_ep_state[i].write_sem, 1, 1); } static int udc_rpi_init(void) { /* Reset usb controller */ reset_block(RESETS_RESET_USBCTRL_BITS); unreset_block_wait(RESETS_RESET_USBCTRL_BITS); /* Clear any previous state in dpram/hw just in case */ memset(usb_hw, 0, sizeof(*usb_hw)); memset(usb_dpram, 0, sizeof(*usb_dpram)); /* Mux the controller to the onboard usb phy */ usb_hw->muxing = USB_USB_MUXING_TO_PHY_BITS | USB_USB_MUXING_SOFTCON_BITS; /* Force VBUS detect so the device thinks it is plugged into a host */ usb_hw->pwr = USB_USB_PWR_VBUS_DETECT_BITS | USB_USB_PWR_VBUS_DETECT_OVERRIDE_EN_BITS; /* Enable the USB controller in device mode. */ usb_hw->main_ctrl = USB_MAIN_CTRL_CONTROLLER_EN_BITS; /* Enable an interrupt per EP0 transaction */ usb_hw->sie_ctrl = USB_SIE_CTRL_EP0_INT_1BUF_BITS; /* Enable interrupts for when a buffer is done, when the bus is reset, * and when a setup packet is received, and device connection status */ usb_hw->inte = USB_INTS_BUFF_STATUS_BITS | USB_INTS_BUS_RESET_BITS | USB_INTS_DEV_CONN_DIS_BITS | USB_INTS_SETUP_REQ_BITS | /*USB_INTS_EP_STALL_NAK_BITS |*/ USB_INTS_ERROR_BIT_STUFF_BITS | USB_INTS_ERROR_CRC_BITS | USB_INTS_ERROR_DATA_SEQ_BITS | USB_INTS_ERROR_RX_OVERFLOW_BITS | USB_INTS_ERROR_RX_TIMEOUT_BITS | USB_INTS_DEV_SUSPEND_BITS | USB_INTR_DEV_RESUME_FROM_HOST_BITS; /* Set up endpoints (endpoint control registers) * described by device configuration * usb_setup_endpoints(); */ for (int i = 0; i < USB_NUM_BIDIR_ENDPOINTS; i++) { udc_rpi_init_endpoint(i); } /* Present full speed device by enabling pull up on DP */ hw_set_alias(usb_hw)->sie_ctrl = USB_SIE_CTRL_PULLUP_EN_BITS; return 0; } /* Zephyr USB device controller API implementation */ int usb_dc_attach(void) { return udc_rpi_init(); } int usb_dc_ep_set_callback(const 
uint8_t ep, const usb_dc_ep_callback cb) { struct udc_rpi_ep_state *ep_state = udc_rpi_get_ep_state(ep); LOG_DBG("ep 0x%02x", ep); if (!ep_state) { return -EINVAL; } ep_state->cb = cb; return 0; } void usb_dc_set_status_callback(const usb_dc_status_callback cb) { state.status_cb = cb; } int usb_dc_set_address(const uint8_t addr) { LOG_DBG("addr %u (0x%02x)", addr, addr); state.should_set_address = true; state.addr = addr; return 0; } int usb_dc_ep_start_read(uint8_t ep, size_t len) { int ret; LOG_DBG("ep 0x%02x len %d", ep, len); if (!USB_EP_DIR_IS_OUT(ep)) { LOG_ERR("invalid ep 0x%02x", ep); return -EINVAL; } if (len > DATA_BUFFER_SIZE) { len = DATA_BUFFER_SIZE; } ret = udc_rpi_start_xfer(ep, NULL, len); return ret; } int usb_dc_ep_check_cap(const struct usb_dc_ep_cfg_data *const cfg) { uint8_t ep_idx = USB_EP_GET_IDX(cfg->ep_addr); LOG_DBG("ep %x, mps %d, type %d", cfg->ep_addr, cfg->ep_mps, cfg->ep_type); if ((cfg->ep_type == USB_DC_EP_CONTROL) && ep_idx) { LOG_ERR("invalid endpoint configuration"); return -1; } if (ep_idx > (USB_NUM_BIDIR_ENDPOINTS - 1)) { LOG_ERR("endpoint index/address out of range"); return -1; } return 0; } int usb_dc_ep_configure(const struct usb_dc_ep_cfg_data *const ep_cfg) { uint8_t ep = ep_cfg->ep_addr; struct udc_rpi_ep_state *ep_state = udc_rpi_get_ep_state(ep); if (!ep_state) { return -EINVAL; } LOG_DBG("ep 0x%02x, previous mps %u, mps %u, type %u", ep_cfg->ep_addr, ep_state->mps, ep_cfg->ep_mps, ep_cfg->ep_type); ep_state->mps = ep_cfg->ep_mps; ep_state->type = ep_cfg->ep_type; return 0; } int usb_dc_ep_set_stall(const uint8_t ep) { struct udc_rpi_ep_state *ep_state = udc_rpi_get_ep_state(ep); LOG_DBG("ep 0x%02x", ep); if (!ep_state) { return -EINVAL; } if (USB_EP_GET_IDX(ep) == 0) { hw_set_alias(usb_hw)->ep_stall_arm = USB_EP_DIR_IS_OUT(ep) ? 
USB_EP_STALL_ARM_EP0_OUT_BITS : USB_EP_STALL_ARM_EP0_IN_BITS; } *ep_state->buf_ctl = USB_BUF_CTRL_STALL; if (ep == USB_CONTROL_EP_IN) { /* Un-arm EP0_OUT endpoint, to make sure next setup packet starts clean */ udc_rpi_cancel_endpoint(USB_CONTROL_EP_OUT); } ep_state->halted = 1U; return 0; } int usb_dc_ep_clear_stall(const uint8_t ep) { struct udc_rpi_ep_state *ep_state = udc_rpi_get_ep_state(ep); uint8_t val; LOG_DBG("ep 0x%02x", ep); if (!ep_state) { return -EINVAL; } if (USB_EP_GET_IDX(ep) > 0) { val = *ep_state->buf_ctl; val &= ~USB_BUF_CTRL_STALL; *ep_state->buf_ctl = val; ep_state->halted = 0U; ep_state->read_offset = 0U; } return 0; } int usb_dc_ep_is_stalled(const uint8_t ep, uint8_t *const stalled) { struct udc_rpi_ep_state *ep_state = udc_rpi_get_ep_state(ep); LOG_DBG("ep 0x%02x", ep); if (!ep_state || !stalled) { return -EINVAL; } *stalled = ep_state->halted; return 0; } static inline uint32_t usb_dc_ep_rpi_pico_buffer_offset(volatile uint8_t *buf) { /* TODO: Bits 0-5 are ignored by the controller so make sure these are 0 */ return (uint32_t)buf ^ (uint32_t)usb_dpram; } int usb_dc_ep_enable(const uint8_t ep) { struct udc_rpi_ep_state *ep_state = udc_rpi_get_ep_state(ep); if (!ep_state) { return -EINVAL; } LOG_DBG("ep 0x%02x (id: %d) -> type %d", ep, USB_EP_GET_IDX(ep), ep_state->type); /* clear buffer state */ *ep_state->buf_ctl = USB_BUF_CTRL_DATA0_PID; ep_state->next_pid = 0; /* EP0 doesn't have an ep_ctl */ if (ep_state->ep_ctl) { uint32_t val = EP_CTRL_ENABLE_BITS | EP_CTRL_INTERRUPT_PER_BUFFER | (ep_state->type << EP_CTRL_BUFFER_TYPE_LSB) | usb_dc_ep_rpi_pico_buffer_offset(ep_state->buf); *ep_state->ep_ctl = val; } if (USB_EP_DIR_IS_OUT(ep) && ep != USB_CONTROL_EP_OUT) { return usb_dc_ep_start_read(ep, DATA_BUFFER_SIZE); } return 0; } int usb_dc_ep_disable(const uint8_t ep) { struct udc_rpi_ep_state *ep_state = udc_rpi_get_ep_state(ep); LOG_DBG("ep 0x%02x", ep); if (!ep_state) { return -EINVAL; } /* EP0 doesn't have an ep_ctl */ if 
(!ep_state->ep_ctl) { return 0; } /* If this endpoint has previously been used and e.g. the host application * crashed, the endpoint may remain locked even after reconfiguration * because the write semaphore is never given back. * udc_rpi_cancel_endpoint() handles this so the endpoint can be written again. */ udc_rpi_cancel_endpoint(ep); uint8_t val = *ep_state->ep_ctl & ~EP_CTRL_ENABLE_BITS; *ep_state->ep_ctl = val; return 0; } int usb_dc_ep_write(const uint8_t ep, const uint8_t *const data, const uint32_t data_len, uint32_t *const ret_bytes) { struct udc_rpi_ep_state *ep_state = udc_rpi_get_ep_state(ep); uint32_t len = data_len; int ret = 0; LOG_DBG("ep 0x%02x, len %u", ep, data_len); if (!ep_state || !USB_EP_DIR_IS_IN(ep)) { LOG_ERR("invalid ep 0x%02x", ep); return -EINVAL; } if (ep == USB_CONTROL_EP_IN && state.abort_control_writes) { /* If abort_control_writes is high, it means the setup packet has not * yet been consumed by the thread, which means that this write * is part of a previous control transfer, which now must be * aborted. 
*/ if (ret_bytes != NULL) { *ret_bytes = len; } return 0; } if (ep == USB_CONTROL_EP_IN && len > USB_MAX_CTRL_MPS) { len = USB_MAX_CTRL_MPS; } else if (len > ep_state->mps) { len = ep_state->mps; } ret = k_sem_take(&ep_state->write_sem, K_NO_WAIT); if (ret) { return -EAGAIN; } if (!k_is_in_isr()) { irq_disable(USB_IRQ); } ret = udc_rpi_start_xfer(ep, data, len); if (ret < 0) { k_sem_give(&ep_state->write_sem); ret = -EIO; } if (!k_is_in_isr()) { irq_enable(USB_IRQ); } if (ret >= 0 && ret_bytes != NULL) { *ret_bytes = len; } return ret; } uint32_t udc_rpi_get_ep_buffer_len(const uint8_t ep) { struct udc_rpi_ep_state *ep_state = udc_rpi_get_ep_state(ep); uint32_t buf_ctl = *ep_state->buf_ctl; return buf_ctl & USB_BUF_CTRL_LEN_MASK; } int usb_dc_ep_read_wait(uint8_t ep, uint8_t *data, uint32_t max_data_len, uint32_t *read_bytes) { struct udc_rpi_ep_state *ep_state = udc_rpi_get_ep_state(ep); uint32_t read_count; if (!ep_state) { LOG_ERR("Invalid Endpoint %x", ep); return -EINVAL; } if (!USB_EP_DIR_IS_OUT(ep)) { LOG_ERR("Wrong endpoint direction: 0x%02x", ep); return -EINVAL; } if (ep == USB_CONTROL_EP_OUT && state.setup_available) { read_count = sizeof(struct usb_setup_packet); if (read_count != max_data_len) { LOG_WRN("Attempting to read setup packet with the wrong length" " (expected: %d, read: %d)", read_count, max_data_len); } } else { read_count = udc_rpi_get_ep_buffer_len(ep) - ep_state->read_offset; } LOG_DBG("ep 0x%02x, %u bytes, %u+%u, %p", ep, max_data_len, ep_state->read_offset, read_count, (void *)data); if (data) { read_count = MIN(read_count, max_data_len); if (ep == USB_CONTROL_EP_OUT && state.setup_available) { memcpy(data, (const void *)&usb_dpram->setup_packet, read_count); } else { memcpy(data, ep_state->buf + ep_state->read_offset, read_count); } ep_state->read_offset += read_count; } else if (max_data_len) { LOG_ERR("Wrong arguments"); } if (read_bytes) { *read_bytes = read_count; } return 0; } static int usb_dc_control_ep_read_continue(const 
struct udc_rpi_ep_state *const ep_state, bool *const arm_out_endpoint) { const struct usb_setup_packet *const setup = (const void *)&usb_dpram->setup_packet; if (state.setup_available) { LOG_DBG("EP0 setup (wLength=%d, is_to_device=%d)", setup->wLength, usb_reqtype_is_to_device(setup)); if (setup->wLength != 0U) { /* In the case of a control transfer, we want to prime the OUT endpoint * exactly once, to either: * 1) in the to_device case, to receive the data (only if wLength is not 0) * 2) in the to_host case, to receive a 0-length status stage transfer * (only valid if wLength is not 0) * Note that when wLength = 0, the status stage transfer is always an IN * type so we don't need to consider that case. */ *arm_out_endpoint = true; state.control_out_ep_rcvd = 0; } state.setup_available = false; } else { const size_t len = udc_rpi_get_ep_buffer_len(USB_CONTROL_EP_OUT); LOG_DBG("Control OUT received %u offset: %u", len, ep_state->read_offset); if (usb_reqtype_is_to_device(setup)) { if (state.control_out_ep_rcvd + ep_state->read_offset < setup->wLength) { /* If no more data in the buffer, but we're still waiting * for more, start a new read transaction. */ if (len == ep_state->read_offset) { state.control_out_ep_rcvd += ep_state->read_offset; *arm_out_endpoint = true; } } } } return 0; } int usb_dc_ep_read_continue(const uint8_t ep) { const struct udc_rpi_ep_state *const ep_state = udc_rpi_get_ep_state(ep); bool arm_out_endpoint = false; if (!ep_state || !USB_EP_DIR_IS_OUT(ep)) { LOG_ERR("Not valid endpoint: %02x", ep); return -EINVAL; } if (ep == USB_CONTROL_EP_OUT) { int ret = usb_dc_control_ep_read_continue(ep_state, &arm_out_endpoint); if (ret != 0) { return ret; } } else { const size_t len = udc_rpi_get_ep_buffer_len(ep); LOG_DBG("Endpoint 0x%02x received %u offset: %u", ep, len, ep_state->read_offset); /* If no more data in the buffer, start a new read transaction. 
*/ if (len == ep_state->read_offset) { arm_out_endpoint = true; } } if (arm_out_endpoint) { LOG_DBG("Arming endpoint 0x%02x", ep); return usb_dc_ep_start_read(ep, DATA_BUFFER_SIZE); } else { LOG_DBG("Not arming endpoint 0x%02x", ep); } return 0; } int usb_dc_ep_read(const uint8_t ep, uint8_t *const data, const uint32_t max_data_len, uint32_t *const read_bytes) { if (usb_dc_ep_read_wait(ep, data, max_data_len, read_bytes) != 0) { return -EINVAL; } if (!max_data_len) { return 0; } if (usb_dc_ep_read_continue(ep) != 0) { return -EINVAL; } return 0; } int usb_dc_ep_halt(const uint8_t ep) { return usb_dc_ep_set_stall(ep); } int usb_dc_ep_flush(const uint8_t ep) { struct udc_rpi_ep_state *ep_state = udc_rpi_get_ep_state(ep); if (!ep_state) { return -EINVAL; } LOG_ERR("Not implemented"); return 0; } int usb_dc_ep_mps(const uint8_t ep) { struct udc_rpi_ep_state *ep_state = udc_rpi_get_ep_state(ep); if (!ep_state) { return -EINVAL; } return ep_state->mps; } int usb_dc_detach(void) { LOG_ERR("Not implemented"); return 0; } int usb_dc_reset(void) { LOG_ERR("Not implemented"); return 0; } int usb_dc_wakeup_request(void) { LOG_DBG("Remote Wakeup"); state.rwu_pending = true; hw_set_alias(usb_hw)->sie_ctrl = USB_SIE_CTRL_RESUME_BITS; return 0; } /* * This thread is only used to not run the USB device stack and endpoint * callbacks in the ISR context, which happens when an callback function * is called. TODO: something similar should be implemented in the USB * device stack so that it can be used by all drivers. 
*/ static void udc_rpi_thread_main(void *arg1, void *unused1, void *unused2) { ARG_UNUSED(arg1); ARG_UNUSED(unused1); ARG_UNUSED(unused2); struct cb_msg msg; while (true) { k_msgq_get(&usb_dc_msgq, &msg, K_FOREVER); if (msg.ep_event) { struct udc_rpi_ep_state *ep_state = udc_rpi_get_ep_state(msg.ep); if (msg.type == USB_DC_EP_SETUP) { state.abort_control_writes = false; state.setup_available = true; } if (ep_state->cb) { ep_state->cb(msg.ep, msg.type); } } else { if (state.status_cb) { state.status_cb(msg.type, NULL); } } } } static int usb_rpi_init(void) { int ret; k_thread_create(&thread, thread_stack, USBD_THREAD_STACK_SIZE, udc_rpi_thread_main, NULL, NULL, NULL, K_PRIO_COOP(2), 0, K_NO_WAIT); k_thread_name_set(&thread, "usb_rpi"); ret = clock_control_on(CLK_DRV, CLK_ID); if (ret < 0) { return ret; } IRQ_CONNECT(USB_IRQ, USB_IRQ_PRI, udc_rpi_isr, 0, 0); irq_enable(USB_IRQ); return 0; } SYS_INIT(usb_rpi_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE); ```
/content/code_sandbox/drivers/usb/device/usb_dc_rpi_pico.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
7,615
```c /* * */ /* For accept4() */ #define _GNU_SOURCE 1 #define __packed __attribute__((__packed__)) #include <stdio.h> #include <stdarg.h> #include <stdbool.h> #include <errno.h> #include <string.h> #include <unistd.h> #include <stdlib.h> #include <sys/types.h> #include <sys/socket.h> #include <sys/un.h> #include <netinet/in.h> #include <arpa/inet.h> /* Zephyr headers */ #include <zephyr/kernel.h> #include <zephyr/usb/usb_device.h> #include <posix_board_if.h> #include "usb_dc_native_posix_adapt.h" #define LOG_LEVEL CONFIG_USB_DRIVER_LOG_LEVEL LOG_MODULE_REGISTER(native_posix_adapt); #define USBIP_PORT 3240 #define USBIP_VERSION 273 #define VERBOSE_DEBUG int connfd_global; int seqnum_global; int devid_global; /* Helpers */ #ifdef VERBOSE_DEBUG static void usbip_header_dump(struct usbip_header *hdr) { LOG_DBG("cmd %x seq %u dir %u ep %x", ntohl(hdr->common.command), ntohl(hdr->common.seqnum), ntohl(hdr->common.direction), ntohl(hdr->common.ep)); switch (ntohl(hdr->common.command)) { case USBIP_CMD_SUBMIT: LOG_DBG("flags %x np %u int %u buflen %u", ntohl(hdr->u.submit.transfer_flags), ntohl(hdr->u.submit.number_of_packets), ntohl(hdr->u.submit.interval), ntohl(hdr->u.submit.transfer_buffer_length)); break; case USBIP_CMD_UNLINK: LOG_DBG("seq %d", ntohl(hdr->u.unlink.seqnum)); break; default: break; } } #else #define usbip_header_dump(x) #endif void get_interface(uint8_t *descriptors) { while (descriptors[0]) { if (descriptors[1] == USB_DESC_INTERFACE) { LOG_DBG("interface found"); } /* skip to next descriptor */ descriptors += descriptors[0]; } } static int send_interfaces(const uint8_t *descriptors, int connfd) { struct devlist_interface { uint8_t bInterfaceClass; uint8_t bInterfaceSubClass; uint8_t bInterfaceProtocol; uint8_t padding; /* alignment */ } __packed iface; while (descriptors[0]) { if (descriptors[1] == USB_DESC_INTERFACE) { struct usb_if_descriptor *desc = (void *)descriptors; iface.bInterfaceClass = desc->bInterfaceClass; iface.bInterfaceSubClass = 
desc->bInterfaceSubClass; iface.bInterfaceProtocol = desc->bInterfaceProtocol; iface.padding = 0U; if (send(connfd, &iface, sizeof(iface), 0) != sizeof(iface)) { LOG_ERR("send() failed: %s", strerror(errno)); return errno; } } /* skip to next descriptor */ descriptors += descriptors[0]; } return 0; } static void fill_device(struct devlist_device *dev, const uint8_t *desc) { struct usb_device_descriptor *dev_dsc = (void *)desc; struct usb_cfg_descriptor *cfg = (void *)(desc + sizeof(struct usb_device_descriptor)); memset(dev->path, 0, 256); strcpy(dev->path, "/sys/devices/pci0000:00/0000:00:01.2/usb1/1-1"); memset(dev->busid, 0, 32); strcpy(dev->busid, "1-1"); dev->busnum = htonl(1); dev->devnum = htonl(2); if (IS_ENABLED(CONFIG_USB_NATIVE_POSIX_HS)) { dev->speed = htonl(3); } else { dev->speed = htonl(2); } dev->idVendor = htons(dev_dsc->idVendor); dev->idProduct = htons(dev_dsc->idProduct); dev->bcdDevice = htons(dev_dsc->bcdDevice); dev->bDeviceClass = dev_dsc->bDeviceClass; dev->bDeviceSubClass = dev_dsc->bDeviceSubClass; dev->bDeviceProtocol = dev_dsc->bDeviceProtocol; dev->bConfigurationValue = cfg->bConfigurationValue; dev->bNumConfigurations = dev_dsc->bNumConfigurations; dev->bNumInterfaces = cfg->bNumInterfaces; } static int send_device(const uint8_t *desc, int connfd) { struct devlist_device dev; fill_device(&dev, desc); if (send(connfd, &dev, sizeof(dev), 0) != sizeof(dev)) { LOG_ERR("send() device failed: %s", strerror(errno)); return errno; } return 0; } static int handle_device_list(const uint8_t *desc, int connfd) { struct op_common header = { .version = htons(USBIP_VERSION), .code = htons(OP_REP_DEVLIST), .status = 0, }; LOG_DBG("desc %p", desc); if (send(connfd, &header, sizeof(header), 0) != sizeof(header)) { LOG_ERR("send() header failed: %s", strerror(errno)); return errno; } /* Send number of devices */ uint32_t ndev = htonl(1); if (send(connfd, &ndev, sizeof(ndev), 0) != sizeof(ndev)) { LOG_ERR("send() ndev failed: %s", strerror(errno)); 
return errno; } send_device(desc, connfd); send_interfaces(desc, connfd); return 0; } static void handle_usbip_submit(int connfd, struct usbip_header *hdr) { struct usbip_submit *req = &hdr->u.submit; int read; LOG_DBG(""); read = recv(connfd, req, sizeof(*req), 0); if (read != sizeof(*req)) { LOG_ERR("recv() failed: %s", strerror(errno)); return; } usbip_header_dump((void *)hdr); if (ntohl(hdr->common.ep) == 0) { handle_usb_control(hdr); } else { handle_usb_data(hdr); } } static void handle_usbip_unlink(int connfd, struct usbip_header *hdr) { int read; LOG_DBG(""); /* Need to read the whole structure */ read = recv(connfd, &hdr->u, sizeof(hdr->u), 0); if (read != sizeof(hdr->u)) { LOG_ERR("recv() failed: %s", strerror(errno)); return; } usbip_header_dump((void *)hdr); /* TODO: unlink */ } static int handle_import(const uint8_t *desc, int connfd) { struct op_common header = { .version = htons(USBIP_VERSION), .code = htons(OP_REP_IMPORT), .status = 0, }; char busid[32]; LOG_DBG("attach device"); if (recv(connfd, busid, 32, 0) != sizeof(busid)) { LOG_ERR("recv() failed: %s", strerror(errno)); return errno; } if (send(connfd, &header, sizeof(header), 0) != sizeof(header)) { LOG_ERR("send() header failed: %s", strerror(errno)); return errno; } send_device(desc, connfd); return 0; } extern struct usb_desc_header __usb_descriptor_start[]; void usbip_start(void) { struct sockaddr_in srv; unsigned char attached; int listenfd, connfd; const uint8_t *desc; int reuse = 1; LOG_DBG("Starting"); /* * Do not use usb_get_device_descriptor(); * to prevent double string fixing */ desc = (const uint8_t *)__usb_descriptor_start; if (!desc) { LOG_ERR("Descriptors are not set"); posix_exit(EXIT_FAILURE); } listenfd = socket(PF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0); if (listenfd < 0) { LOG_ERR("socket() failed: %s", strerror(errno)); posix_exit(EXIT_FAILURE); } if (setsockopt(listenfd, SOL_SOCKET, SO_REUSEADDR, (const char *)&reuse, sizeof(reuse)) < 0) { LOG_WRN("setsockopt() failed: 
%s", strerror(errno)); } memset(&srv, 0, sizeof(srv)); srv.sin_family = AF_INET; srv.sin_addr.s_addr = htonl(INADDR_ANY); srv.sin_port = htons(USBIP_PORT); if (bind(listenfd, (struct sockaddr *)&srv, sizeof(srv)) < 0) { LOG_ERR("bind() failed: %s", strerror(errno)); posix_exit(EXIT_FAILURE); } if (listen(listenfd, SOMAXCONN) < 0) { LOG_ERR("listen() failed: %s", strerror(errno)); posix_exit(EXIT_FAILURE); } while (true) { struct sockaddr_in client_addr; socklen_t client_addr_len = sizeof(client_addr); connfd = accept4(listenfd, (struct sockaddr *)&client_addr, &client_addr_len, SOCK_NONBLOCK); if (connfd < 0) { if (errno == EAGAIN || errno == EWOULDBLOCK) { /* Non-blocking accept */ k_sleep(K_MSEC(100)); continue; } LOG_ERR("accept() failed: %s", strerror(errno)); posix_exit(EXIT_FAILURE); } connfd_global = connfd; LOG_DBG("Connection: %s", inet_ntoa(client_addr.sin_addr)); /* Set attached 0 */ attached = 0U; while (true) { struct usbip_header cmd; struct usbip_header_common *hdr = &cmd.common; int read; if (!attached) { struct op_common req; read = recv(connfd, &req, sizeof(req), 0); if (read < 0) { if (errno == EAGAIN || errno == EWOULDBLOCK) { /* Non-blocking accept */ k_sleep(K_MSEC(100)); continue; } } if (read != sizeof(req)) { LOG_WRN("wrong length, %d", read); /* Closing connection */ break; } LOG_HEXDUMP_DBG((uint8_t *)&req, sizeof(req), "Got request"); LOG_DBG("Code: 0x%x", ntohs(req.code)); switch (ntohs(req.code)) { case OP_REQ_DEVLIST: handle_device_list(desc, connfd); break; case OP_REQ_IMPORT: if (!handle_import(desc, connfd)) { attached = 1U; } break; default: LOG_ERR("Unhandled code: 0x%x", ntohs(req.code)); break; } continue; } /* Handle attached case */ read = recv(connfd, hdr, sizeof(*hdr), 0); if (read < 0) { if (errno == EAGAIN || errno == EWOULDBLOCK) { /* Non-blocking accept */ k_sleep(K_MSEC(100)); continue; } } LOG_HEXDUMP_DBG((uint8_t *)hdr, read, "Got cmd"); if (read != sizeof(*hdr)) { LOG_ERR("recv wrong length: %d", read); /* Closing 
connection */ break; } devid_global = ntohl(hdr->devid); seqnum_global = ntohl(hdr->seqnum); switch (ntohl(hdr->command)) { case USBIP_CMD_SUBMIT: handle_usbip_submit(connfd, &cmd); break; case USBIP_CMD_UNLINK: handle_usbip_unlink(connfd, &cmd); break; default: LOG_ERR("Unknown command: 0x%x", ntohl(hdr->command)); close(connfd); return; } } LOG_DBG("Closing connection"); close(connfd); } } int usbip_recv(uint8_t *buf, size_t len) { return recv(connfd_global, buf, len, 0); } int usbip_send(uint8_t ep, const uint8_t *data, size_t len) { return send(connfd_global, data, len, 0); } bool usbip_send_common(uint8_t ep, uint32_t data_len) { struct usbip_submit_rsp rsp; uint32_t ep_dir = USB_EP_DIR_IS_IN(ep) ? USBIP_DIR_IN : USBIP_DIR_OUT; uint32_t ep_idx = USB_EP_GET_IDX(ep); rsp.common.command = htonl(USBIP_RET_SUBMIT); rsp.common.seqnum = htonl(seqnum_global); rsp.common.devid = htonl(0); rsp.common.direction = htonl(ep_dir); rsp.common.ep = htonl(ep_idx); rsp.status = htonl(0); rsp.actual_length = htonl(data_len); rsp.start_frame = htonl(0); rsp.number_of_packets = htonl(0); rsp.error_count = htonl(0); rsp.setup = htonl(0); if (usbip_send(ep, (uint8_t *)&rsp, sizeof(rsp)) == sizeof(rsp)) { return true; } return false; } ```
/content/code_sandbox/drivers/usb/device/usb_dc_native_posix_adapt.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,899
```c /* * */ #define DT_DRV_COMPAT atmel_sam_usbhs #include <zephyr/usb/usb_device.h> #include <zephyr/drivers/clock_control/atmel_sam_pmc.h> #include <zephyr/irq.h> #include <zephyr/kernel.h> #include <zephyr/sys/barrier.h> #include <soc.h> #include <string.h> #define LOG_LEVEL CONFIG_USB_DRIVER_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(usb_dc_sam_usbhs); /* * This is defined in the support files for the SAM S7x, but not for * the SAM E7x nor SAM V7x. */ #ifndef USBHS_RAM_ADDR #define USBHS_RAM_ADDR (0xA0100000) #endif /* * The new Atmel DFP headers provide mode-specific interrupt register field * definitions. Map the existing generic definitions to these. */ #ifndef USBHS_DEVEPTISR_CTRL_RXSTPI #define USBHS_DEVEPTISR_CTRL_RXSTPI USBHS_DEVEPTISR_RXSTPI #endif #ifndef USBHS_DEVEPTICR_CTRL_RXSTPIC #define USBHS_DEVEPTICR_CTRL_RXSTPIC USBHS_DEVEPTICR_RXSTPIC #endif #ifndef USBHS_DEVEPTIMR_CTRL_STALLRQ #define USBHS_DEVEPTIMR_CTRL_STALLRQ USBHS_DEVEPTIMR_STALLRQ #endif #ifndef USBHS_DEVEPTIER_CTRL_RXSTPES #define USBHS_DEVEPTIER_CTRL_RXSTPES USBHS_DEVEPTIER_RXSTPES #endif #ifndef USBHS_DEVEPTIER_CTRL_STALLRQS #define USBHS_DEVEPTIER_CTRL_STALLRQS USBHS_DEVEPTIER_STALLRQS #endif #ifndef USBHS_DEVEPTIDR_CTRL_STALLRQC #define USBHS_DEVEPTIDR_CTRL_STALLRQC USBHS_DEVEPTIDR_STALLRQC #endif #define NUM_OF_EP_MAX DT_INST_PROP(0, num_bidir_endpoints) #define USB_MAXIMUM_SPEED DT_INST_ENUM_IDX_OR(0, maximum_speed, 1) BUILD_ASSERT(USB_MAXIMUM_SPEED, "low-speed is not supported"); struct usb_device_ep_data { uint16_t mps; usb_dc_ep_callback cb_in; usb_dc_ep_callback cb_out; uint8_t *fifo; }; struct usb_device_data { bool addr_enabled; usb_dc_status_callback status_cb; struct usb_device_ep_data ep_data[NUM_OF_EP_MAX]; }; static struct usb_device_data dev_data; /* Enable the USB device clock */ static void usb_dc_enable_clock(void) { /* Start the USB PLL */ PMC->CKGR_UCKR |= CKGR_UCKR_UPLLEN; /* Wait for it to be ready */ while (!(PMC->PMC_SR & PMC_SR_LOCKU)) { 
k_yield();
	}

	/* In low power mode, provide a 48MHZ clock instead of the 480MHz one */
	if ((USBHS->USBHS_DEVCTRL & USBHS_DEVCTRL_SPDCONF_Msk) ==
	    USBHS_DEVCTRL_SPDCONF_LOW_POWER) {
		/* Configure the USB_48M clock to be UPLLCK/10 */
		PMC->PMC_MCKR &= ~PMC_MCKR_UPLLDIV2;
		PMC->PMC_USB = PMC_USB_USBDIV(9) | PMC_USB_USBS;

		/* Enable USB_48M clock */
		PMC->PMC_SCER |= PMC_SCER_USBCLK;
	}
}

/* Disable the USB device clock */
static void usb_dc_disable_clock(void)
{
	/* Disable USB_48M clock */
	PMC->PMC_SCER &= ~PMC_SCER_USBCLK;

	/* Disable the USB PLL */
	PMC->CKGR_UCKR &= ~CKGR_UCKR_UPLLEN;
}

/* Check if the USB device is attached (DETACH bit cleared) */
static bool usb_dc_is_attached(void)
{
	return (USBHS->USBHS_DEVCTRL & USBHS_DEVCTRL_DETACH) == 0;
}

/* Check if an endpoint is configured (CFGOK set by the controller) */
static bool usb_dc_ep_is_configured(uint8_t ep_idx)
{
	return USBHS->USBHS_DEVEPTISR[ep_idx] & USBHS_DEVEPTISR_CFGOK;
}

/* Check if an endpoint is enabled */
static bool usb_dc_ep_is_enabled(uint8_t ep_idx)
{
	return USBHS->USBHS_DEVEPT & BIT(USBHS_DEVEPT_EPEN0_Pos + ep_idx);
}

/* Reset an endpoint */
static void usb_dc_ep_reset(uint8_t ep_idx)
{
	/* Pulse the per-endpoint reset bit: set, then clear */
	USBHS->USBHS_DEVEPT |= BIT(USBHS_DEVEPT_EPRST0_Pos + ep_idx);
	USBHS->USBHS_DEVEPT &= ~BIT(USBHS_DEVEPT_EPRST0_Pos + ep_idx);
	barrier_dsync_fence_full();
}

/* Enable endpoint interrupts, depending on the type and direction */
static void usb_dc_ep_enable_interrupts(uint8_t ep_idx)
{
	if (ep_idx == 0U) {
		/* Control endpoint: enable SETUP and OUT */
		USBHS->USBHS_DEVEPTIER[ep_idx] = USBHS_DEVEPTIER_CTRL_RXSTPES;
		USBHS->USBHS_DEVEPTIER[ep_idx] = USBHS_DEVEPTIER_RXOUTES;
	} else if ((USBHS->USBHS_DEVEPTCFG[ep_idx] & USBHS_DEVEPTCFG_EPDIR_Msk) ==
		   USBHS_DEVEPTCFG_EPDIR_IN) {
		/* IN direction: acknowledge FIFO empty interrupt */
		USBHS->USBHS_DEVEPTICR[ep_idx] = USBHS_DEVEPTICR_TXINIC;
		USBHS->USBHS_DEVEPTIER[ep_idx] = USBHS_DEVEPTIER_TXINES;
	} else {
		/* OUT direction */
		USBHS->USBHS_DEVEPTIER[ep_idx] = USBHS_DEVEPTIER_RXOUTES;
	}
}

/* Reset the endpoint FIFO pointer to the beginning of
the endpoint memory */
static void usb_dc_ep_fifo_reset(uint8_t ep_idx)
{
	uint8_t *p;

	/* Each endpoint's FIFO is mapped at a 0x8000-byte stride in the
	 * USBHS RAM area, per the address computation below.
	 */
	p = (uint8_t *)(USBHS_RAM_ADDR + 0x8000 * ep_idx);
	dev_data.ep_data[ep_idx].fifo = p;
}

/* Fetch a byte from the endpoint FIFO; advances the soft FIFO pointer */
static uint8_t usb_dc_ep_fifo_get(uint8_t ep_idx)
{
	return *(dev_data.ep_data[ep_idx].fifo++);
}

/* Put a byte from the endpoint FIFO; advances the soft FIFO pointer */
static void usb_dc_ep_fifo_put(uint8_t ep_idx, uint8_t data)
{
	*(dev_data.ep_data[ep_idx].fifo++) = data;
}

/* Handle interrupts on a control endpoint */
static void usb_dc_ep0_isr(void)
{
	/* Only consider interrupt sources that are currently enabled */
	uint32_t sr = USBHS->USBHS_DEVEPTISR[0] & USBHS->USBHS_DEVEPTIMR[0];
	uint32_t dev_ctrl = USBHS->USBHS_DEVCTRL;

	if (sr & USBHS_DEVEPTISR_CTRL_RXSTPI) {
		/* SETUP data received */
		usb_dc_ep_fifo_reset(0);
		dev_data.ep_data[0].cb_out(USB_EP_DIR_OUT, USB_DC_EP_SETUP);
	}
	if (sr & USBHS_DEVEPTISR_RXOUTI) {
		/* OUT (to device) data received */
		usb_dc_ep_fifo_reset(0);
		dev_data.ep_data[0].cb_out(USB_EP_DIR_OUT, USB_DC_EP_DATA_OUT);
	}
	if (sr & USBHS_DEVEPTISR_TXINI) {
		/* Disable the interrupt */
		USBHS->USBHS_DEVEPTIDR[0] = USBHS_DEVEPTIDR_TXINEC;

		/* IN (to host) transmit complete */
		usb_dc_ep_fifo_reset(0);
		dev_data.ep_data[0].cb_in(USB_EP_DIR_IN, USB_DC_EP_DATA_IN);

		if (!(dev_ctrl & USBHS_DEVCTRL_ADDEN) &&
		    (dev_ctrl & USBHS_DEVCTRL_UADD_Msk) != 0U) {
			/* Commit the pending address update. This
			 * must be done after the ack to the host
			 * completes else the ack will get dropped.
*/ USBHS->USBHS_DEVCTRL = dev_ctrl | USBHS_DEVCTRL_ADDEN; } } } /* Handle interrupts on a non-control endpoint */ static void usb_dc_ep_isr(uint8_t ep_idx) { uint32_t sr = USBHS->USBHS_DEVEPTISR[ep_idx] & USBHS->USBHS_DEVEPTIMR[ep_idx]; if (sr & USBHS_DEVEPTISR_RXOUTI) { uint8_t ep = ep_idx | USB_EP_DIR_OUT; /* Acknowledge the interrupt */ USBHS->USBHS_DEVEPTICR[ep_idx] = USBHS_DEVEPTICR_RXOUTIC; /* OUT (to device) data received */ usb_dc_ep_fifo_reset(ep_idx); dev_data.ep_data[ep_idx].cb_out(ep, USB_DC_EP_DATA_OUT); } if (sr & USBHS_DEVEPTISR_TXINI) { uint8_t ep = ep_idx | USB_EP_DIR_IN; /* Acknowledge the interrupt */ USBHS->USBHS_DEVEPTICR[ep_idx] = USBHS_DEVEPTICR_TXINIC; /* IN (to host) transmit complete */ usb_dc_ep_fifo_reset(ep_idx); dev_data.ep_data[ep_idx].cb_in(ep, USB_DC_EP_DATA_IN); } } /* Top level interrupt handler */ static void usb_dc_isr(void) { uint32_t sr = USBHS->USBHS_DEVISR & USBHS->USBHS_DEVIMR; /* End of resume interrupt */ if (sr & USBHS_DEVISR_EORSM) { /* Acknowledge the interrupt */ USBHS->USBHS_DEVICR = USBHS_DEVICR_EORSMC; /* Callback function */ dev_data.status_cb(USB_DC_RESUME, NULL); } /* End of reset interrupt */ if (sr & USBHS_DEVISR_EORST) { /* Acknowledge the interrupt */ USBHS->USBHS_DEVICR = USBHS_DEVICR_EORSTC; if (!usb_dc_ep_is_configured(0) && dev_data.ep_data[0].mps) { /* Restore EP0 configuration to previously set mps */ struct usb_dc_ep_cfg_data cfg = { .ep_addr = 0, .ep_mps = dev_data.ep_data[0].mps, .ep_type = USB_DC_EP_CONTROL, }; usb_dc_ep_configure(&cfg); usb_dc_ep_enable(0); } if (usb_dc_ep_is_enabled(0)) { /* The device clears some of the configuration of EP0 * when it receives the EORST. Re-enable interrupts. 
*/ usb_dc_ep_enable_interrupts(0); } /* Free all endpoint memory */ for (int idx = 1; idx < NUM_OF_EP_MAX; idx++) { usb_dc_ep_disable(idx); USBHS->USBHS_DEVEPTCFG[idx] &= ~USBHS_DEVEPTCFG_ALLOC; } /* Callback function */ dev_data.status_cb(USB_DC_RESET, NULL); } /* Suspend interrupt */ if (sr & USBHS_DEVISR_SUSP) { /* Acknowledge the interrupt */ USBHS->USBHS_DEVICR = USBHS_DEVICR_SUSPC; /* Callback function */ dev_data.status_cb(USB_DC_SUSPEND, NULL); } #ifdef CONFIG_USB_DEVICE_SOF /* SOF interrupt */ if (sr & USBHS_DEVISR_SOF) { /* Acknowledge the interrupt */ USBHS->USBHS_DEVICR = USBHS_DEVICR_SOFC; /* Callback function */ dev_data.status_cb(USB_DC_SOF, NULL); } #endif /* EP0 endpoint interrupt */ if (sr & USBHS_DEVISR_PEP_0) { usb_dc_ep0_isr(); } /* Other endpoints interrupt */ for (int ep_idx = 1; ep_idx < NUM_OF_EP_MAX; ep_idx++) { if (sr & BIT(USBHS_DEVISR_PEP_0_Pos + ep_idx)) { usb_dc_ep_isr(ep_idx); } } } /* Attach USB for device connection */ int usb_dc_attach(void) { const struct atmel_sam_pmc_config clock_cfg = SAM_DT_INST_CLOCK_PMC_CFG(0); uint32_t regval; /* Enable USBHS clock in PMC */ (void)clock_control_on(SAM_DT_PMC_CONTROLLER, (clock_control_subsys_t)&clock_cfg); /* Enable the USB controller in device mode with the clock frozen */ USBHS->USBHS_CTRL = USBHS_CTRL_UIMOD | USBHS_CTRL_USBE | USBHS_CTRL_FRZCLK; barrier_dsync_fence_full(); /* Select the speed */ regval = USBHS_DEVCTRL_DETACH; #if (USB_MAXIMUM_SPEED == 2) && IS_ENABLED(CONFIG_USB_DC_HAS_HS_SUPPORT) /* high-speed */ regval |= USBHS_DEVCTRL_SPDCONF_NORMAL; #else /* full-speed */ regval |= USBHS_DEVCTRL_SPDCONF_LOW_POWER; #endif USBHS->USBHS_DEVCTRL = regval; /* Enable the USB clock */ usb_dc_enable_clock(); /* Unfreeze the clock */ USBHS->USBHS_CTRL = USBHS_CTRL_UIMOD | USBHS_CTRL_USBE; /* Enable device interrupts */ USBHS->USBHS_DEVIER = USBHS_DEVIER_EORSMES; USBHS->USBHS_DEVIER = USBHS_DEVIER_EORSTES; USBHS->USBHS_DEVIER = USBHS_DEVIER_SUSPES; #ifdef CONFIG_USB_DEVICE_SOF 
USBHS->USBHS_DEVIER = USBHS_DEVIER_SOFES; #endif /* Connect and enable the interrupt */ IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), usb_dc_isr, 0, 0); irq_enable(DT_INST_IRQN(0)); /* Attach the device */ USBHS->USBHS_DEVCTRL &= ~USBHS_DEVCTRL_DETACH; LOG_DBG(""); return 0; } /* Detach the USB device */ int usb_dc_detach(void) { const struct atmel_sam_pmc_config clock_cfg = SAM_DT_INST_CLOCK_PMC_CFG(0); /* Detach the device */ USBHS->USBHS_DEVCTRL |= USBHS_DEVCTRL_DETACH; /* Disable the USB clock */ usb_dc_disable_clock(); /* Disable the USB controller and freeze the clock */ USBHS->USBHS_CTRL = USBHS_CTRL_UIMOD | USBHS_CTRL_FRZCLK; /* Disable USBHS clock in PMC */ (void)clock_control_off(SAM_DT_PMC_CONTROLLER, (clock_control_subsys_t)&clock_cfg); /* Disable interrupt */ irq_disable(DT_INST_IRQN(0)); LOG_DBG(""); return 0; } /* Reset the USB device */ int usb_dc_reset(void) { /* Reset the controller */ USBHS->USBHS_CTRL = USBHS_CTRL_UIMOD | USBHS_CTRL_FRZCLK; /* Clear private data */ (void)memset(&dev_data, 0, sizeof(dev_data)); LOG_DBG(""); return 0; } /* Set USB device address */ int usb_dc_set_address(uint8_t addr) { /* * Set the address but keep it disabled for now. It should be enabled * only after the ack to the host completes. 
*/ USBHS->USBHS_DEVCTRL &= ~(USBHS_DEVCTRL_UADD_Msk | USBHS_DEVCTRL_ADDEN); USBHS->USBHS_DEVCTRL |= USBHS_DEVCTRL_UADD(addr); LOG_DBG(""); return 0; } /* Set USB device controller status callback */ void usb_dc_set_status_callback(const usb_dc_status_callback cb) { LOG_DBG(""); dev_data.status_cb = cb; } /* Check endpoint capabilities */ int usb_dc_ep_check_cap(const struct usb_dc_ep_cfg_data * const cfg) { uint8_t ep_idx = USB_EP_GET_IDX(cfg->ep_addr); if (ep_idx >= NUM_OF_EP_MAX) { LOG_ERR("endpoint index/address out of range"); return -1; } if (ep_idx == 0U) { if (cfg->ep_type != USB_DC_EP_CONTROL) { LOG_ERR("pre-selected as control endpoint"); return -1; } } else if (ep_idx & BIT(0)) { if (USB_EP_GET_DIR(cfg->ep_addr) != USB_EP_DIR_IN) { LOG_INF("pre-selected as IN endpoint"); return -1; } } else { if (USB_EP_GET_DIR(cfg->ep_addr) != USB_EP_DIR_OUT) { LOG_INF("pre-selected as OUT endpoint"); return -1; } } if (cfg->ep_mps < 1 || cfg->ep_mps > 1024 || (cfg->ep_type == USB_DC_EP_CONTROL && cfg->ep_mps > 64)) { LOG_ERR("invalid endpoint size"); return -1; } return 0; } /* Configure endpoint */ int usb_dc_ep_configure(const struct usb_dc_ep_cfg_data *const cfg) { uint8_t ep_idx = USB_EP_GET_IDX(cfg->ep_addr); bool ep_configured[NUM_OF_EP_MAX]; bool ep_enabled[NUM_OF_EP_MAX]; uint32_t regval = 0U; int log2ceil_mps; if (usb_dc_ep_check_cap(cfg) != 0) { return -EINVAL; } if (!usb_dc_is_attached()) { LOG_ERR("device not attached"); return -ENODEV; } if (usb_dc_ep_is_enabled(ep_idx)) { LOG_WRN("endpoint already configured & enabled 0x%x", ep_idx); return -EBUSY; } LOG_INF("Configure ep %x, mps %d, type %d", cfg->ep_addr, cfg->ep_mps, cfg->ep_type); /* Reset the endpoint */ usb_dc_ep_reset(ep_idx); /* Initialize the endpoint FIFO */ usb_dc_ep_fifo_reset(ep_idx); /* Map the endpoint type */ switch (cfg->ep_type) { case USB_DC_EP_CONTROL: regval |= USBHS_DEVEPTCFG_EPTYPE_CTRL; break; case USB_DC_EP_ISOCHRONOUS: regval |= USBHS_DEVEPTCFG_EPTYPE_ISO; break; case 
USB_DC_EP_BULK: regval |= USBHS_DEVEPTCFG_EPTYPE_BLK; break; case USB_DC_EP_INTERRUPT: regval |= USBHS_DEVEPTCFG_EPTYPE_INTRPT; break; default: return -EINVAL; } /* Map the endpoint direction */ if (USB_EP_DIR_IS_OUT(cfg->ep_addr) || cfg->ep_type == USB_DC_EP_CONTROL) { regval |= USBHS_DEVEPTCFG_EPDIR_OUT; } else { regval |= USBHS_DEVEPTCFG_EPDIR_IN; } /* * Map the endpoint size to the buffer size. Only power of 2 buffer * sizes between 8 and 1024 are possible, get the next power of 2. */ log2ceil_mps = 32 - __builtin_clz((MAX(cfg->ep_mps, 8) << 1) - 1) - 1; regval |= USBHS_DEVEPTCFG_EPSIZE(log2ceil_mps - 3); dev_data.ep_data[ep_idx].mps = cfg->ep_mps; /* Use double bank buffering for isochronous endpoints */ if (cfg->ep_type == USB_DC_EP_ISOCHRONOUS) { regval |= USBHS_DEVEPTCFG_EPBK_2_BANK; } else { regval |= USBHS_DEVEPTCFG_EPBK_1_BANK; } /* Configure the endpoint */ USBHS->USBHS_DEVEPTCFG[ep_idx] = regval; /* * Allocate the memory. This part is a bit tricky as memory can only be * allocated if all above endpoints are disabled and not allocated. Loop * backward through the above endpoints, disable them if they are * enabled, deallocate their memory if needed. Then loop again through * all the above endpoints to allocate and enabled them. 
*/ for (int i = NUM_OF_EP_MAX - 1; i > ep_idx; i--) { ep_configured[i] = usb_dc_ep_is_configured(i); ep_enabled[i] = usb_dc_ep_is_enabled(i); if (ep_enabled[i]) { LOG_INF("Temporary disable ep idx %x", i); usb_dc_ep_disable(i); } if (ep_configured[i]) { USBHS->USBHS_DEVEPTCFG[i] &= ~USBHS_DEVEPTCFG_ALLOC; } } ep_configured[ep_idx] = true; ep_enabled[ep_idx] = false; for (int i = ep_idx; i < NUM_OF_EP_MAX; i++) { if (ep_configured[i]) { USBHS->USBHS_DEVEPTCFG[i] |= USBHS_DEVEPTCFG_ALLOC; } if (ep_enabled[i]) { usb_dc_ep_enable(i); } } /* Check that the endpoint is correctly configured */ if (!usb_dc_ep_is_configured(ep_idx)) { LOG_ERR("endpoint configuration failed"); return -EINVAL; } return 0; } /* Set stall condition for the selected endpoint */ int usb_dc_ep_set_stall(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (ep_idx >= NUM_OF_EP_MAX) { LOG_ERR("wrong endpoint index/address"); return -EINVAL; } USBHS->USBHS_DEVEPTIER[ep_idx] = USBHS_DEVEPTIER_CTRL_STALLRQS; LOG_DBG("ep 0x%x", ep); return 0; } /* Clear stall condition for the selected endpoint */ int usb_dc_ep_clear_stall(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (ep_idx >= NUM_OF_EP_MAX) { LOG_ERR("wrong endpoint index/address"); return -EINVAL; } USBHS->USBHS_DEVEPTIDR[ep_idx] = USBHS_DEVEPTIDR_CTRL_STALLRQC; LOG_DBG("ep 0x%x", ep); return 0; } /* Check if the selected endpoint is stalled */ int usb_dc_ep_is_stalled(uint8_t ep, uint8_t *stalled) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (ep_idx >= NUM_OF_EP_MAX) { LOG_ERR("wrong endpoint index/address"); return -EINVAL; } if (!stalled) { return -EINVAL; } *stalled = (USBHS->USBHS_DEVEPTIMR[ep_idx] & USBHS_DEVEPTIMR_CTRL_STALLRQ) != 0; LOG_DBG("ep 0x%x", ep); return 0; } /* Halt the selected endpoint */ int usb_dc_ep_halt(uint8_t ep) { return usb_dc_ep_set_stall(ep); } /* Enable the selected endpoint */ int usb_dc_ep_enable(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (ep_idx >= NUM_OF_EP_MAX) { LOG_ERR("wrong endpoint 
index/address"); return -EINVAL; } if (!usb_dc_ep_is_configured(ep_idx)) { LOG_ERR("endpoint not configured"); return -ENODEV; } /* Enable endpoint */ USBHS->USBHS_DEVEPT |= BIT(USBHS_DEVEPT_EPEN0_Pos + ep_idx); /* Enable endpoint interrupts */ USBHS->USBHS_DEVIER = BIT(USBHS_DEVIER_PEP_0_Pos + ep_idx); /* Enable SETUP, IN or OUT endpoint interrupts */ usb_dc_ep_enable_interrupts(ep_idx); LOG_INF("Enable ep 0x%x", ep); return 0; } /* Disable the selected endpoint */ int usb_dc_ep_disable(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (ep_idx >= NUM_OF_EP_MAX) { LOG_ERR("wrong endpoint index/address"); return -EINVAL; } /* Disable endpoint interrupt */ USBHS->USBHS_DEVIDR = BIT(USBHS_DEVIDR_PEP_0_Pos + ep_idx); /* Disable endpoint and SETUP, IN or OUT interrupts */ USBHS->USBHS_DEVEPT &= ~BIT(USBHS_DEVEPT_EPEN0_Pos + ep_idx); LOG_INF("Disable ep 0x%x", ep); return 0; } /* Flush the selected endpoint */ int usb_dc_ep_flush(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (ep_idx >= NUM_OF_EP_MAX) { LOG_ERR("wrong endpoint index/address"); return -EINVAL; } if (!usb_dc_ep_is_enabled(ep_idx)) { LOG_ERR("endpoint not enabled"); return -ENODEV; } /* Disable the IN interrupt */ USBHS->USBHS_DEVEPTIDR[ep_idx] = USBHS_DEVEPTIDR_TXINEC; /* Kill the last written bank if needed */ if (USBHS->USBHS_DEVEPTISR[ep_idx] & USBHS_DEVEPTISR_NBUSYBK_Msk) { USBHS->USBHS_DEVEPTIER[ep_idx] = USBHS_DEVEPTIER_KILLBKS; barrier_dsync_fence_full(); while (USBHS->USBHS_DEVEPTIMR[ep_idx] & USBHS_DEVEPTIMR_KILLBK) { k_yield(); } } /* Reset the endpoint */ usb_dc_ep_reset(ep_idx); /* Re-enable interrupts */ usb_dc_ep_enable_interrupts(ep_idx); LOG_DBG("ep 0x%x", ep); return 0; } /* Write data to the specified endpoint */ int usb_dc_ep_write(uint8_t ep, const uint8_t *data, uint32_t data_len, uint32_t *ret_bytes) { uint8_t ep_idx = USB_EP_GET_IDX(ep); uint32_t packet_len; if (ep_idx >= NUM_OF_EP_MAX) { LOG_ERR("wrong endpoint index/address"); return -EINVAL; } if 
(!usb_dc_ep_is_enabled(ep_idx)) { LOG_ERR("endpoint not enabled"); return -ENODEV; } if (USB_EP_GET_DIR(ep) != USB_EP_DIR_IN) { LOG_ERR("wrong endpoint direction"); return -EINVAL; } if ((USBHS->USBHS_DEVEPTIMR[ep_idx] & USBHS_DEVEPTIMR_CTRL_STALLRQ) != 0) { LOG_WRN("endpoint is stalled"); return -EBUSY; } /* Write the data to the FIFO */ packet_len = MIN(data_len, dev_data.ep_data[ep_idx].mps); for (int i = 0; i < packet_len; i++) { usb_dc_ep_fifo_put(ep_idx, data[i]); } barrier_dsync_fence_full(); if (ep_idx == 0U) { /* * Control endpoint: clear the interrupt flag to send the data, * and re-enable the interrupts to trigger an interrupt at the * end of the transfer. */ USBHS->USBHS_DEVEPTICR[ep_idx] = USBHS_DEVEPTICR_TXINIC; USBHS->USBHS_DEVEPTIER[ep_idx] = USBHS_DEVEPTIER_TXINES; } else { /* * Other endpoint types: clear the FIFO control flag to send * the data. */ USBHS->USBHS_DEVEPTIDR[ep_idx] = USBHS_DEVEPTIDR_FIFOCONC; } if (ret_bytes) { *ret_bytes = packet_len; } LOG_DBG("ep 0x%x write %d bytes from %d", ep, packet_len, data_len); return 0; } /* Read data from the specified endpoint */ int usb_dc_ep_read(uint8_t ep, uint8_t *data, uint32_t max_data_len, uint32_t *read_bytes) { uint8_t ep_idx = USB_EP_GET_IDX(ep); int rc; rc = usb_dc_ep_read_wait(ep, data, max_data_len, read_bytes); if (rc) { return rc; } if (!data && !max_data_len) { /* When both buffer and max data to read are zero the above * call would fetch the data len and we simply return. 
*/ return 0; } /* If the packet has been read entirely, get the next one */ if (!(USBHS->USBHS_DEVEPTISR[ep_idx] & USBHS_DEVEPTISR_RWALL)) { rc = usb_dc_ep_read_continue(ep); } LOG_DBG("ep 0x%x", ep); return rc; } /* Set callback function for the specified endpoint */ int usb_dc_ep_set_callback(uint8_t ep, const usb_dc_ep_callback cb) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (ep_idx >= NUM_OF_EP_MAX) { LOG_ERR("wrong endpoint index/address"); return -EINVAL; } if (USB_EP_DIR_IS_IN(ep)) { dev_data.ep_data[ep_idx].cb_in = cb; } else { dev_data.ep_data[ep_idx].cb_out = cb; } LOG_DBG("ep 0x%x", ep); return 0; } /* Read data from the specified endpoint */ int usb_dc_ep_read_wait(uint8_t ep, uint8_t *data, uint32_t max_data_len, uint32_t *read_bytes) { uint8_t ep_idx = USB_EP_GET_IDX(ep); uint32_t data_len = (USBHS->USBHS_DEVEPTISR[ep_idx] & USBHS_DEVEPTISR_BYCT_Msk) >> USBHS_DEVEPTISR_BYCT_Pos; if (ep_idx >= NUM_OF_EP_MAX) { LOG_ERR("wrong endpoint index/address"); return -EINVAL; } if (!usb_dc_ep_is_enabled(ep_idx)) { LOG_ERR("endpoint not enabled"); return -ENODEV; } if (USB_EP_GET_DIR(ep) != USB_EP_DIR_OUT) { LOG_ERR("wrong endpoint direction"); return -EINVAL; } if ((USBHS->USBHS_DEVEPTIMR[ep_idx] & USBHS_DEVEPTIMR_CTRL_STALLRQ) != 0) { LOG_WRN("endpoint is stalled"); return -EBUSY; } if (!data && !max_data_len) { /* * When both buffer and max data to read are zero return * the available data in buffer. 
*/ if (read_bytes) { *read_bytes = data_len; } return 0; } if (data_len > max_data_len) { LOG_WRN("Not enough space to copy all the data!"); data_len = max_data_len; } if (data != NULL) { for (int i = 0; i < data_len; i++) { data[i] = usb_dc_ep_fifo_get(ep_idx); } } if (read_bytes) { *read_bytes = data_len; } LOG_DBG("ep 0x%x read %d bytes", ep, data_len); return 0; } /* Continue reading data from the endpoint */ int usb_dc_ep_read_continue(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (ep_idx >= NUM_OF_EP_MAX) { LOG_ERR("wrong endpoint index/address"); return -EINVAL; } if (!usb_dc_ep_is_enabled(ep_idx)) { LOG_ERR("endpoint not enabled"); return -ENODEV; } if (USB_EP_GET_DIR(ep) != USB_EP_DIR_OUT) { LOG_ERR("wrong endpoint direction"); return -EINVAL; } if (ep_idx == 0U) { /* * Control endpoint: clear the interrupt flag to send the data. * It is easier to clear both SETUP and OUT flag than checking * the stage of the transfer. */ USBHS->USBHS_DEVEPTICR[ep_idx] = USBHS_DEVEPTICR_RXOUTIC; USBHS->USBHS_DEVEPTICR[ep_idx] = USBHS_DEVEPTICR_CTRL_RXSTPIC; } else { /* * Other endpoint types: clear the FIFO control flag to * receive more data. */ USBHS->USBHS_DEVEPTIDR[ep_idx] = USBHS_DEVEPTIDR_FIFOCONC; } LOG_DBG("ep 0x%x continue", ep); return 0; } /* Endpoint max packet size (mps) */ int usb_dc_ep_mps(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (ep_idx >= NUM_OF_EP_MAX) { LOG_ERR("wrong endpoint index/address"); return -EINVAL; } return dev_data.ep_data[ep_idx].mps; } ```
/content/code_sandbox/drivers/usb/device/usb_dc_sam_usbhs.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
7,262
```c /* * */ #define DT_DRV_COMPAT nuvoton_numaker_usbd #include <zephyr/kernel.h> #include <zephyr/usb/usb_device.h> #include <zephyr/dt-bindings/usb/usb.h> #include <zephyr/sys/math_extras.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/clock_control_numaker.h> #include <zephyr/drivers/reset.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(usb_dc_numaker, CONFIG_USB_DRIVER_LOG_LEVEL); #include <soc.h> #include <NuMicro.h> /* USBD notes * * 1. Require 48MHz clock source * (1) Not support HIRC48 as clock source. It involves trim with USB SOF packets * and isn't suitable in HAL. * (2) Instead of HICR48, core clock is required to be multiple of 48MHz e.g. 192MHz, * to generate necessary 48MHz. */ /* For bus reset, keep 'SE0' (USB spec: SE0 >= 2.5 ms) */ #define NUMAKER_USBD_BUS_RESET_DRV_SE0_US 3000 /* For bus resume, generate 'K' (USB spec: 'K' >= 1 ms) */ #define NUMAKER_USBD_BUS_RESUME_DRV_K_US 1500 /* Reserve DMA buffer for Setup/CTRL OUT/CTRL IN, required to be 8-byte aligned */ #define NUMAKER_USBD_DMABUF_SIZE_SETUP 8 #define NUMAKER_USBD_DMABUF_SIZE_CTRLOUT 64 #define NUMAKER_USBD_DMABUF_SIZE_CTRLIN 64 /* Maximum number of EP contexts across all instances * This is to static-allocate EP contexts which can accommodate all instances. * The number of effective EP contexts per instance is passed on through its * num_bidir_endpoints, which must not be larger than this. 
*/ #define NUMAKER_USBD_EP_MAXNUM 25ul /* Message type */ #define NUMAKER_USBD_MSG_TYPE_SW_RECONN 0 /* S/W reconnect */ #define NUMAKER_USBD_MSG_TYPE_CB_STATE 1 /* Callback for usb_dc_status_code */ #define NUMAKER_USBD_MSG_TYPE_CB_EP 2 /* Callback for usb_dc_ep_cb_status_code */ /* Message structure */ struct numaker_usbd_msg { uint32_t type; union { struct { enum usb_dc_status_code status_code; } cb_device; struct { uint8_t ep; enum usb_dc_ep_cb_status_code status_code; } cb_ep; }; }; /* Immutable device context */ struct numaker_usbd_config { USBD_T *base; const struct reset_dt_spec reset; uint32_t clk_modidx; uint32_t clk_src; uint32_t clk_div; const struct device *clkctrl_dev; void (*irq_config_func)(const struct device *dev); void (*irq_unconfig_func)(const struct device *dev); const struct pinctrl_dev_config *pincfg; uint32_t num_bidir_endpoints; uint32_t dmabuf_size; bool disallow_iso_inout_same; }; /* EP context */ struct numaker_usbd_ep { bool valid; bool nak_clr; /* NAK cleared (ACK next transaction) */ const struct device *dev; /* Pointer to the containing device */ uint8_t ep_hw_idx; /* BSP USBD driver EP index EP0, EP1, EP2, etc */ uint32_t ep_hw_cfg; /* BSP USBD driver EP configuration */ /* EP DMA buffer */ bool dmabuf_valid; uint32_t dmabuf_base; uint32_t dmabuf_size; /* On USBD, no H/W FIFO. Simulate based on above DMA buffer with * one-shot implementation */ uint32_t read_fifo_pos; uint32_t read_fifo_used; uint32_t write_fifo_pos; uint32_t write_fifo_free; /* NOTE: On USBD, Setup and CTRL OUT are not completely separated. CTRL OUT MXPLD * can be overridden to 8 by next Setup. To overcome it, we make one copy of CTRL * OUT MXPLD immediately on its interrupt. 
*/ uint32_t mxpld_ctrlout; /* EP address */ bool addr_valid; uint8_t addr; /* EP MPS */ bool mps_valid; uint16_t mps; usb_dc_ep_callback cb; /* EP callback function */ }; /* EP context manager */ struct numaker_usbd_ep_mgmt { /* EP context management * * Allocate-only, and de-allocate all on re-initialize in usb_dc_attach(). */ uint8_t ep_idx; /* DMA buffer management * * Allocate-only, and de-allocate all on re-initialize in usb_dc_attach(). */ uint32_t dmabuf_pos; /* Pass Setup packet from ISR to thread */ bool new_setup; struct usb_setup_packet setup_packet; struct numaker_usbd_ep ep_pool[NUMAKER_USBD_EP_MAXNUM]; }; /* Mutable device context */ struct numaker_usbd_data { uint8_t addr; /* Host assigned USB device address */ struct k_mutex sync_mutex; /* Enable interrupt top/bottom halves processing * * Registered callbacks may use mutex or other kernel functions which are not supported * in interrupt context */ struct k_msgq msgq; struct numaker_usbd_msg msgq_buf[CONFIG_USB_DC_NUMAKER_MSG_QUEUE_SIZE]; K_KERNEL_STACK_MEMBER(msg_hdlr_thread_stack, CONFIG_USB_DC_NUMAKER_MSG_HANDLER_THREAD_STACK_SIZE); struct k_thread msg_hdlr_thread; usb_dc_status_callback status_cb; /* Status callback function */ struct numaker_usbd_ep_mgmt ep_mgmt; /* EP management */ }; static inline const struct device *numaker_usbd_device_get(void); static inline void numaker_usbd_lock(const struct device *dev) { struct numaker_usbd_data *data = dev->data; k_mutex_lock(&data->sync_mutex, K_FOREVER); } static inline void numaker_usbd_unlock(const struct device *dev) { struct numaker_usbd_data *data = dev->data; k_mutex_unlock(&data->sync_mutex); } static inline void numaker_usbd_sw_connect(const struct device *dev) { const struct numaker_usbd_config *config = dev->config; USBD_T *const base = config->base; /* Clear all interrupts first for clean */ base->INTSTS = base->INTSTS; /* Enable relevant interrupts */ base->INTEN = USBD_INT_BUS | USBD_INT_USB | USBD_INT_FLDET | USBD_INT_WAKEUP | 
USBD_INT_SOF; /* Clear SE0 for connect */ base->SE0 &= ~USBD_DRVSE0; } static inline void numaker_usbd_sw_disconnect(const struct device *dev) { const struct numaker_usbd_config *config = dev->config; USBD_T *const base = config->base; /* Set SE0 for disconnect */ base->SE0 |= USBD_DRVSE0; } static inline void numaker_usbd_sw_reconnect(const struct device *dev) { /* Keep SE0 to trigger bus reset */ numaker_usbd_sw_disconnect(dev); k_sleep(K_USEC(NUMAKER_USBD_BUS_RESET_DRV_SE0_US)); numaker_usbd_sw_connect(dev); } static inline void numaker_usbd_reset_addr(const struct device *dev) { const struct numaker_usbd_config *config = dev->config; struct numaker_usbd_data *data = dev->data; USBD_T *const base = config->base; base->FADDR = 0; data->addr = 0; } static inline void numaker_usbd_set_addr(const struct device *dev) { const struct numaker_usbd_config *config = dev->config; struct numaker_usbd_data *data = dev->data; USBD_T *const base = config->base; if (base->FADDR != data->addr) { base->FADDR = data->addr; } } /* USBD EP base by e.g. EP0, EP1, ... */ static inline USBD_EP_T *numaker_usbd_ep_base(const struct device *dev, uint32_t ep_hw_idx) { const struct numaker_usbd_config *config = dev->config; USBD_T *const base = config->base; return base->EP + ep_hw_idx; } static inline uint32_t numaker_usbd_ep_fifo_max(struct numaker_usbd_ep *ep_cur) { /* NOTE: For one-shot implementation, effective size of EP FIFO is limited to EP MPS */ __ASSERT_NO_MSG(ep_cur->dmabuf_valid); __ASSERT_NO_MSG(ep_cur->mps_valid); __ASSERT_NO_MSG(ep_cur->mps <= ep_cur->dmabuf_size); return ep_cur->mps; } static inline uint32_t numaker_usbd_ep_fifo_used(struct numaker_usbd_ep *ep_cur) { __ASSERT_NO_MSG(ep_cur->dmabuf_valid); return USB_EP_DIR_IS_OUT(ep_cur->addr) ? ep_cur->read_fifo_used : numaker_usbd_ep_fifo_max(ep_cur) - ep_cur->write_fifo_free; } /* Reset EP FIFO * * NOTE: EP FIFO is based on EP DMA buffer, which may not be configured yet. 
*/ static void numaker_usbd_ep_fifo_reset(struct numaker_usbd_ep *ep_cur) { if (ep_cur->dmabuf_valid && ep_cur->mps_valid) { if (USB_EP_DIR_IS_OUT(ep_cur->addr)) { /* Read FIFO */ ep_cur->read_fifo_pos = ep_cur->dmabuf_base; ep_cur->read_fifo_used = 0; } else { /* Write FIFO */ ep_cur->write_fifo_pos = ep_cur->dmabuf_base; ep_cur->write_fifo_free = numaker_usbd_ep_fifo_max(ep_cur); } } } static inline void numaker_usbd_ep_set_stall(struct numaker_usbd_ep *ep_cur) { const struct device *dev = ep_cur->dev; USBD_EP_T *ep_base = numaker_usbd_ep_base(dev, ep_cur->ep_hw_idx); /* Set EP to stalled */ ep_base->CFGP |= USBD_CFGP_SSTALL_Msk; } /* Reset EP to unstalled and data toggle bit to 0 */ static inline void numaker_usbd_ep_clear_stall_n_data_toggle(struct numaker_usbd_ep *ep_cur) { const struct device *dev = ep_cur->dev; USBD_EP_T *ep_base = numaker_usbd_ep_base(dev, ep_cur->ep_hw_idx); /* Reset EP to unstalled */ ep_base->CFGP &= ~USBD_CFGP_SSTALL_Msk; /* Reset EP data toggle bit to 0 */ ep_base->CFG &= ~USBD_CFG_DSQSYNC_Msk; } static inline bool numaker_usbd_ep_is_stalled(struct numaker_usbd_ep *ep_cur) { const struct device *dev = ep_cur->dev; USBD_EP_T *ep_base = numaker_usbd_ep_base(dev, ep_cur->ep_hw_idx); return ep_base->CFGP & USBD_CFGP_SSTALL_Msk; } static int numaker_usbd_send_msg(const struct device *dev, const struct numaker_usbd_msg *msg) { struct numaker_usbd_data *data = dev->data; int rc; rc = k_msgq_put(&data->msgq, msg, K_NO_WAIT); if (rc < 0) { /* Try to recover by S/W reconnect */ struct numaker_usbd_msg msg_reconn = { .type = NUMAKER_USBD_MSG_TYPE_SW_RECONN, }; LOG_ERR("Message queue overflow"); /* Discard all not yet received messages for error recovery below */ k_msgq_purge(&data->msgq); rc = k_msgq_put(&data->msgq, &msg_reconn, K_NO_WAIT); if (rc < 0) { LOG_ERR("Message queue overflow again"); } } return rc; } static int numaker_usbd_hw_setup(const struct device *dev) { const struct numaker_usbd_config *config = dev->config; USBD_T *const base 
= config->base; int rc; struct numaker_scc_subsys scc_subsys; /* Reset controller ready? */ if (!device_is_ready(config->reset.dev)) { LOG_ERR("Reset controller not ready"); return -ENODEV; } SYS_UnlockReg(); /* Configure USB PHY for USBD */ SYS->USBPHY = (SYS->USBPHY & ~SYS_USBPHY_USBROLE_Msk) | (SYS_USBPHY_USBROLE_STD_USBD | SYS_USBPHY_USBEN_Msk | SYS_USBPHY_SBO_Msk); /* Invoke Clock controller to enable module clock */ memset(&scc_subsys, 0x00, sizeof(scc_subsys)); scc_subsys.subsys_id = NUMAKER_SCC_SUBSYS_ID_PCC; scc_subsys.pcc.clk_modidx = config->clk_modidx; scc_subsys.pcc.clk_src = config->clk_src; scc_subsys.pcc.clk_div = config->clk_div; /* Equivalent to CLK_EnableModuleClock() */ rc = clock_control_on(config->clkctrl_dev, (clock_control_subsys_t)&scc_subsys); if (rc < 0) { goto cleanup; } /* Equivalent to CLK_SetModuleClock() */ rc = clock_control_configure(config->clkctrl_dev, (clock_control_subsys_t)&scc_subsys, NULL); if (rc < 0) { goto cleanup; } /* Configure pinmux (NuMaker's SYS MFP) */ rc = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (rc < 0) { goto cleanup; } /* Invoke Reset controller to reset module to default state */ /* Equivalent to SYS_ResetModule() */ reset_line_toggle_dt(&config->reset); /* Initialize USBD engine */ /* NOTE: BSP USBD driver: ATTR = 0x7D0 */ base->ATTR = USBD_ATTR_BYTEM_Msk | BIT(9) | USBD_ATTR_DPPUEN_Msk | USBD_ATTR_USBEN_Msk | BIT(6) | USBD_ATTR_PHYEN_Msk; /* Set SE0 for S/W disconnect */ numaker_usbd_sw_disconnect(dev); /* NOTE: Ignore DT maximum-speed with USBD fixed to full-speed */ /* Initialize IRQ */ config->irq_config_func(dev); cleanup: SYS_LockReg(); return rc; } static void numaker_usbd_hw_shutdown(const struct device *dev) { const struct numaker_usbd_config *config = dev->config; USBD_T *const base = config->base; struct numaker_scc_subsys scc_subsys; SYS_UnlockReg(); /* Uninitialize IRQ */ config->irq_unconfig_func(dev); /* Set SE0 for S/W disconnect */ numaker_usbd_sw_disconnect(dev); /* 
Disable USB PHY */ base->ATTR &= ~USBD_PHY_EN; /* Invoke Clock controller to disable module clock */ memset(&scc_subsys, 0x00, sizeof(scc_subsys)); scc_subsys.subsys_id = NUMAKER_SCC_SUBSYS_ID_PCC; scc_subsys.pcc.clk_modidx = config->clk_modidx; /* Equivalent to CLK_DisableModuleClock() */ clock_control_off(config->clkctrl_dev, (clock_control_subsys_t)&scc_subsys); /* Invoke Reset controller to reset module to default state */ /* Equivalent to SYS_ResetModule() */ reset_line_toggle_dt(&config->reset); SYS_LockReg(); } /* Interrupt top half processing for bus reset */ static void numaker_usbd_bus_reset_th(const struct device *dev) { const struct numaker_usbd_config *config = dev->config; USBD_EP_T *ep_base; for (uint32_t i = 0ul; i < config->num_bidir_endpoints; i++) { ep_base = numaker_usbd_ep_base(dev, EP0 + i); /* Cancel EP on-going transaction */ ep_base->CFGP |= USBD_CFGP_CLRRDY_Msk; /* Reset EP to unstalled */ ep_base->CFGP &= ~USBD_CFGP_SSTALL_Msk; /* Reset EP data toggle bit to 0 */ ep_base->CFG &= ~USBD_CFG_DSQSYNC_Msk; /* Except EP0/EP1 kept resident for CTRL OUT/IN, disable all other EPs */ if (i >= 2) { ep_base->CFG = 0; } } numaker_usbd_reset_addr(dev); } static void numaker_usbd_remote_wakeup(const struct device *dev) { const struct numaker_usbd_config *config = dev->config; USBD_T *const base = config->base; /* Enable back USB/PHY first */ base->ATTR |= USBD_ATTR_USBEN_Msk | USBD_ATTR_PHYEN_Msk; /* Then generate 'K' */ base->ATTR |= USBD_ATTR_RWAKEUP_Msk; k_sleep(K_USEC(NUMAKER_USBD_BUS_RESUME_DRV_K_US)); base->ATTR ^= USBD_ATTR_RWAKEUP_Msk; } /* USBD SRAM base for DMA */ static inline uint32_t numaker_usbd_buf_base(const struct device *dev) { const struct numaker_usbd_config *config = dev->config; USBD_T *const base = config->base; return ((uint32_t)base + 0x800ul); } /* Copy to user buffer from Setup FIFO */ static void numaker_usbd_setup_fifo_copy_to_user(const struct device *dev, uint8_t *usrbuf) { const struct numaker_usbd_config *config = 
dev->config; USBD_T *const base = config->base; uint32_t dmabuf_addr; dmabuf_addr = numaker_usbd_buf_base(dev) + (base->STBUFSEG & USBD_STBUFSEG_STBUFSEG_Msk); bytecpy(usrbuf, (uint8_t *)dmabuf_addr, 8ul); } /* Copy data to user buffer from EP FIFO * * size_p holds size to copy/copied on input/output */ static int numaker_usbd_ep_fifo_copy_to_user(struct numaker_usbd_ep *ep_cur, uint8_t *usrbuf, uint32_t *size_p) { const struct device *dev = ep_cur->dev; uint32_t dmabuf_addr; __ASSERT_NO_MSG(size_p); __ASSERT_NO_MSG(ep_cur->dmabuf_valid); dmabuf_addr = numaker_usbd_buf_base(dev) + ep_cur->read_fifo_pos; /* Clamp to read FIFO used count */ *size_p = MIN(*size_p, numaker_usbd_ep_fifo_used(ep_cur)); bytecpy(usrbuf, (uint8_t *)dmabuf_addr, *size_p); /* Advance read FIFO */ ep_cur->read_fifo_pos += *size_p; ep_cur->read_fifo_used -= *size_p; if (ep_cur->read_fifo_used == 0) { ep_cur->read_fifo_pos = ep_cur->dmabuf_base; } return 0; } /* Copy data from user buffer to EP FIFO * * size_p holds size to copy/copied on input/output */ static int numaker_usbd_ep_fifo_copy_from_user(struct numaker_usbd_ep *ep_cur, const uint8_t *usrbuf, uint32_t *size_p) { const struct device *dev = ep_cur->dev; USBD_EP_T *ep_base = numaker_usbd_ep_base(dev, ep_cur->ep_hw_idx); uint32_t dmabuf_addr; uint32_t fifo_free; __ASSERT_NO_MSG(size_p); __ASSERT_NO_MSG(ep_cur->dmabuf_valid); __ASSERT_NO_MSG(ep_cur->mps_valid); __ASSERT_NO_MSG(ep_cur->mps <= ep_cur->dmabuf_size); dmabuf_addr = numaker_usbd_buf_base(dev) + ep_base->BUFSEG; fifo_free = numaker_usbd_ep_fifo_max(ep_cur) - numaker_usbd_ep_fifo_used(ep_cur); *size_p = MIN(*size_p, fifo_free); bytecpy((uint8_t *)dmabuf_addr, (uint8_t *)usrbuf, *size_p); /* Advance write FIFO */ ep_cur->write_fifo_pos += *size_p; ep_cur->write_fifo_free -= *size_p; if (ep_cur->write_fifo_free == 0) { ep_cur->write_fifo_pos = ep_cur->dmabuf_base; } return 0; } /* Update EP read/write FIFO on DATA OUT/IN completed */ static void numaker_usbd_ep_fifo_update(struct 
numaker_usbd_ep *ep_cur) { const struct device *dev = ep_cur->dev; USBD_EP_T *ep_base = numaker_usbd_ep_base(dev, ep_cur->ep_hw_idx); __ASSERT_NO_MSG(ep_cur->addr_valid); __ASSERT_NO_MSG(ep_cur->dmabuf_valid); if (USB_EP_DIR_IS_OUT(ep_cur->addr)) { /* Read FIFO */ /* NOTE: For one-shot implementation, FIFO gets updated from empty. */ ep_cur->read_fifo_pos = ep_cur->dmabuf_base; /* NOTE: See comment on mxpld_ctrlout for why make one copy of CTRL OUT's MXPLD */ if (USB_EP_GET_IDX(ep_cur->addr) == 0) { ep_cur->read_fifo_used = ep_cur->mxpld_ctrlout; } else { ep_cur->read_fifo_used = ep_base->MXPLD; } } else { /* Write FIFO */ /* NOTE: For one-shot implementation, FIFO gets to empty. */ ep_cur->write_fifo_pos = ep_cur->dmabuf_base; ep_cur->write_fifo_free = numaker_usbd_ep_fifo_max(ep_cur); } } static void numaker_usbd_ep_config_dmabuf(struct numaker_usbd_ep *ep_cur, uint32_t dmabuf_base, uint32_t dmabuf_size) { const struct device *dev = ep_cur->dev; USBD_EP_T *ep_base = numaker_usbd_ep_base(dev, ep_cur->ep_hw_idx); ep_base->BUFSEG = dmabuf_base; ep_cur->dmabuf_valid = true; ep_cur->dmabuf_base = dmabuf_base; ep_cur->dmabuf_size = dmabuf_size; } static void numaker_usbd_ep_abort(struct numaker_usbd_ep *ep_cur) { const struct device *dev = ep_cur->dev; USBD_EP_T *ep_base = numaker_usbd_ep_base(dev, ep_cur->ep_hw_idx); /* Abort EP on-going transaction */ ep_base->CFGP |= USBD_CFGP_CLRRDY_Msk; /* Need to clear NAK for next transaction */ ep_cur->nak_clr = false; } /* Configure EP major common parts */ static void numaker_usbd_ep_config_major(struct numaker_usbd_ep *ep_cur, const struct usb_dc_ep_cfg_data *const ep_cfg) { const struct device *dev = ep_cur->dev; USBD_EP_T *ep_base = numaker_usbd_ep_base(dev, ep_cur->ep_hw_idx); ep_cur->mps_valid = true; ep_cur->mps = ep_cfg->ep_mps; /* Configure EP transfer type, DATA0/1 toggle, direction, number, etc. 
*/ ep_cur->ep_hw_cfg = 0; /* Clear STALL Response in Setup stage */ if (ep_cfg->ep_type == USB_DC_EP_CONTROL) { ep_cur->ep_hw_cfg |= USBD_CFG_CSTALL; } /* Default to DATA0 */ ep_cur->ep_hw_cfg &= ~USBD_CFG_DSQSYNC_Msk; /* Endpoint IN/OUT, though, default to disabled */ ep_cur->ep_hw_cfg |= USBD_CFG_EPMODE_DISABLE; /* Isochronous or not */ if (ep_cfg->ep_type == USB_DC_EP_ISOCHRONOUS) { ep_cur->ep_hw_cfg |= USBD_CFG_TYPE_ISO; } /* Endpoint index */ ep_cur->ep_hw_cfg |= (USB_EP_GET_IDX(ep_cfg->ep_addr) << USBD_CFG_EPNUM_Pos) & USBD_CFG_EPNUM_Msk; ep_base->CFG = ep_cur->ep_hw_cfg; } static void numaker_usbd_ep_enable(struct numaker_usbd_ep *ep_cur) { const struct device *dev = ep_cur->dev; USBD_EP_T *ep_base = numaker_usbd_ep_base(dev, ep_cur->ep_hw_idx); /* For safe, EP (re-)enable from clean state */ numaker_usbd_ep_abort(ep_cur); numaker_usbd_ep_clear_stall_n_data_toggle(ep_cur); numaker_usbd_ep_fifo_reset(ep_cur); /* Enable EP to IN/OUT */ ep_cur->ep_hw_cfg &= ~USBD_CFG_STATE_Msk; if (USB_EP_DIR_IS_IN(ep_cur->addr)) { ep_cur->ep_hw_cfg |= USBD_CFG_EPMODE_IN; } else { ep_cur->ep_hw_cfg |= USBD_CFG_EPMODE_OUT; } ep_base->CFG = ep_cur->ep_hw_cfg; /* For USBD, no separate EP interrupt control */ } static void numaker_usbd_ep_disable(struct numaker_usbd_ep *ep_cur) { const struct device *dev = ep_cur->dev; USBD_EP_T *ep_base = numaker_usbd_ep_base(dev, ep_cur->ep_hw_idx); /* For USBD, no separate EP interrupt control */ /* Disable EP */ ep_cur->ep_hw_cfg = (ep_cur->ep_hw_cfg & ~USBD_CFG_STATE_Msk) | USBD_CFG_EPMODE_DISABLE; ep_base->CFG = ep_cur->ep_hw_cfg; } /* Start EP data transaction */ static void numaker_usbd_ep_trigger(struct numaker_usbd_ep *ep_cur, uint32_t len) { const struct device *dev = ep_cur->dev; USBD_EP_T *ep_base = numaker_usbd_ep_base(dev, ep_cur->ep_hw_idx); ep_base->MXPLD = len; } static struct numaker_usbd_ep *numaker_usbd_ep_mgmt_alloc_ep(const struct device *dev) { const struct numaker_usbd_config *config = dev->config; struct numaker_usbd_data 
*data = dev->data;
	struct numaker_usbd_ep_mgmt *ep_mgmt = &data->ep_mgmt;
	struct numaker_usbd_ep *ep_cur = NULL;

	/* Linear allocator: EP contexts are handed out in pool order and
	 * never returned; ep_idx is the high-water mark.
	 */
	if (ep_mgmt->ep_idx < config->num_bidir_endpoints) {
		ep_cur = ep_mgmt->ep_pool + ep_mgmt->ep_idx;
		ep_mgmt->ep_idx++;

		__ASSERT_NO_MSG(!ep_cur->valid);

		/* Indicate this EP context is allocated */
		ep_cur->valid = true;
	}

	return ep_cur;
}

/* Allocate DMA buffer
 *
 * Bump-allocates a chunk of the fixed USBD SRAM region (size rounded up
 * to 8-byte alignment).  Allocations are never freed individually; the
 * whole region is reclaimed by numaker_usbd_ep_mgmt_init().
 *
 * Return -ENOMEM on OOM error, or 0 on success with DMA buffer base/size (rounded up) allocated
 */
static int numaker_usbd_ep_mgmt_alloc_dmabuf(const struct device *dev, uint32_t size,
					     uint32_t *dmabuf_base_p, uint32_t *dmabuf_size_p)
{
	const struct numaker_usbd_config *config = dev->config;
	struct numaker_usbd_data *data = dev->data;
	struct numaker_usbd_ep_mgmt *ep_mgmt = &data->ep_mgmt;

	__ASSERT_NO_MSG(dmabuf_base_p);
	__ASSERT_NO_MSG(dmabuf_size_p);

	/* Required to be 8-byte aligned */
	size = ROUND_UP(size, 8);

	/* Advance the bump pointer; roll back on overflow of the region */
	ep_mgmt->dmabuf_pos += size;
	if (ep_mgmt->dmabuf_pos > config->dmabuf_size) {
		ep_mgmt->dmabuf_pos -= size;
		return -ENOMEM;
	}

	*dmabuf_base_p = ep_mgmt->dmabuf_pos - size;
	*dmabuf_size_p = size;

	return 0;
}

/* Initialize all endpoint-related
 *
 * Zeroes the EP management state, rebinds each EP context to its H/W EP
 * index, and reserves the first two contexts (H/W EP0/EP1) plus the
 * leading SRAM for the Setup packet and CTRL OUT/IN buffers.
 */
static void numaker_usbd_ep_mgmt_init(const struct device *dev)
{
	const struct numaker_usbd_config *config = dev->config;
	struct numaker_usbd_data *data = dev->data;
	USBD_T *const base = config->base;

	struct numaker_usbd_ep_mgmt *ep_mgmt = &data->ep_mgmt;
	struct numaker_usbd_ep *ep_cur;
	struct numaker_usbd_ep *ep_end;

	/* Initialize all fields to zero except persistent */
	memset(ep_mgmt, 0x00, sizeof(*ep_mgmt));

	ep_cur = ep_mgmt->ep_pool;
	ep_end = ep_mgmt->ep_pool + config->num_bidir_endpoints;

	/* Initialize all EP contexts */
	for (; ep_cur != ep_end; ep_cur++) {
		/* Pointer to the containing device */
		ep_cur->dev = dev;

		/* BSP USBD driver EP handle */
		ep_cur->ep_hw_idx = EP0 + (ep_cur - ep_mgmt->ep_pool);
	}

	/* Reserve 1st/2nd EP contexts (BSP USBD driver EP0/EP1) for CTRL OUT/IN */
	ep_mgmt->ep_idx = 2;

	/* Reserve DMA buffer for Setup/CTRL OUT/CTRL IN,
starting from 0 */ ep_mgmt->dmabuf_pos = 0; /* Configure DMA buffer for Setup packet */ base->STBUFSEG = ep_mgmt->dmabuf_pos; ep_mgmt->dmabuf_pos += NUMAKER_USBD_DMABUF_SIZE_SETUP; /* Reserve 1st EP context (BSP USBD driver EP0) for CTRL OUT */ ep_cur = ep_mgmt->ep_pool + 0; ep_cur->valid = true; ep_cur->addr_valid = true; ep_cur->addr = USB_EP_GET_ADDR(0, USB_EP_DIR_OUT); numaker_usbd_ep_config_dmabuf(ep_cur, ep_mgmt->dmabuf_pos, NUMAKER_USBD_DMABUF_SIZE_CTRLOUT); ep_mgmt->dmabuf_pos += NUMAKER_USBD_DMABUF_SIZE_CTRLOUT; ep_cur->mps_valid = true; ep_cur->mps = NUMAKER_USBD_DMABUF_SIZE_CTRLOUT; /* Reserve 2nd EP context (BSP USBD driver EP1) for CTRL IN */ ep_cur = ep_mgmt->ep_pool + 1; ep_cur->valid = true; ep_cur->addr_valid = true; ep_cur->addr = USB_EP_GET_ADDR(0, USB_EP_DIR_IN); numaker_usbd_ep_config_dmabuf(ep_cur, ep_mgmt->dmabuf_pos, NUMAKER_USBD_DMABUF_SIZE_CTRLIN); ep_mgmt->dmabuf_pos += NUMAKER_USBD_DMABUF_SIZE_CTRLIN; ep_cur->mps_valid = true; ep_cur->mps = NUMAKER_USBD_DMABUF_SIZE_CTRLIN; } /* Find EP context by EP address */ static struct numaker_usbd_ep *numaker_usbd_ep_mgmt_find_ep(const struct device *dev, const uint8_t ep) { const struct numaker_usbd_config *config = dev->config; struct numaker_usbd_data *data = dev->data; struct numaker_usbd_ep_mgmt *ep_mgmt = &data->ep_mgmt; struct numaker_usbd_ep *ep_cur = ep_mgmt->ep_pool; struct numaker_usbd_ep *ep_end = ep_mgmt->ep_pool + config->num_bidir_endpoints; for (; ep_cur != ep_end; ep_cur++) { if (!ep_cur->valid) { continue; } if (!ep_cur->addr_valid) { continue; } if (ep == ep_cur->addr) { return ep_cur; } } return NULL; } /* Bind EP context to EP address */ static struct numaker_usbd_ep *numaker_usbd_ep_mgmt_bind_ep(const struct device *dev, const uint8_t ep) { struct numaker_usbd_ep *ep_cur = numaker_usbd_ep_mgmt_find_ep(dev, ep); if (!ep_cur) { ep_cur = numaker_usbd_ep_mgmt_alloc_ep(dev); if (!ep_cur) { return NULL; } /* Bind EP context to EP address */ ep_cur->addr = ep; ep_cur->addr_valid = 
true; } /* Assert EP context bound to EP address */ __ASSERT_NO_MSG(ep_cur->valid); __ASSERT_NO_MSG(ep_cur->addr_valid); __ASSERT_NO_MSG(ep_cur->addr == ep); return ep_cur; } /* Interrupt bottom half processing for bus reset */ static void numaker_usbd_bus_reset_bh(const struct device *dev) { const struct numaker_usbd_config *config = dev->config; struct numaker_usbd_data *data = dev->data; struct numaker_usbd_ep_mgmt *ep_mgmt = &data->ep_mgmt; struct numaker_usbd_ep *ep_cur = ep_mgmt->ep_pool; struct numaker_usbd_ep *ep_end = ep_mgmt->ep_pool + config->num_bidir_endpoints; for (; ep_cur != ep_end; ep_cur++) { /* Reset EP FIFO */ numaker_usbd_ep_fifo_reset(ep_cur); /* Abort EP on-going transaction and signal H/W relinquishes DMA buffer ownership */ numaker_usbd_ep_abort(ep_cur); /* Reset EP to unstalled and data toggle bit to 0 */ numaker_usbd_ep_clear_stall_n_data_toggle(ep_cur); } numaker_usbd_reset_addr(dev); } /* Interrupt bottom half processing for Setup/EP data transaction */ static void numaker_usbd_ep_bh(struct numaker_usbd_ep *ep_cur, enum usb_dc_ep_cb_status_code status_code) { const struct device *dev = ep_cur->dev; struct numaker_usbd_data *data = dev->data; struct numaker_usbd_ep_mgmt *ep_mgmt = &data->ep_mgmt; if (status_code == USB_DC_EP_SETUP) { /* Zephyr USB device stack passes Setup packet via CTRL OUT EP. 
*/ __ASSERT_NO_MSG(ep_cur->addr == USB_EP_GET_ADDR(0, USB_EP_DIR_OUT)); if (numaker_usbd_ep_fifo_used(ep_cur)) { LOG_WRN("New Setup will override previous Control OUT data"); } /* We should have reserved 1st/2nd EP contexts for CTRL OUT/IN */ __ASSERT_NO_MSG(ep_cur->addr == USB_EP_GET_ADDR(0, USB_EP_DIR_OUT)); __ASSERT_NO_MSG((ep_cur + 1)->addr == USB_EP_GET_ADDR(0, USB_EP_DIR_IN)); /* Reset CTRL OUT/IN FIFO due to new Setup packet */ numaker_usbd_ep_fifo_reset(ep_cur); numaker_usbd_ep_fifo_reset(ep_cur + 1); /* Relinquish CTRL OUT/IN DMA buffer ownership on behalf of H/W */ numaker_usbd_ep_abort(ep_cur); numaker_usbd_ep_abort(ep_cur + 1); /* Mark new Setup packet for read */ numaker_usbd_setup_fifo_copy_to_user(dev, (uint8_t *)&ep_mgmt->setup_packet); ep_mgmt->new_setup = true; } else if (status_code == USB_DC_EP_DATA_OUT) { __ASSERT_NO_MSG(USB_EP_DIR_IS_OUT(ep_cur->addr)); /* Update EP read FIFO */ numaker_usbd_ep_fifo_update(ep_cur); /* Need to clear NAK for next transaction */ ep_cur->nak_clr = false; } else if (status_code == USB_DC_EP_DATA_IN) { __ASSERT_NO_MSG(USB_EP_DIR_IS_IN(ep_cur->addr)); /* Update EP write FIFO */ numaker_usbd_ep_fifo_update(ep_cur); /* Need to clear NAK for next transaction */ ep_cur->nak_clr = false; } } /* Message handler for S/W reconnect */ static void numaker_usbd_msg_sw_reconn(const struct device *dev, struct numaker_usbd_msg *msg) { __ASSERT_NO_MSG(msg->type == NUMAKER_USBD_MSG_TYPE_SW_RECONN); /* S/W reconnect for error recovery */ numaker_usbd_lock(dev); numaker_usbd_sw_reconnect(dev); numaker_usbd_unlock(dev); } /* Message handler for callback for usb_dc_status_code */ static void numaker_usbd_msg_cb_state(const struct device *dev, struct numaker_usbd_msg *msg) { struct numaker_usbd_data *data = dev->data; __ASSERT_NO_MSG(msg->type == NUMAKER_USBD_MSG_TYPE_CB_STATE); /* Interrupt bottom half processing for bus reset */ if (msg->cb_device.status_code == USB_DC_RESET) { numaker_usbd_lock(dev); numaker_usbd_bus_reset_bh(dev); 
numaker_usbd_unlock(dev); } /* NOTE: Don't run callback with our mutex locked, or we may encounter * deadlock because the Zephyr USB device stack can have its own * synchronization. */ if (data->status_cb) { data->status_cb(msg->cb_device.status_code, NULL); } else { LOG_WRN("No status callback: status_code=%d", msg->cb_device.status_code); } } /* Message handler for callback for usb_dc_ep_cb_status_code */ static void numaker_usbd_msg_cb_ep(const struct device *dev, struct numaker_usbd_msg *msg) { uint8_t ep; struct numaker_usbd_ep *ep_cur; __ASSERT_NO_MSG(msg->type == NUMAKER_USBD_MSG_TYPE_CB_EP); ep = msg->cb_ep.ep; /* Bind EP context to EP address */ ep_cur = numaker_usbd_ep_mgmt_bind_ep(dev, ep); if (!ep_cur) { LOG_ERR("Bind EP context: ep=0x%02x", ep); return; } /* Interrupt bottom half processing for EP */ numaker_usbd_lock(dev); numaker_usbd_ep_bh(ep_cur, msg->cb_ep.status_code); numaker_usbd_unlock(dev); /* NOTE: Same as above, don't run callback with our mutex locked */ if (ep_cur->cb) { ep_cur->cb(ep, msg->cb_ep.status_code); } else { LOG_WRN("No EP callback: ep=0x%02x, status_code=%d", ep, msg->cb_ep.status_code); } } /* Interrupt bottom half processing * * This thread is used to not run Zephyr USB device stack and callbacks in interrupt * context. This is because callbacks from this stack may use mutex or other kernel functions * which are not supported in interrupt context. 
*/ static void numaker_usbd_msg_hdlr_thread_main(void *arg1, void *arg2, void *arg3) { const struct device *dev = (const struct device *)arg1; struct numaker_usbd_data *data = dev->data; struct numaker_usbd_msg msg; __ASSERT_NO_MSG(arg1); ARG_UNUSED(arg2); ARG_UNUSED(arg3); while (true) { if (k_msgq_get(&data->msgq, &msg, K_FOREVER)) { continue; } switch (msg.type) { case NUMAKER_USBD_MSG_TYPE_SW_RECONN: numaker_usbd_msg_sw_reconn(dev, &msg); break; case NUMAKER_USBD_MSG_TYPE_CB_STATE: numaker_usbd_msg_cb_state(dev, &msg); break; case NUMAKER_USBD_MSG_TYPE_CB_EP: numaker_usbd_msg_cb_ep(dev, &msg); break; default: __ASSERT_NO_MSG(false); } } } static void numaker_udbd_isr(const struct device *dev) { const struct numaker_usbd_config *config = dev->config; struct numaker_usbd_data *data = dev->data; USBD_T *const base = config->base; struct numaker_usbd_ep_mgmt *ep_mgmt = &data->ep_mgmt; struct numaker_usbd_msg msg = {0}; uint32_t volatile usbd_intsts = base->INTSTS; uint32_t volatile usbd_bus_state = base->ATTR; /* USB plug-in/unplug */ if (usbd_intsts & USBD_INTSTS_FLDET) { /* Floating detect */ base->INTSTS = USBD_INTSTS_FLDET; if (base->VBUSDET & USBD_VBUSDET_VBUSDET_Msk) { /* USB plug-in */ /* Enable back USB/PHY */ base->ATTR |= USBD_ATTR_USBEN_Msk | USBD_ATTR_PHYEN_Msk; /* Message for bottom-half processing */ msg.type = NUMAKER_USBD_MSG_TYPE_CB_STATE; msg.cb_device.status_code = USB_DC_CONNECTED; numaker_usbd_send_msg(dev, &msg); LOG_DBG("USB plug-in"); } else { /* USB unplug */ /* Disable USB */ base->ATTR &= ~USBD_USB_EN; /* Message for bottom-half processing */ msg.type = NUMAKER_USBD_MSG_TYPE_CB_STATE; msg.cb_device.status_code = USB_DC_DISCONNECTED; numaker_usbd_send_msg(dev, &msg); LOG_DBG("USB unplug"); } } /* USB wake-up */ if (usbd_intsts & USBD_INTSTS_WAKEUP) { /* Clear event flag */ base->INTSTS = USBD_INTSTS_WAKEUP; LOG_DBG("USB wake-up"); } /* USB reset/suspend/resume */ if (usbd_intsts & USBD_INTSTS_BUS) { /* Clear event flag */ base->INTSTS = 
USBD_INTSTS_BUS; if (usbd_bus_state & USBD_STATE_USBRST) { /* Bus reset */ /* Enable back USB/PHY */ base->ATTR |= USBD_ATTR_USBEN_Msk | USBD_ATTR_PHYEN_Msk; /* Bus reset top half */ numaker_usbd_bus_reset_th(dev); /* Message for bottom-half processing */ msg.type = NUMAKER_USBD_MSG_TYPE_CB_STATE; msg.cb_device.status_code = USB_DC_RESET; numaker_usbd_send_msg(dev, &msg); LOG_DBG("USB reset"); } if (usbd_bus_state & USBD_STATE_SUSPEND) { /* Enable USB but disable PHY */ base->ATTR &= ~USBD_PHY_EN; /* Message for bottom-half processing */ msg.type = NUMAKER_USBD_MSG_TYPE_CB_STATE; msg.cb_device.status_code = USB_DC_SUSPEND; numaker_usbd_send_msg(dev, &msg); LOG_DBG("USB suspend"); } if (usbd_bus_state & USBD_STATE_RESUME) { /* Enable back USB/PHY */ base->ATTR |= USBD_ATTR_USBEN_Msk | USBD_ATTR_PHYEN_Msk; /* Message for bottom-half processing */ msg.type = NUMAKER_USBD_MSG_TYPE_CB_STATE; msg.cb_device.status_code = USB_DC_RESUME; numaker_usbd_send_msg(dev, &msg); LOG_DBG("USB resume"); } } /* USB SOF */ if (usbd_intsts & USBD_INTSTS_SOFIF_Msk) { /* Clear event flag */ base->INTSTS = USBD_INTSTS_SOFIF_Msk; /* Message for bottom-half processing */ msg.type = NUMAKER_USBD_MSG_TYPE_CB_STATE; msg.cb_device.status_code = USB_DC_SOF; numaker_usbd_send_msg(dev, &msg); } /* USB Setup/EP */ if (usbd_intsts & USBD_INTSTS_USB) { uint32_t epintsts; /* Setup event */ if (usbd_intsts & USBD_INTSTS_SETUP) { USBD_EP_T *ep0_base = numaker_usbd_ep_base(dev, EP0); USBD_EP_T *ep1_base = numaker_usbd_ep_base(dev, EP1); /* Clear event flag */ base->INTSTS = USBD_INTSTS_SETUP; /* Clear the data IN/OUT ready flag of control endpoints */ ep0_base->CFGP |= USBD_CFGP_CLRRDY_Msk; ep1_base->CFGP |= USBD_CFGP_CLRRDY_Msk; /* By USB spec, following transactions, regardless of Data/Status stage, * will always be DATA1 */ ep0_base->CFG |= USBD_CFG_DSQSYNC_Msk; ep1_base->CFG |= USBD_CFG_DSQSYNC_Msk; /* Message for bottom-half processing */ /* NOTE: In Zephyr USB device stack, Setup packet is passed 
via * CTRL OUT EP */ msg.type = NUMAKER_USBD_MSG_TYPE_CB_EP; msg.cb_ep.ep = USB_EP_GET_ADDR(0, USB_EP_DIR_OUT); msg.cb_ep.status_code = USB_DC_EP_SETUP; numaker_usbd_send_msg(dev, &msg); } /* EP events */ epintsts = base->EPINTSTS; base->EPINTSTS = epintsts; while (epintsts) { uint32_t ep_hw_idx = u32_count_trailing_zeros(epintsts); USBD_EP_T *ep_base = numaker_usbd_ep_base(dev, ep_hw_idx); uint8_t ep_dir; uint8_t ep_idx; uint8_t ep; /* We don't enable INNAKEN interrupt, so as long as EP event occurs, * we can just regard one data transaction has completed (ACK for * CTRL/BULK/INT or no-ACK for Iso), that is, no need to check EPSTS0, * EPSTS1, etc. */ /* EP direction, number, and address */ ep_dir = ((ep_base->CFG & USBD_CFG_STATE_Msk) == USBD_CFG_EPMODE_IN) ? USB_EP_DIR_IN : USB_EP_DIR_OUT; ep_idx = (ep_base->CFG & USBD_CFG_EPNUM_Msk) >> USBD_CFG_EPNUM_Pos; ep = USB_EP_GET_ADDR(ep_idx, ep_dir); /* NOTE: See comment in usb_dc_set_address()'s implementation * for safe place to change USB device address */ if (ep == USB_EP_GET_ADDR(0, USB_EP_DIR_IN)) { numaker_usbd_set_addr(dev); } /* NOTE: See comment on mxpld_ctrlout for why make one copy of * CTRL OUT's MXPLD */ if (ep == USB_EP_GET_ADDR(0, USB_EP_DIR_OUT)) { struct numaker_usbd_ep *ep_ctrlout = ep_mgmt->ep_pool + 0; USBD_EP_T *ep_ctrlout_base = numaker_usbd_ep_base(dev, ep_ctrlout->ep_hw_idx); ep_ctrlout->mxpld_ctrlout = ep_ctrlout_base->MXPLD; } /* Message for bottom-half processing */ msg.type = NUMAKER_USBD_MSG_TYPE_CB_EP; msg.cb_ep.ep = ep; msg.cb_ep.status_code = USB_EP_DIR_IS_IN(ep) ? 
USB_DC_EP_DATA_IN : USB_DC_EP_DATA_OUT; numaker_usbd_send_msg(dev, &msg); /* Have handled this EP and go next */ epintsts &= ~BIT(ep_hw_idx); } } } /* Zephyr USB device controller API implementation */ int usb_dc_attach(void) { const struct device *dev = numaker_usbd_device_get(); int rc; numaker_usbd_lock(dev); /* Initialize USB DC H/W */ rc = numaker_usbd_hw_setup(dev); if (rc < 0) { LOG_ERR("Set up H/W"); goto cleanup; } /* USB device address defaults to 0 */ numaker_usbd_reset_addr(dev); /* Initialize all EPs */ numaker_usbd_ep_mgmt_init(dev); /* S/W connect */ numaker_usbd_sw_connect(dev); LOG_DBG("attached"); cleanup: if (rc < 0) { usb_dc_detach(); } numaker_usbd_unlock(dev); return rc; } int usb_dc_detach(void) { const struct device *dev = numaker_usbd_device_get(); struct numaker_usbd_data *data = dev->data; LOG_DBG("detached"); numaker_usbd_lock(dev); /* S/W disconnect */ numaker_usbd_sw_disconnect(dev); /* Uninitialize USB DC H/W */ numaker_usbd_hw_shutdown(numaker_usbd_device_get()); /* Purge message queue */ k_msgq_purge(&data->msgq); numaker_usbd_unlock(dev); return 0; } int usb_dc_reset(void) { const struct device *dev = numaker_usbd_device_get(); LOG_DBG("usb_dc_reset"); numaker_usbd_lock(dev); usb_dc_detach(); usb_dc_attach(); numaker_usbd_unlock(dev); return 0; } int usb_dc_set_address(const uint8_t addr) { const struct device *dev = numaker_usbd_device_get(); struct numaker_usbd_data *data = dev->data; LOG_DBG("USB device address=%u (0x%02x)", addr, addr); numaker_usbd_lock(dev); /* NOTE: Timing for configuring USB device address into H/W is critical. It must be done * in-between SET_ADDRESS control transfer and next transfer. For this, it is done in * IN ACK ISR of SET_ADDRESS control transfer. 
*/ data->addr = addr; numaker_usbd_unlock(dev); return 0; } void usb_dc_set_status_callback(const usb_dc_status_callback cb) { const struct device *dev = numaker_usbd_device_get(); struct numaker_usbd_data *data = dev->data; numaker_usbd_lock(dev); data->status_cb = cb; numaker_usbd_unlock(dev); } int usb_dc_ep_check_cap(const struct usb_dc_ep_cfg_data *const ep_cfg) { const struct device *dev = numaker_usbd_device_get(); const struct numaker_usbd_config *config = dev->config; int rc = 0; struct numaker_usbd_ep *ep_cur; numaker_usbd_lock(dev); /* For safe, require EP number for control transfer to be 0 */ if ((ep_cfg->ep_type == USB_DC_EP_CONTROL) && USB_EP_GET_IDX(ep_cfg->ep_addr) != 0) { LOG_ERR("EP number for control transfer must be 0"); rc = -ENOTSUP; goto cleanup; } /* Some soc series don't allow ISO IN/OUT to be assigned the same EP number. * This is addressed by limiting all OUT/IN EP addresses in top/bottom halves, * except CTRL OUT/IN. */ if (config->disallow_iso_inout_same && ep_cfg->ep_type != USB_DC_EP_CONTROL) { /* Limit all OUT EP addresses in top-half, except CTRL OUT */ if (USB_EP_DIR_IS_OUT(ep_cfg->ep_addr) && USB_EP_GET_IDX(ep_cfg->ep_addr) >= 8) { LOG_DBG("Support only ISO OUT EP address 0x01~0x07: 0x%02x", ep_cfg->ep_addr); rc = -ENOTSUP; goto cleanup; } /* Limit all IN EP addresses in bottom-half , except CTRL IN */ if (USB_EP_DIR_IS_IN(ep_cfg->ep_addr) && USB_EP_GET_IDX(ep_cfg->ep_addr) < 8) { LOG_DBG("Support only ISO IN EP address 0x88~0x8F: 0x%02x", ep_cfg->ep_addr); rc = -ENOTSUP; goto cleanup; } } /* To respect this capability check, pre-bind EP context to EP address, * and pre-determined its type */ ep_cur = numaker_usbd_ep_mgmt_bind_ep(dev, ep_cfg->ep_addr); if (!ep_cur) { LOG_ERR("Bind EP context: ep=0x%02x", ep_cfg->ep_addr); rc = -ENOMEM; goto cleanup; } cleanup: numaker_usbd_unlock(dev); return rc; } int usb_dc_ep_set_callback(const uint8_t ep, const usb_dc_ep_callback cb) { const struct device *dev = numaker_usbd_device_get(); int 
rc = 0; struct numaker_usbd_ep *ep_cur; numaker_usbd_lock(dev); /* Bind EP context to EP address */ ep_cur = numaker_usbd_ep_mgmt_bind_ep(dev, ep); if (!ep_cur) { LOG_ERR("Bind EP context: ep=0x%02x", ep); rc = -ENOMEM; goto cleanup; } ep_cur->cb = cb; cleanup: numaker_usbd_unlock(dev); return rc; } int usb_dc_ep_configure(const struct usb_dc_ep_cfg_data *const ep_cfg) { const struct device *dev = numaker_usbd_device_get(); int rc = 0; uint32_t dmabuf_base; uint32_t dmabuf_size; struct numaker_usbd_ep *ep_cur; LOG_DBG("EP=0x%02x, MPS=%d, Type=%d", ep_cfg->ep_addr, ep_cfg->ep_mps, ep_cfg->ep_type); numaker_usbd_lock(dev); /* Bind EP context to EP address */ ep_cur = numaker_usbd_ep_mgmt_bind_ep(dev, ep_cfg->ep_addr); if (!ep_cur) { LOG_ERR("Bind EP context: ep=0x%02x", ep_cfg->ep_addr); rc = -ENOMEM; goto cleanup; } /* Configure EP DMA buffer */ if (!ep_cur->dmabuf_valid || ep_cur->dmabuf_size < ep_cfg->ep_mps) { /* Allocate DMA buffer */ rc = numaker_usbd_ep_mgmt_alloc_dmabuf(dev, ep_cfg->ep_mps, &dmabuf_base, &dmabuf_size); if (rc < 0) { LOG_ERR("Allocate DMA buffer failed"); goto cleanup; } /* Configure EP DMA buffer */ numaker_usbd_ep_config_dmabuf(ep_cur, dmabuf_base, dmabuf_size); } /* Configure EP majorly */ numaker_usbd_ep_config_major(ep_cur, ep_cfg); cleanup: numaker_usbd_unlock(dev); return rc; } int usb_dc_ep_set_stall(const uint8_t ep) { const struct device *dev = numaker_usbd_device_get(); int rc = 0; struct numaker_usbd_ep *ep_cur; LOG_DBG("Set stall: ep=0x%02x", ep); numaker_usbd_lock(dev); /* Bind EP context to EP address */ ep_cur = numaker_usbd_ep_mgmt_bind_ep(dev, ep); if (!ep_cur) { LOG_ERR("Bind EP context: ep=0x%02x", ep); rc = -ENOMEM; goto cleanup; } /* Set EP to stalled */ numaker_usbd_ep_set_stall(ep_cur); cleanup: numaker_usbd_unlock(dev); return rc; } int usb_dc_ep_clear_stall(const uint8_t ep) { const struct device *dev = numaker_usbd_device_get(); int rc = 0; struct numaker_usbd_ep *ep_cur; LOG_DBG("Clear stall: ep=0x%02x", ep); 
numaker_usbd_lock(dev); /* Bind EP context to EP address */ ep_cur = numaker_usbd_ep_mgmt_bind_ep(dev, ep); if (!ep_cur) { LOG_ERR("Bind EP context: ep=0x%02x", ep); rc = -ENOMEM; goto cleanup; } /* Reset EP to unstalled and data toggle bit to 0 */ numaker_usbd_ep_clear_stall_n_data_toggle(ep_cur); cleanup: numaker_usbd_unlock(dev); return rc; } int usb_dc_ep_is_stalled(const uint8_t ep, uint8_t *const stalled) { const struct device *dev = numaker_usbd_device_get(); int rc = 0; struct numaker_usbd_ep *ep_cur; if (!stalled) { return -EINVAL; } numaker_usbd_lock(dev); /* Bind EP context to EP address */ ep_cur = numaker_usbd_ep_mgmt_bind_ep(dev, ep); if (!ep_cur) { LOG_ERR("Bind EP context: ep=0x%02x", ep); rc = -ENOMEM; goto cleanup; } *stalled = numaker_usbd_ep_is_stalled(ep_cur); cleanup: numaker_usbd_unlock(dev); return rc; } int usb_dc_ep_halt(const uint8_t ep) { return usb_dc_ep_set_stall(ep); } int usb_dc_ep_enable(const uint8_t ep) { const struct device *dev = numaker_usbd_device_get(); int rc = 0; struct numaker_usbd_ep *ep_cur; LOG_DBG("Enable: ep=0x%02x", ep); numaker_usbd_lock(dev); /* Bind EP context to EP address */ ep_cur = numaker_usbd_ep_mgmt_bind_ep(dev, ep); if (!ep_cur) { LOG_ERR("Bind EP context: ep=0x%02x", ep); rc = -ENOMEM; goto cleanup; } numaker_usbd_ep_enable(ep_cur); /* Trigger OUT transaction manually, or H/W will continue to reply NAK because * Zephyr USB device stack is unclear on kicking off by invoking usb_dc_ep_read() * or friends. We needn't do this for CTRL OUT because Setup sequence will involve * this. 
*/ if (USB_EP_DIR_IS_OUT(ep) && USB_EP_GET_IDX(ep) != 0) { rc = usb_dc_ep_read_continue(ep); if (rc < 0) { goto cleanup; } } cleanup: numaker_usbd_unlock(dev); return rc; } int usb_dc_ep_disable(const uint8_t ep) { const struct device *dev = numaker_usbd_device_get(); int rc = 0; struct numaker_usbd_ep *ep_cur; LOG_DBG("Disable: ep=0x%02x", ep); numaker_usbd_lock(dev); /* Bind EP context to EP address */ ep_cur = numaker_usbd_ep_mgmt_bind_ep(dev, ep); if (!ep_cur) { LOG_ERR("Bind EP context: ep=0x%02x", ep); rc = -ENOMEM; goto cleanup; } numaker_usbd_ep_disable(ep_cur); cleanup: numaker_usbd_unlock(dev); return rc; } int usb_dc_ep_flush(const uint8_t ep) { const struct device *dev = numaker_usbd_device_get(); int rc = 0; struct numaker_usbd_ep *ep_cur; LOG_DBG("ep=0x%02x", ep); numaker_usbd_lock(dev); /* Bind EP context to EP address */ ep_cur = numaker_usbd_ep_mgmt_bind_ep(dev, ep); if (!ep_cur) { LOG_ERR("Bind EP context: ep=0x%02x", ep); rc = -ENOMEM; goto cleanup; } numaker_usbd_ep_fifo_reset(ep_cur); cleanup: numaker_usbd_unlock(dev); return rc; } int usb_dc_ep_write(const uint8_t ep, const uint8_t *const data_buf, const uint32_t data_len, uint32_t *const ret_bytes) { const struct device *dev = numaker_usbd_device_get(); int rc = 0; struct numaker_usbd_ep *ep_cur; uint32_t data_len_act; numaker_usbd_lock(dev); /* Bind EP context to EP address */ ep_cur = numaker_usbd_ep_mgmt_bind_ep(dev, ep); if (!ep_cur) { LOG_ERR("ep=0x%02x", ep); rc = -ENOMEM; goto cleanup; } if (!USB_EP_DIR_IS_IN(ep)) { LOG_ERR("Invalid EP address 0x%02x for write", ep); rc = -EINVAL; goto cleanup; } /* For USBD, avoid duplicate NAK clear */ if (ep_cur->nak_clr) { LOG_WRN("ep 0x%02x busy", ep); rc = -EAGAIN; goto cleanup; } /* For one-shot implementation, don't trigger next DATA IN with write FIFO not empty. 
*/ if (numaker_usbd_ep_fifo_used(ep_cur)) { LOG_WRN("ep 0x%02x: Write FIFO not empty for one-shot implementation", ep); rc = -EAGAIN; goto cleanup; } /* NOTE: Null data or zero data length are valid, used for ZLP */ if (data_buf && data_len) { data_len_act = data_len; rc = numaker_usbd_ep_fifo_copy_from_user(ep_cur, data_buf, &data_len_act); if (rc < 0) { LOG_ERR("Copy to FIFO from user buffer"); goto cleanup; } } else { data_len_act = 0; } /* Now H/W actually owns EP DMA buffer */ numaker_usbd_ep_trigger(ep_cur, data_len_act); /* NOTE: For one-shot implementation, at most MPS size can be written, though, * null 'ret_bytes' requires all data written. */ if (ret_bytes) { *ret_bytes = data_len_act; } else if (data_len_act != data_len) { LOG_ERR("Expected write all %d bytes, but actual %d bytes written", data_len, data_len_act); rc = -EIO; goto cleanup; } cleanup: numaker_usbd_unlock(dev); return rc; } int usb_dc_ep_read(const uint8_t ep, uint8_t *const data, const uint32_t max_data_len, uint32_t *const read_bytes) { const struct device *dev = numaker_usbd_device_get(); int rc = 0; numaker_usbd_lock(dev); rc = usb_dc_ep_read_wait(ep, data, max_data_len, read_bytes); if (rc < 0) { goto cleanup; } rc = usb_dc_ep_read_continue(ep); if (rc < 0) { goto cleanup; } cleanup: numaker_usbd_unlock(dev); return rc; } int usb_dc_ep_read_wait(uint8_t ep, uint8_t *data_buf, uint32_t max_data_len, uint32_t *read_bytes) { const struct device *dev = numaker_usbd_device_get(); struct numaker_usbd_data *data = dev->data; int rc = 0; struct numaker_usbd_ep_mgmt *ep_mgmt = &data->ep_mgmt; struct numaker_usbd_ep *ep_cur; uint32_t data_len_act = 0; numaker_usbd_lock(dev); /* Bind EP context to EP address */ ep_cur = numaker_usbd_ep_mgmt_bind_ep(dev, ep); if (!ep_cur) { LOG_ERR("Bind EP context: ep=0x%02x", ep); rc = -ENOMEM; goto cleanup; } if (!USB_EP_DIR_IS_OUT(ep)) { LOG_ERR("Invalid EP address 0x%02x for read", ep); rc = -EINVAL; goto cleanup; } /* Special handling for USB_CONTROL_EP_OUT 
on Setup packet */ if (ep == USB_CONTROL_EP_OUT && ep_mgmt->new_setup) { if (!data_buf || max_data_len != 8) { LOG_ERR("Invalid parameter for reading Setup packet"); rc = -EINVAL; goto cleanup; } memcpy(data_buf, &ep_mgmt->setup_packet, 8); ep_mgmt->new_setup = false; if (read_bytes) { *read_bytes = 8; } goto cleanup; } /* For one-shot implementation, don't read FIFO with EP busy. */ if (ep_cur->nak_clr) { LOG_WRN("ep 0x%02x busy", ep); rc = -EAGAIN; goto cleanup; } /* NOTE: Null data and zero data length is valid, used for returning number of * available bytes for read */ if (data_buf) { data_len_act = max_data_len; rc = numaker_usbd_ep_fifo_copy_to_user(ep_cur, data_buf, &data_len_act); if (rc < 0) { LOG_ERR("Copy from FIFO to user buffer"); goto cleanup; } if (read_bytes) { *read_bytes = data_len_act; } } else if (max_data_len) { LOG_ERR("Null data but non-zero data length"); rc = -EINVAL; goto cleanup; } else { if (read_bytes) { *read_bytes = numaker_usbd_ep_fifo_used(ep_cur); } } /* Suppress further USB_DC_EP_DATA_OUT events by replying NAK or disabling interrupt * * For USBD, further control is unnecessary because NAK is automatically replied until * next USBD_SET_PAYLOAD_LEN(). */ cleanup: numaker_usbd_unlock(dev); return rc; } int usb_dc_ep_read_continue(uint8_t ep) { const struct device *dev = numaker_usbd_device_get(); int rc = 0; struct numaker_usbd_ep *ep_cur; numaker_usbd_lock(dev); /* Bind EP context to EP address */ ep_cur = numaker_usbd_ep_mgmt_bind_ep(dev, ep); if (!ep_cur) { LOG_ERR("Bind EP context: ep=0x%02x", ep); rc = -ENOMEM; goto cleanup; } if (!USB_EP_DIR_IS_OUT(ep)) { LOG_ERR("Invalid EP address 0x%02x for read", ep); rc = -EINVAL; goto cleanup; } /* Avoid duplicate NAK clear */ if (ep_cur->nak_clr) { rc = 0; goto cleanup; } /* For one-shot implementation, don't trigger next DATA OUT, or overwrite. 
*/ if (numaker_usbd_ep_fifo_used(ep_cur)) { goto cleanup; } __ASSERT_NO_MSG(ep_cur->mps_valid); numaker_usbd_ep_trigger(ep_cur, ep_cur->mps); cleanup: numaker_usbd_unlock(dev); return rc; } int usb_dc_ep_mps(const uint8_t ep) { const struct device *dev = numaker_usbd_device_get(); int rc = 0; struct numaker_usbd_ep *ep_cur; uint16_t ep_mps = 0; numaker_usbd_lock(dev); /* Bind EP context to EP address */ ep_cur = numaker_usbd_ep_mgmt_bind_ep(dev, ep); if (!ep_cur) { LOG_ERR("Bind EP context: ep=0x%02x", ep); rc = -ENOMEM; goto cleanup; } __ASSERT_NO_MSG(ep_cur->mps_valid); ep_mps = ep_cur->mps; cleanup: numaker_usbd_unlock(dev); return rc == 0 ? ep_mps : rc; } int usb_dc_wakeup_request(void) { const struct device *dev = numaker_usbd_device_get(); int rc = 0; LOG_DBG("Remote wakeup"); numaker_usbd_lock(dev); numaker_usbd_remote_wakeup(dev); numaker_usbd_unlock(dev); return rc; } static int numaker_udbd_init(const struct device *dev) { struct numaker_usbd_data *data = dev->data; int rc = 0; /* Initialize all fields to zero */ memset(data, 0x00, sizeof(*data)); k_mutex_init(&data->sync_mutex); /* Set up interrupt top/bottom halves processing */ k_msgq_init(&data->msgq, (char *)data->msgq_buf, sizeof(struct numaker_usbd_msg), CONFIG_USB_DC_NUMAKER_MSG_QUEUE_SIZE); k_thread_create(&data->msg_hdlr_thread, data->msg_hdlr_thread_stack, CONFIG_USB_DC_NUMAKER_MSG_HANDLER_THREAD_STACK_SIZE, numaker_usbd_msg_hdlr_thread_main, (void *)dev, NULL, NULL, K_PRIO_COOP(2), 0, K_NO_WAIT); k_thread_name_set(&data->msg_hdlr_thread, "numaker_usbd"); return rc; } #define USB_DC_NUMAKER_INIT(inst) \ PINCTRL_DT_INST_DEFINE(inst); \ \ static void numaker_usbd_irq_config_func_##inst(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(inst), DT_INST_IRQ(inst, priority), numaker_udbd_isr, \ DEVICE_DT_INST_GET(inst), 0); \ \ irq_enable(DT_INST_IRQN(inst)); \ } \ \ static void numaker_uusbd_irq_unconfig_func_##inst(const struct device *dev) \ { \ irq_disable(DT_INST_IRQN(inst)); \ } \ \ 
static const struct numaker_usbd_config numaker_usbd_config_##inst = { \ .base = (USBD_T *)DT_INST_REG_ADDR(inst), \ .reset = RESET_DT_SPEC_INST_GET(inst), \ .clk_modidx = DT_INST_CLOCKS_CELL(inst, clock_module_index), \ .clk_src = DT_INST_CLOCKS_CELL(inst, clock_source), \ .clk_div = DT_INST_CLOCKS_CELL(inst, clock_divider), \ .clkctrl_dev = DEVICE_DT_GET(DT_PARENT(DT_INST_CLOCKS_CTLR(inst))), \ .irq_config_func = numaker_usbd_irq_config_func_##inst, \ .irq_unconfig_func = numaker_uusbd_irq_unconfig_func_##inst, \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst), \ .num_bidir_endpoints = DT_INST_PROP(inst, num_bidir_endpoints), \ .dmabuf_size = DT_INST_PROP(inst, dma_buffer_size), \ .disallow_iso_inout_same = DT_INST_PROP(inst, disallow_iso_in_out_same_number), \ }; \ \ static struct numaker_usbd_data numaker_usbd_data_##inst; \ \ BUILD_ASSERT(DT_INST_PROP(inst, num_bidir_endpoints) <= NUMAKER_USBD_EP_MAXNUM, \ "num_bidir_endpoints exceeds support limit by USBD driver"); \ \ DEVICE_DT_INST_DEFINE(inst, numaker_udbd_init, NULL, &numaker_usbd_data_##inst, \ &numaker_usbd_config_##inst, POST_KERNEL, \ CONFIG_KERNEL_INIT_PRIORITY_DEVICE, NULL); USB_DC_NUMAKER_INIT(0); /* Get USB DC device context instance 0 */ static inline const struct device *numaker_usbd_device_get(void) { return DEVICE_DT_INST_GET(0); } ```
/content/code_sandbox/drivers/usb/device/usb_dc_numaker.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
15,367
```objective-c /* * */ struct op_common { uint16_t version; uint16_t code; uint32_t status; } __packed; struct devlist_device { char path[256]; char busid[32]; uint32_t busnum; uint32_t devnum; uint32_t speed; uint16_t idVendor; uint16_t idProduct; uint16_t bcdDevice; uint8_t bDeviceClass; uint8_t bDeviceSubClass; uint8_t bDeviceProtocol; uint8_t bConfigurationValue; uint8_t bNumConfigurations; uint8_t bNumInterfaces; } __packed; #define OP_REQUEST (0x80 << 8) #define OP_REPLY (0x00 << 8) /* Devlist */ #define OP_DEVLIST 0x05 #define OP_REQ_DEVLIST (OP_REQUEST | OP_DEVLIST) #define OP_REP_DEVLIST (OP_REPLY | OP_DEVLIST) /* Import USB device */ #define OP_IMPORT 0x03 #define OP_REQ_IMPORT (OP_REQUEST | OP_IMPORT) #define OP_REP_IMPORT (OP_REPLY | OP_IMPORT) /* USBIP requests */ #define USBIP_CMD_SUBMIT 0x0001 #define USBIP_CMD_UNLINK 0x0002 #define USBIP_RET_SUBMIT 0x0003 #define USBIP_RET_UNLINK 0x0004 /* USBIP direction */ #define USBIP_DIR_OUT 0x00 #define USBIP_DIR_IN 0x01 struct usbip_header_common { uint32_t command; uint32_t seqnum; uint32_t devid; uint32_t direction; uint32_t ep; } __packed; struct usbip_submit { uint32_t transfer_flags; int32_t transfer_buffer_length; int32_t start_frame; int32_t number_of_packets; int32_t interval; uint8_t bmRequestType; uint8_t bRequest; uint16_t wValue; uint16_t wIndex; uint16_t wLength; } __packed; struct usbip_unlink { uint32_t seqnum; } __packed; struct usbip_submit_rsp { struct usbip_header_common common; int32_t status; int32_t actual_length; int32_t start_frame; int32_t number_of_packets; int32_t error_count; uint64_t setup; } __packed; struct usbip_header { struct usbip_header_common common; union { struct usbip_submit submit; struct usbip_unlink unlink; } u; } __packed; /* Function definitions */ int usbip_recv(uint8_t *buf, size_t len); bool usbip_send_common(uint8_t ep, uint32_t data_len); int usbip_send(uint8_t ep, const uint8_t *data, size_t len); void usbip_start(void); int handle_usb_control(struct 
usbip_header *hdr); int handle_usb_data(struct usbip_header *hdr); ```
/content/code_sandbox/drivers/usb/device/usb_dc_native_posix_adapt.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
610
```c /* * */ #define DT_DRV_COMPAT atmel_sam_usbc #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(usb_dc_sam_usbc, CONFIG_USB_DRIVER_LOG_LEVEL); #include <zephyr/kernel.h> #include <zephyr/usb/usb_device.h> #include <soc.h> #include <string.h> #include <zephyr/sys/byteorder.h> #include <zephyr/sys/barrier.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/irq.h> #define EP_UDINT_MASK 0x000FF000 #define NUM_OF_EP_MAX DT_INST_PROP(0, num_bidir_endpoints) #define USBC_RAM_ADDR DT_REG_ADDR(DT_NODELABEL(sram1)) #define USBC_RAM_SIZE DT_REG_SIZE(DT_NODELABEL(sram1)) /** * @brief USB Driver Control Endpoint Finite State Machine states * * FSM states to keep tracking of control endpoint hidden states. */ enum usb_dc_epctrl_state { /* Wait a SETUP packet */ USB_EPCTRL_SETUP, /* Wait a OUT data packet */ USB_EPCTRL_DATA_OUT, /* Wait a IN data packet */ USB_EPCTRL_DATA_IN, /* Wait a IN ZLP packet */ USB_EPCTRL_HANDSHAKE_WAIT_IN_ZLP, /* Wait a OUT ZLP packet */ USB_EPCTRL_HANDSHAKE_WAIT_OUT_ZLP, /* STALL enabled on IN & OUT packet */ USB_EPCTRL_STALL_REQ, }; struct sam_usbc_udesc_sizes { uint32_t byte_count:15; uint32_t reserved:1; uint32_t multi_packet_size:15; uint32_t auto_zlp:1; }; struct sam_usbc_udesc_bk_ctrl_stat { uint32_t stallrq:1; uint32_t reserved1:15; uint32_t crcerri:1; uint32_t overfi:1; uint32_t underfi:1; uint32_t reserved2:13; }; struct sam_usbc_udesc_ep_ctrl_stat { uint32_t pipe_dev_addr:7; uint32_t reserved1:1; uint32_t pipe_num:4; uint32_t pipe_error_cnt_max:4; uint32_t pipe_error_status:8; uint32_t reserved2:8; }; struct sam_usbc_desc_table { uint8_t *ep_pipe_addr; union { uint32_t sizes; struct sam_usbc_udesc_sizes udesc_sizes; }; union { uint32_t bk_ctrl_stat; struct sam_usbc_udesc_bk_ctrl_stat udesc_bk_ctrl_stat; }; union { uint32_t ep_ctrl_stat; struct sam_usbc_udesc_ep_ctrl_stat udesc_ep_ctrl_stat; }; }; struct usb_device_ep_data { usb_dc_ep_callback cb_in; usb_dc_ep_callback cb_out; uint16_t mps; bool mps_x2; bool is_configured; uint32_t 
out_at; }; struct usb_device_data { usb_dc_status_callback status_cb; struct usb_device_ep_data ep_data[NUM_OF_EP_MAX]; }; static struct sam_usbc_desc_table dev_desc[(NUM_OF_EP_MAX + 1) * 2]; static struct usb_device_data dev_data; static volatile Usbc *regs = (Usbc *) DT_INST_REG_ADDR(0); PINCTRL_DT_INST_DEFINE(0); static const struct pinctrl_dev_config *pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0); static enum usb_dc_epctrl_state epctrl_fsm; static const char *const usb_dc_epctrl_state_string[] = { "STP", "DOUT", "DIN", "IN_ZLP", "OUT_ZLP", "STALL", }; #if defined(CONFIG_USB_DRIVER_LOG_LEVEL_DBG) static uint32_t dev_ep_sta_dbg[2][NUM_OF_EP_MAX]; static void usb_dc_sam_usbc_isr_sta_dbg(uint32_t ep_idx, uint32_t sr) { if (regs->UESTA[ep_idx] != dev_ep_sta_dbg[0][ep_idx]) { dev_ep_sta_dbg[0][ep_idx] = regs->UESTA[ep_idx]; dev_ep_sta_dbg[1][ep_idx] = 0; LOG_INF("ISR[%d] CON=%08x INT=%08x INTE=%08x " "ECON=%08x ESTA=%08x%s", ep_idx, regs->UDCON, regs->UDINT, regs->UDINTE, regs->UECON[ep_idx], regs->UESTA[ep_idx], ((sr & USBC_UESTA0_RXSTPI) ? 
" STP" : "")); } else if (dev_ep_sta_dbg[0][ep_idx] != dev_ep_sta_dbg[1][ep_idx]) { dev_ep_sta_dbg[1][ep_idx] = dev_ep_sta_dbg[0][ep_idx]; LOG_INF("ISR[%d] CON=%08x INT=%08x INTE=%08x " "ECON=%08x ESTA=%08x LOOP", ep_idx, regs->UDCON, regs->UDINT, regs->UDINTE, regs->UECON[ep_idx], regs->UESTA[ep_idx]); } } static void usb_dc_sam_usbc_clean_sta_dbg(void) { for (int i = 0; i < NUM_OF_EP_MAX; i++) { dev_ep_sta_dbg[0][i] = 0; dev_ep_sta_dbg[1][i] = 0; } } #else #define usb_dc_sam_usbc_isr_sta_dbg(ep_idx, sr) #define usb_dc_sam_usbc_clean_sta_dbg() #endif static ALWAYS_INLINE bool usb_dc_sam_usbc_is_frozen_clk(void) { return USBC->USBCON & USBC_USBCON_FRZCLK; } static ALWAYS_INLINE void usb_dc_sam_usbc_freeze_clk(void) { USBC->USBCON |= USBC_USBCON_FRZCLK; } static ALWAYS_INLINE void usb_dc_sam_usbc_unfreeze_clk(void) { USBC->USBCON &= ~USBC_USBCON_FRZCLK; while (USBC->USBCON & USBC_USBCON_FRZCLK) { ; }; } static uint8_t usb_dc_sam_usbc_ep_curr_bank(uint8_t ep_idx) { uint8_t idx = ep_idx * 2; if ((ep_idx > 0) && (regs->UESTA[ep_idx] & USBC_UESTA0_CURRBK(1)) > 0) { idx++; } return idx; } static bool usb_dc_is_attached(void) { return (regs->UDCON & USBC_UDCON_DETACH) == 0; } static bool usb_dc_ep_is_enabled(uint8_t ep_idx) { int reg = regs->UERST; return (reg & BIT(USBC_UERST_EPEN0_Pos + ep_idx)); } static int usb_dc_sam_usbc_ep_alloc_buf(int ep_idx) { struct sam_usbc_desc_table *ep_desc_bk; bool ep_enabled[NUM_OF_EP_MAX]; int desc_mem_alloc; int mps; if (ep_idx >= NUM_OF_EP_MAX) { return -EINVAL; } desc_mem_alloc = 0; mps = dev_data.ep_data[ep_idx].mps_x2 ? dev_data.ep_data[ep_idx].mps * 2 : dev_data.ep_data[ep_idx].mps; /* Check if there are memory to all endpoints */ for (int i = 0; i < NUM_OF_EP_MAX; i++) { if (!dev_data.ep_data[i].is_configured || i == ep_idx) { continue; } desc_mem_alloc += dev_data.ep_data[i].mps_x2 ? 
dev_data.ep_data[i].mps * 2 : dev_data.ep_data[i].mps; } if ((desc_mem_alloc + mps) > USBC_RAM_SIZE) { memset(&dev_data.ep_data[ep_idx], 0, sizeof(struct usb_device_ep_data)); return -ENOMEM; } for (int i = NUM_OF_EP_MAX - 1; i >= ep_idx; i--) { ep_enabled[i] = usb_dc_ep_is_enabled(i); if (ep_enabled[i]) { usb_dc_ep_disable(i); } } desc_mem_alloc = 0U; for (int i = 0; i < ep_idx; i++) { if (!dev_data.ep_data[i].is_configured) { continue; } desc_mem_alloc += dev_data.ep_data[i].mps_x2 ? dev_data.ep_data[i].mps * 2 : dev_data.ep_data[i].mps; } ep_desc_bk = ((struct sam_usbc_desc_table *) &dev_desc) + (ep_idx * 2); for (int i = ep_idx; i < NUM_OF_EP_MAX; i++) { if (!dev_data.ep_data[i].is_configured && (i != ep_idx)) { ep_desc_bk += 2; continue; } /* Alloc bank 0 */ ep_desc_bk->ep_pipe_addr = ((uint8_t *) USBC_RAM_ADDR) + desc_mem_alloc; ep_desc_bk->sizes = 0; ep_desc_bk->bk_ctrl_stat = 0; ep_desc_bk->ep_ctrl_stat = 0; ep_desc_bk++; /** * Alloc bank 1 * * if dual bank, * then ep_pipe_addr[1] = ep_pipe_addr[0] address + mps size * else ep_pipe_addr[1] = ep_pipe_addr[0] address */ ep_desc_bk->ep_pipe_addr = ((uint8_t *) USBC_RAM_ADDR) + desc_mem_alloc + (dev_data.ep_data[i].mps_x2 ? dev_data.ep_data[i].mps : 0); ep_desc_bk->sizes = 0; ep_desc_bk->bk_ctrl_stat = 0; ep_desc_bk->ep_ctrl_stat = 0; ep_desc_bk++; desc_mem_alloc += dev_data.ep_data[i].mps_x2 ? 
dev_data.ep_data[i].mps * 2 : dev_data.ep_data[i].mps; } ep_enabled[ep_idx] = false; for (int i = ep_idx; i < NUM_OF_EP_MAX; i++) { if (ep_enabled[i]) { usb_dc_ep_enable(i); } } return 0; } static void usb_dc_ep_enable_interrupts(uint8_t ep_idx) { if (ep_idx == 0U) { /* Control endpoint: enable SETUP */ regs->UECONSET[ep_idx] = USBC_UECON0SET_RXSTPES; } else if (regs->UECFG[ep_idx] & USBC_UECFG0_EPDIR_IN) { /* TX - IN direction: acknowledge FIFO empty interrupt */ regs->UESTACLR[ep_idx] = USBC_UESTA0CLR_TXINIC; regs->UECONSET[ep_idx] = USBC_UECON0SET_TXINES; } else { /* RX - OUT direction */ regs->UECONSET[ep_idx] = USBC_UECON0SET_RXOUTES; } } static void usb_dc_ep_isr_sta(uint8_t ep_idx) { uint32_t sr = regs->UESTA[ep_idx]; usb_dc_sam_usbc_isr_sta_dbg(ep_idx, sr); if (sr & USBC_UESTA0_RAMACERI) { regs->UESTACLR[ep_idx] = USBC_UESTA0CLR_RAMACERIC; LOG_ERR("ISR: EP%d RAM Access Error", ep_idx); } } static void usb_dc_ctrl_init(void) { LOG_INF("STP - INIT"); /* In case of abort of IN Data Phase: * No need to abort IN transfer (rise TXINI), * because it is automatically done by hardware when a Setup packet is * received. But the interrupt must be disabled to don't generate * interrupt TXINI after SETUP reception. 
*/ regs->UECONCLR[0] = USBC_UECON0CLR_TXINEC; /* In case of OUT ZLP event is no processed before Setup event occurs */ regs->UESTACLR[0] = USBC_UESTA0CLR_RXOUTIC; regs->UECONCLR[0] = USBC_UECON0CLR_RXOUTEC | USBC_UECON0CLR_NAKOUTEC | USBC_UECON0CLR_NAKINEC; epctrl_fsm = USB_EPCTRL_SETUP; } static void usb_dc_ctrl_stall_data(uint32_t flags) { LOG_INF("STP - STALL"); epctrl_fsm = USB_EPCTRL_STALL_REQ; regs->UECONSET[0] = USBC_UECON0SET_STALLRQS; regs->UESTACLR[0] = flags; } static void usb_dc_ctrl_send_zlp_in(void) { uint32_t key; LOG_INF("STP - ZLP IN"); epctrl_fsm = USB_EPCTRL_HANDSHAKE_WAIT_IN_ZLP; /* Validate and send empty IN packet on control endpoint */ dev_desc[0].sizes = 0; key = irq_lock(); /* Send ZLP on IN endpoint */ regs->UESTACLR[0] = USBC_UESTA0CLR_TXINIC; regs->UECONSET[0] = USBC_UECON0SET_TXINES; /* To detect a protocol error, enable nak interrupt on data OUT phase */ regs->UESTACLR[0] = USBC_UESTA0CLR_NAKOUTIC; regs->UECONSET[0] = USBC_UECON0SET_NAKOUTES; irq_unlock(key); } static void usb_dc_ctrl_send_zlp_out(void) { uint32_t key; LOG_INF("STP - ZLP OUT"); epctrl_fsm = USB_EPCTRL_HANDSHAKE_WAIT_OUT_ZLP; /* To detect a protocol error, enable nak interrupt on data IN phase */ key = irq_lock(); regs->UESTACLR[0] = USBC_UESTA0CLR_NAKINIC; regs->UECONSET[0] = USBC_UECON0SET_NAKINES; irq_unlock(key); } static void usb_dc_ep0_isr(void) { uint32_t sr = regs->UESTA[0]; uint32_t dev_ctrl = regs->UDCON; usb_dc_ep_isr_sta(0); regs->UECONCLR[0] = USBC_UECON0CLR_NAKINEC; regs->UECONCLR[0] = USBC_UECON0CLR_NAKOUTEC; if (sr & USBC_UESTA0_RXSTPI) { /* May be a hidden DATA or ZLP phase or protocol abort */ if (epctrl_fsm != USB_EPCTRL_SETUP) { /* Reinitializes control endpoint management */ usb_dc_ctrl_init(); } /* SETUP data received */ dev_data.ep_data[0].cb_out(USB_EP_DIR_OUT, USB_DC_EP_SETUP); return; } if (sr & USBC_UESTA0_RXOUTI) { LOG_DBG("RXOUT= fsm: %s", usb_dc_epctrl_state_string[epctrl_fsm]); if (epctrl_fsm != USB_EPCTRL_DATA_OUT) { if ((epctrl_fsm == 
USB_EPCTRL_DATA_IN) || (epctrl_fsm == USB_EPCTRL_HANDSHAKE_WAIT_OUT_ZLP)) { /* End of SETUP request: * - Data IN Phase aborted, * - or last Data IN Phase hidden by ZLP OUT * sending quickly, * - or ZLP OUT received normally. * * Nothing to do */ } else { /* Protocol error during SETUP request */ usb_dc_ctrl_stall_data(0); } usb_dc_ctrl_init(); return; } /* OUT (to device) data received */ dev_data.ep_data[0].cb_out(USB_EP_DIR_OUT, USB_DC_EP_DATA_OUT); return; } if ((sr & USBC_UESTA0_TXINI) && (regs->UECON[0] & USBC_UECON0_TXINE)) { LOG_DBG("TXINI= fsm: %s", usb_dc_epctrl_state_string[epctrl_fsm]); regs->UECONCLR[0] = USBC_UECON0CLR_TXINEC; if (epctrl_fsm == USB_EPCTRL_HANDSHAKE_WAIT_IN_ZLP) { if (!(dev_ctrl & USBC_UDCON_ADDEN) && (dev_ctrl & USBC_UDCON_UADD_Msk) != 0U) { /* Commit the pending address update. This * must be done after the ack to the host * completes else the ack will get dropped. */ regs->UDCON |= USBC_UDCON_ADDEN; } /* ZLP on IN is sent */ usb_dc_ctrl_init(); return; } /* IN (to host) transmit complete */ dev_data.ep_data[0].cb_in(USB_EP_DIR_IN, USB_DC_EP_DATA_IN); return; } if (sr & USBC_UESTA0_NAKOUTI) { LOG_DBG("NAKOUT= fsm: %s", usb_dc_epctrl_state_string[epctrl_fsm]); regs->UESTACLR[0] = USBC_UESTA0CLR_NAKOUTIC; if (regs->UESTA[0] & USBC_UESTA0_TXINI) { /** overflow ignored if IN data is received */ return; } if (epctrl_fsm == USB_EPCTRL_HANDSHAKE_WAIT_IN_ZLP) { /* A IN handshake is waiting by device, but host want * extra OUT data then stall extra OUT data */ regs->UECONSET[0] = USBC_UECON0SET_STALLRQS; } return; } if (sr & USBC_UESTA0_NAKINI) { LOG_DBG("NAKIN= fsm: %s", usb_dc_epctrl_state_string[epctrl_fsm]); regs->UESTACLR[0] = USBC_UESTA0CLR_NAKINIC; if (regs->UESTA[0] & USBC_UESTA0_RXOUTI) { /** underflow ignored if OUT data is received */ return; } if (epctrl_fsm == USB_EPCTRL_DATA_OUT) { /* Host want to stop OUT transaction then stop to * wait OUT data phase and wait IN ZLP handshake. 
*/ usb_dc_ctrl_send_zlp_in(); } else if (epctrl_fsm == USB_EPCTRL_HANDSHAKE_WAIT_OUT_ZLP) { /* A OUT handshake is waiting by device, but host want * extra IN data then stall extra IN data. */ regs->UECONSET[0] = USBC_UECON0SET_STALLRQS; } else { /** Nothing to do */ } return; } } static void usb_dc_ep_isr(uint8_t ep_idx) { uint32_t sr = regs->UESTA[ep_idx]; usb_dc_ep_isr_sta(ep_idx); if (sr & USBC_UESTA0_RXOUTI) { uint8_t ep = ep_idx | USB_EP_DIR_OUT; regs->UESTACLR[ep_idx] = USBC_UESTA0CLR_RXOUTIC; /* OUT (to device) data received */ dev_data.ep_data[ep_idx].cb_out(ep, USB_DC_EP_DATA_OUT); } if (sr & USBC_UESTA0_TXINI) { uint8_t ep = ep_idx | USB_EP_DIR_IN; regs->UESTACLR[ep_idx] = USBC_UESTA0CLR_TXINIC; /* IN (to host) transmit complete */ dev_data.ep_data[ep_idx].cb_in(ep, USB_DC_EP_DATA_IN); } } static void usb_dc_sam_usbc_isr(void) { uint32_t sr = regs->UDINT; if (IS_ENABLED(CONFIG_USB_DEVICE_SOF)) { /* SOF interrupt */ if (sr & USBC_UDINT_SOF) { /* Acknowledge the interrupt */ regs->UDINTCLR = USBC_UDINTCLR_SOFC; dev_data.status_cb(USB_DC_SOF, NULL); goto usb_dc_sam_usbc_isr_barrier; } } /* EP0 endpoint interrupt */ if (sr & USBC_UDINT_EP0INT) { usb_dc_ep0_isr(); goto usb_dc_sam_usbc_isr_barrier; } /* Other endpoints interrupt */ if (sr & EP_UDINT_MASK) { for (int ep_idx = 1; ep_idx < NUM_OF_EP_MAX; ep_idx++) { if (sr & (USBC_UDINT_EP0INT << ep_idx)) { usb_dc_ep_isr(ep_idx); } } goto usb_dc_sam_usbc_isr_barrier; } /* End of resume interrupt */ if (sr & USBC_UDINT_EORSM) { LOG_DBG("ISR: End Of Resume"); regs->UDINTCLR = USBC_UDINTCLR_EORSMC; dev_data.status_cb(USB_DC_RESUME, NULL); goto usb_dc_sam_usbc_isr_barrier; } /* End of reset interrupt */ if (sr & USBC_UDINT_EORST) { LOG_DBG("ISR: End Of Reset"); regs->UDINTCLR = USBC_UDINTCLR_EORSTC; if (usb_dc_ep_is_enabled(0)) { /* The device clears some of the configuration of EP0 * when it receives the EORST. Re-enable interrupts. 
*/ usb_dc_ep_enable_interrupts(0); usb_dc_ctrl_init(); } dev_data.status_cb(USB_DC_RESET, NULL); usb_dc_sam_usbc_clean_sta_dbg(); goto usb_dc_sam_usbc_isr_barrier; } /* Suspend interrupt */ if (sr & USBC_UDINT_SUSP && regs->UDINTE & USBC_UDINTE_SUSPE) { LOG_DBG("ISR: Suspend"); regs->UDINTCLR = USBC_UDINTCLR_SUSPC; usb_dc_sam_usbc_unfreeze_clk(); /** * Sync Generic Clock * Check USB clock ready after suspend and * eventually sleep USB clock */ while ((regs->USBSTA & USBC_USBSTA_CLKUSABLE) == 0) { ; }; regs->UDINTECLR = USBC_UDINTECLR_SUSPEC; regs->UDINTCLR = USBC_UDINTCLR_WAKEUPC; regs->UDINTESET = USBC_UDINTESET_WAKEUPES; usb_dc_sam_usbc_freeze_clk(); dev_data.status_cb(USB_DC_SUSPEND, NULL); goto usb_dc_sam_usbc_isr_barrier; } /* Wakeup interrupt */ if (sr & USBC_UDINT_WAKEUP && regs->UDINTE & USBC_UDINTE_WAKEUPE) { LOG_DBG("ISR: Wake Up"); regs->UDINTCLR = USBC_UDINTCLR_WAKEUPC; usb_dc_sam_usbc_unfreeze_clk(); /** * Sync Generic Clock * Check USB clock ready after suspend and * eventually sleep USB clock */ while ((regs->USBSTA & USBC_USBSTA_CLKUSABLE) == 0) { ; }; regs->UDINTECLR = USBC_UDINTECLR_WAKEUPEC; regs->UDINTCLR = USBC_UDINTCLR_SUSPC; regs->UDINTESET = USBC_UDINTESET_SUSPES; } usb_dc_sam_usbc_isr_barrier: barrier_dmem_fence_full(); } int usb_dc_attach(void) { uint32_t pmcon; uint32_t regval; uint32_t key = irq_lock(); int retval; /* Enable USBC asynchronous wake-up source */ PM->AWEN |= BIT(PM_AWEN_USBC); /* Always authorize asynchronous USB interrupts to exit of sleep mode * For SAM USB wake up device except BACKUP mode */ pmcon = BPM->PMCON | BPM_PMCON_FASTWKUP; BPM->UNLOCK = BPM_UNLOCK_KEY(0xAAu) | BPM_UNLOCK_ADDR((uint32_t)&BPM->PMCON - (uint32_t)BPM); BPM->PMCON = pmcon; /* Start the peripheral clock PBB & DATA */ soc_pmc_peripheral_enable( PM_CLOCK_MASK(PM_CLK_GRP_PBB, SYSCLK_USBC_REGS)); soc_pmc_peripheral_enable( PM_CLOCK_MASK(PM_CLK_GRP_HSB, SYSCLK_USBC_DATA)); /* Enable USB Generic clock */ SCIF->GCCTRL[GEN_CLK_USBC] = 0; 
SCIF->GCCTRL[GEN_CLK_USBC] = SCIF_GCCTRL_OSCSEL(SCIF_GC_USES_CLK_HSB) | SCIF_GCCTRL_CEN; /* Sync Generic Clock */ while ((regs->USBSTA & USBC_USBSTA_CLKUSABLE) == 0) { ; }; retval = pinctrl_apply_state(pcfg, PINCTRL_STATE_DEFAULT); if (retval < 0) { return retval; } /* Enable the USB controller in device mode with the clock unfrozen */ regs->USBCON = USBC_USBCON_UIMOD | USBC_USBCON_USBE; usb_dc_sam_usbc_unfreeze_clk(); regs->UDESC = USBC_UDESC_UDESCA((int) &dev_desc); /* Select the speed with pads detached */ regval = USBC_UDCON_DETACH; switch (DT_INST_ENUM_IDX(0, maximum_speed)) { case 1: WRITE_BIT(regval, USBC_UDCON_LS_Pos, 0); break; case 0: WRITE_BIT(regval, USBC_UDCON_LS_Pos, 1); break; default: WRITE_BIT(regval, USBC_UDCON_LS_Pos, 0); LOG_WRN("Unsupported maximum speed defined in device tree. " "USB controller will default to its maximum HW " "capability"); } regs->UDCON = regval; /* Enable device interrupts * EORSM End of Resume Interrupt * SOF Start of Frame Interrupt * EORST End of Reset Interrupt * SUSP Suspend Interrupt * WAKEUP Wake-Up Interrupt */ regs->UDINTCLR = USBC_UDINTCLR_EORSMC | USBC_UDINTCLR_EORSTC | USBC_UDINTCLR_SOFC | USBC_UDINTCLR_SUSPC | USBC_UDINTCLR_WAKEUPC; regs->UDINTESET = USBC_UDINTESET_EORSMES | USBC_UDINTESET_EORSTES | USBC_UDINTESET_SUSPES | USBC_UDINTESET_WAKEUPES; if (IS_ENABLED(CONFIG_USB_DEVICE_SOF)) { regs->UDINTESET |= USBC_UDINTESET_SOFES; } IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), usb_dc_sam_usbc_isr, 0, 0); irq_enable(DT_INST_IRQN(0)); /* Attach the device */ regs->UDCON &= ~USBC_UDCON_DETACH; /* Put USB on low power state (wait Susp/Wake int) */ usb_dc_sam_usbc_freeze_clk(); /* Force Susp 2 Wake transition */ regs->UDINTSET = USBC_UDINTSET_SUSPS; irq_unlock(key); LOG_DBG("USB DC attach"); return 0; } int usb_dc_detach(void) { uint32_t key = irq_lock(); regs->UDCON |= USBC_UDCON_DETACH; /* Disable the USB controller and freeze the clock */ regs->USBCON = USBC_USBCON_UIMOD | USBC_USBCON_FRZCLK; /* Disable 
USB Generic clock */ SCIF->GCCTRL[GEN_CLK_USBC] = 0; /* Disable USBC asynchronous wake-up source */ PM->AWEN &= ~(BIT(PM_AWEN_USBC)); /* Disable the peripheral clock HSB & PBB */ soc_pmc_peripheral_enable( PM_CLOCK_MASK(PM_CLK_GRP_HSB, SYSCLK_USBC_DATA)); soc_pmc_peripheral_enable( PM_CLOCK_MASK(PM_CLK_GRP_PBB, SYSCLK_USBC_REGS)); irq_disable(DT_INST_IRQN(0)); irq_unlock(key); LOG_DBG("USB DC detach"); return 0; } int usb_dc_reset(void) { uint32_t key = irq_lock(); /* Reset the controller */ regs->USBCON = USBC_USBCON_UIMOD | USBC_USBCON_FRZCLK; /* Clear private data */ (void)memset(&dev_data, 0, sizeof(dev_data)); (void)memset(&dev_desc, 0, sizeof(dev_desc)); irq_unlock(key); LOG_DBG("USB DC reset"); return 0; } int usb_dc_set_address(uint8_t addr) { /* * Set the address but keep it disabled for now. It should be enabled * only after the ack to the host completes. */ regs->UDCON &= ~USBC_UDCON_ADDEN; regs->UDCON |= USBC_UDCON_UADD(addr); LOG_DBG("USB DC set address 0x%02x", addr); return 0; } void usb_dc_set_status_callback(const usb_dc_status_callback cb) { regs->UDINTECLR = USBC_UDINTECLR_MASK; regs->UDINTCLR = USBC_UDINTCLR_MASK; usb_dc_detach(); usb_dc_reset(); dev_data.status_cb = cb; LOG_DBG("USB DC set callback"); } int usb_dc_ep_check_cap(const struct usb_dc_ep_cfg_data * const cfg) { uint8_t ep_idx = USB_EP_GET_IDX(cfg->ep_addr); if (ep_idx >= NUM_OF_EP_MAX) { LOG_ERR("endpoint index/address out of range"); return -EINVAL; } if (ep_idx == 0U) { if (cfg->ep_type != USB_DC_EP_CONTROL) { LOG_ERR("pre-selected as control endpoint"); return -EINVAL; } } else if (ep_idx & BIT(0)) { if (USB_EP_DIR_IS_OUT(cfg->ep_addr)) { LOG_INF("pre-selected as IN endpoint"); return -EINVAL; } } else { if (USB_EP_DIR_IS_IN(cfg->ep_addr)) { LOG_INF("pre-selected as OUT endpoint"); return -EINVAL; } } if (cfg->ep_mps < 1 || cfg->ep_mps > 1024 || (cfg->ep_type == USB_DC_EP_CONTROL && cfg->ep_mps > 64)) { LOG_ERR("invalid endpoint size"); return -EINVAL; } return 0; } int 
usb_dc_ep_configure(const struct usb_dc_ep_cfg_data *const cfg) { uint8_t ep_idx = USB_EP_GET_IDX(cfg->ep_addr); uint32_t regval = 0U; int log2ceil_mps; if (usb_dc_ep_check_cap(cfg) != 0) { return -EINVAL; } if (!usb_dc_is_attached()) { LOG_ERR("device not attached"); return -ENODEV; } /* Allow re-configure any endpoint */ if (usb_dc_ep_is_enabled(ep_idx)) { usb_dc_ep_disable(ep_idx); } LOG_DBG("Configure ep 0x%02x, mps %d, type %d", cfg->ep_addr, cfg->ep_mps, cfg->ep_type); switch (cfg->ep_type) { case USB_DC_EP_CONTROL: regval |= USBC_UECFG0_EPTYPE_CONTROL; break; case USB_DC_EP_ISOCHRONOUS: regval |= USBC_UECFG0_EPTYPE_ISOCHRONOUS; break; case USB_DC_EP_BULK: regval |= USBC_UECFG0_EPTYPE_BULK; break; case USB_DC_EP_INTERRUPT: regval |= USBC_UECFG0_EPTYPE_INTERRUPT; break; default: return -EINVAL; } if (USB_EP_DIR_IS_OUT(cfg->ep_addr) || cfg->ep_type == USB_DC_EP_CONTROL) { regval |= USBC_UECFG0_EPDIR_OUT; } else { regval |= USBC_UECFG0_EPDIR_IN; } /* * Map the endpoint size to the buffer size. Only power of 2 buffer * sizes between 8 and 1024 are possible, get the next power of 2. 
*/ log2ceil_mps = 32 - __builtin_clz((MAX(cfg->ep_mps, 8) << 1) - 1) - 1; regval |= USBC_UECFG0_EPSIZE(log2ceil_mps - 3); dev_data.ep_data[ep_idx].mps = cfg->ep_mps; /* Use double bank buffering for: ISOCHRONOUS, BULK and INTERRUPT */ if (cfg->ep_type != USB_DC_EP_CONTROL) { regval |= USBC_UECFG0_EPBK_DOUBLE; dev_data.ep_data[ep_idx].mps_x2 = true; } else { regval |= USBC_UECFG0_EPBK_SINGLE; dev_data.ep_data[ep_idx].mps_x2 = false; } /** Enable Global NAK */ regs->UDCON |= USBC_UDCON_GNAK; if (usb_dc_sam_usbc_ep_alloc_buf(ep_idx) < 0) { dev_data.ep_data[ep_idx].is_configured = false; regs->UDCON &= ~USBC_UDCON_GNAK; return -ENOMEM; } regs->UDCON &= ~USBC_UDCON_GNAK; /* Configure the endpoint */ dev_data.ep_data[ep_idx].is_configured = true; regs->UECFG[ep_idx] = regval; LOG_DBG("ep 0x%02x configured", cfg->ep_addr); return 0; } int usb_dc_ep_set_stall(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (ep_idx >= NUM_OF_EP_MAX) { LOG_ERR("wrong endpoint index/address"); return -EINVAL; } if (ep_idx == 0) { if (epctrl_fsm == USB_EPCTRL_SETUP) { usb_dc_ctrl_stall_data(USBC_UESTA0CLR_RXSTPIC); } else if (epctrl_fsm == USB_EPCTRL_DATA_OUT) { usb_dc_ctrl_stall_data(USBC_UESTA0CLR_RXOUTIC); } else { /** Stall without commit any status */ usb_dc_ctrl_stall_data(0); } } else { regs->UECONSET[ep_idx] = USBC_UECON0SET_STALLRQS; } LOG_WRN("USB DC stall set ep 0x%02x", ep); return 0; } int usb_dc_ep_clear_stall(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); uint32_t key; if (ep_idx >= NUM_OF_EP_MAX) { LOG_ERR("wrong endpoint index/address"); return -EINVAL; } if (regs->UECON[ep_idx] & USBC_UECON0_STALLRQ) { key = irq_lock(); dev_data.ep_data[ep_idx].out_at = 0U; regs->UECONCLR[ep_idx] = USBC_UECON0CLR_STALLRQC; if (regs->UESTA[ep_idx] & USBC_UESTA0_STALLEDI) { regs->UESTACLR[ep_idx] = USBC_UESTA0CLR_STALLEDIC; regs->UECONSET[ep_idx] = USBC_UECON0SET_RSTDTS; } irq_unlock(key); } LOG_DBG("USB DC stall clear ep 0x%02x", ep); return 0; } int usb_dc_ep_is_stalled(uint8_t ep, 
uint8_t *stalled) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (ep_idx >= NUM_OF_EP_MAX) { LOG_ERR("wrong endpoint index/address"); return -EINVAL; } if (!stalled) { return -EINVAL; } *stalled = ((regs->UECON[ep_idx] & USBC_UECON0_STALLRQ) != 0); LOG_DBG("USB DC stall check ep 0x%02x stalled: %d", ep, *stalled); return 0; } int usb_dc_ep_halt(uint8_t ep) { return usb_dc_ep_set_stall(ep); } int usb_dc_ep_enable(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); uint32_t key; if (ep_idx >= NUM_OF_EP_MAX) { LOG_ERR("wrong endpoint index/address"); return -EINVAL; } if (!dev_data.ep_data[ep_idx].is_configured) { LOG_ERR("endpoint not configured"); return -ENODEV; } key = irq_lock(); dev_data.ep_data[ep_idx].out_at = 0U; /* Enable endpoint */ regs->UERST |= BIT(USBC_UERST_EPEN0_Pos + ep_idx); /* Enable global endpoint interrupts */ regs->UDINTESET = (USBC_UDINTESET_EP0INTES << ep_idx); usb_dc_ep_enable_interrupts(ep_idx); irq_unlock(key); LOG_DBG("Enable ep 0x%02x", ep); return 0; } int usb_dc_ep_disable(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); uint32_t key; if (ep_idx >= NUM_OF_EP_MAX) { LOG_ERR("wrong endpoint index/address"); return -EINVAL; } key = irq_lock(); /* Disable global endpoint interrupt */ regs->UDINTECLR = BIT(USBC_UDINTESET_EP0INTES_Pos + ep_idx); /* Disable endpoint and reset */ regs->UERST &= ~BIT(USBC_UERST_EPEN0_Pos + ep_idx); irq_unlock(key); LOG_DBG("Disable ep 0x%02x", ep); return 0; } int usb_dc_ep_flush(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); uint32_t key; if (ep_idx >= NUM_OF_EP_MAX) { LOG_ERR("wrong endpoint index/address"); return -EINVAL; } if (!usb_dc_ep_is_enabled(ep_idx)) { LOG_ERR("endpoint not enabled"); return -ENODEV; } key = irq_lock(); /* Disable the IN interrupt */ regs->UECONCLR[ep_idx] = USBC_UECON0CLR_TXINEC; /* Reset the endpoint */ regs->UERST &= ~(BIT(ep_idx)); regs->UERST |= BIT(ep_idx); dev_data.ep_data[ep_idx].out_at = 0U; /* Re-enable interrupts */ usb_dc_ep_enable_interrupts(ep_idx); irq_unlock(key); 
LOG_DBG("ep 0x%02x flushed", ep); return 0; } int usb_dc_ep_set_callback(uint8_t ep, const usb_dc_ep_callback cb) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (ep_idx >= NUM_OF_EP_MAX) { LOG_ERR("wrong endpoint index/address"); return -EINVAL; } if (USB_EP_DIR_IS_IN(ep)) { dev_data.ep_data[ep_idx].cb_in = cb; } else { dev_data.ep_data[ep_idx].cb_out = cb; } LOG_DBG("set ep 0x%02x %s callback", ep, USB_EP_DIR_IS_IN(ep) ? "IN" : "OUT"); return 0; } static int usb_dc_ep_write_stp(uint8_t ep_bank, const uint8_t *data, uint32_t packet_len) { uint32_t key; if (epctrl_fsm == USB_EPCTRL_SETUP) { regs->UESTACLR[0] = USBC_UESTA0CLR_RXSTPIC; epctrl_fsm = USB_EPCTRL_DATA_IN; key = irq_lock(); regs->UECONCLR[0] = USBC_UECON0CLR_TXINEC; irq_unlock(key); } if (epctrl_fsm == USB_EPCTRL_DATA_IN) { /* All data requested are transferred or a short packet has * been sent then it is the end of data phase. * * Generate an OUT ZLP for handshake phase. */ if (packet_len == 0) { usb_dc_ctrl_send_zlp_out(); return 0; } /** Critical section * Only in case of DATA IN phase abort without USB Reset * signal after. The IN data don't must be written in * endpoint 0 DPRAM during a next setup reception in same * endpoint 0 DPRAM. Thereby, an OUT ZLP reception must * check before IN data write and if no OUT ZLP is received * the data must be written quickly (800us) before an * eventually ZLP OUT and SETUP reception. */ key = irq_lock(); if (regs->UESTA[0] & USBC_UESTA0_RXOUTI) { /* IN DATA phase aborted by OUT ZLP */ irq_unlock(key); epctrl_fsm = USB_EPCTRL_HANDSHAKE_WAIT_OUT_ZLP; return 0; } if (data) { memcpy(dev_desc[ep_bank].ep_pipe_addr, data, packet_len); barrier_dsync_fence_full(); } dev_desc[ep_bank].sizes = packet_len; /* * Control endpoint: clear the interrupt flag to send * the data, and re-enable the interrupts to trigger * an interrupt at the end of the transfer. 
*/ regs->UESTACLR[0] = USBC_UESTA0CLR_TXINIC; regs->UECONSET[0] = USBC_UECON0SET_TXINES; /* In case of abort of DATA IN phase, no need to enable * nak OUT interrupt because OUT endpoint is already * free and ZLP OUT accepted. */ irq_unlock(key); } else if (epctrl_fsm == USB_EPCTRL_DATA_OUT || epctrl_fsm == USB_EPCTRL_HANDSHAKE_WAIT_IN_ZLP) { /* ZLP on IN is sent, then valid end of setup request * or * No data phase requested. * * Send IN ZLP to ACK setup request */ usb_dc_ctrl_send_zlp_in(); } else { LOG_ERR("Invalid STP state %d on IN phase", epctrl_fsm); return -EPERM; } return 0; } int usb_dc_ep_write(uint8_t ep, const uint8_t *data, uint32_t data_len, uint32_t *ret_bytes) { uint8_t ep_idx = USB_EP_GET_IDX(ep); uint8_t ep_bank; uint32_t packet_len; if (ep_idx >= NUM_OF_EP_MAX) { LOG_ERR("wrong endpoint index/address"); return -EINVAL; } if (!usb_dc_ep_is_enabled(ep_idx)) { LOG_ERR("endpoint not enabled"); return -ENODEV; } if (USB_EP_DIR_IS_OUT(ep)) { LOG_ERR("wrong endpoint direction"); return -EINVAL; } if ((regs->UECON[ep_idx] & USBC_UECON0_STALLRQ) != 0) { LOG_WRN("endpoint is stalled"); return -EBUSY; } /* Check if there is bank available */ if (ep_idx > 0) { if ((regs->UECON[ep_idx] & USBC_UECON0_FIFOCON) == 0) { return -EAGAIN; } } ep_bank = usb_dc_sam_usbc_ep_curr_bank(ep_idx); packet_len = MIN(data_len, dev_data.ep_data[ep_idx].mps); if (ret_bytes) { *ret_bytes = packet_len; } if (ep_idx == 0U) { if (usb_dc_ep_write_stp(ep_bank, data, packet_len)) { return -EPERM; } } else { if (data && packet_len > 0) { memcpy(dev_desc[ep_bank].ep_pipe_addr, data, packet_len); barrier_dsync_fence_full(); } dev_desc[ep_bank].sizes = packet_len; /* * Other endpoint types: clear the FIFO control flag to send * the data. */ regs->UECONCLR[ep_idx] = USBC_UECON0CLR_FIFOCONC; } LOG_INF("ep 0x%02x write %d bytes from %d to bank %d%s", ep, packet_len, data_len, ep_bank % 2, packet_len == 0 ? 
" (ZLP)" : ""); return 0; } static int usb_dc_ep_read_ex_stp(uint32_t take, uint32_t wLength) { uint32_t key; if (epctrl_fsm == USB_EPCTRL_SETUP) { if (regs->UESTA[0] & USBC_UESTA0_CTRLDIR) { /** Do Nothing */ } else { regs->UESTACLR[0] = USBC_UESTA0CLR_RXSTPIC; epctrl_fsm = USB_EPCTRL_DATA_OUT; if (wLength == 0) { /* No data phase requested. * Send IN ZLP to ACK setup request * * This is send at usb_dc_ep_write() */ return 0; } regs->UECONSET[0] = USBC_UECON0SET_RXOUTES; /* To detect a protocol error, enable nak * interrupt on data IN phase */ regs->UESTACLR[0] = USBC_UESTA0CLR_NAKINIC; key = irq_lock(); regs->UECONSET[0] = USBC_UECON0SET_NAKINES; irq_unlock(key); } } else if (epctrl_fsm == USB_EPCTRL_DATA_OUT) { regs->UESTACLR[0] = USBC_UESTA0CLR_RXOUTIC; if (take == 0) { usb_dc_ctrl_send_zlp_in(); } else { regs->UESTACLR[0] = USBC_UESTA0CLR_NAKINIC; key = irq_lock(); regs->UECONSET[0] = USBC_UECON0SET_NAKINES; irq_unlock(key); } } else { LOG_ERR("Invalid STP state %d on OUT phase", epctrl_fsm); return -EPERM; } return 0; } int usb_dc_ep_read_ex(uint8_t ep, uint8_t *data, uint32_t max_data_len, uint32_t *read_bytes, bool wait) { uint8_t ep_idx = USB_EP_GET_IDX(ep); struct usb_setup_packet *setup; uint8_t ep_bank; uint32_t data_len; uint32_t remaining; uint32_t take; int rc = 0; if (ep_idx >= NUM_OF_EP_MAX) { LOG_ERR("wrong endpoint index/address"); return -EINVAL; } if (!usb_dc_ep_is_enabled(ep_idx)) { LOG_ERR("endpoint not enabled"); return -ENODEV; } if (USB_EP_DIR_IS_IN(ep)) { LOG_ERR("wrong endpoint direction"); return -EINVAL; } if ((regs->UECON[ep_idx] & USBC_UECON0_STALLRQ) != 0) { LOG_WRN("endpoint is stalled"); return -EBUSY; } ep_bank = usb_dc_sam_usbc_ep_curr_bank(ep_idx); data_len = dev_desc[ep_bank].udesc_sizes.byte_count; if (data == NULL) { dev_data.ep_data[ep_idx].out_at = 0U; if (read_bytes) { *read_bytes = data_len; } return 0; } remaining = data_len - dev_data.ep_data[ep_idx].out_at; take = MIN(max_data_len, remaining); if (take) { memcpy(data, 
(uint8_t *) dev_desc[ep_bank].ep_pipe_addr + dev_data.ep_data[ep_idx].out_at, take); barrier_dsync_fence_full(); } if (read_bytes) { *read_bytes = take; } if (take == remaining || take == 0) { if (!wait) { dev_data.ep_data[ep_idx].out_at = 0U; if (ep_idx == 0) { setup = (struct usb_setup_packet *) data; rc = usb_dc_ep_read_ex_stp(take, setup->wLength); } else { rc = usb_dc_ep_read_continue(ep); } } } else { dev_data.ep_data[ep_idx].out_at += take; } LOG_INF("ep 0x%02x read %d bytes from bank %d and %s", ep, take, ep_bank % 2, wait ? "wait" : "NO wait"); return rc; } int usb_dc_ep_read_continue(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (ep_idx == 0 || ep_idx >= NUM_OF_EP_MAX) { LOG_ERR("wrong endpoint index/address"); return -EINVAL; } if (!usb_dc_ep_is_enabled(ep_idx)) { LOG_ERR("endpoint not enabled"); return -ENODEV; } if (USB_EP_DIR_IS_IN(ep)) { LOG_ERR("wrong endpoint direction"); return -EINVAL; } regs->UECONCLR[ep_idx] = USBC_UECON0CLR_FIFOCONC; return 0; } int usb_dc_ep_read(uint8_t ep, uint8_t *data, uint32_t max_data_len, uint32_t *read_bytes) { return usb_dc_ep_read_ex(ep, data, max_data_len, read_bytes, false); } int usb_dc_ep_read_wait(uint8_t ep, uint8_t *data, uint32_t max_data_len, uint32_t *read_bytes) { return usb_dc_ep_read_ex(ep, data, max_data_len, read_bytes, true); } int usb_dc_ep_mps(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (ep_idx >= NUM_OF_EP_MAX) { LOG_ERR("wrong endpoint index/address"); return -EINVAL; } return dev_data.ep_data[ep_idx].mps; } int usb_dc_wakeup_request(void) { bool is_clk_frozen = usb_dc_sam_usbc_is_frozen_clk(); if (is_clk_frozen) { usb_dc_sam_usbc_unfreeze_clk(); } regs->UDCON |= USBC_UDCON_RMWKUP; if (is_clk_frozen) { usb_dc_sam_usbc_freeze_clk(); } return 0; } ```
/content/code_sandbox/drivers/usb/device/usb_dc_sam_usbc.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
11,378
```c /* * */ /** * @file * @brief USB DesignWare device controller driver * * USB DesignWare device controller driver. The driver implements the low * level control routines to deal directly with the hardware. */ #define DT_DRV_COMPAT snps_dwc2 #include <string.h> #include <stdio.h> #include <zephyr/kernel.h> #include <zephyr/devicetree.h> #include <zephyr/irq.h> #include <zephyr/sys/util.h> #include <zephyr/sys/byteorder.h> #include <zephyr/usb/usb_device.h> #include <usb_dwc2_hw.h> #include "usb_dc_dw_stm32.h" #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(usb_dc_dw, CONFIG_USB_DRIVER_LOG_LEVEL); /* FIXME: The actual number of endpoints should be obtained from GHWCFG4. */ enum usb_dw_in_ep_idx { USB_DW_IN_EP_0 = 0, USB_DW_IN_EP_1, USB_DW_IN_EP_2, USB_DW_IN_EP_3, USB_DW_IN_EP_4, USB_DW_IN_EP_5, USB_DW_IN_EP_NUM }; /* FIXME: The actual number of endpoints should be obtained from GHWCFG2. */ enum usb_dw_out_ep_idx { USB_DW_OUT_EP_0 = 0, USB_DW_OUT_EP_1, USB_DW_OUT_EP_2, USB_DW_OUT_EP_3, USB_DW_OUT_EP_NUM }; #define USB_DW_CORE_RST_TIMEOUT_US 10000 /* FIXME: The actual MPS depends on endpoint type and bus speed. */ #define DW_USB_MAX_PACKET_SIZE 64 /* Number of SETUP back-to-back packets */ #define USB_DW_SUP_CNT 1 /* Get Data FIFO access register */ #define USB_DW_EP_FIFO(base, idx) \ (*(uint32_t *)(POINTER_TO_UINT(base) + 0x1000 * (idx + 1))) struct usb_dw_config { struct usb_dwc2_reg *const base; struct pinctrl_dev_config *const pcfg; void (*irq_enable_func)(const struct device *dev); int (*clk_enable_func)(void); int (*pwr_on_func)(struct usb_dwc2_reg *const base); }; /* * USB endpoint private structure. */ struct usb_ep_ctrl_prv { uint8_t ep_ena; uint8_t fifo_num; uint32_t fifo_size; uint16_t mps; /* Max ep pkt size */ usb_dc_ep_callback cb;/* Endpoint callback function */ uint32_t data_len; }; static void usb_dw_isr_handler(const void *unused); /* * USB controller private structure. 
*/ struct usb_dw_ctrl_prv { usb_dc_status_callback status_cb; struct usb_ep_ctrl_prv in_ep_ctrl[USB_DW_IN_EP_NUM]; struct usb_ep_ctrl_prv out_ep_ctrl[USB_DW_OUT_EP_NUM]; int n_tx_fifos; uint8_t attached; }; #if defined(CONFIG_PINCTRL) #include <zephyr/drivers/pinctrl.h> static int usb_dw_init_pinctrl(const struct usb_dw_config *const config) { const struct pinctrl_dev_config *const pcfg = config->pcfg; int ret = 0; if (pcfg == NULL) { LOG_INF("Skip pinctrl configuration"); return 0; } ret = pinctrl_apply_state(pcfg, PINCTRL_STATE_DEFAULT); if (ret) { LOG_ERR("Failed to apply default pinctrl state (%d)", ret); } return ret; } #else static int usb_dw_init_pinctrl(const struct usb_dw_config *const config) { ARG_UNUSED(config); return 0; } #endif #define USB_DW_GET_COMPAT_QUIRK_NONE(n) NULL #define USB_DW_GET_COMPAT_CLK_QUIRK_0(n) \ COND_CODE_1(DT_INST_NODE_HAS_COMPAT(n, st_stm32f4_fsotg), \ (clk_enable_st_stm32f4_fsotg_##n), \ USB_DW_GET_COMPAT_QUIRK_NONE(n)) #define USB_DW_GET_COMPAT_PWR_QUIRK_0(n) \ COND_CODE_1(DT_INST_NODE_HAS_COMPAT(n, st_stm32f4_fsotg), \ (pwr_on_st_stm32f4_fsotg), \ USB_DW_GET_COMPAT_QUIRK_NONE(n)) #define USB_DW_PINCTRL_DT_INST_DEFINE(n) \ COND_CODE_1(DT_INST_PINCTRL_HAS_NAME(n, default), \ (PINCTRL_DT_INST_DEFINE(n)), ()) #define USB_DW_PINCTRL_DT_INST_DEV_CONFIG_GET(n) \ COND_CODE_1(DT_INST_PINCTRL_HAS_NAME(n, default), \ ((void *)PINCTRL_DT_INST_DEV_CONFIG_GET(n)), (NULL)) #define USB_DW_IRQ_FLAGS_TYPE0(n) 0 #define USB_DW_IRQ_FLAGS_TYPE1(n) DT_INST_IRQ(n, type) #define DW_IRQ_FLAGS(n) \ _CONCAT(USB_DW_IRQ_FLAGS_TYPE, DT_INST_IRQ_HAS_CELL(n, type))(n) #define USB_DW_DEVICE_DEFINE(n) \ USB_DW_PINCTRL_DT_INST_DEFINE(n); \ USB_DW_QUIRK_ST_STM32F4_FSOTG_DEFINE(n); \ \ static void usb_dw_irq_enable_func_##n(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), \ DT_INST_IRQ(n, priority), \ usb_dw_isr_handler, \ 0, \ DW_IRQ_FLAGS(n)); \ \ irq_enable(DT_INST_IRQN(n)); \ } \ \ static const struct usb_dw_config usb_dw_cfg_##n = { \ .base = 
(struct usb_dwc2_reg *)DT_INST_REG_ADDR(n), \ .pcfg = USB_DW_PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .irq_enable_func = usb_dw_irq_enable_func_##n, \ .clk_enable_func = USB_DW_GET_COMPAT_CLK_QUIRK_0(n), \ .pwr_on_func = USB_DW_GET_COMPAT_PWR_QUIRK_0(n), \ }; \ \ static struct usb_dw_ctrl_prv usb_dw_ctrl_##n; USB_DW_DEVICE_DEFINE(0) #define usb_dw_ctrl usb_dw_ctrl_0 #define usb_dw_cfg usb_dw_cfg_0 static void usb_dw_reg_dump(void) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; uint8_t i; LOG_DBG("USB registers: GOTGCTL : 0x%x GOTGINT : 0x%x GAHBCFG : " "0x%x", base->gotgctl, base->gotgint, base->gahbcfg); LOG_DBG(" GUSBCFG : 0x%x GINTSTS : 0x%x GINTMSK : 0x%x", base->gusbcfg, base->gintsts, base->gintmsk); LOG_DBG(" DCFG : 0x%x DCTL : 0x%x DSTS : 0x%x", base->dcfg, base->dctl, base->dsts); LOG_DBG(" DIEPMSK : 0x%x DOEPMSK : 0x%x DAINT : 0x%x", base->diepmsk, base->doepmsk, base->daint); LOG_DBG(" DAINTMSK: 0x%x GHWCFG1 : 0x%x GHWCFG2 : 0x%x", base->daintmsk, base->ghwcfg1, base->ghwcfg2); LOG_DBG(" GHWCFG3 : 0x%x GHWCFG4 : 0x%x", base->ghwcfg3, base->ghwcfg4); for (i = 0U; i < USB_DW_OUT_EP_NUM; i++) { LOG_DBG("\n EP %d registers: DIEPCTL : 0x%x DIEPINT : " "0x%x", i, base->in_ep[i].diepctl, base->in_ep[i].diepint); LOG_DBG(" DIEPTSIZ: 0x%x DIEPDMA : 0x%x DOEPCTL : " "0x%x", base->in_ep[i].dieptsiz, base->in_ep[i].diepdma, base->out_ep[i].doepctl); LOG_DBG(" DOEPINT : 0x%x DOEPTSIZ: 0x%x DOEPDMA : " "0x%x", base->out_ep[i].doepint, base->out_ep[i].doeptsiz, base->out_ep[i].doepdma); } } static uint8_t usb_dw_ep_is_valid(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); /* Check if ep enabled */ if ((USB_EP_DIR_IS_OUT(ep)) && ep_idx < USB_DW_OUT_EP_NUM) { return 1; } else if ((USB_EP_DIR_IS_IN(ep)) && ep_idx < USB_DW_IN_EP_NUM) { return 1; } return 0; } static uint8_t usb_dw_ep_is_enabled(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); /* Check if ep enabled */ if ((USB_EP_DIR_IS_OUT(ep)) && usb_dw_ctrl.out_ep_ctrl[ep_idx].ep_ena) { return 1; } else if 
((USB_EP_DIR_IS_IN(ep)) && usb_dw_ctrl.in_ep_ctrl[ep_idx].ep_ena) { return 1; } return 0; } static inline void usb_dw_udelay(uint32_t us) { k_busy_wait(us); } static int usb_dw_reset(void) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; uint32_t cnt = 0U; /* Wait for AHB master idle state. */ while (!(base->grstctl & USB_DWC2_GRSTCTL_AHBIDLE)) { usb_dw_udelay(1); if (++cnt > USB_DW_CORE_RST_TIMEOUT_US) { LOG_ERR("USB reset HANG! AHB Idle GRSTCTL=0x%08x", base->grstctl); return -EIO; } } /* Core Soft Reset */ cnt = 0U; base->grstctl |= USB_DWC2_GRSTCTL_CSFTRST; do { if (++cnt > USB_DW_CORE_RST_TIMEOUT_US) { LOG_DBG("USB reset HANG! Soft Reset GRSTCTL=0x%08x", base->grstctl); return -EIO; } usb_dw_udelay(1); } while (base->grstctl & USB_DWC2_GRSTCTL_CSFTRST); /* Wait for 3 PHY Clocks */ usb_dw_udelay(100); return 0; } static int usb_dw_num_dev_eps(void) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; return (base->ghwcfg2 >> 10) & 0xf; } static void usb_dw_flush_tx_fifo(int ep) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; int fnum = usb_dw_ctrl.in_ep_ctrl[ep].fifo_num; base->grstctl = (fnum << 6) | (1<<5); while (base->grstctl & (1<<5)) { } } static int usb_dw_tx_fifo_avail(int ep) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; return base->in_ep[ep].dtxfsts & USB_DWC2_DTXFSTS_INEPTXFSPCAVAIL_MASK; } /* Choose a FIFO number for an IN endpoint */ static int usb_dw_set_fifo(uint8_t ep) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; int ep_idx = USB_EP_GET_IDX(ep); volatile uint32_t *reg = &base->in_ep[ep_idx].diepctl; uint32_t val; int fifo = 0; int ded_fifo = !!(base->ghwcfg4 & USB_DWC2_GHWCFG4_DEDFIFOMODE); if (!ded_fifo) { /* No support for shared-FIFO mode yet, existing * Zephyr hardware doesn't use it */ return -ENOTSUP; } /* In dedicated-FIFO mode, all IN endpoints must have a unique * FIFO number associated with them in the TXFNUM field of * DIEPCTLx, with EP0 always being assigned to FIFO zero (the * reset default, so we don't 
touch it). * * FIXME: would be better (c.f. the dwc2 driver in Linux) to * choose a FIFO based on the hardware depth: we want the * smallest one that fits our configured maximum packet size * for the endpoint. This just picks the next available one. */ if (ep_idx != 0) { fifo = ++usb_dw_ctrl.n_tx_fifos; if (fifo >= usb_dw_num_dev_eps()) { return -EINVAL; } reg = &base->in_ep[ep_idx].diepctl; val = *reg & ~USB_DWC2_DEPCTL_TXFNUM_MASK; val |= fifo << USB_DWC2_DEPCTL_TXFNUM_POS; *reg = val; } usb_dw_ctrl.in_ep_ctrl[ep_idx].fifo_num = fifo; usb_dw_flush_tx_fifo(ep_idx); val = usb_dw_tx_fifo_avail(ep_idx); usb_dw_ctrl.in_ep_ctrl[ep_idx].fifo_size = val; return 0; } static int usb_dw_ep_set(uint8_t ep, uint32_t ep_mps, enum usb_dc_ep_transfer_type ep_type) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; volatile uint32_t *p_depctl; uint8_t ep_idx = USB_EP_GET_IDX(ep); LOG_DBG("%s ep %x, mps %d, type %d", __func__, ep, ep_mps, ep_type); if (USB_EP_DIR_IS_OUT(ep)) { p_depctl = &base->out_ep[ep_idx].doepctl; usb_dw_ctrl.out_ep_ctrl[ep_idx].mps = ep_mps; } else { p_depctl = &base->in_ep[ep_idx].diepctl; usb_dw_ctrl.in_ep_ctrl[ep_idx].mps = ep_mps; } if (!ep_idx) { /* Set max packet size for EP0 */ *p_depctl &= ~USB_DWC2_DEPCTL0_MPS_MASK; switch (ep_mps) { case 8: *p_depctl |= USB_DWC2_DEPCTL0_MPS_8 << USB_DWC2_DEPCTL_MPS_POS; break; case 16: *p_depctl |= USB_DWC2_DEPCTL0_MPS_16 << USB_DWC2_DEPCTL_MPS_POS; break; case 32: *p_depctl |= USB_DWC2_DEPCTL0_MPS_32 << USB_DWC2_DEPCTL_MPS_POS; break; case 64: *p_depctl |= USB_DWC2_DEPCTL0_MPS_64 << USB_DWC2_DEPCTL_MPS_POS; break; default: return -EINVAL; } /* No need to set EP0 type */ } else { /* Set max packet size for EP */ if (ep_mps > (USB_DWC2_DEPCTL_MPS_MASK >> USB_DWC2_DEPCTL_MPS_POS)) { return -EINVAL; } *p_depctl &= ~USB_DWC2_DEPCTL_MPS_MASK; *p_depctl |= ep_mps << USB_DWC2_DEPCTL_MPS_POS; /* Set endpoint type */ *p_depctl &= ~USB_DWC2_DEPCTL_EPTYPE_MASK; switch (ep_type) { case USB_DC_EP_CONTROL: *p_depctl |= 
USB_DWC2_DEPCTL_EPTYPE_CONTROL << USB_DWC2_DEPCTL_EPTYPE_POS; break; case USB_DC_EP_BULK: *p_depctl |= USB_DWC2_DEPCTL_EPTYPE_BULK << USB_DWC2_DEPCTL_EPTYPE_POS; break; case USB_DC_EP_INTERRUPT: *p_depctl |= USB_DWC2_DEPCTL_EPTYPE_INTERRUPT << USB_DWC2_DEPCTL_EPTYPE_POS; break; default: return -EINVAL; } /* sets the Endpoint Data PID to DATA0 */ *p_depctl |= USB_DWC2_DEPCTL_SETD0PID; } if (USB_EP_DIR_IS_IN(ep)) { int ret = usb_dw_set_fifo(ep); if (ret) { return ret; } } return 0; } static void usb_dw_prep_rx(const uint8_t ep, uint8_t setup) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; enum usb_dw_out_ep_idx ep_idx = USB_EP_GET_IDX(ep); uint32_t ep_mps = usb_dw_ctrl.out_ep_ctrl[ep_idx].mps; /* Set max RX size to EP mps so we get an interrupt * each time a packet is received */ base->out_ep[ep_idx].doeptsiz = (USB_DW_SUP_CNT << USB_DWC2_DOEPTSIZ_SUP_CNT_POS) | (1 << USB_DWC2_DEPTSIZ_PKT_CNT_POS) | ep_mps; /* Clear NAK and enable ep */ if (!setup) { base->out_ep[ep_idx].doepctl |= USB_DWC2_DEPCTL_CNAK; } base->out_ep[ep_idx].doepctl |= USB_DWC2_DEPCTL_EPENA; LOG_DBG("USB OUT EP%d armed", ep_idx); } static int usb_dw_tx(uint8_t ep, const uint8_t *const data, uint32_t data_len) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; enum usb_dw_in_ep_idx ep_idx = USB_EP_GET_IDX(ep); uint32_t max_xfer_size, max_pkt_cnt, pkt_cnt, avail_space; uint32_t ep_mps = usb_dw_ctrl.in_ep_ctrl[ep_idx].mps; unsigned int key; uint32_t i; /* Wait for FIFO space available */ do { avail_space = usb_dw_tx_fifo_avail(ep_idx); if (avail_space == usb_dw_ctrl.in_ep_ctrl[ep_idx].fifo_size) { break; } /* Make sure we don't hog the CPU */ k_yield(); } while (1); key = irq_lock(); avail_space *= 4U; if (!avail_space) { LOG_ERR("USB IN EP%d no space available, DTXFSTS %x", ep_idx, base->in_ep[ep_idx].dtxfsts); irq_unlock(key); return -EAGAIN; } /* For now tx-fifo sizes are not configured (cf usb_dw_set_fifo). 
Here * we force available fifo size to be a multiple of ep mps in order to * prevent splitting data incorrectly. */ avail_space -= avail_space % ep_mps; if (data_len > avail_space) { data_len = avail_space; } if (data_len != 0U) { /* Get max packet size and packet count for ep */ if (ep_idx == USB_DW_IN_EP_0) { max_pkt_cnt = USB_DWC2_DIEPTSIZ0_PKT_CNT_MASK >> USB_DWC2_DEPTSIZ_PKT_CNT_POS; max_xfer_size = USB_DWC2_DEPTSIZ0_XFER_SIZE_MASK >> USB_DWC2_DEPTSIZ_XFER_SIZE_POS; } else { max_pkt_cnt = USB_DWC2_DIEPTSIZn_PKT_CNT_MASK >> USB_DWC2_DEPTSIZ_PKT_CNT_POS; max_xfer_size = USB_DWC2_DEPTSIZn_XFER_SIZE_MASK >> USB_DWC2_DEPTSIZ_XFER_SIZE_POS; } /* Check if transfer len is too big */ if (data_len > max_xfer_size) { LOG_WRN("USB IN EP%d len too big (%d->%d)", ep_idx, data_len, max_xfer_size); data_len = max_xfer_size; } /* * Program the transfer size and packet count as follows: * * transfer size = N * ep_maxpacket + short_packet * pktcnt = N + (short_packet exist ? 1 : 0) */ pkt_cnt = DIV_ROUND_UP(data_len, ep_mps); if (pkt_cnt > max_pkt_cnt) { LOG_WRN("USB IN EP%d pkt count too big (%d->%d)", ep_idx, pkt_cnt, pkt_cnt); pkt_cnt = max_pkt_cnt; data_len = pkt_cnt * ep_mps; } } else { /* Zero length packet */ pkt_cnt = 1U; } /* Set number of packets and transfer size */ base->in_ep[ep_idx].dieptsiz = (pkt_cnt << USB_DWC2_DEPTSIZ_PKT_CNT_POS) | data_len; /* Clear NAK and enable ep */ base->in_ep[ep_idx].diepctl |= (USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_CNAK); /* * Write data to FIFO, make sure that we are protected against * other USB register accesses. According to "DesignWare Cores * USB 1.1/2.0 Device Subsystem-AHB/VCI Databook": "During FIFO * access, the application must not access the UDC/Subsystem * registers or vendor registers (for ULPI mode). After starting * to access a FIFO, the application must complete the transaction * before accessing the register." 
*/ for (i = 0U; i < data_len; i += 4U) { uint32_t val = data[i]; if (i + 1 < data_len) { val |= ((uint32_t)data[i+1]) << 8; } if (i + 2 < data_len) { val |= ((uint32_t)data[i+2]) << 16; } if (i + 3 < data_len) { val |= ((uint32_t)data[i+3]) << 24; } USB_DW_EP_FIFO(base, ep_idx) = val; } irq_unlock(key); LOG_DBG("USB IN EP%d write %u bytes", ep_idx, data_len); return data_len; } static int usb_dw_init(void) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; uint8_t ep; int ret; ret = usb_dw_reset(); if (ret) { return ret; } /* * Force device mode as we do no support other roles or role changes. * Wait 25ms for the change to take effect. */ base->gusbcfg |= USB_DWC2_GUSBCFG_FORCEDEVMODE; k_msleep(25); #ifdef CONFIG_USB_DW_USB_2_0 /* set the PHY interface to be 16-bit UTMI */ base->gusbcfg = (base->gusbcfg & ~USB_DWC2_GUSBCFG_PHYIF_16_BIT) | USB_DWC2_GUSBCFG_PHYIF_16_BIT; /* Set USB2.0 High Speed */ base->dcfg |= USB_DWC2_DCFG_DEVSPD_USBHS20; #else /* Set device speed to Full Speed */ base->dcfg |= USB_DWC2_DCFG_DEVSPD_USBFS1148; #endif /* Set NAK for all OUT EPs */ for (ep = 0U; ep < USB_DW_OUT_EP_NUM; ep++) { base->out_ep[ep].doepctl = USB_DWC2_DEPCTL_SNAK; } /* Enable global interrupts */ base->gintmsk = USB_DWC2_GINTSTS_OEPINT | USB_DWC2_GINTSTS_IEPINT | USB_DWC2_GINTSTS_ENUMDONE | USB_DWC2_GINTSTS_USBRST | USB_DWC2_GINTSTS_WKUPINT | USB_DWC2_GINTSTS_USBSUSP; /* Enable global interrupt */ base->gahbcfg |= USB_DWC2_GAHBCFG_GLBINTRMASK; /* Call vendor-specific function to enable peripheral */ if (usb_dw_cfg.pwr_on_func != NULL) { ret = usb_dw_cfg.pwr_on_func(base); if (ret) { return ret; } } /* Disable soft disconnect */ base->dctl &= ~USB_DWC2_DCTL_SFTDISCON; usb_dw_reg_dump(); return 0; } static void usb_dw_handle_reset(void) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; LOG_DBG("USB RESET event"); /* Inform upper layers */ if (usb_dw_ctrl.status_cb) { usb_dw_ctrl.status_cb(USB_DC_RESET, NULL); } /* Clear device address during reset. 
*/ base->dcfg &= ~USB_DWC2_DCFG_DEVADDR_MASK; /* enable global EP interrupts */ base->doepmsk = 0U; base->gintmsk |= USB_DWC2_GINTSTS_RXFLVL; base->diepmsk |= USB_DWC2_DIEPINT_XFERCOMPL; } static void usb_dw_handle_enum_done(void) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; uint32_t speed; speed = (base->dsts & ~USB_DWC2_DSTS_ENUMSPD_MASK) >> USB_DWC2_DSTS_ENUMSPD_POS; LOG_DBG("USB ENUM DONE event, %s speed detected", speed == USB_DWC2_DSTS_ENUMSPD_LS6 ? "Low" : "Full"); /* Inform upper layers */ if (usb_dw_ctrl.status_cb) { usb_dw_ctrl.status_cb(USB_DC_CONNECTED, NULL); } } /* USB ISR handler */ static inline void usb_dw_int_rx_flvl_handler(void) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; uint32_t grxstsp = base->grxstsp; uint32_t status, xfer_size; uint8_t ep_idx; usb_dc_ep_callback ep_cb; /* Packet in RX FIFO */ ep_idx = grxstsp & USB_DWC2_GRXSTSR_EPNUM_MASK; status = (grxstsp & USB_DWC2_GRXSTSR_PKTSTS_MASK) >> USB_DWC2_GRXSTSR_PKTSTS_POS; xfer_size = (grxstsp & USB_DWC2_GRXSTSR_BCNT_MASK) >> USB_DWC2_GRXSTSR_BCNT_POS; LOG_DBG("USB OUT EP%u: RX_FLVL status %u, size %u", ep_idx, status, xfer_size); usb_dw_ctrl.out_ep_ctrl[ep_idx].data_len = xfer_size; ep_cb = usb_dw_ctrl.out_ep_ctrl[ep_idx].cb; switch (status) { case USB_DWC2_GRXSTSR_PKTSTS_SETUP: /* Call the registered callback if any */ if (ep_cb) { ep_cb(USB_EP_GET_ADDR(ep_idx, USB_EP_DIR_OUT), USB_DC_EP_SETUP); } break; case USB_DWC2_GRXSTSR_PKTSTS_OUT_DATA: if (ep_cb) { ep_cb(USB_EP_GET_ADDR(ep_idx, USB_EP_DIR_OUT), USB_DC_EP_DATA_OUT); } break; case USB_DWC2_GRXSTSR_PKTSTS_OUT_DATA_DONE: case USB_DWC2_GRXSTSR_PKTSTS_SETUP_DONE: break; default: break; } } static inline void usb_dw_int_iep_handler(void) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; uint32_t ep_int_status; uint8_t ep_idx; usb_dc_ep_callback ep_cb; for (ep_idx = 0U; ep_idx < USB_DW_IN_EP_NUM; ep_idx++) { if (base->daint & USB_DWC2_DAINT_INEPINT(ep_idx)) { /* Read IN EP interrupt status */ ep_int_status = 
base->in_ep[ep_idx].diepint & base->diepmsk; /* Clear IN EP interrupts */ base->in_ep[ep_idx].diepint = ep_int_status; LOG_DBG("USB IN EP%u interrupt status: 0x%x", ep_idx, ep_int_status); ep_cb = usb_dw_ctrl.in_ep_ctrl[ep_idx].cb; if (ep_cb && (ep_int_status & USB_DWC2_DIEPINT_XFERCOMPL)) { /* Call the registered callback */ ep_cb(USB_EP_GET_ADDR(ep_idx, USB_EP_DIR_IN), USB_DC_EP_DATA_IN); } } } /* Clear interrupt. */ base->gintsts = USB_DWC2_GINTSTS_IEPINT; } static inline void usb_dw_int_oep_handler(void) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; uint32_t ep_int_status; uint8_t ep_idx; for (ep_idx = 0U; ep_idx < USB_DW_OUT_EP_NUM; ep_idx++) { if (base->daint & USB_DWC2_DAINT_OUTEPINT(ep_idx)) { /* Read OUT EP interrupt status */ ep_int_status = base->out_ep[ep_idx].doepint & base->doepmsk; /* Clear OUT EP interrupts */ base->out_ep[ep_idx].doepint = ep_int_status; LOG_DBG("USB OUT EP%u interrupt status: 0x%x\n", ep_idx, ep_int_status); } } /* Clear interrupt. */ base->gintsts = USB_DWC2_GINTSTS_OEPINT; } static void usb_dw_isr_handler(const void *unused) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; uint32_t int_status; ARG_UNUSED(unused); /* Read interrupt status */ while ((int_status = (base->gintsts & base->gintmsk))) { LOG_DBG("USB GINTSTS 0x%x", int_status); if (int_status & USB_DWC2_GINTSTS_USBRST) { /* Clear interrupt. */ base->gintsts = USB_DWC2_GINTSTS_USBRST; /* Reset detected */ usb_dw_handle_reset(); } if (int_status & USB_DWC2_GINTSTS_ENUMDONE) { /* Clear interrupt. */ base->gintsts = USB_DWC2_GINTSTS_ENUMDONE; /* Enumeration done detected */ usb_dw_handle_enum_done(); } if (int_status & USB_DWC2_GINTSTS_USBSUSP) { /* Clear interrupt. */ base->gintsts = USB_DWC2_GINTSTS_USBSUSP; if (usb_dw_ctrl.status_cb) { usb_dw_ctrl.status_cb(USB_DC_SUSPEND, NULL); } } if (int_status & USB_DWC2_GINTSTS_WKUPINT) { /* Clear interrupt. 
*/ base->gintsts = USB_DWC2_GINTSTS_WKUPINT; if (usb_dw_ctrl.status_cb) { usb_dw_ctrl.status_cb(USB_DC_RESUME, NULL); } } if (int_status & USB_DWC2_GINTSTS_RXFLVL) { /* Packet in RX FIFO */ usb_dw_int_rx_flvl_handler(); } if (int_status & USB_DWC2_GINTSTS_IEPINT) { /* IN EP interrupt */ usb_dw_int_iep_handler(); } if (int_status & USB_DWC2_GINTSTS_OEPINT) { /* No OUT interrupt expected in FIFO mode, * just clear interrupt */ usb_dw_int_oep_handler(); } } } int usb_dc_attach(void) { int ret; if (usb_dw_ctrl.attached) { return 0; } if (usb_dw_cfg.clk_enable_func != NULL) { ret = usb_dw_cfg.clk_enable_func(); if (ret) { return ret; } } ret = usb_dw_init_pinctrl(&usb_dw_cfg); if (ret) { return ret; } ret = usb_dw_init(); if (ret) { return ret; } /* Connect and enable USB interrupt */ usb_dw_cfg.irq_enable_func(NULL); usb_dw_ctrl.attached = 1U; return 0; } int usb_dc_detach(void) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; if (!usb_dw_ctrl.attached) { return 0; } irq_disable(DT_INST_IRQN(0)); /* Enable soft disconnect */ base->dctl |= USB_DWC2_DCTL_SFTDISCON; usb_dw_ctrl.attached = 0U; return 0; } int usb_dc_reset(void) { int ret; ret = usb_dw_reset(); /* Clear private data */ (void)memset(&usb_dw_ctrl, 0, sizeof(usb_dw_ctrl)); return ret; } int usb_dc_set_address(const uint8_t addr) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; if (addr > (USB_DWC2_DCFG_DEVADDR_MASK >> USB_DWC2_DCFG_DEVADDR_POS)) { return -EINVAL; } base->dcfg &= ~USB_DWC2_DCFG_DEVADDR_MASK; base->dcfg |= addr << USB_DWC2_DCFG_DEVADDR_POS; return 0; } int usb_dc_ep_check_cap(const struct usb_dc_ep_cfg_data * const cfg) { uint8_t ep_idx = USB_EP_GET_IDX(cfg->ep_addr); LOG_DBG("ep %x, mps %d, type %d", cfg->ep_addr, cfg->ep_mps, cfg->ep_type); if ((cfg->ep_type == USB_DC_EP_CONTROL) && ep_idx) { LOG_ERR("invalid endpoint configuration"); return -1; } if (cfg->ep_mps > DW_USB_MAX_PACKET_SIZE) { LOG_WRN("unsupported packet size"); return -1; } if (USB_EP_DIR_IS_OUT(cfg->ep_addr) && ep_idx 
>= USB_DW_OUT_EP_NUM) { LOG_WRN("OUT endpoint address out of range"); return -1; } if (USB_EP_DIR_IS_IN(cfg->ep_addr) && ep_idx >= USB_DW_IN_EP_NUM) { LOG_WRN("IN endpoint address out of range"); return -1; } return 0; } int usb_dc_ep_configure(const struct usb_dc_ep_cfg_data * const ep_cfg) { uint8_t ep; if (!ep_cfg) { return -EINVAL; } ep = ep_cfg->ep_addr; if (!usb_dw_ctrl.attached || !usb_dw_ep_is_valid(ep)) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep); return -EINVAL; } usb_dw_ep_set(ep, ep_cfg->ep_mps, ep_cfg->ep_type); return 0; } int usb_dc_ep_set_stall(const uint8_t ep) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; uint8_t ep_idx = USB_EP_GET_IDX(ep); if (!usb_dw_ctrl.attached || !usb_dw_ep_is_valid(ep)) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep); return -EINVAL; } if (USB_EP_DIR_IS_OUT(ep)) { base->out_ep[ep_idx].doepctl |= USB_DWC2_DEPCTL_STALL; } else { base->in_ep[ep_idx].diepctl |= USB_DWC2_DEPCTL_STALL; } return 0; } int usb_dc_ep_clear_stall(const uint8_t ep) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; uint8_t ep_idx = USB_EP_GET_IDX(ep); if (!usb_dw_ctrl.attached || !usb_dw_ep_is_valid(ep)) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep); return -EINVAL; } if (!ep_idx) { /* Not possible to clear stall for EP0 */ return -EINVAL; } if (USB_EP_DIR_IS_OUT(ep)) { base->out_ep[ep_idx].doepctl &= ~USB_DWC2_DEPCTL_STALL; } else { base->in_ep[ep_idx].diepctl &= ~USB_DWC2_DEPCTL_STALL; } return 0; } int usb_dc_ep_halt(const uint8_t ep) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; uint8_t ep_idx = USB_EP_GET_IDX(ep); volatile uint32_t *p_depctl; if (!usb_dw_ctrl.attached || !usb_dw_ep_is_valid(ep)) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep); return -EINVAL; } if (!ep_idx) { /* Cannot disable EP0, just set stall */ usb_dc_ep_set_stall(ep); } else { if (USB_EP_DIR_IS_OUT(ep)) { p_depctl = &base->out_ep[ep_idx].doepctl; } else { p_depctl = &base->in_ep[ep_idx].diepctl; } /* Set STALL 
and disable endpoint if enabled */ if (*p_depctl & USB_DWC2_DEPCTL_EPENA) { *p_depctl |= USB_DWC2_DEPCTL_EPDIS | USB_DWC2_DEPCTL_STALL; } else { *p_depctl |= USB_DWC2_DEPCTL_STALL; } } return 0; } int usb_dc_ep_is_stalled(const uint8_t ep, uint8_t *const stalled) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; uint8_t ep_idx = USB_EP_GET_IDX(ep); if (!usb_dw_ctrl.attached || !usb_dw_ep_is_valid(ep)) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep); return -EINVAL; } if (!stalled) { return -EINVAL; } *stalled = 0U; if (USB_EP_DIR_IS_OUT(ep)) { if (base->out_ep[ep_idx].doepctl & USB_DWC2_DEPCTL_STALL) { *stalled = 1U; } } else { if (base->in_ep[ep_idx].diepctl & USB_DWC2_DEPCTL_STALL) { *stalled = 1U; } } return 0; } int usb_dc_ep_enable(const uint8_t ep) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; uint8_t ep_idx = USB_EP_GET_IDX(ep); if (!usb_dw_ctrl.attached || !usb_dw_ep_is_valid(ep)) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep); return -EINVAL; } /* enable EP interrupts */ if (USB_EP_DIR_IS_OUT(ep)) { base->daintmsk |= USB_DWC2_DAINT_OUTEPINT(ep_idx); } else { base->daintmsk |= USB_DWC2_DAINT_INEPINT(ep_idx); } /* Activate Ep */ if (USB_EP_DIR_IS_OUT(ep)) { base->out_ep[ep_idx].doepctl |= USB_DWC2_DEPCTL_USBACTEP; usb_dw_ctrl.out_ep_ctrl[ep_idx].ep_ena = 1U; } else { base->in_ep[ep_idx].diepctl |= USB_DWC2_DEPCTL_USBACTEP; usb_dw_ctrl.in_ep_ctrl[ep_idx].ep_ena = 1U; } if (USB_EP_DIR_IS_OUT(ep) && usb_dw_ctrl.out_ep_ctrl[ep_idx].cb != usb_transfer_ep_callback) { /* Start reading now, except for transfer managed eps */ usb_dw_prep_rx(ep, 0); } return 0; } int usb_dc_ep_disable(const uint8_t ep) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; uint8_t ep_idx = USB_EP_GET_IDX(ep); if (!usb_dw_ctrl.attached || !usb_dw_ep_is_valid(ep)) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep); return -EINVAL; } /* Disable EP interrupts */ if (USB_EP_DIR_IS_OUT(ep)) { base->daintmsk &= ~USB_DWC2_DAINT_OUTEPINT(ep_idx); 
base->doepmsk &= ~USB_DWC2_DOEPINT_SETUP; } else { base->daintmsk &= ~USB_DWC2_DAINT_INEPINT(ep_idx); base->diepmsk &= ~USB_DWC2_DIEPINT_XFERCOMPL; base->gintmsk &= ~USB_DWC2_GINTSTS_RXFLVL; } /* De-activate, disable and set NAK for Ep */ if (USB_EP_DIR_IS_OUT(ep)) { base->out_ep[ep_idx].doepctl &= ~(USB_DWC2_DEPCTL_USBACTEP | USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_SNAK); usb_dw_ctrl.out_ep_ctrl[ep_idx].ep_ena = 0U; } else { base->in_ep[ep_idx].diepctl &= ~(USB_DWC2_DEPCTL_USBACTEP | USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_SNAK); usb_dw_ctrl.in_ep_ctrl[ep_idx].ep_ena = 0U; } return 0; } int usb_dc_ep_flush(const uint8_t ep) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; uint8_t ep_idx = USB_EP_GET_IDX(ep); uint32_t cnt; if (!usb_dw_ctrl.attached || !usb_dw_ep_is_valid(ep)) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep); return -EINVAL; } if (USB_EP_DIR_IS_OUT(ep)) { /* RX FIFO is global and cannot be flushed per EP */ return -EINVAL; } /* Each endpoint has dedicated Tx FIFO */ base->grstctl |= ep_idx << USB_DWC2_GRSTCTL_TXFNUM_POS; base->grstctl |= USB_DWC2_GRSTCTL_TXFFLSH; cnt = 0U; do { if (++cnt > USB_DW_CORE_RST_TIMEOUT_US) { LOG_ERR("USB TX FIFO flush HANG!"); return -EIO; } usb_dw_udelay(1); } while (base->grstctl & USB_DWC2_GRSTCTL_TXFFLSH); return 0; } int usb_dc_ep_write(const uint8_t ep, const uint8_t *const data, const uint32_t data_len, uint32_t * const ret_bytes) { int ret; if (!usb_dw_ctrl.attached || !usb_dw_ep_is_valid(ep)) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep); return -EINVAL; } /* Check if IN ep */ if (USB_EP_GET_DIR(ep) != USB_EP_DIR_IN) { return -EINVAL; } /* Check if ep enabled */ if (!usb_dw_ep_is_enabled(ep)) { return -EINVAL; } ret = usb_dw_tx(ep, data, data_len); if (ret < 0) { return ret; } if (ret_bytes) { *ret_bytes = ret; } return 0; } int usb_dc_ep_read_wait(uint8_t ep, uint8_t *data, uint32_t max_data_len, uint32_t *read_bytes) { struct usb_dwc2_reg *const base = usb_dw_cfg.base; uint8_t ep_idx = 
USB_EP_GET_IDX(ep); uint32_t i, j, data_len, bytes_to_copy; if (!usb_dw_ctrl.attached || !usb_dw_ep_is_valid(ep)) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep); return -EINVAL; } /* Check if OUT ep */ if (USB_EP_GET_DIR(ep) != USB_EP_DIR_OUT) { LOG_ERR("Wrong endpoint direction"); return -EINVAL; } /* Allow to read 0 bytes */ if (!data && max_data_len) { LOG_ERR("Wrong arguments"); return -EINVAL; } /* Check if ep enabled */ if (!usb_dw_ep_is_enabled(ep)) { LOG_ERR("Not enabled endpoint"); return -EINVAL; } data_len = usb_dw_ctrl.out_ep_ctrl[ep_idx].data_len; if (!data && !max_data_len) { /* When both buffer and max data to read are zero return * the available data in buffer */ if (read_bytes) { *read_bytes = data_len; } return 0; } if (data_len > max_data_len) { LOG_ERR("Not enough room to copy all the rcvd data!"); bytes_to_copy = max_data_len; } else { bytes_to_copy = data_len; } LOG_DBG("Read EP%d, req %d, read %d bytes", ep, max_data_len, bytes_to_copy); /* Data in the FIFOs is always stored per 32-bit words */ for (i = 0U; i < (bytes_to_copy & ~0x3); i += 4U) { *(uint32_t *)(data + i) = USB_DW_EP_FIFO(base, ep_idx); } if (bytes_to_copy & 0x3) { /* Not multiple of 4 */ uint32_t last_dw = USB_DW_EP_FIFO(base, ep_idx); for (j = 0U; j < (bytes_to_copy & 0x3); j++) { *(data + i + j) = (sys_cpu_to_le32(last_dw) >> (j * 8U)) & 0xFF; } } usb_dw_ctrl.out_ep_ctrl[ep_idx].data_len -= bytes_to_copy; if (read_bytes) { *read_bytes = bytes_to_copy; } return 0; } int usb_dc_ep_read_continue(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (!usb_dw_ctrl.attached || !usb_dw_ep_is_valid(ep)) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep); return -EINVAL; } /* Check if OUT ep */ if (USB_EP_GET_DIR(ep) != USB_EP_DIR_OUT) { LOG_ERR("Wrong endpoint direction"); return -EINVAL; } if (!usb_dw_ctrl.out_ep_ctrl[ep_idx].data_len) { usb_dw_prep_rx(ep_idx, 0); } return 0; } int usb_dc_ep_read(const uint8_t ep, uint8_t *const data, const uint32_t max_data_len, 
uint32_t * const read_bytes) { if (usb_dc_ep_read_wait(ep, data, max_data_len, read_bytes) != 0) { return -EINVAL; } if (!data && !max_data_len) { /* When both buffer and max data to read are zero the above * call would fetch the data len and we simply return. */ return 0; } if (usb_dc_ep_read_continue(ep) != 0) { return -EINVAL; } return 0; } int usb_dc_ep_set_callback(const uint8_t ep, const usb_dc_ep_callback cb) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (!usb_dw_ctrl.attached || !usb_dw_ep_is_valid(ep)) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep); return -EINVAL; } if (USB_EP_DIR_IS_IN(ep)) { usb_dw_ctrl.in_ep_ctrl[ep_idx].cb = cb; } else { usb_dw_ctrl.out_ep_ctrl[ep_idx].cb = cb; } return 0; } void usb_dc_set_status_callback(const usb_dc_status_callback cb) { usb_dw_ctrl.status_cb = cb; } int usb_dc_ep_mps(const uint8_t ep) { enum usb_dw_out_ep_idx ep_idx = USB_EP_GET_IDX(ep); if (!usb_dw_ctrl.attached || !usb_dw_ep_is_valid(ep)) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep); return -EINVAL; } if (USB_EP_DIR_IS_OUT(ep)) { return usb_dw_ctrl.out_ep_ctrl[ep_idx].mps; } else { return usb_dw_ctrl.in_ep_ctrl[ep_idx].mps; } } ```
/content/code_sandbox/drivers/usb/device/usb_dc_dw.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
10,987
```c /* * */ /** * @file * @brief USB device controller shim driver for STM32 devices * * This driver uses the STM32 Cube low level drivers to talk to the USB * device controller on the STM32 family of devices using the * STM32Cube HAL layer. */ #include <soc.h> #include <stm32_ll_bus.h> #include <stm32_ll_pwr.h> #include <stm32_ll_rcc.h> #include <stm32_ll_system.h> #include <string.h> #include <zephyr/usb/usb_device.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> #include <zephyr/sys/util.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/pinctrl.h> #include "stm32_hsem.h" #define LOG_LEVEL CONFIG_USB_DRIVER_LOG_LEVEL #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(usb_dc_stm32); #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_otgfs) && DT_HAS_COMPAT_STATUS_OKAY(st_stm32_otghs) #error "Only one interface should be enabled at a time, OTG FS or OTG HS" #endif /* * Vbus sensing is determined based on the presence of the hardware detection * pin(s) in the device tree. E.g: pinctrl-0 = <&usb_otg_fs_vbus_pa9 ...>; * * The detection pins are dependent on the enabled USB driver and the physical * interface(s) offered by the hardware. These are mapped to PA9 and/or PB13 * (subject to MCU), being the former the most widespread option. 
*/ #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_otghs) #define DT_DRV_COMPAT st_stm32_otghs #define USB_IRQ_NAME otghs #define USB_VBUS_SENSING (DT_NODE_EXISTS(DT_CHILD(DT_NODELABEL(pinctrl), usb_otg_hs_vbus_pa9)) || \ DT_NODE_EXISTS(DT_CHILD(DT_NODELABEL(pinctrl), usb_otg_hs_vbus_pb13))) #elif DT_HAS_COMPAT_STATUS_OKAY(st_stm32_otgfs) #define DT_DRV_COMPAT st_stm32_otgfs #define USB_IRQ_NAME otgfs #define USB_VBUS_SENSING DT_NODE_EXISTS(DT_CHILD(DT_NODELABEL(pinctrl), usb_otg_fs_vbus_pa9)) #elif DT_HAS_COMPAT_STATUS_OKAY(st_stm32_usb) #define DT_DRV_COMPAT st_stm32_usb #define USB_IRQ_NAME usb #define USB_VBUS_SENSING false #endif #define USB_BASE_ADDRESS DT_INST_REG_ADDR(0) #define USB_IRQ DT_INST_IRQ_BY_NAME(0, USB_IRQ_NAME, irq) #define USB_IRQ_PRI DT_INST_IRQ_BY_NAME(0, USB_IRQ_NAME, priority) #define USB_NUM_BIDIR_ENDPOINTS DT_INST_PROP(0, num_bidir_endpoints) #define USB_RAM_SIZE DT_INST_PROP(0, ram_size) static const struct stm32_pclken pclken[] = STM32_DT_INST_CLOCKS(0); #if DT_INST_NODE_HAS_PROP(0, maximum_speed) #define USB_MAXIMUM_SPEED DT_INST_PROP(0, maximum_speed) #endif PINCTRL_DT_INST_DEFINE(0); static const struct pinctrl_dev_config *usb_pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0); #define USB_OTG_HS_EMB_PHY (DT_HAS_COMPAT_STATUS_OKAY(st_stm32_usbphyc) && \ DT_HAS_COMPAT_STATUS_OKAY(st_stm32_otghs)) #define USB_OTG_HS_ULPI_PHY (DT_HAS_COMPAT_STATUS_OKAY(usb_ulpi_phy) && \ DT_HAS_COMPAT_STATUS_OKAY(st_stm32_otghs)) #if USB_OTG_HS_ULPI_PHY static const struct gpio_dt_spec ulpi_reset = GPIO_DT_SPEC_GET_OR(DT_PHANDLE(DT_INST(0, st_stm32_otghs), phys), reset_gpios, {0}); #endif /* * USB, USB_OTG_FS and USB_DRD_FS are defined in STM32Cube HAL and allows to * distinguish between two kind of USB DC. STM32 F0, F3, L0 and G4 series * support USB device controller. STM32 F4 and F7 series support USB_OTG_FS * device controller. STM32 F1 and L4 series support either USB or USB_OTG_FS * device controller.STM32 G0 series supports USB_DRD_FS device controller. 
* * WARNING: Don't mix USB defined in STM32Cube HAL and CONFIG_USB_* from Zephyr * Kconfig system. */ #if defined(USB) || defined(USB_DRD_FS) #define EP0_MPS 64U #define EP_MPS 64U /* * USB BTABLE is stored in the PMA. The size of BTABLE is 4 bytes * per endpoint. * */ #define USB_BTABLE_SIZE (8 * USB_NUM_BIDIR_ENDPOINTS) #else /* USB_OTG_FS */ /* * STM32L4 series USB LL API doesn't provide HIGH and HIGH_IN_FULL speed * defines. */ #if defined(CONFIG_SOC_SERIES_STM32L4X) #define USB_OTG_SPEED_HIGH 0U #define USB_OTG_SPEED_HIGH_IN_FULL 1U #endif /* CONFIG_SOC_SERIES_STM32L4X */ #define EP0_MPS USB_OTG_MAX_EP0_SIZE #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_otghs) #define EP_MPS USB_OTG_HS_MAX_PACKET_SIZE #elif DT_HAS_COMPAT_STATUS_OKAY(st_stm32_otgfs) || DT_HAS_COMPAT_STATUS_OKAY(st_stm32_usb) #define EP_MPS USB_OTG_FS_MAX_PACKET_SIZE #endif /* We need n TX IN FIFOs */ #define TX_FIFO_NUM USB_NUM_BIDIR_ENDPOINTS /* We need a minimum size for RX FIFO - exact number seemingly determined through trial and error */ #define RX_FIFO_EP_WORDS 160 /* Allocate FIFO memory evenly between the TX FIFOs */ /* except the first TX endpoint need only 64 bytes */ #define TX_FIFO_EP_0_WORDS 16 #define TX_FIFO_WORDS (USB_RAM_SIZE / 4 - RX_FIFO_EP_WORDS - TX_FIFO_EP_0_WORDS) /* Number of words for each remaining TX endpoint FIFO */ #define TX_FIFO_EP_WORDS (TX_FIFO_WORDS / (TX_FIFO_NUM - 1)) #endif /* USB */ /* Size of a USB SETUP packet */ #define SETUP_SIZE 8 /* Helper macros to make it easier to work with endpoint numbers */ #define EP0_IDX 0 #define EP0_IN (EP0_IDX | USB_EP_DIR_IN) #define EP0_OUT (EP0_IDX | USB_EP_DIR_OUT) /* Endpoint state */ struct usb_dc_stm32_ep_state { uint16_t ep_mps; /** Endpoint max packet size */ uint16_t ep_pma_buf_len; /** Previously allocated buffer size */ uint8_t ep_type; /** Endpoint type (STM32 HAL enum) */ uint8_t ep_stalled; /** Endpoint stall flag */ usb_dc_ep_callback cb; /** Endpoint callback function */ uint32_t read_count; /** Number of bytes in 
read buffer */ uint32_t read_offset; /** Current offset in read buffer */ struct k_sem write_sem; /** Write boolean semaphore */ }; /* Driver state */ struct usb_dc_stm32_state { PCD_HandleTypeDef pcd; /* Storage for the HAL_PCD api */ usb_dc_status_callback status_cb; /* Status callback */ struct usb_dc_stm32_ep_state out_ep_state[USB_NUM_BIDIR_ENDPOINTS]; struct usb_dc_stm32_ep_state in_ep_state[USB_NUM_BIDIR_ENDPOINTS]; uint8_t ep_buf[USB_NUM_BIDIR_ENDPOINTS][EP_MPS]; #if defined(USB) || defined(USB_DRD_FS) uint32_t pma_offset; #endif /* USB */ }; static struct usb_dc_stm32_state usb_dc_stm32_state; /* Internal functions */ static struct usb_dc_stm32_ep_state *usb_dc_stm32_get_ep_state(uint8_t ep) { struct usb_dc_stm32_ep_state *ep_state_base; if (USB_EP_GET_IDX(ep) >= USB_NUM_BIDIR_ENDPOINTS) { return NULL; } if (USB_EP_DIR_IS_OUT(ep)) { ep_state_base = usb_dc_stm32_state.out_ep_state; } else { ep_state_base = usb_dc_stm32_state.in_ep_state; } return ep_state_base + USB_EP_GET_IDX(ep); } static void usb_dc_stm32_isr(const void *arg) { HAL_PCD_IRQHandler(&usb_dc_stm32_state.pcd); } #ifdef CONFIG_USB_DEVICE_SOF void HAL_PCD_SOFCallback(PCD_HandleTypeDef *hpcd) { usb_dc_stm32_state.status_cb(USB_DC_SOF, NULL); } #endif static int usb_dc_stm32_clock_enable(void) { const struct device *const clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); if (!device_is_ready(clk)) { LOG_ERR("clock control device not ready"); return -ENODEV; } #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_otghs) && defined(CONFIG_SOC_SERIES_STM32U5X) /* Sequence to enable the power of the OTG HS on a stm32U5 serie : Enable VDDUSB */ bool pwr_clk = LL_AHB3_GRP1_IsEnabledClock(LL_AHB3_GRP1_PERIPH_PWR); if (!pwr_clk) { LL_AHB3_GRP1_EnableClock(LL_AHB3_GRP1_PERIPH_PWR); } /* Check that power range is 1 or 2 */ if (LL_PWR_GetRegulVoltageScaling() < LL_PWR_REGU_VOLTAGE_SCALE2) { LOG_ERR("Wrong Power range to use USB OTG HS"); return -EIO; } LL_PWR_EnableVddUSB(); /* Configure VOSR register of USB 
HSTransceiverSupply(); */ LL_PWR_EnableUSBPowerSupply(); LL_PWR_EnableUSBEPODBooster(); while (LL_PWR_IsActiveFlag_USBBOOST() != 1) { /* Wait for USB EPOD BOOST ready */ } /* Leave the PWR clock in its initial position */ if (!pwr_clk) { LL_AHB3_GRP1_DisableClock(LL_AHB3_GRP1_PERIPH_PWR); } /* Set the OTG PHY reference clock selection (through SYSCFG) block */ LL_APB3_GRP1_EnableClock(LL_APB3_GRP1_PERIPH_SYSCFG); HAL_SYSCFG_SetOTGPHYReferenceClockSelection(SYSCFG_OTG_HS_PHY_CLK_SELECT_1); /* Configuring the SYSCFG registers OTG_HS PHY : OTG_HS PHY enable*/ HAL_SYSCFG_EnableOTGPHY(SYSCFG_OTG_HS_PHY_ENABLE); #elif defined(PWR_USBSCR_USB33SV) || defined(PWR_SVMCR_USV) /* * VDDUSB independent USB supply (PWR clock is on) * with LL_PWR_EnableVDDUSB function (higher case) */ LL_PWR_EnableVDDUSB(); #endif if (DT_INST_NUM_CLOCKS(0) > 1) { if (clock_control_configure(clk, (clock_control_subsys_t)&pclken[1], NULL) != 0) { LOG_ERR("Could not select USB domain clock"); return -EIO; } } if (clock_control_on(clk, (clock_control_subsys_t)&pclken[0]) != 0) { LOG_ERR("Unable to enable USB clock"); return -EIO; } if (IS_ENABLED(CONFIG_USB_DC_STM32_CLOCK_CHECK)) { uint32_t usb_clock_rate; if (clock_control_get_rate(clk, (clock_control_subsys_t)&pclken[1], &usb_clock_rate) != 0) { LOG_ERR("Failed to get USB domain clock rate"); return -EIO; } if (usb_clock_rate != MHZ(48)) { LOG_ERR("USB Clock is not 48MHz (%d)", usb_clock_rate); return -ENOTSUP; } } /* Previous check won't work in case of F1/F3. 
Add build time check */ #if defined(RCC_CFGR_OTGFSPRE) || defined(RCC_CFGR_USBPRE) #if (MHZ(48) == CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC) && !defined(STM32_PLL_USBPRE) /* PLL output clock is set to 48MHz, it should not be divided */ #warning USBPRE/OTGFSPRE should be set in rcc node #endif #endif /* RCC_CFGR_OTGFSPRE / RCC_CFGR_USBPRE */ #if USB_OTG_HS_ULPI_PHY #if defined(CONFIG_SOC_SERIES_STM32H7X) LL_AHB1_GRP1_EnableClock(LL_AHB1_GRP1_PERIPH_USB1OTGHSULPI); #else LL_AHB1_GRP1_EnableClock(LL_AHB1_GRP1_PERIPH_OTGHSULPI); #endif #elif DT_HAS_COMPAT_STATUS_OKAY(st_stm32_otghs) /* USB_OTG_HS_ULPI_PHY */ /* Disable ULPI interface (for external high-speed PHY) clock in sleep/low-power mode. * It is disabled by default in run power mode, no need to disable it. */ #if defined(CONFIG_SOC_SERIES_STM32H7X) LL_AHB1_GRP1_DisableClockSleep(LL_AHB1_GRP1_PERIPH_USB1OTGHSULPI); #elif defined(CONFIG_SOC_SERIES_STM32U5X) LL_AHB2_GRP1_EnableClock(LL_AHB2_GRP1_PERIPH_USBPHY); /* Both OTG HS and USBPHY sleep clock MUST be disabled here at the same time */ LL_AHB2_GRP1_DisableClockStopSleep(LL_AHB2_GRP1_PERIPH_OTG_HS || LL_AHB2_GRP1_PERIPH_USBPHY); #else LL_AHB1_GRP1_DisableClockLowPower(LL_AHB1_GRP1_PERIPH_OTGHSULPI); #endif #if USB_OTG_HS_EMB_PHY LL_APB2_GRP1_EnableClock(LL_APB2_GRP1_PERIPH_OTGPHYC); #endif #endif /* USB_OTG_HS_ULPI_PHY */ return 0; } static int usb_dc_stm32_clock_disable(void) { const struct device *clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); if (clock_control_off(clk, (clock_control_subsys_t)&pclken[0]) != 0) { LOG_ERR("Unable to disable USB clock"); return -EIO; } #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_otghs) && defined(CONFIG_SOC_SERIES_STM32U5X) LL_AHB2_GRP1_DisableClock(LL_AHB2_GRP1_PERIPH_USBPHY); #endif return 0; } #if defined(USB_OTG_FS) || defined(USB_OTG_HS) static uint32_t usb_dc_stm32_get_maximum_speed(void) { /* * If max-speed is not passed via DT, set it to USB controller's * maximum hardware capability. 
*/ #if USB_OTG_HS_EMB_PHY || USB_OTG_HS_ULPI_PHY uint32_t speed = USB_OTG_SPEED_HIGH; #else uint32_t speed = USB_OTG_SPEED_FULL; #endif #ifdef USB_MAXIMUM_SPEED if (!strncmp(USB_MAXIMUM_SPEED, "high-speed", 10)) { speed = USB_OTG_SPEED_HIGH; } else if (!strncmp(USB_MAXIMUM_SPEED, "full-speed", 10)) { #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(USB_OTG_HS_EMB_PHY) speed = USB_OTG_SPEED_HIGH_IN_FULL; #else speed = USB_OTG_SPEED_FULL; #endif } else { LOG_DBG("Unsupported maximum speed defined in device tree. " "USB controller will default to its maximum HW " "capability"); } #endif return speed; } #endif /* USB_OTG_FS || USB_OTG_HS */ static int usb_dc_stm32_init(void) { HAL_StatusTypeDef status; int ret; unsigned int i; #if defined(USB) || defined(USB_DRD_FS) #ifdef USB usb_dc_stm32_state.pcd.Instance = USB; #else usb_dc_stm32_state.pcd.Instance = USB_DRD_FS; #endif usb_dc_stm32_state.pcd.Init.speed = PCD_SPEED_FULL; usb_dc_stm32_state.pcd.Init.dev_endpoints = USB_NUM_BIDIR_ENDPOINTS; usb_dc_stm32_state.pcd.Init.phy_itface = PCD_PHY_EMBEDDED; usb_dc_stm32_state.pcd.Init.ep0_mps = PCD_EP0MPS_64; usb_dc_stm32_state.pcd.Init.low_power_enable = 0; #else /* USB_OTG_FS || USB_OTG_HS */ #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_otghs) usb_dc_stm32_state.pcd.Instance = USB_OTG_HS; #else usb_dc_stm32_state.pcd.Instance = USB_OTG_FS; #endif usb_dc_stm32_state.pcd.Init.dev_endpoints = USB_NUM_BIDIR_ENDPOINTS; usb_dc_stm32_state.pcd.Init.speed = usb_dc_stm32_get_maximum_speed(); #if USB_OTG_HS_EMB_PHY usb_dc_stm32_state.pcd.Init.phy_itface = USB_OTG_HS_EMBEDDED_PHY; #elif USB_OTG_HS_ULPI_PHY usb_dc_stm32_state.pcd.Init.phy_itface = USB_OTG_ULPI_PHY; #else usb_dc_stm32_state.pcd.Init.phy_itface = PCD_PHY_EMBEDDED; #endif usb_dc_stm32_state.pcd.Init.ep0_mps = USB_OTG_MAX_EP0_SIZE; usb_dc_stm32_state.pcd.Init.vbus_sensing_enable = USB_VBUS_SENSING ? 
ENABLE : DISABLE; #ifndef CONFIG_SOC_SERIES_STM32F1X usb_dc_stm32_state.pcd.Init.dma_enable = DISABLE; #endif #endif /* USB */ #ifdef CONFIG_USB_DEVICE_SOF usb_dc_stm32_state.pcd.Init.Sof_enable = 1; #endif /* CONFIG_USB_DEVICE_SOF */ #if defined(CONFIG_SOC_SERIES_STM32H7X) #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_otgfs) /* The USB2 controller only works in FS mode, but the ULPI clock needs * to be disabled in sleep mode for it to work. For the USB1 * controller, as it is an HS one, the clock is disabled in the common * path. */ LL_AHB1_GRP1_DisableClockSleep(LL_AHB1_GRP1_PERIPH_USB2OTGHSULPI); #endif LL_PWR_EnableUSBVoltageDetector(); /* Per AN2606: USBREGEN not supported when running in FS mode. */ LL_PWR_DisableUSBReg(); while (!LL_PWR_IsActiveFlag_USB()) { LOG_INF("PWR not active yet"); k_sleep(K_MSEC(100)); } #endif LOG_DBG("Pinctrl signals configuration"); ret = pinctrl_apply_state(usb_pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { LOG_ERR("USB pinctrl setup failed (%d)", ret); return ret; } LOG_DBG("HAL_PCD_Init"); status = HAL_PCD_Init(&usb_dc_stm32_state.pcd); if (status != HAL_OK) { LOG_ERR("PCD_Init failed, %d", (int)status); return -EIO; } /* On a soft reset force USB to reset first and switch it off * so the USB connection can get re-initialized */ LOG_DBG("HAL_PCD_Stop"); status = HAL_PCD_Stop(&usb_dc_stm32_state.pcd); if (status != HAL_OK) { LOG_ERR("PCD_Stop failed, %d", (int)status); return -EIO; } LOG_DBG("HAL_PCD_Start"); status = HAL_PCD_Start(&usb_dc_stm32_state.pcd); if (status != HAL_OK) { LOG_ERR("PCD_Start failed, %d", (int)status); return -EIO; } usb_dc_stm32_state.out_ep_state[EP0_IDX].ep_mps = EP0_MPS; usb_dc_stm32_state.out_ep_state[EP0_IDX].ep_type = EP_TYPE_CTRL; usb_dc_stm32_state.in_ep_state[EP0_IDX].ep_mps = EP0_MPS; usb_dc_stm32_state.in_ep_state[EP0_IDX].ep_type = EP_TYPE_CTRL; #if defined(USB) || defined(USB_DRD_FS) /* Start PMA configuration for the endpoints after the BTABLE. 
*/ usb_dc_stm32_state.pma_offset = USB_BTABLE_SIZE; for (i = 0U; i < USB_NUM_BIDIR_ENDPOINTS; i++) { k_sem_init(&usb_dc_stm32_state.in_ep_state[i].write_sem, 1, 1); } #else /* USB_OTG_FS */ /* TODO: make this dynamic (depending usage) */ HAL_PCDEx_SetRxFiFo(&usb_dc_stm32_state.pcd, RX_FIFO_EP_WORDS); for (i = 0U; i < USB_NUM_BIDIR_ENDPOINTS; i++) { if (i == 0) { /* first endpoint need only 64 byte for EP_TYPE_CTRL */ HAL_PCDEx_SetTxFiFo(&usb_dc_stm32_state.pcd, i, TX_FIFO_EP_0_WORDS); } else { HAL_PCDEx_SetTxFiFo(&usb_dc_stm32_state.pcd, i, TX_FIFO_EP_WORDS); } k_sem_init(&usb_dc_stm32_state.in_ep_state[i].write_sem, 1, 1); } #endif /* USB */ IRQ_CONNECT(USB_IRQ, USB_IRQ_PRI, usb_dc_stm32_isr, 0, 0); irq_enable(USB_IRQ); return 0; } /* Zephyr USB device controller API implementation */ int usb_dc_attach(void) { int ret; LOG_DBG(""); #ifdef SYSCFG_CFGR1_USB_IT_RMP /* * STM32F302/F303: USB IRQ collides with CAN_1 IRQ (14.1.3, RM0316) * Remap IRQ by default to enable use of both IPs simultaneoulsy * This should be done before calling any HAL function */ if (LL_APB2_GRP1_IsEnabledClock(LL_APB2_GRP1_PERIPH_SYSCFG)) { LL_SYSCFG_EnableRemapIT_USB(); } else { LOG_ERR("System Configuration Controller clock is " "disabled. Unable to enable IRQ remapping."); } #endif #if USB_OTG_HS_ULPI_PHY if (ulpi_reset.port != NULL) { if (!gpio_is_ready_dt(&ulpi_reset)) { LOG_ERR("Reset GPIO device not ready"); return -EINVAL; } if (gpio_pin_configure_dt(&ulpi_reset, GPIO_OUTPUT_INACTIVE)) { LOG_ERR("Couldn't configure reset pin"); return -EIO; } } #endif ret = usb_dc_stm32_clock_enable(); if (ret) { return ret; } ret = usb_dc_stm32_init(); if (ret) { return ret; } /* * Required for at least STM32L4 devices as they electrically * isolate USB features from VddUSB. It must be enabled before * USB can function. Refer to section 5.1.3 in DM00083560 or * DM00310109. 
*/ #ifdef PWR_CR2_USV #if defined(LL_APB1_GRP1_PERIPH_PWR) if (LL_APB1_GRP1_IsEnabledClock(LL_APB1_GRP1_PERIPH_PWR)) { LL_PWR_EnableVddUSB(); } else { LL_APB1_GRP1_EnableClock(LL_APB1_GRP1_PERIPH_PWR); LL_PWR_EnableVddUSB(); LL_APB1_GRP1_DisableClock(LL_APB1_GRP1_PERIPH_PWR); } #else LL_PWR_EnableVddUSB(); #endif /* defined(LL_APB1_GRP1_PERIPH_PWR) */ #endif /* PWR_CR2_USV */ return 0; } int usb_dc_ep_set_callback(const uint8_t ep, const usb_dc_ep_callback cb) { struct usb_dc_stm32_ep_state *ep_state = usb_dc_stm32_get_ep_state(ep); LOG_DBG("ep 0x%02x", ep); if (!ep_state) { return -EINVAL; } ep_state->cb = cb; return 0; } void usb_dc_set_status_callback(const usb_dc_status_callback cb) { LOG_DBG(""); usb_dc_stm32_state.status_cb = cb; } int usb_dc_set_address(const uint8_t addr) { HAL_StatusTypeDef status; LOG_DBG("addr %u (0x%02x)", addr, addr); status = HAL_PCD_SetAddress(&usb_dc_stm32_state.pcd, addr); if (status != HAL_OK) { LOG_ERR("HAL_PCD_SetAddress failed(0x%02x), %d", addr, (int)status); return -EIO; } return 0; } int usb_dc_ep_start_read(uint8_t ep, uint8_t *data, uint32_t max_data_len) { HAL_StatusTypeDef status; LOG_DBG("ep 0x%02x, len %u", ep, max_data_len); /* we flush EP0_IN by doing a 0 length receive on it */ if (!USB_EP_DIR_IS_OUT(ep) && (ep != EP0_IN || max_data_len)) { LOG_ERR("invalid ep 0x%02x", ep); return -EINVAL; } if (max_data_len > EP_MPS) { max_data_len = EP_MPS; } status = HAL_PCD_EP_Receive(&usb_dc_stm32_state.pcd, ep, usb_dc_stm32_state.ep_buf[USB_EP_GET_IDX(ep)], max_data_len); if (status != HAL_OK) { LOG_ERR("HAL_PCD_EP_Receive failed(0x%02x), %d", ep, (int)status); return -EIO; } return 0; } int usb_dc_ep_get_read_count(uint8_t ep, uint32_t *read_bytes) { if (!USB_EP_DIR_IS_OUT(ep) || !read_bytes) { LOG_ERR("invalid ep 0x%02x", ep); return -EINVAL; } *read_bytes = HAL_PCD_EP_GetRxCount(&usb_dc_stm32_state.pcd, ep); return 0; } int usb_dc_ep_check_cap(const struct usb_dc_ep_cfg_data * const cfg) { uint8_t ep_idx = 
USB_EP_GET_IDX(cfg->ep_addr); LOG_DBG("ep %x, mps %d, type %d", cfg->ep_addr, cfg->ep_mps, cfg->ep_type); if ((cfg->ep_type == USB_DC_EP_CONTROL) && ep_idx) { LOG_ERR("invalid endpoint configuration"); return -1; } if (ep_idx > (USB_NUM_BIDIR_ENDPOINTS - 1)) { LOG_ERR("endpoint index/address out of range"); return -1; } return 0; } int usb_dc_ep_configure(const struct usb_dc_ep_cfg_data * const ep_cfg) { uint8_t ep = ep_cfg->ep_addr; struct usb_dc_stm32_ep_state *ep_state = usb_dc_stm32_get_ep_state(ep); if (!ep_state) { return -EINVAL; } LOG_DBG("ep 0x%02x, previous ep_mps %u, ep_mps %u, ep_type %u", ep_cfg->ep_addr, ep_state->ep_mps, ep_cfg->ep_mps, ep_cfg->ep_type); #if defined(USB) || defined(USB_DRD_FS) if (ep_cfg->ep_mps > ep_state->ep_pma_buf_len) { if (ep_cfg->ep_type == USB_DC_EP_ISOCHRONOUS) { if (USB_RAM_SIZE <= (usb_dc_stm32_state.pma_offset + ep_cfg->ep_mps*2)) { return -EINVAL; } } else if (USB_RAM_SIZE <= (usb_dc_stm32_state.pma_offset + ep_cfg->ep_mps)) { return -EINVAL; } if (ep_cfg->ep_type == USB_DC_EP_ISOCHRONOUS) { HAL_PCDEx_PMAConfig(&usb_dc_stm32_state.pcd, ep, PCD_DBL_BUF, usb_dc_stm32_state.pma_offset + ((usb_dc_stm32_state.pma_offset + ep_cfg->ep_mps) << 16)); ep_state->ep_pma_buf_len = ep_cfg->ep_mps*2; usb_dc_stm32_state.pma_offset += ep_cfg->ep_mps*2; } else { HAL_PCDEx_PMAConfig(&usb_dc_stm32_state.pcd, ep, PCD_SNG_BUF, usb_dc_stm32_state.pma_offset); ep_state->ep_pma_buf_len = ep_cfg->ep_mps; usb_dc_stm32_state.pma_offset += ep_cfg->ep_mps; } } if (ep_cfg->ep_type == USB_DC_EP_ISOCHRONOUS) { ep_state->ep_mps = ep_cfg->ep_mps*2; } else { ep_state->ep_mps = ep_cfg->ep_mps; } #else ep_state->ep_mps = ep_cfg->ep_mps; #endif switch (ep_cfg->ep_type) { case USB_DC_EP_CONTROL: ep_state->ep_type = EP_TYPE_CTRL; break; case USB_DC_EP_ISOCHRONOUS: ep_state->ep_type = EP_TYPE_ISOC; break; case USB_DC_EP_BULK: ep_state->ep_type = EP_TYPE_BULK; break; case USB_DC_EP_INTERRUPT: ep_state->ep_type = EP_TYPE_INTR; break; default: return -EINVAL; } 
return 0; } int usb_dc_ep_set_stall(const uint8_t ep) { struct usb_dc_stm32_ep_state *ep_state = usb_dc_stm32_get_ep_state(ep); HAL_StatusTypeDef status; LOG_DBG("ep 0x%02x", ep); if (!ep_state) { return -EINVAL; } status = HAL_PCD_EP_SetStall(&usb_dc_stm32_state.pcd, ep); if (status != HAL_OK) { LOG_ERR("HAL_PCD_EP_SetStall failed(0x%02x), %d", ep, (int)status); return -EIO; } ep_state->ep_stalled = 1U; return 0; } int usb_dc_ep_clear_stall(const uint8_t ep) { struct usb_dc_stm32_ep_state *ep_state = usb_dc_stm32_get_ep_state(ep); HAL_StatusTypeDef status; LOG_DBG("ep 0x%02x", ep); if (!ep_state) { return -EINVAL; } status = HAL_PCD_EP_ClrStall(&usb_dc_stm32_state.pcd, ep); if (status != HAL_OK) { LOG_ERR("HAL_PCD_EP_ClrStall failed(0x%02x), %d", ep, (int)status); return -EIO; } ep_state->ep_stalled = 0U; ep_state->read_count = 0U; return 0; } int usb_dc_ep_is_stalled(const uint8_t ep, uint8_t *const stalled) { struct usb_dc_stm32_ep_state *ep_state = usb_dc_stm32_get_ep_state(ep); LOG_DBG("ep 0x%02x", ep); if (!ep_state || !stalled) { return -EINVAL; } *stalled = ep_state->ep_stalled; return 0; } int usb_dc_ep_enable(const uint8_t ep) { struct usb_dc_stm32_ep_state *ep_state = usb_dc_stm32_get_ep_state(ep); HAL_StatusTypeDef status; LOG_DBG("ep 0x%02x", ep); if (!ep_state) { return -EINVAL; } LOG_DBG("HAL_PCD_EP_Open(0x%02x, %u, %u)", ep, ep_state->ep_mps, ep_state->ep_type); status = HAL_PCD_EP_Open(&usb_dc_stm32_state.pcd, ep, ep_state->ep_mps, ep_state->ep_type); if (status != HAL_OK) { LOG_ERR("HAL_PCD_EP_Open failed(0x%02x), %d", ep, (int)status); return -EIO; } if (USB_EP_DIR_IS_OUT(ep) && ep != EP0_OUT) { return usb_dc_ep_start_read(ep, usb_dc_stm32_state.ep_buf[USB_EP_GET_IDX(ep)], ep_state->ep_mps); } return 0; } int usb_dc_ep_disable(const uint8_t ep) { struct usb_dc_stm32_ep_state *ep_state = usb_dc_stm32_get_ep_state(ep); HAL_StatusTypeDef status; LOG_DBG("ep 0x%02x", ep); if (!ep_state) { return -EINVAL; } status = 
HAL_PCD_EP_Close(&usb_dc_stm32_state.pcd, ep); if (status != HAL_OK) { LOG_ERR("HAL_PCD_EP_Close failed(0x%02x), %d", ep, (int)status); return -EIO; } return 0; } int usb_dc_ep_write(const uint8_t ep, const uint8_t *const data, const uint32_t data_len, uint32_t * const ret_bytes) { struct usb_dc_stm32_ep_state *ep_state = usb_dc_stm32_get_ep_state(ep); HAL_StatusTypeDef status; uint32_t len = data_len; int ret = 0; LOG_DBG("ep 0x%02x, len %u", ep, data_len); if (!ep_state || !USB_EP_DIR_IS_IN(ep)) { LOG_ERR("invalid ep 0x%02x", ep); return -EINVAL; } ret = k_sem_take(&ep_state->write_sem, K_NO_WAIT); if (ret) { LOG_ERR("Unable to get write lock (%d)", ret); return -EAGAIN; } if (!k_is_in_isr()) { irq_disable(USB_IRQ); } if (ep == EP0_IN && len > USB_MAX_CTRL_MPS) { len = USB_MAX_CTRL_MPS; } status = HAL_PCD_EP_Transmit(&usb_dc_stm32_state.pcd, ep, (void *)data, len); if (status != HAL_OK) { LOG_ERR("HAL_PCD_EP_Transmit failed(0x%02x), %d", ep, (int)status); k_sem_give(&ep_state->write_sem); ret = -EIO; } if (!ret && ep == EP0_IN && len > 0) { /* Wait for an empty package as from the host. * This also flushes the TX FIFO to the host. */ usb_dc_ep_start_read(ep, NULL, 0); } if (!k_is_in_isr()) { irq_enable(USB_IRQ); } if (!ret && ret_bytes) { *ret_bytes = len; } return ret; } int usb_dc_ep_read_wait(uint8_t ep, uint8_t *data, uint32_t max_data_len, uint32_t *read_bytes) { struct usb_dc_stm32_ep_state *ep_state = usb_dc_stm32_get_ep_state(ep); uint32_t read_count; if (!ep_state) { LOG_ERR("Invalid Endpoint %x", ep); return -EINVAL; } read_count = ep_state->read_count; LOG_DBG("ep 0x%02x, %u bytes, %u+%u, %p", ep, max_data_len, ep_state->read_offset, read_count, (void *)data); if (!USB_EP_DIR_IS_OUT(ep)) { /* check if OUT ep */ LOG_ERR("Wrong endpoint direction: 0x%02x", ep); return -EINVAL; } /* When both buffer and max data to read are zero, just ignore reading * and return available data in buffer. Otherwise, return data * previously stored in the buffer. 
*/ if (data) { read_count = MIN(read_count, max_data_len); memcpy(data, usb_dc_stm32_state.ep_buf[USB_EP_GET_IDX(ep)] + ep_state->read_offset, read_count); ep_state->read_count -= read_count; ep_state->read_offset += read_count; } else if (max_data_len) { LOG_ERR("Wrong arguments"); } if (read_bytes) { *read_bytes = read_count; } return 0; } int usb_dc_ep_read_continue(uint8_t ep) { struct usb_dc_stm32_ep_state *ep_state = usb_dc_stm32_get_ep_state(ep); if (!ep_state || !USB_EP_DIR_IS_OUT(ep)) { /* Check if OUT ep */ LOG_ERR("Not valid endpoint: %02x", ep); return -EINVAL; } /* If no more data in the buffer, start a new read transaction. * DataOutStageCallback will called on transaction complete. */ if (!ep_state->read_count) { usb_dc_ep_start_read(ep, usb_dc_stm32_state.ep_buf[USB_EP_GET_IDX(ep)], ep_state->ep_mps); } return 0; } int usb_dc_ep_read(const uint8_t ep, uint8_t *const data, const uint32_t max_data_len, uint32_t * const read_bytes) { if (usb_dc_ep_read_wait(ep, data, max_data_len, read_bytes) != 0) { return -EINVAL; } if (usb_dc_ep_read_continue(ep) != 0) { return -EINVAL; } return 0; } int usb_dc_ep_halt(const uint8_t ep) { return usb_dc_ep_set_stall(ep); } int usb_dc_ep_flush(const uint8_t ep) { struct usb_dc_stm32_ep_state *ep_state = usb_dc_stm32_get_ep_state(ep); if (!ep_state) { return -EINVAL; } LOG_ERR("Not implemented"); return 0; } int usb_dc_ep_mps(const uint8_t ep) { struct usb_dc_stm32_ep_state *ep_state = usb_dc_stm32_get_ep_state(ep); if (!ep_state) { return -EINVAL; } return ep_state->ep_mps; } int usb_dc_wakeup_request(void) { HAL_StatusTypeDef status; status = HAL_PCD_ActivateRemoteWakeup(&usb_dc_stm32_state.pcd); if (status != HAL_OK) { return -EAGAIN; } /* Must be active from 1ms to 15ms as per reference manual. 
*/ k_sleep(K_MSEC(2)); status = HAL_PCD_DeActivateRemoteWakeup(&usb_dc_stm32_state.pcd); if (status != HAL_OK) { return -EAGAIN; } return 0; } int usb_dc_detach(void) { HAL_StatusTypeDef status; int ret; LOG_DBG("HAL_PCD_DeInit"); status = HAL_PCD_DeInit(&usb_dc_stm32_state.pcd); if (status != HAL_OK) { LOG_ERR("PCD_DeInit failed, %d", (int)status); return -EIO; } ret = usb_dc_stm32_clock_disable(); if (ret) { return ret; } if (irq_is_enabled(USB_IRQ)) { irq_disable(USB_IRQ); } return 0; } int usb_dc_reset(void) { LOG_ERR("Not implemented"); return 0; } /* Callbacks from the STM32 Cube HAL code */ void HAL_PCD_ResetCallback(PCD_HandleTypeDef *hpcd) { int i; LOG_DBG(""); HAL_PCD_EP_Open(&usb_dc_stm32_state.pcd, EP0_IN, EP0_MPS, EP_TYPE_CTRL); HAL_PCD_EP_Open(&usb_dc_stm32_state.pcd, EP0_OUT, EP0_MPS, EP_TYPE_CTRL); /* The DataInCallback will never be called at this point for any pending * transactions. Reset the IN semaphores to prevent perpetual locked state. * */ for (i = 0; i < USB_NUM_BIDIR_ENDPOINTS; i++) { k_sem_give(&usb_dc_stm32_state.in_ep_state[i].write_sem); } if (usb_dc_stm32_state.status_cb) { usb_dc_stm32_state.status_cb(USB_DC_RESET, NULL); } } void HAL_PCD_ConnectCallback(PCD_HandleTypeDef *hpcd) { LOG_DBG(""); if (usb_dc_stm32_state.status_cb) { usb_dc_stm32_state.status_cb(USB_DC_CONNECTED, NULL); } } void HAL_PCD_DisconnectCallback(PCD_HandleTypeDef *hpcd) { LOG_DBG(""); if (usb_dc_stm32_state.status_cb) { usb_dc_stm32_state.status_cb(USB_DC_DISCONNECTED, NULL); } } void HAL_PCD_SuspendCallback(PCD_HandleTypeDef *hpcd) { LOG_DBG(""); if (usb_dc_stm32_state.status_cb) { usb_dc_stm32_state.status_cb(USB_DC_SUSPEND, NULL); } } void HAL_PCD_ResumeCallback(PCD_HandleTypeDef *hpcd) { LOG_DBG(""); if (usb_dc_stm32_state.status_cb) { usb_dc_stm32_state.status_cb(USB_DC_RESUME, NULL); } } void HAL_PCD_SetupStageCallback(PCD_HandleTypeDef *hpcd) { struct usb_setup_packet *setup = (void *)usb_dc_stm32_state.pcd.Setup; struct usb_dc_stm32_ep_state *ep_state; 
LOG_DBG(""); ep_state = usb_dc_stm32_get_ep_state(EP0_OUT); /* can't fail for ep0 */ __ASSERT(ep_state, "No corresponding ep_state for EP0"); ep_state->read_count = SETUP_SIZE; ep_state->read_offset = 0U; memcpy(&usb_dc_stm32_state.ep_buf[EP0_IDX], usb_dc_stm32_state.pcd.Setup, ep_state->read_count); if (ep_state->cb) { ep_state->cb(EP0_OUT, USB_DC_EP_SETUP); if (!(setup->wLength == 0U) && usb_reqtype_is_to_device(setup)) { usb_dc_ep_start_read(EP0_OUT, usb_dc_stm32_state.ep_buf[EP0_IDX], setup->wLength); } } } void HAL_PCD_DataOutStageCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum) { uint8_t ep_idx = USB_EP_GET_IDX(epnum); uint8_t ep = ep_idx | USB_EP_DIR_OUT; struct usb_dc_stm32_ep_state *ep_state = usb_dc_stm32_get_ep_state(ep); LOG_DBG("epnum 0x%02x, rx_count %u", epnum, HAL_PCD_EP_GetRxCount(&usb_dc_stm32_state.pcd, epnum)); /* Transaction complete, data is now stored in the buffer and ready * for the upper stack (usb_dc_ep_read to retrieve). */ usb_dc_ep_get_read_count(ep, &ep_state->read_count); ep_state->read_offset = 0U; if (ep_state->cb) { ep_state->cb(ep, USB_DC_EP_DATA_OUT); } } void HAL_PCD_DataInStageCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum) { uint8_t ep_idx = USB_EP_GET_IDX(epnum); uint8_t ep = ep_idx | USB_EP_DIR_IN; struct usb_dc_stm32_ep_state *ep_state = usb_dc_stm32_get_ep_state(ep); LOG_DBG("epnum 0x%02x", epnum); __ASSERT(ep_state, "No corresponding ep_state for ep"); k_sem_give(&ep_state->write_sem); if (ep_state->cb) { ep_state->cb(ep, USB_DC_EP_DATA_IN); } } #if (defined(USB) || defined(USB_DRD_FS)) && DT_INST_NODE_HAS_PROP(0, disconnect_gpios) void HAL_PCDEx_SetConnectionState(PCD_HandleTypeDef *hpcd, uint8_t state) { struct gpio_dt_spec usb_disconnect = GPIO_DT_SPEC_INST_GET(0, disconnect_gpios); gpio_pin_configure_dt(&usb_disconnect, (state ? GPIO_OUTPUT_ACTIVE : GPIO_OUTPUT_INACTIVE)); } #endif /* USB && DT_INST_NODE_HAS_PROP(0, disconnect_gpios) */ ```
/content/code_sandbox/drivers/usb/device/usb_dc_stm32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
9,758
```c /* * */ /** * @file usb_dc_nrfx.c * @brief Nordic USB device controller driver * * The driver implements the interface between the USBD peripheral * driver from nrfx package and the operating system. */ #include <soc.h> #include <string.h> #include <stdio.h> #include <zephyr/init.h> #include <zephyr/kernel.h> #include <zephyr/drivers/usb/usb_dc.h> #include <zephyr/usb/usb_device.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/nrf_clock_control.h> #include <zephyr/dt-bindings/regulator/nrf5x.h> #include <nrf_usbd_common.h> #include <hal/nrf_usbd.h> #include <nrfx_power.h> #define LOG_LEVEL CONFIG_USB_DRIVER_LOG_LEVEL #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(usb_nrfx); /* USB device controller access from devicetree */ #define DT_DRV_COMPAT nordic_nrf_usbd /** * @brief nRF USBD peripheral states */ enum usbd_periph_state { USBD_DETACHED, USBD_ATTACHED, USBD_POWERED, USBD_SUSPENDED, USBD_RESUMED, USBD_DEFAULT, USBD_ADDRESS_SET, USBD_CONFIGURED, }; /** * @brief Endpoint event types. */ enum usbd_ep_event_type { EP_EVT_SETUP_RECV, EP_EVT_RECV_REQ, EP_EVT_RECV_COMPLETE, EP_EVT_WRITE_COMPLETE, }; /** * @brief USBD peripheral event types. */ enum usbd_event_type { USBD_EVT_POWER, USBD_EVT_EP, USBD_EVT_RESET, USBD_EVT_SOF, USBD_EVT_REINIT }; /** * @brief Endpoint configuration. * * @param cb Endpoint callback. * @param max_sz Max packet size supported by endpoint. * @param en Enable/Disable flag. * @param addr Endpoint address. * @param type Endpoint transfer type. */ struct nrf_usbd_ep_cfg { usb_dc_ep_callback cb; uint32_t max_sz; bool en; uint8_t addr; enum usb_dc_ep_transfer_type type; }; struct usbd_mem_block { void *data; }; /** * @brief Endpoint buffer * * @param len Remaining length to be read/written. * @param block Mempool block, for freeing up buffer after use. * @param data Pointer to the data buffer for the endpoint. * @param curr Pointer to the current offset in the endpoint buffer. 
 */
struct nrf_usbd_ep_buf {
	uint32_t len;
	struct usbd_mem_block block;
	uint8_t *data;
	uint8_t *curr;
};

/**
 * @brief Endpoint context
 *
 * @param cfg               Endpoint configuration
 * @param buf               Endpoint buffer
 * @param read_complete     A flag indicating that DMA read operation
 *                          has been completed.
 * @param read_pending      A flag indicating that the Host has requested
 *                          a data transfer.
 * @param write_in_progress A flag indicating that write operation has
 *                          been scheduled.
 * @param trans_zlp         Flag required for Control IN Endpoint. It
 *                          indicates that ZLP is required to end data
 *                          stage of the control request.
 *
 * The three volatile flags are written from the USBD event handler path and
 * consumed from the work-queue handler, hence the qualifier.
 */
struct nrf_usbd_ep_ctx {
	struct nrf_usbd_ep_cfg cfg;
	struct nrf_usbd_ep_buf buf;
	volatile bool read_complete;
	volatile bool read_pending;
	volatile bool write_in_progress;
	bool trans_zlp;
};

/**
 * @brief Endpoint event structure
 *
 * @param ep        Endpoint control block pointer
 * @param evt_type  Event type
 */
struct usbd_ep_event {
	struct nrf_usbd_ep_ctx *ep;
	enum usbd_ep_event_type evt_type;
};

/**
 * @brief Power event structure
 *
 * @param state  New USBD peripheral state.
 */
struct usbd_pwr_event {
	enum usbd_periph_state state;
};

/**
 * @brief Endpoint USB event
 *	Used by ISR to send events to work handler
 *
 * @param node      Used by the kernel for FIFO management
 * @param block     Mempool block pointer for freeing up after use
 * @param evt       Event data field
 * @param evt_type  Type of event that has occurred from the USBD peripheral
 */
struct usbd_event {
	sys_snode_t node;
	struct usbd_mem_block block;
	union {
		struct usbd_ep_event ep_evt;
		struct usbd_pwr_event pwr_evt;
	} evt;
	enum usbd_event_type evt_type;
};

/**
 * @brief Fifo element slab
 *	Used for allocating fifo elements to pass from ISR to work handler
 * TODO: The number of FIFO elements is an arbitrary number now but it should
 * be derived from the theoretical number of backlog events possible depending
 * on the number of endpoints configured.
 */
#define FIFO_ELEM_SZ		sizeof(struct usbd_event)
#define FIFO_ELEM_ALIGN		sizeof(unsigned int)

K_MEM_SLAB_DEFINE(fifo_elem_slab, FIFO_ELEM_SZ,
		  CONFIG_USB_NRFX_EVT_QUEUE_SIZE, FIFO_ELEM_ALIGN);

/** Number of IN Endpoints configured (including control) */
#define CFG_EPIN_CNT (DT_INST_PROP(0, num_in_endpoints) + \
		      DT_INST_PROP(0, num_bidir_endpoints))

/** Number of OUT Endpoints configured (including control) */
#define CFG_EPOUT_CNT (DT_INST_PROP(0, num_out_endpoints) + \
		       DT_INST_PROP(0, num_bidir_endpoints))

/** Number of ISO IN Endpoints */
#define CFG_EP_ISOIN_CNT DT_INST_PROP(0, num_isoin_endpoints)

/** Number of ISO OUT Endpoints */
#define CFG_EP_ISOOUT_CNT DT_INST_PROP(0, num_isoout_endpoints)

/* Indices of the ISO endpoint contexts inside nrf_usbd_ctx::ep_ctx; the
 * layout of that array is: IN EPs, ISO IN, OUT EPs, ISO OUT.
 */
#define EP_ISOIN_INDEX CFG_EPIN_CNT
#define EP_ISOOUT_INDEX (CFG_EPIN_CNT + CFG_EP_ISOIN_CNT + CFG_EPOUT_CNT)

#define EP_BUF_MAX_SZ		64UL
#define ISO_EP_BUF_MAX_SZ	1024UL

/**
 * @brief Output endpoint buffers
 *	Used as buffers for the endpoints' data transfer
 *	Max buffers size possible: 1536 Bytes (8 EP * 64B + 1 ISO * 1024B)
 */
static uint8_t ep_out_bufs[CFG_EPOUT_CNT][EP_BUF_MAX_SZ]
	__aligned(sizeof(uint32_t));
static uint8_t ep_isoout_bufs[CFG_EP_ISOOUT_CNT][ISO_EP_BUF_MAX_SZ]
	__aligned(sizeof(uint32_t));

/** Total endpoints configured */
#define CFG_EP_CNT (CFG_EPIN_CNT + CFG_EP_ISOIN_CNT + \
		    CFG_EPOUT_CNT + CFG_EP_ISOOUT_CNT)

/**
 * @brief USBD control structure
 *
 * @param status_cb      Status callback for USB DC notifications
 * @param setup          Setup packet for Control requests
 * @param hfxo_cli       Onoff client used to control HFXO
 * @param hfxo_mgr       Pointer to onoff manager associated with HFXO.
 * @param clk_requested  Flag used to protect against double stop.
 * @param attached       USBD Attached flag
 * @param ready          USBD Ready flag set after pullup
 * @param usb_work       USBD work item
 * @param drv_lock       Mutex for thread-safe nrfx driver use
 * @param ep_ctx         Endpoint contexts
 * @param ctrl_read_len  State of control read operation (EP0).
 */
struct nrf_usbd_ctx {
	usb_dc_status_callback status_cb;
	struct usb_setup_packet setup;
	struct onoff_client hfxo_cli;
	struct onoff_manager *hfxo_mgr;
	atomic_t clk_requested;

	bool attached;
	bool ready;

	struct k_work usb_work;
	struct k_mutex drv_lock;

	struct nrf_usbd_ep_ctx ep_ctx[CFG_EP_CNT];

	uint16_t ctrl_read_len;
};

/* FIFO used for queuing up events from ISR. */
K_FIFO_DEFINE(usbd_evt_fifo);

/* Work queue used for handling the ISR events (i.e. for notifying the USB
 * device stack, for executing the endpoints callbacks, etc.) out of the ISR
 * context.
 * The system work queue cannot be used for this purpose as it might be used in
 * applications for scheduling USB transfers and this could lead to a deadlock
 * when the USB device stack would not be notified about certain event because
 * of a system work queue item waiting for a USB transfer to be finished.
 */
static struct k_work_q usbd_work_queue;
static K_KERNEL_STACK_DEFINE(usbd_work_queue_stack,
			     CONFIG_USB_NRFX_WORK_QUEUE_STACK_SIZE);

/* Single driver context instance; this controller driver supports exactly
 * one USBD peripheral (devicetree instance 0).
 */
static struct nrf_usbd_ctx usbd_ctx = {
	.attached = false,
	.ready = false,
};

static inline struct nrf_usbd_ctx *get_usbd_ctx(void)
{
	return &usbd_ctx;
}

static inline bool dev_attached(void)
{
	return get_usbd_ctx()->attached;
}

static inline bool dev_ready(void)
{
	return get_usbd_ctx()->ready;
}

/* Endpoint addresses and nrfx endpoint identifiers share the same encoding,
 * so the conversions below are plain casts.
 */
static inline nrf_usbd_common_ep_t ep_addr_to_nrfx(uint8_t ep)
{
	return (nrf_usbd_common_ep_t)ep;
}

static inline uint8_t nrfx_addr_to_ep(nrf_usbd_common_ep_t ep)
{
	return (uint8_t)ep;
}

/**
 * @brief Check whether an endpoint address is backed by this configuration.
 *
 * Valid addresses are the configured bulk/interrupt/control endpoints
 * (index below CFG_EPIN_CNT/CFG_EPOUT_CNT) and, if configured, the single
 * ISO endpoint at index NRF_USBD_EPISO_FIRST.
 */
static inline bool ep_is_valid(const uint8_t ep)
{
	uint8_t ep_num = USB_EP_GET_IDX(ep);

	if (NRF_USBD_EPIN_CHECK(ep)) {
		if (unlikely(ep_num == NRF_USBD_EPISO_FIRST)) {
			if (CFG_EP_ISOIN_CNT == 0) {
				return false;
			}
		} else {
			if (ep_num >= CFG_EPIN_CNT) {
				return false;
			}
		}
	} else {
		if (unlikely(ep_num == NRF_USBD_EPISO_FIRST)) {
			if (CFG_EP_ISOOUT_CNT == 0) {
				return false;
			}
		} else {
			if (ep_num >= CFG_EPOUT_CNT) {
				return false;
			}
		}
	}

	return true;
}

/**
 * @brief Map an endpoint address to its context slot.
 *
 * The ep_ctx[] array layout is: IN endpoints, ISO IN, OUT endpoints
 * (offset by CFG_EPIN_CNT + CFG_EP_ISOIN_CNT), ISO OUT.
 *
 * @return Pointer to the endpoint context, or NULL for an invalid address.
 */
static struct nrf_usbd_ep_ctx *endpoint_ctx(const uint8_t ep)
{
	struct nrf_usbd_ctx *ctx;
	uint8_t ep_num;

	if (!ep_is_valid(ep)) {
		return NULL;
	}

	ctx = get_usbd_ctx();
	ep_num = NRF_USBD_EP_NR_GET(ep);

	if (NRF_USBD_EPIN_CHECK(ep)) {
		if (unlikely(NRF_USBD_EPISO_CHECK(ep))) {
			return &ctx->ep_ctx[EP_ISOIN_INDEX];
		} else {
			return &ctx->ep_ctx[ep_num];
		}
	} else {
		if (unlikely(NRF_USBD_EPISO_CHECK(ep))) {
			return &ctx->ep_ctx[EP_ISOOUT_INDEX];
		} else {
			return &ctx->ep_ctx[CFG_EPIN_CNT +
					    CFG_EP_ISOIN_CNT +
					    ep_num];
		}
	}

	/* Unreachable: every branch above returns. Kept to satisfy
	 * compilers that do not see the exhaustive if/else.
	 */
	return NULL;
}

static struct nrf_usbd_ep_ctx *in_endpoint_ctx(const uint8_t ep)
{
	return endpoint_ctx(NRF_USBD_EPIN(ep));
}

static struct nrf_usbd_ep_ctx *out_endpoint_ctx(const uint8_t ep)
{
	return endpoint_ctx(NRF_USBD_EPOUT(ep));
}

/**
 * @brief Schedule USBD event processing.
 *
 * Should be called after usbd_evt_put().
 */
static inline void usbd_work_schedule(void)
{
	k_work_submit_to_queue(&usbd_work_queue, &get_usbd_ctx()->usb_work);
}

/**
 * @brief Free previously allocated USBD event.
 *
 * Should be called after usbd_evt_get().
 *
 * @param ev  Pointer to the USBD event structure.
 */
static inline void usbd_evt_free(struct usbd_event *ev)
{
	k_mem_slab_free(&fifo_elem_slab, (void *)ev->block.data);
}

/**
 * @brief Enqueue USBD event.
 *
 * @param ev  Pointer to the previously allocated and filled event structure.
 */
static inline void usbd_evt_put(struct usbd_event *ev)
{
	k_fifo_put(&usbd_evt_fifo, ev);
}

/**
 * @brief Get next enqueued USBD event if present.
 *
 * @return Event pointer, or NULL when the queue is empty (non-blocking).
 */
static inline struct usbd_event *usbd_evt_get(void)
{
	return k_fifo_get(&usbd_evt_fifo, K_NO_WAIT);
}

/**
 * @brief Drop all enqueued events.
 */
static inline void usbd_evt_flush(void)
{
	struct usbd_event *ev;

	do {
		ev = usbd_evt_get();
		if (ev) {
			usbd_evt_free(ev);
		}
	} while (ev != NULL);
}

/**
 * @brief Allocate USBD event.
 *
 * This function should be called prior to usbd_evt_put().
 *
 * @returns Pointer to the allocated event or NULL if there was no space left.
 */
static inline struct usbd_event *usbd_evt_alloc(void)
{
	struct usbd_event *ev;
	struct usbd_mem_block block;

	if (k_mem_slab_alloc(&fifo_elem_slab,
			     (void **)&block.data,
			     K_NO_WAIT)) {
		LOG_ERR("USBD event allocation failed!");

		/*
		 * Allocation may fail if workqueue thread is starved or event
		 * queue size is too small (CONFIG_USB_NRFX_EVT_QUEUE_SIZE).
		 * Wipe all events, free the space and schedule
		 * reinitialization.
		 */
		usbd_evt_flush();

		/* The flush above returned every slab element, so this
		 * second allocation can only fail on memory corruption.
		 */
		if (k_mem_slab_alloc(&fifo_elem_slab,
				     (void **)&block.data, K_NO_WAIT)) {
			LOG_ERR("USBD event memory corrupted");
			__ASSERT_NO_MSG(0);
			return NULL;
		}

		ev = (struct usbd_event *)block.data;
		ev->block = block;
		ev->evt_type = USBD_EVT_REINIT;
		usbd_evt_put(ev);
		usbd_work_schedule();

		/* NULL is returned even though the REINIT event was queued:
		 * the caller's own event is lost and must not be filled in.
		 */
		return NULL;
	}

	ev = (struct usbd_event *)block.data;
	ev->block = block;

	return ev;
}

/* Queue a power-state event for the work handler; processing is only
 * scheduled while the device is attached, otherwise the event waits in the
 * FIFO until usb_dc_attach() drains it.
 */
static void submit_dc_power_event(enum usbd_periph_state state)
{
	struct usbd_event *ev = usbd_evt_alloc();

	if (!ev) {
		return;
	}

	ev->evt_type = USBD_EVT_POWER;
	ev->evt.pwr_evt.state = state;

	usbd_evt_put(ev);

	if (usbd_ctx.attached) {
		usbd_work_schedule();
	}
}

#if CONFIG_USB_NRFX_ATTACHED_EVENT_DELAY
/* Timer expiry: deliver the delayed ATTACHED event. */
static void attached_evt_delay_handler(struct k_timer *timer)
{
	LOG_DBG("ATTACHED event delay done");
	submit_dc_power_event(USBD_ATTACHED);
}

static K_TIMER_DEFINE(delay_timer, attached_evt_delay_handler, NULL);
#endif

/* Translate nrfx power events to driver power states and queue them.
 * With CONFIG_USB_NRFX_ATTACHED_EVENT_DELAY the DETECTED event is deferred
 * via delay_timer instead of being submitted immediately.
 */
static void usb_dc_power_event_handler(nrfx_power_usb_evt_t event)
{
	enum usbd_periph_state new_state;

	switch (event) {
	case NRFX_POWER_USB_EVT_DETECTED:
#if !CONFIG_USB_NRFX_ATTACHED_EVENT_DELAY
		new_state = USBD_ATTACHED;
		break;
#else
		LOG_DBG("ATTACHED event delayed");
		k_timer_start(&delay_timer,
			      K_MSEC(CONFIG_USB_NRFX_ATTACHED_EVENT_DELAY),
			      K_NO_WAIT);
		return;
#endif
	case NRFX_POWER_USB_EVT_READY:
		new_state = USBD_POWERED;
		break;
	case NRFX_POWER_USB_EVT_REMOVED:
		new_state = USBD_DETACHED;
		break;
	default:
		LOG_ERR("Unknown USB power event %d", event);
		return;
	}

	submit_dc_power_event(new_state);
}

/* Stopping HFXO, algorithm supports case when stop comes before clock
 * is started. In that case, it is stopped from the callback context.
 */
static int hfxo_stop(struct nrf_usbd_ctx *ctx)
{
	/* atomic_cas pairs each stop with a prior start; a second stop is a
	 * no-op instead of a double release.
	 */
	if (atomic_cas(&ctx->clk_requested, 1, 0)) {
		return onoff_cancel_or_release(ctx->hfxo_mgr, &ctx->hfxo_cli);
	}

	return 0;
}

static int hfxo_start(struct nrf_usbd_ctx *ctx)
{
	/* Request HFXO only on the 0 -> 1 transition; repeated starts are
	 * no-ops.
	 */
	if (atomic_cas(&ctx->clk_requested, 0, 1)) {
		sys_notify_init_spinwait(&ctx->hfxo_cli.notify);

		return onoff_request(ctx->hfxo_mgr, &ctx->hfxo_cli);
	}

	return 0;
}

/* Enable in hardware every endpoint whose context is marked enabled;
 * called when the peripheral becomes powered (see USBD_POWERED handling).
 * The ISO endpoints live at fixed address 8 and are handled separately.
 */
static void usbd_enable_endpoints(struct nrf_usbd_ctx *ctx)
{
	struct nrf_usbd_ep_ctx *ep_ctx;
	int i;

	for (i = 0; i < CFG_EPIN_CNT; i++) {
		ep_ctx = in_endpoint_ctx(i);
		__ASSERT_NO_MSG(ep_ctx);

		if (ep_ctx->cfg.en) {
			nrf_usbd_common_ep_enable(ep_addr_to_nrfx(ep_ctx->cfg.addr));
		}
	}

	if (CFG_EP_ISOIN_CNT) {
		ep_ctx = in_endpoint_ctx(NRF_USBD_EPIN(8));
		__ASSERT_NO_MSG(ep_ctx);

		if (ep_ctx->cfg.en) {
			nrf_usbd_common_ep_enable(ep_addr_to_nrfx(ep_ctx->cfg.addr));
		}
	}

	for (i = 0; i < CFG_EPOUT_CNT; i++) {
		ep_ctx = out_endpoint_ctx(i);
		__ASSERT_NO_MSG(ep_ctx);

		if (ep_ctx->cfg.en) {
			nrf_usbd_common_ep_enable(ep_addr_to_nrfx(ep_ctx->cfg.addr));
		}
	}

	if (CFG_EP_ISOOUT_CNT) {
		ep_ctx = out_endpoint_ctx(NRF_USBD_EPOUT(8));
		__ASSERT_NO_MSG(ep_ctx);

		if (ep_ctx->cfg.en) {
			nrf_usbd_common_ep_enable(ep_addr_to_nrfx(ep_ctx->cfg.addr));
		}
	}
}

/**
 * @brief Reset endpoint state.
 *
 * Resets the internal logic state for a given endpoint.
 *
 * @param[in] ep_ctx  Endpoint structure control block
 */
static void ep_ctx_reset(struct nrf_usbd_ep_ctx *ep_ctx)
{
	ep_ctx->buf.data = ep_ctx->buf.block.data;
	ep_ctx->buf.curr = ep_ctx->buf.data;
	ep_ctx->buf.len = 0U;

	/* Abort ongoing write operation. */
	if (ep_ctx->write_in_progress) {
		nrf_usbd_common_ep_abort(ep_addr_to_nrfx(ep_ctx->cfg.addr));
	}

	ep_ctx->read_complete = true;
	ep_ctx->read_pending = false;
	ep_ctx->write_in_progress = false;
	ep_ctx->trans_zlp = false;
}

/**
 * @brief Initialize all endpoint structures.
 *
 * Endpoint buffers are allocated during the first call of this function.
* This function may also be called again on every USB reset event * to reinitialize the state of all endpoints. */ static int eps_ctx_init(void) { struct nrf_usbd_ep_ctx *ep_ctx; uint32_t i; for (i = 0U; i < CFG_EPIN_CNT; i++) { ep_ctx = in_endpoint_ctx(i); __ASSERT_NO_MSG(ep_ctx); ep_ctx_reset(ep_ctx); } for (i = 0U; i < CFG_EPOUT_CNT; i++) { ep_ctx = out_endpoint_ctx(i); __ASSERT_NO_MSG(ep_ctx); if (!ep_ctx->buf.block.data) { ep_ctx->buf.block.data = ep_out_bufs[i]; } ep_ctx_reset(ep_ctx); } if (CFG_EP_ISOIN_CNT) { ep_ctx = in_endpoint_ctx(NRF_USBD_EPIN(8)); __ASSERT_NO_MSG(ep_ctx); ep_ctx_reset(ep_ctx); } if (CFG_EP_ISOOUT_CNT) { BUILD_ASSERT(CFG_EP_ISOOUT_CNT <= 1); ep_ctx = out_endpoint_ctx(NRF_USBD_EPOUT(8)); __ASSERT_NO_MSG(ep_ctx); if (!ep_ctx->buf.block.data) { ep_ctx->buf.block.data = ep_isoout_bufs[0]; } ep_ctx_reset(ep_ctx); } return 0; } static inline void usbd_work_process_pwr_events(struct usbd_pwr_event *pwr_evt) { struct nrf_usbd_ctx *ctx = get_usbd_ctx(); int err; switch (pwr_evt->state) { case USBD_ATTACHED: if (!nrf_usbd_common_is_enabled()) { LOG_DBG("USB detected"); nrf_usbd_common_enable(); err = hfxo_start(ctx); __ASSERT_NO_MSG(err >= 0); } /* No callback here. * Stack will be notified when the peripheral is ready. 
*/ break; case USBD_POWERED: usbd_enable_endpoints(ctx); nrf_usbd_common_start(IS_ENABLED(CONFIG_USB_DEVICE_SOF)); ctx->ready = true; LOG_DBG("USB Powered"); if (ctx->status_cb) { ctx->status_cb(USB_DC_CONNECTED, NULL); } break; case USBD_DETACHED: ctx->ready = false; nrf_usbd_common_disable(); err = hfxo_stop(ctx); __ASSERT_NO_MSG(err >= 0); LOG_DBG("USB Removed"); if (ctx->status_cb) { ctx->status_cb(USB_DC_DISCONNECTED, NULL); } break; case USBD_SUSPENDED: if (dev_ready()) { nrf_usbd_common_suspend(); LOG_DBG("USB Suspend state"); if (ctx->status_cb) { ctx->status_cb(USB_DC_SUSPEND, NULL); } } break; case USBD_RESUMED: if (ctx->status_cb && dev_ready()) { LOG_DBG("USB resume"); ctx->status_cb(USB_DC_RESUME, NULL); } break; default: break; } } static inline void usbd_work_process_setup(struct nrf_usbd_ep_ctx *ep_ctx) { __ASSERT_NO_MSG(ep_ctx); __ASSERT(ep_ctx->cfg.type == USB_DC_EP_CONTROL, "Invalid event on CTRL EP."); struct usb_setup_packet *usbd_setup; /* SETUP packets are handled by USBD hardware. * For compatibility with the USB stack, * SETUP packet must be reassembled. */ usbd_setup = (struct usb_setup_packet *)ep_ctx->buf.data; memset(usbd_setup, 0, sizeof(struct usb_setup_packet)); usbd_setup->bmRequestType = nrf_usbd_setup_bmrequesttype_get(NRF_USBD); usbd_setup->bRequest = nrf_usbd_setup_brequest_get(NRF_USBD); usbd_setup->wValue = nrf_usbd_setup_wvalue_get(NRF_USBD); usbd_setup->wIndex = nrf_usbd_setup_windex_get(NRF_USBD); usbd_setup->wLength = nrf_usbd_setup_wlength_get(NRF_USBD); ep_ctx->buf.len = sizeof(struct usb_setup_packet); /* Copy setup packet to driver internal structure */ memcpy(&usbd_ctx.setup, usbd_setup, sizeof(struct usb_setup_packet)); LOG_DBG("SETUP: bR:0x%02x bmRT:0x%02x wV:0x%04x wI:0x%04x wL:%d", (uint32_t)usbd_setup->bRequest, (uint32_t)usbd_setup->bmRequestType, (uint32_t)usbd_setup->wValue, (uint32_t)usbd_setup->wIndex, (uint32_t)usbd_setup->wLength); /* Inform the stack. 
*/ ep_ctx->cfg.cb(ep_ctx->cfg.addr, USB_DC_EP_SETUP); struct nrf_usbd_ctx *ctx = get_usbd_ctx(); if (usb_reqtype_is_to_device(usbd_setup) && usbd_setup->wLength) { ctx->ctrl_read_len = usbd_setup->wLength; /* Allow data chunk on EP0 OUT */ nrf_usbd_common_setup_data_clear(); } else { ctx->ctrl_read_len = 0U; } } static inline void usbd_work_process_recvreq(struct nrf_usbd_ctx *ctx, struct nrf_usbd_ep_ctx *ep_ctx) { if (!ep_ctx->read_pending) { return; } if (!ep_ctx->read_complete) { return; } ep_ctx->read_pending = false; ep_ctx->read_complete = false; k_mutex_lock(&ctx->drv_lock, K_FOREVER); NRF_USBD_COMMON_TRANSFER_OUT(transfer, ep_ctx->buf.data, ep_ctx->cfg.max_sz); nrfx_err_t err = nrf_usbd_common_ep_transfer( ep_addr_to_nrfx(ep_ctx->cfg.addr), &transfer); if (err != NRFX_SUCCESS) { LOG_ERR("nRF USBD transfer error (OUT): 0x%02x", err); } k_mutex_unlock(&ctx->drv_lock); } static inline void usbd_work_process_ep_events(struct usbd_ep_event *ep_evt) { struct nrf_usbd_ctx *ctx = get_usbd_ctx(); struct nrf_usbd_ep_ctx *ep_ctx = ep_evt->ep; __ASSERT_NO_MSG(ep_ctx); switch (ep_evt->evt_type) { case EP_EVT_SETUP_RECV: usbd_work_process_setup(ep_ctx); break; case EP_EVT_RECV_REQ: usbd_work_process_recvreq(ctx, ep_ctx); break; case EP_EVT_RECV_COMPLETE: ep_ctx->cfg.cb(ep_ctx->cfg.addr, USB_DC_EP_DATA_OUT); break; case EP_EVT_WRITE_COMPLETE: if (ep_ctx->cfg.type == USB_DC_EP_CONTROL && !ep_ctx->trans_zlp) { /* Trigger the hardware to perform * status stage, but only if there is * no ZLP required. 
*/ k_mutex_lock(&ctx->drv_lock, K_FOREVER); nrf_usbd_common_setup_clear(); k_mutex_unlock(&ctx->drv_lock); } ep_ctx->cfg.cb(ep_ctx->cfg.addr, USB_DC_EP_DATA_IN); break; default: break; } } static void usbd_event_transfer_ctrl(nrf_usbd_common_evt_t const *const p_event) { struct nrf_usbd_ep_ctx *ep_ctx = endpoint_ctx(p_event->data.eptransfer.ep); if (NRF_USBD_EPIN_CHECK(p_event->data.eptransfer.ep)) { switch (p_event->data.eptransfer.status) { case NRF_USBD_COMMON_EP_OK: { struct usbd_event *ev = usbd_evt_alloc(); if (!ev) { return; } ep_ctx->write_in_progress = false; ev->evt_type = USBD_EVT_EP; ev->evt.ep_evt.evt_type = EP_EVT_WRITE_COMPLETE; ev->evt.ep_evt.ep = ep_ctx; LOG_DBG("ctrl write complete"); usbd_evt_put(ev); usbd_work_schedule(); } break; case NRF_USBD_COMMON_EP_ABORTED: { LOG_DBG("Endpoint 0x%02x write aborted", p_event->data.eptransfer.ep); } break; default: { LOG_ERR("Unexpected event (nrfx_usbd): %d, ep 0x%02x", p_event->data.eptransfer.status, p_event->data.eptransfer.ep); } break; } } else { switch (p_event->data.eptransfer.status) { case NRF_USBD_COMMON_EP_WAITING: { struct usbd_event *ev = usbd_evt_alloc(); if (!ev) { return; } LOG_DBG("ctrl read request"); ep_ctx->read_pending = true; ev->evt_type = USBD_EVT_EP; ev->evt.ep_evt.evt_type = EP_EVT_RECV_REQ; ev->evt.ep_evt.ep = ep_ctx; usbd_evt_put(ev); usbd_work_schedule(); } break; case NRF_USBD_COMMON_EP_OK: { struct nrf_usbd_ctx *ctx = get_usbd_ctx(); struct usbd_event *ev = usbd_evt_alloc(); if (!ev) { return; } nrf_usbd_common_ep_status_t err_code; ev->evt_type = USBD_EVT_EP; ev->evt.ep_evt.evt_type = EP_EVT_RECV_COMPLETE; ev->evt.ep_evt.ep = ep_ctx; err_code = nrf_usbd_common_ep_status_get( p_event->data.eptransfer.ep, &ep_ctx->buf.len); if (err_code != NRF_USBD_COMMON_EP_OK) { LOG_ERR("_ep_status_get failed! 
Code: %d", err_code); __ASSERT_NO_MSG(0); } LOG_DBG("ctrl read done: %d", ep_ctx->buf.len); if (ctx->ctrl_read_len > ep_ctx->buf.len) { ctx->ctrl_read_len -= ep_ctx->buf.len; /* Allow next data chunk on EP0 OUT */ nrf_usbd_common_setup_data_clear(); } else { ctx->ctrl_read_len = 0U; } usbd_evt_put(ev); usbd_work_schedule(); } break; default: { LOG_ERR("Unexpected event (nrfx_usbd): %d, ep 0x%02x", p_event->data.eptransfer.status, p_event->data.eptransfer.ep); } break; } } } static void usbd_event_transfer_data(nrf_usbd_common_evt_t const *const p_event) { struct nrf_usbd_ep_ctx *ep_ctx = endpoint_ctx(p_event->data.eptransfer.ep); if (NRF_USBD_EPIN_CHECK(p_event->data.eptransfer.ep)) { switch (p_event->data.eptransfer.status) { case NRF_USBD_COMMON_EP_OK: { struct usbd_event *ev = usbd_evt_alloc(); if (!ev) { return; } LOG_DBG("write complete, ep 0x%02x", (uint32_t)p_event->data.eptransfer.ep); ep_ctx->write_in_progress = false; ev->evt_type = USBD_EVT_EP; ev->evt.ep_evt.evt_type = EP_EVT_WRITE_COMPLETE; ev->evt.ep_evt.ep = ep_ctx; usbd_evt_put(ev); usbd_work_schedule(); } break; case NRF_USBD_COMMON_EP_ABORTED: { LOG_DBG("Endpoint 0x%02x write aborted", p_event->data.eptransfer.ep); } break; default: { LOG_ERR("Unexpected event (nrfx_usbd): %d, ep 0x%02x", p_event->data.eptransfer.status, p_event->data.eptransfer.ep); } break; } } else { switch (p_event->data.eptransfer.status) { case NRF_USBD_COMMON_EP_WAITING: { struct usbd_event *ev = usbd_evt_alloc(); if (!ev) { return; } LOG_DBG("read request, ep 0x%02x", (uint32_t)p_event->data.eptransfer.ep); ep_ctx->read_pending = true; ev->evt_type = USBD_EVT_EP; ev->evt.ep_evt.evt_type = EP_EVT_RECV_REQ; ev->evt.ep_evt.ep = ep_ctx; usbd_evt_put(ev); usbd_work_schedule(); } break; case NRF_USBD_COMMON_EP_OK: { struct usbd_event *ev = usbd_evt_alloc(); if (!ev) { return; } ep_ctx->buf.len = nrf_usbd_ep_amount_get(NRF_USBD, p_event->data.eptransfer.ep); LOG_DBG("read complete, ep 0x%02x, len %d", 
(uint32_t)p_event->data.eptransfer.ep, ep_ctx->buf.len); ev->evt_type = USBD_EVT_EP; ev->evt.ep_evt.evt_type = EP_EVT_RECV_COMPLETE; ev->evt.ep_evt.ep = ep_ctx; usbd_evt_put(ev); usbd_work_schedule(); } break; default: { LOG_ERR("Unexpected event (nrfx_usbd): %d, ep 0x%02x", p_event->data.eptransfer.status, p_event->data.eptransfer.ep); } break; } } } /** * @brief nRFx USBD driver event handler function. */ static void usbd_event_handler(nrf_usbd_common_evt_t const *const p_event) { struct usbd_event evt = {0}; bool put_evt = false; switch (p_event->type) { case NRF_USBD_COMMON_EVT_SUSPEND: LOG_DBG("SUSPEND state detected"); evt.evt_type = USBD_EVT_POWER; evt.evt.pwr_evt.state = USBD_SUSPENDED; put_evt = true; break; case NRF_USBD_COMMON_EVT_RESUME: LOG_DBG("RESUMING from suspend"); evt.evt_type = USBD_EVT_POWER; evt.evt.pwr_evt.state = USBD_RESUMED; put_evt = true; break; case NRF_USBD_COMMON_EVT_WUREQ: LOG_DBG("RemoteWU initiated"); evt.evt_type = USBD_EVT_POWER; evt.evt.pwr_evt.state = USBD_RESUMED; put_evt = true; break; case NRF_USBD_COMMON_EVT_RESET: evt.evt_type = USBD_EVT_RESET; put_evt = true; break; case NRF_USBD_COMMON_EVT_SOF: if (IS_ENABLED(CONFIG_USB_DEVICE_SOF)) { evt.evt_type = USBD_EVT_SOF; put_evt = true; } break; case NRF_USBD_COMMON_EVT_EPTRANSFER: { struct nrf_usbd_ep_ctx *ep_ctx; ep_ctx = endpoint_ctx(p_event->data.eptransfer.ep); switch (ep_ctx->cfg.type) { case USB_DC_EP_CONTROL: usbd_event_transfer_ctrl(p_event); break; case USB_DC_EP_BULK: case USB_DC_EP_INTERRUPT: usbd_event_transfer_data(p_event); break; case USB_DC_EP_ISOCHRONOUS: usbd_event_transfer_data(p_event); break; default: break; } break; } case NRF_USBD_COMMON_EVT_SETUP: { nrf_usbd_common_setup_t drv_setup; nrf_usbd_common_setup_get(&drv_setup); if ((drv_setup.bRequest != USB_SREQ_SET_ADDRESS) || (USB_REQTYPE_GET_TYPE(drv_setup.bmRequestType) != USB_REQTYPE_TYPE_STANDARD)) { /* SetAddress is handled by USBD hardware. * No software action required. 
*/ struct nrf_usbd_ep_ctx *ep_ctx = endpoint_ctx(NRF_USBD_EPOUT(0)); evt.evt_type = USBD_EVT_EP; evt.evt.ep_evt.ep = ep_ctx; evt.evt.ep_evt.evt_type = EP_EVT_SETUP_RECV; put_evt = true; } break; } default: break; } if (put_evt) { struct usbd_event *ev; ev = usbd_evt_alloc(); if (!ev) { return; } ev->evt_type = evt.evt_type; ev->evt = evt.evt; usbd_evt_put(ev); usbd_work_schedule(); } } static inline void usbd_reinit(void) { int ret; nrfx_err_t err; nrfx_power_usbevt_disable(); nrf_usbd_common_disable(); nrf_usbd_common_uninit(); usbd_evt_flush(); ret = eps_ctx_init(); __ASSERT_NO_MSG(ret == 0); nrfx_power_usbevt_enable(); err = nrf_usbd_common_init(usbd_event_handler); if (err != NRFX_SUCCESS) { LOG_DBG("nRF USBD driver reinit failed. Code: %d", err); __ASSERT_NO_MSG(0); } } /** * @brief function to generate fake receive request for * ISO OUT EP. * * ISO OUT endpoint does not generate irq by itself and reading * from ISO OUT ep is synchronized with SOF frame. For more details * refer to Nordic usbd specification. */ static void usbd_sof_trigger_iso_read(void) { struct usbd_event *ev; struct nrf_usbd_ep_ctx *ep_ctx; ep_ctx = endpoint_ctx(NRF_USBD_COMMON_EPOUT8); if (!ep_ctx) { LOG_ERR("There is no ISO ep"); return; } if (ep_ctx->cfg.en) { /* Dissect receive request * if the iso OUT ep is enabled */ ep_ctx->read_pending = true; ep_ctx->read_complete = true; ev = usbd_evt_alloc(); if (!ev) { LOG_ERR("Failed to alloc evt"); return; } ev->evt_type = USBD_EVT_EP; ev->evt.ep_evt.evt_type = EP_EVT_RECV_REQ; ev->evt.ep_evt.ep = ep_ctx; usbd_evt_put(ev); usbd_work_schedule(); } else { LOG_DBG("Endpoint is not enabled"); } } /* Work handler */ static void usbd_work_handler(struct k_work *item) { struct nrf_usbd_ctx *ctx; struct usbd_event *ev; ctx = CONTAINER_OF(item, struct nrf_usbd_ctx, usb_work); while ((ev = usbd_evt_get()) != NULL) { if (!dev_ready() && ev->evt_type != USBD_EVT_POWER) { /* Drop non-power events when cable is detached. 
*/ usbd_evt_free(ev); continue; } switch (ev->evt_type) { case USBD_EVT_EP: if (!ctx->attached) { LOG_ERR("not attached, EP 0x%02x event dropped", (uint32_t)ev->evt.ep_evt.ep->cfg.addr); } usbd_work_process_ep_events(&ev->evt.ep_evt); break; case USBD_EVT_POWER: usbd_work_process_pwr_events(&ev->evt.pwr_evt); break; case USBD_EVT_RESET: LOG_DBG("USBD reset event"); k_mutex_lock(&ctx->drv_lock, K_FOREVER); eps_ctx_init(); k_mutex_unlock(&ctx->drv_lock); if (ctx->status_cb) { ctx->status_cb(USB_DC_RESET, NULL); } break; case USBD_EVT_SOF: usbd_sof_trigger_iso_read(); if (ctx->status_cb) { ctx->status_cb(USB_DC_SOF, NULL); } break; case USBD_EVT_REINIT: { /* * Reinitialize the peripheral after queue * overflow. */ LOG_ERR("USBD event queue full!"); usbd_reinit(); break; } default: LOG_ERR("Unknown USBD event: %"PRId16, ev->evt_type); break; } usbd_evt_free(ev); } } int usb_dc_attach(void) { struct nrf_usbd_ctx *ctx = get_usbd_ctx(); int ret; if (ctx->attached) { return 0; } k_mutex_init(&ctx->drv_lock); ctx->hfxo_mgr = z_nrf_clock_control_get_onoff( COND_CODE_1(NRF_CLOCK_HAS_HFCLK192M, (CLOCK_CONTROL_NRF_SUBSYS_HF192M), (CLOCK_CONTROL_NRF_SUBSYS_HF))); IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), nrfx_isr, nrf_usbd_common_irq_handler, 0); nrfx_power_usbevt_enable(); ret = eps_ctx_init(); if (ret == 0) { ctx->attached = true; } if (!k_fifo_is_empty(&usbd_evt_fifo)) { usbd_work_schedule(); } if (nrfx_power_usbstatus_get() != NRFX_POWER_USB_STATE_DISCONNECTED) { /* USBDETECTED event is be generated on cable attachment and * when cable is already attached during reset, but not when * the peripheral is re-enabled. * When USB-enabled bootloader is used, target application * will not receive this event and it needs to be generated * again here. 
*/ usb_dc_power_event_handler(NRFX_POWER_USB_EVT_DETECTED); } return ret; } int usb_dc_detach(void) { struct nrf_usbd_ctx *ctx = get_usbd_ctx(); k_mutex_lock(&ctx->drv_lock, K_FOREVER); usbd_evt_flush(); if (nrf_usbd_common_is_enabled()) { nrf_usbd_common_disable(); } (void)hfxo_stop(ctx); nrfx_power_usbevt_disable(); ctx->attached = false; k_mutex_unlock(&ctx->drv_lock); return 0; } int usb_dc_reset(void) { int ret; if (!dev_attached() || !dev_ready()) { return -ENODEV; } LOG_DBG("USBD Reset"); ret = usb_dc_detach(); if (ret) { return ret; } ret = usb_dc_attach(); if (ret) { return ret; } return 0; } int usb_dc_set_address(const uint8_t addr) { struct nrf_usbd_ctx *ctx; if (!dev_attached() || !dev_ready()) { return -ENODEV; } /** * Nothing to do here. The USBD HW already takes care of initiating * STATUS stage. Just double check the address for sanity. */ __ASSERT(addr == (uint8_t)NRF_USBD->USBADDR, "USB Address incorrect!"); ctx = get_usbd_ctx(); LOG_DBG("Address set to: %d", addr); return 0; } int usb_dc_ep_check_cap(const struct usb_dc_ep_cfg_data *const ep_cfg) { uint8_t ep_idx = NRF_USBD_EP_NR_GET(ep_cfg->ep_addr); LOG_DBG("ep 0x%02x, mps %d, type %d", ep_cfg->ep_addr, ep_cfg->ep_mps, ep_cfg->ep_type); if ((ep_cfg->ep_type == USB_DC_EP_CONTROL) && ep_idx) { LOG_ERR("invalid endpoint configuration"); return -1; } if (!NRF_USBD_EP_VALIDATE(ep_cfg->ep_addr)) { LOG_ERR("invalid endpoint index/address"); return -1; } if ((ep_cfg->ep_type == USB_DC_EP_ISOCHRONOUS) && (!NRF_USBD_EPISO_CHECK(ep_cfg->ep_addr))) { LOG_WRN("invalid endpoint type"); return -1; } if ((ep_cfg->ep_type != USB_DC_EP_ISOCHRONOUS) && (NRF_USBD_EPISO_CHECK(ep_cfg->ep_addr))) { LOG_WRN("iso endpoint can only be iso"); return -1; } return 0; } int usb_dc_ep_configure(const struct usb_dc_ep_cfg_data *const ep_cfg) { struct nrf_usbd_ep_ctx *ep_ctx; if (!dev_attached()) { return -ENODEV; } /** * TODO: * For ISO endpoints, application has to use EPIN/OUT 8 * but right now there's no standard way of 
knowing the * ISOIN/ISOOUT endpoint number in advance to configure * accordingly. So either this needs to be chosen in the * menuconfig in application area or perhaps in device tree * at compile time or introduce a new API to read the endpoint * configuration at runtime before configuring them. */ ep_ctx = endpoint_ctx(ep_cfg->ep_addr); if (!ep_ctx) { return -EINVAL; } ep_ctx->cfg.addr = ep_cfg->ep_addr; ep_ctx->cfg.type = ep_cfg->ep_type; ep_ctx->cfg.max_sz = ep_cfg->ep_mps; if (!NRF_USBD_EPISO_CHECK(ep_cfg->ep_addr)) { if ((ep_cfg->ep_mps & (ep_cfg->ep_mps - 1)) != 0U) { LOG_ERR("EP max packet size must be a power of 2"); return -EINVAL; } } nrf_usbd_common_ep_max_packet_size_set(ep_addr_to_nrfx(ep_cfg->ep_addr), ep_cfg->ep_mps); return 0; } int usb_dc_ep_set_stall(const uint8_t ep) { struct nrf_usbd_ep_ctx *ep_ctx; if (!dev_attached() || !dev_ready()) { return -ENODEV; } ep_ctx = endpoint_ctx(ep); if (!ep_ctx) { return -EINVAL; } switch (ep_ctx->cfg.type) { case USB_DC_EP_CONTROL: nrf_usbd_common_setup_stall(); break; case USB_DC_EP_BULK: case USB_DC_EP_INTERRUPT: nrf_usbd_common_ep_stall(ep_addr_to_nrfx(ep)); break; case USB_DC_EP_ISOCHRONOUS: LOG_ERR("STALL unsupported on ISO endpoint"); return -EINVAL; } ep_ctx->buf.len = 0U; ep_ctx->buf.curr = ep_ctx->buf.data; LOG_DBG("STALL on EP 0x%02x", ep); return 0; } int usb_dc_ep_clear_stall(const uint8_t ep) { struct nrf_usbd_ep_ctx *ep_ctx; if (!dev_attached() || !dev_ready()) { return -ENODEV; } ep_ctx = endpoint_ctx(ep); if (!ep_ctx) { return -EINVAL; } if (NRF_USBD_EPISO_CHECK(ep)) { /* ISO transactions do not support a handshake phase. 
*/ return -EINVAL; } nrf_usbd_common_ep_dtoggle_clear(ep_addr_to_nrfx(ep)); nrf_usbd_common_ep_stall_clear(ep_addr_to_nrfx(ep)); LOG_DBG("Unstall on EP 0x%02x", ep); return 0; } int usb_dc_ep_halt(const uint8_t ep) { return usb_dc_ep_set_stall(ep); } int usb_dc_ep_is_stalled(const uint8_t ep, uint8_t *const stalled) { struct nrf_usbd_ep_ctx *ep_ctx; if (!dev_attached() || !dev_ready()) { return -ENODEV; } ep_ctx = endpoint_ctx(ep); if (!ep_ctx) { return -EINVAL; } if (!stalled) { return -EINVAL; } *stalled = (uint8_t) nrf_usbd_common_ep_stall_check(ep_addr_to_nrfx(ep)); return 0; } int usb_dc_ep_enable(const uint8_t ep) { struct nrf_usbd_ep_ctx *ep_ctx; if (!dev_attached()) { return -ENODEV; } ep_ctx = endpoint_ctx(ep); if (!ep_ctx) { return -EINVAL; } if (!NRF_USBD_EPISO_CHECK(ep)) { /* ISO transactions for full-speed device do not support * toggle sequencing and should only send DATA0 PID. */ nrf_usbd_common_ep_dtoggle_clear(ep_addr_to_nrfx(ep)); /** Endpoint is enabled on SetInterface request. * This should also clear EP's halt status. */ nrf_usbd_common_ep_stall_clear(ep_addr_to_nrfx(ep)); } if (ep_ctx->cfg.en) { return -EALREADY; } LOG_DBG("EP enable: 0x%02x", ep); ep_ctx->cfg.en = true; /* Defer the endpoint enable if USBD is not ready yet. */ if (dev_ready()) { nrf_usbd_common_ep_enable(ep_addr_to_nrfx(ep)); } return 0; } int usb_dc_ep_disable(const uint8_t ep) { struct nrf_usbd_ep_ctx *ep_ctx; ep_ctx = endpoint_ctx(ep); if (!ep_ctx) { return -EINVAL; } if (!ep_ctx->cfg.en) { return -EALREADY; } LOG_DBG("EP disable: 0x%02x", ep); nrf_usbd_common_ep_disable(ep_addr_to_nrfx(ep)); /* Clear write_in_progress as nrf_usbd_common_ep_disable() * terminates endpoint transaction. 
*/ ep_ctx->write_in_progress = false; ep_ctx_reset(ep_ctx); ep_ctx->cfg.en = false; return 0; } int usb_dc_ep_flush(const uint8_t ep) { struct nrf_usbd_ep_ctx *ep_ctx; if (!dev_attached() || !dev_ready()) { return -ENODEV; } ep_ctx = endpoint_ctx(ep); if (!ep_ctx) { return -EINVAL; } ep_ctx->buf.len = 0U; ep_ctx->buf.curr = ep_ctx->buf.data; nrf_usbd_common_transfer_out_drop(ep_addr_to_nrfx(ep)); return 0; } int usb_dc_ep_write(const uint8_t ep, const uint8_t *const data, const uint32_t data_len, uint32_t *const ret_bytes) { LOG_DBG("ep_write: ep 0x%02x, len %d", ep, data_len); struct nrf_usbd_ctx *ctx = get_usbd_ctx(); struct nrf_usbd_ep_ctx *ep_ctx; int result = 0; if (!dev_attached() || !dev_ready()) { return -ENODEV; } if (NRF_USBD_EPOUT_CHECK(ep)) { return -EINVAL; } ep_ctx = endpoint_ctx(ep); if (!ep_ctx) { return -EINVAL; } if (!ep_ctx->cfg.en) { LOG_ERR("Endpoint 0x%02x is not enabled", ep); return -EINVAL; } k_mutex_lock(&ctx->drv_lock, K_FOREVER); /* USBD driver does not allow scheduling multiple DMA transfers * for one EP at a time. Next USB transfer on this endpoint can be * triggered after the completion of previous one. */ if (ep_ctx->write_in_progress) { k_mutex_unlock(&ctx->drv_lock); return -EAGAIN; } /** Clear the ZLP flag if current write is ZLP. After the ZLP will be * send the driver will perform status stage. */ if (!data_len && ep_ctx->trans_zlp) { ep_ctx->trans_zlp = false; } /** If writing to a Control Endpoint there might be a need to transfer * ZLP. If the Hosts asks for more data that the device may return and * the last packet is wMaxPacketSize long. The driver must send ZLP. * For consistence with the Zephyr USB stack sending ZLP must be issued * from the stack level. Making trans_zlp flag true results in blocking * the driver from starting setup stage without required ZLP. 
*/ if (ep_ctx->cfg.type == USB_DC_EP_CONTROL) { if (data_len && usbd_ctx.setup.wLength > data_len && !(data_len % ep_ctx->cfg.max_sz)) { ep_ctx->trans_zlp = true; } } /* Setup stage is handled by hardware. * Detect the setup stage initiated by the stack * and perform appropriate action. */ if ((ep_ctx->cfg.type == USB_DC_EP_CONTROL) && (nrf_usbd_common_last_setup_dir_get() != ep)) { nrf_usbd_common_setup_clear(); k_mutex_unlock(&ctx->drv_lock); return 0; } ep_ctx->write_in_progress = true; NRF_USBD_COMMON_TRANSFER_IN(transfer, data, data_len, 0); nrfx_err_t err = nrf_usbd_common_ep_transfer(ep_addr_to_nrfx(ep), &transfer); if (err != NRFX_SUCCESS) { ep_ctx->write_in_progress = false; if (ret_bytes) { *ret_bytes = 0; } result = -EIO; LOG_ERR("nRF USBD write error: %d", (uint32_t)err); } else { if (ret_bytes) { *ret_bytes = data_len; } } k_mutex_unlock(&ctx->drv_lock); return result; } int usb_dc_ep_read_wait(uint8_t ep, uint8_t *data, uint32_t max_data_len, uint32_t *read_bytes) { struct nrf_usbd_ep_ctx *ep_ctx; struct nrf_usbd_ctx *ctx = get_usbd_ctx(); uint32_t bytes_to_copy; if (!dev_attached() || !dev_ready()) { return -ENODEV; } if (NRF_USBD_EPIN_CHECK(ep)) { return -EINVAL; } if (!data && max_data_len) { return -EINVAL; } ep_ctx = endpoint_ctx(ep); if (!ep_ctx) { return -EINVAL; } if (!ep_ctx->cfg.en) { LOG_ERR("Endpoint 0x%02x is not enabled", ep); return -EINVAL; } k_mutex_lock(&ctx->drv_lock, K_FOREVER); bytes_to_copy = MIN(max_data_len, ep_ctx->buf.len); if (!data && !max_data_len) { if (read_bytes) { *read_bytes = ep_ctx->buf.len; } k_mutex_unlock(&ctx->drv_lock); return 0; } memcpy(data, ep_ctx->buf.curr, bytes_to_copy); ep_ctx->buf.curr += bytes_to_copy; ep_ctx->buf.len -= bytes_to_copy; if (read_bytes) { *read_bytes = bytes_to_copy; } k_mutex_unlock(&ctx->drv_lock); return 0; } int usb_dc_ep_read_continue(uint8_t ep) { struct nrf_usbd_ep_ctx *ep_ctx; struct nrf_usbd_ctx *ctx = get_usbd_ctx(); if (!dev_attached() || !dev_ready()) { return -ENODEV; } if 
(NRF_USBD_EPIN_CHECK(ep)) { return -EINVAL; } ep_ctx = endpoint_ctx(ep); if (!ep_ctx) { return -EINVAL; } if (!ep_ctx->cfg.en) { LOG_ERR("Endpoint 0x%02x is not enabled", ep); return -EINVAL; } k_mutex_lock(&ctx->drv_lock, K_FOREVER); if (!ep_ctx->buf.len) { ep_ctx->buf.curr = ep_ctx->buf.data; ep_ctx->read_complete = true; if (ep_ctx->read_pending) { struct usbd_event *ev = usbd_evt_alloc(); if (!ev) { k_mutex_unlock(&ctx->drv_lock); return -ENOMEM; } ev->evt_type = USBD_EVT_EP; ev->evt.ep_evt.ep = ep_ctx; ev->evt.ep_evt.evt_type = EP_EVT_RECV_REQ; usbd_evt_put(ev); usbd_work_schedule(); } } k_mutex_unlock(&ctx->drv_lock); return 0; } int usb_dc_ep_read(const uint8_t ep, uint8_t *const data, const uint32_t max_data_len, uint32_t *const read_bytes) { LOG_DBG("ep_read: ep 0x%02x, maxlen %d", ep, max_data_len); int ret; ret = usb_dc_ep_read_wait(ep, data, max_data_len, read_bytes); if (ret) { return ret; } if (!data && !max_data_len) { return ret; } ret = usb_dc_ep_read_continue(ep); return ret; } int usb_dc_ep_set_callback(const uint8_t ep, const usb_dc_ep_callback cb) { struct nrf_usbd_ep_ctx *ep_ctx; if (!dev_attached()) { return -ENODEV; } ep_ctx = endpoint_ctx(ep); if (!ep_ctx) { return -EINVAL; } ep_ctx->cfg.cb = cb; return 0; } void usb_dc_set_status_callback(const usb_dc_status_callback cb) { get_usbd_ctx()->status_cb = cb; } int usb_dc_ep_mps(const uint8_t ep) { struct nrf_usbd_ep_ctx *ep_ctx; if (!dev_attached()) { return -ENODEV; } ep_ctx = endpoint_ctx(ep); if (!ep_ctx) { return -EINVAL; } return ep_ctx->cfg.max_sz; } int usb_dc_wakeup_request(void) { bool res = nrf_usbd_common_wakeup_req(); if (!res) { return -EAGAIN; } return 0; } static int usb_init(void) { struct nrf_usbd_ctx *ctx = get_usbd_ctx(); nrfx_err_t err; #ifdef CONFIG_HAS_HW_NRF_USBREG /* Use CLOCK/POWER priority for compatibility with other series where * USB events are handled by CLOCK interrupt handler. 
*/ IRQ_CONNECT(USBREGULATOR_IRQn, DT_IRQ(DT_INST(0, nordic_nrf_clock), priority), nrfx_isr, nrfx_usbreg_irq_handler, 0); irq_enable(USBREGULATOR_IRQn); #endif static const nrfx_power_config_t power_config = { .dcdcen = (DT_PROP(DT_INST(0, nordic_nrf5x_regulator), regulator_initial_mode) == NRF5X_REG_MODE_DCDC), #if NRFX_POWER_SUPPORTS_DCDCEN_VDDH .dcdcenhv = COND_CODE_1(CONFIG_SOC_SERIES_NRF52X, (DT_NODE_HAS_STATUS(DT_INST(0, nordic_nrf52x_regulator_hv), okay)), (DT_NODE_HAS_STATUS(DT_INST(0, nordic_nrf53x_regulator_hv), okay))), #endif }; static const nrfx_power_usbevt_config_t usbevt_config = { .handler = usb_dc_power_event_handler }; err = nrf_usbd_common_init(usbd_event_handler); if (err != NRFX_SUCCESS) { LOG_DBG("nRF USBD driver init failed. Code: %d", (uint32_t)err); return -EIO; } /* Ignore the return value, as NRFX_ERROR_ALREADY_INITIALIZED is not * a problem here. */ (void)nrfx_power_init(&power_config); nrfx_power_usbevt_init(&usbevt_config); k_work_queue_start(&usbd_work_queue, usbd_work_queue_stack, K_KERNEL_STACK_SIZEOF(usbd_work_queue_stack), CONFIG_SYSTEM_WORKQUEUE_PRIORITY, NULL); k_thread_name_set(&usbd_work_queue.thread, "usbd_workq"); k_work_init(&ctx->usb_work, usbd_work_handler); return 0; } SYS_INIT(usb_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE); ```
/content/code_sandbox/drivers/usb/device/usb_dc_nrfx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
12,765
```c /* * */ #define DT_DRV_COMPAT ite_it82xx2_usb #include <zephyr/kernel.h> #include <zephyr/usb/usb_device.h> #include <zephyr/drivers/pinctrl.h> #include <soc.h> #include <soc_dt.h> #include <string.h> #include <zephyr/irq.h> #include <zephyr/pm/policy.h> #include <zephyr/drivers/interrupt_controller/wuc_ite_it8xxx2.h> #include <zephyr/dt-bindings/interrupt-controller/it8xxx2-wuc.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(usb_dc_it82xx2, CONFIG_USB_DRIVER_LOG_LEVEL); #define IT8XXX2_IS_EXTEND_ENDPOINT(n) (USB_EP_GET_IDX(n) >= 4) /* USB Device Controller Registers Bits & Constants */ #define IT8XXX2_USB_IRQ DT_INST_IRQ_BY_IDX(0, 0, irq) #define IT8XXX2_WU90_IRQ DT_INST_IRQ_BY_IDX(0, 1, irq) #define FIFO_NUM 3 #define SETUP_DATA_CNT 8 #define DC_ADDR_NULL 0x00 #define DC_ADDR_MASK 0x7F /* The related definitions of the register EP STATUS: * 0x41/0x45/0x49/0x4D */ #define EP_STATUS_ERROR 0x0F /* The related definitions of the register dc_line_status: 0x51 */ #define RX_LINE_STATE_MASK (RX_LINE_FULL_SPD | RX_LINE_LOW_SPD) #define RX_LINE_LOW_SPD 0x02 #define RX_LINE_FULL_SPD 0x01 #define RX_LINE_RESET 0x00 /* EPN Extend Control 2 Register Mask Definition */ #define COMPLETED_TRANS 0xF0 /* Bit [1:0] represents the TRANSACTION_TYPE as follows: */ enum it82xx2_transaction_types { DC_SETUP_TRANS, DC_IN_TRANS, DC_OUTDATA_TRANS, DC_ALL_TRANS }; /* The bit definitions of the register EP RX/TX FIFO Control: * EP_RX_FIFO_CONTROL: 0X64/0x84/0xA4/0xC4 * EP_TX_FIFO_CONTROL: 0X74/0x94/0xB4/0xD4 */ #define FIFO_FORCE_EMPTY BIT(0) /* The bit definitions of the register Host/Device Control: 0XE0 */ #define RESET_CORE BIT(1) /* ENDPOINT[3..0]_STATUS_REG */ #define DC_STALL_SENT BIT(5) /* DC_INTERRUPT_STATUS_REG */ #define DC_TRANS_DONE BIT(0) #define DC_RESUME_INT BIT(1) #define DC_RESET_EVENT BIT(2) #define DC_SOF_RECEIVED BIT(3) #define DC_NAK_SENT_INT BIT(4) /* DC_CONTROL_REG */ #define DC_GLOBAL_ENABLE BIT(0) #define DC_TX_LINE_STATE_DM BIT(1) #define 
DC_DIRECT_CONTROL BIT(3) #define DC_FULL_SPEED_LINE_POLARITY BIT(4) #define DC_FULL_SPEED_LINE_RATE BIT(5) #define DC_CONNECT_TO_HOST BIT(6) /* internal pull-up */ /* ENDPOINT[3..0]_CONTROL_REG */ #define ENDPOINT_EN BIT(0) #define EP_SEND_STALL BIT(3) enum it82xx2_ep_status { EP_INIT = 0, EP_CHECK, EP_CONFIG, EP_CONFIG_IN, EP_CONFIG_OUT, }; enum it82xx2_trans_type { SETUP_TOKEN, IN_TOKEN, OUT_TOKEN, }; enum it82xx2_setup_stage { INIT_ST, SETUP_ST, DIN_ST, DOUT_ST, STATUS_ST, STALL_SEND, }; enum it82xx2_ep_ctrl { EP_IN_DIRECTION_SET, EP_STALL_SEND, EP_STALL_CHECK, EP_IOS_ENABLE, EP_ENABLE, EP_DATA_SEQ_1, EP_DATA_SEQ_TOGGLE, EP_READY_ENABLE, }; struct usb_it8xxx2_wuc { /* WUC control device structure */ const struct device *wucs; /* WUC pin mask */ uint8_t mask; }; struct usb_it82xx2_config { struct usb_it82xx2_regs *const base; const struct pinctrl_dev_config *pcfg; const struct usb_it8xxx2_wuc *wuc_list; }; static const struct usb_it8xxx2_wuc usb_wuc0[IT8XXX2_DT_INST_WUCCTRL_LEN(0)] = IT8XXX2_DT_WUC_ITEMS_LIST(0); PINCTRL_DT_INST_DEFINE(0); static const struct usb_it82xx2_config ucfg0 = { .base = (struct usb_it82xx2_regs *)DT_INST_REG_ADDR(0), .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0), .wuc_list = usb_wuc0 }; struct it82xx2_endpoint_data { usb_dc_ep_callback cb_in; usb_dc_ep_callback cb_out; enum it82xx2_ep_status ep_status; enum usb_dc_ep_transfer_type ep_type; uint16_t remaining; /* remaining bytes */ uint16_t mps; }; struct usb_it82xx2_data { const struct device *dev; struct it82xx2_endpoint_data ep_data[MAX_NUM_ENDPOINTS]; enum it82xx2_setup_stage st_state; /* Setup State */ /* EP0 status */ enum it82xx2_trans_type last_token; /* EP0 status */ enum it82xx2_trans_type now_token; uint8_t attached; uint8_t addr; bool no_data_ctrl; bool suspended; usb_dc_status_callback usb_status_cb; /* FIFO_1/2/3 ready status */ bool fifo_ready[3]; struct k_sem fifo_sem[3]; struct k_sem suspended_sem; struct k_work_delayable check_suspended_work; }; /* The ep_fifo_res[ep_idx % 
FIFO_NUM] where the FIFO_NUM is 3 represents the * EP mapping because when (ep_idx % FIFO_NUM) is 3, it actually means the EP0. */ static const uint8_t ep_fifo_res[3] = {3, 1, 2}; static struct usb_it82xx2_data udata0; static struct usb_it82xx2_regs *it82xx2_get_usb_regs(void) { const struct device *dev = DEVICE_DT_GET(DT_NODELABEL(usb0)); const struct usb_it82xx2_config *cfg = dev->config; struct usb_it82xx2_regs *const usb_regs = cfg->base; return usb_regs; } static void it82xx2_enable_sof_int(bool enable) { struct usb_it82xx2_regs *const usb_regs = it82xx2_get_usb_regs(); usb_regs->dc_interrupt_status = DC_SOF_RECEIVED; if (enable) { usb_regs->dc_interrupt_mask |= DC_SOF_RECEIVED; } else { usb_regs->dc_interrupt_mask &= ~DC_SOF_RECEIVED; } } /* Standby(deep doze) mode enable/disable */ static void it82xx2_enable_standby_state(bool enable) { if (enable) { pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES); } else { pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES); } } /* WU90 (USB D+) Enable/Disable */ static void it82xx2_enable_wu90_irq(const struct device *dev, bool enable) { const struct usb_it82xx2_config *cfg = dev->config; /* Clear pending interrupt */ it8xxx2_wuc_clear_status(cfg->wuc_list[0].wucs, cfg->wuc_list[0].mask); if (enable) { irq_enable(IT8XXX2_WU90_IRQ); } else { irq_disable(IT8XXX2_WU90_IRQ); } } static void it82xx2_wu90_isr(const struct device *dev) { it82xx2_enable_wu90_irq(dev, false); it82xx2_enable_standby_state(false); LOG_DBG("USB D+ (WU90) Triggered"); } /* WU90 (USB D+) Initializations */ static void it8xxx2_usb_dc_wuc_init(const struct device *dev) { const struct usb_it82xx2_config *cfg = dev->config; /* Initializing the WUI */ it8xxx2_wuc_set_polarity(cfg->wuc_list[0].wucs, cfg->wuc_list[0].mask, WUC_TYPE_EDGE_FALLING); it8xxx2_wuc_clear_status(cfg->wuc_list[0].wucs, cfg->wuc_list[0].mask); /* Enabling the WUI */ it8xxx2_wuc_enable(cfg->wuc_list[0].wucs, cfg->wuc_list[0].mask); /* Connect WU90 (USB D+) interrupt 
but make it disabled initially */ IRQ_CONNECT(IT8XXX2_WU90_IRQ, 0, it82xx2_wu90_isr, 0, 0); } static int it82xx2_usb_fifo_ctrl(const uint8_t ep, const bool clear) { struct usb_it82xx2_regs *const usb_regs = it82xx2_get_usb_regs(); volatile uint8_t *ep_fifo_ctrl = usb_regs->fifo_regs[EP_EXT_REGS_BX].fifo_ctrl.ep_fifo_ctrl; uint8_t ep_idx = USB_EP_GET_IDX(ep); uint8_t fifon_ctrl = (ep_fifo_res[ep_idx % FIFO_NUM] - 1) * 2; unsigned int key; int ret = 0; if (ep_idx == 0) { LOG_ERR("Invalid endpoint 0x%x", ep); return -EINVAL; } key = irq_lock(); if (clear) { ep_fifo_ctrl[fifon_ctrl] = 0x0; ep_fifo_ctrl[fifon_ctrl + 1] = 0x0; goto out; } if (USB_EP_DIR_IS_IN(ep) && udata0.ep_data[ep_idx].ep_status == EP_CONFIG_IN) { if (ep_idx < 8) { ep_fifo_ctrl[fifon_ctrl] = BIT(ep_idx); ep_fifo_ctrl[fifon_ctrl + 1] = 0x0; } else { ep_fifo_ctrl[fifon_ctrl] = 0x0; ep_fifo_ctrl[fifon_ctrl + 1] = BIT(ep_idx - 8); } } else if (USB_EP_DIR_IS_OUT(ep) && udata0.ep_data[ep_idx].ep_status == EP_CONFIG_OUT) { if (ep_idx < 8) { ep_fifo_ctrl[fifon_ctrl] |= BIT(ep_idx); } else { ep_fifo_ctrl[fifon_ctrl + 1] |= BIT(ep_idx - 8); } } else { LOG_ERR("Failed to set fifo control register for ep 0x%x", ep); ret = -EINVAL; } out: irq_unlock(key); return ret; } static volatile void *it82xx2_get_ext_ctrl(int ep_idx, enum it82xx2_ep_ctrl ctrl) { uint8_t idx; struct usb_it82xx2_regs *const usb_regs = it82xx2_get_usb_regs(); union epn0n1_extend_ctrl_reg *epn0n1_ext_ctrl = usb_regs->fifo_regs[EP_EXT_REGS_9X].ext_4_15.epn0n1_ext_ctrl; struct epn_ext_ctrl_regs *ext_ctrl = usb_regs->fifo_regs[EP_EXT_REGS_DX].ext_0_3.epn_ext_ctrl; if ((ctrl == EP_IN_DIRECTION_SET) || (ctrl == EP_ENABLE)) { idx = ((ep_idx - 4) % 3) + 1; return &ext_ctrl[idx].epn_ext_ctrl1; } idx = (ep_idx - 4) / 2; return &epn0n1_ext_ctrl[idx]; } static int it82xx2_usb_extend_ep_ctrl(uint8_t ep, enum it82xx2_ep_ctrl ctrl, bool enable) { struct usb_it82xx2_regs *const usb_regs = it82xx2_get_usb_regs(); struct it82xx2_usb_ep_regs *ep_regs = 
usb_regs->usb_ep_regs; struct epn_ext_ctrl_regs *ext_ctrl = usb_regs->fifo_regs[EP_EXT_REGS_DX].ext_0_3.epn_ext_ctrl; union epn_extend_ctrl1_reg *epn_ext_ctrl1 = NULL; union epn0n1_extend_ctrl_reg *epn0n1_ext_ctrl = NULL; uint8_t ep_idx = USB_EP_GET_IDX(ep); uint8_t ep_fifo = (ep_idx > 0) ? (ep_fifo_res[ep_idx % FIFO_NUM]) : 0; if (!IT8XXX2_IS_EXTEND_ENDPOINT(ep_idx)) { return -EINVAL; } if ((ctrl == EP_IN_DIRECTION_SET) || (ctrl == EP_ENABLE)) { epn_ext_ctrl1 = (union epn_extend_ctrl1_reg *)it82xx2_get_ext_ctrl(ep_idx, ctrl); } else { epn0n1_ext_ctrl = (union epn0n1_extend_ctrl_reg *)it82xx2_get_ext_ctrl(ep_idx, ctrl); } switch (ctrl) { case EP_STALL_SEND: if (ep_idx % 2) { epn0n1_ext_ctrl->fields.epn1_send_stall_bit = enable; } else { epn0n1_ext_ctrl->fields.epn0_send_stall_bit = enable; } break; case EP_STALL_CHECK: if (ep_idx % 2) { return epn0n1_ext_ctrl->fields.epn1_send_stall_bit; } else { return epn0n1_ext_ctrl->fields.epn0_send_stall_bit; } break; case EP_IOS_ENABLE: if (ep_idx % 2) { epn0n1_ext_ctrl->fields.epn1_iso_enable_bit = enable; } else { epn0n1_ext_ctrl->fields.epn0_iso_enable_bit = enable; } break; case EP_DATA_SEQ_1: if (ep_idx % 2) { epn0n1_ext_ctrl->fields.epn1_outdata_sequence_bit = enable; } else { epn0n1_ext_ctrl->fields.epn0_outdata_sequence_bit = enable; } break; case EP_DATA_SEQ_TOGGLE: if (!enable) { break; } if (ep_idx % 2) { if (epn0n1_ext_ctrl->fields.epn1_outdata_sequence_bit) { epn0n1_ext_ctrl->fields.epn1_outdata_sequence_bit = 0; } else { epn0n1_ext_ctrl->fields.epn1_outdata_sequence_bit = 1; } } else { if (epn0n1_ext_ctrl->fields.epn0_outdata_sequence_bit) { epn0n1_ext_ctrl->fields.epn0_outdata_sequence_bit = 0; } else { epn0n1_ext_ctrl->fields.epn0_outdata_sequence_bit = 1; } } break; case EP_IN_DIRECTION_SET: if (((ep_idx - 4) / 3 == 0)) { epn_ext_ctrl1->fields.epn0_direction_bit = enable; } else if (((ep_idx - 4) / 3 == 1)) { epn_ext_ctrl1->fields.epn3_direction_bit = enable; } else if (((ep_idx - 4) / 3 == 2)) { 
epn_ext_ctrl1->fields.epn6_direction_bit = enable; } else if (((ep_idx - 4) / 3 == 3)) { epn_ext_ctrl1->fields.epn9_direction_bit = enable; } else { LOG_ERR("Invalid endpoint 0x%x for control type 0x%x", ep, ctrl); return -EINVAL; } break; case EP_ENABLE: if (((ep_idx - 4) / 3 == 0)) { epn_ext_ctrl1->fields.epn0_enable_bit = enable; } else if (((ep_idx - 4) / 3 == 1)) { epn_ext_ctrl1->fields.epn3_enable_bit = enable; } else if (((ep_idx - 4) / 3 == 2)) { epn_ext_ctrl1->fields.epn6_enable_bit = enable; } else if (((ep_idx - 4) / 3 == 3)) { epn_ext_ctrl1->fields.epn9_enable_bit = enable; } else { LOG_ERR("Invalid endpoint 0x%x for control type 0x%x", ep, ctrl); return -EINVAL; } break; case EP_READY_ENABLE: unsigned int key; int idx = ((ep_idx - 4) % 3) + 1; key = irq_lock(); (enable) ? (ext_ctrl[idx].epn_ext_ctrl2 |= BIT((ep_idx - 4) / 3)) : (ext_ctrl[idx].epn_ext_ctrl2 &= ~BIT((ep_idx - 4) / 3)); ep_regs[ep_fifo].ep_ctrl.fields.ready_bit = enable; irq_unlock(key); break; default: LOG_ERR("Unknown control type 0x%x", ctrl); return -EINVAL; } return 0; } static int it82xx2_usb_ep_ctrl(uint8_t ep, enum it82xx2_ep_ctrl ctrl, bool enable) { struct usb_it82xx2_regs *const usb_regs = it82xx2_get_usb_regs(); struct it82xx2_usb_ep_regs *ep_regs = usb_regs->usb_ep_regs; uint8_t ep_idx = USB_EP_GET_IDX(ep); if (IT8XXX2_IS_EXTEND_ENDPOINT(ep_idx)) { return -EINVAL; } switch (ctrl) { case EP_IN_DIRECTION_SET: ep_regs[ep_idx].ep_ctrl.fields.direction_bit = enable; break; case EP_STALL_SEND: ep_regs[ep_idx].ep_ctrl.fields.send_stall_bit = enable; break; case EP_STALL_CHECK: return ep_regs[ep_idx].ep_ctrl.fields.send_stall_bit; case EP_IOS_ENABLE: ep_regs[ep_idx].ep_ctrl.fields.iso_enable_bit = enable; break; case EP_ENABLE: ep_regs[ep_idx].ep_ctrl.fields.enable_bit = enable; break; case EP_READY_ENABLE: unsigned int key; key = irq_lock(); ep_regs[ep_idx].ep_ctrl.fields.ready_bit = enable; irq_unlock(key); break; case EP_DATA_SEQ_1: 
ep_regs[ep_idx].ep_ctrl.fields.outdata_sequence_bit = enable; break; case EP_DATA_SEQ_TOGGLE: if (!enable) { break; } if (ep_regs[ep_idx].ep_ctrl.fields.outdata_sequence_bit) { ep_regs[ep_idx].ep_ctrl.fields.outdata_sequence_bit = 0; } else { ep_regs[ep_idx].ep_ctrl.fields.outdata_sequence_bit = 1; } break; default: LOG_ERR("Unknown control type 0x%x", ctrl); return -EINVAL; } return 0; } static int it82xx2_usb_set_ep_ctrl(uint8_t ep, enum it82xx2_ep_ctrl ctrl, bool enable) { uint8_t ep_idx = USB_EP_GET_IDX(ep); int ret = 0; if (IT8XXX2_IS_EXTEND_ENDPOINT(ep_idx)) { ret = it82xx2_usb_extend_ep_ctrl(ep, ctrl, enable); } else { ret = it82xx2_usb_ep_ctrl(ep, ctrl, enable); } return ret; } static int it82xx2_usb_dc_ip_init(void) { struct usb_it82xx2_regs *const usb_regs = it82xx2_get_usb_regs(); /* Reset Device Controller */ usb_regs->host_device_control = RESET_CORE; k_msleep(1); usb_regs->port0_misc_control &= ~(PULL_DOWN_EN); usb_regs->port1_misc_control &= ~(PULL_DOWN_EN); /* clear reset bit */ usb_regs->host_device_control = 0; usb_regs->dc_interrupt_status = DC_TRANS_DONE | DC_RESET_EVENT | DC_SOF_RECEIVED; usb_regs->dc_interrupt_mask = 0x00; usb_regs->dc_interrupt_mask = DC_TRANS_DONE | DC_RESET_EVENT | DC_SOF_RECEIVED; usb_regs->dc_address = DC_ADDR_NULL; return 0; } static int it82xx2_usb_dc_attach_init(void) { struct gctrl_it8xxx2_regs *const gctrl_regs = GCTRL_IT8XXX2_REGS_BASE; /* * Disable USB debug path , prevent CPU enter * JTAG mode and then reset by USB command. 
*/ gctrl_regs->GCTRL_MCCR &= ~(IT8XXX2_GCTRL_MCCR_USB_EN); gctrl_regs->gctrl_pmer2 |= IT8XXX2_GCTRL_PMER2_USB_PAD_EN; return it82xx2_usb_dc_ip_init(); } /* Check the condition that SETUP_TOKEN following OUT_TOKEN and return it */ static bool it82xx2_check_setup_following_out(void) { struct usb_it82xx2_regs *const usb_regs = it82xx2_get_usb_regs(); struct it82xx2_usb_ep_regs *ep_regs = usb_regs->usb_ep_regs; struct it82xx2_usb_ep_fifo_regs *ff_regs = usb_regs->fifo_regs; return ((ep_regs[EP0].ep_transtype_sts & DC_ALL_TRANS) == 0 || (udata0.last_token == IN_TOKEN && ff_regs[EP0].ep_rx_fifo_dcnt_lsb == SETUP_DATA_CNT)); } static inline void it82xx2_handler_setup(uint8_t fifo_idx, uint8_t ep_ctrl) { struct usb_it82xx2_regs *const usb_regs = it82xx2_get_usb_regs(); struct it82xx2_usb_ep_regs *ep_regs = usb_regs->usb_ep_regs; struct it82xx2_usb_ep_fifo_regs *ff_regs = usb_regs->fifo_regs; uint8_t ep_idx = fifo_idx; /* wrong trans */ if (ep_ctrl & EP_SEND_STALL) { ep_regs[fifo_idx].ep_ctrl.fields.send_stall_bit = 0; udata0.st_state = STALL_SEND; ff_regs[fifo_idx].ep_rx_fifo_ctrl = FIFO_FORCE_EMPTY; LOG_DBG("Clear Stall Bit & RX FIFO"); return; } if (udata0.st_state == DIN_ST) { /* setup -> in(data) -> out(status) */ udata0.last_token = udata0.now_token; udata0.now_token = OUT_TOKEN; udata0.st_state = STATUS_ST; udata0.ep_data[ep_idx].cb_out(ep_idx | USB_EP_DIR_OUT, USB_DC_EP_DATA_OUT); } else if (udata0.st_state == DOUT_ST || udata0.st_state == SETUP_ST) { /* setup -> out(data) -> in(status) * or * setup -> in(status) */ udata0.last_token = udata0.now_token; udata0.now_token = IN_TOKEN; udata0.st_state = STATUS_ST; udata0.ep_data[ep_idx].cb_in(ep_idx | USB_EP_DIR_IN, USB_DC_EP_DATA_IN); } udata0.last_token = udata0.now_token; udata0.now_token = SETUP_TOKEN; udata0.st_state = SETUP_ST; ep_regs[fifo_idx].ep_ctrl.fields.outdata_sequence_bit = 1; udata0.ep_data[ep_idx].cb_out(ep_idx | USB_EP_DIR_OUT, USB_DC_EP_SETUP); /* Set ready bit to no-data control in */ if 
(udata0.no_data_ctrl) { it82xx2_usb_set_ep_ctrl(ep_idx, EP_READY_ENABLE, true); udata0.no_data_ctrl = false; } } static inline void it82xx2_handler_in(const uint8_t ep_idx, const uint8_t ep_ctrl) { struct usb_it82xx2_regs *const usb_regs = it82xx2_get_usb_regs(); struct it82xx2_usb_ep_regs *ep_regs = usb_regs->usb_ep_regs; if (ep_idx == 0) { if (ep_ctrl & EP_SEND_STALL) { ep_regs[ep_idx].ep_ctrl.fields.send_stall_bit = 0; udata0.st_state = STALL_SEND; LOG_DBG("Clear Stall Bit"); return; } if (udata0.st_state >= STATUS_ST) { return; } udata0.last_token = udata0.now_token; udata0.now_token = IN_TOKEN; if (udata0.addr != DC_ADDR_NULL && udata0.addr != usb_regs->dc_address) { usb_regs->dc_address = udata0.addr; LOG_DBG("Address Is Set Successfully"); } if (udata0.st_state == DOUT_ST) { /* setup -> out(data) -> in(status) */ udata0.st_state = STATUS_ST; } else if (udata0.ep_data[ep_idx].remaining == 0 && udata0.st_state == SETUP_ST) { /* setup -> in(status) */ udata0.st_state = STATUS_ST; } else { /* setup -> in(data) */ udata0.st_state = DIN_ST; } } it82xx2_usb_set_ep_ctrl(ep_idx, EP_DATA_SEQ_TOGGLE, true); if (udata0.ep_data[ep_idx].cb_in) { udata0.ep_data[ep_idx].cb_in(ep_idx | USB_EP_DIR_IN, USB_DC_EP_DATA_IN); } if (ep_idx != 0) { uint8_t ep_fifo = ep_fifo_res[ep_idx % FIFO_NUM]; /* clear fifo ctrl registers when IN transaction is completed */ it82xx2_usb_fifo_ctrl(ep_idx, true); k_sem_give(&udata0.fifo_sem[ep_fifo - 1]); } else { if (udata0.st_state == DIN_ST && udata0.ep_data[ep_idx].remaining == 0) { it82xx2_usb_set_ep_ctrl(ep_idx, EP_READY_ENABLE, true); } } } static inline void it82xx2_handler_out(const uint8_t ep_idx) { struct usb_it82xx2_regs *const usb_regs = it82xx2_get_usb_regs(); struct it82xx2_usb_ep_regs *ep_regs = usb_regs->usb_ep_regs; if (ep_idx == 0) { /* ep0 wrong enter check */ if (udata0.st_state >= STATUS_ST) { return; } udata0.last_token = udata0.now_token; udata0.now_token = OUT_TOKEN; if (udata0.st_state == SETUP_ST) { /* setup -> out(data) 
*/ udata0.st_state = DOUT_ST; } else { /* setup -> in(data) -> out(status) */ udata0.st_state = STATUS_ST; } } if (udata0.ep_data[ep_idx].cb_out) { udata0.ep_data[ep_idx].cb_out(ep_idx, USB_DC_EP_DATA_OUT); } if (ep_idx == 0) { /* SETUP_TOKEN follow OUT_TOKEN */ if (it82xx2_check_setup_following_out()) { udata0.last_token = udata0.now_token; udata0.now_token = SETUP_TOKEN; udata0.st_state = SETUP_ST; ep_regs[ep_idx].ep_ctrl.fields.outdata_sequence_bit = 1; udata0.ep_data[ep_idx].cb_out(ep_idx | USB_EP_DIR_OUT, USB_DC_EP_SETUP); if (udata0.no_data_ctrl) { it82xx2_usb_set_ep_ctrl(ep_idx, EP_READY_ENABLE, true); udata0.no_data_ctrl = false; } } } } static bool get_extend_enable_bit(const uint8_t ep_idx) { union epn_extend_ctrl1_reg *epn_ext_ctrl1 = NULL; bool enable; epn_ext_ctrl1 = (union epn_extend_ctrl1_reg *)it82xx2_get_ext_ctrl(ep_idx, EP_ENABLE); if (((ep_idx - 4) / 3 == 0)) { enable = (epn_ext_ctrl1->fields.epn0_enable_bit != 0); } else if (((ep_idx - 4) / 3 == 1)) { enable = (epn_ext_ctrl1->fields.epn3_enable_bit != 0); } else if (((ep_idx - 4) / 3 == 2)) { enable = (epn_ext_ctrl1->fields.epn6_enable_bit != 0); } else { enable = (epn_ext_ctrl1->fields.epn9_enable_bit != 0); } return enable; } static bool get_extend_ready_bit(const uint8_t ep_idx) { struct usb_it82xx2_regs *const usb_regs = it82xx2_get_usb_regs(); struct epn_ext_ctrl_regs *ext_ctrl = usb_regs->fifo_regs[EP_EXT_REGS_DX].ext_0_3.epn_ext_ctrl; int idx = ((ep_idx - 4) % 3) + 1; return ((ext_ctrl[idx].epn_ext_ctrl2 & BIT((ep_idx - 4) / 3)) != 0); } static uint16_t get_fifo_ctrl(const uint8_t fifo_idx) { struct usb_it82xx2_regs *const usb_regs = it82xx2_get_usb_regs(); volatile uint8_t *ep_fifo_ctrl = usb_regs->fifo_regs[EP_EXT_REGS_BX].fifo_ctrl.ep_fifo_ctrl; uint8_t fifon_ctrl = (fifo_idx - 1) * 2; if (fifo_idx == 0) { LOG_ERR("Invalid fifo_idx 0x%x", fifo_idx); return 0; } return (ep_fifo_ctrl[fifon_ctrl + 1] << 8 | ep_fifo_ctrl[fifon_ctrl]); } static bool it82xx2_usb_fake_token(const uint8_t 
ep_idx, uint8_t *token_type) { struct usb_it82xx2_regs *const usb_regs = it82xx2_get_usb_regs(); struct it82xx2_usb_ep_regs *ep_regs = usb_regs->usb_ep_regs; bool is_fake = false; bool enable_bit, ready_bit; uint8_t ep_fifo = (ep_idx > 0) ? (ep_fifo_res[ep_idx % FIFO_NUM]) : 0; if (IT8XXX2_IS_EXTEND_ENDPOINT(ep_idx)) { enable_bit = get_extend_enable_bit(ep_idx); ready_bit = get_extend_ready_bit(ep_idx); } else { enable_bit = (ep_regs[ep_idx].ep_ctrl.fields.enable_bit != 0); ready_bit = (ep_regs[ep_idx].ep_ctrl.fields.ready_bit != 0); } /* The enable bit is set and the ready bit is cleared if the * transaction is completed. */ if (!enable_bit || ready_bit) { return true; } *token_type = ep_regs[ep_fifo].ep_transtype_sts & DC_ALL_TRANS; if (ep_idx == 0) { return false; } switch (*token_type) { case DC_IN_TRANS: if (get_fifo_ctrl(ep_fifo) != BIT(ep_idx) || udata0.ep_data[ep_idx].ep_status != EP_CONFIG_IN) { is_fake = true; } break; case DC_OUTDATA_TRANS: if (!udata0.fifo_ready[ep_fifo - 1] || udata0.ep_data[ep_idx].ep_status != EP_CONFIG_OUT) { is_fake = true; } else { udata0.fifo_ready[ep_fifo - 1] = false; } break; case DC_SETUP_TRANS: __fallthrough; default: is_fake = true; break; } return is_fake; } static void it82xx2_usb_dc_trans_done(void) { struct usb_it82xx2_regs *const usb_regs = it82xx2_get_usb_regs(); struct it82xx2_usb_ep_regs *ep_regs = usb_regs->usb_ep_regs; struct epn_ext_ctrl_regs *epn_ext_ctrl = usb_regs->fifo_regs[EP_EXT_REGS_DX].ext_0_3.epn_ext_ctrl; for (uint8_t fifo_idx = 0; fifo_idx < 4; fifo_idx++) { uint8_t ep_ctrl = ep_regs[fifo_idx].ep_ctrl.value; uint8_t ep_idx, token_type; if (fifo_idx == 0) { ep_idx = 0; } else { ep_idx = (epn_ext_ctrl[fifo_idx].epn_ext_ctrl2 & COMPLETED_TRANS) >> 4; if (ep_idx == 0) { continue; } } if (!it82xx2_usb_fake_token(ep_idx, &token_type)) { switch (token_type) { case DC_SETUP_TRANS: it82xx2_handler_setup(fifo_idx, ep_ctrl); break; case DC_IN_TRANS: it82xx2_handler_in(ep_idx, ep_ctrl); break; case 
DC_OUTDATA_TRANS: it82xx2_handler_out(ep_idx); break; default: break; } } } } static void it82xx2_usb_dc_isr(void) { struct usb_it82xx2_regs *const usb_regs = it82xx2_get_usb_regs(); uint8_t status = usb_regs->dc_interrupt_status & usb_regs->dc_interrupt_mask; /* mask non enable int */ /* reset */ if (status & DC_RESET_EVENT) { if ((usb_regs->dc_line_status & RX_LINE_STATE_MASK) == RX_LINE_RESET) { usb_dc_reset(); usb_regs->dc_interrupt_status = DC_RESET_EVENT; return; } else { usb_regs->dc_interrupt_status = DC_RESET_EVENT; } } /* sof received */ if (status & DC_SOF_RECEIVED) { it82xx2_enable_sof_int(false); k_work_reschedule(&udata0.check_suspended_work, K_MSEC(5)); } /* transaction done */ if (status & DC_TRANS_DONE) { /* clear interrupt before new transaction */ usb_regs->dc_interrupt_status = DC_TRANS_DONE; it82xx2_usb_dc_trans_done(); return; } } static void suspended_check_handler(struct k_work *item) { struct k_work_delayable *dwork = k_work_delayable_from_work(item); struct usb_it82xx2_data *udata = CONTAINER_OF(dwork, struct usb_it82xx2_data, check_suspended_work); struct usb_it82xx2_regs *const usb_regs = it82xx2_get_usb_regs(); if (usb_regs->dc_interrupt_status & DC_SOF_RECEIVED) { usb_regs->dc_interrupt_status = DC_SOF_RECEIVED; if (udata->suspended) { if (udata->usb_status_cb) { (*(udata->usb_status_cb))(USB_DC_RESUME, NULL); } udata->suspended = false; k_sem_give(&udata->suspended_sem); } k_work_reschedule(&udata->check_suspended_work, K_MSEC(5)); return; } it82xx2_enable_sof_int(true); if (!udata->suspended) { if (udata->usb_status_cb) { (*(udata->usb_status_cb))(USB_DC_SUSPEND, NULL); } udata->suspended = true; it82xx2_enable_wu90_irq(udata->dev, true); it82xx2_enable_standby_state(true); k_sem_reset(&udata->suspended_sem); } } /* * USB Device Controller API */ int usb_dc_attach(void) { int ret; struct usb_it82xx2_regs *const usb_regs = it82xx2_get_usb_regs(); if (udata0.attached) { LOG_DBG("Already Attached"); return 0; } LOG_DBG("Attached"); ret 
= it82xx2_usb_dc_attach_init(); if (ret) { return ret; } for (uint8_t idx = 0; idx < MAX_NUM_ENDPOINTS; idx++) { udata0.ep_data[idx].ep_status = EP_INIT; } udata0.attached = 1U; /* init fifo ready status */ udata0.fifo_ready[0] = false; udata0.fifo_ready[1] = false; udata0.fifo_ready[2] = false; k_sem_init(&udata0.fifo_sem[0], 1, 1); k_sem_init(&udata0.fifo_sem[1], 1, 1); k_sem_init(&udata0.fifo_sem[2], 1, 1); k_sem_init(&udata0.suspended_sem, 0, 1); k_work_init_delayable(&udata0.check_suspended_work, suspended_check_handler); /* Connect USB interrupt */ IRQ_CONNECT(IT8XXX2_USB_IRQ, 0, it82xx2_usb_dc_isr, 0, 0); usb_regs->dc_control = DC_GLOBAL_ENABLE | DC_FULL_SPEED_LINE_POLARITY | DC_FULL_SPEED_LINE_RATE | DC_CONNECT_TO_HOST; /* Enable USB D+ and USB interrupts */ it82xx2_enable_wu90_irq(udata0.dev, true); irq_enable(IT8XXX2_USB_IRQ); return 0; } int usb_dc_detach(void) { struct usb_it82xx2_regs *const usb_regs = it82xx2_get_usb_regs(); if (!udata0.attached) { LOG_DBG("Already Detached"); return 0; } LOG_DBG("Detached"); irq_disable(IT8XXX2_USB_IRQ); /* stop pull-up D+ D-*/ usb_regs->dc_control &= ~DC_CONNECT_TO_HOST; udata0.attached = 0U; return 0; } int usb_dc_reset(void) { struct usb_it82xx2_regs *const usb_regs = it82xx2_get_usb_regs(); struct it82xx2_usb_ep_regs *ep_regs = usb_regs->usb_ep_regs; struct it82xx2_usb_ep_fifo_regs *ff_regs = usb_regs->fifo_regs; LOG_DBG("USB Device Reset"); ff_regs[EP0].ep_rx_fifo_ctrl = FIFO_FORCE_EMPTY; ff_regs[EP0].ep_tx_fifo_ctrl = FIFO_FORCE_EMPTY; for (uint8_t idx = 1; idx < 4; idx++) { if (udata0.ep_data[idx].ep_status > EP_CHECK) { ff_regs[idx].ep_rx_fifo_ctrl = FIFO_FORCE_EMPTY; ff_regs[idx].ep_tx_fifo_ctrl = FIFO_FORCE_EMPTY; } } ep_regs[0].ep_ctrl.value = ENDPOINT_EN; usb_regs->dc_address = DC_ADDR_NULL; udata0.addr = DC_ADDR_NULL; usb_regs->dc_interrupt_status = DC_NAK_SENT_INT | DC_SOF_RECEIVED; if (udata0.usb_status_cb) { (*(udata0.usb_status_cb))(USB_DC_RESET, NULL); } return 0; } int usb_dc_set_address(const 
uint8_t addr) { LOG_DBG("Set Address(0x%02x) to Data", addr); udata0.addr = addr & DC_ADDR_MASK; return 0; } void usb_dc_set_status_callback(const usb_dc_status_callback cb) { udata0.usb_status_cb = cb; } int usb_dc_ep_check_cap(const struct usb_dc_ep_cfg_data * const cfg) { uint8_t ep_idx = USB_EP_GET_IDX(cfg->ep_addr); bool in = USB_EP_DIR_IS_IN(cfg->ep_addr); if ((cfg->ep_type == USB_DC_EP_CONTROL) && ep_idx > EP0) { LOG_ERR("Invalid Endpoint Configuration"); return -EINVAL; } if (ep_idx >= MAX_NUM_ENDPOINTS) { LOG_WRN("Invalid Endpoint Number 0x%02x", cfg->ep_addr); return -EINVAL; } if ((ep_idx != 0) && (!in && ep_idx % FIFO_NUM != 2)) { LOG_WRN("Invalid Endpoint Number 0x%02x", cfg->ep_addr); return -EINVAL; } if ((ep_idx != 0) && (in && ep_idx % FIFO_NUM == 2)) { LOG_WRN("Invalid Endpoint Number 0x%02x", cfg->ep_addr); return -EINVAL; } if (udata0.ep_data[ep_idx].ep_status > EP_INIT) { LOG_WRN("EP%d have been used", ep_idx); return -EINVAL; } if (ep_idx > EP0) { udata0.ep_data[ep_idx].mps = cfg->ep_mps; } udata0.ep_data[ep_idx].ep_status = EP_CHECK; LOG_DBG("Check cap(%02x)", cfg->ep_addr); return 0; } int usb_dc_ep_configure(const struct usb_dc_ep_cfg_data *const cfg) { uint8_t ep_idx = USB_EP_GET_IDX(cfg->ep_addr); bool in = USB_EP_DIR_IS_IN(cfg->ep_addr); if (!udata0.attached || ep_idx >= MAX_NUM_ENDPOINTS) { LOG_DBG("Not attached / Invalid Endpoint: 0x%X", cfg->ep_addr); return -EINVAL; } if (!cfg->ep_mps) { LOG_DBG("Wrong EP or Descriptor"); return -EINVAL; } udata0.ep_data[ep_idx].ep_status = EP_CONFIG; udata0.ep_data[ep_idx].mps = cfg->ep_mps; LOG_DBG("ep_status: %d, mps: %d", udata0.ep_data[ep_idx].ep_status, udata0.ep_data[ep_idx].mps); if (!(ep_idx > EP0)) { return 0; } it82xx2_usb_set_ep_ctrl(ep_idx, EP_IN_DIRECTION_SET, in); if (in) { if (IT8XXX2_IS_EXTEND_ENDPOINT(ep_idx)) { it82xx2_usb_extend_ep_ctrl(ep_idx, EP_DATA_SEQ_1, false); } udata0.ep_data[ep_idx].ep_status = EP_CONFIG_IN; } else { udata0.ep_data[ep_idx].ep_status = EP_CONFIG_OUT; 
it82xx2_usb_fifo_ctrl(cfg->ep_addr, false); } switch (cfg->ep_type) { case USB_DC_EP_CONTROL: return -EINVAL; case USB_DC_EP_ISOCHRONOUS: it82xx2_usb_set_ep_ctrl(ep_idx, EP_IOS_ENABLE, true); break; case USB_DC_EP_BULK: __fallthrough; case USB_DC_EP_INTERRUPT: __fallthrough; default: it82xx2_usb_set_ep_ctrl(ep_idx, EP_IOS_ENABLE, false); break; } udata0.ep_data[ep_idx].ep_type = cfg->ep_type; LOG_DBG("EP%d Configured: 0x%2X(%d)", ep_idx, !!(in), cfg->ep_type); return 0; } int usb_dc_ep_set_callback(const uint8_t ep, const usb_dc_ep_callback cb) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (!udata0.attached || ep_idx >= MAX_NUM_ENDPOINTS) { LOG_ERR("(%d)Not attached / Invalid endpoint: EP 0x%x", __LINE__, ep); return -EINVAL; } if (cb == NULL) { LOG_ERR("(%d): NO callback function", __LINE__); return -EINVAL; } LOG_DBG("EP%d set callback: %d", ep_idx, !!(ep & USB_EP_DIR_IN)); if (USB_EP_DIR_IS_IN(ep)) { udata0.ep_data[ep_idx].cb_in = cb; } else { udata0.ep_data[ep_idx].cb_out = cb; } return 0; } int usb_dc_ep_enable(const uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); int ret = 0; if (!udata0.attached || ep_idx >= MAX_NUM_ENDPOINTS) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep_idx); return -EINVAL; } if (IT8XXX2_IS_EXTEND_ENDPOINT(ep_idx)) { uint8_t ep_fifo = ep_fifo_res[ep_idx % FIFO_NUM]; it82xx2_usb_set_ep_ctrl(ep_fifo, EP_ENABLE, true); } ret = it82xx2_usb_set_ep_ctrl(ep_idx, EP_ENABLE, true); if (ret < 0) { return ret; } LOG_DBG("Endpoint 0x%02x is enabled", ep); return 0; } int usb_dc_ep_disable(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (!udata0.attached || ep_idx >= MAX_NUM_ENDPOINTS) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep_idx); return -EINVAL; } return it82xx2_usb_set_ep_ctrl(ep_idx, EP_ENABLE, false); } int usb_dc_ep_set_stall(const uint8_t ep) { struct usb_it82xx2_regs *const usb_regs = it82xx2_get_usb_regs(); struct it82xx2_usb_ep_regs *ep_regs = usb_regs->usb_ep_regs; uint8_t ep_idx = USB_EP_GET_IDX(ep); struct 
gctrl_it8xxx2_regs *const gctrl_regs = GCTRL_IT8XXX2_REGS_BASE; if (ep_idx >= MAX_NUM_ENDPOINTS) { return -EINVAL; } it82xx2_usb_set_ep_ctrl(ep_idx, EP_STALL_SEND, true); if (ep_idx == 0) { uint32_t idx = 0; it82xx2_usb_set_ep_ctrl(ep_idx, EP_READY_ENABLE, true); /* polling if stall send for 3ms */ while (idx < 198 && !(ep_regs[ep_idx].ep_status & DC_STALL_SENT)) { /* wait 15.15us */ gctrl_regs->GCTRL_WNCKR = 0; idx++; } if (idx < 198) { ep_regs[ep_idx].ep_ctrl.fields.send_stall_bit = 0; } udata0.no_data_ctrl = false; udata0.st_state = STALL_SEND; } LOG_DBG("EP(%d) ctrl: 0x%02x", ep_idx, ep_regs[ep_idx].ep_ctrl.value); LOG_DBG("EP(%d) Set Stall", ep_idx); return 0; } int usb_dc_ep_clear_stall(const uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (ep_idx >= MAX_NUM_ENDPOINTS) { return -EINVAL; } it82xx2_usb_set_ep_ctrl(ep_idx, EP_STALL_SEND, false); LOG_DBG("EP(%d) clear stall", ep_idx); return 0; } int usb_dc_ep_is_stalled(const uint8_t ep, uint8_t *stalled) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if ((!stalled) || (ep_idx >= MAX_NUM_ENDPOINTS)) { return -EINVAL; } *stalled = it82xx2_usb_set_ep_ctrl(ep_idx, EP_STALL_CHECK, true); return 0; } int usb_dc_ep_halt(uint8_t ep) { return usb_dc_ep_set_stall(ep); } int usb_dc_ep_flush(uint8_t ep) { struct usb_it82xx2_regs *const usb_regs = it82xx2_get_usb_regs(); struct it82xx2_usb_ep_fifo_regs *ff_regs = usb_regs->fifo_regs; uint8_t ep_idx = USB_EP_GET_IDX(ep); uint8_t ep_fifo = (ep_idx > 0) ? 
(ep_fifo_res[ep_idx % FIFO_NUM]) : 0; if (ep_idx >= MAX_NUM_ENDPOINTS) { return -EINVAL; } if (USB_EP_DIR_IS_IN(ep)) { ff_regs[ep_fifo].ep_tx_fifo_ctrl = FIFO_FORCE_EMPTY; } else { ff_regs[ep_fifo].ep_rx_fifo_ctrl = FIFO_FORCE_EMPTY; } return 0; } int usb_dc_ep_write(uint8_t ep, const uint8_t *buf, uint32_t data_len, uint32_t *ret_bytes) { struct usb_it82xx2_regs *const usb_regs = it82xx2_get_usb_regs(); struct it82xx2_usb_ep_fifo_regs *ff_regs = usb_regs->fifo_regs; unsigned int key; uint8_t ep_idx = USB_EP_GET_IDX(ep); uint8_t ep_fifo = (ep_idx > 0) ? (ep_fifo_res[ep_idx % FIFO_NUM]) : 0; if (ep_idx >= MAX_NUM_ENDPOINTS) { return -EINVAL; } if (ep_idx == EP0) { if ((udata0.now_token == SETUP_TOKEN) && (data_len == 0)) { return 0; } /* clear fifo before write*/ ff_regs[ep_idx].ep_tx_fifo_ctrl = FIFO_FORCE_EMPTY; if (udata0.st_state == SETUP_ST) { udata0.st_state = DIN_ST; } } else { k_sem_take(&udata0.fifo_sem[ep_fifo - 1], K_FOREVER); key = irq_lock(); it82xx2_usb_fifo_ctrl(ep, false); } if (data_len > udata0.ep_data[ep_idx].mps) { for (uint32_t idx = 0; idx < udata0.ep_data[ep_idx].mps; idx++) { ff_regs[ep_fifo].ep_tx_fifo_data = buf[idx]; } *ret_bytes = udata0.ep_data[ep_idx].mps; udata0.ep_data[ep_idx].remaining = data_len - udata0.ep_data[ep_idx].mps; LOG_DBG("data_len: %d, Write Max Packets to TX FIFO(%d)", data_len, ep_idx); } else { for (uint32_t idx = 0; idx < data_len; idx++) { ff_regs[ep_fifo].ep_tx_fifo_data = buf[idx]; } *ret_bytes = data_len; udata0.ep_data[ep_idx].remaining = 0; LOG_DBG("Write %d Packets to TX FIFO(%d)", data_len, ep_idx); } it82xx2_usb_set_ep_ctrl(ep_idx, EP_READY_ENABLE, true); if (ep_idx != 0) { irq_unlock(key); } LOG_DBG("Set EP%d Ready(%d)", ep_idx, __LINE__); return 0; } /* Read data from an OUT endpoint */ int usb_dc_ep_read(uint8_t ep, uint8_t *buf, uint32_t max_data_len, uint32_t *read_bytes) { struct usb_it82xx2_regs *const usb_regs = it82xx2_get_usb_regs(); struct it82xx2_usb_ep_regs *ep_regs = usb_regs->usb_ep_regs; 
struct it82xx2_usb_ep_fifo_regs *ff_regs = usb_regs->fifo_regs; uint8_t ep_idx = USB_EP_GET_IDX(ep); uint8_t ep_fifo = (ep_idx > 0) ? (ep_fifo_res[ep_idx % FIFO_NUM]) : 0; uint16_t rx_fifo_len; if (ep_idx >= MAX_NUM_ENDPOINTS) { return -EINVAL; } if (ep_regs[ep_fifo].ep_status & EP_STATUS_ERROR) { LOG_WRN("fifo_%d error status: 0x%02x", ep_fifo, ep_regs[ep_fifo].ep_status); } rx_fifo_len = (uint16_t)ff_regs[ep_fifo].ep_rx_fifo_dcnt_lsb + (((uint16_t)ff_regs[ep_fifo].ep_rx_fifo_dcnt_msb) << 8); if (!buf && !max_data_len) { /* * When both buffer and max data to read are zero return * the available data length in buffer. */ if (read_bytes) { *read_bytes = rx_fifo_len; } if (ep_idx > 0 && !rx_fifo_len) { udata0.fifo_ready[ep_fifo - 1] = true; it82xx2_usb_set_ep_ctrl(ep_idx, EP_READY_ENABLE, true); } return 0; } if (ep_idx == 0) { /* Prevent wrong read_bytes cause memory error * if EP0 is in OUT status stage */ if (udata0.st_state == STATUS_ST) { *read_bytes = 0; return 0; } else if (udata0.now_token == SETUP_TOKEN) { if (rx_fifo_len == 0) { LOG_ERR("Setup length 0, reset to 8"); rx_fifo_len = 8; } if (rx_fifo_len != 8) { LOG_ERR("Setup length: %d", rx_fifo_len); ff_regs[0].ep_rx_fifo_ctrl = FIFO_FORCE_EMPTY; return -EIO; } } } if (rx_fifo_len > max_data_len) { *read_bytes = max_data_len; for (uint32_t idx = 0; idx < max_data_len; idx++) { buf[idx] = ff_regs[ep_fifo].ep_rx_fifo_data; } LOG_DBG("Read Max (%d) Packets", max_data_len); } else { *read_bytes = rx_fifo_len; for (uint32_t idx = 0; idx < rx_fifo_len; idx++) { buf[idx] = ff_regs[ep_fifo].ep_rx_fifo_data; } if (ep_fifo == 0 && udata0.now_token == SETUP_TOKEN) { LOG_DBG("RX buf: (%x)(%x)(%x)(%x)(%x)(%x)(%x)(%x)", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]); } if (ep_fifo > EP0) { udata0.fifo_ready[ep_fifo - 1] = true; it82xx2_usb_set_ep_ctrl(ep_idx, EP_READY_ENABLE, true); } else if (udata0.now_token == SETUP_TOKEN) { if (!(buf[0] & USB_EP_DIR_MASK)) { /* request type: host-to-device transfer 
direction */ ff_regs[0].ep_tx_fifo_ctrl = FIFO_FORCE_EMPTY; if (buf[6] != 0 || buf[7] != 0) { /* set status IN after data OUT */ ep_regs[0].ep_ctrl.fields.outdata_sequence_bit = 1; it82xx2_usb_set_ep_ctrl(ep_idx, EP_READY_ENABLE, true); } else { /* no_data_ctrl status */ udata0.no_data_ctrl = true; } } } } return 0; } int usb_dc_ep_read_wait(uint8_t ep, uint8_t *buf, uint32_t max_data_len, uint32_t *read_bytes) { struct usb_it82xx2_regs *const usb_regs = it82xx2_get_usb_regs(); struct it82xx2_usb_ep_regs *ep_regs = usb_regs->usb_ep_regs; struct it82xx2_usb_ep_fifo_regs *ff_regs = usb_regs->fifo_regs; uint8_t ep_idx = USB_EP_GET_IDX(ep); uint8_t ep_fifo = (ep_idx > 0) ? (ep_fifo_res[ep_idx % FIFO_NUM]) : 0; uint16_t rx_fifo_len; if (ep_idx >= MAX_NUM_ENDPOINTS) { LOG_ERR("(%d): Wrong Endpoint Index/Address", __LINE__); return -EINVAL; } if (USB_EP_DIR_IS_IN(ep)) { LOG_ERR("Wrong Endpoint Direction"); return -EINVAL; } if (ep_regs[ep_fifo].ep_status & EP_STATUS_ERROR) { LOG_WRN("fifo_%d error status(%02x)", ep_fifo, ep_regs[ep_fifo].ep_status); } rx_fifo_len = (uint16_t)ff_regs[ep_fifo].ep_rx_fifo_dcnt_lsb + (((uint16_t)ff_regs[ep_fifo].ep_rx_fifo_dcnt_msb) << 8); LOG_DBG("ep_read_wait (EP: %d), len: %d", ep_idx, rx_fifo_len); *read_bytes = (rx_fifo_len > max_data_len) ? max_data_len : rx_fifo_len; for (uint32_t idx = 0; idx < *read_bytes; idx++) { buf[idx] = ff_regs[ep_fifo].ep_rx_fifo_data; } LOG_DBG("Read %d packets", *read_bytes); return 0; } int usb_dc_ep_read_continue(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); uint8_t ep_fifo = (ep_idx > 0) ? 
(ep_fifo_res[ep_idx % FIFO_NUM]) : 0; if (ep_idx >= MAX_NUM_ENDPOINTS) { LOG_ERR("(%d): Wrong Endpoint Index/Address", __LINE__); return -EINVAL; } if (USB_EP_DIR_IS_IN(ep)) { LOG_ERR("Wrong Endpoint Direction"); return -EINVAL; } udata0.fifo_ready[ep_fifo - 1] = true; it82xx2_usb_set_ep_ctrl(ep_idx, EP_READY_ENABLE, true); LOG_DBG("EP(%d) Read Continue", ep_idx); return 0; } int usb_dc_ep_mps(const uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (ep_idx >= MAX_NUM_ENDPOINTS) { LOG_ERR("(%d): Wrong Endpoint Index/Address", __LINE__); return -EINVAL; } /* Not configured, return length 0 */ if (udata0.ep_data[ep_idx].ep_status < EP_CONFIG) { LOG_WRN("(%d)EP not set", __LINE__); return 0; } return udata0.ep_data[ep_idx].mps; } int usb_dc_wakeup_request(void) { int ret; struct usb_it82xx2_regs *const usb_regs = it82xx2_get_usb_regs(); if (udata0.suspended) { usb_regs->dc_control = DC_GLOBAL_ENABLE | DC_FULL_SPEED_LINE_POLARITY | DC_FULL_SPEED_LINE_RATE | DC_DIRECT_CONTROL | DC_TX_LINE_STATE_DM | DC_CONNECT_TO_HOST; /* The remote wakeup device must hold the resume signal for */ /* at least 1 ms but for no more than 15 ms */ k_msleep(2); usb_regs->dc_control = DC_GLOBAL_ENABLE | DC_FULL_SPEED_LINE_POLARITY | DC_FULL_SPEED_LINE_RATE | DC_CONNECT_TO_HOST; ret = k_sem_take(&udata0.suspended_sem, K_MSEC(500)); if (ret < 0) { LOG_ERR("failed to wake up host"); } } return 0; } static int it82xx2_usb_dc_init(const struct device *dev) { const struct usb_it82xx2_config *cfg = dev->config; int status = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); if (status < 0) { LOG_ERR("Failed to configure USB pins"); return status; } /* Initializing WU90 (USB D+) */ it8xxx2_usb_dc_wuc_init(dev); udata0.dev = dev; return 0; } DEVICE_DT_INST_DEFINE(0, &it82xx2_usb_dc_init, NULL, &udata0, &ucfg0, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, NULL); ```
/content/code_sandbox/drivers/usb/device/usb_dc_it82xx2.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
13,209
```c /* * */ /** * @file * @brief USB native_posix device driver */ #include <string.h> #include <stdio.h> #include <zephyr/kernel.h> #include <zephyr/sys/byteorder.h> #include <zephyr/drivers/usb/usb_dc.h> #include <zephyr/usb/usb_device.h> #include <zephyr/net/net_ip.h> #include "usb_dc_native_posix_adapt.h" #define LOG_LEVEL CONFIG_USB_DRIVER_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(native_posix); #define USBIP_IN_EP_NUM 8 #define USBIP_OUT_EP_NUM 8 #ifdef CONFIG_USB_NATIVE_POSIX_HS #define USBIP_MAX_PACKET_SIZE 512 #else /* CONFIG_USB_NATIVE_POSIX_HS */ #define USBIP_MAX_PACKET_SIZE 64 #endif /* !CONFIG_USB_NATIVE_POSIX_HS */ K_KERNEL_STACK_MEMBER(thread_stack, CONFIG_ARCH_POSIX_RECOMMENDED_STACK_SIZE); static struct k_thread thread; static void thread_main(void *a, void *b, void *c) { LOG_DBG(""); usbip_start(); } /* * USBIP private structures and logic initially copied from * Designware USB driver */ /* * USB endpoint private structure. */ struct usb_ep_ctrl_prv { uint8_t ep_ena; uint16_t mps; usb_dc_ep_callback cb; uint32_t data_len; uint8_t buf[64]; uint8_t buf_len; }; /* * USB controller private structure. 
*/ static struct usbip_ctrl_prv { usb_dc_status_callback status_cb; struct usb_ep_ctrl_prv in_ep_ctrl[USBIP_IN_EP_NUM]; struct usb_ep_ctrl_prv out_ep_ctrl[USBIP_OUT_EP_NUM]; uint8_t attached; } usbip_ctrl; static uint8_t usbip_ep_is_valid(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); /* Check if ep is valid */ if ((USB_EP_DIR_IS_OUT(ep)) && ep_idx < USBIP_OUT_EP_NUM) { return 1; } else if ((USB_EP_DIR_IS_IN(ep)) && ep_idx < USBIP_IN_EP_NUM) { return 1; } return 0; } static uint8_t usbip_ep_is_enabled(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); LOG_DBG("ep %x", ep); /* Check if ep enabled */ if ((USB_EP_DIR_IS_OUT(ep)) && usbip_ctrl.out_ep_ctrl[ep_idx].ep_ena) { return 1; } else if ((USB_EP_DIR_IS_IN(ep)) && usbip_ctrl.in_ep_ctrl[ep_idx].ep_ena) { return 1; } return 0; } int usb_dc_attach(void) { LOG_DBG(""); if (usbip_ctrl.attached) { LOG_WRN("Already attached"); return 0; } k_thread_create(&thread, thread_stack, CONFIG_ARCH_POSIX_RECOMMENDED_STACK_SIZE, thread_main, NULL, NULL, NULL, K_PRIO_COOP(2), 0, K_NO_WAIT); usbip_ctrl.attached = 1U; return 0; } int usb_dc_detach(void) { LOG_DBG(""); if (!usbip_ctrl.attached) { return 0; } usbip_ctrl.attached = 0U; return 0; } int usb_dc_reset(void) { LOG_DBG(""); /* Clear private data */ memset(&usbip_ctrl, 0, sizeof(usbip_ctrl)); return 0; } int usb_dc_set_address(const uint8_t addr) { LOG_DBG(""); return 0; } int usb_dc_ep_check_cap(const struct usb_dc_ep_cfg_data * const cfg) { uint8_t ep_idx = USB_EP_GET_IDX(cfg->ep_addr); LOG_DBG("ep %x, mps %d, type %d", cfg->ep_addr, cfg->ep_mps, cfg->ep_type); if ((cfg->ep_type == USB_DC_EP_CONTROL) && ep_idx) { LOG_ERR("invalid endpoint configuration"); return -1; } if (cfg->ep_mps > USBIP_MAX_PACKET_SIZE) { LOG_WRN("unsupported packet size"); return -1; } if ((USB_EP_DIR_IS_OUT(cfg->ep_addr)) && (ep_idx >= USBIP_OUT_EP_NUM)) { LOG_WRN("OUT endpoint address out of range"); return -1; } if ((USB_EP_DIR_IS_IN(cfg->ep_addr)) && (ep_idx >= USBIP_IN_EP_NUM)) { LOG_WRN("IN 
endpoint address out of range"); return -1; } return 0; } int usb_dc_ep_configure(const struct usb_dc_ep_cfg_data * const cfg) { uint16_t ep_mps = cfg->ep_mps; uint8_t ep = cfg->ep_addr; uint8_t ep_idx = USB_EP_GET_IDX(ep); if (usb_dc_ep_check_cap(cfg)) { return -EINVAL; } if (!usbip_ctrl.attached || !usbip_ep_is_valid(ep)) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep); return -EINVAL; } if (USB_EP_DIR_IS_OUT(ep)) { usbip_ctrl.out_ep_ctrl[ep_idx].mps = ep_mps; } else { usbip_ctrl.in_ep_ctrl[ep_idx].mps = ep_mps; } return 0; } int usb_dc_ep_set_stall(const uint8_t ep) { LOG_DBG("ep %x", ep); if (!usbip_ctrl.attached || !usbip_ep_is_valid(ep)) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep); return -EINVAL; } /* Use standard reply for now */ usb_dc_ep_write(0x80, NULL, 0, NULL); return 0; } int usb_dc_ep_clear_stall(const uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); LOG_DBG("ep %x", ep); if (!usbip_ctrl.attached || !usbip_ep_is_valid(ep)) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep); return -EINVAL; } if (!ep_idx) { /* Not possible to clear stall for EP0 */ return -EINVAL; } return 0; } int usb_dc_ep_halt(const uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); LOG_DBG("ep %x", ep); if (!usbip_ctrl.attached || !usbip_ep_is_valid(ep)) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep); return -EINVAL; } if (!ep_idx) { /* Cannot disable EP0, just set stall */ usb_dc_ep_set_stall(ep); } return 0; } int usb_dc_ep_is_stalled(const uint8_t ep, uint8_t *const stalled) { LOG_DBG("ep %x", ep); if (!usbip_ctrl.attached || !usbip_ep_is_valid(ep)) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep); return -EINVAL; } if (!stalled) { return -EINVAL; } return 0; } int usb_dc_ep_enable(const uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); LOG_DBG("ep %x", ep); if (!usbip_ctrl.attached || !usbip_ep_is_valid(ep)) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep); return -EINVAL; } /* Enable Ep */ if 
(USB_EP_DIR_IS_OUT(ep)) { usbip_ctrl.out_ep_ctrl[ep_idx].ep_ena = 1U; } else { usbip_ctrl.in_ep_ctrl[ep_idx].ep_ena = 1U; } return 0; } int usb_dc_ep_disable(const uint8_t ep) { LOG_DBG("ep %x", ep); if (!usbip_ep_is_valid(ep)) { LOG_ERR("Invalid endpoint: EP 0x%x", ep); return -EINVAL; } return 0; } int usb_dc_ep_flush(const uint8_t ep) { LOG_DBG("ep %x", ep); if (!usbip_ctrl.attached || !usbip_ep_is_valid(ep)) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep); return -EINVAL; } if (USB_EP_DIR_IS_OUT(ep)) { /* RX FIFO is global and cannot be flushed per EP */ return -EINVAL; } return 0; } int usb_dc_ep_write(const uint8_t ep, const uint8_t *const data, const uint32_t data_len, uint32_t * const ret_bytes) { LOG_DBG("ep %x len %u", ep, data_len); if (!usbip_ctrl.attached || !usbip_ep_is_valid(ep)) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep); return -EINVAL; } /* Check if IN ep */ if (USB_EP_GET_DIR(ep) != USB_EP_DIR_IN) { return -EINVAL; } /* Check if ep enabled */ if (!usbip_ep_is_enabled(ep)) { LOG_WRN("ep %x disabled", ep); return -EINVAL; } if (USB_EP_GET_IDX(ep) == 0) { if (!usbip_send_common(ep, data_len)) { return -EIO; } if (usbip_send(ep, data, data_len) != data_len) { return -EIO; } } else { uint8_t ep_idx = USB_EP_GET_IDX(ep); struct usb_ep_ctrl_prv *ctrl = &usbip_ctrl.in_ep_ctrl[ep_idx]; if (data_len > ARRAY_SIZE(ctrl->buf)) { return -EINVAL; } memcpy(ctrl->buf, data, data_len); ctrl->buf_len = data_len; } if (ret_bytes) { *ret_bytes = data_len; } return 0; } int usb_dc_ep_read_wait(uint8_t ep, uint8_t *data, uint32_t max_data_len, uint32_t *read_bytes) { uint8_t ep_idx = USB_EP_GET_IDX(ep); uint32_t to_copy; if (!usbip_ctrl.attached || !usbip_ep_is_valid(ep)) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep); return -EINVAL; } /* Check if OUT ep */ if (USB_EP_GET_DIR(ep) != USB_EP_DIR_OUT) { LOG_ERR("Wrong endpoint direction"); return -EINVAL; } /* Allow to read 0 bytes */ if (!data && max_data_len) { LOG_ERR("Wrong 
arguments"); return -EINVAL; } /* Check if ep enabled */ if (!usbip_ep_is_enabled(ep)) { LOG_ERR("Not enabled endpoint"); return -EINVAL; } if (data == NULL && max_data_len == 0 && read_bytes != NULL) { /* Return length of the available data in endpoint buffer */ *read_bytes = usbip_ctrl.out_ep_ctrl[ep_idx].data_len; return 0; } to_copy = MIN(usbip_ctrl.out_ep_ctrl[ep_idx].data_len, max_data_len); LOG_DBG("ep 0x%02x, to_copy %u", ep, to_copy); memcpy(data, usbip_ctrl.out_ep_ctrl[ep_idx].buf, to_copy); if (read_bytes) { *read_bytes = to_copy; } return 0; } int usb_dc_ep_read_continue(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (!usbip_ctrl.attached || !usbip_ep_is_valid(ep)) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep); return -EINVAL; } /* Check if OUT ep */ if (USB_EP_GET_DIR(ep) != USB_EP_DIR_OUT) { LOG_ERR("Wrong endpoint direction"); return -EINVAL; } if (!usbip_ctrl.out_ep_ctrl[ep_idx].data_len) { /* TODO: continue read */ /* usbip_prep_rx(ep_idx, 0); */ } return 0; } int usb_dc_ep_read(const uint8_t ep, uint8_t *const data, const uint32_t max_data_len, uint32_t * const read_bytes) { LOG_DBG("ep %x max_data_len %u", ep, max_data_len); if (usb_dc_ep_read_wait(ep, data, max_data_len, read_bytes) != 0) { return -EINVAL; } if (!data && !max_data_len) { /* When both buffer and max data to read are zero the above * call would fetch the data len and we simply return. 
*/ return 0; } if (usb_dc_ep_read_continue(ep) != 0) { return -EINVAL; } return 0; } int usb_dc_ep_set_callback(const uint8_t ep, const usb_dc_ep_callback cb) { uint8_t ep_idx = USB_EP_GET_IDX(ep); LOG_DBG("ep %x callback %p", ep, cb); if (!usbip_ctrl.attached || !usbip_ep_is_valid(ep)) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep); return -EINVAL; } if (USB_EP_DIR_IS_IN(ep)) { usbip_ctrl.in_ep_ctrl[ep_idx].cb = cb; } else { usbip_ctrl.out_ep_ctrl[ep_idx].cb = cb; } return 0; } void usb_dc_set_status_callback(const usb_dc_status_callback cb) { usbip_ctrl.status_cb = cb; } int usb_dc_ep_mps(const uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); LOG_DBG("ep %x", ep); if (!usbip_ctrl.attached || !usbip_ep_is_valid(ep)) { LOG_ERR("Not attached / Invalid endpoint: EP 0x%x", ep); return -EINVAL; } if (USB_EP_DIR_IS_OUT(ep)) { return usbip_ctrl.out_ep_ctrl[ep_idx].mps; } else { return usbip_ctrl.in_ep_ctrl[ep_idx].mps; } } int handle_usb_control(struct usbip_header *hdr) { uint8_t ep_idx = USB_EP_GET_IDX(ntohl(hdr->common.ep)); struct usb_ep_ctrl_prv *ep_ctrl; ep_ctrl = &usbip_ctrl.out_ep_ctrl[ep_idx]; if (ep_ctrl->cb == NULL) { LOG_ERR("Control endpoint callback not set"); return -EIO; } if ((ntohl(hdr->common.direction) == USBIP_DIR_IN) ^ USB_REQTYPE_GET_DIR(hdr->u.submit.bmRequestType)) { LOG_ERR("Failed to verify bmRequestType"); return -EIO; } ep_ctrl->data_len = 8; LOG_DBG("SETUP event ep 0x%02x %u", ep_idx, ep_ctrl->data_len); memcpy(ep_ctrl->buf, &hdr->u.submit.bmRequestType, ep_ctrl->data_len); ep_ctrl->cb(ep_idx, USB_DC_EP_SETUP); if (ntohl(hdr->common.direction) == USBIP_DIR_OUT) { uint32_t data_len = ntohl(hdr->u.submit.transfer_buffer_length); /* Data OUT stage availably */ if (data_len > ARRAY_SIZE(ep_ctrl->buf)) { return -EIO; } ep_ctrl->data_len = data_len; if (usbip_recv(ep_ctrl->buf, ep_ctrl->data_len) < 0) { return -EIO; } LOG_DBG("DATA OUT event ep 0x%02x %u", ep_idx, ep_ctrl->data_len); ep_ctrl->cb(ep_idx, USB_DC_EP_DATA_OUT); } return 
0; } int handle_usb_data(struct usbip_header *hdr) { uint8_t ep_idx = ntohl(hdr->common.ep); struct usb_ep_ctrl_prv *ep_ctrl; uint8_t ep; if (ntohl(hdr->common.direction) == USBIP_DIR_OUT) { uint32_t data_len; if (ep_idx >= USBIP_OUT_EP_NUM) { return -EINVAL; } ep_ctrl = &usbip_ctrl.out_ep_ctrl[ep_idx]; ep = ep_idx | USB_EP_DIR_OUT; data_len = ntohl(hdr->u.submit.transfer_buffer_length); if (data_len > ARRAY_SIZE(ep_ctrl->buf)) { return -EIO; } ep_ctrl->data_len = data_len; if (usbip_recv(ep_ctrl->buf, ep_ctrl->data_len) < 0) { return -EIO; } LOG_DBG("DATA OUT event ep 0x%02x %u", ep, ep_ctrl->data_len); ep_ctrl->cb(ep, USB_DC_EP_DATA_OUT); /* Send ACK reply */ if (!usbip_send_common(ep, ep_ctrl->data_len)) { return -EIO; } } else { if (ep_idx >= USBIP_IN_EP_NUM) { return -EINVAL; } ep_ctrl = &usbip_ctrl.in_ep_ctrl[ep_idx]; ep = ep_idx | USB_EP_DIR_IN; LOG_DBG("DATA IN event ep 0x%02x %u", ep, ep_ctrl->buf_len); /* Send queued data */ if (!usbip_send_common(ep, ep_ctrl->buf_len)) { return -EIO; } if (usbip_send(ep, ep_ctrl->buf, ep_ctrl->buf_len) != ep_ctrl->buf_len) { return -EIO; } LOG_HEXDUMP_DBG(ep_ctrl->buf, ep_ctrl->buf_len, ">"); ep_ctrl->cb(ep, USB_DC_EP_DATA_IN); ep_ctrl->buf_len = 0; } return 0; } ```
/content/code_sandbox/drivers/usb/device/usb_dc_native_posix.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,929
```c /* * */ /** * @file usb_dc_smartbond.c * @brief SmartBond USB device controller driver * */ #include <stdio.h> #include <string.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/usb/usb_dc.h> #include <zephyr/init.h> #include <zephyr/kernel.h> #include <zephyr/usb/usb_device.h> #include <DA1469xAB.h> #include <soc.h> #include <da1469x_clock.h> #include <da1469x_pd.h> #include <zephyr/logging/log.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/clock_control/smartbond_clock_control.h> #include <zephyr/pm/policy.h> #include <zephyr/drivers/dma.h> LOG_MODULE_REGISTER(usb_dc_smartbond, CONFIG_USB_DRIVER_LOG_LEVEL); /* USB device controller access from devicetree */ #define DT_DRV_COMPAT renesas_smartbond_usbd #define USB_IRQ DT_INST_IRQ_BY_IDX(0, 0, irq) #define USB_IRQ_PRI DT_INST_IRQ_BY_IDX(0, 0, priority) #define VBUS_IRQ DT_INST_IRQ_BY_IDX(0, 1, irq) #define VBUS_IRQ_PRI DT_INST_IRQ_BY_IDX(0, 1, priority) /* * Minimal transfer size needed to use DMA. For short transfers * it may be simpler to just fill hardware FIFO with data instead * of programming DMA registers. */ #define DMA_MIN_TRANSFER_SIZE DT_INST_PROP(0, dma_min_transfer_size) #define FIFO_READ_THRESHOLD DT_INST_PROP(0, fifo_read_threshold) /* Size of hardware RX and TX FIFO. 
*/ #define EP0_FIFO_SIZE 8 #define EP_FIFO_SIZE 64 #define EP0_OUT_BUF_SIZE EP0_FIFO_SIZE #define EP1_OUT_BUF_SIZE DT_INST_PROP_BY_IDX(0, ep_out_buf_size, 1) #define EP2_OUT_BUF_SIZE DT_INST_PROP_BY_IDX(0, ep_out_buf_size, 2) #define EP3_OUT_BUF_SIZE DT_INST_PROP_BY_IDX(0, ep_out_buf_size, 3) #define EP0_IDX 0 #define EP0_IN USB_CONTROL_EP_IN #define EP0_OUT USB_CONTROL_EP_OUT #define EP_MAX 4 /* EP OUT buffers */ static uint8_t ep0_out_buf[EP0_OUT_BUF_SIZE]; static uint8_t ep1_out_buf[EP1_OUT_BUF_SIZE]; static uint8_t ep2_out_buf[EP2_OUT_BUF_SIZE]; static uint8_t ep3_out_buf[EP3_OUT_BUF_SIZE]; static uint8_t *const ep_out_bufs[4] = { ep0_out_buf, ep1_out_buf, ep2_out_buf, ep3_out_buf, }; static const uint16_t ep_out_buf_size[4] = { EP0_OUT_BUF_SIZE, EP1_OUT_BUF_SIZE, EP2_OUT_BUF_SIZE, EP3_OUT_BUF_SIZE, }; /* Node functional states */ #define NFSR_NODE_RESET 0 #define NFSR_NODE_RESUME 1 #define NFSR_NODE_OPERATIONAL 2 #define NFSR_NODE_SUSPEND 3 /* * Those two following states are added to allow going out of sleep mode * using frame interrupt. On remove wakeup RESUME state must be kept for * at least 1ms. It is accomplished by using FRAME interrupt that goes * through those two fake states before entering OPERATIONAL state. 
*/ #define NFSR_NODE_WAKING (0x10 | (NFSR_NODE_RESUME)) #define NFSR_NODE_WAKING2 (0x20 | (NFSR_NODE_RESUME)) struct smartbond_ep_reg_set { volatile uint32_t epc_in; volatile uint32_t txd; volatile uint32_t txs; volatile uint32_t txc; volatile uint32_t epc_out; volatile uint32_t rxd; volatile uint32_t rxs; volatile uint32_t rxc; }; static struct smartbond_ep_reg_set *const reg_sets[4] = { (struct smartbond_ep_reg_set *)&USB->USB_EPC0_REG, (struct smartbond_ep_reg_set *)&USB->USB_EPC1_REG, (struct smartbond_ep_reg_set *)&USB->USB_EPC3_REG, (struct smartbond_ep_reg_set *)&USB->USB_EPC5_REG, }; struct smartbond_ep_state { atomic_t busy; uint8_t *buffer; uint16_t total_len; /** Total length of current transfer */ uint16_t transferred; /** Bytes transferred so far */ uint16_t mps; /** Endpoint max packet size */ /** Packet size sent or received so far. It is used to modify transferred field * after ACK is received or when filling ISO endpoint with size larger then * FIFO size. */ uint16_t last_packet_size; usb_dc_ep_callback cb; /** Endpoint callback function */ uint8_t data1 : 1; /** DATA0/1 toggle bit 1 DATA1 is expected or transmitted */ uint8_t stall : 1; /** Endpoint is stalled */ uint8_t iso : 1; /** ISO endpoint */ uint8_t enabled : 1; /** Endpoint is enabled */ uint8_t ep_addr; /** EP address */ struct smartbond_ep_reg_set *regs; }; static struct usb_smartbond_dma_cfg { int tx_chan; int rx_chan; uint8_t tx_slot_mux; uint8_t rx_slot_mux; const struct device *tx_dev; const struct device *rx_dev; struct dma_config tx_cfg; struct dma_config rx_cfg; struct dma_block_config tx_block_cfg; struct dma_block_config rx_block_cfg; } usbd_dma_cfg = { .tx_chan = DT_DMAS_CELL_BY_NAME(DT_NODELABEL(usbd), tx, channel), .tx_slot_mux = DT_DMAS_CELL_BY_NAME(DT_NODELABEL(usbd), tx, config), .tx_dev = DEVICE_DT_GET(DT_DMAS_CTLR_BY_NAME(DT_NODELABEL(usbd), tx)), .rx_chan = DT_DMAS_CELL_BY_NAME(DT_NODELABEL(usbd), rx, channel), .rx_slot_mux = DT_DMAS_CELL_BY_NAME(DT_NODELABEL(usbd), 
rx, config), .rx_dev = DEVICE_DT_GET(DT_DMAS_CTLR_BY_NAME(DT_NODELABEL(usbd), rx)), }; struct usb_dc_state { bool vbus_present; bool attached; atomic_t clk_requested; uint8_t nfsr; usb_dc_status_callback status_cb; struct smartbond_ep_state ep_state[2][4]; /** Bitmask of EP OUT endpoints that received data during interrupt */ uint8_t ep_out_data; atomic_ptr_t dma_ep[2]; /** DMA used by channel */ }; static struct usb_dc_state dev_state; /* * DA146xx register fields and bit mask are very long. Filed masks repeat register names. * Those convenience macros are a way to reduce complexity of register modification lines. */ #define GET_BIT(val, field) (val & field ## _Msk) >> field ## _Pos #define REG_GET_BIT(reg, field) (USB->reg & USB_ ## reg ## _ ## field ## _Msk) #define REG_SET_BIT(reg, field) (USB->reg |= USB_ ## reg ## _ ## field ## _Msk) #define REG_CLR_BIT(reg, field) (USB->reg &= ~USB_ ## reg ## _ ## field ## _Msk) #define REG_SET_VAL(reg, field, val) \ (USB->reg = (USB->reg & ~USB_##reg##_##field##_Msk) | \ (val << USB_##reg##_##field##_Pos)) static int usb_smartbond_dma_validate(void) { /* * DMA RX should be assigned an even number and * DMA TX should be assigned the right next * channel (odd number). 
*/ if (!(usbd_dma_cfg.tx_chan & 0x1) || (usbd_dma_cfg.rx_chan & 0x1) || (usbd_dma_cfg.tx_chan != (usbd_dma_cfg.rx_chan + 1))) { LOG_ERR("Invalid RX/TX channel selection"); return -EINVAL; } if (usbd_dma_cfg.rx_slot_mux != usbd_dma_cfg.tx_slot_mux) { LOG_ERR("TX/RX DMA slots mismatch"); return -EINVAL; } if (!device_is_ready(usbd_dma_cfg.tx_dev) || !device_is_ready(usbd_dma_cfg.rx_dev)) { LOG_ERR("TX/RX DMA device is not ready"); return -ENODEV; } return 0; } static int usb_smartbond_dma_config(void) { struct dma_config *tx = &usbd_dma_cfg.tx_cfg; struct dma_config *rx = &usbd_dma_cfg.rx_cfg; struct dma_block_config *tx_block = &usbd_dma_cfg.tx_block_cfg; struct dma_block_config *rx_block = &usbd_dma_cfg.rx_block_cfg; if (dma_request_channel(usbd_dma_cfg.rx_dev, (void *)&usbd_dma_cfg.rx_chan) < 0) { LOG_ERR("RX DMA channel is already occupied"); return -EIO; } if (dma_request_channel(usbd_dma_cfg.tx_dev, (void *)&usbd_dma_cfg.tx_chan) < 0) { LOG_ERR("TX DMA channel is already occupied"); return -EIO; } tx->channel_direction = MEMORY_TO_PERIPHERAL; tx->dma_callback = NULL; tx->user_data = NULL; tx->block_count = 1; tx->head_block = tx_block; tx->error_callback_dis = 1; /* DMA callback is not used */ tx->complete_callback_en = 1; tx->dma_slot = usbd_dma_cfg.tx_slot_mux; tx->channel_priority = 7; /* Burst mode is not using when DREQ is one */ tx->source_burst_length = 1; tx->dest_burst_length = 1; /* USB is byte-oriented protocol */ tx->source_data_size = 1; tx->dest_data_size = 1; /* Do not change */ tx_block->dest_addr_adj = 0x2; /* Incremental */ tx_block->source_addr_adj = 0x0; /* Should reflect TX buffer */ tx_block->source_address = 0; /* Should reflect USB TX FIFO. Temporarily assign an SRAM location. 
*/ tx_block->dest_address = MCU_SYSRAM_M_BASE; /* Should reflect total bytes to be transmitted */ tx_block->block_size = 0; rx->channel_direction = PERIPHERAL_TO_MEMORY; rx->dma_callback = NULL; rx->user_data = NULL; rx->block_count = 1; rx->head_block = rx_block; rx->error_callback_dis = 1; /* DMA callback is not used */ rx->complete_callback_en = 1; rx->dma_slot = usbd_dma_cfg.rx_slot_mux; rx->channel_priority = 2; /* Burst mode is not using when DREQ is one */ rx->source_burst_length = 1; rx->dest_burst_length = 1; /* USB is byte-oriented protocol */ rx->source_data_size = 1; rx->dest_data_size = 1; /* Do not change */ rx_block->source_addr_adj = 0x2; /* Incremenetal */ rx_block->dest_addr_adj = 0x0; /* Should reflect USB RX FIFO */ rx_block->source_address = 0; /* Should reflect RX buffer. Temporarily assign an SRAM location. */ rx_block->dest_address = MCU_SYSRAM_M_BASE; /* Should reflect total bytes to be received */ rx_block->block_size = 0; if (dma_config(usbd_dma_cfg.rx_dev, usbd_dma_cfg.rx_chan, rx) < 0) { LOG_ERR("RX DMA configuration failed"); return -EINVAL; } if (dma_config(usbd_dma_cfg.tx_dev, usbd_dma_cfg.tx_chan, tx) < 0) { LOG_ERR("TX DMA configuration failed"); return -EINVAL; } return 0; } static void usb_smartbond_dma_deconfig(void) { dma_stop(usbd_dma_cfg.tx_dev, usbd_dma_cfg.tx_chan); dma_stop(usbd_dma_cfg.rx_dev, usbd_dma_cfg.rx_chan); dma_release_channel(usbd_dma_cfg.tx_dev, usbd_dma_cfg.tx_chan); dma_release_channel(usbd_dma_cfg.rx_dev, usbd_dma_cfg.rx_chan); } static struct smartbond_ep_state *usb_dc_get_ep_state(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); uint8_t ep_dir = USB_EP_GET_DIR(ep) ? 1 : 0; return (ep_idx < EP_MAX) ? &dev_state.ep_state[ep_dir][ep_idx] : NULL; } static struct smartbond_ep_state *usb_dc_get_ep_out_state(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); return (ep_idx < EP_MAX && USB_EP_DIR_IS_OUT(ep)) ? 
&dev_state.ep_state[0][ep_idx] : NULL; } static struct smartbond_ep_state *usb_dc_get_ep_in_state(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); return ep_idx < EP_MAX || USB_EP_DIR_IS_IN(ep) ? &dev_state.ep_state[1][ep_idx] : NULL; } static inline bool dev_attached(void) { return dev_state.attached; } static inline bool dev_ready(void) { return dev_state.vbus_present; } static void set_nfsr(uint8_t val) { dev_state.nfsr = val; /* * Write only lower 2 bits to register, higher bits are used * to count down till OPERATIONAL state can be entered when * remote wakeup activated. */ USB->USB_NFSR_REG = val & 3; } static void fill_tx_fifo(struct smartbond_ep_state *ep_state) { int remaining; const uint8_t *src; uint8_t ep_idx = USB_EP_GET_IDX(ep_state->ep_addr); struct smartbond_ep_reg_set *regs = ep_state->regs; src = &ep_state->buffer[ep_state->transferred]; remaining = ep_state->total_len - ep_state->transferred; if (remaining > ep_state->mps - ep_state->last_packet_size) { remaining = ep_state->mps - ep_state->last_packet_size; } /* * Loop checks TCOUNT all the time since this value is saturated to 31 * and can't be read just once before. */ while ((regs->txs & USB_USB_TXS1_REG_USB_TCOUNT_Msk) > 0 && remaining > 0) { regs->txd = *src++; ep_state->last_packet_size++; remaining--; } if (ep_idx != 0) { if (remaining > 0) { /* * Max packet size is set to value greater then FIFO. * Enable fifo level warning to handle larger packets. */ regs->txc |= (3 << USB_USB_TXC1_REG_USB_TFWL_Pos); USB->USB_FWMSK_REG |= BIT(ep_idx - 1 + USB_USB_FWMSK_REG_USB_M_TXWARN31_Pos); } else { regs->txc &= ~USB_USB_TXC1_REG_USB_TFWL_Msk; USB->USB_FWMSK_REG &= ~(BIT(ep_idx - 1 + USB_USB_FWMSK_REG_USB_M_TXWARN31_Pos)); /* Whole packet already in fifo, no need to * refill it later. Mark last. 
*/ regs->txc |= USB_USB_TXC1_REG_USB_LAST_Msk; } } } static bool try_allocate_dma(struct smartbond_ep_state *ep_state, uint8_t dir) { uint8_t ep_idx = USB_EP_GET_IDX(ep_state->ep_addr); uint8_t dir_ix = dir == USB_EP_DIR_OUT ? 0 : 1; if (atomic_ptr_cas(&dev_state.dma_ep[dir_ix], NULL, ep_state)) { if (dir == USB_EP_DIR_OUT) { USB->USB_DMA_CTRL_REG = (USB->USB_DMA_CTRL_REG & ~USB_USB_DMA_CTRL_REG_USB_DMA_RX_Msk) | ((ep_idx - 1) << USB_USB_DMA_CTRL_REG_USB_DMA_RX_Pos); } else { USB->USB_DMA_CTRL_REG = (USB->USB_DMA_CTRL_REG & ~USB_USB_DMA_CTRL_REG_USB_DMA_TX_Msk) | ((ep_idx - 1) << USB_USB_DMA_CTRL_REG_USB_DMA_TX_Pos); } USB->USB_DMA_CTRL_REG |= USB_USB_DMA_CTRL_REG_USB_DMA_EN_Msk; return true; } else { return false; } } static void start_rx_dma(volatile void *src, void *dst, uint16_t size) { if (dma_reload(usbd_dma_cfg.rx_dev, usbd_dma_cfg.rx_chan, (uint32_t)src, (uint32_t)dst, size) < 0) { LOG_ERR("Failed to reload RX DMA"); } else { dma_start(usbd_dma_cfg.rx_dev, usbd_dma_cfg.rx_chan); } } static void start_rx_packet(struct smartbond_ep_state *ep_state) { uint8_t ep_idx = USB_EP_GET_IDX(ep_state->ep_addr); struct smartbond_ep_reg_set *regs = ep_state->regs; LOG_DBG("%02x", ep_state->ep_addr); ep_state->last_packet_size = 0; ep_state->transferred = 0; ep_state->total_len = 0; if (ep_state->mps > DMA_MIN_TRANSFER_SIZE) { if (try_allocate_dma(ep_state, USB_EP_DIR_OUT)) { start_rx_dma(&regs->rxd, ep_state->buffer, ep_state->mps); } else if (ep_state->mps > EP_FIFO_SIZE) { /* * Other endpoint is using DMA in that direction, * fall back to interrupts. * For endpoint size greater than FIFO size, * enable FIFO level warning interrupt when FIFO * has less than 17 bytes free. */ regs->rxc |= USB_USB_RXC1_REG_USB_RFWL_Msk; USB->USB_FWMSK_REG |= BIT(ep_idx - 1 + USB_USB_FWMSK_REG_USB_M_RXWARN31_Pos); } } else if (ep_idx != 0) { /* If max_packet_size would fit in FIFO no need * for FIFO level warning interrupt. 
*/ regs->rxc &= ~USB_USB_RXC1_REG_USB_RFWL_Msk; USB->USB_FWMSK_REG &= ~(BIT(ep_idx - 1 + USB_USB_FWMSK_REG_USB_M_RXWARN31_Pos)); } regs->rxc |= USB_USB_RXC1_REG_USB_RX_EN_Msk; } static void start_tx_dma(void *src, volatile void *dst, uint16_t size) { if (dma_reload(usbd_dma_cfg.tx_dev, usbd_dma_cfg.tx_chan, (uint32_t)src, (uint32_t)dst, size) < 0) { LOG_ERR("Failed to reload TX DMA"); } else { dma_start(usbd_dma_cfg.tx_dev, usbd_dma_cfg.tx_chan); } } static void start_tx_packet(struct smartbond_ep_state *ep_state) { struct smartbond_ep_reg_set *regs = ep_state->regs; uint16_t remaining = ep_state->total_len - ep_state->transferred; uint16_t size = MIN(remaining, ep_state->mps); LOG_DBG("%02x %d/%d", ep_state->ep_addr, size, remaining); ep_state->last_packet_size = 0; regs->txc = USB_USB_TXC1_REG_USB_FLUSH_Msk; regs->txc = USB_USB_TXC1_REG_USB_IGN_ISOMSK_Msk; if (ep_state->data1) { regs->txc |= USB_USB_TXC1_REG_USB_TOGGLE_TX_Msk; } if (ep_state->ep_addr != EP0_IN && remaining > DMA_MIN_TRANSFER_SIZE && (uint32_t)(ep_state->buffer) >= CONFIG_SRAM_BASE_ADDRESS && try_allocate_dma(ep_state, USB_EP_DIR_IN)) { /* * Whole packet will be put in FIFO by DMA. * Set LAST bit before start. 
*/ start_tx_dma(ep_state->buffer + ep_state->transferred, &regs->txd, size); regs->txc |= USB_USB_TXC1_REG_USB_LAST_Msk; } else { fill_tx_fifo(ep_state); } regs->txc |= USB_USB_TXC1_REG_USB_TX_EN_Msk; } static uint16_t read_rx_fifo(struct smartbond_ep_state *ep_state, uint16_t bytes_in_fifo) { struct smartbond_ep_reg_set *regs = ep_state->regs; uint16_t remaining = ep_state->mps - ep_state->last_packet_size; uint16_t receive_this_time = bytes_in_fifo; uint8_t *buf = ep_state->buffer + ep_state->last_packet_size; if (remaining < bytes_in_fifo) { receive_this_time = remaining; } for (int i = 0; i < receive_this_time; ++i) { buf[i] = regs->rxd; } ep_state->last_packet_size += receive_this_time; return bytes_in_fifo - receive_this_time; } static void handle_ep0_rx(void) { int fifo_bytes; uint32_t rxs0 = USB->USB_RXS0_REG; struct smartbond_ep_state *ep0_out_state = usb_dc_get_ep_out_state(0); struct smartbond_ep_state *ep0_in_state; fifo_bytes = GET_BIT(rxs0, USB_USB_RXS0_REG_USB_RCOUNT); if (rxs0 & USB_USB_RXS0_REG_USB_SETUP_Msk) { ep0_in_state = usb_dc_get_ep_in_state(0); read_rx_fifo(ep0_out_state, EP0_FIFO_SIZE); ep0_out_state->stall = 0; ep0_out_state->data1 = 1; ep0_in_state->stall = 0; ep0_in_state->data1 = 1; REG_SET_BIT(USB_TXC0_REG, USB_TOGGLE_TX0); REG_CLR_BIT(USB_EPC0_REG, USB_STALL); LOG_DBG("Setup %02x %02x %02x %02x %02x %02x %02x %02x", ep0_out_state->buffer[0], ep0_out_state->buffer[1], ep0_out_state->buffer[2], ep0_out_state->buffer[3], ep0_out_state->buffer[4], ep0_out_state->buffer[5], ep0_out_state->buffer[6], ep0_out_state->buffer[7]); ep0_out_state->cb(EP0_OUT, USB_DC_EP_SETUP); } else { if (GET_BIT(rxs0, USB_USB_RXS0_REG_USB_TOGGLE_RX0) != ep0_out_state->data1) { /* Toggle bit does not match discard packet */ REG_SET_BIT(USB_RXC0_REG, USB_FLUSH); ep0_out_state->last_packet_size = 0; } else { read_rx_fifo(ep0_out_state, fifo_bytes); if (rxs0 & USB_USB_RXS0_REG_USB_RX_LAST_Msk) { ep0_out_state->data1 ^= 1; dev_state.ep_out_data |= 1; } } } } static 
void handle_ep0_tx(void) { uint32_t txs0; struct smartbond_ep_state *ep0_in_state = usb_dc_get_ep_in_state(0); struct smartbond_ep_state *ep0_out_state = usb_dc_get_ep_out_state(0); struct smartbond_ep_reg_set *regs = ep0_in_state->regs; txs0 = regs->txs; LOG_DBG("%02x %02x", ep0_in_state->ep_addr, txs0); if (GET_BIT(txs0, USB_USB_TXS0_REG_USB_TX_DONE)) { /* ACK received */ if (GET_BIT(txs0, USB_USB_TXS0_REG_USB_ACK_STAT)) { ep0_in_state->transferred += ep0_in_state->last_packet_size; ep0_in_state->last_packet_size = 0; ep0_in_state->data1 ^= 1; REG_SET_VAL(USB_TXC0_REG, USB_TOGGLE_TX0, ep0_in_state->data1); if (ep0_in_state->transferred == ep0_in_state->total_len) { /* For control endpoint get ready for ACK stage * from host. */ ep0_out_state = usb_dc_get_ep_out_state(EP0_IDX); ep0_out_state->transferred = 0; ep0_out_state->total_len = 0; ep0_out_state->last_packet_size = 0; REG_SET_BIT(USB_RXC0_REG, USB_RX_EN); atomic_clear(&ep0_in_state->busy); ep0_in_state->cb(EP0_IN, USB_DC_EP_DATA_IN); return; } } else { /* Start from the beginning */ ep0_in_state->last_packet_size = 0; } start_tx_packet(ep0_in_state); } } static void handle_epx_rx_ev(uint8_t ep_idx) { uint32_t rxs; int fifo_bytes; struct smartbond_ep_state *ep_state = usb_dc_get_ep_out_state(ep_idx); struct smartbond_ep_reg_set *regs = ep_state->regs; do { rxs = regs->rxs; if (GET_BIT(rxs, USB_USB_RXS1_REG_USB_RX_ERR)) { regs->rxc |= USB_USB_RXC1_REG_USB_FLUSH_Msk; ep_state->last_packet_size = 0; if (dev_state.dma_ep[0] == ep_state) { /* Stop DMA */ dma_stop(usbd_dma_cfg.rx_dev, usbd_dma_cfg.rx_chan); /* Restart DMA since packet was dropped, * all parameters should still work. */ dma_start(usbd_dma_cfg.rx_dev, usbd_dma_cfg.rx_chan); } break; } if (dev_state.dma_ep[0] == ep_state) { struct dma_status rx_dma_status; dma_get_status(usbd_dma_cfg.rx_dev, usbd_dma_cfg.rx_chan, &rx_dma_status); /* * Disable DMA and update last_packet_size * with what DMA reported. 
*/ dma_stop(usbd_dma_cfg.rx_dev, usbd_dma_cfg.rx_chan); ep_state->last_packet_size = rx_dma_status.total_copied; /* * When DMA did not finished (packet was smaller then MPS), * dma_idx holds exact number of bytes transmitted. When DMA * finished value in dma_idx is one less then actual number of * transmitted bytes. */ if (ep_state->last_packet_size == (rx_dma_status.total_copied + rx_dma_status.pending_length)) { ep_state->last_packet_size++; } /* Release DMA to use by other endpoints. */ dev_state.dma_ep[0] = NULL; } fifo_bytes = GET_BIT(rxs, USB_USB_RXS1_REG_USB_RXCOUNT); /* * FIFO maybe empty if DMA read it before or * it's final iteration and function already read all * that was to read. */ if (fifo_bytes > 0) { fifo_bytes = read_rx_fifo(ep_state, fifo_bytes); } if (GET_BIT(rxs, USB_USB_RXS1_REG_USB_RX_LAST)) { if (!ep_state->iso && GET_BIT(rxs, USB_USB_RXS1_REG_USB_TOGGLE_RX) != ep_state->data1) { /* Toggle bit does not match discard packet */ regs->rxc |= USB_USB_RXC1_REG_USB_FLUSH_Msk; ep_state->last_packet_size = 0; /* Re-enable reception */ start_rx_packet(ep_state); } else { ep_state->data1 ^= 1; atomic_clear(&ep_state->busy); dev_state.ep_out_data |= BIT(ep_idx); } } } while (fifo_bytes > FIFO_READ_THRESHOLD); } static void handle_rx_ev(void) { if (USB->USB_RXEV_REG & BIT(0)) { handle_epx_rx_ev(1); } if (USB->USB_RXEV_REG & BIT(1)) { handle_epx_rx_ev(2); } if (USB->USB_RXEV_REG & BIT(2)) { handle_epx_rx_ev(3); } } static void handle_epx_tx_ev(struct smartbond_ep_state *ep_state) { uint32_t txs; struct smartbond_ep_reg_set *regs = ep_state->regs; txs = regs->txs; if (GET_BIT(txs, USB_USB_TXS1_REG_USB_TX_DONE)) { if (dev_state.dma_ep[1] == ep_state) { struct dma_status tx_dma_status; dma_get_status(usbd_dma_cfg.tx_dev, usbd_dma_cfg.tx_chan, &tx_dma_status); /* * Disable DMA and update last_packet_size with what * DMA reported. 
*/ dma_stop(usbd_dma_cfg.tx_dev, usbd_dma_cfg.tx_chan); ep_state->last_packet_size = tx_dma_status.total_copied + 1; /* Release DMA to used by other endpoints. */ dev_state.dma_ep[1] = NULL; } if (GET_BIT(txs, USB_USB_TXS1_REG_USB_ACK_STAT)) { /* ACK received, update transfer state and DATA0/1 bit */ ep_state->transferred += ep_state->last_packet_size; ep_state->last_packet_size = 0; ep_state->data1 ^= 1; if (ep_state->transferred == ep_state->total_len) { atomic_clear(&ep_state->busy); ep_state->cb(ep_state->ep_addr, USB_DC_EP_DATA_IN); return; } } else if (regs->epc_in & USB_USB_EPC1_REG_USB_STALL_Msk) { /* * TX_DONE also indicates that STALL packet was just sent, * there is no point to put anything into transmit FIFO. * It could result in empty packet being scheduled. */ return; } } if (txs & USB_USB_TXS1_REG_USB_TX_URUN_Msk) { LOG_DBG("EP 0x%02x FIFO underrun\n", ep_state->ep_addr); } /* Start next or repeated packet. */ start_tx_packet(ep_state); } static void handle_tx_ev(void) { if (USB->USB_TXEV_REG & BIT(0)) { handle_epx_tx_ev(usb_dc_get_ep_in_state(1)); } if (USB->USB_TXEV_REG & BIT(1)) { handle_epx_tx_ev(usb_dc_get_ep_in_state(2)); } if (USB->USB_TXEV_REG & BIT(2)) { handle_epx_tx_ev(usb_dc_get_ep_in_state(3)); } } static uint32_t check_reset_end(uint32_t alt_ev) { if (dev_state.nfsr == NFSR_NODE_RESET) { if (GET_BIT(alt_ev, USB_USB_ALTEV_REG_USB_RESET)) { /* * Could be still in reset, but since USB_M_RESET is * disabled it can be also old reset state that was not * cleared yet. * If (after reading USB_ALTEV_REG register again) * bit is cleared reset state just ended. * Keep non-reset bits combined from two previous * ALTEV read and one from the next line. 
*/ alt_ev = (alt_ev & ~USB_USB_ALTEV_REG_USB_RESET_Msk) | USB->USB_ALTEV_REG; } if (GET_BIT(alt_ev, USB_USB_ALTEV_REG_USB_RESET) == 0) { USB->USB_ALTMSK_REG = USB_USB_ALTMSK_REG_USB_M_RESET_Msk | USB_USB_ALTEV_REG_USB_SD3_Msk; if (dev_state.ep_state[0][0].buffer != NULL) { USB->USB_MAMSK_REG |= USB_USB_MAMSK_REG_USB_M_EP0_RX_Msk; } LOG_INF("Set operational %02x", USB->USB_MAMSK_REG); set_nfsr(NFSR_NODE_OPERATIONAL); dev_state.status_cb(USB_DC_CONNECTED, NULL); } } return alt_ev; } static void handle_bus_reset(void) { uint32_t alt_ev; USB->USB_NFSR_REG = 0; USB->USB_FAR_REG = 0x80; USB->USB_ALTMSK_REG = 0; USB->USB_NFSR_REG = NFSR_NODE_RESET; USB->USB_TXMSK_REG = 0; USB->USB_RXMSK_REG = 0; set_nfsr(NFSR_NODE_RESET); for (int i = 0; i < EP_MAX; ++i) { dev_state.ep_state[1][i].buffer = NULL; dev_state.ep_state[1][i].transferred = 0; dev_state.ep_state[1][i].total_len = 0; atomic_clear(&dev_state.ep_state[1][i].busy); } LOG_INF("send USB_DC_RESET"); dev_state.status_cb(USB_DC_RESET, NULL); USB->USB_DMA_CTRL_REG = 0; USB->USB_MAMSK_REG = USB_USB_MAMSK_REG_USB_M_INTR_Msk | USB_USB_MAMSK_REG_USB_M_FRAME_Msk | USB_USB_MAMSK_REG_USB_M_WARN_Msk | USB_USB_MAMSK_REG_USB_M_ALT_Msk | USB_USB_MAMSK_REG_USB_M_EP0_RX_Msk | USB_USB_MAMSK_REG_USB_M_EP0_TX_Msk; USB->USB_ALTMSK_REG = USB_USB_ALTMSK_REG_USB_M_RESUME_Msk; alt_ev = USB->USB_ALTEV_REG; check_reset_end(alt_ev); } static void usb_clock_on(void) { if (atomic_cas(&dev_state.clk_requested, 0, 1)) { clock_control_on(DEVICE_DT_GET(DT_NODELABEL(osc)), (clock_control_subsys_rate_t)SMARTBOND_CLK_USB); } } static void usb_clock_off(void) { if (atomic_cas(&dev_state.clk_requested, 1, 0)) { clock_control_off(DEVICE_DT_GET(DT_NODELABEL(osc)), (clock_control_subsys_rate_t)SMARTBOND_CLK_USB); } } static void handle_alt_ev(void) { struct smartbond_ep_state *ep_state; uint32_t alt_ev = USB->USB_ALTEV_REG; if (USB->USB_NFSR_REG == NFSR_NODE_SUSPEND) { usb_clock_on(); } alt_ev = check_reset_end(alt_ev); if (GET_BIT(alt_ev, 
USB_USB_ALTEV_REG_USB_RESET) && dev_state.nfsr != NFSR_NODE_RESET) { handle_bus_reset(); } else if (GET_BIT(alt_ev, USB_USB_ALTEV_REG_USB_RESUME)) { if (USB->USB_NFSR_REG == NFSR_NODE_SUSPEND) { set_nfsr(NFSR_NODE_OPERATIONAL); if (dev_state.ep_state[0][0].buffer != NULL) { USB->USB_MAMSK_REG |= USB_USB_MAMSK_REG_USB_M_EP0_RX_Msk; } USB->USB_ALTMSK_REG = USB_USB_ALTMSK_REG_USB_M_RESET_Msk | USB_USB_ALTMSK_REG_USB_M_SD3_Msk; /* Re-enable reception of endpoint with pending transfer */ for (int ep_num = 1; ep_num < EP_MAX; ++ep_num) { ep_state = usb_dc_get_ep_out_state(ep_num); if (ep_state->enabled) { start_rx_packet(ep_state); } } dev_state.status_cb(USB_DC_RESUME, NULL); } } else if (GET_BIT(alt_ev, USB_USB_ALTEV_REG_USB_SD3)) { set_nfsr(NFSR_NODE_SUSPEND); USB->USB_ALTMSK_REG = USB_USB_ALTMSK_REG_USB_M_RESET_Msk | USB_USB_ALTMSK_REG_USB_M_RESUME_Msk; usb_clock_off(); dev_state.status_cb(USB_DC_SUSPEND, NULL); } } static void handle_epx_tx_warn_ev(uint8_t ep_idx) { fill_tx_fifo(usb_dc_get_ep_in_state(ep_idx)); } static void handle_fifo_warning(void) { uint32_t fifo_warning = USB->USB_FWEV_REG; if (fifo_warning & BIT(0)) { handle_epx_tx_warn_ev(1); } if (fifo_warning & BIT(1)) { handle_epx_tx_warn_ev(2); } if (fifo_warning & BIT(2)) { handle_epx_tx_warn_ev(3); } if (fifo_warning & BIT(4)) { handle_epx_rx_ev(1); } if (fifo_warning & BIT(5)) { handle_epx_rx_ev(2); } if (fifo_warning & BIT(6)) { handle_epx_rx_ev(3); } } static void handle_ep0_nak(void) { uint32_t ep0_nak = USB->USB_EP0_NAK_REG; if (REG_GET_BIT(USB_EPC0_REG, USB_STALL)) { if (GET_BIT(ep0_nak, USB_USB_EP0_NAK_REG_USB_EP0_INNAK)) { /* * EP0 is stalled and NAK was sent, it means that * RX is enabled. Disable RX for now. 
*/ REG_CLR_BIT(USB_RXC0_REG, USB_RX_EN); REG_SET_BIT(USB_TXC0_REG, USB_TX_EN); } if (GET_BIT(ep0_nak, USB_USB_EP0_NAK_REG_USB_EP0_OUTNAK)) { REG_SET_BIT(USB_RXC0_REG, USB_RX_EN); } } else { if (REG_GET_BIT(USB_RXC0_REG, USB_RX_EN) == 0 && GET_BIT(ep0_nak, USB_USB_EP0_NAK_REG_USB_EP0_OUTNAK)) { /* NAK over EP0 was sent, receive should conclude */ USB->USB_TXC0_REG = USB_USB_TXC0_REG_USB_FLUSH_Msk; REG_SET_BIT(USB_RXC0_REG, USB_RX_EN); REG_CLR_BIT(USB_MAMSK_REG, USB_M_EP0_NAK); } } } static void usb_dc_smartbond_isr(void) { uint32_t int_status = USB->USB_MAEV_REG & USB->USB_MAMSK_REG; if (GET_BIT(int_status, USB_USB_MAEV_REG_USB_WARN)) { handle_fifo_warning(); } if (GET_BIT(int_status, USB_USB_MAEV_REG_USB_CH_EV)) { /* For now just clear interrupt */ (void)USB->USB_CHARGER_STAT_REG; } if (GET_BIT(int_status, USB_USB_MAEV_REG_USB_EP0_TX)) { handle_ep0_tx(); } if (GET_BIT(int_status, USB_USB_MAEV_REG_USB_EP0_RX)) { handle_ep0_rx(); } if (GET_BIT(int_status, USB_USB_MAEV_REG_USB_EP0_NAK)) { handle_ep0_nak(); } if (GET_BIT(int_status, USB_USB_MAEV_REG_USB_RX_EV)) { handle_rx_ev(); } if (GET_BIT(int_status, USB_USB_MAEV_REG_USB_NAK)) { (void)USB->USB_NAKEV_REG; } if (GET_BIT(int_status, USB_USB_MAEV_REG_USB_FRAME)) { if (dev_state.nfsr == NFSR_NODE_RESET) { /* * During reset FRAME interrupt is enabled to periodically * check when reset state ends. * FRAME interrupt is generated every 1ms without host sending * actual SOF. 
*/ check_reset_end(USB_USB_ALTEV_REG_USB_RESET_Msk); } else if (dev_state.nfsr == NFSR_NODE_WAKING) { /* No need to call set_nfsr, just set state */ dev_state.nfsr = NFSR_NODE_WAKING2; } else if (dev_state.nfsr == NFSR_NODE_WAKING2) { /* No need to call set_nfsr, just set state */ dev_state.nfsr = NFSR_NODE_RESUME; LOG_DBG("dev_state.nfsr = NFSR_NODE_RESUME %02x", USB->USB_MAMSK_REG); } else if (dev_state.nfsr == NFSR_NODE_RESUME) { set_nfsr(NFSR_NODE_OPERATIONAL); if (dev_state.ep_state[0][0].buffer != NULL) { USB->USB_MAMSK_REG |= USB_USB_MAMSK_REG_USB_M_EP0_RX_Msk; } LOG_DBG("Set operational %02x", USB->USB_MAMSK_REG); } else { USB->USB_MAMSK_REG &= ~USB_USB_MAMSK_REG_USB_M_FRAME_Msk; } } if (GET_BIT(int_status, USB_USB_MAEV_REG_USB_TX_EV)) { handle_tx_ev(); } if (GET_BIT(int_status, USB_USB_MAEV_REG_USB_ALT)) { handle_alt_ev(); } for (int i = 0; dev_state.ep_out_data && i < 4; ++i) { uint8_t mask = BIT(i); if (dev_state.ep_out_data & mask) { dev_state.ep_out_data ^= mask; dev_state.ep_state[0][i].cb(dev_state.ep_state[0][i].ep_addr, USB_DC_EP_DATA_OUT); } } } /** * USB functionality can be disabled from HOST and DEVICE side. * Host side is indicated by VBUS line. * Device side is decided by pair of calls usb_dc_attach()/usb_dc_detach, * USB will only work when application calls usb_dc_attach() and VBUS is present. * When both conditions are not met USB clock (PLL) is released, and peripheral * remain in reset state. 
*/ static void usb_change_state(bool attached, bool vbus_present) { if (dev_state.attached == attached && dev_state.vbus_present == vbus_present) { return; } if (attached && vbus_present) { dev_state.attached = true; dev_state.vbus_present = true; /* * Prevent transition to standby, this greatly reduces * IRQ response time */ pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES); usb_smartbond_dma_config(); usb_clock_on(); dev_state.status_cb(USB_DC_CONNECTED, NULL); USB->USB_MCTRL_REG = USB_USB_MCTRL_REG_USBEN_Msk; USB->USB_NFSR_REG = 0; USB->USB_FAR_REG = 0x80; USB->USB_TXMSK_REG = 0; USB->USB_RXMSK_REG = 0; USB->USB_MAMSK_REG = USB_USB_MAMSK_REG_USB_M_INTR_Msk | USB_USB_MAMSK_REG_USB_M_ALT_Msk | USB_USB_MAMSK_REG_USB_M_WARN_Msk; USB->USB_ALTMSK_REG = USB_USB_ALTMSK_REG_USB_M_RESET_Msk | USB_USB_ALTEV_REG_USB_SD3_Msk; USB->USB_MCTRL_REG = USB_USB_MCTRL_REG_USBEN_Msk | USB_USB_MCTRL_REG_USB_NAT_Msk; } else if (dev_state.attached && dev_state.vbus_present) { /* * USB was previously in use now either VBUS is gone or application * requested detach, put it down */ dev_state.attached = attached; dev_state.vbus_present = vbus_present; /* * It's imperative that USB_NAT bit-field is updated with the * USBEN bit-field being set. As such, zeroing the control * register at once will result in leaving the USB tranceivers * in a floating state. Such an action, will induce incorect * behavior for subsequent charger detection operations and given * that the device does not enter the sleep state (thus powering off * PD_SYS and resetting the controller along with its tranceivers). 
*/ REG_CLR_BIT(USB_MCTRL_REG, USB_NAT); USB->USB_MCTRL_REG = 0; usb_clock_off(); dev_state.status_cb(USB_DC_DISCONNECTED, NULL); usb_smartbond_dma_deconfig(); /* Allow standby USB not in use or not connected */ pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES); } else { /* USB still not activated, keep track of what's on and off */ dev_state.attached = attached; dev_state.vbus_present = vbus_present; } } static void usb_dc_smartbond_vbus_isr(void) { LOG_DBG("VBUS_ISR"); CRG_TOP->VBUS_IRQ_CLEAR_REG = 1; usb_change_state(dev_state.attached, (CRG_TOP->ANA_STATUS_REG & CRG_TOP_ANA_STATUS_REG_VBUS_AVAILABLE_Msk) != 0); } static int usb_init(void) { int ret = 0; BUILD_ASSERT(DT_DMAS_HAS_NAME(DT_NODELABEL(usbd), tx), "Unasigned TX DMA"); BUILD_ASSERT(DT_DMAS_HAS_NAME(DT_NODELABEL(usbd), rx), "Unasigned RX DMA"); ret = usb_smartbond_dma_validate(); if (ret != 0) { return ret; } for (int i = 0; i < EP_MAX; ++i) { dev_state.ep_state[0][i].regs = reg_sets[i]; dev_state.ep_state[0][i].ep_addr = i | USB_EP_DIR_OUT; dev_state.ep_state[0][i].buffer = ep_out_bufs[i]; dev_state.ep_state[1][i].regs = reg_sets[i]; dev_state.ep_state[1][i].ep_addr = i | USB_EP_DIR_IN; } /* Max packet size for EP0 is hardwired to 8 */ dev_state.ep_state[0][0].mps = EP0_FIFO_SIZE; dev_state.ep_state[1][0].mps = EP0_FIFO_SIZE; IRQ_CONNECT(VBUS_IRQ, VBUS_IRQ_PRI, usb_dc_smartbond_vbus_isr, 0, 0); CRG_TOP->VBUS_IRQ_CLEAR_REG = 1; NVIC_ClearPendingIRQ(VBUS_IRQ); /* Both connect and disconnect needs to be handled */ CRG_TOP->VBUS_IRQ_MASK_REG = CRG_TOP_VBUS_IRQ_MASK_REG_VBUS_IRQ_EN_FALL_Msk | CRG_TOP_VBUS_IRQ_MASK_REG_VBUS_IRQ_EN_RISE_Msk; irq_enable(VBUS_IRQn); IRQ_CONNECT(USB_IRQ, USB_IRQ_PRI, usb_dc_smartbond_isr, 0, 0); irq_enable(USB_IRQ); return ret; } int usb_dc_ep_disable(const uint8_t ep) { struct smartbond_ep_state *ep_state = usb_dc_get_ep_state(ep); LOG_DBG("%02x", ep); if (ep_state == NULL) { LOG_ERR("Not valid endpoint: %02x", ep); return -EINVAL; } ep_state->enabled = 0; if 
(ep_state->ep_addr == EP0_IN) { REG_SET_BIT(USB_TXC0_REG, USB_IGN_IN); } else if (ep_state->ep_addr == EP0_OUT) { USB->USB_RXC0_REG = USB_USB_RXC0_REG_USB_IGN_SETUP_Msk | USB_USB_RXC0_REG_USB_IGN_OUT_Msk; } else if (USB_EP_DIR_IS_OUT(ep)) { ep_state->regs->epc_out &= ~USB_USB_EPC2_REG_USB_EP_EN_Msk; } else { ep_state->regs->epc_in &= ~USB_USB_EPC1_REG_USB_EP_EN_Msk; } return 0; } int usb_dc_ep_mps(const uint8_t ep) { struct smartbond_ep_state *ep_state = usb_dc_get_ep_state(ep); if (ep_state == NULL) { LOG_ERR("Not valid endpoint: %02x", ep); return -EINVAL; } return ep_state->mps; } int usb_dc_ep_read_continue(uint8_t ep) { struct smartbond_ep_state *ep_state = usb_dc_get_ep_out_state(ep); if (ep_state == NULL) { LOG_ERR("Not valid endpoint: %02x", ep); return -EINVAL; } LOG_DBG("ep 0x%02x", ep); /* If no more data in the buffer, start a new read transaction. * DataOutStageCallback will called on transaction complete. */ if (ep_state->transferred >= ep_state->last_packet_size) { start_rx_packet(ep_state); } return 0; } int usb_dc_ep_read_wait(uint8_t ep, uint8_t *data, uint32_t max_data_len, uint32_t *read_bytes) { struct smartbond_ep_state *ep_state = usb_dc_get_ep_out_state(ep); uint16_t read_count; if (ep_state == NULL) { LOG_ERR("Invalid Endpoint %x", ep); return -EINVAL; } LOG_DBG("ep 0x%02x, %u bytes, %p", ep, max_data_len, (void *)data); read_count = ep_state->last_packet_size - ep_state->transferred; /* When both buffer and max data to read are zero, just ignore reading * and return available data in buffer. Otherwise, return data * previously stored in the buffer. 
*/ if (data) { read_count = MIN(read_count, max_data_len); memcpy(data, ep_state->buffer + ep_state->transferred, read_count); ep_state->transferred += read_count; } else if (max_data_len) { LOG_ERR("Wrong arguments"); } if (read_bytes) { *read_bytes = read_count; } return 0; } int usb_dc_ep_read(const uint8_t ep, uint8_t *const data, const uint32_t max_data_len, uint32_t *const read_bytes) { if (usb_dc_ep_read_wait(ep, data, max_data_len, read_bytes) != 0) { return -EINVAL; } if (usb_dc_ep_read_continue(ep) != 0) { return -EINVAL; } return 0; } int usb_dc_ep_check_cap(const struct usb_dc_ep_cfg_data *const cfg) { uint8_t ep_idx = USB_EP_GET_IDX(cfg->ep_addr); LOG_DBG("ep %x, mps %d, type %d", cfg->ep_addr, cfg->ep_mps, cfg->ep_type); if ((cfg->ep_type == USB_DC_EP_CONTROL && ep_idx != 0) || (cfg->ep_type != USB_DC_EP_CONTROL && ep_idx == 0)) { LOG_ERR("invalid endpoint configuration"); return -EINVAL; } if (ep_idx > 3) { LOG_ERR("endpoint address out of range"); return -EINVAL; } if (ep_out_buf_size[ep_idx] < cfg->ep_mps) { LOG_ERR("endpoint size too big"); return -EINVAL; } return 0; } int usb_dc_ep_set_callback(const uint8_t ep, const usb_dc_ep_callback cb) { struct smartbond_ep_state *ep_state = usb_dc_get_ep_state(ep); LOG_DBG("%02x %p", ep, (void *)cb); if (ep_state == NULL) { LOG_ERR("Not valid endpoint: %02x", ep); return -EINVAL; } ep_state->cb = cb; return 0; } void usb_dc_set_status_callback(const usb_dc_status_callback cb) { dev_state.status_cb = cb; LOG_DBG("%p", cb); /* Manually call IRQ handler in case when VBUS is already present */ usb_dc_smartbond_vbus_isr(); } int usb_dc_reset(void) { int ret; LOG_DBG(""); if (!dev_attached() || !dev_ready()) { return -ENODEV; } ret = usb_dc_detach(); if (ret) { return ret; } ret = usb_dc_attach(); if (ret) { return ret; } return 0; } int usb_dc_set_address(const uint8_t addr) { LOG_DBG("%d", addr); /* Set default address for one ZLP */ USB->USB_EPC0_REG = USB_USB_EPC0_REG_USB_DEF_Msk; USB->USB_FAR_REG = (addr & 
USB_USB_FAR_REG_USB_AD_Msk) | USB_USB_FAR_REG_USB_AD_EN_Msk; return 0; } int usb_dc_ep_clear_stall(const uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); uint8_t ep_dir = USB_EP_GET_DIR(ep); struct smartbond_ep_state *ep_state = usb_dc_get_ep_state(ep); struct smartbond_ep_reg_set *regs; LOG_DBG("%02x", ep); if (ep_state == NULL) { LOG_ERR("Not valid endpoint: %02x", ep); return -EINVAL; } regs = ep_state->regs; /* Clear stall is called in response to Clear Feature ENDPOINT_HALT, * reset toggle */ ep_state->stall = false; ep_state->data1 = 0; if (ep_dir == USB_EP_DIR_OUT) { regs->epc_out &= ~USB_USB_EPC1_REG_USB_STALL_Msk; } else { regs->epc_in &= ~USB_USB_EPC1_REG_USB_STALL_Msk; } if (ep_idx == 0) { REG_CLR_BIT(USB_MAMSK_REG, USB_M_EP0_NAK); } return 0; } int usb_dc_ep_enable(const uint8_t ep) { struct smartbond_ep_state *ep_state = usb_dc_get_ep_state(ep); uint8_t ep_idx = USB_EP_GET_IDX(ep); uint8_t ep_dir = USB_EP_GET_DIR(ep); if (ep_state == NULL) { LOG_ERR("Not valid endpoint: %02x", ep); return -EINVAL; } LOG_DBG("%02x", ep); if (ep_state->ep_addr == EP0_IN) { USB->USB_MAMSK_REG |= USB_USB_MAMSK_REG_USB_M_EP0_TX_Msk; } else if (ep_state->ep_addr == EP0_OUT) { USB->USB_MAMSK_REG |= USB_USB_MAMSK_REG_USB_M_EP0_RX_Msk; /* Clear USB_IGN_SETUP and USB_IGN_OUT */ USB->USB_RXC0_REG = 0; ep_state->last_packet_size = 0; ep_state->transferred = 0; ep_state->total_len = 0; } else if (ep_dir == USB_EP_DIR_OUT) { USB->USB_RXMSK_REG |= 0x11 << (ep_idx - 1); REG_SET_BIT(USB_MAMSK_REG, USB_M_RX_EV); ep_state->regs->epc_out |= USB_USB_EPC1_REG_USB_EP_EN_Msk; if (ep_state->busy) { return 0; } start_rx_packet(ep_state); } else { USB->USB_TXMSK_REG |= 0x11 << (ep_idx - 1); REG_SET_BIT(USB_MAMSK_REG, USB_M_TX_EV); ep_state->regs->epc_in |= USB_USB_EPC2_REG_USB_EP_EN_Msk; } ep_state->enabled = 1; return 0; } int usb_dc_ep_configure(const struct usb_dc_ep_cfg_data *const ep_cfg) { struct smartbond_ep_state *ep_state = usb_dc_get_ep_state(ep_cfg->ep_addr); uint8_t ep_idx = 
USB_EP_GET_IDX(ep_cfg->ep_addr); uint8_t ep_dir = USB_EP_GET_DIR(ep_cfg->ep_addr); uint8_t iso_mask; if (ep_state == NULL) { return -EINVAL; } LOG_DBG("%02x", ep_cfg->ep_addr); ep_state->iso = ep_cfg->ep_type == USB_DC_EP_ISOCHRONOUS; iso_mask = (ep_state->iso ? USB_USB_EPC2_REG_USB_ISO_Msk : 0); if (ep_cfg->ep_type == USB_DC_EP_CONTROL) { ep_state->mps = EP0_FIFO_SIZE; } else { ep_state->mps = ep_cfg->ep_mps; } ep_state->data1 = 0; if (ep_dir == USB_EP_DIR_OUT) { if (ep_cfg->ep_mps > ep_out_buf_size[ep_idx]) { return -EINVAL; } ep_state->regs->epc_out = ep_idx | iso_mask; } else { ep_state->regs->epc_in = ep_idx | iso_mask; } return 0; } int usb_dc_detach(void) { LOG_DBG("Detach"); usb_change_state(false, dev_state.vbus_present); return 0; } int usb_dc_attach(void) { LOG_INF("Attach"); usb_change_state(true, dev_state.vbus_present); return 0; } int usb_dc_ep_write(const uint8_t ep, const uint8_t *const data, const uint32_t data_len, uint32_t *const ret_bytes) { struct smartbond_ep_state *ep_state = usb_dc_get_ep_state(ep); if (ep_state == NULL) { LOG_ERR("%02x no ep_state", ep); return -EINVAL; } LOG_DBG("%02x %d bytes", ep, (int)data_len); if (!atomic_cas(&ep_state->busy, 0, 1)) { LOG_DBG("%02x transfer already in progress", ep); return -EAGAIN; } ep_state->buffer = (uint8_t *)data; ep_state->transferred = 0; ep_state->total_len = data_len; ep_state->last_packet_size = 0; if (ep == EP0_IN) { /* RX has priority over TX to send packet RX needs to be off */ REG_CLR_BIT(USB_RXC0_REG, USB_RX_EN); /* Handle case when device expect to send more data and * host already send ZLP to confirm reception (that means * that it will no longer try to read). * Enable EP0_NAK. 
*/ (void)USB->USB_EP0_NAK_REG; REG_SET_BIT(USB_MAMSK_REG, USB_M_EP0_NAK); } start_tx_packet(ep_state); if (ret_bytes) { *ret_bytes = data_len; } return 0; } int usb_dc_ep_set_stall(const uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); uint8_t ep_dir = USB_EP_GET_DIR(ep); struct smartbond_ep_state *ep_state = usb_dc_get_ep_state(ep); struct smartbond_ep_reg_set *regs; LOG_DBG("%02x", ep); if (!dev_attached() || !dev_ready()) { return -ENODEV; } if (ep_state == NULL) { LOG_ERR("Not valid endpoint: %02x", ep); return -EINVAL; } regs = ep_state->regs; ep_state->stall = 1; if (ep_idx == 0) { /* EP0 has just one registers to control stall for IN and OUT */ if (ep_dir == USB_EP_DIR_OUT) { regs->rxc = USB_USB_RXC0_REG_USB_RX_EN_Msk; REG_SET_BIT(USB_EPC0_REG, USB_STALL); } else { regs->rxc = 0; regs->txc = USB_USB_TXC0_REG_USB_TX_EN_Msk; REG_SET_BIT(USB_EPC0_REG, USB_STALL); } } else { if (ep_dir == USB_EP_DIR_OUT) { regs->epc_out |= USB_USB_EPC1_REG_USB_STALL_Msk; regs->rxc |= USB_USB_RXC1_REG_USB_RX_EN_Msk; } else { regs->epc_in |= USB_USB_EPC1_REG_USB_STALL_Msk; regs->txc |= USB_USB_TXC1_REG_USB_TX_EN_Msk | USB_USB_TXC1_REG_USB_LAST_Msk; } } return 0; } int usb_dc_ep_is_stalled(const uint8_t ep, uint8_t *const stalled) { struct smartbond_ep_state *ep_state = usb_dc_get_ep_state(ep); if (!dev_attached() || !dev_ready()) { return -ENODEV; } if (NULL == ep_state || NULL == stalled) { return -EINVAL; } *stalled = ep_state->stall; return 0; } int usb_dc_ep_halt(const uint8_t ep) { return usb_dc_ep_set_stall(ep); } int usb_dc_ep_flush(const uint8_t ep) { struct smartbond_ep_state *ep_state = usb_dc_get_ep_state(ep); if (ep_state == NULL) { LOG_ERR("Not valid endpoint: %02x", ep); return -EINVAL; } LOG_ERR("Not implemented"); return 0; } SYS_INIT(usb_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE); ```
/content/code_sandbox/drivers/usb/device/usb_dc_smartbond.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
13,482
```unknown # USB device configuration options menuconfig USB_DEVICE_DRIVER bool "USB device controller drivers" help Enable USB device controller drivers. if USB_DEVICE_DRIVER config USB_DC_HAS_HS_SUPPORT bool "USB device controller supports high speed" help USB device controller supports high speed. config USB_DEVICE_REMOTE_WAKEUP bool "Remote wakeup support" help USB device controller supports remote wakeup feature. config USB_DW bool "Designware USB Device Controller Driver" default y depends on DT_HAS_SNPS_DWC2_ENABLED help Designware USB Device Controller Driver. config USB_DW_USB_2_0 bool "DesignWare Controller and PHY support for USB specification 2.0" depends on USB_DW help Indicates whether or not USB specification version 2.0 is supported config USB_DC_RPI_PICO bool "USB device controller driver for Raspberry Pi Pico devices" default y depends on DT_HAS_RASPBERRYPI_PICO_USBD_ENABLED help Enable USB support on the RP2 family of processors. DT_STM32_USBHS := $(dt_nodelabel_path,usbotg_hs) DT_STM32_USBHS_SPEED := $(dt_node_str_prop_equals,$(DT_STM32_USBHS),maximum-speed,high-speed) config USB_DC_STM32 bool "USB device controller driver for STM32 devices" default y depends on DT_HAS_ST_STM32_OTGFS_ENABLED \ || DT_HAS_ST_STM32_OTGHS_ENABLED \ || DT_HAS_ST_STM32_USB_ENABLED select USE_STM32_LL_USB select USE_STM32_HAL_PCD select USE_STM32_HAL_PCD_EX select USB_DC_HAS_HS_SUPPORT if "$(DT_STM32_USBHS_SPEED)" imply USB_DEVICE_REMOTE_WAKEUP help Enable STM32 family USB device controller shim driver. config USB_DC_STM32_CLOCK_CHECK bool "Runtime USB 48MHz clock check" depends on USB_DC_STM32 default y if !(SOC_SERIES_STM32F1X || SOC_SERIES_STM32F3X) help Enable USB clock 48MHz configuration runtime check. In specific cases, this check might provide wrong verdict and should be disabled. config USB_DC_SAM0 bool "SAM0 series USB Device Controller driver" default y depends on DT_HAS_ATMEL_SAM0_USB_ENABLED help SAM0 family USB device controller Driver. 
config USB_DC_SMARTBOND bool "SmartBond series USB Device Controller driver" default y select CLOCK_CONTROL_SMARTBOND select CLOCK_CONTROL select DMA depends on DT_HAS_RENESAS_SMARTBOND_USBD_ENABLED help SmartBond family USB device controller Driver. config USB_DC_SAM_USBC bool "SAM4L USBC Device Controller driver" default y depends on DT_HAS_ATMEL_SAM_USBC_ENABLED help SAM4L family USBC device controller Driver. DT_SAM_USBHS := $(dt_nodelabel_path,usbhs) DT_SAM_USBHS_SPEED := $(dt_node_str_prop_equals,$(DT_SAM_USBHS),maximum-speed,high-speed) config USB_DC_SAM_USBHS bool "SAM series USB HS Device Controller driver" default y depends on DT_HAS_ATMEL_SAM_USBHS_ENABLED imply USB_DC_HAS_HS_SUPPORT if "$(DT_SAM_USBHS_SPEED)" help SAM family USB HS device controller Driver. config USB_NRFX bool "Nordic Semiconductor USB Device Controller Driver" default y depends on DT_HAS_NORDIC_NRF_USBD_ENABLED select NRF_USBD_COMMON select NRFX_POWER imply USB_DEVICE_REMOTE_WAKEUP help nRF USB Device Controller Driver config USB_NRFX_EVT_QUEUE_SIZE int "USBD event queue size" range 4 64 default 32 depends on USB_NRFX help Size of the driver's internal event queue. Required size will depend on number of endpoints (class instances) in use. config USB_NRFX_WORK_QUEUE_STACK_SIZE int "USBD work queue stack size" default 1024 depends on USB_NRFX help Size of the stack for the work queue thread that is used in the driver for handling the events from the USBD ISR, i.e. executing endpoint callbacks and providing proper notifications to the USB device stack. config USB_NRFX_ATTACHED_EVENT_DELAY int "USBD attached event delay" range 0 1000 default 0 depends on USB_NRFX help Configurable attached event delay in milliseconds. Delay can be used to give USB Charging Controller time for initialization. config USB_KINETIS bool "Kinetis USB Device Controller Driver" default y depends on DT_HAS_NXP_KINETIS_USBD_ENABLED help Kinetis USB Device Controller Driver. 
config USB_MCUX bool "NXP MCUX USB Device Controller Driver" default y depends on DT_HAS_NXP_EHCI_ENABLED || DT_HAS_NXP_LPCIP3511_ENABLED help NXP MCUX USB Device Controller Driver for MXRT and LPC SoC's. if USB_MCUX choice USB_MCUX_CONTROLLER_TYPE prompt "USB Controller type" help Select the type of controller available in the SoC. config USB_DC_NXP_EHCI bool "MXRT EHCI USB Device Controller" select NOCACHE_MEMORY if ARCH_HAS_NOCACHE_MEMORY_SUPPORT select USB_DC_HAS_HS_SUPPORT help Kinetis and RT EHCI USB Device Controller Driver. config USB_DC_NXP_LPCIP3511 bool "LPC USB Device Controller" select USB_DC_HAS_HS_SUPPORT if "$(dt_nodelabel_enabled,usbhs)" select PINCTRL help LPC USB Device Controller Driver. endchoice config USB_DC_MSG_QUEUE_LEN int default 10 help Maximum number of messages USB device controller interrupt can queue for callback thread config USB_MCUX_THREAD_STACK_SIZE int "Stack size for the USB driver" default 2048 if USB_DEVICE_LOG_LEVEL_DBG default 1024 help Size of the stack used for the internal USB thread. endif # USB_MCUX config USB_DC_IT82XX2 bool "ITE IT82XX2 USB Device Controller Driver" default y depends on DT_HAS_ITE_IT82XX2_USB_ENABLED help ITE IT82XX2 USB Device Controller Driver config USB_DC_NUMAKER bool "Nuvoton NuMaker USB 1.1 device controller" default y depends on DT_HAS_NUVOTON_NUMAKER_USBD_ENABLED help Enable Nuvoton NuMaker USB 1.1 device controller driver if USB_DC_NUMAKER config USB_DC_NUMAKER_MSG_QUEUE_SIZE int "USB DC message queue size" default 32 help Maximum number of messages the driver can queue for interrupt bottom half processing config USB_DC_NUMAKER_MSG_HANDLER_THREAD_STACK_SIZE int "USB DC message handler thread stack size" default 1536 help Size of the stack for the message handler thread that is used in the driver for handling messages from the USB DC ISR, i.e. interrupt bottom half processing, including callbacks to the USB device stack. 
endif # USB_DC_NUMAKER config USB_NATIVE_POSIX bool "Native Posix USB Device Controller Driver" depends on ARCH_POSIX && EXTERNAL_LIBC default y if BOARD_NATIVE_SIM || BOARD_NATIVE_POSIX help Native Posix USB Device Controller Driver. DT_ZEPHYR_UDC0 := $(dt_nodelabel_path,zephyr_udc0) DT_ZEPHYR_UDC0_HS_SPEED := $(dt_node_str_prop_equals,$(DT_ZEPHYR_UDC0),maximum-speed,high-speed) config USB_NATIVE_POSIX_HS bool "High speed support" select USB_DC_HAS_HS_SUPPORT depends on USB_NATIVE_POSIX default y if "$(DT_ZEPHYR_UDC0_HS_SPEED)" help Enable high speed support in the Native Posix USB device controller driver. module = USB_DRIVER module-str = usb driver source "subsys/logging/Kconfig.template.log_config" endif # USB_DEVICE_DRIVER ```
/content/code_sandbox/drivers/usb/device/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,760
```c /* * */ #include <soc.h> #include <string.h> #include <zephyr/drivers/usb/usb_dc.h> #include <zephyr/usb/usb_device.h> #include <soc.h> #include <zephyr/init.h> #include <zephyr/kernel.h> #include <zephyr/drivers/pinctrl.h> #include "usb.h" #include "usb_device.h" #include "usb_device_config.h" #include "usb_device_dci.h" #ifdef CONFIG_USB_DC_NXP_EHCI #undef DT_DRV_COMPAT #define DT_DRV_COMPAT nxp_ehci #include "usb_device_ehci.h" #endif #ifdef CONFIG_USB_DC_NXP_LPCIP3511 #undef DT_DRV_COMPAT #define DT_DRV_COMPAT nxp_lpcip3511 #include "usb_device_lpcip3511.h" #endif #ifdef CONFIG_HAS_MCUX_CACHE #include <fsl_cache.h> #endif #define LOG_LEVEL CONFIG_USB_DRIVER_LOG_LEVEL #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(usb_dc_mcux); static void usb_isr_handler(void); /* the setup transfer state */ #define SETUP_DATA_STAGE_DONE (0) #define SETUP_DATA_STAGE_IN (1) #define SETUP_DATA_STAGE_OUT (2) /* * Endpoint absolute index calculation: * * MCUX EHCI USB device controller supports a specific * number of bidirectional endpoints. Bidirectional means * that an endpoint object is represented to the outside * as an OUT and an IN Endpoint with its own buffers * and control structures. * * EP_ABS_IDX refers to the corresponding control * structure, for example: * * EP addr | ep_idx | ep_abs_idx * ------------------------------- * 0x00 | 0x00 | 0x00 * 0x80 | 0x00 | 0x01 * 0x01 | 0x01 | 0x02 * 0x81 | 0x01 | 0x03 * .... | .... | .... * * The NUM_OF_EP_MAX (and number of s_ep_ctrl) should be double * of num_bidir_endpoints. 
*/ #define EP_ABS_IDX(ep) (USB_EP_GET_IDX(ep) * 2 + \ (USB_EP_GET_DIR(ep) >> 7)) #define NUM_OF_EP_MAX (DT_INST_PROP(0, num_bidir_endpoints) * 2) #define NUM_INSTS DT_NUM_INST_STATUS_OKAY(nxp_ehci) + DT_NUM_INST_STATUS_OKAY(nxp_lpcip3511) BUILD_ASSERT(NUM_INSTS <= 1, "Only one USB device supported"); /* Controller ID is for HAL usage */ #if defined(CONFIG_SOC_SERIES_IMXRT5XX) || \ defined(CONFIG_SOC_SERIES_IMXRT6XX) || \ defined(CONFIG_SOC_LPC55S26) || defined(CONFIG_SOC_LPC55S28) || \ defined(CONFIG_SOC_LPC55S16) #define CONTROLLER_ID kUSB_ControllerLpcIp3511Hs0 #elif defined(CONFIG_SOC_LPC55S36) #define CONTROLLER_ID kUSB_ControllerLpcIp3511Fs0 #elif defined(CONFIG_SOC_LPC55S69_CPU0) || defined(CONFIG_SOC_LPC55S69_CPU1) #if DT_NODE_HAS_STATUS(DT_NODELABEL(usbhs), okay) #define CONTROLLER_ID kUSB_ControllerLpcIp3511Hs0 #elif DT_NODE_HAS_STATUS(DT_NODELABEL(usbfs), okay) #define CONTROLLER_ID kUSB_ControllerLpcIp3511Fs0 #endif /* LPC55s69 */ #elif defined(CONFIG_SOC_SERIES_IMXRT11XX) || \ defined(CONFIG_SOC_SERIES_IMXRT10XX) || \ defined(CONFIG_SOC_SERIES_MCXN) #if DT_NODE_HAS_STATUS(DT_NODELABEL(usb1), okay) #define CONTROLLER_ID kUSB_ControllerEhci0 #elif DT_NODE_HAS_STATUS(DT_NODELABEL(usb2), okay) #define CONTROLLER_ID kUSB_ControllerEhci1 #endif /* IMX RT */ #elif defined(CONFIG_SOC_SERIES_RW6XX) #define CONTROLLER_ID kUSB_ControllerEhci0 #else /* If SOC has EHCI or LPCIP3511 then probably just need to add controller ID to this code */ #error "USB driver does not yet support this SOC" #endif /* CONTROLLER ID */ /* We do not need a buffer for the write side on platforms that have USB RAM. * The SDK driver will copy the data buffer to be sent to USB RAM. */ #ifdef CONFIG_USB_DC_NXP_LPCIP3511 #define EP_BUF_NUMOF_BLOCKS (NUM_OF_EP_MAX / 2) #else #define EP_BUF_NUMOF_BLOCKS NUM_OF_EP_MAX #endif /* The max MPS is 1023 for FS, 1024 for HS. 
*/ #if defined(CONFIG_NOCACHE_MEMORY) #define EP_BUF_NONCACHED K_HEAP_DEFINE_NOCACHE(ep_buf_pool, 1024 * EP_BUF_NUMOF_BLOCKS); #else K_HEAP_DEFINE(ep_buf_pool, 1024 * EP_BUF_NUMOF_BLOCKS); #endif struct usb_ep_ctrl_data { usb_device_callback_message_struct_t transfer_message; void *block; usb_dc_ep_callback callback; uint16_t ep_mps; uint8_t ep_enabled : 1; uint8_t ep_occupied : 1; }; struct usb_dc_state { usb_device_struct_t dev_struct; /* Controller handle */ usb_dc_status_callback status_cb; struct usb_ep_ctrl_data *eps; bool attached; uint8_t setup_data_stage; K_KERNEL_STACK_MEMBER(thread_stack, CONFIG_USB_MCUX_THREAD_STACK_SIZE); struct k_thread thread; }; static struct usb_ep_ctrl_data s_ep_ctrl[NUM_OF_EP_MAX]; static struct usb_dc_state dev_state; /* Message queue for the usb thread */ K_MSGQ_DEFINE(usb_dc_msgq, sizeof(usb_device_callback_message_struct_t), CONFIG_USB_DC_MSG_QUEUE_LEN, 4); #if defined(CONFIG_USB_DC_NXP_EHCI) /* EHCI device driver interface */ static const usb_device_controller_interface_struct_t mcux_usb_iface = { USB_DeviceEhciInit, USB_DeviceEhciDeinit, USB_DeviceEhciSend, USB_DeviceEhciRecv, USB_DeviceEhciCancel, USB_DeviceEhciControl }; extern void USB_DeviceEhciIsrFunction(void *deviceHandle); #elif defined(CONFIG_USB_DC_NXP_LPCIP3511) /* LPCIP3511 device driver interface */ static const usb_device_controller_interface_struct_t mcux_usb_iface = { USB_DeviceLpc3511IpInit, USB_DeviceLpc3511IpDeinit, USB_DeviceLpc3511IpSend, USB_DeviceLpc3511IpRecv, USB_DeviceLpc3511IpCancel, USB_DeviceLpc3511IpControl }; extern void USB_DeviceLpcIp3511IsrFunction(void *deviceHandle); #endif int usb_dc_reset(void) { if (dev_state.dev_struct.controllerHandle != NULL) { dev_state.dev_struct.controllerInterface->deviceControl( dev_state.dev_struct.controllerHandle, kUSB_DeviceControlSetDefaultStatus, NULL); } return 0; } int usb_dc_attach(void) { usb_status_t status; dev_state.eps = &s_ep_ctrl[0]; if (dev_state.attached) { LOG_WRN("Already attached"); return 
0; } dev_state.dev_struct.controllerInterface = &mcux_usb_iface; status = dev_state.dev_struct.controllerInterface->deviceInit(CONTROLLER_ID, &dev_state.dev_struct, &dev_state.dev_struct.controllerHandle); if (kStatus_USB_Success != status) { return -EIO; } IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), usb_isr_handler, 0, 0); irq_enable(DT_INST_IRQN(0)); dev_state.attached = true; status = dev_state.dev_struct.controllerInterface->deviceControl( dev_state.dev_struct.controllerHandle, kUSB_DeviceControlRun, NULL); LOG_DBG("Attached"); return 0; } int usb_dc_detach(void) { usb_status_t status; if (dev_state.dev_struct.controllerHandle == NULL) { LOG_WRN("Device not attached"); return 0; } status = dev_state.dev_struct.controllerInterface->deviceControl( dev_state.dev_struct.controllerHandle, kUSB_DeviceControlStop, NULL); if (kStatus_USB_Success != status) { return -EIO; } status = dev_state.dev_struct.controllerInterface->deviceDeinit( dev_state.dev_struct.controllerHandle); if (kStatus_USB_Success != status) { return -EIO; } dev_state.dev_struct.controllerHandle = NULL; dev_state.attached = false; LOG_DBG("Detached"); return 0; } int usb_dc_set_address(const uint8_t addr) { usb_status_t status; dev_state.dev_struct.deviceAddress = addr; status = dev_state.dev_struct.controllerInterface->deviceControl( dev_state.dev_struct.controllerHandle, kUSB_DeviceControlPreSetDeviceAddress, &dev_state.dev_struct.deviceAddress); if (kStatus_USB_Success != status) { LOG_ERR("Failed to set device address"); return -EINVAL; } return 0; } int usb_dc_ep_check_cap(const struct usb_dc_ep_cfg_data *const cfg) { uint8_t ep_abs_idx = EP_ABS_IDX(cfg->ep_addr); uint8_t ep_idx = USB_EP_GET_IDX(cfg->ep_addr); if ((cfg->ep_type == USB_DC_EP_CONTROL) && ep_idx) { LOG_ERR("invalid endpoint configuration"); return -1; } if (ep_abs_idx >= NUM_OF_EP_MAX) { LOG_ERR("endpoint index/address out of range"); return -1; } return 0; } int usb_dc_ep_configure(const struct usb_dc_ep_cfg_data *const 
cfg) { uint8_t ep_abs_idx = EP_ABS_IDX(cfg->ep_addr); usb_device_endpoint_init_struct_t ep_init; struct usb_ep_ctrl_data *eps = &dev_state.eps[ep_abs_idx]; usb_status_t status; uint8_t ep; ep_init.zlt = 0U; ep_init.endpointAddress = cfg->ep_addr; ep_init.maxPacketSize = cfg->ep_mps; ep_init.transferType = cfg->ep_type; if (ep_abs_idx >= NUM_OF_EP_MAX) { LOG_ERR("Wrong endpoint index/address"); return -EINVAL; } if (dev_state.eps[ep_abs_idx].ep_enabled) { LOG_WRN("Endpoint already configured"); return 0; } ep = cfg->ep_addr; status = dev_state.dev_struct.controllerInterface->deviceControl( dev_state.dev_struct.controllerHandle, kUSB_DeviceControlEndpointDeinit, &ep); if (kStatus_USB_Success != status) { LOG_WRN("Failed to un-initialize endpoint (status=%d)", (int)status); } #ifdef CONFIG_USB_DC_NXP_LPCIP3511 /* Allocate buffers used during read operation */ if (USB_EP_DIR_IS_OUT(cfg->ep_addr)) { #endif void **block; block = &(eps->block); if (*block) { k_heap_free(&ep_buf_pool, *block); *block = NULL; } *block = k_heap_alloc(&ep_buf_pool, cfg->ep_mps, K_NO_WAIT); if (*block == NULL) { LOG_ERR("Failed to allocate memory"); return -ENOMEM; } memset(*block, 0, cfg->ep_mps); #ifdef CONFIG_USB_DC_NXP_LPCIP3511 } #endif dev_state.eps[ep_abs_idx].ep_mps = cfg->ep_mps; status = dev_state.dev_struct.controllerInterface->deviceControl( dev_state.dev_struct.controllerHandle, kUSB_DeviceControlEndpointInit, &ep_init); if (kStatus_USB_Success != status) { LOG_ERR("Failed to initialize endpoint"); return -EIO; } /* * If it is control endpoint, controller will prime setup * here set the occupied flag. 
*/ if ((USB_EP_GET_IDX(cfg->ep_addr) == USB_CONTROL_ENDPOINT) && (USB_EP_DIR_IS_OUT(cfg->ep_addr))) { dev_state.eps[ep_abs_idx].ep_occupied = true; } dev_state.eps[ep_abs_idx].ep_enabled = true; return 0; } int usb_dc_ep_set_stall(const uint8_t ep) { uint8_t endpoint = ep; uint8_t ep_abs_idx = EP_ABS_IDX(ep); usb_status_t status; if (ep_abs_idx >= NUM_OF_EP_MAX) { LOG_ERR("Wrong endpoint index/address"); return -EINVAL; } status = dev_state.dev_struct.controllerInterface->deviceControl( dev_state.dev_struct.controllerHandle, kUSB_DeviceControlEndpointStall, &endpoint); if (kStatus_USB_Success != status) { LOG_ERR("Failed to stall endpoint"); return -EIO; } return 0; } int usb_dc_ep_clear_stall(const uint8_t ep) { uint8_t endpoint = ep; uint8_t ep_abs_idx = EP_ABS_IDX(ep); usb_status_t status; if (ep_abs_idx >= NUM_OF_EP_MAX) { LOG_ERR("Wrong endpoint index/address"); return -EINVAL; } status = dev_state.dev_struct.controllerInterface->deviceControl( dev_state.dev_struct.controllerHandle, kUSB_DeviceControlEndpointUnstall, &endpoint); if (kStatus_USB_Success != status) { LOG_ERR("Failed to clear stall"); return -EIO; } if ((USB_EP_GET_IDX(ep) != USB_CONTROL_ENDPOINT) && (USB_EP_DIR_IS_OUT(ep))) { status = dev_state.dev_struct.controllerInterface->deviceRecv( dev_state.dev_struct.controllerHandle, ep, (uint8_t *)dev_state.eps[ep_abs_idx].block, (uint32_t)dev_state.eps[ep_abs_idx].ep_mps); if (kStatus_USB_Success != status) { LOG_ERR("Failed to enable reception on 0x%02x", ep); return -EIO; } dev_state.eps[ep_abs_idx].ep_occupied = true; } return 0; } int usb_dc_ep_is_stalled(const uint8_t ep, uint8_t *const stalled) { uint8_t ep_abs_idx = EP_ABS_IDX(ep); usb_device_endpoint_status_struct_t ep_status; usb_status_t status; if (ep_abs_idx >= NUM_OF_EP_MAX) { LOG_ERR("Wrong endpoint index/address"); return -EINVAL; } if (!stalled) { return -EINVAL; } *stalled = 0; ep_status.endpointAddress = ep; ep_status.endpointStatus = kUSB_DeviceEndpointStateIdle; status = 
dev_state.dev_struct.controllerInterface->deviceControl( dev_state.dev_struct.controllerHandle, kUSB_DeviceControlGetEndpointStatus, &ep_status); if (kStatus_USB_Success != status) { LOG_ERR("Failed to get endpoint status"); return -EIO; } *stalled = (uint8_t)ep_status.endpointStatus; return 0; } int usb_dc_ep_halt(const uint8_t ep) { return usb_dc_ep_set_stall(ep); } int usb_dc_ep_enable(const uint8_t ep) { uint8_t ep_abs_idx = EP_ABS_IDX(ep); usb_status_t status; /* * endpoint 0 OUT is primed by controller driver when configure this * endpoint. */ if (!ep_abs_idx) { return 0; } if (ep_abs_idx >= NUM_OF_EP_MAX) { LOG_ERR("Wrong endpoint index/address"); return -EINVAL; } if (dev_state.eps[ep_abs_idx].ep_occupied) { LOG_WRN("endpoint 0x%x already enabled", ep); return -EALREADY; } if ((USB_EP_GET_IDX(ep) != USB_CONTROL_ENDPOINT) && (USB_EP_DIR_IS_OUT(ep))) { status = dev_state.dev_struct.controllerInterface->deviceRecv( dev_state.dev_struct.controllerHandle, ep, (uint8_t *)dev_state.eps[ep_abs_idx].block, (uint32_t)dev_state.eps[ep_abs_idx].ep_mps); if (kStatus_USB_Success != status) { LOG_ERR("Failed to enable reception on 0x%02x", ep); return -EIO; } dev_state.eps[ep_abs_idx].ep_occupied = true; } else { /* * control endpoint just be enabled before enumeration, * when running here, setup has been primed. 
*/ dev_state.eps[ep_abs_idx].ep_occupied = true; } return 0; } int usb_dc_ep_disable(const uint8_t ep) { uint8_t ep_abs_idx = EP_ABS_IDX(ep); usb_status_t status; if (ep_abs_idx >= NUM_OF_EP_MAX) { LOG_ERR("Wrong endpoint index/address"); return -EINVAL; } if (dev_state.dev_struct.controllerHandle != NULL) { status = dev_state.dev_struct.controllerInterface->deviceCancel( dev_state.dev_struct.controllerHandle, ep); if (kStatus_USB_Success != status) { LOG_ERR("Failed to disable ep 0x%02x", ep); return -EIO; } } dev_state.eps[ep_abs_idx].ep_enabled = false; dev_state.eps[ep_abs_idx].ep_occupied = false; return 0; } int usb_dc_ep_flush(const uint8_t ep) { uint8_t ep_abs_idx = EP_ABS_IDX(ep); if (ep_abs_idx >= NUM_OF_EP_MAX) { LOG_ERR("Wrong endpoint index/address"); return -EINVAL; } LOG_DBG("Not implemented, idx 0x%02x, ep %u", ep_abs_idx, ep); return 0; } int usb_dc_ep_write(const uint8_t ep, const uint8_t *const data, const uint32_t data_len, uint32_t *const ret_bytes) { uint8_t ep_abs_idx = EP_ABS_IDX(ep); uint8_t *buffer; uint32_t len_to_send = data_len; usb_status_t status; if (ep_abs_idx >= NUM_OF_EP_MAX) { LOG_ERR("Wrong endpoint index/address"); return -EINVAL; } if (USB_EP_GET_DIR(ep) != USB_EP_DIR_IN) { LOG_ERR("Wrong endpoint direction"); return -EINVAL; } /* Copy the data for SoC's that do not have a USB RAM * as the SDK driver will copy the data into USB RAM, * if available. 
*/ #ifndef CONFIG_USB_DC_NXP_LPCIP3511 buffer = (uint8_t *)dev_state.eps[ep_abs_idx].block; if (data_len > dev_state.eps[ep_abs_idx].ep_mps) { len_to_send = dev_state.eps[ep_abs_idx].ep_mps; } for (uint32_t n = 0; n < len_to_send; n++) { buffer[n] = data[n]; } #else buffer = (uint8_t *)data; #endif #if defined(CONFIG_HAS_MCUX_CACHE) && !defined(EP_BUF_NONCACHED) DCACHE_CleanByRange((uint32_t)buffer, len_to_send); #endif status = dev_state.dev_struct.controllerInterface->deviceSend( dev_state.dev_struct.controllerHandle, ep, buffer, len_to_send); if (kStatus_USB_Success != status) { LOG_ERR("Failed to fill ep 0x%02x buffer", ep); return -EIO; } if (ret_bytes) { *ret_bytes = len_to_send; } return 0; } static void update_control_stage(usb_device_callback_message_struct_t *cb_msg, uint32_t data_len, uint32_t max_data_len) { struct usb_setup_packet *usbd_setup; usbd_setup = (struct usb_setup_packet *)cb_msg->buffer; if (cb_msg->isSetup) { if (usbd_setup->wLength == 0) { dev_state.setup_data_stage = SETUP_DATA_STAGE_DONE; } else if (usb_reqtype_is_to_host(usbd_setup)) { dev_state.setup_data_stage = SETUP_DATA_STAGE_IN; } else { dev_state.setup_data_stage = SETUP_DATA_STAGE_OUT; } } else { if (dev_state.setup_data_stage != SETUP_DATA_STAGE_DONE) { if ((data_len >= max_data_len) || (data_len < dev_state.eps[0].ep_mps)) { dev_state.setup_data_stage = SETUP_DATA_STAGE_DONE; } } } } int usb_dc_ep_read_wait(uint8_t ep, uint8_t *data, uint32_t max_data_len, uint32_t *read_bytes) { uint8_t ep_abs_idx = EP_ABS_IDX(ep); uint32_t data_len; uint8_t *bufp = NULL; if (dev_state.eps[ep_abs_idx].ep_occupied) { LOG_ERR("Endpoint is occupied by the controller"); return -EBUSY; } if ((ep_abs_idx >= NUM_OF_EP_MAX) || (USB_EP_GET_DIR(ep) != USB_EP_DIR_OUT)) { LOG_ERR("Wrong endpoint index/address/direction"); return -EINVAL; } /* Allow to read 0 bytes */ if (!data && max_data_len) { LOG_ERR("Wrong arguments"); return -EINVAL; } /* * It is control setup, we should use message.buffer, * this 
buffer is from internal setup array. */ bufp = dev_state.eps[ep_abs_idx].transfer_message.buffer; data_len = dev_state.eps[ep_abs_idx].transfer_message.length; if (data_len == USB_UNINITIALIZED_VAL_32) { if (read_bytes) { *read_bytes = 0; } return -EINVAL; } if (!data && !max_data_len) { /* When both buffer and max data to read are zero return the * available data in buffer. */ if (read_bytes) { *read_bytes = data_len; } return 0; } if (data_len > max_data_len) { LOG_WRN("Not enough room to copy all the data!"); data_len = max_data_len; } if (data != NULL) { for (uint32_t i = 0; i < data_len; i++) { data[i] = bufp[i]; } } if (read_bytes) { *read_bytes = data_len; } if (USB_EP_GET_IDX(ep) == USB_ENDPOINT_CONTROL) { update_control_stage(&dev_state.eps[0].transfer_message, data_len, max_data_len); } return 0; } int usb_dc_ep_read_continue(uint8_t ep) { uint8_t ep_abs_idx = EP_ABS_IDX(ep); usb_status_t status; if (ep_abs_idx >= NUM_OF_EP_MAX || USB_EP_GET_DIR(ep) != USB_EP_DIR_OUT) { LOG_ERR("Wrong endpoint index/address/direction"); return -EINVAL; } if (dev_state.eps[ep_abs_idx].ep_occupied) { LOG_WRN("endpoint 0x%x already occupied", ep); return -EBUSY; } if (USB_EP_GET_IDX(ep) == USB_ENDPOINT_CONTROL) { if (dev_state.setup_data_stage == SETUP_DATA_STAGE_DONE) { return 0; } if (dev_state.setup_data_stage == SETUP_DATA_STAGE_IN) { dev_state.setup_data_stage = SETUP_DATA_STAGE_DONE; } } status = dev_state.dev_struct.controllerInterface->deviceRecv( dev_state.dev_struct.controllerHandle, ep, (uint8_t *)dev_state.eps[ep_abs_idx].block, dev_state.eps[ep_abs_idx].ep_mps); if (kStatus_USB_Success != status) { LOG_ERR("Failed to enable reception on ep 0x%02x", ep); return -EIO; } dev_state.eps[ep_abs_idx].ep_occupied = true; return 0; } int usb_dc_ep_read(const uint8_t ep, uint8_t *const data, const uint32_t max_data_len, uint32_t *const read_bytes) { int retval = usb_dc_ep_read_wait(ep, data, max_data_len, read_bytes); if (retval) { return retval; } if (!data && 
!max_data_len) { /* * When both buffer and max data to read are zero the above * call would fetch the data len and we simply return. */ return 0; } return usb_dc_ep_read_continue(ep); } int usb_dc_ep_set_callback(const uint8_t ep, const usb_dc_ep_callback cb) { uint8_t ep_abs_idx = EP_ABS_IDX(ep); if (ep_abs_idx >= NUM_OF_EP_MAX) { LOG_ERR("Wrong endpoint index/address"); return -EINVAL; } if (!dev_state.attached) { return -EINVAL; } dev_state.eps[ep_abs_idx].callback = cb; return 0; } void usb_dc_set_status_callback(const usb_dc_status_callback cb) { dev_state.status_cb = cb; } int usb_dc_ep_mps(const uint8_t ep) { uint8_t ep_abs_idx = EP_ABS_IDX(ep); if (ep_abs_idx >= NUM_OF_EP_MAX) { LOG_ERR("Wrong endpoint index/address"); return -EINVAL; } return dev_state.eps[ep_abs_idx].ep_mps; } static void handle_bus_reset(void) { usb_device_endpoint_init_struct_t ep_init; uint8_t ep_abs_idx = 0; usb_status_t status; dev_state.dev_struct.deviceAddress = 0; status = dev_state.dev_struct.controllerInterface->deviceControl( dev_state.dev_struct.controllerHandle, kUSB_DeviceControlSetDefaultStatus, NULL); if (kStatus_USB_Success != status) { LOG_ERR("Failed to set default status"); } for (int i = 0; i < NUM_OF_EP_MAX; i++) { dev_state.eps[i].ep_occupied = false; dev_state.eps[i].ep_enabled = false; } ep_init.zlt = 0U; ep_init.transferType = USB_ENDPOINT_CONTROL; ep_init.maxPacketSize = USB_CONTROL_EP_MPS; ep_init.endpointAddress = USB_CONTROL_EP_OUT; ep_abs_idx = EP_ABS_IDX(ep_init.endpointAddress); dev_state.eps[ep_abs_idx].ep_mps = USB_CONTROL_EP_MPS; status = dev_state.dev_struct.controllerInterface->deviceControl( dev_state.dev_struct.controllerHandle, kUSB_DeviceControlEndpointInit, &ep_init); if (kStatus_USB_Success != status) { LOG_ERR("Failed to initialize control OUT endpoint"); } dev_state.eps[ep_abs_idx].ep_occupied = false; dev_state.eps[ep_abs_idx].ep_enabled = true; ep_init.endpointAddress = USB_CONTROL_EP_IN; ep_abs_idx = EP_ABS_IDX(ep_init.endpointAddress); 
dev_state.eps[ep_abs_idx].ep_mps = USB_CONTROL_EP_MPS; status = dev_state.dev_struct.controllerInterface->deviceControl( dev_state.dev_struct.controllerHandle, kUSB_DeviceControlEndpointInit, &ep_init); if (kStatus_USB_Success != status) { LOG_ERR("Failed to initialize control IN endpoint"); } dev_state.eps[ep_abs_idx].ep_occupied = false; dev_state.eps[ep_abs_idx].ep_enabled = true; } static void handle_transfer_msg(usb_device_callback_message_struct_t *cb_msg) { uint8_t ep_status_code = 0; uint8_t ep = cb_msg->code; uint8_t ep_abs_idx = EP_ABS_IDX(ep); usb_status_t status; dev_state.eps[ep_abs_idx].ep_occupied = false; if (cb_msg->length == UINT32_MAX) { /* * Probably called from USB_DeviceEhciCancel() * LOG_WRN("Drop message for ep 0x%02x", ep); */ return; } if (cb_msg->isSetup) { ep_status_code = USB_DC_EP_SETUP; } else { /* IN TOKEN */ if (USB_EP_DIR_IS_IN(ep)) { if ((dev_state.dev_struct.deviceAddress != 0) && (ep_abs_idx == 1)) { /* * Set Address in the status stage in * the IN transfer. */ status = dev_state.dev_struct.controllerInterface->deviceControl( dev_state.dev_struct.controllerHandle, kUSB_DeviceControlSetDeviceAddress, &dev_state.dev_struct.deviceAddress); if (kStatus_USB_Success != status) { LOG_ERR("Failed to set device address"); return; } dev_state.dev_struct.deviceAddress = 0; } ep_status_code = USB_DC_EP_DATA_IN; } /* OUT TOKEN */ else { ep_status_code = USB_DC_EP_DATA_OUT; } } if (dev_state.eps[ep_abs_idx].callback) { #if defined(CONFIG_HAS_MCUX_CACHE) && !defined(EP_BUF_NONCACHED) if (cb_msg->length) { DCACHE_InvalidateByRange((uint32_t)cb_msg->buffer, cb_msg->length); } #endif dev_state.eps[ep_abs_idx].callback(ep, ep_status_code); } else { LOG_ERR("No cb pointer for endpoint 0x%02x", ep); } } /** * Similar to the kinetis driver, this thread is used to not run the USB device * stack/endpoint callbacks in the ISR context. 
This is because callbacks from * the USB stack may use mutexes, or other kernel functions not supported from * an interrupt context. */ static void usb_mcux_thread_main(void *arg1, void *arg2, void *arg3) { ARG_UNUSED(arg1); ARG_UNUSED(arg2); ARG_UNUSED(arg3); uint8_t ep_abs_idx; usb_device_callback_message_struct_t msg; while (1) { k_msgq_get(&usb_dc_msgq, &msg, K_FOREVER); switch (msg.code) { case kUSB_DeviceNotifyBusReset: handle_bus_reset(); dev_state.status_cb(USB_DC_RESET, NULL); break; case kUSB_DeviceNotifyError: dev_state.status_cb(USB_DC_ERROR, NULL); break; case kUSB_DeviceNotifySuspend: dev_state.status_cb(USB_DC_SUSPEND, NULL); break; case kUSB_DeviceNotifyResume: dev_state.status_cb(USB_DC_RESUME, NULL); break; default: ep_abs_idx = EP_ABS_IDX(msg.code); if (ep_abs_idx >= NUM_OF_EP_MAX) { LOG_ERR("Wrong endpoint index/address"); return; } memcpy(&dev_state.eps[ep_abs_idx].transfer_message, &msg, sizeof(usb_device_callback_message_struct_t)); handle_transfer_msg(&dev_state.eps[ep_abs_idx].transfer_message); } } } /* Notify the up layer the KHCI status changed. 
*/ usb_status_t USB_DeviceNotificationTrigger(void *handle, void *msg) { /* Submit to message queue */ k_msgq_put(&usb_dc_msgq, (usb_device_callback_message_struct_t *)msg, K_NO_WAIT); return kStatus_USB_Success; } static void usb_isr_handler(void) { #if defined(CONFIG_USB_DC_NXP_EHCI) USB_DeviceEhciIsrFunction(&dev_state); #elif defined(CONFIG_USB_DC_NXP_LPCIP3511) USB_DeviceLpcIp3511IsrFunction(&dev_state); #endif } static int usb_mcux_init(void) { int err; k_thread_create(&dev_state.thread, dev_state.thread_stack, CONFIG_USB_MCUX_THREAD_STACK_SIZE, usb_mcux_thread_main, NULL, NULL, NULL, K_PRIO_COOP(2), 0, K_NO_WAIT); k_thread_name_set(&dev_state.thread, "usb_mcux"); PINCTRL_DT_INST_DEFINE(0); /* Apply pinctrl state */ err = pinctrl_apply_state(PINCTRL_DT_INST_DEV_CONFIG_GET(0), PINCTRL_STATE_DEFAULT); if (err) { return err; } return 0; } SYS_INIT(usb_mcux_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE); ```
/content/code_sandbox/drivers/usb/device/usb_dc_mcux.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,974
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_USB_DEVICE_USB_DC_DW_STM32_H #define ZEPHYR_DRIVERS_USB_DEVICE_USB_DC_DW_STM32_H #include <stdint.h> #include <zephyr/device.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> #include <usb_dwc2_hw.h> struct usb_dw_stm32_clk { const struct device *const dev; const struct stm32_pclken *const pclken; size_t pclken_len; }; static inline int clk_enable_st_stm32f4_fsotg(const struct usb_dw_stm32_clk *const clk) { int ret; if (!device_is_ready(clk->dev)) { return -ENODEV; } if (clk->pclken_len > 1) { uint32_t clk_rate; ret = clock_control_configure(clk->dev, (void *)&clk->pclken[1], NULL); if (ret) { return ret; } ret = clock_control_get_rate(clk->dev, (void *)&clk->pclken[1], &clk_rate); if (ret) { return ret; } if (clk_rate != MHZ(48)) { return -ENOTSUP; } } return clock_control_on(clk->dev, (void *)&clk->pclken[0]); } static inline int pwr_on_st_stm32f4_fsotg(struct usb_dwc2_reg *const base) { base->ggpio |= USB_DWC2_GGPIO_STM32_PWRDWN | USB_DWC2_GGPIO_STM32_VBDEN; return 0; } #define QUIRK_ST_STM32F4_FSOTG_DEFINE(n) \ static const struct stm32_pclken pclken_##n[] = STM32_DT_INST_CLOCKS(n);\ \ static const struct usb_dw_stm32_clk stm32f4_clk_##n = { \ .dev = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE), \ .pclken = pclken_##n, \ .pclken_len = DT_INST_NUM_CLOCKS(n), \ }; \ \ static int clk_enable_st_stm32f4_fsotg_##n(void) \ { \ return clk_enable_st_stm32f4_fsotg(&stm32f4_clk_##n); \ } #define USB_DW_QUIRK_ST_STM32F4_FSOTG_DEFINE(n) \ COND_CODE_1(DT_INST_NODE_HAS_COMPAT(n, st_stm32f4_fsotg), \ (QUIRK_ST_STM32F4_FSOTG_DEFINE(n)), ()) #endif /* ZEPHYR_DRIVERS_USB_DEVICE_USB_DC_DW_STM32_H */ ```
/content/code_sandbox/drivers/usb/device/usb_dc_dw_stm32.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
622
```c /* usb_dc_kinetis.c - Kinetis USBFSOTG usb device driver */ /* * */ #define DT_DRV_COMPAT nxp_kinetis_usbd #include <soc.h> #include <string.h> #include <stdio.h> #include <zephyr/kernel.h> #include <zephyr/sys/byteorder.h> #include <zephyr/usb/usb_device.h> #include <zephyr/init.h> #define LOG_LEVEL CONFIG_USB_DRIVER_LOG_LEVEL #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(usb_dc_kinetis); #define NUM_OF_EP_MAX DT_INST_PROP(0, num_bidir_endpoints) #define BD_OWN_MASK (1 << 5) #define BD_DATA01_MASK (1 << 4) #define BD_KEEP_MASK (1 << 3) #define BD_NINC_MASK (1 << 2) #define BD_DTS_MASK (1 << 1) #define BD_STALL_MASK (1 << 0) #define KINETIS_SETUP_TOKEN 0x0d #define KINETIS_IN_TOKEN 0x09 #define KINETIS_OUT_TOKEN 0x01 #define USBFSOTG_PERID 0x04 #define USBFSOTG_REV 0x33 #define KINETIS_EP_NUMOF_MASK 0xf #define KINETIS_ADDR2IDX(addr) ((addr) & (KINETIS_EP_NUMOF_MASK)) /* * Buffer Descriptor (BD) entry provides endpoint buffer control * information for USBFS controller. Every endpoint direction requires * two BD entries. */ struct buf_descriptor { union { uint32_t bd_fields; struct { uint32_t reserved_1_0 : 2; uint32_t tok_pid : 4; uint32_t data01 : 1; uint32_t own : 1; uint32_t reserved_15_8 : 8; uint32_t bc : 16; } get __packed; struct { uint32_t reserved_1_0 : 2; uint32_t bd_ctrl : 6; uint32_t reserved_15_8 : 8; uint32_t bc : 16; } set __packed; } __packed; uint32_t buf_addr; } __packed; /* * Buffer Descriptor Table for the endpoints buffer management. * The driver configuration with 16 fully bidirectional endpoints would require * four BD entries per endpoint and 512 bytes of memory. 
*/ static struct buf_descriptor __aligned(512) bdt[(NUM_OF_EP_MAX) * 2 * 2]; #define BD_IDX_EP0TX_EVEN 2 #define BD_IDX_EP0TX_ODD 3 #define EP_BUF_NUMOF_BLOCKS (NUM_OF_EP_MAX / 2) K_HEAP_DEFINE(ep_buf_pool, 512 * EP_BUF_NUMOF_BLOCKS + 128); struct ep_mem_block { void *data; }; struct usb_ep_ctrl_data { struct ep_status { uint16_t in_enabled : 1; uint16_t out_enabled : 1; uint16_t in_data1 : 1; uint16_t out_data1 : 1; uint16_t in_odd : 1; uint16_t out_odd : 1; uint16_t in_stalled : 1; uint16_t out_stalled : 1; } status; uint16_t mps_in; uint16_t mps_out; struct ep_mem_block mblock_in; struct ep_mem_block mblock_out; usb_dc_ep_callback cb_in; usb_dc_ep_callback cb_out; }; #define USBD_THREAD_STACK_SIZE 1024 struct usb_device_data { usb_dc_status_callback status_cb; uint8_t address; uint32_t bd_active; struct usb_ep_ctrl_data ep_ctrl[NUM_OF_EP_MAX]; bool attached; K_KERNEL_STACK_MEMBER(thread_stack, USBD_THREAD_STACK_SIZE); struct k_thread thread; }; static struct usb_device_data dev_data; #define USB_DC_CB_TYPE_MGMT 0 #define USB_DC_CB_TYPE_EP 1 struct cb_msg { uint8_t ep; uint8_t type; uint32_t cb; }; K_MSGQ_DEFINE(usb_dc_msgq, sizeof(struct cb_msg), 10, 4); static void usb_kinetis_isr_handler(void); /* * This function returns the BD element index based on * endpoint address and the odd bit. 
*/ static inline uint8_t get_bdt_idx(uint8_t ep, uint8_t odd) { if (ep & USB_EP_DIR_IN) { return ((((KINETIS_ADDR2IDX(ep)) * 4) + 2 + (odd & 1))); } return ((((KINETIS_ADDR2IDX(ep)) * 4) + (odd & 1))); } static int kinetis_usb_init(void) { /* enable USB voltage regulator */ SIM->SOPT1 |= SIM_SOPT1_USBREGEN_MASK; USB0->USBTRC0 |= USB_USBTRC0_USBRESET_MASK; k_busy_wait(2000); USB0->CTL = 0; /* enable USB module, AKA USBEN bit in CTL1 register */ USB0->CTL |= USB_CTL_USBENSOFEN_MASK; if ((USB0->PERID != USBFSOTG_PERID) || (USB0->REV != USBFSOTG_REV)) { return -1; } USB0->BDTPAGE1 = (uint8_t)(((uint32_t)bdt) >> 8); USB0->BDTPAGE2 = (uint8_t)(((uint32_t)bdt) >> 16); USB0->BDTPAGE3 = (uint8_t)(((uint32_t)bdt) >> 24); /* clear interrupt flags */ USB0->ISTAT = 0xFF; /* enable reset interrupt */ USB0->INTEN = USB_INTEN_USBRSTEN_MASK; USB0->USBCTRL = USB_USBCTRL_PDE_MASK; LOG_DBG(""); return 0; } int usb_dc_reset(void) { for (uint8_t i = 0; i < 16; i++) { USB0->ENDPOINT[i].ENDPT = 0; } dev_data.bd_active = 0U; dev_data.address = 0U; USB0->CTL |= USB_CTL_ODDRST_MASK; USB0->CTL &= ~USB_CTL_ODDRST_MASK; /* Clear interrupt status flags */ USB0->ISTAT = 0xFF; /* Clear error flags */ USB0->ERRSTAT = 0xFF; /* Enable all error interrupt sources */ USB0->ERREN = 0xFF; /* Reset default address */ USB0->ADDR = 0x00; USB0->INTEN = (USB_INTEN_USBRSTEN_MASK | USB_INTEN_TOKDNEEN_MASK | USB_INTEN_SLEEPEN_MASK | USB_INTEN_SOFTOKEN_MASK | USB_INTEN_STALLEN_MASK | USB_INTEN_ERROREN_MASK); LOG_DBG(""); return 0; } int usb_dc_attach(void) { if (dev_data.attached) { LOG_WRN("already attached"); } kinetis_usb_init(); /* * Call usb_dc_reset here because the device stack does not make it * after USB_DC_RESET status event. 
*/ usb_dc_reset(); dev_data.attached = 1; LOG_DBG("attached"); /* non-OTG device mode, enable DP Pullup */ USB0->CONTROL = USB_CONTROL_DPPULLUPNONOTG_MASK; return 0; } int usb_dc_detach(void) { LOG_DBG(""); /* disable USB and DP Pullup */ USB0->CTL &= ~USB_CTL_USBENSOFEN_MASK; USB0->CONTROL &= ~USB_CONTROL_DPPULLUPNONOTG_MASK; return 0; } int usb_dc_set_address(const uint8_t addr) { LOG_DBG(""); if (!dev_data.attached) { return -EINVAL; } /* * The device stack tries to set the address before * sending the ACK with ZLP, which is totally stupid, * as workaround the address will be buffered and * placed later inside isr handler (see KINETIS_IN_TOKEN). */ dev_data.address = 0x80 | (addr & 0x7f); return 0; } int usb_dc_ep_check_cap(const struct usb_dc_ep_cfg_data * const cfg) { uint8_t ep_idx = USB_EP_GET_IDX(cfg->ep_addr); if (ep_idx > (NUM_OF_EP_MAX - 1)) { LOG_ERR("endpoint index/address out of range"); return -EINVAL; } switch (cfg->ep_type) { case USB_DC_EP_CONTROL: if (cfg->ep_mps > USB_MAX_CTRL_MPS) { return -EINVAL; } return 0; case USB_DC_EP_BULK: if (cfg->ep_mps > USB_MAX_FS_BULK_MPS) { return -EINVAL; } break; case USB_DC_EP_INTERRUPT: if (cfg->ep_mps > USB_MAX_FS_INT_MPS) { return -EINVAL; } break; case USB_DC_EP_ISOCHRONOUS: if (cfg->ep_mps > USB_MAX_FS_ISO_MPS) { return -EINVAL; } break; default: LOG_ERR("Unknown endpoint type!"); return -EINVAL; } if (ep_idx & BIT(0)) { if (USB_EP_GET_DIR(cfg->ep_addr) != USB_EP_DIR_IN) { LOG_INF("pre-selected as IN endpoint"); return -1; } } else { if (USB_EP_GET_DIR(cfg->ep_addr) != USB_EP_DIR_OUT) { LOG_INF("pre-selected as OUT endpoint"); return -1; } } return 0; } int usb_dc_ep_configure(const struct usb_dc_ep_cfg_data * const cfg) { uint8_t ep_idx = USB_EP_GET_IDX(cfg->ep_addr); struct usb_ep_ctrl_data *ep_ctrl; struct ep_mem_block *block; uint8_t idx_even; uint8_t idx_odd; if (usb_dc_ep_check_cap(cfg)) { return -EINVAL; } idx_even = get_bdt_idx(cfg->ep_addr, 0); idx_odd = get_bdt_idx(cfg->ep_addr, 1); ep_ctrl = 
&dev_data.ep_ctrl[ep_idx]; if (ep_idx && (dev_data.ep_ctrl[ep_idx].status.in_enabled || dev_data.ep_ctrl[ep_idx].status.out_enabled)) { LOG_WRN("endpoint already configured"); return -EALREADY; } LOG_DBG("ep %x, mps %d, type %d", cfg->ep_addr, cfg->ep_mps, cfg->ep_type); if (USB_EP_DIR_IS_OUT(cfg->ep_addr)) { block = &(ep_ctrl->mblock_out); } else { block = &(ep_ctrl->mblock_in); } if (bdt[idx_even].buf_addr) { k_heap_free(&ep_buf_pool, block->data); } USB0->ENDPOINT[ep_idx].ENDPT = 0; (void)memset(&bdt[idx_even], 0, sizeof(struct buf_descriptor)); (void)memset(&bdt[idx_odd], 0, sizeof(struct buf_descriptor)); block->data = k_heap_alloc(&ep_buf_pool, cfg->ep_mps * 2U, K_NO_WAIT); if (block->data != NULL) { (void)memset(block->data, 0, cfg->ep_mps * 2U); } else { LOG_ERR("Memory allocation time-out"); return -ENOMEM; } bdt[idx_even].buf_addr = (uint32_t)block->data; LOG_INF("idx_even %x", (uint32_t)block->data); bdt[idx_odd].buf_addr = (uint32_t)((uint8_t *)block->data + cfg->ep_mps); LOG_INF("idx_odd %x", (uint32_t)((uint8_t *)block->data + cfg->ep_mps)); if (cfg->ep_addr & USB_EP_DIR_IN) { dev_data.ep_ctrl[ep_idx].mps_in = cfg->ep_mps; } else { dev_data.ep_ctrl[ep_idx].mps_out = cfg->ep_mps; } bdt[idx_even].set.bc = cfg->ep_mps; bdt[idx_odd].set.bc = cfg->ep_mps; dev_data.ep_ctrl[ep_idx].status.out_data1 = false; dev_data.ep_ctrl[ep_idx].status.in_data1 = false; switch (cfg->ep_type) { case USB_DC_EP_CONTROL: LOG_DBG("configure control endpoint"); USB0->ENDPOINT[ep_idx].ENDPT |= (USB_ENDPT_EPHSHK_MASK | USB_ENDPT_EPRXEN_MASK | USB_ENDPT_EPTXEN_MASK); break; case USB_DC_EP_BULK: case USB_DC_EP_INTERRUPT: USB0->ENDPOINT[ep_idx].ENDPT |= USB_ENDPT_EPHSHK_MASK; if (USB_EP_DIR_IS_OUT(cfg->ep_addr)) { USB0->ENDPOINT[ep_idx].ENDPT |= USB_ENDPT_EPRXEN_MASK; } else { USB0->ENDPOINT[ep_idx].ENDPT |= USB_ENDPT_EPTXEN_MASK; } break; case USB_DC_EP_ISOCHRONOUS: if (USB_EP_DIR_IS_OUT(cfg->ep_addr)) { USB0->ENDPOINT[ep_idx].ENDPT |= USB_ENDPT_EPRXEN_MASK; } else { 
USB0->ENDPOINT[ep_idx].ENDPT |= USB_ENDPT_EPTXEN_MASK; } break; default: return -EINVAL; } return 0; } int usb_dc_ep_set_stall(const uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); uint8_t bd_idx; if (ep_idx > (NUM_OF_EP_MAX - 1)) { LOG_ERR("Wrong endpoint index/address"); return -EINVAL; } LOG_DBG("ep %x, idx %d", ep, ep_idx); if (USB_EP_DIR_IS_OUT(ep)) { dev_data.ep_ctrl[ep_idx].status.out_stalled = 1U; bd_idx = get_bdt_idx(ep, ~dev_data.ep_ctrl[ep_idx].status.out_odd); } else { dev_data.ep_ctrl[ep_idx].status.in_stalled = 1U; bd_idx = get_bdt_idx(ep, dev_data.ep_ctrl[ep_idx].status.in_odd); } bdt[bd_idx].set.bd_ctrl = BD_STALL_MASK | BD_DTS_MASK | BD_OWN_MASK; return 0; } int usb_dc_ep_clear_stall(const uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); uint8_t bd_idx; if (ep_idx > (NUM_OF_EP_MAX - 1)) { LOG_ERR("Wrong endpoint index/address"); return -EINVAL; } LOG_DBG("ep %x, idx %d", ep, ep_idx); USB0->ENDPOINT[ep_idx].ENDPT &= ~USB_ENDPT_EPSTALL_MASK; if (USB_EP_DIR_IS_OUT(ep)) { dev_data.ep_ctrl[ep_idx].status.out_stalled = 0U; dev_data.ep_ctrl[ep_idx].status.out_data1 = false; bd_idx = get_bdt_idx(ep, ~dev_data.ep_ctrl[ep_idx].status.out_odd); bdt[bd_idx].set.bd_ctrl = 0U; bdt[bd_idx].set.bd_ctrl = BD_DTS_MASK | BD_OWN_MASK; } else { dev_data.ep_ctrl[ep_idx].status.in_stalled = 0U; dev_data.ep_ctrl[ep_idx].status.in_data1 = false; bd_idx = get_bdt_idx(ep, dev_data.ep_ctrl[ep_idx].status.in_odd); bdt[bd_idx].set.bd_ctrl = 0U; } /* Resume TX token processing, see USBx_CTL field descriptions */ if (ep == 0U) { USB0->CTL &= ~USB_CTL_TXSUSPENDTOKENBUSY_MASK; } return 0; } int usb_dc_ep_is_stalled(const uint8_t ep, uint8_t *const stalled) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (ep_idx > (NUM_OF_EP_MAX - 1)) { LOG_ERR("Wrong endpoint index/address"); return -EINVAL; } LOG_DBG("ep %x, idx %d", ep_idx, ep); if (!stalled) { return -EINVAL; } *stalled = 0U; if (USB_EP_DIR_IS_OUT(ep)) { *stalled = dev_data.ep_ctrl[ep_idx].status.out_stalled; } else { *stalled = 
dev_data.ep_ctrl[ep_idx].status.in_stalled; } uint8_t bd_idx = get_bdt_idx(ep, dev_data.ep_ctrl[ep_idx].status.in_odd); LOG_WRN("active bd ctrl: %x", bdt[bd_idx].set.bd_ctrl); bd_idx = get_bdt_idx(ep, ~dev_data.ep_ctrl[ep_idx].status.in_odd); LOG_WRN("next bd ctrl: %x", bdt[bd_idx].set.bd_ctrl); return 0; } int usb_dc_ep_halt(const uint8_t ep) { return usb_dc_ep_set_stall(ep); } int usb_dc_ep_enable(const uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); uint8_t idx_even; uint8_t idx_odd; if (ep_idx > (NUM_OF_EP_MAX - 1)) { LOG_ERR("Wrong endpoint index/address"); return -EINVAL; } idx_even = get_bdt_idx(ep, 0); idx_odd = get_bdt_idx(ep, 1); if (ep_idx && (dev_data.ep_ctrl[ep_idx].status.in_enabled || dev_data.ep_ctrl[ep_idx].status.out_enabled)) { LOG_WRN("endpoint 0x%x already enabled", ep); return -EALREADY; } if (USB_EP_DIR_IS_OUT(ep)) { bdt[idx_even].set.bd_ctrl = BD_DTS_MASK | BD_OWN_MASK; bdt[idx_odd].set.bd_ctrl = 0U; dev_data.ep_ctrl[ep_idx].status.out_odd = 0U; dev_data.ep_ctrl[ep_idx].status.out_stalled = 0U; dev_data.ep_ctrl[ep_idx].status.out_data1 = false; dev_data.ep_ctrl[ep_idx].status.out_enabled = true; } else { bdt[idx_even].bd_fields = 0U; bdt[idx_odd].bd_fields = 0U; dev_data.ep_ctrl[ep_idx].status.in_odd = 0U; dev_data.ep_ctrl[ep_idx].status.in_stalled = 0U; dev_data.ep_ctrl[ep_idx].status.in_data1 = false; dev_data.ep_ctrl[ep_idx].status.in_enabled = true; } LOG_INF("ep 0x%x, ep_idx %d", ep, ep_idx); return 0; } int usb_dc_ep_disable(const uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); uint8_t idx_even; uint8_t idx_odd; if (ep_idx > (NUM_OF_EP_MAX - 1)) { LOG_ERR("Wrong endpoint index/address"); return -EINVAL; } idx_even = get_bdt_idx(ep, 0); idx_odd = get_bdt_idx(ep, 1); LOG_INF("ep %x, idx %d", ep_idx, ep); bdt[idx_even].bd_fields = 0U; bdt[idx_odd].bd_fields = 0U; if (USB_EP_DIR_IS_OUT(ep)) { dev_data.ep_ctrl[ep_idx].status.out_enabled = false; } else { dev_data.ep_ctrl[ep_idx].status.in_enabled = false; } return 0; } int 
usb_dc_ep_flush(const uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (ep_idx > (NUM_OF_EP_MAX - 1)) { LOG_ERR("Wrong endpoint index/address"); return -EINVAL; } LOG_DBG("ep %x, idx %d", ep_idx, ep); return 0; } int usb_dc_ep_write(const uint8_t ep, const uint8_t *const data, const uint32_t data_len, uint32_t * const ret_bytes) { uint8_t ep_idx = USB_EP_GET_IDX(ep); uint32_t len_to_send = data_len; uint8_t odd; uint8_t bd_idx; uint8_t *bufp; if (ep_idx > (NUM_OF_EP_MAX - 1)) { LOG_ERR("Wrong endpoint index/address"); return -EINVAL; } odd = dev_data.ep_ctrl[ep_idx].status.in_odd; bd_idx = get_bdt_idx(ep, odd); bufp = (uint8_t *)bdt[bd_idx].buf_addr; if (USB_EP_GET_DIR(ep) != USB_EP_DIR_IN) { LOG_ERR("Wrong endpoint direction"); return -EINVAL; } if (dev_data.ep_ctrl[ep_idx].status.in_stalled) { LOG_WRN("endpoint is stalled"); return -EBUSY; } while (bdt[bd_idx].get.own) { LOG_DBG("ep 0x%x is busy", ep); k_yield(); } LOG_DBG("bd idx %x bufp %p odd %d", bd_idx, bufp, odd); if (data_len > dev_data.ep_ctrl[ep_idx].mps_in) { len_to_send = dev_data.ep_ctrl[ep_idx].mps_in; } bdt[bd_idx].set.bc = len_to_send; for (uint32_t n = 0; n < len_to_send; n++) { bufp[n] = data[n]; } dev_data.ep_ctrl[ep_idx].status.in_odd = ~odd; if (dev_data.ep_ctrl[ep_idx].status.in_data1) { bdt[bd_idx].set.bd_ctrl = BD_DTS_MASK | BD_DATA01_MASK | BD_OWN_MASK; } else { bdt[bd_idx].set.bd_ctrl = BD_DTS_MASK | BD_OWN_MASK; } /* Toggle next Data1 */ dev_data.ep_ctrl[ep_idx].status.in_data1 ^= 1; LOG_DBG("ep 0x%x write %d bytes from %d", ep, len_to_send, data_len); if (ret_bytes) { *ret_bytes = len_to_send; } return 0; } int usb_dc_ep_read_wait(uint8_t ep, uint8_t *data, uint32_t max_data_len, uint32_t *read_bytes) { uint8_t ep_idx = USB_EP_GET_IDX(ep); uint32_t data_len; uint8_t bd_idx; uint8_t *bufp; if (ep_idx > (NUM_OF_EP_MAX - 1)) { LOG_ERR("Wrong endpoint index/address"); return -EINVAL; } /* select the index of active endpoint buffer */ bd_idx = get_bdt_idx(ep, 
dev_data.ep_ctrl[ep_idx].status.out_odd); bufp = (uint8_t *)bdt[bd_idx].buf_addr; if (USB_EP_GET_DIR(ep) != USB_EP_DIR_OUT) { LOG_ERR("Wrong endpoint direction"); return -EINVAL; } if (dev_data.ep_ctrl[ep_idx].status.out_stalled) { LOG_WRN("endpoint is stalled"); return -EBUSY; } /* Allow to read 0 bytes */ if (!data && max_data_len) { LOG_ERR("Wrong arguments"); return -EINVAL; } while (bdt[bd_idx].get.own) { LOG_ERR("Endpoint is occupied by the controller"); return -EBUSY; } data_len = bdt[bd_idx].get.bc; if (!data && !max_data_len) { /* * When both buffer and max data to read are zero return * the available data in buffer. */ if (read_bytes) { *read_bytes = data_len; } return 0; } if (data_len > max_data_len) { LOG_WRN("Not enough room to copy all the data!"); data_len = max_data_len; } if (data != NULL) { for (uint32_t i = 0; i < data_len; i++) { data[i] = bufp[i]; } } LOG_DBG("Read idx %d, req %d, read %d bytes", bd_idx, max_data_len, data_len); if (read_bytes) { *read_bytes = data_len; } return 0; } int usb_dc_ep_read_continue(uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); uint8_t bd_idx; if (ep_idx > (NUM_OF_EP_MAX - 1)) { LOG_ERR("Wrong endpoint index/address"); return -EINVAL; } bd_idx = get_bdt_idx(ep, dev_data.ep_ctrl[ep_idx].status.out_odd); if (USB_EP_GET_DIR(ep) != USB_EP_DIR_OUT) { LOG_ERR("Wrong endpoint direction"); return -EINVAL; } if (bdt[bd_idx].get.own) { /* May occur when usb_transfer initializes the OUT transfer */ LOG_WRN("Current buffer is claimed by the controller"); return 0; } /* select the index of the next endpoint buffer */ bd_idx = get_bdt_idx(ep, ~dev_data.ep_ctrl[ep_idx].status.out_odd); /* Update next toggle bit */ dev_data.ep_ctrl[ep_idx].status.out_data1 ^= 1; bdt[bd_idx].set.bc = dev_data.ep_ctrl[ep_idx].mps_out; /* Reset next buffer descriptor and set next toggle bit */ if (dev_data.ep_ctrl[ep_idx].status.out_data1) { bdt[bd_idx].set.bd_ctrl = BD_DTS_MASK | BD_DATA01_MASK | BD_OWN_MASK; } else { bdt[bd_idx].set.bd_ctrl = 
BD_DTS_MASK | BD_OWN_MASK; } /* Resume TX token processing, see USBx_CTL field descriptions */ if (ep_idx == 0U) { USB0->CTL &= ~USB_CTL_TXSUSPENDTOKENBUSY_MASK; } LOG_DBG("idx next %x", bd_idx); return 0; } int usb_dc_ep_read(const uint8_t ep, uint8_t *const data, const uint32_t max_data_len, uint32_t *const read_bytes) { int retval = usb_dc_ep_read_wait(ep, data, max_data_len, read_bytes); if (retval) { return retval; } if (!data && !max_data_len) { /* When both buffer and max data to read are zero the above * call would fetch the data len and we simply return. */ return 0; } if (usb_dc_ep_read_continue(ep) != 0) { return -EINVAL; } LOG_DBG(""); return 0; } int usb_dc_ep_set_callback(const uint8_t ep, const usb_dc_ep_callback cb) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (ep_idx > (NUM_OF_EP_MAX - 1)) { LOG_ERR("Wrong endpoint index/address"); return -EINVAL; } if (!dev_data.attached) { return -EINVAL; } if (ep & USB_EP_DIR_IN) { dev_data.ep_ctrl[ep_idx].cb_in = cb; } else { dev_data.ep_ctrl[ep_idx].cb_out = cb; } LOG_DBG("ep_idx %x", ep_idx); return 0; } void usb_dc_set_status_callback(const usb_dc_status_callback cb) { LOG_DBG(""); dev_data.status_cb = cb; } int usb_dc_ep_mps(const uint8_t ep) { uint8_t ep_idx = USB_EP_GET_IDX(ep); if (ep_idx > (NUM_OF_EP_MAX - 1)) { LOG_ERR("Wrong endpoint index/address"); return -EINVAL; } if (ep & USB_EP_DIR_IN) { return dev_data.ep_ctrl[ep_idx].mps_in; } else { return dev_data.ep_ctrl[ep_idx].mps_out; } } static inline void reenable_control_endpoints(void) { struct usb_dc_ep_cfg_data ep_cfg; /* Reconfigure control endpoint 0 after a reset */ ep_cfg.ep_addr = USB_CONTROL_EP_OUT; ep_cfg.ep_mps = USB_CONTROL_EP_MPS; ep_cfg.ep_type = USB_DC_EP_CONTROL; usb_dc_ep_configure(&ep_cfg); ep_cfg.ep_addr = USB_CONTROL_EP_IN; usb_dc_ep_configure(&ep_cfg); /* Enable both endpoint directions */ usb_dc_ep_enable(USB_CONTROL_EP_OUT); usb_dc_ep_enable(USB_CONTROL_EP_IN); } static void usb_kinetis_isr_handler(void) { uint8_t istatus = 
USB0->ISTAT; uint8_t status = USB0->STAT; struct cb_msg msg; if (istatus & USB_ISTAT_USBRST_MASK) { dev_data.address = 0U; USB0->ADDR = (uint8_t)0; /* * Device reset is not possible because the stack does not * configure the endpoints after the USB_DC_RESET event, * therefore, we must re-enable the default control 0 endpoint * after a reset event */ USB0->CTL |= USB_CTL_ODDRST_MASK; USB0->CTL &= ~USB_CTL_ODDRST_MASK; reenable_control_endpoints(); msg.ep = 0U; msg.type = USB_DC_CB_TYPE_MGMT; msg.cb = USB_DC_RESET; k_msgq_put(&usb_dc_msgq, &msg, K_NO_WAIT); } if (istatus == USB_ISTAT_ERROR_MASK) { USB0->ERRSTAT = 0xFF; msg.ep = 0U; msg.type = USB_DC_CB_TYPE_MGMT; msg.cb = USB_DC_ERROR; k_msgq_put(&usb_dc_msgq, &msg, K_NO_WAIT); } if (istatus & USB_ISTAT_STALL_MASK) { if (dev_data.ep_ctrl[0].status.out_stalled) { usb_dc_ep_clear_stall(0); } if (dev_data.ep_ctrl[0].status.in_stalled) { usb_dc_ep_clear_stall(0x80); } } if (istatus & USB_ISTAT_TOKDNE_MASK) { uint8_t ep_idx = status >> USB_STAT_ENDP_SHIFT; uint8_t ep = ((status << 4) & USB_EP_DIR_IN) | ep_idx; uint8_t odd = (status & USB_STAT_ODD_MASK) >> USB_STAT_ODD_SHIFT; uint8_t idx = get_bdt_idx(ep, odd); uint8_t token_pid = bdt[idx].get.tok_pid; msg.ep = ep; msg.type = USB_DC_CB_TYPE_EP; switch (token_pid) { case KINETIS_SETUP_TOKEN: dev_data.ep_ctrl[ep_idx].status.out_odd = odd; /* clear tx entries */ bdt[BD_IDX_EP0TX_EVEN].bd_fields = 0U; bdt[BD_IDX_EP0TX_ODD].bd_fields = 0U; /* * Set/Reset here the toggle bits for control endpoint * because the device stack does not care about it. 
*/ dev_data.ep_ctrl[ep_idx].status.in_data1 = true; dev_data.ep_ctrl[ep_idx].status.out_data1 = false; dev_data.ep_ctrl[ep_idx].status.out_odd = odd; msg.cb = USB_DC_EP_SETUP; k_msgq_put(&usb_dc_msgq, &msg, K_NO_WAIT); break; case KINETIS_OUT_TOKEN: dev_data.ep_ctrl[ep_idx].status.out_odd = odd; msg.cb = USB_DC_EP_DATA_OUT; k_msgq_put(&usb_dc_msgq, &msg, K_NO_WAIT); break; case KINETIS_IN_TOKEN: /* SET ADDRESS workaround */ if (dev_data.address & 0x80) { USB0->ADDR = dev_data.address & 0x7f; dev_data.address = 0U; } msg.cb = USB_DC_EP_DATA_IN; k_msgq_put(&usb_dc_msgq, &msg, K_NO_WAIT); break; default: break; } } if (istatus & USB_ISTAT_SLEEP_MASK) { /* Enable resume interrupt */ USB0->INTEN |= USB_INTEN_RESUMEEN_MASK; msg.ep = 0U; msg.type = USB_DC_CB_TYPE_MGMT; msg.cb = USB_DC_SUSPEND; k_msgq_put(&usb_dc_msgq, &msg, K_NO_WAIT); } if (istatus & USB_ISTAT_RESUME_MASK) { /* Disable resume interrupt */ USB0->INTEN &= ~USB_INTEN_RESUMEEN_MASK; msg.ep = 0U; msg.type = USB_DC_CB_TYPE_MGMT; msg.cb = USB_DC_RESUME; k_msgq_put(&usb_dc_msgq, &msg, K_NO_WAIT); } /* Clear interrupt status bits */ USB0->ISTAT = istatus; } /* * This thread is only used to not run the USB device stack and endpoint * callbacks in the ISR context, which happens when an callback function * is called. TODO: something similar should be implemented in the USB * device stack so that it can be used by all drivers. 
*/ static void usb_kinetis_thread_main(void *arg1, void *unused1, void *unused2) { ARG_UNUSED(arg1); ARG_UNUSED(unused1); ARG_UNUSED(unused2); struct cb_msg msg; uint8_t ep_idx; while (true) { k_msgq_get(&usb_dc_msgq, &msg, K_FOREVER); ep_idx = USB_EP_GET_IDX(msg.ep); if (msg.type == USB_DC_CB_TYPE_EP) { switch (msg.cb) { case USB_DC_EP_SETUP: if (dev_data.ep_ctrl[ep_idx].cb_out) { dev_data.ep_ctrl[ep_idx].cb_out(msg.ep, USB_DC_EP_SETUP); } break; case USB_DC_EP_DATA_OUT: if (dev_data.ep_ctrl[ep_idx].cb_out) { dev_data.ep_ctrl[ep_idx].cb_out(msg.ep, USB_DC_EP_DATA_OUT); } break; case USB_DC_EP_DATA_IN: if (dev_data.ep_ctrl[ep_idx].cb_in) { dev_data.ep_ctrl[ep_idx].cb_in(msg.ep, USB_DC_EP_DATA_IN); } break; default: LOG_ERR("unknown msg"); break; } } else if (dev_data.status_cb) { switch (msg.cb) { case USB_DC_RESET: dev_data.status_cb(USB_DC_RESET, NULL); break; case USB_DC_ERROR: dev_data.status_cb(USB_DC_ERROR, NULL); break; case USB_DC_SUSPEND: dev_data.status_cb(USB_DC_SUSPEND, NULL); break; case USB_DC_RESUME: dev_data.status_cb(USB_DC_RESUME, NULL); break; default: LOG_ERR("unknown msg"); break; } } } } static int usb_kinetis_init(void) { (void)memset(bdt, 0, sizeof(bdt)); k_thread_create(&dev_data.thread, dev_data.thread_stack, USBD_THREAD_STACK_SIZE, usb_kinetis_thread_main, NULL, NULL, NULL, K_PRIO_COOP(2), 0, K_NO_WAIT); k_thread_name_set(&dev_data.thread, "usb_kinetis"); IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), usb_kinetis_isr_handler, 0, 0); irq_enable(DT_INST_IRQN(0)); return 0; } SYS_INIT(usb_kinetis_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE); ```
/content/code_sandbox/drivers/usb/device/usb_dc_kinetis.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
7,980
```unknown rsource "nrf_usbd_common/Kconfig" ```
/content/code_sandbox/drivers/usb/common/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
13
```objective-c /* * */ /* This file is undergoing transition towards native Zephyr nrf USB driver. */ /** @cond INTERNAL_HIDDEN */ #ifndef NRF_USBD_COMMON_ERRATA_H__ #define NRF_USBD_COMMON_ERRATA_H__ #include <nrfx.h> #include <nrf_erratas.h> #ifndef NRF_USBD_COMMON_ERRATA_ENABLE /** * @brief The constant that informs if errata should be enabled at all. * * If this constant is set to 0, all the Errata bug fixes will be automatically disabled. */ #define NRF_USBD_COMMON_ERRATA_ENABLE 1 #endif /* Errata: ISO double buffering not functional. **/ static inline bool nrf_usbd_common_errata_166(void) { return NRF_USBD_COMMON_ERRATA_ENABLE && nrf52_errata_166(); } /* Errata: USBD might not reach its active state. **/ static inline bool nrf_usbd_common_errata_171(void) { return NRF_USBD_COMMON_ERRATA_ENABLE && nrf52_errata_171(); } /* Errata: USB cannot be enabled. **/ static inline bool nrf_usbd_common_errata_187(void) { return NRF_USBD_COMMON_ERRATA_ENABLE && nrf52_errata_187(); } /* Errata: USBD cannot receive tasks during DMA. **/ static inline bool nrf_usbd_common_errata_199(void) { return NRF_USBD_COMMON_ERRATA_ENABLE && nrf52_errata_199(); } /* Errata: Device remains in SUSPEND too long. */ static inline bool nrf_usbd_common_errata_211(void) { return NRF_USBD_COMMON_ERRATA_ENABLE && nrf52_errata_211(); } /* Errata: Unexpected behavior after reset. **/ static inline bool nrf_usbd_common_errata_223(void) { return NRF_USBD_COMMON_ERRATA_ENABLE && nrf52_errata_223(); } #endif /* NRF_USBD_COMMON_ERRATA_H__ */ /** @endcond */ ```
/content/code_sandbox/drivers/usb/common/nrf_usbd_common/nrf_usbd_common_errata.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
407
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_USB_COMMON_USB_DWC2_HW #define ZEPHYR_DRIVERS_USB_COMMON_USB_DWC2_HW #include <stdint.h> #ifdef __cplusplus extern "C" { #endif /* This file describes register set for the DesignWare USB 2.0 controller IP */ /* IN endpoint register block */ struct usb_dwc2_in_ep { volatile uint32_t diepctl; uint32_t reserved; volatile uint32_t diepint; uint32_t reserved1; volatile uint32_t dieptsiz; volatile uint32_t diepdma; volatile uint32_t dtxfsts; volatile uint32_t diepdmab; }; /* OUT endpoint register block */ struct usb_dwc2_out_ep { volatile uint32_t doepctl; uint32_t reserved; volatile uint32_t doepint; uint32_t reserved1; volatile uint32_t doeptsiz; volatile uint32_t doepdma; uint32_t reserved2; volatile uint32_t doepdmab; }; /* DWC2 register map * TODO: This should probably be split into global, host, and device register * blocks */ struct usb_dwc2_reg { volatile uint32_t gotgctl; volatile uint32_t gotgint; volatile uint32_t gahbcfg; volatile uint32_t gusbcfg; volatile uint32_t grstctl; volatile uint32_t gintsts; volatile uint32_t gintmsk; volatile uint32_t grxstsr; volatile uint32_t grxstsp; volatile uint32_t grxfsiz; volatile uint32_t gnptxfsiz; volatile uint32_t gnptxsts; volatile uint32_t gi2cctl; volatile uint32_t gpvndctl; volatile uint32_t ggpio; volatile uint32_t guid; volatile uint32_t gsnpsid; volatile uint32_t ghwcfg1; volatile uint32_t ghwcfg2; volatile uint32_t ghwcfg3; volatile uint32_t ghwcfg4; volatile uint32_t glpmcfg; volatile uint32_t gpwrdn; volatile uint32_t gdfifocfg; volatile uint32_t gadpctl; volatile uint32_t grefclk; volatile uint32_t gintmsk2; volatile uint32_t gintsts2; volatile uint32_t reserved1[36]; volatile uint32_t hptxfsiz; union { volatile uint32_t dptxfsiz[15]; volatile uint32_t dieptxf[15]; }; volatile uint32_t reserved2[176]; /* Host mode register 0x0400 .. 0x0670 */ uint32_t reserved3[256]; /* Device mode register 0x0800 .. 
0x0D00 */ volatile uint32_t dcfg; volatile uint32_t dctl; volatile uint32_t dsts; uint32_t reserved4; volatile uint32_t diepmsk; volatile uint32_t doepmsk; volatile uint32_t daint; volatile uint32_t daintmsk; volatile uint32_t dtknqr1; volatile uint32_t dtknqr2; volatile uint32_t dvbusdis; volatile uint32_t dvbuspulse; union { volatile uint32_t dtknqr3; volatile uint32_t dthrctl; }; union { volatile uint32_t dtknqr4; volatile uint32_t diepempmsk; }; volatile uint32_t deachint; volatile uint32_t deachintmsk; volatile uint32_t diepeachmsk[16]; volatile uint32_t doepeachmsk[16]; volatile uint32_t reserved5[16]; struct usb_dwc2_in_ep in_ep[16]; struct usb_dwc2_out_ep out_ep[16]; }; /* * With the maximum number of supported endpoints, register map * of the controller must be equal to 0x0D00. */ BUILD_ASSERT(sizeof(struct usb_dwc2_reg) == 0x0D00); /* * GET_FIELD/SET_FIELD macros below are intended to be used to define functions * to get/set a bitfield of a register from/into a value. They should not be * used to get/set a bitfield consisting of only one bit. 
*/ #define USB_DWC2_GET_FIELD_DEFINE(name, reg_name_and_field) \ static inline uint32_t usb_dwc2_get_##name(const uint32_t value) \ { \ return (value & USB_DWC2_##reg_name_and_field##_MASK) >> \ USB_DWC2_##reg_name_and_field##_POS; \ } #define USB_DWC2_SET_FIELD_DEFINE(name, reg_name_and_field) \ static inline uint32_t usb_dwc2_set_##name(const uint32_t value) \ { \ return (value << USB_DWC2_##reg_name_and_field##_POS) & \ USB_DWC2_##reg_name_and_field##_MASK; \ } #define USB_DWC2_GET_FIELD_AND_IDX_DEFINE(name, reg_name_and_field) \ static inline uint32_t usb_dwc2_get_##name(const uint32_t value, \ const uint32_t idx) \ { \ return (value & USB_DWC2_##reg_name_and_field##_MASK(idx)) >> \ USB_DWC2_##reg_name_and_field##_POS(idx); \ } /* AHB configuration register */ #define USB_DWC2_GAHBCFG 0x0008UL #define USB_DWC2_GAHBCFG_LOA_EOP_WORD_POS 27UL #define USB_DWC2_GAHBCFG_LOA_EOP_WORD_MASK (0x3UL << USB_DWC2_GAHBCFG_LOA_EOP_WORD_POS) #define USB_DWC2_GAHBCFG_LOA_EOP_WORD_ONE 1 #define USB_DWC2_GAHBCFG_LOA_EOP_WORD_TWO 2 #define USB_DWC2_GAHBCFG_LOA_EOP_BYTE_POS 25UL #define USB_DWC2_GAHBCFG_LOA_EOP_BYTE_MASK (0x3UL << USB_DWC2_GAHBCFG_LOA_EOP_BYTE_POS) #define USB_DWC2_GAHBCFG_LOA_EOP_BYTE_ONE 1 #define USB_DWC2_GAHBCFG_LOA_EOP_BYTE_TWO 2 #define USB_DWC2_GAHBCFG_INVDESCENDIANESS_POS 24UL #define USB_DWC2_GAHBCFG_INVDESCENDIANESS BIT(USB_DWC2_GAHBCFG_INVDESCENDIANESS_POS) #define USB_DWC2_GAHBCFG_AHBSINGLE_POS 23UL #define USB_DWC2_GAHBCFG_AHBSINGLE BIT(USB_DWC2_GAHBCFG_AHBSINGLE_POS) #define USB_DWC2_GAHBCFG_NOTIALLDMAWRIT_POS 22UL #define USB_DWC2_GAHBCFG_NOTIALLDMAWRIT BIT(USB_DWC2_GAHBCFG_NOTIALLDMAWRIT_POS) #define USB_DWC2_GAHBCFG_REMMEMSUPP_POS 21UL #define USB_DWC2_GAHBCFG_REMMEMSUPP BIT(USB_DWC2_GAHBCFG_REMMEMSUPP_POS) #define USB_DWC2_GAHBCFG_PTXFEMPLVL_POS 8UL #define USB_DWC2_GAHBCFG_PTXFEMPLVL BIT(USB_DWC2_GAHBCFG_PTXFEMPLVL_POS) #define USB_DWC2_GAHBCFG_NPTXFEMPLVL_POS 7UL #define USB_DWC2_GAHBCFG_NPTXFEMPLVL BIT(USB_DWC2_GAHBCFG_NPTXFEMPLVL_POS) #define 
USB_DWC2_GAHBCFG_DMAEN_POS 5UL #define USB_DWC2_GAHBCFG_DMAEN BIT(USB_DWC2_GAHBCFG_DMAEN_POS) #define USB_DWC2_GAHBCFG_HBSTLEN_POS 1UL #define USB_DWC2_GAHBCFG_HBSTLEN_MASK (0xFUL << USB_DWC2_GAHBCFG_HBSTLEN_POS) #define USB_DWC2_GAHBCFG_HBSTLEN_SINGLE 0 #define USB_DWC2_GAHBCFG_HBSTLEN_INCR 1 #define USB_DWC2_GAHBCFG_HBSTLEN_INCR4 3 #define USB_DWC2_GAHBCFG_HBSTLEN_INCR8 5 #define USB_DWC2_GAHBCFG_HBSTLEN_INCR16 7 #define USB_DWC2_GAHBCFG_GLBINTRMASK_POS 0UL #define USB_DWC2_GAHBCFG_GLBINTRMASK BIT(USB_DWC2_GAHBCFG_GLBINTRMASK_POS) USB_DWC2_SET_FIELD_DEFINE(gahbcfg_loa_eop_word, GAHBCFG_LOA_EOP_WORD) USB_DWC2_SET_FIELD_DEFINE(gahbcfg_loa_eop_byte, GAHBCFG_LOA_EOP_BYTE) USB_DWC2_SET_FIELD_DEFINE(gahbcfg_hbstlen, GAHBCFG_HBSTLEN) USB_DWC2_GET_FIELD_DEFINE(gahbcfg_loa_eop_word, GAHBCFG_LOA_EOP_WORD) USB_DWC2_GET_FIELD_DEFINE(gahbcfg_loa_eop_byte, GAHBCFG_LOA_EOP_BYTE) USB_DWC2_GET_FIELD_DEFINE(gahbcfg_hbstlen, GAHBCFG_HBSTLEN) /* USB configuration register */ #define USB_DWC2_GUSBCFG 0x000CUL #define USB_DWC2_GUSBCFG_FORCEDEVMODE_POS 30UL #define USB_DWC2_GUSBCFG_FORCEDEVMODE BIT(USB_DWC2_GUSBCFG_FORCEDEVMODE_POS) #define USB_DWC2_GUSBCFG_FORCEHSTMODE_POS 29UL #define USB_DWC2_GUSBCFG_FORCEHSTMODE BIT(USB_DWC2_GUSBCFG_FORCEHSTMODE_POS) #define USB_DWC2_GUSBCFG_PHYSEL_POS 6UL #define USB_DWC2_GUSBCFG_PHYSEL_USB11 BIT(USB_DWC2_GUSBCFG_PHYSEL_POS) #define USB_DWC2_GUSBCFG_PHYSEL_USB20 0UL #define USB_DWC2_GUSBCFG_ULPI_UTMI_SEL_POS 4UL #define USB_DWC2_GUSBCFG_ULPI_UTMI_SEL_ULPI BIT(USB_DWC2_GUSBCFG_ULPI_UTMI_SEL_POS) #define USB_DWC2_GUSBCFG_ULPI_UTMI_SEL_UTMI 0UL #define USB_DWC2_GUSBCFG_PHYIF_POS 3UL #define USB_DWC2_GUSBCFG_PHYIF_16_BIT BIT(USB_DWC2_GUSBCFG_PHYIF_POS) #define USB_DWC2_GUSBCFG_PHYIF_8_BIT 0UL /* Reset register */ #define USB_DWC2_GRSTCTL 0x0010UL #define USB_DWC2_GRSTCTL_AHBIDLE_POS 31UL #define USB_DWC2_GRSTCTL_AHBIDLE BIT(USB_DWC2_GRSTCTL_AHBIDLE_POS) #define USB_DWC2_GRSTCTL_CSFTRSTDONE_POS 29UL #define USB_DWC2_GRSTCTL_CSFTRSTDONE 
BIT(USB_DWC2_GRSTCTL_CSFTRSTDONE_POS) #define USB_DWC2_GRSTCTL_TXFNUM_POS 6UL #define USB_DWC2_GRSTCTL_TXFNUM_MASK (0x1FUL << USB_DWC2_GRSTCTL_TXFNUM_POS) #define USB_DWC2_GRSTCTL_TXFFLSH_POS 5UL #define USB_DWC2_GRSTCTL_TXFFLSH BIT(USB_DWC2_GRSTCTL_TXFFLSH_POS) #define USB_DWC2_GRSTCTL_RXFFLSH_POS 4UL #define USB_DWC2_GRSTCTL_RXFFLSH BIT(USB_DWC2_GRSTCTL_RXFFLSH_POS) #define USB_DWC2_GRSTCTL_CSFTRST_POS 0UL #define USB_DWC2_GRSTCTL_CSFTRST BIT(USB_DWC2_GRSTCTL_CSFTRST_POS) USB_DWC2_SET_FIELD_DEFINE(grstctl_txfnum, GRSTCTL_TXFNUM) /* Core interrupt registers */ #define USB_DWC2_GINTSTS 0x0014UL #define USB_DWC2_GINTMSK 0x0018UL #define USB_DWC2_GINTSTS_WKUPINT_POS 31UL #define USB_DWC2_GINTSTS_WKUPINT BIT(USB_DWC2_GINTSTS_WKUPINT_POS) #define USB_DWC2_GINTSTS_SESSREQINT_POS 30UL #define USB_DWC2_GINTSTS_SESSREQINT BIT(USB_DWC2_GINTSTS_SESSREQINT_POS) #define USB_DWC2_GINTSTS_DISCONNINT_POS 29UL #define USB_DWC2_GINTSTS_DISCONNINT BIT(USB_DWC2_GINTSTS_DISCONNINT_POS) #define USB_DWC2_GINTSTS_CONIDSTSCHNG_POS 28UL #define USB_DWC2_GINTSTS_CONIDSTSCHNG BIT(USB_DWC2_GINTSTS_CONIDSTSCHNG_POS) #define USB_DWC2_GINTSTS_LPM_INT_POS 27UL #define USB_DWC2_GINTSTS_LPM_INT BIT(USB_DWC2_GINTSTS_LPM_INT_POS) #define USB_DWC2_GINTSTS_HCHINT_POS 25UL #define USB_DWC2_GINTSTS_HCHINT BIT(USB_DWC2_GINTSTS_HCHINT_POS) #define USB_DWC2_GINTSTS_PRTINT_POS 24UL #define USB_DWC2_GINTSTS_PRTINT BIT(USB_DWC2_GINTSTS_PRTINT_POS) #define USB_DWC2_GINTSTS_RESETDET_POS 23UL #define USB_DWC2_GINTSTS_RESETDET BIT(USB_DWC2_GINTSTS_RESETDET_POS) #define USB_DWC2_GINTSTS_FETSUSP_POS 22UL #define USB_DWC2_GINTSTS_FETSUSP BIT(USB_DWC2_GINTSTS_FETSUSP_POS) #define USB_DWC2_GINTSTS_INCOMPIP_POS 21UL #define USB_DWC2_GINTSTS_INCOMPIP BIT(USB_DWC2_GINTSTS_INCOMPIP_POS) #define USB_DWC2_GINTSTS_INCOMPISOIN_POS 20UL #define USB_DWC2_GINTSTS_INCOMPISOIN BIT(USB_DWC2_GINTSTS_INCOMPISOIN_POS) #define USB_DWC2_GINTSTS_OEPINT_POS 19UL #define USB_DWC2_GINTSTS_OEPINT BIT(USB_DWC2_GINTSTS_OEPINT_POS) #define 
USB_DWC2_GINTSTS_IEPINT_POS 18UL #define USB_DWC2_GINTSTS_IEPINT BIT(USB_DWC2_GINTSTS_IEPINT_POS) #define USB_DWC2_GINTSTS_EPMIS_POS 17UL #define USB_DWC2_GINTSTS_EPMIS BIT(USB_DWC2_GINTSTS_EPMIS_POS) #define USB_DWC2_GINTSTS_RSTRDONEINT_POS 16UL #define USB_DWC2_GINTSTS_RSTRDONEINT BIT(USB_DWC2_GINTSTS_RSTRDONEINT_POS) #define USB_DWC2_GINTSTS_EOPF_POS 15UL #define USB_DWC2_GINTSTS_EOPF BIT(USB_DWC2_GINTSTS_EOPF_POS) #define USB_DWC2_GINTSTS_ISOOUTDROP_POS 14UL #define USB_DWC2_GINTSTS_ISOOUTDROP BIT(USB_DWC2_GINTSTS_ISOOUTDROP_POS) #define USB_DWC2_GINTSTS_ENUMDONE_POS 13UL #define USB_DWC2_GINTSTS_ENUMDONE BIT(USB_DWC2_GINTSTS_ENUMDONE_POS) #define USB_DWC2_GINTSTS_USBRST_POS 12UL #define USB_DWC2_GINTSTS_USBRST BIT(USB_DWC2_GINTSTS_USBRST_POS) #define USB_DWC2_GINTSTS_USBSUSP_POS 11UL #define USB_DWC2_GINTSTS_USBSUSP BIT(USB_DWC2_GINTSTS_USBSUSP_POS) #define USB_DWC2_GINTSTS_ERLYSUSP_POS 10UL #define USB_DWC2_GINTSTS_ERLYSUSP BIT(USB_DWC2_GINTSTS_ERLYSUSP_POS) #define USB_DWC2_GINTSTS_GOUTNAKEFF_POS 7UL #define USB_DWC2_GINTSTS_GOUTNAKEFF BIT(USB_DWC2_GINTSTS_GOUTNAKEFF_POS) #define USB_DWC2_GINTSTS_GINNAKEFF_POS 6UL #define USB_DWC2_GINTSTS_GINNAKEFF BIT(USB_DWC2_GINTSTS_GINNAKEFF_POS) #define USB_DWC2_GINTSTS_NPTXFEMP_POS 5UL #define USB_DWC2_GINTSTS_NPTXFEMP BIT(USB_DWC2_GINTSTS_NPTXFEMP_POS) #define USB_DWC2_GINTSTS_RXFLVL_POS 4UL #define USB_DWC2_GINTSTS_RXFLVL BIT(USB_DWC2_GINTSTS_RXFLVL_POS) #define USB_DWC2_GINTSTS_SOF_POS 3UL #define USB_DWC2_GINTSTS_SOF BIT(USB_DWC2_GINTSTS_SOF_POS) #define USB_DWC2_GINTSTS_OTGINT_POS 2UL #define USB_DWC2_GINTSTS_OTGINT BIT(USB_DWC2_GINTSTS_OTGINT_POS) #define USB_DWC2_GINTSTS_MODEMIS_POS 1UL #define USB_DWC2_GINTSTS_MODEMIS BIT(USB_DWC2_GINTSTS_MODEMIS_POS) #define USB_DWC2_GINTSTS_CURMOD_POS 0UL #define USB_DWC2_GINTSTS_CURMOD BIT(USB_DWC2_GINTSTS_CURMOD_POS) /* Status read and pop registers */ #define USB_DWC2_GRXSTSR 0x001CUL #define USB_DWC2_GRXSTSP 0x0020UL #define USB_DWC2_GRXSTSR_FN_POS 21UL #define 
USB_DWC2_GRXSTSR_FN_MASK (0xFUL << USB_DWC2_GRXSTSR_FN_POS) #define USB_DWC2_GRXSTSR_PKTSTS_POS 17UL #define USB_DWC2_GRXSTSR_PKTSTS_MASK (0xFUL << USB_DWC2_GRXSTSR_PKTSTS_POS) #define USB_DWC2_GRXSTSR_PKTSTS_GLOBAL_OUT_NAK 1 #define USB_DWC2_GRXSTSR_PKTSTS_OUT_DATA 2 #define USB_DWC2_GRXSTSR_PKTSTS_OUT_DATA_DONE 3 #define USB_DWC2_GRXSTSR_PKTSTS_SETUP_DONE 4 #define USB_DWC2_GRXSTSR_PKTSTS_SETUP 6 #define USB_DWC2_GRXSTSR_DPID_POS 15UL #define USB_DWC2_GRXSTSR_DPID_MASK (0x3UL << USB_DWC2_GRXSTSR_DPID_POS) #define USB_DWC2_GRXSTSR_DPID_DATA0 0 #define USB_DWC2_GRXSTSR_DPID_DATA2 1 #define USB_DWC2_GRXSTSR_DPID_DATA1 2 #define USB_DWC2_GRXSTSR_DPID_MDATA 3 #define USB_DWC2_GRXSTSR_BCNT_POS 4UL #define USB_DWC2_GRXSTSR_BCNT_MASK (0x000007FFUL << USB_DWC2_GRXSTSR_BCNT_POS) #define USB_DWC2_GRXSTSR_EPNUM_POS 0UL #define USB_DWC2_GRXSTSR_EPNUM_MASK 0x0000000FUL #define USB_DWC2_GRXSTSR_CHNUM_POS 0UL #define USB_DWC2_GRXSTSR_CHNUM_MASK 0x0000000FUL USB_DWC2_GET_FIELD_DEFINE(grxstsp_fn, GRXSTSR_FN) USB_DWC2_GET_FIELD_DEFINE(grxstsp_pktsts, GRXSTSR_PKTSTS) USB_DWC2_GET_FIELD_DEFINE(grxstsp_bcnt, GRXSTSR_BCNT) USB_DWC2_GET_FIELD_DEFINE(grxstsp_epnum, GRXSTSR_EPNUM) /* Receive FIFO size register (device mode) */ #define USB_DWC2_GRXFSIZ 0x0024UL #define USB_DWC2_GRXFSIZ_RXFDEP_POS 0UL #define USB_DWC2_GRXFSIZ_RXFDEP_MASK (0xFFFFUL << USB_DWC2_GRXFSIZ_RXFDEP_POS) USB_DWC2_GET_FIELD_DEFINE(grxfsiz, GRXFSIZ_RXFDEP) USB_DWC2_SET_FIELD_DEFINE(grxfsiz, GRXFSIZ_RXFDEP) /* Non-periodic transmit FIFO size register (device mode) */ #define USB_DWC2_GNPTXFSIZ 0x0028UL #define USB_DWC2_GNPTXFSIZ_NPTXFDEP_POS 16UL #define USB_DWC2_GNPTXFSIZ_NPTXFDEP_MASK (0xFFFFUL << USB_DWC2_GNPTXFSIZ_NPTXFDEP_POS) #define USB_DWC2_GNPTXFSIZ_NPTXFSTADDR_POS 0UL #define USB_DWC2_GNPTXFSIZ_NPTXFSTADDR_MASK (0xFFFFUL << USB_DWC2_GNPTXFSIZ_NPTXFSTADDR_POS) USB_DWC2_GET_FIELD_DEFINE(gnptxfsiz_nptxfdep, GNPTXFSIZ_NPTXFDEP) USB_DWC2_GET_FIELD_DEFINE(gnptxfsiz_nptxfstaddr, GNPTXFSIZ_NPTXFSTADDR) 
USB_DWC2_SET_FIELD_DEFINE(gnptxfsiz_nptxfdep, GNPTXFSIZ_NPTXFDEP)
USB_DWC2_SET_FIELD_DEFINE(gnptxfsiz_nptxfstaddr, GNPTXFSIZ_NPTXFSTADDR)

/* Application (vendor) general purpose registers */
#define USB_DWC2_GGPIO				0x0038UL
#define USB_DWC2_GGPIO_STM32_VBDEN_POS		21UL
#define USB_DWC2_GGPIO_STM32_VBDEN		BIT(USB_DWC2_GGPIO_STM32_VBDEN_POS)
#define USB_DWC2_GGPIO_STM32_PWRDWN_POS		16UL
#define USB_DWC2_GGPIO_STM32_PWRDWN		BIT(USB_DWC2_GGPIO_STM32_PWRDWN_POS)

/* GHWCFG1 register (per-endpoint direction configuration, 2 bits per endpoint) */
#define USB_DWC2_GHWCFG1			0x0044UL
/*
 * Parameter parenthesized so an expression argument (e.g. "n + 1")
 * expands with the intended precedence (CERT PRE01-C).
 */
#define USB_DWC2_GHWCFG1_EPDIR_POS(i)		((i) * 2)
#define USB_DWC2_GHWCFG1_EPDIR_MASK(i)		(0x3UL << USB_DWC2_GHWCFG1_EPDIR_POS(i))
#define USB_DWC2_GHWCFG1_EPDIR_OUT		2
#define USB_DWC2_GHWCFG1_EPDIR_IN		1
#define USB_DWC2_GHWCFG1_EPDIR_BDIR		0

USB_DWC2_GET_FIELD_AND_IDX_DEFINE(ghwcfg1_epdir, GHWCFG1_EPDIR)

/* GHWCFG2 register */
#define USB_DWC2_GHWCFG2			0x0048UL
#define USB_DWC2_GHWCFG2_DYNFIFOSIZING_POS	19UL
#define USB_DWC2_GHWCFG2_DYNFIFOSIZING		BIT(USB_DWC2_GHWCFG2_DYNFIFOSIZING_POS)
#define USB_DWC2_GHWCFG2_NUMDEVEPS_POS		10UL
#define USB_DWC2_GHWCFG2_NUMDEVEPS_MASK		(0xFUL << USB_DWC2_GHWCFG2_NUMDEVEPS_POS)
#define USB_DWC2_GHWCFG2_FSPHYTYPE_POS		8UL
#define USB_DWC2_GHWCFG2_FSPHYTYPE_MASK		(0x3UL << USB_DWC2_GHWCFG2_FSPHYTYPE_POS)
#define USB_DWC2_GHWCFG2_FSPHYTYPE_FSPLUSULPI	3
#define USB_DWC2_GHWCFG2_FSPHYTYPE_FSPLUSUTMI	2
#define USB_DWC2_GHWCFG2_FSPHYTYPE_FS		1
#define USB_DWC2_GHWCFG2_FSPHYTYPE_NO_FS	0
#define USB_DWC2_GHWCFG2_HSPHYTYPE_POS		6UL
#define USB_DWC2_GHWCFG2_HSPHYTYPE_MASK		(0x3UL << USB_DWC2_GHWCFG2_HSPHYTYPE_POS)
#define USB_DWC2_GHWCFG2_HSPHYTYPE_UTMIPLUSULPI	3
#define USB_DWC2_GHWCFG2_HSPHYTYPE_ULPI		2
#define USB_DWC2_GHWCFG2_HSPHYTYPE_UTMIPLUS	1
#define USB_DWC2_GHWCFG2_HSPHYTYPE_NO_HS	0
#define USB_DWC2_GHWCFG2_OTGARCH_POS		3UL
#define USB_DWC2_GHWCFG2_OTGARCH_MASK		(0x3UL << USB_DWC2_GHWCFG2_OTGARCH_POS)
#define USB_DWC2_GHWCFG2_OTGARCH_INTERNALDMA	2
#define USB_DWC2_GHWCFG2_OTGARCH_EXTERNALDMA	1
#define USB_DWC2_GHWCFG2_OTGARCH_SLAVEMODE	0
#define USB_DWC2_GHWCFG2_OTGMODE_POS		0UL
#define USB_DWC2_GHWCFG2_OTGMODE_MASK		(0x7UL << USB_DWC2_GHWCFG2_OTGMODE_POS)
#define USB_DWC2_GHWCFG2_OTGMODE_NONOTGH	6
#define USB_DWC2_GHWCFG2_OTGMODE_SRPCAPH	5
#define USB_DWC2_GHWCFG2_OTGMODE_NONOTGD	4
#define USB_DWC2_GHWCFG2_OTGMODE_SRPCAPD	3
#define USB_DWC2_GHWCFG2_OTGMODE_NHNPNSRP	2
#define USB_DWC2_GHWCFG2_OTGMODE_SRPOTG		1
#define USB_DWC2_GHWCFG2_OTGMODE_HNPSRP		0

USB_DWC2_GET_FIELD_DEFINE(ghwcfg2_numdeveps, GHWCFG2_NUMDEVEPS)
USB_DWC2_GET_FIELD_DEFINE(ghwcfg2_fsphytype, GHWCFG2_FSPHYTYPE)
USB_DWC2_GET_FIELD_DEFINE(ghwcfg2_hsphytype, GHWCFG2_HSPHYTYPE)
USB_DWC2_GET_FIELD_DEFINE(ghwcfg2_otgarch, GHWCFG2_OTGARCH)
USB_DWC2_GET_FIELD_DEFINE(ghwcfg2_otgmode, GHWCFG2_OTGMODE)

/* GHWCFG3 register */
#define USB_DWC2_GHWCFG3			0x004CUL
#define USB_DWC2_GHWCFG3_DFIFODEPTH_POS		16UL
#define USB_DWC2_GHWCFG3_DFIFODEPTH_MASK	(0xFFFFUL << USB_DWC2_GHWCFG3_DFIFODEPTH_POS)
#define USB_DWC2_GHWCFG3_LPMMODE_POS		15UL
#define USB_DWC2_GHWCFG3_LPMMODE		BIT(USB_DWC2_GHWCFG3_LPMMODE_POS)
#define USB_DWC2_GHWCFG3_OPTFEATURE_POS		10UL
#define USB_DWC2_GHWCFG3_OPTFEATURE		BIT(USB_DWC2_GHWCFG3_OPTFEATURE_POS)
#define USB_DWC2_GHWCFG3_VNDCTLSUPT_POS		9UL
#define USB_DWC2_GHWCFG3_VNDCTLSUPT		BIT(USB_DWC2_GHWCFG3_VNDCTLSUPT_POS)
#define USB_DWC2_GHWCFG3_OTGEN_POS		7UL
#define USB_DWC2_GHWCFG3_OTGEN			BIT(USB_DWC2_GHWCFG3_OTGEN_POS)
#define USB_DWC2_GHWCFG3_PKTSIZEWIDTH_POS	4UL
#define USB_DWC2_GHWCFG3_PKTSIZEWIDTH_MASK	(0x7UL << USB_DWC2_GHWCFG3_PKTSIZEWIDTH_POS)
#define USB_DWC2_GHWCFG3_PKTSIZEWIDTH_BITS10	6U
#define USB_DWC2_GHWCFG3_PKTSIZEWIDTH_BITS9	5U
#define USB_DWC2_GHWCFG3_PKTSIZEWIDTH_BITS8	4U
#define USB_DWC2_GHWCFG3_PKTSIZEWIDTH_BITS7	3U
#define USB_DWC2_GHWCFG3_PKTSIZEWIDTH_BITS6	2U
#define USB_DWC2_GHWCFG3_PKTSIZEWIDTH_BITS5	1U
#define USB_DWC2_GHWCFG3_PKTSIZEWIDTH_BITS4	0U
#define USB_DWC2_GHWCFG3_XFERSIZEWIDTH_POS	0UL
#define USB_DWC2_GHWCFG3_XFERSIZEWIDTH_MASK	(0xFUL << USB_DWC2_GHWCFG3_XFERSIZEWIDTH_POS)
#define USB_DWC2_GHWCFG3_XFERSIZEWIDTH_WIDTH19	8U
#define USB_DWC2_GHWCFG3_XFERSIZEWIDTH_WIDTH18	7U
#define USB_DWC2_GHWCFG3_XFERSIZEWIDTH_WIDTH17	6U
#define USB_DWC2_GHWCFG3_XFERSIZEWIDTH_WIDTH16	5U
#define USB_DWC2_GHWCFG3_XFERSIZEWIDTH_WIDTH15	4U
#define USB_DWC2_GHWCFG3_XFERSIZEWIDTH_WIDTH14	3U
#define USB_DWC2_GHWCFG3_XFERSIZEWIDTH_WIDTH13	2U
#define USB_DWC2_GHWCFG3_XFERSIZEWIDTH_WIDTH12	1U
#define USB_DWC2_GHWCFG3_XFERSIZEWIDTH_WIDTH11	0U

/*
 * Derive maximum packet count / transfer size from the encoded field widths.
 * Arguments parenthesized so an expression argument expands with the
 * intended precedence inside BIT_MASK() (CERT PRE01-C).
 */
#define GHWCFG3_PKTCOUNT(pktsizewidth)		BIT_MASK((pktsizewidth) + 4)
#define GHWCFG3_XFERSIZE(xfersizewidth)		BIT_MASK((xfersizewidth) + 11)

USB_DWC2_GET_FIELD_DEFINE(ghwcfg3_dfifodepth, GHWCFG3_DFIFODEPTH)
USB_DWC2_GET_FIELD_DEFINE(ghwcfg3_pktsizewidth, GHWCFG3_PKTSIZEWIDTH)
USB_DWC2_GET_FIELD_DEFINE(ghwcfg3_xfersizewidth, GHWCFG3_XFERSIZEWIDTH)

/* GHWCFG4 register */
#define USB_DWC2_GHWCFG4			0x0050UL
#define USB_DWC2_GHWCFG4_INEPS_POS		26UL
#define USB_DWC2_GHWCFG4_INEPS_MASK		(0xFUL << USB_DWC2_GHWCFG4_INEPS_POS)
#define USB_DWC2_GHWCFG4_DEDFIFOMODE_POS	25UL
#define USB_DWC2_GHWCFG4_DEDFIFOMODE		BIT(USB_DWC2_GHWCFG4_DEDFIFOMODE_POS)
#define USB_DWC2_GHWCFG4_NUMCTLEPS_POS		16UL
#define USB_DWC2_GHWCFG4_NUMCTLEPS_MASK		(0xFUL << USB_DWC2_GHWCFG4_NUMCTLEPS_POS)
#define USB_DWC2_GHWCFG4_PHYDATAWIDTH_POS	14UL
#define USB_DWC2_GHWCFG4_PHYDATAWIDTH_MASK	(0x3UL << USB_DWC2_GHWCFG4_PHYDATAWIDTH_POS)
#define USB_DWC2_GHWCFG4_NUMDEVPERIOEPS_POS	0UL
#define USB_DWC2_GHWCFG4_NUMDEVPERIOEPS_MASK	(0xFUL << USB_DWC2_GHWCFG4_NUMDEVPERIOEPS_POS)

USB_DWC2_GET_FIELD_DEFINE(ghwcfg4_ineps, GHWCFG4_INEPS)
USB_DWC2_GET_FIELD_DEFINE(ghwcfg4_numctleps, GHWCFG4_NUMCTLEPS)
USB_DWC2_GET_FIELD_DEFINE(ghwcfg4_phydatawidth, GHWCFG4_PHYDATAWIDTH)
USB_DWC2_GET_FIELD_DEFINE(ghwcfg4_numdevperioeps, GHWCFG4_NUMDEVPERIOEPS)

/* GDFIFOCFG register */
#define USB_DWC2_GDFIFOCFG			0x005CUL
#define USB_DWC2_GDFIFOCFG_EPINFOBASEADDR_POS	16UL
#define USB_DWC2_GDFIFOCFG_EPINFOBASEADDR_MASK	(0xFFFFUL << USB_DWC2_GDFIFOCFG_EPINFOBASEADDR_POS)
#define USB_DWC2_GDFIFOCFG_GDFIFOCFG_POS	0UL
#define USB_DWC2_GDFIFOCFG_GDFIFOCFG_MASK	(0xFFFFUL << USB_DWC2_GDFIFOCFG_GDFIFOCFG_POS)

/* Field accessor helpers, generated by the *_FIELD_DEFINE macros declared earlier in this header */
USB_DWC2_GET_FIELD_DEFINE(gdfifocfg_epinfobaseaddr, GDFIFOCFG_EPINFOBASEADDR)
USB_DWC2_GET_FIELD_DEFINE(gdfifocfg_gdfifocfg, GDFIFOCFG_GDFIFOCFG)
USB_DWC2_SET_FIELD_DEFINE(gdfifocfg_epinfobaseaddr, GDFIFOCFG_EPINFOBASEADDR)
USB_DWC2_SET_FIELD_DEFINE(gdfifocfg_gdfifocfg, GDFIFOCFG_GDFIFOCFG)

/* Device IN endpoint transmit FIFO size register */
#define USB_DWC2_DIEPTXF1			0x0104UL
#define USB_DWC2_DIEPTXF_INEPNTXFDEP_POS	16UL
#define USB_DWC2_DIEPTXF_INEPNTXFDEP_MASK	(0xFFFFUL << USB_DWC2_DIEPTXF_INEPNTXFDEP_POS)
#define USB_DWC2_DIEPTXF_INEPNTXFSTADDR_POS	0UL
#define USB_DWC2_DIEPTXF_INEPNTXFSTADDR_MASK	(0xFFFFUL << USB_DWC2_DIEPTXF_INEPNTXFSTADDR_POS)

USB_DWC2_GET_FIELD_DEFINE(dieptxf_inepntxfdep, DIEPTXF_INEPNTXFDEP)
USB_DWC2_GET_FIELD_DEFINE(dieptxf_inepntxfstaddr, DIEPTXF_INEPNTXFSTADDR)
USB_DWC2_SET_FIELD_DEFINE(dieptxf_inepntxfdep, DIEPTXF_INEPNTXFDEP)
USB_DWC2_SET_FIELD_DEFINE(dieptxf_inepntxfstaddr, DIEPTXF_INEPNTXFSTADDR)

/* Device configuration registers */
#define USB_DWC2_DCFG				0x0800UL
#define USB_DWC2_DCFG_RESVALID_POS		26UL
#define USB_DWC2_DCFG_RESVALID_MASK		(0x3FUL << USB_DWC2_DCFG_RESVALID_POS)
#define USB_DWC2_DCFG_PERSCHINTVL_POS		24UL
#define USB_DWC2_DCFG_PERSCHINTVL_MASK		(0x3UL << USB_DWC2_DCFG_PERSCHINTVL_POS)
/* MFxx names presumably encode the (micro)frame percentage — TODO confirm against DWC2 databook */
#define USB_DWC2_DCFG_PERSCHINTVL_MF25		0
#define USB_DWC2_DCFG_PERSCHINTVL_MF50		1
#define USB_DWC2_DCFG_PERSCHINTVL_MF75		2
#define USB_DWC2_DCFG_PERSCHINTVL_RESERVED	3
#define USB_DWC2_DCFG_DESCDMA_POS		23UL
#define USB_DWC2_DCFG_DESCDMA			BIT(USB_DWC2_DCFG_DESCDMA_POS)
#define USB_DWC2_DCFG_EPMISCNT_POS		18UL
#define USB_DWC2_DCFG_EPMISCNT_MASK		(0x1FUL << USB_DWC2_DCFG_EPMISCNT_POS)
#define USB_DWC2_DCFG_IPGISOCSUPT_POS		17UL
#define USB_DWC2_DCFG_IPGISOCSUPT		BIT(USB_DWC2_DCFG_IPGISOCSUPT_POS)
#define USB_DWC2_DCFG_ERRATICINTMSK_POS		15UL
#define USB_DWC2_DCFG_ERRATICINTMSK		BIT(USB_DWC2_DCFG_ERRATICINTMSK_POS)
#define USB_DWC2_DCFG_XCVRDLY_POS		14UL
#define USB_DWC2_DCFG_XCVRDLY			BIT(USB_DWC2_DCFG_XCVRDLY_POS)
#define USB_DWC2_DCFG_ENDEVOUTNAK_POS		13UL
#define USB_DWC2_DCFG_ENDEVOUTNAK		BIT(USB_DWC2_DCFG_ENDEVOUTNAK_POS)
#define USB_DWC2_DCFG_PERFRINT_POS		11UL
#define USB_DWC2_DCFG_PERFRINT_MASK		(0x3UL << USB_DWC2_DCFG_PERFRINT_POS)
#define USB_DWC2_DCFG_PERFRINT_EOPF80		0
#define USB_DWC2_DCFG_PERFRINT_EOPF85		1
#define USB_DWC2_DCFG_PERFRINT_EOPF90		2
#define USB_DWC2_DCFG_PERFRINT_EOPF95		3
#define USB_DWC2_DCFG_DEVADDR_POS		4UL
#define USB_DWC2_DCFG_DEVADDR_MASK		(0x7FUL << USB_DWC2_DCFG_DEVADDR_POS)
#define USB_DWC2_DCFG_ENA32KHZSUSP_POS		3UL
#define USB_DWC2_DCFG_ENA32KHZSUSP		BIT(USB_DWC2_DCFG_ENA32KHZSUSP_POS)
#define USB_DWC2_DCFG_NZSTSOUTHSHK_POS		2UL
#define USB_DWC2_DCFG_NZSTSOUTHSHK		BIT(USB_DWC2_DCFG_NZSTSOUTHSHK_POS)
#define USB_DWC2_DCFG_DEVSPD_POS		0UL
#define USB_DWC2_DCFG_DEVSPD_MASK		(0x03UL << USB_DWC2_DCFG_DEVSPD_POS)
#define USB_DWC2_DCFG_DEVSPD_USBHS20		0
#define USB_DWC2_DCFG_DEVSPD_USBFS20		1
#define USB_DWC2_DCFG_DEVSPD_USBLS116		2
#define USB_DWC2_DCFG_DEVSPD_USBFS1148		3

USB_DWC2_SET_FIELD_DEFINE(dcfg_resvalid, DCFG_RESVALID)
USB_DWC2_SET_FIELD_DEFINE(dcfg_perschintvl, DCFG_PERSCHINTVL)
USB_DWC2_SET_FIELD_DEFINE(dcfg_epmiscnt, DCFG_EPMISCNT)
USB_DWC2_SET_FIELD_DEFINE(dcfg_perfrint, DCFG_PERFRINT)
USB_DWC2_SET_FIELD_DEFINE(dcfg_devaddr, DCFG_DEVADDR)
USB_DWC2_SET_FIELD_DEFINE(dcfg_devspd, DCFG_DEVSPD)
USB_DWC2_GET_FIELD_DEFINE(dcfg_resvalid, DCFG_RESVALID)
USB_DWC2_GET_FIELD_DEFINE(dcfg_perschintvl, DCFG_PERSCHINTVL)
USB_DWC2_GET_FIELD_DEFINE(dcfg_epmiscnt, DCFG_EPMISCNT)
USB_DWC2_GET_FIELD_DEFINE(dcfg_perfrint, DCFG_PERFRINT)
USB_DWC2_GET_FIELD_DEFINE(dcfg_devaddr, DCFG_DEVADDR)
USB_DWC2_GET_FIELD_DEFINE(dcfg_devspd, DCFG_DEVSPD)

/* Device control register */
#define USB_DWC2_DCTL				0x0804UL
#define USB_DWC2_DCTL_SERVINT_POS		19UL
#define USB_DWC2_DCTL_SERVINT			BIT(USB_DWC2_DCTL_SERVINT_POS)
#define USB_DWC2_DCTL_DEEPSLEEPBESLREJECT_POS	18UL
#define USB_DWC2_DCTL_DEEPSLEEPBESLREJECT	BIT(USB_DWC2_DCTL_DEEPSLEEPBESLREJECT_POS)
#define USB_DWC2_DCTL_NAKONBBLE_POS		16UL
#define USB_DWC2_DCTL_NAKONBBLE		BIT(USB_DWC2_DCTL_NAKONBBLE_POS)
#define USB_DWC2_DCTL_IGNRFRMNUM_POS		15UL
#define USB_DWC2_DCTL_IGNRFRMNUM		BIT(USB_DWC2_DCTL_IGNRFRMNUM_POS)
#define USB_DWC2_DCTL_PWRONPRGDONE_POS		11UL
#define USB_DWC2_DCTL_PWRONPRGDONE		BIT(USB_DWC2_DCTL_PWRONPRGDONE_POS)
#define USB_DWC2_DCTL_CGOUTNAK_POS		10UL
#define USB_DWC2_DCTL_CGOUTNAK			BIT(USB_DWC2_DCTL_CGOUTNAK_POS)
#define USB_DWC2_DCTL_SGOUTNAK_POS		9UL
#define USB_DWC2_DCTL_SGOUTNAK			BIT(USB_DWC2_DCTL_SGOUTNAK_POS)
#define USB_DWC2_DCTL_CGNPINNAK_POS		8UL
#define USB_DWC2_DCTL_CGNPINNAK			BIT(USB_DWC2_DCTL_CGNPINNAK_POS)
#define USB_DWC2_DCTL_SGNPINNAK_POS		7UL
#define USB_DWC2_DCTL_SGNPINNAK			BIT(USB_DWC2_DCTL_SGNPINNAK_POS)
#define USB_DWC2_DCTL_TSTCTL_POS		4UL
#define USB_DWC2_DCTL_TSTCTL_MASK		(0x7UL << USB_DWC2_DCTL_TSTCTL_POS)
#define USB_DWC2_DCTL_TSTCTL_TESTFE		5UL
#define USB_DWC2_DCTL_TSTCTL_TESTPM		4UL
#define USB_DWC2_DCTL_TSTCTL_TESTSN		3UL
#define USB_DWC2_DCTL_TSTCTL_TESTK		2UL
#define USB_DWC2_DCTL_TSTCTL_TESTJ		1UL
#define USB_DWC2_DCTL_TSTCTL_DISABLED		0UL
#define USB_DWC2_DCTL_GOUTNAKSTS_POS		3UL
#define USB_DWC2_DCTL_GOUTNAKSTS		BIT(USB_DWC2_DCTL_GOUTNAKSTS_POS)
#define USB_DWC2_DCTL_GNPINNAKSTS_POS		2UL
#define USB_DWC2_DCTL_GNPINNAKSTS		BIT(USB_DWC2_DCTL_GNPINNAKSTS_POS)
#define USB_DWC2_DCTL_SFTDISCON_POS		1UL
#define USB_DWC2_DCTL_SFTDISCON			BIT(USB_DWC2_DCTL_SFTDISCON_POS)
#define USB_DWC2_DCTL_RMTWKUPSIG_POS		0UL
#define USB_DWC2_DCTL_RMTWKUPSIG		BIT(USB_DWC2_DCTL_RMTWKUPSIG_POS)

USB_DWC2_GET_FIELD_DEFINE(dctl_tstctl, DCTL_TSTCTL)
USB_DWC2_SET_FIELD_DEFINE(dctl_tstctl, DCTL_TSTCTL)

/* Device status register */
#define USB_DWC2_DSTS				0x0808UL
#define USB_DWC2_DSTS_ENUMSPD_POS		1UL
#define USB_DWC2_DSTS_ENUMSPD_MASK		(0x3UL << USB_DWC2_DSTS_ENUMSPD_POS)
#define USB_DWC2_DSTS_ENUMSPD_HS3060		0
#define USB_DWC2_DSTS_ENUMSPD_FS3060		1
#define USB_DWC2_DSTS_ENUMSPD_LS6		2
#define USB_DWC2_DSTS_ENUMSPD_FS48		3
USB_DWC2_GET_FIELD_DEFINE(dsts_enumspd, DSTS_ENUMSPD) /* Device all endpoints interrupt registers */ #define USB_DWC2_DAINT 0x0818UL #define USB_DWC2_DAINTMSK 0x081CUL #define USB_DWC2_DAINT_OUTEPINT(ep_num) BIT(16UL + ep_num) #define USB_DWC2_DAINT_INEPINT(ep_num) BIT(ep_num) /* Device threshold control register */ #define USB_DWC2_DTHRCTL 0x0830UL #define USB_DWC2_DTHRCTL_ARBPRKEN_POS 27UL #define USB_DWC2_DTHRCTL_ARBPRKEN BIT(USB_DWC2_DTHRCTL_ARBPRKEN_POS) #define USB_DWC2_DTHRCTL_RXTHRLEN_POS 17UL #define USB_DWC2_DTHRCTL_RXTHRLEN_MASK (0x1FFUL << USB_DWC2_DTHRCTL_RXTHRLEN_POS) #define USB_DWC2_DTHRCTL_RXTHREN_POS 16UL #define USB_DWC2_DTHRCTL_RXTHREN BIT(USB_DWC2_DTHRCTL_RXTHREN_POS) #define USB_DWC2_DTHRCTL_AHBTHRRATIO_POS 11UL #define USB_DWC2_DTHRCTL_AHBTHRRATIO_MASK (0x3UL << USB_DWC2_DTHRCTL_AHBTHRRATIO_POS) #define USB_DWC2_DTHRCTL_AHBTHRRATIO_THRESZERO 0 #define USB_DWC2_DTHRCTL_AHBTHRRATIO_THRESONE 1 #define USB_DWC2_DTHRCTL_AHBTHRRATIO_THRESTWO 2 #define USB_DWC2_DTHRCTL_AHBTHRRATIO_THRESTHREE 3 #define USB_DWC2_DTHRCTL_TXTHRLEN_POS 2UL #define USB_DWC2_DTHRCTL_TXTHRLEN_MASK (0x1FFUL << USB_DWC2_DTHRCTL_TXTHRLEN_POS) #define USB_DWC2_DTHRCTL_ISOTHREN_POS 1UL #define USB_DWC2_DTHRCTL_ISOTHREN BIT(USB_DWC2_DTHRCTL_ISOTHREN_POS) #define USB_DWC2_DTHRCTL_NONISOTHREN_POS 0UL #define USB_DWC2_DTHRCTL_NONISOTHREN BIT(USB_DWC2_DTHRCTL_NONISOTHREN_POS) USB_DWC2_GET_FIELD_DEFINE(dthrctl_rxthrlen, DTHRCTL_RXTHRLEN) USB_DWC2_GET_FIELD_DEFINE(dthrctl_ahbthrratio, DTHRCTL_AHBTHRRATIO) USB_DWC2_GET_FIELD_DEFINE(dthrctl_txthrlen, DTHRCTL_TXTHRLEN) USB_DWC2_SET_FIELD_DEFINE(dthrctl_rxthrlen, DTHRCTL_RXTHRLEN) USB_DWC2_SET_FIELD_DEFINE(dthrctl_ahbthrratio, DTHRCTL_AHBTHRRATIO) USB_DWC2_SET_FIELD_DEFINE(dthrctl_txthrlen, DTHRCTL_TXTHRLEN) /* * Device IN/OUT endpoint control register * IN endpoint offsets 0x0900 + (0x20 * n), n = 0 .. x, * OUT endpoint offsets 0x0B00 + (0x20 * n), n = 0 .. 
x, * */ #define USB_DWC2_DIEPCTL0 0x0900UL #define USB_DWC2_DOEPCTL0 0x0B00UL #define USB_DWC2_DEPCTL_EPENA_POS 31UL #define USB_DWC2_DEPCTL_EPENA BIT(USB_DWC2_DEPCTL_EPENA_POS) #define USB_DWC2_DEPCTL_EPDIS_POS 30UL #define USB_DWC2_DEPCTL_EPDIS BIT(USB_DWC2_DEPCTL_EPDIS_POS) #define USB_DWC2_DEPCTL_SETD1PID_POS 29UL #define USB_DWC2_DEPCTL_SETD1PID BIT(USB_DWC2_DEPCTL_SETD1PID_POS) #define USB_DWC2_DEPCTL_SETD0PID_POS 28UL #define USB_DWC2_DEPCTL_SETD0PID BIT(USB_DWC2_DEPCTL_SETD0PID_POS) #define USB_DWC2_DEPCTL_SNAK_POS 27UL #define USB_DWC2_DEPCTL_SNAK BIT(USB_DWC2_DEPCTL_SNAK_POS) #define USB_DWC2_DEPCTL_CNAK_POS 26UL #define USB_DWC2_DEPCTL_CNAK BIT(USB_DWC2_DEPCTL_CNAK_POS) #define USB_DWC2_DEPCTL_TXFNUM_POS 22UL #define USB_DWC2_DEPCTL_TXFNUM_MASK (0xFUL << USB_DWC2_DEPCTL_TXFNUM_POS) #define USB_DWC2_DEPCTL_STALL_POS 21UL #define USB_DWC2_DEPCTL_STALL BIT(USB_DWC2_DEPCTL_STALL_POS) #define USB_DWC2_DEPCTL_EPTYPE_POS 18UL #define USB_DWC2_DEPCTL_EPTYPE_MASK (0x3UL << USB_DWC2_DEPCTL_EPTYPE_POS) #define USB_DWC2_DEPCTL_EPTYPE_INTERRUPT 3 #define USB_DWC2_DEPCTL_EPTYPE_BULK 2 #define USB_DWC2_DEPCTL_EPTYPE_ISO 1 #define USB_DWC2_DEPCTL_EPTYPE_CONTROL 0 #define USB_DWC2_DEPCTL_NAKSTS_POS 17UL #define USB_DWC2_DEPCTL_NAKSTS BIT(USB_DWC2_DEPCTL_NAKSTS_POS) #define USB_DWC2_DEPCTL_DPID_POS 16UL #define USB_DWC2_DEPCTL_DPID BIT(USB_DWC2_DEPCTL_DPID_POS) #define USB_DWC2_DEPCTL_USBACTEP_POS 15UL #define USB_DWC2_DEPCTL_USBACTEP BIT(USB_DWC2_DEPCTL_USBACTEP_POS) #define USB_DWC2_DEPCTL0_MPS_POS 0UL #define USB_DWC2_DEPCTL0_MPS_MASK (0x3UL << USB_DWC2_DEPCTL0_MPS_POS) #define USB_DWC2_DEPCTL0_MPS_8 3 #define USB_DWC2_DEPCTL0_MPS_16 2 #define USB_DWC2_DEPCTL0_MPS_32 1 #define USB_DWC2_DEPCTL0_MPS_64 0 #define USB_DWC2_DEPCTL_MPS_POS 0UL #define USB_DWC2_DEPCTL_MPS_MASK (0x7FF << USB_DWC2_DEPCTL_MPS_POS) USB_DWC2_GET_FIELD_DEFINE(depctl_txfnum, DEPCTL_TXFNUM) USB_DWC2_SET_FIELD_DEFINE(depctl_txfnum, DEPCTL_TXFNUM) USB_DWC2_GET_FIELD_DEFINE(depctl_eptype, DEPCTL_EPTYPE) 
USB_DWC2_SET_FIELD_DEFINE(depctl_eptype, DEPCTL_EPTYPE) USB_DWC2_GET_FIELD_DEFINE(depctl0_mps, DEPCTL0_MPS) USB_DWC2_SET_FIELD_DEFINE(depctl0_mps, DEPCTL0_MPS) USB_DWC2_GET_FIELD_DEFINE(depctl_mps, DEPCTL_MPS) USB_DWC2_SET_FIELD_DEFINE(depctl_mps, DEPCTL_MPS) /* * Device IN endpoint interrupt register * offsets 0x0908 + (0x20 * n), n = 0 .. x */ #define USB_DWC2_DIEPINT0 0x0908UL #define USB_DWC2_DIEPINT_NYETINTRPT_POS 14UL #define USB_DWC2_DIEPINT_NYETINTRPT BIT(USB_DWC2_DIEPINT_NYETINTRPT_POS) #define USB_DWC2_DIEPINT_NAKINTRPT_POS 13UL #define USB_DWC2_DIEPINT_NAKINTRPT BIT(USB_DWC2_DIEPINT_NAKINTRPT_POS) #define USB_DWC2_DIEPINT_BBLEERR_POS 12UL #define USB_DWC2_DIEPINT_BBLEERR BIT(USB_DWC2_DIEPINT_BBLEERR_POS) #define USB_DWC2_DIEPINT_PKTDRPSTS_POS 11UL #define USB_DWC2_DIEPINT_PKTDRPSTS BIT(USB_DWC2_DIEPINT_PKTDRPSTS_POS) #define USB_DWC2_DIEPINT_BNAINTR_POS 9UL #define USB_DWC2_DIEPINT_BNAINTR BIT(USB_DWC2_DIEPINT_BNAINTR_POS) #define USB_DWC2_DIEPINT_TXFIFOUNDRN_POS 8UL #define USB_DWC2_DIEPINT_TXFIFOUNDRN BIT(USB_DWC2_DIEPINT_TXFIFOUNDRN_POS) #define USB_DWC2_DIEPINT_TXFEMP_POS 7UL #define USB_DWC2_DIEPINT_TXFEMP BIT(USB_DWC2_DIEPINT_TXFEMP_POS) #define USB_DWC2_DIEPINT_INEPNAKEFF_POS 6UL #define USB_DWC2_DIEPINT_INEPNAKEFF BIT(USB_DWC2_DIEPINT_INEPNAKEFF_POS) #define USB_DWC2_DIEPINT_INTKNEPMIS_POS 5UL #define USB_DWC2_DIEPINT_INTKNEPMIS BIT(USB_DWC2_DIEPINT_INTKNEPMIS_POS) #define USB_DWC2_DIEPINT_INTKNTXFEMP_POS 4UL #define USB_DWC2_DIEPINT_INTKNTXFEMP BIT(USB_DWC2_DIEPINT_INTKNTXFEMP_POS) #define USB_DWC2_DIEPINT_TIMEOUT_POS 3UL #define USB_DWC2_DIEPINT_TIMEOUT BIT(USB_DWC2_DIEPINT_TIMEOUT_POS) #define USB_DWC2_DIEPINT_AHBERR_POS 2UL #define USB_DWC2_DIEPINT_AHBERR BIT(USB_DWC2_DIEPINT_AHBERR_POS) #define USB_DWC2_DIEPINT_EPDISBLD_POS 1UL #define USB_DWC2_DIEPINT_EPDISBLD BIT(USB_DWC2_DIEPINT_EPDISBLD_POS) #define USB_DWC2_DIEPINT_XFERCOMPL_POS 0UL #define USB_DWC2_DIEPINT_XFERCOMPL BIT(USB_DWC2_DIEPINT_XFERCOMPL_POS) /* * Device OUT endpoint interrupt 
register * offsets 0x0B08 + (0x20 * n), n = 0 .. x */ #define USB_DWC2_DOEPINT0 0x0B08UL #define USB_DWC2_DOEPINT_STUPPKTRCVD_POS 15UL #define USB_DWC2_DOEPINT_STUPPKTRCVD BIT(USB_DWC2_DOEPINT_STUPPKTRCVD_POS) #define USB_DWC2_DOEPINT_NYETINTRPT_POS 14UL #define USB_DWC2_DOEPINT_NYETINTRPT BIT(USB_DWC2_DOEPINT_NYETINTRPT_POS) #define USB_DWC2_DOEPINT_NAKINTRPT_POS 13UL #define USB_DWC2_DOEPINT_NAKINTRPT BIT(USB_DWC2_DOEPINT_NAKINTRPT_POS) #define USB_DWC2_DOEPINT_BBLEERR_POS 12UL #define USB_DWC2_DOEPINT_BBLEERR BIT(USB_DWC2_DOEPINT_BBLEERR_POS) #define USB_DWC2_DOEPINT_PKTDRPSTS_POS 11UL #define USB_DWC2_DOEPINT_PKTDRPSTS BIT(USB_DWC2_DOEPINT_PKTDRPSTS_POS) #define USB_DWC2_DOEPINT_BNAINTR_POS 9UL #define USB_DWC2_DOEPINT_BNAINTR BIT(USB_DWC2_DOEPINT_BNAINTR_POS) #define USB_DWC2_DOEPINT_OUTPKTERR_POS 8UL #define USB_DWC2_DOEPINT_OUTPKTERR BIT(USB_DWC2_DOEPINT_OUTPKTERR_POS) #define USB_DWC2_DOEPINT_BACK2BACKSETUP_POS 6UL #define USB_DWC2_DOEPINT_BACK2BACKSETUP BIT(USB_DWC2_DOEPINT_BACK2BACKSETUP_POS) #define USB_DWC2_DOEPINT_STSPHSERCVD_POS 5UL #define USB_DWC2_DOEPINT_STSPHSERCVD BIT(USB_DWC2_DOEPINT_STSPHSERCVD_POS) #define USB_DWC2_DOEPINT_OUTTKNEPDIS_POS 4UL #define USB_DWC2_DOEPINT_OUTTKNEPDIS BIT(USB_DWC2_DOEPINT_OUTTKNEPDIS_POS) #define USB_DWC2_DOEPINT_SETUP_POS 3UL #define USB_DWC2_DOEPINT_SETUP BIT(USB_DWC2_DOEPINT_SETUP_POS) #define USB_DWC2_DOEPINT_AHBERR_POS 2UL #define USB_DWC2_DOEPINT_AHBERR BIT(USB_DWC2_DOEPINT_AHBERR_POS) #define USB_DWC2_DOEPINT_EPDISBLD_POS 1UL #define USB_DWC2_DOEPINT_EPDISBLD BIT(USB_DWC2_DOEPINT_EPDISBLD_POS) #define USB_DWC2_DOEPINT_XFERCOMPL_POS 0UL #define USB_DWC2_DOEPINT_XFERCOMPL BIT(USB_DWC2_DOEPINT_XFERCOMPL_POS) /* * Device IN/OUT control endpoint transfer size register */ #define USB_DWC2_DIEPTSIZ0 0x0910UL #define USB_DWC2_DOEPTSIZ0 0x0B10UL #define USB_DWC2_DOEPTSIZ0_SUPCNT_POS 29UL #define USB_DWC2_DOEPTSIZ0_SUPCNT_MASK (0x3UL << USB_DWC2_DOEPTSIZ0_SUPCNT_POS) #define USB_DWC2_DOEPTSIZ0_PKTCNT_POS 19UL #define 
USB_DWC2_DOEPTSIZ0_PKTCNT_MASK (0x1UL << USB_DWC2_DOEPTSIZ0_PKTCNT_POS) #define USB_DWC2_DIEPTSIZ0_PKTCNT_POS 19UL #define USB_DWC2_DIEPTSIZ0_PKTCNT_MASK (0x3UL << USB_DWC2_DIEPTSIZ0_PKTCNT_POS) #define USB_DWC2_DEPTSIZ0_XFERSIZE_POS 0UL #define USB_DWC2_DEPTSIZ0_XFERSIZE_MASK 0x7FUL USB_DWC2_GET_FIELD_DEFINE(doeptsiz0_supcnt, DOEPTSIZ0_SUPCNT) USB_DWC2_GET_FIELD_DEFINE(doeptsiz0_pktcnt, DOEPTSIZ0_PKTCNT) USB_DWC2_GET_FIELD_DEFINE(doeptsiz0_xfersize, DEPTSIZ0_XFERSIZE) USB_DWC2_GET_FIELD_DEFINE(dieptsiz0_pktcnt, DIEPTSIZ0_PKTCNT) USB_DWC2_GET_FIELD_DEFINE(dieptsiz0_xfersize, DEPTSIZ0_XFERSIZE) /* * Device IN/OUT endpoint transfer size register * IN at offsets 0x0910 + (0x20 * n), n = 1 .. x, * OUT at offsets 0x0B10 + (0x20 * n), n = 1 .. x */ #define USB_DWC2_DEPTSIZN_PKTCNT_POS 19UL #define USB_DWC2_DEPTSIZN_PKTCNT_MASK (0x3FFUL << USB_DWC2_DEPTSIZN_PKTCNT_POS) #define USB_DWC2_DEPTSIZN_XFERSIZE_POS 0UL #define USB_DWC2_DEPTSIZN_XFERSIZE_MASK 0x7FFFFUL USB_DWC2_GET_FIELD_DEFINE(deptsizn_pktcnt, DEPTSIZN_PKTCNT) USB_DWC2_GET_FIELD_DEFINE(deptsizn_xfersize, DEPTSIZN_XFERSIZE) USB_DWC2_SET_FIELD_DEFINE(deptsizn_pktcnt, DEPTSIZN_PKTCNT) USB_DWC2_SET_FIELD_DEFINE(deptsizn_xfersize, DEPTSIZN_XFERSIZE) /* * Device IN/OUT endpoint transfer size register * IN at offsets 0x0910 + (0x20 * n), n = 0 .. x, * OUT at offsets 0x0B10 + (0x20 * n), n = 0 .. x * * Note: Legacy definitions for the usb_dc_dw.c driver only. 
*/ #define USB_DWC2_DEPTSIZ_PKT_CNT_POS 19UL #define USB_DWC2_DIEPTSIZ0_PKT_CNT_MASK (0x3 << 19) #define USB_DWC2_DIEPTSIZn_PKT_CNT_MASK (0x3FF << 19) #define USB_DWC2_DOEPTSIZn_PKT_CNT_MASK (0x3FF << 19) #define USB_DWC2_DOEPTSIZ0_PKT_CNT_MASK (0x1 << 19) #define USB_DWC2_DOEPTSIZ_SUP_CNT_POS 29UL #define USB_DWC2_DOEPTSIZ_SUP_CNT_MASK (0x3 << 29) #define USB_DWC2_DEPTSIZ_XFER_SIZE_POS 0UL #define USB_DWC2_DEPTSIZ0_XFER_SIZE_MASK 0x7F #define USB_DWC2_DEPTSIZn_XFER_SIZE_MASK 0x7FFFF /* * Device IN endpoint transmit FIFO status register, * offsets 0x0918 + (0x20 * n), n = 0 .. x */ #define USB_DWC2_DTXFSTS0 0x0918UL #define USB_DWC2_DTXFSTS_INEPTXFSPCAVAIL_POS 0UL #define USB_DWC2_DTXFSTS_INEPTXFSPCAVAIL_MASK 0xFFFFUL USB_DWC2_GET_FIELD_DEFINE(dtxfsts_ineptxfspcavail, DTXFSTS_INEPTXFSPCAVAIL) #ifdef __cplusplus } #endif #endif /* ZEPHYR_DRIVERS_USB_COMMON_USB_DWC2_HW */ ```
/content/code_sandbox/drivers/usb/common/usb_dwc2_hw.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
13,883
```objective-c
/*
 *
 */
/* This file is undergoing transition towards native Zephyr nrf USB driver. */

/** @cond INTERNAL_HIDDEN */

#ifndef NRF_USBD_COMMON_H__
#define NRF_USBD_COMMON_H__

#include <nrfx.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @defgroup nrf_usbd_common USBD driver
 * @{
 * @ingroup nrf_usbd
 * @brief Universal Serial Bus Device (USBD) peripheral driver.
 */

/**
 * @brief Number of bytes in the endpoint.
 */
#define NRF_USBD_COMMON_EPSIZE 64

/**
 * @brief Number of bytes for isochronous endpoints.
 *
 * Number of bytes for isochronous endpoints in total.
 * This number would be shared between IN and OUT endpoint.
 * It may be also assigned totally to one endpoint.
 * @sa nrf_usbd_isosplit_set
 * @sa nrf_usbd_isosplit_get
 */
#define NRF_USBD_COMMON_ISOSIZE 1023

/**
 * @name Macros for creating endpoint identifiers.
 *
 * Auxiliary macros for creating endpoint identifiers compatible with the USB specification.
 * @{
 */

/**
 * @brief Create identifier for IN endpoint.
 *
 * Simple macro to create IN endpoint identifier for given endpoint number.
 *
 * @param[in] n Endpoint number.
 *
 * @return Endpoint identifier that connects endpoint number and endpoint direction.
 */
#define NRF_USBD_COMMON_EPIN(n) ((nrf_usbd_common_ep_t)(0x80 | n))

/**
 * @brief Create identifier for OUT endpoint.
 *
 * Simple macro to create OUT endpoint identifier for given endpoint number.
 *
 * @param[in] n Endpoint number.
 *
 * @return Endpoint identifier that connects endpoint number and endpoint direction.
 */
#define NRF_USBD_COMMON_EPOUT(n) ((nrf_usbd_common_ep_t)(n))

/** @} */

/**
 * @brief Endpoint identifier.
 *
 * Endpoint identifier used in the driver.
 * This endpoint number is consistent with USB 2.0 specification.
*/ typedef enum { NRF_USBD_COMMON_EPOUT0 = 0x00, /**< Endpoint OUT 0 */ NRF_USBD_COMMON_EPOUT1 = 0x01, /**< Endpoint OUT 1 */ NRF_USBD_COMMON_EPOUT2 = 0x02, /**< Endpoint OUT 2 */ NRF_USBD_COMMON_EPOUT3 = 0x03, /**< Endpoint OUT 3 */ NRF_USBD_COMMON_EPOUT4 = 0x04, /**< Endpoint OUT 4 */ NRF_USBD_COMMON_EPOUT5 = 0x05, /**< Endpoint OUT 5 */ NRF_USBD_COMMON_EPOUT6 = 0x06, /**< Endpoint OUT 6 */ NRF_USBD_COMMON_EPOUT7 = 0x07, /**< Endpoint OUT 7 */ NRF_USBD_COMMON_EPOUT8 = 0x08, /**< Endpoint OUT 8 */ NRF_USBD_COMMON_EPIN0 = 0x80, /**< Endpoint IN 0 */ NRF_USBD_COMMON_EPIN1 = 0x81, /**< Endpoint IN 1 */ NRF_USBD_COMMON_EPIN2 = 0x82, /**< Endpoint IN 2 */ NRF_USBD_COMMON_EPIN3 = 0x83, /**< Endpoint IN 3 */ NRF_USBD_COMMON_EPIN4 = 0x84, /**< Endpoint IN 4 */ NRF_USBD_COMMON_EPIN5 = 0x85, /**< Endpoint IN 5 */ NRF_USBD_COMMON_EPIN6 = 0x86, /**< Endpoint IN 6 */ NRF_USBD_COMMON_EPIN7 = 0x87, /**< Endpoint IN 7 */ NRF_USBD_COMMON_EPIN8 = 0x88, /**< Endpoint IN 8 */ } nrf_usbd_common_ep_t; /** * @brief Events generated by the driver. * * Enumeration of possible events that may be generated by the driver. */ typedef enum { NRF_USBD_COMMON_EVT_SOF, /**< Start Of Frame event on USB bus detected. */ NRF_USBD_COMMON_EVT_RESET, /**< Reset condition on USB bus detected. */ NRF_USBD_COMMON_EVT_SUSPEND, /**< This device should go to suspend mode now. */ NRF_USBD_COMMON_EVT_RESUME, /**< This device should resume from suspend now. */ /** Wakeup request - the USBD peripheral is ready to generate * WAKEUP signal after exiting low power mode. */ NRF_USBD_COMMON_EVT_WUREQ, NRF_USBD_COMMON_EVT_SETUP, /**< Setup frame received and decoded. */ /** For Rx (OUT: Host->Device): * 1. The packet has been received but there is no buffer * prepared for transfer already. * 2. Whole transfer has been finished. * * For Tx (IN: Device->Host): * The last packet from requested transfer has been transferred * over USB bus and acknowledged. 
 */
	NRF_USBD_COMMON_EVT_EPTRANSFER,
	NRF_USBD_COMMON_EVT_CNT /**< Number of defined events. */
} nrf_usbd_common_event_type_t;

/**
 * @brief Endpoint status codes.
 *
 * Status codes that may be returned by @ref nrf_usbd_common_ep_status_get or, except for
 * @ref NRF_USBD_COMMON_EP_BUSY, reported together with @ref NRF_USBD_COMMON_EVT_EPTRANSFER.
 */
typedef enum {
	/** No error occurred. */
	NRF_USBD_COMMON_EP_OK,
	/** Data received, no buffer prepared already - waiting for configured transfer. */
	NRF_USBD_COMMON_EP_WAITING,
	/** Received number of bytes cannot fit given buffer.
	 * This error would also be returned when next_transfer function
	 * has been defined but currently received data cannot fit completely
	 * in current buffer. No data split from single endpoint transmission
	 * is supported.
	 *
	 * When this error is reported - data is left inside endpoint
	 * buffer. Clear endpoint or prepare new buffer and read it.
	 */
	NRF_USBD_COMMON_EP_OVERLOAD,
	/** EP0 transfer can be aborted when new setup comes.
	 * Any other transfer can be aborted by USB reset or driver stopping.
	 */
	NRF_USBD_COMMON_EP_ABORTED,
	/** Transfer is in progress. */
	NRF_USBD_COMMON_EP_BUSY,
} nrf_usbd_common_ep_status_t;

/**
 * @brief Event structure.
 *
 * Structure passed to event handler.
 */
typedef struct {
	nrf_usbd_common_event_type_t type; /**< Event type. */
	union {
		struct {
			uint16_t framecnt; /**< Current value of frame counter. */
		} sof; /**< Data available for @ref NRF_USBD_COMMON_EVT_SOF. */
		struct {
			nrf_usbd_common_ep_t ep; /**< Endpoint number. */
		} isocrc; /**< Isochronous channel endpoint number. */
		struct {
			nrf_usbd_common_ep_t ep; /**< Endpoint number. */
			nrf_usbd_common_ep_status_t status; /**< Status for the endpoint. */
		} eptransfer; /**< Endpoint transfer status. */
	} data; /**< Union to store event data. */
} nrf_usbd_common_evt_t;

/**
 * @brief USBD event callback function type.
 *
 * @param[in] p_event Event information structure.
*/ typedef void (*nrf_usbd_common_event_handler_t)(nrf_usbd_common_evt_t const *p_event); /** * @brief Universal data pointer. * * Universal data pointer that can be used for any type of transfer. */ typedef union { void const *tx; /*!< Constant TX buffer pointer. */ void *rx; /*!< Writable RX buffer pointer. */ uint32_t addr; /*!< Numeric value used internally by the driver. */ } nrf_usbd_common_data_ptr_t; /** * @brief Structure to be filled with information about the next transfer. * * This is used mainly for transfer feeders and consumers. * It describes a single endpoint transfer and therefore the size of the buffer * can never be higher than the endpoint size. */ typedef struct { /** Union with available data pointers used by the driver. */ nrf_usbd_common_data_ptr_t p_data; /** Size of the requested transfer. */ size_t size; } nrf_usbd_common_ep_transfer_t; /** * @brief Flags for the current transfer. * * Flags configured for the transfer that can be merged using the bitwise 'or' operator (|). */ typedef enum { NRF_USBD_COMMON_TRANSFER_ZLP_FLAG = 1U << 0, /*!< Add a zero-length packet. */ } nrf_usbd_common_transfer_flags_t; /** * @brief Total transfer configuration. * * This structure is used to configure total transfer information. * It is used by internal built-in feeders and consumers. */ typedef struct { /** Union with available data pointers used by the driver. */ nrf_usbd_common_data_ptr_t p_data; /** Total size of the requested transfer. */ size_t size; /*!< Transfer flags. Use the @ref nrf_usbd_common_transfer_flags_t values. */ uint32_t flags; } nrf_usbd_common_transfer_t; /** * @brief Auxiliary macro for declaring IN transfer description with optional flags. * * The base macro for creating transfers with any configuration option. * * @param name Instance name. * @param tx_buff Buffer to transfer. * @param tx_size Transfer size. * @param tx_flags Flags for the transfer (see @ref nrf_usbd_common_transfer_flags_t). 
* * @return Configured variable with total transfer description. */ #define NRF_USBD_COMMON_TRANSFER_IN(name, tx_buff, tx_size, tx_flags) \ const nrf_usbd_common_transfer_t name = { \ .p_data = {.tx = (tx_buff)}, .size = (tx_size), .flags = (tx_flags)} /** * @brief Helper macro for declaring OUT transfer item (@ref nrf_usbd_common_transfer_t). * * @param name Instance name. * @param rx_buff Buffer to transfer. * @param rx_size Transfer size. */ #define NRF_USBD_COMMON_TRANSFER_OUT(name, rx_buff, rx_size) \ const nrf_usbd_common_transfer_t name = { \ .p_data = {.rx = (rx_buff)}, .size = (rx_size), .flags = 0} /** * @brief Setup packet structure. * * Structure that contains interpreted SETUP packet as described in USB specification. */ typedef struct { uint8_t bmRequestType; /*!< byte 0 */ uint8_t bRequest; /*!< byte 1 */ uint16_t wValue; /*!< byte 2, 3 */ uint16_t wIndex; /*!< byte 4, 5 */ uint16_t wLength; /*!< byte 6, 7 */ } nrf_usbd_common_setup_t; /** * @brief Driver initialization. * * @param[in] event_handler Event handler provided by the user. Cannot be null. * * @retval NRFX_SUCCESS Initialization successful. * @retval NRFX_ERROR_INVALID_STATE Driver was already initialized. */ nrfx_err_t nrf_usbd_common_init(nrf_usbd_common_event_handler_t event_handler); /** * @brief Driver deinitialization. */ void nrf_usbd_common_uninit(void); /** * @brief Enable the USBD port. * * After calling this function USBD peripheral would be enabled. * The USB LDO would be enabled. * Enabled USBD peripheral would request HFCLK. * This function does not enable external oscillator, so if it is not enabled by other part of the * program after enabling USBD driver HFINT would be used for the USBD peripheral. * It is perfectly fine until USBD is started. See @ref nrf_usbd_common_start. * * In normal situation this function should be called in reaction to USBDETECTED * event from POWER peripheral. 
* * Interrupts and USB pins pull-up would stay disabled until @ref nrf_usbd_common_start * function is called. */ void nrf_usbd_common_enable(void); /** * @brief Disable the USBD port. * * After calling this function USBD peripheral would be disabled. * No events would be detected or processed by the driver. * Clock for the peripheral would be disconnected. */ void nrf_usbd_common_disable(void); /** * @brief Start USB functionality. * * After calling this function USBD peripheral should be fully functional * and all new incoming events / interrupts would be processed by the driver. * * Also only after calling this function host sees new connected device. * * Call this function when USBD power LDO regulator is ready - on USBPWRRDY event * from POWER peripheral. * * Before USBD interrupts are enabled, external HFXO is requested. * * @param enable_sof The flag that is used to enable SOF processing. * If it is false, SOF interrupt is left disabled and will not be generated. * This improves power saving if SOF is not required. * * @note If the isochronous endpoints are going to be used, * it is required to enable the SOF. * In other case any isochronous endpoint would stay busy * after first transmission. */ void nrf_usbd_common_start(bool enable_sof); /** * @brief Check if driver is initialized. * * @retval false Driver is not initialized. * @retval true Driver is initialized. */ bool nrf_usbd_common_is_initialized(void); /** * @brief Check if driver is enabled. * * @retval false Driver is disabled. * @retval true Driver is enabled. */ bool nrf_usbd_common_is_enabled(void); /** * @brief Check if driver is started. * * @retval false Driver is not started. * @retval true Driver is started (fully functional). * @note The USBD peripheral interrupt state is checked. */ bool nrf_usbd_common_is_started(void); /** * @brief Suspend USBD operation. * * The USBD peripheral is forced to go into the low power mode. 
* The function has to be called in the reaction to @ref NRF_USBD_COMMON_EVT_SUSPEND event * when the firmware is ready. * * After successful call of this function most of the USBD registers would be unavailable. * * @note Check returned value for the feedback if suspending was successful. * * @retval true USBD peripheral successfully suspended. * @retval false USBD peripheral was not suspended due to resume detection. */ bool nrf_usbd_common_suspend(void); /** * @brief Start wake up procedure. * * The USBD peripheral is forced to quit the low power mode. * After calling this function all the USBD registers would be available. * * The hardware starts measuring time when wake up is possible. * This may take 0-5&nbsp;ms depending on how long the SUSPEND state was kept on the USB line. * When NRF_USBD_COMMON_EVT_WUREQ event is generated it means that Wake Up signaling has just been * started on the USB lines. * * @note Do not expect only @ref NRF_USBD_COMMON_EVT_WUREQ event. * There always may appear @ref NRF_USBD_COMMON_EVT_RESUME event. * @note NRF_USBD_COMMON_EVT_WUREQ event means that Remote WakeUp signal * has just begun to be generated. * This may take up to 20&nbsp;ms for the bus to become active. * * @retval true WakeUp procedure started. * @retval false No WakeUp procedure started - bus is already active. */ bool nrf_usbd_common_wakeup_req(void); /** * @brief Check if USBD is in SUSPEND mode. * * @note This is the information about peripheral itself, not about the bus state. * * @retval true USBD peripheral is suspended. * @retval false USBD peripheral is active. */ bool nrf_usbd_common_suspend_check(void); /** * @brief Check the bus state. * * This function checks if the bus state is suspended. * * @note The value returned by this function changes on SUSPEND and RESUME event processing. * * @retval true USBD bus is suspended. * @retval false USBD bus is active. 
*/ bool nrf_usbd_common_bus_suspend_check(void); /** * @brief Force the bus state to active */ void nrf_usbd_common_force_bus_wakeup(void); /** * @brief Configure packet size that should be supported by the endpoint. * * The real endpoint buffer size is always the same. * This value sets max packet size that would be transmitted over the endpoint. * This is required by the driver. * * @param[in] ep Endpoint number. * @param[in] size Required maximum packet size. * * @note Endpoint size is always set to @ref NRF_USBD_COMMON_EPSIZE * or @ref NRF_USBD_COMMON_ISOSIZE / 2 * when @ref nrf_usbd_common_ep_enable function is called. */ void nrf_usbd_common_ep_max_packet_size_set(nrf_usbd_common_ep_t ep, uint16_t size); /** * @brief Get configured endpoint packet size. * * Function to get configured endpoint size on the buffer. * * @param[in] ep Endpoint number. * * @return Maximum pocket size configured on selected endpoint. */ uint16_t nrf_usbd_common_ep_max_packet_size_get(nrf_usbd_common_ep_t ep); /** * @brief Check if the selected endpoint is enabled. * * @param[in] ep Endpoint number to check. * * @retval true Endpoint is enabled. * @retval false Endpoint is disabled. */ bool nrf_usbd_common_ep_enable_check(nrf_usbd_common_ep_t ep); /** * @brief Enable selected endpoint. * * This function enables endpoint itself and its interrupts. * * @param[in] ep Endpoint number to enable. * * @note * Max packet size is set to endpoint default maximum value. * * @sa nrf_usbd_common_ep_max_packet_size_set */ void nrf_usbd_common_ep_enable(nrf_usbd_common_ep_t ep); /** * @brief Disable selected endpoint. * * This function disables endpoint itself and its interrupts. * * @param[in] ep Endpoint number to disable. */ void nrf_usbd_common_ep_disable(nrf_usbd_common_ep_t ep); /** * @brief Start sending data over endpoint. * * Function initializes endpoint transmission. * This is asynchronous function - it finishes immediately after configuration * for transmission is prepared. 
* * @note Data buffer pointed by p_data have to be kept active till * @ref NRF_USBD_COMMON_EVT_EPTRANSFER event is generated. * * @param[in] ep Endpoint number. * For IN endpoint sending would be initiated. * For OUT endpoint receiving would be initiated. * @param[in] p_transfer Transfer parameters. * * @retval NRFX_SUCCESS Transfer queued or started. * @retval NRFX_ERROR_BUSY Selected endpoint is pending. * @retval NRFX_ERROR_INVALID_ADDR Unexpected transfer on EPIN0 or EPOUT0. */ nrfx_err_t nrf_usbd_common_ep_transfer(nrf_usbd_common_ep_t ep, nrf_usbd_common_transfer_t const *p_transfer); /** * @brief Get the information about last finished or current transfer. * * Function returns the status of the last buffer set for transfer on selected endpoint. * The status considers last buffer set by @ref nrf_usbd_common_ep_transfer function or * by transfer callback function. * * @param[in] ep Endpoint number. * @param[out] p_size Information about the current/last transfer size. * * @return Endpoint status. * * @sa nrf_usbd_common_ep_status_t */ nrf_usbd_common_ep_status_t nrf_usbd_common_ep_status_get(nrf_usbd_common_ep_t ep, size_t *p_size); /** * @brief Get number of received bytes. * * Get the number of received bytes. * The function behavior is undefined when called on IN endpoint. * * @param[in] ep Endpoint number. * * @return Number of received bytes. */ size_t nrf_usbd_common_epout_size_get(nrf_usbd_common_ep_t ep); /** * @brief Check if endpoint buffer is ready or is under USB IP control. * * Function to test if endpoint is busy. * Endpoint that is busy cannot be accessed by MCU. * It means that: * - OUT (TX) endpoint: Last uploaded data is still in endpoint and is waiting * to be received by the host. * - IN (RX) endpoint: Endpoint is ready to receive data from the host * and the endpoint does not have any data. * When endpoint is not busy: * - OUT (TX) endpoint: New data can be uploaded. 
* - IN (RX) endpoint: New data can be downloaded using @ref nrf_usbd_common_ep_transfer * function. * * @param[in] ep Endpoint number. * * @retval false Endpoint is not busy. * @retval true Endpoint is busy. */ bool nrf_usbd_common_ep_is_busy(nrf_usbd_common_ep_t ep); /** * @brief Stall endpoint * * Stall endpoit to send error information during next transfer request from * the host. * * @note To stall endpoint it is safer to use @ref nrf_usbd_common_setup_stall * @note Stalled endpoint would not be cleared when DMA transfer finishes. * * @param[in] ep Endpoint number to stall. */ void nrf_usbd_common_ep_stall(nrf_usbd_common_ep_t ep); /** * @brief Clear stall flag on endpoint. * * This function clears endpoint that is stalled. * @note * If it is OUT endpoint (receiving) it would be also prepared for reception. * It means that busy flag would be set. * @note * In endpoint (transmitting) would not be cleared - it gives possibility to * write new data before transmitting. * * @param[in] ep Endpoint number. */ void nrf_usbd_common_ep_stall_clear(nrf_usbd_common_ep_t ep); /** * @brief Check if endpoint is stalled. * * This function gets stall state of selected endpoint. * * @param[in] ep Endpoint number to check. * * @retval false Endpoint is not stalled. * @retval true Endpoint is stalled. */ bool nrf_usbd_common_ep_stall_check(nrf_usbd_common_ep_t ep); /** * @brief Clear current endpoint data toggle. * * @param[in] ep Endpoint number to clear. */ void nrf_usbd_common_ep_dtoggle_clear(nrf_usbd_common_ep_t ep); /** * @brief Get parsed setup data. * * Function fills the parsed setup data structure. * * @param[out] p_setup Pointer to data structure that would be filled by * parsed data. */ void nrf_usbd_common_setup_get(nrf_usbd_common_setup_t *p_setup); /** * @brief Clear the control endpoint for packet reception during DATA stage. * * This function may be called if any more data in control write transfer is expected. 
* Clears only OUT endpoint to be able to take another OUT data token. * It does not allow STATUS stage. * @sa nrf_usbd_common_setup_clear */ void nrf_usbd_common_setup_data_clear(void); /** * @brief Clear setup endpoint. * * This function acknowledges setup when SETUP command was received and processed. * It has to be called if no data respond for the SETUP command is sent. */ void nrf_usbd_common_setup_clear(void); /** * @brief Stall setup endpoint. * * Mark an error on setup endpoint. */ void nrf_usbd_common_setup_stall(void); /** * @brief Abort pending transfer on selected endpoint. * * @param[in] ep Endpoint number. */ void nrf_usbd_common_ep_abort(nrf_usbd_common_ep_t ep); /** * @brief Get the information about expected transfer SETUP data direction. * * Function returns the information about last expected transfer direction. * * @retval NRF_USBD_COMMON_EPOUT0 Expecting OUT (Host->Device) direction or no data. * @retval NRF_USBD_COMMON_EPIN0 Expecting IN (Device->Host) direction. */ nrf_usbd_common_ep_t nrf_usbd_common_last_setup_dir_get(void); /** * @brief Drop transfer on OUT endpoint. * * @param[in] ep OUT endpoint ID. */ void nrf_usbd_common_transfer_out_drop(nrf_usbd_common_ep_t ep); /** @} */ void nrf_usbd_common_irq_handler(void); #ifdef __cplusplus } #endif #endif /* NRF_USBD_COMMON_H__ */ /** @endcond */ ```
/content/code_sandbox/drivers/usb/common/nrf_usbd_common/nrf_usbd_common.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,203
```unknown module = NRF_USBD_COMMON module-str = nRF USBD common source "subsys/logging/Kconfig.template.log_config" config NRF_USBD_COMMON bool "USBD driver" depends on HAS_NRFX depends on $(dt_has_compat,$(DT_COMPAT_NORDIC_NRF_USBD)) config NRF_USBD_ISO_IN_ZLP bool "Send ZLP on ISO IN when not ready" depends on NRF_USBD_COMMON default y help Controls the response of the ISO IN endpoint to an IN token when no data is ready to be sent. When enabled, ZLP is sent when no data is ready. When disabled, no response is sent (bus timeout occurs). ```
/content/code_sandbox/drivers/usb/common/nrf_usbd_common/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
152
```c /* * */ /* This file is undergoing transition towards native Zephyr nrf USB driver. */ /** @cond INTERNAL_HIDDEN */ #include <nrfx.h> #include "nrf_usbd_common.h" #include "nrf_usbd_common_errata.h" #include <string.h> #include <zephyr/kernel.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(nrf_usbd_common, CONFIG_NRF_USBD_COMMON_LOG_LEVEL); #define NRF_USBD_COMMON_EPIN_CNT 9 #define NRF_USBD_COMMON_EPOUT_CNT 9 #define NRF_USBD_COMMON_EP_NUM(ep) (ep & 0xF) #define NRF_USBD_COMMON_EP_IS_IN(ep) ((ep & 0x80) == 0x80) #define NRF_USBD_COMMON_EP_IS_OUT(ep) ((ep & 0x80) == 0) #define NRF_USBD_COMMON_EP_IS_ISO(ep) ((ep & 0xF) >= 8) #ifndef NRF_USBD_COMMON_ISO_DEBUG /* Also generate information about ISOCHRONOUS events and transfers. * Turn this off if no ISOCHRONOUS transfers are going to be debugged and this * option generates a lot of useless messages. */ #define NRF_USBD_COMMON_ISO_DEBUG 1 #endif #ifndef NRF_USBD_COMMON_FAILED_TRANSFERS_DEBUG /* Also generate debug information for failed transfers. * It might be useful but may generate a lot of useless debug messages * in some library usages (for example when transfer is generated and the * result is used to check whatever endpoint was busy. */ #define NRF_USBD_COMMON_FAILED_TRANSFERS_DEBUG 1 #endif #ifndef NRF_USBD_COMMON_DMAREQ_PROCESS_DEBUG /* Generate additional messages that mark the status inside * @ref usbd_dmareq_process. * It is useful to debug library internals but may generate a lot of * useless debug messages. */ #define NRF_USBD_COMMON_DMAREQ_PROCESS_DEBUG 1 #endif #ifndef NRF_USBD_COMMON_USE_WORKAROUND_FOR_ANOMALY_211 /* Anomaly 211 - Device remains in SUSPEND too long when host resumes * a bus activity (sending SOF packets) without a RESUME condition. 
*/ #define NRF_USBD_COMMON_USE_WORKAROUND_FOR_ANOMALY_211 0 #endif /** * @defgroup nrf_usbd_common_int USB Device driver internal part * @internal * @ingroup nrf_usbd_common * * This part contains auxiliary internal macros, variables and functions. * @{ */ /** * @brief Assert endpoint number validity. * * Internal macro to be used during program creation in debug mode. * Generates assertion if endpoint number is not valid. * * @param ep Endpoint number to validity check. */ #define NRF_USBD_COMMON_ASSERT_EP_VALID(ep) __ASSERT_NO_MSG( \ ((NRF_USBD_COMMON_EP_IS_IN(ep) && \ (NRF_USBD_COMMON_EP_NUM(ep) < NRF_USBD_COMMON_EPIN_CNT)) || \ (NRF_USBD_COMMON_EP_IS_OUT(ep) && \ (NRF_USBD_COMMON_EP_NUM(ep) < NRF_USBD_COMMON_EPOUT_CNT)))); /** * @brief Lowest position of bit for IN endpoint. * * The first bit position corresponding to IN endpoint. * @sa ep2bit bit2ep */ #define NRF_USBD_COMMON_EPIN_BITPOS_0 0 /** * @brief Lowest position of bit for OUT endpoint. * * The first bit position corresponding to OUT endpoint * @sa ep2bit bit2ep */ #define NRF_USBD_COMMON_EPOUT_BITPOS_0 16 /** * @brief Input endpoint bits mask. */ #define NRF_USBD_COMMON_EPIN_BIT_MASK (0xFFFFU << NRF_USBD_COMMON_EPIN_BITPOS_0) /** * @brief Output endpoint bits mask. */ #define NRF_USBD_COMMON_EPOUT_BIT_MASK (0xFFFFU << NRF_USBD_COMMON_EPOUT_BITPOS_0) /** * @brief Isochronous endpoint bit mask */ #define USBD_EPISO_BIT_MASK \ ((1U << NRF_USBD_COMMON_EP_BITPOS(NRF_USBD_COMMON_EPOUT8)) | \ (1U << NRF_USBD_COMMON_EP_BITPOS(NRF_USBD_COMMON_EPIN8))) /** * @brief Auxiliary macro to change EP number into bit position. * * This macro is used by @ref ep2bit function but also for statically check * the bitpos values integrity during compilation. * * @param[in] ep Endpoint number. * @return Endpoint bit position. */ #define NRF_USBD_COMMON_EP_BITPOS(ep) ((NRF_USBD_COMMON_EP_IS_IN(ep) \ ? 
NRF_USBD_COMMON_EPIN_BITPOS_0 : NRF_USBD_COMMON_EPOUT_BITPOS_0) \ + NRF_USBD_COMMON_EP_NUM(ep)) /** * @brief Helper macro for creating an endpoint transfer event. * * @param[in] name Name of the created transfer event variable. * @param[in] endpoint Endpoint number. * @param[in] ep_stat Endpoint state to report. * * @return Initialized event constant variable. */ #define NRF_USBD_COMMON_EP_TRANSFER_EVENT(name, endpont, ep_stat) \ const nrf_usbd_common_evt_t name = {NRF_USBD_COMMON_EVT_EPTRANSFER, \ .data = {.eptransfer = {.ep = endpont, .status = ep_stat}}} /* Check it the bit positions values match defined DATAEPSTATUS bit positions */ BUILD_ASSERT( (NRF_USBD_COMMON_EP_BITPOS(NRF_USBD_COMMON_EPIN1) == USBD_EPDATASTATUS_EPIN1_Pos) && (NRF_USBD_COMMON_EP_BITPOS(NRF_USBD_COMMON_EPIN2) == USBD_EPDATASTATUS_EPIN2_Pos) && (NRF_USBD_COMMON_EP_BITPOS(NRF_USBD_COMMON_EPIN3) == USBD_EPDATASTATUS_EPIN3_Pos) && (NRF_USBD_COMMON_EP_BITPOS(NRF_USBD_COMMON_EPIN4) == USBD_EPDATASTATUS_EPIN4_Pos) && (NRF_USBD_COMMON_EP_BITPOS(NRF_USBD_COMMON_EPIN5) == USBD_EPDATASTATUS_EPIN5_Pos) && (NRF_USBD_COMMON_EP_BITPOS(NRF_USBD_COMMON_EPIN6) == USBD_EPDATASTATUS_EPIN6_Pos) && (NRF_USBD_COMMON_EP_BITPOS(NRF_USBD_COMMON_EPIN7) == USBD_EPDATASTATUS_EPIN7_Pos) && (NRF_USBD_COMMON_EP_BITPOS(NRF_USBD_COMMON_EPOUT1) == USBD_EPDATASTATUS_EPOUT1_Pos) && (NRF_USBD_COMMON_EP_BITPOS(NRF_USBD_COMMON_EPOUT2) == USBD_EPDATASTATUS_EPOUT2_Pos) && (NRF_USBD_COMMON_EP_BITPOS(NRF_USBD_COMMON_EPOUT3) == USBD_EPDATASTATUS_EPOUT3_Pos) && (NRF_USBD_COMMON_EP_BITPOS(NRF_USBD_COMMON_EPOUT4) == USBD_EPDATASTATUS_EPOUT4_Pos) && (NRF_USBD_COMMON_EP_BITPOS(NRF_USBD_COMMON_EPOUT5) == USBD_EPDATASTATUS_EPOUT5_Pos) && (NRF_USBD_COMMON_EP_BITPOS(NRF_USBD_COMMON_EPOUT6) == USBD_EPDATASTATUS_EPOUT6_Pos) && (NRF_USBD_COMMON_EP_BITPOS(NRF_USBD_COMMON_EPOUT7) == USBD_EPDATASTATUS_EPOUT7_Pos), "NRF_USBD_COMMON bit positions do not match hardware" ); /** * @brief Current driver state. 
*/ static nrfx_drv_state_t m_drv_state = NRFX_DRV_STATE_UNINITIALIZED; /** * @brief Event handler for the driver. * * Event handler that would be called on events. * * @note Currently it cannot be null if any interrupt is activated. */ static nrf_usbd_common_event_handler_t m_event_handler; /** * @brief Detected state of the bus. * * Internal state changed in interrupts handling when * RESUME or SUSPEND event is processed. * * Values: * - true - bus suspended * - false - ongoing normal communication on the bus * * @note This is only the bus state and does not mean that the peripheral is in suspend state. */ static volatile bool m_bus_suspend; /** * @brief Direction of last received Setup transfer. * * This variable is used to redirect internal setup data event * into selected endpoint (IN or OUT). */ static nrf_usbd_common_ep_t m_last_setup_dir; /** * @brief Mark endpoint readiness for DMA transfer. * * Bits in this variable are cleared and set in interrupts. * 1 means that endpoint is ready for DMA transfer. * 0 means that DMA transfer cannot be performed on selected endpoint. */ static uint32_t m_ep_ready; /** * @brief Mark endpoint with prepared data to transfer by DMA. * * This variable can be set in interrupt context or within critical section. * It would be cleared only from USBD interrupt. * * Mask prepared USBD data for transmission. * It is cleared when no more data to transmit left. */ static uint32_t m_ep_dma_waiting; /* Semaphore to guard EasyDMA access. * In USBD there is only one DMA channel working in background, and new transfer * cannot be started when there is ongoing transfer on any other channel. */ static K_SEM_DEFINE(dma_available, 1, 1); /* Endpoint on which DMA was started. */ static nrf_usbd_common_ep_t dma_ep; /** * @brief Tracks whether total bytes transferred by DMA is even or odd. */ static uint8_t m_dma_odd; /** * @brief First time enabling after reset. Used in nRF52 errata 223. 
*/ static bool m_first_enable = true; /** * @brief The structure that would hold transfer configuration to every endpoint * * The structure that holds all the data required by the endpoint to proceed * with LIST functionality and generate quick callback directly when data * buffer is ready. */ typedef struct { nrf_usbd_common_transfer_t transfer_state; bool more_transactions; /** Number of transferred bytes in the current transfer. */ size_t transfer_cnt; /** Configured endpoint size. */ uint16_t max_packet_size; /** NRFX_SUCCESS or error code, never NRFX_ERROR_BUSY - this one is calculated. */ nrf_usbd_common_ep_status_t status; } usbd_ep_state_t; /** * @brief The array of transfer configurations for the endpoints. * * The status of the transfer on each endpoint. */ static struct { usbd_ep_state_t ep_out[NRF_USBD_COMMON_EPOUT_CNT]; /*!< Status for OUT endpoints. */ usbd_ep_state_t ep_in[NRF_USBD_COMMON_EPIN_CNT]; /*!< Status for IN endpoints. */ } m_ep_state; #define NRF_USBD_COMMON_FEEDER_BUFFER_SIZE NRF_USBD_COMMON_EPSIZE /** * @brief Buffer used to send data directly from FLASH. * * This is internal buffer that would be used to emulate the possibility * to transfer data directly from FLASH. * We do not have to care about the source of data when calling transfer functions. * * We do not need more buffers that one, because only one transfer can be pending * at once. */ static uint32_t m_tx_buffer[NRFX_CEIL_DIV(NRF_USBD_COMMON_FEEDER_BUFFER_SIZE, sizeof(uint32_t))]; /* Early declaration. Documentation above definition. 
*/ static void usbd_dmareq_process(void); static inline void usbd_int_rise(void); static void nrf_usbd_common_stop(void); /* Get EasyDMA end event address for given endpoint */ static volatile uint32_t *usbd_ep_to_endevent(nrf_usbd_common_ep_t ep) { int ep_in = NRF_USBD_COMMON_EP_IS_IN(ep); int ep_num = NRF_USBD_COMMON_EP_NUM(ep); NRF_USBD_COMMON_ASSERT_EP_VALID(ep); if (!NRF_USBD_COMMON_EP_IS_ISO(ep_num)) { if (ep_in) { return &NRF_USBD->EVENTS_ENDEPIN[ep_num]; } else { return &NRF_USBD->EVENTS_ENDEPOUT[ep_num]; } } return ep_in ? &NRF_USBD->EVENTS_ENDISOIN : &NRF_USBD->EVENTS_ENDISOOUT; } /* Return number of bytes last transferred by EasyDMA on given endpoint */ static uint32_t usbd_ep_amount_get(nrf_usbd_common_ep_t ep) { int ep_in = NRF_USBD_COMMON_EP_IS_IN(ep); int ep_num = NRF_USBD_COMMON_EP_NUM(ep); NRF_USBD_COMMON_ASSERT_EP_VALID(ep); if (!NRF_USBD_COMMON_EP_IS_ISO(ep_num)) { if (ep_in) { return NRF_USBD->EPIN[ep_num].AMOUNT; } else { return NRF_USBD->EPOUT[ep_num].AMOUNT; } } return ep_in ? 
NRF_USBD->ISOIN.AMOUNT : NRF_USBD->ISOOUT.AMOUNT; } /* Start EasyDMA on given endpoint */ static void usbd_ep_dma_start(nrf_usbd_common_ep_t ep, uint32_t addr, size_t len) { int ep_in = NRF_USBD_COMMON_EP_IS_IN(ep); int ep_num = NRF_USBD_COMMON_EP_NUM(ep); NRF_USBD_COMMON_ASSERT_EP_VALID(ep); if (!NRF_USBD_COMMON_EP_IS_ISO(ep_num)) { if (ep_in) { NRF_USBD->EPIN[ep_num].PTR = addr; NRF_USBD->EPIN[ep_num].MAXCNT = len; NRF_USBD->TASKS_STARTEPIN[ep_num] = 1; } else { NRF_USBD->EPOUT[ep_num].PTR = addr; NRF_USBD->EPOUT[ep_num].MAXCNT = len; NRF_USBD->TASKS_STARTEPOUT[ep_num] = 1; } } else if (ep_in) { NRF_USBD->ISOIN.PTR = addr; NRF_USBD->ISOIN.MAXCNT = len; NRF_USBD->TASKS_STARTISOIN = 1; } else { NRF_USBD->ISOOUT.PTR = addr; NRF_USBD->ISOOUT.MAXCNT = len; NRF_USBD->TASKS_STARTISOOUT = 1; } } static bool nrf_usbd_common_consumer(nrf_usbd_common_ep_transfer_t *p_next, nrf_usbd_common_transfer_t *p_transfer, size_t ep_size, size_t data_size) { __ASSERT_NO_MSG(ep_size >= data_size); __ASSERT_NO_MSG((p_transfer->p_data.rx == NULL) || nrfx_is_in_ram(p_transfer->p_data.rx)); size_t size = p_transfer->size; if (size < data_size) { LOG_DBG("consumer: buffer too small: r: %u, l: %u", data_size, size); /* Buffer size to small */ p_next->size = 0; p_next->p_data = p_transfer->p_data; } else { p_next->size = data_size; p_next->p_data = p_transfer->p_data; size -= data_size; p_transfer->size = size; p_transfer->p_data.addr += data_size; } return (ep_size == data_size) && (size != 0); } static bool nrf_usbd_common_feeder(nrf_usbd_common_ep_transfer_t *p_next, nrf_usbd_common_transfer_t *p_transfer, size_t ep_size) { size_t tx_size = p_transfer->size; if (tx_size > ep_size) { tx_size = ep_size; } if (!nrfx_is_in_ram(p_transfer->p_data.tx)) { __ASSERT_NO_MSG(tx_size <= NRF_USBD_COMMON_FEEDER_BUFFER_SIZE); memcpy(m_tx_buffer, (p_transfer->p_data.tx), tx_size); p_next->p_data.tx = m_tx_buffer; } else { p_next->p_data = p_transfer->p_data; } p_next->size = tx_size; p_transfer->size -= 
tx_size; p_transfer->p_data.addr += tx_size; if (p_transfer->flags & NRF_USBD_COMMON_TRANSFER_ZLP_FLAG) { return (tx_size != 0); } else { return (p_transfer->size != 0); } } /** * @brief Change Driver endpoint number to HAL endpoint number. * * @param ep Driver endpoint identifier. * * @return Endpoint identifier in HAL. * * @sa nrf_usbd_common_ep_from_hal */ static inline uint8_t ep_to_hal(nrf_usbd_common_ep_t ep) { NRF_USBD_COMMON_ASSERT_EP_VALID(ep); return (uint8_t)ep; } /** * @brief Access selected endpoint state structure. * * Function used to change or just read the state of selected endpoint. * It is used for internal transmission state. * * @param ep Endpoint number. */ static inline usbd_ep_state_t *ep_state_access(nrf_usbd_common_ep_t ep) { NRF_USBD_COMMON_ASSERT_EP_VALID(ep); return ((NRF_USBD_COMMON_EP_IS_IN(ep) ? m_ep_state.ep_in : m_ep_state.ep_out) + NRF_USBD_COMMON_EP_NUM(ep)); } /** * @brief Change endpoint number to bit position. * * Bit positions are defined the same way as they are placed in DATAEPSTATUS register, * but bits for endpoint 0 are included. * * @param ep Endpoint number. * * @return Bit position related to the given endpoint number. * * @sa bit2ep */ static inline uint8_t ep2bit(nrf_usbd_common_ep_t ep) { NRF_USBD_COMMON_ASSERT_EP_VALID(ep); return NRF_USBD_COMMON_EP_BITPOS(ep); } /** * @brief Change bit position to endpoint number. * * @param bitpos Bit position. * * @return Endpoint number corresponding to given bit position. * * @sa ep2bit */ static inline nrf_usbd_common_ep_t bit2ep(uint8_t bitpos) { BUILD_ASSERT(NRF_USBD_COMMON_EPOUT_BITPOS_0 > NRF_USBD_COMMON_EPIN_BITPOS_0, "OUT endpoint bits should be higher than IN endpoint bits"); return (nrf_usbd_common_ep_t)((bitpos >= NRF_USBD_COMMON_EPOUT_BITPOS_0) ? NRF_USBD_COMMON_EPOUT(bitpos - NRF_USBD_COMMON_EPOUT_BITPOS_0) : NRF_USBD_COMMON_EPIN(bitpos)); } /** * @brief Mark that EasyDMA is working. * * Internal function to set the flag informing about EasyDMA transfer pending. 
* This function is called always just after the EasyDMA transfer is started. */ static inline void usbd_dma_pending_set(void) { if (nrf_usbd_common_errata_199()) { *((volatile uint32_t *)0x40027C1C) = 0x00000082; } } /** * @brief Mark that EasyDMA is free. * * Internal function to clear the flag informing about EasyDMA transfer pending. * This function is called always just after the finished EasyDMA transfer is detected. */ static inline void usbd_dma_pending_clear(void) { if (nrf_usbd_common_errata_199()) { *((volatile uint32_t *)0x40027C1C) = 0x00000000; } } /** * @brief Abort pending transfer on selected endpoint. * * @param ep Endpoint number. * * @note * This function locks interrupts that may be costly. * It is good idea to test if the endpoint is still busy before calling this function: * @code (m_ep_dma_waiting & (1U << ep2bit(ep))) * @endcode * This function would check it again, but it makes it inside critical section. */ static inline void usbd_ep_abort(nrf_usbd_common_ep_t ep) { unsigned int irq_lock_key = irq_lock(); usbd_ep_state_t *p_state = ep_state_access(ep); if (NRF_USBD_COMMON_EP_IS_OUT(ep)) { /* Host -> Device */ if ((~m_ep_dma_waiting) & (1U << ep2bit(ep))) { /* If the bit in m_ep_dma_waiting in cleared - nothing would be * processed inside transfer processing */ nrf_usbd_common_transfer_out_drop(ep); } else { p_state->more_transactions = false; m_ep_dma_waiting &= ~(1U << ep2bit(ep)); m_ep_ready &= ~(1U << ep2bit(ep)); } /* Aborted */ p_state->status = NRF_USBD_COMMON_EP_ABORTED; } else { if (!NRF_USBD_COMMON_EP_IS_ISO(ep)) { /* Workaround: Disarm the endpoint if there is any data buffered. 
*/ if (ep != NRF_USBD_COMMON_EPIN0) { *((volatile uint32_t *)((uint32_t)(NRF_USBD) + 0x800)) = 0x7B6 + (2u * (NRF_USBD_COMMON_EP_NUM(ep) - 1)); uint8_t temp = *((volatile uint32_t *)((uint32_t)(NRF_USBD) + 0x804)); temp |= (1U << 1); *((volatile uint32_t *)((uint32_t)(NRF_USBD) + 0x804)) |= temp; (void)(*((volatile uint32_t *)((uint32_t)(NRF_USBD) + 0x804))); } else { *((volatile uint32_t *)((uint32_t)(NRF_USBD) + 0x800)) = 0x7B4; uint8_t temp = *((volatile uint32_t *)((uint32_t)(NRF_USBD) + 0x804)); temp |= (1U << 2); *((volatile uint32_t *)((uint32_t)(NRF_USBD) + 0x804)) |= temp; (void)(*((volatile uint32_t *)((uint32_t)(NRF_USBD) + 0x804))); } } if ((m_ep_dma_waiting | (~m_ep_ready)) & (1U << ep2bit(ep))) { /* Device -> Host */ m_ep_dma_waiting &= ~(1U << ep2bit(ep)); m_ep_ready |= 1U << ep2bit(ep); p_state->more_transactions = false; p_state->status = NRF_USBD_COMMON_EP_ABORTED; NRF_USBD_COMMON_EP_TRANSFER_EVENT(evt, ep, NRF_USBD_COMMON_EP_ABORTED); m_event_handler(&evt); } } irq_unlock(irq_lock_key); } void nrf_usbd_common_ep_abort(nrf_usbd_common_ep_t ep) { /* Only abort if there is no active DMA */ k_sem_take(&dma_available, K_FOREVER); usbd_ep_abort(ep); k_sem_give(&dma_available); /* This function was holding DMA semaphore and could potentially prevent * next DMA from executing. Fire IRQ handler to check if any DMA needs * to be started. */ usbd_int_rise(); } /** * @brief Abort all pending endpoints. * * Function aborts all pending endpoint transfers. */ static void usbd_ep_abort_all(void) { uint32_t ep_waiting = m_ep_dma_waiting | (m_ep_ready & NRF_USBD_COMMON_EPOUT_BIT_MASK); while (ep_waiting != 0) { uint8_t bitpos = NRF_CTZ(ep_waiting); if (!NRF_USBD_COMMON_EP_IS_ISO(bit2ep(bitpos))) { usbd_ep_abort(bit2ep(bitpos)); } ep_waiting &= ~(1U << bitpos); } m_ep_ready = (((1U << NRF_USBD_COMMON_EPIN_CNT) - 1U) << NRF_USBD_COMMON_EPIN_BITPOS_0); } /** * @brief Force the USBD interrupt into pending state. 
* * This function is used to force USBD interrupt to be processed right now. * It makes it possible to process all EasyDMA access on one thread priority level. */ static inline void usbd_int_rise(void) { NVIC_SetPendingIRQ(USBD_IRQn); } /** * @name USBD interrupt runtimes. * * Interrupt runtimes that would be vectorized using @ref m_isr. * @{ */ static void ev_usbreset_handler(void) { m_bus_suspend = false; m_last_setup_dir = NRF_USBD_COMMON_EPOUT0; const nrf_usbd_common_evt_t evt = {.type = NRF_USBD_COMMON_EVT_RESET}; m_event_handler(&evt); } static void nrf_usbd_dma_finished(nrf_usbd_common_ep_t ep) { /* DMA finished, track if total bytes transferred is even or odd */ m_dma_odd ^= usbd_ep_amount_get(ep) & 1; usbd_dma_pending_clear(); k_sem_give(&dma_available); usbd_ep_state_t *p_state = ep_state_access(ep); if (p_state->status == NRF_USBD_COMMON_EP_ABORTED) { /* Clear transfer information just in case */ m_ep_dma_waiting &= ~(1U << ep2bit(ep)); } else if (!p_state->more_transactions) { m_ep_dma_waiting &= ~(1U << ep2bit(ep)); if (NRF_USBD_COMMON_EP_IS_OUT(ep) || (ep == NRF_USBD_COMMON_EPIN8)) { /* Send event to the user - for an ISO IN or any OUT endpoint, * the whole transfer is finished in this moment */ NRF_USBD_COMMON_EP_TRANSFER_EVENT(evt, ep, NRF_USBD_COMMON_EP_OK); m_event_handler(&evt); } } else if (ep == NRF_USBD_COMMON_EPOUT0) { nrf_usbd_common_setup_data_clear(); } } static void ev_sof_handler(void) { nrf_usbd_common_evt_t evt = { NRF_USBD_COMMON_EVT_SOF, .data = {.sof = {.framecnt = (uint16_t)NRF_USBD->FRAMECNTR}}}; /* Process isochronous endpoints */ uint32_t iso_ready_mask = (1U << ep2bit(NRF_USBD_COMMON_EPIN8)); /* SIZE.ISOOUT is 0 only when no packet was received at all */ if (NRF_USBD->SIZE.ISOOUT) { iso_ready_mask |= (1U << ep2bit(NRF_USBD_COMMON_EPOUT8)); } m_ep_ready |= iso_ready_mask; m_event_handler(&evt); } /** * @brief React on data transfer finished. * * Auxiliary internal function. * @param ep Endpoint number. 
* @param bitpos Bit position for selected endpoint number. */ static void usbd_ep_data_handler(nrf_usbd_common_ep_t ep, uint8_t bitpos) { LOG_DBG("USBD event: EndpointData: %x", ep); /* Mark endpoint ready for next DMA access */ m_ep_ready |= (1U << bitpos); if (NRF_USBD_COMMON_EP_IS_IN(ep)) { /* IN endpoint (Device -> Host) */ if (0 == (m_ep_dma_waiting & (1U << bitpos))) { LOG_DBG("USBD event: EndpointData: In finished"); /* No more data to be send - transmission finished */ NRF_USBD_COMMON_EP_TRANSFER_EVENT(evt, ep, NRF_USBD_COMMON_EP_OK); m_event_handler(&evt); } } else { /* OUT endpoint (Host -> Device) */ if (0 == (m_ep_dma_waiting & (1U << bitpos))) { LOG_DBG("USBD event: EndpointData: Out waiting"); /* No buffer prepared - send event to the application */ NRF_USBD_COMMON_EP_TRANSFER_EVENT(evt, ep, NRF_USBD_COMMON_EP_WAITING); m_event_handler(&evt); } } } static void ev_setup_handler(void) { LOG_DBG("USBD event: Setup (rt:%.2x r:%.2x v:%.4x i:%.4x l:%u )", NRF_USBD->BMREQUESTTYPE, NRF_USBD->BREQUEST, NRF_USBD->WVALUEL | (NRF_USBD->WVALUEH << 8), NRF_USBD->WINDEXL | (NRF_USBD->WINDEXH << 8), NRF_USBD->WLENGTHL | (NRF_USBD->WLENGTHH << 8)); uint8_t bmRequestType = NRF_USBD->BMREQUESTTYPE; m_last_setup_dir = ((bmRequestType & USBD_BMREQUESTTYPE_DIRECTION_Msk) == (USBD_BMREQUESTTYPE_DIRECTION_HostToDevice << USBD_BMREQUESTTYPE_DIRECTION_Pos)) ? 
NRF_USBD_COMMON_EPOUT0 : NRF_USBD_COMMON_EPIN0; m_ep_dma_waiting &= ~((1U << ep2bit(NRF_USBD_COMMON_EPOUT0)) | (1U << ep2bit(NRF_USBD_COMMON_EPIN0))); m_ep_ready &= ~(1U << ep2bit(NRF_USBD_COMMON_EPOUT0)); m_ep_ready |= 1U << ep2bit(NRF_USBD_COMMON_EPIN0); const nrf_usbd_common_evt_t evt = {.type = NRF_USBD_COMMON_EVT_SETUP}; m_event_handler(&evt); } static void ev_usbevent_handler(void) { uint32_t event = NRF_USBD->EVENTCAUSE; /* Clear handled events */ NRF_USBD->EVENTCAUSE = event; if (event & USBD_EVENTCAUSE_ISOOUTCRC_Msk) { LOG_DBG("USBD event: ISOOUTCRC"); /* Currently no support */ } if (event & USBD_EVENTCAUSE_SUSPEND_Msk) { LOG_DBG("USBD event: SUSPEND"); m_bus_suspend = true; const nrf_usbd_common_evt_t evt = {.type = NRF_USBD_COMMON_EVT_SUSPEND}; m_event_handler(&evt); } if (event & USBD_EVENTCAUSE_RESUME_Msk) { LOG_DBG("USBD event: RESUME"); m_bus_suspend = false; const nrf_usbd_common_evt_t evt = {.type = NRF_USBD_COMMON_EVT_RESUME}; m_event_handler(&evt); } if (event & USBD_EVENTCAUSE_USBWUALLOWED_Msk) { LOG_DBG("USBD event: WUREQ (%s)", m_bus_suspend ? "In Suspend" : "Active"); if (m_bus_suspend) { __ASSERT_NO_MSG(!nrf_usbd_common_suspend_check()); m_bus_suspend = false; NRF_USBD->DPDMVALUE = USBD_DPDMVALUE_STATE_Resume << USBD_DPDMVALUE_STATE_Pos; NRF_USBD->TASKS_DPDMDRIVE = 1; const nrf_usbd_common_evt_t evt = {.type = NRF_USBD_COMMON_EVT_WUREQ}; m_event_handler(&evt); } } } static void ev_epdata_handler(uint32_t dataepstatus) { LOG_DBG("USBD event: EndpointEPStatus: %x", dataepstatus); /* All finished endpoint have to be marked as busy */ while (dataepstatus) { uint8_t bitpos = NRF_CTZ(dataepstatus); nrf_usbd_common_ep_t ep = bit2ep(bitpos); dataepstatus &= ~(1UL << bitpos); (void)(usbd_ep_data_handler(ep, bitpos)); } } /** * @brief Function to select the endpoint to start. * * Function that realizes algorithm to schedule right channel for EasyDMA transfer. * It gets a variable with flags for the endpoints currently requiring transfer. 
* * @param[in] req Bit flags for channels currently requiring transfer. * Bits 0...8 used for IN endpoints. * Bits 16...24 used for OUT endpoints. * @note * This function would be never called with 0 as a @c req argument. * @return The bit number of the endpoint that should be processed now. */ static uint8_t usbd_dma_scheduler_algorithm(uint32_t req) { /* Only prioritized scheduling mode is supported. */ return NRF_CTZ(req); } /** * @brief Get the size of isochronous endpoint. * * The size of isochronous endpoint is configurable. * This function returns the size of isochronous buffer taking into account * current configuration. * * @param[in] ep Endpoint number. * * @return The size of endpoint buffer. */ static inline size_t usbd_ep_iso_capacity(nrf_usbd_common_ep_t ep) { (void)ep; if (NRF_USBD->ISOSPLIT == USBD_ISOSPLIT_SPLIT_HalfIN << USBD_ISOSPLIT_SPLIT_Pos) { return NRF_USBD_COMMON_ISOSIZE / 2; } return NRF_USBD_COMMON_ISOSIZE; } /** * @brief Process all DMA requests. * * Function that have to be called from USBD interrupt handler. * It have to be called when all the interrupts connected with endpoints transfer * and DMA transfer are already handled. 
*/ static void usbd_dmareq_process(void) { if ((m_ep_dma_waiting & m_ep_ready) && (k_sem_take(&dma_available, K_NO_WAIT) == 0)) { uint32_t req; while (0 != (req = m_ep_dma_waiting & m_ep_ready)) { uint8_t pos; if (NRFX_USBD_CONFIG_DMASCHEDULER_ISO_BOOST && ((req & USBD_EPISO_BIT_MASK) != 0)) { pos = usbd_dma_scheduler_algorithm(req & USBD_EPISO_BIT_MASK); } else { pos = usbd_dma_scheduler_algorithm(req); } nrf_usbd_common_ep_t ep = bit2ep(pos); usbd_ep_state_t *p_state = ep_state_access(ep); nrf_usbd_common_ep_transfer_t transfer; bool continue_transfer; __ASSERT_NO_MSG(p_state->more_transactions); if (NRF_USBD_COMMON_EP_IS_IN(ep)) { /* Device -> Host */ continue_transfer = nrf_usbd_common_feeder( &transfer, &p_state->transfer_state, p_state->max_packet_size); } else { /* Host -> Device */ const size_t rx_size = nrf_usbd_common_epout_size_get(ep); continue_transfer = nrf_usbd_common_consumer( &transfer, &p_state->transfer_state, p_state->max_packet_size, rx_size); if (transfer.p_data.rx == NULL) { /* Dropping transfer - allow processing */ __ASSERT_NO_MSG(transfer.size == 0); } else if (transfer.size < rx_size) { LOG_DBG("Endpoint %x overload (r: %u, e: %u)", ep, rx_size, transfer.size); p_state->status = NRF_USBD_COMMON_EP_OVERLOAD; m_ep_dma_waiting &= ~(1U << pos); NRF_USBD_COMMON_EP_TRANSFER_EVENT(evt, ep, NRF_USBD_COMMON_EP_OVERLOAD); m_event_handler(&evt); /* This endpoint will not be transmitted now, repeat the * loop */ continue; } else { /* Nothing to do - only check integrity if assertions are * enabled */ __ASSERT_NO_MSG(transfer.size == rx_size); } } if (!continue_transfer) { p_state->more_transactions = false; } usbd_dma_pending_set(); m_ep_ready &= ~(1U << pos); if (NRF_USBD_COMMON_ISO_DEBUG || (!NRF_USBD_COMMON_EP_IS_ISO(ep))) { LOG_DBG("USB DMA process: Starting transfer on EP: %x, size: %u", ep, transfer.size); } /* Update number of currently transferred bytes */ p_state->transfer_cnt += transfer.size; /* Start transfer to the endpoint buffer */ 
dma_ep = ep; usbd_ep_dma_start(ep, transfer.p_data.addr, transfer.size); /* Transfer started - exit the loop */ return; } k_sem_give(&dma_available); } else { if (NRF_USBD_COMMON_DMAREQ_PROCESS_DEBUG) { LOG_DBG("USB DMA process - EasyDMA busy"); } } } /** * @brief Begin errata 171. */ static inline void usbd_errata_171_begin(void) { unsigned int irq_lock_key = irq_lock(); if (*((volatile uint32_t *)(0x4006EC00)) == 0x00000000) { *((volatile uint32_t *)(0x4006EC00)) = 0x00009375; *((volatile uint32_t *)(0x4006EC14)) = 0x000000C0; *((volatile uint32_t *)(0x4006EC00)) = 0x00009375; } else { *((volatile uint32_t *)(0x4006EC14)) = 0x000000C0; } irq_unlock(irq_lock_key); } /** * @brief End errata 171. */ static inline void usbd_errata_171_end(void) { unsigned int irq_lock_key = irq_lock(); if (*((volatile uint32_t *)(0x4006EC00)) == 0x00000000) { *((volatile uint32_t *)(0x4006EC00)) = 0x00009375; *((volatile uint32_t *)(0x4006EC14)) = 0x00000000; *((volatile uint32_t *)(0x4006EC00)) = 0x00009375; } else { *((volatile uint32_t *)(0x4006EC14)) = 0x00000000; } irq_unlock(irq_lock_key); } /** * @brief Begin erratas 187 and 211. */ static inline void usbd_errata_187_211_begin(void) { unsigned int irq_lock_key = irq_lock(); if (*((volatile uint32_t *)(0x4006EC00)) == 0x00000000) { *((volatile uint32_t *)(0x4006EC00)) = 0x00009375; *((volatile uint32_t *)(0x4006ED14)) = 0x00000003; *((volatile uint32_t *)(0x4006EC00)) = 0x00009375; } else { *((volatile uint32_t *)(0x4006ED14)) = 0x00000003; } irq_unlock(irq_lock_key); } /** * @brief End erratas 187 and 211. 
*/ static inline void usbd_errata_187_211_end(void) { unsigned int irq_lock_key = irq_lock(); if (*((volatile uint32_t *)(0x4006EC00)) == 0x00000000) { *((volatile uint32_t *)(0x4006EC00)) = 0x00009375; *((volatile uint32_t *)(0x4006ED14)) = 0x00000000; *((volatile uint32_t *)(0x4006EC00)) = 0x00009375; } else { *((volatile uint32_t *)(0x4006ED14)) = 0x00000000; } irq_unlock(irq_lock_key); } /** * @brief Enable USBD peripheral. */ static void usbd_enable(void) { if (nrf_usbd_common_errata_187()) { usbd_errata_187_211_begin(); } if (nrf_usbd_common_errata_171()) { usbd_errata_171_begin(); } /* Enable the peripheral */ NRF_USBD->ENABLE = 1; /* Waiting for peripheral to enable, this should take a few us */ while ((NRF_USBD->EVENTCAUSE & USBD_EVENTCAUSE_READY_Msk) == 0) { } NRF_USBD->EVENTCAUSE = USBD_EVENTCAUSE_READY_Msk; if (nrf_usbd_common_errata_171()) { usbd_errata_171_end(); } if (nrf_usbd_common_errata_187()) { usbd_errata_187_211_end(); } } /** @} */ /** * @name Interrupt handlers * * @{ */ void nrf_usbd_common_irq_handler(void) { volatile uint32_t *dma_endevent; uint32_t epdatastatus = 0; /* Always check and clear SOF but call handler only if SOF interrupt * is actually enabled. */ if (NRF_USBD->EVENTS_SOF) { NRF_USBD->EVENTS_SOF = 0; if (NRF_USBD->INTENSET & USBD_INTEN_SOF_Msk) { ev_sof_handler(); } } /* Clear EPDATA event and only then get and clear EPDATASTATUS to make * sure we don't miss any event. */ if (NRF_USBD->EVENTS_EPDATA) { NRF_USBD->EVENTS_EPDATA = 0; epdatastatus = NRF_USBD->EPDATASTATUS; NRF_USBD->EPDATASTATUS = epdatastatus; } /* Use common variable to store EP0DATADONE processing needed flag */ if (NRF_USBD->EVENTS_EP0DATADONE) { NRF_USBD->EVENTS_EP0DATADONE = 0; epdatastatus |= BIT(ep2bit(m_last_setup_dir)); } /* Check DMA end event only for last enabled DMA channel. Other channels * cannot be active and there's no harm in rechecking the event multiple * times (it is not a problem to check it even if DMA is not active). 
* * It is important to check DMA and handle DMA finished event before * handling acknowledged data transfer bits (epdatastatus) to avoid * a race condition between interrupt handler and host IN token. */ dma_endevent = usbd_ep_to_endevent(dma_ep); if (*dma_endevent) { *dma_endevent = 0; nrf_usbd_dma_finished(dma_ep); } /* Process acknowledged transfers so we can prepare next DMA (if any) */ ev_epdata_handler(epdatastatus); if (NRF_USBD->EVENTS_USBRESET) { NRF_USBD->EVENTS_USBRESET = 0; ev_usbreset_handler(); } if (NRF_USBD->EVENTS_USBEVENT) { NRF_USBD->EVENTS_USBEVENT = 0; ev_usbevent_handler(); } /* Handle SETUP only if there is no active DMA on EP0 */ if (unlikely(NRF_USBD->EVENTS_EP0SETUP) && (k_sem_count_get(&dma_available) || (dma_ep != NRF_USBD_COMMON_EPIN0 && dma_ep != NRF_USBD_COMMON_EPOUT0))) { NRF_USBD->EVENTS_EP0SETUP = 0; ev_setup_handler(); } usbd_dmareq_process(); } /** @} */ /** @} */ nrfx_err_t nrf_usbd_common_init(nrf_usbd_common_event_handler_t event_handler) { __ASSERT_NO_MSG(event_handler); if (m_drv_state != NRFX_DRV_STATE_UNINITIALIZED) { return NRFX_ERROR_INVALID_STATE; } m_event_handler = event_handler; m_drv_state = NRFX_DRV_STATE_INITIALIZED; uint8_t n; for (n = 0; n < NRF_USBD_COMMON_EPIN_CNT; ++n) { nrf_usbd_common_ep_t ep = NRF_USBD_COMMON_EPIN(n); nrf_usbd_common_ep_max_packet_size_set(ep, NRF_USBD_COMMON_EP_IS_ISO(ep) ? (NRF_USBD_COMMON_ISOSIZE / 2) : NRF_USBD_COMMON_EPSIZE); usbd_ep_state_t *p_state = ep_state_access(ep); p_state->status = NRF_USBD_COMMON_EP_OK; p_state->more_transactions = false; p_state->transfer_cnt = 0; } for (n = 0; n < NRF_USBD_COMMON_EPOUT_CNT; ++n) { nrf_usbd_common_ep_t ep = NRF_USBD_COMMON_EPOUT(n); nrf_usbd_common_ep_max_packet_size_set(ep, NRF_USBD_COMMON_EP_IS_ISO(ep) ? 
(NRF_USBD_COMMON_ISOSIZE / 2) : NRF_USBD_COMMON_EPSIZE); usbd_ep_state_t *p_state = ep_state_access(ep); p_state->status = NRF_USBD_COMMON_EP_OK; p_state->more_transactions = false; p_state->transfer_cnt = 0; } return NRFX_SUCCESS; } void nrf_usbd_common_uninit(void) { __ASSERT_NO_MSG(m_drv_state == NRFX_DRV_STATE_INITIALIZED); m_event_handler = NULL; m_drv_state = NRFX_DRV_STATE_UNINITIALIZED; } void nrf_usbd_common_enable(void) { __ASSERT_NO_MSG(m_drv_state == NRFX_DRV_STATE_INITIALIZED); /* Prepare for READY event receiving */ NRF_USBD->EVENTCAUSE = USBD_EVENTCAUSE_READY_Msk; usbd_enable(); if (nrf_usbd_common_errata_223() && m_first_enable) { NRF_USBD->ENABLE = 0; usbd_enable(); m_first_enable = false; } #if NRF_USBD_COMMON_USE_WORKAROUND_FOR_ANOMALY_211 if (nrf_usbd_common_errata_187() || nrf_usbd_common_errata_211()) #else if (nrf_usbd_common_errata_187()) #endif { usbd_errata_187_211_begin(); } if (nrf_usbd_common_errata_166()) { *((volatile uint32_t *)((uint32_t)(NRF_USBD) + 0x800)) = 0x7E3; *((volatile uint32_t *)((uint32_t)(NRF_USBD) + 0x804)) = 0x40; __ISB(); __DSB(); } NRF_USBD->ISOSPLIT = USBD_ISOSPLIT_SPLIT_HalfIN << USBD_ISOSPLIT_SPLIT_Pos; if (IS_ENABLED(CONFIG_NRF_USBD_ISO_IN_ZLP)) { NRF_USBD->ISOINCONFIG = USBD_ISOINCONFIG_RESPONSE_ZeroData << USBD_ISOINCONFIG_RESPONSE_Pos; } else { NRF_USBD->ISOINCONFIG = USBD_ISOINCONFIG_RESPONSE_NoResp << USBD_ISOINCONFIG_RESPONSE_Pos; } m_ep_ready = (((1U << NRF_USBD_COMMON_EPIN_CNT) - 1U) << NRF_USBD_COMMON_EPIN_BITPOS_0); m_ep_dma_waiting = 0; m_dma_odd = 0; __ASSERT_NO_MSG(k_sem_count_get(&dma_available) == 1); usbd_dma_pending_clear(); m_last_setup_dir = NRF_USBD_COMMON_EPOUT0; m_drv_state = NRFX_DRV_STATE_POWERED_ON; #if NRF_USBD_COMMON_USE_WORKAROUND_FOR_ANOMALY_211 if (nrf_usbd_common_errata_187() && !nrf_usbd_common_errata_211()) #else if (nrf_usbd_common_errata_187()) #endif { usbd_errata_187_211_end(); } } void nrf_usbd_common_disable(void) { __ASSERT_NO_MSG(m_drv_state != 
NRFX_DRV_STATE_UNINITIALIZED); /* Make sure DMA is not active */ k_sem_take(&dma_available, K_FOREVER); /* Stop just in case */ nrf_usbd_common_stop(); /* Disable all parts */ if (m_dma_odd) { /* Prevent invalid bus request after next USBD enable by ensuring * that total number of bytes transferred by DMA is even. */ NRF_USBD->EVENTS_ENDEPIN[0] = 0; usbd_ep_dma_start(NRF_USBD_COMMON_EPIN0, (uint32_t)&m_dma_odd, 1); while (!NRF_USBD->EVENTS_ENDEPIN[0]) { } NRF_USBD->EVENTS_ENDEPIN[0] = 0; m_dma_odd = 0; } NRF_USBD->ENABLE = 0; usbd_dma_pending_clear(); k_sem_give(&dma_available); m_drv_state = NRFX_DRV_STATE_INITIALIZED; #if NRF_USBD_COMMON_USE_WORKAROUND_FOR_ANOMALY_211 if (nrf_usbd_common_errata_211()) { usbd_errata_187_211_end(); } #endif } void nrf_usbd_common_start(bool enable_sof) { __ASSERT_NO_MSG(m_drv_state == NRFX_DRV_STATE_POWERED_ON); m_bus_suspend = false; uint32_t int_mask = USBD_INTEN_USBRESET_Msk | USBD_INTEN_ENDEPIN0_Msk | USBD_INTEN_ENDEPIN1_Msk | USBD_INTEN_ENDEPIN2_Msk | USBD_INTEN_ENDEPIN3_Msk | USBD_INTEN_ENDEPIN4_Msk | USBD_INTEN_ENDEPIN5_Msk | USBD_INTEN_ENDEPIN6_Msk | USBD_INTEN_ENDEPIN7_Msk | USBD_INTEN_EP0DATADONE_Msk | USBD_INTEN_ENDISOIN_Msk | USBD_INTEN_ENDEPOUT0_Msk | USBD_INTEN_ENDEPOUT1_Msk | USBD_INTEN_ENDEPOUT2_Msk | USBD_INTEN_ENDEPOUT3_Msk | USBD_INTEN_ENDEPOUT4_Msk | USBD_INTEN_ENDEPOUT5_Msk | USBD_INTEN_ENDEPOUT6_Msk | USBD_INTEN_ENDEPOUT7_Msk | USBD_INTEN_ENDISOOUT_Msk | USBD_INTEN_USBEVENT_Msk | USBD_INTEN_EP0SETUP_Msk | USBD_INTEN_EPDATA_Msk; if (enable_sof) { int_mask |= USBD_INTEN_SOF_Msk; } /* Enable all required interrupts */ NRF_USBD->INTEN = int_mask; /* Enable interrupt globally */ irq_enable(USBD_IRQn); /* Enable pullups */ NRF_USBD->USBPULLUP = 1; } static void nrf_usbd_common_stop(void) { __ASSERT_NO_MSG(m_drv_state == NRFX_DRV_STATE_POWERED_ON); /* Clear interrupt */ NVIC_ClearPendingIRQ(USBD_IRQn); if (irq_is_enabled(USBD_IRQn)) { /* Abort transfers */ usbd_ep_abort_all(); /* Disable pullups */ 
NRF_USBD->USBPULLUP = 0; /* Disable interrupt globally */ irq_disable(USBD_IRQn); /* Disable all interrupts */ NRF_USBD->INTEN = 0; } } bool nrf_usbd_common_is_initialized(void) { return (m_drv_state >= NRFX_DRV_STATE_INITIALIZED); } bool nrf_usbd_common_is_enabled(void) { return (m_drv_state >= NRFX_DRV_STATE_POWERED_ON); } bool nrf_usbd_common_is_started(void) { return (nrf_usbd_common_is_enabled() && irq_is_enabled(USBD_IRQn)); } bool nrf_usbd_common_suspend(void) { bool suspended = false; unsigned int irq_lock_key = irq_lock(); if (m_bus_suspend) { if (!(NRF_USBD->EVENTCAUSE & USBD_EVENTCAUSE_RESUME_Msk)) { NRF_USBD->LOWPOWER = USBD_LOWPOWER_LOWPOWER_LowPower << USBD_LOWPOWER_LOWPOWER_Pos; (void)NRF_USBD->LOWPOWER; if (NRF_USBD->EVENTCAUSE & USBD_EVENTCAUSE_RESUME_Msk) { NRF_USBD->LOWPOWER = USBD_LOWPOWER_LOWPOWER_ForceNormal << USBD_LOWPOWER_LOWPOWER_Pos; } else { suspended = true; } } } irq_unlock(irq_lock_key); return suspended; } bool nrf_usbd_common_wakeup_req(void) { bool started = false; unsigned int irq_lock_key = irq_lock(); if (m_bus_suspend && nrf_usbd_common_suspend_check()) { NRF_USBD->LOWPOWER = USBD_LOWPOWER_LOWPOWER_ForceNormal << USBD_LOWPOWER_LOWPOWER_Pos; started = true; if (nrf_usbd_common_errata_171()) { if (*((volatile uint32_t *)(0x4006EC00)) == 0x00000000) { *((volatile uint32_t *)(0x4006EC00)) = 0x00009375; *((volatile uint32_t *)(0x4006EC14)) = 0x000000C0; *((volatile uint32_t *)(0x4006EC00)) = 0x00009375; } else { *((volatile uint32_t *)(0x4006EC14)) = 0x000000C0; } } } irq_unlock(irq_lock_key); return started; } bool nrf_usbd_common_suspend_check(void) { return NRF_USBD->LOWPOWER != (USBD_LOWPOWER_LOWPOWER_ForceNormal << USBD_LOWPOWER_LOWPOWER_Pos); } bool nrf_usbd_common_bus_suspend_check(void) { return m_bus_suspend; } void nrf_usbd_common_force_bus_wakeup(void) { m_bus_suspend = false; } void nrf_usbd_common_ep_max_packet_size_set(nrf_usbd_common_ep_t ep, uint16_t size) { /* Only the power of 2 size allowed for Control Endpoints 
*/ __ASSERT_NO_MSG((((size & (size - 1)) == 0) || (NRF_USBD_COMMON_EP_NUM(ep) != 0))); /* Only non zero size allowed for Control Endpoints */ __ASSERT_NO_MSG((size != 0) || (NRF_USBD_COMMON_EP_NUM(ep) != 0)); /* Packet size cannot be higher than maximum buffer size */ __ASSERT_NO_MSG((NRF_USBD_COMMON_EP_IS_ISO(ep) && (size <= usbd_ep_iso_capacity(ep))) || (!NRF_USBD_COMMON_EP_IS_ISO(ep) && (size <= NRF_USBD_COMMON_EPSIZE))); usbd_ep_state_t *p_state = ep_state_access(ep); p_state->max_packet_size = size; } uint16_t nrf_usbd_common_ep_max_packet_size_get(nrf_usbd_common_ep_t ep) { usbd_ep_state_t const *p_state = ep_state_access(ep); return p_state->max_packet_size; } bool nrf_usbd_common_ep_enable_check(nrf_usbd_common_ep_t ep) { int ep_in = NRF_USBD_COMMON_EP_IS_IN(ep); int ep_num = NRF_USBD_COMMON_EP_NUM(ep); NRF_USBD_COMMON_ASSERT_EP_VALID(ep); return (ep_in ? NRF_USBD->EPINEN : NRF_USBD->EPOUTEN) & BIT(ep_num); } void nrf_usbd_common_ep_enable(nrf_usbd_common_ep_t ep) { int ep_in = NRF_USBD_COMMON_EP_IS_IN(ep); int ep_num = NRF_USBD_COMMON_EP_NUM(ep); if (nrf_usbd_common_ep_enable_check(ep)) { return; } if (ep_in) { NRF_USBD->EPINEN |= BIT(ep_num); } else { NRF_USBD->EPOUTEN |= BIT(ep_num); } if (ep >= NRF_USBD_COMMON_EPOUT1 && ep <= NRF_USBD_COMMON_EPOUT7) { unsigned int irq_lock_key = irq_lock(); nrf_usbd_common_transfer_out_drop(ep); m_ep_dma_waiting &= ~(1U << ep2bit(ep)); irq_unlock(irq_lock_key); } } void nrf_usbd_common_ep_disable(nrf_usbd_common_ep_t ep) { int ep_in = NRF_USBD_COMMON_EP_IS_IN(ep); int ep_num = NRF_USBD_COMMON_EP_NUM(ep); /* Only disable endpoint if there is no active DMA */ k_sem_take(&dma_available, K_FOREVER); usbd_ep_abort(ep); if (ep_in) { NRF_USBD->EPINEN &= ~BIT(ep_num); } else { NRF_USBD->EPOUTEN &= ~BIT(ep_num); } k_sem_give(&dma_available); /* This function was holding DMA semaphore and could potentially prevent * next DMA from executing. Fire IRQ handler to check if any DMA needs * to be started. 
*/ usbd_int_rise(); } nrfx_err_t nrf_usbd_common_ep_transfer(nrf_usbd_common_ep_t ep, nrf_usbd_common_transfer_t const *p_transfer) { nrfx_err_t ret; const uint8_t ep_bitpos = ep2bit(ep); unsigned int irq_lock_key = irq_lock(); __ASSERT_NO_MSG(p_transfer != NULL); /* Setup data transaction can go only in one direction at a time */ if ((NRF_USBD_COMMON_EP_NUM(ep) == 0) && (ep != m_last_setup_dir)) { ret = NRFX_ERROR_INVALID_ADDR; if (NRF_USBD_COMMON_FAILED_TRANSFERS_DEBUG && (NRF_USBD_COMMON_ISO_DEBUG || (!NRF_USBD_COMMON_EP_IS_ISO(ep)))) { LOG_DBG("Transfer failed: Invalid EPr\n"); } } else if ((m_ep_dma_waiting | ((~m_ep_ready) & NRF_USBD_COMMON_EPIN_BIT_MASK)) & (1U << ep_bitpos)) { /* IN (Device -> Host) transfer has to be transmitted out to allow new transmission */ ret = NRFX_ERROR_BUSY; if (NRF_USBD_COMMON_FAILED_TRANSFERS_DEBUG) { LOG_DBG("Transfer failed: EP is busy"); } } else { usbd_ep_state_t *p_state = ep_state_access(ep); __ASSERT_NO_MSG(NRF_USBD_COMMON_EP_IS_IN(ep) || (p_transfer->p_data.rx == NULL) || (nrfx_is_in_ram(p_transfer->p_data.rx))); p_state->more_transactions = true; p_state->transfer_state = *p_transfer; p_state->transfer_cnt = 0; p_state->status = NRF_USBD_COMMON_EP_OK; m_ep_dma_waiting |= 1U << ep_bitpos; ret = NRFX_SUCCESS; usbd_int_rise(); } irq_unlock(irq_lock_key); return ret; } nrf_usbd_common_ep_status_t nrf_usbd_common_ep_status_get(nrf_usbd_common_ep_t ep, size_t *p_size) { nrf_usbd_common_ep_status_t ret; usbd_ep_state_t const *p_state = ep_state_access(ep); unsigned int irq_lock_key = irq_lock(); *p_size = p_state->transfer_cnt; ret = (!p_state->more_transactions) ? 
p_state->status : NRF_USBD_COMMON_EP_BUSY; irq_unlock(irq_lock_key); return ret; } size_t nrf_usbd_common_epout_size_get(nrf_usbd_common_ep_t ep) { if (NRF_USBD_COMMON_EP_IS_ISO(ep)) { size_t size = NRF_USBD->SIZE.ISOOUT; if ((size & USBD_SIZE_ISOOUT_ZERO_Msk) == (USBD_SIZE_ISOOUT_ZERO_ZeroData << USBD_SIZE_ISOOUT_ZERO_Pos)) { size = 0; } return size; } return NRF_USBD->SIZE.EPOUT[NRF_USBD_COMMON_EP_NUM(ep)]; } bool nrf_usbd_common_ep_is_busy(nrf_usbd_common_ep_t ep) { return (0 != ((m_ep_dma_waiting | ((~m_ep_ready) & NRF_USBD_COMMON_EPIN_BIT_MASK)) & (1U << ep2bit(ep)))); } void nrf_usbd_common_ep_stall(nrf_usbd_common_ep_t ep) { __ASSERT_NO_MSG(!NRF_USBD_COMMON_EP_IS_ISO(ep)); LOG_DBG("USB: EP %x stalled.", ep); NRF_USBD->EPSTALL = (USBD_EPSTALL_STALL_Stall << USBD_EPSTALL_STALL_Pos) | ep; } void nrf_usbd_common_ep_stall_clear(nrf_usbd_common_ep_t ep) { __ASSERT_NO_MSG(!NRF_USBD_COMMON_EP_IS_ISO(ep)); if (NRF_USBD_COMMON_EP_IS_OUT(ep) && nrf_usbd_common_ep_stall_check(ep)) { nrf_usbd_common_transfer_out_drop(ep); } NRF_USBD->EPSTALL = (USBD_EPSTALL_STALL_UnStall << USBD_EPSTALL_STALL_Pos) | ep; } bool nrf_usbd_common_ep_stall_check(nrf_usbd_common_ep_t ep) { int ep_in = NRF_USBD_COMMON_EP_IS_IN(ep); int ep_num = NRF_USBD_COMMON_EP_NUM(ep); if (!NRF_USBD_COMMON_EP_IS_ISO(ep_num)) { if (ep_in) { return NRF_USBD->HALTED.EPIN[ep_num]; } else { return NRF_USBD->HALTED.EPOUT[ep_num]; } } return false; } void nrf_usbd_common_ep_dtoggle_clear(nrf_usbd_common_ep_t ep) { __ASSERT_NO_MSG(!NRF_USBD_COMMON_EP_IS_ISO(ep)); NRF_USBD->DTOGGLE = ep | (USBD_DTOGGLE_VALUE_Nop << USBD_DTOGGLE_VALUE_Pos); NRF_USBD->DTOGGLE = ep | (USBD_DTOGGLE_VALUE_Data0 << USBD_DTOGGLE_VALUE_Pos); } void nrf_usbd_common_setup_get(nrf_usbd_common_setup_t *p_setup) { memset(p_setup, 0, sizeof(nrf_usbd_common_setup_t)); p_setup->bmRequestType = NRF_USBD->BMREQUESTTYPE; p_setup->bRequest = NRF_USBD->BREQUEST; p_setup->wValue = NRF_USBD->WVALUEL | (NRF_USBD->WVALUEH << 8); p_setup->wIndex = 
NRF_USBD->WINDEXL | (NRF_USBD->WINDEXH << 8); p_setup->wLength = NRF_USBD->WLENGTHL | (NRF_USBD->WLENGTHH << 8); } void nrf_usbd_common_setup_data_clear(void) { NRF_USBD->TASKS_EP0RCVOUT = 1; } void nrf_usbd_common_setup_clear(void) { LOG_DBG(">> ep0status >>"); NRF_USBD->TASKS_EP0STATUS = 1; } void nrf_usbd_common_setup_stall(void) { LOG_DBG("Setup stalled."); NRF_USBD->TASKS_EP0STALL = 1; } nrf_usbd_common_ep_t nrf_usbd_common_last_setup_dir_get(void) { return m_last_setup_dir; } void nrf_usbd_common_transfer_out_drop(nrf_usbd_common_ep_t ep) { unsigned int irq_lock_key = irq_lock(); __ASSERT_NO_MSG(NRF_USBD_COMMON_EP_IS_OUT(ep)); m_ep_ready &= ~(1U << ep2bit(ep)); if (!NRF_USBD_COMMON_EP_IS_ISO(ep)) { NRF_USBD->SIZE.EPOUT[NRF_USBD_COMMON_EP_NUM(ep)] = 0; } irq_unlock(irq_lock_key); } /** @endcond */ ```
/content/code_sandbox/drivers/usb/common/nrf_usbd_common/nrf_usbd_common.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
13,759
```unknown # # # menuconfig UVB bool prompt "USB virtual bus" help USB virtual bus service intended for use by virtual UDC and UHC controllers. if UVB config UVB_MAX_MESSAGES int "Maximum number of messages allocatable" range 4 256 default 16 help Maximum number of messages allocatable. module = UVB module-str = USB virtual bus service source "${ZEPHYR_BASE}/subsys/logging/Kconfig.template.log_config" endif # UVB ```
/content/code_sandbox/drivers/usb/uvb/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
113
```linker script ITERABLE_SECTION_RAM(uvb_node, Z_LINK_ITERABLE_SUBALIGN) ```
/content/code_sandbox/drivers/usb/uvb/uvb.ld
linker script
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
16
```c /* * */ #include <stdio.h> #include <zephyr/kernel.h> #include <zephyr/init.h> #include <zephyr/sys/dlist.h> #include <zephyr/sys/iterable_sections.h> #include "uvb.h" #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(uvb, CONFIG_UVB_LOG_LEVEL); static struct k_fifo uvb_queue; static void uvb_work_handler(struct k_work *work); static K_WORK_DEFINE(uvb_work, uvb_work_handler); enum uvb_msg_type { UVB_MSG_ADVERT, UVB_MSG_TO_HOST, UVB_MSG_SUBSCRIBE, UVB_MSG_UNSUBSCRIBE, }; struct uvb_msg { sys_snode_t node; enum uvb_msg_type type; const struct uvb_node *source; union { struct uvb_node *sink; struct { enum uvb_event_type type; const void *data; } event; }; }; K_MEM_SLAB_DEFINE_STATIC(uvb_msg_slab, sizeof(struct uvb_msg), CONFIG_UVB_MAX_MESSAGES, sizeof(void *)); K_MEM_SLAB_DEFINE_STATIC(uvb_pkt_slab, sizeof(struct uvb_packet), CONFIG_UVB_MAX_MESSAGES, sizeof(void *)); struct uvb_packet *uvb_alloc_pkt(const enum uvb_request request, const uint8_t addr, const uint8_t ep, uint8_t *const data, const size_t length) { static uint32_t seq; struct uvb_packet *pkt; if (k_mem_slab_alloc(&uvb_pkt_slab, (void **)&pkt, K_NO_WAIT)) { LOG_ERR("Failed to allocate packet memory"); return NULL; } seq++; pkt->seq = seq; pkt->request = request; pkt->reply = UVB_REPLY_TIMEOUT; pkt->addr = addr; pkt->ep = ep; pkt->data = data; pkt->length = length; return pkt; } void uvb_free_pkt(struct uvb_packet *const pkt) { k_mem_slab_free(&uvb_pkt_slab, (void *)pkt); } static ALWAYS_INLINE int submit_new_work(struct uvb_msg *const msg) { k_fifo_put(&uvb_queue, msg); return k_work_submit(&uvb_work); } static struct uvb_msg *uvb_alloc_msg(const struct uvb_node *const node) { struct uvb_msg *msg; if (k_mem_slab_alloc(&uvb_msg_slab, (void **)&msg, K_NO_WAIT)) { LOG_ERR("Failed to allocate msg memory"); return NULL; } memset(msg, 0, sizeof(struct uvb_msg)); msg->source = node; return msg; } int uvb_advert(const struct uvb_node *const host_node, const enum uvb_event_type type, const struct uvb_packet 
*const pkt) { struct uvb_msg *msg; int err; msg = uvb_alloc_msg(host_node); if (msg == NULL) { return -ENOMEM; } msg->type = UVB_MSG_ADVERT; msg->event.type = type; msg->event.data = (void *)pkt; err = submit_new_work(msg); return err < 0 ? err : 0; } int uvb_to_host(const struct uvb_node *const dev_node, const enum uvb_event_type type, const struct uvb_packet *const pkt) { struct uvb_msg *msg; int err; msg = uvb_alloc_msg(dev_node); if (msg == NULL) { return -ENOMEM; } msg->type = UVB_MSG_TO_HOST; msg->event.type = type; msg->event.data = (void *)pkt; err = submit_new_work(msg); return err < 0 ? err : 0; } static int subscribe_msg(const struct uvb_node *const host_node, struct uvb_node *const dev_node, const enum uvb_msg_type type) { struct uvb_msg *msg; int err; msg = uvb_alloc_msg(host_node); if (msg == NULL) { return -ENOMEM; } msg->type = type; msg->sink = dev_node; err = submit_new_work(msg); return err < 0 ? err : 0; } static int unsubscribe_msg(const struct uvb_node *const host_node, struct uvb_node *const dev_node) { struct uvb_msg *msg; int err; msg = uvb_alloc_msg(host_node); if (msg == NULL) { return -ENOMEM; } msg->type = UVB_MSG_UNSUBSCRIBE; msg->sink = dev_node; err = submit_new_work(msg); return err < 0 ? 
err : 0; } static struct uvb_node *find_host_node(const char *name) { if (name == NULL || name[0] == '\0') { return NULL; } STRUCT_SECTION_FOREACH(uvb_node, host) { if (strcmp(name, host->name) == 0) { return host; } } return NULL; } int uvb_subscribe(const char *name, struct uvb_node *const dev_node) { const struct uvb_node *host_node; host_node = find_host_node(name); if (host_node == NULL) { return -ENOENT; } return subscribe_msg(host_node, dev_node, UVB_MSG_SUBSCRIBE); } int uvb_unsubscribe(const char *name, struct uvb_node *const dev_node) { const struct uvb_node *host_node; host_node = find_host_node(name); if (host_node == NULL) { return -ENOENT; } return unsubscribe_msg(host_node, dev_node); } static ALWAYS_INLINE void handle_msg_subscribe(struct uvb_msg *const msg) { struct uvb_node *host_node; struct uvb_node *dev_node; host_node = (struct uvb_node *)msg->source; dev_node = msg->sink; if (atomic_get(&dev_node->subscribed)) { LOG_ERR("%p already subscribed", dev_node); return; } LOG_DBG("%p -> %p", dev_node, host_node); sys_dnode_init(&dev_node->node); if (msg->type == UVB_MSG_SUBSCRIBE) { sys_dlist_prepend(&host_node->list, &dev_node->node); } atomic_inc(&dev_node->subscribed); } static ALWAYS_INLINE void handle_msg_unsubscribe(struct uvb_msg *const msg) { struct uvb_node *dev_node; atomic_t tmp; dev_node = msg->sink; tmp = atomic_clear(&dev_node->subscribed); if (tmp) { LOG_DBG("unsubscribe %p", dev_node); sys_dlist_remove(&dev_node->node); } else { LOG_ERR("%p is not subscribed", dev_node); } } static ALWAYS_INLINE void handle_msg_event(struct uvb_msg *const msg) { struct uvb_node *host_node; struct uvb_node *dev_node; host_node = (struct uvb_node *)msg->source; SYS_DLIST_FOR_EACH_CONTAINER(&host_node->list, dev_node, node) { LOG_DBG("%p from %p to %p", msg, host_node, dev_node); if (dev_node->notify) { dev_node->notify(dev_node->priv, msg->event.type, msg->event.data); } } } static ALWAYS_INLINE void handle_msg_to_host(struct uvb_msg *const msg) { 
struct uvb_node *host_node; struct uvb_node *source; source = (struct uvb_node *)msg->source; if (source->head) { LOG_ERR("Host may not reply"); } SYS_DLIST_FOR_EACH_CONTAINER(&source->node, host_node, node) { LOG_DBG("%p from %p to %p", msg, source, host_node); if (host_node->head && host_node->notify) { host_node->notify(host_node->priv, msg->event.type, msg->event.data); } } } static void uvb_work_handler(struct k_work *work) { struct uvb_msg *msg; msg = k_fifo_get(&uvb_queue, K_NO_WAIT); if (msg == NULL) { return; } LOG_DBG("Message %p %s", msg->source, msg->source->name); switch (msg->type) { case UVB_MSG_SUBSCRIBE: handle_msg_subscribe(msg); break; case UVB_MSG_UNSUBSCRIBE: handle_msg_unsubscribe(msg); break; case UVB_MSG_ADVERT: handle_msg_event(msg); break; case UVB_MSG_TO_HOST: handle_msg_to_host(msg); break; default: break; } k_mem_slab_free(&uvb_msg_slab, (void *)msg); if (!k_fifo_is_empty(&uvb_queue)) { (void)k_work_submit(work); } } static int uvb_init(void) { STRUCT_SECTION_FOREACH(uvb_node, host) { LOG_DBG("Host %p - %s", host, host->name); sys_dlist_init(&host->list); } k_fifo_init(&uvb_queue); return 0; } SYS_INIT(uvb_init, PRE_KERNEL_2, CONFIG_KERNEL_INIT_PRIORITY_DEVICE); ```
/content/code_sandbox/drivers/usb/uvb/uvb.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,967
```objective-c /* * */ /** * @file * @brief USB virtual bus service */ #ifndef ZEPHYR_INCLUDE_UVB #define ZEPHYR_INCLUDE_UVB #include <zephyr/sys/atomic.h> #include <zephyr/sys/dlist.h> #include <zephyr/sys/iterable_sections.h> #ifdef __cplusplus extern "C" { #endif /** * @brief Virtual bus event types */ enum uvb_event_type { /** VBUS ready event */ UVB_EVT_VBUS_READY, /** VBUS removed event */ UVB_EVT_VBUS_REMOVED, /** Device resume event */ UVB_EVT_RESUME, /** Device suspended event */ UVB_EVT_SUSPEND, /** Port reset detected */ UVB_EVT_RESET, /** Endpoint request event */ UVB_EVT_REQUEST, /** Endpoint request reply event */ UVB_EVT_REPLY, /** Device activity event */ UVB_EVT_DEVICE_ACT, }; /** * @brief Virtual bus device activity type */ enum uvb_device_act { /** Device issue remote wakeup */ UVB_DEVICE_ACT_RWUP, /** Low speed connection detected */ UVB_DEVICE_ACT_LS, /** Full speed connection detected */ UVB_DEVICE_ACT_FS, /** High speed connection detected */ UVB_DEVICE_ACT_HS, /** Super speed connection detected */ UVB_DEVICE_ACT_SS, /** Connection removed, issued when a device is disabled */ UVB_DEVICE_ACT_REMOVED, }; /** * @brief Virtual bus host request type */ enum uvb_request { /** Setup request */ UVB_REQUEST_SETUP, /** Data request */ UVB_REQUEST_DATA, }; /** * @brief Virtual bus device reply type */ enum uvb_reply { /** Default value */ UVB_REPLY_TIMEOUT, /** Reply ACK handshake to a request */ UVB_REPLY_ACK, /** Reply NACK handshake to a request */ UVB_REPLY_NACK, /** Reply STALL handshake to a request */ UVB_REPLY_STALL, }; /** * USB virtual bus packet */ struct uvb_packet { /** slist node (TBD) */ sys_snode_t node; /** Consecutive packet sequence number */ uint32_t seq; /** Request type */ enum uvb_request request: 8; /** Reply handshake */ enum uvb_reply reply : 8; /** Device (peripheral) address */ unsigned int addr : 8; /** Endpoint address */ unsigned int ep : 8; /** Pointer to a data chunk */ uint8_t *data; /** Length of the data chunk */ 
size_t length; }; /** * USB virtual bus node */ struct uvb_node { union { /** dlist device node */ sys_dnode_t node; /** dlist host list */ sys_dlist_t list; }; /** Name of the UVB node */ const char *name; /** Pointer to the notify callback of the UVB node */ void (*notify)(const void *const priv, const enum uvb_event_type type, const void *data); /** Internally used atomic value */ atomic_t subscribed; /** True for a host node */ bool head; /** Pointer to the node's private data */ const void *priv; }; /** * @brief Allocate UVB packet for the request or reply. * * @param[in] request Request type * @param[in] addr Device (peripheral) address * @param[in] ep Endpoint address * @param[in] data Pointer to a chunk of the net_buf data * @param[in] length Data chunk length * * @return pointer to allocated packet or NULL on error. */ struct uvb_packet *uvb_alloc_pkt(const enum uvb_request request, const uint8_t addr, const uint8_t ep, uint8_t *const data, const size_t length); /** * @brief Free UVB packet * * @param[in] pkt Pointer to UVB packet */ void uvb_free_pkt(struct uvb_packet *const pkt); /** * @brief Advert UVB event on virtual bus * * All devices subscribed to a controller are advertised. * Events like UVB_EVT_REQUEST are to be filtered by using device address. * * @param[in] host_node Pointer to host controller UVB node * @param[in] type UVB event type * @param[in] pkt Pointer to UVB packet or NULL * * @return 0 on success, all other values should be treated as error. 
*/ int uvb_advert(const struct uvb_node *const host_node, const enum uvb_event_type type, const struct uvb_packet *const pkt); /** * @brief Submit UVB event to host controller node * * Intended for use by virtual device for the request reply * UVB_EVT_REPLY and device activity event UVB_EVT_DEVICE_ACT * * @param[in] dev_node Pointer to device controller UVB node * @param[in] type UVB event type * @param[in] pkt Pointer to UVB packet or NULL * * @return 0 on success, all other values should be treated as error. */ int uvb_to_host(const struct uvb_node *const dev_node, const enum uvb_event_type type, const struct uvb_packet *const pkt); /** * @brief Subscribe to the adverts of the specific host node. * * Intended for use by virtual device during UDC API init call. * * @param[in] name Name of the host node. * @param[in] dev_node Pointer to device controller UVB node * * @return 0 on success, all other values should be treated as error. */ int uvb_subscribe(const char *name, struct uvb_node *const dev_node); /** * @brief Unsubscribe from the adverts of the specific host node. * * Intended for use by virtual device during UDC API shutdown call. * * @param[in] name Name of the host node. * @param[in] dev_node Pointer to device controller UVB node * * @return 0 on success, all other values should be treated as error. */ int uvb_unsubscribe(const char *name, struct uvb_node *const dev_node); /** * @brief Advert request UVB event on virtual bus * * @param[in] host_node Pointer to host controller UVB node * @param[in] pkt Pointer to UVB packet * * @return 0 on success, all other values should be treated as error. 
*/ static inline int uvb_advert_pkt(const struct uvb_node *const host_node, const struct uvb_packet *const pkt) { return uvb_advert(host_node, UVB_EVT_REQUEST, pkt); } /** * @brief Reply to UVB request * * @param[in] dev_node Pointer to host controller UVB node * @param[in] pkt Pointer to UVB packet * * @return 0 on success, all other values should be treated as error. */ static inline int uvb_reply_pkt(const struct uvb_node *const dev_node, const struct uvb_packet *const pkt) { return uvb_to_host(dev_node, UVB_EVT_REPLY, pkt); } /** @brief Helper to define UVB host controller node * * @param host UVB host node structure name * @param host_name UVB host node name * @param host_notify Pointer to host notify callback */ #define UVB_HOST_NODE_DEFINE(host, host_name, host_notify) \ STRUCT_SECTION_ITERABLE(uvb_node, host) = { \ .name = host_name, \ .head = true, \ .notify = host_notify, \ } #ifdef __cplusplus } #endif #endif /* ZEPHYR_INCLUDE_UVB */ ```
/content/code_sandbox/drivers/usb/uvb/uvb.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,654
```unknown config UDC_NXP_EHCI bool "NXP MCUX USB EHCI Device controller driver" default y depends on DT_HAS_NXP_EHCI_ENABLED select NOCACHE_MEMORY if HAS_MCUX_CACHE && CPU_HAS_DCACHE imply UDC_BUF_FORCE_NOCACHE help NXP MCUX USB Device Controller Driver for EHCI. config UDC_NXP_IP3511 bool "NXP MCUX USB IP3511 Device controller driver" default y depends on DT_HAS_NXP_LPCIP3511_ENABLED help NXP MCUX USB Device Controller Driver for KHCI. ```
/content/code_sandbox/drivers/usb/udc/Kconfig.mcux
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
132
```c /* * */ /* * USB device controller (UDC) driver skeleton * * This is a skeleton for a device controller driver using the UDC API. * Please use it as a starting point for a driver implementation for your * USB device controller. Maintaining a common style, terminology and * abbreviations will allow us to speed up reviews and reduce maintenance. * Copy UDC driver skeleton, remove all unrelated comments and replace the * copyright notice with your own. * * Typically, a driver implementation contains only a single source file, * but the large list of e.g. register definitions should be in a separate * .h file. * * If you want to define a helper macro, check if there is something similar * in include/zephyr/sys/util.h or include/zephyr/usb/usb_ch9.h that you can use. * Please keep all identifiers and logging messages concise and clear. */ #include "udc_common.h" #include <string.h> #include <stdio.h> #include <zephyr/kernel.h> #include <zephyr/drivers/usb/udc.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(udc_skeleton, CONFIG_UDC_DRIVER_LOG_LEVEL); /* * Structure for holding controller configuration items that can remain in * non-volatile memory. This is usually accessed as * const struct udc_skeleton_config *config = dev->config; */ struct udc_skeleton_config { size_t num_of_eps; struct udc_ep_config *ep_cfg_in; struct udc_ep_config *ep_cfg_out; void (*make_thread)(const struct device *dev); int speed_idx; }; /* * Structure to hold driver private data. * Note that this is not accessible via dev->data, but as * struct udc_skeleton_data *priv = udc_get_private(dev); */ struct udc_skeleton_data { struct k_thread thread_data; }; /* * You can use one thread per driver instance model or UDC driver workqueue, * whichever model suits your needs best. If you decide to use the UDC workqueue, * enable Kconfig option UDC_WORKQUEUE and remove the handler below and * caller from the UDC_SKELETON_DEVICE_DEFINE macro. 
*/ static ALWAYS_INLINE void skeleton_thread_handler(void *const arg) { const struct device *dev = (const struct device *)arg; LOG_DBG("Driver %p thread started", dev); while (true) { k_msleep(1000); } } /* * This is called in the context of udc_ep_enqueue() and must * not block. The driver can immediately claim the buffer if the queue is empty, * but usually it is offloaded to a thread or workqueue to handle transfers * in a single location. Please refer to existing driver implementations * for examples. */ static int udc_skeleton_ep_enqueue(const struct device *dev, struct udc_ep_config *const cfg, struct net_buf *buf) { LOG_DBG("%p enqueue %p", dev, buf); udc_buf_put(cfg, buf); if (cfg->stat.halted) { /* * It is fine to enqueue a transfer for a halted endpoint, * you need to make sure that transfers are retriggered when * the halt is cleared. * * Always use the abbreviation 'ep' for the endpoint address * and 'ep_idx' or 'ep_num' for the endpoint number identifiers. * Although struct udc_ep_config uses address to be unambiguous * in its context. */ LOG_DBG("ep 0x%02x halted", cfg->addr); return 0; } return 0; } /* * This is called in the context of udc_ep_dequeue() * and must remove all requests from an endpoint queue * Successful removal should be reported to the higher level with * ECONNABORTED as the request result. * It is up to the request owner to clean up or reuse the buffer. */ static int udc_skeleton_ep_dequeue(const struct device *dev, struct udc_ep_config *const cfg) { unsigned int lock_key; struct net_buf *buf; lock_key = irq_lock(); buf = udc_buf_get_all(dev, cfg->addr); if (buf) { udc_submit_ep_event(dev, buf, -ECONNABORTED); } irq_unlock(lock_key); return 0; } /* * Configure and make an endpoint ready for use. * This is called in the context of udc_ep_enable() or udc_ep_enable_internal(), * the latter of which may be used by the driver to enable control endpoints. 
*/ static int udc_skeleton_ep_enable(const struct device *dev, struct udc_ep_config *const cfg) { LOG_DBG("Enable ep 0x%02x", cfg->addr); return 0; } /* * Opposite function to udc_skeleton_ep_enable(). udc_ep_disable_internal() * may be used by the driver to disable control endpoints. */ static int udc_skeleton_ep_disable(const struct device *dev, struct udc_ep_config *const cfg) { LOG_DBG("Disable ep 0x%02x", cfg->addr); return 0; } /* Halt endpoint. Halted endpoint should respond with a STALL handshake. */ static int udc_skeleton_ep_set_halt(const struct device *dev, struct udc_ep_config *const cfg) { LOG_DBG("Set halt ep 0x%02x", cfg->addr); cfg->stat.halted = true; return 0; } /* * Opposite to halt endpoint. If there are requests in the endpoint queue, * the next transfer should be prepared. */ static int udc_skeleton_ep_clear_halt(const struct device *dev, struct udc_ep_config *const cfg) { LOG_DBG("Clear halt ep 0x%02x", cfg->addr); cfg->stat.halted = false; return 0; } static int udc_skeleton_set_address(const struct device *dev, const uint8_t addr) { LOG_DBG("Set new address %u for %p", addr, dev); return 0; } static int udc_skeleton_host_wakeup(const struct device *dev) { LOG_DBG("Remote wakeup from %p", dev); return 0; } /* Return actual USB device speed */ static enum udc_bus_speed udc_skeleton_device_speed(const struct device *dev) { struct udc_data *data = dev->data; return data->caps.hs ? UDC_BUS_SPEED_HS : UDC_BUS_SPEED_FS; } static int udc_skeleton_enable(const struct device *dev) { LOG_DBG("Enable device %p", dev); return 0; } static int udc_skeleton_disable(const struct device *dev) { LOG_DBG("Enable device %p", dev); return 0; } /* * Prepare and configure most of the parts, if the controller has a way * of detecting VBUS activity it should be enabled here. * Only udc_skeleton_enable() makes device visible to the host. 
*/ static int udc_skeleton_init(const struct device *dev) { if (udc_ep_enable_internal(dev, USB_CONTROL_EP_OUT, USB_EP_TYPE_CONTROL, 64, 0)) { LOG_ERR("Failed to enable control endpoint"); return -EIO; } if (udc_ep_enable_internal(dev, USB_CONTROL_EP_IN, USB_EP_TYPE_CONTROL, 64, 0)) { LOG_ERR("Failed to enable control endpoint"); return -EIO; } return 0; } /* Shut down the controller completely */ static int udc_skeleton_shutdown(const struct device *dev) { if (udc_ep_disable_internal(dev, USB_CONTROL_EP_OUT)) { LOG_ERR("Failed to disable control endpoint"); return -EIO; } if (udc_ep_disable_internal(dev, USB_CONTROL_EP_IN)) { LOG_ERR("Failed to disable control endpoint"); return -EIO; } return 0; } /* * This is called once to initialize the controller and endpoints * capabilities, and register endpoint structures. */ static int udc_skeleton_driver_preinit(const struct device *dev) { const struct udc_skeleton_config *config = dev->config; struct udc_data *data = dev->data; uint16_t mps = 1023; int err; /* * You do not need to initialize it if your driver does not use * udc_lock_internal() / udc_unlock_internal(), but implements its * own mechanism. 
*/ k_mutex_init(&data->mutex); data->caps.rwup = true; data->caps.mps0 = UDC_MPS0_64; if (config->speed_idx == 2) { data->caps.hs = true; mps = 1024; } for (int i = 0; i < config->num_of_eps; i++) { config->ep_cfg_out[i].caps.out = 1; if (i == 0) { config->ep_cfg_out[i].caps.control = 1; config->ep_cfg_out[i].caps.mps = 64; } else { config->ep_cfg_out[i].caps.bulk = 1; config->ep_cfg_out[i].caps.interrupt = 1; config->ep_cfg_out[i].caps.iso = 1; config->ep_cfg_out[i].caps.mps = mps; } config->ep_cfg_out[i].addr = USB_EP_DIR_OUT | i; err = udc_register_ep(dev, &config->ep_cfg_out[i]); if (err != 0) { LOG_ERR("Failed to register endpoint"); return err; } } for (int i = 0; i < config->num_of_eps; i++) { config->ep_cfg_in[i].caps.in = 1; if (i == 0) { config->ep_cfg_in[i].caps.control = 1; config->ep_cfg_in[i].caps.mps = 64; } else { config->ep_cfg_in[i].caps.bulk = 1; config->ep_cfg_in[i].caps.interrupt = 1; config->ep_cfg_in[i].caps.iso = 1; config->ep_cfg_in[i].caps.mps = mps; } config->ep_cfg_in[i].addr = USB_EP_DIR_IN | i; err = udc_register_ep(dev, &config->ep_cfg_in[i]); if (err != 0) { LOG_ERR("Failed to register endpoint"); return err; } } config->make_thread(dev); LOG_INF("Device %p (max. speed %d)", dev, config->speed_idx); return 0; } static int udc_skeleton_lock(const struct device *dev) { return udc_lock_internal(dev, K_FOREVER); } static int udc_skeleton_unlock(const struct device *dev) { return udc_unlock_internal(dev); } /* * UDC API structure. 
* Note, you do not need to implement basic checks, these are done by * the UDC common layer udc_common.c */ static const struct udc_api udc_skeleton_api = { .lock = udc_skeleton_lock, .unlock = udc_skeleton_unlock, .device_speed = udc_skeleton_device_speed, .init = udc_skeleton_init, .enable = udc_skeleton_enable, .disable = udc_skeleton_disable, .shutdown = udc_skeleton_shutdown, .set_address = udc_skeleton_set_address, .host_wakeup = udc_skeleton_host_wakeup, .ep_enable = udc_skeleton_ep_enable, .ep_disable = udc_skeleton_ep_disable, .ep_set_halt = udc_skeleton_ep_set_halt, .ep_clear_halt = udc_skeleton_ep_clear_halt, .ep_enqueue = udc_skeleton_ep_enqueue, .ep_dequeue = udc_skeleton_ep_dequeue, }; #define DT_DRV_COMPAT zephyr_udc_skeleton /* * A UDC driver should always be implemented as a multi-instance * driver, even if your platform does not require it. */ #define UDC_SKELETON_DEVICE_DEFINE(n) \ K_THREAD_STACK_DEFINE(udc_skeleton_stack_##n, CONFIG_UDC_SKELETON); \ \ static void udc_skeleton_thread_##n(void *dev, void *arg1, void *arg2) \ { \ skeleton_thread_handler(dev); \ } \ \ static void udc_skeleton_make_thread_##n(const struct device *dev) \ { \ struct udc_skeleton_data *priv = udc_get_private(dev); \ \ k_thread_create(&priv->thread_data, \ udc_skeleton_stack_##n, \ K_THREAD_STACK_SIZEOF(udc_skeleton_stack_##n), \ udc_skeleton_thread_##n, \ (void *)dev, NULL, NULL, \ K_PRIO_COOP(CONFIG_UDC_SKELETON_THREAD_PRIORITY),\ K_ESSENTIAL, \ K_NO_WAIT); \ k_thread_name_set(&priv->thread_data, dev->name); \ } \ \ static struct udc_ep_config \ ep_cfg_out[DT_INST_PROP(n, num_bidir_endpoints)]; \ static struct udc_ep_config \ ep_cfg_in[DT_INST_PROP(n, num_bidir_endpoints)]; \ \ static const struct udc_skeleton_config udc_skeleton_config_##n = { \ .num_of_eps = DT_INST_PROP(n, num_bidir_endpoints), \ .ep_cfg_in = ep_cfg_out, \ .ep_cfg_out = ep_cfg_in, \ .make_thread = udc_skeleton_make_thread_##n, \ .speed_idx = DT_ENUM_IDX(DT_DRV_INST(n), maximum_speed), \ }; \ \ 
static struct udc_skeleton_data udc_priv_##n = { \ }; \ \ static struct udc_data udc_data_##n = { \ .mutex = Z_MUTEX_INITIALIZER(udc_data_##n.mutex), \ .priv = &udc_priv_##n, \ }; \ \ DEVICE_DT_INST_DEFINE(n, udc_skeleton_driver_preinit, NULL, \ &udc_data_##n, &udc_skeleton_config_##n, \ POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \ &udc_skeleton_api); DT_INST_FOREACH_STATUS_OKAY(UDC_SKELETON_DEVICE_DEFINE) ```
/content/code_sandbox/drivers/usb/udc/udc_skeleton.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,194
```unknown config UDC_SKELETON bool "Skeleton for an USB device controller driver" default y depends on DT_HAS_ZEPHYR_UDC_SKELETON_ENABLED help Skeleton for an USB device controller driver. config UDC_SKELETON_STACK_SIZE int "UDC controller driver internal thread stack size" depends on UDC_SKELETON default 512 help Skeleton device controller driver internal thread stack size. config UDC_SKELETON_THREAD_PRIORITY int "Skeleton controller driver thread priority" depends on UDC_SKELETON default 8 help Skeleton device controller driver thread priority. ```
/content/code_sandbox/drivers/usb/udc/Kconfig.skeleton
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
140
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_USB_UDC_DWC2_H #define ZEPHYR_DRIVERS_USB_UDC_DWC2_H #include <stdint.h> #include <zephyr/device.h> #include <zephyr/drivers/usb/udc.h> #include <usb_dwc2_hw.h> /* Vendor quirks per driver instance */ struct dwc2_vendor_quirks { /* Called at the beginning of udc_dwc2_init() */ int (*init)(const struct device *dev); /* Called on udc_dwc2_enable() before the controller is initialized */ int (*pre_enable)(const struct device *dev); /* Called on udc_dwc2_enable() after the controller is initialized */ int (*post_enable)(const struct device *dev); /* Called at the end of udc_dwc2_disable() */ int (*disable)(const struct device *dev); /* Called at the end of udc_dwc2_shutdown() */ int (*shutdown)(const struct device *dev); /* Called at the end of IRQ handling */ int (*irq_clear)(const struct device *dev); /* Called on driver pre-init */ int (*caps)(const struct device *dev); /* Called while waiting for bits that require PHY to be clocked */ int (*is_phy_clk_off)(const struct device *dev); }; /* Driver configuration per instance */ struct udc_dwc2_config { size_t num_in_eps; size_t num_out_eps; struct udc_ep_config *ep_cfg_in; struct udc_ep_config *ep_cfg_out; struct usb_dwc2_reg *const base; /* Pointer to pin control configuration or NULL */ struct pinctrl_dev_config *const pcfg; /* Pointer to vendor quirks or NULL */ struct dwc2_vendor_quirks *const quirks; void (*make_thread)(const struct device *dev); void (*irq_enable_func)(const struct device *dev); void (*irq_disable_func)(const struct device *dev); uint32_t ghwcfg1; uint32_t ghwcfg2; uint32_t ghwcfg4; }; #define DWC2_QUIRK_FUNC_DEFINE(fname) \ static inline int dwc2_quirk_##fname(const struct device *dev) \ { \ const struct udc_dwc2_config *const config = dev->config; \ struct dwc2_vendor_quirks *quirks = config->quirks; \ \ if (quirks != NULL && config->quirks->fname != NULL) { \ return quirks->fname(dev); \ } \ \ return 0; \ } DWC2_QUIRK_FUNC_DEFINE(init) 
DWC2_QUIRK_FUNC_DEFINE(pre_enable) DWC2_QUIRK_FUNC_DEFINE(post_enable) DWC2_QUIRK_FUNC_DEFINE(disable) DWC2_QUIRK_FUNC_DEFINE(shutdown) DWC2_QUIRK_FUNC_DEFINE(irq_clear) DWC2_QUIRK_FUNC_DEFINE(caps) DWC2_QUIRK_FUNC_DEFINE(is_phy_clk_off) #endif /* ZEPHYR_DRIVERS_USB_UDC_DWC2_H */ ```
/content/code_sandbox/drivers/usb/udc/udc_dwc2.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
676
```unknown config UDC_DWC2 bool "DWC2 USB device controller driver" default y depends on DT_HAS_SNPS_DWC2_ENABLED select NRFS if NRFS_HAS_VBUS_DETECTOR_SERVICE select NRFS_VBUS_DETECTOR_SERVICE_ENABLED if NRFS_HAS_VBUS_DETECTOR_SERVICE help DWC2 USB device controller driver. config UDC_DWC2_DMA bool "DWC2 USB DMA support" default y depends on UDC_DWC2 help Enable Buffer DMA if DWC2 USB controller supports Internal DMA. config UDC_DWC2_STACK_SIZE int "UDC DWC2 driver internal thread stack size" depends on UDC_DWC2 default 512 help DWC2 driver internal thread stack size. config UDC_DWC2_THREAD_PRIORITY int "UDC DWC2 driver thread priority" depends on UDC_DWC2 default 8 help DWC2 driver thread priority. config UDC_DWC2_MAX_QMESSAGES int "UDC DWC2 maximum number of ISR event messages" depends on UDC_DWC2 range 4 64 default 8 help DWC2 maximum number of ISR event messages. ```
/content/code_sandbox/drivers/usb/udc/Kconfig.dwc2
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
264
```c /* * */ /** * @file udc_virtual.c * @brief Virtual USB device controller (UDC) driver * * Virtual device controller does not emulate any hardware * and can only communicate with the virtual host controller * through virtual bus. */ #include "udc_common.h" #include "../uvb/uvb.h" #include <string.h> #include <stdio.h> #include <zephyr/kernel.h> #include <zephyr/drivers/usb/udc.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(udc_vrt, CONFIG_UDC_DRIVER_LOG_LEVEL); struct udc_vrt_config { size_t num_of_eps; struct udc_ep_config *ep_cfg_in; struct udc_ep_config *ep_cfg_out; void (*make_thread)(const struct device *dev); struct uvb_node *dev_node; int speed_idx; const char *uhc_name; }; struct udc_vrt_data { struct k_fifo fifo; struct k_thread thread_data; uint8_t addr; }; struct udc_vrt_event { sys_snode_t node; enum uvb_event_type type; struct uvb_packet *pkt; }; K_MEM_SLAB_DEFINE(udc_vrt_slab, sizeof(struct udc_vrt_event), 16, sizeof(void *)); /* Reuse request packet for reply */ static int vrt_request_reply(const struct device *dev, struct uvb_packet *const pkt, const enum uvb_reply reply) { const struct udc_vrt_config *config = dev->config; pkt->reply = reply; return uvb_reply_pkt(config->dev_node, pkt); } static void ctrl_ep_clear_halt(const struct device *dev) { struct udc_ep_config *cfg; cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT); cfg->stat.halted = false; cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_IN); cfg->stat.halted = false; } static int vrt_ctrl_feed_dout(const struct device *dev, const size_t length) { struct udc_ep_config *ep_cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT); struct net_buf *buf; buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, length); if (buf == NULL) { return -ENOMEM; } udc_buf_put(ep_cfg, buf); return 0; } static int vrt_handle_setup(const struct device *dev, struct uvb_packet *const pkt) { struct net_buf *buf; int err, ret; buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, 8); if (buf == NULL) { return -ENOMEM; } 
net_buf_add_mem(buf, pkt->data, pkt->length); udc_ep_buf_set_setup(buf); ctrl_ep_clear_halt(dev); /* Update to next stage of control transfer */ udc_ctrl_update_stage(dev, buf); if (udc_ctrl_stage_is_data_out(dev)) { /* Allocate and feed buffer for data OUT stage */ LOG_DBG("s: %p | feed for -out-", buf); err = vrt_ctrl_feed_dout(dev, udc_data_stage_length(buf)); if (err == -ENOMEM) { /* * Pass it on to the higher level which will * halt control OUT endpoint. */ err = udc_submit_ep_event(dev, buf, err); } } else if (udc_ctrl_stage_is_data_in(dev)) { LOG_DBG("s: %p | submit for -in-", buf); /* Allocate buffer for data IN and submit to upper layer */ err = udc_ctrl_submit_s_in_status(dev); } else { LOG_DBG("s:%p | submit for -status", buf); /* * For all other cases we feed with a buffer * large enough for setup packet. */ err = udc_ctrl_submit_s_status(dev); } ret = vrt_request_reply(dev, pkt, UVB_REPLY_ACK); return ret ? ret : err; } static int vrt_handle_ctrl_out(const struct device *dev, struct net_buf *const buf) { int err = 0; if (udc_ctrl_stage_is_status_out(dev)) { /* Status stage finished, notify upper layer */ err = udc_ctrl_submit_status(dev, buf); } /* Update to next stage of control transfer */ udc_ctrl_update_stage(dev, buf); if (udc_ctrl_stage_is_status_in(dev)) { return udc_ctrl_submit_s_out_status(dev, buf); } return err; } static int vrt_handle_out(const struct device *dev, struct uvb_packet *const pkt) { struct udc_ep_config *ep_cfg; const uint8_t ep = pkt->ep; struct net_buf *buf; size_t min_len; int err = 0; int ret; ep_cfg = udc_get_ep_cfg(dev, ep); if (ep_cfg->stat.halted) { LOG_DBG("reply STALL ep 0x%02x", ep); return vrt_request_reply(dev, pkt, UVB_REPLY_STALL); } buf = udc_buf_peek(dev, ep); if (buf == NULL) { LOG_DBG("reply NACK ep 0x%02x", ep); return vrt_request_reply(dev, pkt, UVB_REPLY_NACK); } min_len = MIN(pkt->length, net_buf_tailroom(buf)); net_buf_add_mem(buf, pkt->data, min_len); LOG_DBG("Handle data OUT, %zu | %zu", pkt->length, 
net_buf_tailroom(buf)); if (net_buf_tailroom(buf) == 0 || pkt->length < ep_cfg->mps) { buf = udc_buf_get(dev, ep); if (ep == USB_CONTROL_EP_OUT) { err = vrt_handle_ctrl_out(dev, buf); } else { err = udc_submit_ep_event(dev, buf, 0); } } ret = vrt_request_reply(dev, pkt, UVB_REPLY_ACK); return ret ? ret : err; } static int isr_handle_ctrl_in(const struct device *dev, struct net_buf *const buf) { int err = 0; if (udc_ctrl_stage_is_status_in(dev) || udc_ctrl_stage_is_no_data(dev)) { /* Status stage finished, notify upper layer */ err = udc_ctrl_submit_status(dev, buf); } /* Update to next stage of control transfer */ udc_ctrl_update_stage(dev, buf); if (udc_ctrl_stage_is_status_out(dev)) { /* * IN transfer finished, release buffer, * Feed control OUT buffer for status stage. */ net_buf_unref(buf); return vrt_ctrl_feed_dout(dev, 0); } return err; } static int vrt_handle_in(const struct device *dev, struct uvb_packet *const pkt) { struct udc_ep_config *ep_cfg; const uint8_t ep = pkt->ep; struct net_buf *buf; size_t min_len; int err = 0; int ret; ep_cfg = udc_get_ep_cfg(dev, ep); if (ep_cfg->stat.halted) { LOG_DBG("reply STALL ep 0x%02x", ep); return vrt_request_reply(dev, pkt, UVB_REPLY_STALL); } buf = udc_buf_peek(dev, ep); if (buf == NULL) { LOG_DBG("reply NACK ep 0x%02x", ep); return vrt_request_reply(dev, pkt, UVB_REPLY_NACK); } LOG_DBG("Handle data IN, %zu | %u | %u", pkt->length, buf->len, ep_cfg->mps); min_len = MIN(pkt->length, buf->len); memcpy(pkt->data, buf->data, min_len); net_buf_pull(buf, min_len); pkt->length = min_len; if (buf->len == 0 || pkt->length < ep_cfg->mps) { if (udc_ep_buf_has_zlp(buf)) { udc_ep_buf_clear_zlp(buf); goto continue_in; } LOG_DBG("Finish data IN %zu | %u", pkt->length, buf->len); buf = udc_buf_get(dev, ep); if (ep == USB_CONTROL_EP_IN) { err = isr_handle_ctrl_in(dev, buf); } else { err = udc_submit_ep_event(dev, buf, 0); } } continue_in: ret = vrt_request_reply(dev, pkt, UVB_REPLY_ACK); return ret ? 
ret : err; } static int vrt_handle_request(const struct device *dev, struct uvb_packet *const pkt) { LOG_DBG("REQUEST event for %p pkt %p", dev, pkt); if (USB_EP_GET_IDX(pkt->ep) == 0 && pkt->request == UVB_REQUEST_SETUP) { return vrt_handle_setup(dev, pkt); } if (USB_EP_DIR_IS_OUT(pkt->ep) && pkt->request == UVB_REQUEST_DATA) { return vrt_handle_out(dev, pkt); } if (USB_EP_DIR_IS_IN(pkt->ep) && pkt->request == UVB_REQUEST_DATA) { return vrt_handle_in(dev, pkt); } return -ENOTSUP; } static ALWAYS_INLINE void udc_vrt_thread_handler(void *arg) { const struct device *dev = (const struct device *)arg; struct udc_vrt_data *priv = udc_get_private(dev); while (true) { struct udc_vrt_event *vrt_ev; int err = 0; vrt_ev = k_fifo_get(&priv->fifo, K_FOREVER); switch (vrt_ev->type) { case UVB_EVT_VBUS_REMOVED: err = udc_submit_event(dev, UDC_EVT_VBUS_REMOVED, 0); break; case UVB_EVT_VBUS_READY: err = udc_submit_event(dev, UDC_EVT_VBUS_READY, 0); break; case UVB_EVT_SUSPEND: err = udc_submit_event(dev, UDC_EVT_SUSPEND, 0); break; case UVB_EVT_RESUME: err = udc_submit_event(dev, UDC_EVT_RESUME, 0); break; case UVB_EVT_RESET: err = udc_submit_event(dev, UDC_EVT_RESET, 0); break; case UVB_EVT_REQUEST: err = vrt_handle_request(dev, vrt_ev->pkt); break; default: break; }; if (err) { udc_submit_event(dev, UDC_EVT_ERROR, err); } k_mem_slab_free(&udc_vrt_slab, (void *)vrt_ev); } } static void vrt_submit_uvb_event(const struct device *dev, const enum uvb_event_type type, struct uvb_packet *const pkt) { struct udc_vrt_data *priv = udc_get_private(dev); struct udc_vrt_event *vrt_ev; int ret; ret = k_mem_slab_alloc(&udc_vrt_slab, (void **)&vrt_ev, K_NO_WAIT); __ASSERT(ret == 0, "Failed to allocate slab"); vrt_ev->type = type; vrt_ev->pkt = pkt; k_fifo_put(&priv->fifo, vrt_ev); } static void udc_vrt_uvb_cb(const void *const vrt_priv, const enum uvb_event_type type, const void *data) { const struct device *dev = vrt_priv; struct udc_vrt_data *priv = udc_get_private(dev); struct uvb_packet 
*const pkt = (void *)data; switch (type) { case UVB_EVT_VBUS_REMOVED: __fallthrough; case UVB_EVT_VBUS_READY: if (udc_is_initialized(dev)) { vrt_submit_uvb_event(dev, type, NULL); } break; case UVB_EVT_SUSPEND: __fallthrough; case UVB_EVT_RESUME: __fallthrough; case UVB_EVT_RESET: if (udc_is_enabled(dev)) { vrt_submit_uvb_event(dev, type, NULL); } break; case UVB_EVT_REQUEST: if (udc_is_enabled(dev) && priv->addr == pkt->addr) { vrt_submit_uvb_event(dev, type, pkt); } break; default: LOG_ERR("Unknown event for %p", dev); break; }; } static int udc_vrt_ep_enqueue(const struct device *dev, struct udc_ep_config *cfg, struct net_buf *buf) { LOG_DBG("%p enqueue %p", dev, buf); udc_buf_put(cfg, buf); if (cfg->stat.halted) { LOG_DBG("ep 0x%02x halted", cfg->addr); return 0; } return 0; } static int udc_vrt_ep_dequeue(const struct device *dev, struct udc_ep_config *cfg) { unsigned int lock_key; struct net_buf *buf; lock_key = irq_lock(); /* Draft dequeue implementation */ buf = udc_buf_get_all(dev, cfg->addr); if (buf) { udc_submit_ep_event(dev, buf, -ECONNABORTED); } irq_unlock(lock_key); return 0; } static int udc_vrt_ep_enable(const struct device *dev, struct udc_ep_config *cfg) { return 0; } static int udc_vrt_ep_disable(const struct device *dev, struct udc_ep_config *cfg) { return 0; } static int udc_vrt_ep_set_halt(const struct device *dev, struct udc_ep_config *cfg) { LOG_DBG("Set halt ep 0x%02x", cfg->addr); cfg->stat.halted = true; return 0; } static int udc_vrt_ep_clear_halt(const struct device *dev, struct udc_ep_config *cfg) { cfg->stat.halted = false; return 0; } static int udc_vrt_set_address(const struct device *dev, const uint8_t addr) { struct udc_vrt_data *priv = udc_get_private(dev); priv->addr = addr; LOG_DBG("Set new address %u for %p", priv->addr, dev); return 0; } static int udc_vrt_host_wakeup(const struct device *dev) { const struct udc_vrt_config *config = dev->config; return uvb_to_host(config->dev_node, UVB_EVT_DEVICE_ACT, 
INT_TO_POINTER(UVB_DEVICE_ACT_RWUP)); } static enum udc_bus_speed udc_vrt_device_speed(const struct device *dev) { struct udc_data *data = dev->data; /* FIXME: get actual device speed */ return data->caps.hs ? UDC_BUS_SPEED_HS : UDC_BUS_SPEED_FS; } static int udc_vrt_enable(const struct device *dev) { const struct udc_vrt_config *config = dev->config; enum uvb_device_act act; switch (config->speed_idx) { case 1: act = UVB_DEVICE_ACT_FS; break; case 2: act = UVB_DEVICE_ACT_HS; break; case 3: act = UVB_DEVICE_ACT_SS; break; case 0: default: act = UVB_DEVICE_ACT_LS; break; } return uvb_to_host(config->dev_node, UVB_EVT_DEVICE_ACT, INT_TO_POINTER(act)); } static int udc_vrt_disable(const struct device *dev) { const struct udc_vrt_config *config = dev->config; return uvb_to_host(config->dev_node, UVB_EVT_DEVICE_ACT, INT_TO_POINTER(UVB_DEVICE_ACT_REMOVED)); } static int udc_vrt_init(const struct device *dev) { const struct udc_vrt_config *config = dev->config; if (udc_ep_enable_internal(dev, USB_CONTROL_EP_OUT, USB_EP_TYPE_CONTROL, 64, 0)) { LOG_ERR("Failed to enable control endpoint"); return -EIO; } if (udc_ep_enable_internal(dev, USB_CONTROL_EP_IN, USB_EP_TYPE_CONTROL, 64, 0)) { LOG_ERR("Failed to enable control endpoint"); return -EIO; } return uvb_subscribe(config->uhc_name, config->dev_node); } static int udc_vrt_shutdown(const struct device *dev) { const struct udc_vrt_config *config = dev->config; if (udc_ep_disable_internal(dev, USB_CONTROL_EP_OUT)) { LOG_ERR("Failed to disable control endpoint"); return -EIO; } if (udc_ep_disable_internal(dev, USB_CONTROL_EP_IN)) { LOG_ERR("Failed to disable control endpoint"); return -EIO; } return uvb_unsubscribe(config->uhc_name, config->dev_node); } static int udc_vrt_driver_preinit(const struct device *dev) { const struct udc_vrt_config *config = dev->config; struct udc_data *data = dev->data; struct udc_vrt_data *priv = data->priv; uint16_t mps = 1023; int err; k_mutex_init(&data->mutex); k_fifo_init(&priv->fifo); 
data->caps.rwup = true; data->caps.mps0 = UDC_MPS0_64; if (config->speed_idx == 2) { data->caps.hs = true; mps = 1024; } for (int i = 0; i < config->num_of_eps; i++) { config->ep_cfg_out[i].caps.out = 1; if (i == 0) { config->ep_cfg_out[i].caps.control = 1; config->ep_cfg_out[i].caps.mps = 64; } else { config->ep_cfg_out[i].caps.bulk = 1; config->ep_cfg_out[i].caps.interrupt = 1; config->ep_cfg_out[i].caps.iso = 1; config->ep_cfg_out[i].caps.mps = mps; } config->ep_cfg_out[i].addr = USB_EP_DIR_OUT | i; err = udc_register_ep(dev, &config->ep_cfg_out[i]); if (err != 0) { LOG_ERR("Failed to register endpoint"); return err; } } for (int i = 0; i < config->num_of_eps; i++) { config->ep_cfg_in[i].caps.in = 1; if (i == 0) { config->ep_cfg_in[i].caps.control = 1; config->ep_cfg_in[i].caps.mps = 64; } else { config->ep_cfg_in[i].caps.bulk = 1; config->ep_cfg_in[i].caps.interrupt = 1; config->ep_cfg_in[i].caps.iso = 1; config->ep_cfg_in[i].caps.mps = mps; } config->ep_cfg_in[i].addr = USB_EP_DIR_IN | i; err = udc_register_ep(dev, &config->ep_cfg_in[i]); if (err != 0) { LOG_ERR("Failed to register endpoint"); return err; } } config->dev_node->priv = dev; config->make_thread(dev); LOG_INF("Device %p (max. 
speed %d) belongs to %s", dev, config->speed_idx, config->uhc_name); return 0; } static int udc_vrt_lock(const struct device *dev) { return udc_lock_internal(dev, K_FOREVER); } static int udc_vrt_unlock(const struct device *dev) { return udc_unlock_internal(dev); } static const struct udc_api udc_vrt_api = { .lock = udc_vrt_lock, .unlock = udc_vrt_unlock, .device_speed = udc_vrt_device_speed, .init = udc_vrt_init, .enable = udc_vrt_enable, .disable = udc_vrt_disable, .shutdown = udc_vrt_shutdown, .set_address = udc_vrt_set_address, .host_wakeup = udc_vrt_host_wakeup, .ep_enable = udc_vrt_ep_enable, .ep_disable = udc_vrt_ep_disable, .ep_set_halt = udc_vrt_ep_set_halt, .ep_clear_halt = udc_vrt_ep_clear_halt, .ep_enqueue = udc_vrt_ep_enqueue, .ep_dequeue = udc_vrt_ep_dequeue, }; #define DT_DRV_COMPAT zephyr_udc_virtual #define UDC_VRT_DEVICE_DEFINE(n) \ K_THREAD_STACK_DEFINE(udc_vrt_stack_area_##n, \ CONFIG_UDC_VIRTUAL_STACK_SIZE); \ \ static void udc_vrt_thread_##n(void *dev, void *unused1, void *unused2) \ { \ while (1) { \ udc_vrt_thread_handler(dev); \ } \ } \ \ static void udc_vrt_make_thread_##n(const struct device *dev) \ { \ struct udc_vrt_data *priv = udc_get_private(dev); \ \ k_thread_create(&priv->thread_data, \ udc_vrt_stack_area_##n, \ K_THREAD_STACK_SIZEOF(udc_vrt_stack_area_##n), \ udc_vrt_thread_##n, \ (void *)dev, NULL, NULL, \ K_PRIO_COOP(CONFIG_UDC_VIRTUAL_THREAD_PRIORITY), \ K_ESSENTIAL, \ K_NO_WAIT); \ k_thread_name_set(&priv->thread_data, dev->name); \ } \ \ static struct udc_ep_config \ ep_cfg_out[DT_INST_PROP(n, num_bidir_endpoints)]; \ static struct udc_ep_config \ ep_cfg_in[DT_INST_PROP(n, num_bidir_endpoints)]; \ \ static struct uvb_node udc_vrt_dev_node##n = { \ .name = DT_NODE_FULL_NAME(DT_DRV_INST(n)), \ .notify = udc_vrt_uvb_cb, \ }; \ \ static const struct udc_vrt_config udc_vrt_config_##n = { \ .num_of_eps = DT_INST_PROP(n, num_bidir_endpoints), \ .ep_cfg_in = ep_cfg_out, \ .ep_cfg_out = ep_cfg_in, \ .make_thread = 
udc_vrt_make_thread_##n, \ .dev_node = &udc_vrt_dev_node##n, \ .speed_idx = DT_ENUM_IDX(DT_DRV_INST(n), maximum_speed), \ .uhc_name = DT_NODE_FULL_NAME(DT_INST_PARENT(n)), \ }; \ \ static struct udc_vrt_data udc_priv_##n = { \ }; \ \ static struct udc_data udc_data_##n = { \ .mutex = Z_MUTEX_INITIALIZER(udc_data_##n.mutex), \ .priv = &udc_priv_##n, \ }; \ \ DEVICE_DT_INST_DEFINE(n, udc_vrt_driver_preinit, NULL, \ &udc_data_##n, &udc_vrt_config_##n, \ POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \ &udc_vrt_api); DT_INST_FOREACH_STATUS_OKAY(UDC_VRT_DEVICE_DEFINE) ```
/content/code_sandbox/drivers/usb/udc/udc_virtual.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,192
```unknown config UDC_STM32 bool "STM32 USB device controller driver" depends on DT_HAS_ST_STM32_OTGFS_ENABLED \ || DT_HAS_ST_STM32_OTGHS_ENABLED \ || DT_HAS_ST_STM32_USB_ENABLED select USE_STM32_LL_USB select USE_STM32_HAL_PCD select USE_STM32_HAL_PCD_EX default y help STM32 USB device controller driver. ```
/content/code_sandbox/drivers/usb/udc/Kconfig.stm32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
98
```c /* * */ /* * Driver for the USBFSOTG device controller which can be found on * devices like Kinetis K64F. */ #define DT_DRV_COMPAT nxp_kinetis_usbd #include <soc.h> #include <string.h> #include <stdio.h> #include <zephyr/device.h> #include <zephyr/kernel.h> #include <zephyr/sys/byteorder.h> #include <zephyr/drivers/usb/udc.h> #include "udc_common.h" #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(usbfsotg, CONFIG_UDC_DRIVER_LOG_LEVEL); #define USBFSOTG_BD_OWN BIT(5) #define USBFSOTG_BD_DATA1 BIT(4) #define USBFSOTG_BD_KEEP BIT(3) #define USBFSOTG_BD_NINC BIT(2) #define USBFSOTG_BD_DTS BIT(1) #define USBFSOTG_BD_STALL BIT(0) #define USBFSOTG_SETUP_TOKEN 0x0D #define USBFSOTG_IN_TOKEN 0x09 #define USBFSOTG_OUT_TOKEN 0x01 #define USBFSOTG_PERID 0x04 #define USBFSOTG_REV 0x33 /* * There is no real advantage to change control endpoint size * but we can use it for testing UDC driver API and higher layers. */ #define USBFSOTG_MPS0 UDC_MPS0_64 #define USBFSOTG_EP0_SIZE 64 /* * Buffer Descriptor (BD) entry provides endpoint buffer control * information for USBFSOTG controller. Every endpoint direction requires * two BD entries. */ struct usbfsotg_bd { union { uint32_t bd_fields; struct { uint32_t reserved_1_0 : 2; uint32_t tok_pid : 4; uint32_t data1 : 1; uint32_t own : 1; uint32_t reserved_15_8 : 8; uint32_t bc : 16; } get __packed; struct { uint32_t reserved_1_0 : 2; uint32_t bd_ctrl : 6; uint32_t reserved_15_8 : 8; uint32_t bc : 16; } set __packed; } __packed; uint32_t buf_addr; } __packed; struct usbfsotg_config { USB_Type *base; /* * Pointer to Buffer Descriptor Table for the endpoints * buffer management. The driver configuration with 16 fully * bidirectional endpoints would require four BD entries * per endpoint and 512 bytes of memory. 
*/ struct usbfsotg_bd *bdt; void (*irq_enable_func)(const struct device *dev); void (*irq_disable_func)(const struct device *dev); size_t num_of_eps; struct udc_ep_config *ep_cfg_in; struct udc_ep_config *ep_cfg_out; }; enum usbfsotg_event_type { /* Trigger next transfer, must not be used for control OUT */ USBFSOTG_EVT_XFER, /* Setup packet received */ USBFSOTG_EVT_SETUP, /* OUT transaction for specific endpoint is finished */ USBFSOTG_EVT_DOUT, /* IN transaction for specific endpoint is finished */ USBFSOTG_EVT_DIN, /* Workaround for clear halt in ISR */ USBFSOTG_EVT_CLEAR_HALT, }; /* Structure for driver's endpoint events */ struct usbfsotg_ep_event { sys_snode_t node; const struct device *dev; enum usbfsotg_event_type event; uint8_t ep; }; K_MEM_SLAB_DEFINE(usbfsotg_ee_slab, sizeof(struct usbfsotg_ep_event), CONFIG_UDC_KINETIS_EVENT_COUNT, sizeof(void *)); struct usbfsotg_data { struct k_work work; struct k_fifo fifo; /* * Buffer pointers and busy flags used only for control OUT * to map the buffers to BDs when both are occupied */ struct net_buf *out_buf[2]; bool busy[2]; }; static int usbfsotg_ep_clear_halt(const struct device *dev, struct udc_ep_config *const cfg); /* Get buffer descriptor (BD) based on endpoint address */ static struct usbfsotg_bd *usbfsotg_get_ebd(const struct device *const dev, struct udc_ep_config *const cfg, const bool opposite) { const struct usbfsotg_config *config = dev->config; uint8_t bd_idx; bd_idx = USB_EP_GET_IDX(cfg->addr) * 4U + (cfg->stat.odd ^ opposite); if (USB_EP_DIR_IS_IN(cfg->addr)) { bd_idx += 2U; } return &config->bdt[bd_idx]; } static bool usbfsotg_bd_is_busy(const struct usbfsotg_bd *const bd) { /* Do not use it for control OUT endpoint */ return bd->get.own; } static void usbfsotg_bd_set_ctrl(struct usbfsotg_bd *const bd, const size_t bc, uint8_t *const data, const bool data1) { bd->set.bc = bc; bd->buf_addr = POINTER_TO_UINT(data); if (data1) { bd->set.bd_ctrl = USBFSOTG_BD_OWN | USBFSOTG_BD_DATA1 | 
USBFSOTG_BD_DTS; } else { bd->set.bd_ctrl = USBFSOTG_BD_OWN | USBFSOTG_BD_DTS; } } /* Resume TX token processing, see USBx_CTL field descriptions */ static ALWAYS_INLINE void usbfsotg_resume_tx(const struct device *dev) { const struct usbfsotg_config *config = dev->config; USB_Type *base = config->base; base->CTL &= ~USB_CTL_TXSUSPENDTOKENBUSY_MASK; } static int usbfsotg_xfer_continue(const struct device *dev, struct udc_ep_config *const cfg, struct net_buf *const buf) { const struct usbfsotg_config *config = dev->config; USB_Type *base = config->base; struct usbfsotg_bd *bd; uint8_t *data_ptr; size_t len; bd = usbfsotg_get_ebd(dev, cfg, false); if (unlikely(usbfsotg_bd_is_busy(bd))) { LOG_ERR("ep 0x%02x buf busy", cfg->addr); __ASSERT_NO_MSG(false); return -EBUSY; } if (USB_EP_DIR_IS_OUT(cfg->addr)) { len = MIN(net_buf_tailroom(buf), cfg->mps); data_ptr = net_buf_tail(buf); } else { len = MIN(buf->len, cfg->mps); data_ptr = buf->data; } usbfsotg_bd_set_ctrl(bd, len, data_ptr, cfg->stat.data1); if (USB_EP_GET_IDX(cfg->addr) == 0U) { usbfsotg_resume_tx(dev); } LOG_DBG("xfer %p, bd %p, ENDPT 0x%x, bd field 0x%02x", buf, bd, base->ENDPOINT[USB_EP_GET_IDX(cfg->addr)].ENDPT, bd->bd_fields); return 0; } /* Initiate a new transfer, must not be used for control endpoint OUT */ static int usbfsotg_xfer_next(const struct device *dev, struct udc_ep_config *const cfg) { struct net_buf *buf; buf = udc_buf_peek(dev, cfg->addr); if (buf == NULL) { return -ENODATA; } return usbfsotg_xfer_continue(dev, cfg, buf); } static inline int usbfsotg_ctrl_feed_start(const struct device *dev, struct net_buf *const buf) { struct usbfsotg_data *priv = udc_get_private(dev); struct udc_ep_config *cfg; struct usbfsotg_bd *bd; size_t length; cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT); if (priv->busy[cfg->stat.odd]) { return -EBUSY; } bd = usbfsotg_get_ebd(dev, cfg, false); length = MIN(net_buf_tailroom(buf), cfg->mps); priv->out_buf[cfg->stat.odd] = buf; priv->busy[cfg->stat.odd] = true; 
usbfsotg_bd_set_ctrl(bd, length, net_buf_tail(buf), cfg->stat.data1); LOG_DBG("ep0 %p|odd: %u|d: %u", buf, cfg->stat.odd, cfg->stat.data1); return 0; } static inline int usbfsotg_ctrl_feed_start_next(const struct device *dev, struct net_buf *const buf) { struct usbfsotg_data *priv = udc_get_private(dev); struct udc_ep_config *cfg; struct usbfsotg_bd *bd; size_t length; cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT); if (priv->busy[!cfg->stat.odd]) { return -EBUSY; } bd = usbfsotg_get_ebd(dev, cfg, true); length = MIN(net_buf_tailroom(buf), cfg->mps); priv->out_buf[!cfg->stat.odd] = buf; priv->busy[!cfg->stat.odd] = true; usbfsotg_bd_set_ctrl(bd, length, net_buf_tail(buf), cfg->stat.data1); LOG_DBG("ep0 %p|odd: %u|d: %u (n)", buf, cfg->stat.odd, cfg->stat.data1); return 0; } /* * Allocate buffer and initiate a new control OUT transfer, * use successive buffer descriptor when next is true. */ static int usbfsotg_ctrl_feed_dout(const struct device *dev, const size_t length, const bool next, const bool resume_tx) { struct net_buf *buf; int ret; buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, length); if (buf == NULL) { return -ENOMEM; } if (next) { ret = usbfsotg_ctrl_feed_start_next(dev, buf); } else { ret = usbfsotg_ctrl_feed_start(dev, buf); } if (ret) { net_buf_unref(buf); return ret; } if (resume_tx) { usbfsotg_resume_tx(dev); } return 0; } static inline int work_handler_setup(const struct device *dev) { struct net_buf *buf; int err; buf = udc_buf_get(dev, USB_CONTROL_EP_OUT); if (buf == NULL) { return -ENODATA; } /* Update to next stage of control transfer */ udc_ctrl_update_stage(dev, buf); if (udc_ctrl_stage_is_data_out(dev)) { /* Allocate and feed buffer for data OUT stage */ LOG_DBG("s:%p|feed for -out-", buf); err = usbfsotg_ctrl_feed_dout(dev, udc_data_stage_length(buf), false, true); if (err == -ENOMEM) { err = udc_submit_ep_event(dev, buf, err); } } else if (udc_ctrl_stage_is_data_in(dev)) { /* * Here we have to feed both descriptor tables so that * no setup 
packets are lost in case of successive * status OUT stage and next setup. */ LOG_DBG("s:%p|feed for -in-status >setup", buf); err = usbfsotg_ctrl_feed_dout(dev, 8U, false, false); if (err == 0) { err = usbfsotg_ctrl_feed_dout(dev, 8U, true, true); } /* Finally alloc buffer for IN and submit to upper layer */ if (err == 0) { err = udc_ctrl_submit_s_in_status(dev); } } else { LOG_DBG("s:%p|feed >setup", buf); /* * For all other cases we feed with a buffer * large enough for setup packet. */ err = usbfsotg_ctrl_feed_dout(dev, 8U, false, true); if (err == 0) { err = udc_ctrl_submit_s_status(dev); } } return err; } static inline int work_handler_out(const struct device *dev, const uint8_t ep) { struct net_buf *buf; int err = 0; buf = udc_buf_get(dev, ep); if (buf == NULL) { return -ENODATA; } if (ep == USB_CONTROL_EP_OUT) { if (udc_ctrl_stage_is_status_out(dev)) { /* s-in-status finished, next bd is already fed */ LOG_DBG("dout:%p|no feed", buf); /* Status stage finished, notify upper layer */ udc_ctrl_submit_status(dev, buf); } else { /* * For all other cases we feed with a buffer * large enough for setup packet. 
*/ LOG_DBG("dout:%p|feed >setup", buf); err = usbfsotg_ctrl_feed_dout(dev, 8U, false, false); } /* Update to next stage of control transfer */ udc_ctrl_update_stage(dev, buf); if (udc_ctrl_stage_is_status_in(dev)) { err = udc_ctrl_submit_s_out_status(dev, buf); } } else { err = udc_submit_ep_event(dev, buf, 0); } return err; } static inline int work_handler_in(const struct device *dev, const uint8_t ep) { struct net_buf *buf; buf = udc_buf_get(dev, ep); if (buf == NULL) { return -ENODATA; } if (ep == USB_CONTROL_EP_IN) { if (udc_ctrl_stage_is_status_in(dev) || udc_ctrl_stage_is_no_data(dev)) { /* Status stage finished, notify upper layer */ udc_ctrl_submit_status(dev, buf); } /* Update to next stage of control transfer */ udc_ctrl_update_stage(dev, buf); if (udc_ctrl_stage_is_status_out(dev)) { /* * IN transfer finished, release buffer, * control OUT buffer should be already fed. */ net_buf_unref(buf); } return 0; } return udc_submit_ep_event(dev, buf, 0); } static void usbfsotg_event_submit(const struct device *dev, const uint8_t ep, const enum usbfsotg_event_type event) { struct usbfsotg_data *priv = udc_get_private(dev); struct usbfsotg_ep_event *ev; int ret; ret = k_mem_slab_alloc(&usbfsotg_ee_slab, (void **)&ev, K_NO_WAIT); if (ret) { udc_submit_event(dev, UDC_EVT_ERROR, ret); LOG_ERR("Failed to allocate slab"); return; } ev->dev = dev; ev->ep = ep; ev->event = event; k_fifo_put(&priv->fifo, ev); k_work_submit_to_queue(udc_get_work_q(), &priv->work); } static void xfer_work_handler(struct k_work *item) { struct usbfsotg_ep_event *ev; struct usbfsotg_data *priv; priv = CONTAINER_OF(item, struct usbfsotg_data, work); while ((ev = k_fifo_get(&priv->fifo, K_NO_WAIT)) != NULL) { struct udc_ep_config *ep_cfg; int err = 0; LOG_DBG("dev %p, ep 0x%02x, event %u", ev->dev, ev->ep, ev->event); ep_cfg = udc_get_ep_cfg(ev->dev, ev->ep); if (unlikely(ep_cfg == NULL)) { udc_submit_event(ev->dev, UDC_EVT_ERROR, -ENODATA); goto xfer_work_error; } switch (ev->event) { case 
USBFSOTG_EVT_SETUP: err = work_handler_setup(ev->dev); break; case USBFSOTG_EVT_DOUT: err = work_handler_out(ev->dev, ev->ep); udc_ep_set_busy(ev->dev, ev->ep, false); break; case USBFSOTG_EVT_DIN: err = work_handler_in(ev->dev, ev->ep); udc_ep_set_busy(ev->dev, ev->ep, false); break; case USBFSOTG_EVT_CLEAR_HALT: err = usbfsotg_ep_clear_halt(ev->dev, ep_cfg); case USBFSOTG_EVT_XFER: default: break; } if (unlikely(err)) { udc_submit_event(ev->dev, UDC_EVT_ERROR, err); } /* Peek next transfer */ if (ev->ep != USB_CONTROL_EP_OUT && !udc_ep_is_busy(ev->dev, ev->ep)) { if (usbfsotg_xfer_next(ev->dev, ep_cfg) == 0) { udc_ep_set_busy(ev->dev, ev->ep, true); } } xfer_work_error: k_mem_slab_free(&usbfsotg_ee_slab, (void *)ev); } } static ALWAYS_INLINE uint8_t stat_reg_get_ep(const uint8_t status) { uint8_t ep_idx = status >> USB_STAT_ENDP_SHIFT; return (status & USB_STAT_TX_MASK) ? (USB_EP_DIR_IN | ep_idx) : ep_idx; } static ALWAYS_INLINE bool stat_reg_is_odd(const uint8_t status) { return (status & USB_STAT_ODD_MASK) >> USB_STAT_ODD_SHIFT; } static ALWAYS_INLINE void set_control_in_pid_data1(const struct device *dev) { struct udc_ep_config *ep_cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_IN); /* Set DATA1 PID for data or status stage */ ep_cfg->stat.data1 = true; } static ALWAYS_INLINE void isr_handle_xfer_done(const struct device *dev, const uint8_t istatus, const uint8_t status) { struct usbfsotg_data *priv = udc_get_private(dev); uint8_t ep = stat_reg_get_ep(status); bool odd = stat_reg_is_odd(status); struct usbfsotg_bd *bd, *bd_op; struct udc_ep_config *ep_cfg; struct net_buf *buf; uint8_t token_pid; bool data1; size_t len; ep_cfg = udc_get_ep_cfg(dev, ep); bd = usbfsotg_get_ebd(dev, ep_cfg, false); bd_op = usbfsotg_get_ebd(dev, ep_cfg, true); token_pid = bd->get.tok_pid; len = bd->get.bc; data1 = bd->get.data1 ? 
true : false; LOG_DBG("TOKDNE, ep 0x%02x len %u odd %u data1 %u", ep, len, odd, data1); switch (token_pid) { case USBFSOTG_SETUP_TOKEN: ep_cfg->stat.odd = !odd; ep_cfg->stat.data1 = true; set_control_in_pid_data1(dev); if (priv->out_buf[odd] != NULL) { net_buf_add(priv->out_buf[odd], len); udc_ep_buf_set_setup(priv->out_buf[odd]); udc_buf_put(ep_cfg, priv->out_buf[odd]); priv->busy[odd] = false; priv->out_buf[odd] = NULL; usbfsotg_event_submit(dev, ep, USBFSOTG_EVT_SETUP); } else { LOG_ERR("No buffer for ep 0x00"); udc_submit_event(dev, UDC_EVT_ERROR, -ENOBUFS); } break; case USBFSOTG_OUT_TOKEN: ep_cfg->stat.odd = !odd; ep_cfg->stat.data1 = !data1; if (ep == USB_CONTROL_EP_OUT) { buf = priv->out_buf[odd]; priv->busy[odd] = false; priv->out_buf[odd] = NULL; } else { buf = udc_buf_peek(dev, ep_cfg->addr); } if (buf == NULL) { LOG_ERR("No buffer for ep 0x%02x", ep); udc_submit_event(dev, UDC_EVT_ERROR, -ENOBUFS); break; } net_buf_add(buf, len); if (net_buf_tailroom(buf) >= ep_cfg->mps && len == ep_cfg->mps) { if (ep == USB_CONTROL_EP_OUT) { usbfsotg_ctrl_feed_start(dev, buf); } else { usbfsotg_xfer_continue(dev, ep_cfg, buf); } } else { if (ep == USB_CONTROL_EP_OUT) { udc_buf_put(ep_cfg, buf); } usbfsotg_event_submit(dev, ep, USBFSOTG_EVT_DOUT); } break; case USBFSOTG_IN_TOKEN: ep_cfg->stat.odd = !odd; ep_cfg->stat.data1 = !data1; buf = udc_buf_peek(dev, ep_cfg->addr); if (buf == NULL) { LOG_ERR("No buffer for ep 0x%02x", ep); udc_submit_event(dev, UDC_EVT_ERROR, -ENOBUFS); break; } net_buf_pull(buf, len); if (buf->len) { usbfsotg_xfer_continue(dev, ep_cfg, buf); } else { if (udc_ep_buf_has_zlp(buf)) { usbfsotg_xfer_continue(dev, ep_cfg, buf); udc_ep_buf_clear_zlp(buf); break; } usbfsotg_event_submit(dev, ep, USBFSOTG_EVT_DIN); } break; default: break; } } static void usbfsotg_isr_handler(const struct device *dev) { const struct usbfsotg_config *config = dev->config; USB_Type *base = config->base; const uint8_t istatus = base->ISTAT; const uint8_t status = base->STAT; 
if (istatus & USB_ISTAT_USBRST_MASK) { base->ADDR = 0U; udc_submit_event(dev, UDC_EVT_RESET, 0); } if (istatus == USB_ISTAT_SOFTOK_MASK) { udc_submit_event(dev, UDC_EVT_SOF, 0); } if (istatus == USB_ISTAT_ERROR_MASK) { LOG_DBG("ERROR IRQ 0x%02x", base->ERRSTAT); udc_submit_event(dev, UDC_EVT_ERROR, base->ERRSTAT); base->ERRSTAT = 0xFF; } if (istatus & USB_ISTAT_STALL_MASK) { struct udc_ep_config *ep_cfg; LOG_DBG("STALL sent"); ep_cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT); if (ep_cfg->stat.halted) { /* * usbfsotg_ep_clear_halt(dev, ep_cfg); cannot * be called in ISR context */ usbfsotg_event_submit(dev, USB_CONTROL_EP_OUT, USBFSOTG_EVT_CLEAR_HALT); } ep_cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_IN); if (ep_cfg->stat.halted) { usbfsotg_event_submit(dev, USB_CONTROL_EP_IN, USBFSOTG_EVT_CLEAR_HALT); } } if (istatus & USB_ISTAT_TOKDNE_MASK) { isr_handle_xfer_done(dev, istatus, status); } if (istatus & USB_ISTAT_SLEEP_MASK) { LOG_DBG("SLEEP IRQ"); /* Enable resume interrupt */ base->INTEN |= USB_INTEN_RESUMEEN_MASK; udc_set_suspended(dev, true); udc_submit_event(dev, UDC_EVT_SUSPEND, 0); } if (istatus & USB_ISTAT_RESUME_MASK) { LOG_DBG("RESUME IRQ"); /* Disable resume interrupt */ base->INTEN &= ~USB_INTEN_RESUMEEN_MASK; udc_set_suspended(dev, false); udc_submit_event(dev, UDC_EVT_RESUME, 0); } /* Clear interrupt status bits */ base->ISTAT = istatus; } static int usbfsotg_ep_enqueue(const struct device *dev, struct udc_ep_config *const cfg, struct net_buf *const buf) { udc_buf_put(cfg, buf); if (cfg->stat.halted) { LOG_DBG("ep 0x%02x halted", cfg->addr); return 0; } usbfsotg_event_submit(dev, cfg->addr, USBFSOTG_EVT_XFER); return 0; } static int usbfsotg_ep_dequeue(const struct device *dev, struct udc_ep_config *const cfg) { struct usbfsotg_bd *bd; unsigned int lock_key; struct net_buf *buf; bd = usbfsotg_get_ebd(dev, cfg, false); lock_key = irq_lock(); bd->set.bd_ctrl = USBFSOTG_BD_DTS; irq_unlock(lock_key); cfg->stat.halted = false; buf = udc_buf_get_all(dev, 
cfg->addr); if (buf) { udc_submit_ep_event(dev, buf, -ECONNABORTED); } udc_ep_set_busy(dev, cfg->addr, false); return 0; } static void ctrl_drop_out_successor(const struct device *dev) { struct usbfsotg_data *priv = udc_get_private(dev); struct udc_ep_config *cfg; struct usbfsotg_bd *bd; struct net_buf *buf; cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT); if (priv->busy[!cfg->stat.odd]) { bd = usbfsotg_get_ebd(dev, cfg, true); buf = priv->out_buf[!cfg->stat.odd]; bd->bd_fields = 0U; priv->busy[!cfg->stat.odd] = false; if (buf) { net_buf_unref(buf); } } } static int usbfsotg_ep_set_halt(const struct device *dev, struct udc_ep_config *const cfg) { struct usbfsotg_bd *bd; bd = usbfsotg_get_ebd(dev, cfg, false); bd->set.bd_ctrl = USBFSOTG_BD_STALL | USBFSOTG_BD_DTS | USBFSOTG_BD_OWN; cfg->stat.halted = true; LOG_DBG("Halt ep 0x%02x bd %p", cfg->addr, bd); if (cfg->addr == USB_CONTROL_EP_IN) { /* Drop subsequent out transfer, current can be re-used */ ctrl_drop_out_successor(dev); } if (USB_EP_GET_IDX(cfg->addr) == 0U) { usbfsotg_resume_tx(dev); } return 0; } static int usbfsotg_ep_clear_halt(const struct device *dev, struct udc_ep_config *const cfg) { const struct usbfsotg_config *config = dev->config; struct usbfsotg_data *priv = udc_get_private(dev); USB_Type *base = config->base; uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr); struct usbfsotg_bd *bd; LOG_DBG("Clear halt ep 0x%02x", cfg->addr); bd = usbfsotg_get_ebd(dev, cfg, false); if (bd->set.bd_ctrl & USBFSOTG_BD_STALL) { LOG_DBG("bd %p: %x", bd, bd->set.bd_ctrl); bd->set.bd_ctrl = USBFSOTG_BD_DTS; } else { LOG_WRN("bd %p is not halted", bd); } cfg->stat.data1 = false; cfg->stat.halted = false; base->ENDPOINT[ep_idx].ENDPT &= ~USB_ENDPT_EPSTALL_MASK; if (cfg->addr == USB_CONTROL_EP_OUT) { if (priv->busy[cfg->stat.odd]) { LOG_DBG("bd %p restarted", bd); bd->set.bd_ctrl = USBFSOTG_BD_DTS | USBFSOTG_BD_OWN; } else { usbfsotg_ctrl_feed_dout(dev, 8U, false, false); } } if (USB_EP_GET_IDX(cfg->addr) == 0U) { 
usbfsotg_resume_tx(dev); } else { /* trigger queued transfers */ usbfsotg_event_submit(dev, cfg->addr, USBFSOTG_EVT_XFER); } return 0; } static int usbfsotg_ep_enable(const struct device *dev, struct udc_ep_config *const cfg) { const struct usbfsotg_config *config = dev->config; struct usbfsotg_data *priv = udc_get_private(dev); USB_Type *base = config->base; const uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr); struct usbfsotg_bd *bd_even, *bd_odd; LOG_DBG("Enable ep 0x%02x", cfg->addr); bd_even = usbfsotg_get_ebd(dev, cfg, false); bd_odd = usbfsotg_get_ebd(dev, cfg, true); bd_even->bd_fields = 0U; bd_even->buf_addr = 0U; bd_odd->bd_fields = 0U; bd_odd->buf_addr = 0U; switch (cfg->attributes & USB_EP_TRANSFER_TYPE_MASK) { case USB_EP_TYPE_CONTROL: base->ENDPOINT[ep_idx].ENDPT = (USB_ENDPT_EPHSHK_MASK | USB_ENDPT_EPRXEN_MASK | USB_ENDPT_EPTXEN_MASK); break; case USB_EP_TYPE_BULK: case USB_EP_TYPE_INTERRUPT: base->ENDPOINT[ep_idx].ENDPT |= USB_ENDPT_EPHSHK_MASK; if (USB_EP_DIR_IS_OUT(cfg->addr)) { base->ENDPOINT[ep_idx].ENDPT |= USB_ENDPT_EPRXEN_MASK; } else { base->ENDPOINT[ep_idx].ENDPT |= USB_ENDPT_EPTXEN_MASK; } break; case USB_EP_TYPE_ISO: if (USB_EP_DIR_IS_OUT(cfg->addr)) { base->ENDPOINT[ep_idx].ENDPT |= USB_ENDPT_EPRXEN_MASK; } else { base->ENDPOINT[ep_idx].ENDPT |= USB_ENDPT_EPTXEN_MASK; } break; default: return -EINVAL; } if (cfg->addr == USB_CONTROL_EP_OUT) { struct net_buf *buf; buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, USBFSOTG_EP0_SIZE); usbfsotg_bd_set_ctrl(bd_even, buf->size, buf->data, false); priv->out_buf[0] = buf; } return 0; } static int usbfsotg_ep_disable(const struct device *dev, struct udc_ep_config *const cfg) { const struct usbfsotg_config *config = dev->config; USB_Type *base = config->base; uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr); struct usbfsotg_bd *bd_even, *bd_odd; bd_even = usbfsotg_get_ebd(dev, cfg, false); bd_odd = usbfsotg_get_ebd(dev, cfg, true); if (USB_EP_DIR_IS_OUT(cfg->addr)) { base->ENDPOINT[ep_idx].ENDPT &= 
~USB_ENDPT_EPRXEN_MASK; } else { base->ENDPOINT[ep_idx].ENDPT &= ~USB_ENDPT_EPTXEN_MASK; } if (usbfsotg_bd_is_busy(bd_even) || usbfsotg_bd_is_busy(bd_odd)) { LOG_DBG("Endpoint buffer is busy"); } bd_even->bd_fields = 0U; bd_even->buf_addr = 0U; bd_odd->bd_fields = 0U; bd_odd->buf_addr = 0U; LOG_DBG("Disable ep 0x%02x", cfg->addr); return 0; } static int usbfsotg_host_wakeup(const struct device *dev) { return -ENOTSUP; } static int usbfsotg_set_address(const struct device *dev, const uint8_t addr) { const struct usbfsotg_config *config = dev->config; USB_Type *base = config->base; base->ADDR = addr; return 0; } static int usbfsotg_enable(const struct device *dev) { const struct usbfsotg_config *config = dev->config; USB_Type *base = config->base; /* non-OTG device mode, enable DP Pullup */ base->CONTROL = USB_CONTROL_DPPULLUPNONOTG_MASK; return 0; } static int usbfsotg_disable(const struct device *dev) { const struct usbfsotg_config *config = dev->config; USB_Type *base = config->base; /* disable USB and DP Pullup */ base->CTL &= ~USB_CTL_USBENSOFEN_MASK; base->CONTROL &= ~USB_CONTROL_DPPULLUPNONOTG_MASK; return 0; } static bool usbfsotg_is_supported(const struct device *dev) { const struct usbfsotg_config *config = dev->config; USB_Type *base = config->base; if ((base->PERID != USBFSOTG_PERID) || (base->REV != USBFSOTG_REV)) { return false; } return true; } static int usbfsotg_init(const struct device *dev) { const struct usbfsotg_config *config = dev->config; USB_Type *base = config->base; /* (FIXME) Enable USB voltage regulator */ SIM->SOPT1 |= SIM_SOPT1_USBREGEN_MASK; /* Reset USB module */ base->USBTRC0 |= USB_USBTRC0_USBRESET_MASK; k_busy_wait(2000); /* enable USB module, AKA USBEN bit in CTL1 register */ base->CTL = USB_CTL_USBENSOFEN_MASK; if (!usbfsotg_is_supported(dev)) { return -ENOTSUP; } for (uint8_t i = 0; i < 16U; i++) { base->ENDPOINT[i].ENDPT = 0; } base->BDTPAGE1 = (uint8_t)(POINTER_TO_UINT(config->bdt) >> 8); base->BDTPAGE2 = 
(uint8_t)(POINTER_TO_UINT(config->bdt) >> 16); base->BDTPAGE3 = (uint8_t)(POINTER_TO_UINT(config->bdt) >> 24); /* (FIXME) Enables the weak pulldowns on the USB transceiver */ base->USBCTRL = USB_USBCTRL_PDE_MASK; /* Clear interrupt flags */ base->ISTAT = 0xFF; /* Clear error flags */ base->ERRSTAT = 0xFF; /* Enable all error interrupt sources */ base->ERREN = 0xFF; /* Enable reset interrupt */ base->INTEN = (USB_INTEN_SLEEPEN_MASK | USB_INTEN_STALLEN_MASK | USB_INTEN_TOKDNEEN_MASK | USB_INTEN_SOFTOKEN_MASK | USB_INTEN_ERROREN_MASK | USB_INTEN_USBRSTEN_MASK); if (udc_ep_enable_internal(dev, USB_CONTROL_EP_OUT, USB_EP_TYPE_CONTROL, USBFSOTG_EP0_SIZE, 0)) { LOG_ERR("Failed to enable control endpoint"); return -EIO; } if (udc_ep_enable_internal(dev, USB_CONTROL_EP_IN, USB_EP_TYPE_CONTROL, USBFSOTG_EP0_SIZE, 0)) { LOG_ERR("Failed to enable control endpoint"); return -EIO; } /* Connect and enable USB interrupt */ config->irq_enable_func(dev); LOG_DBG("Initialized USB controller %p", base); return 0; } static int usbfsotg_shutdown(const struct device *dev) { const struct usbfsotg_config *config = dev->config; config->irq_disable_func(dev); if (udc_ep_disable_internal(dev, USB_CONTROL_EP_OUT)) { LOG_ERR("Failed to disable control endpoint"); return -EIO; } if (udc_ep_disable_internal(dev, USB_CONTROL_EP_IN)) { LOG_ERR("Failed to disable control endpoint"); return -EIO; } /* Disable USB module */ config->base->CTL = 0; /* Disable USB voltage regulator */ SIM->SOPT1 &= ~SIM_SOPT1_USBREGEN_MASK; return 0; } static int usbfsotg_lock(const struct device *dev) { return udc_lock_internal(dev, K_FOREVER); } static int usbfsotg_unlock(const struct device *dev) { return udc_unlock_internal(dev); } static int usbfsotg_driver_preinit(const struct device *dev) { const struct usbfsotg_config *config = dev->config; struct udc_data *data = dev->data; struct usbfsotg_data *priv = data->priv; int err; k_mutex_init(&data->mutex); k_fifo_init(&priv->fifo); k_work_init(&priv->work, 
xfer_work_handler); for (int i = 0; i < config->num_of_eps; i++) { config->ep_cfg_out[i].caps.out = 1; if (i == 0) { config->ep_cfg_out[i].caps.control = 1; config->ep_cfg_out[i].caps.mps = 64; } else { config->ep_cfg_out[i].caps.bulk = 1; config->ep_cfg_out[i].caps.interrupt = 1; config->ep_cfg_out[i].caps.iso = 1; config->ep_cfg_out[i].caps.mps = 1023; } config->ep_cfg_out[i].addr = USB_EP_DIR_OUT | i; err = udc_register_ep(dev, &config->ep_cfg_out[i]); if (err != 0) { LOG_ERR("Failed to register endpoint"); return err; } } for (int i = 0; i < config->num_of_eps; i++) { config->ep_cfg_in[i].caps.in = 1; if (i == 0) { config->ep_cfg_in[i].caps.control = 1; config->ep_cfg_in[i].caps.mps = 64; } else { config->ep_cfg_in[i].caps.bulk = 1; config->ep_cfg_in[i].caps.interrupt = 1; config->ep_cfg_in[i].caps.iso = 1; config->ep_cfg_in[i].caps.mps = 1023; } config->ep_cfg_in[i].addr = USB_EP_DIR_IN | i; err = udc_register_ep(dev, &config->ep_cfg_in[i]); if (err != 0) { LOG_ERR("Failed to register endpoint"); return err; } } data->caps.rwup = false; data->caps.mps0 = USBFSOTG_MPS0; return 0; } static const struct udc_api usbfsotg_api = { .ep_enqueue = usbfsotg_ep_enqueue, .ep_dequeue = usbfsotg_ep_dequeue, .ep_set_halt = usbfsotg_ep_set_halt, .ep_clear_halt = usbfsotg_ep_clear_halt, .ep_try_config = NULL, .ep_enable = usbfsotg_ep_enable, .ep_disable = usbfsotg_ep_disable, .host_wakeup = usbfsotg_host_wakeup, .set_address = usbfsotg_set_address, .enable = usbfsotg_enable, .disable = usbfsotg_disable, .init = usbfsotg_init, .shutdown = usbfsotg_shutdown, .lock = usbfsotg_lock, .unlock = usbfsotg_unlock, }; #define USBFSOTG_DEVICE_DEFINE(n) \ static void udc_irq_enable_func##n(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), \ DT_INST_IRQ(n, priority), \ usbfsotg_isr_handler, \ DEVICE_DT_INST_GET(n), 0); \ \ irq_enable(DT_INST_IRQN(n)); \ } \ \ static void udc_irq_disable_func##n(const struct device *dev) \ { \ irq_disable(DT_INST_IRQN(n)); \ } \ \ static struct 
usbfsotg_bd __aligned(512) \ bdt_##n[DT_INST_PROP(n, num_bidir_endpoints) * 2 * 2]; \ \ static struct udc_ep_config \ ep_cfg_out[DT_INST_PROP(n, num_bidir_endpoints)]; \ static struct udc_ep_config \ ep_cfg_in[DT_INST_PROP(n, num_bidir_endpoints)]; \ \ static struct usbfsotg_config priv_config_##n = { \ .base = (USB_Type *)DT_INST_REG_ADDR(n), \ .bdt = bdt_##n, \ .irq_enable_func = udc_irq_enable_func##n, \ .irq_disable_func = udc_irq_disable_func##n, \ .num_of_eps = DT_INST_PROP(n, num_bidir_endpoints), \ .ep_cfg_in = ep_cfg_out, \ .ep_cfg_out = ep_cfg_in, \ }; \ \ static struct usbfsotg_data priv_data_##n = { \ }; \ \ static struct udc_data udc_data_##n = { \ .mutex = Z_MUTEX_INITIALIZER(udc_data_##n.mutex), \ .priv = &priv_data_##n, \ }; \ \ DEVICE_DT_INST_DEFINE(n, usbfsotg_driver_preinit, NULL, \ &udc_data_##n, &priv_config_##n, \ POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \ &usbfsotg_api); DT_INST_FOREACH_STATUS_OKAY(USBFSOTG_DEVICE_DEFINE) ```
/content/code_sandbox/drivers/usb/udc/udc_kinetis.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
9,503
```c /* * */ #include "udc_common.h" #include <soc.h> #include <soc_dt.h> #include <zephyr/pm/policy.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/logging/log.h> #include <zephyr/drivers/interrupt_controller/wuc_ite_it8xxx2.h> #include <zephyr/dt-bindings/interrupt-controller/it8xxx2-wuc.h> LOG_MODULE_REGISTER(udc_it82xx2, CONFIG_UDC_DRIVER_LOG_LEVEL); #define DT_DRV_COMPAT ite_it82xx2_usb #define IT8XXX2_IS_EXTEND_ENDPOINT(n) (USB_EP_GET_IDX(n) >= 4) /* Shared FIFO number including FIFO_1/2/3 */ #define SHARED_FIFO_NUM 3 /* The related definitions of the register dc_line_status: 0x51 */ #define RX_LINE_STATE_MASK (RX_LINE_FULL_SPD | RX_LINE_LOW_SPD) #define RX_LINE_LOW_SPD 0x02 #define RX_LINE_FULL_SPD 0x01 #define RX_LINE_RESET 0x00 #define DC_ADDR_NULL 0x00 #define DC_ADDR_MASK 0x7F /* EPN Extend Control 2 Register Mask Definition */ #define COMPLETED_TRANS 0xF0 /* The related definitions of the register EP STATUS: * 0x41/0x45/0x49/0x4D */ #define EP_STATUS_ERROR 0x0F /* ENDPOINT[3..0]_CONTROL_REG */ #define ENDPOINT_EN BIT(0) #define ENDPOINT_RDY BIT(1) /* The bit definitions of the register EP RX/TX FIFO Control: * EP_RX_FIFO_CONTROL: 0X64/0x84/0xA4/0xC4 * EP_TX_FIFO_CONTROL: 0X74/0x94/0xB4/0xD4 */ #define FIFO_FORCE_EMPTY BIT(0) /* The bit definitions of the register Host/Device Control: 0XE0 */ #define RESET_CORE BIT(1) /* ENDPOINT[3..0]_STATUS_REG */ #define DC_STALL_SENT BIT(5) /* DC_INTERRUPT_STATUS_REG */ #define DC_TRANS_DONE BIT(0) #define DC_RESET_EVENT BIT(2) #define DC_SOF_RECEIVED BIT(3) #define DC_NAK_SENT_INT BIT(4) /* DC_CONTROL_REG */ #define DC_GLOBAL_ENABLE BIT(0) #define DC_TX_LINE_STATE_DM BIT(1) #define DC_DIRECT_CONTROL BIT(3) #define DC_FULL_SPEED_LINE_POLARITY BIT(4) #define DC_FULL_SPEED_LINE_RATE BIT(5) #define DC_CONNECT_TO_HOST BIT(6) /* internal pull-up */ /* Bit [1:0] represents the TRANSACTION_TYPE as follows: */ enum it82xx2_transaction_types { DC_SETUP_TRANS = 0, DC_IN_TRANS, DC_OUTDATA_TRANS, DC_ALL_TRANS }; enum 
it82xx2_event_type { IT82xx2_EVT_XFER, IT82xx2_EVT_SETUP_TOKEN, IT82xx2_EVT_OUT_TOKEN, IT82xx2_EVT_IN_TOKEN, }; struct it82xx2_ep_event { sys_snode_t node; const struct device *dev; uint8_t ep; enum it82xx2_event_type event; }; K_MSGQ_DEFINE(evt_msgq, sizeof(struct it82xx2_ep_event), CONFIG_UDC_IT82xx2_EVENT_COUNT, sizeof(uint32_t)); struct usb_it8xxx2_wuc { /* WUC control device structure */ const struct device *dev; /* WUC pin mask */ uint8_t mask; }; struct it82xx2_data { const struct device *dev; struct k_fifo fifo; struct k_work_delayable suspended_work; struct k_thread thread_data; struct k_sem suspended_sem; /* FIFO_1/2/3 ready status */ bool fifo_ready[SHARED_FIFO_NUM]; /* FIFO_1/2/3 semaphore */ struct k_sem fifo_sem[SHARED_FIFO_NUM]; /* Record if the previous transaction of endpoint0 is STALL */ bool stall_is_sent; }; struct usb_it82xx2_config { struct usb_it82xx2_regs *const base; const struct pinctrl_dev_config *pcfg; const struct usb_it8xxx2_wuc wuc; uint8_t usb_irq; uint8_t wu_irq; struct udc_ep_config *ep_cfg_in; struct udc_ep_config *ep_cfg_out; void (*make_thread)(const struct device *dev); }; enum it82xx2_ep_ctrl { EP_IN_DIRECTION_SET, EP_IOS_ENABLE, EP_ENABLE, EP_DATA_SEQ_1, EP_DATA_SEQ_TOGGLE, EP_READY_ENABLE, }; static inline void ep_set_halt(const struct device *dev, const uint8_t ep_idx, const bool enable) { const struct usb_it82xx2_config *config = dev->config; struct usb_it82xx2_regs *const usb_regs = config->base; struct it82xx2_usb_ep_regs *ep_regs = usb_regs->usb_ep_regs; union epn0n1_extend_ctrl_reg *ext_ctrl; uint8_t idx = (ep_idx - 4) / 2; ext_ctrl = usb_regs->fifo_regs[EP_EXT_REGS_9X].ext_4_15.epn0n1_ext_ctrl; if (IT8XXX2_IS_EXTEND_ENDPOINT(ep_idx)) { if (ep_idx % 2) { ext_ctrl[idx].fields.epn1_send_stall_bit = enable; } else { ext_ctrl[idx].fields.epn0_send_stall_bit = enable; } } else { ep_regs[ep_idx].ep_ctrl.fields.send_stall_bit = enable; } } static volatile void *it82xx2_get_ext_ctrl(const struct device *dev, const uint8_t 
ep_idx, const enum it82xx2_ep_ctrl ctrl) { uint8_t idx; const struct usb_it82xx2_config *config = dev->config; struct usb_it82xx2_regs *const usb_regs = config->base; union epn0n1_extend_ctrl_reg *epn0n1_ext_ctrl = usb_regs->fifo_regs[EP_EXT_REGS_9X].ext_4_15.epn0n1_ext_ctrl; struct epn_ext_ctrl_regs *ext_ctrl = usb_regs->fifo_regs[EP_EXT_REGS_DX].ext_0_3.epn_ext_ctrl; if (ctrl == EP_IN_DIRECTION_SET || ctrl == EP_ENABLE) { idx = ((ep_idx - 4) % 3) + 1; return &ext_ctrl[idx].epn_ext_ctrl1; } idx = (ep_idx - 4) / 2; return &epn0n1_ext_ctrl[idx]; } static int it82xx2_usb_extend_ep_ctrl(const struct device *dev, const uint8_t ep, const enum it82xx2_ep_ctrl ctrl, const bool enable) { const struct usb_it82xx2_config *config = dev->config; struct usb_it82xx2_regs *const usb_regs = config->base; struct epn_ext_ctrl_regs *ext_ctrl = usb_regs->fifo_regs[EP_EXT_REGS_DX].ext_0_3.epn_ext_ctrl; volatile union epn_extend_ctrl1_reg *epn_ext_ctrl1 = NULL; volatile union epn0n1_extend_ctrl_reg *epn0n1_ext_ctrl = NULL; const uint8_t ep_idx = USB_EP_GET_IDX(ep); if (ctrl == EP_IN_DIRECTION_SET || ctrl == EP_ENABLE) { epn_ext_ctrl1 = it82xx2_get_ext_ctrl(dev, ep_idx, ctrl); } else { epn0n1_ext_ctrl = it82xx2_get_ext_ctrl(dev, ep_idx, ctrl); } switch (ctrl) { case EP_IOS_ENABLE: if (ep_idx % 2) { epn0n1_ext_ctrl->fields.epn1_iso_enable_bit = enable; } else { epn0n1_ext_ctrl->fields.epn0_iso_enable_bit = enable; } break; case EP_DATA_SEQ_1: if (ep_idx % 2) { epn0n1_ext_ctrl->fields.epn1_outdata_sequence_bit = enable; } else { epn0n1_ext_ctrl->fields.epn0_outdata_sequence_bit = enable; } break; case EP_DATA_SEQ_TOGGLE: if (!enable) { break; } if (ep_idx % 2) { if (epn0n1_ext_ctrl->fields.epn1_outdata_sequence_bit) { epn0n1_ext_ctrl->fields.epn1_outdata_sequence_bit = 0; } else { epn0n1_ext_ctrl->fields.epn1_outdata_sequence_bit = 1; } } else { if (epn0n1_ext_ctrl->fields.epn0_outdata_sequence_bit) { epn0n1_ext_ctrl->fields.epn0_outdata_sequence_bit = 0; } else { 
epn0n1_ext_ctrl->fields.epn0_outdata_sequence_bit = 1; } } break; case EP_IN_DIRECTION_SET: if (((ep_idx - 4) / 3 == 0)) { epn_ext_ctrl1->fields.epn0_direction_bit = enable; } else if (((ep_idx - 4) / 3 == 1)) { epn_ext_ctrl1->fields.epn3_direction_bit = enable; } else if (((ep_idx - 4) / 3 == 2)) { epn_ext_ctrl1->fields.epn6_direction_bit = enable; } else if (((ep_idx - 4) / 3 == 3)) { epn_ext_ctrl1->fields.epn9_direction_bit = enable; } else { LOG_ERR("Invalid endpoint 0x%x for control type 0x%x", ep, ctrl); return -EINVAL; } break; case EP_ENABLE: if (((ep_idx - 4) / 3 == 0)) { epn_ext_ctrl1->fields.epn0_enable_bit = enable; } else if (((ep_idx - 4) / 3 == 1)) { epn_ext_ctrl1->fields.epn3_enable_bit = enable; } else if (((ep_idx - 4) / 3 == 2)) { epn_ext_ctrl1->fields.epn6_enable_bit = enable; } else if (((ep_idx - 4) / 3 == 3)) { epn_ext_ctrl1->fields.epn9_enable_bit = enable; } else { LOG_ERR("Invalid endpoint 0x%x for control type 0x%x", ep, ctrl); return -EINVAL; } break; case EP_READY_ENABLE: int idx = ((ep_idx - 4) % 3) + 1; (enable) ? 
(ext_ctrl[idx].epn_ext_ctrl2 |= BIT((ep_idx - 4) / 3)) : (ext_ctrl[idx].epn_ext_ctrl2 &= ~BIT((ep_idx - 4) / 3)); break; default: LOG_ERR("Unknown control type 0x%x", ctrl); return -EINVAL; } return 0; } static int it82xx2_usb_ep_ctrl(const struct device *dev, uint8_t ep, enum it82xx2_ep_ctrl ctrl, bool enable) { const struct usb_it82xx2_config *config = dev->config; struct usb_it82xx2_regs *const usb_regs = config->base; struct it82xx2_usb_ep_regs *ep_regs = usb_regs->usb_ep_regs; const uint8_t ep_idx = USB_EP_GET_IDX(ep); if (IT8XXX2_IS_EXTEND_ENDPOINT(ep_idx)) { return -EINVAL; } switch (ctrl) { case EP_IN_DIRECTION_SET: ep_regs[ep_idx].ep_ctrl.fields.direction_bit = enable; break; case EP_IOS_ENABLE: ep_regs[ep_idx].ep_ctrl.fields.iso_enable_bit = enable; break; case EP_ENABLE: ep_regs[ep_idx].ep_ctrl.fields.enable_bit = enable; break; case EP_READY_ENABLE: ep_regs[ep_idx].ep_ctrl.fields.ready_bit = enable; break; case EP_DATA_SEQ_1: ep_regs[ep_idx].ep_ctrl.fields.outdata_sequence_bit = enable; break; case EP_DATA_SEQ_TOGGLE: if (!enable) { break; } if (ep_regs[ep_idx].ep_ctrl.fields.outdata_sequence_bit) { ep_regs[ep_idx].ep_ctrl.fields.outdata_sequence_bit = 0; } else { ep_regs[ep_idx].ep_ctrl.fields.outdata_sequence_bit = 1; } break; default: LOG_ERR("Unknown control type 0x%x", ctrl); return -EINVAL; } return 0; } static int it82xx2_usb_set_ep_ctrl(const struct device *dev, uint8_t ep, enum it82xx2_ep_ctrl ctrl, bool enable) { const uint8_t ep_idx = USB_EP_GET_IDX(ep); int ret = 0; if (IT8XXX2_IS_EXTEND_ENDPOINT(ep_idx)) { ret = it82xx2_usb_extend_ep_ctrl(dev, ep, ctrl, enable); } else { ret = it82xx2_usb_ep_ctrl(dev, ep, ctrl, enable); } return ret; } /* Standby(deep doze) mode enable/disable */ static void it82xx2_enable_standby_state(bool enable) { if (enable) { pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES); } else { pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES); } } /* Wake-up interrupt (USB D+) Enable/Disable */ static 
void it82xx2_enable_wu_irq(const struct device *dev, bool enable) { const struct usb_it82xx2_config *config = dev->config; /* Clear pending interrupt */ it8xxx2_wuc_clear_status(config->wuc.dev, config->wuc.mask); if (enable) { irq_enable(config->wu_irq); } else { irq_disable(config->wu_irq); } } static void it82xx2_wu_isr(const void *arg) { const struct device *dev = arg; it82xx2_enable_wu_irq(dev, false); it82xx2_enable_standby_state(false); LOG_DBG("USB D+ (WU) Triggered"); } static void it8xxx2_usb_dc_wuc_init(const struct device *dev) { const struct usb_it82xx2_config *config = dev->config; /* Initializing the WUI */ it8xxx2_wuc_set_polarity(config->wuc.dev, config->wuc.mask, WUC_TYPE_EDGE_FALLING); it8xxx2_wuc_clear_status(config->wuc.dev, config->wuc.mask); /* Enabling the WUI */ it8xxx2_wuc_enable(config->wuc.dev, config->wuc.mask); /* Connect WU (USB D+) interrupt but make it disabled initially */ irq_connect_dynamic(config->wu_irq, 0, it82xx2_wu_isr, dev, 0); } /* The ep_fifo_res[ep_idx % SHARED_FIFO_NUM] where the SHARED_FIFO_NUM is 3 represents the * EP mapping because when (ep_idx % SHARED_FIFO_NUM) is 3, it actually means the EP0. 
*/ static const uint8_t ep_fifo_res[SHARED_FIFO_NUM] = {3, 1, 2}; static int it82xx2_usb_fifo_ctrl(const struct device *dev, uint8_t ep) { const uint8_t ep_idx = USB_EP_GET_IDX(ep); const struct usb_it82xx2_config *config = dev->config; struct usb_it82xx2_regs *const usb_regs = config->base; volatile uint8_t *ep_fifo_ctrl = usb_regs->fifo_regs[EP_EXT_REGS_BX].fifo_ctrl.ep_fifo_ctrl; uint8_t fifon_ctrl = (ep_fifo_res[ep_idx % SHARED_FIFO_NUM] - 1) * 2; int ret = 0; if (ep_idx == 0) { LOG_ERR("Invalid endpoint 0x%x", ep); return -EINVAL; } if (USB_EP_DIR_IS_IN(ep)) { if (ep_idx < 8) { ep_fifo_ctrl[fifon_ctrl] = BIT(ep_idx); ep_fifo_ctrl[fifon_ctrl + 1] = 0x0; } else { ep_fifo_ctrl[fifon_ctrl] = 0x0; ep_fifo_ctrl[fifon_ctrl + 1] = BIT(ep_idx - 8); } } else if (USB_EP_DIR_IS_OUT(ep)) { if (ep_idx < 8) { ep_fifo_ctrl[fifon_ctrl] |= BIT(ep_idx); } else { ep_fifo_ctrl[fifon_ctrl + 1] |= BIT(ep_idx - 8); } } else { LOG_ERR("Failed to set fifo control register for ep 0x%x", ep); ret = -EINVAL; } return ret; } static void it82xx2_event_submit(const struct device *dev, const uint8_t ep, const enum it82xx2_event_type event) { struct it82xx2_ep_event evt; evt.dev = dev; evt.ep = ep; evt.event = event; k_msgq_put(&evt_msgq, &evt, K_NO_WAIT); } static int it82xx2_ep_enqueue(const struct device *dev, struct udc_ep_config *const cfg, struct net_buf *const buf) { udc_buf_put(cfg, buf); it82xx2_event_submit(dev, cfg->addr, IT82xx2_EVT_XFER); return 0; } static int it82xx2_ep_dequeue(const struct device *dev, struct udc_ep_config *const cfg) { const uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr); const struct usb_it82xx2_config *config = dev->config; struct usb_it82xx2_regs *const usb_regs = config->base; struct it82xx2_usb_ep_fifo_regs *ff_regs = usb_regs->fifo_regs; struct net_buf *buf; unsigned int lock_key; uint8_t fifo_idx; fifo_idx = ep_idx > 0 ? 
ep_fifo_res[ep_idx % SHARED_FIFO_NUM] : 0; lock_key = irq_lock(); if (USB_EP_DIR_IS_IN(cfg->addr)) { ff_regs[fifo_idx].ep_tx_fifo_ctrl = FIFO_FORCE_EMPTY; } else { ff_regs[fifo_idx].ep_rx_fifo_ctrl = FIFO_FORCE_EMPTY; } irq_unlock(lock_key); buf = udc_buf_get_all(dev, cfg->addr); if (buf) { udc_submit_ep_event(dev, buf, -ECONNABORTED); } udc_ep_set_busy(dev, cfg->addr, false); return 0; } static inline void ctrl_ep_stall_workaround(const struct device *dev) { const struct usb_it82xx2_config *config = dev->config; struct usb_it82xx2_regs *const usb_regs = config->base; struct it82xx2_usb_ep_regs *ep_regs = usb_regs->usb_ep_regs; struct gctrl_it8xxx2_regs *const gctrl_regs = GCTRL_IT8XXX2_REGS_BASE; struct it82xx2_data *priv = udc_get_private(dev); unsigned int lock_key; uint32_t idx = 0; priv->stall_is_sent = true; lock_key = irq_lock(); ep_set_halt(dev, 0, true); it82xx2_usb_set_ep_ctrl(dev, 0, EP_READY_ENABLE, true); /* It82xx2 does not support clearing the STALL bit by hardware; instead, the STALL bit need * to be cleared by firmware. The SETUP token will be STALLed, which isn't compliant to * USB specification, if firmware clears the STALL bit too late. Due to this hardware * limitations, device controller polls to check if the stall bit has been transmitted for * 3ms and then disables it after responsing STALLed. 
*/ while (idx < 198 && !(ep_regs[0].ep_status & DC_STALL_SENT)) { /* wait 15.15us */ gctrl_regs->GCTRL_WNCKR = 0; idx++; } if (idx < 198) { ep_set_halt(dev, 0, false); } irq_unlock(lock_key); } static int it82xx2_ep_set_halt(const struct device *dev, struct udc_ep_config *const cfg) { const uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr); if (ep_idx == 0) { ctrl_ep_stall_workaround(dev); } else { ep_set_halt(dev, ep_idx, true); it82xx2_usb_set_ep_ctrl(dev, ep_idx, EP_READY_ENABLE, true); } LOG_DBG("Endpoint 0x%x is halted", cfg->addr); return 0; } static int it82xx2_ep_clear_halt(const struct device *dev, struct udc_ep_config *const cfg) { const uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr); ep_set_halt(dev, ep_idx, false); LOG_DBG("Endpoint 0x%x clear halted", cfg->addr); return 0; } static int it82xx2_ep_enable(const struct device *dev, struct udc_ep_config *const cfg) { const uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr); /* Configure endpoint */ if (ep_idx != 0) { if (USB_EP_DIR_IS_IN(cfg->addr)) { it82xx2_usb_set_ep_ctrl(dev, ep_idx, EP_IN_DIRECTION_SET, true); } else { it82xx2_usb_set_ep_ctrl(dev, ep_idx, EP_IN_DIRECTION_SET, false); it82xx2_usb_fifo_ctrl(dev, cfg->addr); } switch (cfg->attributes & USB_EP_TRANSFER_TYPE_MASK) { case USB_EP_TYPE_BULK: __fallthrough; case USB_EP_TYPE_INTERRUPT: it82xx2_usb_set_ep_ctrl(dev, ep_idx, EP_IOS_ENABLE, false); break; case USB_EP_TYPE_ISO: it82xx2_usb_set_ep_ctrl(dev, ep_idx, EP_IOS_ENABLE, true); break; case USB_EP_TYPE_CONTROL: __fallthrough; default: return -ENOTSUP; } } if (IT8XXX2_IS_EXTEND_ENDPOINT(ep_idx)) { uint8_t fifo_idx; fifo_idx = ep_fifo_res[ep_idx % SHARED_FIFO_NUM]; it82xx2_usb_set_ep_ctrl(dev, fifo_idx, EP_ENABLE, true); } it82xx2_usb_set_ep_ctrl(dev, ep_idx, EP_ENABLE, true); LOG_DBG("Endpoint 0x%02x is enabled", cfg->addr); return 0; } static int it82xx2_ep_disable(const struct device *dev, struct udc_ep_config *const cfg) { const uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr); it82xx2_usb_set_ep_ctrl(dev, ep_idx, 
EP_ENABLE, false); LOG_DBG("Endpoint 0x%02x is disabled", cfg->addr); return 0; } static int it82xx2_host_wakeup(const struct device *dev) { struct it82xx2_data *priv = udc_get_private(dev); const struct usb_it82xx2_config *config = dev->config; struct usb_it82xx2_regs *const usb_regs = config->base; int ret; if (udc_is_suspended(dev)) { usb_regs->dc_control = DC_GLOBAL_ENABLE | DC_FULL_SPEED_LINE_POLARITY | DC_FULL_SPEED_LINE_RATE | DC_DIRECT_CONTROL | DC_TX_LINE_STATE_DM | DC_CONNECT_TO_HOST; /* The remote wakeup device must hold the resume signal for */ /* at least 1 ms but for no more than 15 ms */ k_msleep(2); usb_regs->dc_control = DC_GLOBAL_ENABLE | DC_FULL_SPEED_LINE_POLARITY | DC_FULL_SPEED_LINE_RATE | DC_CONNECT_TO_HOST; ret = k_sem_take(&priv->suspended_sem, K_MSEC(500)); if (ret < 0) { LOG_ERR("Failed to wake up host"); } } return 0; } static int it82xx2_set_address(const struct device *dev, const uint8_t addr) { const struct usb_it82xx2_config *config = dev->config; struct usb_it82xx2_regs *const usb_regs = config->base; usb_regs->dc_address = addr & DC_ADDR_MASK; LOG_DBG("Set usb address 0x%02x", addr); return 0; } static int it82xx2_usb_dc_ip_init(const struct device *dev) { const struct usb_it82xx2_config *config = dev->config; struct usb_it82xx2_regs *const usb_regs = config->base; /* reset usb controller */ usb_regs->host_device_control = RESET_CORE; k_msleep(1); usb_regs->port0_misc_control &= ~(PULL_DOWN_EN); usb_regs->port1_misc_control &= ~(PULL_DOWN_EN); /* clear reset bit */ usb_regs->host_device_control = 0; usb_regs->dc_interrupt_status = DC_TRANS_DONE | DC_RESET_EVENT | DC_SOF_RECEIVED; usb_regs->dc_interrupt_mask = 0x00; usb_regs->dc_interrupt_mask = DC_TRANS_DONE | DC_RESET_EVENT | DC_SOF_RECEIVED; usb_regs->dc_address = DC_ADDR_NULL; return 0; } static void it82xx2_enable_sof_int(const struct device *dev, bool enable) { const struct usb_it82xx2_config *config = dev->config; struct usb_it82xx2_regs *const usb_regs = config->base; 
usb_regs->dc_interrupt_status = DC_SOF_RECEIVED; if (enable) { usb_regs->dc_interrupt_mask |= DC_SOF_RECEIVED; } else { usb_regs->dc_interrupt_mask &= ~DC_SOF_RECEIVED; } } void it82xx2_dc_reset(const struct device *dev) { const struct usb_it82xx2_config *config = dev->config; struct usb_it82xx2_regs *const usb_regs = config->base; struct it82xx2_usb_ep_regs *ep_regs = usb_regs->usb_ep_regs; struct it82xx2_usb_ep_fifo_regs *ff_regs = usb_regs->fifo_regs; struct it82xx2_data *priv = udc_get_private(dev); for (uint8_t ep_idx = 0; ep_idx < 4; ep_idx++) { ff_regs[ep_idx].ep_rx_fifo_ctrl = FIFO_FORCE_EMPTY; ff_regs[ep_idx].ep_tx_fifo_ctrl = FIFO_FORCE_EMPTY; } ep_regs[0].ep_ctrl.value = ENDPOINT_EN; usb_regs->dc_address = DC_ADDR_NULL; usb_regs->dc_interrupt_status = DC_NAK_SENT_INT | DC_SOF_RECEIVED; priv->fifo_ready[0] = false; priv->fifo_ready[1] = false; priv->fifo_ready[2] = false; k_sem_give(&priv->fifo_sem[0]); k_sem_give(&priv->fifo_sem[1]); k_sem_give(&priv->fifo_sem[2]); } static int it82xx2_xfer_in_data(const struct device *dev, uint8_t ep, struct net_buf *buf) { const uint8_t ep_idx = USB_EP_GET_IDX(ep); const struct usb_it82xx2_config *config = dev->config; struct usb_it82xx2_regs *const usb_regs = config->base; struct it82xx2_usb_ep_fifo_regs *ff_regs = usb_regs->fifo_regs; struct it82xx2_data *priv = udc_get_private(dev); struct udc_ep_config *ep_cfg = udc_get_ep_cfg(dev, ep); uint8_t fifo_idx; size_t len; fifo_idx = ep_idx > 0 ? 
ep_fifo_res[ep_idx % SHARED_FIFO_NUM] : 0; if (ep_idx == 0) { ff_regs[ep_idx].ep_tx_fifo_ctrl = FIFO_FORCE_EMPTY; } else { k_sem_take(&priv->fifo_sem[fifo_idx - 1], K_FOREVER); it82xx2_usb_fifo_ctrl(dev, ep); } len = MIN(buf->len, ep_cfg->mps); for (size_t i = 0; i < len; i++) { ff_regs[fifo_idx].ep_tx_fifo_data = buf->data[i]; } if (IT8XXX2_IS_EXTEND_ENDPOINT(ep_idx)) { it82xx2_usb_extend_ep_ctrl(dev, ep_idx, EP_READY_ENABLE, true); } it82xx2_usb_set_ep_ctrl(dev, fifo_idx, EP_READY_ENABLE, true); if (ep_idx != 0) { priv->fifo_ready[fifo_idx - 1] = true; } LOG_DBG("Writed %d packets to endpoint%d tx fifo", buf->len, ep_idx); return 0; } static int it82xx2_xfer_out_data(const struct device *dev, uint8_t ep, struct net_buf *buf) { const struct usb_it82xx2_config *config = dev->config; struct usb_it82xx2_regs *const usb_regs = config->base; struct it82xx2_usb_ep_regs *ep_regs = usb_regs->usb_ep_regs; struct it82xx2_usb_ep_fifo_regs *ff_regs = usb_regs->fifo_regs; const uint8_t ep_idx = USB_EP_GET_IDX(ep); uint8_t fifo_idx; size_t len; fifo_idx = ep_idx > 0 ? ep_fifo_res[ep_idx % SHARED_FIFO_NUM] : 0; if (ep_regs[fifo_idx].ep_status & EP_STATUS_ERROR) { LOG_WRN("endpoint%d error status 0x%02x", ep_idx, ep_regs[fifo_idx].ep_status); return -EINVAL; } len = (uint16_t)ff_regs[fifo_idx].ep_rx_fifo_dcnt_lsb + (((uint16_t)ff_regs[fifo_idx].ep_rx_fifo_dcnt_msb) << 8); len = MIN(net_buf_tailroom(buf), len); uint8_t *data_ptr = net_buf_tail(buf); for (size_t idx = 0; idx < len; idx++) { data_ptr[idx] = ff_regs[fifo_idx].ep_rx_fifo_data; } net_buf_add(buf, len); return 0; } static int work_handler_xfer_continue(const struct device *dev, uint8_t ep, struct net_buf *buf) { int ret = 0; if (USB_EP_DIR_IS_OUT(ep)) { const uint8_t ep_idx = USB_EP_GET_IDX(ep); struct it82xx2_data *priv = udc_get_private(dev); uint8_t fifo_idx; fifo_idx = ep_idx > 0 ? 
ep_fifo_res[ep_idx % SHARED_FIFO_NUM] : 0; it82xx2_usb_set_ep_ctrl(dev, ep_idx, EP_READY_ENABLE, true); if (IT8XXX2_IS_EXTEND_ENDPOINT(ep_idx)) { it82xx2_usb_set_ep_ctrl(dev, fifo_idx, EP_READY_ENABLE, true); } if (ep_idx != 0) { priv->fifo_ready[fifo_idx - 1] = true; } } else { ret = it82xx2_xfer_in_data(dev, ep, buf); } return ret; } static int work_handler_xfer_next(const struct device *dev, uint8_t ep) { struct net_buf *buf; buf = udc_buf_peek(dev, ep); if (buf == NULL) { return -ENODATA; } return work_handler_xfer_continue(dev, ep, buf); } /* * Allocate buffer and initiate a new control OUT transfer, * use successive buffer descriptor when next is true. */ static int it82xx2_ctrl_feed_dout(const struct device *dev, const size_t length) { struct udc_ep_config *cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT); struct net_buf *buf; buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, length); if (buf == NULL) { return -ENOMEM; } udc_buf_put(cfg, buf); it82xx2_usb_set_ep_ctrl(dev, 0, EP_READY_ENABLE, true); return 0; } static bool it82xx2_fake_token(const struct device *dev, uint8_t ep, uint8_t token_type) { struct it82xx2_data *priv = udc_get_private(dev); const uint8_t ep_idx = USB_EP_GET_IDX(ep); uint8_t fifo_idx; bool is_fake = true; if (ep_idx == 0) { switch (token_type) { case DC_IN_TRANS: if (priv->stall_is_sent) { return true; } is_fake = !udc_ctrl_stage_is_data_in(dev) && !udc_ctrl_stage_is_status_in(dev) && !udc_ctrl_stage_is_no_data(dev); break; case DC_OUTDATA_TRANS: is_fake = !udc_ctrl_stage_is_data_out(dev) && !udc_ctrl_stage_is_status_out(dev); break; default: LOG_ERR("Invalid token type"); break; } } else { fifo_idx = ep_fifo_res[ep_idx % SHARED_FIFO_NUM]; if (!priv->fifo_ready[fifo_idx - 1]) { is_fake = true; } else { priv->fifo_ready[fifo_idx - 1] = false; is_fake = false; } } return is_fake; } static inline int work_handler_in(const struct device *dev, uint8_t ep) { struct it82xx2_data *priv = udc_get_private(dev); struct udc_ep_config *ep_cfg; struct 
net_buf *buf; uint8_t fifo_idx; int err = 0; if (it82xx2_fake_token(dev, ep, DC_IN_TRANS)) { return 0; } if (ep != USB_CONTROL_EP_IN) { fifo_idx = ep_fifo_res[USB_EP_GET_IDX(ep) % SHARED_FIFO_NUM]; k_sem_give(&priv->fifo_sem[fifo_idx - 1]); } buf = udc_buf_peek(dev, ep); if (buf == NULL) { return -ENODATA; } ep_cfg = udc_get_ep_cfg(dev, ep); net_buf_pull(buf, MIN(buf->len, ep_cfg->mps)); it82xx2_usb_set_ep_ctrl(dev, ep, EP_DATA_SEQ_TOGGLE, true); if (buf->len) { work_handler_xfer_continue(dev, ep, buf); return 0; } if (udc_ep_buf_has_zlp(buf)) { work_handler_xfer_continue(dev, ep, buf); udc_ep_buf_clear_zlp(buf); return 0; } buf = udc_buf_get(dev, ep); if (buf == NULL) { return -ENODATA; } udc_ep_set_busy(dev, ep, false); if (ep == USB_CONTROL_EP_IN) { if (udc_ctrl_stage_is_status_in(dev) || udc_ctrl_stage_is_no_data(dev)) { /* Status stage finished, notify upper layer */ udc_ctrl_submit_status(dev, buf); } /* Update to next stage of control transfer */ udc_ctrl_update_stage(dev, buf); if (udc_ctrl_stage_is_status_out(dev)) { /* * IN transfer finished, release buffer, * Feed control OUT buffer for status stage. 
*/ net_buf_unref(buf); return it82xx2_ctrl_feed_dout(dev, 0U); } return err; } return udc_submit_ep_event(dev, buf, 0); } static inline int work_handler_setup(const struct device *dev, uint8_t ep) { struct it82xx2_data *priv = udc_get_private(dev); struct net_buf *buf; int err = 0; if (udc_ctrl_stage_is_status_out(dev)) { /* out -> setup */ buf = udc_buf_get(dev, USB_CONTROL_EP_OUT); if (buf) { udc_ep_set_busy(dev, USB_CONTROL_EP_OUT, false); net_buf_unref(buf); } } if (udc_ctrl_stage_is_status_in(dev) || udc_ctrl_stage_is_no_data(dev)) { /* in -> setup */ work_handler_in(dev, USB_CONTROL_EP_IN); } buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, sizeof(struct usb_setup_packet)); if (buf == NULL) { LOG_ERR("Failed to allocate buffer"); return -ENOMEM; } udc_ep_buf_set_setup(buf); it82xx2_xfer_out_data(dev, ep, buf); if (buf->len != sizeof(struct usb_setup_packet)) { LOG_DBG("buffer length %d read from chip", buf->len); net_buf_unref(buf); return 0; } priv->stall_is_sent = false; LOG_HEXDUMP_DBG(buf->data, buf->len, "setup:"); udc_ctrl_update_stage(dev, buf); it82xx2_usb_set_ep_ctrl(dev, ep, EP_DATA_SEQ_1, true); if (udc_ctrl_stage_is_data_out(dev)) { /* Allocate and feed buffer for data OUT stage */ LOG_DBG("s:%p|feed for -out-", buf); err = it82xx2_ctrl_feed_dout(dev, udc_data_stage_length(buf)); if (err == -ENOMEM) { err = udc_submit_ep_event(dev, buf, err); } } else if (udc_ctrl_stage_is_data_in(dev)) { udc_ctrl_submit_s_in_status(dev); } else { udc_ctrl_submit_s_status(dev); } return err; } static inline int work_handler_out(const struct device *dev, uint8_t ep) { struct net_buf *buf; int err = 0; const uint8_t ep_idx = USB_EP_GET_IDX(ep); const struct usb_it82xx2_config *config = dev->config; struct usb_it82xx2_regs *const usb_regs = config->base; struct it82xx2_usb_ep_fifo_regs *ff_regs = usb_regs->fifo_regs; struct udc_ep_config *ep_cfg; uint8_t fifo_idx; size_t len; if (it82xx2_fake_token(dev, ep, DC_OUTDATA_TRANS)) { return 0; } buf = udc_buf_get(dev, ep); if 
(buf == NULL) { return -ENODATA; } udc_ep_set_busy(dev, ep, false); fifo_idx = ep_idx > 0 ? ep_fifo_res[ep_idx % SHARED_FIFO_NUM] : 0; len = (uint16_t)ff_regs[fifo_idx].ep_rx_fifo_dcnt_lsb + (((uint16_t)ff_regs[fifo_idx].ep_rx_fifo_dcnt_msb) << 8); if (ep == USB_CONTROL_EP_OUT) { if (udc_ctrl_stage_is_status_out(dev) && len != 0) { LOG_DBG("Handle early setup token"); /* Notify upper layer */ udc_ctrl_submit_status(dev, buf); /* Update to next stage of control transfer */ udc_ctrl_update_stage(dev, buf); return 0; } } ep_cfg = udc_get_ep_cfg(dev, ep); if (len > ep_cfg->mps) { LOG_ERR("Failed to handle this packet due to the packet size"); return -ENOBUFS; } it82xx2_xfer_out_data(dev, ep, buf); if (ep == USB_CONTROL_EP_OUT) { if (udc_ctrl_stage_is_status_out(dev)) { /* Status stage finished, notify upper layer */ udc_ctrl_submit_status(dev, buf); } /* Update to next stage of control transfer */ udc_ctrl_update_stage(dev, buf); if (udc_ctrl_stage_is_status_in(dev)) { it82xx2_usb_set_ep_ctrl(dev, ep, EP_DATA_SEQ_1, true); err = udc_ctrl_submit_s_out_status(dev, buf); } } else { err = udc_submit_ep_event(dev, buf, 0); } return err; } static void xfer_work_handler(const struct device *dev) { while (true) { struct it82xx2_ep_event evt; int err = 0; k_msgq_get(&evt_msgq, &evt, K_FOREVER); switch (evt.event) { case IT82xx2_EVT_SETUP_TOKEN: err = work_handler_setup(evt.dev, evt.ep); break; case IT82xx2_EVT_IN_TOKEN: err = work_handler_in(evt.dev, evt.ep); break; case IT82xx2_EVT_OUT_TOKEN: err = work_handler_out(evt.dev, evt.ep); break; case IT82xx2_EVT_XFER: break; default: LOG_ERR("Unknown event type 0x%x", evt.event); err = -EINVAL; break; } if (err) { udc_submit_event(evt.dev, UDC_EVT_ERROR, err); } if (evt.ep != USB_CONTROL_EP_OUT && !udc_ep_is_busy(evt.dev, evt.ep)) { if (work_handler_xfer_next(evt.dev, evt.ep) == 0) { udc_ep_set_busy(evt.dev, evt.ep, true); } } } } static inline bool it82xx2_check_ep0_stall(const struct device *dev, const uint8_t ep_idx, const 
uint8_t transtype) { const struct usb_it82xx2_config *config = dev->config; struct usb_it82xx2_regs *const usb_regs = config->base; struct it82xx2_usb_ep_regs *ep_regs = usb_regs->usb_ep_regs; struct it82xx2_usb_ep_fifo_regs *ff_regs = usb_regs->fifo_regs; if (ep_idx != 0) { return false; } /* Check if the stall bit is set */ if (ep_regs[ep_idx].ep_ctrl.fields.send_stall_bit) { ep_set_halt(dev, ep_idx, false); if (transtype == DC_SETUP_TRANS) { ff_regs[ep_idx].ep_rx_fifo_ctrl = FIFO_FORCE_EMPTY; } LOG_ERR("Cleared stall bit"); return true; } /* Check if the IN transaction is STALL */ if ((transtype == DC_IN_TRANS) && (ep_regs[ep_idx].ep_status & DC_STALL_SENT)) { return true; } return false; } static void it82xx2_usb_xfer_done(const struct device *dev) { const struct usb_it82xx2_config *config = dev->config; struct usb_it82xx2_regs *const usb_regs = config->base; struct it82xx2_usb_ep_regs *ep_regs = usb_regs->usb_ep_regs; struct epn_ext_ctrl_regs *epn_ext_ctrl = usb_regs->fifo_regs[EP_EXT_REGS_DX].ext_0_3.epn_ext_ctrl; for (uint8_t fifo_idx = 0; fifo_idx < 4; fifo_idx++) { uint8_t ep, ep_idx, ep_ctrl, transtype; ep_ctrl = ep_regs[fifo_idx].ep_ctrl.value; transtype = ep_regs[fifo_idx].ep_transtype_sts & DC_ALL_TRANS; if (!(ep_ctrl & ENDPOINT_EN) || (ep_ctrl & ENDPOINT_RDY)) { continue; } if (fifo_idx == 0) { ep_idx = 0; if (it82xx2_check_ep0_stall(dev, ep_idx, transtype)) { continue; } } else { ep_idx = (epn_ext_ctrl[fifo_idx].epn_ext_ctrl2 & COMPLETED_TRANS) >> 4; if (ep_idx == 0) { continue; } } switch (transtype) { case DC_SETUP_TRANS: /* SETUP transaction done */ if (ep_idx != 0) { break; } it82xx2_event_submit(dev, ep_idx, IT82xx2_EVT_SETUP_TOKEN); break; case DC_IN_TRANS: /* IN transaction done */ ep = USB_EP_DIR_IN | ep_idx; it82xx2_event_submit(dev, ep, IT82xx2_EVT_IN_TOKEN); break; case DC_OUTDATA_TRANS: /* OUT transaction done */ ep = USB_EP_DIR_OUT | ep_idx; it82xx2_event_submit(dev, ep, IT82xx2_EVT_OUT_TOKEN); break; default: LOG_ERR("Unknown 
transaction type"); break; } } } static void it82xx2_usb_dc_isr(const void *arg) { const struct device *dev = arg; const struct usb_it82xx2_config *config = dev->config; struct usb_it82xx2_regs *const usb_regs = config->base; struct it82xx2_data *priv = udc_get_private(dev); uint8_t status = usb_regs->dc_interrupt_status & usb_regs->dc_interrupt_mask; /* mask non enable int */ /* reset event */ if (status & DC_RESET_EVENT) { if ((usb_regs->dc_line_status & RX_LINE_STATE_MASK) == RX_LINE_RESET) { it82xx2_dc_reset(dev); usb_regs->dc_interrupt_status = DC_RESET_EVENT; udc_submit_event(dev, UDC_EVT_RESET, 0); return; } usb_regs->dc_interrupt_status = DC_RESET_EVENT; } /* sof received */ if (status & DC_SOF_RECEIVED) { it82xx2_enable_sof_int(dev, false); k_work_reschedule(&priv->suspended_work, K_MSEC(5)); } /* transaction done */ if (status & DC_TRANS_DONE) { /* clear interrupt before new transaction */ usb_regs->dc_interrupt_status = DC_TRANS_DONE; if (udc_is_suspended(dev) && udc_is_enabled(dev)) { udc_set_suspended(dev, false); udc_submit_event(dev, UDC_EVT_RESUME, 0); k_sem_give(&priv->suspended_sem); } it82xx2_usb_xfer_done(dev); return; } } static void suspended_handler(struct k_work *item) { struct k_work_delayable *dwork = k_work_delayable_from_work(item); struct it82xx2_data *priv = CONTAINER_OF(dwork, struct it82xx2_data, suspended_work); const struct device *dev = priv->dev; const struct usb_it82xx2_config *config = dev->config; struct usb_it82xx2_regs *const usb_regs = config->base; if (usb_regs->dc_interrupt_status & DC_SOF_RECEIVED) { usb_regs->dc_interrupt_status = DC_SOF_RECEIVED; if (udc_is_suspended(dev) && udc_is_enabled(dev)) { udc_set_suspended(dev, false); udc_submit_event(dev, UDC_EVT_RESUME, 0); k_sem_give(&priv->suspended_sem); } k_work_reschedule(&priv->suspended_work, K_MSEC(5)); return; } it82xx2_enable_sof_int(dev, true); if (!udc_is_suspended(dev) && udc_is_enabled(dev)) { udc_set_suspended(dev, true); udc_submit_event(dev, 
UDC_EVT_SUSPEND, 0); it82xx2_enable_wu_irq(dev, true); it82xx2_enable_standby_state(true); k_sem_reset(&priv->suspended_sem); } } static int it82xx2_enable(const struct device *dev) { const struct usb_it82xx2_config *config = dev->config; struct usb_it82xx2_regs *const usb_regs = config->base; struct it82xx2_data *priv = udc_get_private(dev); k_sem_init(&priv->suspended_sem, 0, 1); k_work_init_delayable(&priv->suspended_work, suspended_handler); /* Initialize FIFO ready status */ priv->fifo_ready[0] = false; priv->fifo_ready[1] = false; priv->fifo_ready[2] = false; /* Initialize FIFO semaphore */ k_sem_init(&priv->fifo_sem[0], 1, 1); k_sem_init(&priv->fifo_sem[1], 1, 1); k_sem_init(&priv->fifo_sem[2], 1, 1); usb_regs->dc_control = DC_GLOBAL_ENABLE | DC_FULL_SPEED_LINE_POLARITY | DC_FULL_SPEED_LINE_RATE | DC_CONNECT_TO_HOST; /* Enable USB D+ and USB interrupts */ it82xx2_enable_wu_irq(dev, true); irq_enable(config->usb_irq); return 0; } static int it82xx2_disable(const struct device *dev) { const struct usb_it82xx2_config *config = dev->config; struct usb_it82xx2_regs *const usb_regs = config->base; irq_disable(config->usb_irq); /* stop pull-up D+ D-*/ usb_regs->dc_control &= ~DC_CONNECT_TO_HOST; return 0; } static int it82xx2_init(const struct device *dev) { const struct usb_it82xx2_config *config = dev->config; struct gctrl_it8xxx2_regs *const gctrl_regs = GCTRL_IT8XXX2_REGS_BASE; int ret; /* * Disable USB debug path , prevent CPU enter * JTAG mode and then reset by USB command. 
*/ gctrl_regs->GCTRL_MCCR &= ~(IT8XXX2_GCTRL_MCCR_USB_EN); gctrl_regs->gctrl_pmer2 |= IT8XXX2_GCTRL_PMER2_USB_PAD_EN; it82xx2_usb_dc_ip_init(dev); ret = udc_ep_enable_internal(dev, USB_CONTROL_EP_OUT, USB_EP_TYPE_CONTROL, config->ep_cfg_out[0].caps.mps, 0); if (ret) { LOG_ERR("Failed to enable ep 0x%02x", USB_CONTROL_EP_OUT); return ret; } ret = udc_ep_enable_internal(dev, USB_CONTROL_EP_IN, USB_EP_TYPE_CONTROL, config->ep_cfg_in[0].caps.mps, 0); if (ret) { LOG_ERR("Failed to enable ep 0x%02x", USB_CONTROL_EP_IN); return ret; } return 0; } static int it82xx2_shutdown(const struct device *dev) { if (udc_ep_disable_internal(dev, USB_CONTROL_EP_OUT)) { LOG_ERR("Failed to disable control endpoint"); return -EIO; } if (udc_ep_disable_internal(dev, USB_CONTROL_EP_IN)) { LOG_ERR("Failed to disable control endpoint"); return -EIO; } return 0; } static int it82xx2_lock(const struct device *dev) { return udc_lock_internal(dev, K_FOREVER); } static int it82xx2_unlock(const struct device *dev) { return udc_unlock_internal(dev); } static const struct udc_api it82xx2_api = { .ep_enqueue = it82xx2_ep_enqueue, .ep_dequeue = it82xx2_ep_dequeue, .ep_set_halt = it82xx2_ep_set_halt, .ep_clear_halt = it82xx2_ep_clear_halt, .ep_try_config = NULL, .ep_enable = it82xx2_ep_enable, .ep_disable = it82xx2_ep_disable, .host_wakeup = it82xx2_host_wakeup, .set_address = it82xx2_set_address, .enable = it82xx2_enable, .disable = it82xx2_disable, .init = it82xx2_init, .shutdown = it82xx2_shutdown, .lock = it82xx2_lock, .unlock = it82xx2_unlock, }; static int it82xx2_usb_driver_preinit(const struct device *dev) { const struct usb_it82xx2_config *config = dev->config; struct udc_data *data = dev->data; struct it82xx2_data *priv = udc_get_private(dev); int err; k_mutex_init(&data->mutex); k_fifo_init(&priv->fifo); err = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (err < 0) { LOG_ERR("Failed to configure usb pins"); return err; } for (int i = 0; i < MAX_NUM_ENDPOINTS; i++) { 
config->ep_cfg_out[i].caps.out = 1; if (i == 0) { config->ep_cfg_out[i].caps.control = 1; config->ep_cfg_out[i].caps.mps = USB_CONTROL_EP_MPS; } else if ((i % 3) == 2) { config->ep_cfg_out[i].caps.bulk = 1; config->ep_cfg_out[i].caps.interrupt = 1; config->ep_cfg_out[i].caps.iso = 1; config->ep_cfg_out[i].caps.mps = 64; } config->ep_cfg_out[i].addr = USB_EP_DIR_OUT | i; err = udc_register_ep(dev, &config->ep_cfg_out[i]); if (err != 0) { LOG_ERR("Failed to register endpoint"); return err; } } for (int i = 0; i < MAX_NUM_ENDPOINTS; i++) { config->ep_cfg_in[i].caps.in = 1; if (i == 0) { config->ep_cfg_in[i].caps.control = 1; config->ep_cfg_in[i].caps.mps = USB_CONTROL_EP_MPS; } else if ((i % 3) != 2) { config->ep_cfg_in[i].caps.bulk = 1; config->ep_cfg_in[i].caps.interrupt = 1; config->ep_cfg_in[i].caps.iso = 1; config->ep_cfg_in[i].caps.mps = 64; } config->ep_cfg_in[i].addr = USB_EP_DIR_IN | i; err = udc_register_ep(dev, &config->ep_cfg_in[i]); if (err != 0) { LOG_ERR("Failed to register endpoint"); return err; } } data->caps.rwup = true; data->caps.mps0 = UDC_MPS0_64; priv->dev = dev; config->make_thread(dev); /* Initializing WU (USB D+) */ it8xxx2_usb_dc_wuc_init(dev); /* Connect USB interrupt */ irq_connect_dynamic(config->usb_irq, 0, it82xx2_usb_dc_isr, dev, 0); return 0; } #define IT82xx2_USB_DEVICE_DEFINE(n) \ K_KERNEL_STACK_DEFINE(udc_it82xx2_stack_##n, CONFIG_UDC_IT82xx2_STACK_SIZE); \ \ static void udc_it82xx2_thread_##n(void *dev, void *arg1, void *arg2) \ { \ ARG_UNUSED(arg1); \ ARG_UNUSED(arg2); \ xfer_work_handler(dev); \ } \ \ static void udc_it82xx2_make_thread_##n(const struct device *dev) \ { \ struct it82xx2_data *priv = udc_get_private(dev); \ \ k_thread_create(&priv->thread_data, udc_it82xx2_stack_##n, \ K_THREAD_STACK_SIZEOF(udc_it82xx2_stack_##n), \ udc_it82xx2_thread_##n, (void *)dev, NULL, NULL, K_PRIO_COOP(8), \ 0, K_NO_WAIT); \ k_thread_name_set(&priv->thread_data, dev->name); \ } \ \ PINCTRL_DT_INST_DEFINE(n); \ \ static struct 
udc_ep_config ep_cfg_out[MAX_NUM_ENDPOINTS]; \ static struct udc_ep_config ep_cfg_in[MAX_NUM_ENDPOINTS]; \ \ static struct usb_it82xx2_config udc_cfg_##n = { \ .base = (struct usb_it82xx2_regs *)DT_INST_REG_ADDR(n), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .wuc = {.dev = IT8XXX2_DEV_WUC(0, n), .mask = IT8XXX2_DEV_WUC_MASK(0, n)}, \ .usb_irq = DT_INST_IRQ_BY_IDX(n, 0, irq), \ .wu_irq = DT_INST_IRQ_BY_IDX(n, 1, irq), \ .ep_cfg_in = ep_cfg_out, \ .ep_cfg_out = ep_cfg_in, \ .make_thread = udc_it82xx2_make_thread_##n, \ }; \ \ static struct it82xx2_data priv_data_##n = {}; \ \ static struct udc_data udc_data_##n = { \ .mutex = Z_MUTEX_INITIALIZER(udc_data_##n.mutex), \ .priv = &priv_data_##n, \ }; \ \ DEVICE_DT_INST_DEFINE(n, it82xx2_usb_driver_preinit, NULL, &udc_data_##n, &udc_cfg_##n, \ POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &it82xx2_api); DT_INST_FOREACH_STATUS_OKAY(IT82xx2_USB_DEVICE_DEFINE) ```
/content/code_sandbox/drivers/usb/udc/udc_it82xx2.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
12,637
```unknown config UDC_KINETIS bool "Kinetis USB device controller driver" imply UDC_WORKQUEUE default y depends on DT_HAS_NXP_KINETIS_USBD_ENABLED help Kinetis USB device controller criver. config UDC_KINETIS_EVENT_COUNT int "Number or blocks in event slab" depends on UDC_KINETIS range 4 16 default 4 help Number of blocks in slab for internal endpoint events. ```
/content/code_sandbox/drivers/usb/udc/Kconfig.kinetis
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
105
```unknown config UDC_VIRTUAL bool "Virtual USB device controller driver" select UVB default y depends on DT_HAS_ZEPHYR_UDC_VIRTUAL_ENABLED help Virtual USB device controller driver. config UDC_VIRTUAL_STACK_SIZE int "Virtual controller driver internal thread stack size" depends on UDC_VIRTUAL default 512 help Virtual device controller driver internal thread stack size. config UDC_VIRTUAL_THREAD_PRIORITY int "Virtual controller driver thread priority" depends on UDC_VIRTUAL default 8 help Virtual device controller driver thread priority. ```
/content/code_sandbox/drivers/usb/udc/Kconfig.virtual
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
121
```unknown config UDC_NRF bool "Nordic Semiconductor USB device controller driver" default y depends on DT_HAS_NORDIC_NRF_USBD_ENABLED select NRF_USBD_COMMON select NRFX_POWER help nRF USB device controller driver. if UDC_NRF config UDC_NRF_THREAD_STACK_SIZE int "nRF UDC driver internal thread stack size" default 512 help Size of the stack used in the driver for nRF USBD ISR event handling. config UDC_NRF_MAX_QMESSAGES int "nRF UDC driver maximum number of ISR event messages" range 4 64 default 8 help Maximum number of messages for handling of nRF USBD ISR events. endif # UDC_NRF ```
/content/code_sandbox/drivers/usb/udc/Kconfig.nrf
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
166
```unknown config UDC_NUMAKER bool "Nuvoton NuMaker USB 1.1 device controller" default y depends on DT_HAS_NUVOTON_NUMAKER_USBD_ENABLED help Enable Nuvoton NuMaker USB 1.1 device controller driver if UDC_NUMAKER config UDC_NUMAKER_MSG_QUEUE_SIZE int "UDC NuMaker message queue size" default 32 help Maximum number of messages the driver can queue for interrupt bottom half processing. config UDC_NUMAKER_THREAD_STACK_SIZE int "UDC NuMaker driver internal thread stack size" default 1536 help Size of the stack for the driver internal thread. config UDC_NUMAKER_THREAD_PRIORITY int "UDC NuMaker driver internal thread priority" default 8 help Priority of the driver internal thread. endif # UDC_NUMAKER ```
/content/code_sandbox/drivers/usb/udc/Kconfig.numaker
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
184
```c /* * */ #define DT_DRV_COMPAT nxp_lpcip3511 #include <soc.h> #include <string.h> #include <stdio.h> #include <zephyr/device.h> #include <zephyr/kernel.h> #include <zephyr/sys/byteorder.h> #include <zephyr/drivers/usb/udc.h> #include <zephyr/drivers/pinctrl.h> #include "udc_common.h" #include "usb.h" #include "usb_device_config.h" #include "usb_device_mcux_drv_port.h" #include "usb_device_lpcip3511.h" #include "usb_phy.h" #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(udc_mcux, CONFIG_UDC_DRIVER_LOG_LEVEL); /* * There is no real advantage to change control endpoint size * but we can use it for testing UDC driver API and higher layers. */ #define USB_MCUX_MPS0 UDC_MPS0_64 #define USB_MCUX_EP0_SIZE 64 #define PRV_DATA_HANDLE(_handle) CONTAINER_OF(_handle, struct udc_mcux_data, mcux_device) struct udc_mcux_config { const usb_device_controller_interface_struct_t *mcux_if; void (*irq_enable_func)(const struct device *dev); void (*irq_disable_func)(const struct device *dev); size_t num_of_eps; struct udc_ep_config *ep_cfg_in; struct udc_ep_config *ep_cfg_out; uintptr_t base; const struct pinctrl_dev_config *pincfg; usb_phy_config_struct_t *phy_config; }; struct udc_mcux_data { const struct device *dev; usb_device_struct_t mcux_device; uint8_t controller_id; /* 0xFF is invalid value */ }; static int udc_mcux_control(const struct device *dev, usb_device_control_type_t command, void *param) { const struct udc_mcux_config *config = dev->config; const usb_device_controller_interface_struct_t *mcux_if = config->mcux_if; struct udc_mcux_data *priv = udc_get_private(dev); usb_status_t status; status = mcux_if->deviceControl(priv->mcux_device.controllerHandle, command, param); if (status != kStatus_USB_Success) { return -ENOMEM; } return 0; } /* If ep is busy, return busy. 
Otherwise feed the buf to controller */ static int udc_mcux_ep_feed(const struct device *dev, struct udc_ep_config *const cfg, struct net_buf *const buf) { const struct udc_mcux_config *config = dev->config; const usb_device_controller_interface_struct_t *mcux_if = config->mcux_if; struct udc_mcux_data *priv = udc_get_private(dev); usb_status_t status = kStatus_USB_Success; uint8_t *data; uint32_t len; unsigned int key; usb_device_endpoint_status_struct_t ep_status; ep_status.endpointAddress = cfg->addr; udc_mcux_control(dev, kUSB_DeviceControlGetEndpointStatus, &ep_status); if (ep_status.endpointStatus == kUSB_DeviceEndpointStateStalled) { return -EACCES; /* stalled */ } key = irq_lock(); if (!udc_ep_is_busy(dev, cfg->addr)) { udc_ep_set_busy(dev, cfg->addr, true); irq_unlock(key); if (USB_EP_DIR_IS_OUT(cfg->addr)) { len = net_buf_tailroom(buf); data = net_buf_tail(buf); status = mcux_if->deviceRecv(priv->mcux_device.controllerHandle, cfg->addr, data, len); } else { len = buf->len; data = buf->data; status = mcux_if->deviceSend(priv->mcux_device.controllerHandle, cfg->addr, data, len); } key = irq_lock(); if (status != kStatus_USB_Success) { udc_ep_set_busy(dev, cfg->addr, false); } irq_unlock(key); } else { irq_unlock(key); return -EBUSY; } return (status == kStatus_USB_Success ? 0 : -EIO); } /* return success if the ep is busy or stalled. */ static int udc_mcux_ep_try_feed(const struct device *dev, struct udc_ep_config *const cfg) { struct net_buf *feed_buf; feed_buf = udc_buf_peek(dev, cfg->addr); if (feed_buf) { int ret = udc_mcux_ep_feed(dev, cfg, feed_buf); return ((ret == -EBUSY || ret == -EACCES || ret == 0) ? 0 : -EIO); } return 0; } /* * Allocate buffer and initiate a new control OUT transfer. 
*/ static int udc_mcux_ctrl_feed_dout(const struct device *dev, const size_t length) { struct net_buf *buf; struct udc_ep_config *cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT); int ret; buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, length); if (buf == NULL) { return -ENOMEM; } k_fifo_put(&cfg->fifo, buf); ret = udc_mcux_ep_feed(dev, cfg, buf); if (ret) { net_buf_unref(buf); return ret; } return 0; } static int udc_mcux_handler_setup(const struct device *dev, struct usb_setup_packet *setup) { int err; struct net_buf *buf; LOG_DBG("setup packet"); buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, sizeof(struct usb_setup_packet)); if (buf == NULL) { LOG_ERR("Failed to allocate for setup"); return -EIO; } udc_ep_buf_set_setup(buf); memcpy(buf->data, setup, 8); net_buf_add(buf, 8); if (setup->RequestType.type == USB_REQTYPE_TYPE_STANDARD && setup->RequestType.direction == USB_REQTYPE_DIR_TO_DEVICE && setup->bRequest == USB_SREQ_SET_ADDRESS && setup->wLength == 0) { udc_mcux_control(dev, kUSB_DeviceControlPreSetDeviceAddress, &setup->wValue); } /* Update to next stage of control transfer */ udc_ctrl_update_stage(dev, buf); if (!buf->len) { return -EIO; } if (udc_ctrl_stage_is_data_out(dev)) { /* Allocate and feed buffer for data OUT stage */ LOG_DBG("s:%p|feed for -out-", buf); err = udc_mcux_ctrl_feed_dout(dev, udc_data_stage_length(buf)); if (err == -ENOMEM) { err = udc_submit_ep_event(dev, buf, err); } } else if (udc_ctrl_stage_is_data_in(dev)) { err = udc_ctrl_submit_s_in_status(dev); } else { err = udc_ctrl_submit_s_status(dev); } return err; } static int udc_mcux_handler_ctrl_out(const struct device *dev, struct net_buf *buf, uint8_t *mcux_buf, uint16_t mcux_len) { int err = 0; uint32_t len; len = MIN(net_buf_tailroom(buf), mcux_len); net_buf_add(buf, len); if (udc_ctrl_stage_is_status_out(dev)) { /* Update to next stage of control transfer */ udc_ctrl_update_stage(dev, buf); /* Status stage finished, notify upper layer */ err = udc_ctrl_submit_status(dev, buf); } else 
{ /* Update to next stage of control transfer */ udc_ctrl_update_stage(dev, buf); } if (udc_ctrl_stage_is_status_in(dev)) { err = udc_ctrl_submit_s_out_status(dev, buf); } return err; } static int udc_mcux_handler_ctrl_in(const struct device *dev, struct net_buf *buf, uint8_t *mcux_buf, uint16_t mcux_len) { int err = 0; uint32_t len; len = MIN(buf->len, mcux_len); buf->data += len; buf->len -= len; if (udc_ctrl_stage_is_status_in(dev) || udc_ctrl_stage_is_no_data(dev)) { /* Status stage finished, notify upper layer */ err = udc_ctrl_submit_status(dev, buf); } /* Update to next stage of control transfer */ udc_ctrl_update_stage(dev, buf); if (udc_ctrl_stage_is_status_out(dev)) { /* * IN transfer finished, release buffer, * control OUT buffer should be already fed. */ net_buf_unref(buf); err = udc_mcux_ctrl_feed_dout(dev, 0u); } return err; } static int udc_mcux_handler_non_ctrl_in(const struct device *dev, uint8_t ep, struct net_buf *buf, uint8_t *mcux_buf, uint16_t mcux_len) { int err; uint32_t len; len = MIN(buf->len, mcux_len); buf->data += len; buf->len -= len; err = udc_submit_ep_event(dev, buf, 0); udc_mcux_ep_try_feed(dev, udc_get_ep_cfg(dev, ep)); return err; } static int udc_mcux_handler_non_ctrl_out(const struct device *dev, uint8_t ep, struct net_buf *buf, uint8_t *mcux_buf, uint16_t mcux_len) { int err; uint32_t len; len = MIN(net_buf_tailroom(buf), mcux_len); net_buf_add(buf, len); err = udc_submit_ep_event(dev, buf, 0); udc_mcux_ep_try_feed(dev, udc_get_ep_cfg(dev, ep)); return err; } static int udc_mcux_handler_out(const struct device *dev, uint8_t ep, uint8_t *mcux_buf, uint16_t mcux_len) { int err; struct net_buf *buf; unsigned int key; buf = udc_buf_get(dev, ep); key = irq_lock(); udc_ep_set_busy(dev, ep, false); irq_unlock(key); if (buf == NULL) { udc_submit_event(dev, UDC_EVT_ERROR, -ENOBUFS); return -ENOBUFS; } if (ep == USB_CONTROL_EP_OUT) { err = udc_mcux_handler_ctrl_out(dev, buf, mcux_buf, mcux_len); } else { err = 
udc_mcux_handler_non_ctrl_out(dev, ep, buf, mcux_buf, mcux_len); } return err; } /* return true - zlp is feed; false - no zlp */ static bool udc_mcux_handler_zlt(const struct device *dev, uint8_t ep, struct net_buf *buf, uint16_t mcux_len) { const struct udc_mcux_config *config = dev->config; const usb_device_controller_interface_struct_t *mcux_if = config->mcux_if; struct udc_mcux_data *priv = udc_get_private(dev); /* The whole transfer is already done by MCUX controller driver. */ if (mcux_len >= buf->len) { if (udc_ep_buf_has_zlp(buf)) { usb_status_t status; udc_ep_buf_clear_zlp(buf); status = mcux_if->deviceRecv(priv->mcux_device.controllerHandle, ep, NULL, 0); if (status != kStatus_USB_Success) { udc_submit_event(dev, UDC_EVT_ERROR, -EIO); return false; } return true; } } return false; } static int udc_mcux_handler_in(const struct device *dev, uint8_t ep, uint8_t *mcux_buf, uint16_t mcux_len) { int err; struct net_buf *buf; unsigned int key; buf = udc_buf_peek(dev, ep); if (buf == NULL) { udc_submit_event(dev, UDC_EVT_ERROR, -ENOBUFS); return -ENOBUFS; } if (udc_mcux_handler_zlt(dev, ep, buf, mcux_len)) { return 0; } buf = udc_buf_get(dev, ep); key = irq_lock(); udc_ep_set_busy(dev, ep, false); irq_unlock(key); if (buf == NULL) { udc_submit_event(dev, UDC_EVT_ERROR, -ENOBUFS); return -ENOBUFS; } if (ep == USB_CONTROL_EP_IN) { err = udc_mcux_handler_ctrl_in(dev, buf, mcux_buf, mcux_len); } else { err = udc_mcux_handler_non_ctrl_in(dev, ep, buf, mcux_buf, mcux_len); } return err; } /* NXP MCUX controller driver notify transfers/status through this interface */ usb_status_t USB_DeviceNotificationTrigger(void *handle, void *msg) { usb_device_callback_message_struct_t *mcux_msg = msg; uint8_t ep; usb_device_notification_t mcux_notify; struct udc_mcux_data *priv; const struct device *dev; usb_status_t mcux_status = kStatus_USB_Success; int err = 0; if ((NULL == msg) || (NULL == handle)) { return kStatus_USB_InvalidHandle; } mcux_notify = 
(usb_device_notification_t)mcux_msg->code; priv = (struct udc_mcux_data *)(PRV_DATA_HANDLE(handle)); dev = priv->dev; switch (mcux_notify) { case kUSB_DeviceNotifyBusReset: struct udc_ep_config *cfg; udc_mcux_control(dev, kUSB_DeviceControlSetDefaultStatus, NULL); cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT); if (cfg->stat.enabled) { udc_ep_disable_internal(dev, USB_CONTROL_EP_OUT); } cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_IN); if (cfg->stat.enabled) { udc_ep_disable_internal(dev, USB_CONTROL_EP_IN); } if (udc_ep_enable_internal(dev, USB_CONTROL_EP_OUT, USB_EP_TYPE_CONTROL, USB_MCUX_EP0_SIZE, 0)) { LOG_ERR("Failed to enable control endpoint"); return -EIO; } if (udc_ep_enable_internal(dev, USB_CONTROL_EP_IN, USB_EP_TYPE_CONTROL, USB_MCUX_EP0_SIZE, 0)) { LOG_ERR("Failed to enable control endpoint"); return -EIO; } udc_submit_event(dev, UDC_EVT_RESET, 0); break; case kUSB_DeviceNotifyError: udc_submit_event(dev, UDC_EVT_ERROR, -EIO); break; case kUSB_DeviceNotifySuspend: udc_set_suspended(dev, true); udc_submit_event(dev, UDC_EVT_SUSPEND, 0); break; case kUSB_DeviceNotifyResume: udc_set_suspended(dev, false); udc_submit_event(dev, UDC_EVT_RESUME, 0); break; case kUSB_DeviceNotifyLPMSleep: break; case kUSB_DeviceNotifyDetach: udc_submit_event(dev, UDC_EVT_VBUS_REMOVED, 0); break; case kUSB_DeviceNotifyAttach: udc_submit_event(dev, UDC_EVT_VBUS_READY, 0); break; case kUSB_DeviceNotifySOF: udc_submit_event(dev, UDC_EVT_SOF, 0); break; default: ep = mcux_msg->code; if (mcux_msg->isSetup) { struct usb_setup_packet *setup = (struct usb_setup_packet *)mcux_msg->buffer; err = udc_mcux_handler_setup(dev, setup); } else if (USB_EP_DIR_IS_IN(ep)) { err = udc_mcux_handler_in(dev, ep, mcux_msg->buffer, mcux_msg->length); } else { err = udc_mcux_handler_out(dev, ep, mcux_msg->buffer, mcux_msg->length); } break; } if (unlikely(err)) { udc_submit_event(dev, UDC_EVT_ERROR, err); mcux_status = kStatus_USB_Error; } return mcux_status; } static void udc_mcux_isr(const struct device 
*dev) { struct udc_mcux_data *priv = udc_get_private(dev); USB_DeviceLpcIp3511IsrFunction((void *)(&priv->mcux_device)); } /* Return actual USB device speed */ static enum udc_bus_speed udc_mcux_device_speed(const struct device *dev) { int err; uint8_t mcux_speed; err = udc_mcux_control(dev, kUSB_DeviceControlGetSpeed, &mcux_speed); if (err) { /* * In the current version of all NXP USB device drivers, * no error is returned if the parameter is correct. */ return UDC_BUS_SPEED_FS; } switch (mcux_speed) { case USB_SPEED_HIGH: return UDC_BUS_SPEED_HS; case USB_SPEED_LOW: __ASSERT(false, "Low speed mode not supported"); __fallthrough; case USB_SPEED_FULL: __fallthrough; default: return UDC_BUS_SPEED_FS; } } static int udc_mcux_ep_enqueue(const struct device *dev, struct udc_ep_config *const cfg, struct net_buf *const buf) { udc_buf_put(cfg, buf); if (cfg->stat.halted) { LOG_DBG("ep 0x%02x halted", cfg->addr); return 0; } return udc_mcux_ep_try_feed(dev, cfg); } static int udc_mcux_ep_dequeue(const struct device *dev, struct udc_ep_config *const cfg) { struct net_buf *buf; unsigned int key; cfg->stat.halted = false; buf = udc_buf_get_all(dev, cfg->addr); if (buf) { udc_submit_ep_event(dev, buf, -ECONNABORTED); } key = irq_lock(); udc_ep_set_busy(dev, cfg->addr, false); irq_unlock(key); return 0; } static int udc_mcux_ep_set_halt(const struct device *dev, struct udc_ep_config *const cfg) { return udc_mcux_control(dev, kUSB_DeviceControlEndpointStall, &cfg->addr); } static int udc_mcux_ep_clear_halt(const struct device *dev, struct udc_ep_config *const cfg) { (void)udc_mcux_control(dev, kUSB_DeviceControlEndpointUnstall, &cfg->addr); /* transfer is enqueued after stalled */ return udc_mcux_ep_try_feed(dev, cfg); } static int udc_mcux_ep_enable(const struct device *dev, struct udc_ep_config *const cfg) { usb_device_endpoint_init_struct_t ep_init; LOG_DBG("Enable ep 0x%02x", cfg->addr); ep_init.zlt = 0U; ep_init.interval = cfg->interval; ep_init.endpointAddress = cfg->addr; 
ep_init.maxPacketSize = cfg->mps; switch (cfg->attributes & USB_EP_TRANSFER_TYPE_MASK) { case USB_EP_TYPE_CONTROL: ep_init.transferType = USB_ENDPOINT_CONTROL; break; case USB_EP_TYPE_BULK: ep_init.transferType = USB_ENDPOINT_BULK; break; case USB_EP_TYPE_INTERRUPT: ep_init.transferType = USB_ENDPOINT_INTERRUPT; break; case USB_EP_TYPE_ISO: ep_init.transferType = USB_ENDPOINT_ISOCHRONOUS; break; default: return -EINVAL; } return udc_mcux_control(dev, kUSB_DeviceControlEndpointInit, &ep_init); } static int udc_mcux_ep_disable(const struct device *dev, struct udc_ep_config *const cfg) { LOG_DBG("Disable ep 0x%02x", cfg->addr); return udc_mcux_control(dev, kUSB_DeviceControlEndpointDeinit, &cfg->addr); } static int udc_mcux_host_wakeup(const struct device *dev) { return -ENOTSUP; } static int udc_mcux_set_address(const struct device *dev, const uint8_t addr) { uint8_t temp_addr = addr; return udc_mcux_control(dev, kUSB_DeviceControlSetDeviceAddress, &temp_addr); } static int udc_mcux_enable(const struct device *dev) { return udc_mcux_control(dev, kUSB_DeviceControlRun, NULL); } static int udc_mcux_disable(const struct device *dev) { return udc_mcux_control(dev, kUSB_DeviceControlStop, NULL); } static int udc_mcux_init(const struct device *dev) { const struct udc_mcux_config *config = dev->config; const usb_device_controller_interface_struct_t *mcux_if = config->mcux_if; struct udc_mcux_data *priv = udc_get_private(dev); usb_status_t status; if (priv->controller_id == 0xFFu) { return -ENOMEM; } #ifdef CONFIG_DT_HAS_NXP_USBPHY_ENABLED if (config->phy_config != NULL) { USB_EhciPhyInit(priv->controller_id, 0u, (usb_phy_config_struct_t *)&config->phy_config); } #endif /* Init MCUX USB device driver. 
*/ status = mcux_if->deviceInit(priv->controller_id, &priv->mcux_device, &(priv->mcux_device.controllerHandle)); if (status != kStatus_USB_Success) { return -ENOMEM; } /* enable USB interrupt */ config->irq_enable_func(dev); LOG_DBG("Initialized USB controller %x", (uint32_t)config->base); return 0; } static int udc_mcux_shutdown(const struct device *dev) { const struct udc_mcux_config *config = dev->config; const usb_device_controller_interface_struct_t *mcux_if = config->mcux_if; struct udc_mcux_data *priv = udc_get_private(dev); usb_status_t status; /* Disable interrupt */ config->irq_disable_func(dev); /* De-init MCUX USB device driver. */ status = mcux_if->deviceDeinit(priv->mcux_device.controllerHandle); if (status != kStatus_USB_Success) { return -ENOMEM; } return 0; } static int udc_mcux_lock(const struct device *dev) { return udc_lock_internal(dev, K_FOREVER); } static int udc_mcux_unlock(const struct device *dev) { return udc_unlock_internal(dev); } static inline void udc_mcux_get_hal_driver_id(struct udc_mcux_data *priv, const struct udc_mcux_config *config) { /* * MCUX USB controller drivers use an ID to tell the HAL drivers * which controller is being used. This part of the code converts * the base address to the ID value. 
*/ #ifdef USB_BASE_ADDRS uintptr_t ip3511_fs_base[] = USB_BASE_ADDRS; #endif #ifdef USBHSD_BASE_ADDRS uintptr_t ip3511_hs_base[] = USBHSD_BASE_ADDRS; #endif /* get the right controller id */ priv->controller_id = 0xFFu; /* invalid value */ #ifdef USB_BASE_ADDRS for (uint8_t i = 0; i < ARRAY_SIZE(ip3511_fs_base); i++) { if (ip3511_fs_base[i] == config->base) { priv->controller_id = kUSB_ControllerLpcIp3511Fs0 + i; break; } } #endif #ifdef USBHSD_BASE_ADDRS if (priv->controller_id == 0xFF) { for (uint8_t i = 0; i < ARRAY_SIZE(ip3511_hs_base); i++) { if (ip3511_hs_base[i] == config->base) { priv->controller_id = kUSB_ControllerLpcIp3511Hs0 + i; break; } } } #endif } static int udc_mcux_driver_preinit(const struct device *dev) { const struct udc_mcux_config *config = dev->config; struct udc_data *data = dev->data; struct udc_mcux_data *priv = data->priv; int err; udc_mcux_get_hal_driver_id(priv, config); if (priv->controller_id == 0xFFu) { return -ENOMEM; } k_mutex_init(&data->mutex); for (int i = 0; i < config->num_of_eps; i++) { config->ep_cfg_out[i].caps.out = 1; if (i == 0) { config->ep_cfg_out[i].caps.control = 1; config->ep_cfg_out[i].caps.mps = 64; } else { config->ep_cfg_out[i].caps.bulk = 1; config->ep_cfg_out[i].caps.interrupt = 1; config->ep_cfg_out[i].caps.iso = 1; config->ep_cfg_out[i].caps.mps = 1024; } config->ep_cfg_out[i].addr = USB_EP_DIR_OUT | i; err = udc_register_ep(dev, &config->ep_cfg_out[i]); if (err != 0) { LOG_ERR("Failed to register endpoint"); return err; } } for (int i = 0; i < config->num_of_eps; i++) { config->ep_cfg_in[i].caps.in = 1; if (i == 0) { config->ep_cfg_in[i].caps.control = 1; config->ep_cfg_in[i].caps.mps = 64; } else { config->ep_cfg_in[i].caps.bulk = 1; config->ep_cfg_in[i].caps.interrupt = 1; config->ep_cfg_in[i].caps.iso = 1; config->ep_cfg_in[i].caps.mps = 1024; } config->ep_cfg_in[i].addr = USB_EP_DIR_IN | i; err = udc_register_ep(dev, &config->ep_cfg_in[i]); if (err != 0) { LOG_ERR("Failed to register endpoint"); return 
err; } } /* Requires udc_mcux_host_wakeup() implementation */ data->caps.rwup = false; data->caps.mps0 = USB_MCUX_MPS0; if ((priv->controller_id == kUSB_ControllerLpcIp3511Hs0) || (priv->controller_id == kUSB_ControllerLpcIp3511Hs1)) { data->caps.hs = true; } priv->dev = dev; pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); return 0; } static const struct udc_api udc_mcux_api = { .device_speed = udc_mcux_device_speed, .ep_enqueue = udc_mcux_ep_enqueue, .ep_dequeue = udc_mcux_ep_dequeue, .ep_set_halt = udc_mcux_ep_set_halt, .ep_clear_halt = udc_mcux_ep_clear_halt, .ep_try_config = NULL, .ep_enable = udc_mcux_ep_enable, .ep_disable = udc_mcux_ep_disable, .host_wakeup = udc_mcux_host_wakeup, .set_address = udc_mcux_set_address, .enable = udc_mcux_enable, .disable = udc_mcux_disable, .init = udc_mcux_init, .shutdown = udc_mcux_shutdown, .lock = udc_mcux_lock, .unlock = udc_mcux_unlock, }; /* IP3511 device driver interface */ static const usb_device_controller_interface_struct_t udc_mcux_if = { USB_DeviceLpc3511IpInit, USB_DeviceLpc3511IpDeinit, USB_DeviceLpc3511IpSend, USB_DeviceLpc3511IpRecv, USB_DeviceLpc3511IpCancel, USB_DeviceLpc3511IpControl }; #define UDC_MCUX_PHY_DEFINE(n) \ static usb_phy_config_struct_t phy_config_##n = { \ .D_CAL = DT_PROP_OR(DT_INST_PHANDLE(n, phy_handle), tx_d_cal, 0), \ .TXCAL45DP = DT_PROP_OR(DT_INST_PHANDLE(n, phy_handle), tx_cal_45_dp_ohms, 0), \ .TXCAL45DM = DT_PROP_OR(DT_INST_PHANDLE(n, phy_handle), tx_cal_45_dm_ohms, 0), \ } #define UDC_MCUX_PHY_DEFINE_OR(n) \ COND_CODE_1(DT_NODE_HAS_PROP(DT_DRV_INST(n), phy_handle), \ (UDC_MCUX_PHY_DEFINE(n)), ()) #define UDC_MCUX_PHY_CFG_PTR_OR_NULL(n) \ COND_CODE_1(DT_NODE_HAS_PROP(DT_DRV_INST(n), phy_handle), \ (&phy_config_##n), (NULL)) #define USB_MCUX_IP3511_DEVICE_DEFINE(n) \ UDC_MCUX_PHY_DEFINE_OR(n); \ \ static void udc_irq_enable_func##n(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), \ DT_INST_IRQ(n, priority), \ udc_mcux_isr, \ DEVICE_DT_INST_GET(n), 0); \ \ 
irq_enable(DT_INST_IRQN(n)); \ } \ \ static void udc_irq_disable_func##n(const struct device *dev) \ { \ irq_disable(DT_INST_IRQN(n)); \ } \ \ static struct udc_ep_config \ ep_cfg_out##n[DT_INST_PROP(n, num_bidir_endpoints)]; \ static struct udc_ep_config \ ep_cfg_in##n[DT_INST_PROP(n, num_bidir_endpoints)]; \ \ PINCTRL_DT_INST_DEFINE(n); \ \ static struct udc_mcux_config priv_config_##n = { \ .base = DT_INST_REG_ADDR(n), \ .irq_enable_func = udc_irq_enable_func##n, \ .irq_disable_func = udc_irq_disable_func##n, \ .num_of_eps = DT_INST_PROP(n, num_bidir_endpoints), \ .ep_cfg_in = ep_cfg_in##n, \ .ep_cfg_out = ep_cfg_out##n, \ .mcux_if = &udc_mcux_if, \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .phy_config = UDC_MCUX_PHY_CFG_PTR_OR_NULL(n), \ }; \ \ static struct udc_mcux_data priv_data_##n = { \ }; \ \ static struct udc_data udc_data_##n = { \ .mutex = Z_MUTEX_INITIALIZER(udc_data_##n.mutex), \ .priv = &priv_data_##n, \ }; \ \ DEVICE_DT_INST_DEFINE(n, udc_mcux_driver_preinit, NULL, \ &udc_data_##n, &priv_config_##n, \ POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \ &udc_mcux_api); DT_INST_FOREACH_STATUS_OKAY(USB_MCUX_IP3511_DEVICE_DEFINE) ```
/content/code_sandbox/drivers/usb/udc/udc_mcux_ip3511.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,926
```c /* * */ /** * @file udc_stm32.c * @brief STM32 USB device controller (UDC) driver */ #include <soc.h> #include <stm32_ll_bus.h> #include <stm32_ll_pwr.h> #include <stm32_ll_rcc.h> #include <stm32_ll_system.h> #include <string.h> #include <zephyr/irq.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> #include <zephyr/sys/util.h> #include "udc_common.h" #include "stm32_hsem.h" #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(udc_stm32, CONFIG_UDC_DRIVER_LOG_LEVEL); #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_otghs) #define DT_DRV_COMPAT st_stm32_otghs #define UDC_STM32_IRQ_NAME otghs #elif DT_HAS_COMPAT_STATUS_OKAY(st_stm32_otgfs) #define DT_DRV_COMPAT st_stm32_otgfs #define UDC_STM32_IRQ_NAME otgfs #elif DT_HAS_COMPAT_STATUS_OKAY(st_stm32_usb) #define DT_DRV_COMPAT st_stm32_usb #define UDC_STM32_IRQ_NAME usb #endif #define UDC_STM32_IRQ DT_INST_IRQ_BY_NAME(0, UDC_STM32_IRQ_NAME, irq) #define UDC_STM32_IRQ_PRI DT_INST_IRQ_BY_NAME(0, UDC_STM32_IRQ_NAME, priority) struct udc_stm32_data { PCD_HandleTypeDef pcd; const struct device *dev; uint32_t irq; uint32_t occupied_mem; void (*pcd_prepare)(const struct device *dev); int (*clk_enable)(void); int (*clk_disable)(void); }; struct udc_stm32_config { uint32_t num_endpoints; uint32_t pma_offset; uint32_t dram_size; uint16_t ep0_mps; uint16_t ep_mps; }; static int udc_stm32_lock(const struct device *dev) { return udc_lock_internal(dev, K_FOREVER); } static int udc_stm32_unlock(const struct device *dev) { return udc_unlock_internal(dev); } #define hpcd2data(hpcd) CONTAINER_OF(hpcd, struct udc_stm32_data, pcd); void HAL_PCD_ResetCallback(PCD_HandleTypeDef *hpcd) { struct udc_stm32_data *priv = hpcd2data(hpcd); const struct device *dev = priv->dev; const struct udc_stm32_config *cfg = dev->config; struct udc_ep_config *ep; /* Re-Enable control endpoints */ ep = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT); if (ep && ep->stat.enabled) { 
HAL_PCD_EP_Open(&priv->pcd, USB_CONTROL_EP_OUT, cfg->ep0_mps, EP_TYPE_CTRL); } ep = udc_get_ep_cfg(dev, USB_CONTROL_EP_IN); if (ep && ep->stat.enabled) { HAL_PCD_EP_Open(&priv->pcd, USB_CONTROL_EP_IN, cfg->ep0_mps, EP_TYPE_CTRL); } udc_submit_event(priv->dev, UDC_EVT_RESET, 0); } void HAL_PCD_ConnectCallback(PCD_HandleTypeDef *hpcd) { struct udc_stm32_data *priv = hpcd2data(hpcd); udc_submit_event(priv->dev, UDC_EVT_VBUS_READY, 0); } void HAL_PCD_DisconnectCallback(PCD_HandleTypeDef *hpcd) { struct udc_stm32_data *priv = hpcd2data(hpcd); udc_submit_event(priv->dev, UDC_EVT_VBUS_REMOVED, 0); } void HAL_PCD_SuspendCallback(PCD_HandleTypeDef *hpcd) { struct udc_stm32_data *priv = hpcd2data(hpcd); udc_set_suspended(priv->dev, true); udc_submit_event(priv->dev, UDC_EVT_SUSPEND, 0); } void HAL_PCD_ResumeCallback(PCD_HandleTypeDef *hpcd) { struct udc_stm32_data *priv = hpcd2data(hpcd); udc_set_suspended(priv->dev, false); udc_submit_event(priv->dev, UDC_EVT_RESUME, 0); } static int usbd_ctrl_feed_dout(const struct device *dev, const size_t length) { struct udc_stm32_data *priv = udc_get_private(dev); struct udc_ep_config *cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT); struct net_buf *buf; buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, length); if (buf == NULL) { return -ENOMEM; } k_fifo_put(&cfg->fifo, buf); HAL_PCD_EP_Receive(&priv->pcd, cfg->addr, buf->data, buf->size); return 0; } void HAL_PCD_SetupStageCallback(PCD_HandleTypeDef *hpcd) { struct udc_stm32_data *priv = hpcd2data(hpcd); struct usb_setup_packet *setup = (void *)priv->pcd.Setup; const struct device *dev = priv->dev; struct net_buf *buf; int err; buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, sizeof(struct usb_setup_packet)); if (buf == NULL) { LOG_ERR("Failed to allocate for setup"); return; } udc_ep_buf_set_setup(buf); memcpy(buf->data, setup, 8); net_buf_add(buf, 8); udc_ctrl_update_stage(dev, buf); if (!buf->len) { return; } if (setup->bRequest == USB_SREQ_SET_ADDRESS) { /* HAL requires we set the address 
before submitting status */ HAL_PCD_SetAddress(&priv->pcd, setup->wValue); } if (udc_ctrl_stage_is_data_out(dev)) { /* Allocate and feed buffer for data OUT stage */ err = usbd_ctrl_feed_dout(dev, udc_data_stage_length(buf)); if (err == -ENOMEM) { udc_submit_ep_event(dev, buf, err); } } else if (udc_ctrl_stage_is_data_in(dev)) { udc_ctrl_submit_s_in_status(dev); } else { udc_ctrl_submit_s_status(dev); } } void HAL_PCD_SOFCallback(PCD_HandleTypeDef *hpcd) { struct udc_stm32_data *priv = hpcd2data(hpcd); udc_submit_event(priv->dev, UDC_EVT_SOF, 0); } static int udc_stm32_tx(const struct device *dev, uint8_t ep, struct net_buf *buf) { struct udc_stm32_data *priv = udc_get_private(dev); const struct udc_stm32_config *cfg = dev->config; uint8_t *data; uint32_t len; HAL_StatusTypeDef status; LOG_DBG("TX ep 0x%02x len %u", ep, buf->len); if (udc_ep_is_busy(dev, ep)) { return 0; } data = buf->data; len = buf->len; if (ep == USB_CONTROL_EP_IN) { len = MIN(cfg->ep0_mps, buf->len); } buf->data += len; buf->len -= len; status = HAL_PCD_EP_Transmit(&priv->pcd, ep, data, len); if (status != HAL_OK) { LOG_ERR("HAL_PCD_EP_Transmit failed(0x%02x), %d", ep, (int)status); return -EIO; } udc_ep_set_busy(dev, ep, true); if (ep == USB_CONTROL_EP_IN && len > 0) { /* Wait for an empty package from the host. * This also flushes the TX FIFO to the host. 
*/ usbd_ctrl_feed_dout(dev, 0); } return 0; } static int udc_stm32_rx(const struct device *dev, uint8_t ep, struct net_buf *buf) { struct udc_stm32_data *priv = udc_get_private(dev); HAL_StatusTypeDef status; LOG_DBG("RX ep 0x%02x len %u", ep, buf->size); if (udc_ep_is_busy(dev, ep)) { return 0; } status = HAL_PCD_EP_Receive(&priv->pcd, ep, buf->data, buf->size); if (status != HAL_OK) { LOG_ERR("HAL_PCD_EP_Receive failed(0x%02x), %d", ep, (int)status); return -EIO; } udc_ep_set_busy(dev, ep, true); return 0; } void HAL_PCD_DataOutStageCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum) { uint32_t rx_count = HAL_PCD_EP_GetRxCount(hpcd, epnum); struct udc_stm32_data *priv = hpcd2data(hpcd); const struct device *dev = priv->dev; uint8_t ep = epnum | USB_EP_DIR_OUT; struct net_buf *buf; LOG_DBG("DataOut ep 0x%02x", ep); udc_ep_set_busy(dev, ep, false); buf = udc_buf_get(dev, ep); if (unlikely(buf == NULL)) { LOG_ERR("ep 0x%02x queue is empty", ep); return; } net_buf_add(buf, rx_count); if (ep == USB_CONTROL_EP_OUT) { if (udc_ctrl_stage_is_status_out(dev)) { udc_ctrl_update_stage(dev, buf); udc_ctrl_submit_status(dev, buf); } else { udc_ctrl_update_stage(dev, buf); } if (udc_ctrl_stage_is_status_in(dev)) { udc_ctrl_submit_s_out_status(dev, buf); } } else { udc_submit_ep_event(dev, buf, 0); } buf = udc_buf_peek(dev, ep); if (buf) { udc_stm32_rx(dev, ep, buf); } } void HAL_PCD_DataInStageCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum) { struct udc_stm32_data *priv = hpcd2data(hpcd); const struct device *dev = priv->dev; uint8_t ep = epnum | USB_EP_DIR_IN; struct net_buf *buf; LOG_DBG("DataIn ep 0x%02x", ep); udc_ep_set_busy(dev, ep, false); buf = udc_buf_peek(dev, ep); if (unlikely(buf == NULL)) { return; } if (ep == USB_CONTROL_EP_IN && buf->len) { const struct udc_stm32_config *cfg = dev->config; uint32_t len = MIN(cfg->ep0_mps, buf->len); HAL_PCD_EP_Transmit(&priv->pcd, ep, buf->data, len); buf->len -= len; buf->data += len; return; } if (udc_ep_buf_has_zlp(buf) && ep != 
USB_CONTROL_EP_IN) { udc_ep_buf_clear_zlp(buf); HAL_PCD_EP_Transmit(&priv->pcd, ep, buf->data, 0); return; } udc_buf_get(dev, ep); if (ep == USB_CONTROL_EP_IN) { if (udc_ctrl_stage_is_status_in(dev) || udc_ctrl_stage_is_no_data(dev)) { /* Status stage finished, notify upper layer */ udc_ctrl_submit_status(dev, buf); } /* Update to next stage of control transfer */ udc_ctrl_update_stage(dev, buf); if (udc_ctrl_stage_is_status_out(dev)) { /* * IN transfer finished, release buffer, * control OUT buffer should be already fed. */ net_buf_unref(buf); } return; } udc_submit_ep_event(dev, buf, 0); buf = udc_buf_peek(dev, ep); if (buf) { udc_stm32_tx(dev, ep, buf); } } #if DT_INST_NODE_HAS_PROP(0, disconnect_gpios) void HAL_PCDEx_SetConnectionState(PCD_HandleTypeDef *hpcd, uint8_t state) { struct gpio_dt_spec usb_disconnect = GPIO_DT_SPEC_INST_GET(0, disconnect_gpios); gpio_pin_configure_dt(&usb_disconnect, state ? GPIO_OUTPUT_ACTIVE : GPIO_OUTPUT_INACTIVE); } #endif static void udc_stm32_irq(const struct device *dev) { const struct udc_stm32_data *priv = udc_get_private(dev); /* HAL irq handler will call the related above callback */ HAL_PCD_IRQHandler((PCD_HandleTypeDef *)&priv->pcd); } int udc_stm32_init(const struct device *dev) { struct udc_stm32_data *priv = udc_get_private(dev); HAL_StatusTypeDef status; if (priv->clk_enable && priv->clk_enable()) { LOG_ERR("Error enabling clock(s)"); return -EIO; } priv->pcd_prepare(dev); status = HAL_PCD_Init(&priv->pcd); if (status != HAL_OK) { LOG_ERR("PCD_Init failed, %d", (int)status); return -EIO; } HAL_PCD_Stop(&priv->pcd); return 0; } #if defined(USB) || defined(USB_DRD_FS) static inline void udc_stm32_mem_init(const struct device *dev) { struct udc_stm32_data *priv = udc_get_private(dev); const struct udc_stm32_config *cfg = dev->config; priv->occupied_mem = cfg->pma_offset; } static int udc_stm32_ep_mem_config(const struct device *dev, struct udc_ep_config *ep, bool enable) { struct udc_stm32_data *priv = 
udc_get_private(dev); const struct udc_stm32_config *cfg = dev->config; uint32_t size; size = MIN(ep->mps, cfg->ep_mps); if (!enable) { priv->occupied_mem -= size; return 0; } if (priv->occupied_mem + size >= cfg->dram_size) { LOG_ERR("Unable to allocate FIFO for 0x%02x", ep->addr); return -ENOMEM; } /* Configure PMA offset for the endpoint */ HAL_PCDEx_PMAConfig(&priv->pcd, ep->addr, PCD_SNG_BUF, priv->occupied_mem); priv->occupied_mem += size; return 0; } #else static void udc_stm32_mem_init(const struct device *dev) { struct udc_stm32_data *priv = udc_get_private(dev); const struct udc_stm32_config *cfg = dev->config; int words; LOG_DBG("DRAM size: %ub", cfg->dram_size); if (cfg->ep_mps % 4 || cfg->ep0_mps % 4) { LOG_ERR("Not a 32-bit word multiple: ep0(%u)|ep(%u)", cfg->ep0_mps, cfg->ep_mps); return; } /* The documentation is not clear at all about RX FiFo size requirement, * Allocate a minimum of 0x40 words, which seems to work reliably. */ words = MAX(0x40, cfg->ep_mps / 4); HAL_PCDEx_SetRxFiFo(&priv->pcd, words); priv->occupied_mem = words * 4; /* For EP0 TX, reserve only one MPS */ HAL_PCDEx_SetTxFiFo(&priv->pcd, 0, cfg->ep0_mps / 4); priv->occupied_mem += cfg->ep0_mps; /* Reset TX allocs */ for (unsigned int i = 1U; i < cfg->num_endpoints; i++) { HAL_PCDEx_SetTxFiFo(&priv->pcd, i, 0); } } static int udc_stm32_ep_mem_config(const struct device *dev, struct udc_ep_config *ep, bool enable) { struct udc_stm32_data *priv = udc_get_private(dev); const struct udc_stm32_config *cfg = dev->config; unsigned int words; if (!(ep->addr & USB_EP_DIR_IN) || !USB_EP_GET_IDX(ep->addr)) { return 0; } words = MIN(ep->mps, cfg->ep_mps) / 4; words = (words <= 64) ? 
words * 2 : words; if (!enable) { if (priv->occupied_mem >= (words * 4)) { priv->occupied_mem -= (words * 4); } HAL_PCDEx_SetTxFiFo(&priv->pcd, USB_EP_GET_IDX(ep->addr), 0); return 0; } if (cfg->dram_size - priv->occupied_mem < words * 4) { LOG_ERR("Unable to allocate FIFO for 0x%02x", ep->addr); return -ENOMEM; } HAL_PCDEx_SetTxFiFo(&priv->pcd, USB_EP_GET_IDX(ep->addr), words); priv->occupied_mem += words * 4; return 0; } #endif static int udc_stm32_enable(const struct device *dev) { struct udc_stm32_data *priv = udc_get_private(dev); const struct udc_stm32_config *cfg = dev->config; HAL_StatusTypeDef status; int ret; LOG_DBG("Enable UDC"); udc_stm32_mem_init(dev); status = HAL_PCD_Start(&priv->pcd); if (status != HAL_OK) { LOG_ERR("PCD_Start failed, %d", (int)status); return -EIO; } ret = udc_ep_enable_internal(dev, USB_CONTROL_EP_OUT, USB_EP_TYPE_CONTROL, cfg->ep0_mps, 0); if (ret) { LOG_ERR("Failed enabling ep 0x%02x", USB_CONTROL_EP_OUT); return ret; } ret |= udc_ep_enable_internal(dev, USB_CONTROL_EP_IN, USB_EP_TYPE_CONTROL, cfg->ep0_mps, 0); if (ret) { LOG_ERR("Failed enabling ep 0x%02x", USB_CONTROL_EP_IN); return ret; } irq_enable(priv->irq); return 0; } static int udc_stm32_disable(const struct device *dev) { struct udc_stm32_data *priv = udc_get_private(dev); HAL_StatusTypeDef status; irq_disable(UDC_STM32_IRQ); if (udc_ep_disable_internal(dev, USB_CONTROL_EP_OUT)) { LOG_ERR("Failed to disable control endpoint"); return -EIO; } if (udc_ep_disable_internal(dev, USB_CONTROL_EP_IN)) { LOG_ERR("Failed to disable control endpoint"); return -EIO; } status = HAL_PCD_Stop(&priv->pcd); if (status != HAL_OK) { LOG_ERR("PCD_Stop failed, %d", (int)status); return -EIO; } return 0; } static int udc_stm32_shutdown(const struct device *dev) { struct udc_stm32_data *priv = udc_get_private(dev); HAL_StatusTypeDef status; status = HAL_PCD_DeInit(&priv->pcd); if (status != HAL_OK) { LOG_ERR("PCD_DeInit failed, %d", (int)status); /* continue anyway */ } if 
(priv->clk_disable && priv->clk_disable()) { LOG_ERR("Error disabling clock(s)"); /* continue anyway */ } if (irq_is_enabled(priv->irq)) { irq_disable(priv->irq); } return 0; } static int udc_stm32_set_address(const struct device *dev, const uint8_t addr) { struct udc_stm32_data *priv = udc_get_private(dev); HAL_StatusTypeDef status; LOG_DBG("Set Address %u", addr); status = HAL_PCD_SetAddress(&priv->pcd, addr); if (status != HAL_OK) { LOG_ERR("HAL_PCD_SetAddress failed(0x%02x), %d", addr, (int)status); return -EIO; } return 0; } static int udc_stm32_host_wakeup(const struct device *dev) { struct udc_stm32_data *priv = udc_get_private(dev); HAL_StatusTypeDef status; status = HAL_PCD_ActivateRemoteWakeup(&priv->pcd); if (status != HAL_OK) { LOG_ERR("HAL_PCD_ActivateRemoteWakeup, %d", (int)status); return -EIO; } /* Must be active from 1ms to 15ms as per reference manual. */ k_sleep(K_MSEC(2)); status = HAL_PCD_DeActivateRemoteWakeup(&priv->pcd); if (status != HAL_OK) { return -EIO; } return 0; } static int udc_stm32_ep_enable(const struct device *dev, struct udc_ep_config *ep_cfg) { struct udc_stm32_data *priv = udc_get_private(dev); HAL_StatusTypeDef status; uint8_t ep_type; int ret; LOG_DBG("Enable ep 0x%02x", ep_cfg->addr); switch (ep_cfg->attributes & USB_EP_TRANSFER_TYPE_MASK) { case USB_EP_TYPE_CONTROL: ep_type = EP_TYPE_CTRL; break; case USB_EP_TYPE_BULK: ep_type = EP_TYPE_BULK; break; case USB_EP_TYPE_INTERRUPT: ep_type = EP_TYPE_INTR; break; case USB_EP_TYPE_ISO: ep_type = EP_TYPE_ISOC; break; default: return -EINVAL; } ret = udc_stm32_ep_mem_config(dev, ep_cfg, true); if (ret) { return ret; } status = HAL_PCD_EP_Open(&priv->pcd, ep_cfg->addr, ep_cfg->mps, ep_type); if (status != HAL_OK) { LOG_ERR("HAL_PCD_EP_Open failed(0x%02x), %d", ep_cfg->addr, (int)status); return -EIO; } return 0; } static int udc_stm32_ep_disable(const struct device *dev, struct udc_ep_config *ep) { struct udc_stm32_data *priv = udc_get_private(dev); HAL_StatusTypeDef status; 
LOG_DBG("Disable ep 0x%02x", ep->addr); status = HAL_PCD_EP_Close(&priv->pcd, ep->addr); if (status != HAL_OK) { LOG_ERR("HAL_PCD_EP_Close failed(0x%02x), %d", ep->addr, (int)status); return -EIO; } return udc_stm32_ep_mem_config(dev, ep, false); } static int udc_stm32_ep_set_halt(const struct device *dev, struct udc_ep_config *cfg) { struct udc_stm32_data *priv = udc_get_private(dev); HAL_StatusTypeDef status; LOG_DBG("Halt ep 0x%02x", cfg->addr); status = HAL_PCD_EP_SetStall(&priv->pcd, cfg->addr); if (status != HAL_OK) { LOG_ERR("HAL_PCD_EP_SetStall failed(0x%02x), %d", cfg->addr, (int)status); return -EIO; } return 0; } static int udc_stm32_ep_clear_halt(const struct device *dev, struct udc_ep_config *cfg) { struct udc_stm32_data *priv = udc_get_private(dev); HAL_StatusTypeDef status; LOG_DBG("Clear halt for ep 0x%02x", cfg->addr); status = HAL_PCD_EP_ClrStall(&priv->pcd, cfg->addr); if (status != HAL_OK) { LOG_ERR("HAL_PCD_EP_ClrStall failed(0x%02x), %d", cfg->addr, (int)status); return -EIO; } return 0; } static int udc_stm32_ep_flush(const struct device *dev, struct udc_ep_config *cfg) { struct udc_stm32_data *priv = udc_get_private(dev); HAL_StatusTypeDef status; LOG_DBG("Flush ep 0x%02x", cfg->addr); status = HAL_PCD_EP_Flush(&priv->pcd, cfg->addr); if (status != HAL_OK) { LOG_ERR("HAL_PCD_EP_Flush failed(0x%02x), %d", cfg->addr, (int)status); return -EIO; } return 0; } static int udc_stm32_ep_enqueue(const struct device *dev, struct udc_ep_config *epcfg, struct net_buf *buf) { unsigned int lock_key; int ret; udc_buf_put(epcfg, buf); lock_key = irq_lock(); if (USB_EP_DIR_IS_IN(epcfg->addr)) { ret = udc_stm32_tx(dev, epcfg->addr, buf); } else { ret = udc_stm32_rx(dev, epcfg->addr, buf); } irq_unlock(lock_key); return ret; } static int udc_stm32_ep_dequeue(const struct device *dev, struct udc_ep_config *epcfg) { struct net_buf *buf; udc_stm32_ep_flush(dev, epcfg); buf = udc_buf_get_all(dev, epcfg->addr); if (buf) { udc_submit_ep_event(dev, buf, 
-ECONNABORTED); } udc_ep_set_busy(dev, epcfg->addr, false); return 0; } static enum udc_bus_speed udc_stm32_device_speed(const struct device *dev) { struct udc_stm32_data *priv = udc_get_private(dev); #ifdef USBD_HS_SPEED if (priv->pcd.Init.speed == USBD_HS_SPEED) { return UDC_BUS_SPEED_HS; } #endif if (priv->pcd.Init.speed == USBD_FS_SPEED) { return UDC_BUS_SPEED_FS; } return UDC_BUS_UNKNOWN; } static const struct udc_api udc_stm32_api = { .lock = udc_stm32_lock, .unlock = udc_stm32_unlock, .init = udc_stm32_init, .enable = udc_stm32_enable, .disable = udc_stm32_disable, .shutdown = udc_stm32_shutdown, .set_address = udc_stm32_set_address, .host_wakeup = udc_stm32_host_wakeup, .ep_try_config = NULL, .ep_enable = udc_stm32_ep_enable, .ep_disable = udc_stm32_ep_disable, .ep_set_halt = udc_stm32_ep_set_halt, .ep_clear_halt = udc_stm32_ep_clear_halt, .ep_enqueue = udc_stm32_ep_enqueue, .ep_dequeue = udc_stm32_ep_dequeue, .device_speed = udc_stm32_device_speed, }; /* ----------------- Instance/Device specific data ----------------- */ /* * USB, USB_OTG_FS and USB_DRD_FS are defined in STM32Cube HAL and allows to * distinguish between two kind of USB DC. STM32 F0, F3, L0 and G4 series * support USB device controller. STM32 F4 and F7 series support USB_OTG_FS * device controller. STM32 F1 and L4 series support either USB or USB_OTG_FS * device controller.STM32 G0 series supports USB_DRD_FS device controller. * * WARNING: Don't mix USB defined in STM32Cube HAL and CONFIG_USB_* from Zephyr * Kconfig system. 
*/ #define USB_NUM_BIDIR_ENDPOINTS DT_INST_PROP(0, num_bidir_endpoints) #if defined(USB) || defined(USB_DRD_FS) #define EP0_MPS 64U #define EP_MPS 64U #define USB_BTABLE_SIZE (8 * USB_NUM_BIDIR_ENDPOINTS) #define USB_RAM_SIZE DT_INST_PROP(0, ram_size) #else /* USB_OTG_FS */ #define EP0_MPS USB_OTG_MAX_EP0_SIZE #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_otghs) #define EP_MPS USB_OTG_HS_MAX_PACKET_SIZE #elif DT_HAS_COMPAT_STATUS_OKAY(st_stm32_otgfs) || DT_HAS_COMPAT_STATUS_OKAY(st_stm32_usb) #define EP_MPS USB_OTG_FS_MAX_PACKET_SIZE #endif #define USB_RAM_SIZE DT_INST_PROP(0, ram_size) #define USB_BTABLE_SIZE 0 #endif /* USB */ #define USB_OTG_HS_EMB_PHY (DT_HAS_COMPAT_STATUS_OKAY(st_stm32_usbphyc) && \ DT_HAS_COMPAT_STATUS_OKAY(st_stm32_otghs)) #define USB_OTG_HS_ULPI_PHY (DT_HAS_COMPAT_STATUS_OKAY(usb_ulpi_phy) && \ DT_HAS_COMPAT_STATUS_OKAY(st_stm32_otghs)) static struct udc_stm32_data udc0_priv; static struct udc_data udc0_data = { .mutex = Z_MUTEX_INITIALIZER(udc0_data.mutex), .priv = &udc0_priv, }; static const struct udc_stm32_config udc0_cfg = { .num_endpoints = USB_NUM_BIDIR_ENDPOINTS, .dram_size = USB_RAM_SIZE, .pma_offset = USB_BTABLE_SIZE, .ep0_mps = EP0_MPS, .ep_mps = EP_MPS, }; #if defined(USB_OTG_FS) || defined(USB_OTG_HS) static uint32_t usb_dc_stm32_get_maximum_speed(void) { /* * STM32L4 series USB LL API doesn't provide HIGH and HIGH_IN_FULL speed * defines. */ #if defined(CONFIG_SOC_SERIES_STM32L4X) #define USB_OTG_SPEED_HIGH 0U #define USB_OTG_SPEED_HIGH_IN_FULL 1U #endif /* CONFIG_SOC_SERIES_STM32L4X */ /* * If max-speed is not passed via DT, set it to USB controller's * maximum hardware capability. 
*/ #if USB_OTG_HS_EMB_PHY || USB_OTG_HS_ULPI_PHY uint32_t speed = USB_OTG_SPEED_HIGH; #else uint32_t speed = USB_OTG_SPEED_FULL; #endif #ifdef USB_MAXIMUM_SPEED if (!strncmp(USB_MAXIMUM_SPEED, "high-speed", 10)) { speed = USB_OTG_SPEED_HIGH; } else if (!strncmp(USB_MAXIMUM_SPEED, "full-speed", 10)) { #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(USB_OTG_HS_EMB_PHY) speed = USB_OTG_SPEED_HIGH_IN_FULL; #else speed = USB_OTG_SPEED_FULL; #endif } else { LOG_DBG("Unsupported maximum speed defined in device tree. " "USB controller will default to its maximum HW " "capability"); } #endif return speed; } #endif /* USB_OTG_FS || USB_OTG_HS */ static void priv_pcd_prepare(const struct device *dev) { struct udc_stm32_data *priv = udc_get_private(dev); const struct udc_stm32_config *cfg = dev->config; memset(&priv->pcd, 0, sizeof(priv->pcd)); /* Default values */ priv->pcd.Init.dev_endpoints = cfg->num_endpoints; priv->pcd.Init.ep0_mps = cfg->ep0_mps; priv->pcd.Init.speed = PCD_SPEED_FULL; /* Per controller/Phy values */ #if defined(USB) priv->pcd.Instance = USB; #elif defined(USB_DRD_FS) priv->pcd.Instance = USB_DRD_FS; #elif defined(USB_OTG_FS) || defined(USB_OTG_HS) priv->pcd.Init.speed = usb_dc_stm32_get_maximum_speed(); #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_otghs) priv->pcd.Instance = USB_OTG_HS; #else priv->pcd.Instance = USB_OTG_FS; #endif #endif /* USB */ #if USB_OTG_HS_EMB_PHY priv->pcd.Init.phy_itface = USB_OTG_HS_EMBEDDED_PHY; #elif USB_OTG_HS_ULPI_PHY priv->pcd.Init.phy_itface = USB_OTG_ULPI_PHY; #else priv->pcd.Init.phy_itface = PCD_PHY_EMBEDDED; #endif /* USB_OTG_HS_EMB_PHY */ } static const struct stm32_pclken pclken[] = STM32_DT_INST_CLOCKS(0); static int priv_clock_enable(void) { const struct device *const clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); if (!device_is_ready(clk)) { LOG_ERR("clock control device not ready"); return -ENODEV; } #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_otghs) && defined(CONFIG_SOC_SERIES_STM32U5X) /* Sequence to enable the 
power of the OTG HS on a stm32U5 serie : Enable VDDUSB */ bool pwr_clk = LL_AHB3_GRP1_IsEnabledClock(LL_AHB3_GRP1_PERIPH_PWR); if (!pwr_clk) { LL_AHB3_GRP1_EnableClock(LL_AHB3_GRP1_PERIPH_PWR); } /* Check that power range is 1 or 2 */ if (LL_PWR_GetRegulVoltageScaling() < LL_PWR_REGU_VOLTAGE_SCALE2) { LOG_ERR("Wrong Power range to use USB OTG HS"); return -EIO; } LL_PWR_EnableVddUSB(); /* Configure VOSR register of USB HSTransceiverSupply(); */ LL_PWR_EnableUSBPowerSupply(); LL_PWR_EnableUSBEPODBooster(); while (LL_PWR_IsActiveFlag_USBBOOST() != 1) { /* Wait for USB EPOD BOOST ready */ } /* Leave the PWR clock in its initial position */ if (!pwr_clk) { LL_AHB3_GRP1_DisableClock(LL_AHB3_GRP1_PERIPH_PWR); } /* Set the OTG PHY reference clock selection (through SYSCFG) block */ LL_APB3_GRP1_EnableClock(LL_APB3_GRP1_PERIPH_SYSCFG); HAL_SYSCFG_SetOTGPHYReferenceClockSelection(SYSCFG_OTG_HS_PHY_CLK_SELECT_1); /* Configuring the SYSCFG registers OTG_HS PHY : OTG_HS PHY enable*/ HAL_SYSCFG_EnableOTGPHY(SYSCFG_OTG_HS_PHY_ENABLE); #elif defined(PWR_USBSCR_USB33SV) || defined(PWR_SVMCR_USV) /* * VDDUSB independent USB supply (PWR clock is on) * with LL_PWR_EnableVDDUSB function (higher case) */ LL_PWR_EnableVDDUSB(); #endif #if defined(CONFIG_SOC_SERIES_STM32H7X) LL_PWR_EnableUSBVoltageDetector(); /* Per AN2606: USBREGEN not supported when running in FS mode. 
*/ LL_PWR_DisableUSBReg(); while (!LL_PWR_IsActiveFlag_USB()) { LOG_INF("PWR not active yet"); k_sleep(K_MSEC(100)); } #endif if (DT_INST_NUM_CLOCKS(0) > 1) { if (clock_control_configure(clk, (clock_control_subsys_t *)&pclken[1], NULL) != 0) { LOG_ERR("Could not select USB domain clock"); return -EIO; } } if (clock_control_on(clk, (clock_control_subsys_t *)&pclken[0]) != 0) { LOG_ERR("Unable to enable USB clock"); return -EIO; } if (IS_ENABLED(CONFIG_USB_DC_STM32_CLOCK_CHECK)) { uint32_t usb_clock_rate; if (clock_control_get_rate(clk, (clock_control_subsys_t *)&pclken[1], &usb_clock_rate) != 0) { LOG_ERR("Failed to get USB domain clock rate"); return -EIO; } if (usb_clock_rate != MHZ(48)) { LOG_ERR("USB Clock is not 48MHz (%d)", usb_clock_rate); return -ENOTSUP; } } /* Previous check won't work in case of F1/F3. Add build time check */ #if defined(RCC_CFGR_OTGFSPRE) || defined(RCC_CFGR_USBPRE) #if (MHZ(48) == CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC) && !defined(STM32_PLL_USBPRE) /* PLL output clock is set to 48MHz, it should not be divided */ #warning USBPRE/OTGFSPRE should be set in rcc node #endif #endif /* RCC_CFGR_OTGFSPRE / RCC_CFGR_USBPRE */ #if USB_OTG_HS_ULPI_PHY #if defined(CONFIG_SOC_SERIES_STM32H7X) LL_AHB1_GRP1_EnableClock(LL_AHB1_GRP1_PERIPH_USB1OTGHSULPI); #else LL_AHB1_GRP1_EnableClock(LL_AHB1_GRP1_PERIPH_OTGHSULPI); #endif #elif DT_HAS_COMPAT_STATUS_OKAY(st_stm32_otghs) /* USB_OTG_HS_ULPI_PHY */ /* Disable ULPI interface (for external high-speed PHY) clock in sleep/low-power mode. * It is disabled by default in run power mode, no need to disable it. 
*/ #if defined(CONFIG_SOC_SERIES_STM32H7X) LL_AHB1_GRP1_DisableClockSleep(LL_AHB1_GRP1_PERIPH_USB1OTGHSULPI); #elif defined(CONFIG_SOC_SERIES_STM32U5X) LL_AHB2_GRP1_EnableClock(LL_AHB2_GRP1_PERIPH_USBPHY); /* Both OTG HS and USBPHY sleep clock MUST be disabled here at the same time */ LL_AHB2_GRP1_DisableClockStopSleep(LL_AHB2_GRP1_PERIPH_OTG_HS || LL_AHB2_GRP1_PERIPH_USBPHY); #else LL_AHB1_GRP1_DisableClockLowPower(LL_AHB1_GRP1_PERIPH_OTGHSULPI); #endif /* defined(CONFIG_SOC_SERIES_STM32H7X) */ #if USB_OTG_HS_EMB_PHY LL_APB2_GRP1_EnableClock(LL_APB2_GRP1_PERIPH_OTGPHYC); #endif #elif defined(CONFIG_SOC_SERIES_STM32H7X) && DT_HAS_COMPAT_STATUS_OKAY(st_stm32_otgfs) /* The USB2 controller only works in FS mode, but the ULPI clock needs * to be disabled in sleep mode for it to work. */ LL_AHB1_GRP1_DisableClockSleep(LL_AHB1_GRP1_PERIPH_USB2OTGHSULPI); #endif /* USB_OTG_HS_ULPI_PHY */ return 0; } static int priv_clock_disable(void) { const struct device *clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); if (clock_control_off(clk, (clock_control_subsys_t *)&pclken[0]) != 0) { LOG_ERR("Unable to disable USB clock"); return -EIO; } #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32_otghs) && defined(CONFIG_SOC_SERIES_STM32U5X) LL_AHB2_GRP1_DisableClock(LL_AHB2_GRP1_PERIPH_USBPHY); #endif return 0; } static struct udc_ep_config ep_cfg_in[DT_INST_PROP(0, num_bidir_endpoints)]; static struct udc_ep_config ep_cfg_out[DT_INST_PROP(0, num_bidir_endpoints)]; PINCTRL_DT_INST_DEFINE(0); static const struct pinctrl_dev_config *usb_pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0); #if USB_OTG_HS_ULPI_PHY static const struct gpio_dt_spec ulpi_reset = GPIO_DT_SPEC_GET_OR(DT_PHANDLE(DT_INST(0, st_stm32_otghs), phys), reset_gpios, {0}); #endif static int udc_stm32_driver_init0(const struct device *dev) { struct udc_stm32_data *priv = udc_get_private(dev); const struct udc_stm32_config *cfg = dev->config; struct udc_data *data = dev->data; int err; for (unsigned int i = 0; i < ARRAY_SIZE(ep_cfg_out); i++) { 
ep_cfg_out[i].caps.out = 1; if (i == 0) { ep_cfg_out[i].caps.control = 1; ep_cfg_out[i].caps.mps = cfg->ep0_mps; } else { ep_cfg_out[i].caps.bulk = 1; ep_cfg_out[i].caps.interrupt = 1; ep_cfg_out[i].caps.iso = 1; ep_cfg_out[i].caps.mps = cfg->ep_mps; } ep_cfg_out[i].addr = USB_EP_DIR_OUT | i; err = udc_register_ep(dev, &ep_cfg_out[i]); if (err != 0) { LOG_ERR("Failed to register endpoint"); return err; } } for (unsigned int i = 0; i < ARRAY_SIZE(ep_cfg_in); i++) { ep_cfg_in[i].caps.in = 1; if (i == 0) { ep_cfg_in[i].caps.control = 1; ep_cfg_in[i].caps.mps = cfg->ep0_mps; } else { ep_cfg_in[i].caps.bulk = 1; ep_cfg_in[i].caps.interrupt = 1; ep_cfg_in[i].caps.iso = 1; ep_cfg_in[i].caps.mps = 1023; } ep_cfg_in[i].addr = USB_EP_DIR_IN | i; err = udc_register_ep(dev, &ep_cfg_in[i]); if (err != 0) { LOG_ERR("Failed to register endpoint"); return err; } } data->caps.rwup = true; data->caps.out_ack = false; data->caps.mps0 = UDC_MPS0_64; priv->dev = dev; priv->irq = UDC_STM32_IRQ; priv->clk_enable = priv_clock_enable; priv->clk_disable = priv_clock_disable; priv->pcd_prepare = priv_pcd_prepare; IRQ_CONNECT(UDC_STM32_IRQ, UDC_STM32_IRQ_PRI, udc_stm32_irq, DEVICE_DT_INST_GET(0), 0); err = pinctrl_apply_state(usb_pcfg, PINCTRL_STATE_DEFAULT); if (err < 0) { LOG_ERR("USB pinctrl setup failed (%d)", err); return err; } #ifdef SYSCFG_CFGR1_USB_IT_RMP /* * STM32F302/F303: USB IRQ collides with CAN_1 IRQ (14.1.3, RM0316) * Remap IRQ by default to enable use of both IPs simultaneoulsy * This should be done before calling any HAL function */ if (LL_APB2_GRP1_IsEnabledClock(LL_APB2_GRP1_PERIPH_SYSCFG)) { LL_SYSCFG_EnableRemapIT_USB(); } else { LOG_ERR("System Configuration Controller clock is " "disabled. 
Unable to enable IRQ remapping."); } #endif #if USB_OTG_HS_ULPI_PHY if (ulpi_reset.port != NULL) { if (!gpio_is_ready_dt(&ulpi_reset)) { LOG_ERR("Reset GPIO device not ready"); return -EINVAL; } if (gpio_pin_configure_dt(&ulpi_reset, GPIO_OUTPUT_INACTIVE)) { LOG_ERR("Couldn't configure reset pin"); return -EIO; } } #endif /*cd * Required for at least STM32L4 devices as they electrically * isolate USB features from VDDUSB. It must be enabled before * USB can function. Refer to section 5.1.3 in DM00083560 or * DM00310109. */ #ifdef PWR_CR2_USV #if defined(LL_APB1_GRP1_PERIPH_PWR) if (LL_APB1_GRP1_IsEnabledClock(LL_APB1_GRP1_PERIPH_PWR)) { LL_PWR_EnableVddUSB(); } else { LL_APB1_GRP1_EnableClock(LL_APB1_GRP1_PERIPH_PWR); LL_PWR_EnableVddUSB(); LL_APB1_GRP1_DisableClock(LL_APB1_GRP1_PERIPH_PWR); } #else LL_PWR_EnableVddUSB(); #endif /* defined(LL_APB1_GRP1_PERIPH_PWR) */ #endif /* PWR_CR2_USV */ return 0; } DEVICE_DT_INST_DEFINE(0, udc_stm32_driver_init0, NULL, &udc0_data, &udc0_cfg, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &udc_stm32_api); ```
/content/code_sandbox/drivers/usb/udc/udc_stm32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
9,879
```c /* * */ #include <zephyr/init.h> #include <zephyr/kernel.h> #include <zephyr/net/buf.h> #include <zephyr/sys/byteorder.h> #include <zephyr/sys/__assert.h> #include <zephyr/usb/usb_ch9.h> #include <zephyr/drivers/usb/udc_buf.h> #include "udc_common.h" #include <zephyr/logging/log.h> #if defined(CONFIG_UDC_DRIVER_LOG_LEVEL) #define UDC_COMMON_LOG_LEVEL CONFIG_UDC_DRIVER_LOG_LEVEL #else #define UDC_COMMON_LOG_LEVEL LOG_LEVEL_NONE #endif LOG_MODULE_REGISTER(udc, CONFIG_UDC_DRIVER_LOG_LEVEL); static inline uint8_t *udc_pool_data_alloc(struct net_buf *const buf, size_t *const size, k_timeout_t timeout) { struct net_buf_pool *const buf_pool = net_buf_pool_get(buf->pool_id); struct k_heap *const pool = buf_pool->alloc->alloc_data; void *b; *size = ROUND_UP(*size, UDC_BUF_GRANULARITY); b = k_heap_aligned_alloc(pool, UDC_BUF_ALIGN, *size, timeout); if (b == NULL) { *size = 0; return NULL; } return b; } static inline void udc_pool_data_unref(struct net_buf *buf, uint8_t *const data) { struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id); struct k_heap *pool = buf_pool->alloc->alloc_data; k_heap_free(pool, data); } const struct net_buf_data_cb net_buf_dma_cb = { .alloc = udc_pool_data_alloc, .unref = udc_pool_data_unref, }; static inline void udc_buf_destroy(struct net_buf *buf); UDC_BUF_POOL_VAR_DEFINE(udc_ep_pool, CONFIG_UDC_BUF_COUNT, CONFIG_UDC_BUF_POOL_SIZE, sizeof(struct udc_buf_info), udc_buf_destroy); #define USB_EP_LUT_IDX(ep) (USB_EP_DIR_IS_IN(ep) ? 
(ep & BIT_MASK(4)) + 16 : \ ep & BIT_MASK(4)) void udc_set_suspended(const struct device *dev, const bool value) { struct udc_data *data = dev->data; if (value == udc_is_suspended(dev)) { LOG_WRN("Spurious suspend/resume event"); } atomic_set_bit_to(&data->status, UDC_STATUS_SUSPENDED, value); } struct udc_ep_config *udc_get_ep_cfg(const struct device *dev, const uint8_t ep) { struct udc_data *data = dev->data; return data->ep_lut[USB_EP_LUT_IDX(ep)]; } bool udc_ep_is_busy(const struct device *dev, const uint8_t ep) { struct udc_ep_config *ep_cfg; ep_cfg = udc_get_ep_cfg(dev, ep); __ASSERT(ep_cfg != NULL, "ep 0x%02x is not available", ep); return ep_cfg->stat.busy; } void udc_ep_set_busy(const struct device *dev, const uint8_t ep, const bool busy) { struct udc_ep_config *ep_cfg; ep_cfg = udc_get_ep_cfg(dev, ep); __ASSERT(ep_cfg != NULL, "ep 0x%02x is not available", ep); ep_cfg->stat.busy = busy; } int udc_register_ep(const struct device *dev, struct udc_ep_config *const cfg) { struct udc_data *data = dev->data; uint8_t idx; if (udc_is_initialized(dev)) { return -EACCES; } idx = USB_EP_LUT_IDX(cfg->addr); __ASSERT_NO_MSG(idx < ARRAY_SIZE(data->ep_lut)); data->ep_lut[idx] = cfg; k_fifo_init(&cfg->fifo); return 0; } struct net_buf *udc_buf_get(const struct device *dev, const uint8_t ep) { struct udc_ep_config *ep_cfg; ep_cfg = udc_get_ep_cfg(dev, ep); if (ep_cfg == NULL) { return NULL; } return k_fifo_get(&ep_cfg->fifo, K_NO_WAIT); } struct net_buf *udc_buf_get_all(const struct device *dev, const uint8_t ep) { struct udc_ep_config *ep_cfg; struct net_buf *buf; ep_cfg = udc_get_ep_cfg(dev, ep); if (ep_cfg == NULL) { return NULL; } buf = k_fifo_get(&ep_cfg->fifo, K_NO_WAIT); if (!buf) { return NULL; } LOG_DBG("ep 0x%02x dequeue %p", ep, buf); for (struct net_buf *n = buf; !k_fifo_is_empty(&ep_cfg->fifo); n = n->frags) { n->frags = k_fifo_get(&ep_cfg->fifo, K_NO_WAIT); LOG_DBG("|-> %p ", n->frags); if (n->frags == NULL) { break; } } return buf; } struct net_buf 
*udc_buf_peek(const struct device *dev, const uint8_t ep) { struct udc_ep_config *ep_cfg; ep_cfg = udc_get_ep_cfg(dev, ep); if (ep_cfg == NULL) { return NULL; } return k_fifo_peek_head(&ep_cfg->fifo); } void udc_buf_put(struct udc_ep_config *const ep_cfg, struct net_buf *const buf) { k_fifo_put(&ep_cfg->fifo, buf); } void udc_ep_buf_set_setup(struct net_buf *const buf) { struct udc_buf_info *bi = udc_get_buf_info(buf); bi->setup = 1; bi->data = 0; bi->status = 0; } bool udc_ep_buf_has_zlp(const struct net_buf *const buf) { const struct udc_buf_info *bi = udc_get_buf_info(buf); return bi->zlp; } void udc_ep_buf_clear_zlp(const struct net_buf *const buf) { struct udc_buf_info *bi = udc_get_buf_info(buf); bi->zlp = false; } int udc_submit_event(const struct device *dev, const enum udc_event_type type, const int status) { struct udc_data *data = dev->data; struct udc_event drv_evt = { .type = type, .status = status, .dev = dev, }; return data->event_cb(dev, &drv_evt); } int udc_submit_ep_event(const struct device *dev, struct net_buf *const buf, const int err) { struct udc_buf_info *bi = udc_get_buf_info(buf); struct udc_data *data = dev->data; const struct udc_event drv_evt = { .type = UDC_EVT_EP_REQUEST, .buf = buf, .dev = dev, }; if (!udc_is_initialized(dev)) { return -EPERM; } bi->err = err; return data->event_cb(dev, &drv_evt); } static uint8_t ep_attrib_get_transfer(uint8_t attributes) { return attributes & USB_EP_TRANSFER_TYPE_MASK; } static bool ep_check_config(const struct device *dev, const struct udc_ep_config *const cfg, const uint8_t ep, const uint8_t attributes, const uint16_t mps, const uint8_t interval) { bool dir_is_in = USB_EP_DIR_IS_IN(ep); bool dir_is_out = USB_EP_DIR_IS_OUT(ep); LOG_DBG("cfg d:%c|%c t:%c|%c|%c|%c, mps %u", cfg->caps.in ? 'I' : '-', cfg->caps.out ? 'O' : '-', cfg->caps.iso ? 'S' : '-', cfg->caps.bulk ? 'B' : '-', cfg->caps.interrupt ? 'I' : '-', cfg->caps.control ? 
'C' : '-', cfg->caps.mps); if (dir_is_out && !cfg->caps.out) { return false; } if (dir_is_in && !cfg->caps.in) { return false; } if (mps > cfg->caps.mps) { return false; } switch (ep_attrib_get_transfer(attributes)) { case USB_EP_TYPE_BULK: if (!cfg->caps.bulk) { return false; } break; case USB_EP_TYPE_INTERRUPT: if (!cfg->caps.interrupt) { return false; } break; case USB_EP_TYPE_ISO: if (!cfg->caps.iso) { return false; } break; case USB_EP_TYPE_CONTROL: if (!cfg->caps.control) { return false; } break; default: return false; } return true; } static void ep_update_mps(const struct device *dev, const struct udc_ep_config *const cfg, const uint8_t attributes, uint16_t *const mps) { struct udc_device_caps caps = udc_caps(dev); const uint16_t spec_int_mps = caps.hs ? 1024 : 64; const uint16_t spec_bulk_mps = caps.hs ? 512 : 64; /* * TODO: It does not take into account the actual speed of the * bus after the RESET. Should be fixed/improved when the driver * for high speed controller are ported. */ switch (ep_attrib_get_transfer(attributes)) { case USB_EP_TYPE_BULK: *mps = MIN(cfg->caps.mps, spec_bulk_mps); break; case USB_EP_TYPE_INTERRUPT: *mps = MIN(cfg->caps.mps, spec_int_mps); break; case USB_EP_TYPE_CONTROL: __fallthrough; case USB_EP_TYPE_ISO: __fallthrough; default: return; } } int udc_ep_try_config(const struct device *dev, const uint8_t ep, const uint8_t attributes, uint16_t *const mps, const uint8_t interval) { const struct udc_api *api = dev->api; struct udc_ep_config *cfg; bool ret; cfg = udc_get_ep_cfg(dev, ep); if (cfg == NULL) { return -ENODEV; } api->lock(dev); ret = ep_check_config(dev, cfg, ep, attributes, *mps, interval); if (ret == true && *mps == 0U) { ep_update_mps(dev, cfg, attributes, mps); } api->unlock(dev); return (ret == false) ? 
-ENOTSUP : 0; } int udc_ep_enable_internal(const struct device *dev, const uint8_t ep, const uint8_t attributes, const uint16_t mps, const uint8_t interval) { const struct udc_api *api = dev->api; struct udc_ep_config *cfg; int ret; cfg = udc_get_ep_cfg(dev, ep); if (cfg == NULL) { return -ENODEV; } if (cfg->stat.enabled) { LOG_ERR("ep 0x%02x already enabled", cfg->addr); return -EALREADY; } if (!ep_check_config(dev, cfg, ep, attributes, mps, interval)) { LOG_ERR("Endpoint 0x%02x validation failed", cfg->addr); return -ENODEV; } cfg->attributes = attributes; cfg->mps = mps; cfg->interval = interval; cfg->stat.odd = 0; cfg->stat.halted = 0; cfg->stat.data1 = false; ret = api->ep_enable(dev, cfg); cfg->stat.enabled = ret ? false : true; return ret; } int udc_ep_enable(const struct device *dev, const uint8_t ep, const uint8_t attributes, const uint16_t mps, const uint8_t interval) { const struct udc_api *api = dev->api; int ret; if (ep == USB_CONTROL_EP_OUT || ep == USB_CONTROL_EP_IN) { return -EINVAL; } api->lock(dev); if (!udc_is_enabled(dev)) { ret = -EPERM; goto ep_enable_error; } ret = udc_ep_enable_internal(dev, ep, attributes, mps, interval); ep_enable_error: api->unlock(dev); return ret; } int udc_ep_disable_internal(const struct device *dev, const uint8_t ep) { const struct udc_api *api = dev->api; struct udc_ep_config *cfg; int ret; cfg = udc_get_ep_cfg(dev, ep); if (cfg == NULL) { return -ENODEV; } if (!cfg->stat.enabled) { LOG_ERR("ep 0x%02x already disabled", cfg->addr); return -EALREADY; } ret = api->ep_disable(dev, cfg); cfg->stat.enabled = ret ? 
cfg->stat.enabled : false; return ret; } int udc_ep_disable(const struct device *dev, const uint8_t ep) { const struct udc_api *api = dev->api; int ret; if (ep == USB_CONTROL_EP_OUT || ep == USB_CONTROL_EP_IN) { return -EINVAL; } api->lock(dev); if (!udc_is_initialized(dev)) { ret = -EPERM; goto ep_disable_error; } ret = udc_ep_disable_internal(dev, ep); ep_disable_error: api->unlock(dev); return ret; } int udc_ep_set_halt(const struct device *dev, const uint8_t ep) { const struct udc_api *api = dev->api; struct udc_ep_config *cfg; int ret; api->lock(dev); if (!udc_is_enabled(dev)) { ret = -EPERM; goto ep_set_halt_error; } cfg = udc_get_ep_cfg(dev, ep); if (cfg == NULL) { ret = -ENODEV; goto ep_set_halt_error; } if (!cfg->stat.enabled) { ret = -ENODEV; goto ep_set_halt_error; } if (ep_attrib_get_transfer(cfg->attributes) == USB_EP_TYPE_ISO) { ret = -ENOTSUP; goto ep_set_halt_error; } ret = api->ep_set_halt(dev, cfg); ep_set_halt_error: api->unlock(dev); return ret; } int udc_ep_clear_halt(const struct device *dev, const uint8_t ep) { const struct udc_api *api = dev->api; struct udc_ep_config *cfg; int ret; api->lock(dev); if (!udc_is_enabled(dev)) { ret = -EPERM; goto ep_clear_halt_error; } cfg = udc_get_ep_cfg(dev, ep); if (cfg == NULL) { ret = -ENODEV; goto ep_clear_halt_error; } if (!cfg->stat.enabled) { ret = -ENODEV; goto ep_clear_halt_error; } if (ep_attrib_get_transfer(cfg->attributes) == USB_EP_TYPE_ISO) { ret = -ENOTSUP; goto ep_clear_halt_error; } ret = api->ep_clear_halt(dev, cfg); if (ret == 0) { cfg->stat.halted = false; } ep_clear_halt_error: api->unlock(dev); return ret; } static void udc_debug_ep_enqueue(const struct device *dev, struct udc_ep_config *const cfg) { struct udc_buf_info *bi; struct net_buf *buf; sys_slist_t list; list.head = k_fifo_peek_head(&cfg->fifo); list.tail = k_fifo_peek_tail(&cfg->fifo); if (list.head == NULL) { LOG_DBG("ep 0x%02x queue is empty", cfg->addr); return; } LOG_DBG("[de]queue ep 0x%02x:", cfg->addr); 
SYS_SLIST_FOR_EACH_CONTAINER(&list, buf, node) { bi = udc_get_buf_info(buf); LOG_DBG("|-> %p (%u) ->", buf, buf->size); } } int udc_ep_enqueue(const struct device *dev, struct net_buf *const buf) { const struct udc_api *api = dev->api; struct udc_ep_config *cfg; struct udc_buf_info *bi; int ret; api->lock(dev); if (!udc_is_enabled(dev)) { ret = -EPERM; goto ep_enqueue_error; } bi = udc_get_buf_info(buf); if (bi->ep == USB_CONTROL_EP_OUT) { ret = -EPERM; goto ep_enqueue_error; } cfg = udc_get_ep_cfg(dev, bi->ep); if (cfg == NULL) { ret = -ENODEV; goto ep_enqueue_error; } LOG_DBG("Queue ep 0x%02x %p len %u", cfg->addr, buf, USB_EP_DIR_IS_IN(cfg->addr) ? buf->len : buf->size); bi->setup = 0; ret = api->ep_enqueue(dev, cfg, buf); ep_enqueue_error: api->unlock(dev); return ret; } int udc_ep_dequeue(const struct device *dev, const uint8_t ep) { const struct udc_api *api = dev->api; struct udc_ep_config *cfg; int ret; api->lock(dev); if (!udc_is_initialized(dev)) { ret = -EPERM; goto ep_dequeue_error; } cfg = udc_get_ep_cfg(dev, ep); if (cfg == NULL) { ret = -ENODEV; goto ep_dequeue_error; } if (cfg->stat.enabled || cfg->stat.halted) { LOG_INF("ep 0x%02x is not halted|disabled", cfg->addr); } if (UDC_COMMON_LOG_LEVEL == LOG_LEVEL_DBG) { udc_debug_ep_enqueue(dev, cfg); } if (k_fifo_is_empty(&cfg->fifo)) { ret = 0; } else { ret = api->ep_dequeue(dev, cfg); } ep_dequeue_error: api->unlock(dev); return ret; } struct net_buf *udc_ep_buf_alloc(const struct device *dev, const uint8_t ep, const size_t size) { const struct udc_api *api = dev->api; struct net_buf *buf = NULL; struct udc_buf_info *bi; api->lock(dev); buf = net_buf_alloc_len(&udc_ep_pool, size, K_NO_WAIT); if (!buf) { LOG_ERR("Failed to allocate net_buf %zd", size); goto ep_alloc_error; } bi = udc_get_buf_info(buf); memset(bi, 0, sizeof(struct udc_buf_info)); bi->ep = ep; LOG_DBG("Allocate net_buf, ep 0x%02x, size %zd", ep, size); ep_alloc_error: api->unlock(dev); return buf; } struct net_buf *udc_ctrl_alloc(const 
struct device *dev, const uint8_t ep, const size_t size) { /* TODO: for now just pass to udc_buf_alloc() */ return udc_ep_buf_alloc(dev, ep, size); } static inline void udc_buf_destroy(struct net_buf *buf) { /* Adjust level and use together with the log in udc_ep_buf_alloc() */ LOG_DBG("destroy %p", buf); net_buf_destroy(buf); } int udc_ep_buf_free(const struct device *dev, struct net_buf *const buf) { const struct udc_api *api = dev->api; int ret = 0; api->lock(dev); net_buf_unref(buf); api->unlock(dev); return ret; } enum udc_bus_speed udc_device_speed(const struct device *dev) { const struct udc_api *api = dev->api; enum udc_bus_speed speed = UDC_BUS_UNKNOWN; api->lock(dev); if (!udc_is_enabled(dev)) { goto device_speed_error; } if (api->device_speed) { speed = api->device_speed(dev); } else { /* TODO: Shall we track connected status in UDC? */ speed = UDC_BUS_SPEED_FS; } device_speed_error: api->unlock(dev); return speed; } int udc_enable(const struct device *dev) { const struct udc_api *api = dev->api; struct udc_data *data = dev->data; int ret; api->lock(dev); if (!udc_is_initialized(dev)) { ret = -EPERM; goto udc_enable_error; } if (udc_is_enabled(dev)) { ret = -EALREADY; goto udc_enable_error; } data->stage = CTRL_PIPE_STAGE_SETUP; ret = api->enable(dev); if (ret == 0) { atomic_set_bit(&data->status, UDC_STATUS_ENABLED); } udc_enable_error: api->unlock(dev); return ret; } int udc_disable(const struct device *dev) { const struct udc_api *api = dev->api; struct udc_data *data = dev->data; int ret; api->lock(dev); if (!udc_is_enabled(dev)) { ret = -EALREADY; goto udc_disable_error; } ret = api->disable(dev); atomic_clear_bit(&data->status, UDC_STATUS_ENABLED); udc_disable_error: api->unlock(dev); return ret; } int udc_init(const struct device *dev, udc_event_cb_t event_cb, const void *const event_ctx) { const struct udc_api *api = dev->api; struct udc_data *data = dev->data; int ret; if (event_cb == NULL || event_ctx == NULL) { return -EINVAL; } 
api->lock(dev); if (udc_is_initialized(dev)) { ret = -EALREADY; goto udc_init_error; } data->event_cb = event_cb; data->event_ctx = event_ctx; ret = api->init(dev); if (ret == 0) { atomic_set_bit(&data->status, UDC_STATUS_INITIALIZED); } udc_init_error: api->unlock(dev); return ret; } int udc_shutdown(const struct device *dev) { const struct udc_api *api = dev->api; struct udc_data *data = dev->data; int ret; api->lock(dev); if (udc_is_enabled(dev)) { ret = -EBUSY; goto udc_shutdown_error; } if (!udc_is_initialized(dev)) { ret = -EALREADY; goto udc_shutdown_error; } ret = api->shutdown(dev); atomic_clear_bit(&data->status, UDC_STATUS_INITIALIZED); udc_shutdown_error: api->unlock(dev); return ret; } static ALWAYS_INLINE struct net_buf *udc_ctrl_alloc_stage(const struct device *dev, struct net_buf *const parent, const uint8_t ep, const size_t size) { struct net_buf *buf; buf = udc_ctrl_alloc(dev, ep, size); if (buf == NULL) { return NULL; } if (parent) { net_buf_frag_add(parent, buf); } return buf; } static struct net_buf *udc_ctrl_alloc_data(const struct device *dev, struct net_buf *const setup, const uint8_t ep) { size_t size = udc_data_stage_length(setup); struct udc_buf_info *bi; struct net_buf *buf; buf = udc_ctrl_alloc_stage(dev, setup, ep, size); if (buf) { bi = udc_get_buf_info(buf); bi->data = true; } return buf; } static struct net_buf *udc_ctrl_alloc_status(const struct device *dev, struct net_buf *const parent, const uint8_t ep) { size_t size = (ep == USB_CONTROL_EP_OUT) ? 
64 : 0; struct udc_buf_info *bi; struct net_buf *buf; buf = udc_ctrl_alloc_stage(dev, parent, ep, size); if (buf) { bi = udc_get_buf_info(buf); bi->status = true; } return buf; } int udc_ctrl_submit_s_out_status(const struct device *dev, struct net_buf *const dout) { struct udc_buf_info *bi = udc_get_buf_info(dout); struct udc_data *data = dev->data; struct net_buf *buf; int ret = 0; bi->data = true; net_buf_frag_add(data->setup, dout); buf = udc_ctrl_alloc_status(dev, dout, USB_CONTROL_EP_IN); if (buf == NULL) { ret = -ENOMEM; } return udc_submit_ep_event(dev, data->setup, ret); } int udc_ctrl_submit_s_in_status(const struct device *dev) { struct udc_data *data = dev->data; struct net_buf *buf; int ret = 0; if (!udc_ctrl_stage_is_data_in(dev)) { return -ENOTSUP; } /* Allocate buffer for data stage IN */ buf = udc_ctrl_alloc_data(dev, data->setup, USB_CONTROL_EP_IN); if (buf == NULL) { ret = -ENOMEM; } return udc_submit_ep_event(dev, data->setup, ret); } int udc_ctrl_submit_s_status(const struct device *dev) { struct udc_data *data = dev->data; struct net_buf *buf; int ret = 0; /* Allocate buffer for possible status IN */ buf = udc_ctrl_alloc_status(dev, data->setup, USB_CONTROL_EP_IN); if (buf == NULL) { ret = -ENOMEM; } return udc_submit_ep_event(dev, data->setup, ret); } int udc_ctrl_submit_status(const struct device *dev, struct net_buf *const buf) { struct udc_buf_info *bi = udc_get_buf_info(buf); bi->status = true; return udc_submit_ep_event(dev, buf, 0); } bool udc_ctrl_stage_is_data_out(const struct device *dev) { struct udc_data *data = dev->data; return data->stage == CTRL_PIPE_STAGE_DATA_OUT ? true : false; } bool udc_ctrl_stage_is_data_in(const struct device *dev) { struct udc_data *data = dev->data; return data->stage == CTRL_PIPE_STAGE_DATA_IN ? true : false; } bool udc_ctrl_stage_is_status_out(const struct device *dev) { struct udc_data *data = dev->data; return data->stage == CTRL_PIPE_STAGE_STATUS_OUT ? 
true : false; } bool udc_ctrl_stage_is_status_in(const struct device *dev) { struct udc_data *data = dev->data; return data->stage == CTRL_PIPE_STAGE_STATUS_IN ? true : false; } bool udc_ctrl_stage_is_no_data(const struct device *dev) { struct udc_data *data = dev->data; return data->stage == CTRL_PIPE_STAGE_NO_DATA ? true : false; } static bool udc_data_stage_to_host(const struct net_buf *const buf) { struct usb_setup_packet *setup = (void *)buf->data; return USB_REQTYPE_GET_DIR(setup->bmRequestType); } void udc_ctrl_update_stage(const struct device *dev, struct net_buf *const buf) { struct udc_buf_info *bi = udc_get_buf_info(buf); struct udc_device_caps caps = udc_caps(dev); uint8_t next_stage = CTRL_PIPE_STAGE_ERROR; struct udc_data *data = dev->data; __ASSERT(USB_EP_GET_IDX(bi->ep) == 0, "0x%02x is not a control endpoint", bi->ep); if (bi->setup && bi->ep == USB_CONTROL_EP_OUT) { uint16_t length = udc_data_stage_length(buf); data->setup = buf; if (data->stage != CTRL_PIPE_STAGE_SETUP) { LOG_INF("Sequence %u not completed", data->stage); data->stage = CTRL_PIPE_STAGE_SETUP; } /* * Setup Stage has been completed (setup packet received), * regardless of the previous stage, this is now being reset. * Next state depends on wLength and the direction bit (D7). 
*/ if (length == 0) { /* * No Data Stage, next is Status Stage * complete sequence: s->status */ LOG_DBG("s->(status)"); next_stage = CTRL_PIPE_STAGE_NO_DATA; } else if (udc_data_stage_to_host(buf)) { /* * Next is Data Stage (to host / IN) * complete sequence: s->in->status */ LOG_DBG("s->(in)"); next_stage = CTRL_PIPE_STAGE_DATA_IN; } else { /* * Next is Data Stage (to device / OUT) * complete sequence: s->out->status */ LOG_DBG("s->(out)"); next_stage = CTRL_PIPE_STAGE_DATA_OUT; } } else if (bi->ep == USB_CONTROL_EP_OUT) { if (data->stage == CTRL_PIPE_STAGE_DATA_OUT) { /* * Next sequence is Status Stage if request is okay, * (IN ZLP status to host) */ next_stage = CTRL_PIPE_STAGE_STATUS_IN; } else if (data->stage == CTRL_PIPE_STAGE_STATUS_OUT) { /* * End of a sequence: s->in->status, * We should check the length here because we always * submit a OUT request with the minimum length * of the control endpoint. */ if (buf->len == 0) { LOG_DBG("s-in-status"); next_stage = CTRL_PIPE_STAGE_SETUP; } else { LOG_WRN("ZLP expected"); next_stage = CTRL_PIPE_STAGE_ERROR; } } else { LOG_ERR("Cannot determine the next stage"); next_stage = CTRL_PIPE_STAGE_ERROR; } } else { /* if (bi->ep == USB_CONTROL_EP_IN) */ if (data->stage == CTRL_PIPE_STAGE_STATUS_IN) { /* * End of a sequence: setup->out->in */ LOG_DBG("s-out-status"); next_stage = CTRL_PIPE_STAGE_SETUP; } else if (data->stage == CTRL_PIPE_STAGE_DATA_IN) { /* * Data IN stage completed, next sequence * is Status Stage (OUT ZLP status to device). * over-engineered controllers can send status * on their own, skip this state then. */ if (caps.out_ack) { LOG_DBG("s-in->[status]"); next_stage = CTRL_PIPE_STAGE_SETUP; } else { LOG_DBG("s-in->(status)"); next_stage = CTRL_PIPE_STAGE_STATUS_OUT; } } else if (data->stage == CTRL_PIPE_STAGE_NO_DATA) { /* * End of a sequence (setup->in) * Previous NO Data stage was completed and * we confirmed it with an IN ZLP. 
*/ LOG_DBG("s-status"); next_stage = CTRL_PIPE_STAGE_SETUP; } else { LOG_ERR("Cannot determine the next stage"); next_stage = CTRL_PIPE_STAGE_ERROR; } } if (next_stage == data->stage) { LOG_WRN("State not changed!"); } data->stage = next_stage; } #if defined(CONFIG_UDC_WORKQUEUE) K_KERNEL_STACK_DEFINE(udc_work_q_stack, CONFIG_UDC_WORKQUEUE_STACK_SIZE); struct k_work_q udc_work_q; static int udc_work_q_init(void) { k_work_queue_start(&udc_work_q, udc_work_q_stack, K_KERNEL_STACK_SIZEOF(udc_work_q_stack), CONFIG_UDC_WORKQUEUE_PRIORITY, NULL); k_thread_name_set(&udc_work_q.thread, "udc_work_q"); return 0; } SYS_INIT(udc_work_q_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT); #endif ```
/content/code_sandbox/drivers/usb/udc/udc_common.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,959
```unknown config UDC_IT82XX2 bool "IT82XX2 USB device controller driver" default y depends on DT_HAS_ITE_IT82XX2_USB_ENABLED help IT82xx2 USB device controller driver. if UDC_IT82XX2 config UDC_IT82xx2_EVENT_COUNT int "UDC IT82xx2 event count" range 4 64 default 8 help IT82xx2 event count. config UDC_IT82xx2_STACK_SIZE int "IT82xx2 UDC driver internal thread stack size" default 1024 help Size of the stack used in the driver for IT82xx2 USBD ISR event handling. endif # UDC_IT82XX2 ```
/content/code_sandbox/drivers/usb/udc/Kconfig.it82xx2
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
161
```c /* * */ #define DT_DRV_COMPAT nuvoton_numaker_usbd #include <zephyr/kernel.h> #include <zephyr/drivers/usb/udc.h> #include <zephyr/sys/math_extras.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/clock_control_numaker.h> #include <zephyr/drivers/reset.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(udc_numaker, CONFIG_UDC_DRIVER_LOG_LEVEL); #include <soc.h> #include <NuMicro.h> #include "udc_common.h" /* USBD notes * * 1. Require 48MHz clock source * (1) Not support HIRC48 as clock source. It involves trim with USB SOF packets * and isn't suitable in HAL. * (2) Instead of HICR48, core clock is required to be multiple of 48MHz e.g. 192MHz, * to generate necessary 48MHz. */ /* For bus reset, keep 'SE0' (USB spec: SE0 >= 2.5 ms) */ #define NUMAKER_USBD_BUS_RESET_DRV_SE0_US 3000 /* For bus resume, generate 'K' (USB spec: 'K' >= 1 ms) */ #define NUMAKER_USBD_BUS_RESUME_DRV_K_US 1500 /* Reserve DMA buffer for Setup/CTRL OUT/CTRL IN, required to be 8-byte aligned */ #define NUMAKER_USBD_DMABUF_SIZE_SETUP 8 #define NUMAKER_USBD_DMABUF_SIZE_CTRLOUT 64 #define NUMAKER_USBD_DMABUF_SIZE_CTRLIN 64 enum numaker_usbd_msg_type { /* Setup packet received */ NUMAKER_USBD_MSG_TYPE_SETUP, /* OUT transaction for specific EP completed */ NUMAKER_USBD_MSG_TYPE_OUT, /* IN transaction for specific EP completed */ NUMAKER_USBD_MSG_TYPE_IN, /* Re-activate queued transfer for specific EP */ NUMAKER_USBD_MSG_TYPE_XFER, /* S/W reconnect */ NUMAKER_USBD_MSG_TYPE_SW_RECONN, }; struct numaker_usbd_msg { enum numaker_usbd_msg_type type; union { struct { enum udc_event_type type; } udc_bus_event; struct { uint8_t packet[8]; } setup; struct { uint8_t ep; } out; struct { uint8_t ep; } in; struct { uint8_t ep; } xfer; }; }; /* EP H/W context */ struct numaker_usbd_ep { bool valid; const struct device *dev; /* Pointer to the containing device */ uint8_t ep_hw_idx; /* BSP USBD driver EP index EP0, EP1, EP2, etc */ uint32_t 
ep_hw_cfg; /* BSP USBD driver EP configuration */ /* EP DMA buffer */ bool dmabuf_valid; uint32_t dmabuf_base; uint32_t dmabuf_size; /* NOTE: On USBD, Setup and CTRL OUT are not completely separated. CTRL OUT MXPLD * can be overridden to 8 by next Setup. To overcome it, we make one copy of CTRL * OUT MXPLD immediately on its interrupt. */ uint32_t mxpld_ctrlout; /* EP address */ bool addr_valid; uint8_t addr; /* EP MPS */ bool mps_valid; uint16_t mps; }; /* Immutable device context */ struct udc_numaker_config { struct udc_ep_config *ep_cfg_out; struct udc_ep_config *ep_cfg_in; uint32_t ep_cfg_out_size; uint32_t ep_cfg_in_size; USBD_T *base; const struct reset_dt_spec reset; uint32_t clk_modidx; uint32_t clk_src; uint32_t clk_div; const struct device *clkctrl_dev; void (*irq_config_func)(const struct device *dev); void (*irq_unconfig_func)(const struct device *dev); const struct pinctrl_dev_config *pincfg; uint32_t dmabuf_size; bool disallow_iso_inout_same; void (*make_thread)(const struct device *dev); }; /* EP H/W context manager */ struct numaker_usbd_ep_mgmt { /* EP H/W context management * * Allocate-only, and de-allocate all on re-initialize in udc_numaker_init(). */ uint8_t ep_idx; /* DMA buffer management * * Allocate-only, and de-allocate all on re-initialize in udc_numaker_init(). */ uint32_t dmabuf_pos; }; /* Mutable device context */ struct udc_numaker_data { uint8_t addr; /* Host assigned USB device address */ struct k_msgq *msgq; struct numaker_usbd_ep_mgmt ep_mgmt; /* EP management */ struct numaker_usbd_ep *ep_pool; uint32_t ep_pool_size; struct k_thread thread_data; /* Track end of CTRL DATA OUT/STATUS OUT stage * * net_buf can over-allocate for UDC_BUF_GRANULARITY requirement * and net_buf_tailroom() cannot reflect free buffer room exactly * as allocate request. Manually track it instead. 
*/ uint32_t ctrlout_tailroom; }; static inline void numaker_usbd_sw_connect(const struct device *dev) { const struct udc_numaker_config *config = dev->config; USBD_T *const base = config->base; /* Clear all interrupts first for clean */ base->INTSTS = base->INTSTS; /* Enable relevant interrupts */ base->INTEN = USBD_INT_BUS | USBD_INT_USB | USBD_INT_FLDET | USBD_INT_WAKEUP | USBD_INT_SOF; /* Clear SE0 for connect */ base->SE0 &= ~USBD_DRVSE0; } static inline void numaker_usbd_sw_disconnect(const struct device *dev) { const struct udc_numaker_config *config = dev->config; USBD_T *const base = config->base; /* Set SE0 for disconnect */ base->SE0 |= USBD_DRVSE0; } static inline void numaker_usbd_sw_reconnect(const struct device *dev) { /* Keep SE0 to trigger bus reset */ numaker_usbd_sw_disconnect(dev); k_sleep(K_USEC(NUMAKER_USBD_BUS_RESET_DRV_SE0_US)); numaker_usbd_sw_connect(dev); } static inline void numaker_usbd_reset_addr(const struct device *dev) { const struct udc_numaker_config *config = dev->config; struct udc_numaker_data *priv = udc_get_private(dev); USBD_T *const base = config->base; base->FADDR = 0; priv->addr = 0; } static inline void numaker_usbd_set_addr(const struct device *dev) { const struct udc_numaker_config *config = dev->config; struct udc_numaker_data *priv = udc_get_private(dev); USBD_T *const base = config->base; if (base->FADDR != priv->addr) { base->FADDR = priv->addr; } } /* USBD EP base by e.g. EP0, EP1, ... 
*/ static inline USBD_EP_T *numaker_usbd_ep_base(const struct device *dev, uint32_t ep_hw_idx) { const struct udc_numaker_config *config = dev->config; USBD_T *const base = config->base; return base->EP + ep_hw_idx; } static inline void numaker_usbd_ep_sync_udc_halt(struct numaker_usbd_ep *ep_cur, bool stalled) { const struct device *dev = ep_cur->dev; struct udc_ep_config *ep_cfg; __ASSERT_NO_MSG(ep_cur->addr_valid); ep_cfg = udc_get_ep_cfg(dev, ep_cur->addr); ep_cfg->stat.halted = stalled; } static inline void numaker_usbd_ep_set_stall(struct numaker_usbd_ep *ep_cur) { const struct device *dev = ep_cur->dev; USBD_EP_T *ep_base = numaker_usbd_ep_base(dev, ep_cur->ep_hw_idx); /* Set EP to stalled */ ep_base->CFGP |= USBD_CFGP_SSTALL_Msk; numaker_usbd_ep_sync_udc_halt(ep_cur, true); } /* Reset EP to unstalled and data toggle bit to 0 */ static inline void numaker_usbd_ep_clear_stall_n_data_toggle(struct numaker_usbd_ep *ep_cur) { const struct device *dev = ep_cur->dev; USBD_EP_T *ep_base = numaker_usbd_ep_base(dev, ep_cur->ep_hw_idx); /* Reset EP to unstalled */ ep_base->CFGP &= ~USBD_CFGP_SSTALL_Msk; numaker_usbd_ep_sync_udc_halt(ep_cur, false); /* Reset EP data toggle bit to 0 */ ep_base->CFG &= ~USBD_CFG_DSQSYNC_Msk; } static int numaker_usbd_send_msg(const struct device *dev, const struct numaker_usbd_msg *msg) { struct udc_numaker_data *priv = udc_get_private(dev); int err; err = k_msgq_put(priv->msgq, msg, K_NO_WAIT); if (err < 0) { /* Try to recover by S/W reconnect */ struct numaker_usbd_msg msg_reconn = { .type = NUMAKER_USBD_MSG_TYPE_SW_RECONN, }; LOG_ERR("Message queue overflow"); /* Discard all not yet received messages for error recovery below */ k_msgq_purge(priv->msgq); err = k_msgq_put(priv->msgq, &msg_reconn, K_NO_WAIT); if (err < 0) { LOG_ERR("Message queue overflow again"); } } return err; } static int numaker_usbd_hw_setup(const struct device *dev) { const struct udc_numaker_config *config = dev->config; USBD_T *const base = config->base; int 
err; struct numaker_scc_subsys scc_subsys; /* Reset controller ready? */ if (!device_is_ready(config->reset.dev)) { LOG_ERR("Reset controller not ready"); return -ENODEV; } SYS_UnlockReg(); /* Configure USB PHY for USBD */ SYS->USBPHY = (SYS->USBPHY & ~SYS_USBPHY_USBROLE_Msk) | (SYS_USBPHY_USBROLE_STD_USBD | SYS_USBPHY_USBEN_Msk | SYS_USBPHY_SBO_Msk); /* Invoke Clock controller to enable module clock */ memset(&scc_subsys, 0x00, sizeof(scc_subsys)); scc_subsys.subsys_id = NUMAKER_SCC_SUBSYS_ID_PCC; scc_subsys.pcc.clk_modidx = config->clk_modidx; scc_subsys.pcc.clk_src = config->clk_src; scc_subsys.pcc.clk_div = config->clk_div; /* Equivalent to CLK_EnableModuleClock() */ err = clock_control_on(config->clkctrl_dev, (clock_control_subsys_t)&scc_subsys); if (err < 0) { goto cleanup; } /* Equivalent to CLK_SetModuleClock() */ err = clock_control_configure(config->clkctrl_dev, (clock_control_subsys_t)&scc_subsys, NULL); if (err < 0) { goto cleanup; } /* Configure pinmux (NuMaker's SYS MFP) */ err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (err < 0) { goto cleanup; } /* Invoke Reset controller to reset module to default state */ /* Equivalent to SYS_ResetModule() */ reset_line_toggle_dt(&config->reset); /* Initialize USBD engine */ /* NOTE: BSP USBD driver: ATTR = 0x7D0 */ base->ATTR = USBD_ATTR_BYTEM_Msk | BIT(9) | USBD_ATTR_DPPUEN_Msk | USBD_ATTR_USBEN_Msk | BIT(6) | USBD_ATTR_PHYEN_Msk; /* Set SE0 for S/W disconnect */ numaker_usbd_sw_disconnect(dev); /* NOTE: Ignore DT maximum-speed with USBD fixed to full-speed */ /* Initialize IRQ */ config->irq_config_func(dev); cleanup: SYS_LockReg(); return err; } static void numaker_usbd_hw_shutdown(const struct device *dev) { const struct udc_numaker_config *config = dev->config; USBD_T *const base = config->base; struct numaker_scc_subsys scc_subsys; SYS_UnlockReg(); /* Uninitialize IRQ */ config->irq_unconfig_func(dev); /* Set SE0 for S/W disconnect */ numaker_usbd_sw_disconnect(dev); /* Disable USB PHY 
*/ base->ATTR &= ~USBD_PHY_EN; /* Invoke Clock controller to disable module clock */ memset(&scc_subsys, 0x00, sizeof(scc_subsys)); scc_subsys.subsys_id = NUMAKER_SCC_SUBSYS_ID_PCC; scc_subsys.pcc.clk_modidx = config->clk_modidx; /* Equivalent to CLK_DisableModuleClock() */ clock_control_off(config->clkctrl_dev, (clock_control_subsys_t)&scc_subsys); /* Invoke Reset controller to reset module to default state */ /* Equivalent to SYS_ResetModule() */ reset_line_toggle_dt(&config->reset); SYS_LockReg(); } /* Interrupt top half processing for bus reset */ static void numaker_usbd_bus_reset_th(const struct device *dev) { struct udc_numaker_data *priv = udc_get_private(dev); USBD_EP_T *ep_base; for (uint32_t i = 0ul; i < priv->ep_pool_size; i++) { ep_base = numaker_usbd_ep_base(dev, EP0 + i); /* Cancel EP on-going transaction */ ep_base->CFGP |= USBD_CFGP_CLRRDY_Msk; /* Reset EP to unstalled */ ep_base->CFGP &= ~USBD_CFGP_SSTALL_Msk; /* Reset EP data toggle bit to 0 */ ep_base->CFG &= ~USBD_CFG_DSQSYNC_Msk; /* Except EP0/EP1 kept resident for CTRL OUT/IN, disable all other EPs */ if (i >= 2) { ep_base->CFG = 0; } } numaker_usbd_reset_addr(dev); } /* USBD SRAM base for DMA */ static inline uint32_t numaker_usbd_buf_base(const struct device *dev) { const struct udc_numaker_config *config = dev->config; USBD_T *const base = config->base; return ((uint32_t)base + 0x800ul); } /* Copy Setup packet to user buffer */ static void numaker_usbd_setup_copy_to_user(const struct device *dev, uint8_t *usrbuf) { const struct udc_numaker_config *config = dev->config; USBD_T *const base = config->base; uint32_t dmabuf_addr; dmabuf_addr = numaker_usbd_buf_base(dev) + (base->STBUFSEG & USBD_STBUFSEG_STBUFSEG_Msk); bytecpy(usrbuf, (uint8_t *)dmabuf_addr, 8ul); } /* Copy data to user buffer * * size_p holds size to copy/copied on input/output */ static void numaker_usbd_ep_copy_to_user(struct numaker_usbd_ep *ep_cur, uint8_t *usrbuf, uint32_t *size_p, uint32_t *rmn_p) { const struct device 
*dev = ep_cur->dev; USBD_EP_T *ep_base = numaker_usbd_ep_base(dev, ep_cur->ep_hw_idx); uint32_t dmabuf_addr; uint32_t data_rmn; __ASSERT_NO_MSG(size_p); __ASSERT_NO_MSG(ep_cur->dmabuf_valid); dmabuf_addr = numaker_usbd_buf_base(dev) + ep_base->BUFSEG; /* NOTE: See comment on mxpld_ctrlout for why make one copy of CTRL OUT's MXPLD */ if (ep_cur->addr == USB_CONTROL_EP_OUT) { data_rmn = ep_cur->mxpld_ctrlout; } else { data_rmn = ep_base->MXPLD; } *size_p = MIN(*size_p, data_rmn); bytecpy(usrbuf, (uint8_t *)dmabuf_addr, *size_p); data_rmn -= *size_p; if (rmn_p) { *rmn_p = data_rmn; } } /* Copy data from user buffer * * size_p holds size to copy/copied on input/output */ static void numaker_usbd_ep_copy_from_user(struct numaker_usbd_ep *ep_cur, const uint8_t *usrbuf, uint32_t *size_p) { const struct device *dev = ep_cur->dev; USBD_EP_T *ep_base = numaker_usbd_ep_base(dev, ep_cur->ep_hw_idx); uint32_t dmabuf_addr; __ASSERT_NO_MSG(size_p); __ASSERT_NO_MSG(ep_cur->dmabuf_valid); __ASSERT_NO_MSG(ep_cur->mps_valid); __ASSERT_NO_MSG(ep_cur->mps <= ep_cur->dmabuf_size); dmabuf_addr = numaker_usbd_buf_base(dev) + ep_base->BUFSEG; *size_p = MIN(*size_p, ep_cur->mps); bytecpy((uint8_t *)dmabuf_addr, (uint8_t *)usrbuf, *size_p); } static void numaker_usbd_ep_config_dmabuf(struct numaker_usbd_ep *ep_cur, uint32_t dmabuf_base, uint32_t dmabuf_size) { const struct device *dev = ep_cur->dev; USBD_EP_T *ep_base = numaker_usbd_ep_base(dev, ep_cur->ep_hw_idx); ep_base->BUFSEG = dmabuf_base; ep_cur->dmabuf_valid = true; ep_cur->dmabuf_base = dmabuf_base; ep_cur->dmabuf_size = dmabuf_size; } static void numaker_usbd_ep_abort(struct numaker_usbd_ep *ep_cur) { const struct device *dev = ep_cur->dev; USBD_EP_T *ep_base = numaker_usbd_ep_base(dev, ep_cur->ep_hw_idx); /* Abort EP on-going transaction */ ep_base->CFGP |= USBD_CFGP_CLRRDY_Msk; if (ep_cur->addr_valid) { udc_ep_set_busy(dev, ep_cur->addr, false); } } /* Configure EP major common parts */ static void 
numaker_usbd_ep_config_major(struct numaker_usbd_ep *ep_cur, struct udc_ep_config *const ep_cfg) { const struct device *dev = ep_cur->dev; USBD_EP_T *ep_base = numaker_usbd_ep_base(dev, ep_cur->ep_hw_idx); ep_cur->mps_valid = true; ep_cur->mps = ep_cfg->mps; /* Configure EP transfer type, DATA0/1 toggle, direction, number, etc. */ ep_cur->ep_hw_cfg = 0; /* Clear STALL Response in Setup stage */ if ((ep_cfg->attributes & USB_EP_TRANSFER_TYPE_MASK) == USB_EP_TYPE_CONTROL) { ep_cur->ep_hw_cfg |= USBD_CFG_CSTALL; } /* Default to DATA0 */ ep_cur->ep_hw_cfg &= ~USBD_CFG_DSQSYNC_Msk; /* Endpoint IN/OUT, though, default to disabled */ ep_cur->ep_hw_cfg |= USBD_CFG_EPMODE_DISABLE; /* Isochronous or not */ if ((ep_cfg->attributes & USB_EP_TRANSFER_TYPE_MASK) == USB_EP_TYPE_ISO) { ep_cur->ep_hw_cfg |= USBD_CFG_TYPE_ISO; } /* Endpoint index */ ep_cur->ep_hw_cfg |= (USB_EP_GET_IDX(ep_cfg->addr) << USBD_CFG_EPNUM_Pos) & USBD_CFG_EPNUM_Msk; ep_base->CFG = ep_cur->ep_hw_cfg; } static void numaker_usbd_ep_enable(struct numaker_usbd_ep *ep_cur) { const struct device *dev = ep_cur->dev; USBD_EP_T *ep_base = numaker_usbd_ep_base(dev, ep_cur->ep_hw_idx); /* For safe, EP (re-)enable from clean state */ numaker_usbd_ep_abort(ep_cur); numaker_usbd_ep_clear_stall_n_data_toggle(ep_cur); /* Enable EP to IN/OUT */ ep_cur->ep_hw_cfg &= ~USBD_CFG_STATE_Msk; if (USB_EP_DIR_IS_IN(ep_cur->addr)) { ep_cur->ep_hw_cfg |= USBD_CFG_EPMODE_IN; } else { ep_cur->ep_hw_cfg |= USBD_CFG_EPMODE_OUT; } ep_base->CFG = ep_cur->ep_hw_cfg; /* For USBD, no separate EP interrupt control */ } static void numaker_usbd_ep_disable(struct numaker_usbd_ep *ep_cur) { const struct device *dev = ep_cur->dev; USBD_EP_T *ep_base = numaker_usbd_ep_base(dev, ep_cur->ep_hw_idx); /* For USBD, no separate EP interrupt control */ /* Disable EP */ ep_cur->ep_hw_cfg = (ep_cur->ep_hw_cfg & ~USBD_CFG_STATE_Msk) | USBD_CFG_EPMODE_DISABLE; ep_base->CFG = ep_cur->ep_hw_cfg; } /* Start EP data transaction */ static void 
udc_numaker_ep_trigger(struct numaker_usbd_ep *ep_cur, uint32_t len) { const struct device *dev = ep_cur->dev; USBD_EP_T *ep_base = numaker_usbd_ep_base(dev, ep_cur->ep_hw_idx); if (ep_cur->addr_valid) { udc_ep_set_busy(dev, ep_cur->addr, true); } ep_base->MXPLD = len; } static struct numaker_usbd_ep *numaker_usbd_ep_mgmt_alloc_ep(const struct device *dev) { struct udc_numaker_data *priv = udc_get_private(dev); struct numaker_usbd_ep_mgmt *ep_mgmt = &priv->ep_mgmt; struct numaker_usbd_ep *ep_cur = NULL; if (ep_mgmt->ep_idx < priv->ep_pool_size) { ep_cur = priv->ep_pool + ep_mgmt->ep_idx; ep_mgmt->ep_idx++; __ASSERT_NO_MSG(!ep_cur->valid); /* Indicate this EP H/W context is allocated */ ep_cur->valid = true; } return ep_cur; } /* Allocate DMA buffer * * Return -ENOMEM on OOM error, or 0 on success with DMA buffer base/size (rounded up) allocated */ static int numaker_usbd_ep_mgmt_alloc_dmabuf(const struct device *dev, uint32_t size, uint32_t *dmabuf_base_p, uint32_t *dmabuf_size_p) { const struct udc_numaker_config *config = dev->config; struct udc_numaker_data *priv = udc_get_private(dev); struct numaker_usbd_ep_mgmt *ep_mgmt = &priv->ep_mgmt; __ASSERT_NO_MSG(dmabuf_base_p); __ASSERT_NO_MSG(dmabuf_size_p); /* Required to be 8-byte aligned */ size = ROUND_UP(size, 8); ep_mgmt->dmabuf_pos += size; if (ep_mgmt->dmabuf_pos > config->dmabuf_size) { ep_mgmt->dmabuf_pos -= size; return -ENOMEM; } *dmabuf_base_p = ep_mgmt->dmabuf_pos - size; *dmabuf_size_p = size; return 0; } /* Initialize all EP H/W contexts */ static void numaker_usbd_ep_mgmt_init(const struct device *dev) { const struct udc_numaker_config *config = dev->config; struct udc_numaker_data *priv = udc_get_private(dev); USBD_T *const base = config->base; struct numaker_usbd_ep_mgmt *ep_mgmt = &priv->ep_mgmt; struct numaker_usbd_ep *ep_cur; struct numaker_usbd_ep *ep_end; /* Initialize all fields to zero for clean state */ memset(ep_mgmt, 0x00, sizeof(*ep_mgmt)); ep_cur = priv->ep_pool; ep_end = priv->ep_pool 
+ priv->ep_pool_size; /* Initialize all EP H/W contexts */ for (; ep_cur != ep_end; ep_cur++) { /* Zero-initialize */ memset(ep_cur, 0x00, sizeof(*ep_cur)); /* Pointer to the containing device */ ep_cur->dev = dev; /* BSP USBD driver EP handle */ ep_cur->ep_hw_idx = EP0 + (ep_cur - priv->ep_pool); } /* Reserve 1st/2nd EP H/W contexts (BSP USBD driver EP0/EP1) for CTRL OUT/IN */ ep_mgmt->ep_idx = 2; /* Reserve DMA buffer for Setup/CTRL OUT/CTRL IN, starting from 0 */ ep_mgmt->dmabuf_pos = 0; /* Configure DMA buffer for Setup packet */ base->STBUFSEG = ep_mgmt->dmabuf_pos; ep_mgmt->dmabuf_pos += NUMAKER_USBD_DMABUF_SIZE_SETUP; /* Reserve 1st EP H/W context (BSP USBD driver EP0) for CTRL OUT */ ep_cur = priv->ep_pool + 0; ep_cur->valid = true; ep_cur->addr_valid = true; ep_cur->addr = USB_EP_GET_ADDR(0, USB_EP_DIR_OUT); numaker_usbd_ep_config_dmabuf(ep_cur, ep_mgmt->dmabuf_pos, NUMAKER_USBD_DMABUF_SIZE_CTRLOUT); ep_mgmt->dmabuf_pos += NUMAKER_USBD_DMABUF_SIZE_CTRLOUT; ep_cur->mps_valid = true; ep_cur->mps = NUMAKER_USBD_DMABUF_SIZE_CTRLOUT; /* Reserve 2nd EP H/W context (BSP USBD driver EP1) for CTRL IN */ ep_cur = priv->ep_pool + 1; ep_cur->valid = true; ep_cur->addr_valid = true; ep_cur->addr = USB_EP_GET_ADDR(0, USB_EP_DIR_IN); numaker_usbd_ep_config_dmabuf(ep_cur, ep_mgmt->dmabuf_pos, NUMAKER_USBD_DMABUF_SIZE_CTRLIN); ep_mgmt->dmabuf_pos += NUMAKER_USBD_DMABUF_SIZE_CTRLIN; ep_cur->mps_valid = true; ep_cur->mps = NUMAKER_USBD_DMABUF_SIZE_CTRLIN; } /* Find EP H/W context by EP address */ static struct numaker_usbd_ep *numaker_usbd_ep_mgmt_find_ep(const struct device *dev, const uint8_t ep) { struct udc_numaker_data *priv = udc_get_private(dev); struct numaker_usbd_ep *ep_cur = priv->ep_pool; struct numaker_usbd_ep *ep_end = priv->ep_pool + priv->ep_pool_size; for (; ep_cur != ep_end; ep_cur++) { if (!ep_cur->valid) { continue; } if (!ep_cur->addr_valid) { continue; } if (ep == ep_cur->addr) { return ep_cur; } } return NULL; } /* Bind EP H/W context to EP address */ 
static struct numaker_usbd_ep *numaker_usbd_ep_mgmt_bind_ep(const struct device *dev, const uint8_t ep) { struct numaker_usbd_ep *ep_cur = numaker_usbd_ep_mgmt_find_ep(dev, ep); if (!ep_cur) { ep_cur = numaker_usbd_ep_mgmt_alloc_ep(dev); if (!ep_cur) { return NULL; } /* Bind EP H/W context to EP address */ ep_cur->addr = ep; ep_cur->addr_valid = true; } /* Assert EP H/W context bound to EP address */ __ASSERT_NO_MSG(ep_cur->valid); __ASSERT_NO_MSG(ep_cur->addr_valid); __ASSERT_NO_MSG(ep_cur->addr == ep); return ep_cur; } static int numaker_usbd_xfer_out(const struct device *dev, uint8_t ep, bool strict) { struct net_buf *buf; struct numaker_usbd_ep *ep_cur; if (!USB_EP_DIR_IS_OUT(ep)) { LOG_ERR("Invalid EP address 0x%02x for data out", ep); return -EINVAL; } if (udc_ep_is_busy(dev, ep)) { if (strict) { LOG_ERR("EP 0x%02x busy", ep); return -EAGAIN; } return 0; } buf = udc_buf_peek(dev, ep); if (buf == NULL) { if (strict) { LOG_ERR("No buffer queued for EP 0x%02x", ep); return -ENODATA; } return 0; } /* Bind EP H/W context to EP address */ ep_cur = numaker_usbd_ep_mgmt_bind_ep(dev, ep); if (!ep_cur) { LOG_ERR("Bind EP H/W context: ep=0x%02x", ep); return -ENODEV; } udc_numaker_ep_trigger(ep_cur, ep_cur->mps); return 0; } static int numaker_usbd_xfer_in(const struct device *dev, uint8_t ep, bool strict) { struct net_buf *buf; struct numaker_usbd_ep *ep_cur; uint32_t data_len; if (!USB_EP_DIR_IS_IN(ep)) { LOG_ERR("Invalid EP address 0x%02x for data in", ep); return -EINVAL; } if (udc_ep_is_busy(dev, ep)) { if (strict) { LOG_ERR("EP 0x%02x busy", ep); return -EAGAIN; } return 0; } buf = udc_buf_peek(dev, ep); if (buf == NULL) { if (strict) { LOG_ERR("No buffer queued for EP 0x%02x", ep); return -ENODATA; } return 0; } /* Bind EP H/W context to EP address */ ep_cur = numaker_usbd_ep_mgmt_bind_ep(dev, ep); if (!ep_cur) { LOG_ERR("ep=0x%02x", ep); return -ENODEV; } data_len = buf->len; if (data_len) { numaker_usbd_ep_copy_from_user(ep_cur, buf->data, &data_len); 
net_buf_pull(buf, data_len); } else if (udc_ep_buf_has_zlp(buf)) { /* zlp, send exactly once */ udc_ep_buf_clear_zlp(buf); } else { /* initially empty net_buf, send exactly once */ } udc_numaker_ep_trigger(ep_cur, data_len); return 0; } static int numaker_usbd_ctrl_feed_dout(const struct device *dev, const size_t length) { struct udc_numaker_data *priv = udc_get_private(dev); struct udc_ep_config *ep_cfg; struct net_buf *buf; ep_cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT); if (ep_cfg == NULL) { LOG_ERR("Bind udc_ep_config: ep=0x%02x", USB_CONTROL_EP_OUT); return -ENODEV; } buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, length); if (buf == NULL) { LOG_ERR("Allocate net_buf: ep=0x%02x", USB_CONTROL_EP_OUT); return -ENOMEM; } priv->ctrlout_tailroom = length; k_fifo_put(&ep_cfg->fifo, buf); return numaker_usbd_xfer_out(dev, ep_cfg->addr, true); } /* Message handler for Setup transaction completed */ static int numaker_usbd_msg_handle_setup(const struct device *dev, struct numaker_usbd_msg *msg) { int err; uint8_t ep; struct numaker_usbd_ep *ep_cur; struct net_buf *buf; uint8_t *data_ptr; __ASSERT_NO_MSG(msg->type == NUMAKER_USBD_MSG_TYPE_SETUP); ep = USB_CONTROL_EP_OUT; /* Bind EP H/W context to EP address */ ep_cur = numaker_usbd_ep_mgmt_bind_ep(dev, ep); if (!ep_cur) { LOG_ERR("Bind EP H/W context: ep=0x%02x", ep); return -ENODEV; } /* We should have reserved 1st/2nd EP H/W contexts for CTRL OUT/IN */ __ASSERT_NO_MSG(ep_cur->addr == USB_CONTROL_EP_OUT); __ASSERT_NO_MSG((ep_cur + 1)->addr == USB_CONTROL_EP_IN); /* Abort previous CTRL OUT/IN */ numaker_usbd_ep_abort(ep_cur); numaker_usbd_ep_abort(ep_cur + 1); /* CTRL OUT/IN reset to unstalled by H/W on receive of Setup packet */ numaker_usbd_ep_sync_udc_halt(ep_cur, false); numaker_usbd_ep_sync_udc_halt(ep_cur + 1, false); buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, 8); if (buf == NULL) { LOG_ERR("Failed to allocate for Setup"); return -ENOMEM; } udc_ep_buf_set_setup(buf); data_ptr = net_buf_tail(buf); 
numaker_usbd_setup_copy_to_user(dev, data_ptr); net_buf_add(buf, 8); /* Update to next stage of CTRL transfer */ udc_ctrl_update_stage(dev, buf); if (udc_ctrl_stage_is_data_out(dev)) { /* Allocate and feed buffer for DATA OUT stage */ err = numaker_usbd_ctrl_feed_dout(dev, udc_data_stage_length(buf)); if (err == -ENOMEM) { err = udc_submit_ep_event(dev, buf, err); } } else if (udc_ctrl_stage_is_data_in(dev)) { err = udc_ctrl_submit_s_in_status(dev); } else { err = udc_ctrl_submit_s_status(dev); } return err; } /* Message handler for DATA OUT transaction completed */ static int numaker_usbd_msg_handle_out(const struct device *dev, struct numaker_usbd_msg *msg) { struct udc_numaker_data *priv = udc_get_private(dev); int err; uint8_t ep; struct numaker_usbd_ep *ep_cur; struct net_buf *buf; uint8_t *data_ptr; uint32_t data_len; uint32_t data_rmn; __ASSERT_NO_MSG(msg->type == NUMAKER_USBD_MSG_TYPE_OUT); ep = msg->out.ep; udc_ep_set_busy(dev, ep, false); /* Bind EP H/W context to EP address */ ep_cur = numaker_usbd_ep_mgmt_bind_ep(dev, ep); if (!ep_cur) { LOG_ERR("Bind EP H/W context: ep=0x%02x", ep); return -ENODEV; } buf = udc_buf_peek(dev, ep); if (buf == NULL) { LOG_ERR("No buffer queued for ep=0x%02x", ep); return -ENODATA; } if (ep == USB_CONTROL_EP_OUT) { __ASSERT_NO_MSG(net_buf_tailroom(buf) >= priv->ctrlout_tailroom); data_len = priv->ctrlout_tailroom; } else { data_len = net_buf_tailroom(buf); } data_ptr = net_buf_tail(buf); numaker_usbd_ep_copy_to_user(ep_cur, data_ptr, &data_len, &data_rmn); net_buf_add(buf, data_len); if (ep == USB_CONTROL_EP_OUT) { __ASSERT_NO_MSG(priv->ctrlout_tailroom >= data_len); priv->ctrlout_tailroom -= data_len; } if (data_rmn) { LOG_ERR("Buffer queued for ep=0x%02x cannot accommodate packet", ep); return -ENOBUFS; } /* CTRL DATA OUT/STATUS OUT stage completed */ if (ep == USB_CONTROL_EP_OUT && priv->ctrlout_tailroom != 0) { goto next_xfer; } /* To submit the peeked buffer */ udc_buf_get(dev, ep); if (ep == USB_CONTROL_EP_OUT) { if 
(udc_ctrl_stage_is_status_out(dev)) { /* s-in-status finished */ err = udc_ctrl_submit_status(dev, buf); if (err < 0) { LOG_ERR("udc_ctrl_submit_status failed for s-in-status: %d", err); return err; } } /* Update to next stage of CTRL transfer */ udc_ctrl_update_stage(dev, buf); if (udc_ctrl_stage_is_status_in(dev)) { err = udc_ctrl_submit_s_out_status(dev, buf); if (err < 0) { LOG_ERR("udc_ctrl_submit_s_out_status failed for s-out-status: %d", err); return err; } } } else { err = udc_submit_ep_event(dev, buf, 0); if (err < 0) { LOG_ERR("udc_submit_ep_event failed for ep=0x%02x: %d", ep, err); return err; } } next_xfer: /* Continue with next DATA OUT transaction on request */ numaker_usbd_xfer_out(dev, ep, false); return 0; } /* Message handler for DATA IN transaction completed */ static int numaker_usbd_msg_handle_in(const struct device *dev, struct numaker_usbd_msg *msg) { int err; uint8_t ep; struct numaker_usbd_ep *ep_cur; struct net_buf *buf; __ASSERT_NO_MSG(msg->type == NUMAKER_USBD_MSG_TYPE_IN); ep = msg->in.ep; udc_ep_set_busy(dev, ep, false); /* Bind EP H/W context to EP address */ ep_cur = numaker_usbd_ep_mgmt_bind_ep(dev, ep); if (!ep_cur) { LOG_ERR("Bind EP H/W context: ep=0x%02x", ep); return -ENODEV; } buf = udc_buf_peek(dev, ep); if (buf == NULL) { /* No DATA IN request */ return 0; } if (buf->len || udc_ep_buf_has_zlp(buf)) { goto xfer_next; } /* To submit the peeked buffer */ udc_buf_get(dev, ep); if (ep == USB_CONTROL_EP_IN) { if (udc_ctrl_stage_is_status_in(dev) || udc_ctrl_stage_is_no_data(dev)) { /* s-out-status/s-status finished */ err = udc_ctrl_submit_status(dev, buf); if (err < 0) { LOG_ERR("udc_ctrl_submit_status failed for s-out-status/s-status: " "%d", err); return err; } } /* Update to next stage of CTRL transfer */ udc_ctrl_update_stage(dev, buf); if (udc_ctrl_stage_is_status_out(dev)) { /* DATA IN stage finished, release buffer */ net_buf_unref(buf); /* Allocate and feed buffer for STATUS OUT stage */ err = 
numaker_usbd_ctrl_feed_dout(dev, 0); if (err < 0) { LOG_ERR("ctrl_feed_dout failed for status out: %d", err); return err; } } } else { err = udc_submit_ep_event(dev, buf, 0); if (err < 0) { LOG_ERR("udc_submit_ep_event failed for ep=0x%02x: %d", ep, err); return err; } } xfer_next: /* Continue with next DATA IN transaction on request */ numaker_usbd_xfer_in(dev, ep, false); return 0; } /* Message handler for queued transfer re-activated */ static int numaker_usbd_msg_handle_xfer(const struct device *dev, struct numaker_usbd_msg *msg) { uint8_t ep; __ASSERT_NO_MSG(msg->type == NUMAKER_USBD_MSG_TYPE_XFER); ep = msg->xfer.ep; if (USB_EP_DIR_IS_OUT(ep)) { numaker_usbd_xfer_out(dev, ep, false); } else { numaker_usbd_xfer_in(dev, ep, false); } return 0; } /* Message handler for S/W reconnect */ static int numaker_usbd_msg_handle_sw_reconn(const struct device *dev, struct numaker_usbd_msg *msg) { __ASSERT_NO_MSG(msg->type == NUMAKER_USBD_MSG_TYPE_SW_RECONN); /* S/W reconnect for error recovery */ numaker_usbd_sw_reconnect(dev); return 0; } static void numaker_usbd_msg_handler(const struct device *dev) { struct udc_numaker_data *priv = udc_get_private(dev); int err; struct numaker_usbd_msg msg; while (true) { if (k_msgq_get(priv->msgq, &msg, K_FOREVER)) { continue; } err = 0; udc_lock_internal(dev, K_FOREVER); switch (msg.type) { case NUMAKER_USBD_MSG_TYPE_SETUP: err = numaker_usbd_msg_handle_setup(dev, &msg); break; case NUMAKER_USBD_MSG_TYPE_OUT: err = numaker_usbd_msg_handle_out(dev, &msg); break; case NUMAKER_USBD_MSG_TYPE_IN: err = numaker_usbd_msg_handle_in(dev, &msg); break; case NUMAKER_USBD_MSG_TYPE_XFER: err = numaker_usbd_msg_handle_xfer(dev, &msg); break; case NUMAKER_USBD_MSG_TYPE_SW_RECONN: err = numaker_usbd_msg_handle_sw_reconn(dev, &msg); break; default: __ASSERT_NO_MSG(false); } udc_unlock_internal(dev); if (err) { udc_submit_event(dev, UDC_EVT_ERROR, err); } } } static void numaker_udbd_isr(const struct device *dev) { const struct udc_numaker_config 
*config = dev->config; struct udc_numaker_data *priv = udc_get_private(dev); USBD_T *const base = config->base; struct numaker_usbd_msg msg = {0}; uint32_t volatile usbd_intsts = base->INTSTS; uint32_t volatile usbd_bus_state = base->ATTR; /* USB plug-in/unplug */ if (usbd_intsts & USBD_INTSTS_FLDET) { /* Floating detect */ base->INTSTS = USBD_INTSTS_FLDET; if (base->VBUSDET & USBD_VBUSDET_VBUSDET_Msk) { /* USB plug-in */ /* Enable back USB/PHY */ base->ATTR |= USBD_ATTR_USBEN_Msk | USBD_ATTR_PHYEN_Msk; /* UDC stack would handle bottom-half processing */ udc_submit_event(dev, UDC_EVT_VBUS_READY, 0); LOG_DBG("USB plug-in"); } else { /* USB unplug */ /* Disable USB */ base->ATTR &= ~USBD_USB_EN; /* UDC stack would handle bottom-half processing */ udc_submit_event(dev, UDC_EVT_VBUS_REMOVED, 0); LOG_DBG("USB unplug"); } } /* USB wake-up */ if (usbd_intsts & USBD_INTSTS_WAKEUP) { /* Clear event flag */ base->INTSTS = USBD_INTSTS_WAKEUP; LOG_DBG("USB wake-up"); } /* USB reset/suspend/resume */ if (usbd_intsts & USBD_INTSTS_BUS) { /* Clear event flag */ base->INTSTS = USBD_INTSTS_BUS; if (usbd_bus_state & USBD_STATE_USBRST) { /* Bus reset */ /* Enable back USB/PHY */ base->ATTR |= USBD_ATTR_USBEN_Msk | USBD_ATTR_PHYEN_Msk; /* Bus reset top half */ numaker_usbd_bus_reset_th(dev); /* UDC stack would handle bottom-half processing, * including reset device address (udc_set_address), * un-configure device (udc_ep_disable), etc. 
*/ udc_submit_event(dev, UDC_EVT_RESET, 0); LOG_DBG("USB reset"); } if (usbd_bus_state & USBD_STATE_SUSPEND) { /* Enable USB but disable PHY */ base->ATTR &= ~USBD_PHY_EN; /* UDC stack would handle bottom-half processing */ udc_submit_event(dev, UDC_EVT_SUSPEND, 0); LOG_DBG("USB suspend"); } if (usbd_bus_state & USBD_STATE_RESUME) { /* Enable back USB/PHY */ base->ATTR |= USBD_ATTR_USBEN_Msk | USBD_ATTR_PHYEN_Msk; /* UDC stack would handle bottom-half processing */ udc_submit_event(dev, UDC_EVT_RESUME, 0); LOG_DBG("USB resume"); } } /* USB SOF */ if (usbd_intsts & USBD_INTSTS_SOFIF_Msk) { /* Clear event flag */ base->INTSTS = USBD_INTSTS_SOFIF_Msk; /* UDC stack would handle bottom-half processing */ udc_submit_event(dev, UDC_EVT_SOF, 0); } /* USB Setup/EP */ if (usbd_intsts & USBD_INTSTS_USB) { uint32_t epintsts; /* Setup event */ if (usbd_intsts & USBD_INTSTS_SETUP) { USBD_EP_T *ep0_base = numaker_usbd_ep_base(dev, EP0); USBD_EP_T *ep1_base = numaker_usbd_ep_base(dev, EP1); /* Clear event flag */ base->INTSTS = USBD_INTSTS_SETUP; /* Clear the data IN/OUT ready flag of control endpoints */ ep0_base->CFGP |= USBD_CFGP_CLRRDY_Msk; ep1_base->CFGP |= USBD_CFGP_CLRRDY_Msk; /* By USB spec, following transactions, regardless of Data/Status stage, * will always be DATA1 */ ep0_base->CFG |= USBD_CFG_DSQSYNC_Msk; ep1_base->CFG |= USBD_CFG_DSQSYNC_Msk; /* Message for bottom-half processing */ /* NOTE: In Zephyr USB device stack, Setup packet is passed via * CTRL OUT EP */ msg.type = NUMAKER_USBD_MSG_TYPE_SETUP; numaker_usbd_setup_copy_to_user(dev, msg.setup.packet); numaker_usbd_send_msg(dev, &msg); } /* EP events */ epintsts = base->EPINTSTS; base->EPINTSTS = epintsts; while (epintsts) { uint32_t ep_hw_idx = u32_count_trailing_zeros(epintsts); USBD_EP_T *ep_base = numaker_usbd_ep_base(dev, ep_hw_idx); uint8_t ep_dir; uint8_t ep_idx; uint8_t ep; /* We don't enable INNAKEN interrupt, so as long as EP event occurs, * we can just regard one data transaction has completed (ACK 
for * CTRL/BULK/INT or no-ACK for Iso), that is, no need to check EPSTS0, * EPSTS1, etc. */ /* EP direction, number, and address */ ep_dir = ((ep_base->CFG & USBD_CFG_STATE_Msk) == USBD_CFG_EPMODE_IN) ? USB_EP_DIR_IN : USB_EP_DIR_OUT; ep_idx = (ep_base->CFG & USBD_CFG_EPNUM_Msk) >> USBD_CFG_EPNUM_Pos; ep = USB_EP_GET_ADDR(ep_idx, ep_dir); /* NOTE: See comment in udc_numaker_set_address()'s implementation * for safe place to change USB device address */ if (ep == USB_EP_GET_ADDR(0, USB_EP_DIR_IN)) { numaker_usbd_set_addr(dev); } /* NOTE: See comment on mxpld_ctrlout for why make one copy of * CTRL OUT's MXPLD */ if (ep == USB_EP_GET_ADDR(0, USB_EP_DIR_OUT)) { struct numaker_usbd_ep *ep_ctrlout = priv->ep_pool + 0; USBD_EP_T *ep_ctrlout_base = numaker_usbd_ep_base(dev, ep_ctrlout->ep_hw_idx); ep_ctrlout->mxpld_ctrlout = ep_ctrlout_base->MXPLD; } /* Message for bottom-half processing */ if (USB_EP_DIR_IS_OUT(ep)) { msg.type = NUMAKER_USBD_MSG_TYPE_OUT; msg.out.ep = ep; } else { msg.type = NUMAKER_USBD_MSG_TYPE_IN; msg.in.ep = ep; } numaker_usbd_send_msg(dev, &msg); /* Have handled this EP and go next */ epintsts &= ~BIT(ep_hw_idx); } } } static enum udc_bus_speed udc_numaker_device_speed(const struct device *dev) { return UDC_BUS_SPEED_FS; } static int udc_numaker_ep_enqueue(const struct device *dev, struct udc_ep_config *const ep_cfg, struct net_buf *buf) { struct numaker_usbd_msg msg = {0}; LOG_DBG("%p enqueue %p", dev, buf); udc_buf_put(ep_cfg, buf); /* Resume the EP's queued transfer */ if (!ep_cfg->stat.halted) { msg.type = NUMAKER_USBD_MSG_TYPE_XFER; msg.xfer.ep = ep_cfg->addr; numaker_usbd_send_msg(dev, &msg); } return 0; } static int udc_numaker_ep_dequeue(const struct device *dev, struct udc_ep_config *const ep_cfg) { struct net_buf *buf; struct numaker_usbd_ep *ep_cur; /* Bind EP H/W context to EP address */ ep_cur = numaker_usbd_ep_mgmt_bind_ep(dev, ep_cfg->addr); if (!ep_cur) { LOG_ERR("Bind EP H/W context: ep=0x%02x", ep_cfg->addr); return -ENODEV; } 
numaker_usbd_ep_abort(ep_cur); buf = udc_buf_get_all(dev, ep_cfg->addr); if (buf) { udc_submit_ep_event(dev, buf, -ECONNABORTED); } return 0; } static int udc_numaker_ep_set_halt(const struct device *dev, struct udc_ep_config *const ep_cfg) { struct numaker_usbd_ep *ep_cur; LOG_DBG("Set halt ep 0x%02x", ep_cfg->addr); /* Bind EP H/W context to EP address */ ep_cur = numaker_usbd_ep_mgmt_bind_ep(dev, ep_cfg->addr); if (!ep_cur) { LOG_ERR("Bind EP H/W context: ep=0x%02x", ep_cfg->addr); return -ENODEV; } /* Set EP to stalled */ numaker_usbd_ep_set_stall(ep_cur); return 0; } static int udc_numaker_ep_clear_halt(const struct device *dev, struct udc_ep_config *const ep_cfg) { struct numaker_usbd_ep *ep_cur; struct numaker_usbd_msg msg = {0}; LOG_DBG("Clear halt ep 0x%02x", ep_cfg->addr); /* Bind EP H/W context to EP address */ ep_cur = numaker_usbd_ep_mgmt_bind_ep(dev, ep_cfg->addr); if (!ep_cur) { LOG_ERR("Bind EP H/W context: ep=0x%02x", ep_cfg->addr); return -ENODEV; } /* Reset EP to unstalled and data toggle bit to 0 */ numaker_usbd_ep_clear_stall_n_data_toggle(ep_cur); /* Resume the EP's queued transfer */ msg.type = NUMAKER_USBD_MSG_TYPE_XFER; msg.xfer.ep = ep_cfg->addr; numaker_usbd_send_msg(dev, &msg); return 0; } static int udc_numaker_ep_enable(const struct device *dev, struct udc_ep_config *const ep_cfg) { int err; uint32_t dmabuf_base; uint32_t dmabuf_size; struct numaker_usbd_ep *ep_cur; LOG_DBG("Enable ep 0x%02x", ep_cfg->addr); /* Bind EP H/W context to EP address */ ep_cur = numaker_usbd_ep_mgmt_bind_ep(dev, ep_cfg->addr); if (!ep_cur) { LOG_ERR("Bind EP H/W context: ep=0x%02x", ep_cfg->addr); return -ENODEV; } /* Configure EP DMA buffer */ if (!ep_cur->dmabuf_valid || ep_cur->dmabuf_size < ep_cfg->mps) { /* Allocate DMA buffer */ err = numaker_usbd_ep_mgmt_alloc_dmabuf(dev, ep_cfg->mps, &dmabuf_base, &dmabuf_size); if (err < 0) { LOG_ERR("Allocate DMA buffer failed"); return err; } /* Configure EP DMA buffer */ numaker_usbd_ep_config_dmabuf(ep_cur, 
dmabuf_base, dmabuf_size); } /* Configure EP majorly */ numaker_usbd_ep_config_major(ep_cur, ep_cfg); /* Enable EP */ numaker_usbd_ep_enable(ep_cur); return 0; } static int udc_numaker_ep_disable(const struct device *dev, struct udc_ep_config *const ep_cfg) { struct numaker_usbd_ep *ep_cur; LOG_DBG("Disable ep 0x%02x", ep_cfg->addr); /* Bind EP H/W context to EP address */ ep_cur = numaker_usbd_ep_mgmt_bind_ep(dev, ep_cfg->addr); if (!ep_cur) { LOG_ERR("Bind EP H/W context: ep=0x%02x", ep_cfg->addr); return -ENODEV; } /* Disable EP */ numaker_usbd_ep_disable(ep_cur); return 0; } static int udc_numaker_host_wakeup(const struct device *dev) { const struct udc_numaker_config *config = dev->config; USBD_T *const base = config->base; /* Enable back USB/PHY first */ base->ATTR |= USBD_ATTR_USBEN_Msk | USBD_ATTR_PHYEN_Msk; /* Then generate 'K' */ base->ATTR |= USBD_ATTR_RWAKEUP_Msk; k_sleep(K_USEC(NUMAKER_USBD_BUS_RESUME_DRV_K_US)); base->ATTR ^= USBD_ATTR_RWAKEUP_Msk; return 0; } static int udc_numaker_set_address(const struct device *dev, const uint8_t addr) { struct udc_numaker_data *priv = udc_get_private(dev); LOG_DBG("Set new address %u for %p", addr, dev); /* NOTE: Timing for configuring USB device address into H/W is critical. It must be done * in-between SET_ADDRESS control transfer and next transfer. For this, it is done in * IN ACK ISR of SET_ADDRESS control transfer. 
*/ priv->addr = addr; return 0; } static int udc_numaker_enable(const struct device *dev) { LOG_DBG("Enable device %p", dev); /* S/W connect */ numaker_usbd_sw_connect(dev); return 0; } static int udc_numaker_disable(const struct device *dev) { LOG_DBG("Enable device %p", dev); /* S/W disconnect */ numaker_usbd_sw_disconnect(dev); return 0; } static int udc_numaker_init(const struct device *dev) { int err; /* Initialize USBD H/W */ err = numaker_usbd_hw_setup(dev); if (err < 0) { LOG_ERR("Set up H/W: %d", err); return err; } /* USB device address defaults to 0 */ numaker_usbd_reset_addr(dev); /* Initialize all EP H/W contexts */ numaker_usbd_ep_mgmt_init(dev); if (udc_ep_enable_internal(dev, USB_CONTROL_EP_OUT, USB_EP_TYPE_CONTROL, 64, 0)) { LOG_ERR("Failed to enable control endpoint"); return -EIO; } if (udc_ep_enable_internal(dev, USB_CONTROL_EP_IN, USB_EP_TYPE_CONTROL, 64, 0)) { LOG_ERR("Failed to enable control endpoint"); return -EIO; } return 0; } static int udc_numaker_shutdown(const struct device *dev) { struct udc_numaker_data *priv = udc_get_private(dev); if (udc_ep_disable_internal(dev, USB_CONTROL_EP_OUT)) { LOG_ERR("Failed to disable control endpoint"); return -EIO; } if (udc_ep_disable_internal(dev, USB_CONTROL_EP_IN)) { LOG_ERR("Failed to disable control endpoint"); return -EIO; } /* Uninitialize USBD H/W */ numaker_usbd_hw_shutdown(dev); /* Purge message queue */ k_msgq_purge(priv->msgq); return 0; } static int udc_numaker_lock(const struct device *dev) { return udc_lock_internal(dev, K_FOREVER); } static int udc_numaker_unlock(const struct device *dev) { return udc_unlock_internal(dev); } static int udc_numaker_driver_preinit(const struct device *dev) { const struct udc_numaker_config *config = dev->config; struct udc_data *data = dev->data; int err; data->caps.rwup = true; data->caps.addr_before_status = true; data->caps.mps0 = UDC_MPS0_64; /* Some soc series don't allow ISO IN/OUT to be assigned the same EP number. 
* This is addressed by limiting all OUT/IN EP addresses in top/bottom halves, * except CTRL OUT/IN. */ for (int i = 0; i < config->ep_cfg_out_size; i++) { /* Limit all OUT EP numbers to 0, 1~7 */ if (config->disallow_iso_inout_same && i != 0 && i >= 8) { continue; } config->ep_cfg_out[i].caps.out = 1; if (i == 0) { config->ep_cfg_out[i].caps.control = 1; config->ep_cfg_out[i].caps.mps = 64; } else { config->ep_cfg_out[i].caps.bulk = 1; config->ep_cfg_out[i].caps.interrupt = 1; config->ep_cfg_out[i].caps.iso = 1; config->ep_cfg_out[i].caps.mps = 1023; } config->ep_cfg_out[i].addr = USB_EP_DIR_OUT | i; err = udc_register_ep(dev, &config->ep_cfg_out[i]); if (err != 0) { LOG_ERR("Failed to register endpoint"); return err; } } for (int i = 0; i < config->ep_cfg_in_size; i++) { /* Limit all IN EP numbers to 0, 8~15 */ if (config->disallow_iso_inout_same && i != 0 && i < 8) { continue; } config->ep_cfg_in[i].caps.in = 1; if (i == 0) { config->ep_cfg_in[i].caps.control = 1; config->ep_cfg_in[i].caps.mps = 64; } else { config->ep_cfg_in[i].caps.bulk = 1; config->ep_cfg_in[i].caps.interrupt = 1; config->ep_cfg_in[i].caps.iso = 1; config->ep_cfg_in[i].caps.mps = 1023; } config->ep_cfg_in[i].addr = USB_EP_DIR_IN | i; err = udc_register_ep(dev, &config->ep_cfg_in[i]); if (err != 0) { LOG_ERR("Failed to register endpoint"); return err; } } config->make_thread(dev); return 0; } static const struct udc_api udc_numaker_api = { .device_speed = udc_numaker_device_speed, .ep_enqueue = udc_numaker_ep_enqueue, .ep_dequeue = udc_numaker_ep_dequeue, .ep_set_halt = udc_numaker_ep_set_halt, .ep_clear_halt = udc_numaker_ep_clear_halt, .ep_enable = udc_numaker_ep_enable, .ep_disable = udc_numaker_ep_disable, .host_wakeup = udc_numaker_host_wakeup, .set_address = udc_numaker_set_address, .enable = udc_numaker_enable, .disable = udc_numaker_disable, .init = udc_numaker_init, .shutdown = udc_numaker_shutdown, .lock = udc_numaker_lock, .unlock = udc_numaker_unlock, }; #define 
UDC_NUMAKER_DEVICE_DEFINE(inst) \ PINCTRL_DT_INST_DEFINE(inst); \ \ static void udc_numaker_irq_config_func_##inst(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(inst), DT_INST_IRQ(inst, priority), numaker_udbd_isr, \ DEVICE_DT_INST_GET(inst), 0); \ \ irq_enable(DT_INST_IRQN(inst)); \ } \ \ static void udc_numaker_irq_unconfig_func_##inst(const struct device *dev) \ { \ irq_disable(DT_INST_IRQN(inst)); \ } \ \ K_THREAD_STACK_DEFINE(udc_numaker_stack_##inst, CONFIG_UDC_NUMAKER_THREAD_STACK_SIZE); \ \ static void udc_numaker_thread_##inst(void *dev, void *arg1, void *arg2) \ { \ ARG_UNUSED(arg1); \ ARG_UNUSED(arg2); \ numaker_usbd_msg_handler(dev); \ } \ \ static void udc_numaker_make_thread_##inst(const struct device *dev) \ { \ struct udc_numaker_data *priv = udc_get_private(dev); \ \ k_thread_create(&priv->thread_data, udc_numaker_stack_##inst, \ K_THREAD_STACK_SIZEOF(udc_numaker_stack_##inst), \ udc_numaker_thread_##inst, (void *)dev, NULL, NULL, \ K_PRIO_COOP(CONFIG_UDC_NUMAKER_THREAD_PRIORITY), K_ESSENTIAL, \ K_NO_WAIT); \ k_thread_name_set(&priv->thread_data, dev->name); \ } \ \ static struct udc_ep_config \ ep_cfg_out_##inst[MIN(DT_INST_PROP(inst, num_bidir_endpoints), 16)]; \ static struct udc_ep_config \ ep_cfg_in_##inst[MIN(DT_INST_PROP(inst, num_bidir_endpoints), 16)]; \ \ static const struct udc_numaker_config udc_numaker_config_##inst = { \ .ep_cfg_out = ep_cfg_out_##inst, \ .ep_cfg_in = ep_cfg_in_##inst, \ .ep_cfg_out_size = ARRAY_SIZE(ep_cfg_out_##inst), \ .ep_cfg_in_size = ARRAY_SIZE(ep_cfg_in_##inst), \ .make_thread = udc_numaker_make_thread_##inst, \ .base = (USBD_T *)DT_INST_REG_ADDR(inst), \ .reset = RESET_DT_SPEC_INST_GET(inst), \ .clk_modidx = DT_INST_CLOCKS_CELL(inst, clock_module_index), \ .clk_src = DT_INST_CLOCKS_CELL(inst, clock_source), \ .clk_div = DT_INST_CLOCKS_CELL(inst, clock_divider), \ .clkctrl_dev = DEVICE_DT_GET(DT_PARENT(DT_INST_CLOCKS_CTLR(inst))), \ .irq_config_func = udc_numaker_irq_config_func_##inst, \ 
.irq_unconfig_func = udc_numaker_irq_unconfig_func_##inst, \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst), \ .dmabuf_size = DT_INST_PROP(inst, dma_buffer_size), \ .disallow_iso_inout_same = DT_INST_PROP(inst, disallow_iso_in_out_same_number), \ }; \ \ static struct numaker_usbd_ep \ numaker_usbd_ep_pool_##inst[DT_INST_PROP(inst, num_bidir_endpoints)]; \ \ K_MSGQ_DEFINE(numaker_usbd_msgq_##inst, sizeof(struct numaker_usbd_msg), \ CONFIG_UDC_NUMAKER_MSG_QUEUE_SIZE, 4); \ \ static struct udc_numaker_data udc_priv_##inst = { \ .msgq = &numaker_usbd_msgq_##inst, \ .ep_pool = numaker_usbd_ep_pool_##inst, \ .ep_pool_size = DT_INST_PROP(inst, num_bidir_endpoints), \ }; \ \ static struct udc_data udc_data_##inst = { \ .mutex = Z_MUTEX_INITIALIZER(udc_data_##inst.mutex), \ .priv = &udc_priv_##inst, \ }; \ \ DEVICE_DT_INST_DEFINE(inst, udc_numaker_driver_preinit, NULL, &udc_data_##inst, \ &udc_numaker_config_##inst, POST_KERNEL, \ CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &udc_numaker_api); DT_INST_FOREACH_STATUS_OKAY(UDC_NUMAKER_DEVICE_DEFINE) ```
/content/code_sandbox/drivers/usb/udc/udc_numaker.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
14,502
```objective-c /* * */ /** * @file * @brief Private API for USB device controller (UDC) drivers */ #ifndef ZEPHYR_INCLUDE_UDC_COMMON_H #define ZEPHYR_INCLUDE_UDC_COMMON_H #include <zephyr/drivers/usb/udc.h> #include <zephyr/sys/byteorder.h> #define CTRL_PIPE_STAGE_SETUP 0 #define CTRL_PIPE_STAGE_DATA_OUT 1 #define CTRL_PIPE_STAGE_DATA_IN 2 #define CTRL_PIPE_STAGE_NO_DATA 3 #define CTRL_PIPE_STAGE_STATUS_OUT 4 #define CTRL_PIPE_STAGE_STATUS_IN 5 #define CTRL_PIPE_STAGE_ERROR 6 /** * @brief Get driver's private data * * @param[in] dev Pointer to device struct of the driver instance * * @return pointer to driver's private data */ static inline void *udc_get_private(const struct device *dev) { struct udc_data *data = dev->data; return data->priv; } /** * @brief Helper function to set suspended status * * This function can be used by the driver to set suspended status * * @param[in] dev Pointer to device struct of the driver instance * @param[in] values True to set suspended status */ void udc_set_suspended(const struct device *dev, const bool value); /** * @brief Get pointer to endpoint configuration structure. * * @param[in] dev Pointer to device struct of the driver instance * @param[in] ep Endpoint address * * @return pointer to endpoint configuration or NULL on error. */ struct udc_ep_config *udc_get_ep_cfg(const struct device *dev, const uint8_t ep); /** * @brief Checks if the endpoint is busy * * @param[in] dev Pointer to device struct of the driver instance * @param[in] ep Endpoint address * * @return true if endpoint is busy */ bool udc_ep_is_busy(const struct device *dev, const uint8_t ep); /** * @brief Helper function to set endpoint busy state * * @param[in] dev Pointer to device struct of the driver instance * @param[in] ep Endpoint address * @param[in] busy Busy state */ void udc_ep_set_busy(const struct device *dev, const uint8_t ep, const bool busy); /** * @brief Get UDC request from endpoint FIFO. * * This function removes request from endpoint FIFO. 
* Use it when transfer is finished and request should * be passed to the higher level. * * @param[in] dev Pointer to device struct of the driver instance * @param[in] ep Endpoint address * * @return pointer to UDC request or NULL on error. */ struct net_buf *udc_buf_get(const struct device *dev, const uint8_t ep); /** * @brief Get all UDC request from endpoint FIFO. * * Get all UDC request from endpoint FIFO as single-linked list. * This function removes all request from endpoint FIFO and * is typically used to dequeue endpoint FIFO. * * @param[in] dev Pointer to device struct of the driver instance * @param[in] ep Endpoint address * * @return pointer to UDC request or NULL on error. */ struct net_buf *udc_buf_get_all(const struct device *dev, const uint8_t ep); /** * @brief Peek request at the head of endpoint FIFO. * * Return request from the head of endpoint FIFO without removing. * Use it when request buffer is required for a transfer. * * @param[in] dev Pointer to device struct of the driver instance * @param[in] ep Endpoint address * * @return pointer to request or NULL on error. */ struct net_buf *udc_buf_peek(const struct device *dev, const uint8_t ep); /** * @brief Put request at the tail of endpoint FIFO. * * @param[in] ep_cfg Pointer to endpoint configuration * @param[in] buf Pointer to UDC request buffer * * @return pointer to request or NULL on error. */ void udc_buf_put(struct udc_ep_config *const ep_cfg, struct net_buf *const buf); /** * @brief Helper function to send UDC event to a higher level. * * The callback would typically sends UDC even to a message queue (k_msgq). * * @param[in] dev Pointer to device struct of the driver instance * @param[in] type Event type * @param[in] status Event status * * @return 0 on success, all other values should be treated as error. 
* @retval -EPERM controller is not initialized */ int udc_submit_event(const struct device *dev, const enum udc_event_type type, const int status); /** * @brief Helper function to send UDC endpoint event to a higher level. * * Type of this event is hardcoded to UDC_EVT_EP_REQUEST. * The callback would typically sends UDC even to a message queue (k_msgq). * * @param[in] dev Pointer to device struct of the driver instance * @param[in] buf Pointer to UDC request buffer * @param[in] err Request result * * @return 0 on success, all other values should be treated as error. * @retval -EPERM controller is not initialized */ int udc_submit_ep_event(const struct device *dev, struct net_buf *const buf, const int err); /** * @brief Helper function to enable endpoint. * * This function can be used by the driver to enable control IN/OUT endpoint. * * @param[in] dev Pointer to device struct of the driver instance * @param[in] ep Endpoint address (same as bEndpointAddress) * @param[in] attributes Endpoint attributes (same as bmAttributes) * @param[in] mps Maximum packet size (same as wMaxPacketSize) * @param[in] interval Polling interval (same as bInterval) * * @return 0 on success, all other values should be treated as error. * @retval -ENODEV endpoint is not assigned or no configuration found * @retval -EALREADY endpoint is already enabled */ int udc_ep_enable_internal(const struct device *dev, const uint8_t ep, const uint8_t attributes, const uint16_t mps, const uint8_t interval); /** * @brief Helper function to disable endpoint. * * This function can be used by the driver to disable control IN/OUT endpoint. * * @param[in] dev Pointer to device struct of the driver instance * @param[in] ep Endpoint address * * @return 0 on success, all other values should be treated as error. 
* @retval -ENODEV endpoint is not assigned or no configuration found * @retval -EALREADY endpoint is already enabled */ int udc_ep_disable_internal(const struct device *dev, const uint8_t ep); /** * @brief Helper function to register endpoint configuration. * * This function initializes endpoint FIFO and * appends endpoint configuration to drivers endpoint list. * * @param[in] dev Pointer to device struct of the driver instance * @param[in] cfg Pointer to endpoint configuration structure * * @return 0 on success, all other values should be treated as error. * @retval -EACCES controller is initialized or enabled */ int udc_register_ep(const struct device *dev, struct udc_ep_config *const cfg); /** * @brief Set setup flag in requests metadata. * * A control transfer can be either setup or data OUT, * use this function to mark request as setup packet. * * @param[in] buf Pointer to UDC request buffer */ void udc_ep_buf_set_setup(struct net_buf *const buf); /** * @brief Checks whether the driver must finish transfer with a ZLP * * @param[in] buf Pointer to UDC request buffer * * @return true if ZLP is requested */ bool udc_ep_buf_has_zlp(const struct net_buf *const buf); /** * @brief Clear ZLP flag * * @param[in] buf Pointer to UDC request buffer */ void udc_ep_buf_clear_zlp(const struct net_buf *const buf); /** * @brief Locking function for the drivers. * * @param[in] dev Pointer to device struct of the driver instance * @param[in] timeout Timeout * * @return values provided by k_mutex_lock() */ static inline int udc_lock_internal(const struct device *dev, k_timeout_t timeout) { struct udc_data *data = dev->data; return k_mutex_lock(&data->mutex, timeout); } /** * @brief Unlocking function for the drivers. 
* * @param[in] dev Pointer to device struct of the driver instance * * @return values provided by k_mutex_unlock() */ static inline int udc_unlock_internal(const struct device *dev) { struct udc_data *data = dev->data; return k_mutex_unlock(&data->mutex); } /** * @brief Allocate UDC control transfer buffer * * Allocate a new buffer from common control transfer buffer pool. * * @param[in] dev Pointer to device struct of the driver instance * @param[in] ep Endpoint address * @param[in] size Size of the request buffer * * @return pointer to allocated request or NULL on error. */ struct net_buf *udc_ctrl_alloc(const struct device *dev, const uint8_t ep, const size_t size); static inline uint16_t udc_data_stage_length(const struct net_buf *const buf) { struct usb_setup_packet *setup = (void *)buf->data; return sys_le16_to_cpu(setup->wLength); } /** * @brief Checks whether the current control transfer stage is Data Stage OUT * * @param[in] dev Pointer to device struct of the driver instance * * @return true if stage is Data Stage OUT */ bool udc_ctrl_stage_is_data_out(const struct device *dev); /** * @brief Checks whether the current control transfer stage is Data Stage IN * * @param[in] dev Pointer to device struct of the driver instance * * @return true if stage is Data Stage IN */ bool udc_ctrl_stage_is_data_in(const struct device *dev); /** * @brief Checks whether the current control transfer stage is Status IN * * @param[in] dev Pointer to device struct of the driver instance * * @return true if stage is Status IN */ bool udc_ctrl_stage_is_status_in(const struct device *dev); /** * @brief Checks whether the current control transfer stage is Status OUT * * @param[in] dev Pointer to device struct of the driver instance * * @return true if stage is Status OUT */ bool udc_ctrl_stage_is_status_out(const struct device *dev); /** * @brief Checks whether the current control transfer stage is Status no-data * * @param[in] dev Pointer to device struct of the driver 
instance * * @return true if stage is Status no-data */ bool udc_ctrl_stage_is_no_data(const struct device *dev); /** * @brief Submit Control Write (s-out-status) transfer * * Allocate buffer for data stage IN, * submit both setup and data buffer to upper layer. * * @param[in] dev Pointer to device struct of the driver instance * @param[in] dout Pointer to UDC buffer containing data transaction * * @return 0 on success, all other values should be treated as error. */ int udc_ctrl_submit_s_out_status(const struct device *dev, struct net_buf *const dout); /** * @brief Prepare control data IN stage * * Allocate buffer for data stage IN, * submit both setup and data buffer to upper layer. * * @param[in] dev Pointer to device struct of the driver instance * * @return 0 on success, all other values should be treated as error. */ int udc_ctrl_submit_s_in_status(const struct device *dev); /** * @brief Prepare control (no-data) status stage * * Allocate buffer for status stage IN, * submit both setup and status buffer to upper layer. * * @param[in] dev Pointer to device struct of the driver instance * * @return 0 on success, all other values should be treated as error. */ int udc_ctrl_submit_s_status(const struct device *dev); /** * @brief Submit status transaction * * Submit both status transaction to upper layer. * * @param[in] dev Pointer to device struct of the driver instance * @param[in] dout Pointer to UDC buffer containing data transaction * * @return 0 on success, all other values should be treated as error. 
*/ int udc_ctrl_submit_status(const struct device *dev, struct net_buf *const buf); /** * @brief Update internal control stage status based on the net_buf metadata * * Use it in the driver to update the stage, typically there are * three places where this function should be called: * - when a setup packet is received * - when a data stage is completed (all data stage transactions) * - when a status stage transaction is finished * * The functions of type udc_ctrl_stage_is_*() can be called before or * after this function, depending on the desired action. * To keep protocol processing running the following should be taken * into account: * * - Upper layer may not allocate buffers but remove or release buffers * from the chain that are no longer needed. Only control IN transfers may * be enqueued by the upper layer. * * - For "Control Write" (s-out-status), the driver should allocate the buffer, * insert it as a fragment to setup buffer and perform the Data Stage * transaction. Allocate and insert a fragment for the status (IN) stage to * setup buffer, and then pass setup packet with the chain of s-out-status to * upper layer. Upper layer should either halt control endpoint or * enqueue status buffer for status stage. There should be second * notification to upper layer when the status transaction is finished. * * ->driver_foo_setup_rcvd(dev) * ->udc_ctrl_update_stage(dev, buf) * ->udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, wLength) * ->driver_foo_xfer_start(dev, USB_CONTROL_EP_OUT) * * ->driver_foo_dout_rcvd(dev) * -... * ->driver_foo_feed_next_dout(dev, ....) * -... * ->udc_ctrl_update_stage(dev, dout_buf) * -... * ->udc_ctrl_submit_s_out_status(dev, dout_buf); * * ->driver_foo_din_rcvd(dev) * -... * ->udc_ctrl_submit_status(dev, status_buf); * -... * ->udc_ctrl_update_stage(dev, status_buf) * * - For "Control Read" (s-in-status), depending on the controller, * the driver should reserve the buffers for subsequent status stage and * setup packet and prepare everything. 
The driver should allocate the buffer * for IN transaction insert it as a fragment to setup buffer, and pass * the chain of s-in to upper layer. Upper layer should either halt control * endpoint or enqueue (in) buffer. There should be second * notification to upper layer when the status transaction is finished. * * ->driver_foo_setup_rcvd(dev) * ->udc_ctrl_update_stage(dev, buf) * ->driver_foo_feed_next_dout(dev, ....) * -... * ->udc_ctrl_submit_s_in_status(dev); * * ->driver_foo_din_rcvd(dev) * -... * ->udc_ctrl_update_stage(dev, dout_buf) * -... * * ->driver_foo_dout_rcvd(dev) * -... * ->udc_ctrl_submit_status(dev, status_buf); * -... * ->udc_ctrl_update_stage(dev, dout_buf) * * - For "No-data Control" (s-status), the driver should allocate the buffer * for the status (IN) stage, insert it as a fragment to setup buffer, * and then pass setup packet with the chain of s-status to * upper layer. Upper layer should either halt control endpoint or * enqueue status buffer for status stage. There should be second * notification to upper layer when the status transaction is finished. * * ->driver_foo_setup_rcvd(dev) * ->udc_ctrl_update_stage(dev, buf) * ->driver_foo_feed_next_dout(dev, ....) * -... * ->udc_ctrl_submit_s_status(dev); * * ->driver_foo_din_rcvd(dev) * -... * ->udc_ctrl_submit_status(dev, status_buf); * -... * ->udc_ctrl_update_stage(dev, status_buf) * * Please refer to Chapter 8.5.3 Control Transfers USB 2.0 spec. * * @param[in] dev Pointer to device struct of the driver instance * @param[in] buf Buffer containing setup packet * * @return 0 on success, all other values should be treated as error. */ void udc_ctrl_update_stage(const struct device *dev, struct net_buf *const buf); #if defined(CONFIG_UDC_WORKQUEUE) extern struct k_work_q udc_work_q; static inline struct k_work_q *udc_get_work_q(void) { return &udc_work_q; } #else static inline struct k_work_q *udc_get_work_q(void) { return &k_sys_work_q; } #endif #endif /* ZEPHYR_INCLUDE_UDC_COMMON_H */ ```
/content/code_sandbox/drivers/usb/udc/udc_common.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,787
```c /* * */ #define DT_DRV_COMPAT nxp_ehci #include <soc.h> #include <string.h> #include <stdio.h> #include <zephyr/device.h> #include <zephyr/kernel.h> #include <zephyr/sys/byteorder.h> #include <zephyr/drivers/usb/udc.h> #include <zephyr/drivers/pinctrl.h> #include "udc_common.h" #include "usb.h" #include "usb_device_config.h" #include "usb_device_mcux_drv_port.h" #include "usb_device_ehci.h" #include "usb_phy.h" #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(udc_mcux, CONFIG_UDC_DRIVER_LOG_LEVEL); /* * There is no real advantage to change control endpoint size * but we can use it for testing UDC driver API and higher layers. */ #define USB_MCUX_MPS0 UDC_MPS0_64 #define USB_MCUX_EP0_SIZE 64 #define PRV_DATA_HANDLE(_handle) CONTAINER_OF(_handle, struct udc_mcux_data, mcux_device) struct udc_mcux_config { const usb_device_controller_interface_struct_t *mcux_if; void (*irq_enable_func)(const struct device *dev); void (*irq_disable_func)(const struct device *dev); size_t num_of_eps; struct udc_ep_config *ep_cfg_in; struct udc_ep_config *ep_cfg_out; uintptr_t base; const struct pinctrl_dev_config *pincfg; usb_phy_config_struct_t *phy_config; }; struct udc_mcux_data { const struct device *dev; usb_device_struct_t mcux_device; uint8_t controller_id; /* 0xFF is invalid value */ }; static int udc_mcux_control(const struct device *dev, usb_device_control_type_t command, void *param) { const struct udc_mcux_config *config = dev->config; const usb_device_controller_interface_struct_t *mcux_if = config->mcux_if; struct udc_mcux_data *priv = udc_get_private(dev); usb_status_t status; status = mcux_if->deviceControl(priv->mcux_device.controllerHandle, command, param); if (status != kStatus_USB_Success) { return -ENOMEM; } return 0; } /* If ep is busy, return busy. 
Otherwise feed the buf to controller */ static int udc_mcux_ep_feed(const struct device *dev, struct udc_ep_config *const cfg, struct net_buf *const buf) { const struct udc_mcux_config *config = dev->config; const usb_device_controller_interface_struct_t *mcux_if = config->mcux_if; struct udc_mcux_data *priv = udc_get_private(dev); usb_status_t status = kStatus_USB_Success; uint8_t *data; uint32_t len; unsigned int key; usb_device_endpoint_status_struct_t ep_status; ep_status.endpointAddress = cfg->addr; udc_mcux_control(dev, kUSB_DeviceControlGetEndpointStatus, &ep_status); if (ep_status.endpointStatus == kUSB_DeviceEndpointStateStalled) { return -EACCES; /* stalled */ } key = irq_lock(); if (!udc_ep_is_busy(dev, cfg->addr)) { udc_ep_set_busy(dev, cfg->addr, true); irq_unlock(key); if (USB_EP_DIR_IS_OUT(cfg->addr)) { len = net_buf_tailroom(buf); data = net_buf_tail(buf); status = mcux_if->deviceRecv(priv->mcux_device.controllerHandle, cfg->addr, data, len); } else { len = buf->len; data = buf->data; status = mcux_if->deviceSend(priv->mcux_device.controllerHandle, cfg->addr, data, len); } key = irq_lock(); if (status != kStatus_USB_Success) { udc_ep_set_busy(dev, cfg->addr, false); } irq_unlock(key); } else { irq_unlock(key); return -EBUSY; } return (status == kStatus_USB_Success ? 0 : -EIO); } /* return success if the ep is busy or stalled. */ static int udc_mcux_ep_try_feed(const struct device *dev, struct udc_ep_config *const cfg) { struct net_buf *feed_buf; feed_buf = udc_buf_peek(dev, cfg->addr); if (feed_buf) { int ret = udc_mcux_ep_feed(dev, cfg, feed_buf); return ((ret == -EBUSY || ret == -EACCES || ret == 0) ? 0 : -EIO); } return 0; } /* * Allocate buffer and initiate a new control OUT transfer. 
*/ static int udc_mcux_ctrl_feed_dout(const struct device *dev, const size_t length) { struct net_buf *buf; struct udc_ep_config *cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT); int ret; buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, length); if (buf == NULL) { return -ENOMEM; } k_fifo_put(&cfg->fifo, buf); ret = udc_mcux_ep_feed(dev, cfg, buf); if (ret) { net_buf_unref(buf); return ret; } return 0; } static int udc_mcux_handler_setup(const struct device *dev, struct usb_setup_packet *setup) { int err; struct net_buf *buf; LOG_DBG("setup packet"); buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, sizeof(struct usb_setup_packet)); if (buf == NULL) { LOG_ERR("Failed to allocate for setup"); return -EIO; } udc_ep_buf_set_setup(buf); memcpy(buf->data, setup, 8); net_buf_add(buf, 8); if (setup->RequestType.type == USB_REQTYPE_TYPE_STANDARD && setup->RequestType.direction == USB_REQTYPE_DIR_TO_DEVICE && setup->bRequest == USB_SREQ_SET_ADDRESS && setup->wLength == 0) { udc_mcux_control(dev, kUSB_DeviceControlPreSetDeviceAddress, &setup->wValue); } /* Update to next stage of control transfer */ udc_ctrl_update_stage(dev, buf); if (!buf->len) { return -EIO; } if (udc_ctrl_stage_is_data_out(dev)) { /* Allocate and feed buffer for data OUT stage */ LOG_DBG("s:%p|feed for -out-", buf); err = udc_mcux_ctrl_feed_dout(dev, udc_data_stage_length(buf)); if (err == -ENOMEM) { err = udc_submit_ep_event(dev, buf, err); } } else if (udc_ctrl_stage_is_data_in(dev)) { err = udc_ctrl_submit_s_in_status(dev); } else { err = udc_ctrl_submit_s_status(dev); } return err; } static int udc_mcux_handler_ctrl_out(const struct device *dev, struct net_buf *buf, uint8_t *mcux_buf, uint16_t mcux_len) { int err = 0; uint32_t len; len = MIN(net_buf_tailroom(buf), mcux_len); net_buf_add(buf, len); if (udc_ctrl_stage_is_status_out(dev)) { /* Update to next stage of control transfer */ udc_ctrl_update_stage(dev, buf); /* Status stage finished, notify upper layer */ err = udc_ctrl_submit_status(dev, buf); } else 
{ /* Update to next stage of control transfer */ udc_ctrl_update_stage(dev, buf); } if (udc_ctrl_stage_is_status_in(dev)) { err = udc_ctrl_submit_s_out_status(dev, buf); } return err; } static int udc_mcux_handler_ctrl_in(const struct device *dev, struct net_buf *buf, uint8_t *mcux_buf, uint16_t mcux_len) { int err = 0; uint32_t len; len = MIN(buf->len, mcux_len); buf->data += len; buf->len -= len; if (udc_ctrl_stage_is_status_in(dev) || udc_ctrl_stage_is_no_data(dev)) { /* Status stage finished, notify upper layer */ err = udc_ctrl_submit_status(dev, buf); } /* Update to next stage of control transfer */ udc_ctrl_update_stage(dev, buf); if (udc_ctrl_stage_is_status_out(dev)) { /* * IN transfer finished, release buffer, * control OUT buffer should be already fed. */ net_buf_unref(buf); err = udc_mcux_ctrl_feed_dout(dev, 0u); } return err; } static int udc_mcux_handler_non_ctrl_in(const struct device *dev, uint8_t ep, struct net_buf *buf, uint8_t *mcux_buf, uint16_t mcux_len) { int err; uint32_t len; len = MIN(buf->len, mcux_len); buf->data += len; buf->len -= len; err = udc_submit_ep_event(dev, buf, 0); udc_mcux_ep_try_feed(dev, udc_get_ep_cfg(dev, ep)); return err; } static int udc_mcux_handler_non_ctrl_out(const struct device *dev, uint8_t ep, struct net_buf *buf, uint8_t *mcux_buf, uint16_t mcux_len) { int err; uint32_t len; len = MIN(net_buf_tailroom(buf), mcux_len); net_buf_add(buf, len); err = udc_submit_ep_event(dev, buf, 0); udc_mcux_ep_try_feed(dev, udc_get_ep_cfg(dev, ep)); return err; } static int udc_mcux_handler_out(const struct device *dev, uint8_t ep, uint8_t *mcux_buf, uint16_t mcux_len) { int err; struct net_buf *buf; unsigned int key; buf = udc_buf_get(dev, ep); key = irq_lock(); udc_ep_set_busy(dev, ep, false); irq_unlock(key); if (buf == NULL) { udc_submit_event(dev, UDC_EVT_ERROR, -ENOBUFS); return -ENOBUFS; } if (ep == USB_CONTROL_EP_OUT) { err = udc_mcux_handler_ctrl_out(dev, buf, mcux_buf, mcux_len); } else { err = 
udc_mcux_handler_non_ctrl_out(dev, ep, buf, mcux_buf, mcux_len); } return err; } /* return true - zlp is feed; false - no zlp */ static bool udc_mcux_handler_zlt(const struct device *dev, uint8_t ep, struct net_buf *buf, uint16_t mcux_len) { const struct udc_mcux_config *config = dev->config; const usb_device_controller_interface_struct_t *mcux_if = config->mcux_if; struct udc_mcux_data *priv = udc_get_private(dev); /* The whole transfer is already done by MCUX controller driver. */ if (mcux_len >= buf->len) { if (udc_ep_buf_has_zlp(buf)) { usb_status_t status; udc_ep_buf_clear_zlp(buf); status = mcux_if->deviceRecv(priv->mcux_device.controllerHandle, ep, NULL, 0); if (status != kStatus_USB_Success) { udc_submit_event(dev, UDC_EVT_ERROR, -EIO); return false; } return true; } } return false; } static int udc_mcux_handler_in(const struct device *dev, uint8_t ep, uint8_t *mcux_buf, uint16_t mcux_len) { int err; struct net_buf *buf; unsigned int key; buf = udc_buf_peek(dev, ep); if (buf == NULL) { udc_submit_event(dev, UDC_EVT_ERROR, -ENOBUFS); return -ENOBUFS; } if (udc_mcux_handler_zlt(dev, ep, buf, mcux_len)) { return 0; } buf = udc_buf_get(dev, ep); key = irq_lock(); udc_ep_set_busy(dev, ep, false); irq_unlock(key); if (buf == NULL) { udc_submit_event(dev, UDC_EVT_ERROR, -ENOBUFS); return -ENOBUFS; } if (ep == USB_CONTROL_EP_IN) { err = udc_mcux_handler_ctrl_in(dev, buf, mcux_buf, mcux_len); } else { err = udc_mcux_handler_non_ctrl_in(dev, ep, buf, mcux_buf, mcux_len); } return err; } /* NXP MCUX controller driver notify transfers/status through this interface */ usb_status_t USB_DeviceNotificationTrigger(void *handle, void *msg) { usb_device_callback_message_struct_t *mcux_msg = msg; uint8_t ep; usb_device_notification_t mcux_notify; struct udc_mcux_data *priv; const struct device *dev; usb_status_t mcux_status = kStatus_USB_Success; int err = 0; if ((NULL == msg) || (NULL == handle)) { return kStatus_USB_InvalidHandle; } mcux_notify = 
(usb_device_notification_t)mcux_msg->code; priv = (struct udc_mcux_data *)(PRV_DATA_HANDLE(handle)); dev = priv->dev; switch (mcux_notify) { case kUSB_DeviceNotifyBusReset: struct udc_ep_config *cfg; udc_mcux_control(dev, kUSB_DeviceControlSetDefaultStatus, NULL); cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT); if (cfg->stat.enabled) { udc_ep_disable_internal(dev, USB_CONTROL_EP_OUT); } cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_IN); if (cfg->stat.enabled) { udc_ep_disable_internal(dev, USB_CONTROL_EP_IN); } if (udc_ep_enable_internal(dev, USB_CONTROL_EP_OUT, USB_EP_TYPE_CONTROL, USB_MCUX_EP0_SIZE, 0)) { LOG_ERR("Failed to enable control endpoint"); return -EIO; } if (udc_ep_enable_internal(dev, USB_CONTROL_EP_IN, USB_EP_TYPE_CONTROL, USB_MCUX_EP0_SIZE, 0)) { LOG_ERR("Failed to enable control endpoint"); return -EIO; } udc_submit_event(dev, UDC_EVT_RESET, 0); break; case kUSB_DeviceNotifyError: udc_submit_event(dev, UDC_EVT_ERROR, -EIO); break; case kUSB_DeviceNotifySuspend: udc_set_suspended(dev, true); udc_submit_event(dev, UDC_EVT_SUSPEND, 0); break; case kUSB_DeviceNotifyResume: udc_set_suspended(dev, false); udc_submit_event(dev, UDC_EVT_RESUME, 0); break; case kUSB_DeviceNotifyLPMSleep: break; case kUSB_DeviceNotifyDetach: udc_submit_event(dev, UDC_EVT_VBUS_REMOVED, 0); break; case kUSB_DeviceNotifyAttach: udc_submit_event(dev, UDC_EVT_VBUS_READY, 0); break; case kUSB_DeviceNotifySOF: udc_submit_event(dev, UDC_EVT_SOF, 0); break; default: ep = mcux_msg->code; if (mcux_msg->isSetup) { struct usb_setup_packet *setup = (struct usb_setup_packet *)mcux_msg->buffer; err = udc_mcux_handler_setup(dev, setup); } else if (USB_EP_DIR_IS_IN(ep)) { err = udc_mcux_handler_in(dev, ep, mcux_msg->buffer, mcux_msg->length); } else { err = udc_mcux_handler_out(dev, ep, mcux_msg->buffer, mcux_msg->length); } break; } if (unlikely(err)) { udc_submit_event(dev, UDC_EVT_ERROR, err); mcux_status = kStatus_USB_Error; } return mcux_status; } static void udc_mcux_isr(const struct device 
*dev) { struct udc_mcux_data *priv = udc_get_private(dev); USB_DeviceEhciIsrFunction((void *)(&priv->mcux_device)); } /* Return actual USB device speed */ static enum udc_bus_speed udc_mcux_device_speed(const struct device *dev) { int err; uint8_t mcux_speed; err = udc_mcux_control(dev, kUSB_DeviceControlGetSpeed, &mcux_speed); if (err) { /* * In the current version of all NXP USB device drivers, * no error is returned if the parameter is correct. */ return UDC_BUS_SPEED_FS; } switch (mcux_speed) { case USB_SPEED_HIGH: return UDC_BUS_SPEED_HS; case USB_SPEED_LOW: __ASSERT(false, "Low speed mode not supported"); __fallthrough; case USB_SPEED_FULL: __fallthrough; default: return UDC_BUS_SPEED_FS; } } static int udc_mcux_ep_enqueue(const struct device *dev, struct udc_ep_config *const cfg, struct net_buf *const buf) { udc_buf_put(cfg, buf); if (cfg->stat.halted) { LOG_DBG("ep 0x%02x halted", cfg->addr); return 0; } return udc_mcux_ep_try_feed(dev, cfg); } static int udc_mcux_ep_dequeue(const struct device *dev, struct udc_ep_config *const cfg) { struct net_buf *buf; unsigned int key; cfg->stat.halted = false; buf = udc_buf_get_all(dev, cfg->addr); if (buf) { udc_submit_ep_event(dev, buf, -ECONNABORTED); } key = irq_lock(); udc_ep_set_busy(dev, cfg->addr, false); irq_unlock(key); return 0; } static int udc_mcux_ep_set_halt(const struct device *dev, struct udc_ep_config *const cfg) { return udc_mcux_control(dev, kUSB_DeviceControlEndpointStall, &cfg->addr); } static int udc_mcux_ep_clear_halt(const struct device *dev, struct udc_ep_config *const cfg) { (void)udc_mcux_control(dev, kUSB_DeviceControlEndpointUnstall, &cfg->addr); /* transfer is enqueued after stalled */ return udc_mcux_ep_try_feed(dev, cfg); } static int udc_mcux_ep_enable(const struct device *dev, struct udc_ep_config *const cfg) { usb_device_endpoint_init_struct_t ep_init; LOG_DBG("Enable ep 0x%02x", cfg->addr); ep_init.zlt = 0U; ep_init.interval = cfg->interval; ep_init.endpointAddress = cfg->addr; 
ep_init.maxPacketSize = cfg->mps; switch (cfg->attributes & USB_EP_TRANSFER_TYPE_MASK) { case USB_EP_TYPE_CONTROL: ep_init.transferType = USB_ENDPOINT_CONTROL; break; case USB_EP_TYPE_BULK: ep_init.transferType = USB_ENDPOINT_BULK; break; case USB_EP_TYPE_INTERRUPT: ep_init.transferType = USB_ENDPOINT_INTERRUPT; break; case USB_EP_TYPE_ISO: ep_init.transferType = USB_ENDPOINT_ISOCHRONOUS; break; default: return -EINVAL; } return udc_mcux_control(dev, kUSB_DeviceControlEndpointInit, &ep_init); } static int udc_mcux_ep_disable(const struct device *dev, struct udc_ep_config *const cfg) { LOG_DBG("Disable ep 0x%02x", cfg->addr); return udc_mcux_control(dev, kUSB_DeviceControlEndpointDeinit, &cfg->addr); } static int udc_mcux_host_wakeup(const struct device *dev) { return -ENOTSUP; } static int udc_mcux_set_address(const struct device *dev, const uint8_t addr) { uint8_t temp_addr = addr; return udc_mcux_control(dev, kUSB_DeviceControlSetDeviceAddress, &temp_addr); } static int udc_mcux_enable(const struct device *dev) { return udc_mcux_control(dev, kUSB_DeviceControlRun, NULL); } static int udc_mcux_disable(const struct device *dev) { return udc_mcux_control(dev, kUSB_DeviceControlStop, NULL); } static int udc_mcux_init(const struct device *dev) { const struct udc_mcux_config *config = dev->config; const usb_device_controller_interface_struct_t *mcux_if = config->mcux_if; struct udc_mcux_data *priv = udc_get_private(dev); usb_status_t status; if (priv->controller_id == 0xFFu) { return -ENOMEM; } #ifdef CONFIG_DT_HAS_NXP_USBPHY_ENABLED if (config->phy_config != NULL) { USB_EhciPhyInit(priv->controller_id, 0u, (usb_phy_config_struct_t *)&config->phy_config); } #endif /* Init MCUX USB device driver. 
*/ status = mcux_if->deviceInit(priv->controller_id, &priv->mcux_device, &(priv->mcux_device.controllerHandle)); if (status != kStatus_USB_Success) { return -ENOMEM; } /* enable USB interrupt */ config->irq_enable_func(dev); LOG_DBG("Initialized USB controller %x", (uint32_t)config->base); return 0; } static int udc_mcux_shutdown(const struct device *dev) { const struct udc_mcux_config *config = dev->config; const usb_device_controller_interface_struct_t *mcux_if = config->mcux_if; struct udc_mcux_data *priv = udc_get_private(dev); usb_status_t status; /* Disable interrupt */ config->irq_disable_func(dev); /* De-init MCUX USB device driver. */ status = mcux_if->deviceDeinit(priv->mcux_device.controllerHandle); if (status != kStatus_USB_Success) { return -ENOMEM; } return 0; } static int udc_mcux_lock(const struct device *dev) { return udc_lock_internal(dev, K_FOREVER); } static int udc_mcux_unlock(const struct device *dev) { return udc_unlock_internal(dev); } static inline void udc_mcux_get_hal_driver_id(struct udc_mcux_data *priv, const struct udc_mcux_config *config) { /* * MCUX USB controller drivers use an ID to tell the HAL drivers * which controller is being used. This part of the code converts * the base address to the ID value. 
*/ #ifdef USBHS_STACK_BASE_ADDRS uintptr_t usb_base_addrs[] = USBHS_STACK_BASE_ADDRS; #else uintptr_t usb_base_addrs[] = USBHS_BASE_ADDRS; #endif /* get the right controller id */ priv->controller_id = 0xFFu; /* invalid value */ for (uint8_t i = 0; i < ARRAY_SIZE(usb_base_addrs); i++) { if (usb_base_addrs[i] == config->base) { priv->controller_id = kUSB_ControllerEhci0 + i; break; } } } static int udc_mcux_driver_preinit(const struct device *dev) { const struct udc_mcux_config *config = dev->config; struct udc_data *data = dev->data; struct udc_mcux_data *priv = data->priv; int err; udc_mcux_get_hal_driver_id(priv, config); if (priv->controller_id == 0xFFu) { return -ENOMEM; } k_mutex_init(&data->mutex); for (int i = 0; i < config->num_of_eps; i++) { config->ep_cfg_out[i].caps.out = 1; if (i == 0) { config->ep_cfg_out[i].caps.control = 1; config->ep_cfg_out[i].caps.mps = 64; } else { config->ep_cfg_out[i].caps.bulk = 1; config->ep_cfg_out[i].caps.interrupt = 1; config->ep_cfg_out[i].caps.iso = 1; config->ep_cfg_out[i].caps.mps = 1024; } config->ep_cfg_out[i].addr = USB_EP_DIR_OUT | i; err = udc_register_ep(dev, &config->ep_cfg_out[i]); if (err != 0) { LOG_ERR("Failed to register endpoint"); return err; } } for (int i = 0; i < config->num_of_eps; i++) { config->ep_cfg_in[i].caps.in = 1; if (i == 0) { config->ep_cfg_in[i].caps.control = 1; config->ep_cfg_in[i].caps.mps = 64; } else { config->ep_cfg_in[i].caps.bulk = 1; config->ep_cfg_in[i].caps.interrupt = 1; config->ep_cfg_in[i].caps.iso = 1; config->ep_cfg_in[i].caps.mps = 1024; } config->ep_cfg_in[i].addr = USB_EP_DIR_IN | i; err = udc_register_ep(dev, &config->ep_cfg_in[i]); if (err != 0) { LOG_ERR("Failed to register endpoint"); return err; } } /* Requires udc_mcux_host_wakeup() implementation */ data->caps.rwup = false; data->caps.mps0 = USB_MCUX_MPS0; data->caps.hs = true; priv->dev = dev; pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); return 0; } static const struct udc_api udc_mcux_api = { 
.device_speed = udc_mcux_device_speed, .ep_enqueue = udc_mcux_ep_enqueue, .ep_dequeue = udc_mcux_ep_dequeue, .ep_set_halt = udc_mcux_ep_set_halt, .ep_clear_halt = udc_mcux_ep_clear_halt, .ep_try_config = NULL, .ep_enable = udc_mcux_ep_enable, .ep_disable = udc_mcux_ep_disable, .host_wakeup = udc_mcux_host_wakeup, .set_address = udc_mcux_set_address, .enable = udc_mcux_enable, .disable = udc_mcux_disable, .init = udc_mcux_init, .shutdown = udc_mcux_shutdown, .lock = udc_mcux_lock, .unlock = udc_mcux_unlock, }; /* EHCI device driver interface */ static const usb_device_controller_interface_struct_t udc_mcux_if = { USB_DeviceEhciInit, USB_DeviceEhciDeinit, USB_DeviceEhciSend, USB_DeviceEhciRecv, USB_DeviceEhciCancel, USB_DeviceEhciControl }; #define UDC_MCUX_PHY_DEFINE(n) \ static usb_phy_config_struct_t phy_config_##n = { \ .D_CAL = DT_PROP_OR(DT_INST_PHANDLE(n, phy_handle), tx_d_cal, 0), \ .TXCAL45DP = DT_PROP_OR(DT_INST_PHANDLE(n, phy_handle), tx_cal_45_dp_ohms, 0), \ .TXCAL45DM = DT_PROP_OR(DT_INST_PHANDLE(n, phy_handle), tx_cal_45_dm_ohms, 0), \ } #define UDC_MCUX_PHY_DEFINE_OR(n) \ COND_CODE_1(DT_NODE_HAS_PROP(DT_DRV_INST(n), phy_handle), \ (UDC_MCUX_PHY_DEFINE(n)), ()) #define UDC_MCUX_PHY_CFG_PTR_OR_NULL(n) \ COND_CODE_1(DT_NODE_HAS_PROP(DT_DRV_INST(n), phy_handle), \ (&phy_config_##n), (NULL)) #define USB_MCUX_EHCI_DEVICE_DEFINE(n) \ UDC_MCUX_PHY_DEFINE_OR(n); \ \ static void udc_irq_enable_func##n(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), \ DT_INST_IRQ(n, priority), \ udc_mcux_isr, \ DEVICE_DT_INST_GET(n), 0); \ \ irq_enable(DT_INST_IRQN(n)); \ } \ \ static void udc_irq_disable_func##n(const struct device *dev) \ { \ irq_disable(DT_INST_IRQN(n)); \ } \ \ static struct udc_ep_config \ ep_cfg_out##n[DT_INST_PROP(n, num_bidir_endpoints)]; \ static struct udc_ep_config \ ep_cfg_in##n[DT_INST_PROP(n, num_bidir_endpoints)]; \ \ PINCTRL_DT_INST_DEFINE(n); \ \ static struct udc_mcux_config priv_config_##n = { \ .base = DT_INST_REG_ADDR(n), \ 
.irq_enable_func = udc_irq_enable_func##n, \ .irq_disable_func = udc_irq_disable_func##n, \ .num_of_eps = DT_INST_PROP(n, num_bidir_endpoints), \ .ep_cfg_in = ep_cfg_in##n, \ .ep_cfg_out = ep_cfg_out##n, \ .mcux_if = &udc_mcux_if, \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .phy_config = UDC_MCUX_PHY_CFG_PTR_OR_NULL(n), \ }; \ \ static struct udc_mcux_data priv_data_##n = { \ }; \ \ static struct udc_data udc_data_##n = { \ .mutex = Z_MUTEX_INITIALIZER(udc_data_##n.mutex), \ .priv = &priv_data_##n, \ }; \ \ DEVICE_DT_INST_DEFINE(n, udc_mcux_driver_preinit, NULL, \ &udc_data_##n, &priv_config_##n, \ POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \ &udc_mcux_api); DT_INST_FOREACH_STATUS_OKAY(USB_MCUX_EHCI_DEVICE_DEFINE) ```
/content/code_sandbox/drivers/usb/udc/udc_mcux_ehci.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,740
```unknown menuconfig UDC_DRIVER bool "USB device controller drivers [EXPERIMENTAL]" select EXPERIMENTAL select NET_BUF help USB device controller driver. if UDC_DRIVER config UDC_BUF_COUNT int "Number of buffers in the pool" range 16 256 default 16 help Number of UDC request buffers in the pool. config UDC_BUF_POOL_SIZE int "Memory available for requests" range 64 32768 default 1024 help Total amount of memory available for UDC requests. config UDC_BUF_FORCE_NOCACHE bool "Place the buffer pools in the nocache memory region" depends on NOCACHE_MEMORY && DCACHE help Place the buffer pools in the nocache memory region if the driver cannot handle buffers in cached memory. config UDC_WORKQUEUE bool "Use a dedicated work queue for UDC drivers" help This option provides a dedicated work queue for UDC drivers. config UDC_WORKQUEUE_STACK_SIZE int "UDC workqueue stack size" depends on UDC_WORKQUEUE default 512 config UDC_WORKQUEUE_PRIORITY int "UDC workqueue priority" depends on UDC_WORKQUEUE default SYSTEM_WORKQUEUE_PRIORITY help By default, UDC work queue priority is the same as System workqueue priority. module = UDC_DRIVER module-str = usb drv source "subsys/logging/Kconfig.template.log_config" source "drivers/usb/udc/Kconfig.dwc2" source "drivers/usb/udc/Kconfig.nrf" source "drivers/usb/udc/Kconfig.kinetis" source "drivers/usb/udc/Kconfig.skeleton" source "drivers/usb/udc/Kconfig.virtual" source "drivers/usb/udc/Kconfig.stm32" source "drivers/usb/udc/Kconfig.it82xx2" source "drivers/usb/udc/Kconfig.mcux" source "drivers/usb/udc/Kconfig.numaker" endif # UDC_DRIVER ```
/content/code_sandbox/drivers/usb/udc/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
439
```c /* * */ #include "udc_common.h" #include "udc_dwc2.h" #include <string.h> #include <stdio.h> #include <zephyr/cache.h> #include <zephyr/kernel.h> #include <zephyr/devicetree.h> #include <zephyr/sys/util.h> #include <zephyr/sys/sys_io.h> #include <zephyr/sys/byteorder.h> #include <zephyr/drivers/usb/udc.h> #include <zephyr/usb/usb_ch9.h> #include <usb_dwc2_hw.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(udc_dwc2, CONFIG_UDC_DRIVER_LOG_LEVEL); #include "udc_dwc2_vendor_quirks.h" enum dwc2_drv_event_type { /* Trigger next transfer, must not be used for control OUT */ DWC2_DRV_EVT_XFER, /* Setup packet received */ DWC2_DRV_EVT_SETUP, /* OUT transaction for specific endpoint is finished */ DWC2_DRV_EVT_DOUT, /* IN transaction for specific endpoint is finished */ DWC2_DRV_EVT_DIN, }; struct dwc2_drv_event { const struct device *dev; enum dwc2_drv_event_type type; uint32_t bcnt; uint8_t ep; }; K_MSGQ_DEFINE(drv_msgq, sizeof(struct dwc2_drv_event), CONFIG_UDC_DWC2_MAX_QMESSAGES, sizeof(void *)); /* Minimum RX FIFO size in 32-bit words considering the largest used OUT packet * of 512 bytes. The value must be adjusted according to the number of OUT * endpoints. 
*/ #define UDC_DWC2_GRXFSIZ_DEFAULT (15U + 512U/4U) /* TX FIFO0 depth in 32-bit words (used by control IN endpoint) */ #define UDC_DWC2_FIFO0_DEPTH 16U /* Get Data FIFO access register */ #define UDC_DWC2_EP_FIFO(base, idx) ((mem_addr_t)base + 0x1000 * (idx + 1)) /* Driver private data per instance */ struct udc_dwc2_data { struct k_thread thread_data; uint32_t ghwcfg1; uint32_t enumspd; uint32_t txf_set; uint32_t max_xfersize; uint32_t max_pktcnt; uint32_t tx_len[16]; uint16_t dfifodepth; uint16_t rxfifo_depth; uint16_t max_txfifo_depth[16]; unsigned int dynfifosizing : 1; unsigned int bufferdma : 1; /* Number of endpoints including control endpoint */ uint8_t numdeveps; /* Number of IN endpoints including control endpoint */ uint8_t ineps; /* Number of OUT endpoints including control endpoint */ uint8_t outeps; uint8_t setup[8]; }; #if defined(CONFIG_PINCTRL) #include <zephyr/drivers/pinctrl.h> static int dwc2_init_pinctrl(const struct device *dev) { const struct udc_dwc2_config *const config = dev->config; const struct pinctrl_dev_config *const pcfg = config->pcfg; int ret = 0; if (pcfg == NULL) { LOG_INF("Skip pinctrl configuration"); return 0; } ret = pinctrl_apply_state(pcfg, PINCTRL_STATE_DEFAULT); if (ret) { LOG_ERR("Failed to apply default pinctrl state (%d)", ret); } LOG_DBG("Apply pinctrl"); return ret; } #else static int dwc2_init_pinctrl(const struct device *dev) { ARG_UNUSED(dev); return 0; } #endif static inline struct usb_dwc2_reg *dwc2_get_base(const struct device *dev) { const struct udc_dwc2_config *const config = dev->config; return config->base; } /* Get DOEPCTLn or DIEPCTLn register address */ static mem_addr_t dwc2_get_dxepctl_reg(const struct device *dev, const uint8_t ep) { struct usb_dwc2_reg *const base = dwc2_get_base(dev); uint8_t ep_idx = USB_EP_GET_IDX(ep); if (USB_EP_DIR_IS_OUT(ep)) { return (mem_addr_t)&base->out_ep[ep_idx].doepctl; } else { return (mem_addr_t)&base->in_ep[ep_idx].diepctl; } } /* Get available FIFO space in bytes */ 
static uint32_t dwc2_ftx_avail(const struct device *dev, const uint32_t idx)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	mem_addr_t reg = (mem_addr_t)&base->in_ep[idx].dtxfsts;
	uint32_t dtxfsts;

	dtxfsts = sys_read32(reg);

	/* DTXFSTS reports free space in 32-bit words; convert to bytes */
	return usb_dwc2_get_dtxfsts_ineptxfspcavail(dtxfsts) * 4;
}

/* Return the maximum DIEPTSIZn PktCnt value for IN endpoint idx.
 * Endpoint 0 has a narrower PktCnt field than the other endpoints.
 */
static uint32_t dwc2_get_iept_pktctn(const struct device *dev, const uint32_t idx)
{
	struct udc_dwc2_data *const priv = udc_get_private(dev);

	if (idx == 0) {
		return usb_dwc2_get_dieptsiz0_pktcnt(UINT32_MAX);
	} else {
		return priv->max_pktcnt;
	}
}

/* Return the maximum DIEPTSIZn XferSize value for IN endpoint idx.
 * Endpoint 0 has a narrower XferSize field than the other endpoints.
 */
static uint32_t dwc2_get_iept_xfersize(const struct device *dev, const uint32_t idx)
{
	struct udc_dwc2_data *const priv = udc_get_private(dev);

	if (idx == 0) {
		return usb_dwc2_get_dieptsiz0_xfersize(UINT32_MAX);
	} else {
		return priv->max_xfersize;
	}
}

/* Flush the (single, shared) RxFIFO and busy-wait for completion.
 * NOTE(review): the wait has no timeout — presumably RXFFLSH always
 * self-clears while the core is clocked; confirm against the databook.
 */
static void dwc2_flush_rx_fifo(const struct device *dev)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	mem_addr_t grstctl_reg = (mem_addr_t)&base->grstctl;

	sys_write32(USB_DWC2_GRSTCTL_RXFFLSH, grstctl_reg);
	while (sys_read32(grstctl_reg) & USB_DWC2_GRSTCTL_RXFFLSH) {
	}
}

/* Flush the TX FIFO currently assigned to IN endpoint idx (looked up from
 * DIEPCTLn.TxFNum) and busy-wait until the core clears TXFFLSH.
 */
static void dwc2_flush_tx_fifo(const struct device *dev, const uint8_t idx)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	mem_addr_t grstctl_reg = (mem_addr_t)&base->grstctl;
	/* TODO: use dwc2_get_dxepctl_reg() */
	mem_addr_t diepctl_reg = (mem_addr_t)&base->in_ep[idx].diepctl;
	uint32_t grstctl;
	uint32_t fnum;

	fnum = usb_dwc2_get_depctl_txfnum(sys_read32(diepctl_reg));
	grstctl = usb_dwc2_set_grstctl_txfnum(fnum) | USB_DWC2_GRSTCTL_TXFFLSH;

	sys_write32(grstctl, grstctl_reg);
	while (sys_read32(grstctl_reg) & USB_DWC2_GRSTCTL_TXFFLSH) {
	}
}

/* Return TX FIFOi depth in 32-bit words (i = f_idx + 1) */
static uint32_t dwc2_get_txfdep(const struct device *dev, const uint32_t f_idx)
{
	struct usb_dwc2_reg *const base = dwc2_get_base(dev);
	uint32_t dieptxf;

	dieptxf = sys_read32((mem_addr_t)&base->dieptxf[f_idx]);

	return usb_dwc2_get_dieptxf_inepntxfdep(dieptxf);
}

/* Return TX FIFOi address (i = f_idx + 1) */
static uint32_t dwc2_get_txfaddr(const struct device *dev, const uint32_t f_idx) { struct usb_dwc2_reg *const base = dwc2_get_base(dev); uint32_t dieptxf; dieptxf = sys_read32((mem_addr_t)&base->dieptxf[f_idx]); return usb_dwc2_get_dieptxf_inepntxfstaddr(dieptxf); } /* Set TX FIFOi address and depth (i = f_idx + 1) */ static void dwc2_set_txf(const struct device *dev, const uint32_t f_idx, const uint32_t dep, const uint32_t addr) { struct usb_dwc2_reg *const base = dwc2_get_base(dev); uint32_t dieptxf; dieptxf = usb_dwc2_set_dieptxf_inepntxfdep(dep) | usb_dwc2_set_dieptxf_inepntxfstaddr(addr); sys_write32(dieptxf, (mem_addr_t)&base->dieptxf[f_idx]); } /* Enable/disable endpoint interrupt */ static void dwc2_set_epint(const struct device *dev, struct udc_ep_config *const cfg, const bool enabled) { struct usb_dwc2_reg *const base = dwc2_get_base(dev); mem_addr_t reg = (mem_addr_t)&base->daintmsk; uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr); uint32_t epmsk; if (USB_EP_DIR_IS_IN(cfg->addr)) { epmsk = USB_DWC2_DAINT_INEPINT(ep_idx); } else { epmsk = USB_DWC2_DAINT_OUTEPINT(ep_idx); } if (enabled) { sys_set_bits(reg, epmsk); } else { sys_clear_bits(reg, epmsk); } } static bool dwc2_dma_buffer_ok_to_use(const struct device *dev, void *buf, uint32_t xfersize, uint16_t mps) { ARG_UNUSED(dev); if (!IS_ALIGNED(buf, 4)) { LOG_ERR("Buffer not aligned"); return false; } /* If Max Packet Size is not */ if (unlikely(mps % 4) && (xfersize > mps)) { LOG_ERR("Padding not supported"); return false; } return true; } /* Can be called from ISR context */ static int dwc2_tx_fifo_write(const struct device *dev, struct udc_ep_config *const cfg, struct net_buf *const buf) { struct usb_dwc2_reg *const base = dwc2_get_base(dev); struct udc_dwc2_data *const priv = udc_get_private(dev); uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr); mem_addr_t dieptsiz_reg = (mem_addr_t)&base->in_ep[ep_idx].dieptsiz; /* TODO: use dwc2_get_dxepctl_reg() */ mem_addr_t diepctl_reg = 
(mem_addr_t)&base->in_ep[ep_idx].diepctl; mem_addr_t diepint_reg = (mem_addr_t)&base->in_ep[ep_idx].diepint; uint32_t max_xfersize, max_pktcnt, pktcnt, spcavail; const size_t d = sizeof(uint32_t); unsigned int key; uint32_t len; spcavail = dwc2_ftx_avail(dev, ep_idx); /* Round down to multiple of endpoint MPS */ spcavail -= spcavail % cfg->mps; /* * Here, the available space should be equal to the FIFO space * assigned/configured for that endpoint because we do not schedule another * transfer until the previous one has not finished. For simplicity, * we only check that the available space is not less than the endpoint * MPS. */ if (spcavail < cfg->mps) { LOG_ERR("ep 0x%02x FIFO space is too low, %u (%u)", cfg->addr, spcavail, dwc2_ftx_avail(dev, ep_idx)); return -EAGAIN; } len = MIN(buf->len, spcavail); if (len != 0U) { max_pktcnt = dwc2_get_iept_pktctn(dev, ep_idx); max_xfersize = dwc2_get_iept_xfersize(dev, ep_idx); if (len > max_xfersize) { /* * Avoid short packets if the transfer size cannot be * handled in one set. */ len = ROUND_DOWN(max_xfersize, cfg->mps); } /* * Determine the number of packets for the current transfer; * if the pktcnt is too large, truncate the actual transfer length. */ pktcnt = DIV_ROUND_UP(len, cfg->mps); if (pktcnt > max_pktcnt) { pktcnt = max_pktcnt; len = pktcnt * cfg->mps; } } else { /* ZLP */ pktcnt = 1U; } LOG_DBG("Prepare ep 0x%02x xfer len %u pktcnt %u spcavail %u", cfg->addr, len, pktcnt, spcavail); priv->tx_len[ep_idx] = len; /* Lock and write to endpoint FIFO */ key = irq_lock(); /* Set number of packets and transfer size */ sys_write32((pktcnt << USB_DWC2_DEPTSIZN_PKTCNT_POS) | len, dieptsiz_reg); if (priv->bufferdma) { if (!dwc2_dma_buffer_ok_to_use(dev, buf->data, len, cfg->mps)) { /* Cannot continue unless buffer is bounced. Device will * cease to function. Is fatal error appropriate here? 
*/ irq_unlock(key); return -ENOTSUP; } sys_write32((uint32_t)buf->data, (mem_addr_t)&base->in_ep[ep_idx].diepdma); sys_cache_data_flush_range(buf->data, len); } /* Clear NAK and set endpoint enable */ sys_set_bits(diepctl_reg, USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_CNAK); /* Clear IN Endpoint NAK Effective interrupt in case it was set */ sys_write32(USB_DWC2_DIEPINT_INEPNAKEFF, diepint_reg); if (!priv->bufferdma) { /* FIFO access is always in 32-bit words */ for (uint32_t i = 0UL; i < len; i += d) { uint32_t val = buf->data[i]; if (i + 1 < len) { val |= ((uint32_t)buf->data[i + 1UL]) << 8; } if (i + 2 < len) { val |= ((uint32_t)buf->data[i + 2UL]) << 16; } if (i + 3 < len) { val |= ((uint32_t)buf->data[i + 3UL]) << 24; } sys_write32(val, UDC_DWC2_EP_FIFO(base, ep_idx)); } } irq_unlock(key); return 0; } static inline int dwc2_read_fifo(const struct device *dev, const uint8_t ep, struct net_buf *const buf, const size_t size) { struct usb_dwc2_reg *const base = dwc2_get_base(dev); size_t len = buf ? 
MIN(size, net_buf_tailroom(buf)) : 0; const size_t d = sizeof(uint32_t); /* FIFO access is always in 32-bit words */ for (uint32_t n = 0; n < (len / d); n++) { net_buf_add_le32(buf, sys_read32(UDC_DWC2_EP_FIFO(base, ep))); } if (len % d) { uint8_t r[4]; /* Get the remaining */ sys_put_le32(sys_read32(UDC_DWC2_EP_FIFO(base, ep)), r); for (uint32_t i = 0U; i < (len % d); i++) { net_buf_add_u8(buf, r[i]); } } if (unlikely(size > len)) { for (uint32_t n = 0; n < DIV_ROUND_UP(size - len, d); n++) { (void)sys_read32(UDC_DWC2_EP_FIFO(base, ep)); } } return 0; } static uint32_t dwc2_rx_xfer_size(struct udc_dwc2_data *const priv, struct udc_ep_config *const cfg, struct net_buf *buf) { uint32_t size; if (priv->bufferdma) { size = net_buf_tailroom(buf); /* Do as many packets in a single DMA as possible */ if (size > priv->max_xfersize) { size = ROUND_DOWN(priv->max_xfersize, cfg->mps); } } else { /* Completer mode can always program Max Packet Size, RxFLvl * interrupt will drop excessive data if necessary (i.e. buffer * is too short). 
*/ size = cfg->mps; } return size; } /* Can be called from ISR and we call it only when there is a buffer in the queue */ static void dwc2_prep_rx(const struct device *dev, struct net_buf *buf, struct udc_ep_config *const cfg, const bool ncnak) { struct usb_dwc2_reg *const base = dwc2_get_base(dev); struct udc_dwc2_data *const priv = udc_get_private(dev); uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr); mem_addr_t doeptsiz_reg = (mem_addr_t)&base->out_ep[ep_idx].doeptsiz; mem_addr_t doepctl_reg = dwc2_get_dxepctl_reg(dev, ep_idx); uint32_t doeptsiz; uint32_t xfersize; xfersize = dwc2_rx_xfer_size(priv, cfg, buf); doeptsiz = xfersize | usb_dwc2_set_deptsizn_pktcnt(DIV_ROUND_UP(xfersize, cfg->mps)); if (cfg->addr == USB_CONTROL_EP_OUT) { /* Use 1 to allow 8 byte long buffers for SETUP data */ doeptsiz |= (1 << USB_DWC2_DOEPTSIZ0_SUPCNT_POS); } sys_write32(doeptsiz, doeptsiz_reg); if (priv->bufferdma) { if (!dwc2_dma_buffer_ok_to_use(dev, buf->data, xfersize, cfg->mps)) { /* Cannot continue unless buffer is bounced. Device will * cease to function. Is fatal error appropriate here? 
*/ return; } sys_write32((uint32_t)buf->data, (mem_addr_t)&base->out_ep[ep_idx].doepdma); sys_cache_data_invd_range(buf->data, xfersize); } if (ncnak) { sys_set_bits(doepctl_reg, USB_DWC2_DEPCTL_EPENA); } else { sys_set_bits(doepctl_reg, USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_CNAK); } LOG_INF("Prepare RX 0x%02x doeptsiz 0x%x", cfg->addr, doeptsiz); } static void dwc2_handle_xfer_next(const struct device *dev, struct udc_ep_config *const cfg) { struct net_buf *buf; buf = udc_buf_peek(dev, cfg->addr); if (buf == NULL) { return; } if (USB_EP_DIR_IS_OUT(cfg->addr)) { dwc2_prep_rx(dev, buf, cfg, 0); } else { if (dwc2_tx_fifo_write(dev, cfg, buf)) { LOG_ERR("Failed to start write to TX FIFO, ep 0x%02x", cfg->addr); } } udc_ep_set_busy(dev, cfg->addr, true); } static int dwc2_ctrl_feed_dout(const struct device *dev, const size_t length) { struct udc_ep_config *ep_cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT); struct net_buf *buf; buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, length); if (buf == NULL) { return -ENOMEM; } udc_buf_put(ep_cfg, buf); dwc2_prep_rx(dev, buf, ep_cfg, 0); LOG_DBG("feed buf %p", buf); return 0; } static int dwc2_handle_evt_setup(const struct device *dev) { struct udc_dwc2_data *const priv = udc_get_private(dev); struct net_buf *buf; int err; buf = udc_buf_get(dev, USB_CONTROL_EP_OUT); if (buf == NULL) { LOG_ERR("No buffer queued for control ep"); return -ENODATA; } net_buf_add_mem(buf, priv->setup, sizeof(priv->setup)); udc_ep_buf_set_setup(buf); LOG_HEXDUMP_DBG(buf->data, buf->len, "setup"); /* Update to next stage of control transfer */ udc_ctrl_update_stage(dev, buf); /* We always allocate and feed buffer large enough for a setup packet. */ if (udc_ctrl_stage_is_data_out(dev)) { /* Allocate and feed buffer for data OUT stage */ LOG_DBG("s:%p|feed for -out-", buf); /* Allocate at least 8 bytes in case the host decides to send * SETUP DATA instead of OUT DATA packet. 
*/ err = dwc2_ctrl_feed_dout(dev, MAX(udc_data_stage_length(buf), 8)); if (err == -ENOMEM) { err = udc_submit_ep_event(dev, buf, err); } } else if (udc_ctrl_stage_is_data_in(dev)) { LOG_DBG("s:%p|feed for -in-status", buf); err = dwc2_ctrl_feed_dout(dev, 8); if (err == -ENOMEM) { err = udc_submit_ep_event(dev, buf, err); } err = udc_ctrl_submit_s_in_status(dev); } else { LOG_DBG("s:%p|feed >setup", buf); err = dwc2_ctrl_feed_dout(dev, 8); if (err == -ENOMEM) { err = udc_submit_ep_event(dev, buf, err); } err = udc_ctrl_submit_s_status(dev); } return err; } static inline int dwc2_handle_evt_dout(const struct device *dev, struct udc_ep_config *const cfg) { struct net_buf *buf; int err = 0; buf = udc_buf_get(dev, cfg->addr); if (buf == NULL) { LOG_ERR("No buffer queued for control ep"); return -ENODATA; } udc_ep_set_busy(dev, cfg->addr, false); if (cfg->addr == USB_CONTROL_EP_OUT) { if (udc_ctrl_stage_is_status_out(dev)) { /* s-in-status finished */ LOG_DBG("dout:%p| status, feed >s", buf); /* Feed a buffer for the next setup packet */ err = dwc2_ctrl_feed_dout(dev, 8); if (err == -ENOMEM) { err = udc_submit_ep_event(dev, buf, err); } /* Status stage finished, notify upper layer */ udc_ctrl_submit_status(dev, buf); } else { /* * For all other cases we feed with a buffer * large enough for setup packet. 
*/ LOG_DBG("dout:%p| data, feed >s", buf); err = dwc2_ctrl_feed_dout(dev, 8); if (err == -ENOMEM) { err = udc_submit_ep_event(dev, buf, err); } } /* Update to next stage of control transfer */ udc_ctrl_update_stage(dev, buf); if (udc_ctrl_stage_is_status_in(dev)) { err = udc_ctrl_submit_s_out_status(dev, buf); } } else { err = udc_submit_ep_event(dev, buf, 0); } return err; } static int dwc2_handle_evt_din(const struct device *dev, struct udc_ep_config *const cfg) { struct net_buf *buf; buf = udc_buf_peek(dev, cfg->addr); if (buf == NULL) { LOG_ERR("No buffer for ep 0x%02x", cfg->addr); udc_submit_event(dev, UDC_EVT_ERROR, -ENOBUFS); return -ENOBUFS; } if (buf->len) { /* Looks like we failed to continue in ISR, retry */ return dwc2_tx_fifo_write(dev, cfg, buf); } if (cfg->addr == USB_CONTROL_EP_IN && udc_ep_buf_has_zlp(buf)) { udc_ep_buf_clear_zlp(buf); return dwc2_tx_fifo_write(dev, cfg, buf); } buf = udc_buf_get(dev, cfg->addr); udc_ep_set_busy(dev, cfg->addr, false); if (cfg->addr == USB_CONTROL_EP_IN) { if (udc_ctrl_stage_is_status_in(dev) || udc_ctrl_stage_is_no_data(dev)) { /* Status stage finished, notify upper layer */ udc_ctrl_submit_status(dev, buf); } /* Update to next stage of control transfer */ udc_ctrl_update_stage(dev, buf); if (udc_ctrl_stage_is_status_out(dev)) { /* * IN transfer finished, release buffer, * control OUT buffer should be already fed. */ net_buf_unref(buf); } return 0; } return udc_submit_ep_event(dev, buf, 0); } static ALWAYS_INLINE void dwc2_thread_handler(void *const arg) { const struct device *dev = (const struct device *)arg; struct udc_ep_config *ep_cfg; struct dwc2_drv_event evt; /* This is the bottom-half of the ISR handler and the place where * a new transfer can be fed. 
*/ k_msgq_get(&drv_msgq, &evt, K_FOREVER); ep_cfg = udc_get_ep_cfg(dev, evt.ep); switch (evt.type) { case DWC2_DRV_EVT_XFER: LOG_DBG("New transfer in the queue"); break; case DWC2_DRV_EVT_SETUP: LOG_DBG("SETUP event"); dwc2_handle_evt_setup(dev); break; case DWC2_DRV_EVT_DOUT: LOG_DBG("DOUT event ep 0x%02x", ep_cfg->addr); dwc2_handle_evt_dout(dev, ep_cfg); break; case DWC2_DRV_EVT_DIN: LOG_DBG("DIN event"); dwc2_handle_evt_din(dev, ep_cfg); break; } if (ep_cfg->addr != USB_CONTROL_EP_OUT && !udc_ep_is_busy(dev, ep_cfg->addr)) { dwc2_handle_xfer_next(dev, ep_cfg); } else { LOG_DBG("ep 0x%02x busy", ep_cfg->addr); } } static void dwc2_on_bus_reset(const struct device *dev) { struct usb_dwc2_reg *const base = dwc2_get_base(dev); struct udc_dwc2_data *const priv = udc_get_private(dev); uint32_t doepmsk; /* Set the NAK bit for all OUT endpoints */ for (uint8_t i = 0U; i < priv->numdeveps; i++) { uint32_t epdir = usb_dwc2_get_ghwcfg1_epdir(priv->ghwcfg1, i); mem_addr_t doepctl_reg; LOG_DBG("ep 0x%02x EPDIR %u", i, epdir); if (epdir == USB_DWC2_GHWCFG1_EPDIR_OUT || epdir == USB_DWC2_GHWCFG1_EPDIR_BDIR) { doepctl_reg = dwc2_get_dxepctl_reg(dev, i); sys_write32(USB_DWC2_DEPCTL_SNAK, doepctl_reg); } } doepmsk = USB_DWC2_DOEPINT_SETUP; if (priv->bufferdma) { doepmsk |= USB_DWC2_DOEPINT_XFERCOMPL | USB_DWC2_DOEPINT_STSPHSERCVD; } sys_write32(doepmsk, (mem_addr_t)&base->doepmsk); sys_set_bits((mem_addr_t)&base->diepmsk, USB_DWC2_DIEPINT_XFERCOMPL); /* Software has to handle RxFLvl interrupt only in Completer mode */ if (!priv->bufferdma) { sys_set_bits((mem_addr_t)&base->gintmsk, USB_DWC2_GINTSTS_RXFLVL); } /* Clear device address during reset. 
*/ sys_clear_bits((mem_addr_t)&base->dcfg, USB_DWC2_DCFG_DEVADDR_MASK); } static void dwc2_handle_enumdone(const struct device *dev) { struct usb_dwc2_reg *const base = dwc2_get_base(dev); struct udc_dwc2_data *const priv = udc_get_private(dev); uint32_t dsts; dsts = sys_read32((mem_addr_t)&base->dsts); priv->enumspd = usb_dwc2_get_dsts_enumspd(dsts); } static inline int dwc2_read_fifo_setup(const struct device *dev, uint8_t ep, const size_t size) { struct usb_dwc2_reg *const base = dwc2_get_base(dev); struct udc_dwc2_data *const priv = udc_get_private(dev); size_t offset; /* FIFO access is always in 32-bit words */ if (size != 8) { LOG_ERR("%d bytes SETUP", size); } /* * We store the setup packet temporarily in the driver's private data * because there is always a race risk after the status stage OUT * packet from the host and the new setup packet. This is fine in * bottom-half processing because the events arrive in a queue and * there will be a next net_buf for the setup packet. */ for (offset = 0; offset < MIN(size, 8); offset += 4) { sys_put_le32(sys_read32(UDC_DWC2_EP_FIFO(base, ep)), &priv->setup[offset]); } /* On protocol error simply discard extra data */ while (offset < size) { sys_read32(UDC_DWC2_EP_FIFO(base, ep)); offset += 4; } return 0; } static inline void dwc2_handle_rxflvl(const struct device *dev) { struct usb_dwc2_reg *const base = dwc2_get_base(dev); struct udc_ep_config *ep_cfg; struct dwc2_drv_event evt; struct net_buf *buf; uint32_t grxstsp; uint32_t pktsts; grxstsp = sys_read32((mem_addr_t)&base->grxstsp); evt.ep = usb_dwc2_get_grxstsp_epnum(grxstsp); evt.bcnt = usb_dwc2_get_grxstsp_bcnt(grxstsp); pktsts = usb_dwc2_get_grxstsp_pktsts(grxstsp); LOG_DBG("ep 0x%02x: pktsts %u, bcnt %u", evt.ep, pktsts, evt.bcnt); switch (pktsts) { case USB_DWC2_GRXSTSR_PKTSTS_SETUP: dwc2_read_fifo_setup(dev, evt.ep, evt.bcnt); break; case USB_DWC2_GRXSTSR_PKTSTS_OUT_DATA: evt.type = DWC2_DRV_EVT_DOUT; ep_cfg = udc_get_ep_cfg(dev, evt.ep); buf = 
udc_buf_peek(dev, ep_cfg->addr); /* RxFIFO data must be retrieved even when buf is NULL */ dwc2_read_fifo(dev, evt.ep, buf, evt.bcnt); if (buf == NULL) { LOG_ERR("No buffer for ep 0x%02x", ep_cfg->addr); udc_submit_event(dev, UDC_EVT_ERROR, -ENOBUFS); break; } if (net_buf_tailroom(buf) && evt.bcnt == ep_cfg->mps) { dwc2_prep_rx(dev, buf, ep_cfg, 0); } else { k_msgq_put(&drv_msgq, &evt, K_NO_WAIT); } break; case USB_DWC2_GRXSTSR_PKTSTS_OUT_DATA_DONE: LOG_DBG("RX pktsts DONE"); break; case USB_DWC2_GRXSTSR_PKTSTS_SETUP_DONE: LOG_DBG("SETUP pktsts DONE"); case USB_DWC2_GRXSTSR_PKTSTS_GLOBAL_OUT_NAK: LOG_DBG("Global OUT NAK"); break; default: break; } } static inline void dwc2_handle_in_xfercompl(const struct device *dev, const uint8_t ep_idx) { struct udc_dwc2_data *const priv = udc_get_private(dev); struct udc_ep_config *ep_cfg; struct dwc2_drv_event evt; struct net_buf *buf; ep_cfg = udc_get_ep_cfg(dev, ep_idx | USB_EP_DIR_IN); buf = udc_buf_peek(dev, ep_cfg->addr); if (buf == NULL) { udc_submit_event(dev, UDC_EVT_ERROR, -ENOBUFS); return; } net_buf_pull(buf, priv->tx_len[ep_idx]); if (buf->len && dwc2_tx_fifo_write(dev, ep_cfg, buf) == 0) { return; } evt.dev = dev; evt.ep = ep_cfg->addr; evt.type = DWC2_DRV_EVT_DIN; k_msgq_put(&drv_msgq, &evt, K_NO_WAIT); } static inline void dwc2_handle_iepint(const struct device *dev) { struct usb_dwc2_reg *const base = dwc2_get_base(dev); const uint8_t n_max = 16; uint32_t diepmsk; uint32_t daint; diepmsk = sys_read32((mem_addr_t)&base->diepmsk); daint = sys_read32((mem_addr_t)&base->daint); for (uint8_t n = 0U; n < n_max; n++) { mem_addr_t diepint_reg = (mem_addr_t)&base->in_ep[n].diepint; uint32_t diepint; uint32_t status; if (daint & USB_DWC2_DAINT_INEPINT(n)) { /* Read and clear interrupt status */ diepint = sys_read32(diepint_reg); status = diepint & diepmsk; sys_write32(status, diepint_reg); LOG_DBG("ep 0x%02x interrupt status: 0x%x", n | USB_EP_DIR_IN, status); if (status & USB_DWC2_DIEPINT_XFERCOMPL) { 
dwc2_handle_in_xfercompl(dev, n); } } } /* Clear IEPINT interrupt */ sys_write32(USB_DWC2_GINTSTS_IEPINT, (mem_addr_t)&base->gintsts); } static inline void dwc2_handle_out_xfercompl(const struct device *dev, const uint8_t ep_idx) { struct udc_ep_config *ep_cfg = udc_get_ep_cfg(dev, ep_idx); struct udc_dwc2_data *const priv = udc_get_private(dev); struct usb_dwc2_reg *const base = dwc2_get_base(dev); struct dwc2_drv_event evt; struct net_buf *buf; uint32_t doeptsiz; doeptsiz = sys_read32((mem_addr_t)&base->out_ep[ep_idx].doeptsiz); buf = udc_buf_peek(dev, ep_cfg->addr); if (!buf) { LOG_ERR("No buffer for ep 0x%02x", ep_cfg->addr); udc_submit_event(dev, UDC_EVT_ERROR, -ENOBUFS); return; } evt.type = DWC2_DRV_EVT_DOUT; evt.ep = ep_cfg->addr; /* Assume udc buffer and endpoint config is the same as it was when * transfer was scheduled in dwc2_prep_rx(). The original transfer size * value is necessary here because controller decreases the value for * every byte stored. */ evt.bcnt = dwc2_rx_xfer_size(priv, ep_cfg, buf) - usb_dwc2_get_deptsizn_xfersize(doeptsiz); if (priv->bufferdma) { sys_cache_data_invd_range(buf->data, evt.bcnt); } net_buf_add(buf, evt.bcnt); if (((evt.bcnt % ep_cfg->mps) == 0) && net_buf_tailroom(buf)) { dwc2_prep_rx(dev, buf, ep_cfg, 0); } else { k_msgq_put(&drv_msgq, &evt, K_NO_WAIT); } } static inline void dwc2_handle_oepint(const struct device *dev) { struct usb_dwc2_reg *const base = dwc2_get_base(dev); struct udc_dwc2_data *const priv = udc_get_private(dev); const uint8_t n_max = 16; uint32_t doepmsk; uint32_t daint; doepmsk = sys_read32((mem_addr_t)&base->doepmsk); daint = sys_read32((mem_addr_t)&base->daint); for (uint8_t n = 0U; n < n_max; n++) { mem_addr_t doepint_reg = (mem_addr_t)&base->out_ep[n].doepint; uint32_t doepint; uint32_t status; if (!(daint & USB_DWC2_DAINT_OUTEPINT(n))) { continue; } /* Read and clear interrupt status */ doepint = sys_read32(doepint_reg); status = doepint & doepmsk; sys_write32(status, doepint_reg); LOG_DBG("ep 
0x%02x interrupt status: 0x%x", n, status); /* StupPktRcvd is not enabled for interrupt, but must be checked * when XferComp hits to determine if SETUP token was received. */ if (priv->bufferdma && (status & USB_DWC2_DOEPINT_XFERCOMPL) && (doepint & USB_DWC2_DOEPINT_STUPPKTRCVD)) { uint32_t addr; sys_write32(USB_DWC2_DOEPINT_STUPPKTRCVD, doepint_reg); status &= ~USB_DWC2_DOEPINT_XFERCOMPL; /* DMAAddr points past the memory location where the * SETUP data was stored. Copy the received SETUP data * to temporary location used also in Completer mode * which allows common SETUP interrupt handling. */ addr = sys_read32((mem_addr_t)&base->out_ep[0].doepdma); sys_cache_data_invd_range((void *)(addr - 8), 8); memcpy(priv->setup, (void *)(addr - 8), sizeof(priv->setup)); } if (status & USB_DWC2_DOEPINT_SETUP) { struct dwc2_drv_event evt = { .type = DWC2_DRV_EVT_SETUP, .ep = USB_CONTROL_EP_OUT, .bcnt = 8, }; k_msgq_put(&drv_msgq, &evt, K_NO_WAIT); } if (status & USB_DWC2_DOEPINT_STSPHSERCVD) { /* Driver doesn't need any special handling, but it is * mandatory that the bit is cleared in Buffer DMA mode. * If the bit is not cleared (i.e. when this interrupt * bit is masked), then SETUP interrupts will cease * after first control transfer with data stage from * device to host. */ } if (status & USB_DWC2_DOEPINT_XFERCOMPL) { dwc2_handle_out_xfercompl(dev, n); } } /* Clear OEPINT interrupt */ sys_write32(USB_DWC2_GINTSTS_OEPINT, (mem_addr_t)&base->gintsts); } static void udc_dwc2_isr_handler(const struct device *dev) { const struct udc_dwc2_config *const config = dev->config; struct usb_dwc2_reg *const base = config->base; mem_addr_t gintsts_reg = (mem_addr_t)&base->gintsts; uint32_t int_status; uint32_t gintmsk; gintmsk = sys_read32((mem_addr_t)&base->gintmsk); /* Read and handle interrupt status register */ while ((int_status = sys_read32(gintsts_reg) & gintmsk)) { LOG_DBG("GINTSTS 0x%x", int_status); if (int_status & USB_DWC2_GINTSTS_SOF) { /* Clear USB SOF interrupt. 
*/ sys_write32(USB_DWC2_GINTSTS_SOF, gintsts_reg); udc_submit_event(dev, UDC_EVT_SOF, 0); } if (int_status & USB_DWC2_GINTSTS_USBRST) { /* Clear and handle USB Reset interrupt. */ sys_write32(USB_DWC2_GINTSTS_USBRST, gintsts_reg); dwc2_on_bus_reset(dev); LOG_DBG("USB Reset interrupt"); } if (int_status & USB_DWC2_GINTSTS_ENUMDONE) { /* Clear and handle Enumeration Done interrupt. */ sys_write32(USB_DWC2_GINTSTS_ENUMDONE, gintsts_reg); dwc2_handle_enumdone(dev); udc_submit_event(dev, UDC_EVT_RESET, 0); } if (int_status & USB_DWC2_GINTSTS_USBSUSP) { /* Clear USB Suspend interrupt. */ sys_write32(USB_DWC2_GINTSTS_USBSUSP, gintsts_reg); udc_set_suspended(dev, true); udc_submit_event(dev, UDC_EVT_SUSPEND, 0); } if (int_status & USB_DWC2_GINTSTS_WKUPINT) { /* Clear Resume/Remote Wakeup Detected interrupt. */ sys_write32(USB_DWC2_GINTSTS_WKUPINT, gintsts_reg); udc_set_suspended(dev, false); udc_submit_event(dev, UDC_EVT_RESUME, 0); } if (int_status & USB_DWC2_GINTSTS_IEPINT) { /* Handle IN Endpoints interrupt */ dwc2_handle_iepint(dev); } if (int_status & USB_DWC2_GINTSTS_RXFLVL) { /* Handle RxFIFO Non-Empty interrupt */ dwc2_handle_rxflvl(dev); } if (int_status & USB_DWC2_GINTSTS_OEPINT) { /* Handle OUT Endpoints interrupt */ dwc2_handle_oepint(dev); } } (void)dwc2_quirk_irq_clear(dev); } static void dwc2_unset_unused_fifo(const struct device *dev) { struct udc_dwc2_data *const priv = udc_get_private(dev); struct udc_ep_config *tmp; for (uint8_t i = priv->ineps - 1U; i > 0; i--) { tmp = udc_get_ep_cfg(dev, i | USB_EP_DIR_IN); if (tmp->stat.enabled && (priv->txf_set & BIT(i))) { return; } if (!tmp->stat.enabled && (priv->txf_set & BIT(i))) { priv->txf_set &= ~BIT(i); } } } /* * In dedicated FIFO mode there are i (i = 1 ... ineps - 1) FIFO size registers, * e.g. DIEPTXF1, DIEPTXF2, ... DIEPTXF4. When dynfifosizing is enabled, * the size register is mutable. The offset of DIEPTXF1 registers is 0. 
*/ static int dwc2_set_dedicated_fifo(const struct device *dev, struct udc_ep_config *const cfg, uint32_t *const diepctl) { struct udc_dwc2_data *const priv = udc_get_private(dev); uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr); uint32_t txfaddr; uint32_t txfdep; uint32_t tmp; /* Keep everything but FIFO number */ tmp = *diepctl & ~USB_DWC2_DEPCTL_TXFNUM_MASK; if (priv->dynfifosizing) { if (priv->txf_set & ~BIT_MASK(ep_idx)) { dwc2_unset_unused_fifo(dev); } if (priv->txf_set & ~BIT_MASK(ep_idx)) { LOG_WRN("Some of the FIFOs higher than %u are set, %lx", ep_idx, priv->txf_set & ~BIT_MASK(ep_idx)); return -EIO; } if ((ep_idx - 1) != 0U) { txfaddr = dwc2_get_txfdep(dev, ep_idx - 2) + dwc2_get_txfaddr(dev, ep_idx - 2); } else { txfaddr = priv->rxfifo_depth + MAX(UDC_DWC2_FIFO0_DEPTH, priv->max_txfifo_depth[0]); } /* Make sure to not set TxFIFO greater than hardware allows */ txfdep = DIV_ROUND_UP(cfg->mps, 4U); if (txfdep > priv->max_txfifo_depth[ep_idx]) { return -ENOMEM; } /* Set FIFO depth (32-bit words) and address */ dwc2_set_txf(dev, ep_idx - 1, txfdep, txfaddr); } else { txfdep = dwc2_get_txfdep(dev, ep_idx - 1); txfaddr = dwc2_get_txfaddr(dev, ep_idx - 1); if (cfg->mps < txfdep * 4U) { return -ENOMEM; } LOG_DBG("Reuse FIFO%u addr 0x%08x depth %u", ep_idx, txfaddr, txfdep); } /* Assign FIFO to the IN endpoint */ *diepctl = tmp | usb_dwc2_set_depctl_txfnum(ep_idx); priv->txf_set |= BIT(ep_idx); dwc2_flush_tx_fifo(dev, ep_idx); LOG_INF("Set FIFO%u (ep 0x%02x) addr 0x%04x depth %u size %u", ep_idx, cfg->addr, txfaddr, txfdep, dwc2_ftx_avail(dev, ep_idx)); return 0; } static int dwc2_ep_control_enable(const struct device *dev, struct udc_ep_config *const cfg) { mem_addr_t dxepctl0_reg; uint32_t dxepctl0; dxepctl0_reg = dwc2_get_dxepctl_reg(dev, cfg->addr); dxepctl0 = sys_read32(dxepctl0_reg); dxepctl0 &= ~USB_DWC2_DEPCTL0_MPS_MASK; switch (cfg->mps) { case 8: dxepctl0 |= USB_DWC2_DEPCTL0_MPS_8 << USB_DWC2_DEPCTL_MPS_POS; break; case 16: dxepctl0 |= 
USB_DWC2_DEPCTL0_MPS_16 << USB_DWC2_DEPCTL_MPS_POS; break; case 32: dxepctl0 |= USB_DWC2_DEPCTL0_MPS_32 << USB_DWC2_DEPCTL_MPS_POS; break; case 64: dxepctl0 |= USB_DWC2_DEPCTL0_MPS_64 << USB_DWC2_DEPCTL_MPS_POS; break; default: return -EINVAL; } dxepctl0 |= USB_DWC2_DEPCTL_USBACTEP; if (cfg->addr == USB_CONTROL_EP_OUT) { int ret; dwc2_flush_rx_fifo(dev); ret = dwc2_ctrl_feed_dout(dev, 8); if (ret) { return ret; } } else { dwc2_flush_tx_fifo(dev, 0); } sys_write32(dxepctl0, dxepctl0_reg); dwc2_set_epint(dev, cfg, true); return 0; } static int udc_dwc2_ep_activate(const struct device *dev, struct udc_ep_config *const cfg) { struct usb_dwc2_reg *const base = dwc2_get_base(dev); struct udc_dwc2_data *const priv = udc_get_private(dev); uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr); mem_addr_t dxepctl_reg; uint32_t dxepctl; LOG_DBG("Enable ep 0x%02x", cfg->addr); if (ep_idx == 0U) { return dwc2_ep_control_enable(dev, cfg); } if (USB_EP_DIR_IS_OUT(cfg->addr)) { /* TODO: use dwc2_get_dxepctl_reg() */ dxepctl_reg = (mem_addr_t)&base->out_ep[ep_idx].doepctl; } else { if (priv->ineps > 0U && ep_idx > (priv->ineps - 1U)) { LOG_ERR("No resources available for ep 0x%02x", cfg->addr); return -EINVAL; } dxepctl_reg = (mem_addr_t)&base->in_ep[ep_idx].diepctl; } if (cfg->mps > usb_dwc2_get_depctl_mps(UINT16_MAX)) { return -EINVAL; } if (priv->bufferdma && (cfg->mps % 4)) { /* TODO: In Buffer DMA mode, DMA will insert padding bytes in * between packets if endpoint Max Packet Size is not multiple * of 4 (DWORD) and single transfer spans across multiple * packets. * * In order to support such Max Packet Sizes, the driver would * have to remove the padding in between the packets. Besides * just driver shuffling the data, the buffers would have to be * large enough to temporarily hold the paddings. * * For the time being just error out early. 
*/ LOG_ERR("Driver requires MPS to be multiple of 4"); return -EINVAL; } dxepctl = sys_read32(dxepctl_reg); /* Set max packet size */ dxepctl &= ~USB_DWC2_DEPCTL_MPS_MASK; dxepctl |= cfg->mps << USB_DWC2_DEPCTL_MPS_POS; /* Set endpoint type */ dxepctl &= ~USB_DWC2_DEPCTL_EPTYPE_MASK; switch (cfg->attributes & USB_EP_TRANSFER_TYPE_MASK) { case USB_EP_TYPE_BULK: dxepctl |= USB_DWC2_DEPCTL_EPTYPE_BULK << USB_DWC2_DEPCTL_EPTYPE_POS; dxepctl |= USB_DWC2_DEPCTL_SETD0PID; break; case USB_EP_TYPE_INTERRUPT: dxepctl |= USB_DWC2_DEPCTL_EPTYPE_INTERRUPT << USB_DWC2_DEPCTL_EPTYPE_POS; dxepctl |= USB_DWC2_DEPCTL_SETD0PID; break; case USB_EP_TYPE_ISO: dxepctl |= USB_DWC2_DEPCTL_EPTYPE_ISO << USB_DWC2_DEPCTL_EPTYPE_POS; break; default: return -EINVAL; } if (USB_EP_DIR_IS_IN(cfg->addr) && cfg->mps != 0U) { int ret = dwc2_set_dedicated_fifo(dev, cfg, &dxepctl); if (ret) { return ret; } } dxepctl |= USB_DWC2_DEPCTL_USBACTEP; /* Enable endpoint interrupts */ dwc2_set_epint(dev, cfg, true); sys_write32(dxepctl, dxepctl_reg); for (uint8_t i = 1U; i < priv->ineps; i++) { LOG_DBG("DIEPTXF%u %08x DIEPCTL%u %08x", i, sys_read32((mem_addr_t)&base->dieptxf[i - 1U]), i, dxepctl); } return 0; } static int dwc2_unset_dedicated_fifo(const struct device *dev, struct udc_ep_config *const cfg, uint32_t *const diepctl) { struct udc_dwc2_data *const priv = udc_get_private(dev); uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr); /* Clear FIFO number field */ *diepctl &= ~USB_DWC2_DEPCTL_TXFNUM_MASK; if (priv->dynfifosizing) { if (priv->txf_set & ~BIT_MASK(ep_idx)) { LOG_WRN("Some of the FIFOs higher than %u are set, %lx", ep_idx, priv->txf_set & ~BIT_MASK(ep_idx)); return 0; } dwc2_set_txf(dev, ep_idx - 1, 0, 0); } priv->txf_set &= ~BIT(ep_idx); return 0; } static void dwc2_wait_for_bit(const struct device *dev, mem_addr_t addr, uint32_t bit) { k_timepoint_t timeout = sys_timepoint_calc(K_MSEC(100)); /* This could potentially be converted to use proper synchronization * primitives instead of busy looping, 
but the number of interrupt bits * this function can be waiting for is rather high. * * Busy looping is most likely fine unless profiling shows otherwise. */ while (!(sys_read32(addr) & bit)) { if (dwc2_quirk_is_phy_clk_off(dev)) { /* No point in waiting, because the bit can only be set * when the PHY is actively clocked. */ return; } if (sys_timepoint_expired(timeout)) { LOG_ERR("Timeout waiting for bit 0x%08X at 0x%08X", bit, (uint32_t)addr); return; } } } /* Disabled IN endpoint means that device will send NAK (isochronous: ZLP) after * receiving IN token from host even if there is packet available in TxFIFO. * Disabled OUT endpoint means that device will NAK (isochronous: discard data) * incoming OUT data (or HS PING) even if there is space available in RxFIFO. * * Set stall parameter to true if caller wants to send STALL instead of NAK. */ static void udc_dwc2_ep_disable(const struct device *dev, struct udc_ep_config *const cfg, bool stall) { struct usb_dwc2_reg *const base = dwc2_get_base(dev); uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr); mem_addr_t dxepctl_reg; uint32_t dxepctl; dxepctl_reg = dwc2_get_dxepctl_reg(dev, cfg->addr); dxepctl = sys_read32(dxepctl_reg); if (dxepctl & USB_DWC2_DEPCTL_NAKSTS) { /* Endpoint already sends forced NAKs. STALL if necessary. */ if (stall) { dxepctl |= USB_DWC2_DEPCTL_STALL; sys_write32(dxepctl, dxepctl_reg); } return; } if (USB_EP_DIR_IS_OUT(cfg->addr)) { mem_addr_t dctl_reg, gintsts_reg, doepint_reg; uint32_t dctl; dctl_reg = (mem_addr_t)&base->dctl; gintsts_reg = (mem_addr_t)&base->gintsts; doepint_reg = (mem_addr_t)&base->out_ep[ep_idx].doepint; dctl = sys_read32(dctl_reg); if (sys_read32(gintsts_reg) & USB_DWC2_GINTSTS_GOUTNAKEFF) { LOG_ERR("GOUTNAKEFF already active"); } else { dctl |= USB_DWC2_DCTL_SGOUTNAK; sys_write32(dctl, dctl_reg); dctl &= ~USB_DWC2_DCTL_SGOUTNAK; } dwc2_wait_for_bit(dev, gintsts_reg, USB_DWC2_GINTSTS_GOUTNAKEFF); /* The application cannot disable control OUT endpoint 0. 
*/ if (ep_idx != 0) { dxepctl |= USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_EPDIS; } if (stall) { /* For OUT endpoints STALL is set instead of SNAK */ dxepctl |= USB_DWC2_DEPCTL_STALL; } else { dxepctl |= USB_DWC2_DEPCTL_SNAK; } sys_write32(dxepctl, dxepctl_reg); if (ep_idx != 0) { dwc2_wait_for_bit(dev, doepint_reg, USB_DWC2_DOEPINT_EPDISBLD); } /* Clear Endpoint Disabled interrupt */ sys_write32(USB_DWC2_DIEPINT_EPDISBLD, doepint_reg); dctl |= USB_DWC2_DCTL_CGOUTNAK; sys_write32(dctl, dctl_reg); } else { mem_addr_t diepint_reg; diepint_reg = (mem_addr_t)&base->in_ep[ep_idx].diepint; dxepctl |= USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_SNAK; if (stall) { /* For IN endpoints STALL is set in addition to SNAK */ dxepctl |= USB_DWC2_DEPCTL_STALL; } sys_write32(dxepctl, dxepctl_reg); dwc2_wait_for_bit(dev, diepint_reg, USB_DWC2_DIEPINT_INEPNAKEFF); dxepctl |= USB_DWC2_DEPCTL_EPENA | USB_DWC2_DEPCTL_EPDIS; sys_write32(dxepctl, dxepctl_reg); dwc2_wait_for_bit(dev, diepint_reg, USB_DWC2_DIEPINT_EPDISBLD); /* Clear Endpoint Disabled interrupt */ sys_write32(USB_DWC2_DIEPINT_EPDISBLD, diepint_reg); /* TODO: Read DIEPTSIZn here? Programming Guide suggest it to * let application know how many bytes of interrupted transfer * were transferred to the host. */ dwc2_flush_tx_fifo(dev, ep_idx); } udc_ep_set_busy(dev, cfg->addr, false); } /* Deactivated endpoint means that there will be a bus timeout when the host * tries to access the endpoint. 
*/ static int udc_dwc2_ep_deactivate(const struct device *dev, struct udc_ep_config *const cfg) { uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr); mem_addr_t dxepctl_reg; uint32_t dxepctl; dxepctl_reg = dwc2_get_dxepctl_reg(dev, cfg->addr); dxepctl = sys_read32(dxepctl_reg); if (dxepctl & USB_DWC2_DEPCTL_USBACTEP) { LOG_DBG("Disable ep 0x%02x DxEPCTL%u %x", cfg->addr, ep_idx, dxepctl); udc_dwc2_ep_disable(dev, cfg, false); dxepctl = sys_read32(dxepctl_reg); dxepctl &= ~USB_DWC2_DEPCTL_USBACTEP; } else { LOG_WRN("ep 0x%02x is not active DxEPCTL%u %x", cfg->addr, ep_idx, dxepctl); } if (USB_EP_DIR_IS_IN(cfg->addr) && cfg->mps != 0U && ep_idx != 0U) { dwc2_unset_dedicated_fifo(dev, cfg, &dxepctl); } sys_write32(dxepctl, dxepctl_reg); dwc2_set_epint(dev, cfg, false); if (cfg->addr == USB_CONTROL_EP_OUT) { struct net_buf *buf = udc_buf_get_all(dev, cfg->addr); /* Release the buffer allocated in dwc2_ctrl_feed_dout() */ if (buf) { net_buf_unref(buf); } } return 0; } static int udc_dwc2_ep_set_halt(const struct device *dev, struct udc_ep_config *const cfg) { uint8_t ep_idx = USB_EP_GET_IDX(cfg->addr); udc_dwc2_ep_disable(dev, cfg, true); LOG_DBG("Set halt ep 0x%02x", cfg->addr); if (ep_idx != 0) { cfg->stat.halted = true; } return 0; } static int udc_dwc2_ep_clear_halt(const struct device *dev, struct udc_ep_config *const cfg) { mem_addr_t dxepctl_reg = dwc2_get_dxepctl_reg(dev, cfg->addr); uint32_t dxepctl; struct dwc2_drv_event evt = { .ep = cfg->addr, .type = DWC2_DRV_EVT_XFER, }; dxepctl = sys_read32(dxepctl_reg); dxepctl &= ~USB_DWC2_DEPCTL_STALL; dxepctl |= USB_DWC2_DEPCTL_SETD0PID; sys_write32(dxepctl, dxepctl_reg); LOG_DBG("Clear halt ep 0x%02x", cfg->addr); cfg->stat.halted = false; /* Resume queued transfers if any */ if (udc_buf_peek(dev, cfg->addr)) { k_msgq_put(&drv_msgq, &evt, K_NO_WAIT); } return 0; } static int udc_dwc2_ep_enqueue(const struct device *dev, struct udc_ep_config *const cfg, struct net_buf *const buf) { struct dwc2_drv_event evt = { .ep = 
cfg->addr, .type = DWC2_DRV_EVT_XFER, }; LOG_DBG("%p enqueue %x %p", dev, cfg->addr, buf); udc_buf_put(cfg, buf); if (!cfg->stat.halted) { k_msgq_put(&drv_msgq, &evt, K_NO_WAIT); } return 0; } static int udc_dwc2_ep_dequeue(const struct device *dev, struct udc_ep_config *const cfg) { struct net_buf *buf; udc_dwc2_ep_disable(dev, cfg, false); buf = udc_buf_get_all(dev, cfg->addr); if (buf) { udc_submit_ep_event(dev, buf, -ECONNABORTED); } udc_ep_set_busy(dev, cfg->addr, false); LOG_DBG("dequeue ep 0x%02x", cfg->addr); return 0; } static int udc_dwc2_set_address(const struct device *dev, const uint8_t addr) { struct usb_dwc2_reg *const base = dwc2_get_base(dev); mem_addr_t dcfg_reg = (mem_addr_t)&base->dcfg; uint32_t dcfg; if (addr > (USB_DWC2_DCFG_DEVADDR_MASK >> USB_DWC2_DCFG_DEVADDR_POS)) { return -EINVAL; } dcfg = sys_read32(dcfg_reg); dcfg &= ~USB_DWC2_DCFG_DEVADDR_MASK; dcfg |= usb_dwc2_set_dcfg_devaddr(addr); sys_write32(dcfg, dcfg_reg); LOG_DBG("Set new address %u for %p", addr, dev); return 0; } static int udc_dwc2_test_mode(const struct device *dev, const uint8_t mode, const bool dryrun) { struct usb_dwc2_reg *const base = dwc2_get_base(dev); mem_addr_t dctl_reg = (mem_addr_t)&base->dctl; uint32_t dctl; if (mode == 0U || mode > USB_DWC2_DCTL_TSTCTL_TESTFE) { return -EINVAL; } dctl = sys_read32(dctl_reg); if (usb_dwc2_get_dctl_tstctl(dctl) != USB_DWC2_DCTL_TSTCTL_DISABLED) { return -EALREADY; } if (dryrun) { LOG_DBG("Test Mode %u supported", mode); return 0; } dctl |= usb_dwc2_set_dctl_tstctl(mode); sys_write32(dctl, dctl_reg); LOG_DBG("Enable Test Mode %u", mode); return 0; } static int udc_dwc2_host_wakeup(const struct device *dev) { LOG_DBG("Remote wakeup from %p", dev); return -ENOTSUP; } /* Return actual USB device speed */ static enum udc_bus_speed udc_dwc2_device_speed(const struct device *dev) { struct udc_dwc2_data *const priv = udc_get_private(dev); switch (priv->enumspd) { case USB_DWC2_DSTS_ENUMSPD_HS3060: return UDC_BUS_SPEED_HS; case 
USB_DWC2_DSTS_ENUMSPD_LS6: __ASSERT(false, "Low speed mode not supported"); __fallthrough; case USB_DWC2_DSTS_ENUMSPD_FS48: __fallthrough; case USB_DWC2_DSTS_ENUMSPD_FS3060: __fallthrough; default: return UDC_BUS_SPEED_FS; } } static int dwc2_core_soft_reset(const struct device *dev) { struct usb_dwc2_reg *const base = dwc2_get_base(dev); mem_addr_t grstctl_reg = (mem_addr_t)&base->grstctl; const unsigned int csr_timeout_us = 10000UL; uint32_t cnt = 0UL; /* Check AHB master idle state */ while (!(sys_read32(grstctl_reg) & USB_DWC2_GRSTCTL_AHBIDLE)) { k_busy_wait(1); if (++cnt > csr_timeout_us) { LOG_ERR("Wait for AHB idle timeout, GRSTCTL 0x%08x", sys_read32(grstctl_reg)); return -EIO; } } /* Apply Core Soft Reset */ sys_write32(USB_DWC2_GRSTCTL_CSFTRST, grstctl_reg); cnt = 0UL; do { if (++cnt > csr_timeout_us) { LOG_ERR("Wait for CSR done timeout, GRSTCTL 0x%08x", sys_read32(grstctl_reg)); return -EIO; } k_busy_wait(1); } while (sys_read32(grstctl_reg) & USB_DWC2_GRSTCTL_CSFTRST && !(sys_read32(grstctl_reg) & USB_DWC2_GRSTCTL_CSFTRSTDONE)); sys_clear_bits(grstctl_reg, USB_DWC2_GRSTCTL_CSFTRST | USB_DWC2_GRSTCTL_CSFTRSTDONE); return 0; } static int udc_dwc2_init_controller(const struct device *dev) { const struct udc_dwc2_config *const config = dev->config; struct udc_dwc2_data *const priv = udc_get_private(dev); struct usb_dwc2_reg *const base = config->base; mem_addr_t grxfsiz_reg = (mem_addr_t)&base->grxfsiz; mem_addr_t gahbcfg_reg = (mem_addr_t)&base->gahbcfg; mem_addr_t gusbcfg_reg = (mem_addr_t)&base->gusbcfg; mem_addr_t dcfg_reg = (mem_addr_t)&base->dcfg; uint32_t dcfg; uint32_t gusbcfg; uint32_t gahbcfg; uint32_t ghwcfg2; uint32_t ghwcfg3; uint32_t ghwcfg4; uint32_t val; int ret; ret = dwc2_core_soft_reset(dev); if (ret) { return ret; } priv->ghwcfg1 = sys_read32((mem_addr_t)&base->ghwcfg1); ghwcfg2 = sys_read32((mem_addr_t)&base->ghwcfg2); ghwcfg3 = sys_read32((mem_addr_t)&base->ghwcfg3); ghwcfg4 = sys_read32((mem_addr_t)&base->ghwcfg4); if (!(ghwcfg4 & 
USB_DWC2_GHWCFG4_DEDFIFOMODE)) { LOG_ERR("Only dedicated TX FIFO mode is supported"); return -ENOTSUP; } /* * Force device mode as we do no support role changes. * Wait 25ms for the change to take effect. */ gusbcfg = USB_DWC2_GUSBCFG_FORCEDEVMODE; sys_write32(gusbcfg, gusbcfg_reg); k_msleep(25); /* Buffer DMA is always supported in Internal DMA mode. * TODO: check and support descriptor DMA if available */ priv->bufferdma = (usb_dwc2_get_ghwcfg2_otgarch(ghwcfg2) == USB_DWC2_GHWCFG2_OTGARCH_INTERNALDMA); if (!IS_ENABLED(CONFIG_UDC_DWC2_DMA)) { priv->bufferdma = 0; } else if (priv->bufferdma) { LOG_WRN("Experimental DMA enabled"); } if (ghwcfg2 & USB_DWC2_GHWCFG2_DYNFIFOSIZING) { LOG_DBG("Dynamic FIFO Sizing is enabled"); priv->dynfifosizing = true; } /* Get the number or endpoints and IN endpoints we can use later */ priv->numdeveps = usb_dwc2_get_ghwcfg2_numdeveps(ghwcfg2) + 1U; priv->ineps = usb_dwc2_get_ghwcfg4_ineps(ghwcfg4) + 1U; LOG_DBG("Number of endpoints (NUMDEVEPS + 1) %u", priv->numdeveps); LOG_DBG("Number of IN endpoints (INEPS + 1) %u", priv->ineps); LOG_DBG("Number of periodic IN endpoints (NUMDEVPERIOEPS) %u", usb_dwc2_get_ghwcfg4_numdevperioeps(ghwcfg4)); LOG_DBG("Number of additional control endpoints (NUMCTLEPS) %u", usb_dwc2_get_ghwcfg4_numctleps(ghwcfg4)); LOG_DBG("OTG architecture (OTGARCH) %u, mode (OTGMODE) %u", usb_dwc2_get_ghwcfg2_otgarch(ghwcfg2), usb_dwc2_get_ghwcfg2_otgmode(ghwcfg2)); priv->dfifodepth = usb_dwc2_get_ghwcfg3_dfifodepth(ghwcfg3); LOG_DBG("DFIFO depth (DFIFODEPTH) %u bytes", priv->dfifodepth * 4); priv->max_pktcnt = GHWCFG3_PKTCOUNT(usb_dwc2_get_ghwcfg3_pktsizewidth(ghwcfg3)); priv->max_xfersize = GHWCFG3_XFERSIZE(usb_dwc2_get_ghwcfg3_xfersizewidth(ghwcfg3)); LOG_DBG("Max packet count %u, Max transfer size %u", priv->max_pktcnt, priv->max_xfersize); LOG_DBG("Vendor Control interface support enabled: %s", (ghwcfg3 & USB_DWC2_GHWCFG3_VNDCTLSUPT) ? 
"true" : "false"); LOG_DBG("PHY interface type: FSPHYTYPE %u, HSPHYTYPE %u, DATAWIDTH %u", usb_dwc2_get_ghwcfg2_fsphytype(ghwcfg2), usb_dwc2_get_ghwcfg2_hsphytype(ghwcfg2), usb_dwc2_get_ghwcfg4_phydatawidth(ghwcfg4)); LOG_DBG("LPM mode is %s", (ghwcfg3 & USB_DWC2_GHWCFG3_LPMMODE) ? "enabled" : "disabled"); /* Configure AHB, select Completer or DMA mode */ gahbcfg = sys_read32(gahbcfg_reg); if (priv->bufferdma) { gahbcfg |= USB_DWC2_GAHBCFG_DMAEN; } else { gahbcfg &= ~USB_DWC2_GAHBCFG_DMAEN; } sys_write32(gahbcfg, gahbcfg_reg); dcfg = sys_read32(dcfg_reg); dcfg &= ~USB_DWC2_DCFG_DESCDMA; /* Configure PHY and device speed */ dcfg &= ~USB_DWC2_DCFG_DEVSPD_MASK; switch (usb_dwc2_get_ghwcfg2_hsphytype(ghwcfg2)) { case USB_DWC2_GHWCFG2_HSPHYTYPE_UTMIPLUSULPI: __fallthrough; case USB_DWC2_GHWCFG2_HSPHYTYPE_ULPI: gusbcfg |= USB_DWC2_GUSBCFG_PHYSEL_USB20 | USB_DWC2_GUSBCFG_ULPI_UTMI_SEL_ULPI; dcfg |= USB_DWC2_DCFG_DEVSPD_USBHS20 << USB_DWC2_DCFG_DEVSPD_POS; break; case USB_DWC2_GHWCFG2_HSPHYTYPE_UTMIPLUS: gusbcfg |= USB_DWC2_GUSBCFG_PHYSEL_USB20 | USB_DWC2_GUSBCFG_ULPI_UTMI_SEL_UTMI; dcfg |= USB_DWC2_DCFG_DEVSPD_USBHS20 << USB_DWC2_DCFG_DEVSPD_POS; break; case USB_DWC2_GHWCFG2_HSPHYTYPE_NO_HS: __fallthrough; default: if (usb_dwc2_get_ghwcfg2_fsphytype(ghwcfg2) != USB_DWC2_GHWCFG2_FSPHYTYPE_NO_FS) { gusbcfg |= USB_DWC2_GUSBCFG_PHYSEL_USB11; } dcfg |= USB_DWC2_DCFG_DEVSPD_USBFS1148 << USB_DWC2_DCFG_DEVSPD_POS; } if (usb_dwc2_get_ghwcfg4_phydatawidth(ghwcfg4)) { gusbcfg |= USB_DWC2_GUSBCFG_PHYIF_16_BIT; } /* Update PHY configuration */ sys_write32(gusbcfg, gusbcfg_reg); sys_write32(dcfg, dcfg_reg); priv->outeps = 0U; for (uint8_t i = 0U; i < priv->numdeveps; i++) { uint32_t epdir = usb_dwc2_get_ghwcfg1_epdir(priv->ghwcfg1, i); if (epdir == USB_DWC2_GHWCFG1_EPDIR_OUT || epdir == USB_DWC2_GHWCFG1_EPDIR_BDIR) { mem_addr_t doepctl_reg = dwc2_get_dxepctl_reg(dev, i); sys_write32(USB_DWC2_DEPCTL_SNAK, doepctl_reg); priv->outeps++; } } LOG_DBG("Number of OUT endpoints %u", 
priv->outeps); /* Read and store all TxFIFO depths because Programmed FIFO Depths must * not exceed the power-on values. */ val = sys_read32((mem_addr_t)&base->gnptxfsiz); priv->max_txfifo_depth[0] = usb_dwc2_get_gnptxfsiz_nptxfdep(val); for (uint8_t i = 1; i < priv->ineps; i++) { priv->max_txfifo_depth[i] = dwc2_get_txfdep(dev, i - 1); } priv->rxfifo_depth = usb_dwc2_get_grxfsiz(sys_read32(grxfsiz_reg)); if (priv->dynfifosizing) { uint32_t gnptxfsiz; /* Driver does not dynamically resize RxFIFO so there is no need * to store reset value. Read the reset value and make sure that * the programmed value is not greater than what driver sets. */ priv->rxfifo_depth = MIN(priv->rxfifo_depth, UDC_DWC2_GRXFSIZ_DEFAULT + priv->outeps * 2U); sys_write32(usb_dwc2_set_grxfsiz(priv->rxfifo_depth), grxfsiz_reg); /* Set TxFIFO 0 depth */ val = MAX(UDC_DWC2_FIFO0_DEPTH, priv->max_txfifo_depth[0]); gnptxfsiz = usb_dwc2_set_gnptxfsiz_nptxfdep(val) | usb_dwc2_set_gnptxfsiz_nptxfstaddr(priv->rxfifo_depth); sys_write32(gnptxfsiz, (mem_addr_t)&base->gnptxfsiz); } LOG_DBG("RX FIFO size %u bytes", priv->rxfifo_depth * 4); for (uint8_t i = 1U; i < priv->ineps; i++) { LOG_DBG("TX FIFO%u depth %u addr %u", i, priv->max_txfifo_depth[i], dwc2_get_txfaddr(dev, i)); } if (udc_ep_enable_internal(dev, USB_CONTROL_EP_OUT, USB_EP_TYPE_CONTROL, 64, 0)) { LOG_ERR("Failed to enable control endpoint"); return -EIO; } if (udc_ep_enable_internal(dev, USB_CONTROL_EP_IN, USB_EP_TYPE_CONTROL, 64, 0)) { LOG_ERR("Failed to enable control endpoint"); return -EIO; } /* Unmask interrupts */ sys_write32(USB_DWC2_GINTSTS_OEPINT | USB_DWC2_GINTSTS_IEPINT | USB_DWC2_GINTSTS_ENUMDONE | USB_DWC2_GINTSTS_USBRST | USB_DWC2_GINTSTS_WKUPINT | USB_DWC2_GINTSTS_USBSUSP | USB_DWC2_GINTSTS_SOF, (mem_addr_t)&base->gintmsk); return 0; } static int udc_dwc2_enable(const struct device *dev) { const struct udc_dwc2_config *const config = dev->config; struct usb_dwc2_reg *const base = dwc2_get_base(dev); int err; err = 
dwc2_quirk_pre_enable(dev); if (err) { LOG_ERR("Quirk pre enable failed %d", err); return err; } err = udc_dwc2_init_controller(dev); if (err) { return err; } err = dwc2_quirk_post_enable(dev); if (err) { LOG_ERR("Quirk post enable failed %d", err); return err; } /* Enable global interrupt */ sys_set_bits((mem_addr_t)&base->gahbcfg, USB_DWC2_GAHBCFG_GLBINTRMASK); config->irq_enable_func(dev); /* Disable soft disconnect */ sys_clear_bits((mem_addr_t)&base->dctl, USB_DWC2_DCTL_SFTDISCON); LOG_DBG("Enable device %p", base); return 0; } static int udc_dwc2_disable(const struct device *dev) { const struct udc_dwc2_config *const config = dev->config; struct usb_dwc2_reg *const base = dwc2_get_base(dev); mem_addr_t dctl_reg = (mem_addr_t)&base->dctl; int err; /* Enable soft disconnect */ sys_set_bits(dctl_reg, USB_DWC2_DCTL_SFTDISCON); LOG_DBG("Disable device %p", dev); if (udc_ep_disable_internal(dev, USB_CONTROL_EP_OUT)) { LOG_DBG("Failed to disable control endpoint"); return -EIO; } if (udc_ep_disable_internal(dev, USB_CONTROL_EP_IN)) { LOG_DBG("Failed to disable control endpoint"); return -EIO; } config->irq_disable_func(dev); sys_clear_bits((mem_addr_t)&base->gahbcfg, USB_DWC2_GAHBCFG_GLBINTRMASK); err = dwc2_quirk_disable(dev); if (err) { LOG_ERR("Quirk disable failed %d", err); return err; } return 0; } static int udc_dwc2_init(const struct device *dev) { int ret; ret = dwc2_quirk_init(dev); if (ret) { LOG_ERR("Quirk init failed %d", ret); return ret; } return dwc2_init_pinctrl(dev); } static int udc_dwc2_shutdown(const struct device *dev) { int ret; ret = dwc2_quirk_shutdown(dev); if (ret) { LOG_ERR("Quirk shutdown failed %d", ret); return ret; } return 0; } static int dwc2_driver_preinit(const struct device *dev) { const struct udc_dwc2_config *config = dev->config; struct udc_data *data = dev->data; uint16_t mps = 1023; uint32_t numdeveps; uint32_t ineps; int err; k_mutex_init(&data->mutex); data->caps.addr_before_status = true; data->caps.mps0 = UDC_MPS0_64; 
(void)dwc2_quirk_caps(dev); if (data->caps.hs) { mps = 1024; } /* * At this point, we cannot or do not want to access the hardware * registers to get GHWCFGn values. For now, we will use devicetree to * get GHWCFGn values and use them to determine the number and type of * configured endpoints in the hardware. This can be considered a * workaround, and we may change the upper layer internals to avoid it * in the future. */ ineps = usb_dwc2_get_ghwcfg4_ineps(config->ghwcfg4) + 1U; numdeveps = usb_dwc2_get_ghwcfg2_numdeveps(config->ghwcfg2) + 1U; LOG_DBG("Number of endpoints (NUMDEVEPS + 1) %u", numdeveps); LOG_DBG("Number of IN endpoints (INEPS + 1) %u", ineps); for (uint32_t i = 0, n = 0; i < numdeveps; i++) { uint32_t epdir = usb_dwc2_get_ghwcfg1_epdir(config->ghwcfg1, i); if (epdir != USB_DWC2_GHWCFG1_EPDIR_OUT && epdir != USB_DWC2_GHWCFG1_EPDIR_BDIR) { continue; } if (i == 0) { config->ep_cfg_out[n].caps.control = 1; config->ep_cfg_out[n].caps.mps = 64; } else { config->ep_cfg_out[n].caps.bulk = 1; config->ep_cfg_out[n].caps.interrupt = 1; config->ep_cfg_out[n].caps.iso = 1; config->ep_cfg_out[n].caps.mps = mps; } config->ep_cfg_out[n].caps.out = 1; config->ep_cfg_out[n].addr = USB_EP_DIR_OUT | i; LOG_DBG("Register ep 0x%02x (%u)", i, n); err = udc_register_ep(dev, &config->ep_cfg_out[n]); if (err != 0) { LOG_ERR("Failed to register endpoint"); return err; } n++; /* Also check the number of desired OUT endpoints in devicetree. 
*/ if (n >= config->num_out_eps) { break; } } for (uint32_t i = 0, n = 0; i < numdeveps; i++) { uint32_t epdir = usb_dwc2_get_ghwcfg1_epdir(config->ghwcfg1, i); if (epdir != USB_DWC2_GHWCFG1_EPDIR_IN && epdir != USB_DWC2_GHWCFG1_EPDIR_BDIR) { continue; } if (i == 0) { config->ep_cfg_in[n].caps.control = 1; config->ep_cfg_in[n].caps.mps = 64; } else { config->ep_cfg_in[n].caps.bulk = 1; config->ep_cfg_in[n].caps.interrupt = 1; config->ep_cfg_in[n].caps.iso = 1; config->ep_cfg_in[n].caps.mps = mps; } config->ep_cfg_in[n].caps.in = 1; config->ep_cfg_in[n].addr = USB_EP_DIR_IN | i; LOG_DBG("Register ep 0x%02x (%u)", USB_EP_DIR_IN | i, n); err = udc_register_ep(dev, &config->ep_cfg_in[n]); if (err != 0) { LOG_ERR("Failed to register endpoint"); return err; } n++; /* Also check the number of desired IN endpoints in devicetree. */ if (n >= MIN(ineps, config->num_in_eps)) { break; } } config->make_thread(dev); return 0; } static int udc_dwc2_lock(const struct device *dev) { return udc_lock_internal(dev, K_FOREVER); } static int udc_dwc2_unlock(const struct device *dev) { return udc_unlock_internal(dev); } static const struct udc_api udc_dwc2_api = { .lock = udc_dwc2_lock, .unlock = udc_dwc2_unlock, .device_speed = udc_dwc2_device_speed, .init = udc_dwc2_init, .enable = udc_dwc2_enable, .disable = udc_dwc2_disable, .shutdown = udc_dwc2_shutdown, .set_address = udc_dwc2_set_address, .test_mode = udc_dwc2_test_mode, .host_wakeup = udc_dwc2_host_wakeup, .ep_enable = udc_dwc2_ep_activate, .ep_disable = udc_dwc2_ep_deactivate, .ep_set_halt = udc_dwc2_ep_set_halt, .ep_clear_halt = udc_dwc2_ep_clear_halt, .ep_enqueue = udc_dwc2_ep_enqueue, .ep_dequeue = udc_dwc2_ep_dequeue, }; #define DT_DRV_COMPAT snps_dwc2 #define UDC_DWC2_VENDOR_QUIRK_GET(n) \ COND_CODE_1(DT_NODE_VENDOR_HAS_IDX(DT_DRV_INST(n), 1), \ (&dwc2_vendor_quirks_##n), \ (NULL)) #define UDC_DWC2_DT_INST_REG_ADDR(n) \ COND_CODE_1(DT_NUM_REGS(DT_DRV_INST(n)), (DT_INST_REG_ADDR(n)), \ (DT_INST_REG_ADDR_BY_NAME(n, core))) 
#define UDC_DWC2_PINCTRL_DT_INST_DEFINE(n) \ COND_CODE_1(DT_INST_PINCTRL_HAS_NAME(n, default), \ (PINCTRL_DT_INST_DEFINE(n)), ()) #define UDC_DWC2_PINCTRL_DT_INST_DEV_CONFIG_GET(n) \ COND_CODE_1(DT_INST_PINCTRL_HAS_NAME(n, default), \ ((void *)PINCTRL_DT_INST_DEV_CONFIG_GET(n)), (NULL)) #define UDC_DWC2_IRQ_FLAGS_TYPE0(n) 0 #define UDC_DWC2_IRQ_FLAGS_TYPE1(n) DT_INST_IRQ(n, type) #define DW_IRQ_FLAGS(n) \ _CONCAT(UDC_DWC2_IRQ_FLAGS_TYPE, DT_INST_IRQ_HAS_CELL(n, type))(n) /* * A UDC driver should always be implemented as a multi-instance * driver, even if your platform does not require it. */ #define UDC_DWC2_DEVICE_DEFINE(n) \ UDC_DWC2_PINCTRL_DT_INST_DEFINE(n); \ \ K_THREAD_STACK_DEFINE(udc_dwc2_stack_##n, CONFIG_UDC_DWC2_STACK_SIZE); \ \ static void udc_dwc2_thread_##n(void *dev, void *arg1, void *arg2) \ { \ while (true) { \ dwc2_thread_handler(dev); \ } \ } \ \ static void udc_dwc2_make_thread_##n(const struct device *dev) \ { \ struct udc_dwc2_data *priv = udc_get_private(dev); \ \ k_thread_create(&priv->thread_data, \ udc_dwc2_stack_##n, \ K_THREAD_STACK_SIZEOF(udc_dwc2_stack_##n), \ udc_dwc2_thread_##n, \ (void *)dev, NULL, NULL, \ K_PRIO_COOP(CONFIG_UDC_DWC2_THREAD_PRIORITY), \ K_ESSENTIAL, \ K_NO_WAIT); \ k_thread_name_set(&priv->thread_data, dev->name); \ } \ \ static void udc_dwc2_irq_enable_func_##n(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), \ DT_INST_IRQ(n, priority), \ udc_dwc2_isr_handler, \ DEVICE_DT_INST_GET(n), \ DW_IRQ_FLAGS(n)); \ \ irq_enable(DT_INST_IRQN(n)); \ } \ \ static void udc_dwc2_irq_disable_func_##n(const struct device *dev) \ { \ irq_disable(DT_INST_IRQN(n)); \ } \ \ static struct udc_ep_config ep_cfg_out[DT_INST_PROP(n, num_out_eps)]; \ static struct udc_ep_config ep_cfg_in[DT_INST_PROP(n, num_in_eps)]; \ \ static const struct udc_dwc2_config udc_dwc2_config_##n = { \ .num_out_eps = DT_INST_PROP(n, num_out_eps), \ .num_in_eps = DT_INST_PROP(n, num_in_eps), \ .ep_cfg_in = ep_cfg_in, \ .ep_cfg_out = ep_cfg_out, \ 
.make_thread = udc_dwc2_make_thread_##n, \ .base = (struct usb_dwc2_reg *)UDC_DWC2_DT_INST_REG_ADDR(n), \ .pcfg = UDC_DWC2_PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .irq_enable_func = udc_dwc2_irq_enable_func_##n, \ .irq_disable_func = udc_dwc2_irq_disable_func_##n, \ .quirks = UDC_DWC2_VENDOR_QUIRK_GET(n), \ .ghwcfg1 = DT_INST_PROP(n, ghwcfg1), \ .ghwcfg2 = DT_INST_PROP(n, ghwcfg2), \ .ghwcfg4 = DT_INST_PROP(n, ghwcfg4), \ }; \ \ static struct udc_dwc2_data udc_priv_##n = { \ }; \ \ static struct udc_data udc_data_##n = { \ .mutex = Z_MUTEX_INITIALIZER(udc_data_##n.mutex), \ .priv = &udc_priv_##n, \ }; \ \ DEVICE_DT_INST_DEFINE(n, dwc2_driver_preinit, NULL, \ &udc_data_##n, &udc_dwc2_config_##n, \ POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \ &udc_dwc2_api); DT_INST_FOREACH_STATUS_OKAY(UDC_DWC2_DEVICE_DEFINE) ```
/content/code_sandbox/drivers/usb/udc/udc_dwc2.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
20,623
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_USB_UDC_DWC2_VENDOR_QUIRKS_H #define ZEPHYR_DRIVERS_USB_UDC_DWC2_VENDOR_QUIRKS_H #include "udc_dwc2.h" #include <stdint.h> #include <zephyr/device.h> #include <zephyr/drivers/usb/udc.h> #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32f4_fsotg) #include <zephyr/sys/sys_io.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> #include <usb_dwc2_hw.h> struct usb_dw_stm32_clk { const struct device *const dev; const struct stm32_pclken *const pclken; size_t pclken_len; }; #define DT_DRV_COMPAT snps_dwc2 static inline int stm32f4_fsotg_enable_clk(const struct usb_dw_stm32_clk *const clk) { int ret; if (!device_is_ready(clk->dev)) { return -ENODEV; } if (clk->pclken_len > 1) { uint32_t clk_rate; ret = clock_control_configure(clk->dev, (void *)&clk->pclken[1], NULL); if (ret) { return ret; } ret = clock_control_get_rate(clk->dev, (void *)&clk->pclken[1], &clk_rate); if (ret) { return ret; } if (clk_rate != MHZ(48)) { return -ENOTSUP; } } return clock_control_on(clk->dev, (void *)&clk->pclken[0]); } static inline int stm32f4_fsotg_enable_phy(const struct device *dev) { const struct udc_dwc2_config *const config = dev->config; mem_addr_t ggpio_reg = (mem_addr_t)&config->base->ggpio; sys_set_bits(ggpio_reg, USB_DWC2_GGPIO_STM32_PWRDWN | USB_DWC2_GGPIO_STM32_VBDEN); return 0; } static inline int stm32f4_fsotg_disable_phy(const struct device *dev) { const struct udc_dwc2_config *const config = dev->config; mem_addr_t ggpio_reg = (mem_addr_t)&config->base->ggpio; sys_clear_bits(ggpio_reg, USB_DWC2_GGPIO_STM32_PWRDWN | USB_DWC2_GGPIO_STM32_VBDEN); return 0; } #define QUIRK_STM32F4_FSOTG_DEFINE(n) \ static const struct stm32_pclken pclken_##n[] = STM32_DT_INST_CLOCKS(n);\ \ static const struct usb_dw_stm32_clk stm32f4_clk_##n = { \ .dev = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE), \ .pclken = pclken_##n, \ .pclken_len = DT_INST_NUM_CLOCKS(n), \ }; \ \ static int stm32f4_fsotg_enable_clk_##n(const struct device *dev) \ { \ return 
stm32f4_fsotg_enable_clk(&stm32f4_clk_##n); \ } \ \ struct dwc2_vendor_quirks dwc2_vendor_quirks_##n = { \ .pre_enable = stm32f4_fsotg_enable_clk_##n, \ .post_enable = stm32f4_fsotg_enable_phy, \ .disable = stm32f4_fsotg_disable_phy, \ .irq_clear = NULL, \ }; DT_INST_FOREACH_STATUS_OKAY(QUIRK_STM32F4_FSOTG_DEFINE) #undef DT_DRV_COMPAT #endif /*DT_HAS_COMPAT_STATUS_OKAY(st_stm32f4_fsotg) */ #if DT_HAS_COMPAT_STATUS_OKAY(nordic_nrf_usbhs) #define DT_DRV_COMPAT snps_dwc2 #include <nrfs_backend_ipc_service.h> #include <nrfs_usb.h> #define USBHS_DT_WRAPPER_REG_ADDR(n) UINT_TO_POINTER(DT_INST_REG_ADDR_BY_NAME(n, wrapper)) /* * On USBHS, we cannot access the DWC2 register until VBUS is detected and * valid. If the user tries to force usbd_enable() and the corresponding * udc_enable() without a "VBUS ready" notification, the event wait will block * until a valid VBUS signal is detected. */ static K_EVENT_DEFINE(usbhs_events); #define USBHS_VBUS_READY BIT(0) static void usbhs_vbus_handler(nrfs_usb_evt_t const *p_evt, void *const context) { const struct device *dev = context; switch (p_evt->type) { case NRFS_USB_EVT_VBUS_STATUS_CHANGE: LOG_DBG("USBHS new status, pll_ok = %d vreg_ok = %d vbus_detected = %d", p_evt->usbhspll_ok, p_evt->vregusb_ok, p_evt->vbus_detected); if (p_evt->usbhspll_ok && p_evt->vregusb_ok && p_evt->vbus_detected) { k_event_post(&usbhs_events, USBHS_VBUS_READY); udc_submit_event(dev, UDC_EVT_VBUS_READY, 0); } else { k_event_set_masked(&usbhs_events, 0, USBHS_VBUS_READY); udc_submit_event(dev, UDC_EVT_VBUS_REMOVED, 0); } break; case NRFS_USB_EVT_REJECT: LOG_ERR("Request rejected"); break; default: LOG_ERR("Unknown event type 0x%x", p_evt->type); break; } } static inline int usbhs_enable_nrfs_service(const struct device *dev) { nrfs_err_t nrfs_err; int err; err = nrfs_backend_wait_for_connection(K_MSEC(1000)); if (err) { LOG_INF("NRFS backend connection timeout"); return err; } nrfs_err = nrfs_usb_init(usbhs_vbus_handler); if (nrfs_err != NRFS_SUCCESS) { 
LOG_ERR("Failed to init NRFS VBUS handler: %d", nrfs_err); return -EIO; } nrfs_err = nrfs_usb_enable_request((void *)dev); if (nrfs_err != NRFS_SUCCESS) { LOG_ERR("Failed to enable NRFS VBUS service: %d", nrfs_err); return -EIO; } return 0; } static inline int usbhs_enable_core(const struct device *dev) { NRF_USBHS_Type *wrapper = USBHS_DT_WRAPPER_REG_ADDR(0); if (!k_event_wait(&usbhs_events, USBHS_VBUS_READY, false, K_NO_WAIT)) { LOG_WRN("VBUS is not ready, block udc_enable()"); k_event_wait(&usbhs_events, USBHS_VBUS_READY, false, K_FOREVER); } wrapper->ENABLE = USBHS_ENABLE_PHY_Msk | USBHS_ENABLE_CORE_Msk; wrapper->TASKS_START = 1UL; /* Enable interrupts */ wrapper->INTENSET = 1UL; return 0; } static inline int usbhs_disable_core(const struct device *dev) { NRF_USBHS_Type *wrapper = USBHS_DT_WRAPPER_REG_ADDR(0); /* Disable interrupts */ wrapper->INTENCLR = 1UL; wrapper->ENABLE = 0UL; wrapper->TASKS_START = 1UL; return 0; } static inline int usbhs_disable_nrfs_service(const struct device *dev) { nrfs_err_t nrfs_err; nrfs_err = nrfs_usb_disable_request((void *)dev); if (nrfs_err != NRFS_SUCCESS) { LOG_ERR("Failed to disable NRFS VBUS service: %d", nrfs_err); return -EIO; } nrfs_usb_uninit(); return 0; } static inline int usbhs_irq_clear(const struct device *dev) { NRF_USBHS_Type *wrapper = USBHS_DT_WRAPPER_REG_ADDR(0); wrapper->EVENTS_CORE = 0UL; return 0; } static inline int usbhs_init_caps(const struct device *dev) { struct udc_data *data = dev->data; data->caps.can_detect_vbus = true; data->caps.hs = true; return 0; } static inline int usbhs_is_phy_clk_off(const struct device *dev) { return !k_event_test(&usbhs_events, USBHS_VBUS_READY); } #define QUIRK_NRF_USBHS_DEFINE(n) \ struct dwc2_vendor_quirks dwc2_vendor_quirks_##n = { \ .init = usbhs_enable_nrfs_service, \ .pre_enable = usbhs_enable_core, \ .disable = usbhs_disable_core, \ .shutdown = usbhs_disable_nrfs_service, \ .irq_clear = usbhs_irq_clear, \ .caps = usbhs_init_caps, \ .is_phy_clk_off = 
usbhs_is_phy_clk_off, \ }; DT_INST_FOREACH_STATUS_OKAY(QUIRK_NRF_USBHS_DEFINE) #undef DT_DRV_COMPAT #endif /*DT_HAS_COMPAT_STATUS_OKAY(nordic_nrf_usbhs) */ /* Add next vendor quirks definition above this line */ #endif /* ZEPHYR_DRIVERS_USB_UDC_DWC2_VENDOR_QUIRKS_H */ ```
/content/code_sandbox/drivers/usb/udc/udc_dwc2_vendor_quirks.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,074
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/usb/usb_ch9.h> #include "uhc_common.h" #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(uhc, CONFIG_UHC_DRIVER_LOG_LEVEL); K_MEM_SLAB_DEFINE_STATIC(uhc_xfer_pool, sizeof(struct uhc_transfer), CONFIG_UHC_XFER_COUNT, sizeof(void *)); NET_BUF_POOL_VAR_DEFINE(uhc_ep_pool, CONFIG_UHC_BUF_COUNT, CONFIG_UHC_BUF_POOL_SIZE, 0, NULL); int uhc_submit_event(const struct device *dev, const enum uhc_event_type type, const int status) { struct uhc_data *data = dev->data; struct uhc_event drv_evt = { .type = type, .status = status, .dev = dev, }; if (!uhc_is_initialized(dev)) { return -EPERM; } return data->event_cb(dev, &drv_evt); } void uhc_xfer_return(const struct device *dev, struct uhc_transfer *const xfer, const int err) { struct uhc_data *data = dev->data; struct uhc_event drv_evt = { .type = UHC_EVT_EP_REQUEST, .xfer = xfer, .dev = dev, }; sys_dlist_remove(&xfer->node); xfer->queued = 0; xfer->err = err; data->event_cb(dev, &drv_evt); } struct uhc_transfer *uhc_xfer_get_next(const struct device *dev) { struct uhc_data *data = dev->data; struct uhc_transfer *xfer; sys_dnode_t *node; /* Draft, WIP */ node = sys_dlist_peek_head(&data->ctrl_xfers); if (node == NULL) { node = sys_dlist_peek_head(&data->bulk_xfers); } return (node == NULL) ? 
NULL : SYS_DLIST_CONTAINER(node, xfer, node); } int uhc_xfer_append(const struct device *dev, struct uhc_transfer *const xfer) { struct uhc_data *data = dev->data; sys_dlist_append(&data->ctrl_xfers, &xfer->node); return 0; } struct net_buf *uhc_xfer_buf_alloc(const struct device *dev, const size_t size) { return net_buf_alloc_len(&uhc_ep_pool, size, K_NO_WAIT); } void uhc_xfer_buf_free(const struct device *dev, struct net_buf *const buf) { net_buf_unref(buf); } struct uhc_transfer *uhc_xfer_alloc(const struct device *dev, const uint8_t addr, const uint8_t ep, const uint8_t attrib, const uint16_t mps, const uint16_t timeout, void *const udev, void *const cb) { const struct uhc_api *api = dev->api; struct uhc_transfer *xfer = NULL; api->lock(dev); if (!uhc_is_initialized(dev)) { goto xfer_alloc_error; } LOG_DBG("Allocate xfer, ep 0x%02x attrib 0x%02x cb %p", ep, attrib, cb); if (k_mem_slab_alloc(&uhc_xfer_pool, (void **)&xfer, K_NO_WAIT)) { LOG_ERR("Failed to allocate transfer"); goto xfer_alloc_error; } memset(xfer, 0, sizeof(struct uhc_transfer)); xfer->addr = addr; xfer->ep = ep; xfer->attrib = attrib; xfer->mps = mps; xfer->timeout = timeout; xfer->udev = udev; xfer->cb = cb; xfer_alloc_error: api->unlock(dev); return xfer; } struct uhc_transfer *uhc_xfer_alloc_with_buf(const struct device *dev, const uint8_t addr, const uint8_t ep, const uint8_t attrib, const uint16_t mps, const uint16_t timeout, void *const udev, void *const cb, size_t size) { struct uhc_transfer *xfer; struct net_buf *buf; buf = uhc_xfer_buf_alloc(dev, size); if (buf == NULL) { return NULL; } xfer = uhc_xfer_alloc(dev, addr, ep, attrib, mps, timeout, udev, cb); if (xfer == NULL) { net_buf_unref(buf); return NULL; } xfer->buf = buf; return xfer; } int uhc_xfer_free(const struct device *dev, struct uhc_transfer *const xfer) { const struct uhc_api *api = dev->api; int ret = 0; api->lock(dev); if (xfer->queued) { ret = -EBUSY; LOG_ERR("Transfer is still queued"); goto xfer_free_error; } 
k_mem_slab_free(&uhc_xfer_pool, (void *)xfer); xfer_free_error: api->unlock(dev); return ret; } int uhc_xfer_buf_add(const struct device *dev, struct uhc_transfer *const xfer, struct net_buf *buf) { const struct uhc_api *api = dev->api; int ret = 0; api->lock(dev); if (xfer->queued) { ret = -EBUSY; } else { xfer->buf = buf; } api->unlock(dev); return ret; } int uhc_ep_enqueue(const struct device *dev, struct uhc_transfer *const xfer) { const struct uhc_api *api = dev->api; int ret; api->lock(dev); if (!uhc_is_initialized(dev)) { ret = -EPERM; goto ep_enqueue_error; } xfer->queued = 1; ret = api->ep_enqueue(dev, xfer); if (ret) { xfer->queued = 0; } ep_enqueue_error: api->unlock(dev); return ret; } int uhc_ep_dequeue(const struct device *dev, struct uhc_transfer *const xfer) { const struct uhc_api *api = dev->api; int ret; api->lock(dev); if (!uhc_is_initialized(dev)) { ret = -EPERM; goto ep_dequeue_error; } ret = api->ep_dequeue(dev, xfer); xfer->queued = 0; ep_dequeue_error: api->unlock(dev); return ret; } int uhc_enable(const struct device *dev) { const struct uhc_api *api = dev->api; struct uhc_data *data = dev->data; int ret; api->lock(dev); if (!uhc_is_initialized(dev)) { ret = -EPERM; goto uhc_enable_error; } if (uhc_is_enabled(dev)) { ret = -EALREADY; goto uhc_enable_error; } ret = api->enable(dev); if (ret == 0) { atomic_set_bit(&data->status, UHC_STATUS_ENABLED); } uhc_enable_error: api->unlock(dev); return ret; } int uhc_disable(const struct device *dev) { const struct uhc_api *api = dev->api; struct uhc_data *data = dev->data; int ret; api->lock(dev); if (!uhc_is_enabled(dev)) { ret = -EALREADY; goto uhc_disable_error; } ret = api->disable(dev); atomic_clear_bit(&data->status, UHC_STATUS_ENABLED); uhc_disable_error: api->unlock(dev); return ret; } int uhc_init(const struct device *dev, uhc_event_cb_t event_cb) { const struct uhc_api *api = dev->api; struct uhc_data *data = dev->data; int ret; if (event_cb == NULL) { return -EINVAL; } api->lock(dev); if 
(uhc_is_initialized(dev)) { ret = -EALREADY; goto uhc_init_error; } data->event_cb = event_cb; sys_dlist_init(&data->ctrl_xfers); sys_dlist_init(&data->bulk_xfers); ret = api->init(dev); if (ret == 0) { atomic_set_bit(&data->status, UHC_STATUS_INITIALIZED); } uhc_init_error: api->unlock(dev); return ret; } int uhc_shutdown(const struct device *dev) { const struct uhc_api *api = dev->api; struct uhc_data *data = dev->data; int ret; api->lock(dev); if (uhc_is_enabled(dev)) { ret = -EBUSY; goto uhc_shutdown_error; } if (!uhc_is_initialized(dev)) { ret = -EALREADY; goto uhc_shutdown_error; } ret = api->shutdown(dev); atomic_clear_bit(&data->status, UHC_STATUS_INITIALIZED); uhc_shutdown_error: api->unlock(dev); return ret; } ```
/content/code_sandbox/drivers/usb/uhc/uhc_common.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,943
```unknown config UHC_VIRTUAL bool "Virtual USB host controller driver" select UVB default y depends on DT_HAS_ZEPHYR_UHC_VIRTUAL_ENABLED help Virtual USB host controller driver. ```
/content/code_sandbox/drivers/usb/uhc/Kconfig.virtual
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
45
```c /* * */ /** * @file udc_nrf.c * @brief Nordic USB device controller (UDC) driver * * The driver implements the interface between the nRF USBD peripheral * driver from nrfx package and UDC API. */ #include <string.h> #include <stdio.h> #include <soc.h> #include <zephyr/kernel.h> #include <zephyr/drivers/usb/udc.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/nrf_clock_control.h> #include <zephyr/dt-bindings/regulator/nrf5x.h> #include <nrf_usbd_common.h> #include <hal/nrf_usbd.h> #include <nrfx_power.h> #include "udc_common.h" #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(udc_nrf, CONFIG_UDC_DRIVER_LOG_LEVEL); /* * There is no real advantage to change control endpoint size * but we can use it for testing UDC driver API and higher layers. */ #define UDC_NRF_MPS0 UDC_MPS0_64 #define UDC_NRF_EP0_SIZE 64 enum udc_nrf_event_type { /* An event generated by the HAL driver */ UDC_NRF_EVT_HAL, /* Shim driver event to trigger next transfer */ UDC_NRF_EVT_XFER, /* Let controller perform status stage */ UDC_NRF_EVT_STATUS_IN, }; struct udc_nrf_evt { enum udc_nrf_event_type type; union { nrf_usbd_common_evt_t hal_evt; uint8_t ep; }; }; K_MSGQ_DEFINE(drv_msgq, sizeof(struct udc_nrf_evt), CONFIG_UDC_NRF_MAX_QMESSAGES, sizeof(uint32_t)); static K_KERNEL_STACK_DEFINE(drv_stack, CONFIG_UDC_NRF_THREAD_STACK_SIZE); static struct k_thread drv_stack_data; /* USB device controller access from devicetree */ #define DT_DRV_COMPAT nordic_nrf_usbd #define CFG_EPIN_CNT DT_INST_PROP(0, num_in_endpoints) #define CFG_EPOUT_CNT DT_INST_PROP(0, num_out_endpoints) #define CFG_EP_ISOIN_CNT DT_INST_PROP(0, num_isoin_endpoints) #define CFG_EP_ISOOUT_CNT DT_INST_PROP(0, num_isoout_endpoints) static struct udc_ep_config ep_cfg_out[CFG_EPOUT_CNT + CFG_EP_ISOOUT_CNT + 1]; static struct udc_ep_config ep_cfg_in[CFG_EPIN_CNT + CFG_EP_ISOIN_CNT + 1]; static bool udc_nrf_setup_rcvd; const static struct device *udc_nrf_dev; struct udc_nrf_config { 
clock_control_subsys_t clock; nrfx_power_config_t pwr; nrfx_power_usbevt_config_t evt; }; static struct onoff_manager *hfxo_mgr; static struct onoff_client hfxo_cli; static void udc_nrf_clear_control_out(const struct device *dev) { if (nrf_usbd_common_last_setup_dir_get() == USB_CONTROL_EP_OUT && udc_nrf_setup_rcvd) { /* Allow data chunk on EP0 OUT */ nrf_usbd_common_setup_data_clear(); udc_nrf_setup_rcvd = false; LOG_INF("Allow data OUT"); } } static void udc_event_xfer_in_next(const struct device *dev, const uint8_t ep) { struct net_buf *buf; if (udc_ep_is_busy(dev, ep)) { return; } buf = udc_buf_peek(dev, ep); if (buf != NULL) { nrf_usbd_common_transfer_t xfer = { .p_data = {.tx = buf->data}, .size = buf->len, .flags = udc_ep_buf_has_zlp(buf) ? NRF_USBD_COMMON_TRANSFER_ZLP_FLAG : 0, }; nrfx_err_t err; err = nrf_usbd_common_ep_transfer(ep, &xfer); if (err != NRFX_SUCCESS) { LOG_ERR("ep 0x%02x nrfx error: %x", ep, err); /* REVISE: remove from endpoint queue? ASSERT? */ udc_submit_ep_event(dev, buf, -ECONNREFUSED); } else { udc_ep_set_busy(dev, ep, true); } } } static void udc_event_xfer_ctrl_in(const struct device *dev, struct net_buf *const buf) { if (udc_ctrl_stage_is_status_in(dev) || udc_ctrl_stage_is_no_data(dev)) { /* Status stage finished, notify upper layer */ udc_ctrl_submit_status(dev, buf); } if (udc_ctrl_stage_is_data_in(dev)) { /* * s-in-[status] finished, release buffer. * Since the controller supports auto-status we cannot use * if (udc_ctrl_stage_is_status_out()) after state update. 
*/ net_buf_unref(buf); } /* Update to next stage of control transfer */ udc_ctrl_update_stage(dev, buf); nrf_usbd_common_setup_clear(); } static void udc_event_fake_status_in(const struct device *dev) { struct net_buf *buf; buf = udc_buf_get(dev, USB_CONTROL_EP_IN); if (unlikely(buf == NULL)) { LOG_DBG("ep 0x%02x queue is empty", USB_CONTROL_EP_IN); return; } LOG_DBG("Fake status IN %p", buf); udc_event_xfer_ctrl_in(dev, buf); } static void udc_event_xfer_in(const struct device *dev, nrf_usbd_common_evt_t const *const event) { uint8_t ep = event->data.eptransfer.ep; struct net_buf *buf; switch (event->data.eptransfer.status) { case NRF_USBD_COMMON_EP_OK: buf = udc_buf_get(dev, ep); if (buf == NULL) { LOG_ERR("ep 0x%02x queue is empty", ep); __ASSERT_NO_MSG(false); return; } udc_ep_set_busy(dev, ep, false); if (ep == USB_CONTROL_EP_IN) { return udc_event_xfer_ctrl_in(dev, buf); } udc_submit_ep_event(dev, buf, 0); break; case NRF_USBD_COMMON_EP_ABORTED: LOG_WRN("aborted IN ep 0x%02x", ep); buf = udc_buf_get_all(dev, ep); if (buf == NULL) { LOG_DBG("ep 0x%02x queue is empty", ep); return; } udc_ep_set_busy(dev, ep, false); udc_submit_ep_event(dev, buf, -ECONNABORTED); break; default: LOG_ERR("Unexpected event (nrfx_usbd): %d, ep 0x%02x", event->data.eptransfer.status, ep); udc_submit_event(dev, UDC_EVT_ERROR, -EIO); break; } } static void udc_event_xfer_ctrl_out(const struct device *dev, struct net_buf *const buf) { /* * In case s-in-status, controller supports auto-status therefore we * do not have to call udc_ctrl_stage_is_status_out(). 
*/ /* Update to next stage of control transfer */ udc_ctrl_update_stage(dev, buf); if (udc_ctrl_stage_is_status_in(dev)) { udc_ctrl_submit_s_out_status(dev, buf); } } static void udc_event_xfer_out_next(const struct device *dev, const uint8_t ep) { struct net_buf *buf; if (udc_ep_is_busy(dev, ep)) { return; } buf = udc_buf_peek(dev, ep); if (buf != NULL) { nrf_usbd_common_transfer_t xfer = { .p_data = {.rx = buf->data}, .size = buf->size, .flags = 0, }; nrfx_err_t err; err = nrf_usbd_common_ep_transfer(ep, &xfer); if (err != NRFX_SUCCESS) { LOG_ERR("ep 0x%02x nrfx error: %x", ep, err); /* REVISE: remove from endpoint queue? ASSERT? */ udc_submit_ep_event(dev, buf, -ECONNREFUSED); } else { udc_ep_set_busy(dev, ep, true); } } else { LOG_DBG("ep 0x%02x waiting, queue is empty", ep); } } static void udc_event_xfer_out(const struct device *dev, nrf_usbd_common_evt_t const *const event) { uint8_t ep = event->data.eptransfer.ep; nrf_usbd_common_ep_status_t err_code; struct net_buf *buf; size_t len; switch (event->data.eptransfer.status) { case NRF_USBD_COMMON_EP_WAITING: /* * There is nothing to do here, new transfer * will be tried in both cases later. 
*/ break; case NRF_USBD_COMMON_EP_OK: err_code = nrf_usbd_common_ep_status_get(ep, &len); if (err_code != NRF_USBD_COMMON_EP_OK) { LOG_ERR("OUT transfer failed %d", err_code); } buf = udc_buf_get(dev, ep); if (buf == NULL) { LOG_ERR("ep 0x%02x ok, queue is empty", ep); return; } net_buf_add(buf, len); udc_ep_set_busy(dev, ep, false); if (ep == USB_CONTROL_EP_OUT) { udc_event_xfer_ctrl_out(dev, buf); } else { udc_submit_ep_event(dev, buf, 0); } break; default: LOG_ERR("Unexpected event (nrfx_usbd): %d, ep 0x%02x", event->data.eptransfer.status, ep); udc_submit_event(dev, UDC_EVT_ERROR, -EIO); break; } } static int usbd_ctrl_feed_dout(const struct device *dev, const size_t length) { struct udc_ep_config *cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT); struct net_buf *buf; buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, length); if (buf == NULL) { return -ENOMEM; } k_fifo_put(&cfg->fifo, buf); udc_nrf_clear_control_out(dev); return 0; } static int udc_event_xfer_setup(const struct device *dev) { struct net_buf *buf; int err; buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, sizeof(struct usb_setup_packet)); if (buf == NULL) { LOG_ERR("Failed to allocate for setup"); return -ENOMEM; } udc_ep_buf_set_setup(buf); nrf_usbd_common_setup_get((nrf_usbd_common_setup_t *)buf->data); net_buf_add(buf, sizeof(nrf_usbd_common_setup_t)); udc_nrf_setup_rcvd = true; /* Update to next stage of control transfer */ udc_ctrl_update_stage(dev, buf); if (udc_ctrl_stage_is_data_out(dev)) { /* Allocate and feed buffer for data OUT stage */ LOG_DBG("s:%p|feed for -out-", buf); err = usbd_ctrl_feed_dout(dev, udc_data_stage_length(buf)); if (err == -ENOMEM) { err = udc_submit_ep_event(dev, buf, err); } } else if (udc_ctrl_stage_is_data_in(dev)) { err = udc_ctrl_submit_s_in_status(dev); } else { err = udc_ctrl_submit_s_status(dev); } return err; } static void udc_nrf_thread(void *p1, void *p2, void *p3) { ARG_UNUSED(p2); ARG_UNUSED(p3); const struct device *dev = p1; while (true) { bool start_xfer = 
false; struct udc_nrf_evt evt; uint8_t ep; k_msgq_get(&drv_msgq, &evt, K_FOREVER); switch (evt.type) { case UDC_NRF_EVT_HAL: ep = evt.hal_evt.data.eptransfer.ep; switch (evt.hal_evt.type) { case NRF_USBD_COMMON_EVT_EPTRANSFER: start_xfer = true; if (USB_EP_DIR_IS_IN(ep)) { udc_event_xfer_in(dev, &evt.hal_evt); } else { udc_event_xfer_out(dev, &evt.hal_evt); } break; case NRF_USBD_COMMON_EVT_SETUP: udc_event_xfer_setup(dev); break; default: break; } break; case UDC_NRF_EVT_XFER: start_xfer = true; ep = evt.ep; break; case UDC_NRF_EVT_STATUS_IN: udc_event_fake_status_in(dev); break; } if (start_xfer) { if (USB_EP_DIR_IS_IN(ep)) { udc_event_xfer_in_next(dev, ep); } else { udc_event_xfer_out_next(dev, ep); } } } } static void udc_sof_check_iso_out(const struct device *dev) { const uint8_t iso_out_addr = 0x08; struct udc_nrf_evt evt = { .type = UDC_NRF_EVT_XFER, .ep = iso_out_addr, }; struct udc_ep_config *ep_cfg; ep_cfg = udc_get_ep_cfg(dev, iso_out_addr); if (ep_cfg == NULL) { return; } if (ep_cfg->stat.enabled && !k_fifo_is_empty(&ep_cfg->fifo)) { k_msgq_put(&drv_msgq, &evt, K_NO_WAIT); } } static void usbd_event_handler(nrf_usbd_common_evt_t const *const hal_evt) { switch (hal_evt->type) { case NRF_USBD_COMMON_EVT_SUSPEND: LOG_INF("SUSPEND state detected"); nrf_usbd_common_suspend(); udc_set_suspended(udc_nrf_dev, true); udc_submit_event(udc_nrf_dev, UDC_EVT_SUSPEND, 0); break; case NRF_USBD_COMMON_EVT_RESUME: LOG_INF("RESUMING from suspend"); udc_set_suspended(udc_nrf_dev, false); udc_submit_event(udc_nrf_dev, UDC_EVT_RESUME, 0); break; case NRF_USBD_COMMON_EVT_WUREQ: LOG_INF("Remote wakeup initiated"); udc_set_suspended(udc_nrf_dev, false); udc_submit_event(udc_nrf_dev, UDC_EVT_RESUME, 0); break; case NRF_USBD_COMMON_EVT_RESET: LOG_INF("Reset"); udc_submit_event(udc_nrf_dev, UDC_EVT_RESET, 0); break; case NRF_USBD_COMMON_EVT_SOF: udc_submit_event(udc_nrf_dev, UDC_EVT_SOF, 0); udc_sof_check_iso_out(udc_nrf_dev); break; case NRF_USBD_COMMON_EVT_EPTRANSFER: case 
NRF_USBD_COMMON_EVT_SETUP: { struct udc_nrf_evt evt = { .type = UDC_NRF_EVT_HAL, .hal_evt = *hal_evt, }; /* Forward these two to the thread since mutually exclusive * access to the controller is necessary. */ k_msgq_put(&drv_msgq, &evt, K_NO_WAIT); break; } default: break; } } static void udc_nrf_power_handler(nrfx_power_usb_evt_t pwr_evt) { switch (pwr_evt) { case NRFX_POWER_USB_EVT_DETECTED: LOG_DBG("POWER event detected"); udc_submit_event(udc_nrf_dev, UDC_EVT_VBUS_READY, 0); break; case NRFX_POWER_USB_EVT_READY: LOG_DBG("POWER event ready"); nrf_usbd_common_start(true); break; case NRFX_POWER_USB_EVT_REMOVED: LOG_DBG("POWER event removed"); udc_submit_event(udc_nrf_dev, UDC_EVT_VBUS_REMOVED, 0); break; default: LOG_ERR("Unknown power event %d", pwr_evt); } } static void udc_nrf_fake_status_in(const struct device *dev) { struct udc_nrf_evt evt = { .type = UDC_NRF_EVT_STATUS_IN, .ep = USB_CONTROL_EP_IN, }; if (nrf_usbd_common_last_setup_dir_get() == USB_CONTROL_EP_OUT) { /* Let controller perform status IN stage */ k_msgq_put(&drv_msgq, &evt, K_NO_WAIT); } } static int udc_nrf_ep_enqueue(const struct device *dev, struct udc_ep_config *cfg, struct net_buf *buf) { struct udc_nrf_evt evt = { .type = UDC_NRF_EVT_XFER, .ep = cfg->addr, }; udc_buf_put(cfg, buf); if (cfg->addr == USB_CONTROL_EP_IN && buf->len == 0) { udc_nrf_fake_status_in(dev); return 0; } k_msgq_put(&drv_msgq, &evt, K_NO_WAIT); return 0; } static int udc_nrf_ep_dequeue(const struct device *dev, struct udc_ep_config *cfg) { bool busy = nrf_usbd_common_ep_is_busy(cfg->addr); nrf_usbd_common_ep_abort(cfg->addr); if (USB_EP_DIR_IS_OUT(cfg->addr) || !busy) { struct net_buf *buf; /* * HAL driver does not generate event for an OUT endpoint * or when IN endpoint is not busy. 
*/ buf = udc_buf_get_all(dev, cfg->addr); if (buf) { udc_submit_ep_event(dev, buf, -ECONNABORTED); } else { LOG_INF("ep 0x%02x queue is empty", cfg->addr); } } udc_ep_set_busy(dev, cfg->addr, false); return 0; } static int udc_nrf_ep_enable(const struct device *dev, struct udc_ep_config *cfg) { uint16_t mps; __ASSERT_NO_MSG(cfg); mps = (cfg->mps == 0) ? cfg->caps.mps : cfg->mps; nrf_usbd_common_ep_max_packet_size_set(cfg->addr, mps); nrf_usbd_common_ep_enable(cfg->addr); if (!NRF_USBD_EPISO_CHECK(cfg->addr)) { /* ISO transactions for full-speed device do not support * toggle sequencing and should only send DATA0 PID. */ nrf_usbd_common_ep_dtoggle_clear(cfg->addr); nrf_usbd_common_ep_stall_clear(cfg->addr); } LOG_DBG("Enable ep 0x%02x", cfg->addr); return 0; } static int udc_nrf_ep_disable(const struct device *dev, struct udc_ep_config *cfg) { __ASSERT_NO_MSG(cfg); nrf_usbd_common_ep_disable(cfg->addr); LOG_DBG("Disable ep 0x%02x", cfg->addr); return 0; } static int udc_nrf_ep_set_halt(const struct device *dev, struct udc_ep_config *cfg) { LOG_DBG("Halt ep 0x%02x", cfg->addr); if (cfg->addr == USB_CONTROL_EP_OUT || cfg->addr == USB_CONTROL_EP_IN) { nrf_usbd_common_setup_stall(); } else { nrf_usbd_common_ep_stall(cfg->addr); } return 0; } static int udc_nrf_ep_clear_halt(const struct device *dev, struct udc_ep_config *cfg) { LOG_DBG("Clear halt ep 0x%02x", cfg->addr); nrf_usbd_common_ep_dtoggle_clear(cfg->addr); nrf_usbd_common_ep_stall_clear(cfg->addr); return 0; } static int udc_nrf_set_address(const struct device *dev, const uint8_t addr) { /** * Nothing to do here. The USBD HW already takes care of initiating * STATUS stage. Just double check the address for sanity. 
*/ if (addr != (uint8_t)NRF_USBD->USBADDR) { LOG_WRN("USB Address incorrect 0x%02x", addr); } return 0; } static int udc_nrf_host_wakeup(const struct device *dev) { bool res = nrf_usbd_common_wakeup_req(); LOG_DBG("Host wakeup request"); if (!res) { return -EAGAIN; } return 0; } static int udc_nrf_enable(const struct device *dev) { unsigned int key; int ret; ret = nrf_usbd_common_init(usbd_event_handler); if (ret != NRFX_SUCCESS) { LOG_ERR("nRF USBD driver initialization failed"); return -EIO; } if (udc_ep_enable_internal(dev, USB_CONTROL_EP_OUT, USB_EP_TYPE_CONTROL, UDC_NRF_EP0_SIZE, 0)) { LOG_ERR("Failed to enable control endpoint"); return -EIO; } if (udc_ep_enable_internal(dev, USB_CONTROL_EP_IN, USB_EP_TYPE_CONTROL, UDC_NRF_EP0_SIZE, 0)) { LOG_ERR("Failed to enable control endpoint"); return -EIO; } sys_notify_init_spinwait(&hfxo_cli.notify); ret = onoff_request(hfxo_mgr, &hfxo_cli); if (ret < 0) { LOG_ERR("Failed to start HFXO %d", ret); return ret; } /* Disable interrupts until USBD is enabled */ key = irq_lock(); nrf_usbd_common_enable(); irq_unlock(key); return 0; } static int udc_nrf_disable(const struct device *dev) { int ret; nrf_usbd_common_disable(); if (udc_ep_disable_internal(dev, USB_CONTROL_EP_OUT)) { LOG_ERR("Failed to disable control endpoint"); return -EIO; } if (udc_ep_disable_internal(dev, USB_CONTROL_EP_IN)) { LOG_ERR("Failed to disable control endpoint"); return -EIO; } nrf_usbd_common_uninit(); ret = onoff_cancel_or_release(hfxo_mgr, &hfxo_cli); if (ret < 0) { LOG_ERR("Failed to stop HFXO %d", ret); return ret; } return 0; } static int udc_nrf_init(const struct device *dev) { const struct udc_nrf_config *cfg = dev->config; hfxo_mgr = z_nrf_clock_control_get_onoff(cfg->clock); #ifdef CONFIG_HAS_HW_NRF_USBREG /* Use CLOCK/POWER priority for compatibility with other series where * USB events are handled by CLOCK interrupt handler. 
*/ IRQ_CONNECT(USBREGULATOR_IRQn, DT_IRQ(DT_INST(0, nordic_nrf_clock), priority), nrfx_isr, nrfx_usbreg_irq_handler, 0); irq_enable(USBREGULATOR_IRQn); #endif IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), nrfx_isr, nrf_usbd_common_irq_handler, 0); (void)nrfx_power_init(&cfg->pwr); nrfx_power_usbevt_init(&cfg->evt); nrfx_power_usbevt_enable(); LOG_INF("Initialized"); return 0; } static int udc_nrf_shutdown(const struct device *dev) { LOG_INF("shutdown"); nrfx_power_usbevt_disable(); nrfx_power_usbevt_uninit(); #ifdef CONFIG_HAS_HW_NRF_USBREG irq_disable(USBREGULATOR_IRQn); #endif return 0; } static int udc_nrf_driver_init(const struct device *dev) { struct udc_data *data = dev->data; int err; LOG_INF("Preinit"); udc_nrf_dev = dev; k_mutex_init(&data->mutex); k_thread_create(&drv_stack_data, drv_stack, K_KERNEL_STACK_SIZEOF(drv_stack), udc_nrf_thread, (void *)dev, NULL, NULL, K_PRIO_COOP(8), 0, K_NO_WAIT); k_thread_name_set(&drv_stack_data, "udc_nrfx"); for (int i = 0; i < ARRAY_SIZE(ep_cfg_out); i++) { ep_cfg_out[i].caps.out = 1; if (i == 0) { ep_cfg_out[i].caps.control = 1; ep_cfg_out[i].caps.mps = NRF_USBD_COMMON_EPSIZE; } else if (i < (CFG_EPOUT_CNT + 1)) { ep_cfg_out[i].caps.bulk = 1; ep_cfg_out[i].caps.interrupt = 1; ep_cfg_out[i].caps.mps = NRF_USBD_COMMON_EPSIZE; } else { ep_cfg_out[i].caps.iso = 1; ep_cfg_out[i].caps.mps = NRF_USBD_COMMON_ISOSIZE / 2; } ep_cfg_out[i].addr = USB_EP_DIR_OUT | i; err = udc_register_ep(dev, &ep_cfg_out[i]); if (err != 0) { LOG_ERR("Failed to register endpoint"); return err; } } for (int i = 0; i < ARRAY_SIZE(ep_cfg_in); i++) { ep_cfg_in[i].caps.in = 1; if (i == 0) { ep_cfg_in[i].caps.control = 1; ep_cfg_in[i].caps.mps = NRF_USBD_COMMON_EPSIZE; } else if (i < (CFG_EPIN_CNT + 1)) { ep_cfg_in[i].caps.bulk = 1; ep_cfg_in[i].caps.interrupt = 1; ep_cfg_in[i].caps.mps = NRF_USBD_COMMON_EPSIZE; } else { ep_cfg_in[i].caps.iso = 1; ep_cfg_in[i].caps.mps = NRF_USBD_COMMON_ISOSIZE / 2; } ep_cfg_in[i].addr = USB_EP_DIR_IN | i; err = 
udc_register_ep(dev, &ep_cfg_in[i]); if (err != 0) { LOG_ERR("Failed to register endpoint"); return err; } } data->caps.rwup = true; data->caps.out_ack = true; data->caps.mps0 = UDC_NRF_MPS0; data->caps.can_detect_vbus = true; return 0; } static int udc_nrf_lock(const struct device *dev) { return udc_lock_internal(dev, K_FOREVER); } static int udc_nrf_unlock(const struct device *dev) { return udc_unlock_internal(dev); } static const struct udc_nrf_config udc_nrf_cfg = { .clock = COND_CODE_1(NRF_CLOCK_HAS_HFCLK192M, (CLOCK_CONTROL_NRF_SUBSYS_HF192M), (CLOCK_CONTROL_NRF_SUBSYS_HF)), .pwr = { .dcdcen = (DT_PROP(DT_INST(0, nordic_nrf5x_regulator), regulator_initial_mode) == NRF5X_REG_MODE_DCDC), #if NRFX_POWER_SUPPORTS_DCDCEN_VDDH .dcdcenhv = COND_CODE_1(CONFIG_SOC_SERIES_NRF52X, (DT_NODE_HAS_STATUS(DT_INST(0, nordic_nrf52x_regulator_hv), okay)), (DT_NODE_HAS_STATUS(DT_INST(0, nordic_nrf53x_regulator_hv), okay))), #endif }, .evt = { .handler = udc_nrf_power_handler }, }; static struct udc_data udc_nrf_data = { .mutex = Z_MUTEX_INITIALIZER(udc_nrf_data.mutex), .priv = NULL, }; static const struct udc_api udc_nrf_api = { .lock = udc_nrf_lock, .unlock = udc_nrf_unlock, .init = udc_nrf_init, .enable = udc_nrf_enable, .disable = udc_nrf_disable, .shutdown = udc_nrf_shutdown, .set_address = udc_nrf_set_address, .host_wakeup = udc_nrf_host_wakeup, .ep_try_config = NULL, .ep_enable = udc_nrf_ep_enable, .ep_disable = udc_nrf_ep_disable, .ep_set_halt = udc_nrf_ep_set_halt, .ep_clear_halt = udc_nrf_ep_clear_halt, .ep_enqueue = udc_nrf_ep_enqueue, .ep_dequeue = udc_nrf_ep_dequeue, }; DEVICE_DT_INST_DEFINE(0, udc_nrf_driver_init, NULL, &udc_nrf_data, &udc_nrf_cfg, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &udc_nrf_api); ```
/content/code_sandbox/drivers/usb/udc/udc_nrf.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,458
```unknown DT_COMPAT_UHC_MAX3421E := maxim,max3421e_spi config UHC_MAX3421E bool "MAX3421E driver" default y depends on DT_HAS_MAXIM_MAX3421E_SPI_ENABLED select SPI help This option enables MAX3421E USB Peripheral/Host Controller with SPI Interface. if UHC_MAX3421E config MAX3421E_THREAD_STACK_SIZE int "Driver internal thread stack size" default 512 help Size of the stack used in the driver. config MAX3421E_OSC_WAIT_RETRIES int "Maximum retries for oscillator ready event" default 3 help Specify the number of retries for oscillator ready event. endif #UHC_MAX3421E ```
/content/code_sandbox/drivers/usb/uhc/Kconfig.max3421e
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
162
```objective-c /* * */ #ifndef ZEPHYR_INCLUDE_UHC_MAX3421E_H #define ZEPHYR_INCLUDE_UHC_MAX3421E_H #include <zephyr/sys/util_macro.h> #define MAX3421E_MAX_EP_SIZE 64U /* SPI command byte format macros */ #define MAX3421E_CMD_REG_SHIFT 3U #define MAX3421E_CMD_DIR_WR BIT(1) #define MAX3421E_CMD_DIR_RD 0U #define MAX3421E_CMD_SPI_READ(reg) \ (((reg) << MAX3421E_CMD_REG_SHIFT) | MAX3421E_CMD_DIR_RD) #define MAX3421E_CMD_SPI_WRITE(reg) \ (((reg) << MAX3421E_CMD_REG_SHIFT) | MAX3421E_CMD_DIR_WR) /* Below are all the register definitions for the host mode. */ /* Register RCVFIFO */ #define MAX3421E_REG_RCVFIFO 1U /* Register SNDFIFO */ #define MAX3421E_REG_SNDFIFO 2U /* Register SUDFIFO */ #define MAX3421E_REG_SUDFIFO 4U /* Register RCVBC */ #define MAX3421E_REG_RCVBC 6U #define MAX3421E_RCVBC_MAX 0x7FU /* Register SNDBC */ #define MAX3421E_REG_SNDBC 7U #define MAX3421E_SNDBC_MAX 0x7FU /* Register USBIRQ */ #define MAX3421E_REG_USBIRQ 13U #define MAX3421E_VBUSIRQ BIT(6) #define MAX3421E_NOVBUSIRQ BIT(5) #define MAX3421E_OSCOKIRQ BIT(0) /* Register USBIEN */ #define MAX3421E_REG_USBIEN 14U #define MAX3421E_VBUSIE BIT(6) #define MAX3421E_NOVBUSIE BIT(5) #define MAX3421E_OSCOKIE BIT(0) /* Register USBCTL */ #define MAX3421E_REG_USBCTL 15U #define MAX3421E_CHIPRES BIT(5) #define MAX3421E_PWRDOWN BIT(4) /* Register CPUCTL */ #define MAX3421E_REG_CPUCTL 16U #define MAX3421E_PULSEWID1 BIT(7) #define MAX3421E_PULSEWID0 BIT(6) #define MAX3421E_IE BIT(0) /* Register PINCTL */ #define MAX3421E_REG_PINCTL 17U #define MAX3421E_FDUPSPI BIT(4) #define MAX3421E_INTLEVEL BIT(3) #define MAX3421E_POSINT BIT(2) #define MAX3421E_GPXB BIT(1) #define MAX3421E_GPXA BIT(0) /* Register REVISION */ #define MAX3421E_REG_REVISION 18U /* Register IOPINS1, IOPINS2, GPINIRQ, GPINIEN, GPINPOL */ #define MAX3421E_REG_IOPINS1 20U #define MAX3421E_REG_IOPINS2 21U #define MAX3421E_REG_GPINIRQ 22U #define MAX3421E_REG_GPINIE 23U #define MAX3421E_REG_GPINPOL 24U /* Register HIRQ and HIEN */ #define 
MAX3421E_REG_HIRQ 25U #define MAX3421E_REG_HIEN 26U #define MAX3421E_HXFRDN BIT(7) #define MAX3421E_FRAME BIT(6) #define MAX3421E_CONDET BIT(5) #define MAX3421E_SUSDN BIT(4) #define MAX3421E_SNDBAV BIT(3) #define MAX3421E_RCVDAV BIT(2) #define MAX3421E_RWU BIT(1) #define MAX3421E_BUSEVENT BIT(0) /* Register MODE */ #define MAX3421E_REG_MODE 27U #define MAX3421E_DPPULLDN BIT(7) #define MAX3421E_DMPULLDN BIT(6) #define MAX3421E_DELAYISO BIT(5) #define MAX3421E_SEPIRQ BIT(4) #define MAX3421E_SOFKAENAB BIT(3) #define MAX3421E_HUBPRE BIT(2) #define MAX3421E_LOWSPEED BIT(1) #define MAX3421E_HOST BIT(0) /* Register PERADDR */ #define MAX3421E_REG_PERADDR 28U #define MAX3421E_PERADDR_MASK 0x7FU /* Register HCTL */ #define MAX3421E_REG_HCTL 29U #define MAX3421E_SNDTOG1 BIT(7) #define MAX3421E_SNDTOG0 BIT(6) #define MAX3421E_RCVTOG1 BIT(5) #define MAX3421E_RCVTOG0 BIT(4) #define MAX3421E_SIGRSM BIT(3) #define MAX3421E_SAMPLEBUS BIT(2) #define MAX3421E_FRMRST BIT(1) #define MAX3421E_BUSRST BIT(0) /* Register HXFR */ #define MAX3421E_REG_HXFR 30U #define MAX3421E_HS BIT(7) #define MAX3421E_ISO BIT(6) #define MAX3421E_OUTNIN BIT(5) #define MAX3421E_SETUP BIT(4) #define MAX3421E_EP_MASK 0x0FU #define MAX3421E_EP(ep) ((ep) & MAX3421E_EP_MASK) #define MAX3421E_HXFR_TYPE(hxfr) ((hxfr) & 0xF0U) #define MAX3421E_HXFR_SETUP(ep) (MAX3421E_SETUP | MAX3421E_EP(ep)) #define MAX3421E_HXFR_BULKIN(ep) MAX3421E_EP(ep) #define MAX3421E_HXFR_ISOIN(ep) (MAX3421E_ISO | MAX3421E_EP(ep)) #define MAX3421E_HXFR_HSIN(ep) (MAX3421E_HS | MAX3421E_EP(ep)) #define MAX3421E_HXFR_BULKOUT(ep) (MAX3421E_OUTNIN | MAX3421E_HXFR_BULKIN(ep)) #define MAX3421E_HXFR_ISOOUT(ep) (MAX3421E_OUTNIN | MAX3421E_HXFR_ISOIN(ep)) #define MAX3421E_HXFR_HSOUT(ep) (MAX3421E_OUTNIN | MAX3421E_HXFR_HSIN(ep)) #define MAX3421E_HXFR_TYPE_SETUP MAX3421E_SETUP #define MAX3421E_HXFR_TYPE_HSIN MAX3421E_HS #define MAX3421E_HXFR_TYPE_HSOUT (MAX3421E_OUTNIN | MAX3421E_HXFR_TYPE_HSIN) #define MAX3421E_HXFR_TYPE_ISOIN MAX3421E_ISO #define 
MAX3421E_HXFR_TYPE_ISOOUT (MAX3421E_OUTNIN | MAX3421E_HXFR_TYPE_ISOIN) #define MAX3421E_HXFR_TYPE_BULKIN 0 #define MAX3421E_HXFR_TYPE_BULKOUT MAX3421E_OUTNIN /* Register HRSL */ #define MAX3421E_REG_HRSL 31U #define MAX3421E_JKSTATUS_MASK (BIT(7) | BIT(6)) #define MAX3421E_JSTATUS BIT(7) #define MAX3421E_KSTATUS BIT(6) #define MAX3421E_SNDTOGRD BIT(5) #define MAX3421E_RCVTOGRD BIT(4) #define MAX3421E_HRSLT_MASK 0x0FU #define MAX3421E_HRSLT(hr) ((hr) & MAX3421E_HRSLT_MASK) #define MAX3421E_HR_SUCCESS 0x00U #define MAX3421E_HR_BUSY 0x01U #define MAX3421E_HR_BADREQ 0x02U #define MAX3421E_HR_UNDEF 0x03U #define MAX3421E_HR_NAK 0x04U #define MAX3421E_HR_STALL 0x05U #define MAX3421E_HR_TOGERR 0x06U #define MAX3421E_HR_WRONGPID 0x07U #define MAX3421E_HR_BADBC 0x08U #define MAX3421E_HR_PIDERR 0x09U #define MAX3421E_HR_PKTERR 0x0AU #define MAX3421E_HR_CRCERR 0x0BU #define MAX3421E_HR_KERR 0x0CU #define MAX3421E_HR_JERR 0x0DU #define MAX3421E_HR_TIMEOUT 0x0EU #define MAX3421E_HR_BABBLE 0x0FU /* Successful Transfer */ #define HRSLT_IS_SUCCESS(hr) (MAX3421E_HRSLT(hr) == MAX3421E_HR_SUCCESS) /* SIE is busy, transfer pending */ #define HRSLT_IS_BUSY(hr) (MAX3421E_HRSLT(hr) == MAX3421E_HR_BUSY) /* Bad value in HXFR reg */ #define HRSLT_IS_BADREQ(hr) (MAX3421E_HRSLT(hr) == MAX3421E_HR_BADREQ) /* reserved */ #define HRSLT_IS_UNDEF(hr) (MAX3421E_HRSLT(hr) == MAX3421E_HR_UNDEF) /* Peripheral returned NAK */ #define HRSLT_IS_NAK(hr) (MAX3421E_HRSLT(hr) == MAX3421E_HR_NAK) /* Peripheral returned STALL */ #define HRSLT_IS_STALL(hr) (MAX3421E_HRSLT(hr) == MAX3421E_HR_STALL) /* Toggle error/ISO over-underrun */ #define HRSLT_IS_TOGERR(hr) (MAX3421E_HRSLT(hr) == MAX3421E_HR_TOGERR) /* Received the wrong PID */ #define HRSLT_IS_WRONGPID(hr) (MAX3421E_HRSLT(hr) == MAX3421E_HR_WRONGPID) /* Bad byte count */ #define HRSLT_IS_BADBC(hr) (MAX3421E_HRSLT(hr) == MAX3421E_HR_BADBC) /* Receive PID is corrupted */ #define HRSLT_IS_PIDERR(hr) (MAX3421E_HRSLT(hr) == MAX3421E_HR_PIDERR) /* Packet error 
(stuff, EOP) */ #define HRSLT_IS_PKTERR(hr) (MAX3421E_HRSLT(hr) == MAX3421E_HR_PKTERR) /* CRC error */ #define HRSLT_IS_CRCERR(hr) (MAX3421E_HRSLT(hr) == MAX3421E_HR_CRCERR) /* K-state instead of response */ #define HRSLT_IS_KERR(hr) (MAX3421E_HRSLT(hr) == MAX3421E_HR_KERR) /* J-state instead of response */ #define HRSLT_IS_JERR(hr) (MAX3421E_HRSLT(hr) == MAX3421E_HR_JERR) /* Device did not respond in time */ #define HRSLT_IS_TIMEOUT(hr) (MAX3421E_HRSLT(hr) == MAX3421E_HR_TIMEOUT) /* Device talked too long */ #define HRSLT_IS_BABBLE(hr) (MAX3421E_HRSLT(hr) == MAX3421E_HR_BABBLE) #endif /* ZEPHYR_INCLUDE_UHC_MAX3421E_H */ ```
/content/code_sandbox/drivers/usb/uhc/uhc_max3421e.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,630
```objective-c /* * */ /** * @file * @brief Private API for USB host controller (UHC) drivers */ #ifndef ZEPHYR_INCLUDE_UHC_COMMON_H #define ZEPHYR_INCLUDE_UHC_COMMON_H #include <zephyr/drivers/usb/uhc.h> /** * @brief Get driver's private data * * @param[in] dev Pointer to device struct of the driver instance * * @return pointer to driver's private data */ static inline void *uhc_get_private(const struct device *dev) { struct uhc_data *data = dev->data; return data->priv; } /** * @brief Locking function for the drivers. * * @param[in] dev Pointer to device struct of the driver instance * @param[in] timeout Timeout * * @return values provided by k_mutex_lock() */ static inline int uhc_lock_internal(const struct device *dev, k_timeout_t timeout) { struct uhc_data *data = dev->data; return k_mutex_lock(&data->mutex, timeout); } /** * @brief Unlocking function for the drivers. * * @param[in] dev Pointer to device struct of the driver instance * * @return values provided by k_mutex_lock() */ static inline int uhc_unlock_internal(const struct device *dev) { struct uhc_data *data = dev->data; return k_mutex_unlock(&data->mutex); } /** * @brief Helper function to return UHC transfer to a higher level. * * Function to dequeue transfer and send UHC event to a higher level. * * @param[in] dev Pointer to device struct of the driver instance * @param[in] xfer Pointer to UHC transfer * @param[in] err Transfer error */ void uhc_xfer_return(const struct device *dev, struct uhc_transfer *const xfer, const int err); /** * @brief Helper to get next transfer to process. * * This is currently a draft, and simple picks a transfer * from the lists. * * @param[in] dev Pointer to device struct of the driver instance * @return pointer to the next transfer or NULL on error. */ struct uhc_transfer *uhc_xfer_get_next(const struct device *dev); /** * @brief Helper to append a transfer to internal list. 
* * @param[in] dev Pointer to device struct of the driver instance * @param[in] xfer Pointer to UHC transfer * * @return 0 on success, all other values should be treated as error. * @retval -ENOMEM if there is no buffer in the queue */ int uhc_xfer_append(const struct device *dev, struct uhc_transfer *const xfer); /** * @brief Helper function to send UHC event to a higher level. * * The callback would typically sends UHC even to a message queue (k_msgq). * * @param[in] dev Pointer to device struct of the driver instance * @param[in] type Event type * @param[in] status Event status * * @return 0 on success, all other values should be treated as error. * @retval -EPERM controller is not initialized */ int uhc_submit_event(const struct device *dev, const enum uhc_event_type type, const int status); #endif /* ZEPHYR_INCLUDE_UHC_COMMON_H */ ```
/content/code_sandbox/drivers/usb/uhc/uhc_common.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
714
```unknown menuconfig UHC_DRIVER bool "USB host controller drivers [EXPERIMENTAL]" select EXPERIMENTAL select NET_BUF help USB host controller driver. if UHC_DRIVER config UHC_XFER_COUNT int "Number of transfers in the pool" range 2 256 default 16 help Number of UHC transfers available. config UHC_BUF_COUNT int "Number of buffers in the pool" range 16 256 default 16 help Number of UHC request buffers in the pool. config UHC_BUF_POOL_SIZE int "Memory available for buffers" range 64 32768 default 1024 help Total amount of memory available for UHC buffers. module = UHC_DRIVER module-str = uhc drv source "subsys/logging/Kconfig.template.log_config" source "drivers/usb/uhc/Kconfig.max3421e" source "drivers/usb/uhc/Kconfig.virtual" endif # UHC_DRIVER ```
/content/code_sandbox/drivers/usb/uhc/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
212
```c /* * */ /* * @file uhc_virtual.c * @brief Virtual USB host controller (UHC) driver * * Virtual device controller does not emulate any hardware * and can only communicate with the virtual device controllers * through virtual bus. */ #include "uhc_common.h" #include "../uvb/uvb.h" #include <string.h> #include <zephyr/kernel.h> #include <zephyr/init.h> #include <zephyr/drivers/usb/uhc.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(uhc_vrt, CONFIG_UHC_DRIVER_LOG_LEVEL); struct uhc_vrt_config { }; struct uhc_vrt_data { const struct device *dev; struct uvb_node *host_node; struct k_work work; struct k_fifo fifo; struct uhc_transfer *last_xfer; struct k_timer sof_timer; bool busy; uint8_t req; }; enum uhc_vrt_event_type { /* Trigger next transfer */ UHC_VRT_EVT_XFER, /* SoF generator event */ UHC_VRT_EVT_SOF, /* Request reply received */ UHC_VRT_EVT_REPLY, }; /* Structure for driver's endpoint events */ struct uhc_vrt_event { sys_snode_t node; enum uhc_vrt_event_type type; struct uvb_packet *pkt; }; K_MEM_SLAB_DEFINE(uhc_vrt_slab, sizeof(struct uhc_vrt_event), 16, sizeof(void *)); static void vrt_event_submit(const struct device *dev, const enum uhc_vrt_event_type type, const void *data) { struct uhc_vrt_data *priv = uhc_get_private(dev); struct uhc_vrt_event *event; int ret; ret = k_mem_slab_alloc(&uhc_vrt_slab, (void **)&event, K_NO_WAIT); __ASSERT(ret == 0, "Failed to allocate slab"); event->type = type; event->pkt = (struct uvb_packet *const)data; k_fifo_put(&priv->fifo, event); k_work_submit(&priv->work); } static int vrt_xfer_control(const struct device *dev, struct uhc_transfer *const xfer) { struct uhc_vrt_data *priv = uhc_get_private(dev); struct net_buf *buf = xfer->buf; struct uvb_packet *uvb_pkt; uint8_t *data = NULL; size_t length = 0; if (xfer->stage == UHC_CONTROL_STAGE_SETUP) { LOG_DBG("Handle SETUP stage"); uvb_pkt = uvb_alloc_pkt(UVB_REQUEST_SETUP, xfer->addr, USB_CONTROL_EP_OUT, xfer->setup_pkt, sizeof(xfer->setup_pkt)); if (uvb_pkt == 
NULL) { LOG_ERR("Failed to allocate UVB packet"); return -ENOMEM; } priv->req = UVB_REQUEST_SETUP; priv->busy = true; return uvb_advert_pkt(priv->host_node, uvb_pkt); } if (buf != NULL && xfer->stage == UHC_CONTROL_STAGE_DATA) { if (USB_EP_DIR_IS_IN(xfer->ep)) { length = MIN(net_buf_tailroom(buf), xfer->mps); data = net_buf_tail(buf); } else { length = MIN(buf->len, xfer->mps); data = buf->data; } LOG_DBG("Handle DATA stage"); uvb_pkt = uvb_alloc_pkt(UVB_REQUEST_DATA, xfer->addr, xfer->ep, data, length); if (uvb_pkt == NULL) { LOG_ERR("Failed to allocate UVB packet"); return -ENOMEM; } priv->req = UVB_REQUEST_DATA; priv->busy = true; return uvb_advert_pkt(priv->host_node, uvb_pkt); } if (xfer->stage == UHC_CONTROL_STAGE_STATUS) { uint8_t ep; LOG_DBG("Handle STATUS stage"); if (USB_EP_DIR_IS_IN(xfer->ep)) { ep = USB_CONTROL_EP_OUT; } else { ep = USB_CONTROL_EP_IN; } uvb_pkt = uvb_alloc_pkt(UVB_REQUEST_DATA, xfer->addr, ep, NULL, 0); if (uvb_pkt == NULL) { LOG_ERR("Failed to allocate UVB packet"); return -ENOMEM; } priv->req = UVB_REQUEST_DATA; priv->busy = true; return uvb_advert_pkt(priv->host_node, uvb_pkt); } return -EINVAL; } static int vrt_xfer_bulk(const struct device *dev, struct uhc_transfer *const xfer) { struct uhc_vrt_data *priv = uhc_get_private(dev); struct net_buf *buf = xfer->buf; struct uvb_packet *uvb_pkt; uint8_t *data; size_t length; if (USB_EP_DIR_IS_IN(xfer->ep)) { length = MIN(net_buf_tailroom(buf), xfer->mps); data = net_buf_tail(buf); } else { length = MIN(buf->len, xfer->mps); data = buf->data; } uvb_pkt = uvb_alloc_pkt(UVB_REQUEST_DATA, xfer->addr, xfer->ep, data, length); if (uvb_pkt == NULL) { LOG_ERR("Failed to allocate UVB packet"); return -ENOMEM; } return uvb_advert_pkt(priv->host_node, uvb_pkt); } static int vrt_schedule_xfer(const struct device *dev) { struct uhc_vrt_data *priv = uhc_get_private(dev); if (priv->last_xfer == NULL) { priv->last_xfer = uhc_xfer_get_next(dev); if (priv->last_xfer == NULL) { LOG_DBG("Nothing to 
transfer"); return 0; } LOG_DBG("Next transfer is %p", priv->last_xfer); } if (USB_EP_GET_IDX(priv->last_xfer->ep) == 0) { return vrt_xfer_control(dev, priv->last_xfer); } /* TODO: Isochronous transfers */ return vrt_xfer_bulk(dev, priv->last_xfer); } static void vrt_hrslt_success(const struct device *dev, struct uvb_packet *const pkt) { struct uhc_vrt_data *priv = uhc_get_private(dev); struct uhc_transfer *const xfer = priv->last_xfer; struct net_buf *buf = xfer->buf; bool finished = false; size_t length; switch (pkt->request) { case UVB_REQUEST_SETUP: if (xfer->buf != NULL) { xfer->stage = UHC_CONTROL_STAGE_DATA; } else { xfer->stage = UHC_CONTROL_STAGE_STATUS; } break; case UVB_REQUEST_DATA: if (xfer->stage == UHC_CONTROL_STAGE_STATUS) { LOG_DBG("Status stage finished"); finished = true; break; } if (USB_EP_DIR_IS_OUT(pkt->ep)) { length = MIN(buf->len, xfer->mps); net_buf_pull(buf, length); LOG_DBG("OUT chunk %zu out of %u", length, buf->len); if (buf->len == 0) { if (pkt->ep == USB_CONTROL_EP_OUT) { xfer->stage = UHC_CONTROL_STAGE_STATUS; } else { finished = true; } } } else { length = MIN(net_buf_tailroom(buf), pkt->length); net_buf_add(buf, length); if (pkt->length > xfer->mps) { LOG_ERR("Ambiguous packet with the length %zu", pkt->length); } LOG_DBG("IN chunk %zu out of %zu", length, net_buf_tailroom(buf)); if (pkt->length < xfer->mps || !net_buf_tailroom(buf)) { if (pkt->ep == USB_CONTROL_EP_IN) { xfer->stage = UHC_CONTROL_STAGE_STATUS; } else { finished = true; } } } break; } if (finished) { LOG_DBG("Transfer finished"); uhc_xfer_return(dev, xfer, 0); priv->last_xfer = NULL; } } static void vrt_xfer_drop_active(const struct device *dev, int err) { struct uhc_vrt_data *priv = uhc_get_private(dev); if (priv->last_xfer) { uhc_xfer_return(dev, priv->last_xfer, err); priv->last_xfer = NULL; } } static int vrt_handle_reply(const struct device *dev, struct uvb_packet *const pkt) { struct uhc_vrt_data *priv = uhc_get_private(dev); struct uhc_transfer *const xfer = 
priv->last_xfer; int ret = 0; if (xfer == NULL) { LOG_ERR("No transfers to handle"); ret = -ENODATA; goto handle_reply_err; } priv->busy = false; switch (pkt->reply) { case UVB_REPLY_NACK: /* Restart last transaction */ break; case UVB_REPLY_STALL: vrt_xfer_drop_active(dev, -EPIPE); break; case UVB_REPLY_ACK: vrt_hrslt_success(dev, pkt); break; default: vrt_xfer_drop_active(dev, -EINVAL); ret = -EINVAL; break; } handle_reply_err: uvb_free_pkt(pkt); return ret; } static void xfer_work_handler(struct k_work *work) { struct uhc_vrt_data *priv = CONTAINER_OF(work, struct uhc_vrt_data, work); const struct device *dev = priv->dev; struct uhc_vrt_event *ev; while ((ev = k_fifo_get(&priv->fifo, K_NO_WAIT)) != NULL) { bool schedule = false; int err; switch (ev->type) { case UHC_VRT_EVT_REPLY: err = vrt_handle_reply(dev, ev->pkt); if (unlikely(err)) { uhc_submit_event(dev, UHC_EVT_ERROR, err); } schedule = true; break; case UHC_VRT_EVT_XFER: LOG_DBG("Transfer triggered for %p", dev); schedule = true; break; case UHC_VRT_EVT_SOF: if (priv->last_xfer != NULL) { if (priv->last_xfer->timeout) { priv->last_xfer->timeout--; } else { vrt_xfer_drop_active(dev, -ETIMEDOUT); priv->busy = false; LOG_WRN("Transfer timeout"); } } break; default: break; } if (schedule && !priv->busy) { err = vrt_schedule_xfer(dev); if (unlikely(err)) { uhc_submit_event(dev, UHC_EVT_ERROR, err); } } k_mem_slab_free(&uhc_vrt_slab, (void *)ev); } } static void sof_timer_handler(struct k_timer *timer) { struct uhc_vrt_data *priv = CONTAINER_OF(timer, struct uhc_vrt_data, sof_timer); vrt_event_submit(priv->dev, UHC_VRT_EVT_SOF, NULL); } static void vrt_device_act(const struct device *dev, const enum uvb_device_act act) { enum uhc_event_type type; switch (act) { case UVB_DEVICE_ACT_RWUP: type = UHC_EVT_RWUP; break; case UVB_DEVICE_ACT_FS: type = UHC_EVT_DEV_CONNECTED_FS; break; case UVB_DEVICE_ACT_HS: type = UHC_EVT_DEV_CONNECTED_HS; break; case UVB_DEVICE_ACT_REMOVED: type = UHC_EVT_DEV_REMOVED; break; 
default: type = UHC_EVT_ERROR; } uhc_submit_event(dev, type, 0); } static void uhc_vrt_uvb_cb(const void *const vrt_priv, const enum uvb_event_type type, const void *data) { const struct device *dev = vrt_priv; if (type == UVB_EVT_REPLY) { vrt_event_submit(dev, UHC_VRT_EVT_REPLY, data); } else if (type == UVB_EVT_DEVICE_ACT) { vrt_device_act(dev, POINTER_TO_INT(data)); } else { LOG_ERR("Unknown event %d for %p", type, dev); } } static int uhc_vrt_sof_enable(const struct device *dev) { /* TODO */ return 0; } /* Disable SOF generator and suspend bus */ static int uhc_vrt_bus_suspend(const struct device *dev) { struct uhc_vrt_data *priv = uhc_get_private(dev); k_timer_stop(&priv->sof_timer); return uvb_advert(priv->host_node, UVB_EVT_SUSPEND, NULL); } static int uhc_vrt_bus_reset(const struct device *dev) { struct uhc_vrt_data *priv = uhc_get_private(dev); k_timer_stop(&priv->sof_timer); return uvb_advert(priv->host_node, UVB_EVT_RESET, NULL); } static int uhc_vrt_bus_resume(const struct device *dev) { struct uhc_vrt_data *priv = uhc_get_private(dev); k_timer_init(&priv->sof_timer, sof_timer_handler, NULL); k_timer_start(&priv->sof_timer, K_MSEC(1), K_MSEC(1)); return uvb_advert(priv->host_node, UVB_EVT_RESUME, NULL); } static int uhc_vrt_enqueue(const struct device *dev, struct uhc_transfer *const xfer) { uhc_xfer_append(dev, xfer); vrt_event_submit(dev, UHC_VRT_EVT_XFER, NULL); return 0; } static int uhc_vrt_dequeue(const struct device *dev, struct uhc_transfer *const xfer) { /* TODO */ return 0; } static int uhc_vrt_init(const struct device *dev) { return 0; } static int uhc_vrt_enable(const struct device *dev) { struct uhc_vrt_data *priv = uhc_get_private(dev); return uvb_advert(priv->host_node, UVB_EVT_VBUS_READY, NULL); } static int uhc_vrt_disable(const struct device *dev) { struct uhc_vrt_data *priv = uhc_get_private(dev); return uvb_advert(priv->host_node, UVB_EVT_VBUS_REMOVED, NULL); } static int uhc_vrt_shutdown(const struct device *dev) { return 0; } 
static int uhc_vrt_lock(const struct device *dev) { return uhc_lock_internal(dev, K_FOREVER); } static int uhc_vrt_unlock(const struct device *dev) { return uhc_unlock_internal(dev); } static int uhc_vrt_driver_preinit(const struct device *dev) { struct uhc_vrt_data *priv = uhc_get_private(dev); struct uhc_data *data = dev->data; priv->dev = dev; k_mutex_init(&data->mutex); priv->host_node->priv = dev; k_fifo_init(&priv->fifo); k_work_init(&priv->work, xfer_work_handler); k_timer_init(&priv->sof_timer, sof_timer_handler, NULL); LOG_DBG("Virtual UHC pre-initialized"); return 0; } static const struct uhc_api uhc_vrt_api = { .lock = uhc_vrt_lock, .unlock = uhc_vrt_unlock, .init = uhc_vrt_init, .enable = uhc_vrt_enable, .disable = uhc_vrt_disable, .shutdown = uhc_vrt_shutdown, .bus_reset = uhc_vrt_bus_reset, .sof_enable = uhc_vrt_sof_enable, .bus_suspend = uhc_vrt_bus_suspend, .bus_resume = uhc_vrt_bus_resume, .ep_enqueue = uhc_vrt_enqueue, .ep_dequeue = uhc_vrt_dequeue, }; #define DT_DRV_COMPAT zephyr_uhc_virtual #define UHC_VRT_DEVICE_DEFINE(n) \ UVB_HOST_NODE_DEFINE(uhc_bc_##n, \ DT_NODE_FULL_NAME(DT_DRV_INST(n)), \ uhc_vrt_uvb_cb); \ \ static const struct uhc_vrt_config uhc_vrt_config_##n = { \ }; \ \ static struct uhc_vrt_data uhc_priv_##n = { \ .host_node = &uhc_bc_##n, \ }; \ \ static struct uhc_data uhc_data_##n = { \ .priv = &uhc_priv_##n, \ }; \ \ DEVICE_DT_INST_DEFINE(n, uhc_vrt_driver_preinit, NULL, \ &uhc_data_##n, &uhc_vrt_config_##n, \ POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \ &uhc_vrt_api); DT_INST_FOREACH_STATUS_OKAY(UHC_VRT_DEVICE_DEFINE) ```
/content/code_sandbox/drivers/usb/uhc/uhc_virtual.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,760
```c /* * */ /* * MAX3421E USB Peripheral/Host Controller with SPI Interface. * NOTE: Driver supports only host mode yet. */ #define DT_DRV_COMPAT maxim_max3421e_spi #include <string.h> #include <zephyr/kernel.h> #include <zephyr/init.h> #include <zephyr/sys/byteorder.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/spi.h> #include <zephyr/drivers/usb/uhc.h> #include "uhc_common.h" #include "uhc_max3421e.h" #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(max3421e, CONFIG_UHC_DRIVER_LOG_LEVEL); static K_KERNEL_STACK_DEFINE(drv_stack, CONFIG_MAX3421E_THREAD_STACK_SIZE); static struct k_thread drv_stack_data; #define MAX3421E_STATE_BUS_RESET 0 #define MAX3421E_STATE_BUS_RESUME 1 struct max3421e_data { struct gpio_callback gpio_cb; struct uhc_transfer *last_xfer; struct k_sem irq_sem; atomic_t state; uint16_t tog_in; uint16_t tog_out; uint8_t addr; uint8_t hirq; uint8_t hien; uint8_t mode; uint8_t hxfr; uint8_t hrsl; }; struct max3421e_config { struct spi_dt_spec dt_spi; struct gpio_dt_spec dt_int; struct gpio_dt_spec dt_rst; }; static int max3421e_read_hirq(const struct device *dev, const uint8_t reg, uint8_t *const data, const uint32_t count, bool update_hirq) { struct max3421e_data *priv = uhc_get_private(dev); const struct max3421e_config *config = dev->config; uint8_t cmd = MAX3421E_CMD_SPI_READ(reg); uint8_t hirq; int ret; const struct spi_buf cmd_buf = { .buf = &cmd, .len = sizeof(cmd) }; const struct spi_buf rx_buf[] = { { .buf = &hirq, .len = sizeof(hirq) }, { .buf = data, .len = count } }; const struct spi_buf_set tx = { .buffers = &cmd_buf, .count = 1 }; const struct spi_buf_set rx = { .buffers = rx_buf, .count = ARRAY_SIZE(rx_buf) }; ret = spi_transceive_dt(&config->dt_spi, &tx, &rx); if (unlikely(update_hirq)) { priv->hirq = hirq; } return ret; } static int max3421e_read(const struct device *dev, const uint8_t reg, uint8_t *const data, const uint32_t count) { return max3421e_read_hirq(dev, reg, data, count, false); } static int 
max3421e_write_byte(const struct device *dev, const uint8_t reg, const uint8_t val) { const struct max3421e_config *config = dev->config; uint8_t buf[2] = {MAX3421E_CMD_SPI_WRITE(reg), val}; const struct spi_buf cmd_buf = { .buf = &buf, .len = sizeof(buf) }; const struct spi_buf_set tx = { .buffers = &cmd_buf, .count = 1 }; return spi_write_dt(&config->dt_spi, &tx); } static int max3421e_write(const struct device *dev, const uint8_t reg, uint8_t *const data, const size_t count) { const struct max3421e_config *config = dev->config; uint8_t cmd = MAX3421E_CMD_SPI_WRITE(reg); const struct spi_buf cmd_buf[] = { { .buf = &cmd, .len = sizeof(cmd), }, { .buf = data, .len = count, }, }; const struct spi_buf_set tx = { .buffers = cmd_buf, .count = ARRAY_SIZE(cmd_buf), }; return spi_write_dt(&config->dt_spi, &tx); } static int max3421e_lock(const struct device *dev) { struct uhc_data *data = dev->data; return k_mutex_lock(&data->mutex, K_FOREVER); } static int max3421e_unlock(const struct device *dev) { struct uhc_data *data = dev->data; return k_mutex_unlock(&data->mutex); } /* Disable Host Interrupt */ static ALWAYS_INLINE int max3421e_hien_disable(const struct device *dev, const uint8_t hint) { struct max3421e_data *priv = uhc_get_private(dev); priv->hien &= ~hint; return max3421e_write_byte(dev, MAX3421E_REG_HIEN, priv->hien); } /* Set peripheral (device) address to be used in next transfer */ static ALWAYS_INLINE int max3421e_peraddr(const struct device *dev, const uint8_t addr) { struct max3421e_data *priv = uhc_get_private(dev); int ret = 0; if (priv->addr != addr) { /* * TODO: Consider how to force the update of toggle values * for the next transfer. Necessary if we want to support * multiple peripherals. 
*/ ret = max3421e_write_byte(dev, MAX3421E_REG_PERADDR, addr); if (ret == 0) { priv->addr = addr; } } return ret; } /* Update driver's knowledge about DATA PID */ static ALWAYS_INLINE void max3421e_tgl_update(const struct device *dev) { struct max3421e_data *priv = uhc_get_private(dev); uint8_t ep_idx = MAX3421E_EP(priv->hxfr); if (priv->hxfr & MAX3421E_OUTNIN) { if (priv->hrsl & MAX3421E_SNDTOGRD) { priv->tog_out |= BIT(ep_idx); } else { priv->tog_out &= ~BIT(ep_idx); } } else { if (priv->hrsl & MAX3421E_RCVTOGRD) { priv->tog_in |= BIT(ep_idx); } else { priv->tog_in &= ~BIT(ep_idx); } } LOG_DBG("tog_in 0x%02x tog_out 0x%02x last-hxfr 0x%02x hrsl 0x%02x", priv->tog_in, priv->tog_out, priv->hxfr, priv->hrsl); } /* Get DATA PID to be used for the next transfer */ static ALWAYS_INLINE uint8_t max3421e_tgl_next(const struct device *dev, const uint8_t hxfr) { struct max3421e_data *priv = uhc_get_private(dev); uint8_t ep_idx = MAX3421E_EP(hxfr); uint8_t hctl; /* Force DATA1 PID for the data stage of control transfer */ if (hxfr & MAX3421E_SETUP) { priv->tog_in |= BIT(0); priv->tog_out |= BIT(0); } if (hxfr & MAX3421E_OUTNIN) { hctl = (priv->tog_out & BIT(ep_idx)) ? MAX3421E_SNDTOG1 : MAX3421E_SNDTOG0; } else { hctl = (priv->tog_in & BIT(ep_idx)) ? 
MAX3421E_RCVTOG1 : MAX3421E_RCVTOG0; } return hctl; } static ALWAYS_INLINE int max3421e_hxfr_start(const struct device *dev, const uint8_t hxfr) { struct max3421e_data *priv = uhc_get_private(dev); if (priv->hxfr != hxfr) { uint8_t reg[2] = {0, hxfr}; /* Update DATA PID if transfer parameter changes */ max3421e_tgl_update(dev); reg[0] = max3421e_tgl_next(dev, hxfr); priv->hxfr = hxfr; LOG_DBG("hctl 0x%02x hxfr 0x%02x", reg[0], reg[1]); return max3421e_write(dev, MAX3421E_REG_HCTL, reg, sizeof(reg)); } return max3421e_write_byte(dev, MAX3421E_REG_HXFR, priv->hxfr); } static int max3421e_xfer_data(const struct device *dev, struct net_buf *const buf, const uint8_t ep) { const uint8_t ep_idx = USB_EP_GET_IDX(ep); int ret; if (USB_EP_DIR_IS_IN(ep)) { LOG_DBG("bulk in %p %u", buf, net_buf_tailroom(buf)); ret = max3421e_hxfr_start(dev, MAX3421E_HXFR_BULKIN(ep_idx)); } else { size_t len; len = MIN(MAX3421E_MAX_EP_SIZE, buf->len); LOG_DBG("bulk out %p %u", buf, len); ret = max3421e_write(dev, MAX3421E_REG_SNDFIFO, buf->data, len); if (ret) { return ret; } ret = max3421e_write_byte(dev, MAX3421E_REG_SNDBC, len); if (ret) { return ret; } /* * FIXME: Pull should happen after device ACKs the data, * move to max3421e_hrslt_success(). 
*/ net_buf_pull(buf, len); ret = max3421e_hxfr_start(dev, MAX3421E_HXFR_BULKOUT(ep_idx)); } return ret; } static int max3421e_xfer_control(const struct device *dev, struct uhc_transfer *const xfer, const uint8_t hrsl) { struct max3421e_data *priv = uhc_get_private(dev); struct net_buf *buf = xfer->buf; int ret; /* Just restart if device NAKed packet */ if (HRSLT_IS_NAK(hrsl)) { return max3421e_hxfr_start(dev, priv->hxfr); } if (xfer->stage == UHC_CONTROL_STAGE_SETUP) { LOG_DBG("Handle SETUP stage"); ret = max3421e_write(dev, MAX3421E_REG_SUDFIFO, xfer->setup_pkt, sizeof(xfer->setup_pkt)); if (ret) { return ret; } ret = max3421e_hxfr_start(dev, MAX3421E_HXFR_SETUP(0)); if (ret) { return ret; } return 0; } if (buf != NULL && xfer->stage == UHC_CONTROL_STAGE_DATA) { LOG_DBG("Handle DATA stage"); return max3421e_xfer_data(dev, buf, xfer->ep); } if (xfer->stage == UHC_CONTROL_STAGE_STATUS) { LOG_DBG("Handle STATUS stage"); if (USB_EP_DIR_IS_IN(xfer->ep)) { ret = max3421e_hxfr_start(dev, MAX3421E_HXFR_HSOUT(0)); } else { ret = max3421e_hxfr_start(dev, MAX3421E_HXFR_HSIN(0)); } return ret; } return -EINVAL; } static int max3421e_xfer_bulk(const struct device *dev, struct uhc_transfer *const xfer, const uint8_t hrsl) { struct max3421e_data *priv = uhc_get_private(dev); struct net_buf *buf = xfer->buf; /* Just restart if device NAKed packet */ if (HRSLT_IS_NAK(hrsl)) { return max3421e_hxfr_start(dev, priv->hxfr); } if (buf == NULL) { LOG_ERR("No buffer to handle"); return -ENODATA; } return max3421e_xfer_data(dev, buf, xfer->ep); } static int max3421e_schedule_xfer(const struct device *dev) { struct max3421e_data *priv = uhc_get_private(dev); const uint8_t hirq = priv->hirq; const uint8_t hrsl = priv->hrsl; if (priv->last_xfer == NULL) { int ret; priv->last_xfer = uhc_xfer_get_next(dev); if (priv->last_xfer == NULL) { LOG_DBG("Nothing to transfer"); return 0; } LOG_DBG("Next transfer %p", priv->last_xfer); ret = max3421e_peraddr(dev, priv->last_xfer->addr); if (ret) { 
return ret; } } if (hirq & MAX3421E_FRAME) { if (priv->last_xfer->timeout) { priv->last_xfer->timeout--; } else { LOG_INF("Transfer timeout"); } } /* * TODO: currently we only support control transfers and * treat all others as bulk. */ if (USB_EP_GET_IDX(priv->last_xfer->ep) == 0) { return max3421e_xfer_control(dev, priv->last_xfer, hrsl); } return max3421e_xfer_bulk(dev, priv->last_xfer, hrsl); } static void max3421e_xfer_drop_active(const struct device *dev, int err) { struct max3421e_data *priv = uhc_get_private(dev); if (priv->last_xfer) { uhc_xfer_return(dev, priv->last_xfer, err); priv->last_xfer = NULL; } } static int max3421e_hrslt_success(const struct device *dev) { struct max3421e_data *priv = uhc_get_private(dev); struct uhc_transfer *const xfer = priv->last_xfer; struct net_buf *buf = xfer->buf; bool finished = false; int err = 0; size_t len; uint8_t bc; switch (MAX3421E_HXFR_TYPE(priv->hxfr)) { case MAX3421E_HXFR_TYPE_SETUP: if (xfer->buf != NULL) { xfer->stage = UHC_CONTROL_STAGE_DATA; } else { xfer->stage = UHC_CONTROL_STAGE_STATUS; } break; case MAX3421E_HXFR_TYPE_HSOUT: LOG_DBG("HSOUT"); finished = true; break; case MAX3421E_HXFR_TYPE_HSIN: LOG_DBG("HSIN"); finished = true; break; case MAX3421E_HXFR_TYPE_ISOOUT: LOG_ERR("ISO OUT is not implemented"); k_panic(); break; case MAX3421E_HXFR_TYPE_ISOIN: LOG_ERR("ISO IN is not implemented"); k_panic(); break; case MAX3421E_HXFR_TYPE_BULKOUT: if (buf->len == 0) { LOG_INF("hrslt bulk out %u", buf->len); if (xfer->ep == USB_CONTROL_EP_OUT) { xfer->stage = UHC_CONTROL_STAGE_STATUS; } else { finished = true; } } break; case MAX3421E_HXFR_TYPE_BULKIN: err = max3421e_read(dev, MAX3421E_REG_RCVBC, &bc, sizeof(bc)); if (err) { break; } if (bc > net_buf_tailroom(buf)) { LOG_WRN("%u received bytes will be dropped", bc - net_buf_tailroom(buf)); } len = MIN(net_buf_tailroom(buf), bc); err = max3421e_read(dev, MAX3421E_REG_RCVFIFO, net_buf_add(buf, len), len); if (err) { break; } LOG_INF("bc %u tr %u", bc, 
net_buf_tailroom(buf)); if (bc < MAX3421E_MAX_EP_SIZE || !net_buf_tailroom(buf)) { LOG_INF("hrslt bulk in %u, %u", bc, len); if (xfer->ep == USB_CONTROL_EP_IN) { xfer->stage = UHC_CONTROL_STAGE_STATUS; } else { finished = true; } } break; } if (finished) { LOG_DBG("Transfer finished"); uhc_xfer_return(dev, xfer, 0); priv->last_xfer = NULL; } if (err) { max3421e_xfer_drop_active(dev, err); } return err; } static int max3421e_handle_hxfrdn(const struct device *dev) { struct max3421e_data *priv = uhc_get_private(dev); struct uhc_transfer *const xfer = priv->last_xfer; const uint8_t hrsl = priv->hrsl; int ret = 0; if (xfer == NULL) { LOG_ERR("No transfers to handle"); return -ENODATA; } switch (MAX3421E_HRSLT(hrsl)) { case MAX3421E_HR_NAK: /* * The transfer did not take place within * the specified number of frames. * * TODO: Transfer cancel request (xfer->cancel) * can be handled here as well. */ if (xfer->timeout == 0) { max3421e_xfer_drop_active(dev, -ETIMEDOUT); } break; case MAX3421E_HR_STALL: max3421e_xfer_drop_active(dev, -EPIPE); break; case MAX3421E_HR_TOGERR: LOG_WRN("Toggle error"); break; case MAX3421E_HR_SUCCESS: ret = max3421e_hrslt_success(dev); break; default: /* TODO: Handle all reasonalbe result codes */ max3421e_xfer_drop_active(dev, -EINVAL); ret = -EINVAL; break; } return ret; } static void max3421e_handle_condet(const struct device *dev) { struct max3421e_data *priv = uhc_get_private(dev); const uint8_t jk = priv->hrsl & MAX3421E_JKSTATUS_MASK; enum uhc_event_type type = UHC_EVT_ERROR; /* * JSTATUS:KSTATUS 0:0 - SE0 * JSTATUS:KSTATUS 0:1 - K (Resume) * JSTATUS:KSTATUS 1:0 - J (Idle) */ if (jk == 0) { /* Device disconnected */ type = UHC_EVT_DEV_REMOVED; } if (jk == MAX3421E_JSTATUS) { /* Device connected */ type = UHC_EVT_DEV_CONNECTED_FS; } if (jk == MAX3421E_KSTATUS) { /* Device connected */ type = UHC_EVT_DEV_CONNECTED_LS; } uhc_submit_event(dev, type, 0); } static void max3421e_bus_event(const struct device *dev) { struct max3421e_data *priv = 
uhc_get_private(dev); if (atomic_test_and_clear_bit(&priv->state, MAX3421E_STATE_BUS_RESUME)) { /* Resume operation done event */ uhc_submit_event(dev, UHC_EVT_RESUMED, 0); } if (atomic_test_and_clear_bit(&priv->state, MAX3421E_STATE_BUS_RESET)) { /* Reset operation done event */ uhc_submit_event(dev, UHC_EVT_RESETED, 0); } } static int max3421e_update_hrsl_hirq(const struct device *dev) { struct max3421e_data *priv = uhc_get_private(dev); int err; err = max3421e_read_hirq(dev, MAX3421E_REG_HRSL, &priv->hrsl, 1, true); /* Consider only enabled interrupts and RCVDAV bit (see RCVBC description) */ priv->hirq &= priv->hien | MAX3421E_RCVDAV; LOG_DBG("HIRQ 0x%02x HRSLT %d", priv->hirq, MAX3421E_HRSLT(priv->hrsl)); return err; } static int max3421e_clear_hirq(const struct device *dev, const uint8_t hirq) { return max3421e_write_byte(dev, MAX3421E_REG_HIRQ, hirq); } static int max3421e_handle_bus_irq(const struct device *dev) { struct max3421e_data *priv = uhc_get_private(dev); const uint8_t hirq = priv->hirq; int ret = 0; /* Suspend operation Done Interrupt (bus suspended) */ if (hirq & MAX3421E_SUSDN) { ret = max3421e_hien_disable(dev, MAX3421E_SUSDN); uhc_submit_event(dev, UHC_EVT_SUSPENDED, 0); } /* Peripheral Connect/Disconnect Interrupt */ if (hirq & MAX3421E_CONDET) { max3421e_handle_condet(dev); } /* Remote Wakeup Interrupt */ if (hirq & MAX3421E_RWU) { uhc_submit_event(dev, UHC_EVT_RWUP, 0); } /* Bus Reset or Bus Resume event */ if (hirq & MAX3421E_BUSEVENT) { max3421e_bus_event(dev); } return ret; } static void uhc_max3421e_thread(void *p1, void *p2, void *p3) { ARG_UNUSED(p2); ARG_UNUSED(p3); const struct device *dev = p1; struct max3421e_data *priv = uhc_get_private(dev); LOG_DBG("MAX3421E thread started"); while (true) { bool schedule = false; int err; k_sem_take(&priv->irq_sem, K_FOREVER); max3421e_lock(dev); /* * Get HRSL and HIRQ values, do not perform any operation * that changes the state of the bus yet. 
*/ err = max3421e_update_hrsl_hirq(dev); if (unlikely(err)) { uhc_submit_event(dev, UHC_EVT_ERROR, err); } /* Host Transfer Done Interrupt */ if (priv->hirq & MAX3421E_HXFRDN) { err = max3421e_handle_hxfrdn(dev); schedule = true; } /* Frame Generator Interrupt */ if (priv->hirq & MAX3421E_FRAME) { schedule = HRSLT_IS_BUSY(priv->hrsl) ? false : true; } /* Shorten the if path a little */ if (priv->hirq & ~(MAX3421E_FRAME | MAX3421E_HXFRDN)) { err = max3421e_handle_bus_irq(dev); if (unlikely(err)) { uhc_submit_event(dev, UHC_EVT_ERROR, err); } } /* Clear interrupts and schedule new bus transfer */ err = max3421e_clear_hirq(dev, priv->hirq); if (unlikely(err)) { uhc_submit_event(dev, UHC_EVT_ERROR, err); } if (schedule) { err = max3421e_schedule_xfer(dev); if (unlikely(err)) { uhc_submit_event(dev, UHC_EVT_ERROR, err); } } max3421e_unlock(dev); } } static void max3421e_gpio_cb(const struct device *dev, struct gpio_callback *cb, uint32_t pins) { struct max3421e_data *priv = CONTAINER_OF(cb, struct max3421e_data, gpio_cb); k_sem_give(&priv->irq_sem); } /* Enable SOF generator */ static int max3421e_sof_enable(const struct device *dev) { struct max3421e_data *priv = uhc_get_private(dev); if (priv->mode & MAX3421E_SOFKAENAB) { return -EALREADY; } priv->mode |= MAX3421E_SOFKAENAB; return max3421e_write_byte(dev, MAX3421E_REG_MODE, priv->mode); } /* Disable SOF generator and suspend bus */ static int max3421e_bus_suspend(const struct device *dev) { struct max3421e_data *priv = uhc_get_private(dev); if (!(priv->mode & MAX3421E_SOFKAENAB)) { return -EALREADY; } priv->hien |= MAX3421E_SUSDN; priv->mode &= ~MAX3421E_SOFKAENAB; uint8_t tmp[3] = {MAX3421E_SUSDN, priv->hien, priv->mode}; return max3421e_write(dev, MAX3421E_REG_HIRQ, tmp, sizeof(tmp)); } /* Signal bus reset, 50ms SE0 signal */ static int max3421e_bus_reset(const struct device *dev) { struct max3421e_data *priv = uhc_get_private(dev); int ret; if (atomic_test_bit(&priv->state, MAX3421E_STATE_BUS_RESUME)) { return 
-EBUSY; } ret = max3421e_write_byte(dev, MAX3421E_REG_HCTL, MAX3421E_BUSRST); atomic_set_bit(&priv->state, MAX3421E_STATE_BUS_RESET); return ret; } /* Signal bus resume event, 20ms K-state + low-speed EOP */ static int max3421e_bus_resume(const struct device *dev) { struct max3421e_data *priv = uhc_get_private(dev); int ret; if (atomic_test_bit(&priv->state, MAX3421E_STATE_BUS_RESET)) { return -EBUSY; } ret = max3421e_write_byte(dev, MAX3421E_REG_HCTL, MAX3421E_SIGRSM); atomic_set_bit(&priv->state, MAX3421E_STATE_BUS_RESUME); return ret; } static int max3421e_enqueue(const struct device *dev, struct uhc_transfer *const xfer) { return uhc_xfer_append(dev, xfer); } static int max3421e_dequeue(const struct device *dev, struct uhc_transfer *const xfer) { /* TODO */ return 0; } static int max3421e_reset(const struct device *dev) { const struct max3421e_config *config = dev->config; int ret; if (config->dt_rst.port) { gpio_pin_set_dt(&config->dt_rst, 1); gpio_pin_set_dt(&config->dt_rst, 0); } else { LOG_DBG("Reset MAX3421E using CHIPRES"); ret = max3421e_write_byte(dev, MAX3421E_REG_USBCTL, MAX3421E_CHIPRES); ret |= max3421e_write_byte(dev, MAX3421E_REG_USBCTL, 0); if (ret) { return ret; } } for (int i = 0; i < CONFIG_MAX3421E_OSC_WAIT_RETRIES; i++) { uint8_t usbirq; ret = max3421e_read(dev, MAX3421E_REG_USBIRQ, &usbirq, sizeof(usbirq)); LOG_DBG("USBIRQ 0x%02x", usbirq); if (usbirq & MAX3421E_OSCOKIRQ) { return 0; } k_msleep(3); } return -EIO; } static int max3421e_pinctl_setup(const struct device *dev) { /* Full-Duplex SPI, INT pin edge active, GPX pin signals SOF */ const uint8_t pinctl = MAX3421E_FDUPSPI | MAX3421E_GPXB | MAX3421E_GPXA; uint8_t tmp; int ret; ret = max3421e_write_byte(dev, MAX3421E_REG_PINCTL, pinctl); if (unlikely(ret)) { return ret; } ret = max3421e_read(dev, MAX3421E_REG_PINCTL, &tmp, sizeof(tmp)); if (unlikely(ret)) { return ret; } if (unlikely(tmp != pinctl)) { LOG_ERR("Failed to verify PINCTL register 0x%02x vs 0x%02x", pinctl, tmp); return -EIO; 
} return 0; } /* * Cache MODE and HIEN register values to avoid having to read it * before modifying register bits. */ static int max3421e_mode_setup(const struct device *dev) { /* * MODE register defaults: * host mode, connect internal D+ and D- pulldown resistors to ground */ const uint8_t mode = MAX3421E_DPPULLDN | MAX3421E_DMPULLDN | MAX3421E_DELAYISO | MAX3421E_HOST; struct max3421e_data *priv = uhc_get_private(dev); uint8_t tmp; int ret; ret = max3421e_write_byte(dev, MAX3421E_REG_MODE, mode); if (ret) { return ret; } ret = max3421e_read(dev, MAX3421E_REG_MODE, &tmp, sizeof(tmp)); if (ret) { return ret; } if (tmp != mode) { LOG_ERR("Failed to verify MODE register 0x%02x vs 0x%02x", mode, tmp); return -EIO; } priv->mode = mode; return 0; } static int max3421e_hien_setup(const struct device *dev) { /* Host interrupts enabled by default */ const uint8_t hien = MAX3421E_HXFRDN | MAX3421E_FRAME | MAX3421E_CONDET | MAX3421E_RWU | MAX3421E_BUSEVENT; struct max3421e_data *priv = uhc_get_private(dev); uint8_t tmp; int ret; ret = max3421e_write_byte(dev, MAX3421E_REG_HIEN, hien); if (ret) { return ret; } ret = max3421e_read(dev, MAX3421E_REG_HIEN, &tmp, sizeof(tmp)); if (ret) { return ret; } if (tmp != hien) { LOG_ERR("Failed to verify HIEN register 0x%02x vs 0x%02x", hien, tmp); return -EIO; } priv->hien = hien; return 0; } static int max3421e_enable_int_output(const struct device *dev) { const uint8_t cpuctl = MAX3421E_IE; uint8_t tmp; int ret; /* Enable MAX3421E INT output pin */ ret = max3421e_write_byte(dev, MAX3421E_REG_CPUCTL, cpuctl); if (ret) { return ret; } ret = max3421e_read(dev, MAX3421E_REG_CPUCTL, &tmp, sizeof(tmp)); if (ret) { return ret; } if (tmp != cpuctl) { LOG_ERR("Failed to verify CPUCTL register 0x%02x vs 0x%02x", cpuctl, tmp); return -EIO; } return 0; } static int uhc_max3421e_init(const struct device *dev) { struct max3421e_data *priv = uhc_get_private(dev); uint8_t rev; int ret; ret = max3421e_pinctl_setup(dev); if (ret) { LOG_ERR("Failed to 
setup pinctl"); return ret; } ret = max3421e_read(dev, MAX3421E_REG_REVISION, &rev, sizeof(rev)); if (ret) { LOG_ERR("Failed to read revision"); return ret; } ret = max3421e_reset(dev); if (ret) { LOG_ERR("Failed to reset MAX3421E"); return ret; } ret = max3421e_mode_setup(dev); if (ret) { LOG_ERR("Failed to setup controller mode"); return ret; } ret = max3421e_hien_setup(dev); if (ret) { LOG_ERR("Failed to setup interrupts"); return ret; } ret = max3421e_enable_int_output(dev); if (ret) { LOG_ERR("Failed to enable INT output"); return ret; } LOG_INF("REV 0x%x, MODE 0x%02x, HIEN 0x%02x", rev, priv->mode, priv->hien); priv->addr = 0; /* Sample bus if device is already connected */ return max3421e_write_byte(dev, MAX3421E_REG_HCTL, MAX3421E_SAMPLEBUS); } static int uhc_max3421e_enable(const struct device *dev) { /* TODO */ return 0; } static int uhc_max3421e_disable(const struct device *dev) { /* TODO */ return 0; } static int uhc_max3421e_shutdown(const struct device *dev) { /* TODO */ return 0; } static int max3421e_driver_init(const struct device *dev) { const struct max3421e_config *config = dev->config; struct uhc_data *data = dev->data; struct max3421e_data *priv = data->priv; int ret; if (config->dt_rst.port) { if (!gpio_is_ready_dt(&config->dt_rst)) { LOG_ERR("GPIO device %s not ready", config->dt_rst.port->name); return -EIO; } ret = gpio_pin_configure_dt(&config->dt_rst, GPIO_OUTPUT_INACTIVE); if (ret) { LOG_ERR("Failed to configure GPIO pin %u", config->dt_rst.pin); return ret; } } if (!spi_is_ready_dt(&config->dt_spi)) { LOG_ERR("SPI device %s not ready", config->dt_spi.bus->name); return -EIO; } if (!gpio_is_ready_dt(&config->dt_int)) { LOG_ERR("GPIO device %s not ready", config->dt_int.port->name); return -EIO; } ret = gpio_pin_configure_dt(&config->dt_int, GPIO_INPUT); if (ret) { LOG_ERR("Failed to configure GPIO pin %u", config->dt_int.pin); return ret; } gpio_init_callback(&priv->gpio_cb, max3421e_gpio_cb, BIT(config->dt_int.pin)); ret = 
gpio_add_callback(config->dt_int.port, &priv->gpio_cb); if (ret) { return ret; } ret = gpio_pin_interrupt_configure_dt(&config->dt_int, GPIO_INT_EDGE_TO_ACTIVE); if (ret) { return ret; } k_mutex_init(&data->mutex); k_thread_create(&drv_stack_data, drv_stack, K_KERNEL_STACK_SIZEOF(drv_stack), uhc_max3421e_thread, (void *)dev, NULL, NULL, K_PRIO_COOP(2), 0, K_NO_WAIT); k_thread_name_set(&drv_stack_data, "uhc_max3421e"); LOG_DBG("MAX3421E CPU interface initialized"); return 0; } static const struct uhc_api max3421e_uhc_api = { .lock = max3421e_lock, .unlock = max3421e_unlock, .init = uhc_max3421e_init, .enable = uhc_max3421e_enable, .disable = uhc_max3421e_disable, .shutdown = uhc_max3421e_shutdown, .bus_reset = max3421e_bus_reset, .sof_enable = max3421e_sof_enable, .bus_suspend = max3421e_bus_suspend, .bus_resume = max3421e_bus_resume, .ep_enqueue = max3421e_enqueue, .ep_dequeue = max3421e_dequeue, }; static struct max3421e_data max3421e_data = { .irq_sem = Z_SEM_INITIALIZER(max3421e_data.irq_sem, 0, 1), }; static struct uhc_data max3421e_uhc_data = { .priv = &max3421e_data, }; static const struct max3421e_config max3421e_cfg = { .dt_spi = SPI_DT_SPEC_INST_GET(0, SPI_WORD_SET(8) | SPI_TRANSFER_MSB, 0), .dt_int = GPIO_DT_SPEC_INST_GET(0, int_gpios), .dt_rst = GPIO_DT_SPEC_INST_GET_OR(0, reset_gpios, {0}), }; DEVICE_DT_INST_DEFINE(0, max3421e_driver_init, NULL, &max3421e_uhc_data, &max3421e_cfg, POST_KERNEL, 99, &max3421e_uhc_api); ```
/content/code_sandbox/drivers/usb/uhc/uhc_max3421e.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
8,138
```c /* * */ /* PI3USB9201 USB BC 1.2 Charger Detector driver. */ #define DT_DRV_COMPAT diodes_pi3usb9201 #include <zephyr/device.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/i2c.h> #include <zephyr/drivers/usb/usb_bc12.h> #include <zephyr/logging/log.h> #include "bc12_pi3usb9201.h" LOG_MODULE_REGISTER(PI3USB9201, CONFIG_USB_BC12_LOG_LEVEL); /* Constant configuration data */ struct pi3usb9201_config { struct i2c_dt_spec i2c; struct gpio_dt_spec intb_gpio; enum bc12_type charging_mode; }; /* Run-time configuration data */ struct pi3usb9201_data { const struct device *dev; struct k_work work; struct bc12_partner_state partner_state; struct gpio_callback gpio_cb; bc12_callback_t result_cb; void *result_cb_data; }; enum pi3usb9201_client_sts { CHG_OTHER = 0, CHG_2_4A = 1, CHG_2_0A = 2, CHG_1_0A = 3, CHG_RESERVED = 4, CHG_CDP = 5, CHG_SDP = 6, CHG_DCP = 7, }; struct bc12_status { enum bc12_type partner_type; int current_limit; }; /* * The USB Type-C specification limits the maximum amount of current from BC 1.2 * suppliers to 1.5A. Technically, proprietary methods are not allowed, but we * will continue to allow those. */ static const struct bc12_status bc12_chg_limits[] = { /* For unknown chargers return Isusp. */ [CHG_OTHER] = {BC12_TYPE_PROPRIETARY, BC12_CURR_UA(BC12_CHARGER_MIN_CURR_UA)}, [CHG_2_4A] = {BC12_TYPE_PROPRIETARY, BC12_CURR_UA(2400000)}, [CHG_2_0A] = {BC12_TYPE_PROPRIETARY, BC12_CURR_UA(2000000)}, [CHG_1_0A] = {BC12_TYPE_PROPRIETARY, BC12_CURR_UA(1000000)}, [CHG_RESERVED] = {BC12_TYPE_NONE, 0}, [CHG_CDP] = {BC12_TYPE_CDP, BC12_CURR_UA(1500000)}, /* BC1.2 driver contract specifies to return Isusp for SDP ports. 
*/ [CHG_SDP] = {BC12_TYPE_SDP, BC12_CURR_UA(BC12_CHARGER_MIN_CURR_UA)}, [CHG_DCP] = {BC12_TYPE_DCP, BC12_CURR_UA(1500000)}, }; static const enum pi3usb9201_mode charging_mode_to_host_mode[] = { [BC12_TYPE_NONE] = PI3USB9201_POWER_DOWN, [BC12_TYPE_SDP] = PI3USB9201_SDP_HOST_MODE, [BC12_TYPE_DCP] = PI3USB9201_DCP_HOST_MODE, [BC12_TYPE_CDP] = PI3USB9201_CDP_HOST_MODE, /* Invalid modes configured to power down the device. */ [BC12_TYPE_PROPRIETARY] = PI3USB9201_POWER_DOWN, [BC12_TYPE_UNKNOWN] = PI3USB9201_POWER_DOWN, }; BUILD_ASSERT(ARRAY_SIZE(charging_mode_to_host_mode) == BC12_TYPE_COUNT); static int pi3usb9201_interrupt_enable(const struct device *dev, const bool enable) { const struct pi3usb9201_config *cfg = dev->config; /* Clear the interrupt mask bit to enable the interrupt */ return i2c_reg_update_byte_dt(&cfg->i2c, PI3USB9201_REG_CTRL_1, PI3USB9201_REG_CTRL_1_INT_MASK, enable ? 0 : PI3USB9201_REG_CTRL_1_INT_MASK); } static int pi3usb9201_bc12_detect_ctrl(const struct device *dev, const bool enable) { const struct pi3usb9201_config *cfg = dev->config; return i2c_reg_update_byte_dt(&cfg->i2c, PI3USB9201_REG_CTRL_2, PI3USB9201_REG_CTRL_2_START_DET, enable ? PI3USB9201_REG_CTRL_2_START_DET : 0); } static int pi3usb9201_bc12_usb_switch(const struct device *dev, bool enable) { const struct pi3usb9201_config *cfg = dev->config; /* USB data switch enabled when PI3USB9201_REG_CTRL_2_AUTO_SW is clear */ return i2c_reg_update_byte_dt(&cfg->i2c, PI3USB9201_REG_CTRL_2, PI3USB9201_REG_CTRL_2_START_DET, enable ? 
0 : PI3USB9201_REG_CTRL_2_AUTO_SW); } static int pi3usb9201_set_mode(const struct device *dev, enum pi3usb9201_mode mode) { const struct pi3usb9201_config *cfg = dev->config; return i2c_reg_update_byte_dt(&cfg->i2c, PI3USB9201_REG_CTRL_1, PI3USB9201_REG_CTRL_1_MODE_MASK << PI3USB9201_REG_CTRL_1_MODE_SHIFT, mode << PI3USB9201_REG_CTRL_1_MODE_SHIFT); } static int pi3usb9201_get_mode(const struct device *dev, enum pi3usb9201_mode *const mode) { const struct pi3usb9201_config *cfg = dev->config; int rv; uint8_t ctrl1; rv = i2c_reg_read_byte_dt(&cfg->i2c, PI3USB9201_REG_CTRL_1, &ctrl1); if (rv < 0) { return rv; } ctrl1 >>= PI3USB9201_REG_CTRL_1_MODE_SHIFT; ctrl1 &= PI3USB9201_REG_CTRL_1_MODE_MASK; *mode = ctrl1; return 0; } static int pi3usb9201_get_status(const struct device *dev, uint8_t *const client, uint8_t *const host) { const struct pi3usb9201_config *cfg = dev->config; uint8_t status; int rv; rv = i2c_reg_read_byte_dt(&cfg->i2c, PI3USB9201_REG_CLIENT_STS, &status); if (rv < 0) { return rv; } if (client != NULL) { *client = status; } rv = i2c_reg_read_byte_dt(&cfg->i2c, PI3USB9201_REG_HOST_STS, &status); if (rv < 0) { return rv; } if (host != NULL) { *host = status; } return 0; } static void pi3usb9201_notify_callback(const struct device *dev, struct bc12_partner_state *const state) { struct pi3usb9201_data *pi3usb9201_data = dev->data; if (pi3usb9201_data->result_cb) { pi3usb9201_data->result_cb(dev, state, pi3usb9201_data->result_cb_data); } } static bool pi3usb9201_partner_has_changed(const struct device *dev, struct bc12_partner_state *const state) { struct pi3usb9201_data *pi3usb9201_data = dev->data; /* Always notify when clearing out partner state */ if (!state) { return true; } if (state->bc12_role != pi3usb9201_data->partner_state.bc12_role) { return true; } if (state->bc12_role == BC12_PORTABLE_DEVICE && pi3usb9201_data->partner_state.type != state->type) { return true; } if (state->bc12_role == BC12_CHARGING_PORT && 
pi3usb9201_data->partner_state.pd_partner_connected != state->pd_partner_connected) { return true; } return false; } /** * @brief Notify the application about changes to the BC1.2 partner. * * @param dev BC1.2 device instance * @param state New partner state information. Set to NULL to indicate no * partner is connected. */ static void pi3usb9201_update_charging_partner(const struct device *dev, struct bc12_partner_state *const state) { struct pi3usb9201_data *pi3usb9201_data = dev->data; if (!pi3usb9201_partner_has_changed(dev, state)) { /* No change to the partner */ return; } if (state) { /* Now update callback with the new partner type. */ pi3usb9201_data->partner_state = *state; pi3usb9201_notify_callback(dev, state); } else { pi3usb9201_data->partner_state.bc12_role = BC12_DISCONNECTED; pi3usb9201_notify_callback(dev, NULL); } } static int pi3usb9201_client_detect_start(const struct device *dev) { int rv; /* * Read both status registers to ensure that all interrupt indications * are cleared prior to starting bc1.2 detection. */ pi3usb9201_get_status(dev, NULL, NULL); /* Put pi3usb9201 into client mode */ rv = pi3usb9201_set_mode(dev, PI3USB9201_CLIENT_MODE); if (rv < 0) { return rv; } /* Have pi3usb9201 start bc1.2 detection */ rv = pi3usb9201_bc12_detect_ctrl(dev, true); if (rv < 0) { return rv; } /* Enable interrupt to wake task when detection completes */ return pi3usb9201_interrupt_enable(dev, true); } static void pi3usb9201_client_detect_finish(const struct device *dev, const int status) { struct bc12_partner_state new_partner_state; int bit_pos; bool enable_usb_data; new_partner_state.bc12_role = BC12_PORTABLE_DEVICE; /* Set charge voltage to 5V */ new_partner_state.voltage_uv = BC12_CHARGER_VOLTAGE_UV; /* * Find set bit position. Note that this function is only called if a * bit was set in client_status, so bit_pos won't be negative. 
*/ bit_pos = __builtin_ffs(status) - 1; new_partner_state.current_ua = bc12_chg_limits[bit_pos].current_limit; new_partner_state.type = bc12_chg_limits[bit_pos].partner_type; LOG_DBG("client status = 0x%x, current = %d mA, type = %d", status, new_partner_state.current_ua, new_partner_state.type); /* bc1.2 is complete and start bit does not auto clear */ if (pi3usb9201_bc12_detect_ctrl(dev, false) < 0) { LOG_ERR("failed to clear client detect"); } /* If DCP mode, disable USB swtich */ if (status & BIT(CHG_DCP)) { enable_usb_data = false; } else { enable_usb_data = true; } if (pi3usb9201_bc12_usb_switch(dev, enable_usb_data) < 0) { LOG_ERR("failed to set USB data mode"); } /* Inform charge manager of new supplier type and current limit */ pi3usb9201_update_charging_partner(dev, &new_partner_state); } static void pi3usb9201_host_interrupt(const struct device *dev, uint8_t host_status) { const struct pi3usb9201_config *pi3usb9201_config = dev->config; struct bc12_partner_state partner_state; switch (pi3usb9201_config->charging_mode) { case BC12_TYPE_NONE: /* * For USB-C connections, enable the USB data path * TODO - Provide a devicetree property indicating * whether the USB data path is supported. */ pi3usb9201_set_mode(dev, PI3USB9201_USB_PATH_ON); break; case BC12_TYPE_CDP: if (IS_ENABLED(CONFIG_USB_BC12_PI3USB9201_CDP_ERRATA)) { /* * Switch to SDP after device is plugged in to avoid * noise (pulse on D-) causing USB disconnect */ if (host_status & PI3USB9201_REG_HOST_STS_DEV_PLUG) { pi3usb9201_set_mode(dev, PI3USB9201_SDP_HOST_MODE); } /* * Switch to CDP after device is unplugged so we * advertise higher power available for next device. 
*/ if (host_status & PI3USB9201_REG_HOST_STS_DEV_UNPLUG) { pi3usb9201_set_mode(dev, PI3USB9201_CDP_HOST_MODE); } } __fallthrough; case BC12_TYPE_SDP: /* Plug/unplug events only valid for CDP and SDP modes */ if (host_status & PI3USB9201_REG_HOST_STS_DEV_PLUG) { partner_state.bc12_role = BC12_CHARGING_PORT; partner_state.pd_partner_connected = true; pi3usb9201_update_charging_partner(dev, &partner_state); } if (host_status & PI3USB9201_REG_HOST_STS_DEV_UNPLUG) { partner_state.bc12_role = BC12_CHARGING_PORT; partner_state.pd_partner_connected = false; pi3usb9201_update_charging_partner(dev, &partner_state); } break; default: break; } } static int pi3usb9201_disconnect(const struct device *dev) { int rv; /* Ensure USB switch auto-on is enabled */ rv = pi3usb9201_bc12_usb_switch(dev, true); if (rv < 0) { return rv; } /* Put pi3usb9201 into its power down mode */ rv = pi3usb9201_set_mode(dev, PI3USB9201_POWER_DOWN); if (rv < 0) { return rv; } /* The start bc1.2 bit does not auto clear */ rv = pi3usb9201_bc12_detect_ctrl(dev, false); if (rv < 0) { return rv; } /* Mask interrupts until next bc1.2 detection event */ rv = pi3usb9201_interrupt_enable(dev, false); if (rv < 0) { return rv; } /* * Let the application know there's no more charge available for the * supplier type that was most recently detected. */ pi3usb9201_update_charging_partner(dev, NULL); return 0; } static int pi3usb9201_set_portable_device(const struct device *dev) { int rv; /* Disable interrupts during mode change */ rv = pi3usb9201_interrupt_enable(dev, false); if (rv < 0) { return rv; } if (pi3usb9201_client_detect_start(dev) < 0) { struct bc12_partner_state partner_state; /* * VBUS is present, but starting bc1.2 detection failed * for some reason. Set the partner type to unknown limit * current to the minimum allowed for a suspended USB device. 
*/ partner_state.bc12_role = BC12_PORTABLE_DEVICE; partner_state.voltage_uv = BC12_CHARGER_VOLTAGE_UV; partner_state.current_ua = BC12_CHARGER_MIN_CURR_UA; partner_state.type = BC12_TYPE_UNKNOWN; /* Save supplier type and notify callbacks */ pi3usb9201_update_charging_partner(dev, &partner_state); LOG_ERR("bc1.2 detection failed, using defaults"); return -EIO; } return 0; } static int pi3usb9201_set_charging_mode(const struct device *dev) { const struct pi3usb9201_config *pi3usb9201_config = dev->config; struct bc12_partner_state partner_state; enum pi3usb9201_mode current_mode; enum pi3usb9201_mode desired_mode; int rv; if (pi3usb9201_config->charging_mode == BC12_TYPE_NONE) { /* * For USB-C connections, enable the USB data path when configured * as a downstream facing port but charging is disabled. * * TODO - Provide a devicetree property indicating * whether the USB data path is supported. */ return pi3usb9201_set_mode(dev, PI3USB9201_USB_PATH_ON); } /* * When enabling charging mode for this port, clear out information * regarding any charging partners. */ partner_state.bc12_role = BC12_CHARGING_PORT; partner_state.pd_partner_connected = false; pi3usb9201_update_charging_partner(dev, &partner_state); rv = pi3usb9201_interrupt_enable(dev, false); if (rv < 0) { return rv; } rv = pi3usb9201_get_mode(dev, &current_mode); if (rv < 0) { return rv; } desired_mode = charging_mode_to_host_mode[pi3usb9201_config->charging_mode]; if (current_mode != desired_mode) { LOG_DBG("Set host mode to %d", desired_mode); /* * Read both status registers to ensure that all * interrupt indications are cleared prior to starting * charging port (host) mode. 
*/ rv = pi3usb9201_get_status(dev, NULL, NULL); if (rv < 0) { return rv; } rv = pi3usb9201_set_mode(dev, desired_mode); if (rv < 0) { return rv; } } rv = pi3usb9201_interrupt_enable(dev, true); if (rv < 0) { return rv; } return 0; } static void pi3usb9201_isr_work(struct k_work *item) { struct pi3usb9201_data *pi3usb9201_data = CONTAINER_OF(item, struct pi3usb9201_data, work); const struct device *dev = pi3usb9201_data->dev; uint8_t client; uint8_t host; int rv; rv = pi3usb9201_get_status(dev, &client, &host); if (rv < 0) { LOG_ERR("Failed to get host/client status"); return; } if (client != 0) { /* * Any bit set in client status register indicates that * BC1.2 detection has completed. */ pi3usb9201_client_detect_finish(dev, client); } if (host != 0) { pi3usb9201_host_interrupt(dev, host); } } static void pi3usb9201_gpio_callback(const struct device *dev, struct gpio_callback *cb, uint32_t pins) { struct pi3usb9201_data *pi3usb9201_data = CONTAINER_OF(cb, struct pi3usb9201_data, gpio_cb); k_work_submit(&pi3usb9201_data->work); } static int pi3usb9201_set_role(const struct device *dev, const enum bc12_role role) { switch (role) { case BC12_DISCONNECTED: return pi3usb9201_disconnect(dev); case BC12_PORTABLE_DEVICE: return pi3usb9201_set_portable_device(dev); case BC12_CHARGING_PORT: return pi3usb9201_set_charging_mode(dev); default: LOG_ERR("unsupported BC12 role: %d", role); return -EINVAL; } return 0; } int pi3usb9201_set_result_cb(const struct device *dev, bc12_callback_t cb, void *const user_data) { struct pi3usb9201_data *pi3usb9201_data = dev->data; pi3usb9201_data->result_cb = cb; pi3usb9201_data->result_cb_data = user_data; return 0; } static const struct bc12_driver_api pi3usb9201_driver_api = { .set_role = pi3usb9201_set_role, .set_result_cb = pi3usb9201_set_result_cb, }; static int pi3usb9201_init(const struct device *dev) { const struct pi3usb9201_config *cfg = dev->config; struct pi3usb9201_data *pi3usb9201_data = dev->data; int rv; if 
(!i2c_is_ready_dt(&cfg->i2c)) { LOG_ERR("Bus device is not ready."); return -ENODEV; } if (!gpio_is_ready_dt(&cfg->intb_gpio)) { LOG_ERR("intb_gpio device is not ready."); return -ENODEV; } pi3usb9201_data->dev = dev; /* * Set most recent bc1.2 detection type result to * BC12_DISCONNECTED for the port. */ pi3usb9201_data->partner_state.bc12_role = BC12_DISCONNECTED; rv = gpio_pin_configure_dt(&cfg->intb_gpio, GPIO_INPUT); if (rv < 0) { LOG_DBG("Failed to set gpio callback."); return rv; } gpio_init_callback(&pi3usb9201_data->gpio_cb, pi3usb9201_gpio_callback, BIT(cfg->intb_gpio.pin)); k_work_init(&pi3usb9201_data->work, pi3usb9201_isr_work); rv = gpio_add_callback(cfg->intb_gpio.port, &pi3usb9201_data->gpio_cb); if (rv < 0) { LOG_DBG("Failed to set gpio callback."); return rv; } rv = gpio_pin_interrupt_configure_dt(&cfg->intb_gpio, GPIO_INT_EDGE_FALLING); if (rv < 0) { LOG_DBG("Failed to configure gpio interrupt."); return rv; } /* * The is no specific initialization required for the pi3usb9201 other * than disabling the interrupt. */ return pi3usb9201_interrupt_enable(dev, false); } #define PI2USB9201_DEFINE(inst) \ static struct pi3usb9201_data pi3usb9201_data_##inst; \ \ static const struct pi3usb9201_config pi3usb9201_config_##inst = { \ .i2c = I2C_DT_SPEC_INST_GET(inst), \ .intb_gpio = GPIO_DT_SPEC_INST_GET(inst, intb_gpios), \ .charging_mode = DT_INST_STRING_UPPER_TOKEN(inst, charging_mode), \ }; \ \ DEVICE_DT_INST_DEFINE(inst, pi3usb9201_init, NULL, &pi3usb9201_data_##inst, \ &pi3usb9201_config_##inst, POST_KERNEL, \ CONFIG_APPLICATION_INIT_PRIORITY, &pi3usb9201_driver_api); DT_INST_FOREACH_STATUS_OKAY(PI2USB9201_DEFINE) ```
/content/code_sandbox/drivers/usb/bc12/bc12_pi3usb9201.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,052
```objective-c /* * */ /* PI3USB9201 USB BC 1.2 Charger Detector driver definitions */ #ifndef ZEPHYR_INCLUDE_USB_BC12_PI3USB9201_H #define ZEPHYR_INCLUDE_USB_BC12_PI3USB9201_H #define PI3USB9201_REG_CTRL_1 0x0 #define PI3USB9201_REG_CTRL_2 0x1 #define PI3USB9201_REG_CLIENT_STS 0x2 #define PI3USB9201_REG_HOST_STS 0x3 /* Flags */ #define PI3USB9201_ALWAYS_POWERED BIT(0) /* Control_1 register bit definitions */ #define PI3USB9201_REG_CTRL_1_INT_MASK BIT(0) #define PI3USB9201_REG_CTRL_1_MODE_SHIFT 1 #define PI3USB9201_REG_CTRL_1_MODE_MASK BIT_MASK(3) /* Control_2 register bit definitions */ #define PI3USB9201_REG_CTRL_2_AUTO_SW BIT(1) #define PI3USB9201_REG_CTRL_2_START_DET BIT(3) /* Host status register bit definitions */ #define PI3USB9201_REG_HOST_STS_BC12_DET BIT(0) #define PI3USB9201_REG_HOST_STS_DEV_PLUG BIT(1) #define PI3USB9201_REG_HOST_STS_DEV_UNPLUG BIT(2) enum pi3usb9201_mode { PI3USB9201_POWER_DOWN, PI3USB9201_SDP_HOST_MODE, PI3USB9201_DCP_HOST_MODE, PI3USB9201_CDP_HOST_MODE, PI3USB9201_CLIENT_MODE, PI3USB9201_RESERVED_1, PI3USB9201_RESERVED_2, PI3USB9201_USB_PATH_ON, }; #endif /* ZEPHYR_INCLUDE_USB_BC12_PI3USB9201_H */ ```
/content/code_sandbox/drivers/usb/bc12/bc12_pi3usb9201.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
393
```unknown menuconfig USB_BC12 bool "USB BC1.2 Drivers" help Enable USB BC1.2 (battery charging detection) drivers. if USB_BC12 module = USB_BC12 module-str = usb_bc12 source "subsys/logging/Kconfig.template.log_config" source "drivers/usb/bc12/Kconfig.pi3usb9201" endif # USB_BC12 ```
/content/code_sandbox/drivers/usb/bc12/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
84