repo_name
string
path
string
copies
string
size
string
content
string
license
string
skeevy420/android_kernel_lge_d850
block/blk-sysfs.c
3140
15136
/* * Functions related to sysfs handling */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/blktrace_api.h> #include "blk.h" struct queue_sysfs_entry { struct attribute attr; ssize_t (*show)(struct request_queue *, char *); ssize_t (*store)(struct request_queue *, const char *, size_t); }; static ssize_t queue_var_show(unsigned long var, char *page) { return sprintf(page, "%lu\n", var); } static ssize_t queue_var_store(unsigned long *var, const char *page, size_t count) { char *p = (char *) page; *var = simple_strtoul(p, &p, 10); return count; } static ssize_t queue_requests_show(struct request_queue *q, char *page) { return queue_var_show(q->nr_requests, (page)); } static ssize_t queue_requests_store(struct request_queue *q, const char *page, size_t count) { struct request_list *rl = &q->rq; unsigned long nr; int ret; if (!q->request_fn) return -EINVAL; ret = queue_var_store(&nr, page, count); if (nr < BLKDEV_MIN_RQ) nr = BLKDEV_MIN_RQ; spin_lock_irq(q->queue_lock); q->nr_requests = nr; blk_queue_congestion_threshold(q); if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q)) blk_set_queue_congested(q, BLK_RW_SYNC); else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q)) blk_clear_queue_congested(q, BLK_RW_SYNC); if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q)) blk_set_queue_congested(q, BLK_RW_ASYNC); else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q)) blk_clear_queue_congested(q, BLK_RW_ASYNC); if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { blk_set_queue_full(q, BLK_RW_SYNC); } else { blk_clear_queue_full(q, BLK_RW_SYNC); wake_up(&rl->wait[BLK_RW_SYNC]); } if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { blk_set_queue_full(q, BLK_RW_ASYNC); } else { blk_clear_queue_full(q, BLK_RW_ASYNC); wake_up(&rl->wait[BLK_RW_ASYNC]); } spin_unlock_irq(q->queue_lock); return ret; } static ssize_t queue_ra_show(struct 
request_queue *q, char *page) { unsigned long ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10); return queue_var_show(ra_kb, (page)); } static ssize_t queue_ra_store(struct request_queue *q, const char *page, size_t count) { unsigned long ra_kb; ssize_t ret = queue_var_store(&ra_kb, page, count); q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10); return ret; } static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) { int max_sectors_kb = queue_max_sectors(q) >> 1; return queue_var_show(max_sectors_kb, (page)); } static ssize_t queue_max_segments_show(struct request_queue *q, char *page) { return queue_var_show(queue_max_segments(q), (page)); } static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page) { return queue_var_show(q->limits.max_integrity_segments, (page)); } static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page) { if (blk_queue_cluster(q)) return queue_var_show(queue_max_segment_size(q), (page)); return queue_var_show(PAGE_CACHE_SIZE, (page)); } static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) { return queue_var_show(queue_logical_block_size(q), page); } static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page) { return queue_var_show(queue_physical_block_size(q), page); } static ssize_t queue_io_min_show(struct request_queue *q, char *page) { return queue_var_show(queue_io_min(q), page); } static ssize_t queue_io_opt_show(struct request_queue *q, char *page) { return queue_var_show(queue_io_opt(q), page); } static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page) { return queue_var_show(q->limits.discard_granularity, page); } static ssize_t queue_discard_max_show(struct request_queue *q, char *page) { return sprintf(page, "%llu\n", (unsigned long long)q->limits.max_discard_sectors << 9); } static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page) { 
return queue_var_show(queue_discard_zeroes_data(q), page); } static ssize_t queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) { unsigned long max_sectors_kb, max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1, page_kb = 1 << (PAGE_CACHE_SHIFT - 10); ssize_t ret = queue_var_store(&max_sectors_kb, page, count); if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb) return -EINVAL; spin_lock_irq(q->queue_lock); q->limits.max_sectors = max_sectors_kb << 1; spin_unlock_irq(q->queue_lock); return ret; } static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page) { int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1; return queue_var_show(max_hw_sectors_kb, (page)); } #define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \ static ssize_t \ queue_show_##name(struct request_queue *q, char *page) \ { \ int bit; \ bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \ return queue_var_show(neg ? !bit : bit, page); \ } \ static ssize_t \ queue_store_##name(struct request_queue *q, const char *page, size_t count) \ { \ unsigned long val; \ ssize_t ret; \ ret = queue_var_store(&val, page, count); \ if (neg) \ val = !val; \ \ spin_lock_irq(q->queue_lock); \ if (val) \ queue_flag_set(QUEUE_FLAG_##flag, q); \ else \ queue_flag_clear(QUEUE_FLAG_##flag, q); \ spin_unlock_irq(q->queue_lock); \ return ret; \ } QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1); QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0); QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0); #undef QUEUE_SYSFS_BIT_FNS static ssize_t queue_nomerges_show(struct request_queue *q, char *page) { return queue_var_show((blk_queue_nomerges(q) << 1) | blk_queue_noxmerges(q), page); } static ssize_t queue_nomerges_store(struct request_queue *q, const char *page, size_t count) { unsigned long nm; ssize_t ret = queue_var_store(&nm, page, count); spin_lock_irq(q->queue_lock); queue_flag_clear(QUEUE_FLAG_NOMERGES, q); queue_flag_clear(QUEUE_FLAG_NOXMERGES, q); if (nm == 2) 
queue_flag_set(QUEUE_FLAG_NOMERGES, q); else if (nm) queue_flag_set(QUEUE_FLAG_NOXMERGES, q); spin_unlock_irq(q->queue_lock); return ret; } static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page) { bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags); bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags); return queue_var_show(set << force, page); } static ssize_t queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count) { ssize_t ret = -EINVAL; #if defined(CONFIG_USE_GENERIC_SMP_HELPERS) unsigned long val; ret = queue_var_store(&val, page, count); spin_lock_irq(q->queue_lock); if (val == 2) { queue_flag_set(QUEUE_FLAG_SAME_COMP, q); queue_flag_set(QUEUE_FLAG_SAME_FORCE, q); } else if (val == 1) { queue_flag_set(QUEUE_FLAG_SAME_COMP, q); queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); } else if (val == 0) { queue_flag_clear(QUEUE_FLAG_SAME_COMP, q); queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); } spin_unlock_irq(q->queue_lock); #endif return ret; } static struct queue_sysfs_entry queue_requests_entry = { .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR }, .show = queue_requests_show, .store = queue_requests_store, }; static struct queue_sysfs_entry queue_ra_entry = { .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR }, .show = queue_ra_show, .store = queue_ra_store, }; static struct queue_sysfs_entry queue_max_sectors_entry = { .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR }, .show = queue_max_sectors_show, .store = queue_max_sectors_store, }; static struct queue_sysfs_entry queue_max_hw_sectors_entry = { .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO }, .show = queue_max_hw_sectors_show, }; static struct queue_sysfs_entry queue_max_segments_entry = { .attr = {.name = "max_segments", .mode = S_IRUGO }, .show = queue_max_segments_show, }; static struct queue_sysfs_entry queue_max_integrity_segments_entry = { .attr = {.name = "max_integrity_segments", .mode = S_IRUGO }, .show = 
queue_max_integrity_segments_show, }; static struct queue_sysfs_entry queue_max_segment_size_entry = { .attr = {.name = "max_segment_size", .mode = S_IRUGO }, .show = queue_max_segment_size_show, }; static struct queue_sysfs_entry queue_iosched_entry = { .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR }, .show = elv_iosched_show, .store = elv_iosched_store, }; static struct queue_sysfs_entry queue_hw_sector_size_entry = { .attr = {.name = "hw_sector_size", .mode = S_IRUGO }, .show = queue_logical_block_size_show, }; static struct queue_sysfs_entry queue_logical_block_size_entry = { .attr = {.name = "logical_block_size", .mode = S_IRUGO }, .show = queue_logical_block_size_show, }; static struct queue_sysfs_entry queue_physical_block_size_entry = { .attr = {.name = "physical_block_size", .mode = S_IRUGO }, .show = queue_physical_block_size_show, }; static struct queue_sysfs_entry queue_io_min_entry = { .attr = {.name = "minimum_io_size", .mode = S_IRUGO }, .show = queue_io_min_show, }; static struct queue_sysfs_entry queue_io_opt_entry = { .attr = {.name = "optimal_io_size", .mode = S_IRUGO }, .show = queue_io_opt_show, }; static struct queue_sysfs_entry queue_discard_granularity_entry = { .attr = {.name = "discard_granularity", .mode = S_IRUGO }, .show = queue_discard_granularity_show, }; static struct queue_sysfs_entry queue_discard_max_entry = { .attr = {.name = "discard_max_bytes", .mode = S_IRUGO }, .show = queue_discard_max_show, }; static struct queue_sysfs_entry queue_discard_zeroes_data_entry = { .attr = {.name = "discard_zeroes_data", .mode = S_IRUGO }, .show = queue_discard_zeroes_data_show, }; static struct queue_sysfs_entry queue_nonrot_entry = { .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR }, .show = queue_show_nonrot, .store = queue_store_nonrot, }; static struct queue_sysfs_entry queue_nomerges_entry = { .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR }, .show = queue_nomerges_show, .store = queue_nomerges_store, }; static 
struct queue_sysfs_entry queue_rq_affinity_entry = { .attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR }, .show = queue_rq_affinity_show, .store = queue_rq_affinity_store, }; static struct queue_sysfs_entry queue_iostats_entry = { .attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR }, .show = queue_show_iostats, .store = queue_store_iostats, }; static struct queue_sysfs_entry queue_random_entry = { .attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR }, .show = queue_show_random, .store = queue_store_random, }; static struct attribute *default_attrs[] = { &queue_requests_entry.attr, &queue_ra_entry.attr, &queue_max_hw_sectors_entry.attr, &queue_max_sectors_entry.attr, &queue_max_segments_entry.attr, &queue_max_integrity_segments_entry.attr, &queue_max_segment_size_entry.attr, &queue_iosched_entry.attr, &queue_hw_sector_size_entry.attr, &queue_logical_block_size_entry.attr, &queue_physical_block_size_entry.attr, &queue_io_min_entry.attr, &queue_io_opt_entry.attr, &queue_discard_granularity_entry.attr, &queue_discard_max_entry.attr, &queue_discard_zeroes_data_entry.attr, &queue_nonrot_entry.attr, &queue_nomerges_entry.attr, &queue_rq_affinity_entry.attr, &queue_iostats_entry.attr, &queue_random_entry.attr, NULL, }; #define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr) static ssize_t queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) { struct queue_sysfs_entry *entry = to_queue(attr); struct request_queue *q = container_of(kobj, struct request_queue, kobj); ssize_t res; if (!entry->show) return -EIO; mutex_lock(&q->sysfs_lock); if (blk_queue_dead(q)) { mutex_unlock(&q->sysfs_lock); return -ENOENT; } res = entry->show(q, page); mutex_unlock(&q->sysfs_lock); return res; } static ssize_t queue_attr_store(struct kobject *kobj, struct attribute *attr, const char *page, size_t length) { struct queue_sysfs_entry *entry = to_queue(attr); struct request_queue *q; ssize_t res; if (!entry->store) return -EIO; q = 
container_of(kobj, struct request_queue, kobj); mutex_lock(&q->sysfs_lock); if (blk_queue_dead(q)) { mutex_unlock(&q->sysfs_lock); return -ENOENT; } res = entry->store(q, page, length); mutex_unlock(&q->sysfs_lock); return res; } /** * blk_release_queue: - release a &struct request_queue when it is no longer needed * @kobj: the kobj belonging to the request queue to be released * * Description: * blk_release_queue is the pair to blk_init_queue() or * blk_queue_make_request(). It should be called when a request queue is * being released; typically when a block device is being de-registered. * Currently, its primary task it to free all the &struct request * structures that were allocated to the queue and the queue itself. * * Caveat: * Hopefully the low level driver will have finished any * outstanding requests first... **/ static void blk_release_queue(struct kobject *kobj) { struct request_queue *q = container_of(kobj, struct request_queue, kobj); struct request_list *rl = &q->rq; blk_sync_queue(q); if (q->elevator) { spin_lock_irq(q->queue_lock); ioc_clear_queue(q); spin_unlock_irq(q->queue_lock); elevator_exit(q->elevator); } blk_throtl_exit(q); if (rl->rq_pool) mempool_destroy(rl->rq_pool); if (q->queue_tags) __blk_queue_free_tags(q); blk_throtl_release(q); blk_trace_shutdown(q); bdi_destroy(&q->backing_dev_info); ida_simple_remove(&blk_queue_ida, q->id); kmem_cache_free(blk_requestq_cachep, q); } static const struct sysfs_ops queue_sysfs_ops = { .show = queue_attr_show, .store = queue_attr_store, }; struct kobj_type blk_queue_ktype = { .sysfs_ops = &queue_sysfs_ops, .default_attrs = default_attrs, .release = blk_release_queue, }; int blk_register_queue(struct gendisk *disk) { int ret; struct device *dev = disk_to_dev(disk); struct request_queue *q = disk->queue; if (WARN_ON(!q)) return -ENXIO; ret = blk_trace_init_sysfs(dev); if (ret) return ret; ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue"); if (ret < 0) { blk_trace_remove_sysfs(dev); 
return ret; } kobject_uevent(&q->kobj, KOBJ_ADD); if (!q->request_fn) return 0; ret = elv_register_queue(q); if (ret) { kobject_uevent(&q->kobj, KOBJ_REMOVE); kobject_del(&q->kobj); blk_trace_remove_sysfs(dev); kobject_put(&dev->kobj); return ret; } return 0; } void blk_unregister_queue(struct gendisk *disk) { struct request_queue *q = disk->queue; if (WARN_ON(!q)) return; if (q->request_fn) elv_unregister_queue(q); kobject_uevent(&q->kobj, KOBJ_REMOVE); kobject_del(&q->kobj); blk_trace_remove_sysfs(disk_to_dev(disk)); kobject_put(&disk_to_dev(disk)->kobj); }
gpl-2.0
fronti90/kernel_lge_geefhd
arch/arm/mach-msm/smd_rpc_sym.c
3652
6155
/* Autogenerated by mkrpcsym.pl. Do not edit */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/debugfs.h> #include <linux/module.h> struct sym { const char *str; }; const char *smd_rpc_syms[] = { "CB CM_FUSION", /*0x30010000*/ "CB DB", /*0x30000001*/ "CB SND", /*0x30000002*/ "CB WMS_FUSION", /*0x30010003*/ "CB PDSM", /*0x30000004*/ "CB MISC_MODEM_APIS", /*0x30000005*/ "CB MISC_APPS_APIS", /*0x30000006*/ "CB JOYST", /*0x30000007*/ "CB UNDEFINED", "CB UNDEFINED", "CB ADSPRTOSATOM", /*0x3000000A*/ "CB ADSPRTOSMTOA", /*0x3000000B*/ "CB I2C", /*0x3000000C*/ "CB TIME_REMOTE", /*0x3000000D*/ "CB NV_FUSION", /*0x3001000E*/ "CB CLKRGM_SEC_FUSION", /*0x3001000F*/ "CB RDEVMAP", /*0x30000010*/ "CB UNDEFINED", "CB PBMLIB_FUSION", /*0x30010012*/ "CB AUDMGR", /*0x30000013*/ "CB MVS", /*0x30000014*/ "CB DOG_KEEPALIVE", /*0x30000015*/ "CB GSDI_EXP_FUSION", /*0x30010016*/ "CB AUTH", /*0x30000017*/ "CB NVRUIMI", /*0x30000018*/ "CB MMGSDILIB_FUSION", /*0x30010019*/ "CB CHARGER", /*0x3000001A*/ "CB UIM_FUSION", /*0x3001001B*/ "CB UNDEFINED", "CB PDSM_ATL", /*0x3000001D*/ "CB FS_XMOUNT", /*0x3000001E*/ "CB SECUTIL", /*0x3000001F*/ "CB MCCMEID", /*0x30000020*/ "CB PM_STROBE_FLASH", /*0x30000021*/ "CB UNDEFINED", "CB SMD_BRIDGE", /*0x30000023*/ "CB SMD_PORT_MGR_FUSION", /*0x30010024*/ "CB BUS_PERF", /*0x30000025*/ "CB BUS_MON_REMOTE", /*0x30000026*/ "CB MC", /*0x30000027*/ "CB MCCAP", /*0x30000028*/ "CB MCCDMA", /*0x30000029*/ "CB MCCDS", /*0x3000002A*/ "CB MCCSCH", /*0x3000002B*/ "CB MCCSRID", /*0x3000002C*/ "CB SNM", /*0x3000002D*/ "CB MCCSYOBJ", /*0x3000002E*/ "CB UNDEFINED", "CB UNDEFINED", "CB DSRLP_APIS", /*0x30000031*/ "CB RLP_APIS", /*0x30000032*/ "CB DS_MP_SHIM_MODEM", /*0x30000033*/ "CB UNDEFINED", "CB DSHDR_MDM_APIS", /*0x30000035*/ "CB DS_MP_SHIM_APPS", /*0x30000036*/ "CB HDRMC_APIS", /*0x30000037*/ "CB UNDEFINED", "CB UNDEFINED", "CB PMAPP_OTG", /*0x3000003A*/ "CB DIAG", /*0x3000003B*/ "CB GSTK_EXP_FUSION", /*0x3001003C*/ "CB DSBC_MDM_APIS", /*0x3000003D*/ 
"CB HDRMRLP_MDM_APIS", /*0x3000003E*/ "CB UNDEFINED", "CB HDRMC_MRLP_APIS", /*0x30000040*/ "CB PDCOMM_APP_API", /*0x30000041*/ "CB DSAT_APIS", /*0x30000042*/ "CB RFM", /*0x30000043*/ "CB CMIPAPP", /*0x30000044*/ "CB DSMP_UMTS_MODEM_APIS", /*0x30000045*/ "CB UNDEFINED", "CB DSUCSDMPSHIM", /*0x30000047*/ "CB TIME_REMOTE_ATOM", /*0x30000048*/ "CB UNDEFINED", "CB SD", /*0x3000004A*/ "CB MMOC", /*0x3000004B*/ "CB UNDEFINED", "CB WLAN_CP_CM", /*0x3000004D*/ "CB FTM_WLAN", /*0x3000004E*/ "CB UNDEFINED", "CB CPRMINTERFACE", /*0x30000050*/ "CB DATA_ON_MODEM_MTOA_APIS", /*0x30000051*/ "CB UNDEFINED", "CB MISC_MODEM_APIS_NONWINMOB", /*0x30000053*/ "CB MISC_APPS_APIS_NONWINMOB", /*0x30000054*/ "CB PMEM_REMOTE", /*0x30000055*/ "CB TCXOMGR", /*0x30000056*/ "CB UNDEFINED", "CB BT", /*0x30000058*/ "CB PD_COMMS_API", /*0x30000059*/ "CB PD_COMMS_CLIENT_API", /*0x3000005A*/ "CB PDAPI", /*0x3000005B*/ "CB UNDEFINED", "CB TIME_REMOTE_MTOA", /*0x3000005D*/ "CB FTM_BT", /*0x3000005E*/ "CB DSUCSDAPPIF_APIS", /*0x3000005F*/ "CB PMAPP_GEN", /*0x30000060*/ "CB PM_LIB_FUSION", /*0x30010061*/ "CB UNDEFINED", "CB HSU_APP_APIS", /*0x30000063*/ "CB HSU_MDM_APIS", /*0x30000064*/ "CB ADIE_ADC_REMOTE_ATOM", /*0x30000065*/ "CB TLMM_REMOTE_ATOM", /*0x30000066*/ "CB UI_CALLCTRL", /*0x30000067*/ "CB UIUTILS", /*0x30000068*/ "CB PRL", /*0x30000069*/ "CB HW", /*0x3000006A*/ "CB OEM_RAPI_FUSION", /*0x3001006B*/ "CB WMSPM", /*0x3000006C*/ "CB BTPF", /*0x3000006D*/ "CB UNDEFINED", "CB USB_APPS_RPC", /*0x3000006F*/ "CB USB_MODEM_RPC", /*0x30000070*/ "CB ADC", /*0x30000071*/ "CB CAMERAREMOTED", /*0x30000072*/ "CB SECAPIREMOTED", /*0x30000073*/ "CB DSATAPI", /*0x30000074*/ "CB CLKCTL_RPC", /*0x30000075*/ "CB BREWAPPCOORD", /*0x30000076*/ "CB UNDEFINED", "CB WLAN_TRP_UTILS", /*0x30000078*/ "CB GPIO_RPC", /*0x30000079*/ "CB UNDEFINED", "CB UNDEFINED", "CB L1_DS", /*0x3000007C*/ "CB UNDEFINED", "CB UNDEFINED", "CB OSS_RRCASN_REMOTE", /*0x3000007F*/ "CB PMAPP_OTG_REMOTE", /*0x30000080*/ "CB PING_LTE_RPC", 
/*0x30010081*/ "CB UNDEFINED", "CB UNDEFINED", "CB UNDEFINED", "CB UNDEFINED", "CB UNDEFINED", "CB UKCC_IPC_APIS", /*0x30000087*/ "CB UNDEFINED", "CB VBATT_REMOTE", /*0x30000089*/ "CB MFPAL_FPS", /*0x3000008A*/ "CB DSUMTSPDPREG", /*0x3000008B*/ "CB LOC_API", /*0x3000008C*/ "CB UNDEFINED", "CB CMGAN", /*0x3000008E*/ "CB ISENSE", /*0x3000008F*/ "CB TIME_SECURE", /*0x30000090*/ "CB HS_REM", /*0x30000091*/ "CB ACDB", /*0x30000092*/ "CB NET", /*0x30000093*/ "CB LED", /*0x30000094*/ "CB DSPAE", /*0x30000095*/ "CB MFKAL", /*0x30000096*/ "CB UNDEFINED", "CB UNDEFINED", "CB UNDEFINED", "CB UNDEFINED", "CB TEST_API", /*0x3000009B*/ "CB REMOTEFS_SRV_API_FUSION", /*0x3001009C*/ "CB ISI_TRANSPORT", /*0x3000009D*/ "CB OEM_FTM", /*0x3000009E*/ "CB TOUCH_SCREEN_ADC", /*0x3000009F*/ "CB SMD_BRIDGE_APPS_FUSION", /*0x300100A0*/ "CB SMD_BRIDGE_MODEM_FUSION", /*0x300100A1*/ "CB DOG_KEEPALIVE_MODEM", /*0x300000A2*/ "CB VOEM_IF", /*0x300000A3*/ "CB NPA_REMOTE", /*0x300000A4*/ "CB MMGSDISESSIONLIB_FUSION", /*0x300100A5*/ "CB IFTA_REMOTE", /*0x300000A6*/ "CB REMOTE_STORAGE", /*0x300000A7*/ "CB MF_REMOTE_FILE", /*0x300000A8*/ "CB MFSC_CHUNKED_TRANSPORT", /*0x300000A9*/ "CB MFIM3", /*0x300000AA*/ "CB FM_WAN_API", /*0x300000AB*/ "CB WLAN_RAPI", /*0x300000AC*/ "CB DSMGR_APIS", /*0x300000AD*/ "CB CM_MM_FUSION", /*0x300100AE*/ }; static struct sym_tbl { const char **data; int size; } tbl = { smd_rpc_syms, ARRAY_SIZE(smd_rpc_syms)}; const char *smd_rpc_get_sym(uint32_t val) { int idx = val & 0xFFFF; if (idx < tbl.size) { if (val & 0x01000000) return tbl.data[idx]; else return tbl.data[idx] + 3; } return 0; } EXPORT_SYMBOL(smd_rpc_get_sym);
gpl-2.0
kerlw/linux-sunxi
sound/i2c/other/ak4114.c
4676
18963
/* * Routines for control of the AK4114 via I2C and 4-wire serial interface * IEC958 (S/PDIF) receiver by Asahi Kasei * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/slab.h> #include <linux/delay.h> #include <linux/module.h> #include <sound/core.h> #include <sound/control.h> #include <sound/pcm.h> #include <sound/ak4114.h> #include <sound/asoundef.h> #include <sound/info.h> MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("AK4114 IEC958 (S/PDIF) receiver by Asahi Kasei"); MODULE_LICENSE("GPL"); #define AK4114_ADDR 0x00 /* fixed address */ static void ak4114_stats(struct work_struct *work); static void ak4114_init_regs(struct ak4114 *chip); static void reg_write(struct ak4114 *ak4114, unsigned char reg, unsigned char val) { ak4114->write(ak4114->private_data, reg, val); if (reg <= AK4114_REG_INT1_MASK) ak4114->regmap[reg] = val; else if (reg >= AK4114_REG_TXCSB0 && reg <= AK4114_REG_TXCSB4) ak4114->txcsb[reg-AK4114_REG_TXCSB0] = val; } static inline unsigned char reg_read(struct ak4114 *ak4114, unsigned char reg) { return ak4114->read(ak4114->private_data, reg); } #if 0 static void reg_dump(struct ak4114 *ak4114) { int i; printk(KERN_DEBUG "AK4114 REG DUMP:\n"); for (i = 0; i < 0x20; i++) printk(KERN_DEBUG "reg[%02x] = %02x 
(%02x)\n", i, reg_read(ak4114, i), i < sizeof(ak4114->regmap) ? ak4114->regmap[i] : 0); } #endif static void snd_ak4114_free(struct ak4114 *chip) { chip->init = 1; /* don't schedule new work */ mb(); cancel_delayed_work_sync(&chip->work); kfree(chip); } static int snd_ak4114_dev_free(struct snd_device *device) { struct ak4114 *chip = device->device_data; snd_ak4114_free(chip); return 0; } int snd_ak4114_create(struct snd_card *card, ak4114_read_t *read, ak4114_write_t *write, const unsigned char pgm[7], const unsigned char txcsb[5], void *private_data, struct ak4114 **r_ak4114) { struct ak4114 *chip; int err = 0; unsigned char reg; static struct snd_device_ops ops = { .dev_free = snd_ak4114_dev_free, }; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) return -ENOMEM; spin_lock_init(&chip->lock); chip->card = card; chip->read = read; chip->write = write; chip->private_data = private_data; INIT_DELAYED_WORK(&chip->work, ak4114_stats); for (reg = 0; reg < 7; reg++) chip->regmap[reg] = pgm[reg]; for (reg = 0; reg < 5; reg++) chip->txcsb[reg] = txcsb[reg]; ak4114_init_regs(chip); chip->rcs0 = reg_read(chip, AK4114_REG_RCS0) & ~(AK4114_QINT | AK4114_CINT); chip->rcs1 = reg_read(chip, AK4114_REG_RCS1); if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) goto __fail; if (r_ak4114) *r_ak4114 = chip; return 0; __fail: snd_ak4114_free(chip); return err < 0 ? 
err : -EIO; } void snd_ak4114_reg_write(struct ak4114 *chip, unsigned char reg, unsigned char mask, unsigned char val) { if (reg <= AK4114_REG_INT1_MASK) reg_write(chip, reg, (chip->regmap[reg] & ~mask) | val); else if (reg >= AK4114_REG_TXCSB0 && reg <= AK4114_REG_TXCSB4) reg_write(chip, reg, (chip->txcsb[reg-AK4114_REG_TXCSB0] & ~mask) | val); } static void ak4114_init_regs(struct ak4114 *chip) { unsigned char old = chip->regmap[AK4114_REG_PWRDN], reg; /* bring the chip to reset state and powerdown state */ reg_write(chip, AK4114_REG_PWRDN, old & ~(AK4114_RST|AK4114_PWN)); udelay(200); /* release reset, but leave powerdown */ reg_write(chip, AK4114_REG_PWRDN, (old | AK4114_RST) & ~AK4114_PWN); udelay(200); for (reg = 1; reg < 7; reg++) reg_write(chip, reg, chip->regmap[reg]); for (reg = 0; reg < 5; reg++) reg_write(chip, reg + AK4114_REG_TXCSB0, chip->txcsb[reg]); /* release powerdown, everything is initialized now */ reg_write(chip, AK4114_REG_PWRDN, old | AK4114_RST | AK4114_PWN); } void snd_ak4114_reinit(struct ak4114 *chip) { chip->init = 1; mb(); flush_delayed_work_sync(&chip->work); ak4114_init_regs(chip); /* bring up statistics / event queing */ chip->init = 0; if (chip->kctls[0]) schedule_delayed_work(&chip->work, HZ / 10); } static unsigned int external_rate(unsigned char rcs1) { switch (rcs1 & (AK4114_FS0|AK4114_FS1|AK4114_FS2|AK4114_FS3)) { case AK4114_FS_32000HZ: return 32000; case AK4114_FS_44100HZ: return 44100; case AK4114_FS_48000HZ: return 48000; case AK4114_FS_88200HZ: return 88200; case AK4114_FS_96000HZ: return 96000; case AK4114_FS_176400HZ: return 176400; case AK4114_FS_192000HZ: return 192000; default: return 0; } } static int snd_ak4114_in_error_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = LONG_MAX; return 0; } static int snd_ak4114_in_error_get(struct snd_kcontrol *kcontrol, struct 
snd_ctl_elem_value *ucontrol) { struct ak4114 *chip = snd_kcontrol_chip(kcontrol); long *ptr; spin_lock_irq(&chip->lock); ptr = (long *)(((char *)chip) + kcontrol->private_value); ucontrol->value.integer.value[0] = *ptr; *ptr = 0; spin_unlock_irq(&chip->lock); return 0; } #define snd_ak4114_in_bit_info snd_ctl_boolean_mono_info static int snd_ak4114_in_bit_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct ak4114 *chip = snd_kcontrol_chip(kcontrol); unsigned char reg = kcontrol->private_value & 0xff; unsigned char bit = (kcontrol->private_value >> 8) & 0xff; unsigned char inv = (kcontrol->private_value >> 31) & 1; ucontrol->value.integer.value[0] = ((reg_read(chip, reg) & (1 << bit)) ? 1 : 0) ^ inv; return 0; } static int snd_ak4114_rate_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 192000; return 0; } static int snd_ak4114_rate_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct ak4114 *chip = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = external_rate(reg_read(chip, AK4114_REG_RCS1)); return 0; } static int snd_ak4114_spdif_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958; uinfo->count = 1; return 0; } static int snd_ak4114_spdif_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct ak4114 *chip = snd_kcontrol_chip(kcontrol); unsigned i; for (i = 0; i < AK4114_REG_RXCSB_SIZE; i++) ucontrol->value.iec958.status[i] = reg_read(chip, AK4114_REG_RXCSB0 + i); return 0; } static int snd_ak4114_spdif_playback_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct ak4114 *chip = snd_kcontrol_chip(kcontrol); unsigned i; for (i = 0; i < AK4114_REG_TXCSB_SIZE; i++) ucontrol->value.iec958.status[i] = chip->txcsb[i]; return 0; } static int 
snd_ak4114_spdif_playback_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct ak4114 *chip = snd_kcontrol_chip(kcontrol); unsigned i; for (i = 0; i < AK4114_REG_TXCSB_SIZE; i++) reg_write(chip, AK4114_REG_TXCSB0 + i, ucontrol->value.iec958.status[i]); return 0; } static int snd_ak4114_spdif_mask_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958; uinfo->count = 1; return 0; } static int snd_ak4114_spdif_mask_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { memset(ucontrol->value.iec958.status, 0xff, AK4114_REG_RXCSB_SIZE); return 0; } static int snd_ak4114_spdif_pinfo(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->value.integer.min = 0; uinfo->value.integer.max = 0xffff; uinfo->count = 4; return 0; } static int snd_ak4114_spdif_pget(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct ak4114 *chip = snd_kcontrol_chip(kcontrol); unsigned short tmp; ucontrol->value.integer.value[0] = 0xf8f2; ucontrol->value.integer.value[1] = 0x4e1f; tmp = reg_read(chip, AK4114_REG_Pc0) | (reg_read(chip, AK4114_REG_Pc1) << 8); ucontrol->value.integer.value[2] = tmp; tmp = reg_read(chip, AK4114_REG_Pd0) | (reg_read(chip, AK4114_REG_Pd1) << 8); ucontrol->value.integer.value[3] = tmp; return 0; } static int snd_ak4114_spdif_qinfo(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES; uinfo->count = AK4114_REG_QSUB_SIZE; return 0; } static int snd_ak4114_spdif_qget(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct ak4114 *chip = snd_kcontrol_chip(kcontrol); unsigned i; for (i = 0; i < AK4114_REG_QSUB_SIZE; i++) ucontrol->value.bytes.data[i] = reg_read(chip, AK4114_REG_QSUB_ADDR + i); return 0; } /* Don't forget to change AK4114_CONTROLS define!!! 
*/

/*
 * Mixer controls exported for the AK4114 S/PDIF receiver.  All controls are
 * read-only from userspace; the VOLATILE flag marks values that change
 * autonomously in hardware (error counters, rate, status bits).
 * NOTE(review): "IEC958 Preample Capture Default" looks like a typo for
 * "Preamble", but the string is userspace ABI and must not be changed.
 */
static struct snd_kcontrol_new snd_ak4114_iec958_controls[] = {
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		"IEC958 Parity Errors",
	.access =	SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info =		snd_ak4114_in_error_info,
	.get =		snd_ak4114_in_error_get,
	/* offset of the counter field inside struct ak4114 */
	.private_value = offsetof(struct ak4114, parity_errors),
},
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		"IEC958 V-Bit Errors",
	.access =	SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info =		snd_ak4114_in_error_info,
	.get =		snd_ak4114_in_error_get,
	.private_value = offsetof(struct ak4114, v_bit_errors),
},
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		"IEC958 C-CRC Errors",
	.access =	SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info =		snd_ak4114_in_error_info,
	.get =		snd_ak4114_in_error_get,
	.private_value = offsetof(struct ak4114, ccrc_errors),
},
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		"IEC958 Q-CRC Errors",
	.access =	SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info =		snd_ak4114_in_error_info,
	.get =		snd_ak4114_in_error_get,
	.private_value = offsetof(struct ak4114, qcrc_errors),
},
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		"IEC958 External Rate",
	.access =	SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info =		snd_ak4114_rate_info,
	.get =		snd_ak4114_rate_get,
},
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,MASK),
	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
	.info =		snd_ak4114_spdif_mask_info,
	.get =		snd_ak4114_spdif_mask_get,
},
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
	.access =	SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info =		snd_ak4114_spdif_info,
	.get =		snd_ak4114_spdif_playback_get,
	.put =		snd_ak4114_spdif_playback_put,
},
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		SNDRV_CTL_NAME_IEC958("",CAPTURE,MASK),
	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
	.info =		snd_ak4114_spdif_mask_info,
	.get =		snd_ak4114_spdif_mask_get,
},
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		SNDRV_CTL_NAME_IEC958("",CAPTURE,DEFAULT),
	.access =	SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info =		snd_ak4114_spdif_info,
	.get =		snd_ak4114_spdif_get,
},
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		"IEC958 Preample Capture Default",
	.access =	SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info =		snd_ak4114_spdif_pinfo,
	.get =		snd_ak4114_spdif_pget,
},
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		"IEC958 Q-subcode Capture Default",
	.access =	SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info =		snd_ak4114_spdif_qinfo,
	.get =		snd_ak4114_spdif_qget,
},
/*
 * The following status-bit controls encode in private_value:
 * low byte = register, bits 8.. = bit number, bit 31 = invert the bit.
 * (Encoding inferred from the constants below; the decoder is
 * snd_ak4114_in_bit_get, which is outside this view -- confirm there.)
 */
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		"IEC958 Audio",
	.access =	SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info =		snd_ak4114_in_bit_info,
	.get =		snd_ak4114_in_bit_get,
	.private_value = (1<<31) | (1<<8) | AK4114_REG_RCS0,
},
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		"IEC958 Non-PCM Bitstream",
	.access =	SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info =		snd_ak4114_in_bit_info,
	.get =		snd_ak4114_in_bit_get,
	.private_value = (6<<8) | AK4114_REG_RCS0,
},
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		"IEC958 DTS Bitstream",
	.access =	SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info =		snd_ak4114_in_bit_info,
	.get =		snd_ak4114_in_bit_get,
	.private_value = (3<<8) | AK4114_REG_RCS0,
},
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		"IEC958 PPL Lock Status",
	.access =	SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info =		snd_ak4114_in_bit_info,
	.get =		snd_ak4114_in_bit_get,
	.private_value = (1<<31) | (4<<8) | AK4114_REG_RCS0,
}
};

/* /proc read callback: dump the whole AK4114 register file (0x00-0x1f). */
static void snd_ak4114_proc_regs_read(struct snd_info_entry *entry,
		struct snd_info_buffer *buffer)
{
	struct ak4114 *ak4114 = entry->private_data;
	int reg, val;
	/* all ak4114 registers 0x00 - 0x1f */
	for (reg = 0; reg < 0x20; reg++) {
		val = reg_read(ak4114, reg);
		snd_iprintf(buffer, "0x%02x = 0x%02x\n", reg, val);
	}
}

/* Register the "ak4114" proc entry on the owning card (best effort). */
static void snd_ak4114_proc_init(struct ak4114 *ak4114)
{
	struct snd_info_entry *entry;
	if (!snd_card_proc_new(ak4114->card, "ak4114", &entry))
		snd_info_set_text_ops(entry, ak4114, snd_ak4114_proc_regs_read);
}

/*
 * Create and register all AK4114 mixer controls on the card.
 *
 * @ply_substream: optional playback substream; when NULL, every control whose
 *	name contains "Playback" is dropped (freed, kctls slot set to NULL).
 * @cap_substream: required capture substream; its pcm device/subdevice
 *	numbers are stamped into the remaining control IDs.
 *
 * On success also registers the proc entry and kicks off the periodic
 * status-polling delayed work (every HZ/10).  Returns 0 or a negative errno.
 */
int snd_ak4114_build(struct ak4114 *ak4114,
		     struct snd_pcm_substream *ply_substream,
		     struct snd_pcm_substream *cap_substream)
{
	struct snd_kcontrol *kctl;
	unsigned int idx;
	int err;

	if (snd_BUG_ON(!cap_substream))
		return -EINVAL;
	ak4114->playback_substream = ply_substream;
	ak4114->capture_substream = cap_substream;
	for (idx = 0; idx < AK4114_CONTROLS; idx++) {
		kctl = snd_ctl_new1(&snd_ak4114_iec958_controls[idx], ak4114);
		if (kctl == NULL)
			return -ENOMEM;
		if (strstr(kctl->id.name, "Playback")) {
			if (ply_substream == NULL) {
				snd_ctl_free_one(kctl);
				ak4114->kctls[idx] = NULL;
				continue;
			}
			kctl->id.device = ply_substream->pcm->device;
			kctl->id.subdevice = ply_substream->number;
		} else {
			kctl->id.device = cap_substream->pcm->device;
			kctl->id.subdevice = cap_substream->number;
		}
		err = snd_ctl_add(ak4114->card, kctl);
		if (err < 0)
			return err;
		ak4114->kctls[idx] = kctl;
	}
	snd_ak4114_proc_init(ak4114);
	/* trigger workq */
	schedule_delayed_work(&ak4114->work, HZ / 10);
	return 0;
}

/*
 * notify kcontrols if any parameters are changed
 *
 * rcs0/rcs1 are the freshly read status registers; c0/c1 are the change
 * masks computed by the caller.  Indices into ak4114->kctls correspond to
 * positions in snd_ak4114_iec958_controls[] above.  Bails out early if the
 * controls were never built (kctls[0] == NULL).
 * NOTE(review): AK4114_V is tested against rcs0 here but against rcs1 in
 * snd_ak4114_check_rate_and_errors() below -- one of the two is likely on
 * the wrong register; confirm against the AK4114 datasheet before changing.
 */
static void ak4114_notify(struct ak4114 *ak4114,
			  unsigned char rcs0, unsigned char rcs1,
			  unsigned char c0, unsigned char c1)
{
	if (!ak4114->kctls[0])
		return;

	if (rcs0 & AK4114_PAR)
		snd_ctl_notify(ak4114->card, SNDRV_CTL_EVENT_MASK_VALUE,
			       &ak4114->kctls[0]->id);
	if (rcs0 & AK4114_V)
		snd_ctl_notify(ak4114->card, SNDRV_CTL_EVENT_MASK_VALUE,
			       &ak4114->kctls[1]->id);
	if (rcs1 & AK4114_CCRC)
		snd_ctl_notify(ak4114->card, SNDRV_CTL_EVENT_MASK_VALUE,
			       &ak4114->kctls[2]->id);
	if (rcs1 & AK4114_QCRC)
		snd_ctl_notify(ak4114->card, SNDRV_CTL_EVENT_MASK_VALUE,
			       &ak4114->kctls[3]->id);

	/* rate change */
	if (c1 & 0xf0)
		snd_ctl_notify(ak4114->card, SNDRV_CTL_EVENT_MASK_VALUE,
			       &ak4114->kctls[4]->id);

	if ((c0 & AK4114_PEM) | (c0 & AK4114_CINT))
		snd_ctl_notify(ak4114->card, SNDRV_CTL_EVENT_MASK_VALUE,
			       &ak4114->kctls[9]->id);
	if (c0 & AK4114_QINT)
		snd_ctl_notify(ak4114->card, SNDRV_CTL_EVENT_MASK_VALUE,
			       &ak4114->kctls[10]->id);

	if (c0 & AK4114_AUDION)
		snd_ctl_notify(ak4114->card, SNDRV_CTL_EVENT_MASK_VALUE,
			       &ak4114->kctls[11]->id);
	if (c0 & AK4114_AUTO)
		snd_ctl_notify(ak4114->card, SNDRV_CTL_EVENT_MASK_VALUE,
			       &ak4114->kctls[12]->id);
	if (c0 & AK4114_DTSCD)
		snd_ctl_notify(ak4114->card, SNDRV_CTL_EVENT_MASK_VALUE,
			       &ak4114->kctls[13]->id);
	if (c0 & AK4114_UNLCK)
		snd_ctl_notify(ak4114->card, SNDRV_CTL_EVENT_MASK_VALUE,
			       &ak4114->kctls[14]->id);
}

/* Read RCS1 and decode the externally detected sample rate. */
int snd_ak4114_external_rate(struct ak4114 *ak4114)
{
	unsigned char rcs1;

	rcs1 = reg_read(ak4114, AK4114_REG_RCS1);
	return external_rate(rcs1);
}

/*
 * Poll the receiver status registers, update the error counters, fire
 * kcontrol notifications / the change callback, and compare the detected
 * external rate against the running capture stream.
 *
 * @flags: AK4114_CHECK_NO_STAT skips the error/notification bookkeeping;
 *	AK4114_CHECK_NO_RATE skips the stream-rate comparison.
 *
 * Returns the detected external rate, or 1 when the capture stream was
 * stopped (put into DRAINING) because of a rate mismatch.
 */
int snd_ak4114_check_rate_and_errors(struct ak4114 *ak4114, unsigned int flags)
{
	struct snd_pcm_runtime *runtime = ak4114->capture_substream ?
		ak4114->capture_substream->runtime : NULL;
	unsigned long _flags;
	int res = 0;
	unsigned char rcs0, rcs1;
	unsigned char c0, c1;

	rcs1 = reg_read(ak4114, AK4114_REG_RCS1);
	if (flags & AK4114_CHECK_NO_STAT)
		goto __rate;
	rcs0 = reg_read(ak4114, AK4114_REG_RCS0);
	spin_lock_irqsave(&ak4114->lock, _flags);
	if (rcs0 & AK4114_PAR)
		ak4114->parity_errors++;
	if (rcs1 & AK4114_V)
		ak4114->v_bit_errors++;
	if (rcs1 & AK4114_CCRC)
		ak4114->ccrc_errors++;
	if (rcs1 & AK4114_QCRC)
		ak4114->qcrc_errors++;
	/* changed-bit masks vs the cached copies from the previous poll */
	c0 = (ak4114->rcs0 & (AK4114_QINT | AK4114_CINT | AK4114_PEM | AK4114_AUDION | AK4114_AUTO | AK4114_UNLCK)) ^
		(rcs0 & (AK4114_QINT | AK4114_CINT | AK4114_PEM | AK4114_AUDION | AK4114_AUTO | AK4114_UNLCK));
	c1 = (ak4114->rcs1 & 0xf0) ^ (rcs1 & 0xf0);
	/* QINT/CINT are one-shot interrupt flags; don't cache them */
	ak4114->rcs0 = rcs0 & ~(AK4114_QINT | AK4114_CINT);
	ak4114->rcs1 = rcs1;
	spin_unlock_irqrestore(&ak4114->lock, _flags);
	ak4114_notify(ak4114, rcs0, rcs1, c0, c1);
	if (ak4114->change_callback && (c0 | c1) != 0)
		ak4114->change_callback(ak4114, c0, c1);
      __rate:
	/* compare rate */
	res = external_rate(rcs1);
	if (!(flags & AK4114_CHECK_NO_RATE) && runtime && runtime->rate != res) {
		snd_pcm_stream_lock_irqsave(ak4114->capture_substream, _flags);
		if (snd_pcm_running(ak4114->capture_substream)) {
			// printk(KERN_DEBUG "rate changed (%i <- %i)\n", runtime->rate, res);
			snd_pcm_stop(ak4114->capture_substream, SNDRV_PCM_STATE_DRAINING);
			res = 1;
		}
		snd_pcm_stream_unlock_irqrestore(ak4114->capture_substream, _flags);
	}
	return res;
}

/* Delayed-work body: poll status every HZ/10 until chip->init is set. */
static void ak4114_stats(struct work_struct *work)
{
	struct ak4114 *chip = container_of(work, struct ak4114, work.work);

	if (!chip->init)
		snd_ak4114_check_rate_and_errors(chip, chip->check_flags);

	schedule_delayed_work(&chip->work, HZ / 10);
}

EXPORT_SYMBOL(snd_ak4114_create);
EXPORT_SYMBOL(snd_ak4114_reg_write);
EXPORT_SYMBOL(snd_ak4114_reinit);
EXPORT_SYMBOL(snd_ak4114_build);
EXPORT_SYMBOL(snd_ak4114_external_rate);
EXPORT_SYMBOL(snd_ak4114_check_rate_and_errors);
gpl-2.0
lenghonglin/LU6200_Android_JB_LU620186_00_Kernel
arch/arm/mach-tegra/powergate.c
4676
5104
/* * drivers/powergate/tegra-powergate.c * * Copyright (c) 2010 Google, Inc * * Author: * Colin Cross <ccross@google.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/clk.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> #include <linux/seq_file.h> #include <linux/spinlock.h> #include <mach/clk.h> #include <mach/iomap.h> #include <mach/powergate.h> #include "fuse.h" #define PWRGATE_TOGGLE 0x30 #define PWRGATE_TOGGLE_START (1 << 8) #define REMOVE_CLAMPING 0x34 #define PWRGATE_STATUS 0x38 static int tegra_num_powerdomains; static int tegra_num_cpu_domains; static u8 *tegra_cpu_domains; static u8 tegra30_cpu_domains[] = { TEGRA_POWERGATE_CPU0, TEGRA_POWERGATE_CPU1, TEGRA_POWERGATE_CPU2, TEGRA_POWERGATE_CPU3, }; static DEFINE_SPINLOCK(tegra_powergate_lock); static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE); static u32 pmc_read(unsigned long reg) { return readl(pmc + reg); } static void pmc_write(u32 val, unsigned long reg) { writel(val, pmc + reg); } static int tegra_powergate_set(int id, bool new_state) { bool status; unsigned long flags; spin_lock_irqsave(&tegra_powergate_lock, flags); status = pmc_read(PWRGATE_STATUS) & (1 << id); if (status == new_state) { spin_unlock_irqrestore(&tegra_powergate_lock, flags); return -EINVAL; } pmc_write(PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE); spin_unlock_irqrestore(&tegra_powergate_lock, flags); return 0; } int tegra_powergate_power_on(int id) { if (id < 0 || id >= tegra_num_powerdomains) return -EINVAL; 
return tegra_powergate_set(id, true); } int tegra_powergate_power_off(int id) { if (id < 0 || id >= tegra_num_powerdomains) return -EINVAL; return tegra_powergate_set(id, false); } int tegra_powergate_is_powered(int id) { u32 status; if (id < 0 || id >= tegra_num_powerdomains) return -EINVAL; status = pmc_read(PWRGATE_STATUS) & (1 << id); return !!status; } int tegra_powergate_remove_clamping(int id) { u32 mask; if (id < 0 || id >= tegra_num_powerdomains) return -EINVAL; /* * Tegra 2 has a bug where PCIE and VDE clamping masks are * swapped relatively to the partition ids */ if (id == TEGRA_POWERGATE_VDEC) mask = (1 << TEGRA_POWERGATE_PCIE); else if (id == TEGRA_POWERGATE_PCIE) mask = (1 << TEGRA_POWERGATE_VDEC); else mask = (1 << id); pmc_write(mask, REMOVE_CLAMPING); return 0; } /* Must be called with clk disabled, and returns with clk enabled */ int tegra_powergate_sequence_power_up(int id, struct clk *clk) { int ret; tegra_periph_reset_assert(clk); ret = tegra_powergate_power_on(id); if (ret) goto err_power; ret = clk_enable(clk); if (ret) goto err_clk; udelay(10); ret = tegra_powergate_remove_clamping(id); if (ret) goto err_clamp; udelay(10); tegra_periph_reset_deassert(clk); return 0; err_clamp: clk_disable(clk); err_clk: tegra_powergate_power_off(id); err_power: return ret; } int tegra_cpu_powergate_id(int cpuid) { if (cpuid > 0 && cpuid < tegra_num_cpu_domains) return tegra_cpu_domains[cpuid]; return -EINVAL; } int __init tegra_powergate_init(void) { switch (tegra_chip_id) { case TEGRA20: tegra_num_powerdomains = 7; break; case TEGRA30: tegra_num_powerdomains = 14; tegra_num_cpu_domains = 4; tegra_cpu_domains = tegra30_cpu_domains; break; default: /* Unknown Tegra variant. 
Disable powergating */ tegra_num_powerdomains = 0; break; } return 0; } #ifdef CONFIG_DEBUG_FS static const char * const powergate_name[] = { [TEGRA_POWERGATE_CPU] = "cpu", [TEGRA_POWERGATE_3D] = "3d", [TEGRA_POWERGATE_VENC] = "venc", [TEGRA_POWERGATE_VDEC] = "vdec", [TEGRA_POWERGATE_PCIE] = "pcie", [TEGRA_POWERGATE_L2] = "l2", [TEGRA_POWERGATE_MPE] = "mpe", }; static int powergate_show(struct seq_file *s, void *data) { int i; seq_printf(s, " powergate powered\n"); seq_printf(s, "------------------\n"); for (i = 0; i < tegra_num_powerdomains; i++) seq_printf(s, " %9s %7s\n", powergate_name[i], tegra_powergate_is_powered(i) ? "yes" : "no"); return 0; } static int powergate_open(struct inode *inode, struct file *file) { return single_open(file, powergate_show, inode->i_private); } static const struct file_operations powergate_fops = { .open = powergate_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init powergate_debugfs_init(void) { struct dentry *d; int err = -ENOMEM; d = debugfs_create_file("powergate", S_IRUGO, NULL, NULL, &powergate_fops); if (!d) return -ENOMEM; return err; } late_initcall(powergate_debugfs_init); #endif
gpl-2.0
Beeko/android_kernel_htc_msm8960
drivers/usb/gadget/serial.c
4932
7760
/* * serial.c -- USB gadget serial driver * * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com) * Copyright (C) 2008 by David Brownell * Copyright (C) 2008 by Nokia Corporation * * This software is distributed under the terms of the GNU General * Public License ("GPL") as published by the Free Software Foundation, * either version 2 of that License or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/utsname.h> #include <linux/device.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include "u_serial.h" #include "gadget_chips.h" /* Defines */ #define GS_VERSION_STR "v2.4" #define GS_VERSION_NUM 0x2400 #define GS_LONG_NAME "Gadget Serial" #define GS_VERSION_NAME GS_LONG_NAME " " GS_VERSION_STR /*-------------------------------------------------------------------------*/ /* * Kbuild is not very cooperative with respect to linking separately * compiled library objects into one module. So for now we won't use * separate compilation ... ensuring init/exit sections work to shrink * the runtime footprint, and giving us at least some parts of what * a "gcc --combine ... part1.c part2.c part3.c ... " build would. */ #include "composite.c" #include "usbstring.c" #include "config.c" #include "epautoconf.c" #include "f_acm.c" #include "f_obex.c" #include "f_serial.c" #include "u_serial.c" /*-------------------------------------------------------------------------*/ /* Thanks to NetChip Technologies for donating this product ID. * * DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!! * Instead: allocate your own, using normal USB-IF procedures. */ #define GS_VENDOR_ID 0x0525 /* NetChip */ #define GS_PRODUCT_ID 0xa4a6 /* Linux-USB Serial Gadget */ #define GS_CDC_PRODUCT_ID 0xa4a7 /* ... as CDC-ACM */ #define GS_CDC_OBEX_PRODUCT_ID 0xa4a9 /* ... 
as CDC-OBEX */ /* string IDs are assigned dynamically */ #define STRING_MANUFACTURER_IDX 0 #define STRING_PRODUCT_IDX 1 #define STRING_DESCRIPTION_IDX 2 static char manufacturer[50]; static struct usb_string strings_dev[] = { [STRING_MANUFACTURER_IDX].s = manufacturer, [STRING_PRODUCT_IDX].s = GS_VERSION_NAME, [STRING_DESCRIPTION_IDX].s = NULL /* updated; f(use_acm) */, { } /* end of list */ }; static struct usb_gadget_strings stringtab_dev = { .language = 0x0409, /* en-us */ .strings = strings_dev, }; static struct usb_gadget_strings *dev_strings[] = { &stringtab_dev, NULL, }; static struct usb_device_descriptor device_desc = { .bLength = USB_DT_DEVICE_SIZE, .bDescriptorType = USB_DT_DEVICE, .bcdUSB = cpu_to_le16(0x0200), /* .bDeviceClass = f(use_acm) */ .bDeviceSubClass = 0, .bDeviceProtocol = 0, /* .bMaxPacketSize0 = f(hardware) */ .idVendor = cpu_to_le16(GS_VENDOR_ID), /* .idProduct = f(use_acm) */ /* .bcdDevice = f(hardware) */ /* .iManufacturer = DYNAMIC */ /* .iProduct = DYNAMIC */ .bNumConfigurations = 1, }; static struct usb_otg_descriptor otg_descriptor = { .bLength = sizeof otg_descriptor, .bDescriptorType = USB_DT_OTG, /* REVISIT SRP-only hardware is possible, although * it would not be called "OTG" ... 
*/ .bmAttributes = USB_OTG_SRP | USB_OTG_HNP, }; static const struct usb_descriptor_header *otg_desc[] = { (struct usb_descriptor_header *) &otg_descriptor, NULL, }; /*-------------------------------------------------------------------------*/ /* Module */ MODULE_DESCRIPTION(GS_VERSION_NAME); MODULE_AUTHOR("Al Borchers"); MODULE_AUTHOR("David Brownell"); MODULE_LICENSE("GPL"); static bool use_acm = true; module_param(use_acm, bool, 0); MODULE_PARM_DESC(use_acm, "Use CDC ACM, default=yes"); static bool use_obex = false; module_param(use_obex, bool, 0); MODULE_PARM_DESC(use_obex, "Use CDC OBEX, default=no"); static unsigned n_ports = 1; module_param(n_ports, uint, 0); MODULE_PARM_DESC(n_ports, "number of ports to create, default=1"); /*-------------------------------------------------------------------------*/ static int __init serial_bind_config(struct usb_configuration *c) { unsigned i; int status = 0; for (i = 0; i < n_ports && status == 0; i++) { if (use_acm) status = acm_bind_config(c, i); else if (use_obex) status = obex_bind_config(c, i); else status = gser_bind_config(c, i); } return status; } static struct usb_configuration serial_config_driver = { /* .label = f(use_acm) */ /* .bConfigurationValue = f(use_acm) */ /* .iConfiguration = DYNAMIC */ .bmAttributes = USB_CONFIG_ATT_SELFPOWER, }; static int __init gs_bind(struct usb_composite_dev *cdev) { int gcnum; struct usb_gadget *gadget = cdev->gadget; int status; status = gserial_setup(cdev->gadget, n_ports); if (status < 0) return status; /* Allocate string descriptor numbers ... note that string * contents can be overridden by the composite_dev glue. 
*/ /* device description: manufacturer, product */ snprintf(manufacturer, sizeof manufacturer, "%s %s with %s", init_utsname()->sysname, init_utsname()->release, gadget->name); status = usb_string_id(cdev); if (status < 0) goto fail; strings_dev[STRING_MANUFACTURER_IDX].id = status; device_desc.iManufacturer = status; status = usb_string_id(cdev); if (status < 0) goto fail; strings_dev[STRING_PRODUCT_IDX].id = status; device_desc.iProduct = status; /* config description */ status = usb_string_id(cdev); if (status < 0) goto fail; strings_dev[STRING_DESCRIPTION_IDX].id = status; serial_config_driver.iConfiguration = status; /* set up other descriptors */ gcnum = usb_gadget_controller_number(gadget); if (gcnum >= 0) device_desc.bcdDevice = cpu_to_le16(GS_VERSION_NUM | gcnum); else { /* this is so simple (for now, no altsettings) that it * SHOULD NOT have problems with bulk-capable hardware. * so warn about unrcognized controllers -- don't panic. * * things like configuration and altsetting numbering * can need hardware-specific attention though. */ pr_warning("gs_bind: controller '%s' not recognized\n", gadget->name); device_desc.bcdDevice = cpu_to_le16(GS_VERSION_NUM | 0x0099); } if (gadget_is_otg(cdev->gadget)) { serial_config_driver.descriptors = otg_desc; serial_config_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP; } /* register our configuration */ status = usb_add_config(cdev, &serial_config_driver, serial_bind_config); if (status < 0) goto fail; INFO(cdev, "%s\n", GS_VERSION_NAME); return 0; fail: gserial_cleanup(); return status; } static struct usb_composite_driver gserial_driver = { .name = "g_serial", .dev = &device_desc, .strings = dev_strings, .max_speed = USB_SPEED_SUPER, }; static int __init init(void) { /* We *could* export two configs; that'd be much cleaner... * but neither of these product IDs was defined that way. 
*/ if (use_acm) { serial_config_driver.label = "CDC ACM config"; serial_config_driver.bConfigurationValue = 2; device_desc.bDeviceClass = USB_CLASS_COMM; device_desc.idProduct = cpu_to_le16(GS_CDC_PRODUCT_ID); } else if (use_obex) { serial_config_driver.label = "CDC OBEX config"; serial_config_driver.bConfigurationValue = 3; device_desc.bDeviceClass = USB_CLASS_COMM; device_desc.idProduct = cpu_to_le16(GS_CDC_OBEX_PRODUCT_ID); } else { serial_config_driver.label = "Generic Serial config"; serial_config_driver.bConfigurationValue = 1; device_desc.bDeviceClass = USB_CLASS_VENDOR_SPEC; device_desc.idProduct = cpu_to_le16(GS_PRODUCT_ID); } strings_dev[STRING_DESCRIPTION_IDX].s = serial_config_driver.label; return usb_composite_probe(&gserial_driver, gs_bind); } module_init(init); static void __exit cleanup(void) { usb_composite_unregister(&gserial_driver); gserial_cleanup(); } module_exit(cleanup);
gpl-2.0
AndroPlus-org/android_kernel_sony_msm8974ac_adv
drivers/media/video/sr030pc30.c
7236
22130
/* * Driver for SiliconFile SR030PC30 VGA (1/10-Inch) Image Sensor with ISP * * Copyright (C) 2010 Samsung Electronics Co., Ltd * Author: Sylwester Nawrocki, s.nawrocki@samsung.com * * Based on original driver authored by Dongsoo Nathaniel Kim * and HeungJun Kim <riverful.kim@samsung.com>. * * Based on mt9v011 Micron Digital Image Sensor driver * Copyright (c) 2009 Mauro Carvalho Chehab (mchehab@redhat.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/i2c.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/module.h> #include <media/v4l2-device.h> #include <media/v4l2-subdev.h> #include <media/v4l2-mediabus.h> #include <media/sr030pc30.h> static int debug; module_param(debug, int, 0644); #define MODULE_NAME "SR030PC30" /* * Register offsets within a page * b15..b8 - page id, b7..b0 - register address */ #define POWER_CTRL_REG 0x0001 #define PAGEMODE_REG 0x03 #define DEVICE_ID_REG 0x0004 #define NOON010PC30_ID 0x86 #define SR030PC30_ID 0x8C #define VDO_CTL1_REG 0x0010 #define SUBSAMPL_NONE_VGA 0 #define SUBSAMPL_QVGA 0x10 #define SUBSAMPL_QQVGA 0x20 #define VDO_CTL2_REG 0x0011 #define SYNC_CTL_REG 0x0012 #define WIN_ROWH_REG 0x0020 #define WIN_ROWL_REG 0x0021 #define WIN_COLH_REG 0x0022 #define WIN_COLL_REG 0x0023 #define WIN_HEIGHTH_REG 0x0024 #define WIN_HEIGHTL_REG 0x0025 #define WIN_WIDTHH_REG 0x0026 #define WIN_WIDTHL_REG 0x0027 #define HBLANKH_REG 0x0040 #define HBLANKL_REG 0x0041 #define VSYNCH_REG 0x0042 #define VSYNCL_REG 0x0043 /* page 10 */ #define ISP_CTL_REG(n) (0x1010 + (n)) #define YOFS_REG 0x1040 #define DARK_YOFS_REG 0x1041 #define AG_ABRTH_REG 0x1050 #define SAT_CTL_REG 0x1060 #define BSAT_REG 0x1061 #define RSAT_REG 0x1062 #define AG_SAT_TH_REG 0x1063 /* page 11 */ #define ZLPF_CTRL_REG 0x1110 #define 
ZLPF_CTRL2_REG 0x1112 #define ZLPF_AGH_THR_REG 0x1121 #define ZLPF_THR_REG 0x1160 #define ZLPF_DYN_THR_REG 0x1160 /* page 12 */ #define YCLPF_CTL1_REG 0x1240 #define YCLPF_CTL2_REG 0x1241 #define YCLPF_THR_REG 0x1250 #define BLPF_CTL_REG 0x1270 #define BLPF_THR1_REG 0x1274 #define BLPF_THR2_REG 0x1275 /* page 14 - Lens Shading Compensation */ #define LENS_CTRL_REG 0x1410 #define LENS_XCEN_REG 0x1420 #define LENS_YCEN_REG 0x1421 #define LENS_R_COMP_REG 0x1422 #define LENS_G_COMP_REG 0x1423 #define LENS_B_COMP_REG 0x1424 /* page 15 - Color correction */ #define CMC_CTL_REG 0x1510 #define CMC_OFSGH_REG 0x1514 #define CMC_OFSGL_REG 0x1516 #define CMC_SIGN_REG 0x1517 /* Color correction coefficients */ #define CMC_COEF_REG(n) (0x1530 + (n)) /* Color correction offset coefficients */ #define CMC_OFS_REG(n) (0x1540 + (n)) /* page 16 - Gamma correction */ #define GMA_CTL_REG 0x1610 /* Gamma correction coefficients 0.14 */ #define GMA_COEF_REG(n) (0x1630 + (n)) /* page 20 - Auto Exposure */ #define AE_CTL1_REG 0x2010 #define AE_CTL2_REG 0x2011 #define AE_FRM_CTL_REG 0x2020 #define AE_FINE_CTL_REG(n) (0x2028 + (n)) #define EXP_TIMEH_REG 0x2083 #define EXP_TIMEM_REG 0x2084 #define EXP_TIMEL_REG 0x2085 #define EXP_MMINH_REG 0x2086 #define EXP_MMINL_REG 0x2087 #define EXP_MMAXH_REG 0x2088 #define EXP_MMAXM_REG 0x2089 #define EXP_MMAXL_REG 0x208A /* page 22 - Auto White Balance */ #define AWB_CTL1_REG 0x2210 #define AWB_ENABLE 0x80 #define AWB_CTL2_REG 0x2211 #define MWB_ENABLE 0x01 /* RGB gain control (manual WB) when AWB_CTL1[7]=0 */ #define AWB_RGAIN_REG 0x2280 #define AWB_GGAIN_REG 0x2281 #define AWB_BGAIN_REG 0x2282 #define AWB_RMAX_REG 0x2283 #define AWB_RMIN_REG 0x2284 #define AWB_BMAX_REG 0x2285 #define AWB_BMIN_REG 0x2286 /* R, B gain range in bright light conditions */ #define AWB_RMAXB_REG 0x2287 #define AWB_RMINB_REG 0x2288 #define AWB_BMAXB_REG 0x2289 #define AWB_BMINB_REG 0x228A /* manual white balance, when AWB_CTL2[0]=1 */ #define MWB_RGAIN_REG 0x22B2 #define 
MWB_BGAIN_REG 0x22B3 /* the token to mark an array end */ #define REG_TERM 0xFFFF /* Minimum and maximum exposure time in ms */ #define EXPOS_MIN_MS 1 #define EXPOS_MAX_MS 125 struct sr030pc30_info { struct v4l2_subdev sd; const struct sr030pc30_platform_data *pdata; const struct sr030pc30_format *curr_fmt; const struct sr030pc30_frmsize *curr_win; unsigned int auto_wb:1; unsigned int auto_exp:1; unsigned int hflip:1; unsigned int vflip:1; unsigned int sleep:1; unsigned int exposure; u8 blue_balance; u8 red_balance; u8 i2c_reg_page; }; struct sr030pc30_format { enum v4l2_mbus_pixelcode code; enum v4l2_colorspace colorspace; u16 ispctl1_reg; }; struct sr030pc30_frmsize { u16 width; u16 height; int vid_ctl1; }; struct i2c_regval { u16 addr; u16 val; }; static const struct v4l2_queryctrl sr030pc30_ctrl[] = { { .id = V4L2_CID_AUTO_WHITE_BALANCE, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Auto White Balance", .minimum = 0, .maximum = 1, .step = 1, .default_value = 1, }, { .id = V4L2_CID_RED_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Red Balance", .minimum = 0, .maximum = 127, .step = 1, .default_value = 64, .flags = 0, }, { .id = V4L2_CID_BLUE_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Blue Balance", .minimum = 0, .maximum = 127, .step = 1, .default_value = 64, }, { .id = V4L2_CID_EXPOSURE_AUTO, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Auto Exposure", .minimum = 0, .maximum = 1, .step = 1, .default_value = 1, }, { .id = V4L2_CID_EXPOSURE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Exposure", .minimum = EXPOS_MIN_MS, .maximum = EXPOS_MAX_MS, .step = 1, .default_value = 1, }, { } }; /* supported resolutions */ static const struct sr030pc30_frmsize sr030pc30_sizes[] = { { .width = 640, .height = 480, .vid_ctl1 = SUBSAMPL_NONE_VGA, }, { .width = 320, .height = 240, .vid_ctl1 = SUBSAMPL_QVGA, }, { .width = 160, .height = 120, .vid_ctl1 = SUBSAMPL_QQVGA, }, }; /* supported pixel formats */ static const struct sr030pc30_format sr030pc30_formats[] = { { .code = 
V4L2_MBUS_FMT_YUYV8_2X8, .colorspace = V4L2_COLORSPACE_JPEG, .ispctl1_reg = 0x03, }, { .code = V4L2_MBUS_FMT_YVYU8_2X8, .colorspace = V4L2_COLORSPACE_JPEG, .ispctl1_reg = 0x02, }, { .code = V4L2_MBUS_FMT_VYUY8_2X8, .colorspace = V4L2_COLORSPACE_JPEG, .ispctl1_reg = 0, }, { .code = V4L2_MBUS_FMT_UYVY8_2X8, .colorspace = V4L2_COLORSPACE_JPEG, .ispctl1_reg = 0x01, }, { .code = V4L2_MBUS_FMT_RGB565_2X8_BE, .colorspace = V4L2_COLORSPACE_JPEG, .ispctl1_reg = 0x40, }, }; static const struct i2c_regval sr030pc30_base_regs[] = { /* Window size and position within pixel matrix */ { WIN_ROWH_REG, 0x00 }, { WIN_ROWL_REG, 0x06 }, { WIN_COLH_REG, 0x00 }, { WIN_COLL_REG, 0x06 }, { WIN_HEIGHTH_REG, 0x01 }, { WIN_HEIGHTL_REG, 0xE0 }, { WIN_WIDTHH_REG, 0x02 }, { WIN_WIDTHL_REG, 0x80 }, { HBLANKH_REG, 0x01 }, { HBLANKL_REG, 0x50 }, { VSYNCH_REG, 0x00 }, { VSYNCL_REG, 0x14 }, { SYNC_CTL_REG, 0 }, /* Color corection and saturation */ { ISP_CTL_REG(0), 0x30 }, { YOFS_REG, 0x80 }, { DARK_YOFS_REG, 0x04 }, { AG_ABRTH_REG, 0x78 }, { SAT_CTL_REG, 0x1F }, { BSAT_REG, 0x90 }, { AG_SAT_TH_REG, 0xF0 }, { 0x1064, 0x80 }, { CMC_CTL_REG, 0x03 }, { CMC_OFSGH_REG, 0x3C }, { CMC_OFSGL_REG, 0x2C }, { CMC_SIGN_REG, 0x2F }, { CMC_COEF_REG(0), 0xCB }, { CMC_OFS_REG(0), 0x87 }, { CMC_COEF_REG(1), 0x61 }, { CMC_OFS_REG(1), 0x18 }, { CMC_COEF_REG(2), 0x16 }, { CMC_OFS_REG(2), 0x91 }, { CMC_COEF_REG(3), 0x23 }, { CMC_OFS_REG(3), 0x94 }, { CMC_COEF_REG(4), 0xCE }, { CMC_OFS_REG(4), 0x9f }, { CMC_COEF_REG(5), 0x2B }, { CMC_OFS_REG(5), 0x33 }, { CMC_COEF_REG(6), 0x01 }, { CMC_OFS_REG(6), 0x00 }, { CMC_COEF_REG(7), 0x34 }, { CMC_OFS_REG(7), 0x94 }, { CMC_COEF_REG(8), 0x75 }, { CMC_OFS_REG(8), 0x14 }, /* Color corection coefficients */ { GMA_CTL_REG, 0x03 }, { GMA_COEF_REG(0), 0x00 }, { GMA_COEF_REG(1), 0x19 }, { GMA_COEF_REG(2), 0x26 }, { GMA_COEF_REG(3), 0x3B }, { GMA_COEF_REG(4), 0x5D }, { GMA_COEF_REG(5), 0x79 }, { GMA_COEF_REG(6), 0x8E }, { GMA_COEF_REG(7), 0x9F }, { GMA_COEF_REG(8), 0xAF }, { 
GMA_COEF_REG(9), 0xBD }, { GMA_COEF_REG(10), 0xCA }, { GMA_COEF_REG(11), 0xDD }, { GMA_COEF_REG(12), 0xEC }, { GMA_COEF_REG(13), 0xF7 }, { GMA_COEF_REG(14), 0xFF }, /* Noise reduction, Z-LPF, YC-LPF and BLPF filters setup */ { ZLPF_CTRL_REG, 0x99 }, { ZLPF_CTRL2_REG, 0x0E }, { ZLPF_AGH_THR_REG, 0x29 }, { ZLPF_THR_REG, 0x0F }, { ZLPF_DYN_THR_REG, 0x63 }, { YCLPF_CTL1_REG, 0x23 }, { YCLPF_CTL2_REG, 0x3B }, { YCLPF_THR_REG, 0x05 }, { BLPF_CTL_REG, 0x1D }, { BLPF_THR1_REG, 0x05 }, { BLPF_THR2_REG, 0x04 }, /* Automatic white balance */ { AWB_CTL1_REG, 0xFB }, { AWB_CTL2_REG, 0x26 }, { AWB_RMAX_REG, 0x54 }, { AWB_RMIN_REG, 0x2B }, { AWB_BMAX_REG, 0x57 }, { AWB_BMIN_REG, 0x29 }, { AWB_RMAXB_REG, 0x50 }, { AWB_RMINB_REG, 0x43 }, { AWB_BMAXB_REG, 0x30 }, { AWB_BMINB_REG, 0x22 }, /* Auto exposure */ { AE_CTL1_REG, 0x8C }, { AE_CTL2_REG, 0x04 }, { AE_FRM_CTL_REG, 0x01 }, { AE_FINE_CTL_REG(0), 0x3F }, { AE_FINE_CTL_REG(1), 0xA3 }, { AE_FINE_CTL_REG(3), 0x34 }, /* Lens shading compensation */ { LENS_CTRL_REG, 0x01 }, { LENS_XCEN_REG, 0x80 }, { LENS_YCEN_REG, 0x70 }, { LENS_R_COMP_REG, 0x53 }, { LENS_G_COMP_REG, 0x40 }, { LENS_B_COMP_REG, 0x3e }, { REG_TERM, 0 }, }; static inline struct sr030pc30_info *to_sr030pc30(struct v4l2_subdev *sd) { return container_of(sd, struct sr030pc30_info, sd); } static inline int set_i2c_page(struct sr030pc30_info *info, struct i2c_client *client, unsigned int reg) { int ret = 0; u32 page = reg >> 8 & 0xFF; if (info->i2c_reg_page != page && (reg & 0xFF) != 0x03) { ret = i2c_smbus_write_byte_data(client, PAGEMODE_REG, page); if (!ret) info->i2c_reg_page = page; } return ret; } static int cam_i2c_read(struct v4l2_subdev *sd, u32 reg_addr) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct sr030pc30_info *info = to_sr030pc30(sd); int ret = set_i2c_page(info, client, reg_addr); if (!ret) ret = i2c_smbus_read_byte_data(client, reg_addr & 0xFF); return ret; } static int cam_i2c_write(struct v4l2_subdev *sd, u32 reg_addr, u32 val) { struct 
i2c_client *client = v4l2_get_subdevdata(sd); struct sr030pc30_info *info = to_sr030pc30(sd); int ret = set_i2c_page(info, client, reg_addr); if (!ret) ret = i2c_smbus_write_byte_data( client, reg_addr & 0xFF, val); return ret; } static inline int sr030pc30_bulk_write_reg(struct v4l2_subdev *sd, const struct i2c_regval *msg) { while (msg->addr != REG_TERM) { int ret = cam_i2c_write(sd, msg->addr, msg->val); if (ret) return ret; msg++; } return 0; } /* Device reset and sleep mode control */ static int sr030pc30_pwr_ctrl(struct v4l2_subdev *sd, bool reset, bool sleep) { struct sr030pc30_info *info = to_sr030pc30(sd); u8 reg = sleep ? 0xF1 : 0xF0; int ret = 0; if (reset) ret = cam_i2c_write(sd, POWER_CTRL_REG, reg | 0x02); if (!ret) { ret = cam_i2c_write(sd, POWER_CTRL_REG, reg); if (!ret) { info->sleep = sleep; if (reset) info->i2c_reg_page = -1; } } return ret; } static inline int sr030pc30_enable_autoexposure(struct v4l2_subdev *sd, int on) { struct sr030pc30_info *info = to_sr030pc30(sd); /* auto anti-flicker is also enabled here */ int ret = cam_i2c_write(sd, AE_CTL1_REG, on ? 0xDC : 0x0C); if (!ret) info->auto_exp = on; return ret; } static int sr030pc30_set_exposure(struct v4l2_subdev *sd, int value) { struct sr030pc30_info *info = to_sr030pc30(sd); unsigned long expos = value * info->pdata->clk_rate / (8 * 1000); int ret = cam_i2c_write(sd, EXP_TIMEH_REG, expos >> 16 & 0xFF); if (!ret) ret = cam_i2c_write(sd, EXP_TIMEM_REG, expos >> 8 & 0xFF); if (!ret) ret = cam_i2c_write(sd, EXP_TIMEL_REG, expos & 0xFF); if (!ret) { /* Turn off AE */ info->exposure = value; ret = sr030pc30_enable_autoexposure(sd, 0); } return ret; } /* Automatic white balance control */ static int sr030pc30_enable_autowhitebalance(struct v4l2_subdev *sd, int on) { struct sr030pc30_info *info = to_sr030pc30(sd); int ret = cam_i2c_write(sd, AWB_CTL2_REG, on ? 0x2E : 0x2F); if (!ret) ret = cam_i2c_write(sd, AWB_CTL1_REG, on ? 
0xFB : 0x7B); if (!ret) info->auto_wb = on; return ret; } static int sr030pc30_set_flip(struct v4l2_subdev *sd) { struct sr030pc30_info *info = to_sr030pc30(sd); s32 reg = cam_i2c_read(sd, VDO_CTL2_REG); if (reg < 0) return reg; reg &= 0x7C; if (info->hflip) reg |= 0x01; if (info->vflip) reg |= 0x02; return cam_i2c_write(sd, VDO_CTL2_REG, reg | 0x80); } /* Configure resolution, color format and image flip */ static int sr030pc30_set_params(struct v4l2_subdev *sd) { struct sr030pc30_info *info = to_sr030pc30(sd); int ret; if (!info->curr_win) return -EINVAL; /* Configure the resolution through subsampling */ ret = cam_i2c_write(sd, VDO_CTL1_REG, info->curr_win->vid_ctl1); if (!ret && info->curr_fmt) ret = cam_i2c_write(sd, ISP_CTL_REG(0), info->curr_fmt->ispctl1_reg); if (!ret) ret = sr030pc30_set_flip(sd); return ret; } /* Find nearest matching image pixel size. */ static int sr030pc30_try_frame_size(struct v4l2_mbus_framefmt *mf) { unsigned int min_err = ~0; int i = ARRAY_SIZE(sr030pc30_sizes); const struct sr030pc30_frmsize *fsize = &sr030pc30_sizes[0], *match = NULL; while (i--) { int err = abs(fsize->width - mf->width) + abs(fsize->height - mf->height); if (err < min_err) { min_err = err; match = fsize; } fsize++; } if (match) { mf->width = match->width; mf->height = match->height; return 0; } return -EINVAL; } static int sr030pc30_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc) { int i; for (i = 0; i < ARRAY_SIZE(sr030pc30_ctrl); i++) if (qc->id == sr030pc30_ctrl[i].id) { *qc = sr030pc30_ctrl[i]; v4l2_dbg(1, debug, sd, "%s id: %d\n", __func__, qc->id); return 0; } return -EINVAL; } static inline int sr030pc30_set_bluebalance(struct v4l2_subdev *sd, int value) { int ret = cam_i2c_write(sd, MWB_BGAIN_REG, value); if (!ret) to_sr030pc30(sd)->blue_balance = value; return ret; } static inline int sr030pc30_set_redbalance(struct v4l2_subdev *sd, int value) { int ret = cam_i2c_write(sd, MWB_RGAIN_REG, value); if (!ret) to_sr030pc30(sd)->red_balance = 
value; return ret; } static int sr030pc30_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { int i, ret = 0; for (i = 0; i < ARRAY_SIZE(sr030pc30_ctrl); i++) if (ctrl->id == sr030pc30_ctrl[i].id) break; if (i == ARRAY_SIZE(sr030pc30_ctrl)) return -EINVAL; if (ctrl->value < sr030pc30_ctrl[i].minimum || ctrl->value > sr030pc30_ctrl[i].maximum) return -ERANGE; v4l2_dbg(1, debug, sd, "%s: ctrl_id: %d, value: %d\n", __func__, ctrl->id, ctrl->value); switch (ctrl->id) { case V4L2_CID_AUTO_WHITE_BALANCE: sr030pc30_enable_autowhitebalance(sd, ctrl->value); break; case V4L2_CID_BLUE_BALANCE: ret = sr030pc30_set_bluebalance(sd, ctrl->value); break; case V4L2_CID_RED_BALANCE: ret = sr030pc30_set_redbalance(sd, ctrl->value); break; case V4L2_CID_EXPOSURE_AUTO: sr030pc30_enable_autoexposure(sd, ctrl->value == V4L2_EXPOSURE_AUTO); break; case V4L2_CID_EXPOSURE: ret = sr030pc30_set_exposure(sd, ctrl->value); break; default: return -EINVAL; } return ret; } static int sr030pc30_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { struct sr030pc30_info *info = to_sr030pc30(sd); v4l2_dbg(1, debug, sd, "%s: id: %d\n", __func__, ctrl->id); switch (ctrl->id) { case V4L2_CID_AUTO_WHITE_BALANCE: ctrl->value = info->auto_wb; break; case V4L2_CID_BLUE_BALANCE: ctrl->value = info->blue_balance; break; case V4L2_CID_RED_BALANCE: ctrl->value = info->red_balance; break; case V4L2_CID_EXPOSURE_AUTO: ctrl->value = info->auto_exp; break; case V4L2_CID_EXPOSURE: ctrl->value = info->exposure; break; default: return -EINVAL; } return 0; } static int sr030pc30_enum_fmt(struct v4l2_subdev *sd, unsigned int index, enum v4l2_mbus_pixelcode *code) { if (!code || index >= ARRAY_SIZE(sr030pc30_formats)) return -EINVAL; *code = sr030pc30_formats[index].code; return 0; } static int sr030pc30_g_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct sr030pc30_info *info = to_sr030pc30(sd); int ret; if (!mf) return -EINVAL; if (!info->curr_win || !info->curr_fmt) { ret = 
sr030pc30_set_params(sd); if (ret) return ret; } mf->width = info->curr_win->width; mf->height = info->curr_win->height; mf->code = info->curr_fmt->code; mf->colorspace = info->curr_fmt->colorspace; mf->field = V4L2_FIELD_NONE; return 0; } /* Return nearest media bus frame format. */ static const struct sr030pc30_format *try_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { int i = ARRAY_SIZE(sr030pc30_formats); sr030pc30_try_frame_size(mf); while (i--) if (mf->code == sr030pc30_formats[i].code) break; mf->code = sr030pc30_formats[i].code; return &sr030pc30_formats[i]; } /* Return nearest media bus frame format. */ static int sr030pc30_try_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { if (!sd || !mf) return -EINVAL; try_fmt(sd, mf); return 0; } static int sr030pc30_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct sr030pc30_info *info = to_sr030pc30(sd); if (!sd || !mf) return -EINVAL; info->curr_fmt = try_fmt(sd, mf); return sr030pc30_set_params(sd); } static int sr030pc30_base_config(struct v4l2_subdev *sd) { struct sr030pc30_info *info = to_sr030pc30(sd); int ret; unsigned long expmin, expmax; ret = sr030pc30_bulk_write_reg(sd, sr030pc30_base_regs); if (!ret) { info->curr_fmt = &sr030pc30_formats[0]; info->curr_win = &sr030pc30_sizes[0]; ret = sr030pc30_set_params(sd); } if (!ret) ret = sr030pc30_pwr_ctrl(sd, false, false); if (!ret && !info->pdata) return ret; expmin = EXPOS_MIN_MS * info->pdata->clk_rate / (8 * 1000); expmax = EXPOS_MAX_MS * info->pdata->clk_rate / (8 * 1000); v4l2_dbg(1, debug, sd, "%s: expmin= %lx, expmax= %lx", __func__, expmin, expmax); /* Setting up manual exposure time range */ ret = cam_i2c_write(sd, EXP_MMINH_REG, expmin >> 8 & 0xFF); if (!ret) ret = cam_i2c_write(sd, EXP_MMINL_REG, expmin & 0xFF); if (!ret) ret = cam_i2c_write(sd, EXP_MMAXH_REG, expmax >> 16 & 0xFF); if (!ret) ret = cam_i2c_write(sd, EXP_MMAXM_REG, expmax >> 8 & 0xFF); if (!ret) ret = cam_i2c_write(sd, EXP_MMAXL_REG, 
expmax & 0xFF); return ret; } static int sr030pc30_s_power(struct v4l2_subdev *sd, int on) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct sr030pc30_info *info = to_sr030pc30(sd); const struct sr030pc30_platform_data *pdata = info->pdata; int ret; if (pdata == NULL) { WARN(1, "No platform data!\n"); return -EINVAL; } /* * Put sensor into power sleep mode before switching off * power and disabling MCLK. */ if (!on) sr030pc30_pwr_ctrl(sd, false, true); /* set_power controls sensor's power and clock */ if (pdata->set_power) { ret = pdata->set_power(&client->dev, on); if (ret) return ret; } if (on) { ret = sr030pc30_base_config(sd); } else { ret = 0; info->curr_win = NULL; info->curr_fmt = NULL; } return ret; } static const struct v4l2_subdev_core_ops sr030pc30_core_ops = { .s_power = sr030pc30_s_power, .queryctrl = sr030pc30_queryctrl, .s_ctrl = sr030pc30_s_ctrl, .g_ctrl = sr030pc30_g_ctrl, }; static const struct v4l2_subdev_video_ops sr030pc30_video_ops = { .g_mbus_fmt = sr030pc30_g_fmt, .s_mbus_fmt = sr030pc30_s_fmt, .try_mbus_fmt = sr030pc30_try_fmt, .enum_mbus_fmt = sr030pc30_enum_fmt, }; static const struct v4l2_subdev_ops sr030pc30_ops = { .core = &sr030pc30_core_ops, .video = &sr030pc30_video_ops, }; /* * Detect sensor type. Return 0 if SR030PC30 was detected * or -ENODEV otherwise. */ static int sr030pc30_detect(struct i2c_client *client) { const struct sr030pc30_platform_data *pdata = client->dev.platform_data; int ret; /* Enable sensor's power and clock */ if (pdata->set_power) { ret = pdata->set_power(&client->dev, 1); if (ret) return ret; } ret = i2c_smbus_read_byte_data(client, DEVICE_ID_REG); if (pdata->set_power) pdata->set_power(&client->dev, 0); if (ret < 0) { dev_err(&client->dev, "%s: I2C read failed\n", __func__); return ret; } return ret == SR030PC30_ID ? 
0 : -ENODEV; } static int sr030pc30_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct sr030pc30_info *info; struct v4l2_subdev *sd; const struct sr030pc30_platform_data *pdata = client->dev.platform_data; int ret; if (!pdata) { dev_err(&client->dev, "No platform data!"); return -EIO; } ret = sr030pc30_detect(client); if (ret) return ret; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; sd = &info->sd; strcpy(sd->name, MODULE_NAME); info->pdata = client->dev.platform_data; v4l2_i2c_subdev_init(sd, client, &sr030pc30_ops); info->i2c_reg_page = -1; info->hflip = 1; info->auto_exp = 1; info->exposure = 30; return 0; } static int sr030pc30_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct sr030pc30_info *info = to_sr030pc30(sd); v4l2_device_unregister_subdev(sd); kfree(info); return 0; } static const struct i2c_device_id sr030pc30_id[] = { { MODULE_NAME, 0 }, { }, }; MODULE_DEVICE_TABLE(i2c, sr030pc30_id); static struct i2c_driver sr030pc30_i2c_driver = { .driver = { .name = MODULE_NAME }, .probe = sr030pc30_probe, .remove = sr030pc30_remove, .id_table = sr030pc30_id, }; module_i2c_driver(sr030pc30_i2c_driver); MODULE_DESCRIPTION("Siliconfile SR030PC30 camera driver"); MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>"); MODULE_LICENSE("GPL");
gpl-2.0
Supervenom/linux-mod_sys_call
drivers/media/pci/saa7164/saa7164-cards.c
8772
21639
/* * Driver for the NXP SAA7164 PCIe bridge * * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/delay.h> #include "saa7164.h" /* The Bridge API needs to understand register widths (in bytes) for the * attached I2C devices, so we can simplify the virtual i2c mechansms * and keep the -i2c.c implementation clean. */ #define REGLEN_8bit 1 #define REGLEN_16bit 2 struct saa7164_board saa7164_boards[] = { [SAA7164_BOARD_UNKNOWN] = { /* Bridge will not load any firmware, without knowing * the rev this would be fatal. 
*/ .name = "Unknown", }, [SAA7164_BOARD_UNKNOWN_REV2] = { /* Bridge will load the v2 f/w and dump descriptors */ /* Required during new board bringup */ .name = "Generic Rev2", .chiprev = SAA7164_CHIP_REV2, }, [SAA7164_BOARD_UNKNOWN_REV3] = { /* Bridge will load the v2 f/w and dump descriptors */ /* Required during new board bringup */ .name = "Generic Rev3", .chiprev = SAA7164_CHIP_REV3, }, [SAA7164_BOARD_HAUPPAUGE_HVR2200] = { .name = "Hauppauge WinTV-HVR2200", .porta = SAA7164_MPEG_DVB, .portb = SAA7164_MPEG_DVB, .portc = SAA7164_MPEG_ENCODER, .portd = SAA7164_MPEG_ENCODER, .porte = SAA7164_MPEG_VBI, .portf = SAA7164_MPEG_VBI, .chiprev = SAA7164_CHIP_REV3, .unit = {{ .id = 0x1d, .type = SAA7164_UNIT_EEPROM, .name = "4K EEPROM", .i2c_bus_nr = SAA7164_I2C_BUS_0, .i2c_bus_addr = 0xa0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x04, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1b, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1e, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "TDA10048-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x10 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1f, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "TDA10048-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x12 >> 1, .i2c_reg_len = REGLEN_8bit, } }, }, [SAA7164_BOARD_HAUPPAUGE_HVR2200_2] = { .name = "Hauppauge WinTV-HVR2200", .porta = SAA7164_MPEG_DVB, .portb = SAA7164_MPEG_DVB, .portc = SAA7164_MPEG_ENCODER, .portd = SAA7164_MPEG_ENCODER, .porte = SAA7164_MPEG_VBI, .portf = SAA7164_MPEG_VBI, .chiprev = SAA7164_CHIP_REV2, .unit = {{ .id = 0x06, .type = SAA7164_UNIT_EEPROM, .name = "4K EEPROM", .i2c_bus_nr = SAA7164_I2C_BUS_0, .i2c_bus_addr = 0xa0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x04, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-1", .i2c_bus_nr 
= SAA7164_I2C_BUS_1, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x05, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "TDA10048-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x10 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1e, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1f, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "TDA10048-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x12 >> 1, .i2c_reg_len = REGLEN_8bit, } }, }, [SAA7164_BOARD_HAUPPAUGE_HVR2200_3] = { .name = "Hauppauge WinTV-HVR2200", .porta = SAA7164_MPEG_DVB, .portb = SAA7164_MPEG_DVB, .portc = SAA7164_MPEG_ENCODER, .portd = SAA7164_MPEG_ENCODER, .porte = SAA7164_MPEG_VBI, .portf = SAA7164_MPEG_VBI, .chiprev = SAA7164_CHIP_REV2, .unit = {{ .id = 0x1d, .type = SAA7164_UNIT_EEPROM, .name = "4K EEPROM", .i2c_bus_nr = SAA7164_I2C_BUS_0, .i2c_bus_addr = 0xa0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x04, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x05, .type = SAA7164_UNIT_ANALOG_DEMODULATOR, .name = "TDA8290-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x84 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1b, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1c, .type = SAA7164_UNIT_ANALOG_DEMODULATOR, .name = "TDA8290-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x84 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1e, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "TDA10048-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x10 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1f, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "TDA10048-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x12 >> 1, .i2c_reg_len = REGLEN_8bit, } }, 
}, [SAA7164_BOARD_HAUPPAUGE_HVR2200_4] = { .name = "Hauppauge WinTV-HVR2200", .porta = SAA7164_MPEG_DVB, .portb = SAA7164_MPEG_DVB, .portc = SAA7164_MPEG_ENCODER, .portd = SAA7164_MPEG_ENCODER, .porte = SAA7164_MPEG_VBI, .portf = SAA7164_MPEG_VBI, .chiprev = SAA7164_CHIP_REV3, .unit = {{ .id = 0x1d, .type = SAA7164_UNIT_EEPROM, .name = "4K EEPROM", .i2c_bus_nr = SAA7164_I2C_BUS_0, .i2c_bus_addr = 0xa0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x04, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x05, .type = SAA7164_UNIT_ANALOG_DEMODULATOR, .name = "TDA8290-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x84 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1b, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1c, .type = SAA7164_UNIT_ANALOG_DEMODULATOR, .name = "TDA8290-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x84 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1e, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "TDA10048-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x10 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1f, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "TDA10048-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x12 >> 1, .i2c_reg_len = REGLEN_8bit, } }, }, [SAA7164_BOARD_HAUPPAUGE_HVR2250] = { .name = "Hauppauge WinTV-HVR2250", .porta = SAA7164_MPEG_DVB, .portb = SAA7164_MPEG_DVB, .portc = SAA7164_MPEG_ENCODER, .portd = SAA7164_MPEG_ENCODER, .porte = SAA7164_MPEG_VBI, .portf = SAA7164_MPEG_VBI, .chiprev = SAA7164_CHIP_REV3, .unit = {{ .id = 0x22, .type = SAA7164_UNIT_EEPROM, .name = "4K EEPROM", .i2c_bus_nr = SAA7164_I2C_BUS_0, .i2c_bus_addr = 0xa0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x04, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0xc0 >> 1, 
.i2c_reg_len = REGLEN_8bit, }, { .id = 0x07, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "CX24228/S5H1411-1 (TOP)", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x32 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x08, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "CX24228/S5H1411-1 (QAM)", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x34 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1e, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x20, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "CX24228/S5H1411-2 (TOP)", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x32 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x23, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "CX24228/S5H1411-2 (QAM)", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x34 >> 1, .i2c_reg_len = REGLEN_8bit, } }, }, [SAA7164_BOARD_HAUPPAUGE_HVR2250_2] = { .name = "Hauppauge WinTV-HVR2250", .porta = SAA7164_MPEG_DVB, .portb = SAA7164_MPEG_DVB, .portc = SAA7164_MPEG_ENCODER, .portd = SAA7164_MPEG_ENCODER, .porte = SAA7164_MPEG_VBI, .portf = SAA7164_MPEG_VBI, .chiprev = SAA7164_CHIP_REV3, .unit = {{ .id = 0x28, .type = SAA7164_UNIT_EEPROM, .name = "4K EEPROM", .i2c_bus_nr = SAA7164_I2C_BUS_0, .i2c_bus_addr = 0xa0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x04, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x07, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "CX24228/S5H1411-1 (TOP)", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x32 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x08, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "CX24228/S5H1411-1 (QAM)", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x34 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x24, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0xc0 >> 
1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x26, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "CX24228/S5H1411-2 (TOP)", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x32 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x29, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "CX24228/S5H1411-2 (QAM)", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x34 >> 1, .i2c_reg_len = REGLEN_8bit, } }, }, [SAA7164_BOARD_HAUPPAUGE_HVR2250_3] = { .name = "Hauppauge WinTV-HVR2250", .porta = SAA7164_MPEG_DVB, .portb = SAA7164_MPEG_DVB, .portc = SAA7164_MPEG_ENCODER, .portd = SAA7164_MPEG_ENCODER, .porte = SAA7164_MPEG_VBI, .portf = SAA7164_MPEG_VBI, .chiprev = SAA7164_CHIP_REV3, .unit = {{ .id = 0x26, .type = SAA7164_UNIT_EEPROM, .name = "4K EEPROM", .i2c_bus_nr = SAA7164_I2C_BUS_0, .i2c_bus_addr = 0xa0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x04, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x07, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "CX24228/S5H1411-1 (TOP)", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x32 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x08, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "CX24228/S5H1411-1 (QAM)", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x34 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x22, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x24, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "CX24228/S5H1411-2 (TOP)", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x32 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x27, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "CX24228/S5H1411-2 (QAM)", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x34 >> 1, .i2c_reg_len = REGLEN_8bit, } }, }, [SAA7164_BOARD_HAUPPAUGE_HVR2200_5] = { .name = "Hauppauge WinTV-HVR2200", .porta = SAA7164_MPEG_DVB, .portb = 
SAA7164_MPEG_DVB, .chiprev = SAA7164_CHIP_REV3, .unit = {{ .id = 0x23, .type = SAA7164_UNIT_EEPROM, .name = "4K EEPROM", .i2c_bus_nr = SAA7164_I2C_BUS_0, .i2c_bus_addr = 0xa0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x04, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x05, .type = SAA7164_UNIT_ANALOG_DEMODULATOR, .name = "TDA8290-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x84 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x21, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x22, .type = SAA7164_UNIT_ANALOG_DEMODULATOR, .name = "TDA8290-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x84 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x24, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "TDA10048-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x10 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x25, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "TDA10048-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x12 >> 1, .i2c_reg_len = REGLEN_8bit, } }, }, }; const unsigned int saa7164_bcount = ARRAY_SIZE(saa7164_boards); /* ------------------------------------------------------------------ */ /* PCI subsystem IDs */ struct saa7164_subid saa7164_subids[] = { { .subvendor = 0x0070, .subdevice = 0x8880, .card = SAA7164_BOARD_HAUPPAUGE_HVR2250, }, { .subvendor = 0x0070, .subdevice = 0x8810, .card = SAA7164_BOARD_HAUPPAUGE_HVR2250, }, { .subvendor = 0x0070, .subdevice = 0x8980, .card = SAA7164_BOARD_HAUPPAUGE_HVR2200, }, { .subvendor = 0x0070, .subdevice = 0x8900, .card = SAA7164_BOARD_HAUPPAUGE_HVR2200_2, }, { .subvendor = 0x0070, .subdevice = 0x8901, .card = SAA7164_BOARD_HAUPPAUGE_HVR2200_3, }, { .subvendor = 0x0070, .subdevice = 0x88A1, .card = SAA7164_BOARD_HAUPPAUGE_HVR2250_3, }, { .subvendor = 0x0070, .subdevice = 0x8891, .card = 
SAA7164_BOARD_HAUPPAUGE_HVR2250_2, }, { .subvendor = 0x0070, .subdevice = 0x8851, .card = SAA7164_BOARD_HAUPPAUGE_HVR2250_2, }, { .subvendor = 0x0070, .subdevice = 0x8940, .card = SAA7164_BOARD_HAUPPAUGE_HVR2200_4, }, { .subvendor = 0x0070, .subdevice = 0x8953, .card = SAA7164_BOARD_HAUPPAUGE_HVR2200_5, }, }; const unsigned int saa7164_idcount = ARRAY_SIZE(saa7164_subids); void saa7164_card_list(struct saa7164_dev *dev) { int i; if (0 == dev->pci->subsystem_vendor && 0 == dev->pci->subsystem_device) { printk(KERN_ERR "%s: Board has no valid PCIe Subsystem ID and can't\n" "%s: be autodetected. Pass card=<n> insmod option to\n" "%s: workaround that. Send complaints to the vendor\n" "%s: of the TV card. Best regards,\n" "%s: -- tux\n", dev->name, dev->name, dev->name, dev->name, dev->name); } else { printk(KERN_ERR "%s: Your board isn't known (yet) to the driver.\n" "%s: Try to pick one of the existing card configs via\n" "%s: card=<n> insmod option. Updating to the latest\n" "%s: version might help as well.\n", dev->name, dev->name, dev->name, dev->name); } printk(KERN_ERR "%s: Here are valid choices for the card=<n> insmod " "option:\n", dev->name); for (i = 0; i < saa7164_bcount; i++) printk(KERN_ERR "%s: card=%d -> %s\n", dev->name, i, saa7164_boards[i].name); } /* TODO: clean this define up into the -cards.c structs */ #define PCIEBRIDGE_UNITID 2 void saa7164_gpio_setup(struct saa7164_dev *dev) { switch (dev->board) { case SAA7164_BOARD_HAUPPAUGE_HVR2200: case SAA7164_BOARD_HAUPPAUGE_HVR2200_2: case SAA7164_BOARD_HAUPPAUGE_HVR2200_3: case SAA7164_BOARD_HAUPPAUGE_HVR2200_4: case SAA7164_BOARD_HAUPPAUGE_HVR2200_5: case SAA7164_BOARD_HAUPPAUGE_HVR2250: case SAA7164_BOARD_HAUPPAUGE_HVR2250_2: case SAA7164_BOARD_HAUPPAUGE_HVR2250_3: /* GPIO 2: s5h1411 / tda10048-1 demod reset GPIO 3: s5h1411 / tda10048-2 demod reset GPIO 7: IRBlaster Zilog reset */ /* Reset parts by going in and out of reset */ saa7164_api_clear_gpiobit(dev, PCIEBRIDGE_UNITID, 2); 
saa7164_api_clear_gpiobit(dev, PCIEBRIDGE_UNITID, 3); msleep(20); saa7164_api_set_gpiobit(dev, PCIEBRIDGE_UNITID, 2); saa7164_api_set_gpiobit(dev, PCIEBRIDGE_UNITID, 3); break; } } static void hauppauge_eeprom(struct saa7164_dev *dev, u8 *eeprom_data) { struct tveeprom tv; /* TODO: Assumption: eeprom on bus 0 */ tveeprom_hauppauge_analog(&dev->i2c_bus[0].i2c_client, &tv, eeprom_data); /* Make sure we support the board model */ switch (tv.model) { case 88001: /* Development board - Limit circulation */ /* WinTV-HVR2250 (PCIe, Retail, full-height bracket) * ATSC/QAM (TDA18271/S5H1411) and basic analog, no IR, FM */ case 88021: /* WinTV-HVR2250 (PCIe, Retail, full-height bracket) * ATSC/QAM (TDA18271/S5H1411) and basic analog, MCE CIR, FM */ break; case 88041: /* WinTV-HVR2250 (PCIe, Retail, full-height bracket) * ATSC/QAM (TDA18271/S5H1411) and basic analog, no IR, FM */ break; case 88061: /* WinTV-HVR2250 (PCIe, Retail, full-height bracket) * ATSC/QAM (TDA18271/S5H1411) and basic analog, FM */ break; case 89519: case 89609: /* WinTV-HVR2200 (PCIe, Retail, full-height) * DVB-T (TDA18271/TDA10048) and basic analog, no IR */ break; case 89619: /* WinTV-HVR2200 (PCIe, Retail, half-height) * DVB-T (TDA18271/TDA10048) and basic analog, no IR */ break; default: printk(KERN_ERR "%s: Warning: Unknown Hauppauge model #%d\n", dev->name, tv.model); break; } printk(KERN_INFO "%s: Hauppauge eeprom: model=%d\n", dev->name, tv.model); } void saa7164_card_setup(struct saa7164_dev *dev) { static u8 eeprom[256]; if (dev->i2c_bus[0].i2c_rc == 0) { if (saa7164_api_read_eeprom(dev, &eeprom[0], sizeof(eeprom)) < 0) return; } switch (dev->board) { case SAA7164_BOARD_HAUPPAUGE_HVR2200: case SAA7164_BOARD_HAUPPAUGE_HVR2200_2: case SAA7164_BOARD_HAUPPAUGE_HVR2200_3: case SAA7164_BOARD_HAUPPAUGE_HVR2200_4: case SAA7164_BOARD_HAUPPAUGE_HVR2200_5: case SAA7164_BOARD_HAUPPAUGE_HVR2250: case SAA7164_BOARD_HAUPPAUGE_HVR2250_2: case SAA7164_BOARD_HAUPPAUGE_HVR2250_3: hauppauge_eeprom(dev, 
&eeprom[0]); break; } } /* With most other drivers, the kernel expects to communicate with subdrivers * through i2c. This bridge does not allow that, it does not expose any direct * access to I2C. Instead we have to communicate through the device f/w for * register access to 'processing units'. Each unit has a unique * id, regardless of how the physical implementation occurs across * the three physical i2c busses. The being said if we want leverge of * the existing kernel drivers for tuners and demods we have to 'speak i2c', * to this bridge implements 3 virtual i2c buses. This is a helper function * for those. * * Description: Translate the kernels notion of an i2c address and bus into * the appropriate unitid. */ int saa7164_i2caddr_to_unitid(struct saa7164_i2c *bus, int addr) { /* For a given bus and i2c device address, return the saa7164 unique * unitid. < 0 on error */ struct saa7164_dev *dev = bus->dev; struct saa7164_unit *unit; int i; for (i = 0; i < SAA7164_MAX_UNITS; i++) { unit = &saa7164_boards[dev->board].unit[i]; if (unit->type == SAA7164_UNIT_UNDEFINED) continue; if ((bus->nr == unit->i2c_bus_nr) && (addr == unit->i2c_bus_addr)) return unit->id; } return -1; } /* The 7164 API needs to know the i2c register length in advance. * this is a helper function. Based on a specific chip addr and bus return the * reg length. */ int saa7164_i2caddr_to_reglen(struct saa7164_i2c *bus, int addr) { /* For a given bus and i2c device address, return the * saa7164 registry address width. < 0 on error */ struct saa7164_dev *dev = bus->dev; struct saa7164_unit *unit; int i; for (i = 0; i < SAA7164_MAX_UNITS; i++) { unit = &saa7164_boards[dev->board].unit[i]; if (unit->type == SAA7164_UNIT_UNDEFINED) continue; if ((bus->nr == unit->i2c_bus_nr) && (addr == unit->i2c_bus_addr)) return unit->i2c_reg_len; } return -1; } /* TODO: implement a 'findeeprom' functio like the above and fix any other * eeprom related todo's in -api.c. 
*/ /* Translate a unitid into a x readable device name, for display purposes. */ char *saa7164_unitid_name(struct saa7164_dev *dev, u8 unitid) { char *undefed = "UNDEFINED"; char *bridge = "BRIDGE"; struct saa7164_unit *unit; int i; if (unitid == 0) return bridge; for (i = 0; i < SAA7164_MAX_UNITS; i++) { unit = &saa7164_boards[dev->board].unit[i]; if (unit->type == SAA7164_UNIT_UNDEFINED) continue; if (unitid == unit->id) return unit->name; } return undefed; }
gpl-2.0
alebcay/android_kernel_oneplus_msm8974
arch/m32r/platforms/mappi3/setup.c
9028
5437
/*
 *  linux/arch/m32r/platforms/mappi3/setup.c
 *
 *  Setup routines for Renesas MAPPI-III(M3A-2170) Board
 *
 *  Copyright (c) 2001-2005  Hiroyuki Kondo, Hirokazu Takata,
 *                           Hitoshi Yamamoto, Mamoru Sakugawa
 */

#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>

#include <asm/m32r.h>
#include <asm/io.h>

/* Map an IRQ number onto its ICU control register port (IRQ 1 based). */
#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))

icu_data_t icu_data[NR_IRQS];

/*
 * Mask an interrupt by programming its ICU entry to priority level 7.
 * FIX: the printk calls below previously had no KERN_ log level.
 */
static void disable_mappi3_irq(unsigned int irq)
{
	unsigned long port, data;

	if ((irq == 0) || (irq >= NR_IRQS)) {
		printk(KERN_ERR "bad irq 0x%08x\n", irq);
		return;
	}
	port = irq2port(irq);
	data = icu_data[irq].icucr|M32R_ICUCR_ILEVEL7;
	outl(data, port);
}

/* Unmask an interrupt: enable it at priority level 6. */
static void enable_mappi3_irq(unsigned int irq)
{
	unsigned long port, data;

	if ((irq == 0) || (irq >= NR_IRQS)) {
		printk(KERN_ERR "bad irq 0x%08x\n", irq);
		return;
	}
	port = irq2port(irq);
	data = icu_data[irq].icucr|M32R_ICUCR_IEN|M32R_ICUCR_ILEVEL6;
	outl(data, port);
}

static void mask_mappi3(struct irq_data *data)
{
	disable_mappi3_irq(data->irq);
}

static void unmask_mappi3(struct irq_data *data)
{
	enable_mappi3_irq(data->irq);
}

/* Shutdown: force the ICU entry to level 7 without touching icu_data. */
static void shutdown_mappi3(struct irq_data *data)
{
	unsigned long port;

	port = irq2port(data->irq);
	outl(M32R_ICUCR_ILEVEL7, port);
}

static struct irq_chip mappi3_irq_type = {
	.name		= "MAPPI3-IRQ",
	.irq_shutdown	= shutdown_mappi3,
	.irq_mask	= mask_mappi3,
	.irq_unmask	= unmask_mappi3,
};

/*
 * Register the board's interrupt sources with the generic IRQ layer.
 * Each source is set up level-triggered and initially masked; the
 * icucr value records the sense mode used when the IRQ is enabled.
 */
void __init init_IRQ(void)
{
#if defined(CONFIG_SMC91X)
	/* INT0 : LAN controller (SMC91111) */
	irq_set_chip_and_handler(M32R_IRQ_INT0, &mappi3_irq_type,
				 handle_level_irq);
	icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
	disable_mappi3_irq(M32R_IRQ_INT0);
#endif  /* CONFIG_SMC91X */

	/* MFT2 : system timer */
	irq_set_chip_and_handler(M32R_IRQ_MFT2, &mappi3_irq_type,
				 handle_level_irq);
	icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
	disable_mappi3_irq(M32R_IRQ_MFT2);

#ifdef CONFIG_SERIAL_M32R_SIO
	/* SIO0_R : uart receive data */
	irq_set_chip_and_handler(M32R_IRQ_SIO0_R, &mappi3_irq_type,
				 handle_level_irq);
	icu_data[M32R_IRQ_SIO0_R].icucr = 0;
	disable_mappi3_irq(M32R_IRQ_SIO0_R);

	/* SIO0_S : uart send data */
	irq_set_chip_and_handler(M32R_IRQ_SIO0_S, &mappi3_irq_type,
				 handle_level_irq);
	icu_data[M32R_IRQ_SIO0_S].icucr = 0;
	disable_mappi3_irq(M32R_IRQ_SIO0_S);

	/* SIO1_R : uart receive data */
	irq_set_chip_and_handler(M32R_IRQ_SIO1_R, &mappi3_irq_type,
				 handle_level_irq);
	icu_data[M32R_IRQ_SIO1_R].icucr = 0;
	disable_mappi3_irq(M32R_IRQ_SIO1_R);

	/* SIO1_S : uart send data */
	irq_set_chip_and_handler(M32R_IRQ_SIO1_S, &mappi3_irq_type,
				 handle_level_irq);
	icu_data[M32R_IRQ_SIO1_S].icucr = 0;
	disable_mappi3_irq(M32R_IRQ_SIO1_S);
#endif  /* CONFIG_SERIAL_M32R_SIO */
	/* FIX: the #endif comment above previously said
	 * CONFIG_M32R_USE_DBG_CONSOLE, which does not match the #ifdef. */

#if defined(CONFIG_USB)
	/* INT1 : USB Host controller interrupt */
	irq_set_chip_and_handler(M32R_IRQ_INT1, &mappi3_irq_type,
				 handle_level_irq);
	icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_ISMOD01;
	disable_mappi3_irq(M32R_IRQ_INT1);
#endif /* CONFIG_USB */

	/* CFC IREQ */
	irq_set_chip_and_handler(PLD_IRQ_CFIREQ, &mappi3_irq_type,
				 handle_level_irq);
	icu_data[PLD_IRQ_CFIREQ].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD01;
	disable_mappi3_irq(PLD_IRQ_CFIREQ);

#if defined(CONFIG_M32R_CFC)
	/* ICUCR41: CFC Insert & eject */
	irq_set_chip_and_handler(PLD_IRQ_CFC_INSERT, &mappi3_irq_type,
				 handle_level_irq);
	icu_data[PLD_IRQ_CFC_INSERT].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD00;
	disable_mappi3_irq(PLD_IRQ_CFC_INSERT);
#endif /* CONFIG_M32R_CFC */

	/* IDE IREQ */
	irq_set_chip_and_handler(PLD_IRQ_IDEIREQ, &mappi3_irq_type,
				 handle_level_irq);
	icu_data[PLD_IRQ_IDEIREQ].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
	disable_mappi3_irq(PLD_IRQ_IDEIREQ);
}

#if defined(CONFIG_SMC91X)

#define LAN_IOSTART     0x300
#define LAN_IOEND       0x320
static struct resource smc91x_resources[] = {
	[0] = {
		.start  = (LAN_IOSTART),
		.end    = (LAN_IOEND),
		/* NOTE(review): I/O-port range declared as IORESOURCE_MEM —
		 * kept as-is; confirm against the board's address map. */
		.flags  = IORESOURCE_MEM,
	},
	[1] = {
		.start  = M32R_IRQ_INT0,
		.end    = M32R_IRQ_INT0,
		.flags  = IORESOURCE_IRQ,
	}
};

static struct platform_device smc91x_device = {
	.name		= "smc91x",
	.id		= 0,
	.num_resources  = ARRAY_SIZE(smc91x_resources),
	.resource       = smc91x_resources,
};
#endif

#if defined(CONFIG_FB_S1D13XXX)

#include <video/s1d13xxxfb.h>
#include <asm/s1d13806.h>

static struct s1d13xxxfb_pdata s1d13xxxfb_data = {
	.initregs		= s1d13xxxfb_initregs,
	.initregssize		= ARRAY_SIZE(s1d13xxxfb_initregs),
	.platform_init_video	= NULL,
#ifdef CONFIG_PM
	.platform_suspend_video	= NULL,
	.platform_resume_video	= NULL,
#endif
};

static struct resource s1d13xxxfb_resources[] = {
	[0] = {
		.start  = 0x1d600000UL,
		.end    = 0x1d73FFFFUL,
		.flags  = IORESOURCE_MEM,
	},
	[1] = {
		.start  = 0x1d400000UL,
		.end    = 0x1d4001FFUL,
		.flags  = IORESOURCE_MEM,
	}
};

static struct platform_device s1d13xxxfb_device = {
	.name		= S1D_DEVICENAME,
	.id		= 0,
	.dev		= {
		.platform_data	= &s1d13xxxfb_data,
	},
	.num_resources	= ARRAY_SIZE(s1d13xxxfb_resources),
	.resource	= s1d13xxxfb_resources,
};
#endif

/* Register the board's platform devices at arch-initcall time. */
static int __init platform_init(void)
{
#if defined(CONFIG_SMC91X)
	platform_device_register(&smc91x_device);
#endif
#if defined(CONFIG_FB_S1D13XXX)
	platform_device_register(&s1d13xxxfb_device);
#endif
	return 0;
}
arch_initcall(platform_init);
gpl-2.0
kylon/AndromadusMod-New
drivers/video/cfbcopyarea.c
9028
11287
/* * Generic function for frame buffer with packed pixels of any depth. * * Copyright (C) 1999-2005 James Simmons <jsimmons@www.infradead.org> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * * NOTES: * * This is for cfb packed pixels. Iplan and such are incorporated in the * drivers that need them. * * FIXME * * Also need to add code to deal with cards endians that are different than * the native cpu endians. I also need to deal with MSB position in the word. * * The two functions or copying forward and backward could be split up like * the ones for filling, i.e. in aligned and unaligned versions. This would * help moving some redundant computations and branches out of the loop, too. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/fb.h> #include <asm/types.h> #include <asm/io.h> #include "fb_draw.h" #if BITS_PER_LONG == 32 # define FB_WRITEL fb_writel # define FB_READL fb_readl #else # define FB_WRITEL fb_writeq # define FB_READL fb_readq #endif /* * Generic bitwise copy algorithm */ static void bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx, const unsigned long __iomem *src, int src_idx, int bits, unsigned n, u32 bswapmask) { unsigned long first, last; int const shift = dst_idx-src_idx; int left, right; first = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask); last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask); if (!shift) { // Same alignment for source and dest if (dst_idx+n <= bits) { // Single word if (last) first &= last; FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst); } else { // Multiple destination words // Leading bits if (first != ~0UL) { FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst); dst++; src++; n -= bits - dst_idx; } // Main chunk n /= bits; while (n >= 8) { FB_WRITEL(FB_READL(src++), dst++); 
FB_WRITEL(FB_READL(src++), dst++); FB_WRITEL(FB_READL(src++), dst++); FB_WRITEL(FB_READL(src++), dst++); FB_WRITEL(FB_READL(src++), dst++); FB_WRITEL(FB_READL(src++), dst++); FB_WRITEL(FB_READL(src++), dst++); FB_WRITEL(FB_READL(src++), dst++); n -= 8; } while (n--) FB_WRITEL(FB_READL(src++), dst++); // Trailing bits if (last) FB_WRITEL( comp( FB_READL(src), FB_READL(dst), last), dst); } } else { /* Different alignment for source and dest */ unsigned long d0, d1; int m; right = shift & (bits - 1); left = -shift & (bits - 1); bswapmask &= shift; if (dst_idx+n <= bits) { // Single destination word if (last) first &= last; d0 = FB_READL(src); d0 = fb_rev_pixels_in_long(d0, bswapmask); if (shift > 0) { // Single source word d0 >>= right; } else if (src_idx+n <= bits) { // Single source word d0 <<= left; } else { // 2 source words d1 = FB_READL(src + 1); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0<<left | d1>>right; } d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(comp(d0, FB_READL(dst), first), dst); } else { // Multiple destination words /** We must always remember the last value read, because in case SRC and DST overlap bitwise (e.g. when moving just one pixel in 1bpp), we always collect one full long for DST and that might overlap with the current long from SRC. We store this value in 'd0'. 
*/ d0 = FB_READL(src++); d0 = fb_rev_pixels_in_long(d0, bswapmask); // Leading bits if (shift > 0) { // Single source word d1 = d0; d0 >>= right; dst++; n -= bits - dst_idx; } else { // 2 source words d1 = FB_READL(src++); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0<<left | d1>>right; dst++; n -= bits - dst_idx; } d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(comp(d0, FB_READL(dst), first), dst); d0 = d1; // Main chunk m = n % bits; n /= bits; while ((n >= 4) && !bswapmask) { d1 = FB_READL(src++); FB_WRITEL(d0 << left | d1 >> right, dst++); d0 = d1; d1 = FB_READL(src++); FB_WRITEL(d0 << left | d1 >> right, dst++); d0 = d1; d1 = FB_READL(src++); FB_WRITEL(d0 << left | d1 >> right, dst++); d0 = d1; d1 = FB_READL(src++); FB_WRITEL(d0 << left | d1 >> right, dst++); d0 = d1; n -= 4; } while (n--) { d1 = FB_READL(src++); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0 << left | d1 >> right; d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(d0, dst++); d0 = d1; } // Trailing bits if (last) { if (m <= right) { // Single source word d0 <<= left; } else { // 2 source words d1 = FB_READL(src); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0<<left | d1>>right; } d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(comp(d0, FB_READL(dst), last), dst); } } } } /* * Generic bitwise copy algorithm, operating backward */ static void bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx, const unsigned long __iomem *src, int src_idx, int bits, unsigned n, u32 bswapmask) { unsigned long first, last; int shift; dst += (n-1)/bits; src += (n-1)/bits; if ((n-1) % bits) { dst_idx += (n-1) % bits; dst += dst_idx >> (ffs(bits) - 1); dst_idx &= bits - 1; src_idx += (n-1) % bits; src += src_idx >> (ffs(bits) - 1); src_idx &= bits - 1; } shift = dst_idx-src_idx; first = fb_shifted_pixels_mask_long(p, bits - 1 - dst_idx, bswapmask); last = ~fb_shifted_pixels_mask_long(p, bits - 1 - ((dst_idx-n) % bits), bswapmask); if (!shift) { // Same alignment for 
source and dest if ((unsigned long)dst_idx+1 >= n) { // Single word if (last) first &= last; FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst); } else { // Multiple destination words // Leading bits if (first != ~0UL) { FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst); dst--; src--; n -= dst_idx+1; } // Main chunk n /= bits; while (n >= 8) { FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); n -= 8; } while (n--) FB_WRITEL(FB_READL(src--), dst--); // Trailing bits if (last) FB_WRITEL( comp( FB_READL(src), FB_READL(dst), last), dst); } } else { // Different alignment for source and dest unsigned long d0, d1; int m; int const left = -shift & (bits-1); int const right = shift & (bits-1); bswapmask &= shift; if ((unsigned long)dst_idx+1 >= n) { // Single destination word if (last) first &= last; d0 = FB_READL(src); if (shift < 0) { // Single source word d0 <<= left; } else if (1+(unsigned long)src_idx >= n) { // Single source word d0 >>= right; } else { // 2 source words d1 = FB_READL(src - 1); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0>>right | d1<<left; } d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(comp(d0, FB_READL(dst), first), dst); } else { // Multiple destination words /** We must always remember the last value read, because in case SRC and DST overlap bitwise (e.g. when moving just one pixel in 1bpp), we always collect one full long for DST and that might overlap with the current long from SRC. We store this value in 'd0'. 
*/ d0 = FB_READL(src--); d0 = fb_rev_pixels_in_long(d0, bswapmask); // Leading bits if (shift < 0) { // Single source word d1 = d0; d0 <<= left; } else { // 2 source words d1 = FB_READL(src--); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0>>right | d1<<left; } d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(comp(d0, FB_READL(dst), first), dst); d0 = d1; dst--; n -= dst_idx+1; // Main chunk m = n % bits; n /= bits; while ((n >= 4) && !bswapmask) { d1 = FB_READL(src--); FB_WRITEL(d0 >> right | d1 << left, dst--); d0 = d1; d1 = FB_READL(src--); FB_WRITEL(d0 >> right | d1 << left, dst--); d0 = d1; d1 = FB_READL(src--); FB_WRITEL(d0 >> right | d1 << left, dst--); d0 = d1; d1 = FB_READL(src--); FB_WRITEL(d0 >> right | d1 << left, dst--); d0 = d1; n -= 4; } while (n--) { d1 = FB_READL(src--); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0 >> right | d1 << left; d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(d0, dst--); d0 = d1; } // Trailing bits if (last) { if (m <= left) { // Single source word d0 >>= right; } else { // 2 source words d1 = FB_READL(src); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0>>right | d1<<left; } d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(comp(d0, FB_READL(dst), last), dst); } } } } void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area) { u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy; u32 height = area->height, width = area->width; unsigned long const bits_per_line = p->fix.line_length*8u; unsigned long __iomem *dst = NULL, *src = NULL; int bits = BITS_PER_LONG, bytes = bits >> 3; int dst_idx = 0, src_idx = 0, rev_copy = 0; u32 bswapmask = fb_compute_bswapmask(p); if (p->state != FBINFO_STATE_RUNNING) return; /* if the beginning of the target area might overlap with the end of the source area, be have to copy the area reverse. 
*/ if ((dy == sy && dx > sx) || (dy > sy)) { dy += height; sy += height; rev_copy = 1; } // split the base of the framebuffer into a long-aligned address and the // index of the first bit dst = src = (unsigned long __iomem *)((unsigned long)p->screen_base & ~(bytes-1)); dst_idx = src_idx = 8*((unsigned long)p->screen_base & (bytes-1)); // add offset of source and target area dst_idx += dy*bits_per_line + dx*p->var.bits_per_pixel; src_idx += sy*bits_per_line + sx*p->var.bits_per_pixel; if (p->fbops->fb_sync) p->fbops->fb_sync(p); if (rev_copy) { while (height--) { dst_idx -= bits_per_line; src_idx -= bits_per_line; dst += dst_idx >> (ffs(bits) - 1); dst_idx &= (bytes - 1); src += src_idx >> (ffs(bits) - 1); src_idx &= (bytes - 1); bitcpy_rev(p, dst, dst_idx, src, src_idx, bits, width*p->var.bits_per_pixel, bswapmask); } } else { while (height--) { dst += dst_idx >> (ffs(bits) - 1); dst_idx &= (bytes - 1); src += src_idx >> (ffs(bits) - 1); src_idx &= (bytes - 1); bitcpy(p, dst, dst_idx, src, src_idx, bits, width*p->var.bits_per_pixel, bswapmask); dst_idx += bits_per_line; src_idx += bits_per_line; } } } EXPORT_SYMBOL(cfb_copyarea); MODULE_AUTHOR("James Simmons <jsimmons@users.sf.net>"); MODULE_DESCRIPTION("Generic software accelerated copyarea"); MODULE_LICENSE("GPL");
gpl-2.0
meimz/linux
arch/m32r/platforms/mappi3/setup.c
9028
5437
/* * linux/arch/m32r/platforms/mappi3/setup.c * * Setup routines for Renesas MAPPI-III(M3A-2170) Board * * Copyright (c) 2001-2005 Hiroyuki Kondo, Hirokazu Takata, * Hitoshi Yamamoto, Mamoru Sakugawa */ #include <linux/irq.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <asm/m32r.h> #include <asm/io.h> #define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long))) icu_data_t icu_data[NR_IRQS]; static void disable_mappi3_irq(unsigned int irq) { unsigned long port, data; if ((irq == 0) ||(irq >= NR_IRQS)) { printk("bad irq 0x%08x\n", irq); return; } port = irq2port(irq); data = icu_data[irq].icucr|M32R_ICUCR_ILEVEL7; outl(data, port); } static void enable_mappi3_irq(unsigned int irq) { unsigned long port, data; if ((irq == 0) ||(irq >= NR_IRQS)) { printk("bad irq 0x%08x\n", irq); return; } port = irq2port(irq); data = icu_data[irq].icucr|M32R_ICUCR_IEN|M32R_ICUCR_ILEVEL6; outl(data, port); } static void mask_mappi3(struct irq_data *data) { disable_mappi3_irq(data->irq); } static void unmask_mappi3(struct irq_data *data) { enable_mappi3_irq(data->irq); } static void shutdown_mappi3(struct irq_data *data) { unsigned long port; port = irq2port(data->irq); outl(M32R_ICUCR_ILEVEL7, port); } static struct irq_chip mappi3_irq_type = { .name = "MAPPI3-IRQ", .irq_shutdown = shutdown_mappi3, .irq_mask = mask_mappi3, .irq_unmask = unmask_mappi3, }; void __init init_IRQ(void) { #if defined(CONFIG_SMC91X) /* INT0 : LAN controller (SMC91111) */ irq_set_chip_and_handler(M32R_IRQ_INT0, &mappi3_irq_type, handle_level_irq); icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10; disable_mappi3_irq(M32R_IRQ_INT0); #endif /* CONFIG_SMC91X */ /* MFT2 : system timer */ irq_set_chip_and_handler(M32R_IRQ_MFT2, &mappi3_irq_type, handle_level_irq); icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN; disable_mappi3_irq(M32R_IRQ_MFT2); #ifdef CONFIG_SERIAL_M32R_SIO /* SIO0_R : uart receive data */ 
irq_set_chip_and_handler(M32R_IRQ_SIO0_R, &mappi3_irq_type, handle_level_irq); icu_data[M32R_IRQ_SIO0_R].icucr = 0; disable_mappi3_irq(M32R_IRQ_SIO0_R); /* SIO0_S : uart send data */ irq_set_chip_and_handler(M32R_IRQ_SIO0_S, &mappi3_irq_type, handle_level_irq); icu_data[M32R_IRQ_SIO0_S].icucr = 0; disable_mappi3_irq(M32R_IRQ_SIO0_S); /* SIO1_R : uart receive data */ irq_set_chip_and_handler(M32R_IRQ_SIO1_R, &mappi3_irq_type, handle_level_irq); icu_data[M32R_IRQ_SIO1_R].icucr = 0; disable_mappi3_irq(M32R_IRQ_SIO1_R); /* SIO1_S : uart send data */ irq_set_chip_and_handler(M32R_IRQ_SIO1_S, &mappi3_irq_type, handle_level_irq); icu_data[M32R_IRQ_SIO1_S].icucr = 0; disable_mappi3_irq(M32R_IRQ_SIO1_S); #endif /* CONFIG_M32R_USE_DBG_CONSOLE */ #if defined(CONFIG_USB) /* INT1 : USB Host controller interrupt */ irq_set_chip_and_handler(M32R_IRQ_INT1, &mappi3_irq_type, handle_level_irq); icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_ISMOD01; disable_mappi3_irq(M32R_IRQ_INT1); #endif /* CONFIG_USB */ /* CFC IREQ */ irq_set_chip_and_handler(PLD_IRQ_CFIREQ, &mappi3_irq_type, handle_level_irq); icu_data[PLD_IRQ_CFIREQ].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD01; disable_mappi3_irq(PLD_IRQ_CFIREQ); #if defined(CONFIG_M32R_CFC) /* ICUCR41: CFC Insert & eject */ irq_set_chip_and_handler(PLD_IRQ_CFC_INSERT, &mappi3_irq_type, handle_level_irq); icu_data[PLD_IRQ_CFC_INSERT].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD00; disable_mappi3_irq(PLD_IRQ_CFC_INSERT); #endif /* CONFIG_M32R_CFC */ /* IDE IREQ */ irq_set_chip_and_handler(PLD_IRQ_IDEIREQ, &mappi3_irq_type, handle_level_irq); icu_data[PLD_IRQ_IDEIREQ].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10; disable_mappi3_irq(PLD_IRQ_IDEIREQ); } #if defined(CONFIG_SMC91X) #define LAN_IOSTART 0x300 #define LAN_IOEND 0x320 static struct resource smc91x_resources[] = { [0] = { .start = (LAN_IOSTART), .end = (LAN_IOEND), .flags = IORESOURCE_MEM, }, [1] = { .start = M32R_IRQ_INT0, .end = M32R_IRQ_INT0, .flags = IORESOURCE_IRQ, } }; static struct 
platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, }; #endif #if defined(CONFIG_FB_S1D13XXX) #include <video/s1d13xxxfb.h> #include <asm/s1d13806.h> static struct s1d13xxxfb_pdata s1d13xxxfb_data = { .initregs = s1d13xxxfb_initregs, .initregssize = ARRAY_SIZE(s1d13xxxfb_initregs), .platform_init_video = NULL, #ifdef CONFIG_PM .platform_suspend_video = NULL, .platform_resume_video = NULL, #endif }; static struct resource s1d13xxxfb_resources[] = { [0] = { .start = 0x1d600000UL, .end = 0x1d73FFFFUL, .flags = IORESOURCE_MEM, }, [1] = { .start = 0x1d400000UL, .end = 0x1d4001FFUL, .flags = IORESOURCE_MEM, } }; static struct platform_device s1d13xxxfb_device = { .name = S1D_DEVICENAME, .id = 0, .dev = { .platform_data = &s1d13xxxfb_data, }, .num_resources = ARRAY_SIZE(s1d13xxxfb_resources), .resource = s1d13xxxfb_resources, }; #endif static int __init platform_init(void) { #if defined(CONFIG_SMC91X) platform_device_register(&smc91x_device); #endif #if defined(CONFIG_FB_S1D13XXX) platform_device_register(&s1d13xxxfb_device); #endif return 0; } arch_initcall(platform_init);
gpl-2.0
virt2real/linux-3.10
drivers/video/cfbcopyarea.c
9028
11287
/* * Generic function for frame buffer with packed pixels of any depth. * * Copyright (C) 1999-2005 James Simmons <jsimmons@www.infradead.org> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * * NOTES: * * This is for cfb packed pixels. Iplan and such are incorporated in the * drivers that need them. * * FIXME * * Also need to add code to deal with cards endians that are different than * the native cpu endians. I also need to deal with MSB position in the word. * * The two functions or copying forward and backward could be split up like * the ones for filling, i.e. in aligned and unaligned versions. This would * help moving some redundant computations and branches out of the loop, too. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/fb.h> #include <asm/types.h> #include <asm/io.h> #include "fb_draw.h" #if BITS_PER_LONG == 32 # define FB_WRITEL fb_writel # define FB_READL fb_readl #else # define FB_WRITEL fb_writeq # define FB_READL fb_readq #endif /* * Generic bitwise copy algorithm */ static void bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx, const unsigned long __iomem *src, int src_idx, int bits, unsigned n, u32 bswapmask) { unsigned long first, last; int const shift = dst_idx-src_idx; int left, right; first = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask); last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask); if (!shift) { // Same alignment for source and dest if (dst_idx+n <= bits) { // Single word if (last) first &= last; FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst); } else { // Multiple destination words // Leading bits if (first != ~0UL) { FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst); dst++; src++; n -= bits - dst_idx; } // Main chunk n /= bits; while (n >= 8) { FB_WRITEL(FB_READL(src++), dst++); 
FB_WRITEL(FB_READL(src++), dst++); FB_WRITEL(FB_READL(src++), dst++); FB_WRITEL(FB_READL(src++), dst++); FB_WRITEL(FB_READL(src++), dst++); FB_WRITEL(FB_READL(src++), dst++); FB_WRITEL(FB_READL(src++), dst++); FB_WRITEL(FB_READL(src++), dst++); n -= 8; } while (n--) FB_WRITEL(FB_READL(src++), dst++); // Trailing bits if (last) FB_WRITEL( comp( FB_READL(src), FB_READL(dst), last), dst); } } else { /* Different alignment for source and dest */ unsigned long d0, d1; int m; right = shift & (bits - 1); left = -shift & (bits - 1); bswapmask &= shift; if (dst_idx+n <= bits) { // Single destination word if (last) first &= last; d0 = FB_READL(src); d0 = fb_rev_pixels_in_long(d0, bswapmask); if (shift > 0) { // Single source word d0 >>= right; } else if (src_idx+n <= bits) { // Single source word d0 <<= left; } else { // 2 source words d1 = FB_READL(src + 1); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0<<left | d1>>right; } d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(comp(d0, FB_READL(dst), first), dst); } else { // Multiple destination words /** We must always remember the last value read, because in case SRC and DST overlap bitwise (e.g. when moving just one pixel in 1bpp), we always collect one full long for DST and that might overlap with the current long from SRC. We store this value in 'd0'. 
*/ d0 = FB_READL(src++); d0 = fb_rev_pixels_in_long(d0, bswapmask); // Leading bits if (shift > 0) { // Single source word d1 = d0; d0 >>= right; dst++; n -= bits - dst_idx; } else { // 2 source words d1 = FB_READL(src++); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0<<left | d1>>right; dst++; n -= bits - dst_idx; } d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(comp(d0, FB_READL(dst), first), dst); d0 = d1; // Main chunk m = n % bits; n /= bits; while ((n >= 4) && !bswapmask) { d1 = FB_READL(src++); FB_WRITEL(d0 << left | d1 >> right, dst++); d0 = d1; d1 = FB_READL(src++); FB_WRITEL(d0 << left | d1 >> right, dst++); d0 = d1; d1 = FB_READL(src++); FB_WRITEL(d0 << left | d1 >> right, dst++); d0 = d1; d1 = FB_READL(src++); FB_WRITEL(d0 << left | d1 >> right, dst++); d0 = d1; n -= 4; } while (n--) { d1 = FB_READL(src++); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0 << left | d1 >> right; d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(d0, dst++); d0 = d1; } // Trailing bits if (last) { if (m <= right) { // Single source word d0 <<= left; } else { // 2 source words d1 = FB_READL(src); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0<<left | d1>>right; } d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(comp(d0, FB_READL(dst), last), dst); } } } } /* * Generic bitwise copy algorithm, operating backward */ static void bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx, const unsigned long __iomem *src, int src_idx, int bits, unsigned n, u32 bswapmask) { unsigned long first, last; int shift; dst += (n-1)/bits; src += (n-1)/bits; if ((n-1) % bits) { dst_idx += (n-1) % bits; dst += dst_idx >> (ffs(bits) - 1); dst_idx &= bits - 1; src_idx += (n-1) % bits; src += src_idx >> (ffs(bits) - 1); src_idx &= bits - 1; } shift = dst_idx-src_idx; first = fb_shifted_pixels_mask_long(p, bits - 1 - dst_idx, bswapmask); last = ~fb_shifted_pixels_mask_long(p, bits - 1 - ((dst_idx-n) % bits), bswapmask); if (!shift) { // Same alignment for 
source and dest if ((unsigned long)dst_idx+1 >= n) { // Single word if (last) first &= last; FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst); } else { // Multiple destination words // Leading bits if (first != ~0UL) { FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst); dst--; src--; n -= dst_idx+1; } // Main chunk n /= bits; while (n >= 8) { FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); n -= 8; } while (n--) FB_WRITEL(FB_READL(src--), dst--); // Trailing bits if (last) FB_WRITEL( comp( FB_READL(src), FB_READL(dst), last), dst); } } else { // Different alignment for source and dest unsigned long d0, d1; int m; int const left = -shift & (bits-1); int const right = shift & (bits-1); bswapmask &= shift; if ((unsigned long)dst_idx+1 >= n) { // Single destination word if (last) first &= last; d0 = FB_READL(src); if (shift < 0) { // Single source word d0 <<= left; } else if (1+(unsigned long)src_idx >= n) { // Single source word d0 >>= right; } else { // 2 source words d1 = FB_READL(src - 1); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0>>right | d1<<left; } d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(comp(d0, FB_READL(dst), first), dst); } else { // Multiple destination words /** We must always remember the last value read, because in case SRC and DST overlap bitwise (e.g. when moving just one pixel in 1bpp), we always collect one full long for DST and that might overlap with the current long from SRC. We store this value in 'd0'. 
*/ d0 = FB_READL(src--); d0 = fb_rev_pixels_in_long(d0, bswapmask); // Leading bits if (shift < 0) { // Single source word d1 = d0; d0 <<= left; } else { // 2 source words d1 = FB_READL(src--); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0>>right | d1<<left; } d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(comp(d0, FB_READL(dst), first), dst); d0 = d1; dst--; n -= dst_idx+1; // Main chunk m = n % bits; n /= bits; while ((n >= 4) && !bswapmask) { d1 = FB_READL(src--); FB_WRITEL(d0 >> right | d1 << left, dst--); d0 = d1; d1 = FB_READL(src--); FB_WRITEL(d0 >> right | d1 << left, dst--); d0 = d1; d1 = FB_READL(src--); FB_WRITEL(d0 >> right | d1 << left, dst--); d0 = d1; d1 = FB_READL(src--); FB_WRITEL(d0 >> right | d1 << left, dst--); d0 = d1; n -= 4; } while (n--) { d1 = FB_READL(src--); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0 >> right | d1 << left; d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(d0, dst--); d0 = d1; } // Trailing bits if (last) { if (m <= left) { // Single source word d0 >>= right; } else { // 2 source words d1 = FB_READL(src); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0>>right | d1<<left; } d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(comp(d0, FB_READL(dst), last), dst); } } } } void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area) { u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy; u32 height = area->height, width = area->width; unsigned long const bits_per_line = p->fix.line_length*8u; unsigned long __iomem *dst = NULL, *src = NULL; int bits = BITS_PER_LONG, bytes = bits >> 3; int dst_idx = 0, src_idx = 0, rev_copy = 0; u32 bswapmask = fb_compute_bswapmask(p); if (p->state != FBINFO_STATE_RUNNING) return; /* if the beginning of the target area might overlap with the end of the source area, be have to copy the area reverse. 
*/ if ((dy == sy && dx > sx) || (dy > sy)) { dy += height; sy += height; rev_copy = 1; } // split the base of the framebuffer into a long-aligned address and the // index of the first bit dst = src = (unsigned long __iomem *)((unsigned long)p->screen_base & ~(bytes-1)); dst_idx = src_idx = 8*((unsigned long)p->screen_base & (bytes-1)); // add offset of source and target area dst_idx += dy*bits_per_line + dx*p->var.bits_per_pixel; src_idx += sy*bits_per_line + sx*p->var.bits_per_pixel; if (p->fbops->fb_sync) p->fbops->fb_sync(p); if (rev_copy) { while (height--) { dst_idx -= bits_per_line; src_idx -= bits_per_line; dst += dst_idx >> (ffs(bits) - 1); dst_idx &= (bytes - 1); src += src_idx >> (ffs(bits) - 1); src_idx &= (bytes - 1); bitcpy_rev(p, dst, dst_idx, src, src_idx, bits, width*p->var.bits_per_pixel, bswapmask); } } else { while (height--) { dst += dst_idx >> (ffs(bits) - 1); dst_idx &= (bytes - 1); src += src_idx >> (ffs(bits) - 1); src_idx &= (bytes - 1); bitcpy(p, dst, dst_idx, src, src_idx, bits, width*p->var.bits_per_pixel, bswapmask); dst_idx += bits_per_line; src_idx += bits_per_line; } } } EXPORT_SYMBOL(cfb_copyarea); MODULE_AUTHOR("James Simmons <jsimmons@users.sf.net>"); MODULE_DESCRIPTION("Generic software accelerated copyarea"); MODULE_LICENSE("GPL");
gpl-2.0
cattleprod/GT-N7100
drivers/video/cfbcopyarea.c
9028
11287
/* * Generic function for frame buffer with packed pixels of any depth. * * Copyright (C) 1999-2005 James Simmons <jsimmons@www.infradead.org> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * * NOTES: * * This is for cfb packed pixels. Iplan and such are incorporated in the * drivers that need them. * * FIXME * * Also need to add code to deal with cards endians that are different than * the native cpu endians. I also need to deal with MSB position in the word. * * The two functions or copying forward and backward could be split up like * the ones for filling, i.e. in aligned and unaligned versions. This would * help moving some redundant computations and branches out of the loop, too. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/fb.h> #include <asm/types.h> #include <asm/io.h> #include "fb_draw.h" #if BITS_PER_LONG == 32 # define FB_WRITEL fb_writel # define FB_READL fb_readl #else # define FB_WRITEL fb_writeq # define FB_READL fb_readq #endif /* * Generic bitwise copy algorithm */ static void bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx, const unsigned long __iomem *src, int src_idx, int bits, unsigned n, u32 bswapmask) { unsigned long first, last; int const shift = dst_idx-src_idx; int left, right; first = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask); last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask); if (!shift) { // Same alignment for source and dest if (dst_idx+n <= bits) { // Single word if (last) first &= last; FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst); } else { // Multiple destination words // Leading bits if (first != ~0UL) { FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst); dst++; src++; n -= bits - dst_idx; } // Main chunk n /= bits; while (n >= 8) { FB_WRITEL(FB_READL(src++), dst++); 
FB_WRITEL(FB_READL(src++), dst++); FB_WRITEL(FB_READL(src++), dst++); FB_WRITEL(FB_READL(src++), dst++); FB_WRITEL(FB_READL(src++), dst++); FB_WRITEL(FB_READL(src++), dst++); FB_WRITEL(FB_READL(src++), dst++); FB_WRITEL(FB_READL(src++), dst++); n -= 8; } while (n--) FB_WRITEL(FB_READL(src++), dst++); // Trailing bits if (last) FB_WRITEL( comp( FB_READL(src), FB_READL(dst), last), dst); } } else { /* Different alignment for source and dest */ unsigned long d0, d1; int m; right = shift & (bits - 1); left = -shift & (bits - 1); bswapmask &= shift; if (dst_idx+n <= bits) { // Single destination word if (last) first &= last; d0 = FB_READL(src); d0 = fb_rev_pixels_in_long(d0, bswapmask); if (shift > 0) { // Single source word d0 >>= right; } else if (src_idx+n <= bits) { // Single source word d0 <<= left; } else { // 2 source words d1 = FB_READL(src + 1); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0<<left | d1>>right; } d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(comp(d0, FB_READL(dst), first), dst); } else { // Multiple destination words /** We must always remember the last value read, because in case SRC and DST overlap bitwise (e.g. when moving just one pixel in 1bpp), we always collect one full long for DST and that might overlap with the current long from SRC. We store this value in 'd0'. 
*/ d0 = FB_READL(src++); d0 = fb_rev_pixels_in_long(d0, bswapmask); // Leading bits if (shift > 0) { // Single source word d1 = d0; d0 >>= right; dst++; n -= bits - dst_idx; } else { // 2 source words d1 = FB_READL(src++); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0<<left | d1>>right; dst++; n -= bits - dst_idx; } d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(comp(d0, FB_READL(dst), first), dst); d0 = d1; // Main chunk m = n % bits; n /= bits; while ((n >= 4) && !bswapmask) { d1 = FB_READL(src++); FB_WRITEL(d0 << left | d1 >> right, dst++); d0 = d1; d1 = FB_READL(src++); FB_WRITEL(d0 << left | d1 >> right, dst++); d0 = d1; d1 = FB_READL(src++); FB_WRITEL(d0 << left | d1 >> right, dst++); d0 = d1; d1 = FB_READL(src++); FB_WRITEL(d0 << left | d1 >> right, dst++); d0 = d1; n -= 4; } while (n--) { d1 = FB_READL(src++); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0 << left | d1 >> right; d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(d0, dst++); d0 = d1; } // Trailing bits if (last) { if (m <= right) { // Single source word d0 <<= left; } else { // 2 source words d1 = FB_READL(src); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0<<left | d1>>right; } d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(comp(d0, FB_READL(dst), last), dst); } } } } /* * Generic bitwise copy algorithm, operating backward */ static void bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx, const unsigned long __iomem *src, int src_idx, int bits, unsigned n, u32 bswapmask) { unsigned long first, last; int shift; dst += (n-1)/bits; src += (n-1)/bits; if ((n-1) % bits) { dst_idx += (n-1) % bits; dst += dst_idx >> (ffs(bits) - 1); dst_idx &= bits - 1; src_idx += (n-1) % bits; src += src_idx >> (ffs(bits) - 1); src_idx &= bits - 1; } shift = dst_idx-src_idx; first = fb_shifted_pixels_mask_long(p, bits - 1 - dst_idx, bswapmask); last = ~fb_shifted_pixels_mask_long(p, bits - 1 - ((dst_idx-n) % bits), bswapmask); if (!shift) { // Same alignment for 
source and dest if ((unsigned long)dst_idx+1 >= n) { // Single word if (last) first &= last; FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst); } else { // Multiple destination words // Leading bits if (first != ~0UL) { FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst); dst--; src--; n -= dst_idx+1; } // Main chunk n /= bits; while (n >= 8) { FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); n -= 8; } while (n--) FB_WRITEL(FB_READL(src--), dst--); // Trailing bits if (last) FB_WRITEL( comp( FB_READL(src), FB_READL(dst), last), dst); } } else { // Different alignment for source and dest unsigned long d0, d1; int m; int const left = -shift & (bits-1); int const right = shift & (bits-1); bswapmask &= shift; if ((unsigned long)dst_idx+1 >= n) { // Single destination word if (last) first &= last; d0 = FB_READL(src); if (shift < 0) { // Single source word d0 <<= left; } else if (1+(unsigned long)src_idx >= n) { // Single source word d0 >>= right; } else { // 2 source words d1 = FB_READL(src - 1); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0>>right | d1<<left; } d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(comp(d0, FB_READL(dst), first), dst); } else { // Multiple destination words /** We must always remember the last value read, because in case SRC and DST overlap bitwise (e.g. when moving just one pixel in 1bpp), we always collect one full long for DST and that might overlap with the current long from SRC. We store this value in 'd0'. 
*/ d0 = FB_READL(src--); d0 = fb_rev_pixels_in_long(d0, bswapmask); // Leading bits if (shift < 0) { // Single source word d1 = d0; d0 <<= left; } else { // 2 source words d1 = FB_READL(src--); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0>>right | d1<<left; } d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(comp(d0, FB_READL(dst), first), dst); d0 = d1; dst--; n -= dst_idx+1; // Main chunk m = n % bits; n /= bits; while ((n >= 4) && !bswapmask) { d1 = FB_READL(src--); FB_WRITEL(d0 >> right | d1 << left, dst--); d0 = d1; d1 = FB_READL(src--); FB_WRITEL(d0 >> right | d1 << left, dst--); d0 = d1; d1 = FB_READL(src--); FB_WRITEL(d0 >> right | d1 << left, dst--); d0 = d1; d1 = FB_READL(src--); FB_WRITEL(d0 >> right | d1 << left, dst--); d0 = d1; n -= 4; } while (n--) { d1 = FB_READL(src--); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0 >> right | d1 << left; d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(d0, dst--); d0 = d1; } // Trailing bits if (last) { if (m <= left) { // Single source word d0 >>= right; } else { // 2 source words d1 = FB_READL(src); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0>>right | d1<<left; } d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(comp(d0, FB_READL(dst), last), dst); } } } } void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area) { u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy; u32 height = area->height, width = area->width; unsigned long const bits_per_line = p->fix.line_length*8u; unsigned long __iomem *dst = NULL, *src = NULL; int bits = BITS_PER_LONG, bytes = bits >> 3; int dst_idx = 0, src_idx = 0, rev_copy = 0; u32 bswapmask = fb_compute_bswapmask(p); if (p->state != FBINFO_STATE_RUNNING) return; /* if the beginning of the target area might overlap with the end of the source area, be have to copy the area reverse. 
*/ if ((dy == sy && dx > sx) || (dy > sy)) { dy += height; sy += height; rev_copy = 1; } // split the base of the framebuffer into a long-aligned address and the // index of the first bit dst = src = (unsigned long __iomem *)((unsigned long)p->screen_base & ~(bytes-1)); dst_idx = src_idx = 8*((unsigned long)p->screen_base & (bytes-1)); // add offset of source and target area dst_idx += dy*bits_per_line + dx*p->var.bits_per_pixel; src_idx += sy*bits_per_line + sx*p->var.bits_per_pixel; if (p->fbops->fb_sync) p->fbops->fb_sync(p); if (rev_copy) { while (height--) { dst_idx -= bits_per_line; src_idx -= bits_per_line; dst += dst_idx >> (ffs(bits) - 1); dst_idx &= (bytes - 1); src += src_idx >> (ffs(bits) - 1); src_idx &= (bytes - 1); bitcpy_rev(p, dst, dst_idx, src, src_idx, bits, width*p->var.bits_per_pixel, bswapmask); } } else { while (height--) { dst += dst_idx >> (ffs(bits) - 1); dst_idx &= (bytes - 1); src += src_idx >> (ffs(bits) - 1); src_idx &= (bytes - 1); bitcpy(p, dst, dst_idx, src, src_idx, bits, width*p->var.bits_per_pixel, bswapmask); dst_idx += bits_per_line; src_idx += bits_per_line; } } } EXPORT_SYMBOL(cfb_copyarea); MODULE_AUTHOR("James Simmons <jsimmons@users.sf.net>"); MODULE_DESCRIPTION("Generic software accelerated copyarea"); MODULE_LICENSE("GPL");
gpl-2.0
willizambranoback/evolution_CM13
drivers/video/cfbcopyarea.c
9028
11287
/* * Generic function for frame buffer with packed pixels of any depth. * * Copyright (C) 1999-2005 James Simmons <jsimmons@www.infradead.org> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * * NOTES: * * This is for cfb packed pixels. Iplan and such are incorporated in the * drivers that need them. * * FIXME * * Also need to add code to deal with cards endians that are different than * the native cpu endians. I also need to deal with MSB position in the word. * * The two functions or copying forward and backward could be split up like * the ones for filling, i.e. in aligned and unaligned versions. This would * help moving some redundant computations and branches out of the loop, too. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/fb.h> #include <asm/types.h> #include <asm/io.h> #include "fb_draw.h" #if BITS_PER_LONG == 32 # define FB_WRITEL fb_writel # define FB_READL fb_readl #else # define FB_WRITEL fb_writeq # define FB_READL fb_readq #endif /* * Generic bitwise copy algorithm */ static void bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx, const unsigned long __iomem *src, int src_idx, int bits, unsigned n, u32 bswapmask) { unsigned long first, last; int const shift = dst_idx-src_idx; int left, right; first = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask); last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask); if (!shift) { // Same alignment for source and dest if (dst_idx+n <= bits) { // Single word if (last) first &= last; FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst); } else { // Multiple destination words // Leading bits if (first != ~0UL) { FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst); dst++; src++; n -= bits - dst_idx; } // Main chunk n /= bits; while (n >= 8) { FB_WRITEL(FB_READL(src++), dst++); 
FB_WRITEL(FB_READL(src++), dst++); FB_WRITEL(FB_READL(src++), dst++); FB_WRITEL(FB_READL(src++), dst++); FB_WRITEL(FB_READL(src++), dst++); FB_WRITEL(FB_READL(src++), dst++); FB_WRITEL(FB_READL(src++), dst++); FB_WRITEL(FB_READL(src++), dst++); n -= 8; } while (n--) FB_WRITEL(FB_READL(src++), dst++); // Trailing bits if (last) FB_WRITEL( comp( FB_READL(src), FB_READL(dst), last), dst); } } else { /* Different alignment for source and dest */ unsigned long d0, d1; int m; right = shift & (bits - 1); left = -shift & (bits - 1); bswapmask &= shift; if (dst_idx+n <= bits) { // Single destination word if (last) first &= last; d0 = FB_READL(src); d0 = fb_rev_pixels_in_long(d0, bswapmask); if (shift > 0) { // Single source word d0 >>= right; } else if (src_idx+n <= bits) { // Single source word d0 <<= left; } else { // 2 source words d1 = FB_READL(src + 1); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0<<left | d1>>right; } d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(comp(d0, FB_READL(dst), first), dst); } else { // Multiple destination words /** We must always remember the last value read, because in case SRC and DST overlap bitwise (e.g. when moving just one pixel in 1bpp), we always collect one full long for DST and that might overlap with the current long from SRC. We store this value in 'd0'. 
*/ d0 = FB_READL(src++); d0 = fb_rev_pixels_in_long(d0, bswapmask); // Leading bits if (shift > 0) { // Single source word d1 = d0; d0 >>= right; dst++; n -= bits - dst_idx; } else { // 2 source words d1 = FB_READL(src++); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0<<left | d1>>right; dst++; n -= bits - dst_idx; } d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(comp(d0, FB_READL(dst), first), dst); d0 = d1; // Main chunk m = n % bits; n /= bits; while ((n >= 4) && !bswapmask) { d1 = FB_READL(src++); FB_WRITEL(d0 << left | d1 >> right, dst++); d0 = d1; d1 = FB_READL(src++); FB_WRITEL(d0 << left | d1 >> right, dst++); d0 = d1; d1 = FB_READL(src++); FB_WRITEL(d0 << left | d1 >> right, dst++); d0 = d1; d1 = FB_READL(src++); FB_WRITEL(d0 << left | d1 >> right, dst++); d0 = d1; n -= 4; } while (n--) { d1 = FB_READL(src++); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0 << left | d1 >> right; d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(d0, dst++); d0 = d1; } // Trailing bits if (last) { if (m <= right) { // Single source word d0 <<= left; } else { // 2 source words d1 = FB_READL(src); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0<<left | d1>>right; } d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(comp(d0, FB_READL(dst), last), dst); } } } } /* * Generic bitwise copy algorithm, operating backward */ static void bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx, const unsigned long __iomem *src, int src_idx, int bits, unsigned n, u32 bswapmask) { unsigned long first, last; int shift; dst += (n-1)/bits; src += (n-1)/bits; if ((n-1) % bits) { dst_idx += (n-1) % bits; dst += dst_idx >> (ffs(bits) - 1); dst_idx &= bits - 1; src_idx += (n-1) % bits; src += src_idx >> (ffs(bits) - 1); src_idx &= bits - 1; } shift = dst_idx-src_idx; first = fb_shifted_pixels_mask_long(p, bits - 1 - dst_idx, bswapmask); last = ~fb_shifted_pixels_mask_long(p, bits - 1 - ((dst_idx-n) % bits), bswapmask); if (!shift) { // Same alignment for 
source and dest if ((unsigned long)dst_idx+1 >= n) { // Single word if (last) first &= last; FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst); } else { // Multiple destination words // Leading bits if (first != ~0UL) { FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst); dst--; src--; n -= dst_idx+1; } // Main chunk n /= bits; while (n >= 8) { FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); FB_WRITEL(FB_READL(src--), dst--); n -= 8; } while (n--) FB_WRITEL(FB_READL(src--), dst--); // Trailing bits if (last) FB_WRITEL( comp( FB_READL(src), FB_READL(dst), last), dst); } } else { // Different alignment for source and dest unsigned long d0, d1; int m; int const left = -shift & (bits-1); int const right = shift & (bits-1); bswapmask &= shift; if ((unsigned long)dst_idx+1 >= n) { // Single destination word if (last) first &= last; d0 = FB_READL(src); if (shift < 0) { // Single source word d0 <<= left; } else if (1+(unsigned long)src_idx >= n) { // Single source word d0 >>= right; } else { // 2 source words d1 = FB_READL(src - 1); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0>>right | d1<<left; } d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(comp(d0, FB_READL(dst), first), dst); } else { // Multiple destination words /** We must always remember the last value read, because in case SRC and DST overlap bitwise (e.g. when moving just one pixel in 1bpp), we always collect one full long for DST and that might overlap with the current long from SRC. We store this value in 'd0'. 
*/ d0 = FB_READL(src--); d0 = fb_rev_pixels_in_long(d0, bswapmask); // Leading bits if (shift < 0) { // Single source word d1 = d0; d0 <<= left; } else { // 2 source words d1 = FB_READL(src--); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0>>right | d1<<left; } d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(comp(d0, FB_READL(dst), first), dst); d0 = d1; dst--; n -= dst_idx+1; // Main chunk m = n % bits; n /= bits; while ((n >= 4) && !bswapmask) { d1 = FB_READL(src--); FB_WRITEL(d0 >> right | d1 << left, dst--); d0 = d1; d1 = FB_READL(src--); FB_WRITEL(d0 >> right | d1 << left, dst--); d0 = d1; d1 = FB_READL(src--); FB_WRITEL(d0 >> right | d1 << left, dst--); d0 = d1; d1 = FB_READL(src--); FB_WRITEL(d0 >> right | d1 << left, dst--); d0 = d1; n -= 4; } while (n--) { d1 = FB_READL(src--); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0 >> right | d1 << left; d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(d0, dst--); d0 = d1; } // Trailing bits if (last) { if (m <= left) { // Single source word d0 >>= right; } else { // 2 source words d1 = FB_READL(src); d1 = fb_rev_pixels_in_long(d1, bswapmask); d0 = d0>>right | d1<<left; } d0 = fb_rev_pixels_in_long(d0, bswapmask); FB_WRITEL(comp(d0, FB_READL(dst), last), dst); } } } } void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area) { u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy; u32 height = area->height, width = area->width; unsigned long const bits_per_line = p->fix.line_length*8u; unsigned long __iomem *dst = NULL, *src = NULL; int bits = BITS_PER_LONG, bytes = bits >> 3; int dst_idx = 0, src_idx = 0, rev_copy = 0; u32 bswapmask = fb_compute_bswapmask(p); if (p->state != FBINFO_STATE_RUNNING) return; /* if the beginning of the target area might overlap with the end of the source area, be have to copy the area reverse. 
*/ if ((dy == sy && dx > sx) || (dy > sy)) { dy += height; sy += height; rev_copy = 1; } // split the base of the framebuffer into a long-aligned address and the // index of the first bit dst = src = (unsigned long __iomem *)((unsigned long)p->screen_base & ~(bytes-1)); dst_idx = src_idx = 8*((unsigned long)p->screen_base & (bytes-1)); // add offset of source and target area dst_idx += dy*bits_per_line + dx*p->var.bits_per_pixel; src_idx += sy*bits_per_line + sx*p->var.bits_per_pixel; if (p->fbops->fb_sync) p->fbops->fb_sync(p); if (rev_copy) { while (height--) { dst_idx -= bits_per_line; src_idx -= bits_per_line; dst += dst_idx >> (ffs(bits) - 1); dst_idx &= (bytes - 1); src += src_idx >> (ffs(bits) - 1); src_idx &= (bytes - 1); bitcpy_rev(p, dst, dst_idx, src, src_idx, bits, width*p->var.bits_per_pixel, bswapmask); } } else { while (height--) { dst += dst_idx >> (ffs(bits) - 1); dst_idx &= (bytes - 1); src += src_idx >> (ffs(bits) - 1); src_idx &= (bytes - 1); bitcpy(p, dst, dst_idx, src, src_idx, bits, width*p->var.bits_per_pixel, bswapmask); dst_idx += bits_per_line; src_idx += bits_per_line; } } } EXPORT_SYMBOL(cfb_copyarea); MODULE_AUTHOR("James Simmons <jsimmons@users.sf.net>"); MODULE_DESCRIPTION("Generic software accelerated copyarea"); MODULE_LICENSE("GPL");
gpl-2.0
bensonhsu2013/android_kernel_samsung_lt02wifi
drivers/isdn/hisax/hscx.c
9796
7482
/* $Id: hscx.c,v 1.24.2.4 2004/01/24 20:47:23 keil Exp $ * * HSCX specific routines * * Author Karsten Keil * Copyright by Karsten Keil <keil@isdn4linux.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/init.h> #include "hisax.h" #include "hscx.h" #include "isac.h" #include "isdnl1.h" #include <linux/interrupt.h> #include <linux/slab.h> static char *HSCXVer[] = {"A1", "?1", "A2", "?3", "A3", "V2.1", "?6", "?7", "?8", "?9", "?10", "?11", "?12", "?13", "?14", "???"}; int HscxVersion(struct IsdnCardState *cs, char *s) { int verA, verB; verA = cs->BC_Read_Reg(cs, 0, HSCX_VSTR) & 0xf; verB = cs->BC_Read_Reg(cs, 1, HSCX_VSTR) & 0xf; printk(KERN_INFO "%s HSCX version A: %s B: %s\n", s, HSCXVer[verA], HSCXVer[verB]); if ((verA == 0) | (verA == 0xf) | (verB == 0) | (verB == 0xf)) return (1); else return (0); } void modehscx(struct BCState *bcs, int mode, int bc) { struct IsdnCardState *cs = bcs->cs; int hscx = bcs->hw.hscx.hscx; if (cs->debug & L1_DEB_HSCX) debugl1(cs, "hscx %c mode %d ichan %d", 'A' + hscx, mode, bc); bcs->mode = mode; bcs->channel = bc; cs->BC_Write_Reg(cs, hscx, HSCX_XAD1, 0xFF); cs->BC_Write_Reg(cs, hscx, HSCX_XAD2, 0xFF); cs->BC_Write_Reg(cs, hscx, HSCX_RAH2, 0xFF); cs->BC_Write_Reg(cs, hscx, HSCX_XBCH, 0x0); cs->BC_Write_Reg(cs, hscx, HSCX_RLCR, 0x0); cs->BC_Write_Reg(cs, hscx, HSCX_CCR1, test_bit(HW_IPAC, &cs->HW_Flags) ? 0x82 : 0x85); cs->BC_Write_Reg(cs, hscx, HSCX_CCR2, 0x30); cs->BC_Write_Reg(cs, hscx, HSCX_XCCR, 7); cs->BC_Write_Reg(cs, hscx, HSCX_RCCR, 7); /* Switch IOM 1 SSI */ if (test_bit(HW_IOM1, &cs->HW_Flags) && (hscx == 0)) bc = 1 - bc; if (bc == 0) { cs->BC_Write_Reg(cs, hscx, HSCX_TSAX, test_bit(HW_IOM1, &cs->HW_Flags) ? 0x7 : bcs->hw.hscx.tsaxr0); cs->BC_Write_Reg(cs, hscx, HSCX_TSAR, test_bit(HW_IOM1, &cs->HW_Flags) ? 
0x7 : bcs->hw.hscx.tsaxr0); } else { cs->BC_Write_Reg(cs, hscx, HSCX_TSAX, bcs->hw.hscx.tsaxr1); cs->BC_Write_Reg(cs, hscx, HSCX_TSAR, bcs->hw.hscx.tsaxr1); } switch (mode) { case (L1_MODE_NULL): cs->BC_Write_Reg(cs, hscx, HSCX_TSAX, 0x1f); cs->BC_Write_Reg(cs, hscx, HSCX_TSAR, 0x1f); cs->BC_Write_Reg(cs, hscx, HSCX_MODE, 0x84); break; case (L1_MODE_TRANS): cs->BC_Write_Reg(cs, hscx, HSCX_MODE, 0xe4); break; case (L1_MODE_HDLC): cs->BC_Write_Reg(cs, hscx, HSCX_CCR1, test_bit(HW_IPAC, &cs->HW_Flags) ? 0x8a : 0x8d); cs->BC_Write_Reg(cs, hscx, HSCX_MODE, 0x8c); break; } if (mode) cs->BC_Write_Reg(cs, hscx, HSCX_CMDR, 0x41); cs->BC_Write_Reg(cs, hscx, HSCX_ISTA, 0x00); } void hscx_l2l1(struct PStack *st, int pr, void *arg) { struct BCState *bcs = st->l1.bcs; u_long flags; struct sk_buff *skb = arg; switch (pr) { case (PH_DATA | REQUEST): spin_lock_irqsave(&bcs->cs->lock, flags); if (bcs->tx_skb) { skb_queue_tail(&bcs->squeue, skb); } else { bcs->tx_skb = skb; test_and_set_bit(BC_FLG_BUSY, &bcs->Flag); bcs->hw.hscx.count = 0; bcs->cs->BC_Send_Data(bcs); } spin_unlock_irqrestore(&bcs->cs->lock, flags); break; case (PH_PULL | INDICATION): spin_lock_irqsave(&bcs->cs->lock, flags); if (bcs->tx_skb) { printk(KERN_WARNING "hscx_l2l1: this shouldn't happen\n"); } else { test_and_set_bit(BC_FLG_BUSY, &bcs->Flag); bcs->tx_skb = skb; bcs->hw.hscx.count = 0; bcs->cs->BC_Send_Data(bcs); } spin_unlock_irqrestore(&bcs->cs->lock, flags); break; case (PH_PULL | REQUEST): if (!bcs->tx_skb) { test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags); st->l1.l1l2(st, PH_PULL | CONFIRM, NULL); } else test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags); break; case (PH_ACTIVATE | REQUEST): spin_lock_irqsave(&bcs->cs->lock, flags); test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag); modehscx(bcs, st->l1.mode, st->l1.bc); spin_unlock_irqrestore(&bcs->cs->lock, flags); l1_msg_b(st, pr, arg); break; case (PH_DEACTIVATE | REQUEST): l1_msg_b(st, pr, arg); break; case (PH_DEACTIVATE | CONFIRM): 
spin_lock_irqsave(&bcs->cs->lock, flags); test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag); test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); modehscx(bcs, 0, st->l1.bc); spin_unlock_irqrestore(&bcs->cs->lock, flags); st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL); break; } } static void close_hscxstate(struct BCState *bcs) { modehscx(bcs, 0, bcs->channel); if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) { kfree(bcs->hw.hscx.rcvbuf); bcs->hw.hscx.rcvbuf = NULL; kfree(bcs->blog); bcs->blog = NULL; skb_queue_purge(&bcs->rqueue); skb_queue_purge(&bcs->squeue); if (bcs->tx_skb) { dev_kfree_skb_any(bcs->tx_skb); bcs->tx_skb = NULL; test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); } } } int open_hscxstate(struct IsdnCardState *cs, struct BCState *bcs) { if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) { if (!(bcs->hw.hscx.rcvbuf = kmalloc(HSCX_BUFMAX, GFP_ATOMIC))) { printk(KERN_WARNING "HiSax: No memory for hscx.rcvbuf\n"); test_and_clear_bit(BC_FLG_INIT, &bcs->Flag); return (1); } if (!(bcs->blog = kmalloc(MAX_BLOG_SPACE, GFP_ATOMIC))) { printk(KERN_WARNING "HiSax: No memory for bcs->blog\n"); test_and_clear_bit(BC_FLG_INIT, &bcs->Flag); kfree(bcs->hw.hscx.rcvbuf); bcs->hw.hscx.rcvbuf = NULL; return (2); } skb_queue_head_init(&bcs->rqueue); skb_queue_head_init(&bcs->squeue); } bcs->tx_skb = NULL; test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); bcs->event = 0; bcs->hw.hscx.rcvidx = 0; bcs->tx_cnt = 0; return (0); } static int setstack_hscx(struct PStack *st, struct BCState *bcs) { bcs->channel = st->l1.bc; if (open_hscxstate(st->l1.hardware, bcs)) return (-1); st->l1.bcs = bcs; st->l2.l2l1 = hscx_l2l1; setstack_manager(st); bcs->st = st; setstack_l1_B(st); return (0); } void clear_pending_hscx_ints(struct IsdnCardState *cs) { int val, eval; val = cs->BC_Read_Reg(cs, 1, HSCX_ISTA); debugl1(cs, "HSCX B ISTA %x", val); if (val & 0x01) { eval = cs->BC_Read_Reg(cs, 1, HSCX_EXIR); debugl1(cs, "HSCX B EXIR %x", eval); } if (val & 0x02) { eval = cs->BC_Read_Reg(cs, 0, HSCX_EXIR); debugl1(cs, 
"HSCX A EXIR %x", eval); } val = cs->BC_Read_Reg(cs, 0, HSCX_ISTA); debugl1(cs, "HSCX A ISTA %x", val); val = cs->BC_Read_Reg(cs, 1, HSCX_STAR); debugl1(cs, "HSCX B STAR %x", val); val = cs->BC_Read_Reg(cs, 0, HSCX_STAR); debugl1(cs, "HSCX A STAR %x", val); /* disable all IRQ */ cs->BC_Write_Reg(cs, 0, HSCX_MASK, 0xFF); cs->BC_Write_Reg(cs, 1, HSCX_MASK, 0xFF); } void inithscx(struct IsdnCardState *cs) { cs->bcs[0].BC_SetStack = setstack_hscx; cs->bcs[1].BC_SetStack = setstack_hscx; cs->bcs[0].BC_Close = close_hscxstate; cs->bcs[1].BC_Close = close_hscxstate; cs->bcs[0].hw.hscx.hscx = 0; cs->bcs[1].hw.hscx.hscx = 1; cs->bcs[0].hw.hscx.tsaxr0 = 0x2f; cs->bcs[0].hw.hscx.tsaxr1 = 3; cs->bcs[1].hw.hscx.tsaxr0 = 0x2f; cs->bcs[1].hw.hscx.tsaxr1 = 3; modehscx(cs->bcs, 0, 0); modehscx(cs->bcs + 1, 0, 0); } void inithscxisac(struct IsdnCardState *cs, int part) { if (part & 1) { clear_pending_isac_ints(cs); clear_pending_hscx_ints(cs); initisac(cs); inithscx(cs); } if (part & 2) { /* Reenable all IRQ */ cs->writeisac(cs, ISAC_MASK, 0); cs->BC_Write_Reg(cs, 0, HSCX_MASK, 0); cs->BC_Write_Reg(cs, 1, HSCX_MASK, 0); /* RESET Receiver and Transmitter */ cs->writeisac(cs, ISAC_CMDR, 0x41); } }
gpl-2.0
dinh-linux/linux-socfpga
drivers/tty/n_tracesink.c
12100
7253
/* * n_tracesink.c - Trace data router and sink path through tty space. * * Copyright (C) Intel 2011 * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * The trace sink uses the Linux line discipline framework to receive * trace data coming from the PTI source line discipline driver * to a user-desired tty port, like USB. * This is to provide a way to extract modem trace data on * devices that do not have a PTI HW module, or just need modem * trace data to come out of a different HW output port. * This is part of a solution for the P1149.7, compact JTAG, standard. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/ioctl.h> #include <linux/tty.h> #include <linux/tty_ldisc.h> #include <linux/errno.h> #include <linux/string.h> #include <asm-generic/bug.h> #include "n_tracesink.h" /* * Other ldisc drivers use 65536 which basically means, * 'I can always accept 64k' and flow control is off. * This number is deemed appropriate for this driver. */ #define RECEIVE_ROOM 65536 #define DRIVERNAME "n_tracesink" /* * there is a quirk with this ldisc is he can write data * to a tty from anyone calling his kernel API, which * meets customer requirements in the drivers/misc/pti.c * project. So he needs to know when he can and cannot write when * the API is called. 
In theory, the API can be called * after an init() but before a successful open() which * would crash the system if tty is not checked. */ static struct tty_struct *this_tty; static DEFINE_MUTEX(writelock); /** * n_tracesink_open() - Called when a tty is opened by a SW entity. * @tty: terminal device to the ldisc. * * Return: * 0 for success, * -EFAULT = couldn't get a tty kref n_tracesink will sit * on top of * -EEXIST = open() called successfully once and it cannot * be called again. * * Caveats: open() should only be successful the first time a * SW entity calls it. */ static int n_tracesink_open(struct tty_struct *tty) { int retval = -EEXIST; mutex_lock(&writelock); if (this_tty == NULL) { this_tty = tty_kref_get(tty); if (this_tty == NULL) { retval = -EFAULT; } else { tty->disc_data = this_tty; tty_driver_flush_buffer(tty); retval = 0; } } mutex_unlock(&writelock); return retval; } /** * n_tracesink_close() - close connection * @tty: terminal device to the ldisc. * * Called when a software entity wants to close a connection. */ static void n_tracesink_close(struct tty_struct *tty) { mutex_lock(&writelock); tty_driver_flush_buffer(tty); tty_kref_put(this_tty); this_tty = NULL; tty->disc_data = NULL; mutex_unlock(&writelock); } /** * n_tracesink_read() - read request from user space * @tty: terminal device passed into the ldisc. * @file: pointer to open file object. * @buf: pointer to the data buffer that gets eventually returned. * @nr: number of bytes of the data buffer that is returned. * * function that allows read() functionality in userspace. By default if this * is not implemented it returns -EIO. This module is functioning like a * router via n_tracesink_receivebuf(), and there is no real requirement * to implement this function. However, an error return value other than * -EIO should be used just to show that there was an intent not to have * this function implemented. Return value based on read() man pages. 
* * Return: * -EINVAL */ static ssize_t n_tracesink_read(struct tty_struct *tty, struct file *file, unsigned char __user *buf, size_t nr) { return -EINVAL; } /** * n_tracesink_write() - Function that allows write() in userspace. * @tty: terminal device passed into the ldisc. * @file: pointer to open file object. * @buf: pointer to the data buffer that gets eventually returned. * @nr: number of bytes of the data buffer that is returned. * * By default if this is not implemented, it returns -EIO. * This should not be implemented, ever, because * 1. this driver is functioning like a router via * n_tracesink_receivebuf() * 2. No writes to HW will ever go through this line discpline driver. * However, an error return value other than -EIO should be used * just to show that there was an intent not to have this function * implemented. Return value based on write() man pages. * * Return: * -EINVAL */ static ssize_t n_tracesink_write(struct tty_struct *tty, struct file *file, const unsigned char *buf, size_t nr) { return -EINVAL; } /** * n_tracesink_datadrain() - Kernel API function used to route * trace debugging data to user-defined * port like USB. * * @buf: Trace debuging data buffer to write to tty target * port. Null value will return with no write occurring. * @count: Size of buf. Value of 0 or a negative number will * return with no write occuring. * * Caveat: If this line discipline does not set the tty it sits * on top of via an open() call, this API function will not * call the tty's write() call because it will have no pointer * to call the write(). */ void n_tracesink_datadrain(u8 *buf, int count) { mutex_lock(&writelock); if ((buf != NULL) && (count > 0) && (this_tty != NULL)) this_tty->ops->write(this_tty, buf, count); mutex_unlock(&writelock); } EXPORT_SYMBOL_GPL(n_tracesink_datadrain); /* * Flush buffer is not impelemented as the ldisc has no internal buffering * so the tty_driver_flush_buffer() is sufficient for this driver's needs. 
/**
 * n_tracesink_init - module initialisation
 *
 * Registers this module as a line discipline driver under the
 * N_TRACESINK number (defined in linux/tty.h).
 *
 * Return:
 *	0 for success, any other value error.
 */
static int __init n_tracesink_init(void)
{
	int rc;

	rc = tty_register_ldisc(N_TRACESINK, &tty_n_tracesink);
	if (rc < 0)
		pr_err("%s: Registration failed: %d\n", __func__, rc);

	return rc;
}
gpl-2.0
NoelMacwan/SXDNanhu
drivers/misc/sgi-xp/xpc_sn2.c
13892
69998
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved. */ /* * Cross Partition Communication (XPC) sn2-based functions. * * Architecture specific implementation of common functions. * */ #include <linux/delay.h> #include <linux/slab.h> #include <asm/uncached.h> #include <asm/sn/mspec.h> #include <asm/sn/sn_sal.h> #include "xpc.h" /* * Define the number of u64s required to represent all the C-brick nasids * as a bitmap. The cross-partition kernel modules deal only with * C-brick nasids, thus the need for bitmaps which don't account for * odd-numbered (non C-brick) nasids. */ #define XPC_MAX_PHYSNODES_SN2 (MAX_NUMALINK_NODES / 2) #define XP_NASID_MASK_BYTES_SN2 ((XPC_MAX_PHYSNODES_SN2 + 7) / 8) #define XP_NASID_MASK_WORDS_SN2 ((XPC_MAX_PHYSNODES_SN2 + 63) / 64) /* * Memory for XPC's amo variables is allocated by the MSPEC driver. These * pages are located in the lowest granule. The lowest granule uses 4k pages * for cached references and an alternate TLB handler to never provide a * cacheable mapping for the entire region. This will prevent speculative * reading of cached copies of our lines from being issued which will cause * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64 * amo variables (based on XP_MAX_NPARTITIONS_SN2) to identify the senders of * NOTIFY IRQs, 128 amo variables (based on XP_NASID_MASK_WORDS_SN2) to identify * the senders of ACTIVATE IRQs, 1 amo variable to identify which remote * partitions (i.e., XPCs) consider themselves currently engaged with the * local XPC and 1 amo variable to request partition deactivation. 
*/ #define XPC_NOTIFY_IRQ_AMOS_SN2 0 #define XPC_ACTIVATE_IRQ_AMOS_SN2 (XPC_NOTIFY_IRQ_AMOS_SN2 + \ XP_MAX_NPARTITIONS_SN2) #define XPC_ENGAGED_PARTITIONS_AMO_SN2 (XPC_ACTIVATE_IRQ_AMOS_SN2 + \ XP_NASID_MASK_WORDS_SN2) #define XPC_DEACTIVATE_REQUEST_AMO_SN2 (XPC_ENGAGED_PARTITIONS_AMO_SN2 + 1) /* * Buffer used to store a local copy of portions of a remote partition's * reserved page (either its header and part_nasids mask, or its vars). */ static void *xpc_remote_copy_buffer_base_sn2; static char *xpc_remote_copy_buffer_sn2; static struct xpc_vars_sn2 *xpc_vars_sn2; static struct xpc_vars_part_sn2 *xpc_vars_part_sn2; static int xpc_setup_partitions_sn2(void) { /* nothing needs to be done */ return 0; } static void xpc_teardown_partitions_sn2(void) { /* nothing needs to be done */ } /* SH_IPI_ACCESS shub register value on startup */ static u64 xpc_sh1_IPI_access_sn2; static u64 xpc_sh2_IPI_access0_sn2; static u64 xpc_sh2_IPI_access1_sn2; static u64 xpc_sh2_IPI_access2_sn2; static u64 xpc_sh2_IPI_access3_sn2; /* * Change protections to allow IPI operations. */ static void xpc_allow_IPI_ops_sn2(void) { int node; int nasid; /* !!! The following should get moved into SAL. 
*/ if (is_shub2()) { xpc_sh2_IPI_access0_sn2 = (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0)); xpc_sh2_IPI_access1_sn2 = (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1)); xpc_sh2_IPI_access2_sn2 = (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2)); xpc_sh2_IPI_access3_sn2 = (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3)); for_each_online_node(node) { nasid = cnodeid_to_nasid(node); HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), -1UL); HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), -1UL); HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), -1UL); HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), -1UL); } } else { xpc_sh1_IPI_access_sn2 = (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS)); for_each_online_node(node) { nasid = cnodeid_to_nasid(node); HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), -1UL); } } } /* * Restrict protections to disallow IPI operations. */ static void xpc_disallow_IPI_ops_sn2(void) { int node; int nasid; /* !!! The following should get moved into SAL. */ if (is_shub2()) { for_each_online_node(node) { nasid = cnodeid_to_nasid(node); HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), xpc_sh2_IPI_access0_sn2); HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), xpc_sh2_IPI_access1_sn2); HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), xpc_sh2_IPI_access2_sn2); HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), xpc_sh2_IPI_access3_sn2); } } else { for_each_online_node(node) { nasid = cnodeid_to_nasid(node); HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), xpc_sh1_IPI_access_sn2); } } } /* * The following set of functions are used for the sending and receiving of * IRQs (also known as IPIs). There are two flavors of IRQs, one that is * associated with partition activity (SGI_XPC_ACTIVATE) and the other that * is associated with channel activity (SGI_XPC_NOTIFY). 
*/ static u64 xpc_receive_IRQ_amo_sn2(struct amo *amo) { return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR); } static enum xp_retval xpc_send_IRQ_sn2(struct amo *amo, u64 flag, int nasid, int phys_cpuid, int vector) { int ret = 0; unsigned long irq_flags; local_irq_save(irq_flags); FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, flag); sn_send_IPI_phys(nasid, phys_cpuid, vector, 0); /* * We must always use the nofault function regardless of whether we * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we * didn't, we'd never know that the other partition is down and would * keep sending IRQs and amos to it until the heartbeat times out. */ ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable), xp_nofault_PIOR_target)); local_irq_restore(irq_flags); return (ret == 0) ? xpSuccess : xpPioReadError; } static struct amo * xpc_init_IRQ_amo_sn2(int index) { struct amo *amo = xpc_vars_sn2->amos_page + index; (void)xpc_receive_IRQ_amo_sn2(amo); /* clear amo variable */ return amo; } /* * Functions associated with SGI_XPC_ACTIVATE IRQ. */ /* * Notify the heartbeat check thread that an activate IRQ has been received. */ static irqreturn_t xpc_handle_activate_IRQ_sn2(int irq, void *dev_id) { unsigned long irq_flags; spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); xpc_activate_IRQ_rcvd++; spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); wake_up_interruptible(&xpc_activate_IRQ_wq); return IRQ_HANDLED; } /* * Flag the appropriate amo variable and send an IRQ to the specified node. 
 */
static void
xpc_send_activate_IRQ_sn2(unsigned long amos_page_pa, int from_nasid,
			  int to_nasid, int to_phys_cpuid)
{
	/* activate amos live at a fixed offset within the remote amos page;
	   the bit set identifies our sending nasid (nasids are even, so /2) */
	struct amo *amos = (struct amo *)__va(amos_page_pa +
					      (XPC_ACTIVATE_IRQ_AMOS_SN2 *
					      sizeof(struct amo)));

	(void)xpc_send_IRQ_sn2(&amos[BIT_WORD(from_nasid / 2)],
			       BIT_MASK(from_nasid / 2), to_nasid,
			       to_phys_cpuid, SGI_XPC_ACTIVATE);
}

/*
 * Pretend that an activate IRQ arrived from the given remote nasid: set the
 * corresponding bit in our own activate amo and wake the heartbeat checker,
 * exactly as xpc_handle_activate_IRQ_sn2() would have done.
 */
static void
xpc_send_local_activate_IRQ_sn2(int from_nasid)
{
	unsigned long irq_flags;
	struct amo *amos = (struct amo *)__va(xpc_vars_sn2->amos_page_pa +
					      (XPC_ACTIVATE_IRQ_AMOS_SN2 *
					      sizeof(struct amo)));

	/* fake the sending and receipt of an activate IRQ from remote nasid */
	FETCHOP_STORE_OP(TO_AMO((u64)&amos[BIT_WORD(from_nasid / 2)].variable),
			 FETCHOP_OR, BIT_MASK(from_nasid / 2));

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	xpc_activate_IRQ_rcvd++;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	wake_up_interruptible(&xpc_activate_IRQ_wq);
}

/*
 * Functions associated with SGI_XPC_NOTIFY IRQ.
 */

/*
 * Check to see if any chctl flags were sent from the specified partition.
 * Fetch-and-clear our local chctl amo for that partition; if any flags were
 * set, merge them into part->chctl under the lock and wake the channel
 * manager to process them.
 */
static void
xpc_check_for_sent_chctl_flags_sn2(struct xpc_partition *part)
{
	union xpc_channel_ctl_flags chctl;
	unsigned long irq_flags;

	chctl.all_flags = xpc_receive_IRQ_amo_sn2(part->sn.sn2.
						  local_chctl_amo_va);
	if (chctl.all_flags == 0)
		return;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	part->chctl.all_flags |= chctl.all_flags;
	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	dev_dbg(xpc_chan, "received notify IRQ from partid=%d, chctl.all_flags="
		"0x%llx\n", XPC_PARTID(part), chctl.all_flags);

	xpc_wakeup_channel_mgr(part);
}

/*
 * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
 * than one partition, we use an amo structure per partition to indicate
 * whether a partition has sent an IRQ or not. If it has, then wake up the
 * associated kthread to handle it.
 *
 * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IRQs sent by XPC
 * running on other partitions.
 *
 * Noteworthy Arguments:
 *
 *	irq - Interrupt ReQuest number. NOT USED.
 *
 *	dev_id - partid of IRQ's potential sender.
 */
static irqreturn_t
xpc_handle_notify_IRQ_sn2(int irq, void *dev_id)
{
	short partid = (short)(u64)dev_id;
	struct xpc_partition *part = &xpc_partitions[partid];

	DBUG_ON(partid < 0 || partid >= XP_MAX_NPARTITIONS_SN2);

	/* only look at the amo if we hold a reference on the partition;
	   xpc_part_ref() failing means the partition is being torn down */
	if (xpc_part_ref(part)) {
		xpc_check_for_sent_chctl_flags_sn2(part);

		xpc_part_deref(part);
	}
	return IRQ_HANDLED;
}

/*
 * Check to see if xpc_handle_notify_IRQ_sn2() dropped any IRQs on the floor
 * because the write to their associated amo variable completed after the IRQ
 * was received.
 *
 * Runs periodically as a timer callback: re-checks the chctl amo and then
 * re-arms the dropped-notify timer for this partition.
 */
static void
xpc_check_for_dropped_notify_IRQ_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;

	if (xpc_part_ref(part)) {
		xpc_check_for_sent_chctl_flags_sn2(part);

		part_sn2->dropped_notify_IRQ_timer.expires = jiffies +
		    XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
		add_timer(&part_sn2->dropped_notify_IRQ_timer);
		xpc_part_deref(part);
	}
}

/*
 * Send a notify IRQ to the remote partition that is associated with the
 * specified channel.
 */
static void
xpc_send_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag,
			char *chctl_flag_string, unsigned long *irq_flags)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	union xpc_channel_ctl_flags chctl = { 0 };
	enum xp_retval ret;

	if (likely(part->act_state != XPC_P_AS_DEACTIVATING)) {
		/* set only this channel's flag byte in the chctl union */
		chctl.flags[ch->number] = chctl_flag;
		ret = xpc_send_IRQ_sn2(part_sn2->remote_chctl_amo_va,
				       chctl.all_flags,
				       part_sn2->notify_IRQ_nasid,
				       part_sn2->notify_IRQ_phys_cpuid,
				       SGI_XPC_NOTIFY);
		dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
			chctl_flag_string, ch->partid, ch->number, ret);
		if (unlikely(ret != xpSuccess)) {
			/* the send failed: deactivate the partition, dropping
			   ch->lock around the call if the caller holds it */
			if (irq_flags != NULL)
				spin_unlock_irqrestore(&ch->lock, *irq_flags);
			XPC_DEACTIVATE_PARTITION(part, ret);
			if (irq_flags != NULL)
				spin_lock_irqsave(&ch->lock, *irq_flags);
		}
	}
}

/* convenience wrapper that stringifies the flag name for the debug message */
#define XPC_SEND_NOTIFY_IRQ_SN2(_ch, _ipi_f, _irq_f) \
	xpc_send_notify_IRQ_sn2(_ch, _ipi_f, #_ipi_f, _irq_f)

/*
 * Make it look like the remote partition, which is associated with the
 * specified channel, sent us a notify IRQ. This faked IRQ will be handled
 * by xpc_check_for_dropped_notify_IRQ_sn2().
 */
static void
xpc_send_local_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag,
			      char *chctl_flag_string)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	union xpc_channel_ctl_flags chctl = { 0 };

	/* OR the flag into OUR local chctl amo; no IPI is sent — the
	   dropped-notify timer will pick it up */
	chctl.flags[ch->number] = chctl_flag;
	FETCHOP_STORE_OP(TO_AMO((u64)&part->sn.sn2.local_chctl_amo_va->
				variable), FETCHOP_OR, chctl.all_flags);
	dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
		chctl_flag_string, ch->partid, ch->number);
}

/* convenience wrapper that stringifies the flag name for the debug message */
#define XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(_ch, _ipi_f) \
	xpc_send_local_notify_IRQ_sn2(_ch, _ipi_f, #_ipi_f)

/*
 * The following wrappers populate the openclose args (where needed) and then
 * send the corresponding chctl flag to the channel's remote partition.
 */

static void
xpc_send_chctl_closerequest_sn2(struct xpc_channel *ch,
				unsigned long *irq_flags)
{
	struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args;

	args->reason = ch->reason;
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREQUEST, irq_flags);
}

static void
xpc_send_chctl_closereply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
{
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREPLY, irq_flags);
}

static void
xpc_send_chctl_openrequest_sn2(struct xpc_channel *ch,
			       unsigned long *irq_flags)
{
	struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args;

	args->entry_size = ch->entry_size;
	args->local_nentries = ch->local_nentries;
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREQUEST, irq_flags);
}

static void
xpc_send_chctl_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args;

	args->remote_nentries = ch->remote_nentries;
	args->local_nentries = ch->local_nentries;
	args->local_msgqueue_pa = xp_pa(ch->sn.sn2.local_msgqueue);
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREPLY, irq_flags);
}

static void
xpc_send_chctl_opencomplete_sn2(struct xpc_channel *ch,
				unsigned long *irq_flags)
{
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENCOMPLETE, irq_flags);
}

static void
xpc_send_chctl_msgrequest_sn2(struct xpc_channel *ch)
{
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST, NULL);
}

static void
xpc_send_chctl_local_msgrequest_sn2(struct xpc_channel *ch)
{
	XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST);
}

/*
 * Record the physical address of the remote partition's message queue for
 * this channel.  Always succeeds on sn2.
 */
static enum xp_retval
xpc_save_remote_msgqueue_pa_sn2(struct xpc_channel *ch,
				unsigned long msgqueue_pa)
{
	ch->sn.sn2.remote_msgqueue_pa = msgqueue_pa;
	return xpSuccess;
}

/*
 * This next set of functions are used to keep track of when a partition is
 * potentially engaged in accessing memory belonging to another partition.
 */

static void
xpc_indicate_partition_engaged_sn2(struct xpc_partition *part)
{
	unsigned long irq_flags;
	struct amo *amo = (struct amo *)__va(part->sn.sn2.remote_amos_page_pa +
					     (XPC_ENGAGED_PARTITIONS_AMO_SN2 *
					     sizeof(struct amo)));

	local_irq_save(irq_flags);

	/* set bit corresponding to our partid in remote partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
			 BIT(sn_partition_id));

	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IRQs and amos to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
							       variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);
}

static void
xpc_indicate_partition_disengaged_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	unsigned long irq_flags;
	struct amo *amo = (struct amo *)__va(part_sn2->remote_amos_page_pa +
					     (XPC_ENGAGED_PARTITIONS_AMO_SN2 *
					     sizeof(struct amo)));

	local_irq_save(irq_flags);

	/* clear bit corresponding to our partid in remote partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
			 ~BIT(sn_partition_id));

	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IRQs and amos to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
							       variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);

	/*
	 * Send activate IRQ to get other side to see that we've cleared our
	 * bit in their engaged partitions amo.
	 */
	xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
				  cnodeid_to_nasid(0),
				  part_sn2->activate_IRQ_nasid,
				  part_sn2->activate_IRQ_phys_cpuid);
}

/*
 * Assume the given partition is no longer engaged: clear its bit in OUR
 * engaged-partitions amo without waiting for it to do so itself.
 */
static void
xpc_assume_partition_disengaged_sn2(short partid)
{
	struct amo *amo = xpc_vars_sn2->amos_page +
			  XPC_ENGAGED_PARTITIONS_AMO_SN2;

	/* clear bit(s) based on partid mask in our partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
			 ~BIT(partid));
}

/* Is the given partition's bit set in our engaged-partitions amo? */
static int
xpc_partition_engaged_sn2(short partid)
{
	struct amo *amo = xpc_vars_sn2->amos_page +
			  XPC_ENGAGED_PARTITIONS_AMO_SN2;

	/* our partition's amo variable ANDed with partid mask */
	return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
		BIT(partid)) != 0;
}

/* Is ANY partition's bit set in our engaged-partitions amo? */
static int
xpc_any_partition_engaged_sn2(void)
{
	struct amo *amo = xpc_vars_sn2->amos_page +
			  XPC_ENGAGED_PARTITIONS_AMO_SN2;

	/* our partition's amo variable */
	return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) != 0;
}

/* original protection values for each node */
static u64 xpc_prot_vec_sn2[MAX_NUMNODES];

/*
 * Change protections to allow amo operations on non-Shub 1.1 systems.
 */
static enum xp_retval
xpc_allow_amo_ops_sn2(struct amo *amos_page)
{
	enum xp_retval ret = xpSuccess;

	/*
	 * On SHUB 1.1, we cannot call sn_change_memprotect() since the BIST
	 * collides with memory operations. On those systems we call
	 * xpc_allow_amo_ops_shub_wars_1_1_sn2() instead.
	 */
	if (!enable_shub_wars_1_1())
		ret = xp_expand_memprotect(ia64_tpa((u64)amos_page),
					   PAGE_SIZE);
	return ret;
}

/*
 * Change protections to allow amo operations on Shub 1.1 systems.
 */
static void
xpc_allow_amo_ops_shub_wars_1_1_sn2(void)
{
	int node;
	int nasid;

	/* no-op unless the Shub 1.1 workaround is in effect */
	if (!enable_shub_wars_1_1())
		return;

	for_each_online_node(node) {
		nasid = cnodeid_to_nasid(node);
		/* save current protection values */
		xpc_prot_vec_sn2[node] =
		    (u64)HUB_L((u64 *)GLOBAL_MMR_ADDR(nasid,
						  SH1_MD_DQLP_MMR_DIR_PRIVEC0));
		/* open up everything */
		HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
					     SH1_MD_DQLP_MMR_DIR_PRIVEC0),
		      -1UL);
		HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
					     SH1_MD_DQRP_MMR_DIR_PRIVEC0),
		      -1UL);
	}
}

/*
 * Ask SAL for the physical address (and length) of this partition's reserved
 * page, translating the SAL status into an xp_retval.  SALRET_MORE_PASSES
 * means the caller must call again with the updated cookie.
 */
static enum xp_retval
xpc_get_partition_rsvd_page_pa_sn2(void *buf, u64 *cookie, unsigned long *rp_pa,
				   size_t *len)
{
	s64 status;
	enum xp_retval ret;

	status = sn_partition_reserved_page_pa((u64)buf, cookie,
					       (u64 *)rp_pa, (u64 *)len);
	if (status == SALRET_OK)
		ret = xpSuccess;
	else if (status == SALRET_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpSalError;

	return ret;
}

/*
 * Initialize the sn2-specific portion of the reserved page: the xpc_vars
 * structure, the per-partition vars array that follows it, and the page of
 * amos used for cross-partition signalling.  Returns 0 or a negative errno.
 */
static int
xpc_setup_rsvd_page_sn2(struct xpc_rsvd_page *rp)
{
	struct amo *amos_page;
	int i;
	int ret;

	xpc_vars_sn2 = XPC_RP_VARS(rp);

	rp->sn.sn2.vars_pa = xp_pa(xpc_vars_sn2);

	/* vars_part array follows immediately after vars */
	xpc_vars_part_sn2 = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) +
							 XPC_RP_VARS_SIZE);

	/*
	 * Before clearing xpc_vars_sn2, see if a page of amos had been
	 * previously allocated. If not we'll need to allocate one and set
	 * permissions so that cross-partition amos are allowed.
	 *
	 * The allocated amo page needs MCA reporting to remain disabled after
	 * XPC has unloaded. To make this work, we keep a copy of the pointer
	 * to this page (i.e., amos_page) in the struct xpc_vars_sn2 structure,
	 * which is pointed to by the reserved page, and re-use that saved copy
	 * on subsequent loads of XPC. This amo page is never freed, and its
	 * memory protections are never restricted.
	 */
	amos_page = xpc_vars_sn2->amos_page;
	if (amos_page == NULL) {
		amos_page = (struct amo *)TO_AMO(uncached_alloc_page(0, 1));
		if (amos_page == NULL) {
			dev_err(xpc_part, "can't allocate page of amos\n");
			return -ENOMEM;
		}

		/*
		 * Open up amo-R/W to cpu. This is done on Shub 1.1 systems
		 * when xpc_allow_amo_ops_shub_wars_1_1_sn2() is called.
		 */
		ret = xpc_allow_amo_ops_sn2(amos_page);
		if (ret != xpSuccess) {
			dev_err(xpc_part, "can't allow amo operations\n");
			uncached_free_page(__IA64_UNCACHED_OFFSET |
					   TO_PHYS((u64)amos_page), 1);
			return -EPERM;
		}
	}

	/* clear xpc_vars_sn2 */
	memset(xpc_vars_sn2, 0, sizeof(struct xpc_vars_sn2));

	xpc_vars_sn2->version = XPC_V_VERSION;
	xpc_vars_sn2->activate_IRQ_nasid = cpuid_to_nasid(0);
	xpc_vars_sn2->activate_IRQ_phys_cpuid = cpu_physical_id(0);
	xpc_vars_sn2->vars_part_pa = xp_pa(xpc_vars_part_sn2);
	xpc_vars_sn2->amos_page_pa = ia64_tpa((u64)amos_page);
	xpc_vars_sn2->amos_page = amos_page;	/* save for next load of XPC */

	/* clear xpc_vars_part_sn2 */
	memset((u64 *)xpc_vars_part_sn2, 0, sizeof(struct xpc_vars_part_sn2) *
	       XP_MAX_NPARTITIONS_SN2);

	/* initialize the activate IRQ related amo variables */
	for (i = 0; i < xpc_nasid_mask_nlongs; i++)
		(void)xpc_init_IRQ_amo_sn2(XPC_ACTIVATE_IRQ_AMOS_SN2 + i);

	/* initialize the engaged remote partitions related amo variables */
	(void)xpc_init_IRQ_amo_sn2(XPC_ENGAGED_PARTITIONS_AMO_SN2);
	(void)xpc_init_IRQ_amo_sn2(XPC_DEACTIVATE_REQUEST_AMO_SN2);

	return 0;
}

/*
 * Heartbeat-mask helpers: heartbeating_to_mask is a bitmap, indexed by
 * partid, of the partitions we are sending our heartbeat to.
 */

static int
xpc_hb_allowed_sn2(short partid, void *heartbeating_to_mask)
{
	return test_bit(partid, heartbeating_to_mask);
}

static void
xpc_allow_hb_sn2(short partid)
{
	DBUG_ON(xpc_vars_sn2 == NULL);
	set_bit(partid, xpc_vars_sn2->heartbeating_to_mask);
}

static void
xpc_disallow_hb_sn2(short partid)
{
	DBUG_ON(xpc_vars_sn2 == NULL);
	clear_bit(partid, xpc_vars_sn2->heartbeating_to_mask);
}

static void
xpc_disallow_all_hbs_sn2(void)
{
	DBUG_ON(xpc_vars_sn2 == NULL);
	bitmap_zero(xpc_vars_sn2->heartbeating_to_mask, xp_max_npartitions);
}

static void
xpc_increment_heartbeat_sn2(void)
{
	xpc_vars_sn2->heartbeat++;
}

/* bump the heartbeat one last time and mark it as deliberately offline */
static void
xpc_offline_heartbeat_sn2(void)
{
	xpc_increment_heartbeat_sn2();
	xpc_vars_sn2->heartbeat_offline = 1;
}

/* bump the heartbeat and clear the offline flag */
static void
xpc_online_heartbeat_sn2(void)
{
	xpc_increment_heartbeat_sn2();
	xpc_vars_sn2->heartbeat_offline = 0;
}

static void
xpc_heartbeat_init_sn2(void)
{
	DBUG_ON(xpc_vars_sn2 == NULL);

	bitmap_zero(xpc_vars_sn2->heartbeating_to_mask,
		    XP_MAX_NPARTITIONS_SN2);
	xpc_online_heartbeat_sn2();
}

static void
xpc_heartbeat_exit_sn2(void)
{
	xpc_offline_heartbeat_sn2();
}

/*
 * Pull the remote partition's vars structure and decide whether its
 * heartbeat is still alive.  Returns xpNoHeartbeat when the heartbeat has
 * not advanced (and the remote side is not deliberately offline) or when
 * the remote side is no longer heartbeating to us.
 */
static enum xp_retval
xpc_get_remote_heartbeat_sn2(struct xpc_partition *part)
{
	struct xpc_vars_sn2 *remote_vars;
	enum xp_retval ret;

	remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer_sn2;

	/* pull the remote vars structure that contains the heartbeat */
	ret = xp_remote_memcpy(xp_pa(remote_vars),
			       part->sn.sn2.remote_vars_pa,
			       XPC_RP_VARS_SIZE);
	if (ret != xpSuccess)
		return ret;

	dev_dbg(xpc_part, "partid=%d, heartbeat=%lld, last_heartbeat=%lld, "
		"heartbeat_offline=%lld, HB_mask[0]=0x%lx\n", XPC_PARTID(part),
		remote_vars->heartbeat, part->last_heartbeat,
		remote_vars->heartbeat_offline,
		remote_vars->heartbeating_to_mask[0]);

	if ((remote_vars->heartbeat == part->last_heartbeat &&
	    !remote_vars->heartbeat_offline) ||
	    !xpc_hb_allowed_sn2(sn_partition_id,
				remote_vars->heartbeating_to_mask)) {
		ret = xpNoHeartbeat;
	} else {
		part->last_heartbeat = remote_vars->heartbeat;
	}

	return ret;
}

/*
 * Get a copy of the remote partition's XPC variables from the reserved page.
 *
 * remote_vars points to a buffer that is cacheline aligned for BTE copies and
 * assumed to be of size XPC_RP_VARS_SIZE.
 */
static enum xp_retval
xpc_get_remote_vars_sn2(unsigned long remote_vars_pa,
			struct xpc_vars_sn2 *remote_vars)
{
	enum xp_retval ret;

	if (remote_vars_pa == 0)
		return xpVarsNotSet;

	/* pull over the cross partition variables */
	ret = xp_remote_memcpy(xp_pa(remote_vars), remote_vars_pa,
			       XPC_RP_VARS_SIZE);
	if (ret != xpSuccess)
		return ret;

	/* only the major version number must match for us to interoperate */
	if (XPC_VERSION_MAJOR(remote_vars->version) !=
	    XPC_VERSION_MAJOR(XPC_V_VERSION)) {
		return xpBadVersion;
	}

	return xpSuccess;
}

/*
 * Kick off activation of the partition that owns the given nasid by faking
 * an activate IRQ from it.
 */
static void
xpc_request_partition_activation_sn2(struct xpc_rsvd_page *remote_rp,
				     unsigned long remote_rp_pa, int nasid)
{
	xpc_send_local_activate_IRQ_sn2(nasid);
}

/* Re-activate a known partition by faking an activate IRQ from it. */
static void
xpc_request_partition_reactivation_sn2(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_sn2(part->sn.sn2.activate_IRQ_nasid);
}

/*
 * Ask the remote partition to deactivate: set our bit in its deactivate
 * request amo, then (below) send it an activate IRQ so it notices.
 */
static void
xpc_request_partition_deactivation_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	unsigned long irq_flags;
	struct amo *amo = (struct amo *)__va(part_sn2->remote_amos_page_pa +
					     (XPC_DEACTIVATE_REQUEST_AMO_SN2 *
					     sizeof(struct amo)));

	local_irq_save(irq_flags);

	/* set bit corresponding to our partid in remote partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
			 BIT(sn_partition_id));

	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IRQs and amos to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
							       variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);

	/*
	 * Send activate IRQ to get other side to see that we've set our
	 * bit in their deactivate request amo.
	 */
	xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
				  cnodeid_to_nasid(0),
				  part_sn2->activate_IRQ_nasid,
				  part_sn2->activate_IRQ_phys_cpuid);
}

/*
 * Withdraw a previously issued deactivation request: clear our bit in the
 * remote partition's deactivate request amo.
 */
static void
xpc_cancel_partition_deactivation_request_sn2(struct xpc_partition *part)
{
	unsigned long irq_flags;
	struct amo *amo = (struct amo *)__va(part->sn.sn2.remote_amos_page_pa +
					     (XPC_DEACTIVATE_REQUEST_AMO_SN2 *
					     sizeof(struct amo)));

	local_irq_save(irq_flags);

	/* clear bit corresponding to our partid in remote partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
			 ~BIT(sn_partition_id));

	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IRQs and amos to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
							       variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);
}

/* Has the given partition asked US to deactivate? */
static int
xpc_partition_deactivation_requested_sn2(short partid)
{
	struct amo *amo = xpc_vars_sn2->amos_page +
			  XPC_DEACTIVATE_REQUEST_AMO_SN2;

	/* our partition's amo variable ANDed with partid mask */
	return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
		BIT(partid)) != 0;
}

/*
 * Update the remote partition's info.
 */
static void
xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
			      unsigned long *remote_rp_ts_jiffies,
			      unsigned long remote_rp_pa,
			      unsigned long remote_vars_pa,
			      struct xpc_vars_sn2 *remote_vars)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;

	/* copy each field from the freshly pulled reserved page / vars into
	   our bookkeeping, logging every value for debugging */
	part->remote_rp_version = remote_rp_version;
	dev_dbg(xpc_part, "  remote_rp_version = 0x%016x\n",
		part->remote_rp_version);

	part->remote_rp_ts_jiffies = *remote_rp_ts_jiffies;
	dev_dbg(xpc_part, "  remote_rp_ts_jiffies = 0x%016lx\n",
		part->remote_rp_ts_jiffies);

	part->remote_rp_pa = remote_rp_pa;
	dev_dbg(xpc_part, "  remote_rp_pa = 0x%016lx\n", part->remote_rp_pa);

	part_sn2->remote_vars_pa = remote_vars_pa;
	dev_dbg(xpc_part, "  remote_vars_pa = 0x%016lx\n",
		part_sn2->remote_vars_pa);

	/* one less than current so the first heartbeat check sees progress */
	part->last_heartbeat = remote_vars->heartbeat - 1;
	dev_dbg(xpc_part, "  last_heartbeat = 0x%016llx\n",
		part->last_heartbeat);

	part_sn2->remote_vars_part_pa = remote_vars->vars_part_pa;
	dev_dbg(xpc_part, "  remote_vars_part_pa = 0x%016lx\n",
		part_sn2->remote_vars_part_pa);

	part_sn2->activate_IRQ_nasid = remote_vars->activate_IRQ_nasid;
	dev_dbg(xpc_part, "  activate_IRQ_nasid = 0x%x\n",
		part_sn2->activate_IRQ_nasid);

	part_sn2->activate_IRQ_phys_cpuid =
	    remote_vars->activate_IRQ_phys_cpuid;
	dev_dbg(xpc_part, "  activate_IRQ_phys_cpuid = 0x%x\n",
		part_sn2->activate_IRQ_phys_cpuid);

	part_sn2->remote_amos_page_pa = remote_vars->amos_page_pa;
	dev_dbg(xpc_part, "  remote_amos_page_pa = 0x%lx\n",
		part_sn2->remote_amos_page_pa);

	part_sn2->remote_vars_version = remote_vars->version;
	dev_dbg(xpc_part, "  remote_vars_version = 0x%x\n",
		part_sn2->remote_vars_version);
}

/*
 * Prior code has determined the nasid which generated a activate IRQ.
 * Inspect that nasid to determine if its partition needs to be activated
 * or deactivated.
 *
 * A partition is considered "awaiting activation" if our partition
 * flags indicate it is not active and it has a heartbeat.
A
 * partition is considered "awaiting deactivation" if our partition
 * flags indicate it is active but it has no heartbeat or it is not
 * sending its heartbeat to us.
 *
 * To determine the heartbeat, the remote nasid must have a properly
 * initialized reserved page.
 */
static void
xpc_identify_activate_IRQ_req_sn2(int nasid)
{
	struct xpc_rsvd_page *remote_rp;
	struct xpc_vars_sn2 *remote_vars;
	unsigned long remote_rp_pa;
	unsigned long remote_vars_pa;
	int remote_rp_version;
	int reactivate = 0;
	unsigned long remote_rp_ts_jiffies = 0;
	short partid;
	struct xpc_partition *part;
	struct xpc_partition_sn2 *part_sn2;
	enum xp_retval ret;

	/* pull over the reserved page structure */

	remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer_sn2;

	ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
	if (ret != xpSuccess) {
		dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
			 "which sent interrupt, reason=%d\n", nasid, ret);
		return;
	}

	remote_vars_pa = remote_rp->sn.sn2.vars_pa;
	remote_rp_version = remote_rp->version;
	remote_rp_ts_jiffies = remote_rp->ts_jiffies;

	partid = remote_rp->SAL_partid;
	part = &xpc_partitions[partid];
	part_sn2 = &part->sn.sn2;

	/* pull over the cross partition variables */

	remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer_sn2;

	ret = xpc_get_remote_vars_sn2(remote_vars_pa, remote_vars);
	if (ret != xpSuccess) {
		dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
			 "which sent interrupt, reason=%d\n", nasid, ret);

		XPC_DEACTIVATE_PARTITION(part, ret);
		return;
	}

	part->activate_IRQ_rcvd++;

	dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
		"%lld:0x%lx\n", (int)nasid, (int)partid,
		part->activate_IRQ_rcvd,
		remote_vars->heartbeat, remote_vars->heartbeating_to_mask[0]);

	if (xpc_partition_disengaged(part) &&
	    part->act_state == XPC_P_AS_INACTIVE) {

		/* partition is inactive on our side: record its info and
		   activate it, unless it is waiting on us to deactivate */
		xpc_update_partition_info_sn2(part, remote_rp_version,
					      &remote_rp_ts_jiffies,
					      remote_rp_pa, remote_vars_pa,
					      remote_vars);

		if
 (xpc_partition_deactivation_requested_sn2(partid)) {
			/*
			 * Other side is waiting on us to deactivate even though
			 * we already have.
			 */
			return;
		}

		xpc_activate_partition(part);
		return;
	}

	DBUG_ON(part->remote_rp_version == 0);
	DBUG_ON(part_sn2->remote_vars_version == 0);

	if (remote_rp_ts_jiffies != part->remote_rp_ts_jiffies) {

		/* the other side rebooted */

		DBUG_ON(xpc_partition_engaged_sn2(partid));
		DBUG_ON(xpc_partition_deactivation_requested_sn2(partid));

		xpc_update_partition_info_sn2(part, remote_rp_version,
					      &remote_rp_ts_jiffies,
					      remote_rp_pa, remote_vars_pa,
					      remote_vars);
		reactivate = 1;
	}

	if (part->disengage_timeout > 0 && !xpc_partition_disengaged(part)) {
		/* still waiting on other side to disengage from us */
		return;
	}

	if (reactivate)
		XPC_DEACTIVATE_PARTITION(part, xpReactivating);
	else if (xpc_partition_deactivation_requested_sn2(partid))
		XPC_DEACTIVATE_PARTITION(part, xpOtherGoingDown);
}

/*
 * Loop through the activation amo variables and process any bits
 * which are set.  Each bit indicates a nasid sending a partition
 * activation or deactivation request.
 *
 * Return #of IRQs detected.
 */
int
xpc_identify_activate_IRQ_sender_sn2(void)
{
	int l;
	int b;
	unsigned long nasid_mask_long;
	u64 nasid;		/* remote nasid */
	int n_IRQs_detected = 0;
	struct amo *act_amos;

	act_amos = xpc_vars_sn2->amos_page + XPC_ACTIVATE_IRQ_AMOS_SN2;

	/* scan through activate amo variables looking for non-zero entries */
	for (l = 0; l < xpc_nasid_mask_nlongs; l++) {

		if (xpc_exiting)
			break;

		/* fetch-and-clear this word of nasid bits */
		nasid_mask_long = xpc_receive_IRQ_amo_sn2(&act_amos[l]);

		b = find_first_bit(&nasid_mask_long, BITS_PER_LONG);
		if (b >= BITS_PER_LONG) {
			/* no IRQs from nasids in this amo variable */
			continue;
		}

		dev_dbg(xpc_part, "amo[%d] gave back 0x%lx\n", l,
			nasid_mask_long);

		/*
		 * If this nasid has been added to the machine since
		 * our partition was reset, this will retain the
		 * remote nasid in our reserved pages machine mask.
		 * This is used in the event of module reload.
		 */
		xpc_mach_nasids[l] |= nasid_mask_long;

		/* locate the nasid(s) which sent interrupts */

		do {
			n_IRQs_detected++;
			/* bit positions map to nasid/2, so scale back up */
			nasid = (l * BITS_PER_LONG + b) * 2;
			dev_dbg(xpc_part, "interrupt from nasid %lld\n", nasid);
			xpc_identify_activate_IRQ_req_sn2(nasid);

			b = find_next_bit(&nasid_mask_long, BITS_PER_LONG,
					  b + 1);
		} while (b < BITS_PER_LONG);
	}
	return n_IRQs_detected;
}

/*
 * Consume the activate-IRQ count accumulated by the interrupt handler and
 * identify the senders.  If fewer senders were found than IRQs were counted,
 * scan once more in case an amo write landed after the first pass.
 */
static void
xpc_process_activate_IRQ_rcvd_sn2(void)
{
	unsigned long irq_flags;
	int n_IRQs_expected;
	int n_IRQs_detected;

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	n_IRQs_expected = xpc_activate_IRQ_rcvd;
	xpc_activate_IRQ_rcvd = 0;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	n_IRQs_detected = xpc_identify_activate_IRQ_sender_sn2();
	if (n_IRQs_detected < n_IRQs_expected) {
		/* retry once to help avoid missing amo */
		(void)xpc_identify_activate_IRQ_sender_sn2();
	}
}

/*
 * Setup the channel structures that are sn2 specific.
 */
static enum xp_retval
xpc_setup_ch_structures_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	struct xpc_channel_sn2 *ch_sn2;
	enum xp_retval retval;
	int ret;
	int cpuid;
	int ch_number;
	struct timer_list *timer;
	short partid = XPC_PARTID(part);

	/* allocate all the required GET/PUT values */

	part_sn2->local_GPs =
	    xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL,
					  &part_sn2->local_GPs_base);
	if (part_sn2->local_GPs == NULL) {
		dev_err(xpc_chan, "can't get memory for local get/put "
			"values\n");
		return xpNoMemory;
	}

	part_sn2->remote_GPs =
	    xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL,
					  &part_sn2->remote_GPs_base);
	if (part_sn2->remote_GPs == NULL) {
		dev_err(xpc_chan, "can't get memory for remote get/put "
			"values\n");
		retval = xpNoMemory;
		goto out_1;
	}

	part_sn2->remote_GPs_pa = 0;

	/* allocate all the required open and close args */

	part_sn2->local_openclose_args =
	    xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE,
					  GFP_KERNEL, &part_sn2->
					  local_openclose_args_base);
	if (part_sn2->local_openclose_args ==
 NULL) {
		dev_err(xpc_chan, "can't get memory for local connect args\n");
		retval = xpNoMemory;
		goto out_2;
	}

	part_sn2->remote_openclose_args_pa = 0;

	part_sn2->local_chctl_amo_va = xpc_init_IRQ_amo_sn2(partid);

	part_sn2->notify_IRQ_nasid = 0;
	part_sn2->notify_IRQ_phys_cpuid = 0;
	part_sn2->remote_chctl_amo_va = NULL;

	/* register the shared notify IRQ handler, keyed by partid */
	sprintf(part_sn2->notify_IRQ_owner, "xpc%02d", partid);
	ret = request_irq(SGI_XPC_NOTIFY, xpc_handle_notify_IRQ_sn2,
			  IRQF_SHARED, part_sn2->notify_IRQ_owner,
			  (void *)(u64)partid);
	if (ret != 0) {
		dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
			"errno=%d\n", -ret);
		retval = xpLackOfResources;
		goto out_3;
	}

	/* Setup a timer to check for dropped notify IRQs */
	timer = &part_sn2->dropped_notify_IRQ_timer;
	init_timer(timer);
	timer->function =
	    (void (*)(unsigned long))xpc_check_for_dropped_notify_IRQ_sn2;
	timer->data = (unsigned long)part;
	timer->expires = jiffies + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
	add_timer(timer);

	/* point each channel at its slice of the per-partition arrays */
	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch_sn2 = &part->channels[ch_number].sn.sn2;

		ch_sn2->local_GP = &part_sn2->local_GPs[ch_number];
		ch_sn2->local_openclose_args =
		    &part_sn2->local_openclose_args[ch_number];

		mutex_init(&ch_sn2->msg_to_pull_mutex);
	}

	/*
	 * Setup the per partition specific variables required by the
	 * remote partition to establish channel connections with us.
	 *
	 * The setting of the magic # indicates that these per partition
	 * specific variables are ready to be used.
	 */
	xpc_vars_part_sn2[partid].GPs_pa = xp_pa(part_sn2->local_GPs);
	xpc_vars_part_sn2[partid].openclose_args_pa =
	    xp_pa(part_sn2->local_openclose_args);
	xpc_vars_part_sn2[partid].chctl_amo_pa =
	    xp_pa(part_sn2->local_chctl_amo_va);
	cpuid = raw_smp_processor_id();	/* any CPU in this partition will do */
	xpc_vars_part_sn2[partid].notify_IRQ_nasid = cpuid_to_nasid(cpuid);
	xpc_vars_part_sn2[partid].notify_IRQ_phys_cpuid =
	    cpu_physical_id(cpuid);
	xpc_vars_part_sn2[partid].nchannels = part->nchannels;
	/* set last: publishes the block above to the remote partition */
	xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC1_SN2;

	return xpSuccess;

	/* setup of ch structures failed */
out_3:
	kfree(part_sn2->local_openclose_args_base);
	part_sn2->local_openclose_args = NULL;
out_2:
	kfree(part_sn2->remote_GPs_base);
	part_sn2->remote_GPs = NULL;
out_1:
	kfree(part_sn2->local_GPs_base);
	part_sn2->local_GPs = NULL;
	return retval;
}

/*
 * Teardown the channel structures that are sn2 specific.
 */
static void
xpc_teardown_ch_structures_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	short partid = XPC_PARTID(part);

	/*
	 * Indicate that the variables specific to the remote partition are no
	 * longer available for its use.
	 */
	xpc_vars_part_sn2[partid].magic = 0;

	/* in case we've still got outstanding timers registered... */
	del_timer_sync(&part_sn2->dropped_notify_IRQ_timer);
	free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid);

	kfree(part_sn2->local_openclose_args_base);
	part_sn2->local_openclose_args = NULL;
	kfree(part_sn2->remote_GPs_base);
	part_sn2->remote_GPs = NULL;
	kfree(part_sn2->local_GPs_base);
	part_sn2->local_GPs = NULL;
	part_sn2->local_chctl_amo_va = NULL;
}

/*
 * Create a wrapper that hides the underlying mechanism for pulling a cacheline
 * (or multiple cachelines) from a remote partition.
 *
 * src_pa must be a cacheline aligned physical address on the remote partition.
 * dst must be a cacheline aligned virtual address on this partition.
 * cnt must be cacheline sized
 */
/* ???
 Replace this function by call to xp_remote_memcpy() or bte_copy()? */
static enum xp_retval
xpc_pull_remote_cachelines_sn2(struct xpc_partition *part, void *dst,
			       const unsigned long src_pa, size_t cnt)
{
	enum xp_retval ret;

	DBUG_ON(src_pa != L1_CACHE_ALIGN(src_pa));
	DBUG_ON((unsigned long)dst != L1_CACHE_ALIGN((unsigned long)dst));
	DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));

	/* don't pull from a partition that is on its way down */
	if (part->act_state == XPC_P_AS_DEACTIVATING)
		return part->reason;

	ret = xp_remote_memcpy(xp_pa(dst), src_pa, cnt);
	if (ret != xpSuccess) {
		dev_dbg(xpc_chan, "xp_remote_memcpy() from partition %d failed,"
			" ret=%d\n", XPC_PARTID(part), ret);
	}
	return ret;
}

/*
 * Pull the remote per partition specific variables from the specified
 * partition.
 */
static enum xp_retval
xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	/* oversized stack buffer so we can carve out an aligned cacheline */
	u8 buffer[L1_CACHE_BYTES * 2];
	struct xpc_vars_part_sn2 *pulled_entry_cacheline =
	    (struct xpc_vars_part_sn2 *)L1_CACHE_ALIGN((u64)buffer);
	struct xpc_vars_part_sn2 *pulled_entry;
	unsigned long remote_entry_cacheline_pa;
	unsigned long remote_entry_pa;
	short partid = XPC_PARTID(part);
	enum xp_retval ret;

	/* pull the cacheline that contains the variables we're interested in */

	DBUG_ON(part_sn2->remote_vars_part_pa !=
		L1_CACHE_ALIGN(part_sn2->remote_vars_part_pa));
	DBUG_ON(sizeof(struct xpc_vars_part_sn2) != L1_CACHE_BYTES / 2);

	remote_entry_pa = part_sn2->remote_vars_part_pa +
	    sn_partition_id * sizeof(struct xpc_vars_part_sn2);

	remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));

	pulled_entry = (struct xpc_vars_part_sn2 *)((u64)pulled_entry_cacheline
						    + (remote_entry_pa &
						    (L1_CACHE_BYTES - 1)));

	ret = xpc_pull_remote_cachelines_sn2(part, pulled_entry_cacheline,
					     remote_entry_cacheline_pa,
					     L1_CACHE_BYTES);
	if (ret != xpSuccess) {
		dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
			"partition %d, ret=%d\n", partid, ret);
		return ret;
	}

	/* see if they've been set up yet */

	if (pulled_entry->magic != XPC_VP_MAGIC1_SN2
 &&
	    pulled_entry->magic != XPC_VP_MAGIC2_SN2) {

		if (pulled_entry->magic != 0) {
			dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
				"partition %d has bad magic value (=0x%llx)\n",
				partid, sn_partition_id, pulled_entry->magic);
			return xpBadMagic;
		}

		/* they've not been initialized yet */
		return xpRetry;
	}

	if (xpc_vars_part_sn2[partid].magic == XPC_VP_MAGIC1_SN2) {

		/* validate the variables */

		if (pulled_entry->GPs_pa == 0 ||
		    pulled_entry->openclose_args_pa == 0 ||
		    pulled_entry->chctl_amo_pa == 0) {

			dev_err(xpc_chan, "partition %d's XPC vars_part for "
				"partition %d are not valid\n", partid,
				sn_partition_id);
			return xpInvalidAddress;
		}

		/* the variables we imported look to be valid */

		part_sn2->remote_GPs_pa = pulled_entry->GPs_pa;
		part_sn2->remote_openclose_args_pa =
		    pulled_entry->openclose_args_pa;
		part_sn2->remote_chctl_amo_va =
		    (struct amo *)__va(pulled_entry->chctl_amo_pa);
		part_sn2->notify_IRQ_nasid = pulled_entry->notify_IRQ_nasid;
		part_sn2->notify_IRQ_phys_cpuid =
		    pulled_entry->notify_IRQ_phys_cpuid;

		/* both sides agree on the smaller channel count */
		if (part->nchannels > pulled_entry->nchannels)
			part->nchannels = pulled_entry->nchannels;

		/* let the other side know that we've pulled their variables */

		xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC2_SN2;
	}

	/* MAGIC1 means the remote side hasn't pulled OUR variables yet */
	if (pulled_entry->magic == XPC_VP_MAGIC1_SN2)
		return xpRetry;

	return xpSuccess;
}

/*
 * Establish first contact with the remote partititon. This involves pulling
 * the XPC per partition variables from the remote partition and waiting for
 * the remote partition to pull ours.
 */
static enum xp_retval
xpc_make_first_contact_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	enum xp_retval ret;

	/*
	 * Register the remote partition's amos with SAL so it can handle
	 * and cleanup errors within that address range should the remote
	 * partition go down. We don't unregister this range because it is
	 * difficult to tell when outstanding writes to the remote partition
	 * are finished and thus when it is safe to unregister.
This should * not result in wasted space in the SAL xp_addr_region table because * we should get the same page for remote_amos_page_pa after module * reloads and system reboots. */ if (sn_register_xp_addr_region(part_sn2->remote_amos_page_pa, PAGE_SIZE, 1) < 0) { dev_warn(xpc_part, "xpc_activating(%d) failed to register " "xp_addr region\n", XPC_PARTID(part)); ret = xpPhysAddrRegFailed; XPC_DEACTIVATE_PARTITION(part, ret); return ret; } /* * Send activate IRQ to get other side to activate if they've not * already begun to do so. */ xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa, cnodeid_to_nasid(0), part_sn2->activate_IRQ_nasid, part_sn2->activate_IRQ_phys_cpuid); while ((ret = xpc_pull_remote_vars_part_sn2(part)) != xpSuccess) { if (ret != xpRetry) { XPC_DEACTIVATE_PARTITION(part, ret); return ret; } dev_dbg(xpc_part, "waiting to make first contact with " "partition %d\n", XPC_PARTID(part)); /* wait a 1/4 of a second or so */ (void)msleep_interruptible(250); if (part->act_state == XPC_P_AS_DEACTIVATING) return part->reason; } return xpSuccess; } /* * Get the chctl flags and pull the openclose args and/or remote GPs as needed. */ static u64 xpc_get_chctl_all_flags_sn2(struct xpc_partition *part) { struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; unsigned long irq_flags; union xpc_channel_ctl_flags chctl; enum xp_retval ret; /* * See if there are any chctl flags to be handled. 
*/ spin_lock_irqsave(&part->chctl_lock, irq_flags); chctl = part->chctl; if (chctl.all_flags != 0) part->chctl.all_flags = 0; spin_unlock_irqrestore(&part->chctl_lock, irq_flags); if (xpc_any_openclose_chctl_flags_set(&chctl)) { ret = xpc_pull_remote_cachelines_sn2(part, part-> remote_openclose_args, part_sn2-> remote_openclose_args_pa, XPC_OPENCLOSE_ARGS_SIZE); if (ret != xpSuccess) { XPC_DEACTIVATE_PARTITION(part, ret); dev_dbg(xpc_chan, "failed to pull openclose args from " "partition %d, ret=%d\n", XPC_PARTID(part), ret); /* don't bother processing chctl flags anymore */ chctl.all_flags = 0; } } if (xpc_any_msg_chctl_flags_set(&chctl)) { ret = xpc_pull_remote_cachelines_sn2(part, part_sn2->remote_GPs, part_sn2->remote_GPs_pa, XPC_GP_SIZE); if (ret != xpSuccess) { XPC_DEACTIVATE_PARTITION(part, ret); dev_dbg(xpc_chan, "failed to pull GPs from partition " "%d, ret=%d\n", XPC_PARTID(part), ret); /* don't bother processing chctl flags anymore */ chctl.all_flags = 0; } } return chctl.all_flags; } /* * Allocate the local message queue and the notify queue. 
*/ static enum xp_retval xpc_allocate_local_msgqueue_sn2(struct xpc_channel *ch) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; unsigned long irq_flags; int nentries; size_t nbytes; for (nentries = ch->local_nentries; nentries > 0; nentries--) { nbytes = nentries * ch->entry_size; ch_sn2->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes, GFP_KERNEL, &ch_sn2->local_msgqueue_base); if (ch_sn2->local_msgqueue == NULL) continue; nbytes = nentries * sizeof(struct xpc_notify_sn2); ch_sn2->notify_queue = kzalloc(nbytes, GFP_KERNEL); if (ch_sn2->notify_queue == NULL) { kfree(ch_sn2->local_msgqueue_base); ch_sn2->local_msgqueue = NULL; continue; } spin_lock_irqsave(&ch->lock, irq_flags); if (nentries < ch->local_nentries) { dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, " "partid=%d, channel=%d\n", nentries, ch->local_nentries, ch->partid, ch->number); ch->local_nentries = nentries; } spin_unlock_irqrestore(&ch->lock, irq_flags); return xpSuccess; } dev_dbg(xpc_chan, "can't get memory for local message queue and notify " "queue, partid=%d, channel=%d\n", ch->partid, ch->number); return xpNoMemory; } /* * Allocate the cached remote message queue. 
*/ static enum xp_retval xpc_allocate_remote_msgqueue_sn2(struct xpc_channel *ch) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; unsigned long irq_flags; int nentries; size_t nbytes; DBUG_ON(ch->remote_nentries <= 0); for (nentries = ch->remote_nentries; nentries > 0; nentries--) { nbytes = nentries * ch->entry_size; ch_sn2->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes, GFP_KERNEL, &ch_sn2-> remote_msgqueue_base); if (ch_sn2->remote_msgqueue == NULL) continue; spin_lock_irqsave(&ch->lock, irq_flags); if (nentries < ch->remote_nentries) { dev_dbg(xpc_chan, "nentries=%d remote_nentries=%d, " "partid=%d, channel=%d\n", nentries, ch->remote_nentries, ch->partid, ch->number); ch->remote_nentries = nentries; } spin_unlock_irqrestore(&ch->lock, irq_flags); return xpSuccess; } dev_dbg(xpc_chan, "can't get memory for cached remote message queue, " "partid=%d, channel=%d\n", ch->partid, ch->number); return xpNoMemory; } /* * Allocate message queues and other stuff associated with a channel. * * Note: Assumes all of the channel sizes are filled in. */ static enum xp_retval xpc_setup_msg_structures_sn2(struct xpc_channel *ch) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; enum xp_retval ret; DBUG_ON(ch->flags & XPC_C_SETUP); ret = xpc_allocate_local_msgqueue_sn2(ch); if (ret == xpSuccess) { ret = xpc_allocate_remote_msgqueue_sn2(ch); if (ret != xpSuccess) { kfree(ch_sn2->local_msgqueue_base); ch_sn2->local_msgqueue = NULL; kfree(ch_sn2->notify_queue); ch_sn2->notify_queue = NULL; } } return ret; } /* * Free up message queues and other stuff that were allocated for the specified * channel. 
*/ static void xpc_teardown_msg_structures_sn2(struct xpc_channel *ch) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; DBUG_ON(!spin_is_locked(&ch->lock)); ch_sn2->remote_msgqueue_pa = 0; ch_sn2->local_GP->get = 0; ch_sn2->local_GP->put = 0; ch_sn2->remote_GP.get = 0; ch_sn2->remote_GP.put = 0; ch_sn2->w_local_GP.get = 0; ch_sn2->w_local_GP.put = 0; ch_sn2->w_remote_GP.get = 0; ch_sn2->w_remote_GP.put = 0; ch_sn2->next_msg_to_pull = 0; if (ch->flags & XPC_C_SETUP) { dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n", ch->flags, ch->partid, ch->number); kfree(ch_sn2->local_msgqueue_base); ch_sn2->local_msgqueue = NULL; kfree(ch_sn2->remote_msgqueue_base); ch_sn2->remote_msgqueue = NULL; kfree(ch_sn2->notify_queue); ch_sn2->notify_queue = NULL; } } /* * Notify those who wanted to be notified upon delivery of their message. */ static void xpc_notify_senders_sn2(struct xpc_channel *ch, enum xp_retval reason, s64 put) { struct xpc_notify_sn2 *notify; u8 notify_type; s64 get = ch->sn.sn2.w_remote_GP.get - 1; while (++get < put && atomic_read(&ch->n_to_notify) > 0) { notify = &ch->sn.sn2.notify_queue[get % ch->local_nentries]; /* * See if the notify entry indicates it was associated with * a message who's sender wants to be notified. It is possible * that it is, but someone else is doing or has done the * notification. 
*/ notify_type = notify->type; if (notify_type == 0 || cmpxchg(&notify->type, notify_type, 0) != notify_type) { continue; } DBUG_ON(notify_type != XPC_N_CALL); atomic_dec(&ch->n_to_notify); if (notify->func != NULL) { dev_dbg(xpc_chan, "notify->func() called, notify=0x%p " "msg_number=%lld partid=%d channel=%d\n", (void *)notify, get, ch->partid, ch->number); notify->func(reason, ch->partid, ch->number, notify->key); dev_dbg(xpc_chan, "notify->func() returned, notify=0x%p" " msg_number=%lld partid=%d channel=%d\n", (void *)notify, get, ch->partid, ch->number); } } } static void xpc_notify_senders_of_disconnect_sn2(struct xpc_channel *ch) { xpc_notify_senders_sn2(ch, ch->reason, ch->sn.sn2.w_local_GP.put); } /* * Clear some of the msg flags in the local message queue. */ static inline void xpc_clear_local_msgqueue_flags_sn2(struct xpc_channel *ch) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; struct xpc_msg_sn2 *msg; s64 get; get = ch_sn2->w_remote_GP.get; do { msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->local_msgqueue + (get % ch->local_nentries) * ch->entry_size); DBUG_ON(!(msg->flags & XPC_M_SN2_READY)); msg->flags = 0; } while (++get < ch_sn2->remote_GP.get); } /* * Clear some of the msg flags in the remote message queue. 
*/ static inline void xpc_clear_remote_msgqueue_flags_sn2(struct xpc_channel *ch) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; struct xpc_msg_sn2 *msg; s64 put, remote_nentries = ch->remote_nentries; /* flags are zeroed when the buffer is allocated */ if (ch_sn2->remote_GP.put < remote_nentries) return; put = max(ch_sn2->w_remote_GP.put, remote_nentries); do { msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue + (put % remote_nentries) * ch->entry_size); DBUG_ON(!(msg->flags & XPC_M_SN2_READY)); DBUG_ON(!(msg->flags & XPC_M_SN2_DONE)); DBUG_ON(msg->number != put - remote_nentries); msg->flags = 0; } while (++put < ch_sn2->remote_GP.put); } static int xpc_n_of_deliverable_payloads_sn2(struct xpc_channel *ch) { return ch->sn.sn2.w_remote_GP.put - ch->sn.sn2.w_local_GP.get; } static void xpc_process_msg_chctl_flags_sn2(struct xpc_partition *part, int ch_number) { struct xpc_channel *ch = &part->channels[ch_number]; struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; int npayloads_sent; ch_sn2->remote_GP = part->sn.sn2.remote_GPs[ch_number]; /* See what, if anything, has changed for each connected channel */ xpc_msgqueue_ref(ch); if (ch_sn2->w_remote_GP.get == ch_sn2->remote_GP.get && ch_sn2->w_remote_GP.put == ch_sn2->remote_GP.put) { /* nothing changed since GPs were last pulled */ xpc_msgqueue_deref(ch); return; } if (!(ch->flags & XPC_C_CONNECTED)) { xpc_msgqueue_deref(ch); return; } /* * First check to see if messages recently sent by us have been * received by the other side. (The remote GET value will have * changed since we last looked at it.) */ if (ch_sn2->w_remote_GP.get != ch_sn2->remote_GP.get) { /* * We need to notify any senders that want to be notified * that their sent messages have been received by their * intended recipients. We need to do this before updating * w_remote_GP.get so that we don't allocate the same message * queue entries prematurely (see xpc_allocate_msg()). 
*/ if (atomic_read(&ch->n_to_notify) > 0) { /* * Notify senders that messages sent have been * received and delivered by the other side. */ xpc_notify_senders_sn2(ch, xpMsgDelivered, ch_sn2->remote_GP.get); } /* * Clear msg->flags in previously sent messages, so that * they're ready for xpc_allocate_msg(). */ xpc_clear_local_msgqueue_flags_sn2(ch); ch_sn2->w_remote_GP.get = ch_sn2->remote_GP.get; dev_dbg(xpc_chan, "w_remote_GP.get changed to %lld, partid=%d, " "channel=%d\n", ch_sn2->w_remote_GP.get, ch->partid, ch->number); /* * If anyone was waiting for message queue entries to become * available, wake them up. */ if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) wake_up(&ch->msg_allocate_wq); } /* * Now check for newly sent messages by the other side. (The remote * PUT value will have changed since we last looked at it.) */ if (ch_sn2->w_remote_GP.put != ch_sn2->remote_GP.put) { /* * Clear msg->flags in previously received messages, so that * they're ready for xpc_get_deliverable_payload_sn2(). 
*/ xpc_clear_remote_msgqueue_flags_sn2(ch); smp_wmb(); /* ensure flags have been cleared before bte_copy */ ch_sn2->w_remote_GP.put = ch_sn2->remote_GP.put; dev_dbg(xpc_chan, "w_remote_GP.put changed to %lld, partid=%d, " "channel=%d\n", ch_sn2->w_remote_GP.put, ch->partid, ch->number); npayloads_sent = xpc_n_of_deliverable_payloads_sn2(ch); if (npayloads_sent > 0) { dev_dbg(xpc_chan, "msgs waiting to be copied and " "delivered=%d, partid=%d, channel=%d\n", npayloads_sent, ch->partid, ch->number); if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) xpc_activate_kthreads(ch, npayloads_sent); } } xpc_msgqueue_deref(ch); } static struct xpc_msg_sn2 * xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get) { struct xpc_partition *part = &xpc_partitions[ch->partid]; struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; unsigned long remote_msg_pa; struct xpc_msg_sn2 *msg; u32 msg_index; u32 nmsgs; u64 msg_offset; enum xp_retval ret; if (mutex_lock_interruptible(&ch_sn2->msg_to_pull_mutex) != 0) { /* we were interrupted by a signal */ return NULL; } while (get >= ch_sn2->next_msg_to_pull) { /* pull as many messages as are ready and able to be pulled */ msg_index = ch_sn2->next_msg_to_pull % ch->remote_nentries; DBUG_ON(ch_sn2->next_msg_to_pull >= ch_sn2->w_remote_GP.put); nmsgs = ch_sn2->w_remote_GP.put - ch_sn2->next_msg_to_pull; if (msg_index + nmsgs > ch->remote_nentries) { /* ignore the ones that wrap the msg queue for now */ nmsgs = ch->remote_nentries - msg_index; } msg_offset = msg_index * ch->entry_size; msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue + msg_offset); remote_msg_pa = ch_sn2->remote_msgqueue_pa + msg_offset; ret = xpc_pull_remote_cachelines_sn2(part, msg, remote_msg_pa, nmsgs * ch->entry_size); if (ret != xpSuccess) { dev_dbg(xpc_chan, "failed to pull %d msgs starting with" " msg %lld from partition %d, channel=%d, " "ret=%d\n", nmsgs, ch_sn2->next_msg_to_pull, ch->partid, ch->number, ret); XPC_DEACTIVATE_PARTITION(part, ret); 
mutex_unlock(&ch_sn2->msg_to_pull_mutex); return NULL; } ch_sn2->next_msg_to_pull += nmsgs; } mutex_unlock(&ch_sn2->msg_to_pull_mutex); /* return the message we were looking for */ msg_offset = (get % ch->remote_nentries) * ch->entry_size; msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue + msg_offset); return msg; } /* * Get the next deliverable message's payload. */ static void * xpc_get_deliverable_payload_sn2(struct xpc_channel *ch) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; struct xpc_msg_sn2 *msg; void *payload = NULL; s64 get; do { if (ch->flags & XPC_C_DISCONNECTING) break; get = ch_sn2->w_local_GP.get; smp_rmb(); /* guarantee that .get loads before .put */ if (get == ch_sn2->w_remote_GP.put) break; /* There are messages waiting to be pulled and delivered. * We need to try to secure one for ourselves. We'll do this * by trying to increment w_local_GP.get and hope that no one * else beats us to it. If they do, we'll we'll simply have * to try again for the next one. */ if (cmpxchg(&ch_sn2->w_local_GP.get, get, get + 1) == get) { /* we got the entry referenced by get */ dev_dbg(xpc_chan, "w_local_GP.get changed to %lld, " "partid=%d, channel=%d\n", get + 1, ch->partid, ch->number); /* pull the message from the remote partition */ msg = xpc_pull_remote_msg_sn2(ch, get); if (msg != NULL) { DBUG_ON(msg->number != get); DBUG_ON(msg->flags & XPC_M_SN2_DONE); DBUG_ON(!(msg->flags & XPC_M_SN2_READY)); payload = &msg->payload; } break; } } while (1); return payload; } /* * Now we actually send the messages that are ready to be sent by advancing * the local message queue's Put value and then send a chctl msgrequest to the * recipient partition. 
*/ static void xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; struct xpc_msg_sn2 *msg; s64 put = initial_put + 1; int send_msgrequest = 0; while (1) { while (1) { if (put == ch_sn2->w_local_GP.put) break; msg = (struct xpc_msg_sn2 *)((u64)ch_sn2-> local_msgqueue + (put % ch->local_nentries) * ch->entry_size); if (!(msg->flags & XPC_M_SN2_READY)) break; put++; } if (put == initial_put) { /* nothing's changed */ break; } if (cmpxchg_rel(&ch_sn2->local_GP->put, initial_put, put) != initial_put) { /* someone else beat us to it */ DBUG_ON(ch_sn2->local_GP->put < initial_put); break; } /* we just set the new value of local_GP->put */ dev_dbg(xpc_chan, "local_GP->put changed to %lld, partid=%d, " "channel=%d\n", put, ch->partid, ch->number); send_msgrequest = 1; /* * We need to ensure that the message referenced by * local_GP->put is not XPC_M_SN2_READY or that local_GP->put * equals w_local_GP.put, so we'll go have a look. */ initial_put = put; } if (send_msgrequest) xpc_send_chctl_msgrequest_sn2(ch); } /* * Allocate an entry for a message from the message queue associated with the * specified channel. */ static enum xp_retval xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags, struct xpc_msg_sn2 **address_of_msg) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; struct xpc_msg_sn2 *msg; enum xp_retval ret; s64 put; /* * Get the next available message entry from the local message queue. * If none are available, we'll make sure that we grab the latest * GP values. */ ret = xpTimeout; while (1) { put = ch_sn2->w_local_GP.put; smp_rmb(); /* guarantee that .put loads before .get */ if (put - ch_sn2->w_remote_GP.get < ch->local_nentries) { /* There are available message entries. We need to try * to secure one for ourselves. We'll do this by trying * to increment w_local_GP.put as long as someone else * doesn't beat us to it. If they do, we'll have to * try again. 
*/ if (cmpxchg(&ch_sn2->w_local_GP.put, put, put + 1) == put) { /* we got the entry referenced by put */ break; } continue; /* try again */ } /* * There aren't any available msg entries at this time. * * In waiting for a message entry to become available, * we set a timeout in case the other side is not sending * completion interrupts. This lets us fake a notify IRQ * that will cause the notify IRQ handler to fetch the latest * GP values as if an interrupt was sent by the other side. */ if (ret == xpTimeout) xpc_send_chctl_local_msgrequest_sn2(ch); if (flags & XPC_NOWAIT) return xpNoWait; ret = xpc_allocate_msg_wait(ch); if (ret != xpInterrupted && ret != xpTimeout) return ret; } /* get the message's address and initialize it */ msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->local_msgqueue + (put % ch->local_nentries) * ch->entry_size); DBUG_ON(msg->flags != 0); msg->number = put; dev_dbg(xpc_chan, "w_local_GP.put changed to %lld; msg=0x%p, " "msg_number=%lld, partid=%d, channel=%d\n", put + 1, (void *)msg, msg->number, ch->partid, ch->number); *address_of_msg = msg; return xpSuccess; } /* * Common code that does the actual sending of the message by advancing the * local message queue's Put value and sends a chctl msgrequest to the * partition the message is being sent to. 
*/ static enum xp_retval xpc_send_payload_sn2(struct xpc_channel *ch, u32 flags, void *payload, u16 payload_size, u8 notify_type, xpc_notify_func func, void *key) { enum xp_retval ret = xpSuccess; struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; struct xpc_msg_sn2 *msg = msg; struct xpc_notify_sn2 *notify = notify; s64 msg_number; s64 put; DBUG_ON(notify_type == XPC_N_CALL && func == NULL); if (XPC_MSG_SIZE(payload_size) > ch->entry_size) return xpPayloadTooBig; xpc_msgqueue_ref(ch); if (ch->flags & XPC_C_DISCONNECTING) { ret = ch->reason; goto out_1; } if (!(ch->flags & XPC_C_CONNECTED)) { ret = xpNotConnected; goto out_1; } ret = xpc_allocate_msg_sn2(ch, flags, &msg); if (ret != xpSuccess) goto out_1; msg_number = msg->number; if (notify_type != 0) { /* * Tell the remote side to send an ACK interrupt when the * message has been delivered. */ msg->flags |= XPC_M_SN2_INTERRUPT; atomic_inc(&ch->n_to_notify); notify = &ch_sn2->notify_queue[msg_number % ch->local_nentries]; notify->func = func; notify->key = key; notify->type = notify_type; /* ??? Is a mb() needed here? */ if (ch->flags & XPC_C_DISCONNECTING) { /* * An error occurred between our last error check and * this one. We will try to clear the type field from * the notify entry. If we succeed then * xpc_disconnect_channel() didn't already process * the notify entry. */ if (cmpxchg(&notify->type, notify_type, 0) == notify_type) { atomic_dec(&ch->n_to_notify); ret = ch->reason; } goto out_1; } } memcpy(&msg->payload, payload, payload_size); msg->flags |= XPC_M_SN2_READY; /* * The preceding store of msg->flags must occur before the following * load of local_GP->put. 
*/ smp_mb(); /* see if the message is next in line to be sent, if so send it */ put = ch_sn2->local_GP->put; if (put == msg_number) xpc_send_msgs_sn2(ch, put); out_1: xpc_msgqueue_deref(ch); return ret; } /* * Now we actually acknowledge the messages that have been delivered and ack'd * by advancing the cached remote message queue's Get value and if requested * send a chctl msgrequest to the message sender's partition. * * If a message has XPC_M_SN2_INTERRUPT set, send an interrupt to the partition * that sent the message. */ static void xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; struct xpc_msg_sn2 *msg; s64 get = initial_get + 1; int send_msgrequest = 0; while (1) { while (1) { if (get == ch_sn2->w_local_GP.get) break; msg = (struct xpc_msg_sn2 *)((u64)ch_sn2-> remote_msgqueue + (get % ch->remote_nentries) * ch->entry_size); if (!(msg->flags & XPC_M_SN2_DONE)) break; msg_flags |= msg->flags; get++; } if (get == initial_get) { /* nothing's changed */ break; } if (cmpxchg_rel(&ch_sn2->local_GP->get, initial_get, get) != initial_get) { /* someone else beat us to it */ DBUG_ON(ch_sn2->local_GP->get <= initial_get); break; } /* we just set the new value of local_GP->get */ dev_dbg(xpc_chan, "local_GP->get changed to %lld, partid=%d, " "channel=%d\n", get, ch->partid, ch->number); send_msgrequest = (msg_flags & XPC_M_SN2_INTERRUPT); /* * We need to ensure that the message referenced by * local_GP->get is not XPC_M_SN2_DONE or that local_GP->get * equals w_local_GP.get, so we'll go have a look. 
*/ initial_get = get; } if (send_msgrequest) xpc_send_chctl_msgrequest_sn2(ch); } static void xpc_received_payload_sn2(struct xpc_channel *ch, void *payload) { struct xpc_msg_sn2 *msg; s64 msg_number; s64 get; msg = container_of(payload, struct xpc_msg_sn2, payload); msg_number = msg->number; dev_dbg(xpc_chan, "msg=0x%p, msg_number=%lld, partid=%d, channel=%d\n", (void *)msg, msg_number, ch->partid, ch->number); DBUG_ON((((u64)msg - (u64)ch->sn.sn2.remote_msgqueue) / ch->entry_size) != msg_number % ch->remote_nentries); DBUG_ON(!(msg->flags & XPC_M_SN2_READY)); DBUG_ON(msg->flags & XPC_M_SN2_DONE); msg->flags |= XPC_M_SN2_DONE; /* * The preceding store of msg->flags must occur before the following * load of local_GP->get. */ smp_mb(); /* * See if this message is next in line to be acknowledged as having * been delivered. */ get = ch->sn.sn2.local_GP->get; if (get == msg_number) xpc_acknowledge_msgs_sn2(ch, get, msg->flags); } static struct xpc_arch_operations xpc_arch_ops_sn2 = { .setup_partitions = xpc_setup_partitions_sn2, .teardown_partitions = xpc_teardown_partitions_sn2, .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2, .get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_sn2, .setup_rsvd_page = xpc_setup_rsvd_page_sn2, .allow_hb = xpc_allow_hb_sn2, .disallow_hb = xpc_disallow_hb_sn2, .disallow_all_hbs = xpc_disallow_all_hbs_sn2, .increment_heartbeat = xpc_increment_heartbeat_sn2, .offline_heartbeat = xpc_offline_heartbeat_sn2, .online_heartbeat = xpc_online_heartbeat_sn2, .heartbeat_init = xpc_heartbeat_init_sn2, .heartbeat_exit = xpc_heartbeat_exit_sn2, .get_remote_heartbeat = xpc_get_remote_heartbeat_sn2, .request_partition_activation = xpc_request_partition_activation_sn2, .request_partition_reactivation = xpc_request_partition_reactivation_sn2, .request_partition_deactivation = xpc_request_partition_deactivation_sn2, .cancel_partition_deactivation_request = xpc_cancel_partition_deactivation_request_sn2, .setup_ch_structures = 
xpc_setup_ch_structures_sn2, .teardown_ch_structures = xpc_teardown_ch_structures_sn2, .make_first_contact = xpc_make_first_contact_sn2, .get_chctl_all_flags = xpc_get_chctl_all_flags_sn2, .send_chctl_closerequest = xpc_send_chctl_closerequest_sn2, .send_chctl_closereply = xpc_send_chctl_closereply_sn2, .send_chctl_openrequest = xpc_send_chctl_openrequest_sn2, .send_chctl_openreply = xpc_send_chctl_openreply_sn2, .send_chctl_opencomplete = xpc_send_chctl_opencomplete_sn2, .process_msg_chctl_flags = xpc_process_msg_chctl_flags_sn2, .save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_sn2, .setup_msg_structures = xpc_setup_msg_structures_sn2, .teardown_msg_structures = xpc_teardown_msg_structures_sn2, .indicate_partition_engaged = xpc_indicate_partition_engaged_sn2, .indicate_partition_disengaged = xpc_indicate_partition_disengaged_sn2, .partition_engaged = xpc_partition_engaged_sn2, .any_partition_engaged = xpc_any_partition_engaged_sn2, .assume_partition_disengaged = xpc_assume_partition_disengaged_sn2, .n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_sn2, .send_payload = xpc_send_payload_sn2, .get_deliverable_payload = xpc_get_deliverable_payload_sn2, .received_payload = xpc_received_payload_sn2, .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2, }; int xpc_init_sn2(void) { int ret; size_t buf_size; xpc_arch_ops = xpc_arch_ops_sn2; if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) { dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is " "larger than %d\n", XPC_MSG_HDR_MAX_SIZE); return -E2BIG; } buf_size = max(XPC_RP_VARS_SIZE, XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES_SN2); xpc_remote_copy_buffer_sn2 = xpc_kmalloc_cacheline_aligned(buf_size, GFP_KERNEL, &xpc_remote_copy_buffer_base_sn2); if (xpc_remote_copy_buffer_sn2 == NULL) { dev_err(xpc_part, "can't get memory for remote copy buffer\n"); return -ENOMEM; } /* open up protections for IPI and [potentially] amo operations */ xpc_allow_IPI_ops_sn2(); 
xpc_allow_amo_ops_shub_wars_1_1_sn2(); /* * This is safe to do before the xpc_hb_checker thread has started * because the handler releases a wait queue. If an interrupt is * received before the thread is waiting, it will not go to sleep, * but rather immediately process the interrupt. */ ret = request_irq(SGI_XPC_ACTIVATE, xpc_handle_activate_IRQ_sn2, 0, "xpc hb", NULL); if (ret != 0) { dev_err(xpc_part, "can't register ACTIVATE IRQ handler, " "errno=%d\n", -ret); xpc_disallow_IPI_ops_sn2(); kfree(xpc_remote_copy_buffer_base_sn2); } return ret; } void xpc_exit_sn2(void) { free_irq(SGI_XPC_ACTIVATE, NULL); xpc_disallow_IPI_ops_sn2(); kfree(xpc_remote_copy_buffer_base_sn2); }
gpl-2.0
kevin78/linux-kevin
drivers/misc/cxl/fault.c
69
7162
/* * Copyright 2014 IBM Corp. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/workqueue.h> #include <linux/sched.h> #include <linux/pid.h> #include <linux/mm.h> #include <linux/moduleparam.h> #undef MODULE_PARAM_PREFIX #define MODULE_PARAM_PREFIX "cxl" "." #include <asm/current.h> #include <asm/copro.h> #include <asm/mmu.h> #include "cxl.h" static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb) { return ((sste->vsid_data == cpu_to_be64(slb->vsid)) && (sste->esid_data == cpu_to_be64(slb->esid))); } /* * This finds a free SSTE for the given SLB, or returns NULL if it's already in * the segment table. */ static struct cxl_sste* find_free_sste(struct cxl_context *ctx, struct copro_slb *slb) { struct cxl_sste *primary, *sste, *ret = NULL; unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */ unsigned int entry; unsigned int hash; if (slb->vsid & SLB_VSID_B_1T) hash = (slb->esid >> SID_SHIFT_1T) & mask; else /* 256M */ hash = (slb->esid >> SID_SHIFT) & mask; primary = ctx->sstp + (hash << 3); for (entry = 0, sste = primary; entry < 8; entry++, sste++) { if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V)) ret = sste; if (sste_matches(sste, slb)) return NULL; } if (ret) return ret; /* Nothing free, select an entry to cast out */ ret = primary + ctx->sst_lru; ctx->sst_lru = (ctx->sst_lru + 1) & 0x7; return ret; } static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb) { /* mask is the group index, we search primary and secondary here. 
*/ struct cxl_sste *sste; unsigned long flags; spin_lock_irqsave(&ctx->sste_lock, flags); sste = find_free_sste(ctx, slb); if (!sste) goto out_unlock; pr_devel("CXL Populating SST[%li]: %#llx %#llx\n", sste - ctx->sstp, slb->vsid, slb->esid); sste->vsid_data = cpu_to_be64(slb->vsid); sste->esid_data = cpu_to_be64(slb->esid); out_unlock: spin_unlock_irqrestore(&ctx->sste_lock, flags); } static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm, u64 ea) { struct copro_slb slb = {0,0}; int rc; if (!(rc = copro_calculate_slb(mm, ea, &slb))) { cxl_load_segment(ctx, &slb); } return rc; } static void cxl_ack_ae(struct cxl_context *ctx) { unsigned long flags; cxl_ack_irq(ctx, CXL_PSL_TFC_An_AE, 0); spin_lock_irqsave(&ctx->lock, flags); ctx->pending_fault = true; ctx->fault_addr = ctx->dar; ctx->fault_dsisr = ctx->dsisr; spin_unlock_irqrestore(&ctx->lock, flags); wake_up_all(&ctx->wq); } static int cxl_handle_segment_miss(struct cxl_context *ctx, struct mm_struct *mm, u64 ea) { int rc; pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea); if ((rc = cxl_fault_segment(ctx, mm, ea))) cxl_ack_ae(ctx); else { mb(); /* Order seg table write to TFC MMIO write */ cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0); } return IRQ_HANDLED; } static void cxl_handle_page_fault(struct cxl_context *ctx, struct mm_struct *mm, u64 dsisr, u64 dar) { unsigned flt = 0; int result; unsigned long access, flags, inv_flags = 0; if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) { pr_devel("copro_handle_mm_fault failed: %#x\n", result); return cxl_ack_ae(ctx); } /* * update_mmu_cache() will not have loaded the hash since current->trap * is not a 0x400 or 0x300, so just call hash_page_mm() here. 
*/ access = _PAGE_PRESENT; if (dsisr & CXL_PSL_DSISR_An_S) access |= _PAGE_RW; if ((!ctx->kernel) || ~(dar & (1ULL << 63))) access |= _PAGE_USER; if (dsisr & DSISR_NOHPTE) inv_flags |= HPTE_NOHPTE_UPDATE; local_irq_save(flags); hash_page_mm(mm, dar, access, 0x300, inv_flags); local_irq_restore(flags); pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe); cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0); } void cxl_handle_fault(struct work_struct *fault_work) { struct cxl_context *ctx = container_of(fault_work, struct cxl_context, fault_work); u64 dsisr = ctx->dsisr; u64 dar = ctx->dar; struct task_struct *task; struct mm_struct *mm; if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr || cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar || cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) { /* Most likely explanation is harmless - a dedicated process * has detached and these were cleared by the PSL purge, but * warn about it just in case */ dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n"); return; } pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. 
" "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar); if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) { pr_devel("cxl_handle_fault unable to get task %i\n", pid_nr(ctx->pid)); cxl_ack_ae(ctx); return; } if (!(mm = get_task_mm(task))) { pr_devel("cxl_handle_fault unable to get mm %i\n", pid_nr(ctx->pid)); cxl_ack_ae(ctx); goto out; } if (dsisr & CXL_PSL_DSISR_An_DS) cxl_handle_segment_miss(ctx, mm, dar); else if (dsisr & CXL_PSL_DSISR_An_DM) cxl_handle_page_fault(ctx, mm, dsisr, dar); else WARN(1, "cxl_handle_fault has nothing to handle\n"); mmput(mm); out: put_task_struct(task); } static void cxl_prefault_one(struct cxl_context *ctx, u64 ea) { int rc; struct task_struct *task; struct mm_struct *mm; if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) { pr_devel("cxl_prefault_one unable to get task %i\n", pid_nr(ctx->pid)); return; } if (!(mm = get_task_mm(task))) { pr_devel("cxl_prefault_one unable to get mm %i\n", pid_nr(ctx->pid)); put_task_struct(task); return; } rc = cxl_fault_segment(ctx, mm, ea); mmput(mm); put_task_struct(task); } static u64 next_segment(u64 ea, u64 vsid) { if (vsid & SLB_VSID_B_1T) ea |= (1ULL << 40) - 1; else ea |= (1ULL << 28) - 1; return ea + 1; } static void cxl_prefault_vma(struct cxl_context *ctx) { u64 ea, last_esid = 0; struct copro_slb slb; struct vm_area_struct *vma; int rc; struct task_struct *task; struct mm_struct *mm; if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) { pr_devel("cxl_prefault_vma unable to get task %i\n", pid_nr(ctx->pid)); return; } if (!(mm = get_task_mm(task))) { pr_devel("cxl_prefault_vm unable to get mm %i\n", pid_nr(ctx->pid)); goto out1; } down_read(&mm->mmap_sem); for (vma = mm->mmap; vma; vma = vma->vm_next) { for (ea = vma->vm_start; ea < vma->vm_end; ea = next_segment(ea, slb.vsid)) { rc = copro_calculate_slb(mm, ea, &slb); if (rc) continue; if (last_esid == slb.esid) continue; cxl_load_segment(ctx, &slb); last_esid = slb.esid; } } up_read(&mm->mmap_sem); mmput(mm); out1: put_task_struct(task); } 
/*
 * Pre-load address translations for a context before it starts running,
 * according to the AFU's configured prefault mode: just the segment of
 * the WED, every VMA of the task, or nothing at all.
 */
void cxl_prefault(struct cxl_context *ctx, u64 wed)
{
	if (ctx->afu->prefault_mode == CXL_PREFAULT_WED)
		cxl_prefault_one(ctx, wed);
	else if (ctx->afu->prefault_mode == CXL_PREFAULT_ALL)
		cxl_prefault_vma(ctx);
	/* any other mode: prefault nothing */
}
gpl-2.0
sconklin/rPi-kernel
net/mac80211/mesh_sync.c
69
9283
/* * Copyright 2011-2012, Pavel Zubarev <pavel.zubarev@gmail.com> * Copyright 2011-2012, Marco Porsch <marco.porsch@s2005.tu-chemnitz.de> * Copyright 2011-2012, cozybit Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include "ieee80211_i.h" #include "mesh.h" #include "driver-ops.h" /* This is not in the standard. It represents a tolerable tbtt drift below * which we do no TSF adjustment. */ #define TOFFSET_MINIMUM_ADJUSTMENT 10 /* This is not in the standard. It is a margin added to the * Toffset setpoint to mitigate TSF overcorrection * introduced by TSF adjustment latency. */ #define TOFFSET_SET_MARGIN 20 /* This is not in the standard. It represents the maximum Toffset jump above * which we'll invalidate the Toffset setpoint and choose a new setpoint. This * could be, for instance, in case a neighbor is restarted and its TSF counter * reset. 
*/
#define TOFFSET_MAXIMUM_ADJUSTMENT 30000		/* 30 ms */

/* One synchronization method and its mac80211 mesh callbacks. */
struct sync_method {
	u8 method;
	struct ieee80211_mesh_sync_ops ops;
};

/**
 * mesh_peer_tbtt_adjusting - check if an mp is currently adjusting its TBTT
 *
 * @ie: information elements of a management frame from the mesh peer
 */
static bool mesh_peer_tbtt_adjusting(struct ieee802_11_elems *ie)
{
	return (ie->mesh_config->meshconf_cap &
		MESHCONF_CAPAB_TBTT_ADJUSTING) != 0;
}

/* Apply the accumulated clock-drift correction to the local TSF.  A
 * single adjustment is capped at 0.04% of the beacon interval so the
 * beacon timing shifts gradually; any remainder stays queued in
 * sync_offset_clockdrift_max for the next round. */
void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	/* sdata->vif.bss_conf.beacon_int in 1024us units, 0.04% */
	u64 beacon_int_fraction = sdata->vif.bss_conf.beacon_int * 1024 / 2500;
	u64 tsf;
	u64 tsfdelta;

	spin_lock_bh(&ifmsh->sync_offset_lock);

	if (ifmsh->sync_offset_clockdrift_max < beacon_int_fraction) {
		msync_dbg(sdata, "TBTT : max clockdrift=%lld; adjusting\n",
			  (long long) ifmsh->sync_offset_clockdrift_max);
		tsfdelta = -ifmsh->sync_offset_clockdrift_max;
		ifmsh->sync_offset_clockdrift_max = 0;
	} else {
		msync_dbg(sdata, "TBTT : max clockdrift=%lld; adjusting by %llu\n",
			  (long long) ifmsh->sync_offset_clockdrift_max,
			  (unsigned long long) beacon_int_fraction);
		tsfdelta = -beacon_int_fraction;
		ifmsh->sync_offset_clockdrift_max -= beacon_int_fraction;
	}

	spin_unlock_bh(&ifmsh->sync_offset_lock);

	/* a driver return of -1ULL means the TSF is unavailable */
	tsf = drv_get_tsf(local, sdata);
	if (tsf != -1ULL)
		drv_set_tsf(local, sdata, tsf + tsfdelta);
}

/* rx_bcn_presp handler for the neighbor-offset sync method: on every
 * peer beacon, compute our timing offset to the peer (802.11 13.13.2.2.2)
 * and record the largest positive drift for adjust_tbtt to correct. */
static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
					  u16 stype,
					  struct ieee80211_mgmt *mgmt,
					  struct ieee802_11_elems *elems,
					  struct ieee80211_rx_status *rx_status)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	u64 t_t, t_r;

	WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET);

	/* standard mentions only beacons */
	if (stype != IEEE80211_STYPE_BEACON)
		return;

	/* The current tsf is a first approximation for the timestamp
	 * for the received beacon. Further down we try to get a
	 * better value from the rx_status->mactime field if
	 * available. Also we have to call drv_get_tsf() before
	 * entering the rcu-read section.*/
	t_r = drv_get_tsf(local, sdata);

	rcu_read_lock();
	sta = sta_info_get(sdata, mgmt->sa);
	if (!sta)
		goto no_sync;

	/* check offset sync conditions (13.13.2.2.1)
	 *
	 * TODO also sync to
	 * dot11MeshNbrOffsetMaxNeighbor non-peer non-MBSS neighbors
	 */

	if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) {
		/* peer is adjusting its own TBTT: its timing is unstable,
		 * so invalidate our setpoint for it */
		clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
		msync_dbg(sdata, "STA %pM : is adjusting TBTT\n",
			  sta->sta.addr);
		goto no_sync;
	}

	if (rx_status->flag & RX_FLAG_MACTIME_MPDU && rx_status->mactime) {
		/*
		 * The mactime is defined as the time the first data symbol
		 * of the frame hits the PHY, and the timestamp of the beacon
		 * is defined as "the time that the data symbol containing the
		 * first bit of the timestamp is transmitted to the PHY plus
		 * the transmitting STA's delays through its local PHY from the
		 * MAC-PHY interface to its interface with the WM" (802.11
		 * 11.1.2)
		 *
		 * T_r, in 13.13.2.2.2, is just defined as "the frame reception
		 * time" but we unless we interpret that time to be the same
		 * time of the beacon timestamp, the offset calculation will be
		 * off. Below we adjust t_r to be "the time at which the first
		 * symbol of the timestamp element in the beacon is received".
		 * This correction depends on the rate.
		 *
		 * Based on similar code in ibss.c
		 */
		int rate;

		if (rx_status->flag & RX_FLAG_HT) {
			/* TODO:
			 * In principle there could be HT-beacons (Dual Beacon
			 * HT Operation options), but for now ignore them and
			 * just use the primary (i.e. non-HT) beacons for
			 * synchronization.
			 * */
			goto no_sync;
		} else
			rate = local->hw.wiphy->bands[rx_status->band]->
				bitrates[rx_status->rate_idx].bitrate;

		/* 24 bytes of header * 8 bits/byte *
		 * 10*(100 Kbps)/Mbps / rate (100 Kbps)*/
		t_r = rx_status->mactime + (24 * 8 * 10 / rate);
	}

	/* Timing offset calculation (see 13.13.2.2.2) */
	t_t = le64_to_cpu(mgmt->u.beacon.timestamp);
	sta->t_offset = t_t - t_r;

	if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
		s64 t_clockdrift = sta->t_offset_setpoint - sta->t_offset;

		msync_dbg(sdata,
			  "STA %pM : sta->t_offset=%lld, sta->t_offset_setpoint=%lld, t_clockdrift=%lld\n",
			  sta->sta.addr,
			  (long long) sta->t_offset,
			  (long long) sta->t_offset_setpoint,
			  (long long) t_clockdrift);

		/* a jump beyond TOFFSET_MAXIMUM_ADJUSTMENT invalidates the
		 * setpoint (e.g. the neighbor restarted and its TSF reset) */
		if (t_clockdrift > TOFFSET_MAXIMUM_ADJUSTMENT ||
		    t_clockdrift < -TOFFSET_MAXIMUM_ADJUSTMENT) {
			msync_dbg(sdata,
				  "STA %pM : t_clockdrift=%lld too large, setpoint reset\n",
				  sta->sta.addr,
				  (long long) t_clockdrift);
			clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
			goto no_sync;
		}

		rcu_read_unlock();

		/* remember only the worst (largest) drift seen this period */
		spin_lock_bh(&ifmsh->sync_offset_lock);
		if (t_clockdrift > ifmsh->sync_offset_clockdrift_max)
			ifmsh->sync_offset_clockdrift_max = t_clockdrift;
		spin_unlock_bh(&ifmsh->sync_offset_lock);
	} else {
		/* first usable observation: establish the setpoint */
		sta->t_offset_setpoint = sta->t_offset - TOFFSET_SET_MARGIN;
		set_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
		msync_dbg(sdata,
			  "STA %pM : offset was invalid, sta->t_offset=%lld\n",
			  sta->sta.addr,
			  (long long) sta->t_offset);
		rcu_read_unlock();
	}

	return;

no_sync:
	rcu_read_unlock();
}

/* adjust_tbtt hook for the neighbor-offset method: if the accumulated
 * drift exceeds the minimum threshold, defer the (possibly blocking)
 * TSF write to the mesh tasklet via MESH_WORK_DRIFT_ADJUST. */
static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;

	WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET);
	BUG_ON(!rcu_read_lock_held());

	spin_lock_bh(&ifmsh->sync_offset_lock);

	if (ifmsh->sync_offset_clockdrift_max > TOFFSET_MINIMUM_ADJUSTMENT) {
		/* Since ajusting the tsf here would
		 * require a possibly blocking call
		 * to the driver tsf setter, we punt
		 * the tsf adjustment to the mesh tasklet
		 */
		msync_dbg(sdata,
			  "TBTT : kicking off TBTT adjustment with clockdrift_max=%lld\n",
			  ifmsh->sync_offset_clockdrift_max);
		set_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags);
	} else {
		msync_dbg(sdata,
			  "TBTT : max clockdrift=%lld; too small to adjust\n",
			  (long long)ifmsh->sync_offset_clockdrift_max);
		ifmsh->sync_offset_clockdrift_max = 0;
	}

	spin_unlock_bh(&ifmsh->sync_offset_lock);
}

/* Return a pointer to the OUI inside the first vendor IE configured on
 * this mesh interface, or NULL if there is none.  The +2 skips the
 * element ID and length bytes of the vendor IE. */
static const u8 *mesh_get_vendor_oui(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	u8 offset;

	if (!ifmsh->ie || !ifmsh->ie_len)
		return NULL;

	offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0);

	if (!offset)
		return NULL;

	return ifmsh->ie + offset + 2;
}

/* Stub rx_bcn_presp hook for vendor-specific synchronization. */
static void mesh_sync_vendor_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
					  u16 stype,
					  struct ieee80211_mgmt *mgmt,
					  struct ieee802_11_elems *elems,
					  struct ieee80211_rx_status *rx_status)
{
	const u8 *oui;

	WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR);
	msync_dbg(sdata, "called mesh_sync_vendor_rx_bcn_presp\n");
	oui = mesh_get_vendor_oui(sdata);
	/* here you would implement the vendor offset tracking for this oui */
}

/* Stub adjust_tbtt hook for vendor-specific synchronization. */
static void mesh_sync_vendor_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
{
	const u8 *oui;

	WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR);
	msync_dbg(sdata, "called mesh_sync_vendor_adjust_tbtt\n");
	oui = mesh_get_vendor_oui(sdata);
	/* here you would implement the vendor tsf adjustment for this oui */
}

/* global variable */
static struct sync_method sync_methods[] = {
	{
		.method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET,
		.ops = {
			.rx_bcn_presp = &mesh_sync_offset_rx_bcn_presp,
			.adjust_tbtt = &mesh_sync_offset_adjust_tbtt,
		}
	},
	{
		.method = IEEE80211_SYNC_METHOD_VENDOR,
		.ops = {
			.rx_bcn_presp = &mesh_sync_vendor_rx_bcn_presp,
			.adjust_tbtt = &mesh_sync_vendor_adjust_tbtt,
		}
	},
};

/* Look up the sync ops registered for @method; NULL if unknown. */
struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method)
{
	struct ieee80211_mesh_sync_ops *ops = NULL;
	u8 i;

	for (i = 0 ; i < ARRAY_SIZE(sync_methods); ++i) {
		if (sync_methods[i].method == method) {
			ops = &sync_methods[i].ops;
			break;
		}
	}
	return ops;
}
gpl-2.0
kimjh-sane/imx6sane-linux-3.14.28
drivers/scsi/esas2r/esas2r_init.c
325
47510
/* * linux/drivers/scsi/esas2r/esas2r_init.c * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers * * Copyright (c) 2001-2013 ATTO Technology, Inc. * (mailto:linuxdrivers@attotech.com)mpt3sas/mpt3sas_trigger_diag. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations. 
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
 * USA.
 */

#include "esas2r.h"

/*
 * Allocate a zeroed DMA-coherent buffer of mem_desc->size bytes aligned
 * to @align.  The raw allocation (size + align) is kept in esas2r_data /
 * esas2r_param so esas2r_initmem_free() can undo the alignment fixups;
 * virt_addr/phys_addr are the aligned addresses handed to callers.
 * Returns true on success.
 */
static bool esas2r_initmem_alloc(struct esas2r_adapter *a,
				 struct esas2r_mem_desc *mem_desc,
				 u32 align)
{
	mem_desc->esas2r_param = mem_desc->size + align;
	mem_desc->virt_addr = NULL;
	mem_desc->phys_addr = 0;
	mem_desc->esas2r_data = dma_alloc_coherent(&a->pcid->dev,
						   (size_t)mem_desc->
						   esas2r_param,
						   (dma_addr_t *)&mem_desc->
						   phys_addr,
						   GFP_KERNEL);

	if (mem_desc->esas2r_data == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "failed to allocate %lu bytes of consistent memory!",
			   (long unsigned int)mem_desc->esas2r_param);
		return false;
	}

	/* round both addresses up to the requested alignment */
	mem_desc->virt_addr = PTR_ALIGN(mem_desc->esas2r_data, align);
	mem_desc->phys_addr = ALIGN(mem_desc->phys_addr, align);
	memset(mem_desc->virt_addr, 0, mem_desc->size);
	return true;
}

/* Free a buffer obtained from esas2r_initmem_alloc(); no-op if it was
 * never allocated (virt_addr == NULL). */
static void esas2r_initmem_free(struct esas2r_adapter *a,
				struct esas2r_mem_desc *mem_desc)
{
	if (mem_desc->virt_addr == NULL)
		return;

	/*
	 * Careful!  phys_addr and virt_addr may have been adjusted from the
	 * original allocation in order to return the desired alignment.  That
	 * means we have to use the original address (in esas2r_data) and size
	 * (esas2r_param) and calculate the original physical address based on
	 * the difference between the requested and actual allocation size.
*/ if (mem_desc->phys_addr) { int unalign = ((u8 *)mem_desc->virt_addr) - ((u8 *)mem_desc->esas2r_data); dma_free_coherent(&a->pcid->dev, (size_t)mem_desc->esas2r_param, mem_desc->esas2r_data, (dma_addr_t)(mem_desc->phys_addr - unalign)); } else { kfree(mem_desc->esas2r_data); } mem_desc->virt_addr = NULL; } static bool alloc_vda_req(struct esas2r_adapter *a, struct esas2r_request *rq) { struct esas2r_mem_desc *memdesc = kzalloc( sizeof(struct esas2r_mem_desc), GFP_KERNEL); if (memdesc == NULL) { esas2r_hdebug("could not alloc mem for vda request memdesc\n"); return false; } memdesc->size = sizeof(union atto_vda_req) + ESAS2R_DATA_BUF_LEN; if (!esas2r_initmem_alloc(a, memdesc, 256)) { esas2r_hdebug("could not alloc mem for vda request\n"); kfree(memdesc); return false; } a->num_vrqs++; list_add(&memdesc->next_desc, &a->vrq_mds_head); rq->vrq_md = memdesc; rq->vrq = (union atto_vda_req *)memdesc->virt_addr; rq->vrq->scsi.handle = a->num_vrqs; return true; } static void esas2r_unmap_regions(struct esas2r_adapter *a) { if (a->regs) iounmap((void __iomem *)a->regs); a->regs = NULL; pci_release_region(a->pcid, 2); if (a->data_window) iounmap((void __iomem *)a->data_window); a->data_window = NULL; pci_release_region(a->pcid, 0); } static int esas2r_map_regions(struct esas2r_adapter *a) { int error; a->regs = NULL; a->data_window = NULL; error = pci_request_region(a->pcid, 2, a->name); if (error != 0) { esas2r_log(ESAS2R_LOG_CRIT, "pci_request_region(2) failed, error %d", error); return error; } a->regs = (void __force *)ioremap(pci_resource_start(a->pcid, 2), pci_resource_len(a->pcid, 2)); if (a->regs == NULL) { esas2r_log(ESAS2R_LOG_CRIT, "ioremap failed for regs mem region\n"); pci_release_region(a->pcid, 2); return -EFAULT; } error = pci_request_region(a->pcid, 0, a->name); if (error != 0) { esas2r_log(ESAS2R_LOG_CRIT, "pci_request_region(2) failed, error %d", error); esas2r_unmap_regions(a); return error; } a->data_window = (void __force 
*)ioremap(pci_resource_start(a->pcid, 0), pci_resource_len(a->pcid, 0)); if (a->data_window == NULL) { esas2r_log(ESAS2R_LOG_CRIT, "ioremap failed for data_window mem region\n"); esas2r_unmap_regions(a); return -EFAULT; } return 0; } static void esas2r_setup_interrupts(struct esas2r_adapter *a, int intr_mode) { int i; /* Set up interrupt mode based on the requested value */ switch (intr_mode) { case INTR_MODE_LEGACY: use_legacy_interrupts: a->intr_mode = INTR_MODE_LEGACY; break; case INTR_MODE_MSI: i = pci_enable_msi(a->pcid); if (i != 0) { esas2r_log(ESAS2R_LOG_WARN, "failed to enable MSI for adapter %d, " "falling back to legacy interrupts " "(err=%d)", a->index, i); goto use_legacy_interrupts; } a->intr_mode = INTR_MODE_MSI; set_bit(AF2_MSI_ENABLED, &a->flags2); break; default: esas2r_log(ESAS2R_LOG_WARN, "unknown interrupt_mode %d requested, " "falling back to legacy interrupt", interrupt_mode); goto use_legacy_interrupts; } } static void esas2r_claim_interrupts(struct esas2r_adapter *a) { unsigned long flags = IRQF_DISABLED; if (a->intr_mode == INTR_MODE_LEGACY) flags |= IRQF_SHARED; esas2r_log(ESAS2R_LOG_INFO, "esas2r_claim_interrupts irq=%d (%p, %s, %x)", a->pcid->irq, a, a->name, flags); if (request_irq(a->pcid->irq, (a->intr_mode == INTR_MODE_LEGACY) ? 
esas2r_interrupt : esas2r_msi_interrupt, flags, a->name, a)) { esas2r_log(ESAS2R_LOG_CRIT, "unable to request IRQ %02X", a->pcid->irq); return; } set_bit(AF2_IRQ_CLAIMED, &a->flags2); esas2r_log(ESAS2R_LOG_INFO, "claimed IRQ %d flags: 0x%lx", a->pcid->irq, flags); } int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid, int index) { struct esas2r_adapter *a; u64 bus_addr = 0; int i; void *next_uncached; struct esas2r_request *first_request, *last_request; if (index >= MAX_ADAPTERS) { esas2r_log(ESAS2R_LOG_CRIT, "tried to init invalid adapter index %u!", index); return 0; } if (esas2r_adapters[index]) { esas2r_log(ESAS2R_LOG_CRIT, "tried to init existing adapter index %u!", index); return 0; } a = (struct esas2r_adapter *)host->hostdata; memset(a, 0, sizeof(struct esas2r_adapter)); a->pcid = pcid; a->host = host; if (sizeof(dma_addr_t) > 4) { const uint64_t required_mask = dma_get_required_mask (&pcid->dev); if (required_mask > DMA_BIT_MASK(32) && !pci_set_dma_mask(pcid, DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(pcid, DMA_BIT_MASK(64))) { esas2r_log_dev(ESAS2R_LOG_INFO, &(a->pcid->dev), "64-bit PCI addressing enabled\n"); } else if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32)) && !pci_set_consistent_dma_mask(pcid, DMA_BIT_MASK(32))) { esas2r_log_dev(ESAS2R_LOG_INFO, &(a->pcid->dev), "32-bit PCI addressing enabled\n"); } else { esas2r_log(ESAS2R_LOG_CRIT, "failed to set DMA mask"); esas2r_kill_adapter(index); return 0; } } else { if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32)) && !pci_set_consistent_dma_mask(pcid, DMA_BIT_MASK(32))) { esas2r_log_dev(ESAS2R_LOG_INFO, &(a->pcid->dev), "32-bit PCI addressing enabled\n"); } else { esas2r_log(ESAS2R_LOG_CRIT, "failed to set DMA mask"); esas2r_kill_adapter(index); return 0; } } esas2r_adapters[index] = a; sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index); esas2r_debug("new adapter %p, name %s", a, a->name); spin_lock_init(&a->request_lock); spin_lock_init(&a->fw_event_lock); 
sema_init(&a->fm_api_semaphore, 1); sema_init(&a->fs_api_semaphore, 1); sema_init(&a->nvram_semaphore, 1); esas2r_fw_event_off(a); snprintf(a->fw_event_q_name, ESAS2R_KOBJ_NAME_LEN, "esas2r/%d", a->index); a->fw_event_q = create_singlethread_workqueue(a->fw_event_q_name); init_waitqueue_head(&a->buffered_ioctl_waiter); init_waitqueue_head(&a->nvram_waiter); init_waitqueue_head(&a->fm_api_waiter); init_waitqueue_head(&a->fs_api_waiter); init_waitqueue_head(&a->vda_waiter); INIT_LIST_HEAD(&a->general_req.req_list); INIT_LIST_HEAD(&a->active_list); INIT_LIST_HEAD(&a->defer_list); INIT_LIST_HEAD(&a->free_sg_list_head); INIT_LIST_HEAD(&a->avail_request); INIT_LIST_HEAD(&a->vrq_mds_head); INIT_LIST_HEAD(&a->fw_event_list); first_request = (struct esas2r_request *)((u8 *)(a + 1)); for (last_request = first_request, i = 1; i < num_requests; last_request++, i++) { INIT_LIST_HEAD(&last_request->req_list); list_add_tail(&last_request->comp_list, &a->avail_request); if (!alloc_vda_req(a, last_request)) { esas2r_log(ESAS2R_LOG_CRIT, "failed to allocate a VDA request!"); esas2r_kill_adapter(index); return 0; } } esas2r_debug("requests: %p to %p (%d, %d)", first_request, last_request, sizeof(*first_request), num_requests); if (esas2r_map_regions(a) != 0) { esas2r_log(ESAS2R_LOG_CRIT, "could not map PCI regions!"); esas2r_kill_adapter(index); return 0; } a->index = index; /* interrupts will be disabled until we are done with init */ atomic_inc(&a->dis_ints_cnt); atomic_inc(&a->disable_cnt); set_bit(AF_CHPRST_PENDING, &a->flags); set_bit(AF_DISC_PENDING, &a->flags); set_bit(AF_FIRST_INIT, &a->flags); set_bit(AF_LEGACY_SGE_MODE, &a->flags); a->init_msg = ESAS2R_INIT_MSG_START; a->max_vdareq_size = 128; a->build_sgl = esas2r_build_sg_list_sge; esas2r_setup_interrupts(a, interrupt_mode); a->uncached_size = esas2r_get_uncached_size(a); a->uncached = dma_alloc_coherent(&pcid->dev, (size_t)a->uncached_size, (dma_addr_t *)&bus_addr, GFP_KERNEL); if (a->uncached == NULL) { 
esas2r_log(ESAS2R_LOG_CRIT, "failed to allocate %d bytes of consistent memory!", a->uncached_size); esas2r_kill_adapter(index); return 0; } a->uncached_phys = bus_addr; esas2r_debug("%d bytes uncached memory allocated @ %p (%x:%x)", a->uncached_size, a->uncached, upper_32_bits(bus_addr), lower_32_bits(bus_addr)); memset(a->uncached, 0, a->uncached_size); next_uncached = a->uncached; if (!esas2r_init_adapter_struct(a, &next_uncached)) { esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize adapter structure (2)!"); esas2r_kill_adapter(index); return 0; } tasklet_init(&a->tasklet, esas2r_adapter_tasklet, (unsigned long)a); /* * Disable chip interrupts to prevent spurious interrupts * until we claim the IRQ. */ esas2r_disable_chip_interrupts(a); esas2r_check_adapter(a); if (!esas2r_init_adapter_hw(a, true)) esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize hardware!"); else esas2r_debug("esas2r_init_adapter ok"); esas2r_claim_interrupts(a); if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) esas2r_enable_chip_interrupts(a); set_bit(AF2_INIT_DONE, &a->flags2); if (!test_bit(AF_DEGRADED_MODE, &a->flags)) esas2r_kickoff_timer(a); esas2r_debug("esas2r_init_adapter done for %p (%d)", a, a->disable_cnt); return 1; } static void esas2r_adapter_power_down(struct esas2r_adapter *a, int power_management) { struct esas2r_mem_desc *memdesc, *next; if ((test_bit(AF2_INIT_DONE, &a->flags2)) && (!test_bit(AF_DEGRADED_MODE, &a->flags))) { if (!power_management) { del_timer_sync(&a->timer); tasklet_kill(&a->tasklet); } esas2r_power_down(a); /* * There are versions of firmware that do not handle the sync * cache command correctly. Stall here to ensure that the * cache is lazily flushed. 
*/ mdelay(500); esas2r_debug("chip halted"); } /* Remove sysfs binary files */ if (a->sysfs_fw_created) { sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fw); a->sysfs_fw_created = 0; } if (a->sysfs_fs_created) { sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fs); a->sysfs_fs_created = 0; } if (a->sysfs_vda_created) { sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_vda); a->sysfs_vda_created = 0; } if (a->sysfs_hw_created) { sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_hw); a->sysfs_hw_created = 0; } if (a->sysfs_live_nvram_created) { sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_live_nvram); a->sysfs_live_nvram_created = 0; } if (a->sysfs_default_nvram_created) { sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_default_nvram); a->sysfs_default_nvram_created = 0; } /* Clean up interrupts */ if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) { esas2r_log_dev(ESAS2R_LOG_INFO, &(a->pcid->dev), "free_irq(%d) called", a->pcid->irq); free_irq(a->pcid->irq, a); esas2r_debug("IRQ released"); clear_bit(AF2_IRQ_CLAIMED, &a->flags2); } if (test_bit(AF2_MSI_ENABLED, &a->flags2)) { pci_disable_msi(a->pcid); clear_bit(AF2_MSI_ENABLED, &a->flags2); esas2r_debug("MSI disabled"); } if (a->inbound_list_md.virt_addr) esas2r_initmem_free(a, &a->inbound_list_md); if (a->outbound_list_md.virt_addr) esas2r_initmem_free(a, &a->outbound_list_md); list_for_each_entry_safe(memdesc, next, &a->free_sg_list_head, next_desc) { esas2r_initmem_free(a, memdesc); } /* Following frees everything allocated via alloc_vda_req */ list_for_each_entry_safe(memdesc, next, &a->vrq_mds_head, next_desc) { esas2r_initmem_free(a, memdesc); list_del(&memdesc->next_desc); kfree(memdesc); } kfree(a->first_ae_req); a->first_ae_req = NULL; kfree(a->sg_list_mds); a->sg_list_mds = NULL; kfree(a->req_table); a->req_table = NULL; if (a->regs) { esas2r_unmap_regions(a); a->regs = NULL; a->data_window = NULL; esas2r_debug("regions unmapped"); } } /* Release/free 
allocated resources for specified adapters. */ void esas2r_kill_adapter(int i) { struct esas2r_adapter *a = esas2r_adapters[i]; if (a) { unsigned long flags; struct workqueue_struct *wq; esas2r_debug("killing adapter %p [%d] ", a, i); esas2r_fw_event_off(a); esas2r_adapter_power_down(a, 0); if (esas2r_buffered_ioctl && (a->pcid == esas2r_buffered_ioctl_pcid)) { dma_free_coherent(&a->pcid->dev, (size_t)esas2r_buffered_ioctl_size, esas2r_buffered_ioctl, esas2r_buffered_ioctl_addr); esas2r_buffered_ioctl = NULL; } if (a->vda_buffer) { dma_free_coherent(&a->pcid->dev, (size_t)VDA_MAX_BUFFER_SIZE, a->vda_buffer, (dma_addr_t)a->ppvda_buffer); a->vda_buffer = NULL; } if (a->fs_api_buffer) { dma_free_coherent(&a->pcid->dev, (size_t)a->fs_api_buffer_size, a->fs_api_buffer, (dma_addr_t)a->ppfs_api_buffer); a->fs_api_buffer = NULL; } kfree(a->local_atto_ioctl); a->local_atto_ioctl = NULL; spin_lock_irqsave(&a->fw_event_lock, flags); wq = a->fw_event_q; a->fw_event_q = NULL; spin_unlock_irqrestore(&a->fw_event_lock, flags); if (wq) destroy_workqueue(wq); if (a->uncached) { dma_free_coherent(&a->pcid->dev, (size_t)a->uncached_size, a->uncached, (dma_addr_t)a->uncached_phys); a->uncached = NULL; esas2r_debug("uncached area freed"); } esas2r_log_dev(ESAS2R_LOG_INFO, &(a->pcid->dev), "pci_disable_device() called. 
msix_enabled: %d " "msi_enabled: %d irq: %d pin: %d", a->pcid->msix_enabled, a->pcid->msi_enabled, a->pcid->irq, a->pcid->pin); esas2r_log_dev(ESAS2R_LOG_INFO, &(a->pcid->dev), "before pci_disable_device() enable_cnt: %d", a->pcid->enable_cnt.counter); pci_disable_device(a->pcid); esas2r_log_dev(ESAS2R_LOG_INFO, &(a->pcid->dev), "after pci_disable_device() enable_cnt: %d", a->pcid->enable_cnt.counter); esas2r_log_dev(ESAS2R_LOG_INFO, &(a->pcid->dev), "pci_set_drv_data(%p, NULL) called", a->pcid); pci_set_drvdata(a->pcid, NULL); esas2r_adapters[i] = NULL; if (test_bit(AF2_INIT_DONE, &a->flags2)) { clear_bit(AF2_INIT_DONE, &a->flags2); set_bit(AF_DEGRADED_MODE, &a->flags); esas2r_log_dev(ESAS2R_LOG_INFO, &(a->host->shost_gendev), "scsi_remove_host() called"); scsi_remove_host(a->host); esas2r_log_dev(ESAS2R_LOG_INFO, &(a->host->shost_gendev), "scsi_host_put() called"); scsi_host_put(a->host); } } } int esas2r_cleanup(struct Scsi_Host *host) { struct esas2r_adapter *a; int index; if (host == NULL) { int i; esas2r_debug("esas2r_cleanup everything"); for (i = 0; i < MAX_ADAPTERS; i++) esas2r_kill_adapter(i); return -1; } esas2r_debug("esas2r_cleanup called for host %p", host); a = (struct esas2r_adapter *)host->hostdata; index = a->index; esas2r_kill_adapter(index); return index; } int esas2r_suspend(struct pci_dev *pdev, pm_message_t state) { struct Scsi_Host *host = pci_get_drvdata(pdev); u32 device_state; struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata; esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "suspending adapter()"); if (!a) return -ENODEV; esas2r_adapter_power_down(a, 1); device_state = pci_choose_state(pdev, state); esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "pci_save_state() called"); pci_save_state(pdev); esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "pci_disable_device() called"); pci_disable_device(pdev); esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "pci_set_power_state() called"); pci_set_power_state(pdev, device_state); 
esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "esas2r_suspend(): 0"); return 0; } int esas2r_resume(struct pci_dev *pdev) { struct Scsi_Host *host = pci_get_drvdata(pdev); struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata; int rez; esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "resuming adapter()"); esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "pci_set_power_state(PCI_D0) " "called"); pci_set_power_state(pdev, PCI_D0); esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "pci_enable_wake(PCI_D0, 0) " "called"); pci_enable_wake(pdev, PCI_D0, 0); esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "pci_restore_state() called"); pci_restore_state(pdev); esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "pci_enable_device() called"); rez = pci_enable_device(pdev); pci_set_master(pdev); if (!a) { rez = -ENODEV; goto error_exit; } if (esas2r_map_regions(a) != 0) { esas2r_log(ESAS2R_LOG_CRIT, "could not re-map PCI regions!"); rez = -ENOMEM; goto error_exit; } /* Set up interupt mode */ esas2r_setup_interrupts(a, a->intr_mode); /* * Disable chip interrupts to prevent spurious interrupts until we * claim the IRQ. */ esas2r_disable_chip_interrupts(a); if (!esas2r_power_up(a, true)) { esas2r_debug("yikes, esas2r_power_up failed"); rez = -ENOMEM; goto error_exit; } esas2r_claim_interrupts(a); if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) { /* * Now that system interrupt(s) are claimed, we can enable * chip interrupts. 
	 */
		esas2r_enable_chip_interrupts(a);
		esas2r_kickoff_timer(a);
	} else {
		esas2r_debug("yikes, unable to claim IRQ");
		esas2r_log(ESAS2R_LOG_CRIT, "could not re-claim IRQ!");
		rez = -ENOMEM;
		goto error_exit;
	}

error_exit:
	esas2r_log_dev(ESAS2R_LOG_CRIT, &(pdev->dev), "esas2r_resume(): %d",
		       rez);
	return rez;
}

/* Put the adapter into degraded mode and log why.  Always returns false
 * so callers on a bool path can 'return esas2r_set_degraded_mode(...)'. */
bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)
{
	set_bit(AF_DEGRADED_MODE, &a->flags);
	esas2r_log(ESAS2R_LOG_CRIT,
		   "setting adapter to degraded mode: %s\n", error_str);
	return false;
}

/* Total bytes of DMA-coherent ("uncached") memory the adapter needs;
 * must agree with the carving done in esas2r_init_adapter_struct(). */
u32 esas2r_get_uncached_size(struct esas2r_adapter *a)
{
	return sizeof(struct esas2r_sas_nvram)
	       + ALIGN(ESAS2R_DISC_BUF_LEN, 8)
	       + ALIGN(sizeof(u32), 8) /* outbound list copy pointer */
	       + 8
	       + (num_sg_lists * (u16)sgl_page_size)
	       + ALIGN((num_requests + num_ae_requests + 1 +
			ESAS2R_LIST_EXTRA) *
		       sizeof(struct esas2r_inbound_list_source_entry), 8)
	       + ALIGN((num_requests + num_ae_requests + 1 +
			ESAS2R_LIST_EXTRA) *
		       sizeof(struct atto_vda_ob_rsp), 8)
	       + 256; /* VDA request and buffer align */
}

/* Clamp the PCIe max read request size to 512 bytes if the device has a
 * PCIe capability and currently advertises a larger value. */
static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
{
	int pcie_cap_reg;

	pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
	if (pcie_cap_reg) {
		u16 devcontrol;

		pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
				     &devcontrol);

		/* 0x2000 in the READRQ field encodes a 512-byte MRRS */
		if ((devcontrol & PCI_EXP_DEVCTL_READRQ) > 0x2000) {
			esas2r_log(ESAS2R_LOG_INFO,
				   "max read request size > 512B");

			devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
			devcontrol |= 0x2000;
			pci_write_config_word(a->pcid,
					      pcie_cap_reg + PCI_EXP_DEVCTL,
					      devcontrol);
		}
	}
}

/*
 * Determine the organization of the uncached data area and
 * finish initializing the adapter structure
 */
bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
				void **uncached_area)
{
	u32 i;
	u8 *high;
	struct esas2r_inbound_list_source_entry *element;
	struct esas2r_request *rq;
	struct esas2r_mem_desc *sgl;

	spin_lock_init(&a->sg_list_lock);
	spin_lock_init(&a->mem_lock);
	spin_lock_init(&a->queue_lock);

	a->targetdb_end = &a->targetdb[ESAS2R_MAX_TARGETS];

	if
	    (!alloc_vda_req(a, &a->general_req)) {
		esas2r_hdebug(
			"failed to allocate a VDA request for the general req!");
		return false;
	}

	/* allocate requests for asynchronous events */
	a->first_ae_req =
		kzalloc(num_ae_requests * sizeof(struct esas2r_request),
			GFP_KERNEL);

	if (a->first_ae_req == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "failed to allocate memory for asynchronous events");
		return false;
	}

	/* allocate the S/G list memory descriptors */
	a->sg_list_mds = kzalloc(
		num_sg_lists * sizeof(struct esas2r_mem_desc), GFP_KERNEL);

	if (a->sg_list_mds == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "failed to allocate memory for s/g list descriptors");
		return false;
	}

	/* allocate the request table */
	a->req_table =
		kzalloc((num_requests + num_ae_requests + 1) *
			sizeof(struct esas2r_request *),
			GFP_KERNEL);

	if (a->req_table == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "failed to allocate memory for the request table");
		return false;
	}

	/* initialize PCI configuration space */
	esas2r_init_pci_cfg_space(a);

	/*
	 * the thunder_stream boards all have a serial flash part that has a
	 * different base address on the AHB bus.
	 */
	/* NOTE(review): these use direct '|=' on flags2 while the rest of
	 * the driver uses set_bit() -- presumably safe this early (single
	 * threaded init), but confirm */
	if ((a->pcid->subsystem_vendor == ATTO_VENDOR_ID)
	    && (a->pcid->subsystem_device & ATTO_SSDID_TBT))
		a->flags2 |= AF2_THUNDERBOLT;

	if (test_bit(AF2_THUNDERBOLT, &a->flags2))
		a->flags2 |= AF2_SERIAL_FLASH;

	if (a->pcid->subsystem_device == ATTO_TLSH_1068)
		a->flags2 |= AF2_THUNDERLINK;

	/* Uncached Area */
	high = (u8 *)*uncached_area;

	/* initialize the scatter/gather table pages */

	for (i = 0, sgl = a->sg_list_mds; i < num_sg_lists; i++, sgl++) {
		sgl->size = sgl_page_size;

		list_add_tail(&sgl->next_desc, &a->free_sg_list_head);

		if (!esas2r_initmem_alloc(a, sgl, ESAS2R_SGL_ALIGN)) {
			/* Allow the driver to load if the minimum count met.
*/ if (i < NUM_SGL_MIN) return false; break; } } /* compute the size of the lists */ a->list_size = num_requests + ESAS2R_LIST_EXTRA; /* allocate the inbound list */ a->inbound_list_md.size = a->list_size * sizeof(struct esas2r_inbound_list_source_entry); if (!esas2r_initmem_alloc(a, &a->inbound_list_md, ESAS2R_LIST_ALIGN)) { esas2r_hdebug("failed to allocate IB list"); return false; } /* allocate the outbound list */ a->outbound_list_md.size = a->list_size * sizeof(struct atto_vda_ob_rsp); if (!esas2r_initmem_alloc(a, &a->outbound_list_md, ESAS2R_LIST_ALIGN)) { esas2r_hdebug("failed to allocate IB list"); return false; } /* allocate the NVRAM structure */ a->nvram = (struct esas2r_sas_nvram *)high; high += sizeof(struct esas2r_sas_nvram); /* allocate the discovery buffer */ a->disc_buffer = high; high += ESAS2R_DISC_BUF_LEN; high = PTR_ALIGN(high, 8); /* allocate the outbound list copy pointer */ a->outbound_copy = (u32 volatile *)high; high += sizeof(u32); if (!test_bit(AF_NVR_VALID, &a->flags)) esas2r_nvram_set_defaults(a); /* update the caller's uncached memory area pointer */ *uncached_area = (void *)high; /* initialize the allocated memory */ if (test_bit(AF_FIRST_INIT, &a->flags)) { memset(a->req_table, 0, (num_requests + num_ae_requests + 1) * sizeof(struct esas2r_request *)); esas2r_targ_db_initialize(a); /* prime parts of the inbound list */ element = (struct esas2r_inbound_list_source_entry *)a-> inbound_list_md. 
virt_addr; for (i = 0; i < a->list_size; i++) { element->address = 0; element->reserved = 0; element->length = cpu_to_le32(HWILSE_INTERFACE_F0 | (sizeof(union atto_vda_req) / sizeof(u32))); element++; } /* init the AE requests */ for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++, i++) { INIT_LIST_HEAD(&rq->req_list); if (!alloc_vda_req(a, rq)) { esas2r_hdebug( "failed to allocate a VDA request!"); return false; } esas2r_rq_init_request(rq, a); /* override the completion function */ rq->comp_cb = esas2r_ae_complete; } } return true; } /* This code will verify that the chip is operational. */ bool esas2r_check_adapter(struct esas2r_adapter *a) { u32 starttime; u32 doorbell; u64 ppaddr; u32 dw; /* * if the chip reset detected flag is set, we can bypass a bunch of * stuff. */ if (test_bit(AF_CHPRST_DETECTED, &a->flags)) goto skip_chip_reset; /* * BEFORE WE DO ANYTHING, disable the chip interrupts! the boot driver * may have left them enabled or we may be recovering from a fault. */ esas2r_write_register_dword(a, MU_INT_MASK_OUT, ESAS2R_INT_DIS_MASK); esas2r_flush_register_dword(a, MU_INT_MASK_OUT); /* * wait for the firmware to become ready by forcing an interrupt and * waiting for a response. */ starttime = jiffies_to_msecs(jiffies); while (true) { esas2r_force_interrupt(a); doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); if (doorbell == 0xFFFFFFFF) { /* * Give the firmware up to two seconds to enable * register access after a reset. 
*/ if ((jiffies_to_msecs(jiffies) - starttime) > 2000) return esas2r_set_degraded_mode(a, "unable to access registers"); } else if (doorbell & DRBL_FORCE_INT) { u32 ver = (doorbell & DRBL_FW_VER_MSK); /* * This driver supports version 0 and version 1 of * the API */ esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell); if (ver == DRBL_FW_VER_0) { set_bit(AF_LEGACY_SGE_MODE, &a->flags); a->max_vdareq_size = 128; a->build_sgl = esas2r_build_sg_list_sge; } else if (ver == DRBL_FW_VER_1) { clear_bit(AF_LEGACY_SGE_MODE, &a->flags); a->max_vdareq_size = 1024; a->build_sgl = esas2r_build_sg_list_prd; } else { return esas2r_set_degraded_mode(a, "unknown firmware version"); } break; } schedule_timeout_interruptible(msecs_to_jiffies(100)); if ((jiffies_to_msecs(jiffies) - starttime) > 180000) { esas2r_hdebug("FW ready TMO"); esas2r_bugon(); return esas2r_set_degraded_mode(a, "firmware start has timed out"); } } /* purge any asynchronous events since we will repost them later */ esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN); starttime = jiffies_to_msecs(jiffies); while (true) { doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); if (doorbell & DRBL_MSG_IFC_DOWN) { esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell); break; } schedule_timeout_interruptible(msecs_to_jiffies(50)); if ((jiffies_to_msecs(jiffies) - starttime) > 3000) { esas2r_hdebug("timeout waiting for interface down"); break; } } skip_chip_reset: /* * first things first, before we go changing any of these registers * disable the communication lists. 
*/ dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG); dw &= ~MU_ILC_ENABLE; esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw); dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG); dw &= ~MU_OLC_ENABLE; esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw); /* configure the communication list addresses */ ppaddr = a->inbound_list_md.phys_addr; esas2r_write_register_dword(a, MU_IN_LIST_ADDR_LO, lower_32_bits(ppaddr)); esas2r_write_register_dword(a, MU_IN_LIST_ADDR_HI, upper_32_bits(ppaddr)); ppaddr = a->outbound_list_md.phys_addr; esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_LO, lower_32_bits(ppaddr)); esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_HI, upper_32_bits(ppaddr)); ppaddr = a->uncached_phys + ((u8 *)a->outbound_copy - a->uncached); esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_LO, lower_32_bits(ppaddr)); esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_HI, upper_32_bits(ppaddr)); /* reset the read and write pointers */ *a->outbound_copy = a->last_write = a->last_read = a->list_size - 1; set_bit(AF_COMM_LIST_TOGGLE, &a->flags); esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE | a->last_write); esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE | a->last_write); esas2r_write_register_dword(a, MU_IN_LIST_READ, MU_ILR_TOGGLE | a->last_write); esas2r_write_register_dword(a, MU_OUT_LIST_WRITE, MU_OLW_TOGGLE | a->last_write); /* configure the interface select fields */ dw = esas2r_read_register_dword(a, MU_IN_LIST_IFC_CONFIG); dw &= ~(MU_ILIC_LIST | MU_ILIC_DEST); esas2r_write_register_dword(a, MU_IN_LIST_IFC_CONFIG, (dw | MU_ILIC_LIST_F0 | MU_ILIC_DEST_DDR)); dw = esas2r_read_register_dword(a, MU_OUT_LIST_IFC_CONFIG); dw &= ~(MU_OLIC_LIST | MU_OLIC_SOURCE); esas2r_write_register_dword(a, MU_OUT_LIST_IFC_CONFIG, (dw | MU_OLIC_LIST_F0 | MU_OLIC_SOURCE_DDR)); /* finish configuring the communication lists */ dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG); dw &= ~(MU_ILC_ENTRY_MASK | MU_ILC_NUMBER_MASK); dw |= 
MU_ILC_ENTRY_4_DW | MU_ILC_DYNAMIC_SRC | (a->list_size << MU_ILC_NUMBER_SHIFT); esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw); dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG); dw &= ~(MU_OLC_ENTRY_MASK | MU_OLC_NUMBER_MASK); dw |= MU_OLC_ENTRY_4_DW | (a->list_size << MU_OLC_NUMBER_SHIFT); esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw); /* * notify the firmware that we're done setting up the communication * list registers. wait here until the firmware is done configuring * its lists. it will signal that it is done by enabling the lists. */ esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_INIT); starttime = jiffies_to_msecs(jiffies); while (true) { doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); if (doorbell & DRBL_MSG_IFC_INIT) { esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell); break; } schedule_timeout_interruptible(msecs_to_jiffies(100)); if ((jiffies_to_msecs(jiffies) - starttime) > 3000) { esas2r_hdebug( "timeout waiting for communication list init"); esas2r_bugon(); return esas2r_set_degraded_mode(a, "timeout waiting for communication list init"); } } /* * flag whether the firmware supports the power down doorbell. we * determine this by reading the inbound doorbell enable mask. */ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB); if (doorbell & DRBL_POWER_DOWN) set_bit(AF2_VDA_POWER_DOWN, &a->flags2); else clear_bit(AF2_VDA_POWER_DOWN, &a->flags2); /* * enable assertion of outbound queue and doorbell interrupts in the * main interrupt cause register. */ esas2r_write_register_dword(a, MU_OUT_LIST_INT_MASK, MU_OLIS_MASK); esas2r_write_register_dword(a, MU_DOORBELL_OUT_ENB, DRBL_ENB_MASK); return true; } /* Process the initialization message just completed and format the next one. 
*/ static bool esas2r_format_init_msg(struct esas2r_adapter *a, struct esas2r_request *rq) { u32 msg = a->init_msg; struct atto_vda_cfg_init *ci; a->init_msg = 0; switch (msg) { case ESAS2R_INIT_MSG_START: case ESAS2R_INIT_MSG_REINIT: { struct timeval now; do_gettimeofday(&now); esas2r_hdebug("CFG init"); esas2r_build_cfg_req(a, rq, VDA_CFG_INIT, 0, NULL); ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init; ci->sgl_page_size = cpu_to_le32(sgl_page_size); ci->epoch_time = cpu_to_le32(now.tv_sec); rq->flags |= RF_FAILURE_OK; a->init_msg = ESAS2R_INIT_MSG_INIT; break; } case ESAS2R_INIT_MSG_INIT: if (rq->req_stat == RS_SUCCESS) { u32 major; u32 minor; u16 fw_release; a->fw_version = le16_to_cpu( rq->func_rsp.cfg_rsp.vda_version); a->fw_build = rq->func_rsp.cfg_rsp.fw_build; fw_release = le16_to_cpu( rq->func_rsp.cfg_rsp.fw_release); major = LOBYTE(fw_release); minor = HIBYTE(fw_release); a->fw_version += (major << 16) + (minor << 24); } else { esas2r_hdebug("FAILED"); } /* * the 2.71 and earlier releases of R6xx firmware did not error * unsupported config requests correctly. 
*/ if ((test_bit(AF2_THUNDERBOLT, &a->flags2)) || (be32_to_cpu(a->fw_version) > 0x00524702)) { esas2r_hdebug("CFG get init"); esas2r_build_cfg_req(a, rq, VDA_CFG_GET_INIT2, sizeof(struct atto_vda_cfg_init), NULL); rq->vrq->cfg.sg_list_offset = offsetof( struct atto_vda_cfg_req, data.sge); rq->vrq->cfg.data.prde.ctl_len = cpu_to_le32(sizeof(struct atto_vda_cfg_init)); rq->vrq->cfg.data.prde.address = cpu_to_le64( rq->vrq_md->phys_addr + sizeof(union atto_vda_req)); rq->flags |= RF_FAILURE_OK; a->init_msg = ESAS2R_INIT_MSG_GET_INIT; break; } case ESAS2R_INIT_MSG_GET_INIT: if (msg == ESAS2R_INIT_MSG_GET_INIT) { ci = (struct atto_vda_cfg_init *)rq->data_buf; if (rq->req_stat == RS_SUCCESS) { a->num_targets_backend = le32_to_cpu(ci->num_targets_backend); a->ioctl_tunnel = le32_to_cpu(ci->ioctl_tunnel); } else { esas2r_hdebug("FAILED"); } } /* fall through */ default: rq->req_stat = RS_SUCCESS; return false; } return true; } /* * Perform initialization messages via the request queue. Messages are * performed with interrupts disabled. 
*/ bool esas2r_init_msgs(struct esas2r_adapter *a) { bool success = true; struct esas2r_request *rq = &a->general_req; esas2r_rq_init_request(rq, a); rq->comp_cb = esas2r_dummy_complete; if (a->init_msg == 0) a->init_msg = ESAS2R_INIT_MSG_REINIT; while (a->init_msg) { if (esas2r_format_init_msg(a, rq)) { unsigned long flags; while (true) { spin_lock_irqsave(&a->queue_lock, flags); esas2r_start_vda_request(a, rq); spin_unlock_irqrestore(&a->queue_lock, flags); esas2r_wait_request(a, rq); if (rq->req_stat != RS_PENDING) break; } } if (rq->req_stat == RS_SUCCESS || ((rq->flags & RF_FAILURE_OK) && rq->req_stat != RS_TIMEOUT)) continue; esas2r_log(ESAS2R_LOG_CRIT, "init message %x failed (%x, %x)", a->init_msg, rq->req_stat, rq->flags); a->init_msg = ESAS2R_INIT_MSG_START; success = false; break; } esas2r_rq_destroy_request(rq, a); return success; } /* Initialize the adapter chip */ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll) { bool rslt = false; struct esas2r_request *rq; u32 i; if (test_bit(AF_DEGRADED_MODE, &a->flags)) goto exit; if (!test_bit(AF_NVR_VALID, &a->flags)) { if (!esas2r_nvram_read_direct(a)) esas2r_log(ESAS2R_LOG_WARN, "invalid/missing NVRAM parameters"); } if (!esas2r_init_msgs(a)) { esas2r_set_degraded_mode(a, "init messages failed"); goto exit; } /* The firmware is ready. 
*/ clear_bit(AF_DEGRADED_MODE, &a->flags); clear_bit(AF_CHPRST_PENDING, &a->flags); /* Post all the async event requests */ for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++) esas2r_start_ae_request(a, rq); if (!a->flash_rev[0]) esas2r_read_flash_rev(a); if (!a->image_type[0]) esas2r_read_image_type(a); if (a->fw_version == 0) a->fw_rev[0] = 0; else sprintf(a->fw_rev, "%1d.%02d", (int)LOBYTE(HIWORD(a->fw_version)), (int)HIBYTE(HIWORD(a->fw_version))); esas2r_hdebug("firmware revision: %s", a->fw_rev); if (test_bit(AF_CHPRST_DETECTED, &a->flags) && (test_bit(AF_FIRST_INIT, &a->flags))) { esas2r_enable_chip_interrupts(a); return true; } /* initialize discovery */ esas2r_disc_initialize(a); /* * wait for the device wait time to expire here if requested. this is * usually requested during initial driver load and possibly when * resuming from a low power state. deferred device waiting will use * interrupts. chip reset recovery always defers device waiting to * avoid being in a TASKLET too long. */ if (init_poll) { u32 currtime = a->disc_start_time; u32 nexttick = 100; u32 deltatime; /* * Block Tasklets from getting scheduled and indicate this is * polled discovery. */ set_bit(AF_TASKLET_SCHEDULED, &a->flags); set_bit(AF_DISC_POLLED, &a->flags); /* * Temporarily bring the disable count to zero to enable * deferred processing. Note that the count is already zero * after the first initialization. */ if (test_bit(AF_FIRST_INIT, &a->flags)) atomic_dec(&a->disable_cnt); while (test_bit(AF_DISC_PENDING, &a->flags)) { schedule_timeout_interruptible(msecs_to_jiffies(100)); /* * Determine the need for a timer tick based on the * delta time between this and the last iteration of * this loop. We don't use the absolute time because * then we would have to worry about when nexttick * wraps and currtime hasn't yet. */ deltatime = jiffies_to_msecs(jiffies) - currtime; currtime += deltatime; /* * Process any waiting discovery as long as the chip is * up. 
If a chip reset happens during initial polling, * we have to make sure the timer tick processes the * doorbell indicating the firmware is ready. */ if (!test_bit(AF_CHPRST_PENDING, &a->flags)) esas2r_disc_check_for_work(a); /* Simulate a timer tick. */ if (nexttick <= deltatime) { /* Time for a timer tick */ nexttick += 100; esas2r_timer_tick(a); } if (nexttick > deltatime) nexttick -= deltatime; /* Do any deferred processing */ if (esas2r_is_tasklet_pending(a)) esas2r_do_tasklet_tasks(a); } if (test_bit(AF_FIRST_INIT, &a->flags)) atomic_inc(&a->disable_cnt); clear_bit(AF_DISC_POLLED, &a->flags); clear_bit(AF_TASKLET_SCHEDULED, &a->flags); } esas2r_targ_db_report_changes(a); /* * For cases where (a) the initialization messages processing may * handle an interrupt for a port event and a discovery is waiting, but * we are not waiting for devices, or (b) the device wait time has been * exhausted but there is still discovery pending, start any leftover * discovery in interrupt driven mode. */ esas2r_disc_start_waiting(a); /* Enable chip interrupts */ a->int_mask = ESAS2R_INT_STS_MASK; esas2r_enable_chip_interrupts(a); esas2r_enable_heartbeat(a); rslt = true; exit: /* * Regardless of whether initialization was successful, certain things * need to get done before we exit. */ if (test_bit(AF_CHPRST_DETECTED, &a->flags) && test_bit(AF_FIRST_INIT, &a->flags)) { /* * Reinitialization was performed during the first * initialization. Only clear the chip reset flag so the * original device polling is not cancelled. */ if (!rslt) clear_bit(AF_CHPRST_PENDING, &a->flags); } else { /* First initialization or a subsequent re-init is complete. */ if (!rslt) { clear_bit(AF_CHPRST_PENDING, &a->flags); clear_bit(AF_DISC_PENDING, &a->flags); } /* Enable deferred processing after the first initialization. 
*/ if (test_bit(AF_FIRST_INIT, &a->flags)) { clear_bit(AF_FIRST_INIT, &a->flags); if (atomic_dec_return(&a->disable_cnt) == 0) esas2r_do_deferred_processes(a); } } return rslt; } void esas2r_reset_adapter(struct esas2r_adapter *a) { set_bit(AF_OS_RESET, &a->flags); esas2r_local_reset_adapter(a); esas2r_schedule_tasklet(a); } void esas2r_reset_chip(struct esas2r_adapter *a) { if (!esas2r_is_adapter_present(a)) return; /* * Before we reset the chip, save off the VDA core dump. The VDA core * dump is located in the upper 512KB of the onchip SRAM. Make sure * to not overwrite a previous crash that was saved. */ if (test_bit(AF2_COREDUMP_AVAIL, &a->flags2) && !test_bit(AF2_COREDUMP_SAVED, &a->flags2)) { esas2r_read_mem_block(a, a->fw_coredump_buff, MW_DATA_ADDR_SRAM + 0x80000, ESAS2R_FWCOREDUMP_SZ); set_bit(AF2_COREDUMP_SAVED, &a->flags2); } clear_bit(AF2_COREDUMP_AVAIL, &a->flags2); /* Reset the chip */ if (a->pcid->revision == MVR_FREY_B2) esas2r_write_register_dword(a, MU_CTL_STATUS_IN_B2, MU_CTL_IN_FULL_RST2); else esas2r_write_register_dword(a, MU_CTL_STATUS_IN, MU_CTL_IN_FULL_RST); /* Stall a little while to let the reset condition clear */ mdelay(10); } static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a) { u32 starttime; u32 doorbell; esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_POWER_DOWN); starttime = jiffies_to_msecs(jiffies); while (true) { doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); if (doorbell & DRBL_POWER_DOWN) { esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell); break; } schedule_timeout_interruptible(msecs_to_jiffies(100)); if ((jiffies_to_msecs(jiffies) - starttime) > 30000) { esas2r_hdebug("Timeout waiting for power down"); break; } } } /* * Perform power management processing including managing device states, adapter * states, interrupts, and I/O. 
*/ void esas2r_power_down(struct esas2r_adapter *a) { set_bit(AF_POWER_MGT, &a->flags); set_bit(AF_POWER_DOWN, &a->flags); if (!test_bit(AF_DEGRADED_MODE, &a->flags)) { u32 starttime; u32 doorbell; /* * We are currently running OK and will be reinitializing later. * increment the disable count to coordinate with * esas2r_init_adapter. We don't have to do this in degraded * mode since we never enabled interrupts in the first place. */ esas2r_disable_chip_interrupts(a); esas2r_disable_heartbeat(a); /* wait for any VDA activity to clear before continuing */ esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN); starttime = jiffies_to_msecs(jiffies); while (true) { doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); if (doorbell & DRBL_MSG_IFC_DOWN) { esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell); break; } schedule_timeout_interruptible(msecs_to_jiffies(100)); if ((jiffies_to_msecs(jiffies) - starttime) > 3000) { esas2r_hdebug( "timeout waiting for interface down"); break; } } /* * For versions of firmware that support it tell them the driver * is powering down. */ if (test_bit(AF2_VDA_POWER_DOWN, &a->flags2)) esas2r_power_down_notify_firmware(a); } /* Suspend I/O processing. */ set_bit(AF_OS_RESET, &a->flags); set_bit(AF_DISC_PENDING, &a->flags); set_bit(AF_CHPRST_PENDING, &a->flags); esas2r_process_adapter_reset(a); /* Remove devices now that I/O is cleaned up. */ a->prev_dev_cnt = esas2r_targ_db_get_tgt_cnt(a); esas2r_targ_db_remove_all(a, false); } /* * Perform power management processing including managing device states, adapter * states, interrupts, and I/O. 
*/ bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll) { bool ret; clear_bit(AF_POWER_DOWN, &a->flags); esas2r_init_pci_cfg_space(a); set_bit(AF_FIRST_INIT, &a->flags); atomic_inc(&a->disable_cnt); /* reinitialize the adapter */ ret = esas2r_check_adapter(a); if (!esas2r_init_adapter_hw(a, init_poll)) ret = false; /* send the reset asynchronous event */ esas2r_send_reset_ae(a, true); /* clear this flag after initialization. */ clear_bit(AF_POWER_MGT, &a->flags); return ret; } bool esas2r_is_adapter_present(struct esas2r_adapter *a) { if (test_bit(AF_NOT_PRESENT, &a->flags)) return false; if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) { set_bit(AF_NOT_PRESENT, &a->flags); return false; } return true; } const char *esas2r_get_model_name(struct esas2r_adapter *a) { switch (a->pcid->subsystem_device) { case ATTO_ESAS_R680: return "ATTO ExpressSAS R680"; case ATTO_ESAS_R608: return "ATTO ExpressSAS R608"; case ATTO_ESAS_R60F: return "ATTO ExpressSAS R60F"; case ATTO_ESAS_R6F0: return "ATTO ExpressSAS R6F0"; case ATTO_ESAS_R644: return "ATTO ExpressSAS R644"; case ATTO_ESAS_R648: return "ATTO ExpressSAS R648"; case ATTO_TSSC_3808: return "ATTO ThunderStream SC 3808D"; case ATTO_TSSC_3808E: return "ATTO ThunderStream SC 3808E"; case ATTO_TLSH_1068: return "ATTO ThunderLink SH 1068"; } return "ATTO SAS Controller"; } const char *esas2r_get_model_name_short(struct esas2r_adapter *a) { switch (a->pcid->subsystem_device) { case ATTO_ESAS_R680: return "R680"; case ATTO_ESAS_R608: return "R608"; case ATTO_ESAS_R60F: return "R60F"; case ATTO_ESAS_R6F0: return "R6F0"; case ATTO_ESAS_R644: return "R644"; case ATTO_ESAS_R648: return "R648"; case ATTO_TSSC_3808: return "SC 3808D"; case ATTO_TSSC_3808E: return "SC 3808E"; case ATTO_TLSH_1068: return "SH 1068"; } return "unknown"; }
gpl-2.0
AiJiaZone/linux-4.0
drivers/staging/comedi/drivers/icp_multi.c
581
10082
/* * icp_multi.c * Comedi driver for Inova ICP_MULTI board * * COMEDI - Linux Control and Measurement Device Interface * Copyright (C) 1997-2002 David A. Schleef <ds@schleef.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * Driver: icp_multi * Description: Inova ICP_MULTI * Devices: [Inova] ICP_MULTI (icp_multi) * Author: Anne Smorthit <anne.smorthit@sfwte.ch> * Status: works * * Configuration options: not applicable, uses PCI auto config * * The driver works for analog input and output and digital input and * output. It does not work with interrupts or with the counters. Currently * no support for DMA. * * It has 16 single-ended or 8 differential Analogue Input channels with * 12-bit resolution. Ranges : 5V, 10V, +/-5V, +/-10V, 0..20mA and 4..20mA. * Input ranges can be individually programmed for each channel. Voltage or * current measurement is selected by jumper. * * There are 4 x 12-bit Analogue Outputs. 
Ranges : 5V, 10V, +/-5V, +/-10V * * 16 x Digital Inputs, 24V * * 8 x Digital Outputs, 24V, 1A * * 4 x 16-bit counters - not implemented */ #include <linux/module.h> #include <linux/delay.h> #include "../comedi_pci.h" #define ICP_MULTI_ADC_CSR 0x00 /* R/W: ADC command/status register */ #define ICP_MULTI_ADC_CSR_ST BIT(0) /* Start ADC */ #define ICP_MULTI_ADC_CSR_BSY BIT(0) /* ADC busy */ #define ICP_MULTI_ADC_CSR_BI BIT(4) /* Bipolar input range */ #define ICP_MULTI_ADC_CSR_RA BIT(5) /* Input range 0 = 5V, 1 = 10V */ #define ICP_MULTI_ADC_CSR_DI BIT(6) /* Input mode 1 = differential */ #define ICP_MULTI_ADC_CSR_DI_CHAN(x) (((x) & 0x7) << 9) #define ICP_MULTI_ADC_CSR_SE_CHAN(x) (((x) & 0xf) << 8) #define ICP_MULTI_AI 2 /* R: Analogue input data */ #define ICP_MULTI_DAC_CSR 0x04 /* R/W: DAC command/status register */ #define ICP_MULTI_DAC_CSR_ST BIT(0) /* Start DAC */ #define ICP_MULTI_DAC_CSR_BSY BIT(0) /* DAC busy */ #define ICP_MULTI_DAC_CSR_BI BIT(4) /* Bipolar output range */ #define ICP_MULTI_DAC_CSR_RA BIT(5) /* Output range 0 = 5V, 1 = 10V */ #define ICP_MULTI_DAC_CSR_CHAN(x) (((x) & 0x3) << 8) #define ICP_MULTI_AO 6 /* R/W: Analogue output data */ #define ICP_MULTI_DI 8 /* R/W: Digital inputs */ #define ICP_MULTI_DO 0x0A /* R/W: Digital outputs */ #define ICP_MULTI_INT_EN 0x0c /* R/W: Interrupt enable register */ #define ICP_MULTI_INT_STAT 0x0e /* R/W: Interrupt status register */ #define ICP_MULTI_INT_ADC_RDY BIT(0) /* A/D conversion ready interrupt */ #define ICP_MULTI_INT_DAC_RDY BIT(1) /* D/A conversion ready interrupt */ #define ICP_MULTI_INT_DOUT_ERR BIT(2) /* Digital output error interrupt */ #define ICP_MULTI_INT_DIN_STAT BIT(3) /* Digital input status change int. 
*/ #define ICP_MULTI_INT_CIE0 BIT(4) /* Counter 0 overrun interrupt */ #define ICP_MULTI_INT_CIE1 BIT(5) /* Counter 1 overrun interrupt */ #define ICP_MULTI_INT_CIE2 BIT(6) /* Counter 2 overrun interrupt */ #define ICP_MULTI_INT_CIE3 BIT(7) /* Counter 3 overrun interrupt */ #define ICP_MULTI_INT_MASK 0xff /* All interrupts */ #define ICP_MULTI_CNTR0 0x10 /* R/W: Counter 0 */ #define ICP_MULTI_CNTR1 0x12 /* R/W: counter 1 */ #define ICP_MULTI_CNTR2 0x14 /* R/W: Counter 2 */ #define ICP_MULTI_CNTR3 0x16 /* R/W: Counter 3 */ /* analog input and output have the same range options */ static const struct comedi_lrange icp_multi_ranges = { 4, { UNI_RANGE(5), UNI_RANGE(10), BIP_RANGE(5), BIP_RANGE(10) } }; static const char range_codes_analog[] = { 0x00, 0x20, 0x10, 0x30 }; static int icp_multi_ai_eoc(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned long context) { unsigned int status; status = readw(dev->mmio + ICP_MULTI_ADC_CSR); if ((status & ICP_MULTI_ADC_CSR_BSY) == 0) return 0; return -EBUSY; } static int icp_multi_ai_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int chan = CR_CHAN(insn->chanspec); unsigned int range = CR_RANGE(insn->chanspec); unsigned int aref = CR_AREF(insn->chanspec); unsigned int adc_csr; int ret = 0; int n; /* Set mode and range data for specified channel */ if (aref == AREF_DIFF) { adc_csr = ICP_MULTI_ADC_CSR_DI_CHAN(chan) | ICP_MULTI_ADC_CSR_DI; } else { adc_csr = ICP_MULTI_ADC_CSR_SE_CHAN(chan); } adc_csr |= range_codes_analog[range]; writew(adc_csr, dev->mmio + ICP_MULTI_ADC_CSR); for (n = 0; n < insn->n; n++) { /* Set start ADC bit */ writew(adc_csr | ICP_MULTI_ADC_CSR_ST, dev->mmio + ICP_MULTI_ADC_CSR); udelay(1); /* Wait for conversion to complete, or get fed up waiting */ ret = comedi_timeout(dev, s, insn, icp_multi_ai_eoc, 0); if (ret) break; data[n] = (readw(dev->mmio + ICP_MULTI_AI) >> 4) & 0x0fff; } return ret ? 
ret : n; } static int icp_multi_ao_ready(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned long context) { unsigned int status; status = readw(dev->mmio + ICP_MULTI_DAC_CSR); if ((status & ICP_MULTI_DAC_CSR_BSY) == 0) return 0; return -EBUSY; } static int icp_multi_ao_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int chan = CR_CHAN(insn->chanspec); unsigned int range = CR_RANGE(insn->chanspec); unsigned int dac_csr; int i; /* Select channel and range */ dac_csr = ICP_MULTI_DAC_CSR_CHAN(chan); dac_csr |= range_codes_analog[range]; writew(dac_csr, dev->mmio + ICP_MULTI_DAC_CSR); for (i = 0; i < insn->n; i++) { unsigned int val = data[i]; int ret; /* Wait for analog output to be ready for new data */ ret = comedi_timeout(dev, s, insn, icp_multi_ao_ready, 0); if (ret) return ret; writew(val, dev->mmio + ICP_MULTI_AO); /* Set start conversion bit to write data to channel */ writew(dac_csr | ICP_MULTI_DAC_CSR_ST, dev->mmio + ICP_MULTI_DAC_CSR); s->readback[chan] = val; } return insn->n; } static int icp_multi_di_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[1] = readw(dev->mmio + ICP_MULTI_DI); return insn->n; } static int icp_multi_do_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if (comedi_dio_update_state(s, data)) writew(s->state, dev->mmio + ICP_MULTI_DO); data[1] = s->state; return insn->n; } static int icp_multi_reset(struct comedi_device *dev) { int i; /* Disable all interrupts and clear any requests */ writew(0, dev->mmio + ICP_MULTI_INT_EN); writew(ICP_MULTI_INT_MASK, dev->mmio + ICP_MULTI_INT_STAT); /* Reset the analog output channels to 0V */ for (i = 0; i < 4; i++) { unsigned int dac_csr = ICP_MULTI_DAC_CSR_CHAN(i); /* Select channel and 0..5V range */ writew(dac_csr, dev->mmio + ICP_MULTI_DAC_CSR); /* 
Output 0V */ writew(0, dev->mmio + ICP_MULTI_AO); /* Set start conversion bit to write data to channel */ writew(dac_csr | ICP_MULTI_DAC_CSR_ST, dev->mmio + ICP_MULTI_DAC_CSR); udelay(1); } /* Digital outputs to 0 */ writew(0, dev->mmio + ICP_MULTI_DO); return 0; } static int icp_multi_auto_attach(struct comedi_device *dev, unsigned long context_unused) { struct pci_dev *pcidev = comedi_to_pci_dev(dev); struct comedi_subdevice *s; int ret; ret = comedi_pci_enable(dev); if (ret) return ret; dev->mmio = pci_ioremap_bar(pcidev, 2); if (!dev->mmio) return -ENOMEM; ret = comedi_alloc_subdevices(dev, 4); if (ret) return ret; icp_multi_reset(dev); /* Analog Input subdevice */ s = &dev->subdevices[0]; s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_GROUND | SDF_DIFF; s->n_chan = 16; s->maxdata = 0x0fff; s->range_table = &icp_multi_ranges; s->insn_read = icp_multi_ai_insn_read; /* Analog Output subdevice */ s = &dev->subdevices[1]; s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON; s->n_chan = 4; s->maxdata = 0x0fff; s->range_table = &icp_multi_ranges; s->insn_write = icp_multi_ao_insn_write; ret = comedi_alloc_subdev_readback(s); if (ret) return ret; /* Digital Input subdevice */ s = &dev->subdevices[2]; s->type = COMEDI_SUBD_DI; s->subdev_flags = SDF_READABLE; s->n_chan = 16; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = icp_multi_di_insn_bits; /* Digital Output subdevice */ s = &dev->subdevices[3]; s->type = COMEDI_SUBD_DO; s->subdev_flags = SDF_WRITABLE; s->n_chan = 8; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = icp_multi_do_insn_bits; return 0; } static struct comedi_driver icp_multi_driver = { .driver_name = "icp_multi", .module = THIS_MODULE, .auto_attach = icp_multi_auto_attach, .detach = comedi_pci_detach, }; static int icp_multi_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { return comedi_pci_auto_config(dev, &icp_multi_driver, id->driver_data); } 
static const struct pci_device_id icp_multi_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_ICP, 0x8000) }, { 0 } }; MODULE_DEVICE_TABLE(pci, icp_multi_pci_table); static struct pci_driver icp_multi_pci_driver = { .name = "icp_multi", .id_table = icp_multi_pci_table, .probe = icp_multi_pci_probe, .remove = comedi_pci_auto_unconfig, }; module_comedi_pci_driver(icp_multi_driver, icp_multi_pci_driver); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi driver for Inova ICP_MULTI board"); MODULE_LICENSE("GPL");
gpl-2.0
longman88/qspinlock-v14
drivers/char/tpm/tpm_infineon.c
837
16899
/*
 * Description:
 * Device Driver for the Infineon Technologies
 * SLD 9630 TT 1.1 and SLB 9635 TT 1.2 Trusted Platform Module
 * Specifications at www.trustedcomputinggroup.org
 *
 * Copyright (C) 2005, Marcel Selhorst <tpmdd@selhorst.net>
 * Sirrix AG - security technologies <tpmdd@sirrix.com> and
 * Applied Data Security Group, Ruhr-University Bochum, Germany
 * Project-Homepage: http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 */

#include <linux/init.h>
#include <linux/pnp.h>
#include "tpm.h"

/* Infineon specific definitions */
/* maximum number of WTX-packages */
#define TPM_MAX_WTX_PACKAGES 50
/* msleep-Time for WTX-packages */
#define TPM_WTX_MSLEEP_TIME 20
/* msleep-Time --> Interval to check status register */
#define TPM_MSLEEP_TIME 3
/* gives number of max. msleep()-calls before throwing timeout */
#define TPM_MAX_TRIES 5000
#define TPM_INFINEON_DEV_VEN_VALUE 0x15D1

/* transport selector: legacy I/O ports vs. memory-mapped I/O */
#define TPM_INF_IO_PORT 0x0
#define TPM_INF_IO_MEM 0x1

/* offsets of the index/data pair within the config region */
#define TPM_INF_ADDR 0x0
#define TPM_INF_DATA 0x1

/*
 * Per-device state. The driver supports exactly one TPM instance,
 * held in the file-scope 'tpm_dev' below.
 */
struct tpm_inf_dev {
	int iotype;		/* TPM_INF_IO_PORT or TPM_INF_IO_MEM */

	void __iomem *mem_base;	/* MMIO ioremap'd addr */
	unsigned long map_base;	/* phys MMIO base */
	unsigned long map_size;	/* MMIO region size */
	unsigned int index_off;	/* index register offset */

	unsigned int data_regs;	/* Data registers */
	unsigned int data_size;

	unsigned int config_port;	/* IO Port config index reg */
	unsigned int config_size;
};

static struct tpm_inf_dev tpm_dev;

/* Write one byte to a data register, via port or MMIO access. */
static inline void tpm_data_out(unsigned char data, unsigned char offset)
{
	if (tpm_dev.iotype == TPM_INF_IO_PORT)
		outb(data, tpm_dev.data_regs + offset);
	else
		writeb(data, tpm_dev.mem_base + tpm_dev.data_regs + offset);
}

/* Read one byte from a data register, via port or MMIO access. */
static inline unsigned char tpm_data_in(unsigned char offset)
{
	if (tpm_dev.iotype == TPM_INF_IO_PORT)
		return inb(tpm_dev.data_regs + offset);
	else
		return readb(tpm_dev.mem_base + tpm_dev.data_regs + offset);
}

/* Write one byte to the configuration (index/data) register pair. */
static inline void tpm_config_out(unsigned char data, unsigned char offset)
{
	if (tpm_dev.iotype == TPM_INF_IO_PORT)
		outb(data, tpm_dev.config_port + offset);
	else
		writeb(data, tpm_dev.mem_base + tpm_dev.index_off + offset);
}

/* Read one byte from the configuration (index/data) register pair. */
static inline unsigned char tpm_config_in(unsigned char offset)
{
	if (tpm_dev.iotype == TPM_INF_IO_PORT)
		return inb(tpm_dev.config_port + offset);
	else
		return readb(tpm_dev.mem_base + tpm_dev.index_off + offset);
}

/* TPM header definitions */
enum infineon_tpm_header {
	TPM_VL_VER = 0x01,
	TPM_VL_CHANNEL_CONTROL = 0x07,
	TPM_VL_CHANNEL_PERSONALISATION = 0x0A,
	TPM_VL_CHANNEL_TPM = 0x0B,
	TPM_VL_CONTROL = 0x00,
	TPM_INF_NAK = 0x15,
	TPM_CTRL_WTX = 0x10,
	TPM_CTRL_WTX_ABORT = 0x18,
	TPM_CTRL_WTX_ABORT_ACK = 0x18,
	TPM_CTRL_ERROR = 0x20,
	TPM_CTRL_CHAININGACK = 0x40,
	TPM_CTRL_CHAINING = 0x80,
	TPM_CTRL_DATA = 0x04,
	TPM_CTRL_DATA_CHA = 0x84,
	TPM_CTRL_DATA_CHA_ACK = 0xC4
};

/* offsets of the four device registers within the data region */
enum infineon_tpm_register {
	WRFIFO = 0x00,
	RDFIFO = 0x01,
	STAT = 0x02,
	CMD = 0x03
};

/* bit numbers within the CMD register */
enum infineon_tpm_command_bits {
	CMD_DIS = 0x00,
	CMD_LP = 0x01,
	CMD_RES = 0x02,
	CMD_IRQC = 0x06
};

/* bit numbers within the STAT register */
enum infineon_tpm_status_bits {
	STAT_XFE = 0x00,
	STAT_LPA = 0x01,
	STAT_FOK = 0x02,
	STAT_TOK = 0x03,
	STAT_IRQA = 0x06,
	STAT_RDA = 0x07
};

/* some outgoing values */
enum infineon_tpm_values {
	CHIP_ID1 = 0x20,
	CHIP_ID2 = 0x21,
	TPM_DAR = 0x30,
	RESET_LP_IRQC_DISABLE = 0x41,
	ENABLE_REGISTER_PAIR = 0x55,
	IOLIMH = 0x60,
	IOLIML = 0x61,
	DISABLE_REGISTER_PAIR = 0xAA,
	IDVENL = 0xF1,
	IDVENH = 0xF2,
	IDPDL = 0xF3,
	IDPDH = 0xF4
};

/* running count of granted waiting-time extensions for one transaction */
static int number_of_wtx;

/*
 * Drain stale data from the TPM FIFOs before a new transaction.
 * If clear_wrfifo is set, the write FIFO is read out first; the read
 * FIFO is always drained until STAT_RDA clears. Returns 0 or -EIO on
 * timeout.
 *
 * NOTE(review): 'check' is never reset on a non-0xff read, so the
 * write-FIFO loop stops after six 0xff reads in total, not six in a
 * row — presumably intentional, but worth confirming.
 */
static int empty_fifo(struct tpm_chip *chip, int clear_wrfifo)
{
	int status;
	int check = 0;
	int i;

	if (clear_wrfifo) {
		for (i = 0; i < 4096; i++) {
			status = tpm_data_in(WRFIFO);
			if (status == 0xff) {
				if (check == 5)
					break;
				else
					check++;
			}
		}
	}
	/* Note: The values which are currently in the FIFO of the TPM
	   are thrown away since there is no usage for them. Usually,
	   this has nothing to say, since the TPM will give its answer
	   immediately or will be aborted anyway, so the data here is
	   usually garbage and useless.
	   We have to clean this, because the next communication with
	   the TPM would be rubbish, if there is still some old data
	   in the Read FIFO. */
	i = 0;
	do {
		status = tpm_data_in(RDFIFO);
		status = tpm_data_in(STAT);
		i++;
		if (i == TPM_MAX_TRIES)
			return -EIO;
	} while ((status & (1 << STAT_RDA)) != 0);
	return 0;
}

/*
 * Poll the STAT register until 'wait_for_bit' becomes set, sleeping
 * TPM_MSLEEP_TIME between reads. Returns 0 on success, -EIO after
 * TPM_MAX_TRIES attempts.
 */
static int wait(struct tpm_chip *chip, int wait_for_bit)
{
	int status;
	int i;
	for (i = 0; i < TPM_MAX_TRIES; i++) {
		status = tpm_data_in(STAT);
		/* check the status-register if wait_for_bit is set */
		if (status & 1 << wait_for_bit)
			break;
		msleep(TPM_MSLEEP_TIME);
	}
	if (i == TPM_MAX_TRIES) {	/* timeout occurs */
		if (wait_for_bit == STAT_XFE)
			dev_err(chip->dev, "Timeout in wait(STAT_XFE)\n");
		if (wait_for_bit == STAT_RDA)
			dev_err(chip->dev, "Timeout in wait(STAT_RDA)\n");
		return -EIO;
	}
	return 0;
};

/*
 * Wait for the write FIFO to become empty, then push one byte.
 * NOTE(review): the wait() result is ignored here; on timeout the
 * byte is written anyway.
 */
static void wait_and_send(struct tpm_chip *chip, u8 sendbyte)
{
	wait(chip, STAT_XFE);
	tpm_data_out(sendbyte, WRFIFO);
}

/* Note: WTX means Waiting-Time-Extension. Whenever the TPM needs more
   calculation time, it sends a WTX-package, which has to be acknowledged
   or aborted. This usually occurs if you are hammering the TPM with key
   creation. Set the maximum number of WTX-packages in the definitions
   above, if the number is reached, the waiting-time will be denied
   and the TPM command has to be resend.
 */

/* Acknowledge one waiting-time-extension request from the TPM. */
static void tpm_wtx(struct tpm_chip *chip)
{
	number_of_wtx++;
	dev_info(chip->dev, "Granting WTX (%02d / %02d)\n",
		 number_of_wtx, TPM_MAX_WTX_PACKAGES);
	wait_and_send(chip, TPM_VL_VER);
	wait_and_send(chip, TPM_CTRL_WTX);
	wait_and_send(chip, 0x00);
	wait_and_send(chip, 0x00);
	msleep(TPM_WTX_MSLEEP_TIME);
}

/* Abort the pending waiting-time extension and reset the WTX counter. */
static void tpm_wtx_abort(struct tpm_chip *chip)
{
	dev_info(chip->dev, "Aborting WTX\n");
	wait_and_send(chip, TPM_VL_VER);
	wait_and_send(chip, TPM_CTRL_WTX_ABORT);
	wait_and_send(chip, 0x00);
	wait_and_send(chip, 0x00);
	number_of_wtx = 0;
	msleep(TPM_WTX_MSLEEP_TIME);
}

/*
 * Receive a response from the TPM. Reads the 4-byte vendor-layer
 * header, then dispatches on the control byte: data packets are copied
 * into 'buf' (with the 6-byte data header stripped), WTX requests are
 * acknowledged or aborted, and error packets yield -EIO. Returns the
 * payload size or a negative errno.
 *
 * NOTE(review): inside the payload loop the wait(chip, STAT_RDA)
 * return value is ignored, so a mid-payload timeout silently yields
 * stale FIFO bytes — verify against hardware behavior.
 */
static int tpm_inf_recv(struct tpm_chip *chip, u8 * buf, size_t count)
{
	int i;
	int ret;
	u32 size = 0;
	number_of_wtx = 0;

recv_begin:
	/* start receiving header */
	for (i = 0; i < 4; i++) {
		ret = wait(chip, STAT_RDA);
		if (ret)
			return -EIO;
		buf[i] = tpm_data_in(RDFIFO);
	}

	if (buf[0] != TPM_VL_VER) {
		dev_err(chip->dev,
			"Wrong transport protocol implementation!\n");
		return -EIO;
	}

	if (buf[1] == TPM_CTRL_DATA) {
		/* size of the data received */
		size = ((buf[2] << 8) | buf[3]);

		for (i = 0; i < size; i++) {
			wait(chip, STAT_RDA);
			buf[i] = tpm_data_in(RDFIFO);
		}

		if ((size == 0x6D00) && (buf[1] == 0x80)) {
			dev_err(chip->dev, "Error handling on vendor layer!\n");
			return -EIO;
		}

		/* strip the 6-byte data header preceding the TPM blob */
		for (i = 0; i < size; i++)
			buf[i] = buf[i + 6];

		size = size - 6;
		return size;
	}

	if (buf[1] == TPM_CTRL_WTX) {
		dev_info(chip->dev, "WTX-package received\n");
		if (number_of_wtx < TPM_MAX_WTX_PACKAGES) {
			tpm_wtx(chip);
			goto recv_begin;
		} else {
			tpm_wtx_abort(chip);
			goto recv_begin;
		}
	}

	if (buf[1] == TPM_CTRL_WTX_ABORT_ACK) {
		dev_info(chip->dev, "WTX-abort acknowledged\n");
		return size;
	}

	if (buf[1] == TPM_CTRL_ERROR) {
		dev_err(chip->dev, "ERROR-package received:\n");
		if (buf[4] == TPM_INF_NAK)
			dev_err(chip->dev,
				"-> Negative acknowledgement"
				" - retransmit command!\n");
		return -EIO;
	}
	return -EIO;
}

/*
 * Send a command to the TPM: clear the FIFOs, then transmit the
 * vendor-layer header (length = count + 6), the data header with the
 * 32-bit payload length, and the payload itself. Returns 'count' on
 * success or -EIO.
 */
static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count)
{
	int i;
	int ret;
	u8 count_high, count_low, count_4, count_3, count_2, count_1;

	/* Disabling Reset, LP and IRQC */
	tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);

	ret = empty_fifo(chip, 1);
	if (ret) {
		dev_err(chip->dev, "Timeout while clearing FIFO\n");
		return -EIO;
	}

	ret = wait(chip, STAT_XFE);
	if (ret)
		return -EIO;

	/* split the payload length into big-endian bytes */
	count_4 = (count & 0xff000000) >> 24;
	count_3 = (count & 0x00ff0000) >> 16;
	count_2 = (count & 0x0000ff00) >> 8;
	count_1 = (count & 0x000000ff);
	count_high = ((count + 6) & 0xffffff00) >> 8;
	count_low = ((count + 6) & 0x000000ff);

	/* Sending Header */
	wait_and_send(chip, TPM_VL_VER);
	wait_and_send(chip, TPM_CTRL_DATA);
	wait_and_send(chip, count_high);
	wait_and_send(chip, count_low);

	/* Sending Data Header */
	wait_and_send(chip, TPM_VL_VER);
	wait_and_send(chip, TPM_VL_CHANNEL_TPM);
	wait_and_send(chip, count_4);
	wait_and_send(chip, count_3);
	wait_and_send(chip, count_2);
	wait_and_send(chip, count_1);

	/* Sending Data */
	for (i = 0; i < count; i++) {
		wait_and_send(chip, buf[i]);
	}
	return count;
}

static void tpm_inf_cancel(struct tpm_chip *chip)
{
	/*
	   Since we are using the legacy mode to communicate
	   with the TPM, we have no cancel functions, but have
	   a workaround for interrupting the TPM through WTX.
	 */
}

/* Return the raw STAT register value. */
static u8 tpm_inf_status(struct tpm_chip *chip)
{
	return tpm_data_in(STAT);
}

static const struct tpm_class_ops tpm_inf = {
	.recv = tpm_inf_recv,
	.send = tpm_inf_send,
	.cancel = tpm_inf_cancel,
	.status = tpm_inf_status,
	.req_complete_mask = 0,
	.req_complete_val = 0,
};

static const struct pnp_device_id tpm_inf_pnp_tbl[] = {
	/* Infineon TPMs */
	{"IFX0101", 0},
	{"IFX0102", 0},
	{"", 0}
};

MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl);

/*
 * PnP probe: claim the device's I/O-port or MMIO resources, identify
 * the chip through the config register pair, program the data-register
 * base, and register with the TPM core. Returns 0 on success or a
 * negative errno.
 *
 * NOTE(review): if tpm_register_hardware() fails, 'rc' is still 0 from
 * its initialisation, so the probe reports success while releasing the
 * regions — confirm whether -ENODEV was intended there.
 */
static int tpm_inf_pnp_probe(struct pnp_dev *dev,
			     const struct pnp_device_id *dev_id)
{
	int rc = 0;
	u8 iol, ioh;
	int vendorid[2];
	int version[2];
	int productid[2];
	char chipname[20];
	struct tpm_chip *chip;

	/* read IO-ports through PnP */
	if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
	    !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {

		tpm_dev.iotype = TPM_INF_IO_PORT;

		tpm_dev.config_port = pnp_port_start(dev, 0);
		tpm_dev.config_size = pnp_port_len(dev, 0);
		tpm_dev.data_regs = pnp_port_start(dev, 1);
		tpm_dev.data_size = pnp_port_len(dev, 1);
		if ((tpm_dev.data_size < 4) || (tpm_dev.config_size < 2)) {
			rc = -EINVAL;
			goto err_last;
		}
		dev_info(&dev->dev, "Found %s with ID %s\n",
			 dev->name, dev_id->id);
		/* the high byte of the data base must be non-zero */
		if (!((tpm_dev.data_regs >> 8) & 0xff)) {
			rc = -EINVAL;
			goto err_last;
		}
		/* publish my base address and request region */
		if (request_region(tpm_dev.data_regs, tpm_dev.data_size,
				   "tpm_infineon0") == NULL) {
			rc = -EINVAL;
			goto err_last;
		}
		if (request_region(tpm_dev.config_port, tpm_dev.config_size,
				   "tpm_infineon0") == NULL) {
			release_region(tpm_dev.data_regs, tpm_dev.data_size);
			rc = -EINVAL;
			goto err_last;
		}
	} else if (pnp_mem_valid(dev, 0) &&
		   !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {

		tpm_dev.iotype = TPM_INF_IO_MEM;

		tpm_dev.map_base = pnp_mem_start(dev, 0);
		tpm_dev.map_size = pnp_mem_len(dev, 0);

		dev_info(&dev->dev, "Found %s with ID %s\n",
			 dev->name, dev_id->id);

		/* publish my base address and request region */
		if (request_mem_region(tpm_dev.map_base, tpm_dev.map_size,
				       "tpm_infineon0") == NULL) {
			rc = -EINVAL;
			goto err_last;
		}

		tpm_dev.mem_base = ioremap(tpm_dev.map_base, tpm_dev.map_size);
		if (tpm_dev.mem_base == NULL) {
			release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
			rc = -EINVAL;
			goto err_last;
		}

		/*
		 * The only known MMIO based Infineon TPM system provides
		 * a single large mem region with the device config
		 * registers at the default TPM_ADDR. The data registers
		 * seem like they could be placed anywhere within the MMIO
		 * region, but lets just put them at zero offset.
		 */
		tpm_dev.index_off = TPM_ADDR;
		tpm_dev.data_regs = 0x0;
	} else {
		rc = -EINVAL;
		goto err_last;
	}

	/* query chip for its vendor, its version number a.s.o. */
	tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
	tpm_config_out(IDVENL, TPM_INF_ADDR);
	vendorid[1] = tpm_config_in(TPM_INF_DATA);
	tpm_config_out(IDVENH, TPM_INF_ADDR);
	vendorid[0] = tpm_config_in(TPM_INF_DATA);
	tpm_config_out(IDPDL, TPM_INF_ADDR);
	productid[1] = tpm_config_in(TPM_INF_DATA);
	tpm_config_out(IDPDH, TPM_INF_ADDR);
	productid[0] = tpm_config_in(TPM_INF_DATA);
	tpm_config_out(CHIP_ID1, TPM_INF_ADDR);
	version[1] = tpm_config_in(TPM_INF_DATA);
	tpm_config_out(CHIP_ID2, TPM_INF_ADDR);
	version[0] = tpm_config_in(TPM_INF_DATA);

	switch ((productid[0] << 8) | productid[1]) {
	case 6:
		snprintf(chipname, sizeof(chipname), " (SLD 9630 TT 1.1)");
		break;
	case 11:
		snprintf(chipname, sizeof(chipname), " (SLB 9635 TT 1.2)");
		break;
	default:
		snprintf(chipname, sizeof(chipname), " (unknown chip)");
		break;
	}

	if ((vendorid[0] << 8 | vendorid[1]) == (TPM_INFINEON_DEV_VEN_VALUE)) {

		/* configure TPM with IO-ports */
		tpm_config_out(IOLIMH, TPM_INF_ADDR);
		tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
		tpm_config_out(IOLIML, TPM_INF_ADDR);
		tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);

		/* control if IO-ports are set correctly */
		tpm_config_out(IOLIMH, TPM_INF_ADDR);
		ioh = tpm_config_in(TPM_INF_DATA);
		tpm_config_out(IOLIML, TPM_INF_ADDR);
		iol = tpm_config_in(TPM_INF_DATA);

		if ((ioh << 8 | iol) != tpm_dev.data_regs) {
			dev_err(&dev->dev,
				"Could not set IO-data registers to 0x%x\n",
				tpm_dev.data_regs);
			rc = -EIO;
			goto err_release_region;
		}

		/* activate register */
		tpm_config_out(TPM_DAR, TPM_INF_ADDR);
		tpm_config_out(0x01, TPM_INF_DATA);
		tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);

		/* disable RESET, LP and IRQC */
		tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);

		/* Finally, we're done, print some infos */
		dev_info(&dev->dev, "TPM found: "
			 "config base 0x%lx, "
			 "data base 0x%lx, "
			 "chip version 0x%02x%02x, "
			 "vendor id 0x%x%x (Infineon), "
			 "product id 0x%02x%02x"
			 "%s\n",
			 tpm_dev.iotype == TPM_INF_IO_PORT ?
			 tpm_dev.config_port :
			 tpm_dev.map_base + tpm_dev.index_off,
			 tpm_dev.iotype == TPM_INF_IO_PORT ?
			 tpm_dev.data_regs :
			 tpm_dev.map_base + tpm_dev.data_regs,
			 version[0], version[1],
			 vendorid[0], vendorid[1],
			 productid[0], productid[1], chipname);

		if (!(chip = tpm_register_hardware(&dev->dev, &tpm_inf)))
			goto err_release_region;

		return 0;
	} else {
		rc = -ENODEV;
		goto err_release_region;
	}

err_release_region:
	if (tpm_dev.iotype == TPM_INF_IO_PORT) {
		release_region(tpm_dev.data_regs, tpm_dev.data_size);
		release_region(tpm_dev.config_port, tpm_dev.config_size);
	} else {
		iounmap(tpm_dev.mem_base);
		release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
	}

err_last:
	return rc;
}

/* PnP remove: release the claimed regions and unregister from TPM core. */
static void tpm_inf_pnp_remove(struct pnp_dev *dev)
{
	struct tpm_chip *chip = pnp_get_drvdata(dev);

	if (chip) {
		if (tpm_dev.iotype == TPM_INF_IO_PORT) {
			release_region(tpm_dev.data_regs, tpm_dev.data_size);
			release_region(tpm_dev.config_port,
				       tpm_dev.config_size);
		} else {
			iounmap(tpm_dev.mem_base);
			release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
		}
		tpm_dev_vendor_release(chip);
		tpm_remove_hardware(chip->dev);
	}
}

/*
 * Suspend hook: issue a TPM_ORD_SaveState command so the TPM persists
 * its volatile state before power-down.
 */
static int tpm_inf_pnp_suspend(struct pnp_dev *dev, pm_message_t pm_state)
{
	struct tpm_chip *chip = pnp_get_drvdata(dev);
	int rc;
	if (chip) {
		u8 savestate[] = {
			0, 193,	/* TPM_TAG_RQU_COMMAND */
			0, 0, 0, 10,	/* blob length (in bytes) */
			0, 0, 0, 152	/* TPM_ORD_SaveState */
		};
		dev_info(&dev->dev, "saving TPM state\n");
		rc = tpm_inf_send(chip, savestate, sizeof(savestate));
		if (rc < 0) {
			dev_err(&dev->dev, "error while saving TPM state\n");
			return rc;
		}
	}
	return 0;
}

/*
 * Resume hook: re-program the data-register base and re-activate the
 * chip (the same sequence the probe performs), then defer to the TPM
 * core's generic resume.
 */
static int tpm_inf_pnp_resume(struct pnp_dev *dev)
{
	/* Re-configure TPM after suspending */
	tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
	tpm_config_out(IOLIMH, TPM_INF_ADDR);
	tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
	tpm_config_out(IOLIML, TPM_INF_ADDR);
	tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);
	/* activate register */
	tpm_config_out(TPM_DAR, TPM_INF_ADDR);
	tpm_config_out(0x01, TPM_INF_DATA);
	tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
	/* disable RESET, LP and IRQC */
	tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
	return tpm_pm_resume(&dev->dev);
}

static struct pnp_driver tpm_inf_pnp_driver = {
	.name = "tpm_inf_pnp",
	.id_table = tpm_inf_pnp_tbl,
	.probe = tpm_inf_pnp_probe,
	.suspend = tpm_inf_pnp_suspend,
	.resume = tpm_inf_pnp_resume,
	.remove = tpm_inf_pnp_remove
};

static int __init init_inf(void)
{
	return pnp_register_driver(&tpm_inf_pnp_driver);
}

static void __exit cleanup_inf(void)
{
	pnp_unregister_driver(&tpm_inf_pnp_driver);
}

module_init(init_inf);
module_exit(cleanup_inf);

MODULE_AUTHOR("Marcel Selhorst <tpmdd@sirrix.com>");
MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
MODULE_VERSION("1.9.2");
MODULE_LICENSE("GPL");
gpl-2.0
duanyukun/linux
drivers/gpu/drm/radeon/r600_hdmi.c
837
15805
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Christian König.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#include <linux/hdmi.h>
#include <linux/gcd.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_audio.h"
#include "r600d.h"
#include "atom.h"

/*
 * HDMI color format
 */
enum r600_hdmi_color_format {
	RGB = 0,
	YCC_422 = 1,
	YCC_444 = 2
};

/*
 * IEC60958 status bits
 */
enum r600_hdmi_iec_status_bits {
	AUDIO_STATUS_DIG_ENABLE = 0x01,
	AUDIO_STATUS_V = 0x02,
	AUDIO_STATUS_VCFG = 0x04,
	AUDIO_STATUS_EMPHASIS = 0x08,
	AUDIO_STATUS_COPYRIGHT = 0x10,
	AUDIO_STATUS_NONAUDIO = 0x20,
	AUDIO_STATUS_PROFESSIONAL = 0x40,
	AUDIO_STATUS_LEVEL = 0x80
};

/*
 * Decode the current audio stream parameters (channels, bit depth,
 * sample rate, IEC 60958 status/category bits) from the hardware
 * registers into an r600_audio_pin returned by value.
 */
static struct r600_audio_pin r600_audio_status(struct radeon_device *rdev)
{
	struct r600_audio_pin status;
	uint32_t value;

	value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);

	/* number of channels */
	status.channels = (value & 0x7) + 1;

	/* bits per sample */
	switch ((value & 0xF0) >> 4) {
	case 0x0:
		status.bits_per_sample = 8;
		break;
	case 0x1:
		status.bits_per_sample = 16;
		break;
	case 0x2:
		status.bits_per_sample = 20;
		break;
	case 0x3:
		status.bits_per_sample = 24;
		break;
	case 0x4:
		status.bits_per_sample = 32;
		break;
	default:
		/* note: prints the whole register value, not just the field */
		dev_err(rdev->dev, "Unknown bits per sample 0x%x, using 16\n",
			(int)value);
		status.bits_per_sample = 16;
	}

	/* current sampling rate in HZ */
	if (value & 0x4000)
		status.rate = 44100;
	else
		status.rate = 48000;
	status.rate *= ((value >> 11) & 0x7) + 1;
	status.rate /= ((value >> 8) & 0x7) + 1;

	value = RREG32(R600_AUDIO_STATUS_BITS);

	/* iec 60958 status bits */
	status.status_bits = value & 0xff;

	/* iec 60958 category code */
	status.category_code = (value >> 8) & 0xff;

	return status;
}

/*
 * update all hdmi interfaces with current audio parameters
 *
 * Workqueue callback: snapshots the hardware audio status, stores it in
 * pin[0] when anything changed, and refreshes the audio settings of
 * every digital encoder whose buffer status or parameters changed.
 */
void r600_audio_update_hdmi(struct work_struct *work)
{
	struct radeon_device *rdev = container_of(work, struct radeon_device,
						  audio_work);
	struct drm_device *dev = rdev->ddev;
	struct r600_audio_pin audio_status = r600_audio_status(rdev);
	struct drm_encoder *encoder;
	bool changed = false;

	if (rdev->audio.pin[0].channels != audio_status.channels ||
	    rdev->audio.pin[0].rate != audio_status.rate ||
	    rdev->audio.pin[0].bits_per_sample != audio_status.bits_per_sample ||
	    rdev->audio.pin[0].status_bits != audio_status.status_bits ||
	    rdev->audio.pin[0].category_code != audio_status.category_code) {
		rdev->audio.pin[0] = audio_status;
		changed = true;
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (!radeon_encoder_is_digital(encoder))
			continue;
		if (changed || r600_hdmi_buffer_status_changed(encoder))
			r600_hdmi_update_audio_settings(encoder);
	}
}

/* enable the audio stream */
void r600_audio_enable(struct radeon_device *rdev,
		       struct r600_audio_pin *pin,
		       u8 enable_mask)
{
	u32 tmp = RREG32(AZ_HOT_PLUG_CONTROL);

	if (!pin)
		return;

	if (enable_mask) {
		tmp |= AUDIO_ENABLED;
		/* each bit of the mask enables the corresponding pin */
		if (enable_mask & 1)
			tmp |= PIN0_AUDIO_ENABLED;
		if (enable_mask & 2)
			tmp |= PIN1_AUDIO_ENABLED;
		if (enable_mask & 4)
			tmp |= PIN2_AUDIO_ENABLED;
		if (enable_mask & 8)
			tmp |= PIN3_AUDIO_ENABLED;
	} else {
		tmp &= ~(AUDIO_ENABLED |
			 PIN0_AUDIO_ENABLED | PIN1_AUDIO_ENABLED |
			 PIN2_AUDIO_ENABLED | PIN3_AUDIO_ENABLED);
	}

	WREG32(AZ_HOT_PLUG_CONTROL, tmp);
}

struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev)
{
	/* only one pin on 6xx-NI */
	return &rdev->audio.pin[0];
}

/*
 * Program the Audio Clock Regeneration (ACR) CTS/N values for the
 * 32/44.1/48 kHz sample-rate families and let hardware auto-send the
 * ACR packets.
 */
void r600_hdmi_update_acr(struct drm_encoder *encoder, long offset,
	const struct radeon_hdmi_acr *acr)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;

	/* DCE 3.0 uses register that's normally for CRC_CONTROL */
	uint32_t acr_ctl = ASIC_IS_DCE3(rdev) ? DCE3_HDMI0_ACR_PACKET_CONTROL :
				       HDMI0_ACR_PACKET_CONTROL;
	WREG32_P(acr_ctl + offset,
		HDMI0_ACR_SOURCE |		/* select SW CTS value */
		HDMI0_ACR_AUTO_SEND,	/* allow hw to sent ACR packets when required */
		~(HDMI0_ACR_SOURCE |
		HDMI0_ACR_AUTO_SEND));

	WREG32_P(HDMI0_ACR_32_0 + offset,
		HDMI0_ACR_CTS_32(acr->cts_32khz),
		~HDMI0_ACR_CTS_32_MASK);
	WREG32_P(HDMI0_ACR_32_1 + offset,
		HDMI0_ACR_N_32(acr->n_32khz),
		~HDMI0_ACR_N_32_MASK);

	WREG32_P(HDMI0_ACR_44_0 + offset,
		HDMI0_ACR_CTS_44(acr->cts_44_1khz),
		~HDMI0_ACR_CTS_44_MASK);
	WREG32_P(HDMI0_ACR_44_1 + offset,
		HDMI0_ACR_N_44(acr->n_44_1khz),
		~HDMI0_ACR_N_44_MASK);

	WREG32_P(HDMI0_ACR_48_0 + offset,
		HDMI0_ACR_CTS_48(acr->cts_48khz),
		~HDMI0_ACR_CTS_48_MASK);
	WREG32_P(HDMI0_ACR_48_1 + offset,
		HDMI0_ACR_N_48(acr->n_48khz),
		~HDMI0_ACR_N_48_MASK);
}

/*
 * build a HDMI Video Info Frame
 */
void r600_set_avi_packet(struct radeon_device *rdev, u32 offset,
			 unsigned char *buffer, size_t size)
{
	/* frame skips the 3-byte infoframe header; buffer[1] is presumably
	 * the infoframe version byte folded into INFO3 — TODO confirm */
	uint8_t *frame = buffer + 3;

	WREG32(HDMI0_AVI_INFO0 + offset,
		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
	WREG32(HDMI0_AVI_INFO1 + offset,
		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
	WREG32(HDMI0_AVI_INFO2 + offset,
		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
	WREG32(HDMI0_AVI_INFO3 + offset,
		frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24));

	WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset,
		HDMI0_AVI_INFO_LINE(2));	/* anything other than 0 */

	WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
		HDMI0_AVI_INFO_SEND |	/* enable AVI info frames */
		HDMI0_AVI_INFO_CONT);	/* send AVI info frames every frame/field */
}

/*
 * build a Audio Info Frame
 */
static void r600_hdmi_update_audio_infoframe(struct drm_encoder *encoder,
					     const void *buffer, size_t size)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
	uint32_t offset = dig->afmt->offset;
	const u8 *frame = buffer + 3;

	WREG32(HDMI0_AUDIO_INFO0 + offset,
		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
	/* NOTE(review): frame[0x7] is skipped here (0x8 goes into the top
	 * byte) — presumably intentional for this hardware; verify */
	WREG32(HDMI0_AUDIO_INFO1 + offset,
		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x8] << 24));
}

/*
 * test if audio buffer is filled enough to start playing
 */
static bool r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
	uint32_t offset = dig->afmt->offset;

	return (RREG32(HDMI0_STATUS + offset) & 0x10) != 0;
}

/*
 * have buffer status changed since last call?
 */
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
{
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
	int status, result;

	if (!dig->afmt || !dig->afmt->enabled)
		return 0;

	status = r600_hdmi_is_audio_buffer_filled(encoder);
	result = dig->afmt->last_buffer_filled_status != status;
	dig->afmt->last_buffer_filled_status = status;

	return result;
}

/*
 * write the audio workaround status to the hardware
 */
void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
	uint32_t offset = dig->afmt->offset;
	bool hdmi_audio_workaround = false; /* FIXME */
	u32 value;

	if (!hdmi_audio_workaround ||
	    r600_hdmi_is_audio_buffer_filled(encoder))
		value = 0; /* disable workaround */
	else
		value = HDMI0_AUDIO_TEST_EN; /* enable workaround */
	WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset,
		 value, ~HDMI0_AUDIO_TEST_EN);
}

/*
 * Program the audio clock DTO (phase/module) for the given pixel clock
 * and select DTO0 or DTO1 based on which DIG encoder drives the CRTC.
 */
void r600_hdmi_audio_set_dto(struct radeon_device *rdev,
			     struct radeon_crtc *crtc, unsigned int clock)
{
	struct radeon_encoder *radeon_encoder;
	struct radeon_encoder_atom_dig *dig;

	if (!crtc)
		return;

	radeon_encoder = to_radeon_encoder(crtc->encoder);
	dig = radeon_encoder->enc_priv;

	if (!dig)
		return;

	if (dig->dig_encoder == 0) {
		WREG32(DCCG_AUDIO_DTO0_PHASE, 24000 * 100);
		WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
		WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
	} else {
		WREG32(DCCG_AUDIO_DTO1_PHASE, 24000 * 100);
		WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
		WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
	}
}

void r600_set_vbi_packet(struct drm_encoder *encoder, u32 offset)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;

	WREG32_OR(HDMI0_VBI_PACKET_CONTROL + offset,
		HDMI0_NULL_SEND |	/* send null packets when required */
		HDMI0_GC_SEND |		/* send general control packets */
		HDMI0_GC_CONT);		/* send general control packets every frame */
}

/*
 * Configure audio packet transmission, audio infoframe lines and the
 * IEC 60958 channel-status channel numbers; clears the generic-packet
 * send bits.
 */
void r600_set_audio_packet(struct drm_encoder *encoder, u32 offset)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;

	WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset,
		HDMI0_AUDIO_SAMPLE_SEND |			/* send audio packets */
		HDMI0_AUDIO_DELAY_EN(1) |			/* default audio delay */
		HDMI0_AUDIO_PACKETS_PER_LINE(3) |	/* should be suffient for all audio modes and small enough for all hblanks */
		HDMI0_60958_CS_UPDATE,				/* allow 60958 channel status fields to be updated */
		~(HDMI0_AUDIO_SAMPLE_SEND |
		HDMI0_AUDIO_DELAY_EN_MASK |
		HDMI0_AUDIO_PACKETS_PER_LINE_MASK |
		HDMI0_60958_CS_UPDATE));

	WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
		HDMI0_AUDIO_INFO_SEND |		/* enable audio info frames (frames won't be set until audio is enabled) */
		HDMI0_AUDIO_INFO_UPDATE);	/* required for audio info values to be updated */

	WREG32_P(HDMI0_INFOFRAME_CONTROL1 + offset,
		HDMI0_AUDIO_INFO_LINE(2),	/* anything other than 0 */
		~HDMI0_AUDIO_INFO_LINE_MASK);

	WREG32_AND(HDMI0_GENERIC_PACKET_CONTROL + offset,
		~(HDMI0_GENERIC0_SEND |
		HDMI0_GENERIC0_CONT |
		HDMI0_GENERIC0_UPDATE |
		HDMI0_GENERIC1_SEND |
		HDMI0_GENERIC1_CONT |
		HDMI0_GENERIC0_LINE_MASK |
		HDMI0_GENERIC1_LINE_MASK));

	WREG32_P(HDMI0_60958_0 + offset,
		HDMI0_60958_CS_CHANNEL_NUMBER_L(1),
		~(HDMI0_60958_CS_CHANNEL_NUMBER_L_MASK |
		HDMI0_60958_CS_CLOCK_ACCURACY_MASK));

	WREG32_P(HDMI0_60958_1 + offset,
		HDMI0_60958_CS_CHANNEL_NUMBER_R(2),
		~HDMI0_60958_CS_CHANNEL_NUMBER_R_MASK);
}

/* Set or clear the HDMI general-control AVMUTE flag. */
void r600_set_mute(struct drm_encoder *encoder, u32 offset, bool mute)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;

	if (mute)
		WREG32_OR(HDMI0_GC + offset, HDMI0_GC_AVMUTE);
	else
		WREG32_AND(HDMI0_GC + offset, ~HDMI0_GC_AVMUTE);
}

/**
 * r600_hdmi_update_audio_settings - Update audio infoframe
 *
 * @encoder: drm encoder
 *
 * Gets info about current audio stream and updates audio infoframe.
 */
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
	struct r600_audio_pin audio = r600_audio_status(rdev);
	uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
	struct hdmi_audio_infoframe frame;
	uint32_t offset;
	uint32_t value;
	ssize_t err;

	if (!dig->afmt || !dig->afmt->enabled)
		return;
	offset = dig->afmt->offset;

	DRM_DEBUG("%s with %d channels, %d Hz sampling rate, %d bits per sample,\n",
		  r600_hdmi_is_audio_buffer_filled(encoder) ? "playing" : "stopped",
		  audio.channels, audio.rate, audio.bits_per_sample);
	DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n",
		  (int)audio.status_bits, (int)audio.category_code);

	err = hdmi_audio_infoframe_init(&frame);
	if (err < 0) {
		DRM_ERROR("failed to setup audio infoframe\n");
		return;
	}

	frame.channels = audio.channels;

	err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (err < 0) {
		DRM_ERROR("failed to pack audio infoframe\n");
		return;
	}

	value = RREG32(HDMI0_AUDIO_PACKET_CONTROL + offset);
	if (value & HDMI0_AUDIO_TEST_EN)
		WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
		       value & ~HDMI0_AUDIO_TEST_EN);

	WREG32_OR(HDMI0_CONTROL + offset,
		  HDMI0_ERROR_ACK);

	WREG32_AND(HDMI0_INFOFRAME_CONTROL0 + offset,
		   ~HDMI0_AUDIO_INFO_SOURCE);

	r600_hdmi_update_audio_infoframe(encoder, buffer, sizeof(buffer));

	WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
		  HDMI0_AUDIO_INFO_CONT |
		  HDMI0_AUDIO_INFO_UPDATE);
}

/*
 * enable the HDMI engine
 */
void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
	u32 hdmi = HDMI0_ERROR_ACK;

	if (!dig || !dig->afmt)
		return;

	/* Older chipsets require setting HDMI and routing manually */
	if (!ASIC_IS_DCE3(rdev)) {
		if (enable)
			hdmi |= HDMI0_ENABLE;
		switch (radeon_encoder->encoder_id) {
		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
			if (enable) {
				WREG32_OR(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN);
				hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA);
			} else {
				WREG32_AND(AVIVO_TMDSA_CNTL, ~AVIVO_TMDSA_CNTL_HDMI_EN);
			}
			break;
		case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
			if (enable) {
				WREG32_OR(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN);
				hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA);
			} else {
				WREG32_AND(AVIVO_LVTMA_CNTL, ~AVIVO_LVTMA_CNTL_HDMI_EN);
			}
			break;
		case ENCODER_OBJECT_ID_INTERNAL_DDI:
			if (enable) {
				WREG32_OR(DDIA_CNTL, DDIA_HDMI_EN);
				hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA);
			} else {
				WREG32_AND(DDIA_CNTL, ~DDIA_HDMI_EN);
			}
			break;
		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
			if (enable)
				hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA);
			break;
		default:
			dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
				radeon_encoder->encoder_id);
			break;
		}
		WREG32(HDMI0_CONTROL + dig->afmt->offset, hdmi);
	}

	if (rdev->irq.installed) {
		/* if irq is available use it */
		/* XXX: shouldn't need this on any asics.  Double check DCE2/3 */
		if (enable)
			radeon_irq_kms_enable_afmt(rdev, dig->afmt->id);
		else
			radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
	}

	dig->afmt->enabled = enable;

	DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
		  enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
}
gpl-2.0
sammy44nts/linux-kernel
drivers/clk/ti/clk-2xxx.c
1093
9249
/* * OMAP2 Clock init * * Copyright (C) 2013 Texas Instruments, Inc * Tero Kristo (t-kristo@ti.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/list.h> #include <linux/clk-provider.h> #include <linux/clk/ti.h> static struct ti_dt_clk omap2xxx_clks[] = { DT_CLK(NULL, "func_32k_ck", "func_32k_ck"), DT_CLK(NULL, "secure_32k_ck", "secure_32k_ck"), DT_CLK(NULL, "virt_12m_ck", "virt_12m_ck"), DT_CLK(NULL, "virt_13m_ck", "virt_13m_ck"), DT_CLK(NULL, "virt_19200000_ck", "virt_19200000_ck"), DT_CLK(NULL, "virt_26m_ck", "virt_26m_ck"), DT_CLK(NULL, "aplls_clkin_ck", "aplls_clkin_ck"), DT_CLK(NULL, "aplls_clkin_x2_ck", "aplls_clkin_x2_ck"), DT_CLK(NULL, "osc_ck", "osc_ck"), DT_CLK(NULL, "sys_ck", "sys_ck"), DT_CLK(NULL, "alt_ck", "alt_ck"), DT_CLK(NULL, "mcbsp_clks", "mcbsp_clks"), DT_CLK(NULL, "dpll_ck", "dpll_ck"), DT_CLK(NULL, "apll96_ck", "apll96_ck"), DT_CLK(NULL, "apll54_ck", "apll54_ck"), DT_CLK(NULL, "func_54m_ck", "func_54m_ck"), DT_CLK(NULL, "core_ck", "core_ck"), DT_CLK(NULL, "func_96m_ck", "func_96m_ck"), DT_CLK(NULL, "func_48m_ck", "func_48m_ck"), DT_CLK(NULL, "func_12m_ck", "func_12m_ck"), DT_CLK(NULL, "sys_clkout_src", "sys_clkout_src"), DT_CLK(NULL, "sys_clkout", "sys_clkout"), DT_CLK(NULL, "emul_ck", "emul_ck"), DT_CLK(NULL, "mpu_ck", "mpu_ck"), DT_CLK(NULL, "dsp_fck", "dsp_fck"), DT_CLK(NULL, "gfx_3d_fck", "gfx_3d_fck"), DT_CLK(NULL, "gfx_2d_fck", "gfx_2d_fck"), DT_CLK(NULL, "gfx_ick", "gfx_ick"), DT_CLK("omapdss_dss", "ick", "dss_ick"), DT_CLK(NULL, "dss_ick", "dss_ick"), DT_CLK(NULL, "dss1_fck", "dss1_fck"), DT_CLK(NULL, 
"dss2_fck", "dss2_fck"), DT_CLK(NULL, "dss_54m_fck", "dss_54m_fck"), DT_CLK(NULL, "core_l3_ck", "core_l3_ck"), DT_CLK(NULL, "ssi_fck", "ssi_ssr_sst_fck"), DT_CLK(NULL, "usb_l4_ick", "usb_l4_ick"), DT_CLK(NULL, "l4_ck", "l4_ck"), DT_CLK(NULL, "ssi_l4_ick", "ssi_l4_ick"), DT_CLK(NULL, "gpt1_ick", "gpt1_ick"), DT_CLK(NULL, "gpt1_fck", "gpt1_fck"), DT_CLK(NULL, "gpt2_ick", "gpt2_ick"), DT_CLK(NULL, "gpt2_fck", "gpt2_fck"), DT_CLK(NULL, "gpt3_ick", "gpt3_ick"), DT_CLK(NULL, "gpt3_fck", "gpt3_fck"), DT_CLK(NULL, "gpt4_ick", "gpt4_ick"), DT_CLK(NULL, "gpt4_fck", "gpt4_fck"), DT_CLK(NULL, "gpt5_ick", "gpt5_ick"), DT_CLK(NULL, "gpt5_fck", "gpt5_fck"), DT_CLK(NULL, "gpt6_ick", "gpt6_ick"), DT_CLK(NULL, "gpt6_fck", "gpt6_fck"), DT_CLK(NULL, "gpt7_ick", "gpt7_ick"), DT_CLK(NULL, "gpt7_fck", "gpt7_fck"), DT_CLK(NULL, "gpt8_ick", "gpt8_ick"), DT_CLK(NULL, "gpt8_fck", "gpt8_fck"), DT_CLK(NULL, "gpt9_ick", "gpt9_ick"), DT_CLK(NULL, "gpt9_fck", "gpt9_fck"), DT_CLK(NULL, "gpt10_ick", "gpt10_ick"), DT_CLK(NULL, "gpt10_fck", "gpt10_fck"), DT_CLK(NULL, "gpt11_ick", "gpt11_ick"), DT_CLK(NULL, "gpt11_fck", "gpt11_fck"), DT_CLK(NULL, "gpt12_ick", "gpt12_ick"), DT_CLK(NULL, "gpt12_fck", "gpt12_fck"), DT_CLK("omap-mcbsp.1", "ick", "mcbsp1_ick"), DT_CLK(NULL, "mcbsp1_ick", "mcbsp1_ick"), DT_CLK(NULL, "mcbsp1_fck", "mcbsp1_fck"), DT_CLK("omap-mcbsp.2", "ick", "mcbsp2_ick"), DT_CLK(NULL, "mcbsp2_ick", "mcbsp2_ick"), DT_CLK(NULL, "mcbsp2_fck", "mcbsp2_fck"), DT_CLK("omap2_mcspi.1", "ick", "mcspi1_ick"), DT_CLK(NULL, "mcspi1_ick", "mcspi1_ick"), DT_CLK(NULL, "mcspi1_fck", "mcspi1_fck"), DT_CLK("omap2_mcspi.2", "ick", "mcspi2_ick"), DT_CLK(NULL, "mcspi2_ick", "mcspi2_ick"), DT_CLK(NULL, "mcspi2_fck", "mcspi2_fck"), DT_CLK(NULL, "uart1_ick", "uart1_ick"), DT_CLK(NULL, "uart1_fck", "uart1_fck"), DT_CLK(NULL, "uart2_ick", "uart2_ick"), DT_CLK(NULL, "uart2_fck", "uart2_fck"), DT_CLK(NULL, "uart3_ick", "uart3_ick"), DT_CLK(NULL, "uart3_fck", "uart3_fck"), DT_CLK(NULL, "gpios_ick", "gpios_ick"), 
DT_CLK(NULL, "gpios_fck", "gpios_fck"), DT_CLK("omap_wdt", "ick", "mpu_wdt_ick"), DT_CLK(NULL, "mpu_wdt_ick", "mpu_wdt_ick"), DT_CLK(NULL, "mpu_wdt_fck", "mpu_wdt_fck"), DT_CLK(NULL, "sync_32k_ick", "sync_32k_ick"), DT_CLK(NULL, "wdt1_ick", "wdt1_ick"), DT_CLK(NULL, "omapctrl_ick", "omapctrl_ick"), DT_CLK("omap24xxcam", "fck", "cam_fck"), DT_CLK(NULL, "cam_fck", "cam_fck"), DT_CLK("omap24xxcam", "ick", "cam_ick"), DT_CLK(NULL, "cam_ick", "cam_ick"), DT_CLK(NULL, "mailboxes_ick", "mailboxes_ick"), DT_CLK(NULL, "wdt4_ick", "wdt4_ick"), DT_CLK(NULL, "wdt4_fck", "wdt4_fck"), DT_CLK(NULL, "mspro_ick", "mspro_ick"), DT_CLK(NULL, "mspro_fck", "mspro_fck"), DT_CLK(NULL, "fac_ick", "fac_ick"), DT_CLK(NULL, "fac_fck", "fac_fck"), DT_CLK("omap_hdq.0", "ick", "hdq_ick"), DT_CLK(NULL, "hdq_ick", "hdq_ick"), DT_CLK("omap_hdq.0", "fck", "hdq_fck"), DT_CLK(NULL, "hdq_fck", "hdq_fck"), DT_CLK("omap_i2c.1", "ick", "i2c1_ick"), DT_CLK(NULL, "i2c1_ick", "i2c1_ick"), DT_CLK("omap_i2c.2", "ick", "i2c2_ick"), DT_CLK(NULL, "i2c2_ick", "i2c2_ick"), DT_CLK(NULL, "gpmc_fck", "gpmc_fck"), DT_CLK(NULL, "sdma_fck", "sdma_fck"), DT_CLK(NULL, "sdma_ick", "sdma_ick"), DT_CLK(NULL, "sdrc_ick", "sdrc_ick"), DT_CLK(NULL, "des_ick", "des_ick"), DT_CLK("omap-sham", "ick", "sha_ick"), DT_CLK(NULL, "sha_ick", "sha_ick"), DT_CLK("omap_rng", "ick", "rng_ick"), DT_CLK(NULL, "rng_ick", "rng_ick"), DT_CLK("omap-aes", "ick", "aes_ick"), DT_CLK(NULL, "aes_ick", "aes_ick"), DT_CLK(NULL, "pka_ick", "pka_ick"), DT_CLK(NULL, "usb_fck", "usb_fck"), DT_CLK(NULL, "timer_32k_ck", "func_32k_ck"), DT_CLK(NULL, "timer_sys_ck", "sys_ck"), DT_CLK(NULL, "timer_ext_ck", "alt_ck"), { .node_name = NULL }, }; static struct ti_dt_clk omap2420_clks[] = { DT_CLK(NULL, "sys_clkout2_src", "sys_clkout2_src"), DT_CLK(NULL, "sys_clkout2", "sys_clkout2"), DT_CLK(NULL, "dsp_ick", "dsp_ick"), DT_CLK(NULL, "iva1_ifck", "iva1_ifck"), DT_CLK(NULL, "iva1_mpu_int_ifck", "iva1_mpu_int_ifck"), DT_CLK(NULL, "wdt3_ick", "wdt3_ick"), DT_CLK(NULL, 
"wdt3_fck", "wdt3_fck"), DT_CLK("mmci-omap.0", "ick", "mmc_ick"), DT_CLK(NULL, "mmc_ick", "mmc_ick"), DT_CLK("mmci-omap.0", "fck", "mmc_fck"), DT_CLK(NULL, "mmc_fck", "mmc_fck"), DT_CLK(NULL, "eac_ick", "eac_ick"), DT_CLK(NULL, "eac_fck", "eac_fck"), DT_CLK(NULL, "i2c1_fck", "i2c1_fck"), DT_CLK(NULL, "i2c2_fck", "i2c2_fck"), DT_CLK(NULL, "vlynq_ick", "vlynq_ick"), DT_CLK(NULL, "vlynq_fck", "vlynq_fck"), DT_CLK("musb-hdrc", "fck", "osc_ck"), { .node_name = NULL }, }; static struct ti_dt_clk omap2430_clks[] = { DT_CLK("twl", "fck", "osc_ck"), DT_CLK(NULL, "iva2_1_ick", "iva2_1_ick"), DT_CLK(NULL, "mdm_ick", "mdm_ick"), DT_CLK(NULL, "mdm_osc_ck", "mdm_osc_ck"), DT_CLK("omap-mcbsp.3", "ick", "mcbsp3_ick"), DT_CLK(NULL, "mcbsp3_ick", "mcbsp3_ick"), DT_CLK(NULL, "mcbsp3_fck", "mcbsp3_fck"), DT_CLK("omap-mcbsp.4", "ick", "mcbsp4_ick"), DT_CLK(NULL, "mcbsp4_ick", "mcbsp4_ick"), DT_CLK(NULL, "mcbsp4_fck", "mcbsp4_fck"), DT_CLK("omap-mcbsp.5", "ick", "mcbsp5_ick"), DT_CLK(NULL, "mcbsp5_ick", "mcbsp5_ick"), DT_CLK(NULL, "mcbsp5_fck", "mcbsp5_fck"), DT_CLK("omap2_mcspi.3", "ick", "mcspi3_ick"), DT_CLK(NULL, "mcspi3_ick", "mcspi3_ick"), DT_CLK(NULL, "mcspi3_fck", "mcspi3_fck"), DT_CLK(NULL, "icr_ick", "icr_ick"), DT_CLK(NULL, "i2chs1_fck", "i2chs1_fck"), DT_CLK(NULL, "i2chs2_fck", "i2chs2_fck"), DT_CLK("musb-omap2430", "ick", "usbhs_ick"), DT_CLK(NULL, "usbhs_ick", "usbhs_ick"), DT_CLK("omap_hsmmc.0", "ick", "mmchs1_ick"), DT_CLK(NULL, "mmchs1_ick", "mmchs1_ick"), DT_CLK(NULL, "mmchs1_fck", "mmchs1_fck"), DT_CLK("omap_hsmmc.1", "ick", "mmchs2_ick"), DT_CLK(NULL, "mmchs2_ick", "mmchs2_ick"), DT_CLK(NULL, "mmchs2_fck", "mmchs2_fck"), DT_CLK(NULL, "gpio5_ick", "gpio5_ick"), DT_CLK(NULL, "gpio5_fck", "gpio5_fck"), DT_CLK(NULL, "mdm_intc_ick", "mdm_intc_ick"), DT_CLK("omap_hsmmc.0", "mmchsdb_fck", "mmchsdb1_fck"), DT_CLK(NULL, "mmchsdb1_fck", "mmchsdb1_fck"), DT_CLK("omap_hsmmc.1", "mmchsdb_fck", "mmchsdb2_fck"), DT_CLK(NULL, "mmchsdb2_fck", "mmchsdb2_fck"), { .node_name = NULL }, 
}; static const char *enable_init_clks[] = { "apll96_ck", "apll54_ck", "sync_32k_ick", "omapctrl_ick", "gpmc_fck", "sdrc_ick", }; enum { OMAP2_SOC_OMAP2420, OMAP2_SOC_OMAP2430, }; static int __init omap2xxx_dt_clk_init(int soc_type) { ti_dt_clocks_register(omap2xxx_clks); if (soc_type == OMAP2_SOC_OMAP2420) ti_dt_clocks_register(omap2420_clks); else ti_dt_clocks_register(omap2430_clks); omap2xxx_clkt_vps_init(); omap2_clk_disable_autoidle_all(); omap2_clk_enable_init_clocks(enable_init_clks, ARRAY_SIZE(enable_init_clks)); pr_info("Clocking rate (Crystal/DPLL/MPU): %ld.%01ld/%ld/%ld MHz\n", (clk_get_rate(clk_get_sys(NULL, "sys_ck")) / 1000000), (clk_get_rate(clk_get_sys(NULL, "sys_ck")) / 100000) % 10, (clk_get_rate(clk_get_sys(NULL, "dpll_ck")) / 1000000), (clk_get_rate(clk_get_sys(NULL, "mpu_ck")) / 1000000)); return 0; } int __init omap2420_dt_clk_init(void) { return omap2xxx_dt_clk_init(OMAP2_SOC_OMAP2420); } int __init omap2430_dt_clk_init(void) { return omap2xxx_dt_clk_init(OMAP2_SOC_OMAP2430); }
gpl-2.0
EdwinMoq/android_kernel_lge_omap4-common
sound/soc/codecs/ak4641.c
2373
18286
/*
 * ASoC codec driver for the AKM AK4641 (complete flattened file).
 * Layout: private state + register cache defaults, de-emphasis
 * handling, kcontrols/DAPM widgets and routes, DAI callbacks
 * (sysclk, hw_params, fmt, mute), bias-level management with
 * optional power/nPDN GPIOs from platform data, and I2C glue.
 */
/* * ak4641.c -- AK4641 ALSA Soc Audio driver * * Copyright (C) 2008 Harald Welte <laforge@gnufiish.org> * Copyright (C) 2011 Dmitry Artamonow <mad_soft@inbox.ru> * * Based on ak4535.c by Richard Purdie * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/pm.h> #include <linux/i2c.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/initval.h> #include <sound/tlv.h> #include <sound/ak4641.h> #include "ak4641.h" /* codec private data */ struct ak4641_priv { struct snd_soc_codec *codec; unsigned int sysclk; int deemph; int playback_fs; }; /* * ak4641 register cache */ static const u8 ak4641_reg[AK4641_CACHEREGNUM] = { 0x00, 0x80, 0x00, 0x80, 0x02, 0x00, 0x11, 0x05, 0x00, 0x00, 0x36, 0x10, 0x00, 0x00, 0x57, 0x00, 0x88, 0x88, 0x08, 0x08 }; static const int deemph_settings[] = {44100, 0, 48000, 32000}; static int ak4641_set_deemph(struct snd_soc_codec *codec) { struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec); int i, best = 0; for (i = 0 ; i < ARRAY_SIZE(deemph_settings); i++) { /* if deemphasis is on, select the nearest available rate */ if (ak4641->deemph && deemph_settings[i] != 0 && abs(deemph_settings[i] - ak4641->playback_fs) < abs(deemph_settings[best] - ak4641->playback_fs)) best = i; if (!ak4641->deemph && deemph_settings[i] == 0) best = i; } dev_dbg(codec->dev, "Set deemphasis %d\n", best); return snd_soc_update_bits(codec, AK4641_DAC, 0x3, best); } static int ak4641_put_deemph(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec); int deemph =
/* (ak4641_put_deemph continues: reject values > 1, then re-apply) */
ucontrol->value.enumerated.item[0]; if (deemph > 1) return -EINVAL; ak4641->deemph = deemph; return ak4641_set_deemph(codec); } static int ak4641_get_deemph(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec); ucontrol->value.enumerated.item[0] = ak4641->deemph; return 0; }; static const char *ak4641_mono_out[] = {"(L + R)/2", "Hi-Z"}; static const char *ak4641_hp_out[] = {"Stereo", "Mono"}; static const char *ak4641_mic_select[] = {"Internal", "External"}; static const char *ak4641_mic_or_dac[] = {"Microphone", "Voice DAC"}; static const DECLARE_TLV_DB_SCALE(mono_gain_tlv, -1700, 2300, 0); static const DECLARE_TLV_DB_SCALE(mic_boost_tlv, 0, 2000, 0); static const DECLARE_TLV_DB_SCALE(eq_tlv, -1050, 150, 0); static const DECLARE_TLV_DB_SCALE(master_tlv, -12750, 50, 0); static const DECLARE_TLV_DB_SCALE(mic_stereo_sidetone_tlv, -2700, 300, 0); static const DECLARE_TLV_DB_SCALE(mic_mono_sidetone_tlv, -400, 400, 0); static const DECLARE_TLV_DB_SCALE(capture_tlv, -800, 50, 0); static const DECLARE_TLV_DB_SCALE(alc_tlv, -800, 50, 0); static const DECLARE_TLV_DB_SCALE(aux_in_tlv, -2100, 300, 0); static const struct soc_enum ak4641_mono_out_enum = SOC_ENUM_SINGLE(AK4641_SIG1, 6, 2, ak4641_mono_out); static const struct soc_enum ak4641_hp_out_enum = SOC_ENUM_SINGLE(AK4641_MODE2, 2, 2, ak4641_hp_out); static const struct soc_enum ak4641_mic_select_enum = SOC_ENUM_SINGLE(AK4641_MIC, 1, 2, ak4641_mic_select); static const struct soc_enum ak4641_mic_or_dac_enum = SOC_ENUM_SINGLE(AK4641_BTIF, 4, 2, ak4641_mic_or_dac); static const struct snd_kcontrol_new ak4641_snd_controls[] = { SOC_ENUM("Mono 1 Output", ak4641_mono_out_enum), SOC_SINGLE_TLV("Mono 1 Gain Volume", AK4641_SIG1, 7, 1, 1, mono_gain_tlv), SOC_ENUM("Headphone Output", ak4641_hp_out_enum), SOC_SINGLE_BOOL_EXT("Playback Deemphasis Switch", 0, ak4641_get_deemph, ak4641_put_deemph),
/* remaining ALC / volume / EQ controls, then the DAPM mixer tables */
SOC_SINGLE_TLV("Mic Boost Volume", AK4641_MIC, 0, 1, 0, mic_boost_tlv), SOC_SINGLE("ALC Operation Time", AK4641_TIMER, 0, 3, 0), SOC_SINGLE("ALC Recovery Time", AK4641_TIMER, 2, 3, 0), SOC_SINGLE("ALC ZC Time", AK4641_TIMER, 4, 3, 0), SOC_SINGLE("ALC 1 Switch", AK4641_ALC1, 5, 1, 0), SOC_SINGLE_TLV("ALC Volume", AK4641_ALC2, 0, 71, 0, alc_tlv), SOC_SINGLE("Left Out Enable Switch", AK4641_SIG2, 1, 1, 0), SOC_SINGLE("Right Out Enable Switch", AK4641_SIG2, 0, 1, 0), SOC_SINGLE_TLV("Capture Volume", AK4641_PGA, 0, 71, 0, capture_tlv), SOC_DOUBLE_R_TLV("Master Playback Volume", AK4641_LATT, AK4641_RATT, 0, 255, 1, master_tlv), SOC_SINGLE_TLV("AUX In Volume", AK4641_VOL, 0, 15, 0, aux_in_tlv), SOC_SINGLE("Equalizer Switch", AK4641_DAC, 2, 1, 0), SOC_SINGLE_TLV("EQ1 100 Hz Volume", AK4641_EQLO, 0, 15, 1, eq_tlv), SOC_SINGLE_TLV("EQ2 250 Hz Volume", AK4641_EQLO, 4, 15, 1, eq_tlv), SOC_SINGLE_TLV("EQ3 1 kHz Volume", AK4641_EQMID, 0, 15, 1, eq_tlv), SOC_SINGLE_TLV("EQ4 3.5 kHz Volume", AK4641_EQMID, 4, 15, 1, eq_tlv), SOC_SINGLE_TLV("EQ5 10 kHz Volume", AK4641_EQHI, 0, 15, 1, eq_tlv), }; /* Mono 1 Mixer */ static const struct snd_kcontrol_new ak4641_mono1_mixer_controls[] = { SOC_DAPM_SINGLE_TLV("Mic Mono Sidetone Volume", AK4641_VOL, 7, 1, 0, mic_mono_sidetone_tlv), SOC_DAPM_SINGLE("Mic Mono Sidetone Switch", AK4641_SIG1, 4, 1, 0), SOC_DAPM_SINGLE("Mono Playback Switch", AK4641_SIG1, 5, 1, 0), }; /* Stereo Mixer */ static const struct snd_kcontrol_new ak4641_stereo_mixer_controls[] = { SOC_DAPM_SINGLE_TLV("Mic Sidetone Volume", AK4641_VOL, 4, 7, 0, mic_stereo_sidetone_tlv), SOC_DAPM_SINGLE("Mic Sidetone Switch", AK4641_SIG2, 4, 1, 0), SOC_DAPM_SINGLE("Playback Switch", AK4641_SIG2, 7, 1, 0), SOC_DAPM_SINGLE("Aux Bypass Switch", AK4641_SIG2, 5, 1, 0), }; /* Input Mixer */ static const struct snd_kcontrol_new ak4641_input_mixer_controls[] = { SOC_DAPM_SINGLE("Mic Capture Switch", AK4641_MIC, 2, 1, 0), SOC_DAPM_SINGLE("Aux Capture Switch", AK4641_MIC, 5, 1, 0), }; /* Mic mux
*/ static const struct snd_kcontrol_new ak4641_mic_mux_control = SOC_DAPM_ENUM("Mic Select", ak4641_mic_select_enum); /* Input mux */ static const struct snd_kcontrol_new ak4641_input_mux_control = SOC_DAPM_ENUM("Input Select", ak4641_mic_or_dac_enum); /* mono 2 switch */ static const struct snd_kcontrol_new ak4641_mono2_control = SOC_DAPM_SINGLE("Switch", AK4641_SIG1, 0, 1, 0); /* ak4641 dapm widgets */ static const struct snd_soc_dapm_widget ak4641_dapm_widgets[] = { SND_SOC_DAPM_MIXER("Stereo Mixer", SND_SOC_NOPM, 0, 0, &ak4641_stereo_mixer_controls[0], ARRAY_SIZE(ak4641_stereo_mixer_controls)), SND_SOC_DAPM_MIXER("Mono1 Mixer", SND_SOC_NOPM, 0, 0, &ak4641_mono1_mixer_controls[0], ARRAY_SIZE(ak4641_mono1_mixer_controls)), SND_SOC_DAPM_MIXER("Input Mixer", SND_SOC_NOPM, 0, 0, &ak4641_input_mixer_controls[0], ARRAY_SIZE(ak4641_input_mixer_controls)), SND_SOC_DAPM_MUX("Mic Mux", SND_SOC_NOPM, 0, 0, &ak4641_mic_mux_control), SND_SOC_DAPM_MUX("Input Mux", SND_SOC_NOPM, 0, 0, &ak4641_input_mux_control), SND_SOC_DAPM_SWITCH("Mono 2 Enable", SND_SOC_NOPM, 0, 0, &ak4641_mono2_control), SND_SOC_DAPM_OUTPUT("LOUT"), SND_SOC_DAPM_OUTPUT("ROUT"), SND_SOC_DAPM_OUTPUT("MOUT1"), SND_SOC_DAPM_OUTPUT("MOUT2"), SND_SOC_DAPM_OUTPUT("MICOUT"), SND_SOC_DAPM_ADC("ADC", "HiFi Capture", AK4641_PM1, 0, 0), SND_SOC_DAPM_PGA("Mic", AK4641_PM1, 1, 0, NULL, 0), SND_SOC_DAPM_PGA("AUX In", AK4641_PM1, 2, 0, NULL, 0), SND_SOC_DAPM_PGA("Mono Out", AK4641_PM1, 3, 0, NULL, 0), SND_SOC_DAPM_PGA("Line Out", AK4641_PM1, 4, 0, NULL, 0), SND_SOC_DAPM_DAC("DAC", "HiFi Playback", AK4641_PM2, 0, 0), SND_SOC_DAPM_PGA("Mono Out 2", AK4641_PM2, 3, 0, NULL, 0), SND_SOC_DAPM_ADC("Voice ADC", "Voice Capture", AK4641_BTIF, 0, 0),
/*
 * NOTE(review): "Voice DAC" below is registered with the
 * SND_SOC_DAPM_ADC() macro even though it is a playback widget;
 * later upstream kernels use SND_SOC_DAPM_DAC() here — confirm
 * whether this is intentional before changing.
 */
SND_SOC_DAPM_ADC("Voice DAC", "Voice Playback", AK4641_BTIF, 1, 0), SND_SOC_DAPM_MICBIAS("Mic Int Bias", AK4641_MIC, 3, 0), SND_SOC_DAPM_MICBIAS("Mic Ext Bias", AK4641_MIC, 4, 0), SND_SOC_DAPM_INPUT("MICIN"), SND_SOC_DAPM_INPUT("MICEXT"), SND_SOC_DAPM_INPUT("AUX"),
/* last input widget, the DAPM routing table, then the DAI callbacks */
SND_SOC_DAPM_INPUT("AIN"), }; static const struct snd_soc_dapm_route ak4641_audio_map[] = { /* Stereo Mixer */ {"Stereo Mixer", "Playback Switch", "DAC"}, {"Stereo Mixer", "Mic Sidetone Switch", "Input Mux"}, {"Stereo Mixer", "Aux Bypass Switch", "AUX In"}, /* Mono 1 Mixer */ {"Mono1 Mixer", "Mic Mono Sidetone Switch", "Input Mux"}, {"Mono1 Mixer", "Mono Playback Switch", "DAC"}, /* Mic */ {"Mic", NULL, "AIN"}, {"Mic Mux", "Internal", "Mic Int Bias"}, {"Mic Mux", "External", "Mic Ext Bias"}, {"Mic Int Bias", NULL, "MICIN"}, {"Mic Ext Bias", NULL, "MICEXT"}, {"MICOUT", NULL, "Mic Mux"}, /* Input Mux */ {"Input Mux", "Microphone", "Mic"}, {"Input Mux", "Voice DAC", "Voice DAC"}, /* Line Out */ {"LOUT", NULL, "Line Out"}, {"ROUT", NULL, "Line Out"}, {"Line Out", NULL, "Stereo Mixer"}, /* Mono 1 Out */ {"MOUT1", NULL, "Mono Out"}, {"Mono Out", NULL, "Mono1 Mixer"}, /* Mono 2 Out */ {"MOUT2", NULL, "Mono 2 Enable"}, {"Mono 2 Enable", "Switch", "Mono Out 2"}, {"Mono Out 2", NULL, "Stereo Mixer"}, {"Voice ADC", NULL, "Mono 2 Enable"}, /* Aux In */ {"AUX In", NULL, "AUX"}, /* ADC */ {"ADC", NULL, "Input Mixer"}, {"Input Mixer", "Mic Capture Switch", "Mic"}, {"Input Mixer", "Aux Capture Switch", "AUX In"}, }; static int ak4641_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = codec_dai->codec; struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec); ak4641->sysclk = freq; return 0; } static int ak4641_i2s_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_codec *codec = rtd->codec; struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec); int rate = params_rate(params), fs = 256; u8 mode2; if (rate) fs = ak4641->sysclk / rate; else return -EINVAL; /* set fs */ switch (fs) { case 1024: mode2 = (0x2 << 5); break; case 512: mode2 = (0x1 << 5); break; case 256: mode2 =
/* (hw_params continues: program MODE2 fs bits, refresh de-emphasis) */
(0x0 << 5); break; default: dev_err(codec->dev, "Error: unsupported fs=%d\n", fs); return -EINVAL; } snd_soc_update_bits(codec, AK4641_MODE2, (0x3 << 5), mode2); /* Update de-emphasis filter for the new rate */ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { ak4641->playback_fs = rate; ak4641_set_deemph(codec); }; return 0; } static int ak4641_pcm_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u8 btif; /* interface format */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: btif = (0x3 << 5); break; case SND_SOC_DAIFMT_LEFT_J: btif = (0x2 << 5); break; case SND_SOC_DAIFMT_DSP_A: /* MSB after FRM */ btif = (0x0 << 5); break; case SND_SOC_DAIFMT_DSP_B: /* MSB during FRM */ btif = (0x1 << 5); break; default: return -EINVAL; } return snd_soc_update_bits(codec, AK4641_BTIF, (0x3 << 5), btif); } static int ak4641_i2s_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u8 mode1 = 0; /* interface format */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: mode1 = 0x02; break; case SND_SOC_DAIFMT_LEFT_J: mode1 = 0x01; break; default: return -EINVAL; } return snd_soc_write(codec, AK4641_MODE1, mode1); } static int ak4641_mute(struct snd_soc_dai *dai, int mute) { struct snd_soc_codec *codec = dai->codec; return snd_soc_update_bits(codec, AK4641_DAC, 0x20, mute ?
0x20 : 0); } static int ak4641_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { struct ak4641_platform_data *pdata = codec->dev->platform_data; int ret; switch (level) { case SND_SOC_BIAS_ON: /* unmute */ snd_soc_update_bits(codec, AK4641_DAC, 0x20, 0); break; case SND_SOC_BIAS_PREPARE: /* mute */ snd_soc_update_bits(codec, AK4641_DAC, 0x20, 0x20); break; case SND_SOC_BIAS_STANDBY: if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { if (pdata && gpio_is_valid(pdata->gpio_power)) gpio_set_value(pdata->gpio_power, 1); mdelay(1); if (pdata && gpio_is_valid(pdata->gpio_npdn)) gpio_set_value(pdata->gpio_npdn, 1); mdelay(1); ret = snd_soc_cache_sync(codec); if (ret) { dev_err(codec->dev, "Failed to sync cache: %d\n", ret); return ret; } } snd_soc_update_bits(codec, AK4641_PM1, 0x80, 0x80); snd_soc_update_bits(codec, AK4641_PM2, 0x80, 0); break; case SND_SOC_BIAS_OFF: snd_soc_update_bits(codec, AK4641_PM1, 0x80, 0); if (pdata && gpio_is_valid(pdata->gpio_npdn)) gpio_set_value(pdata->gpio_npdn, 0); if (pdata && gpio_is_valid(pdata->gpio_power)) gpio_set_value(pdata->gpio_power, 0); codec->cache_sync = 1; break; } codec->dapm.bias_level = level; return 0; } #define AK4641_RATES (SNDRV_PCM_RATE_8000_48000) #define AK4641_RATES_BT (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\ SNDRV_PCM_RATE_16000) #define AK4641_FORMATS (SNDRV_PCM_FMTBIT_S16_LE) static struct snd_soc_dai_ops ak4641_i2s_dai_ops = { .hw_params = ak4641_i2s_hw_params, .set_fmt = ak4641_i2s_set_dai_fmt, .digital_mute = ak4641_mute, .set_sysclk = ak4641_set_dai_sysclk, }; static struct snd_soc_dai_ops ak4641_pcm_dai_ops = { .hw_params = NULL, /* rates are controlled by BT chip */ .set_fmt = ak4641_pcm_set_dai_fmt, .digital_mute = ak4641_mute, .set_sysclk = ak4641_set_dai_sysclk, }; struct snd_soc_dai_driver ak4641_dai[] = { { .name = "ak4641-hifi", .id = 1, .playback = { .stream_name = "HiFi Playback", .channels_min = 1, .channels_max = 2, .rates = AK4641_RATES, .formats =
AK4641_FORMATS, }, .capture = { .stream_name = "HiFi Capture", .channels_min = 1, .channels_max = 2, .rates = AK4641_RATES, .formats = AK4641_FORMATS, }, .ops = &ak4641_i2s_dai_ops, .symmetric_rates = 1, }, { .name = "ak4641-voice", .id = 1, .playback = { .stream_name = "Voice Playback", .channels_min = 1, .channels_max = 1, .rates = AK4641_RATES_BT, .formats = AK4641_FORMATS, }, .capture = { .stream_name = "Voice Capture", .channels_min = 1, .channels_max = 1, .rates = AK4641_RATES_BT, .formats = AK4641_FORMATS, }, .ops = &ak4641_pcm_dai_ops, .symmetric_rates = 1, }, }; static int ak4641_suspend(struct snd_soc_codec *codec, pm_message_t state) { ak4641_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static int ak4641_resume(struct snd_soc_codec *codec) { ak4641_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return 0; } static int ak4641_probe(struct snd_soc_codec *codec) { struct ak4641_platform_data *pdata = codec->dev->platform_data; int ret; if (pdata) { if (gpio_is_valid(pdata->gpio_power)) { ret = gpio_request_one(pdata->gpio_power, GPIOF_OUT_INIT_LOW, "ak4641 power"); if (ret) goto err_out; } if (gpio_is_valid(pdata->gpio_npdn)) { ret = gpio_request_one(pdata->gpio_npdn, GPIOF_OUT_INIT_LOW, "ak4641 npdn"); if (ret) goto err_gpio; udelay(1); /* > 150 ns */ gpio_set_value(pdata->gpio_npdn, 1); } } ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_I2C); if (ret != 0) { dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret); goto err_register; } /* power on device */ ak4641_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return 0; err_register: if (pdata) { if (gpio_is_valid(pdata->gpio_power)) gpio_set_value(pdata->gpio_power, 0); if (gpio_is_valid(pdata->gpio_npdn)) gpio_free(pdata->gpio_npdn); } err_gpio: if (pdata && gpio_is_valid(pdata->gpio_power)) gpio_free(pdata->gpio_power); err_out: return ret; } static int ak4641_remove(struct snd_soc_codec *codec) { struct ak4641_platform_data *pdata = codec->dev->platform_data; ak4641_set_bias_level(codec,
SND_SOC_BIAS_OFF); if (pdata) { if (gpio_is_valid(pdata->gpio_power)) { gpio_set_value(pdata->gpio_power, 0); gpio_free(pdata->gpio_power); } if (gpio_is_valid(pdata->gpio_npdn)) gpio_free(pdata->gpio_npdn); } return 0; } static struct snd_soc_codec_driver soc_codec_dev_ak4641 = { .probe = ak4641_probe, .remove = ak4641_remove, .suspend = ak4641_suspend, .resume = ak4641_resume, .controls = ak4641_snd_controls, .num_controls = ARRAY_SIZE(ak4641_snd_controls), .dapm_widgets = ak4641_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(ak4641_dapm_widgets), .dapm_routes = ak4641_audio_map, .num_dapm_routes = ARRAY_SIZE(ak4641_audio_map), .set_bias_level = ak4641_set_bias_level, .reg_cache_size = ARRAY_SIZE(ak4641_reg), .reg_word_size = sizeof(u8), .reg_cache_default = ak4641_reg, .reg_cache_step = 1, }; static int __devinit ak4641_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct ak4641_priv *ak4641; int ret; ak4641 = kzalloc(sizeof(struct ak4641_priv), GFP_KERNEL); if (!ak4641) return -ENOMEM; i2c_set_clientdata(i2c, ak4641); ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_ak4641, ak4641_dai, ARRAY_SIZE(ak4641_dai)); if (ret < 0) kfree(ak4641); return ret; } static int __devexit ak4641_i2c_remove(struct i2c_client *i2c) { snd_soc_unregister_codec(&i2c->dev); kfree(i2c_get_clientdata(i2c)); return 0; } static const struct i2c_device_id ak4641_i2c_id[] = { { "ak4641", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ak4641_i2c_id); static struct i2c_driver ak4641_i2c_driver = { .driver = { .name = "ak4641", .owner = THIS_MODULE, }, .probe = ak4641_i2c_probe, .remove = __devexit_p(ak4641_i2c_remove), .id_table = ak4641_i2c_id, }; static int __init ak4641_modinit(void) { int ret; ret = i2c_add_driver(&ak4641_i2c_driver); if (ret != 0) pr_err("Failed to register AK4641 I2C driver: %d\n", ret); return ret; } module_init(ak4641_modinit); static void __exit ak4641_exit(void) { i2c_del_driver(&ak4641_i2c_driver); } module_exit(ak4641_exit);
/* module metadata */
MODULE_DESCRIPTION("SoC AK4641 driver"); MODULE_AUTHOR("Harald Welte <laforge@gnufiish.org>"); MODULE_LICENSE("GPL");
gpl-2.0
detule/lge-linux-msm
drivers/ata/pata_it821x.c
2373
27883
/*
 * libata driver for the ITE IT8211/IT8212 PATA controller (flattened
 * file, cut off mid-way through it821x_check_atapi_dma()'s kernel-doc
 * at the end of this chunk).  The chip has two personalities: plain
 * pass-through IDE, and a "smart" firmware-RAID mode with a reduced
 * command set; most of the complexity below deals with the shared
 * PIO/MWDMA timing register and the per-channel 50/66 MHz clock.
 */
/* * pata_it821x.c - IT821x PATA for new ATA layer * (C) 2005 Red Hat Inc * Alan Cox <alan@lxorguk.ukuu.org.uk> * (C) 2007 Bartlomiej Zolnierkiewicz * * based upon * * it821x.c * * linux/drivers/ide/pci/it821x.c Version 0.09 December 2004 * * Copyright (C) 2004 Red Hat * * May be copied or modified under the terms of the GNU General Public License * Based in part on the ITE vendor provided SCSI driver. * * Documentation available from IT8212F_V04.pdf * http://www.ite.com.tw/EN/products_more.aspx?CategoryID=3&ID=5,91 * Some other documents are NDA. * * The ITE8212 isn't exactly a standard IDE controller. It has two * modes. In pass through mode then it is an IDE controller. In its smart * mode its actually quite a capable hardware raid controller disguised * as an IDE controller. Smart mode only understands DMA read/write and * identify, none of the fancier commands apply. The IT8211 is identical * in other respects but lacks the raid mode. * * Errata: * o Rev 0x10 also requires master/slave hold the same DMA timings and * cannot do ATAPI MWDMA. * o The identify data for raid volumes lacks CHS info (technically ok) * but also fails to set the LBA28 and other bits. We fix these in * the IDE probe quirk code. * o If you write LBA48 sized I/O's (ie > 256 sector) in smart mode * raid then the controller firmware dies * o Smart mode without RAID doesn't clear all the necessary identify * bits to reduce the command set to the one used * * This has a few impacts on the driver * - In pass through mode we do all the work you would expect * - In smart mode the clocking set up is done by the controller generally * but we must watch the other limits and filter. * - There are a few extra vendor commands that actually talk to the * controller but only work PIO with no IRQ. * * Vendor areas of the identify block in smart mode are used for the * timing and policy set up. Each HDD in raid mode also has a serial * block on the disk.
The hardware extra commands are get/set chip status, * rebuild, get rebuild status. * * In Linux the driver supports pass through mode as if the device was * just another IDE controller. If the smart mode is running then * volumes are managed by the controller firmware and each IDE "disk" * is a raid volume. Even more cute - the controller can do automated * hotplug and rebuild. * * The pass through controller itself is a little demented. It has a * flaw that it has a single set of PIO/MWDMA timings per channel so * non UDMA devices restrict each others performance. It also has a * single clock source per channel so mixed UDMA100/133 performance * isn't perfect and we have to pick a clock. Thankfully none of this * matters in smart mode. ATAPI DMA is not currently supported. * * It seems the smart mode is a win for RAID1/RAID10 but otherwise not. * * TODO * - ATAPI and other speed filtering * - RAID configuration ioctls */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/slab.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_it821x" #define DRV_VERSION "0.4.2" struct it821x_dev { unsigned int smart:1, /* Are we in smart raid mode */ timing10:1; /* Rev 0x10 */ u8 clock_mode; /* 0, ATA_50 or ATA_66 */ u8 want[2][2]; /* Mode/Pri log for master slave */ /* We need these for switching the clock when DMA goes on/off The high byte is the 66Mhz timing */ u16 pio[2]; /* Cached PIO values */ u16 mwdma[2]; /* Cached MWDMA values */ u16 udma[2]; /* Cached UDMA values (per drive) */ u16 last_device; /* Master or slave loaded ? */ }; #define ATA_66 0 #define ATA_50 1 #define ATA_ANY 2 #define UDMA_OFF 0 #define MWDMA_OFF 0 /* * We allow users to force the card into non raid mode without * flashing the alternative BIOS. This is also necessary right now * for embedded platforms that cannot run a PC BIOS but are using this * device.
*/ static int it8212_noraid; /** * it821x_program - program the PIO/MWDMA registers * @ap: ATA port * @adev: Device to program * @timing: Timing value (66Mhz in top 8bits, 50 in the low 8) * * Program the PIO/MWDMA timing for this channel according to the * current clock. These share the same register so are managed by * the DMA start/stop sequence as with the old driver. */ static void it821x_program(struct ata_port *ap, struct ata_device *adev, u16 timing) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); struct it821x_dev *itdev = ap->private_data; int channel = ap->port_no; u8 conf; /* Program PIO/MWDMA timing bits */ if (itdev->clock_mode == ATA_66) conf = timing >> 8; else conf = timing & 0xFF; pci_write_config_byte(pdev, 0x54 + 4 * channel, conf); }
/* UDMA timing is per-drive except on rev 0x10, which pairs the drives */
/** * it821x_program_udma - program the UDMA registers * @ap: ATA port * @adev: ATA device to update * @timing: Timing bits. Top 8 are for 66Mhz bottom for 50Mhz * * Program the UDMA timing for this drive according to the * current clock. Handles the dual clocks and also knows about * the errata on the 0x10 revision. The UDMA errata is partly handled * here and partly in start_dma. */ static void it821x_program_udma(struct ata_port *ap, struct ata_device *adev, u16 timing) { struct it821x_dev *itdev = ap->private_data; struct pci_dev *pdev = to_pci_dev(ap->host->dev); int channel = ap->port_no; int unit = adev->devno; u8 conf; /* Program UDMA timing bits */ if (itdev->clock_mode == ATA_66) conf = timing >> 8; else conf = timing & 0xFF; if (itdev->timing10 == 0) pci_write_config_byte(pdev, 0x56 + 4 * channel + unit, conf); else { /* Early revision must be programmed for both together */ pci_write_config_byte(pdev, 0x56 + 4 * channel, conf); pci_write_config_byte(pdev, 0x56 + 4 * channel + 1, conf); } } /** * it821x_clock_strategy * @ap: ATA interface * @adev: ATA device being updated * * Select between the 50 and 66Mhz base clocks to get the best * results for this interface.
*/ static void it821x_clock_strategy(struct ata_port *ap, struct ata_device *adev) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); struct it821x_dev *itdev = ap->private_data; u8 unit = adev->devno; struct ata_device *pair = ata_dev_pair(adev); int clock, altclock; u8 v; int sel = 0; /* Look for the most wanted clocking */ if (itdev->want[0][0] > itdev->want[1][0]) { clock = itdev->want[0][1]; altclock = itdev->want[1][1]; } else { clock = itdev->want[1][1]; altclock = itdev->want[0][1]; } /* Master doesn't care does the slave ? */ if (clock == ATA_ANY) clock = altclock; /* Nobody cares - keep the same clock */ if (clock == ATA_ANY) return; /* No change */ if (clock == itdev->clock_mode) return; /* Load this into the controller */ if (clock == ATA_66) itdev->clock_mode = ATA_66; else { itdev->clock_mode = ATA_50; sel = 1; } pci_read_config_byte(pdev, 0x50, &v); v &= ~(1 << (1 + ap->port_no)); v |= sel << (1 + ap->port_no); pci_write_config_byte(pdev, 0x50, v); /* * Reprogram the UDMA/PIO of the pair drive for the switch * MWDMA will be dealt with by the dma switcher */ if (pair && itdev->udma[1-unit] != UDMA_OFF) { it821x_program_udma(ap, pair, itdev->udma[1-unit]); it821x_program(ap, pair, itdev->pio[1-unit]); } /* * Reprogram the UDMA/PIO of our drive for the switch. * MWDMA will be dealt with by the dma switcher */ if (itdev->udma[unit] != UDMA_OFF) { it821x_program_udma(ap, adev, itdev->udma[unit]); it821x_program(ap, adev, itdev->pio[unit]); } } /** * it821x_passthru_set_piomode - set PIO mode data * @ap: ATA interface * @adev: ATA device * * Configure for PIO mode. This is complicated as the register is * shared by PIO and MWDMA and for both channels.
*/ static void it821x_passthru_set_piomode(struct ata_port *ap, struct ata_device *adev) { /* Spec says 89 ref driver uses 88 */ static const u16 pio[] = { 0xAA88, 0xA382, 0xA181, 0x3332, 0x3121 }; static const u8 pio_want[] = { ATA_66, ATA_66, ATA_66, ATA_66, ATA_ANY }; struct it821x_dev *itdev = ap->private_data; int unit = adev->devno; int mode_wanted = adev->pio_mode - XFER_PIO_0; /* We prefer 66Mhz clock for PIO 0-3, don't care for PIO4 */ itdev->want[unit][1] = pio_want[mode_wanted]; itdev->want[unit][0] = 1; /* PIO is lowest priority */ itdev->pio[unit] = pio[mode_wanted]; it821x_clock_strategy(ap, adev); it821x_program(ap, adev, itdev->pio[unit]); } /** * it821x_passthru_set_dmamode - set initial DMA mode data * @ap: ATA interface * @adev: ATA device * * Set up the DMA modes. The actions taken depend heavily on the mode * to use. If UDMA is used as is hopefully the usual case then the * timing register is private and we need only consider the clock. If * we are using MWDMA then we have to manage the setting ourself as * we switch devices and mode. */ static void it821x_passthru_set_dmamode(struct ata_port *ap, struct ata_device *adev) { static const u16 dma[] = { 0x8866, 0x3222, 0x3121 }; static const u8 mwdma_want[] = { ATA_ANY, ATA_66, ATA_ANY }; static const u16 udma[] = { 0x4433, 0x4231, 0x3121, 0x2121, 0x1111, 0x2211, 0x1111 }; static const u8 udma_want[] = { ATA_ANY, ATA_50, ATA_ANY, ATA_66, ATA_66, ATA_50, ATA_66 }; struct pci_dev *pdev = to_pci_dev(ap->host->dev); struct it821x_dev *itdev = ap->private_data; int channel = ap->port_no; int unit = adev->devno; u8 conf; if (adev->dma_mode >= XFER_UDMA_0) { int mode_wanted = adev->dma_mode - XFER_UDMA_0; itdev->want[unit][1] = udma_want[mode_wanted]; itdev->want[unit][0] = 3; /* UDMA is high priority */ itdev->mwdma[unit] = MWDMA_OFF; itdev->udma[unit] = udma[mode_wanted]; if (mode_wanted >= 5) itdev->udma[unit] |= 0x8080; /* UDMA 5/6 select on */ /* UDMA on.
Again revision 0x10 must do the pair */ pci_read_config_byte(pdev, 0x50, &conf); if (itdev->timing10) conf &= channel ? 0x9F: 0xE7; else conf &= ~ (1 << (3 + 2 * channel + unit)); pci_write_config_byte(pdev, 0x50, conf); it821x_clock_strategy(ap, adev); it821x_program_udma(ap, adev, itdev->udma[unit]); } else { int mode_wanted = adev->dma_mode - XFER_MW_DMA_0; itdev->want[unit][1] = mwdma_want[mode_wanted]; itdev->want[unit][0] = 2; /* MWDMA is low priority */ itdev->mwdma[unit] = dma[mode_wanted]; itdev->udma[unit] = UDMA_OFF; /* UDMA bits off - Revision 0x10 do them in pairs */ pci_read_config_byte(pdev, 0x50, &conf); if (itdev->timing10) conf |= channel ? 0x60: 0x18; else conf |= 1 << (3 + 2 * channel + unit); pci_write_config_byte(pdev, 0x50, conf); it821x_clock_strategy(ap, adev); } }
/* The BMDMA start/stop hooks reload the shared timing register on the fly */
/** * it821x_passthru_dma_start - DMA start callback * @qc: Command in progress * * Usually drivers set the DMA timing at the point the set_dmamode call * is made. IT821x however requires we load new timings on the * transitions in some cases. */ static void it821x_passthru_bmdma_start(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ata_device *adev = qc->dev; struct it821x_dev *itdev = ap->private_data; int unit = adev->devno; if (itdev->mwdma[unit] != MWDMA_OFF) it821x_program(ap, adev, itdev->mwdma[unit]); else if (itdev->udma[unit] != UDMA_OFF && itdev->timing10) it821x_program_udma(ap, adev, itdev->udma[unit]); ata_bmdma_start(qc); } /** * it821x_passthru_dma_stop - DMA stop callback * @qc: ATA command * * We loaded new timings in dma_start, as a result we need to restore * the PIO timings in dma_stop so that the next command issue gets the * right clock values.
*/ static void it821x_passthru_bmdma_stop(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ata_device *adev = qc->dev; struct it821x_dev *itdev = ap->private_data; int unit = adev->devno; ata_bmdma_stop(qc); if (itdev->mwdma[unit] != MWDMA_OFF) it821x_program(ap, adev, itdev->pio[unit]); } /** * it821x_passthru_dev_select - Select master/slave * @ap: ATA port * @device: Device number (not pointer) * * Device selection hook. If necessary perform clock switching */ static void it821x_passthru_dev_select(struct ata_port *ap, unsigned int device) { struct it821x_dev *itdev = ap->private_data; if (itdev && device != itdev->last_device) { struct ata_device *adev = &ap->link.device[device]; it821x_program(ap, adev, itdev->pio[adev->devno]); itdev->last_device = device; } ata_sff_dev_select(ap, device); } /** * it821x_smart_qc_issue - wrap qc issue prot * @qc: command * * Wrap the command issue sequence for the IT821x. We need to * perform out own device selection timing loads before the * usual happenings kick off */ static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc) { switch(qc->tf.command) { /* Commands the firmware supports */ case ATA_CMD_READ: case ATA_CMD_READ_EXT: case ATA_CMD_WRITE: case ATA_CMD_WRITE_EXT: case ATA_CMD_PIO_READ: case ATA_CMD_PIO_READ_EXT: case ATA_CMD_PIO_WRITE: case ATA_CMD_PIO_WRITE_EXT: case ATA_CMD_READ_MULTI: case ATA_CMD_READ_MULTI_EXT: case ATA_CMD_WRITE_MULTI: case ATA_CMD_WRITE_MULTI_EXT: case ATA_CMD_ID_ATA: case ATA_CMD_INIT_DEV_PARAMS: case 0xFC: /* Internal 'report rebuild state' */ /* Arguably should just no-op this one */ case ATA_CMD_SET_FEATURES: return ata_bmdma_qc_issue(qc); } printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command); return AC_ERR_DEV; } /** * it821x_passthru_qc_issue - wrap qc issue prot * @qc: command * * Wrap the command issue sequence for the IT821x.
We need to * perform out own device selection timing loads before the * usual happenings kick off */ static unsigned int it821x_passthru_qc_issue(struct ata_queued_cmd *qc) { it821x_passthru_dev_select(qc->ap, qc->dev->devno); return ata_bmdma_qc_issue(qc); } /** * it821x_smart_set_mode - mode setting * @link: interface to set up * @unused: device that failed (error only) * * Use a non standard set_mode function. We don't want to be tuned. * The BIOS configured everything. Our job is not to fiddle. We * read the dma enabled bits from the PCI configuration of the device * and respect them. */ static int it821x_smart_set_mode(struct ata_link *link, struct ata_device **unused) { struct ata_device *dev; ata_for_each_dev(dev, link, ENABLED) { /* We don't really care */ dev->pio_mode = XFER_PIO_0; dev->dma_mode = XFER_MW_DMA_0; /* We do need the right mode information for DMA or PIO and this comes from the current configuration flags */ if (ata_id_has_dma(dev->id)) { ata_dev_info(dev, "configured for DMA\n"); dev->xfer_mode = XFER_MW_DMA_0; dev->xfer_shift = ATA_SHIFT_MWDMA; dev->flags &= ~ATA_DFLAG_PIO; } else { ata_dev_info(dev, "configured for PIO\n"); dev->xfer_mode = XFER_PIO_0; dev->xfer_shift = ATA_SHIFT_PIO; dev->flags |= ATA_DFLAG_PIO; } } return 0; } /** * it821x_dev_config - Called each device identify * @adev: Device that has just been identified * * Perform the initial setup needed for each device that is chip * special. In our case we need to lock the sector count to avoid * blowing the brains out of the firmware with large LBA48 requests * */ static void it821x_dev_config(struct ata_device *adev) { unsigned char model_num[ATA_ID_PROD_LEN + 1]; ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num)); if (adev->max_sectors > 255) adev->max_sectors = 255; if (strstr(model_num, "Integrated Technology Express")) { /* RAID mode */ ata_dev_info(adev, "%sRAID%d volume", adev->id[147] ?
"Bootable " : "", adev->id[129]); if (adev->id[129] != 1) pr_cont("(%dK stripe)", adev->id[146]); pr_cont("\n"); } /* This is a controller firmware triggered funny, don't report the drive faulty! */ adev->horkage &= ~ATA_HORKAGE_DIAGNOSTIC; /* No HPA in 'smart' mode */ adev->horkage |= ATA_HORKAGE_BROKEN_HPA; } /** * it821x_read_id - Hack identify data up * @adev: device to read * @tf: proposed taskfile * @id: buffer for returned ident data * * Query the devices on this firmware driven port and slightly * mash the identify data to stop us and common tools trying to * use features not firmware supported. The firmware itself does * some masking (eg SMART) but not enough. */ static unsigned int it821x_read_id(struct ata_device *adev, struct ata_taskfile *tf, u16 *id) { unsigned int err_mask; unsigned char model_num[ATA_ID_PROD_LEN + 1]; err_mask = ata_do_dev_read_id(adev, tf, id); if (err_mask) return err_mask; ata_id_c_string(id, model_num, ATA_ID_PROD, sizeof(model_num)); id[83] &= ~(1 << 12); /* Cache flush is firmware handled */ id[83] &= ~(1 << 13); /* Ditto for LBA48 flushes */ id[84] &= ~(1 << 6); /* No FUA */ id[85] &= ~(1 << 10); /* No HPA */ id[76] = 0; /* No NCQ/AN etc */ if (strstr(model_num, "Integrated Technology Express")) { /* Set feature bits the firmware neglects */ id[49] |= 0x0300; /* LBA, DMA */ id[83] &= 0x7FFF; id[83] |= 0x4400; /* Word 83 is valid and LBA48 */ id[86] |= 0x0400; /* LBA48 on */ id[ATA_ID_MAJOR_VER] |= 0x1F; /* Clear the serial number because it's different each boot which breaks validation on resume */ memset(&id[ATA_ID_SERNO], 0x20, ATA_ID_SERNO_LEN); } return err_mask; } /** * it821x_check_atapi_dma - ATAPI DMA handler * @qc: Command we are about to issue * * Decide if this ATAPI command can be issued by DMA on this * controller. Return 0 if it can be.
*/ static int it821x_check_atapi_dma(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct it821x_dev *itdev = ap->private_data; /* Only use dma for transfers to/from the media. */ if (ata_qc_raw_nbytes(qc) < 2048) return -EOPNOTSUPP; /* No ATAPI DMA in smart mode */ if (itdev->smart) return -EOPNOTSUPP; /* No ATAPI DMA on rev 10 */ if (itdev->timing10) return -EOPNOTSUPP; /* Cool */ return 0; } /** * it821x_display_disk - display disk setup * @n: Device number * @buf: Buffer block from firmware * * Produce a nice informative display of the device setup as provided * by the firmware. */ static void it821x_display_disk(int n, u8 *buf) { unsigned char id[41]; int mode = 0; char *mtype = ""; char mbuf[8]; char *cbl = "(40 wire cable)"; static const char *types[5] = { "RAID0", "RAID1", "RAID 0+1", "JBOD", "DISK" }; if (buf[52] > 4) /* No Disk */ return; ata_id_c_string((u16 *)buf, id, 0, 41); if (buf[51]) { mode = ffs(buf[51]); mtype = "UDMA"; } else if (buf[49]) { mode = ffs(buf[49]); mtype = "MWDMA"; } if (buf[76]) cbl = ""; if (mode) snprintf(mbuf, 8, "%5s%d", mtype, mode - 1); else strcpy(mbuf, "PIO"); if (buf[52] == 4) printk(KERN_INFO "%d: %-6s %-8s %s %s\n", n, mbuf, types[buf[52]], id, cbl); else printk(KERN_INFO "%d: %-6s %-8s Volume: %1d %s %s\n", n, mbuf, types[buf[52]], buf[53], id, cbl); if (buf[125] < 100) printk(KERN_INFO "%d: Rebuilding: %d%%\n", n, buf[125]); } /** * it821x_firmware_command - issue firmware command * @ap: IT821x port to interrogate * @cmd: command * @len: length * * Issue firmware commands expecting data back from the controller. We * use this to issue commands that do not go via the normal paths. Other * commands such as 0xFC can be issued normally. 
*/ static u8 *it821x_firmware_command(struct ata_port *ap, u8 cmd, int len) { u8 status; int n = 0; u16 *buf = kmalloc(len, GFP_KERNEL); if (buf == NULL) { printk(KERN_ERR "it821x_firmware_command: Out of memory\n"); return NULL; } /* This isn't quite a normal ATA command as we are talking to the firmware not the drives */ ap->ctl |= ATA_NIEN; iowrite8(ap->ctl, ap->ioaddr.ctl_addr); ata_wait_idle(ap); iowrite8(ATA_DEVICE_OBS, ap->ioaddr.device_addr); iowrite8(cmd, ap->ioaddr.command_addr); udelay(1); /* This should be almost immediate but a little paranoia goes a long way. */ while(n++ < 10) { status = ioread8(ap->ioaddr.status_addr); if (status & ATA_ERR) { kfree(buf); printk(KERN_ERR "it821x_firmware_command: rejected\n"); return NULL; } if (status & ATA_DRQ) { ioread16_rep(ap->ioaddr.data_addr, buf, len/2); return (u8 *)buf; } mdelay(1); } kfree(buf); printk(KERN_ERR "it821x_firmware_command: timeout\n"); return NULL; } /** * it821x_probe_firmware - firmware reporting/setup * @ap: IT821x port being probed * * Probe the firmware of the controller by issuing firmware command * 0xFA and analysing the returned data. */ static void it821x_probe_firmware(struct ata_port *ap) { u8 *buf; int i; /* This is a bit ugly as we can't just issue a task file to a device as this is controller magic */ buf = it821x_firmware_command(ap, 0xFA, 512); if (buf != NULL) { printk(KERN_INFO "pata_it821x: Firmware %02X/%02X/%02X%02X\n", buf[505], buf[506], buf[507], buf[508]); for (i = 0; i < 4; i++) it821x_display_disk(i, buf + 128 * i); kfree(buf); } } /** * it821x_port_start - port setup * @ap: ATA port being set up * * The it821x needs to maintain private data structures and also to * use the standard PCI interface which lacks support for this * functionality. 
We instead set up the private data on the port * start hook, and tear it down on port stop */ static int it821x_port_start(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); struct it821x_dev *itdev; u8 conf; int ret = ata_bmdma_port_start(ap); if (ret < 0) return ret; itdev = devm_kzalloc(&pdev->dev, sizeof(struct it821x_dev), GFP_KERNEL); if (itdev == NULL) return -ENOMEM; ap->private_data = itdev; pci_read_config_byte(pdev, 0x50, &conf); if (conf & 1) { itdev->smart = 1; /* Long I/O's although allowed in LBA48 space cause the onboard firmware to enter the twighlight zone */ /* No ATAPI DMA in this mode either */ if (ap->port_no == 0) it821x_probe_firmware(ap); } /* Pull the current clocks from 0x50 */ if (conf & (1 << (1 + ap->port_no))) itdev->clock_mode = ATA_50; else itdev->clock_mode = ATA_66; itdev->want[0][1] = ATA_ANY; itdev->want[1][1] = ATA_ANY; itdev->last_device = -1; if (pdev->revision == 0x10) { itdev->timing10 = 1; /* Need to disable ATAPI DMA for this case */ if (!itdev->smart) printk(KERN_WARNING DRV_NAME": Revision 0x10, workarounds activated.\n"); } return 0; } /** * it821x_rdc_cable - Cable detect for RDC1010 * @ap: port we are checking * * Return the RDC1010 cable type. 
Unlike the IT821x we know how to do * this and can do host side cable detect */ static int it821x_rdc_cable(struct ata_port *ap) { u16 r40; struct pci_dev *pdev = to_pci_dev(ap->host->dev); pci_read_config_word(pdev, 0x40, &r40); if (r40 & (1 << (2 + ap->port_no))) return ATA_CBL_PATA40; return ATA_CBL_PATA80; } static struct scsi_host_template it821x_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations it821x_smart_port_ops = { .inherits = &ata_bmdma_port_ops, .check_atapi_dma= it821x_check_atapi_dma, .qc_issue = it821x_smart_qc_issue, .cable_detect = ata_cable_80wire, .set_mode = it821x_smart_set_mode, .dev_config = it821x_dev_config, .read_id = it821x_read_id, .port_start = it821x_port_start, }; static struct ata_port_operations it821x_passthru_port_ops = { .inherits = &ata_bmdma_port_ops, .check_atapi_dma= it821x_check_atapi_dma, .sff_dev_select = it821x_passthru_dev_select, .bmdma_start = it821x_passthru_bmdma_start, .bmdma_stop = it821x_passthru_bmdma_stop, .qc_issue = it821x_passthru_qc_issue, .cable_detect = ata_cable_unknown, .set_piomode = it821x_passthru_set_piomode, .set_dmamode = it821x_passthru_set_dmamode, .port_start = it821x_port_start, }; static struct ata_port_operations it821x_rdc_port_ops = { .inherits = &ata_bmdma_port_ops, .check_atapi_dma= it821x_check_atapi_dma, .sff_dev_select = it821x_passthru_dev_select, .bmdma_start = it821x_passthru_bmdma_start, .bmdma_stop = it821x_passthru_bmdma_stop, .qc_issue = it821x_passthru_qc_issue, .cable_detect = it821x_rdc_cable, .set_piomode = it821x_passthru_set_piomode, .set_dmamode = it821x_passthru_set_dmamode, .port_start = it821x_port_start, }; static void it821x_disable_raid(struct pci_dev *pdev) { /* Neither the RDC nor the IT8211 */ if (pdev->vendor != PCI_VENDOR_ID_ITE || pdev->device != PCI_DEVICE_ID_ITE_8212) return; /* Reset local CPU, and set BIOS not ready */ pci_write_config_byte(pdev, 0x5E, 0x01); /* Set to bypass mode, and reset PCI bus */ pci_write_config_byte(pdev, 0x50, 
0x00); pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_PARITY | PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); pci_write_config_word(pdev, 0x40, 0xA0F3); pci_write_config_dword(pdev,0x4C, 0x02040204); pci_write_config_byte(pdev, 0x42, 0x36); pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x20); } static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { u8 conf; static const struct ata_port_info info_smart = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &it821x_smart_port_ops }; static const struct ata_port_info info_passthru = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &it821x_passthru_port_ops }; static const struct ata_port_info info_rdc = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &it821x_rdc_port_ops }; static const struct ata_port_info info_rdc_11 = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, /* No UDMA */ .port_ops = &it821x_rdc_port_ops }; const struct ata_port_info *ppi[] = { NULL, NULL }; static char *mode[2] = { "pass through", "smart" }; int rc; rc = pcim_enable_device(pdev); if (rc) return rc; if (pdev->vendor == PCI_VENDOR_ID_RDC) { /* Deal with Vortex86SX */ if (pdev->revision == 0x11) ppi[0] = &info_rdc_11; else ppi[0] = &info_rdc; } else { /* Force the card into bypass mode if so requested */ if (it8212_noraid) { printk(KERN_INFO DRV_NAME ": forcing bypass mode.\n"); it821x_disable_raid(pdev); } pci_read_config_byte(pdev, 0x50, &conf); conf &= 1; printk(KERN_INFO DRV_NAME": controller in %s mode.\n", mode[conf]); if (conf == 0) ppi[0] = &info_passthru; else ppi[0] = &info_smart; } return ata_pci_bmdma_init_one(pdev, ppi, &it821x_sht, NULL, 0); } #ifdef CONFIG_PM static int it821x_reinit_one(struct pci_dev *pdev) { struct ata_host *host = 
dev_get_drvdata(&pdev->dev); int rc; rc = ata_pci_device_do_resume(pdev); if (rc) return rc; /* Resume - turn raid back off if need be */ if (it8212_noraid) it821x_disable_raid(pdev); ata_host_resume(host); return rc; } #endif static const struct pci_device_id it821x[] = { { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8211), }, { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8212), }, { PCI_VDEVICE(RDC, PCI_DEVICE_ID_RDC_D1010), }, { }, }; static struct pci_driver it821x_pci_driver = { .name = DRV_NAME, .id_table = it821x, .probe = it821x_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = it821x_reinit_one, #endif }; module_pci_driver(it821x_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for the IT8211/IT8212 IDE RAID controller"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, it821x); MODULE_VERSION(DRV_VERSION); module_param_named(noraid, it8212_noraid, int, S_IRUGO); MODULE_PARM_DESC(noraid, "Force card into bypass mode");
gpl-2.0
mobius1484/Satori
drivers/misc/iwmc3200top/main.c
4933
17118
/* * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver * drivers/misc/iwmc3200top/main.c * * Copyright (C) 2009 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com> * - * */ #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/debugfs.h> #include <linux/mmc/sdio_ids.h> #include <linux/mmc/sdio_func.h> #include <linux/mmc/sdio.h> #include "iwmc3200top.h" #include "log.h" #include "fw-msg.h" #include "debugfs.h" #define DRIVER_DESCRIPTION "Intel(R) IWMC 3200 Top Driver" #define DRIVER_COPYRIGHT "Copyright (c) 2008 Intel Corporation." #define DRIVER_VERSION "0.1.62" MODULE_DESCRIPTION(DRIVER_DESCRIPTION); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL"); MODULE_AUTHOR(DRIVER_COPYRIGHT); MODULE_FIRMWARE(FW_NAME(FW_API_VER)); static inline int __iwmct_tx(struct iwmct_priv *priv, void *src, int count) { return sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, src, count); } int iwmct_tx(struct iwmct_priv *priv, void *src, int count) { int ret; sdio_claim_host(priv->func); ret = __iwmct_tx(priv, src, count); sdio_release_host(priv->func); return ret; } /* * This workers main task is to wait for OP_OPR_ALIVE * from TOP FW until ALIVE_MSG_TIMOUT timeout is elapsed. 
* When OP_OPR_ALIVE received it will issue * a call to "bus_rescan_devices". */ static void iwmct_rescan_worker(struct work_struct *ws) { struct iwmct_priv *priv; int ret; priv = container_of(ws, struct iwmct_priv, bus_rescan_worker); LOG_INFO(priv, FW_MSG, "Calling bus_rescan\n"); ret = bus_rescan_devices(priv->func->dev.bus); if (ret < 0) LOG_INFO(priv, INIT, "bus_rescan_devices FAILED!!!\n"); } static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg) { switch (msg->hdr.opcode) { case OP_OPR_ALIVE: LOG_INFO(priv, FW_MSG, "Got ALIVE from device, wake rescan\n"); schedule_work(&priv->bus_rescan_worker); break; default: LOG_INFO(priv, FW_MSG, "Received msg opcode 0x%X\n", msg->hdr.opcode); break; } } static void handle_top_message(struct iwmct_priv *priv, u8 *buf, int len) { struct top_msg *msg; msg = (struct top_msg *)buf; if (msg->hdr.type != COMM_TYPE_D2H) { LOG_ERROR(priv, FW_MSG, "Message from TOP with invalid message type 0x%X\n", msg->hdr.type); return; } if (len < sizeof(msg->hdr)) { LOG_ERROR(priv, FW_MSG, "Message from TOP is too short for message header " "received %d bytes, expected at least %zd bytes\n", len, sizeof(msg->hdr)); return; } if (len < le16_to_cpu(msg->hdr.length) + sizeof(msg->hdr)) { LOG_ERROR(priv, FW_MSG, "Message length (%d bytes) is shorter than " "in header (%d bytes)\n", len, le16_to_cpu(msg->hdr.length)); return; } switch (msg->hdr.category) { case COMM_CATEGORY_OPERATIONAL: op_top_message(priv, (struct top_msg *)buf); break; case COMM_CATEGORY_DEBUG: case COMM_CATEGORY_TESTABILITY: case COMM_CATEGORY_DIAGNOSTICS: iwmct_log_top_message(priv, buf, len); break; default: LOG_ERROR(priv, FW_MSG, "Message from TOP with unknown category 0x%X\n", msg->hdr.category); break; } } int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len) { int ret; u8 *buf; LOG_TRACE(priv, FW_MSG, "Sending hcmd:\n"); /* add padding to 256 for IWMC */ ((struct top_msg *)cmd)->hdr.flags |= CMD_FLAG_PADDING_256; LOG_HEXDUMP(FW_MSG, cmd, len); 
if (len > FW_HCMD_BLOCK_SIZE) { LOG_ERROR(priv, FW_MSG, "size %d exceeded hcmd max size %d\n", len, FW_HCMD_BLOCK_SIZE); return -1; } buf = kzalloc(FW_HCMD_BLOCK_SIZE, GFP_KERNEL); if (!buf) { LOG_ERROR(priv, FW_MSG, "kzalloc error, buf size %d\n", FW_HCMD_BLOCK_SIZE); return -1; } memcpy(buf, cmd, len); ret = iwmct_tx(priv, buf, FW_HCMD_BLOCK_SIZE); kfree(buf); return ret; } static void iwmct_irq_read_worker(struct work_struct *ws) { struct iwmct_priv *priv; struct iwmct_work_struct *read_req; __le32 *buf = NULL; int ret; int iosize; u32 barker; bool is_barker; priv = container_of(ws, struct iwmct_priv, isr_worker); LOG_TRACE(priv, IRQ, "enter iwmct_irq_read_worker %p\n", ws); /* --------------------- Handshake with device -------------------- */ sdio_claim_host(priv->func); /* all list manipulations have to be protected by * sdio_claim_host/sdio_release_host */ if (list_empty(&priv->read_req_list)) { LOG_ERROR(priv, IRQ, "read_req_list empty in read worker\n"); goto exit_release; } read_req = list_entry(priv->read_req_list.next, struct iwmct_work_struct, list); list_del(&read_req->list); iosize = read_req->iosize; kfree(read_req); buf = kzalloc(iosize, GFP_KERNEL); if (!buf) { LOG_ERROR(priv, IRQ, "kzalloc error, buf size %d\n", iosize); goto exit_release; } LOG_INFO(priv, IRQ, "iosize=%d, buf=%p, func=%d\n", iosize, buf, priv->func->num); /* read from device */ ret = sdio_memcpy_fromio(priv->func, buf, IWMC_SDIO_DATA_ADDR, iosize); if (ret) { LOG_ERROR(priv, IRQ, "error %d reading buffer\n", ret); goto exit_release; } LOG_HEXDUMP(IRQ, (u8 *)buf, iosize); barker = le32_to_cpu(buf[0]); /* Verify whether it's a barker and if not - treat as regular Rx */ if (barker == IWMC_BARKER_ACK || (barker & BARKER_DNLOAD_BARKER_MSK) == IWMC_BARKER_REBOOT) { /* Valid Barker is equal on first 4 dwords */ is_barker = (buf[1] == buf[0]) && (buf[2] == buf[0]) && (buf[3] == buf[0]); if (!is_barker) { LOG_WARNING(priv, IRQ, "Potentially inconsistent barker " "%08X_%08X_%08X_%08X\n", 
le32_to_cpu(buf[0]), le32_to_cpu(buf[1]), le32_to_cpu(buf[2]), le32_to_cpu(buf[3])); } } else { is_barker = false; } /* Handle Top CommHub message */ if (!is_barker) { sdio_release_host(priv->func); handle_top_message(priv, (u8 *)buf, iosize); goto exit; } else if (barker == IWMC_BARKER_ACK) { /* Handle barkers */ if (atomic_read(&priv->dev_sync) == 0) { LOG_ERROR(priv, IRQ, "ACK barker arrived out-of-sync\n"); goto exit_release; } /* Continuing to FW download (after Sync is completed)*/ atomic_set(&priv->dev_sync, 0); LOG_INFO(priv, IRQ, "ACK barker arrived " "- starting FW download\n"); } else { /* REBOOT barker */ LOG_INFO(priv, IRQ, "Received reboot barker: %x\n", barker); priv->barker = barker; if (barker & BARKER_DNLOAD_SYNC_MSK) { /* Send the same barker back */ ret = __iwmct_tx(priv, buf, iosize); if (ret) { LOG_ERROR(priv, IRQ, "error %d echoing barker\n", ret); goto exit_release; } LOG_INFO(priv, IRQ, "Echoing barker to device\n"); atomic_set(&priv->dev_sync, 1); goto exit_release; } /* Continuing to FW download (without Sync) */ LOG_INFO(priv, IRQ, "No sync requested " "- starting FW download\n"); } sdio_release_host(priv->func); if (priv->dbg.fw_download) iwmct_fw_load(priv); else LOG_ERROR(priv, IRQ, "FW download not allowed\n"); goto exit; exit_release: sdio_release_host(priv->func); exit: kfree(buf); LOG_TRACE(priv, IRQ, "exit iwmct_irq_read_worker\n"); } static void iwmct_irq(struct sdio_func *func) { struct iwmct_priv *priv; int val, ret; int iosize; int addr = IWMC_SDIO_INTR_GET_SIZE_ADDR; struct iwmct_work_struct *read_req; priv = sdio_get_drvdata(func); LOG_TRACE(priv, IRQ, "enter iwmct_irq\n"); /* read the function's status register */ val = sdio_readb(func, IWMC_SDIO_INTR_STATUS_ADDR, &ret); LOG_TRACE(priv, IRQ, "iir value = %d, ret=%d\n", val, ret); if (!val) { LOG_ERROR(priv, IRQ, "iir = 0, exiting ISR\n"); goto exit_clear_intr; } /* * read 2 bytes of the transaction size * IMPORTANT: sdio transaction size has to be read before clearing * 
sdio interrupt!!! */ val = sdio_readb(priv->func, addr++, &ret); iosize = val; val = sdio_readb(priv->func, addr++, &ret); iosize += val << 8; LOG_INFO(priv, IRQ, "READ size %d\n", iosize); if (iosize == 0) { LOG_ERROR(priv, IRQ, "READ size %d, exiting ISR\n", iosize); goto exit_clear_intr; } /* allocate a work structure to pass iosize to the worker */ read_req = kzalloc(sizeof(struct iwmct_work_struct), GFP_KERNEL); if (!read_req) { LOG_ERROR(priv, IRQ, "failed to allocate read_req, exit ISR\n"); goto exit_clear_intr; } INIT_LIST_HEAD(&read_req->list); read_req->iosize = iosize; list_add_tail(&priv->read_req_list, &read_req->list); /* clear the function's interrupt request bit (write 1 to clear) */ sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret); schedule_work(&priv->isr_worker); LOG_TRACE(priv, IRQ, "exit iwmct_irq\n"); return; exit_clear_intr: /* clear the function's interrupt request bit (write 1 to clear) */ sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret); } static int blocks; module_param(blocks, int, 0604); MODULE_PARM_DESC(blocks, "max_blocks_to_send"); static bool dump; module_param(dump, bool, 0604); MODULE_PARM_DESC(dump, "dump_hex_content"); static bool jump = 1; module_param(jump, bool, 0604); static bool direct = 1; module_param(direct, bool, 0604); static bool checksum = 1; module_param(checksum, bool, 0604); static bool fw_download = 1; module_param(fw_download, bool, 0604); static int block_size = IWMC_SDIO_BLK_SIZE; module_param(block_size, int, 0404); static int download_trans_blks = IWMC_DEFAULT_TR_BLK; module_param(download_trans_blks, int, 0604); static bool rubbish_barker; module_param(rubbish_barker, bool, 0604); #ifdef CONFIG_IWMC3200TOP_DEBUG static int log_level[LOG_SRC_MAX]; static unsigned int log_level_argc; module_param_array(log_level, int, &log_level_argc, 0604); MODULE_PARM_DESC(log_level, "log_level"); static int log_level_fw[FW_LOG_SRC_MAX]; static unsigned int log_level_fw_argc; module_param_array(log_level_fw, int, 
&log_level_fw_argc, 0604); MODULE_PARM_DESC(log_level_fw, "log_level_fw"); #endif void iwmct_dbg_init_params(struct iwmct_priv *priv) { #ifdef CONFIG_IWMC3200TOP_DEBUG int i; for (i = 0; i < log_level_argc; i++) { dev_notice(&priv->func->dev, "log_level[%d]=0x%X\n", i, log_level[i]); iwmct_log_set_filter((log_level[i] >> 8) & 0xFF, log_level[i] & 0xFF); } for (i = 0; i < log_level_fw_argc; i++) { dev_notice(&priv->func->dev, "log_level_fw[%d]=0x%X\n", i, log_level_fw[i]); iwmct_log_set_fw_filter((log_level_fw[i] >> 8) & 0xFF, log_level_fw[i] & 0xFF); } #endif priv->dbg.blocks = blocks; LOG_INFO(priv, INIT, "blocks=%d\n", blocks); priv->dbg.dump = (bool)dump; LOG_INFO(priv, INIT, "dump=%d\n", dump); priv->dbg.jump = (bool)jump; LOG_INFO(priv, INIT, "jump=%d\n", jump); priv->dbg.direct = (bool)direct; LOG_INFO(priv, INIT, "direct=%d\n", direct); priv->dbg.checksum = (bool)checksum; LOG_INFO(priv, INIT, "checksum=%d\n", checksum); priv->dbg.fw_download = (bool)fw_download; LOG_INFO(priv, INIT, "fw_download=%d\n", fw_download); priv->dbg.block_size = block_size; LOG_INFO(priv, INIT, "block_size=%d\n", block_size); priv->dbg.download_trans_blks = download_trans_blks; LOG_INFO(priv, INIT, "download_trans_blks=%d\n", download_trans_blks); } /***************************************************************************** * * sysfs attributes * *****************************************************************************/ static ssize_t show_iwmct_fw_version(struct device *d, struct device_attribute *attr, char *buf) { struct iwmct_priv *priv = dev_get_drvdata(d); return sprintf(buf, "%s\n", priv->dbg.label_fw); } static DEVICE_ATTR(cc_label_fw, S_IRUGO, show_iwmct_fw_version, NULL); #ifdef CONFIG_IWMC3200TOP_DEBUG static DEVICE_ATTR(log_level, S_IWUSR | S_IRUGO, show_iwmct_log_level, store_iwmct_log_level); static DEVICE_ATTR(log_level_fw, S_IWUSR | S_IRUGO, show_iwmct_log_level_fw, store_iwmct_log_level_fw); #endif static struct attribute *iwmct_sysfs_entries[] = { 
&dev_attr_cc_label_fw.attr, #ifdef CONFIG_IWMC3200TOP_DEBUG &dev_attr_log_level.attr, &dev_attr_log_level_fw.attr, #endif NULL }; static struct attribute_group iwmct_attribute_group = { .name = NULL, /* put in device directory */ .attrs = iwmct_sysfs_entries, }; static int iwmct_probe(struct sdio_func *func, const struct sdio_device_id *id) { struct iwmct_priv *priv; int ret; int val = 1; int addr = IWMC_SDIO_INTR_ENABLE_ADDR; dev_dbg(&func->dev, "enter iwmct_probe\n"); dev_dbg(&func->dev, "IRQ polling period id %u msecs, HZ is %d\n", jiffies_to_msecs(2147483647), HZ); priv = kzalloc(sizeof(struct iwmct_priv), GFP_KERNEL); if (!priv) { dev_err(&func->dev, "kzalloc error\n"); return -ENOMEM; } priv->func = func; sdio_set_drvdata(func, priv); INIT_WORK(&priv->bus_rescan_worker, iwmct_rescan_worker); INIT_WORK(&priv->isr_worker, iwmct_irq_read_worker); init_waitqueue_head(&priv->wait_q); sdio_claim_host(func); /* FIXME: Remove after it is fixed in the Boot ROM upgrade */ func->enable_timeout = 10; /* In our HW, setting the block size also wakes up the boot rom. 
*/ ret = sdio_set_block_size(func, priv->dbg.block_size); if (ret) { LOG_ERROR(priv, INIT, "sdio_set_block_size() failure: %d\n", ret); goto error_sdio_enable; } ret = sdio_enable_func(func); if (ret) { LOG_ERROR(priv, INIT, "sdio_enable_func() failure: %d\n", ret); goto error_sdio_enable; } /* init reset and dev_sync states */ atomic_set(&priv->reset, 0); atomic_set(&priv->dev_sync, 0); /* init read req queue */ INIT_LIST_HEAD(&priv->read_req_list); /* process configurable parameters */ iwmct_dbg_init_params(priv); ret = sysfs_create_group(&func->dev.kobj, &iwmct_attribute_group); if (ret) { LOG_ERROR(priv, INIT, "Failed to register attributes and " "initialize module_params\n"); goto error_dev_attrs; } iwmct_dbgfs_register(priv, DRV_NAME); if (!priv->dbg.direct && priv->dbg.download_trans_blks > 8) { LOG_INFO(priv, INIT, "Reducing transaction to 8 blocks = 2K (from %d)\n", priv->dbg.download_trans_blks); priv->dbg.download_trans_blks = 8; } priv->trans_len = priv->dbg.download_trans_blks * priv->dbg.block_size; LOG_INFO(priv, INIT, "Transaction length = %d\n", priv->trans_len); ret = sdio_claim_irq(func, iwmct_irq); if (ret) { LOG_ERROR(priv, INIT, "sdio_claim_irq() failure: %d\n", ret); goto error_claim_irq; } /* Enable function's interrupt */ sdio_writeb(priv->func, val, addr, &ret); if (ret) { LOG_ERROR(priv, INIT, "Failure writing to " "Interrupt Enable Register (%d): %d\n", addr, ret); goto error_enable_int; } sdio_release_host(func); LOG_INFO(priv, INIT, "exit iwmct_probe\n"); return ret; error_enable_int: sdio_release_irq(func); error_claim_irq: sdio_disable_func(func); error_dev_attrs: iwmct_dbgfs_unregister(priv->dbgfs); sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group); error_sdio_enable: sdio_release_host(func); return ret; } static void iwmct_remove(struct sdio_func *func) { struct iwmct_work_struct *read_req; struct iwmct_priv *priv = sdio_get_drvdata(func); LOG_INFO(priv, INIT, "enter\n"); sdio_claim_host(func); sdio_release_irq(func); 
sdio_release_host(func); /* Make sure works are finished */ flush_work_sync(&priv->bus_rescan_worker); flush_work_sync(&priv->isr_worker); sdio_claim_host(func); sdio_disable_func(func); sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group); iwmct_dbgfs_unregister(priv->dbgfs); sdio_release_host(func); /* free read requests */ while (!list_empty(&priv->read_req_list)) { read_req = list_entry(priv->read_req_list.next, struct iwmct_work_struct, list); list_del(&read_req->list); kfree(read_req); } kfree(priv); } static const struct sdio_device_id iwmct_ids[] = { /* Intel Wireless MultiCom 3200 Top Driver */ { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1404)}, { }, /* Terminating entry */ }; MODULE_DEVICE_TABLE(sdio, iwmct_ids); static struct sdio_driver iwmct_driver = { .probe = iwmct_probe, .remove = iwmct_remove, .name = DRV_NAME, .id_table = iwmct_ids, }; static int __init iwmct_init(void) { int rc; /* Default log filter settings */ iwmct_log_set_filter(LOG_SRC_ALL, LOG_SEV_FILTER_RUNTIME); iwmct_log_set_filter(LOG_SRC_FW_MSG, LOG_SEV_FW_FILTER_ALL); iwmct_log_set_fw_filter(LOG_SRC_ALL, FW_LOG_SEV_FILTER_RUNTIME); rc = sdio_register_driver(&iwmct_driver); return rc; } static void __exit iwmct_exit(void) { sdio_unregister_driver(&iwmct_driver); } module_init(iwmct_init); module_exit(iwmct_exit);
gpl-2.0
obsolete-ra/kernel_motorola_msm8226
arch/mips/lasat/sysctl.c
8773
6089
/* * Thomas Horsten <thh@lasat.com> * Copyright (C) 2000 LASAT Networks A/S. * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * Routines specific to the LASAT boards */ #include <linux/types.h> #include <asm/lasat/lasat.h> #include <linux/module.h> #include <linux/sysctl.h> #include <linux/stddef.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/ctype.h> #include <linux/string.h> #include <linux/net.h> #include <linux/inet.h> #include <linux/uaccess.h> #include <asm/time.h> #ifdef CONFIG_DS1603 #include "ds1603.h" #endif /* And the same for proc */ int proc_dolasatstring(ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int r; r = proc_dostring(table, write, buffer, lenp, ppos); if ((!write) || r) return r; lasat_write_eeprom_info(); return 0; } /* proc function to write EEPROM after changing int entry */ int proc_dolasatint(ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int r; r = proc_dointvec(table, write, buffer, lenp, ppos); if ((!write) || r) return r; lasat_write_eeprom_info(); return 0; } #ifdef CONFIG_DS1603 static int rtctmp; /* proc function to read/write RealTime Clock */ int proc_dolasatrtc(ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { struct timespec ts; int r; if (!write) { read_persistent_clock(&ts); rtctmp = ts.tv_sec; /* check for time < 0 and set to 0 */ if (rtctmp < 0) rtctmp = 0; } 
r = proc_dointvec(table, write, buffer, lenp, ppos); if (r) return r; if (write) rtc_mips_set_mmss(rtctmp); return 0; } #endif #ifdef CONFIG_INET int proc_lasat_ip(ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { unsigned int ip; char *p, c; int len; char ipbuf[32]; if (!table->data || !table->maxlen || !*lenp || (*ppos && !write)) { *lenp = 0; return 0; } if (write) { len = 0; p = buffer; while (len < *lenp) { if (get_user(c, p++)) return -EFAULT; if (c == 0 || c == '\n') break; len++; } if (len >= sizeof(ipbuf)-1) len = sizeof(ipbuf) - 1; if (copy_from_user(ipbuf, buffer, len)) return -EFAULT; ipbuf[len] = 0; *ppos += *lenp; /* Now see if we can convert it to a valid IP */ ip = in_aton(ipbuf); *(unsigned int *)(table->data) = ip; lasat_write_eeprom_info(); } else { ip = *(unsigned int *)(table->data); sprintf(ipbuf, "%d.%d.%d.%d", (ip) & 0xff, (ip >> 8) & 0xff, (ip >> 16) & 0xff, (ip >> 24) & 0xff); len = strlen(ipbuf); if (len > *lenp) len = *lenp; if (len) if (copy_to_user(buffer, ipbuf, len)) return -EFAULT; if (len < *lenp) { if (put_user('\n', ((char *) buffer) + len)) return -EFAULT; len++; } *lenp = len; *ppos += len; } return 0; } #endif int proc_lasat_prid(ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int r; r = proc_dointvec(table, write, buffer, lenp, ppos); if (r < 0) return r; if (write) { lasat_board_info.li_eeprom_info.prid = lasat_board_info.li_prid; lasat_write_eeprom_info(); lasat_init_board_info(); } return 0; } extern int lasat_boot_to_service; static ctl_table lasat_table[] = { { .procname = "cpu-hz", .data = &lasat_board_info.li_cpu_hz, .maxlen = sizeof(int), .mode = 0444, .proc_handler = proc_dointvec, }, { .procname = "bus-hz", .data = &lasat_board_info.li_bus_hz, .maxlen = sizeof(int), .mode = 0444, .proc_handler = proc_dointvec, }, { .procname = "bmid", .data = &lasat_board_info.li_bmid, .maxlen = sizeof(int), .mode = 0444, .proc_handler = proc_dointvec, }, { .procname = "prid", .data = 
&lasat_board_info.li_prid, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_lasat_prid, }, #ifdef CONFIG_INET { .procname = "ipaddr", .data = &lasat_board_info.li_eeprom_info.ipaddr, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_lasat_ip, }, { .procname = "netmask", .data = &lasat_board_info.li_eeprom_info.netmask, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_lasat_ip, }, #endif { .procname = "passwd_hash", .data = &lasat_board_info.li_eeprom_info.passwd_hash, .maxlen = sizeof(lasat_board_info.li_eeprom_info.passwd_hash), .mode = 0600, .proc_handler = proc_dolasatstring, }, { .procname = "boot-service", .data = &lasat_boot_to_service, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #ifdef CONFIG_DS1603 { .procname = "rtc", .data = &rtctmp, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dolasatrtc, }, #endif { .procname = "namestr", .data = &lasat_board_info.li_namestr, .maxlen = sizeof(lasat_board_info.li_namestr), .mode = 0444, .proc_handler = proc_dostring, }, { .procname = "typestr", .data = &lasat_board_info.li_typestr, .maxlen = sizeof(lasat_board_info.li_typestr), .mode = 0444, .proc_handler = proc_dostring, }, {} }; static ctl_table lasat_root_table[] = { { .procname = "lasat", .mode = 0555, .child = lasat_table }, {} }; static int __init lasat_register_sysctl(void) { struct ctl_table_header *lasat_table_header; lasat_table_header = register_sysctl_table(lasat_root_table); if (!lasat_table_header) { printk(KERN_ERR "Unable to register LASAT sysctl\n"); return -ENOMEM; } return 0; } __initcall(lasat_register_sysctl);
gpl-2.0
kmobs/htc-kernel-pyramid
arch/parisc/math-emu/dfdiv.c
14149
12636
/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * Floating-point emulation code
 *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2, or (at your option)
 *    any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*
 * BEGIN_DESC
 *
 *  File:
 *	@(#)	pa/spmath/dfdiv.c		$Revision: 1.1 $
 *
 *  Purpose:
 *	Double Precision Floating-point Divide
 *
 *  External Interfaces:
 *	dbl_fdiv(srcptr1,srcptr2,dstptr,status)
 *
 *  Internal Interfaces:
 *
 *  Theory:
 *	IEEE-754 double-precision divide implemented with a bitwise
 *	non-restoring division loop over the operand mantissas; handles
 *	NaN/infinity/zero special cases, denormalized operands and
 *	results, all four rounding modes, and the invalid, divide-by-zero,
 *	overflow, underflow and inexact exceptions (either trapping or
 *	setting sticky flags, depending on the enable bits).
 *
 * END_DESC
 */

#include "float.h"
#include "dbl_float.h"

/*
 * Double Precision Floating-point Divide
 *
 * srcptr1 / srcptr2 -> *dstptr.  Returns NOEXCEPTION on success or an
 * exception code (INVALIDEXCEPTION, DIVISIONBYZEROEXCEPTION,
 * OVERFLOWEXCEPTION, UNDERFLOWEXCEPTION, INEXACTEXCEPTION, possibly
 * OR-ed together) when the corresponding trap is enabled; when a trap
 * is disabled the matching sticky flag is set instead and a result is
 * still delivered.  The operands are handled as (sign, exponent,
 * two-word mantissa) pieces via the Dbl_* macros from dbl_float.h.
 *
 * NOTE(review): the 'status' parameter is not referenced directly here;
 * the Set_*flag()/Is_*trap_enabled() macros presumably access it —
 * confirm against float.h.
 */
int
dbl_fdiv (dbl_floating_point * srcptr1, dbl_floating_point * srcptr2,
	  dbl_floating_point * dstptr, unsigned int *status)
{
	register unsigned int opnd1p1, opnd1p2, opnd2p1, opnd2p2;
	register unsigned int opnd3p1, opnd3p2, resultp1, resultp2;
	register int dest_exponent, count;
	register boolean inexact = FALSE, guardbit = FALSE, stickybit = FALSE;
	boolean is_tiny;

	Dbl_copyfromptr(srcptr1,opnd1p1,opnd1p2);
	Dbl_copyfromptr(srcptr2,opnd2p1,opnd2p2);
	/*
	 * set sign bit of result
	 */
	/* quotient sign is the XOR of the operand signs */
	if (Dbl_sign(opnd1p1) ^ Dbl_sign(opnd2p1))
		Dbl_setnegativezerop1(resultp1);
	else Dbl_setzerop1(resultp1);
	/*
	 * check first operand for NaN's or infinity
	 */
	if (Dbl_isinfinity_exponent(opnd1p1)) {
		if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
			/* opnd1 is +/-inf (max exponent, zero mantissa) */
			if (Dbl_isnotnan(opnd2p1,opnd2p2)) {
				if (Dbl_isinfinity(opnd2p1,opnd2p2)) {
					/*
					 * invalid since both operands
					 * are infinity
					 */
					if (Is_invalidtrap_enabled())
						return(INVALIDEXCEPTION);
					Set_invalidflag();
					Dbl_makequietnan(resultp1,resultp2);
					Dbl_copytoptr(resultp1,resultp2,dstptr);
					return(NOEXCEPTION);
				}
				/*
				 * return infinity
				 */
				Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
				Dbl_copytoptr(resultp1,resultp2,dstptr);
				return(NOEXCEPTION);
			}
		}
		else {
			/*
			 * is NaN; signaling or quiet?
			 */
			if (Dbl_isone_signaling(opnd1p1)) {
				/* trap if INVALIDTRAP enabled */
				if (Is_invalidtrap_enabled())
					return(INVALIDEXCEPTION);
				/* make NaN quiet */
				Set_invalidflag();
				Dbl_set_quiet(opnd1p1);
			}
			/*
			 * is second operand a signaling NaN?
			 */
			else if (Dbl_is_signalingnan(opnd2p1)) {
				/* trap if INVALIDTRAP enabled */
				if (Is_invalidtrap_enabled())
					return(INVALIDEXCEPTION);
				/* make NaN quiet */
				Set_invalidflag();
				Dbl_set_quiet(opnd2p1);
				Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
				return(NOEXCEPTION);
			}
			/*
			 * return quiet NaN
			 */
			Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
			return(NOEXCEPTION);
		}
	}
	/*
	 * check second operand for NaN's or infinity
	 */
	if (Dbl_isinfinity_exponent(opnd2p1)) {
		if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
			/*
			 * return zero
			 */
			/* finite / infinity -> signed zero */
			Dbl_setzero_exponentmantissa(resultp1,resultp2);
			Dbl_copytoptr(resultp1,resultp2,dstptr);
			return(NOEXCEPTION);
		}
		/*
		 * is NaN; signaling or quiet?
		 */
		if (Dbl_isone_signaling(opnd2p1)) {
			/* trap if INVALIDTRAP enabled */
			if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
			/* make NaN quiet */
			Set_invalidflag();
			Dbl_set_quiet(opnd2p1);
		}
		/*
		 * return quiet NaN
		 */
		Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
		return(NOEXCEPTION);
	}
	/*
	 * check for division by zero
	 */
	if (Dbl_iszero_exponentmantissa(opnd2p1,opnd2p2)) {
		if (Dbl_iszero_exponentmantissa(opnd1p1,opnd1p2)) {
			/* invalid since both operands are zero */
			if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
			Set_invalidflag();
			Dbl_makequietnan(resultp1,resultp2);
			Dbl_copytoptr(resultp1,resultp2,dstptr);
			return(NOEXCEPTION);
		}
		/* nonzero / zero: signal or flag divide-by-zero, deliver inf */
		if (Is_divisionbyzerotrap_enabled())
			return(DIVISIONBYZEROEXCEPTION);
		Set_divisionbyzeroflag();
		Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
		Dbl_copytoptr(resultp1,resultp2,dstptr);
		return(NOEXCEPTION);
	}
	/*
	 * Generate exponent
	 */
	dest_exponent = Dbl_exponent(opnd1p1) - Dbl_exponent(opnd2p1) + DBL_BIAS;
	/*
	 * Generate mantissa
	 */
	if (Dbl_isnotzero_exponent(opnd1p1)) {
		/* set hidden bit */
		Dbl_clear_signexponent_set_hidden(opnd1p1);
	}
	else {
		/* check for zero */
		if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
			/* zero / nonzero -> signed zero */
			Dbl_setzero_exponentmantissa(resultp1,resultp2);
			Dbl_copytoptr(resultp1,resultp2,dstptr);
			return(NOEXCEPTION);
		}
		/* is denormalized, want to normalize */
		Dbl_clear_signexponent(opnd1p1);
		Dbl_leftshiftby1(opnd1p1,opnd1p2);
		Dbl_normalize(opnd1p1,opnd1p2,dest_exponent);
	}
	/* opnd2 needs to have hidden bit set with msb in hidden bit */
	if (Dbl_isnotzero_exponent(opnd2p1)) {
		Dbl_clear_signexponent_set_hidden(opnd2p1);
	}
	else {
		/* is denormalized; want to normalize */
		/* shift in chunks of 8/4/1 until the hidden bit is set,
		 * compensating dest_exponent for each shift */
		Dbl_clear_signexponent(opnd2p1);
		Dbl_leftshiftby1(opnd2p1,opnd2p2);
		while (Dbl_iszero_hiddenhigh7mantissa(opnd2p1)) {
			dest_exponent+=8;
			Dbl_leftshiftby8(opnd2p1,opnd2p2);
		}
		if (Dbl_iszero_hiddenhigh3mantissa(opnd2p1)) {
			dest_exponent+=4;
			Dbl_leftshiftby4(opnd2p1,opnd2p2);
		}
		while (Dbl_iszero_hidden(opnd2p1)) {
			dest_exponent++;
			Dbl_leftshiftby1(opnd2p1,opnd2p2);
		}
	}
	/* Divide the source mantissas */
	/*
	 * A non-restoring divide algorithm is used.
	 */
	/* opnd1 holds the running (possibly negative) partial remainder;
	 * opnd3 accumulates the quotient bits, one per iteration.  The
	 * loop exits early if the remainder becomes exactly zero. */
	Twoword_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2);
	Dbl_setzero(opnd3p1,opnd3p2);
	for (count=1; count <= DBL_P && (opnd1p1 || opnd1p2); count++) {
		Dbl_leftshiftby1(opnd1p1,opnd1p2);
		Dbl_leftshiftby1(opnd3p1,opnd3p2);
		if (Dbl_iszero_sign(opnd1p1)) {
			/* remainder non-negative: quotient bit is 1, subtract */
			Dbl_setone_lowmantissap2(opnd3p2);
			Twoword_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2);
		}
		else {
			/* remainder negative: quotient bit 0, add back */
			Twoword_add(opnd1p1, opnd1p2, opnd2p1, opnd2p2);
		}
	}
	if (count <= DBL_P) {
		/* remainder hit zero early: result is exact; pad the
		 * quotient out to full precision */
		Dbl_leftshiftby1(opnd3p1,opnd3p2);
		Dbl_setone_lowmantissap2(opnd3p2);
		Dbl_leftshift(opnd3p1,opnd3p2,(DBL_P-count));
		if (Dbl_iszero_hidden(opnd3p1)) {
			Dbl_leftshiftby1(opnd3p1,opnd3p2);
			dest_exponent--;
		}
	}
	else {
		if (Dbl_iszero_hidden(opnd3p1)) {
			/* need to get one more bit of result */
			Dbl_leftshiftby1(opnd1p1,opnd1p2);
			Dbl_leftshiftby1(opnd3p1,opnd3p2);
			if (Dbl_iszero_sign(opnd1p1)) {
				Dbl_setone_lowmantissap2(opnd3p2);
				Twoword_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2);
			}
			else {
				Twoword_add(opnd1p1,opnd1p2,opnd2p1,opnd2p2);
			}
			dest_exponent--;
		}
		/* derive guard and sticky from the final remainder */
		if (Dbl_iszero_sign(opnd1p1)) guardbit = TRUE;
		stickybit = Dbl_allp1(opnd1p1) || Dbl_allp2(opnd1p2);
	}
	inexact = guardbit | stickybit;
	/*
	 * round result
	 */
	if (inexact && (dest_exponent > 0 || Is_underflowtrap_enabled())) {
		Dbl_clear_signexponent(opnd3p1);
		switch (Rounding_mode()) {
			case ROUNDPLUS:
				if (Dbl_iszero_sign(resultp1))
					Dbl_increment(opnd3p1,opnd3p2);
				break;
			case ROUNDMINUS:
				if (Dbl_isone_sign(resultp1))
					Dbl_increment(opnd3p1,opnd3p2);
				break;
			case ROUNDNEAREST:
				/* round-to-nearest-even: low mantissa bit
				 * acts as the tie-breaker */
				if (guardbit && (stickybit ||
				    Dbl_isone_lowmantissap2(opnd3p2))) {
					Dbl_increment(opnd3p1,opnd3p2);
				}
		}
		/* rounding may have carried into the hidden bit */
		if (Dbl_isone_hidden(opnd3p1)) dest_exponent++;
	}
	Dbl_set_mantissa(resultp1,resultp2,opnd3p1,opnd3p2);
	/*
	 * Test for overflow
	 */
	if (dest_exponent >= DBL_INFINITY_EXPONENT) {
		/* trap if OVERFLOWTRAP enabled */
		if (Is_overflowtrap_enabled()) {
			/*
			 * Adjust bias of result
			 */
			Dbl_setwrapped_exponent(resultp1,dest_exponent,ovfl);
			Dbl_copytoptr(resultp1,resultp2,dstptr);
			/* NOTE: the 'else' below binds to the inner 'if'
			 * (Is_inexacttrap_enabled), not to 'if (inexact)' */
			if (inexact)
			    if (Is_inexacttrap_enabled())
				return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
			    else Set_inexactflag();
			return(OVERFLOWEXCEPTION);
		}
		Set_overflowflag();
		/* set result to infinity or largest number */
		Dbl_setoverflow(resultp1,resultp2);
		inexact = TRUE;
	}
	/*
	 * Test for underflow
	 */
	else if (dest_exponent <= 0) {
		/* trap if UNDERFLOWTRAP enabled */
		if (Is_underflowtrap_enabled()) {
			/*
			 * Adjust bias of result
			 */
			Dbl_setwrapped_exponent(resultp1,dest_exponent,unfl);
			Dbl_copytoptr(resultp1,resultp2,dstptr);
			/* same dangling-else shape as the overflow path */
			if (inexact)
			    if (Is_inexacttrap_enabled())
				return(UNDERFLOWEXCEPTION | INEXACTEXCEPTION);
			    else Set_inexactflag();
			return(UNDERFLOWEXCEPTION);
		}
		/* Determine if should set underflow flag */
		/* tininess check: probe whether rounding would carry the
		 * value back up to the smallest normalized number */
		is_tiny = TRUE;
		if (dest_exponent == 0 && inexact) {
			switch (Rounding_mode()) {
				case ROUNDPLUS:
					if (Dbl_iszero_sign(resultp1)) {
						Dbl_increment(opnd3p1,opnd3p2);
						if (Dbl_isone_hiddenoverflow(opnd3p1))
							is_tiny = FALSE;
						Dbl_decrement(opnd3p1,opnd3p2);
					}
					break;
				case ROUNDMINUS:
					if (Dbl_isone_sign(resultp1)) {
						Dbl_increment(opnd3p1,opnd3p2);
						if (Dbl_isone_hiddenoverflow(opnd3p1))
							is_tiny = FALSE;
						Dbl_decrement(opnd3p1,opnd3p2);
					}
					break;
				case ROUNDNEAREST:
					if (guardbit && (stickybit ||
					    Dbl_isone_lowmantissap2(opnd3p2))) {
						Dbl_increment(opnd3p1,opnd3p2);
						if (Dbl_isone_hiddenoverflow(opnd3p1))
							is_tiny = FALSE;
						Dbl_decrement(opnd3p1,opnd3p2);
					}
					break;
			}
		}
		/*
		 * denormalize result or set to signed zero
		 */
		stickybit = inexact;
		Dbl_denormalize(opnd3p1,opnd3p2,dest_exponent,guardbit,
		    stickybit,inexact);
		/* return rounded number */
		if (inexact) {
			switch (Rounding_mode()) {
				case ROUNDPLUS:
					if (Dbl_iszero_sign(resultp1)) {
						Dbl_increment(opnd3p1,opnd3p2);
					}
					break;
				case ROUNDMINUS:
					if (Dbl_isone_sign(resultp1)) {
						Dbl_increment(opnd3p1,opnd3p2);
					}
					break;
				case ROUNDNEAREST:
					if (guardbit && (stickybit ||
					    Dbl_isone_lowmantissap2(opnd3p2))) {
						Dbl_increment(opnd3p1,opnd3p2);
					}
					break;
			}
			if (is_tiny) Set_underflowflag();
		}
		Dbl_set_exponentmantissa(resultp1,resultp2,opnd3p1,opnd3p2);
	}
	else Dbl_set_exponent(resultp1,dest_exponent);
	Dbl_copytoptr(resultp1,resultp2,dstptr);
	/* check for inexact */
	if (inexact) {
		if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
		else Set_inexactflag();
	}
	return(NOEXCEPTION);
}
gpl-2.0
noobnl/android_kernel_samsung_d2-jb_2.5.1
arch/parisc/math-emu/sfcmp.c
14149
4514
/* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC * * File: * @(#) pa/spmath/sfcmp.c $Revision: 1.1 $ * * Purpose: * sgl_cmp: compare two values * * External Interfaces: * sgl_fcmp(leftptr, rightptr, cond, status) * * Internal Interfaces: * * Theory: * <<please update with a overview of the operation of this file>> * * END_DESC */ #include "float.h" #include "sgl_float.h" /* * sgl_cmp: compare two values */ int sgl_fcmp (sgl_floating_point * leftptr, sgl_floating_point * rightptr, unsigned int cond, unsigned int *status) /* The predicate to be tested */ { register unsigned int left, right; register int xorresult; /* Create local copies of the numbers */ left = *leftptr; right = *rightptr; /* * Test for NaN */ if( (Sgl_exponent(left) == SGL_INFINITY_EXPONENT) || (Sgl_exponent(right) == SGL_INFINITY_EXPONENT) ) { /* Check if a NaN is involved. 
Signal an invalid exception when * comparing a signaling NaN or when comparing quiet NaNs and the * low bit of the condition is set */ if( ( (Sgl_exponent(left) == SGL_INFINITY_EXPONENT) && Sgl_isnotzero_mantissa(left) && (Exception(cond) || Sgl_isone_signaling(left))) || ( (Sgl_exponent(right) == SGL_INFINITY_EXPONENT) && Sgl_isnotzero_mantissa(right) && (Exception(cond) || Sgl_isone_signaling(right)) ) ) { if( Is_invalidtrap_enabled() ) { Set_status_cbit(Unordered(cond)); return(INVALIDEXCEPTION); } else Set_invalidflag(); Set_status_cbit(Unordered(cond)); return(NOEXCEPTION); } /* All the exceptional conditions are handled, now special case NaN compares */ else if( ((Sgl_exponent(left) == SGL_INFINITY_EXPONENT) && Sgl_isnotzero_mantissa(left)) || ((Sgl_exponent(right) == SGL_INFINITY_EXPONENT) && Sgl_isnotzero_mantissa(right)) ) { /* NaNs always compare unordered. */ Set_status_cbit(Unordered(cond)); return(NOEXCEPTION); } /* infinities will drop down to the normal compare mechanisms */ } /* First compare for unequal signs => less or greater or * special equal case */ Sgl_xortointp1(left,right,xorresult); if( xorresult < 0 ) { /* left negative => less, left positive => greater. * equal is possible if both operands are zeros. */ if( Sgl_iszero_exponentmantissa(left) && Sgl_iszero_exponentmantissa(right) ) { Set_status_cbit(Equal(cond)); } else if( Sgl_isone_sign(left) ) { Set_status_cbit(Lessthan(cond)); } else { Set_status_cbit(Greaterthan(cond)); } } /* Signs are the same. Treat negative numbers separately * from the positives because of the reversed sense. */ else if( Sgl_all(left) == Sgl_all(right) ) { Set_status_cbit(Equal(cond)); } else if( Sgl_iszero_sign(left) ) { /* Positive compare */ if( Sgl_all(left) < Sgl_all(right) ) { Set_status_cbit(Lessthan(cond)); } else { Set_status_cbit(Greaterthan(cond)); } } else { /* Negative compare. Signed or unsigned compares * both work the same. That distinction is only * important when the sign bits differ. 
*/ if( Sgl_all(left) > Sgl_all(right) ) { Set_status_cbit(Lessthan(cond)); } else { Set_status_cbit(Greaterthan(cond)); } } return(NOEXCEPTION); }
gpl-2.0
miamo/miamOv
drivers/usb/host/whci/hw.c
14661
2864
/* * Wireless Host Controller (WHC) hardware access helpers. * * Copyright (C) 2007 Cambridge Silicon Radio Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/dma-mapping.h> #include <linux/uwb/umc.h> #include "../../wusbcore/wusbhc.h" #include "whcd.h" void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val) { unsigned long flags; u32 cmd; spin_lock_irqsave(&whc->lock, flags); cmd = le_readl(whc->base + WUSBCMD); cmd = (cmd & ~mask) | val; le_writel(cmd, whc->base + WUSBCMD); spin_unlock_irqrestore(&whc->lock, flags); } /** * whc_do_gencmd - start a generic command via the WUSBGENCMDSTS register * @whc: the WHCI HC * @cmd: command to start. * @params: parameters for the command (the WUSBGENCMDPARAMS register value). * @addr: pointer to any data for the command (may be NULL). * @len: length of the data (if any). */ int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len) { unsigned long flags; dma_addr_t dma_addr; int t; int ret = 0; mutex_lock(&whc->mutex); /* Wait for previous command to complete. 
*/ t = wait_event_timeout(whc->cmd_wq, (le_readl(whc->base + WUSBGENCMDSTS) & WUSBGENCMDSTS_ACTIVE) == 0, WHC_GENCMD_TIMEOUT_MS); if (t == 0) { dev_err(&whc->umc->dev, "generic command timeout (%04x/%04x)\n", le_readl(whc->base + WUSBGENCMDSTS), le_readl(whc->base + WUSBGENCMDPARAMS)); ret = -ETIMEDOUT; goto out; } if (addr) { memcpy(whc->gen_cmd_buf, addr, len); dma_addr = whc->gen_cmd_buf_dma; } else dma_addr = 0; /* Poke registers to start cmd. */ spin_lock_irqsave(&whc->lock, flags); le_writel(params, whc->base + WUSBGENCMDPARAMS); le_writeq(dma_addr, whc->base + WUSBGENADDR); le_writel(WUSBGENCMDSTS_ACTIVE | WUSBGENCMDSTS_IOC | cmd, whc->base + WUSBGENCMDSTS); spin_unlock_irqrestore(&whc->lock, flags); out: mutex_unlock(&whc->mutex); return ret; } /** * whc_hw_error - recover from a hardware error * @whc: the WHCI HC that broke. * @reason: a description of the failure. * * Recover from broken hardware with a full reset. */ void whc_hw_error(struct whc *whc, const char *reason) { struct wusbhc *wusbhc = &whc->wusbhc; dev_err(&whc->umc->dev, "hardware error: %s\n", reason); wusbhc_reset_all(wusbhc); }
gpl-2.0
zarboz/brick_kernel_msm8960
block/blk-core.c
70
74838
/* * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 1994, Karl Keyte: Added support for disk statistics * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de> * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> * - July2000 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001 */ /* * This handles all read/write requests to block devices */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/backing-dev.h> #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/highmem.h> #include <linux/mm.h> #include <linux/kernel_stat.h> #include <linux/string.h> #include <linux/init.h> #include <linux/completion.h> #include <linux/slab.h> #include <linux/swap.h> #include <linux/writeback.h> #include <linux/task_io_accounting_ops.h> #include <linux/fault-inject.h> #include <linux/list_sort.h> #define CREATE_TRACE_POINTS #include <trace/events/block.h> #include "blk.h" EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap); EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); static int __make_request(struct request_queue *q, struct bio *bio); /* * For the allocated request tables */ static struct kmem_cache *request_cachep; /* * For queue allocation */ struct kmem_cache *blk_requestq_cachep; /* * Controlling structure to kblockd */ static struct workqueue_struct *kblockd_workqueue; static void drive_stat_acct(struct request *rq, int new_io) { struct hd_struct *part; int rw = rq_data_dir(rq); int cpu; if (!blk_do_io_stat(rq)) return; cpu = part_stat_lock(); if (!new_io) { part = rq->part; part_stat_inc(cpu, part, merges[rw]); } else { part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); if (!hd_struct_try_get(part)) { /* * The partition is already being removed, * the request will be accounted on the disk only * * We take a reference on disk->part0 although that * partition will 
never be deleted, so we can treat * it as any other partition. */ part = &rq->rq_disk->part0; hd_struct_get(part); } part_round_stats(cpu, part); part_inc_in_flight(part, rw); rq->part = part; } part_stat_unlock(); } void blk_queue_congestion_threshold(struct request_queue *q) { int nr; nr = q->nr_requests - (q->nr_requests / 8) + 1; if (nr > q->nr_requests) nr = q->nr_requests; q->nr_congestion_on = nr; nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1; if (nr < 1) nr = 1; q->nr_congestion_off = nr; } /** * blk_get_backing_dev_info - get the address of a queue's backing_dev_info * @bdev: device * * Locates the passed device's request queue and returns the address of its * backing_dev_info * * Will return NULL if the request queue cannot be located. */ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev) { struct backing_dev_info *ret = NULL; struct request_queue *q = bdev_get_queue(bdev); if (q) ret = &q->backing_dev_info; return ret; } EXPORT_SYMBOL(blk_get_backing_dev_info); void blk_rq_init(struct request_queue *q, struct request *rq) { memset(rq, 0, sizeof(*rq)); INIT_LIST_HEAD(&rq->queuelist); INIT_LIST_HEAD(&rq->timeout_list); rq->cpu = -1; rq->q = q; rq->__sector = (sector_t) -1; INIT_HLIST_NODE(&rq->hash); RB_CLEAR_NODE(&rq->rb_node); rq->cmd = rq->__cmd; rq->cmd_len = BLK_MAX_CDB; rq->tag = -1; rq->ref_count = 1; rq->start_time = jiffies; set_start_time_ns(rq); rq->part = NULL; } EXPORT_SYMBOL(blk_rq_init); static void req_bio_endio(struct request *rq, struct bio *bio, unsigned int nbytes, int error) { if (error) clear_bit(BIO_UPTODATE, &bio->bi_flags); else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) error = -EIO; if (unlikely(nbytes > bio->bi_size)) { printk(KERN_ERR "%s: want %u bytes done, %u left\n", __func__, nbytes, bio->bi_size); nbytes = bio->bi_size; } if (unlikely(rq->cmd_flags & REQ_QUIET)) set_bit(BIO_QUIET, &bio->bi_flags); bio->bi_size -= nbytes; bio->bi_sector += (nbytes >> 9); if 
(bio_integrity(bio)) bio_integrity_advance(bio, nbytes); /* don't actually finish bio if it's part of flush sequence */ if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) bio_endio(bio, error); } void blk_dump_rq_flags(struct request *rq, char *msg) { int bit; printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg, rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, rq->cmd_flags); printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n", (unsigned long long)blk_rq_pos(rq), blk_rq_sectors(rq), blk_rq_cur_sectors(rq)); printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n", rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq)); if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { printk(KERN_INFO " cdb: "); for (bit = 0; bit < BLK_MAX_CDB; bit++) printk("%02x ", rq->cmd[bit]); printk("\n"); } } EXPORT_SYMBOL(blk_dump_rq_flags); static void blk_delay_work(struct work_struct *work) { struct request_queue *q; q = container_of(work, struct request_queue, delay_work.work); spin_lock_irq(q->queue_lock); __blk_run_queue(q); spin_unlock_irq(q->queue_lock); } /** * blk_delay_queue - restart queueing after defined interval * @q: The &struct request_queue in question * @msecs: Delay in msecs * * Description: * Sometimes queueing needs to be postponed for a little while, to allow * resources to come back. This function will make sure that queueing is * restarted around the specified time. */ void blk_delay_queue(struct request_queue *q, unsigned long msecs) { queue_delayed_work(kblockd_workqueue, &q->delay_work, msecs_to_jiffies(msecs)); } EXPORT_SYMBOL(blk_delay_queue); /** * blk_start_queue - restart a previously stopped queue * @q: The &struct request_queue in question * * Description: * blk_start_queue() will clear the stop flag on the queue, and call * the request_fn for the queue if it was in a stopped state when * entered. Also see blk_stop_queue(). Queue lock must be held. 
**/ void blk_start_queue(struct request_queue *q) { WARN_ON(!irqs_disabled()); queue_flag_clear(QUEUE_FLAG_STOPPED, q); __blk_run_queue(q); } EXPORT_SYMBOL(blk_start_queue); /** * blk_stop_queue - stop a queue * @q: The &struct request_queue in question * * Description: * The Linux block layer assumes that a block driver will consume all * entries on the request queue when the request_fn strategy is called. * Often this will not happen, because of hardware limitations (queue * depth settings). If a device driver gets a 'queue full' response, * or if it simply chooses not to queue more I/O at one point, it can * call this function to prevent the request_fn from being called until * the driver has signalled it's ready to go again. This happens by calling * blk_start_queue() to restart queue operations. Queue lock must be held. **/ void blk_stop_queue(struct request_queue *q) { __cancel_delayed_work(&q->delay_work); queue_flag_set(QUEUE_FLAG_STOPPED, q); } EXPORT_SYMBOL(blk_stop_queue); /** * blk_sync_queue - cancel any pending callbacks on a queue * @q: the queue * * Description: * The block layer may perform asynchronous callback activity * on a queue, such as calling the unplug function after a timeout. * A block device may call blk_sync_queue to ensure that any * such activity is cancelled, thus allowing it to release resources * that the callbacks might use. The caller must already have made sure * that its ->make_request_fn will not re-add plugging prior to calling * this function. * * This function does not cancel any asynchronous activity arising * out of elevator or throttling code. That would require elevaotor_exit() * and blk_throtl_exit() to be called with queue lock initialized. * */ void blk_sync_queue(struct request_queue *q) { del_timer_sync(&q->timeout); cancel_delayed_work_sync(&q->delay_work); } EXPORT_SYMBOL(blk_sync_queue); /** * __blk_run_queue - run a single device queue * @q: The queue to run * * Description: * See @blk_run_queue. 
This variant must be called with the queue lock * held and interrupts disabled. */ void __blk_run_queue(struct request_queue *q) { if (unlikely(blk_queue_stopped(q))) return; q->request_fn(q); } EXPORT_SYMBOL(__blk_run_queue); /** * blk_run_queue_async - run a single device queue in workqueue context * @q: The queue to run * * Description: * Tells kblockd to perform the equivalent of @blk_run_queue on behalf * of us. */ void blk_run_queue_async(struct request_queue *q) { if (likely(!blk_queue_stopped(q))) { __cancel_delayed_work(&q->delay_work); queue_delayed_work(kblockd_workqueue, &q->delay_work, 0); } } EXPORT_SYMBOL(blk_run_queue_async); /** * blk_run_queue - run a single device queue * @q: The queue to run * * Description: * Invoke request handling on this queue, if it has pending work to do. * May be used to restart queueing when a request has completed. */ void blk_run_queue(struct request_queue *q) { unsigned long flags; spin_lock_irqsave(q->queue_lock, flags); __blk_run_queue(q); spin_unlock_irqrestore(q->queue_lock, flags); } EXPORT_SYMBOL(blk_run_queue); void blk_put_queue(struct request_queue *q) { kobject_put(&q->kobj); } EXPORT_SYMBOL(blk_put_queue); /* * Note: If a driver supplied the queue lock, it is disconnected * by this function. The actual state of the lock doesn't matter * here as the request_queue isn't accessible after this point * (QUEUE_FLAG_DEAD is set) and no other requests will be queued. */ void blk_cleanup_queue(struct request_queue *q) { /* * We know we have process context here, so we can be a little * cautious and ensure that pending block actions on this device * are done before moving on. Going into this function, we should * not have processes doing IO to this device. 
*/ blk_sync_queue(q); del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer); mutex_lock(&q->sysfs_lock); queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q); mutex_unlock(&q->sysfs_lock); if (q->queue_lock != &q->__queue_lock) q->queue_lock = &q->__queue_lock; blk_put_queue(q); } EXPORT_SYMBOL(blk_cleanup_queue); static int blk_init_free_list(struct request_queue *q) { struct request_list *rl = &q->rq; if (unlikely(rl->rq_pool)) return 0; rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; rl->elvpriv = 0; init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, request_cachep, q->node); if (!rl->rq_pool) return -ENOMEM; return 0; } struct request_queue *blk_alloc_queue(gfp_t gfp_mask) { return blk_alloc_queue_node(gfp_mask, -1); } EXPORT_SYMBOL(blk_alloc_queue); struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) { struct request_queue *q; int err; q = kmem_cache_alloc_node(blk_requestq_cachep, gfp_mask | __GFP_ZERO, node_id); if (!q) return NULL; q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; q->backing_dev_info.state = 0; q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; q->backing_dev_info.name = "block"; q->node = node_id; err = bdi_init(&q->backing_dev_info); if (err) { kmem_cache_free(blk_requestq_cachep, q); return NULL; } if (blk_throtl_init(q)) { kmem_cache_free(blk_requestq_cachep, q); return NULL; } setup_timer(&q->backing_dev_info.laptop_mode_wb_timer, laptop_mode_timer_fn, (unsigned long) q); setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); INIT_LIST_HEAD(&q->timeout_list); INIT_LIST_HEAD(&q->flush_queue[0]); INIT_LIST_HEAD(&q->flush_queue[1]); INIT_LIST_HEAD(&q->flush_data_in_flight); INIT_DELAYED_WORK(&q->delay_work, blk_delay_work); kobject_init(&q->kobj, &blk_queue_ktype); mutex_init(&q->sysfs_lock); 
spin_lock_init(&q->__queue_lock); /* * By default initialize queue_lock to internal lock and driver can * override it later if need be. */ q->queue_lock = &q->__queue_lock; return q; } EXPORT_SYMBOL(blk_alloc_queue_node); /** * blk_init_queue - prepare a request queue for use with a block device * @rfn: The function to be called to process requests that have been * placed on the queue. * @lock: Request queue spin lock * * Description: * If a block device wishes to use the standard request handling procedures, * which sorts requests and coalesces adjacent requests, then it must * call blk_init_queue(). The function @rfn will be called when there * are requests on the queue that need to be processed. If the device * supports plugging, then @rfn may not be called immediately when requests * are available on the queue, but may be called at some time later instead. * Plugged queues are generally unplugged when a buffer belonging to one * of the requests on the queue is needed, or due to memory pressure. * * @rfn is not required, or even expected, to remove all requests off the * queue, but only as many as it can handle at a time. If it does leave * requests on the queue, it is responsible for arranging that the requests * get dealt with eventually. * * The queue spin lock must be held while manipulating the requests on the * request queue; this lock will be taken also from interrupt context, so irq * disabling is needed for it. * * Function returns a pointer to the initialized request queue, or %NULL if * it didn't succeed. * * Note: * blk_init_queue() must be paired with a blk_cleanup_queue() call * when the block device is deactivated (such as at module unload). 
**/ struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock) { return blk_init_queue_node(rfn, lock, -1); } EXPORT_SYMBOL(blk_init_queue); struct request_queue * blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) { struct request_queue *uninit_q, *q; uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id); if (!uninit_q) return NULL; q = blk_init_allocated_queue(uninit_q, rfn, lock); if (!q) blk_cleanup_queue(uninit_q); return q; } EXPORT_SYMBOL(blk_init_queue_node); struct request_queue * blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, spinlock_t *lock) { if (!q) return NULL; if (blk_init_free_list(q)) return NULL; q->request_fn = rfn; q->prep_rq_fn = NULL; q->unprep_rq_fn = NULL; q->queue_flags = QUEUE_FLAG_DEFAULT; /* Override internal queue lock with supplied lock pointer */ if (lock) q->queue_lock = lock; /* * This also sets hw/phys segments, boundary and size */ blk_queue_make_request(q, __make_request); q->sg_reserved_size = INT_MAX; /* * all done */ if (!elevator_init(q, NULL)) { blk_queue_congestion_threshold(q); return q; } return NULL; } EXPORT_SYMBOL(blk_init_allocated_queue); int blk_get_queue(struct request_queue *q) { if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { kobject_get(&q->kobj); return 0; } return 1; } EXPORT_SYMBOL(blk_get_queue); static inline void blk_free_request(struct request_queue *q, struct request *rq) { if (rq->cmd_flags & REQ_ELVPRIV) elv_put_request(q, rq); mempool_free(rq, q->rq.rq_pool); } static struct request * blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask) { struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); if (!rq) return NULL; blk_rq_init(q, rq); rq->cmd_flags = flags | REQ_ALLOCED; if (priv) { if (unlikely(elv_set_request(q, rq, gfp_mask))) { mempool_free(rq, q->rq.rq_pool); return NULL; } rq->cmd_flags |= REQ_ELVPRIV; } return rq; } /* * ioc_batching returns true if the ioc is a valid batching request and 
 * should be given priority access to a request.
 */
static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
        if (!ioc)
                return 0;

        /*
         * Make sure the process is able to allocate at least 1 request
         * even if the batch times out, otherwise we could theoretically
         * lose wakeups.
         */
        return ioc->nr_batch_requests == q->nr_batching ||
                (ioc->nr_batch_requests > 0
                && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}

/*
 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 * will cause the process to be a "batcher" on all queues in the system. This
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
        if (!ioc || ioc_batching(q, ioc))
                return;

        ioc->nr_batch_requests = q->nr_batching;
        ioc->last_waited = jiffies;
}

/*
 * Re-evaluate congestion and "queue full" state for one direction
 * (@sync) after a request was freed, waking any waiter when room
 * appears.  Called under q->queue_lock via freed_request().
 */
static void __freed_request(struct request_queue *q, int sync)
{
        struct request_list *rl = &q->rq;

        if (rl->count[sync] < queue_congestion_off_threshold(q))
                blk_clear_queue_congested(q, sync);

        if (rl->count[sync] + 1 <= q->nr_requests) {
                if (waitqueue_active(&rl->wait[sync]))
                        wake_up(&rl->wait[sync]);

                blk_clear_queue_full(q, sync);
        }
}

/*
 * A request has just been released. Account for it, update the full and
 * congestion status, wake up any waiters. Called under q->queue_lock.
 */
static void freed_request(struct request_queue *q, int sync, int priv)
{
        struct request_list *rl = &q->rq;

        rl->count[sync]--;
        if (priv)
                rl->elvpriv--;

        __freed_request(q, sync);

        /* the opposite direction may be starved for requests; kick it too */
        if (unlikely(rl->starved[sync ^ 1]))
                __freed_request(q, sync ^ 1);
}

/*
 * Determine if elevator data should be initialized when allocating the
 * request associated with @bio.
 */
static bool blk_rq_should_init_elevator(struct bio *bio)
{
        if (!bio)
                return true;

        /*
         * Flush requests do not use the elevator so skip initialization.
         * This allows a request to share the flush and elevator data.
*/ if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) return false; return true; } /* * Get a free request, queue_lock must be held. * Returns NULL on failure, with queue_lock held. * Returns !NULL on success, with queue_lock *not held*. */ static struct request *get_request(struct request_queue *q, int rw_flags, struct bio *bio, gfp_t gfp_mask) { struct request *rq = NULL; struct request_list *rl = &q->rq; struct io_context *ioc = NULL; const bool is_sync = rw_is_sync(rw_flags) != 0; int may_queue, priv = 0; may_queue = elv_may_queue(q, rw_flags); if (may_queue == ELV_MQUEUE_NO) goto rq_starved; if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { if (rl->count[is_sync]+1 >= q->nr_requests) { ioc = current_io_context(GFP_ATOMIC, q->node); /* * The queue will fill after this allocation, so set * it as full, and mark this process as "batching". * This process will be allowed to complete a batch of * requests, others will be blocked. */ if (!blk_queue_full(q, is_sync)) { ioc_set_batching(q, ioc); blk_set_queue_full(q, is_sync); } else { if (may_queue != ELV_MQUEUE_MUST && !ioc_batching(q, ioc)) { /* * The queue is full and the allocating * process is not a "batcher", and not * exempted by the IO scheduler */ goto out; } } } blk_set_queue_congested(q, is_sync); } /* * Only allow batching queuers to allocate up to 50% over the defined * limit of requests, otherwise we could have thousands of requests * allocated with any setting of ->nr_requests */ if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) goto out; rl->count[is_sync]++; rl->starved[is_sync] = 0; if (blk_rq_should_init_elevator(bio)) { priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); if (priv) rl->elvpriv++; } if (blk_queue_io_stat(q)) rw_flags |= REQ_IO_STAT; spin_unlock_irq(q->queue_lock); rq = blk_alloc_request(q, rw_flags, priv, gfp_mask); if (unlikely(!rq)) { /* * Allocation failed presumably due to memory. Undo anything * we might have messed up. 
* * Allocating task should really be put onto the front of the * wait queue, but this is pretty rare. */ spin_lock_irq(q->queue_lock); freed_request(q, is_sync, priv); /* * in the very unlikely event that allocation failed and no * requests for this direction was pending, mark us starved * so that freeing of a request in the other direction will * notice us. another possible fix would be to split the * rq mempool into READ and WRITE */ rq_starved: if (unlikely(rl->count[is_sync] == 0)) rl->starved[is_sync] = 1; goto out; } /* * ioc may be NULL here, and ioc_batching will be false. That's * OK, if the queue is under the request limit then requests need * not count toward the nr_batch_requests limit. There will always * be some limit enforced by BLK_BATCH_TIME. */ if (ioc_batching(q, ioc)) ioc->nr_batch_requests--; trace_block_getrq(q, bio, rw_flags & 1); out: return rq; } /* * No available requests for this queue, wait for some requests to become * available. * * Called with q->queue_lock held, and returns with it unlocked. */ static struct request *get_request_wait(struct request_queue *q, int rw_flags, struct bio *bio) { const bool is_sync = rw_is_sync(rw_flags) != 0; struct request *rq; rq = get_request(q, rw_flags, bio, GFP_NOIO); while (!rq) { DEFINE_WAIT(wait); struct io_context *ioc; struct request_list *rl = &q->rq; prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, TASK_UNINTERRUPTIBLE); trace_block_sleeprq(q, bio, rw_flags & 1); spin_unlock_irq(q->queue_lock); io_schedule(); /* * After sleeping, we become a "batching" process and * will be able to allocate at least one request, and * up to a big batch of them for a small period time. 
* See ioc_batching, ioc_set_batching */ ioc = current_io_context(GFP_NOIO, q->node); ioc_set_batching(q, ioc); spin_lock_irq(q->queue_lock); finish_wait(&rl->wait[is_sync], &wait); rq = get_request(q, rw_flags, bio, GFP_NOIO); }; return rq; } struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) { struct request *rq; if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) return NULL; BUG_ON(rw != READ && rw != WRITE); spin_lock_irq(q->queue_lock); if (gfp_mask & __GFP_WAIT) { rq = get_request_wait(q, rw, NULL); } else { rq = get_request(q, rw, NULL, gfp_mask); if (!rq) spin_unlock_irq(q->queue_lock); } /* q->queue_lock is unlocked at this point */ return rq; } EXPORT_SYMBOL(blk_get_request); /** * blk_make_request - given a bio, allocate a corresponding struct request. * @q: target request queue * @bio: The bio describing the memory mappings that will be submitted for IO. * It may be a chained-bio properly constructed by block/bio layer. * @gfp_mask: gfp flags to be used for memory allocation * * blk_make_request is the parallel of generic_make_request for BLOCK_PC * type commands. Where the struct request needs to be farther initialized by * the caller. It is passed a &struct bio, which describes the memory info of * the I/O transfer. * * The caller of blk_make_request must make sure that bi_io_vec * are set to describe the memory buffers. That bio_data_dir() will return * the needed direction of the request. (And all bio's in the passed bio-chain * are properly set accordingly) * * If called under none-sleepable conditions, mapped bio buffers must not * need bouncing, by calling the appropriate masked or flagged allocator, * suitable for the target device. Otherwise the call to blk_queue_bounce will * BUG. * * WARNING: When allocating/cloning a bio-chain, careful consideration should be * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for * anything but the first bio in the chain. 
Otherwise you risk waiting for IO
 * completion of a bio that hasn't been submitted yet, thus resulting in a
 * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
 * of bio_alloc(), as that avoids the mempool deadlock.
 * If possible a big IO should be split into smaller parts when allocation
 * fails. Partial allocation should not be an error, or you risk a live-lock.
 */
struct request *blk_make_request(struct request_queue *q, struct bio *bio,
                                 gfp_t gfp_mask)
{
        struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);

        if (unlikely(!rq))
                return ERR_PTR(-ENOMEM);

        /* bounce and append each bio in the chain; unwind fully on error */
        for_each_bio(bio) {
                struct bio *bounce_bio = bio;
                int ret;

                blk_queue_bounce(q, &bounce_bio);
                ret = blk_rq_append_bio(q, rq, bounce_bio);
                if (unlikely(ret)) {
                        blk_put_request(rq);
                        return ERR_PTR(ret);
                }
        }

        return rq;
}
EXPORT_SYMBOL(blk_make_request);

/**
 * blk_requeue_request - put a request back on queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 *
 * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more, when that condition happens we need to put the request back
 *    on the queue. Must be called with queue lock held.
 */
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
        /* stop the timeout timer and clear completion state before requeue */
        blk_delete_timer(rq);
        blk_clear_rq_complete(rq);
        trace_block_rq_requeue(q, rq);

        if (blk_rq_tagged(rq))
                blk_queue_end_tag(q, rq);

        BUG_ON(blk_queued_rq(rq));

        elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);

/*
 * Account the request in the disk stats, then hand it to the elevator
 * at position @where.  Called under q->queue_lock.
 */
static void add_acct_request(struct request_queue *q, struct request *rq,
                             int where)
{
        drive_stat_acct(rq, 1);
        __elv_add_request(q, rq, where);
}

/**
 * blk_insert_request - insert a special request into a request queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 * @at_head:	insert request at head or tail of queue
 * @data:	private data
 *
 * Description:
 *    Many block devices need to execute commands asynchronously, so they don't
 *    block the whole kernel from preemption during request execution. This is
 *    accomplished normally by inserting aritficial requests tagged as
 *    REQ_TYPE_SPECIAL in to the corresponding request queue, and letting them
 *    be scheduled for actual execution by the request queue.
 *
 *    We have the option of inserting the head or the tail of the queue.
 *    Typically we use the tail for new ioctls and so forth. We use the head
 *    of the queue for things like a QUEUE_FULL message from a device, or a
 *    host that is unable to accept a particular command.
 */
void blk_insert_request(struct request_queue *q, struct request *rq,
                        int at_head, void *data)
{
        int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
        unsigned long flags;

        /*
         * tell I/O scheduler that this isn't a regular read/write (ie it
         * must not attempt merges on this) and that it acts as a soft
         * barrier
         */
        rq->cmd_type = REQ_TYPE_SPECIAL;

        rq->special = data;

        spin_lock_irqsave(q->queue_lock, flags);

        /*
         * If command is tagged, release the tag
         */
        if (blk_rq_tagged(rq))
                blk_queue_end_tag(q, rq);

        add_acct_request(q, rq, where);
        __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_insert_request);

/*
 * Fold the time since part->stamp into the queue-time and io-tick
 * counters for @part, then reset the stamp to @now.  No-op when called
 * twice within the same jiffy.
 */
static void part_round_stats_single(int cpu, struct hd_struct *part,
                                    unsigned long now)
{
        if (now == part->stamp)
                return;

        if (part_in_flight(part)) {
                __part_stat_add(cpu, part, time_in_queue,
                                part_in_flight(part) * (now - part->stamp));
                __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
        }
        part->stamp = now;
}

/**
 * part_round_stats() - Round off the performance stats on a struct disk_stats.
 * @cpu: cpu number for stats access
 * @part: target partition
 *
 * The average IO queue length and utilisation statistics are maintained
 * by observing the current state of the queue length and the amount of
 * time it has been in this state for.
 *
 * Normally, that accounting is done on IO completion, but that can result
 * in more than a second's worth of IO being accounted for within any one
 * second, leading to >100% utilisation.  To deal with that, we call this
 * function to do a round-off before returning the results when reading
 * /proc/diskstats.  This accounts immediately for all queue usage up to
 * the current jiffies and restarts the counters again.
 */
void part_round_stats(int cpu, struct hd_struct *part)
{
        unsigned long now = jiffies;

        /* a partition's stats also roll up into the whole-disk part0 */
        if (part->partno)
                part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
        part_round_stats_single(cpu, part, now);
}
EXPORT_SYMBOL_GPL(part_round_stats);

/*
 * queue lock must be held
 */
void __blk_put_request(struct request_queue *q, struct request *req)
{
        if (unlikely(!q))
                return;
        if (unlikely(--req->ref_count))
                return;

        elv_completed_request(q, req);

        /* this is a bio leak */
        WARN_ON(req->bio != NULL);

        /*
         * Request may not have originated from ll_rw_blk. if not,
         * it didn't come out of our reserved rq pools
         */
        if (req->cmd_flags & REQ_ALLOCED) {
                int is_sync = rq_is_sync(req) != 0;
                int priv = req->cmd_flags & REQ_ELVPRIV;

                BUG_ON(!list_empty(&req->queuelist));
                BUG_ON(!hlist_unhashed(&req->hash));

                blk_free_request(q, req);
                freed_request(q, is_sync, priv);
        }
}
EXPORT_SYMBOL_GPL(__blk_put_request);

/*
 * Drop a reference on @req, taking the queue lock for the caller.
 * Lock-free wrapper around __blk_put_request().
 */
void blk_put_request(struct request *req)
{
        unsigned long flags;
        struct request_queue *q = req->q;

        spin_lock_irqsave(q->queue_lock, flags);
        __blk_put_request(q, req);
        spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_put_request);

/**
 * blk_add_request_payload - add a payload to a request
 * @rq: request to update
 * @page: page backing the payload
 * @len: length of the payload.
 *
 * This allows to later add a payload to an already submitted request by
 * a block driver.  The driver needs to take care of freeing the payload
 * itself.
 *
 * Note that this is a quite horrible hack and nothing but handling of
 * discard requests should ever use it.
 */
void blk_add_request_payload(struct request *rq, struct page *page,
                unsigned int len)
{
        struct bio *bio = rq->bio;

        /* point the request's single bio at the caller-provided page */
        bio->bi_io_vec->bv_page = page;
        bio->bi_io_vec->bv_offset = 0;
        bio->bi_io_vec->bv_len = len;

        bio->bi_size = len;
        bio->bi_vcnt = 1;
        bio->bi_phys_segments = 1;

        rq->__data_len = rq->resid_len = len;
        rq->nr_phys_segments = 1;
        rq->buffer = bio_data(bio);
}
EXPORT_SYMBOL_GPL(blk_add_request_payload);

/*
 * Try to merge @bio at the back of @req.  On success the bio is linked
 * after req->biotail, sizes/ioprio are updated and the elevator is
 * notified.  Returns false if the merge is not allowed.
 */
static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
                                   struct bio *bio)
{
        const int ff = bio->bi_rw & REQ_FAILFAST_MASK;

        if (!ll_back_merge_fn(q, req, bio))
                return false;

        trace_block_bio_backmerge(q, bio);

        /* differing failfast settings force mixed-merge accounting */
        if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
                blk_rq_set_mixed_merge(req);

        req->biotail->bi_next = bio;
        req->biotail = bio;
        req->__data_len += bio->bi_size;
        req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

        drive_stat_acct(req, 0);
        elv_bio_merged(q, req, bio);
        return true;
}

/*
 * Try to merge @bio at the front of @req.  On success the bio becomes
 * the new head, the request's start sector is rewound and the elevator
 * is notified.  Returns false if the merge is not allowed.
 */
static bool bio_attempt_front_merge(struct request_queue *q,
                                    struct request *req, struct bio *bio)
{
        const int ff = bio->bi_rw & REQ_FAILFAST_MASK;

        if (!ll_front_merge_fn(q, req, bio))
                return false;

        trace_block_bio_frontmerge(q, bio);

        if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
                blk_rq_set_mixed_merge(req);

        bio->bi_next = req->bio;
        req->bio = bio;

        /*
         * may not be valid. if the low level driver said
         * it didn't need a bounce buffer then it better
         * not touch req->buffer either...
         */
        req->buffer = bio_data(bio);
        req->__sector = bio->bi_sector;
        req->__data_len += bio->bi_size;
        req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

        drive_stat_acct(req, 0);
        elv_bio_merged(q, req, bio);
        return true;
}

/*
 * Attempts to merge with the plugged list in the current process. Returns
 * true if merge was successful, otherwise false.
 */
static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q,
                               struct bio *bio)
{
        struct blk_plug *plug;
        struct request *rq;
        bool ret = false;

        plug = tsk->plug;
        if (!plug)
                goto out;

        /* newest requests are at the tail; scan backwards for a match */
        list_for_each_entry_reverse(rq, &plug->list, queuelist) {
                int el_ret;

                /* only merge into requests destined for the same queue */
                if (rq->q != q)
                        continue;

                el_ret = elv_try_merge(rq, bio);
                if (el_ret == ELEVATOR_BACK_MERGE) {
                        ret = bio_attempt_back_merge(q, rq, bio);
                        if (ret)
                                break;
                } else if (el_ret == ELEVATOR_FRONT_MERGE) {
                        ret = bio_attempt_front_merge(q, rq, bio);
                        if (ret)
                                break;
                }
        }
out:
        return ret;
}

/*
 * Seed a freshly-allocated request from @bio: direction/common flags,
 * start sector, priority and the bio chain itself.
 */
void init_request_from_bio(struct request *req, struct bio *bio)
{
        req->cpu = bio->bi_comp_cpu;
        req->cmd_type = REQ_TYPE_FS;

        req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
        /* readahead IO is expendable: fail fast rather than retry */
        if (bio->bi_rw & REQ_RAHEAD)
                req->cmd_flags |= REQ_FAILFAST_MASK;

        req->errors = 0;
        req->__sector = bio->bi_sector;
        req->ioprio = bio_prio(bio);
        blk_rq_bio_prep(req->q, req, bio);
}

/*
 * Core entry for request_fn-based queues: merge @bio into a plugged or
 * queued request when possible, otherwise allocate a new request and
 * either park it on the task plug or hand it to the elevator.
 */
static int __make_request(struct request_queue *q, struct bio *bio)
{
        const bool sync = !!(bio->bi_rw & REQ_SYNC);
        struct blk_plug *plug;
        int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
        struct request *req;

        /*
         * low level driver can indicate that it wants pages above a
         * certain limit bounced to low memory (ie for highmem, or even
         * ISA dma in theory)
         */
        blk_queue_bounce(q, &bio);

        /* flush/FUA bios bypass merging and go straight to the flush machinery */
        if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
                spin_lock_irq(q->queue_lock);
                where = ELEVATOR_INSERT_FLUSH;
                goto get_rq;
        }

        /*
         * Check if we can merge with the plugged list before grabbing
         * any locks.
         */
        if (attempt_plug_merge(current, q, bio))
                goto out;

        spin_lock_irq(q->queue_lock);

        el_ret = elv_merge(q, &req, bio);
        if (el_ret == ELEVATOR_BACK_MERGE) {
                if (bio_attempt_back_merge(q, req, bio)) {
                        if (!attempt_back_merge(q, req))
                                elv_merged_request(q, req, el_ret);
                        goto out_unlock;
                }
        } else if (el_ret == ELEVATOR_FRONT_MERGE) {
                if (bio_attempt_front_merge(q, req, bio)) {
                        if (!attempt_front_merge(q, req))
                                elv_merged_request(q, req, el_ret);
                        goto out_unlock;
                }
        }

get_rq:
        /*
         * This sync check and mask will be re-done in init_request_from_bio(),
         * but we need to set it earlier to expose the sync flag to the
         * rq allocator and io schedulers.
         */
        rw_flags = bio_data_dir(bio);
        if (sync)
                rw_flags |= REQ_SYNC;

        /*
         * Grab a free request. This is might sleep but can not fail.
         * Returns with the queue unlocked.
         */
        req = get_request_wait(q, rw_flags, bio);

        /*
         * After dropping the lock and possibly sleeping here, our request
         * may now be mergeable after it had proven unmergeable (above).
         * We don't worry about that case for efficiency. It won't happen
         * often, and the elevators are able to handle it.
         */
        init_request_from_bio(req, bio);

        if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
            bio_flagged(bio, BIO_CPU_AFFINE)) {
                req->cpu = blk_cpu_to_group(get_cpu());
                put_cpu();
        }

        plug = current->plug;
        if (plug) {
                /*
                 * If this is the first request added after a plug, fire
                 * of a plug trace. If others have been added before, check
                 * if we have multiple devices in this plug. If so, make a
                 * note to sort the list before dispatch.
                 */
                if (list_empty(&plug->list))
                        trace_block_plug(q);
                else if (!plug->should_sort) {
                        struct request *__rq;

                        __rq = list_entry_rq(plug->list.prev);
                        if (__rq->q != q)
                                plug->should_sort = 1;
                }
                list_add_tail(&req->queuelist, &plug->list);
                drive_stat_acct(req, 1);
        } else {
                spin_lock_irq(q->queue_lock);
                add_acct_request(q, req, where);
                __blk_run_queue(q);
out_unlock:
                spin_unlock_irq(q->queue_lock);
        }
out:
        return 0;
}

/*
 * If bio->bi_dev is a partition, remap the location
 */
static inline void blk_partition_remap(struct bio *bio)
{
        struct block_device *bdev = bio->bi_bdev;

        if (bio_sectors(bio) && bdev != bdev->bd_contains) {
                struct hd_struct *p = bdev->bd_part;

                /* shift to absolute sectors and retarget the whole disk */
                bio->bi_sector += p->start_sect;
                bio->bi_bdev = bdev->bd_contains;

                trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
                                      bdev->bd_dev,
                                      bio->bi_sector - p->start_sect);
        }
}

/*
 * Log an out-of-range access and flag the bio as EOF so completion
 * paths can tell it never reached the device.
 */
static void handle_bad_sector(struct bio *bio)
{
        char b[BDEVNAME_SIZE];

        printk(KERN_INFO "attempt to access beyond end of device\n");
        printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
                        bdevname(bio->bi_bdev, b),
                        bio->bi_rw,
                        (unsigned long long)bio->bi_sector + bio_sectors(bio),
                        (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));

        set_bit(BIO_EOF, &bio->bi_flags);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
        return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

/* fault-injection hook: fail bios on parts marked via sysfs make_it_fail */
static int should_fail_request(struct bio *bio)
{
        struct hd_struct *part = bio->bi_bdev->bd_part;

        if (part_to_disk(part)->part0.make_it_fail || part->make_it_fail)
                return should_fail(&fail_make_request, bio->bi_size);

        return 0;
}

static int __init fail_make_request_debugfs(void)
{
        return init_fault_attr_dentries(&fail_make_request,
                                        "fail_make_request");
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline int should_fail_request(struct bio *bio)
{
        return 0;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 *
Check whether this bio extends beyond the end of the device.
 */
static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
{
        sector_t maxsector;

        /* zero-length bios (e.g. empty flushes) can never run off the end */
        if (!nr_sectors)
                return 0;

        /* Test device or partition size, when known. */
        maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
        if (maxsector) {
                sector_t sector = bio->bi_sector;

                /*
                 * comparison order avoids sector_t overflow:
                 * check nr_sectors alone before subtracting it
                 */
                if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
                        /*
                         * This may well happen - the kernel calls bread()
                         * without checking the size of the device, e.g., when
                         * mounting a device.
                         */
                        handle_bad_sector(bio);
                        return 1;
                }
        }

        return 0;
}

/**
 * generic_make_request - hand a buffer to its device driver for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * generic_make_request() is used to make I/O requests of block
 * devices. It is passed a &struct bio, which describes the I/O that needs
 * to be done.
 *
 * generic_make_request() does not return any status.  The
 * success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the bio->bi_end_io
 * function described (one day) else where.
 *
 * The caller of generic_make_request must make sure that bi_io_vec
 * are set to describe the memory buffer, and that bi_dev and bi_sector are
 * set to describe the device address, and the
 * bi_end_io and optionally bi_private are set to describe how
 * completion notification should be signaled.
 *
 * generic_make_request and the drivers it calls may use bi_next if this
 * bio happens to be merged with someone else, and may change bi_dev and
 * bi_sector for remaps as it sees fit.  So the values of these fields
 * should NOT be depended on after the call to generic_make_request.
 */
static inline void __generic_make_request(struct bio *bio)
{
        struct request_queue *q;
        sector_t old_sector;
        int ret, nr_sectors = 0;
        dev_t old_dev;
        int err = -EIO;

        if (bio)
                nr_sectors = bio_sectors(bio);

        might_sleep();

        if (bio_check_eod(bio, nr_sectors))
                goto end_io;

        /*
         * Resolve the mapping until finished. (drivers are
         * still free to implement/resolve their own stacking
         * by explicitly returning 0)
         *
         * NOTE: we don't repeat the blk_size check for each new device.
         * Stacking drivers are expected to know what they are doing.
         */
        old_sector = -1;
        old_dev = 0;
        do {
                char b[BDEVNAME_SIZE];

                q = bdev_get_queue(bio->bi_bdev);
                if (unlikely(!q)) {
                        printk(KERN_ERR
                               "generic_make_request: Trying to access "
                                "nonexistent block-device %s (%Lu)\n",
                                bdevname(bio->bi_bdev, b),
                                (long long) bio->bi_sector);
                        goto end_io;
                }

                if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
                             nr_sectors > queue_max_hw_sectors(q))) {
                        printk(KERN_ERR "bio too big device %s (%u > %u)\n",
                               bdevname(bio->bi_bdev, b),
                               bio_sectors(bio),
                               queue_max_hw_sectors(q));
                        goto end_io;
                }

                if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
                        goto end_io;

                if (should_fail_request(bio))
                        goto end_io;

                /*
                 * If this device has partitions, remap block n
                 * of partition p to block n+start(p) of the disk.
                 */
                blk_partition_remap(bio);

                if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
                        goto end_io;

                if (old_sector != -1)
                        trace_block_bio_remap(q, bio, old_dev, old_sector);

                old_sector = bio->bi_sector;
                old_dev = bio->bi_bdev->bd_dev;

                /* re-check EOD after the partition remap above */
                if (bio_check_eod(bio, nr_sectors))
                        goto end_io;

                /*
                 * Filter flush bio's early so that make_request based
                 * drivers without flush support don't have to worry
                 * about them.
                 */
                if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
                        bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
                        if (!nr_sectors) {
                                err = 0;
                                goto end_io;
                        }
                }

                if ((bio->bi_rw & REQ_DISCARD) &&
                    (!blk_queue_discard(q) ||
                     ((bio->bi_rw & REQ_SECURE) &&
                      !blk_queue_secdiscard(q)))) {
                        err = -EOPNOTSUPP;
                        goto end_io;
                }

                if (blk_throtl_bio(q, &bio))
                        goto end_io;

                /*
                 * If bio = NULL, bio has been throttled and will be submitted
                 * later.
                 */
                if (!bio)
                        break;

                trace_block_bio_queue(q, bio);

                ret = q->make_request_fn(q, bio);
        } while (ret);

        return;

end_io:
        bio_endio(bio, err);
}

/*
 * We only want one ->make_request_fn to be active at a time,
 * else stack usage with stacked devices could be a problem.
 * So use current->bio_list to keep a list of requests
 * submited by a make_request_fn function.
 * current->bio_list is also used as a flag to say if
 * generic_make_request is currently active in this task or not.
 * If it is NULL, then no make_request is active.  If it is non-NULL,
 * then a make_request is active, and new requests should be added
 * at the tail
 */
void generic_make_request(struct bio *bio)
{
        struct bio_list bio_list_on_stack;

        if (current->bio_list) {
                /* make_request is active */
                bio_list_add(current->bio_list, bio);
                return;
        }
        /* following loop may be a bit non-obvious, and so deserves some
         * explanation.
         * Before entering the loop, bio->bi_next is NULL (as all callers
         * ensure that) so we have a list with a single bio.
         * We pretend that we have just taken it off a longer list, so
         * we assign bio_list to a pointer to the bio_list_on_stack,
         * thus initialising the bio_list of new bios to be
         * added.  __generic_make_request may indeed add some more bios
         * through a recursive call to generic_make_request.  If it
         * did, we find a non-NULL value in bio_list and re-enter the loop
         * from the top.  In this case we really did just take the bio
         * of the top of the list (no pretending) and so remove it from
         * bio_list, and call into __generic_make_request again.
         *
         * The loop was structured like this to make only one call to
         * __generic_make_request (which is important as it is large and
         * inlined) and to keep the structure simple.
         */
        BUG_ON(bio->bi_next);
        bio_list_init(&bio_list_on_stack);
        current->bio_list = &bio_list_on_stack;
        do {
                __generic_make_request(bio);
                bio = bio_list_pop(current->bio_list);
        } while (bio);
        current->bio_list = NULL; /* deactivate */
}
EXPORT_SYMBOL(generic_make_request);

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is very similar in purpose to generic_make_request(), and
 * uses that function to do most of the work. Both are fairly rough
 * interfaces; @bio must be presetup and ready for I/O.
 *
 */
void submit_bio(int rw, struct bio *bio)
{
        int count = bio_sectors(bio);

        bio->bi_rw |= rw;

        /*
         * If it's a regular read/write or a barrier with data attached,
         * go through the normal accounting stuff before submission.
         */
        if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
                if (rw & WRITE) {
                        count_vm_events(PGPGOUT, count);
                } else {
                        task_io_account_read(bio->bi_size);
                        count_vm_events(PGPGIN, count);
                }

                if (unlikely(block_dump)) {
                        char b[BDEVNAME_SIZE];
                        printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
                        current->comm, task_pid_nr(current),
                                (rw & WRITE) ? "WRITE" : "READ",
                                (unsigned long long)bio->bi_sector,
                                bdevname(bio->bi_bdev, b),
                                count);
                }
        }

        generic_make_request(bio);
}
EXPORT_SYMBOL(submit_bio);

/**
 * blk_rq_check_limits - Helper function to check a request for the queue limit
 * @q:  the queue
 * @rq: the request being checked
 *
 * Description:
 *    @rq may have been made based on weaker limitations of upper-level queues
 *    in request stacking drivers, and it may violate the limitation of @q.
 *    Since the block layer and the underlying device driver trust @rq
 *    after it is inserted to @q, it should be checked against @q before
 *    the insertion using this generic function.
 *
 *    This function should also be useful for request stacking drivers
 *    in some cases below, so export this function.
 *    Request stacking drivers like request-based dm may change the queue
 *    limits while requests are in the queue (e.g. dm's table swapping).
 *    Such request stacking drivers should check those requests agaist
 *    the new queue limits again when they dispatch those requests,
 *    although such checkings are also done against the old queue limits
 *    when submitting requests.
 */
int blk_rq_check_limits(struct request_queue *q, struct request *rq)
{
        /* discards carry no payload subject to the size limits below */
        if (rq->cmd_flags & REQ_DISCARD)
                return 0;

        if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
            blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
                printk(KERN_ERR "%s: over max size limit.\n", __func__);
                return -EIO;
        }

        /*
         * queue's settings related to segment counting like q->bounce_pfn
         * may differ from that of other stacking queues.
         * Recalculate it to check the request correctly on this queue's
         * limitation.
         */
        blk_recalc_rq_segments(rq);
        if (rq->nr_phys_segments > queue_max_segments(q)) {
                printk(KERN_ERR "%s: over max segments limit.\n", __func__);
                return -EIO;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(blk_rq_check_limits);

/**
 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
 * @q:  the queue to submit the request
 * @rq: the request being queued
 */
int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
        unsigned long flags;

        if (blk_rq_check_limits(q, rq))
                return -EIO;

#ifdef CONFIG_FAIL_MAKE_REQUEST
        if (rq->rq_disk && rq->rq_disk->part0.make_it_fail &&
            should_fail(&fail_make_request, blk_rq_bytes(rq)))
                return -EIO;
#endif

        spin_lock_irqsave(q->queue_lock, flags);

        /*
         * Submitting request must be dequeued before calling this function
         * because it will be linked to another request_queue
         */
        BUG_ON(blk_queued_rq(rq));

        add_acct_request(q, rq, ELEVATOR_INSERT_BACK);
        spin_unlock_irqrestore(q->queue_lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);

/**
 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
 * @rq: request to examine
 *
 * Description:
 *     A request could be merge of IOs which require different failure
 *     handling.  This function determines the number of bytes which
 *     can be failed from the beginning of the request without
 *     crossing into area which need to be retried further.
 *
 * Return:
 *     The number of bytes to fail.
 *
 * Context:
 *     queue_lock must be held.
 */
unsigned int blk_rq_err_bytes(const struct request *rq)
{
        unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
        unsigned int bytes = 0;
        struct bio *bio;

        if (!(rq->cmd_flags & REQ_MIXED_MERGE))
                return blk_rq_bytes(rq);

        /*
         * Currently the only 'mixing' which can happen is between
         * different fastfail types.  We can safely fail portions
         * which have all the failfast bits that the first one has -
         * the ones which are at least as eager to fail as the first
         * one.
         */
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                if ((bio->bi_rw & ff) != ff)
                        break;
                bytes += bio->bi_size;
        }

        /* this could lead to infinite loop */
        BUG_ON(blk_rq_bytes(rq) && !bytes);
        return bytes;
}
EXPORT_SYMBOL_GPL(blk_rq_err_bytes);

/*
 * Account partially completed bytes of @req against the owning
 * partition's sector counters (when io stats are enabled).
 */
static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
        if (blk_do_io_stat(req)) {
                const int rw = rq_data_dir(req);
                struct hd_struct *part;
                int cpu;

                cpu = part_stat_lock();
                part = req->part;
                part_stat_add(cpu, part, sectors[rw], bytes >> 9);
                part_stat_unlock();
        }
}

static void blk_account_io_done(struct request *req)
{
        /*
         * Account IO completion.  flush_rq isn't accounted as a
         * normal IO on queueing nor completion.  Accounting the
         * containing request is enough.
         */
        if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
                unsigned long duration = jiffies - req->start_time;
                const int rw = rq_data_dir(req);
                struct hd_struct *part;
                int cpu;

                cpu = part_stat_lock();
                part = req->part;

                part_stat_inc(cpu, part, ios[rw]);
                part_stat_add(cpu, part, ticks[rw], duration);
                part_round_stats(cpu, part);
                part_dec_in_flight(part, rw);

                hd_struct_put(part);
                part_stat_unlock();
        }
}

/**
 * blk_peek_request - peek at the top of a request queue
 * @q: request queue to peek at
 *
 * Description:
 *     Return the request at the top of @q.  The returned request
 *     should be started using blk_start_request() before LLD starts
 *     processing it.
 *
 * Return:
 *     Pointer to the request at the top of @q if available.  Null
 *     otherwise.
 *
 * Context:
 *     queue_lock must be held.
 */
struct request *blk_peek_request(struct request_queue *q)
{
        struct request *rq;
        int ret;

        while ((rq = __elv_next_request(q)) != NULL) {
                if (!(rq->cmd_flags & REQ_STARTED)) {
                        /*
                         * This is the first time the device driver
                         * sees this request (possibly after
                         * requeueing).  Notify IO scheduler.
                         */
                        if (rq->cmd_flags & REQ_SORTED)
                                elv_activate_rq(q, rq);

                        /*
                         * just mark as started even if we don't start
                         * it, a request that has been delayed should
                         * not be passed by new incoming requests
                         */
                        rq->cmd_flags |= REQ_STARTED;
                        trace_block_rq_issue(q, rq);
                }

                if (!q->boundary_rq || q->boundary_rq == rq) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = NULL;
                }

                if (rq->cmd_flags & REQ_DONTPREP)
                        break;

                if (q->dma_drain_size && blk_rq_bytes(rq)) {
                        /*
                         * make sure space for the drain appears we
                         * know we can do this because max_hw_segments
                         * has been adjusted to be one fewer than the
                         * device can handle
                         */
                        rq->nr_phys_segments++;
                }

                if (!q->prep_rq_fn)
                        break;

                ret = q->prep_rq_fn(q, rq);
                if (ret == BLKPREP_OK) {
                        break;
                } else if (ret == BLKPREP_DEFER) {
                        /*
                         * the request may have been (partially) prepped.
                         * we need to keep this request in the front to
                         * avoid resource deadlock.  REQ_STARTED will
                         * prevent other fs requests from passing this one.
                         */
                        if (q->dma_drain_size && blk_rq_bytes(rq) &&
                            !(rq->cmd_flags & REQ_DONTPREP)) {
                                /*
                                 * remove the space for the drain we added
                                 * so that we don't add it again
                                 */
                                --rq->nr_phys_segments;
                        }

                        rq = NULL;
                        break;
                } else if (ret == BLKPREP_KILL) {
                        rq->cmd_flags |= REQ_QUIET;
                        /*
                         * Mark this request as started so we don't trigger
                         * any debug logic in the end I/O path.
                         */
                        blk_start_request(rq);
                        __blk_end_request_all(rq, -EIO);
                } else {
                        printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
                        break;
                }
        }

        return rq;
}
EXPORT_SYMBOL(blk_peek_request);

/*
 * Remove @rq from its queue lists and start in-flight accounting.
 * Internal helper; drivers normally go through blk_start_request().
 */
void blk_dequeue_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        BUG_ON(list_empty(&rq->queuelist));
        BUG_ON(ELV_ON_HASH(rq));

        list_del_init(&rq->queuelist);

        /*
         * the time frame between a request being removed from the lists
         * and to it is freed is accounted as io that is in progress at
         * the driver side.
         */
        if (blk_account_rq(rq)) {
                q->in_flight[rq_is_sync(rq)]++;
                set_io_start_time_ns(rq);
        }
}

/**
 * blk_start_request - start request processing on the driver
 * @req: request to dequeue
 *
 * Description:
 *     Dequeue @req and start timeout timer on it.  This hands off the
 *     request to the driver.
 *
 *     Block internal functions which don't want to start timer should
 *     call blk_dequeue_request().
 *
 * Context:
 *     queue_lock must be held.
 */
void blk_start_request(struct request *req)
{
        blk_dequeue_request(req);

        /*
         * We are now handing the request to the hardware, initialize
         * resid_len to full count and add the timeout handler.
         */
        req->resid_len = blk_rq_bytes(req);
        if (unlikely(blk_bidi_rq(req)))
                req->next_rq->resid_len = blk_rq_bytes(req->next_rq);

        blk_add_timer(req);
}
EXPORT_SYMBOL(blk_start_request);

/**
 * blk_fetch_request - fetch a request from a request queue
 * @q: request queue to fetch a request from
 *
 * Description:
 *     Return the request at the top of @q.  The request is started on
 *     return and LLD can start processing it immediately.
 *
 * Return:
 *     Pointer to the request at the top of @q if available.  Null
 *     otherwise.
 *
 * Context:
 *     queue_lock must be held.
 */
struct request *blk_fetch_request(struct request_queue *q)
{
        struct request *rq;

        rq = blk_peek_request(q);
        if (rq)
                blk_start_request(rq);
        return rq;
}
EXPORT_SYMBOL(blk_fetch_request);

/**
 * blk_update_request - Special helper function for request stacking drivers
 * @req:      the request being processed
 * @error:    %0 for success, < %0 for error
 * @nr_bytes: number of bytes to complete @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     This special helper function is only for request stacking drivers
 *     (e.g. request-based dm) so that they can handle partial completion.
 *     Actual device drivers should use blk_end_request instead.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
{
	int total_bytes, bio_nbytes, next_idx = 0;
	struct bio *bio;

	if (!req->bio)
		return false;

	trace_block_rq_complete(req->q, req);

	/*
	 * For fs requests, rq is just carrier of independent bio's
	 * and each partial completion should be handled separately.
	 * Reset per-request error on each partial completion.
	 *
	 * TODO: tj: This is too subtle.  It would be better to let
	 * low level drivers do what they see fit.
	 */
	if (req->cmd_type == REQ_TYPE_FS)
		req->errors = 0;

	if (error && req->cmd_type == REQ_TYPE_FS &&
	    !(req->cmd_flags & REQ_QUIET)) {
		char *error_type;

		/* Map errno to a human-readable class for the log message. */
		switch (error) {
		case -ENOLINK:
			error_type = "recoverable transport";
			break;
		case -EREMOTEIO:
			error_type = "critical target";
			break;
		case -EBADE:
			error_type = "critical nexus";
			break;
		case -EIO:
		default:
			error_type = "I/O";
			break;
		}
		printk(KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
		       error_type, req->rq_disk ?
		       req->rq_disk->disk_name : "?",
		       (unsigned long long)blk_rq_pos(req));
	}

	blk_account_io_completion(req, nr_bytes);

	total_bytes = bio_nbytes = 0;
	/* Walk the bio list, retiring fully-completed bios. */
	while ((bio = req->bio) != NULL) {
		int nbytes;

		if (nr_bytes >= bio->bi_size) {
			req->bio = bio->bi_next;
			nbytes = bio->bi_size;
			req_bio_endio(req, bio, nbytes, error);
			next_idx = 0;
			bio_nbytes = 0;
		} else {
			int idx = bio->bi_idx + next_idx;

			if (unlikely(idx >= bio->bi_vcnt)) {
				blk_dump_rq_flags(req, "__end_that");
				printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
				       __func__, idx, bio->bi_vcnt);
				break;
			}

			nbytes = bio_iovec_idx(bio, idx)->bv_len;
			BIO_BUG_ON(nbytes > bio->bi_size);

			/*
			 * not a complete bvec done
			 */
			if (unlikely(nbytes > nr_bytes)) {
				bio_nbytes += nr_bytes;
				total_bytes += nr_bytes;
				break;
			}

			/*
			 * advance to the next vector
			 */
			next_idx++;
			bio_nbytes += nbytes;
		}

		total_bytes += nbytes;
		nr_bytes -= nbytes;

		bio = req->bio;
		if (bio) {
			/*
			 * end more in this run, or just return 'not-done'
			 */
			if (unlikely(nr_bytes <= 0))
				break;
		}
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	/*
	 * if the request wasn't completed, update state
	 */
	if (bio_nbytes) {
		req_bio_endio(req, bio, bio_nbytes, error);
		bio->bi_idx += next_idx;
		bio_iovec(bio)->bv_offset += nr_bytes;
		bio_iovec(bio)->bv_len -= nr_bytes;
	}

	req->__data_len -= total_bytes;
	req->buffer = bio_data(req->bio);

	/* update sector only for requests with clear definition of sector */
	if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->cmd_flags & REQ_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
	}

	/*
	 * If total number of sectors is less than the first segment
	 * size, something has gone terribly wrong.
	 */
	if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
		blk_dump_rq_flags(req, "request botched");
		req->__data_len = blk_rq_cur_bytes(req);
	}

	/* recalculate the number of segments */
	blk_recalc_rq_segments(req);

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);

/*
 * Update both halves of a (possibly bidi) request; returns true while
 * either direction still has pending data.
 */
static bool blk_update_bidi_request(struct request *rq, int error,
				    unsigned int nr_bytes,
				    unsigned int bidi_bytes)
{
	if (blk_update_request(rq, error, nr_bytes))
		return true;

	/* Bidi request must be completed as a whole */
	if (unlikely(blk_bidi_rq(rq)) &&
	    blk_update_request(rq->next_rq, error, bidi_bytes))
		return true;

	if (blk_queue_add_random(rq->q))
		add_disk_randomness(rq->rq_disk);

	return false;
}

/**
 * blk_unprep_request - unprepare a request
 * @req:	the request
 *
 * This function makes a request ready for complete resubmission (or
 * completion).  It happens only after all error handling is complete,
 * so represents the appropriate moment to deallocate any resources
 * that were allocated to the request in the prep_rq_fn.  The queue
 * lock is held when calling this.
*/ void blk_unprep_request(struct request *req) { struct request_queue *q = req->q; req->cmd_flags &= ~REQ_DONTPREP; if (q->unprep_rq_fn) q->unprep_rq_fn(q, req); } EXPORT_SYMBOL_GPL(blk_unprep_request); /* * queue lock must be held */ static void blk_finish_request(struct request *req, int error) { if (blk_rq_tagged(req)) blk_queue_end_tag(req->q, req); BUG_ON(blk_queued_rq(req)); if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS) laptop_io_completion(&req->q->backing_dev_info); blk_delete_timer(req); if (req->cmd_flags & REQ_DONTPREP) blk_unprep_request(req); blk_account_io_done(req); if (req->end_io) req->end_io(req, error); else { if (blk_bidi_rq(req)) __blk_put_request(req->next_rq->q, req->next_rq); __blk_put_request(req->q, req); } } /** * blk_end_bidi_request - Complete a bidi request * @rq: the request to complete * @error: %0 for success, < %0 for error * @nr_bytes: number of bytes to complete @rq * @bidi_bytes: number of bytes to complete @rq->next_rq * * Description: * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. * Drivers that supports bidi can safely call this member for any * type of request, bidi or uni. In the later case @bidi_bytes is * just ignored. 
* * Return: * %false - we are done with this request * %true - still buffers pending for this request **/ static bool blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes, unsigned int bidi_bytes) { struct request_queue *q = rq->q; unsigned long flags; if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) return true; spin_lock_irqsave(q->queue_lock, flags); blk_finish_request(rq, error); spin_unlock_irqrestore(q->queue_lock, flags); return false; } /** * __blk_end_bidi_request - Complete a bidi request with queue lock held * @rq: the request to complete * @error: %0 for success, < %0 for error * @nr_bytes: number of bytes to complete @rq * @bidi_bytes: number of bytes to complete @rq->next_rq * * Description: * Identical to blk_end_bidi_request() except that queue lock is * assumed to be locked on entry and remains so on return. * * Return: * %false - we are done with this request * %true - still buffers pending for this request **/ static bool __blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes, unsigned int bidi_bytes) { if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) return true; blk_finish_request(rq, error); return false; } /** * blk_end_request - Helper function for drivers to complete the request. * @rq: the request being processed * @error: %0 for success, < %0 for error * @nr_bytes: number of bytes to complete * * Description: * Ends I/O on a number of bytes attached to @rq. * If @rq has leftover, sets it up for the next range of segments. * * Return: * %false - we are done with this request * %true - still buffers pending for this request **/ bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes) { return blk_end_bidi_request(rq, error, nr_bytes, 0); } EXPORT_SYMBOL(blk_end_request); /** * blk_end_request_all - Helper function for drives to finish the request. 
* @rq: the request to finish * @error: %0 for success, < %0 for error * * Description: * Completely finish @rq. */ void blk_end_request_all(struct request *rq, int error) { bool pending; unsigned int bidi_bytes = 0; if (unlikely(blk_bidi_rq(rq))) bidi_bytes = blk_rq_bytes(rq->next_rq); pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); BUG_ON(pending); } EXPORT_SYMBOL(blk_end_request_all); /** * blk_end_request_cur - Helper function to finish the current request chunk. * @rq: the request to finish the current chunk for * @error: %0 for success, < %0 for error * * Description: * Complete the current consecutively mapped chunk from @rq. * * Return: * %false - we are done with this request * %true - still buffers pending for this request */ bool blk_end_request_cur(struct request *rq, int error) { return blk_end_request(rq, error, blk_rq_cur_bytes(rq)); } EXPORT_SYMBOL(blk_end_request_cur); /** * blk_end_request_err - Finish a request till the next failure boundary. * @rq: the request to finish till the next failure boundary for * @error: must be negative errno * * Description: * Complete @rq till the next failure boundary. * * Return: * %false - we are done with this request * %true - still buffers pending for this request */ bool blk_end_request_err(struct request *rq, int error) { WARN_ON(error >= 0); return blk_end_request(rq, error, blk_rq_err_bytes(rq)); } EXPORT_SYMBOL_GPL(blk_end_request_err); /** * __blk_end_request - Helper function for drivers to complete the request. * @rq: the request being processed * @error: %0 for success, < %0 for error * @nr_bytes: number of bytes to complete * * Description: * Must be called with queue lock held unlike blk_end_request(). 
* * Return: * %false - we are done with this request * %true - still buffers pending for this request **/ bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) { return __blk_end_bidi_request(rq, error, nr_bytes, 0); } EXPORT_SYMBOL(__blk_end_request); /** * __blk_end_request_all - Helper function for drives to finish the request. * @rq: the request to finish * @error: %0 for success, < %0 for error * * Description: * Completely finish @rq. Must be called with queue lock held. */ void __blk_end_request_all(struct request *rq, int error) { bool pending; unsigned int bidi_bytes = 0; if (unlikely(blk_bidi_rq(rq))) bidi_bytes = blk_rq_bytes(rq->next_rq); pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); BUG_ON(pending); } EXPORT_SYMBOL(__blk_end_request_all); /** * __blk_end_request_cur - Helper function to finish the current request chunk. * @rq: the request to finish the current chunk for * @error: %0 for success, < %0 for error * * Description: * Complete the current consecutively mapped chunk from @rq. Must * be called with queue lock held. * * Return: * %false - we are done with this request * %true - still buffers pending for this request */ bool __blk_end_request_cur(struct request *rq, int error) { return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); } EXPORT_SYMBOL(__blk_end_request_cur); /** * __blk_end_request_err - Finish a request till the next failure boundary. * @rq: the request to finish till the next failure boundary for * @error: must be negative errno * * Description: * Complete @rq till the next failure boundary. Must be called * with queue lock held. 
* * Return: * %false - we are done with this request * %true - still buffers pending for this request */ bool __blk_end_request_err(struct request *rq, int error) { WARN_ON(error >= 0); return __blk_end_request(rq, error, blk_rq_err_bytes(rq)); } EXPORT_SYMBOL_GPL(__blk_end_request_err); void blk_rq_bio_prep(struct request_queue *q, struct request *rq, struct bio *bio) { /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */ rq->cmd_flags |= bio->bi_rw & REQ_WRITE; if (bio_has_data(bio)) { rq->nr_phys_segments = bio_phys_segments(q, bio); rq->buffer = bio_data(bio); } rq->__data_len = bio->bi_size; rq->bio = rq->biotail = bio; if (bio->bi_bdev) rq->rq_disk = bio->bi_bdev->bd_disk; } #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE /** * rq_flush_dcache_pages - Helper function to flush all pages in a request * @rq: the request to be flushed * * Description: * Flush all pages in @rq. */ void rq_flush_dcache_pages(struct request *rq) { struct req_iterator iter; struct bio_vec *bvec; rq_for_each_segment(bvec, rq, iter) flush_dcache_page(bvec->bv_page); } EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); #endif /** * blk_lld_busy - Check if underlying low-level drivers of a device are busy * @q : the queue of the device being checked * * Description: * Check if underlying low-level drivers of a device are busy. * If the drivers want to export their busy state, they must set own * exporting function using blk_queue_lld_busy() first. * * Basically, this function is used only by request stacking drivers * to stop dispatching requests to underlying devices when underlying * devices are busy. This behavior helps more I/O merging on the queue * of the request stacking driver and prevents I/O throughput regression * on burst I/O load. 
* * Return: * 0 - Not busy (The request stacking driver should dispatch request) * 1 - Busy (The request stacking driver should stop dispatching request) */ int blk_lld_busy(struct request_queue *q) { if (q->lld_busy_fn) return q->lld_busy_fn(q); return 0; } EXPORT_SYMBOL_GPL(blk_lld_busy); /** * blk_rq_unprep_clone - Helper function to free all bios in a cloned request * @rq: the clone request to be cleaned up * * Description: * Free all bios in @rq for a cloned request. */ void blk_rq_unprep_clone(struct request *rq) { struct bio *bio; while ((bio = rq->bio) != NULL) { rq->bio = bio->bi_next; bio_put(bio); } } EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); /* * Copy attributes of the original request to the clone request. * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied. */ static void __blk_rq_prep_clone(struct request *dst, struct request *src) { dst->cpu = src->cpu; dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE; dst->cmd_type = src->cmd_type; dst->__sector = blk_rq_pos(src); dst->__data_len = blk_rq_bytes(src); dst->nr_phys_segments = src->nr_phys_segments; dst->ioprio = src->ioprio; dst->extra_len = src->extra_len; } /** * blk_rq_prep_clone - Helper function to setup clone request * @rq: the request to be setup * @rq_src: original request to be cloned * @bs: bio_set that bios for clone are allocated from * @gfp_mask: memory allocation mask for bio * @bio_ctr: setup function to be called for each clone bio. * Returns %0 for success, non %0 for failure. * @data: private data to be passed to @bio_ctr * * Description: * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. * The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense) * are not copied, and copying such parts is the caller's responsibility. * Also, pages which the original bios are pointing to are not copied * and the cloned bios just point same pages. 
 *     So cloned bios must be completed before original bios, which means
 *     the caller must complete @rq before @rq_src.
 */
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		      struct bio_set *bs, gfp_t gfp_mask,
		      int (*bio_ctr)(struct bio *, struct bio *, void *),
		      void *data)
{
	struct bio *bio, *bio_src;

	if (!bs)
		bs = fs_bio_set;

	blk_rq_init(NULL, rq);

	__rq_for_each_bio(bio_src, rq_src) {
		bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs);
		if (!bio)
			goto free_and_out;

		__bio_clone(bio, bio_src);

		if (bio_integrity(bio_src) &&
		    bio_integrity_clone(bio, bio_src, gfp_mask, bs))
			goto free_and_out;

		if (bio_ctr && bio_ctr(bio, bio_src, data))
			goto free_and_out;

		if (rq->bio) {
			rq->biotail->bi_next = bio;
			rq->biotail = bio;
		} else
			rq->bio = rq->biotail = bio;
	}

	__blk_rq_prep_clone(rq, rq_src);

	return 0;

free_and_out:
	/* Unwind: release the partially built bio chain on any failure. */
	if (bio)
		bio_free(bio, bs);
	blk_rq_unprep_clone(rq);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(blk_rq_prep_clone);

/* Queue @work on the shared kblockd workqueue. */
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

/* Queue delayed @dwork on the shared kblockd workqueue. */
int kblockd_schedule_delayed_work(struct request_queue *q,
				  struct delayed_work *dwork,
				  unsigned long delay)
{
	return queue_delayed_work(kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_schedule_delayed_work);

/* Sanity value used to detect use of an uninitialized blk_plug. */
#define PLUG_MAGIC	0x91827364

/* Initialize a plug and install it as the current task's plug. */
void blk_start_plug(struct blk_plug *plug)
{
	struct task_struct *tsk = current;

	plug->magic = PLUG_MAGIC;
	INIT_LIST_HEAD(&plug->list);
	INIT_LIST_HEAD(&plug->cb_list);
	plug->should_sort = 0;

	/*
	 * If this is a nested plug, don't actually assign it.  It will be
	 * flushed on its own.
	 */
	if (!tsk->plug) {
		/*
		 * Store ordering should not be needed here, since a potential
		 * preempt will imply a full memory barrier
		 */
		tsk->plug = plug;
	}
}
EXPORT_SYMBOL(blk_start_plug);

/* list_sort() comparator: group plugged requests by owning queue. */
static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return !(rqa->q <= rqb->q);
}

/*
 * If 'from_schedule' is true, then postpone the dispatch of requests
 * until a safe kblockd context.  We do this to avoid accidental big
 * additional stack usage in driver dispatch, in places where the originally
 * plugger did not intend it.
 */
static void queue_unplugged(struct request_queue *q, unsigned int depth,
			    bool from_schedule)
	__releases(q->queue_lock)
{
	trace_block_unplug(q, depth, !from_schedule);

	/*
	 * If we are punting this to kblockd, then we can safely drop
	 * the queue_lock before waking kblockd (which needs to take
	 * this lock).
	 */
	if (from_schedule) {
		spin_unlock(q->queue_lock);
		blk_run_queue_async(q);
	} else {
		__blk_run_queue(q);
		spin_unlock(q->queue_lock);
	}
}

/* Invoke and drain all callbacks registered on the plug. */
static void flush_plug_callbacks(struct blk_plug *plug)
{
	LIST_HEAD(callbacks);

	if (list_empty(&plug->cb_list))
		return;

	list_splice_init(&plug->cb_list, &callbacks);

	while (!list_empty(&callbacks)) {
		struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
		list_del(&cb->list);
		cb->callback(cb);
	}
}

/* Dispatch all requests held in @plug, batching per queue. */
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct request_queue *q;
	unsigned long flags;
	struct request *rq;
	LIST_HEAD(list);
	unsigned int depth;

	BUG_ON(plug->magic != PLUG_MAGIC);

	flush_plug_callbacks(plug);
	if (list_empty(&plug->list))
		return;

	list_splice_init(&plug->list, &list);

	if (plug->should_sort) {
		list_sort(NULL, &list, plug_rq_cmp);
		plug->should_sort = 0;
	}

	q = NULL;
	depth = 0;

	/*
	 * Save and disable interrupts here, to avoid doing it for every
	 * queue lock we have to take.
	 */
	local_irq_save(flags);
	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);
		BUG_ON(!rq->q);
		if (rq->q != q) {
			/*
			 * This drops the queue lock
			 */
			if (q)
				queue_unplugged(q, depth, from_schedule);
			q = rq->q;
			depth = 0;
			spin_lock(q->queue_lock);
		}
		/*
		 * rq is already accounted, so use raw insert
		 */
		if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA))
			__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
		else
			__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);

		depth++;
	}

	/*
	 * This drops the queue lock
	 */
	if (q)
		queue_unplugged(q, depth, from_schedule);

	local_irq_restore(flags);
}

/* Flush the plug and detach it from the current task if installed. */
void blk_finish_plug(struct blk_plug *plug)
{
	blk_flush_plug_list(plug, false);

	if (plug == current->plug)
		current->plug = NULL;
}
EXPORT_SYMBOL(blk_finish_plug);

/* One-time block layer initialization: workqueue and slab caches. */
int __init blk_dev_init(void)
{
	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
			sizeof(((struct request *)0)->cmd_flags));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	request_cachep = kmem_cache_create("blkdev_requests",
			sizeof(struct request), 0, SLAB_PANIC, NULL);

	blk_requestq_cachep = kmem_cache_create("blkdev_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	return 0;
}
gpl-2.0
danshuk/u-boot-mini2440
cpu/mpc824x/drivers/epic/epic1.c
70
15039
/************************************************** * * copyright @ motorola, 1999 * *************************************************/ #include <mpc824x.h> #include <common.h> #include "epic.h" #define PRINT(format, args...) printf(format , ## args) typedef void (*VOIDFUNCPTR) (void); /* ptr to function returning void */ struct SrcVecTable SrcVecTable[MAXVEC] = /* Addr/Vector cross-reference tbl */ { { EPIC_EX_INT0_VEC_REG, "External Direct/Serial Source 0"}, { EPIC_EX_INT1_VEC_REG, "External Direct/Serial Source 1"}, { EPIC_EX_INT2_VEC_REG, "External Direct/Serial Source 2"}, { EPIC_EX_INT3_VEC_REG, "External Direct/Serial Source 3"}, { EPIC_EX_INT4_VEC_REG, "External Direct/Serial Source 4"}, { EPIC_SR_INT5_VEC_REG, "External Serial Source 5"}, { EPIC_SR_INT6_VEC_REG, "External Serial Source 6"}, { EPIC_SR_INT7_VEC_REG, "External Serial Source 7"}, { EPIC_SR_INT8_VEC_REG, "External Serial Source 8"}, { EPIC_SR_INT9_VEC_REG, "External Serial Source 9"}, { EPIC_SR_INT10_VEC_REG, "External Serial Source 10"}, { EPIC_SR_INT11_VEC_REG, "External Serial Source 11"}, { EPIC_SR_INT12_VEC_REG, "External Serial Source 12"}, { EPIC_SR_INT13_VEC_REG, "External Serial Source 13"}, { EPIC_SR_INT14_VEC_REG, "External Serial Source 14"}, { EPIC_SR_INT15_VEC_REG, "External Serial Source 15"}, { EPIC_I2C_INT_VEC_REG, "Internal I2C Source"}, { EPIC_DMA0_INT_VEC_REG, "Internal DMA0 Source"}, { EPIC_DMA1_INT_VEC_REG, "Internal DMA1 Source"}, { EPIC_MSG_INT_VEC_REG, "Internal Message Source"}, }; VOIDFUNCPTR intVecTbl[MAXVEC]; /* Interrupt vector table */ /**************************************************************************** * epicInit - Initialize the EPIC registers * * This routine resets the Global Configuration Register, thus it: * - Disables all interrupts * - Sets epic registers to reset values * - Sets the value of the Processor Current Task Priority to the * highest priority (0xF). * epicInit then sets the EPIC operation mode to Mixed Mode (vs. 
Pass * Through or 8259 compatible mode). * * If IRQType (input) is Direct IRQs: * - IRQType is written to the SIE bit of the EPIC Interrupt * Configuration register (ICR). * - clkRatio is ignored. * If IRQType is Serial IRQs: * - both IRQType and clkRatio will be written to the ICR register */ void epicInit ( unsigned int IRQType, /* Direct or Serial */ unsigned int clkRatio /* Clk Ratio for Serial IRQs */ ) { ULONG tmp; tmp = sysEUMBBARRead(EPIC_GLOBAL_REG); tmp |= 0xa0000000; /* Set the Global Conf. register */ sysEUMBBARWrite(EPIC_GLOBAL_REG, tmp); /* * Wait for EPIC to reset - CLH */ while( (sysEUMBBARRead(EPIC_GLOBAL_REG) & 0x80000000) == 1); sysEUMBBARWrite(EPIC_GLOBAL_REG, 0x20000000); tmp = sysEUMBBARRead(EPIC_INT_CONF_REG); /* Read interrupt conf. reg */ if (IRQType == EPIC_DIRECT_IRQ) /* direct mode */ sysEUMBBARWrite(EPIC_INT_CONF_REG, tmp & 0xf7ffffff); else /* Serial mode */ { tmp = (clkRatio << 28) | 0x08000000; /* Set clock ratio */ sysEUMBBARWrite(EPIC_INT_CONF_REG, tmp); } while (epicIntAck() != 0xff) /* Clear all pending interrupts */ epicEOI(); } /**************************************************************************** * epicIntEnable - Enable an interrupt source * * This routine clears the mask bit of an external, an internal or * a Timer register to enable the interrupt. * * RETURNS: None */ void epicIntEnable(int intVec) { ULONG tmp; ULONG srAddr; srAddr = SrcVecTable[intVec].srcAddr; /* Retrieve src Vec/Prio register */ tmp = sysEUMBBARRead(srAddr); tmp &= ~EPIC_VEC_PRI_MASK; /* Clear the mask bit */ tmp |= (EPIC_VEC_PRI_DFLT_PRI << 16); /* Set priority to Default - CLH */ tmp |= intVec; /* Set Vector number */ sysEUMBBARWrite(srAddr, tmp); return; } /**************************************************************************** * epicIntDisable - Disable an interrupt source * * This routine sets the mask bit of an external, an internal or * a Timer register to disable the interrupt. 
* * RETURNS: OK or ERROR * */ void epicIntDisable ( int intVec /* Interrupt vector number */ ) { ULONG tmp, srAddr; srAddr = SrcVecTable[intVec].srcAddr; tmp = sysEUMBBARRead(srAddr); tmp |= 0x80000000; /* Set the mask bit */ sysEUMBBARWrite(srAddr, tmp); return; } /**************************************************************************** * epicIntSourceConfig - Set properties of an interrupt source * * This function sets interrupt properites (Polarity, Sense, Interrupt * Prority, and Interrupt Vector) of an Interrupt Source. The properties * can be set when the current source is not in-request or in-service, * which is determined by the Activity bit. This routine return ERROR * if the the Activity bit is 1 (in-request or in-service). * * This function assumes that the Source Vector/Priority register (input) * is a valid address. * * RETURNS: OK or ERROR */ int epicIntSourceConfig ( int Vect, /* interrupt source vector number */ int Polarity, /* interrupt source polarity */ int Sense, /* interrupt source Sense */ int Prio /* interrupt source priority */ ) { ULONG tmp, newVal; ULONG actBit, srAddr; srAddr = SrcVecTable[Vect].srcAddr; tmp = sysEUMBBARRead(srAddr); actBit = (tmp & 40000000) >> 30; /* retrieve activity bit - bit 30 */ if (actBit == 1) return ERROR; tmp &= 0xff30ff00; /* Erase previously set P,S,Prio,Vector bits */ newVal = (Polarity << 23) | (Sense << 22) | (Prio << 16) | Vect; sysEUMBBARWrite(srAddr, tmp | newVal ); return (OK); } /**************************************************************************** * epicIntAck - acknowledge an interrupt * * This function reads the Interrupt acknowldge register and return * the vector number of the highest pending interrupt. * * RETURNS: Interrupt Vector number. 
*/
unsigned int epicIntAck(void)
{
    return(sysEUMBBARRead( EPIC_PROC_INT_ACK_REG ));
}

/****************************************************************************
*  epicEOI - signal an end of interrupt
*
*  This function writes 0x0 to the EOI register to signal end of interrupt.
*  It is usually called after an interrupt routine is served.
*
*  RETURNS: None
*/
void epicEOI(void)
{
    sysEUMBBARWrite(EPIC_PROC_EOI_REG, 0x0);
}

/****************************************************************************
* epicCurTaskPrioSet - sets the priority of the Processor Current Task
*
* This function should be called after epicInit() to lower the priority
* of the processor current task.
*
* RETURNS: OK or ERROR
*/
int epicCurTaskPrioSet
    (
    int prioNum		/* New priority value */
    )
{
    /* Valid priorities are 0..0xF. */
    if ( (prioNum < 0) || (prioNum > 0xF))
	return ERROR;

    sysEUMBBARWrite(EPIC_PROC_CTASK_PRI_REG, prioNum);
    return OK;
}

/************************************************************************
* function: epicIntTaskGet
*
* description: Get value of processor current interrupt task priority register
*
* note:
***********************************************************************/
unsigned char epicIntTaskGet()
{
    /* get the interrupt task priority register */
    ULONG reg;
    unsigned char rec;

    reg = sysEUMBBARRead( EPIC_PROC_CTASK_PRI_REG );
    rec = ( reg & 0x0F );
    return rec;
}

/**************************************************************
* function: epicISR
*
* description: EPIC service routine called by the core exception
*              at 0x500
*
* note: currently a stub that always returns 0.
**************************************************************/
unsigned int epicISR(void)
{
    return 0;
}

/************************************************************
* function: epicModeGet
*
* description: query EPIC mode, return 0 if pass through mode
*              return 1 if mixed mode
*
* note: reads the M bit (bit 29) of the Global Configuration register.
*************************************************************/
unsigned int epicModeGet(void)
{
    ULONG val;

    val = sysEUMBBARRead( EPIC_GLOBAL_REG );
    return (( val & 0x20000000 ) >> 29);
}

/*********************************************
* function: epicConfigGet
*
* description: Get the EPIC interrupt Configuration
*              return 0 if not error, otherwise return 1
*
* note: reports the serial-clock ratio and the serial-enable (SIE) bit.
********************************************/
void epicConfigGet( unsigned int *clkRatio, unsigned int *serEnable)
{
    ULONG val;

    val = sysEUMBBARRead( EPIC_INT_CONF_REG );
    *clkRatio = ( val & 0x70000000 ) >> 28;
    *serEnable = ( val & 0x8000000 ) >> 27;
}

/*******************************************************************
* sysEUMBBARRead - Read a 32-bit EUMBBAR register
*
* This routine reads the content of a register in the Embedded
* Utilities Memory Block, and swaps to big endian before returning
* the value.
*
* RETURNS: The content of the specified EUMBBAR register.
*/
ULONG sysEUMBBARRead
    (
    ULONG regNum
    )
{
    ULONG temp;

    temp = *(ULONG *) (CFG_EUMB_ADDR + regNum);
    return ( LONGSWAP(temp));
}

/*******************************************************************
* sysEUMBBARWrite - Write a 32-bit EUMBBAR register
*
* This routine swaps the value to little endian then writes it to
* a register in the Embedded Utilities Memory Block address space.
* * RETURNS: N/A */ void sysEUMBBARWrite ( ULONG regNum, /* EUMBBAR register address */ ULONG regVal /* Value to be written */ ) { *(ULONG *) (CFG_EUMB_ADDR + regNum) = LONGSWAP(regVal); return ; } /******************************************************** * function: epicVendorId * * description: return the EPIC Vendor Identification * register: * * siliccon version, device id, and vendor id * * note: ********************************************************/ void epicVendorId ( unsigned int *step, unsigned int *devId, unsigned int *venId ) { ULONG val; val = sysEUMBBARRead( EPIC_VENDOR_ID_REG ); *step = ( val & 0x00FF0000 ) >> 16; *devId = ( val & 0x0000FF00 ) >> 8; *venId = ( val & 0x000000FF ); } /************************************************** * function: epicFeatures * * description: return the number of IRQ supported, * number of CPU, and the version of the * OpenEPIC * * note: *************************************************/ void epicFeatures ( unsigned int *noIRQs, unsigned int *noCPUs, unsigned int *verId ) { ULONG val; val = sysEUMBBARRead( EPIC_FEATURES_REG ); *noIRQs = ( val & 0x07FF0000 ) >> 16; *noCPUs = ( val & 0x00001F00 ) >> 8; *verId = ( val & 0x000000FF ); } /********************************************************* * function: epciTmFrequncySet * * description: Set the timer frequency reporting register ********************************************************/ void epicTmFrequencySet( unsigned int frq ) { sysEUMBBARWrite(EPIC_TM_FREQ_REG, frq); } /******************************************************* * function: epicTmFrequncyGet * * description: Get the current value of the Timer Frequency * Reporting register * ******************************************************/ unsigned int epicTmFrequencyGet(void) { return( sysEUMBBARRead(EPIC_TM_FREQ_REG)) ; } /**************************************************** * function: epicTmBaseSet * * description: Set the #n global timer base count register * return 0 if no error, otherwise return 1. 
 *
 * note: the timer is inhibited first so the new base count is
 *       loaded atomically, then bit 31 is set from 'inhibit'.
 ****************************************************/
unsigned int epicTmBaseSet
    (
    ULONG srcAddr,        /* Address of the Timer Base register */
    unsigned int cnt,     /* Base count */
    unsigned int inhibit  /* 1 - count inhibit */
    )
{
    unsigned int val = 0x80000000;

    /* First inhibit counting the timer */
    sysEUMBBARWrite(srcAddr, val) ;

    /* set the new value */
    val = (cnt & 0x7fffffff) | ((inhibit & 0x1) << 31);
    sysEUMBBARWrite(srcAddr, val) ;
    return 0;
}

/***********************************************************************
 * function: epicTmBaseGet
 *
 * description: Get the current value of the global timer base count register
 *              return 0 if no error, otherwise return 1.
 *
 * note: bit 31 (count inhibit) is masked off the returned value.
 ***********************************************************************/
unsigned int epicTmBaseGet( ULONG srcAddr, unsigned int *val )
{
    *val = sysEUMBBARRead( srcAddr );
    *val = *val & 0x7fffffff;
    return 0;
}

/***********************************************************
 * function: epicTmCountGet
 *
 * description: Get the value of a given global timer
 *              current count register
 *              return 0 if no error, otherwise return 1
 * note: bit 31 (toggle bit) is masked off the returned value.
 **********************************************************/
unsigned int epicTmCountGet( ULONG srcAddr, unsigned int *val )
{
    *val = sysEUMBBARRead( srcAddr );
    *val = *val & 0x7fffffff;
    return 0;
}

/***********************************************************
 * function: epicTmInhibit
 *
 * description: Stop counting of a given global timer
 *              return 0 if no error, otherwise return 1
 *
 * note: read-modify-write that sets the count-inhibit bit (31).
 ***********************************************************/
unsigned int epicTmInhibit( unsigned int srcAddr )
{
    ULONG val;

    val = sysEUMBBARRead( srcAddr );
    val |= 0x80000000;
    sysEUMBBARWrite( srcAddr, val );
    return 0;
}

/******************************************************************
 * function: epicTmEnable
 *
 * description: Enable counting of a given global timer
 *              return 0 if no error, otherwise return 1
 *
 * note:
*****************************************************************/ unsigned int epicTmEnable( ULONG srcAddr ) { ULONG val; val = sysEUMBBARRead( srcAddr ); val &= 0x7fffffff; sysEUMBBARWrite( srcAddr, val ); return 0; } void epicSourcePrint(int Vect) { ULONG srcVal; srcVal = sysEUMBBARRead(SrcVecTable[Vect].srcAddr); PRINT("%s\n", SrcVecTable[Vect].srcName); PRINT("Address = 0x%lx\n", SrcVecTable[Vect].srcAddr); PRINT("Vector = %ld\n", (srcVal & 0x000000FF) ); PRINT("Mask = %ld\n", srcVal >> 31); PRINT("Activitiy = %ld\n", (srcVal & 40000000) >> 30); PRINT("Polarity = %ld\n", (srcVal & 0x00800000) >> 23); PRINT("Sense = %ld\n", (srcVal & 0x00400000) >> 22); PRINT("Priority = %ld\n", (srcVal & 0x000F0000) >> 16); }
gpl-2.0
agayev/linux
fs/ufs/util.c
70
6164
/*
 *  linux/fs/ufs/util.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

/*
 * Allocate a ufs_buffer_head and read 'size' bytes starting at
 * fragment 'fragment' into its buffer_head array (one bh per
 * fragment).  'size' must be a whole number of fragments and at
 * most UFS_MAXFRAG fragments.  Returns NULL on any failure; on the
 * error path every buffer read so far is released.  Caller owns the
 * result and must free it with ubh_brelse().
 */
struct ufs_buffer_head * _ubh_bread_ (struct ufs_sb_private_info * uspi,
	struct super_block *sb, u64 fragment, u64 size)
{
	struct ufs_buffer_head * ubh;
	unsigned i, j ;
	u64 count = 0;
	/* size must be fragment-aligned */
	if (size & ~uspi->s_fmask)
		return NULL;
	count = size >> uspi->s_fshift;
	if (count > UFS_MAXFRAG)
		return NULL;
	ubh = kmalloc (sizeof (struct ufs_buffer_head), GFP_NOFS);
	if (!ubh)
		return NULL;
	ubh->fragment = fragment;
	ubh->count = count;
	for (i = 0; i < count; i++)
		if (!(ubh->bh[i] = sb_bread(sb, fragment + i)))
			goto failed;
	/* NULL-fill the unused tail of the bh array */
	for (; i < UFS_MAXFRAG; i++)
		ubh->bh[i] = NULL;
	return ubh;
failed:
	/* release the buffers read before the failure */
	for (j = 0; j < i; j++)
		brelse (ubh->bh[j]);
	kfree(ubh);
	return NULL;
}

/*
 * Like _ubh_bread_(), but fills the ufs_buffer_head embedded in the
 * per-superblock private info (USPI_UBH) instead of allocating one.
 * Returns that embedded ubh, or NULL on bad size / read failure.
 */
struct ufs_buffer_head * ubh_bread_uspi (struct ufs_sb_private_info * uspi,
	struct super_block *sb, u64 fragment, u64 size)
{
	unsigned i, j;
	u64 count = 0;
	if (size & ~uspi->s_fmask)
		return NULL;
	count = size >> uspi->s_fshift;
	if (count <= 0 || count > UFS_MAXFRAG)
		return NULL;
	USPI_UBH(uspi)->fragment = fragment;
	USPI_UBH(uspi)->count = count;
	for (i = 0; i < count; i++)
		if (!(USPI_UBH(uspi)->bh[i] = sb_bread(sb, fragment + i)))
			goto failed;
	for (; i < UFS_MAXFRAG; i++)
		USPI_UBH(uspi)->bh[i] = NULL;
	return USPI_UBH(uspi);
failed:
	for (j = 0; j < i; j++)
		brelse (USPI_UBH(uspi)->bh[j]);
	return NULL;
}

/* Release all buffers of a ubh allocated by _ubh_bread_() and free it. */
void ubh_brelse (struct ufs_buffer_head * ubh)
{
	unsigned i;
	if (!ubh)
		return;
	for (i = 0; i < ubh->count; i++)
		brelse (ubh->bh[i]);
	kfree (ubh);
}

/*
 * Release the buffers of the uspi-embedded ubh.  The ubh itself is
 * part of uspi and is not freed; its bh slots are cleared to NULL.
 */
void ubh_brelse_uspi (struct ufs_sb_private_info * uspi)
{
	unsigned i;
	if (!USPI_UBH(uspi))
		return;
	for ( i = 0; i < USPI_UBH(uspi)->count; i++ ) {
		brelse (USPI_UBH(uspi)->bh[i]);
		USPI_UBH(uspi)->bh[i] = NULL;
	}
}

/* Mark every buffer of the ubh dirty (no-op on NULL ubh). */
void ubh_mark_buffer_dirty (struct ufs_buffer_head * ubh)
{
	unsigned i;
	if (!ubh)
		return;
	for ( i = 0; i < ubh->count; i++ )
		mark_buffer_dirty (ubh->bh[i]);
}

/* Set (flag != 0) or clear (flag == 0) the uptodate bit on every buffer. */
void ubh_mark_buffer_uptodate (struct ufs_buffer_head * ubh, int flag)
{
	unsigned i;
	if (!ubh)
		return;
	if (flag) {
		for ( i = 0; i < ubh->count; i++ )
			set_buffer_uptodate (ubh->bh[i]);
	} else {
		for ( i = 0; i < ubh->count; i++ )
			clear_buffer_uptodate (ubh->bh[i]);
	}
}

/*
 * Write out all dirty buffers of the ubh and wait for the writes to
 * complete.  All writes are submitted first, then waited on, so the
 * I/O can proceed in parallel.
 */
void ubh_sync_block(struct ufs_buffer_head *ubh)
{
	if (ubh) {
		unsigned i;

		for (i = 0; i < ubh->count; i++)
			write_dirty_buffer(ubh->bh[i], WRITE);

		for (i = 0; i < ubh->count; i++)
			wait_on_buffer(ubh->bh[i]);
	}
}

/* Forget (discard without writing) every non-NULL buffer of the ubh. */
void ubh_bforget (struct ufs_buffer_head * ubh)
{
	unsigned i;
	if (!ubh)
		return;
	for ( i = 0; i < ubh->count; i++ )
		if ( ubh->bh[i] )
			bforget (ubh->bh[i]);
}

/* Return non-zero iff any buffer of the ubh is dirty (0 for NULL ubh). */
int ubh_buffer_dirty (struct ufs_buffer_head * ubh)
{
	unsigned i;
	unsigned result = 0;
	if (!ubh)
		return 0;
	for ( i = 0; i < ubh->count; i++ )
		result |= buffer_dirty(ubh->bh[i]);
	return result;
}

/*
 * Copy up to 'size' bytes out of the ubh's buffers into 'mem',
 * fragment by fragment.  'size' is clamped to the data the ubh holds.
 * NOTE(review): 'mem' advances by s_fsize each iteration while 'size'
 * decreases by 'len'; the two only differ on the final partial
 * fragment, where the loop terminates — intentional, but subtle.
 */
void _ubh_ubhcpymem_(struct ufs_sb_private_info * uspi, unsigned char * mem,
	struct ufs_buffer_head * ubh, unsigned size)
{
	unsigned len, bhno;
	if (size > (ubh->count << uspi->s_fshift))
		size = ubh->count << uspi->s_fshift;
	bhno = 0;
	while (size) {
		len = min_t(unsigned int, size, uspi->s_fsize);
		memcpy (mem, ubh->bh[bhno]->b_data, len);
		mem += uspi->s_fsize;
		size -= len;
		bhno++;
	}
}

/*
 * Inverse of _ubh_ubhcpymem_(): copy up to 'size' bytes from 'mem'
 * into the ubh's buffers, fragment by fragment.
 */
void _ubh_memcpyubh_(struct ufs_sb_private_info * uspi,
	struct ufs_buffer_head * ubh, unsigned char * mem, unsigned size)
{
	unsigned len, bhno;
	if (size > (ubh->count << uspi->s_fshift))
		size = ubh->count << uspi->s_fshift;
	bhno = 0;
	while (size) {
		len = min_t(unsigned int, size, uspi->s_fsize);
		memcpy (ubh->bh[bhno]->b_data, mem, len);
		mem += uspi->s_fsize;
		size -= len;
		bhno++;
	}
}

/*
 * Decode the device number stored in a UFS device inode.  Sun
 * variants store it in i_data[0] (i_data[1] on SUNx86) and may use
 * either the old 16-bit encoding or the SysV major/minor encoding,
 * distinguished by the high 16 bits.
 */
dev_t
ufs_get_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi)
{
	__u32 fs32;
	dev_t dev;

	if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
		fs32 = fs32_to_cpu(sb, ufsi->i_u1.i_data[1]);
	else
		fs32 = fs32_to_cpu(sb, ufsi->i_u1.i_data[0]);
	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNx86:
	case UFS_ST_SUN:
		/* all-zero or all-one high half means old-style encoding */
		if ((fs32 & 0xffff0000) == 0 ||
		    (fs32 & 0xffff0000) == 0xffff0000)
			dev = old_decode_dev(fs32 & 0x7fff);
		else
			dev = MKDEV(sysv_major(fs32), sysv_minor(fs32));
		break;

	default:
		dev = old_decode_dev(fs32);
		break;
	}
	return dev;
}

/*
 * Encode a device number into the UFS device-inode representation —
 * the inverse of ufs_get_inode_dev().  Sun variants prefer the SysV
 * encoding but fall back to the old encoding when it fits.
 */
void
ufs_set_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi, dev_t dev)
{
	__u32 fs32;

	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNx86:
	case UFS_ST_SUN:
		fs32 = sysv_encode_dev(dev);
		if ((fs32 & 0xffff8000) == 0) {
			fs32 = old_encode_dev(dev);
		}
		break;

	default:
		fs32 = old_encode_dev(dev);
		break;
	}
	if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
		ufsi->i_u1.i_data[1] = cpu_to_fs32(sb, fs32);
	else
		ufsi->i_u1.i_data[0] = cpu_to_fs32(sb, fs32);
}

/**
 * ufs_get_locked_page() - locate, pin and lock a pagecache page, if not exist
 * read it from disk.
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Locates the desired pagecache page, if not exist we'll read it,
 * locks it, increments its reference
 * count and returns its address.
 *
 * Returns the locked page, NULL if truncation raced us, or
 * ERR_PTR(-EIO) when the page could not be read.
 */
struct page *ufs_get_locked_page(struct address_space *mapping,
				 pgoff_t index)
{
	struct page *page;

	page = find_lock_page(mapping, index);
	if (!page) {
		page = read_mapping_page(mapping, index, NULL);

		if (IS_ERR(page)) {
			printk(KERN_ERR "ufs_change_blocknr: "
			       "read_mapping_page error: ino %lu, index: %lu\n",
			       mapping->host->i_ino, index);
			goto out;
		}

		lock_page(page);

		if (unlikely(page->mapping == NULL)) {
			/* Truncate got there first */
			unlock_page(page);
			put_page(page);
			page = NULL;
			goto out;
		}

		if (!PageUptodate(page) || PageError(page)) {
			unlock_page(page);
			put_page(page);

			printk(KERN_ERR "ufs_change_blocknr: "
			       "can not read page: ino %lu, index: %lu\n",
			       mapping->host->i_ino, index);

			page = ERR_PTR(-EIO);
		}
	}
out:
	return page;
}
gpl-2.0
civato/Note8.0-StormBorn
drivers/input/touchscreen/wacom/w9002_flash.c
70
27437
/* * w9002_flash.c - Wacom Digitizer Controller Flash Driver * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/wacom_i2c.h> #include "w9002_flash.h" static int wacom_i2c_flash_chksum(struct wacom_i2c *wac_i2c, unsigned char *flash_data, unsigned long *max_address) { unsigned long i; unsigned long chksum = 0; for (i = 0x0000; i <= *max_address; i++) chksum += flash_data[i]; chksum &= 0xFFFF; return (int)chksum; } static int wacom_flash_cmd(struct wacom_i2c *wac_i2c) { int rv, len, i; u8 buf[10]; bool i2c_mode = WACOM_I2C_MODE_BOOT; #if defined(CONFIG_MACH_KONA) buf[0] = 0x0d; buf[1] = FLASH_START0; buf[2] = FLASH_START1; buf[3] = FLASH_START2; buf[4] = FLASH_START3; buf[5] = FLASH_START4; buf[6] = FLASH_START5; buf[7] = 0x0d; len = 8; rv = wacom_i2c_send(wac_i2c, buf, len, i2c_mode); #else for (i = 0; i < 2; ++i) { len = 0; buf[len++] = 4; buf[len++] = 0; buf[len++] = 0x32; buf[len++] = CMD_SET_FEATURE; rv = wacom_i2c_send(wac_i2c, buf, len, i2c_mode); if (rv < 0) { printk(KERN_DEBUG "epen:fail change to normal:%d\n", rv); i2c_mode = WACOM_I2C_MODE_NORMAL; continue; } len = 0; buf[len++] = 5; buf[len++] = 0; buf[len++] = 4; buf[len++] = 0; buf[len++] = 2; buf[len++] = 2; rv = wacom_i2c_send(wac_i2c, buf, len, i2c_mode); if (rv < 0) { printk(KERN_DEBUG "epen:fail change to normal:%d\n", rv); i2c_mode = 
WACOM_I2C_MODE_NORMAL; continue; } } #endif if (rv < 0) { printk(KERN_ERR "Sending flash command failed\n"); return -1; } printk(KERN_DEBUG "epen:flash cmd sent:%d\n", rv); msleep(500); return 0; } static bool flash_query(struct wacom_i2c *wac_i2c) { int rv, ECH; u8 buf[4]; u16 len; unsigned char command[CMD_SIZE]; unsigned char response[RSP_SIZE]; len = 0; buf[len++] = 4; buf[len++] = 0; buf[len++] = 0x37; buf[len++] = CMD_SET_FEATURE; printk(KERN_DEBUG "epen: %s\n", __func__); rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 1 rv:%d\n", rv); return false; } command[0] = 5; command[1] = 0; command[2] = 5; command[3] = 0; command[4] = BOOT_CMD_REPORT_ID; command[5] = BOOT_QUERY; command[6] = ECH = 7; rv = wacom_i2c_send(wac_i2c, command, 7, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 2 rv:%d\n", rv); return false; } len = 0; buf[len++] = 4; buf[len++] = 0; buf[len++] = 0x38; buf[len++] = CMD_GET_FEATURE; rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 3 rv:%d\n", rv); return false; } len = 0; buf[len++] = 5; buf[len++] = 0; rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 4 rv:%d\n", rv); return false; } usleep_range(10000, 10000); rv = wacom_i2c_recv(wac_i2c, response, BOOT_RSP_SIZE, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 5 rv:%d\n", rv); return false; } if ((response[3] != QUERY_CMD) || (response[4] != ECH)) { printk(KERN_DEBUG "epen: res3:%d res4:%d\n", response[3], response[4]); return false; } if (response[5] != QUERY_RSP) { printk(KERN_DEBUG "epen: res5:%d\n", response[5]); return false; } return true; } static bool flash_blver(struct wacom_i2c *wac_i2c, int *blver) { int rv, ECH; u8 buf[4]; u16 len; unsigned char command[CMD_SIZE]; unsigned char response[RSP_SIZE]; len = 0; buf[len++] = 4; buf[len++] = 0; buf[len++] = 0x37; buf[len++] = CMD_SET_FEATURE; rv = 
wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 1 rv:%d\n", rv); return false; } command[0] = 5; command[1] = 0; command[2] = 5; command[3] = 0; command[4] = BOOT_CMD_REPORT_ID; command[5] = BOOT_BLVER; command[6] = ECH = 7; rv = wacom_i2c_send(wac_i2c, command, 7, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 2 rv:%d\n", rv); return false; } usleep_range(10000, 10000); len = 0; buf[len++] = 4; buf[len++] = 0; buf[len++] = 0x38; buf[len++] = CMD_GET_FEATURE; rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 3 rv:%d\n", rv); return false; } len = 0; buf[len++] = 5; buf[len++] = 0; rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 4 rv:%d\n", rv); return false; } usleep_range(10000, 10000); rv = wacom_i2c_recv(wac_i2c, response, BOOT_RSP_SIZE, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 5 rv:%d\n", rv); return false; } if ((response[3] != BOOT_CMD) || (response[4] != ECH)) return false; *blver = (int)response[5]; return true; } static bool flash_mputype(struct wacom_i2c *wac_i2c, int *pMpuType) { int rv, ECH; u8 buf[4]; u16 len; unsigned char command[CMD_SIZE]; unsigned char response[RSP_SIZE]; len = 0; buf[len++] = 4; buf[len++] = 0; buf[len++] = 0x37; buf[len++] = CMD_SET_FEATURE; /* Command-MSB, SET_REPORT */ rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 1 rv:%d\n", rv); return false; } command[0] = 5; command[1] = 0; command[2] = 5; command[3] = 0; command[4] = BOOT_CMD_REPORT_ID; command[5] = BOOT_MPU; command[6] = ECH = 7; rv = wacom_i2c_send(wac_i2c, command, 7, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 2 rv:%d\n", rv); return false; } usleep_range(10000, 10000); len = 0; buf[len++] = 4; buf[len++] = 0; buf[len++] = 0x38; buf[len++] = CMD_GET_FEATURE; rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); 
if (rv < 0) { printk(KERN_DEBUG "epen: 3 rv:%d\n", rv); return false; } len = 0; buf[len++] = 5; buf[len++] = 0; rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 4 rv:%d\n", rv); return false; } usleep_range(1000, 1000); rv = wacom_i2c_recv(wac_i2c, response, BOOT_RSP_SIZE, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 5 rv:%d\n", rv); return false; } if ((response[3] != MPU_CMD) || (response[4] != ECH)) return false; *pMpuType = (int)response[5]; return true; } static bool flash_security_unlock(struct wacom_i2c *wac_i2c, int *status) { int rv, ECH; u8 buf[4]; u16 len; unsigned char command[CMD_SIZE]; unsigned char response[RSP_SIZE]; len = 0; buf[len++] = 4; buf[len++] = 0; buf[len++] = 0x37; buf[len++] = CMD_SET_FEATURE; rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 1 rv:%d\n", rv); return false; } command[0] = 5; command[1] = 0; command[2] = 5; command[3] = 0; command[4] = BOOT_CMD_REPORT_ID; command[5] = BOOT_SECURITY_UNLOCK; command[6] = ECH = 7; rv = wacom_i2c_send(wac_i2c, command, 7, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 2 rv:%d\n", rv); return false; } usleep_range(10000, 10000); len = 0; buf[len++] = 4; buf[len++] = 0; buf[len++] = 0x38; buf[len++] = CMD_GET_FEATURE; rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 3 rv:%d\n", rv); return 0; } len = 0; buf[len++] = 5; buf[len++] = 0; rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 4 rv:%d\n", rv); return false; } usleep_range(1000, 1000); rv = wacom_i2c_recv(wac_i2c, response, BOOT_RSP_SIZE, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 5 rv:%d\n", rv); return false; } if ((response[3] != SEC_CMD) || (response[4] != ECH)) return false; *status = (int)response[5]; return true; } static bool flash_end(struct wacom_i2c *wac_i2c) { int rv, ECH; u8 
buf[4]; u16 len; unsigned char command[CMD_SIZE]; len = 0; buf[len++] = 4; buf[len++] = 0; buf[len++] = 0x37; buf[len++] = CMD_SET_FEATURE; rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 1 rv:%d\n", rv); return false; } command[0] = 5; command[1] = 0; command[2] = 5; command[3] = 0; command[4] = BOOT_CMD_REPORT_ID; command[5] = BOOT_EXIT; command[6] = ECH = 7; rv = wacom_i2c_send(wac_i2c, command, 7, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 2 rv:%d\n", rv); return false; } return true; } static int GetBLVersion(struct wacom_i2c *wac_i2c, int *pBLVer) { int rv; int retry = 0; wacom_flash_cmd(wac_i2c); do { msleep(100); rv = flash_query(wac_i2c); retry++; } while (rv < 0 && retry < 10); if (rv < 0) return EXIT_FAIL_GET_BOOT_LOADER_VERSION; rv = flash_blver(wac_i2c, pBLVer); if (rv) return EXIT_OK; else return EXIT_FAIL_GET_BOOT_LOADER_VERSION; } static int GetMpuType(struct wacom_i2c *wac_i2c, int *pMpuType) { int rv; if (!flash_query(wac_i2c)) { if (!wacom_flash_cmd(wac_i2c)) { return EXIT_FAIL_ENTER_FLASH_MODE; } else { msleep(100); if (!flash_query(wac_i2c)) return EXIT_FAIL_FLASH_QUERY; } } rv = flash_mputype(wac_i2c, pMpuType); if (rv) return EXIT_OK; else return EXIT_FAIL_GET_MPU_TYPE; } static int SetSecurityUnlock(struct wacom_i2c *wac_i2c, int *pStatus) { int rv; if (!flash_query(wac_i2c)) { if (!wacom_flash_cmd(wac_i2c)) { return EXIT_FAIL_ENTER_FLASH_MODE; } else { msleep(100); if (!flash_query(wac_i2c)) return EXIT_FAIL_FLASH_QUERY; } } rv = flash_security_unlock(wac_i2c, pStatus); if (rv) return EXIT_OK; else return EXIT_FAIL; } static bool flash_erase(struct wacom_i2c *wac_i2c, bool bAllUserArea, int *eraseBlock, int num) { int rv, ECH; unsigned char sum; unsigned char buf[72]; unsigned char cmd_chksum; u16 len; int i, j; unsigned char command[CMD_SIZE]; unsigned char response[RSP_SIZE]; for (i = 0; i < num; i++) { /*msleep(500);*/ retry: len = 0; buf[len++] = 4; buf[len++] = 0; 
buf[len++] = 0x37; buf[len++] = CMD_SET_FEATURE; rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: failing 1:%d\n", i); return false; } command[0] = 5; command[1] = 0; command[2] = 7; command[3] = 0; command[4] = BOOT_CMD_REPORT_ID; command[5] = BOOT_ERASE_FLASH; command[6] = ECH = i; command[7] = *eraseBlock; eraseBlock++; sum = 0; for (j = 0; j < 8; j++) sum += command[j]; cmd_chksum = ~sum + 1; command[8] = cmd_chksum; rv = wacom_i2c_send(wac_i2c, command, 9, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: failing 2:%d\n", i); return false; } switch (i) { case 0: msleep(3000); break; case 1: msleep(3000); break; case 2: msleep(5000); break; case 3: msleep(500); break; default: msleep(5000); break; } len = 0; buf[len++] = 4; buf[len++] = 0; buf[len++] = 0x38; buf[len++] = CMD_GET_FEATURE; rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: failing 3:%d\n", i); return false; } len = 0; buf[len++] = 5; buf[len++] = 0; rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: failing 4:%d\n", i); return false; } rv = wacom_i2c_recv(wac_i2c, response, BOOT_RSP_SIZE, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: failing 5:%d\n", i); return false; } if ((response[3] != ERS_CMD) || (response[4] != ECH)) { printk(KERN_DEBUG "epen: failing 6:%d\n", i); return false; } if (response[5] == 0x80) { printk(KERN_DEBUG "epen: retry\n"); goto retry; } if (response[5] != ACK) { printk(KERN_DEBUG "epen: failing 7:%d res5:%d\n", i, response[5]); return false; } } return true; } static bool is_flash_marking(struct wacom_i2c *wac_i2c, size_t data_size, bool *bMarking, int iMpuID) { const int MAX_CMD_SIZE = (12 + FLASH_BLOCK_SIZE + 2); int rv, ECH; unsigned char flash_data[FLASH_BLOCK_SIZE]; unsigned char buf[300]; unsigned char sum; int len; unsigned int i, j; unsigned char response[RSP_SIZE]; unsigned char 
command[MAX_CMD_SIZE]; *bMarking = false; printk(KERN_DEBUG "epen: started\n"); for (i = 0; i < FLASH_BLOCK_SIZE; i++) flash_data[i] = 0xFF; flash_data[56] = 0x00; len = 0; buf[len++] = 4; buf[len++] = 0; buf[len++] = 0x37; buf[len++] = CMD_SET_FEATURE; rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 1 rv:%d\n", rv); return false; } command[0] = 5; command[1] = 0; command[2] = 76; command[3] = 0; command[4] = BOOT_CMD_REPORT_ID; command[5] = BOOT_VERIFY_FLASH; command[6] = ECH = 1; command[7] = 0xC0; command[8] = 0x1F; command[9] = 0x01; command[10] = 0x00; command[11] = 8; sum = 0; for (j = 0; j < 12; j++) sum += command[j]; command[MAX_CMD_SIZE - 2] = ~sum + 1; sum = 0; printk(KERN_DEBUG "epen: start writing command\n"); for (i = 12; i < (FLASH_BLOCK_SIZE + 12); i++) { command[i] = flash_data[i - 12]; sum += flash_data[i - 12]; } command[MAX_CMD_SIZE - 1] = ~sum + 1; printk(KERN_DEBUG "epen: sending command\n"); rv = wacom_i2c_send(wac_i2c, command, MAX_CMD_SIZE, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 2 rv:%d\n", rv); return false; } usleep_range(10000, 10000); len = 0; buf[len++] = 4; buf[len++] = 0; buf[len++] = 0x38; buf[len++] = CMD_GET_FEATURE; rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 3 rv:%d\n", rv); return false; } len = 0; buf[len++] = 5; buf[len++] = 0; rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 4 rv:%d\n", rv); return false; } rv = wacom_i2c_recv(wac_i2c, response, RSP_SIZE, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 5 rv:%d\n", rv); return false; } printk(KERN_DEBUG "epen: checking response\n"); if ((response[3] != MARK_CMD) || (response[4] != ECH) || (response[5] != ACK)) { printk(KERN_DEBUG "epen: fails res3:%d res4:%d res5:%d\n", response[3], response[4], response[5]); return false; } *bMarking = true; return true; } static bool 
flash_write_block(struct wacom_i2c *wac_i2c, char *flash_data, unsigned long ulAddress, u8 *pcommand_id) { const int MAX_COM_SIZE = (12 + FLASH_BLOCK_SIZE + 2); int len, ECH; unsigned char buf[300]; int rv; unsigned char sum; unsigned char command[MAX_COM_SIZE]; unsigned char response[RSP_SIZE]; unsigned int i; len = 0; buf[len++] = 4; buf[len++] = 0; buf[len++] = 0x37; buf[len++] = CMD_SET_FEATURE; rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) return false; command[0] = 5; command[1] = 0; command[2] = 76; command[3] = 0; command[4] = BOOT_CMD_REPORT_ID; command[5] = BOOT_WRITE_FLASH; command[6] = ECH = ++(*pcommand_id); command[7] = ulAddress & 0x000000ff; command[8] = (ulAddress & 0x0000ff00) >> 8; command[9] = (ulAddress & 0x00ff0000) >> 16; command[10] = (ulAddress & 0xff000000) >> 24; command[11] = 8; sum = 0; for (i = 0; i < 12; i++) sum += command[i]; command[MAX_COM_SIZE - 2] = ~sum + 1; sum = 0; for (i = 12; i < (FLASH_BLOCK_SIZE + 12); i++) { command[i] = flash_data[ulAddress + (i - 12)]; sum += flash_data[ulAddress + (i - 12)]; } command[MAX_COM_SIZE - 1] = ~sum + 1; rv = wacom_i2c_send(wac_i2c, command, BOOT_CMD_SIZE, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 1 rv:%d\n", rv); return false; } usleep_range(10000, 10000); len = 0; buf[len++] = 4; buf[len++] = 0; buf[len++] = 0x38; buf[len++] = CMD_GET_FEATURE; rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 2 rv:%d\n", rv); return false; } len = 0; buf[len++] = 5; buf[len++] = 0; rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 3 rv:%d\n", rv); return false; } rv = wacom_i2c_recv(wac_i2c, response, BOOT_RSP_SIZE, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 4 rv:%d\n", rv); return false; } if ((response[3] != WRITE_CMD) || (response[4] != ECH) || response[5] != ACK) return false; return true; } static bool flash_write(struct wacom_i2c *wac_i2c, 
unsigned char *flash_data, size_t data_size, unsigned long start_address, unsigned long *max_address, int mpuType) { unsigned long ulAddress; int i; bool rv; unsigned long pageNo = 0; u8 command_id = 0; printk(KERN_DEBUG "epen: flash_write start\n"); for (ulAddress = start_address; ulAddress < *max_address; ulAddress += FLASH_BLOCK_SIZE) { unsigned int j; bool bWrite = false; /* Wacom 2012/10/04: skip if all each data locating on from ulAddr to ulAddr+Block_SIZE_W are 0xff */ for (i = 0; i < FLASH_BLOCK_SIZE; i++) { if (flash_data[ulAddress + i] != 0xFF) break; } if (i == (FLASH_BLOCK_SIZE)) { /*printk(KERN_DEBUG"epen:BLOCK PASSED\n"); */ continue; } /* Wacom 2012/10/04 */ for (j = 0; j < FLASH_BLOCK_SIZE; j++) { if (flash_data[ulAddress + j] == 0xFF) continue; else { bWrite = true; break; } } if (!bWrite) { pageNo++; continue; } rv = flash_write_block(wac_i2c, flash_data, ulAddress, &command_id); if (!rv) return false; pageNo++; } return true; } static bool flash_verify(struct wacom_i2c *wac_i2c, unsigned char *flash_data, size_t data_size, unsigned long start_address, unsigned long *max_address, int mpuType) { int ECH; unsigned long ulAddress; int rv; unsigned long pageNo = 0; u8 command_id = 0; printk(KERN_DEBUG "epen: verify starts\n"); for (ulAddress = start_address; ulAddress < *max_address; ulAddress += FLASH_BLOCK_SIZE) { const int MAX_CMD_SIZE = 12 + FLASH_BLOCK_SIZE + 2; unsigned char buf[300]; unsigned char sum; int len; unsigned int i, j; unsigned char command[MAX_CMD_SIZE]; unsigned char response[RSP_SIZE]; len = 0; buf[len++] = 4; buf[len++] = 0; buf[len++] = 0x37; buf[len++] = CMD_SET_FEATURE; rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 1 rv:%d\n", rv); return false; } command[0] = 5; command[1] = 0; command[2] = 76; command[3] = 0; command[4] = BOOT_CMD_REPORT_ID; command[5] = BOOT_VERIFY_FLASH; command[6] = ECH = ++command_id; command[7] = ulAddress & 0x000000ff; command[8] = (ulAddress & 
0x0000ff00) >> 8; command[9] = (ulAddress & 0x00ff0000) >> 16; command[10] = (ulAddress & 0xff000000) >> 24; command[11] = 8; sum = 0; for (j = 0; j < 12; j++) sum += command[j]; command[MAX_CMD_SIZE - 2] = ~sum + 1; sum = 0; for (i = 12; i < (FLASH_BLOCK_SIZE + 12); i++) { command[i] = flash_data[ulAddress + (i - 12)]; sum += flash_data[ulAddress + (i - 12)]; } command[MAX_CMD_SIZE - 1] = ~sum + 1; rv = wacom_i2c_send(wac_i2c, command, BOOT_CMD_SIZE, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 2 rv:%d\n", rv); return false; } if (ulAddress <= 0x0ffff) ndelay(250000); else if (ulAddress >= 0x10000 && ulAddress <= 0x20000) ndelay(350000); else usleep_range(10000, 10000); len = 0; buf[len++] = 4; buf[len++] = 0; buf[len++] = 0x38; buf[len++] = CMD_GET_FEATURE; rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 3 rv:%d\n", rv); return false; } len = 0; buf[len++] = 5; buf[len++] = 0; rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 4 rv:%d\n", rv); return false; } rv = wacom_i2c_recv(wac_i2c, response, BOOT_RSP_SIZE, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 5 rv:%d\n", rv); return false; } if ((response[3] != VERIFY_CMD) || (response[4] != ECH) || (response[5] != ACK)) { printk(KERN_DEBUG "epen: res3:%d res4:%d res5:%d\n", response[3], response[4], response[5]); return false; } pageNo++; } return true; } static bool flash_marking(struct wacom_i2c *wac_i2c, size_t data_size, bool bMarking, int iMpuID) { const int MAX_CMD_SIZE = 12 + FLASH_BLOCK_SIZE + 2; int rv, ECH; unsigned char flash_data[FLASH_BLOCK_SIZE]; unsigned char buf[300]; unsigned char response[RSP_SIZE]; unsigned char sum; int len; unsigned int i, j; unsigned char command[MAX_CMD_SIZE]; for (i = 0; i < FLASH_BLOCK_SIZE; i++) flash_data[i] = 0xFF; if (bMarking) flash_data[56] = 0x00; len = 0; buf[len++] = 4; buf[len++] = 0; buf[len++] = 0x37; buf[len++] = CMD_SET_FEATURE; 
rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 1 rv:%d\n", rv); return false; } command[0] = 5; command[1] = 0; command[2] = 76; command[3] = 0; command[4] = BOOT_CMD_REPORT_ID; command[5] = BOOT_WRITE_FLASH; command[6] = ECH = 1; command[7] = 0xC0; command[8] = 0x1F; command[9] = 0x01; command[10] = 0x00; command[11] = 8; sum = 0; for (j = 0; j < 12; j++) sum += command[j]; command[MAX_CMD_SIZE - 2] = ~sum + 1; sum = 0; for (i = 12; i < (FLASH_BLOCK_SIZE + 12); i++) { command[i] = flash_data[i - 12]; sum += flash_data[i - 12]; } command[MAX_CMD_SIZE - 1] = ~sum + 1; rv = wacom_i2c_send(wac_i2c, command, BOOT_CMD_SIZE, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 2 rv:%d\n", rv); return false; } usleep_range(10000, 10000); len = 0; buf[len++] = 4; buf[len++] = 0; buf[len++] = 0x38; buf[len++] = CMD_GET_FEATURE; rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 3 rv:%d\n", rv); return false; } len = 0; buf[len++] = 5; buf[len++] = 0; rv = wacom_i2c_send(wac_i2c, buf, len, WACOM_I2C_MODE_BOOT); if (rv < 0) { printk(KERN_DEBUG "epen: 4 rv:%d\n", rv); return false; } printk(KERN_DEBUG "epen: confirming marking\n"); rv = wacom_i2c_recv(wac_i2c, response, BOOT_RSP_SIZE, WACOM_I2C_MODE_BOOT); if (rv < 0) return false; if ((response[3] != 1) || (response[4] != ECH)\ || (response[5] != ACK)) { printk(KERN_DEBUG "epen: failing res3:%d res4:%d res5:%d\n", response[3], response[4], response[5]); return false; } return true; } int wacom_i2c_flash(struct wacom_i2c *wac_i2c) { unsigned long max_address = 0; unsigned long start_address = 0x4000; int eraseBlock[50], eraseBlockNum; bool bRet; int iChecksum; int iBLVer, iMpuType, iStatus; bool bMarking; int iRet; unsigned long ulMaxRange; if (Binary == NULL) { printk(KERN_ERR"[E-PEN] Data is NULL. 
Exit.\n"); return -1; } #ifdef WACOM_HAVE_FWE_PIN if (wac_i2c->have_fwe_pin) { wac_i2c->wac_pdata->compulsory_flash_mode(true); /*Reset */ wac_i2c->wac_pdata->reset_platform_hw(); msleep(200); printk(KERN_DEBUG "epen: Set FWE\n"); } #endif wake_lock(&wac_i2c->wakelock); printk(KERN_DEBUG "epen:start getting the boot loader version\n"); /*Obtain boot loader version */ iRet = GetBLVersion(wac_i2c, &iBLVer); if (iRet != EXIT_OK) { printk(KERN_DEBUG "epen:failed to get Boot Loader version\n"); goto fw_update_error; } printk(KERN_DEBUG "epen: start getting the MPU version\n"); /*Obtain MPU type: this can be manually done in user space */ iRet = GetMpuType(wac_i2c, &iMpuType); if (iRet != EXIT_OK) { printk(KERN_DEBUG "epen: failed to get MPU type\n"); goto fw_update_error; } /*Set start and end address and block numbers */ eraseBlockNum = 0; start_address = 0x4000; max_address = 0x12FFF; eraseBlock[eraseBlockNum++] = 2; eraseBlock[eraseBlockNum++] = 1; eraseBlock[eraseBlockNum++] = 0; eraseBlock[eraseBlockNum++] = 3; printk(KERN_DEBUG "epen: obtaining the checksum\n"); /*Calculate checksum */ iChecksum = wacom_i2c_flash_chksum(wac_i2c, Binary, &max_address); printk(KERN_DEBUG "epen: Checksum is :%d\n", iChecksum); bRet = true; printk(KERN_DEBUG "epen: setting the security unlock\n"); /*Unlock security */ iRet = SetSecurityUnlock(wac_i2c, &iStatus); if (iRet != EXIT_OK) { printk(KERN_DEBUG "epen: failed to set security unlock\n"); goto fw_update_error; } /*Set adress range */ ulMaxRange = max_address; ulMaxRange -= start_address; ulMaxRange >>= 6; if (max_address > (ulMaxRange << 6)) ulMaxRange++; printk(KERN_DEBUG "epen: connecting to Wacom Digitizer\n"); printk(KERN_DEBUG "epen: erasing the current firmware\n"); /*Erase the old program */ bRet = flash_erase(wac_i2c, true, eraseBlock, eraseBlockNum); if (!bRet) { printk(KERN_DEBUG "epen: failed to erase the user program\n"); iRet = EXIT_FAIL_ERASE; goto fw_update_error; } printk(KERN_DEBUG "epen: erasing done\n"); 
max_address = 0x11FC0; printk(KERN_DEBUG "epen: writing new firmware\n"); /*Write the new program */ bRet = flash_write(wac_i2c, Binary, DATA_SIZE, start_address, &max_address, iMpuType); if (!bRet) { printk(KERN_DEBUG "epen: failed to write firmware\n"); iRet = EXIT_FAIL_WRITE_FIRMWARE; goto fw_update_error; } printk(KERN_DEBUG "epen: start marking\n"); /*Set mark in writing process */ bRet = flash_marking(wac_i2c, DATA_SIZE, true, iMpuType); if (!bRet) { printk(KERN_DEBUG "epen: failed to mark firmware\n"); iRet = EXIT_FAIL_WRITE_FIRMWARE; goto fw_update_error; } /*Set the address for verify */ start_address = 0x4000; max_address = 0x11FBF; printk(KERN_DEBUG "epen: start the verification\n"); /*Verify the written program */ bRet = flash_verify(wac_i2c, Binary, DATA_SIZE, start_address, &max_address, iMpuType); if (!bRet) { printk(KERN_DEBUG "epen: failed to verify the firmware\n"); iRet = EXIT_FAIL_VERIFY_FIRMWARE; goto fw_update_error; } printk(KERN_DEBUG "epen: checking the mark\n"); /*Set mark */ bRet = is_flash_marking(wac_i2c, DATA_SIZE, &bMarking, iMpuType); if (!bRet) { printk(KERN_DEBUG "epen: marking firmwrae failed\n"); iRet = EXIT_FAIL_WRITING_MARK_NOT_SET; goto fw_update_error; } /*Enable */ printk(KERN_DEBUG "epen: closing the boot mode\n"); bRet = flash_end(wac_i2c); if (!bRet) { printk(KERN_DEBUG "epen: closing boot mode failed\n"); iRet = EXIT_FAIL_WRITING_MARK_NOT_SET; goto fw_update_error; } iRet = EXIT_OK; printk(KERN_DEBUG "epen: write and verify completed\n"); fw_update_error: wake_unlock(&wac_i2c->wakelock); #ifdef WACOM_HAVE_FWE_PIN if (wac_i2c->have_fwe_pin) { wac_i2c->wac_pdata->compulsory_flash_mode(false); /*Reset */ wac_i2c->wac_pdata->reset_platform_hw(); msleep(200); } #endif return iRet; }
gpl-2.0
janztec/empc-arpi-linux
drivers/pinctrl/pinctrl-u300.c
326
37101
/* * Driver for the U300 pin controller * * Based on the original U300 padmux functions * Copyright (C) 2009-2011 ST-Ericsson AB * Author: Martin Persson <martin.persson@stericsson.com> * Author: Linus Walleij <linus.walleij@linaro.org> * * The DB3350 design and control registers are oriented around pads rather than * pins, so we enumerate the pads we can mux rather than actual pins. The pads * are connected to different pins in different packaging types, so it would * be confusing. */ #include <linux/init.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/pinctrl/pinctrl.h> #include <linux/pinctrl/pinmux.h> #include <linux/pinctrl/pinconf.h> #include <linux/pinctrl/pinconf-generic.h> #include "pinctrl-coh901.h" /* * Register definitions for the U300 Padmux control registers in the * system controller */ /* PAD MUX Control register 1 (LOW) 16bit (R/W) */ #define U300_SYSCON_PMC1LR 0x007C #define U300_SYSCON_PMC1LR_MASK 0xFFFF #define U300_SYSCON_PMC1LR_CDI_MASK 0xC000 #define U300_SYSCON_PMC1LR_CDI_CDI 0x0000 #define U300_SYSCON_PMC1LR_CDI_EMIF 0x4000 /* For BS335 */ #define U300_SYSCON_PMC1LR_CDI_CDI2 0x8000 #define U300_SYSCON_PMC1LR_CDI_WCDMA_APP_GPIO 0xC000 /* For BS365 */ #define U300_SYSCON_PMC1LR_CDI_GPIO 0x8000 #define U300_SYSCON_PMC1LR_CDI_WCDMA 0xC000 /* Common defs */ #define U300_SYSCON_PMC1LR_PDI_MASK 0x3000 #define U300_SYSCON_PMC1LR_PDI_PDI 0x0000 #define U300_SYSCON_PMC1LR_PDI_EGG 0x1000 #define U300_SYSCON_PMC1LR_PDI_WCDMA 0x3000 #define U300_SYSCON_PMC1LR_MMCSD_MASK 0x0C00 #define U300_SYSCON_PMC1LR_MMCSD_MMCSD 0x0000 #define U300_SYSCON_PMC1LR_MMCSD_MSPRO 0x0400 #define U300_SYSCON_PMC1LR_MMCSD_DSP 0x0800 #define U300_SYSCON_PMC1LR_MMCSD_WCDMA 0x0C00 #define U300_SYSCON_PMC1LR_ETM_MASK 0x0300 #define U300_SYSCON_PMC1LR_ETM_ACC 0x0000 #define U300_SYSCON_PMC1LR_ETM_APP 0x0100 #define U300_SYSCON_PMC1LR_EMIF_1_CS2_MASK 0x00C0 #define 
U300_SYSCON_PMC1LR_EMIF_1_CS2_STATIC 0x0000 #define U300_SYSCON_PMC1LR_EMIF_1_CS2_NFIF 0x0040 #define U300_SYSCON_PMC1LR_EMIF_1_CS2_SDRAM 0x0080 #define U300_SYSCON_PMC1LR_EMIF_1_CS2_STATIC_2GB 0x00C0 #define U300_SYSCON_PMC1LR_EMIF_1_CS1_MASK 0x0030 #define U300_SYSCON_PMC1LR_EMIF_1_CS1_STATIC 0x0000 #define U300_SYSCON_PMC1LR_EMIF_1_CS1_NFIF 0x0010 #define U300_SYSCON_PMC1LR_EMIF_1_CS1_SDRAM 0x0020 #define U300_SYSCON_PMC1LR_EMIF_1_CS1_SEMI 0x0030 #define U300_SYSCON_PMC1LR_EMIF_1_CS0_MASK 0x000C #define U300_SYSCON_PMC1LR_EMIF_1_CS0_STATIC 0x0000 #define U300_SYSCON_PMC1LR_EMIF_1_CS0_NFIF 0x0004 #define U300_SYSCON_PMC1LR_EMIF_1_CS0_SDRAM 0x0008 #define U300_SYSCON_PMC1LR_EMIF_1_CS0_SEMI 0x000C #define U300_SYSCON_PMC1LR_EMIF_1_MASK 0x0003 #define U300_SYSCON_PMC1LR_EMIF_1_STATIC 0x0000 #define U300_SYSCON_PMC1LR_EMIF_1_SDRAM0 0x0001 #define U300_SYSCON_PMC1LR_EMIF_1_SDRAM1 0x0002 #define U300_SYSCON_PMC1LR_EMIF_1 0x0003 /* PAD MUX Control register 2 (HIGH) 16bit (R/W) */ #define U300_SYSCON_PMC1HR 0x007E #define U300_SYSCON_PMC1HR_MASK 0xFFFF #define U300_SYSCON_PMC1HR_MISC_2_MASK 0xC000 #define U300_SYSCON_PMC1HR_MISC_2_APP_GPIO 0x0000 #define U300_SYSCON_PMC1HR_MISC_2_MSPRO 0x4000 #define U300_SYSCON_PMC1HR_MISC_2_DSP 0x8000 #define U300_SYSCON_PMC1HR_MISC_2_AAIF 0xC000 #define U300_SYSCON_PMC1HR_APP_GPIO_2_MASK 0x3000 #define U300_SYSCON_PMC1HR_APP_GPIO_2_APP_GPIO 0x0000 #define U300_SYSCON_PMC1HR_APP_GPIO_2_NFIF 0x1000 #define U300_SYSCON_PMC1HR_APP_GPIO_2_DSP 0x2000 #define U300_SYSCON_PMC1HR_APP_GPIO_2_AAIF 0x3000 #define U300_SYSCON_PMC1HR_APP_GPIO_1_MASK 0x0C00 #define U300_SYSCON_PMC1HR_APP_GPIO_1_APP_GPIO 0x0000 #define U300_SYSCON_PMC1HR_APP_GPIO_1_MMC 0x0400 #define U300_SYSCON_PMC1HR_APP_GPIO_1_DSP 0x0800 #define U300_SYSCON_PMC1HR_APP_GPIO_1_AAIF 0x0C00 #define U300_SYSCON_PMC1HR_APP_SPI_CS_2_MASK 0x0300 #define U300_SYSCON_PMC1HR_APP_SPI_CS_2_APP_GPIO 0x0000 #define U300_SYSCON_PMC1HR_APP_SPI_CS_2_SPI 0x0100 #define 
U300_SYSCON_PMC1HR_APP_SPI_CS_2_AAIF 0x0300 #define U300_SYSCON_PMC1HR_APP_SPI_CS_1_MASK 0x00C0 #define U300_SYSCON_PMC1HR_APP_SPI_CS_1_APP_GPIO 0x0000 #define U300_SYSCON_PMC1HR_APP_SPI_CS_1_SPI 0x0040 #define U300_SYSCON_PMC1HR_APP_SPI_CS_1_AAIF 0x00C0 #define U300_SYSCON_PMC1HR_APP_SPI_2_MASK 0x0030 #define U300_SYSCON_PMC1HR_APP_SPI_2_APP_GPIO 0x0000 #define U300_SYSCON_PMC1HR_APP_SPI_2_SPI 0x0010 #define U300_SYSCON_PMC1HR_APP_SPI_2_DSP 0x0020 #define U300_SYSCON_PMC1HR_APP_SPI_2_AAIF 0x0030 #define U300_SYSCON_PMC1HR_APP_UART0_2_MASK 0x000C #define U300_SYSCON_PMC1HR_APP_UART0_2_APP_GPIO 0x0000 #define U300_SYSCON_PMC1HR_APP_UART0_2_UART0 0x0004 #define U300_SYSCON_PMC1HR_APP_UART0_2_NFIF_CS 0x0008 #define U300_SYSCON_PMC1HR_APP_UART0_2_AAIF 0x000C #define U300_SYSCON_PMC1HR_APP_UART0_1_MASK 0x0003 #define U300_SYSCON_PMC1HR_APP_UART0_1_APP_GPIO 0x0000 #define U300_SYSCON_PMC1HR_APP_UART0_1_UART0 0x0001 #define U300_SYSCON_PMC1HR_APP_UART0_1_AAIF 0x0003 /* Padmux 2 control */ #define U300_SYSCON_PMC2R 0x100 #define U300_SYSCON_PMC2R_APP_MISC_0_MASK 0x00C0 #define U300_SYSCON_PMC2R_APP_MISC_0_APP_GPIO 0x0000 #define U300_SYSCON_PMC2R_APP_MISC_0_EMIF_SDRAM 0x0040 #define U300_SYSCON_PMC2R_APP_MISC_0_MMC 0x0080 #define U300_SYSCON_PMC2R_APP_MISC_0_CDI2 0x00C0 #define U300_SYSCON_PMC2R_APP_MISC_1_MASK 0x0300 #define U300_SYSCON_PMC2R_APP_MISC_1_APP_GPIO 0x0000 #define U300_SYSCON_PMC2R_APP_MISC_1_EMIF_SDRAM 0x0100 #define U300_SYSCON_PMC2R_APP_MISC_1_MMC 0x0200 #define U300_SYSCON_PMC2R_APP_MISC_1_CDI2 0x0300 #define U300_SYSCON_PMC2R_APP_MISC_2_MASK 0x0C00 #define U300_SYSCON_PMC2R_APP_MISC_2_APP_GPIO 0x0000 #define U300_SYSCON_PMC2R_APP_MISC_2_EMIF_SDRAM 0x0400 #define U300_SYSCON_PMC2R_APP_MISC_2_MMC 0x0800 #define U300_SYSCON_PMC2R_APP_MISC_2_CDI2 0x0C00 #define U300_SYSCON_PMC2R_APP_MISC_3_MASK 0x3000 #define U300_SYSCON_PMC2R_APP_MISC_3_APP_GPIO 0x0000 #define U300_SYSCON_PMC2R_APP_MISC_3_EMIF_SDRAM 0x1000 #define U300_SYSCON_PMC2R_APP_MISC_3_MMC 0x2000 
#define U300_SYSCON_PMC2R_APP_MISC_3_CDI2 0x3000 #define U300_SYSCON_PMC2R_APP_MISC_4_MASK 0xC000 #define U300_SYSCON_PMC2R_APP_MISC_4_APP_GPIO 0x0000 #define U300_SYSCON_PMC2R_APP_MISC_4_EMIF_SDRAM 0x4000 #define U300_SYSCON_PMC2R_APP_MISC_4_MMC 0x8000 #define U300_SYSCON_PMC2R_APP_MISC_4_ACC_GPIO 0xC000 /* TODO: More SYSCON registers missing */ #define U300_SYSCON_PMC3R 0x10C #define U300_SYSCON_PMC3R_APP_MISC_11_MASK 0xC000 #define U300_SYSCON_PMC3R_APP_MISC_11_SPI 0x4000 #define U300_SYSCON_PMC3R_APP_MISC_10_MASK 0x3000 #define U300_SYSCON_PMC3R_APP_MISC_10_SPI 0x1000 /* TODO: Missing other configs */ #define U300_SYSCON_PMC4R 0x168 #define U300_SYSCON_PMC4R_APP_MISC_12_MASK 0x0003 #define U300_SYSCON_PMC4R_APP_MISC_12_APP_GPIO 0x0000 #define U300_SYSCON_PMC4R_APP_MISC_13_MASK 0x000C #define U300_SYSCON_PMC4R_APP_MISC_13_CDI 0x0000 #define U300_SYSCON_PMC4R_APP_MISC_13_SMIA 0x0004 #define U300_SYSCON_PMC4R_APP_MISC_13_SMIA2 0x0008 #define U300_SYSCON_PMC4R_APP_MISC_13_APP_GPIO 0x000C #define U300_SYSCON_PMC4R_APP_MISC_14_MASK 0x0030 #define U300_SYSCON_PMC4R_APP_MISC_14_CDI 0x0000 #define U300_SYSCON_PMC4R_APP_MISC_14_SMIA 0x0010 #define U300_SYSCON_PMC4R_APP_MISC_14_CDI2 0x0020 #define U300_SYSCON_PMC4R_APP_MISC_14_APP_GPIO 0x0030 #define U300_SYSCON_PMC4R_APP_MISC_16_MASK 0x0300 #define U300_SYSCON_PMC4R_APP_MISC_16_APP_GPIO_13 0x0000 #define U300_SYSCON_PMC4R_APP_MISC_16_APP_UART1_CTS 0x0100 #define U300_SYSCON_PMC4R_APP_MISC_16_EMIF_1_STATIC_CS5_N 0x0200 #define DRIVER_NAME "pinctrl-u300" /* * The DB3350 has 467 pads, I have enumerated the pads clockwise around the * edges of the silicon, finger by finger. LTCORNER upper left is pad 0. * Data taken from the PadRing chart, arranged like this: * * 0 ..... 104 * 466 105 * . . * . . * 358 224 * 357 .... 
225 */ #define U300_NUM_PADS 467 /* Pad names for the pinmux subsystem */ static const struct pinctrl_pin_desc u300_pads[] = { /* Pads along the top edge of the chip */ PINCTRL_PIN(0, "P PAD VDD 28"), PINCTRL_PIN(1, "P PAD GND 28"), PINCTRL_PIN(2, "PO SIM RST N"), PINCTRL_PIN(3, "VSSIO 25"), PINCTRL_PIN(4, "VSSA ADDA ESDSUB"), PINCTRL_PIN(5, "PWR VSSCOMMON"), PINCTRL_PIN(6, "PI ADC I1 POS"), PINCTRL_PIN(7, "PI ADC I1 NEG"), PINCTRL_PIN(8, "PWR VSSAD0"), PINCTRL_PIN(9, "PWR VCCAD0"), PINCTRL_PIN(10, "PI ADC Q1 NEG"), PINCTRL_PIN(11, "PI ADC Q1 POS"), PINCTRL_PIN(12, "PWR VDDAD"), PINCTRL_PIN(13, "PWR GNDAD"), PINCTRL_PIN(14, "PI ADC I2 POS"), PINCTRL_PIN(15, "PI ADC I2 NEG"), PINCTRL_PIN(16, "PWR VSSAD1"), PINCTRL_PIN(17, "PWR VCCAD1"), PINCTRL_PIN(18, "PI ADC Q2 NEG"), PINCTRL_PIN(19, "PI ADC Q2 POS"), PINCTRL_PIN(20, "VSSA ADDA ESDSUB"), PINCTRL_PIN(21, "PWR VCCGPAD"), PINCTRL_PIN(22, "PI TX POW"), PINCTRL_PIN(23, "PWR VSSGPAD"), PINCTRL_PIN(24, "PO DAC I POS"), PINCTRL_PIN(25, "PO DAC I NEG"), PINCTRL_PIN(26, "PO DAC Q POS"), PINCTRL_PIN(27, "PO DAC Q NEG"), PINCTRL_PIN(28, "PWR VSSDA"), PINCTRL_PIN(29, "PWR VCCDA"), PINCTRL_PIN(30, "VSSA ADDA ESDSUB"), PINCTRL_PIN(31, "P PAD VDDIO 11"), PINCTRL_PIN(32, "PI PLL 26 FILTVDD"), PINCTRL_PIN(33, "PI PLL 26 VCONT"), PINCTRL_PIN(34, "PWR AGNDPLL2V5 32 13"), PINCTRL_PIN(35, "PWR AVDDPLL2V5 32 13"), PINCTRL_PIN(36, "VDDA PLL ESD"), PINCTRL_PIN(37, "VSSA PLL ESD"), PINCTRL_PIN(38, "VSS PLL"), PINCTRL_PIN(39, "VDDC PLL"), PINCTRL_PIN(40, "PWR AGNDPLL2V5 26 60"), PINCTRL_PIN(41, "PWR AVDDPLL2V5 26 60"), PINCTRL_PIN(42, "PWR AVDDPLL2V5 26 208"), PINCTRL_PIN(43, "PWR AGNDPLL2V5 26 208"), PINCTRL_PIN(44, "PWR AVDDPLL2V5 13 208"), PINCTRL_PIN(45, "PWR AGNDPLL2V5 13 208"), PINCTRL_PIN(46, "P PAD VSSIO 11"), PINCTRL_PIN(47, "P PAD VSSIO 12"), PINCTRL_PIN(48, "PI POW RST N"), PINCTRL_PIN(49, "VDDC IO"), PINCTRL_PIN(50, "P PAD VDDIO 16"), PINCTRL_PIN(51, "PO RF WCDMA EN 4"), PINCTRL_PIN(52, "PO RF WCDMA EN 3"), PINCTRL_PIN(53, "PO 
RF WCDMA EN 2"), PINCTRL_PIN(54, "PO RF WCDMA EN 1"), PINCTRL_PIN(55, "PO RF WCDMA EN 0"), PINCTRL_PIN(56, "PO GSM PA ENABLE"), PINCTRL_PIN(57, "PO RF DATA STRB"), PINCTRL_PIN(58, "PO RF DATA2"), PINCTRL_PIN(59, "PIO RF DATA1"), PINCTRL_PIN(60, "PIO RF DATA0"), PINCTRL_PIN(61, "P PAD VDD 11"), PINCTRL_PIN(62, "P PAD GND 11"), PINCTRL_PIN(63, "P PAD VSSIO 16"), PINCTRL_PIN(64, "P PAD VDDIO 18"), PINCTRL_PIN(65, "PO RF CTRL STRB2"), PINCTRL_PIN(66, "PO RF CTRL STRB1"), PINCTRL_PIN(67, "PO RF CTRL STRB0"), PINCTRL_PIN(68, "PIO RF CTRL DATA"), PINCTRL_PIN(69, "PO RF CTRL CLK"), PINCTRL_PIN(70, "PO TX ADC STRB"), PINCTRL_PIN(71, "PO ANT SW 2"), PINCTRL_PIN(72, "PO ANT SW 3"), PINCTRL_PIN(73, "PO ANT SW 0"), PINCTRL_PIN(74, "PO ANT SW 1"), PINCTRL_PIN(75, "PO M CLKRQ"), PINCTRL_PIN(76, "PI M CLK"), PINCTRL_PIN(77, "PI RTC CLK"), PINCTRL_PIN(78, "P PAD VDD 8"), PINCTRL_PIN(79, "P PAD GND 8"), PINCTRL_PIN(80, "P PAD VSSIO 13"), PINCTRL_PIN(81, "P PAD VDDIO 13"), PINCTRL_PIN(82, "PO SYS 1 CLK"), PINCTRL_PIN(83, "PO SYS 2 CLK"), PINCTRL_PIN(84, "PO SYS 0 CLK"), PINCTRL_PIN(85, "PI SYS 0 CLKRQ"), PINCTRL_PIN(86, "PO PWR MNGT CTRL 1"), PINCTRL_PIN(87, "PO PWR MNGT CTRL 0"), PINCTRL_PIN(88, "PO RESOUT2 RST N"), PINCTRL_PIN(89, "PO RESOUT1 RST N"), PINCTRL_PIN(90, "PO RESOUT0 RST N"), PINCTRL_PIN(91, "PI SERVICE N"), PINCTRL_PIN(92, "P PAD VDD 29"), PINCTRL_PIN(93, "P PAD GND 29"), PINCTRL_PIN(94, "P PAD VSSIO 8"), PINCTRL_PIN(95, "P PAD VDDIO 8"), PINCTRL_PIN(96, "PI EXT IRQ1 N"), PINCTRL_PIN(97, "PI EXT IRQ0 N"), PINCTRL_PIN(98, "PIO DC ON"), PINCTRL_PIN(99, "PIO ACC APP I2C DATA"), PINCTRL_PIN(100, "PIO ACC APP I2C CLK"), PINCTRL_PIN(101, "P PAD VDD 12"), PINCTRL_PIN(102, "P PAD GND 12"), PINCTRL_PIN(103, "P PAD VSSIO 14"), PINCTRL_PIN(104, "P PAD VDDIO 14"), /* Pads along the right edge of the chip */ PINCTRL_PIN(105, "PIO APP I2C1 DATA"), PINCTRL_PIN(106, "PIO APP I2C1 CLK"), PINCTRL_PIN(107, "PO KEY OUT0"), PINCTRL_PIN(108, "PO KEY OUT1"), PINCTRL_PIN(109, "PO KEY OUT2"), 
PINCTRL_PIN(110, "PO KEY OUT3"), PINCTRL_PIN(111, "PO KEY OUT4"), PINCTRL_PIN(112, "PI KEY IN0"), PINCTRL_PIN(113, "PI KEY IN1"), PINCTRL_PIN(114, "PI KEY IN2"), PINCTRL_PIN(115, "P PAD VDDIO 15"), PINCTRL_PIN(116, "P PAD VSSIO 15"), PINCTRL_PIN(117, "P PAD GND 13"), PINCTRL_PIN(118, "P PAD VDD 13"), PINCTRL_PIN(119, "PI KEY IN3"), PINCTRL_PIN(120, "PI KEY IN4"), PINCTRL_PIN(121, "PI KEY IN5"), PINCTRL_PIN(122, "PIO APP PCM I2S1 DATA B"), PINCTRL_PIN(123, "PIO APP PCM I2S1 DATA A"), PINCTRL_PIN(124, "PIO APP PCM I2S1 WS"), PINCTRL_PIN(125, "PIO APP PCM I2S1 CLK"), PINCTRL_PIN(126, "PIO APP PCM I2S0 DATA B"), PINCTRL_PIN(127, "PIO APP PCM I2S0 DATA A"), PINCTRL_PIN(128, "PIO APP PCM I2S0 WS"), PINCTRL_PIN(129, "PIO APP PCM I2S0 CLK"), PINCTRL_PIN(130, "P PAD VDD 17"), PINCTRL_PIN(131, "P PAD GND 17"), PINCTRL_PIN(132, "P PAD VSSIO 19"), PINCTRL_PIN(133, "P PAD VDDIO 19"), PINCTRL_PIN(134, "UART0 RTS"), PINCTRL_PIN(135, "UART0 CTS"), PINCTRL_PIN(136, "UART0 TX"), PINCTRL_PIN(137, "UART0 RX"), PINCTRL_PIN(138, "PIO ACC SPI DO"), PINCTRL_PIN(139, "PIO ACC SPI DI"), PINCTRL_PIN(140, "PIO ACC SPI CS0 N"), PINCTRL_PIN(141, "PIO ACC SPI CS1 N"), PINCTRL_PIN(142, "PIO ACC SPI CS2 N"), PINCTRL_PIN(143, "PIO ACC SPI CLK"), PINCTRL_PIN(144, "PO PDI EXT RST N"), PINCTRL_PIN(145, "P PAD VDDIO 22"), PINCTRL_PIN(146, "P PAD VSSIO 22"), PINCTRL_PIN(147, "P PAD GND 18"), PINCTRL_PIN(148, "P PAD VDD 18"), PINCTRL_PIN(149, "PIO PDI C0"), PINCTRL_PIN(150, "PIO PDI C1"), PINCTRL_PIN(151, "PIO PDI C2"), PINCTRL_PIN(152, "PIO PDI C3"), PINCTRL_PIN(153, "PIO PDI C4"), PINCTRL_PIN(154, "PIO PDI C5"), PINCTRL_PIN(155, "PIO PDI D0"), PINCTRL_PIN(156, "PIO PDI D1"), PINCTRL_PIN(157, "PIO PDI D2"), PINCTRL_PIN(158, "PIO PDI D3"), PINCTRL_PIN(159, "P PAD VDDIO 21"), PINCTRL_PIN(160, "P PAD VSSIO 21"), PINCTRL_PIN(161, "PIO PDI D4"), PINCTRL_PIN(162, "PIO PDI D5"), PINCTRL_PIN(163, "PIO PDI D6"), PINCTRL_PIN(164, "PIO PDI D7"), PINCTRL_PIN(165, "PIO MS INS"), PINCTRL_PIN(166, "MMC DATA DIR LS"), 
PINCTRL_PIN(167, "MMC DATA 3"), PINCTRL_PIN(168, "MMC DATA 2"), PINCTRL_PIN(169, "MMC DATA 1"), PINCTRL_PIN(170, "MMC DATA 0"), PINCTRL_PIN(171, "MMC CMD DIR LS"), PINCTRL_PIN(172, "P PAD VDD 27"), PINCTRL_PIN(173, "P PAD GND 27"), PINCTRL_PIN(174, "P PAD VSSIO 20"), PINCTRL_PIN(175, "P PAD VDDIO 20"), PINCTRL_PIN(176, "MMC CMD"), PINCTRL_PIN(177, "MMC CLK"), PINCTRL_PIN(178, "PIO APP GPIO 14"), PINCTRL_PIN(179, "PIO APP GPIO 13"), PINCTRL_PIN(180, "PIO APP GPIO 11"), PINCTRL_PIN(181, "PIO APP GPIO 25"), PINCTRL_PIN(182, "PIO APP GPIO 24"), PINCTRL_PIN(183, "PIO APP GPIO 23"), PINCTRL_PIN(184, "PIO APP GPIO 22"), PINCTRL_PIN(185, "PIO APP GPIO 21"), PINCTRL_PIN(186, "PIO APP GPIO 20"), PINCTRL_PIN(187, "P PAD VDD 19"), PINCTRL_PIN(188, "P PAD GND 19"), PINCTRL_PIN(189, "P PAD VSSIO 23"), PINCTRL_PIN(190, "P PAD VDDIO 23"), PINCTRL_PIN(191, "PIO APP GPIO 19"), PINCTRL_PIN(192, "PIO APP GPIO 18"), PINCTRL_PIN(193, "PIO APP GPIO 17"), PINCTRL_PIN(194, "PIO APP GPIO 16"), PINCTRL_PIN(195, "PI CI D1"), PINCTRL_PIN(196, "PI CI D0"), PINCTRL_PIN(197, "PI CI HSYNC"), PINCTRL_PIN(198, "PI CI VSYNC"), PINCTRL_PIN(199, "PI CI EXT CLK"), PINCTRL_PIN(200, "PO CI EXT RST N"), PINCTRL_PIN(201, "P PAD VSSIO 43"), PINCTRL_PIN(202, "P PAD VDDIO 43"), PINCTRL_PIN(203, "PI CI D6"), PINCTRL_PIN(204, "PI CI D7"), PINCTRL_PIN(205, "PI CI D2"), PINCTRL_PIN(206, "PI CI D3"), PINCTRL_PIN(207, "PI CI D4"), PINCTRL_PIN(208, "PI CI D5"), PINCTRL_PIN(209, "PI CI D8"), PINCTRL_PIN(210, "PI CI D9"), PINCTRL_PIN(211, "P PAD VDD 20"), PINCTRL_PIN(212, "P PAD GND 20"), PINCTRL_PIN(213, "P PAD VSSIO 24"), PINCTRL_PIN(214, "P PAD VDDIO 24"), PINCTRL_PIN(215, "P PAD VDDIO 26"), PINCTRL_PIN(216, "PO EMIF 1 A26"), PINCTRL_PIN(217, "PO EMIF 1 A25"), PINCTRL_PIN(218, "P PAD VSSIO 26"), PINCTRL_PIN(219, "PO EMIF 1 A24"), PINCTRL_PIN(220, "PO EMIF 1 A23"), /* Pads along the bottom edge of the chip */ PINCTRL_PIN(221, "PO EMIF 1 A22"), PINCTRL_PIN(222, "PO EMIF 1 A21"), PINCTRL_PIN(223, "P PAD VDD 21"), 
PINCTRL_PIN(224, "P PAD GND 21"), PINCTRL_PIN(225, "P PAD VSSIO 27"), PINCTRL_PIN(226, "P PAD VDDIO 27"), PINCTRL_PIN(227, "PO EMIF 1 A20"), PINCTRL_PIN(228, "PO EMIF 1 A19"), PINCTRL_PIN(229, "PO EMIF 1 A18"), PINCTRL_PIN(230, "PO EMIF 1 A17"), PINCTRL_PIN(231, "P PAD VDDIO 28"), PINCTRL_PIN(232, "P PAD VSSIO 28"), PINCTRL_PIN(233, "PO EMIF 1 A16"), PINCTRL_PIN(234, "PIO EMIF 1 D15"), PINCTRL_PIN(235, "PO EMIF 1 A15"), PINCTRL_PIN(236, "PIO EMIF 1 D14"), PINCTRL_PIN(237, "P PAD VDD 22"), PINCTRL_PIN(238, "P PAD GND 22"), PINCTRL_PIN(239, "P PAD VSSIO 29"), PINCTRL_PIN(240, "P PAD VDDIO 29"), PINCTRL_PIN(241, "PO EMIF 1 A14"), PINCTRL_PIN(242, "PIO EMIF 1 D13"), PINCTRL_PIN(243, "PO EMIF 1 A13"), PINCTRL_PIN(244, "PIO EMIF 1 D12"), PINCTRL_PIN(245, "P PAD VSSIO 30"), PINCTRL_PIN(246, "P PAD VDDIO 30"), PINCTRL_PIN(247, "PO EMIF 1 A12"), PINCTRL_PIN(248, "PIO EMIF 1 D11"), PINCTRL_PIN(249, "PO EMIF 1 A11"), PINCTRL_PIN(250, "PIO EMIF 1 D10"), PINCTRL_PIN(251, "P PAD VSSIO 31"), PINCTRL_PIN(252, "P PAD VDDIO 31"), PINCTRL_PIN(253, "PO EMIF 1 A10"), PINCTRL_PIN(254, "PIO EMIF 1 D09"), PINCTRL_PIN(255, "PO EMIF 1 A09"), PINCTRL_PIN(256, "P PAD VDDIO 32"), PINCTRL_PIN(257, "P PAD VSSIO 32"), PINCTRL_PIN(258, "P PAD GND 24"), PINCTRL_PIN(259, "P PAD VDD 24"), PINCTRL_PIN(260, "PIO EMIF 1 D08"), PINCTRL_PIN(261, "PO EMIF 1 A08"), PINCTRL_PIN(262, "PIO EMIF 1 D07"), PINCTRL_PIN(263, "PO EMIF 1 A07"), PINCTRL_PIN(264, "P PAD VDDIO 33"), PINCTRL_PIN(265, "P PAD VSSIO 33"), PINCTRL_PIN(266, "PIO EMIF 1 D06"), PINCTRL_PIN(267, "PO EMIF 1 A06"), PINCTRL_PIN(268, "PIO EMIF 1 D05"), PINCTRL_PIN(269, "PO EMIF 1 A05"), PINCTRL_PIN(270, "P PAD VDDIO 34"), PINCTRL_PIN(271, "P PAD VSSIO 34"), PINCTRL_PIN(272, "PIO EMIF 1 D04"), PINCTRL_PIN(273, "PO EMIF 1 A04"), PINCTRL_PIN(274, "PIO EMIF 1 D03"), PINCTRL_PIN(275, "PO EMIF 1 A03"), PINCTRL_PIN(276, "P PAD VDDIO 35"), PINCTRL_PIN(277, "P PAD VSSIO 35"), PINCTRL_PIN(278, "P PAD GND 23"), PINCTRL_PIN(279, "P PAD VDD 23"), 
PINCTRL_PIN(280, "PIO EMIF 1 D02"), PINCTRL_PIN(281, "PO EMIF 1 A02"), PINCTRL_PIN(282, "PIO EMIF 1 D01"), PINCTRL_PIN(283, "PO EMIF 1 A01"), PINCTRL_PIN(284, "P PAD VDDIO 36"), PINCTRL_PIN(285, "P PAD VSSIO 36"), PINCTRL_PIN(286, "PIO EMIF 1 D00"), PINCTRL_PIN(287, "PO EMIF 1 BE1 N"), PINCTRL_PIN(288, "PO EMIF 1 BE0 N"), PINCTRL_PIN(289, "PO EMIF 1 ADV N"), PINCTRL_PIN(290, "P PAD VDDIO 37"), PINCTRL_PIN(291, "P PAD VSSIO 37"), PINCTRL_PIN(292, "PO EMIF 1 SD CKE0"), PINCTRL_PIN(293, "PO EMIF 1 OE N"), PINCTRL_PIN(294, "PO EMIF 1 WE N"), PINCTRL_PIN(295, "P PAD VDDIO 38"), PINCTRL_PIN(296, "P PAD VSSIO 38"), PINCTRL_PIN(297, "PO EMIF 1 CLK"), PINCTRL_PIN(298, "PIO EMIF 1 SD CLK"), PINCTRL_PIN(299, "P PAD VSSIO 45 (not bonded)"), PINCTRL_PIN(300, "P PAD VDDIO 42"), PINCTRL_PIN(301, "P PAD VSSIO 42"), PINCTRL_PIN(302, "P PAD GND 31"), PINCTRL_PIN(303, "P PAD VDD 31"), PINCTRL_PIN(304, "PI EMIF 1 RET CLK"), PINCTRL_PIN(305, "PI EMIF 1 WAIT N"), PINCTRL_PIN(306, "PI EMIF 1 NFIF READY"), PINCTRL_PIN(307, "PO EMIF 1 SD CKE1"), PINCTRL_PIN(308, "PO EMIF 1 CS3 N"), PINCTRL_PIN(309, "P PAD VDD 25"), PINCTRL_PIN(310, "P PAD GND 25"), PINCTRL_PIN(311, "P PAD VSSIO 39"), PINCTRL_PIN(312, "P PAD VDDIO 39"), PINCTRL_PIN(313, "PO EMIF 1 CS2 N"), PINCTRL_PIN(314, "PO EMIF 1 CS1 N"), PINCTRL_PIN(315, "PO EMIF 1 CS0 N"), PINCTRL_PIN(316, "PO ETM TRACE PKT0"), PINCTRL_PIN(317, "PO ETM TRACE PKT1"), PINCTRL_PIN(318, "PO ETM TRACE PKT2"), PINCTRL_PIN(319, "P PAD VDD 30"), PINCTRL_PIN(320, "P PAD GND 30"), PINCTRL_PIN(321, "P PAD VSSIO 44"), PINCTRL_PIN(322, "P PAD VDDIO 44"), PINCTRL_PIN(323, "PO ETM TRACE PKT3"), PINCTRL_PIN(324, "PO ETM TRACE PKT4"), PINCTRL_PIN(325, "PO ETM TRACE PKT5"), PINCTRL_PIN(326, "PO ETM TRACE PKT6"), PINCTRL_PIN(327, "PO ETM TRACE PKT7"), PINCTRL_PIN(328, "PO ETM PIPE STAT0"), PINCTRL_PIN(329, "P PAD VDD 26"), PINCTRL_PIN(330, "P PAD GND 26"), PINCTRL_PIN(331, "P PAD VSSIO 40"), PINCTRL_PIN(332, "P PAD VDDIO 40"), PINCTRL_PIN(333, "PO ETM PIPE STAT1"), 
PINCTRL_PIN(334, "PO ETM PIPE STAT2"), PINCTRL_PIN(335, "PO ETM TRACE CLK"), PINCTRL_PIN(336, "PO ETM TRACE SYNC"), PINCTRL_PIN(337, "PIO ACC GPIO 33"), PINCTRL_PIN(338, "PIO ACC GPIO 32"), PINCTRL_PIN(339, "PIO ACC GPIO 30"), PINCTRL_PIN(340, "PIO ACC GPIO 29"), PINCTRL_PIN(341, "P PAD VDDIO 17"), PINCTRL_PIN(342, "P PAD VSSIO 17"), PINCTRL_PIN(343, "P PAD GND 15"), PINCTRL_PIN(344, "P PAD VDD 15"), PINCTRL_PIN(345, "PIO ACC GPIO 28"), PINCTRL_PIN(346, "PIO ACC GPIO 27"), PINCTRL_PIN(347, "PIO ACC GPIO 16"), PINCTRL_PIN(348, "PI TAP TMS"), PINCTRL_PIN(349, "PI TAP TDI"), PINCTRL_PIN(350, "PO TAP TDO"), PINCTRL_PIN(351, "PI TAP RST N"), /* Pads along the left edge of the chip */ PINCTRL_PIN(352, "PI EMU MODE 0"), PINCTRL_PIN(353, "PO TAP RET CLK"), PINCTRL_PIN(354, "PI TAP CLK"), PINCTRL_PIN(355, "PO EMIF 0 SD CS N"), PINCTRL_PIN(356, "PO EMIF 0 SD CAS N"), PINCTRL_PIN(357, "PO EMIF 0 SD WE N"), PINCTRL_PIN(358, "P PAD VDDIO 1"), PINCTRL_PIN(359, "P PAD VSSIO 1"), PINCTRL_PIN(360, "P PAD GND 1"), PINCTRL_PIN(361, "P PAD VDD 1"), PINCTRL_PIN(362, "PO EMIF 0 SD CKE"), PINCTRL_PIN(363, "PO EMIF 0 SD DQML"), PINCTRL_PIN(364, "PO EMIF 0 SD DQMU"), PINCTRL_PIN(365, "PO EMIF 0 SD RAS N"), PINCTRL_PIN(366, "PIO EMIF 0 D15"), PINCTRL_PIN(367, "PO EMIF 0 A15"), PINCTRL_PIN(368, "PIO EMIF 0 D14"), PINCTRL_PIN(369, "PO EMIF 0 A14"), PINCTRL_PIN(370, "PIO EMIF 0 D13"), PINCTRL_PIN(371, "PO EMIF 0 A13"), PINCTRL_PIN(372, "P PAD VDDIO 2"), PINCTRL_PIN(373, "P PAD VSSIO 2"), PINCTRL_PIN(374, "P PAD GND 2"), PINCTRL_PIN(375, "P PAD VDD 2"), PINCTRL_PIN(376, "PIO EMIF 0 D12"), PINCTRL_PIN(377, "PO EMIF 0 A12"), PINCTRL_PIN(378, "PIO EMIF 0 D11"), PINCTRL_PIN(379, "PO EMIF 0 A11"), PINCTRL_PIN(380, "PIO EMIF 0 D10"), PINCTRL_PIN(381, "PO EMIF 0 A10"), PINCTRL_PIN(382, "PIO EMIF 0 D09"), PINCTRL_PIN(383, "PO EMIF 0 A09"), PINCTRL_PIN(384, "PIO EMIF 0 D08"), PINCTRL_PIN(385, "PO EMIF 0 A08"), PINCTRL_PIN(386, "PIO EMIF 0 D07"), PINCTRL_PIN(387, "PO EMIF 0 A07"), PINCTRL_PIN(388, "P PAD 
VDDIO 3"), PINCTRL_PIN(389, "P PAD VSSIO 3"), PINCTRL_PIN(390, "P PAD GND 3"), PINCTRL_PIN(391, "P PAD VDD 3"), PINCTRL_PIN(392, "PO EFUSE RDOUT1"), PINCTRL_PIN(393, "PIO EMIF 0 D06"), PINCTRL_PIN(394, "PO EMIF 0 A06"), PINCTRL_PIN(395, "PIO EMIF 0 D05"), PINCTRL_PIN(396, "PO EMIF 0 A05"), PINCTRL_PIN(397, "PIO EMIF 0 D04"), PINCTRL_PIN(398, "PO EMIF 0 A04"), PINCTRL_PIN(399, "A PADS/A VDDCO1v82v5 GND 80U SF LIN VDDCO AF"), PINCTRL_PIN(400, "PWR VDDCO AF"), PINCTRL_PIN(401, "PWR EFUSE HV1"), PINCTRL_PIN(402, "P PAD VSSIO 4"), PINCTRL_PIN(403, "P PAD VDDIO 4"), PINCTRL_PIN(404, "P PAD GND 4"), PINCTRL_PIN(405, "P PAD VDD 4"), PINCTRL_PIN(406, "PIO EMIF 0 D03"), PINCTRL_PIN(407, "PO EMIF 0 A03"), PINCTRL_PIN(408, "PWR EFUSE HV2"), PINCTRL_PIN(409, "PWR EFUSE HV3"), PINCTRL_PIN(410, "PIO EMIF 0 D02"), PINCTRL_PIN(411, "PO EMIF 0 A02"), PINCTRL_PIN(412, "PIO EMIF 0 D01"), PINCTRL_PIN(413, "P PAD VDDIO 5"), PINCTRL_PIN(414, "P PAD VSSIO 5"), PINCTRL_PIN(415, "P PAD GND 5"), PINCTRL_PIN(416, "P PAD VDD 5"), PINCTRL_PIN(417, "PO EMIF 0 A01"), PINCTRL_PIN(418, "PIO EMIF 0 D00"), PINCTRL_PIN(419, "IF 0 SD CLK"), PINCTRL_PIN(420, "APP SPI CLK"), PINCTRL_PIN(421, "APP SPI DO"), PINCTRL_PIN(422, "APP SPI DI"), PINCTRL_PIN(423, "APP SPI CS0"), PINCTRL_PIN(424, "APP SPI CS1"), PINCTRL_PIN(425, "APP SPI CS2"), PINCTRL_PIN(426, "PIO APP GPIO 10"), PINCTRL_PIN(427, "P PAD VDDIO 41"), PINCTRL_PIN(428, "P PAD VSSIO 41"), PINCTRL_PIN(429, "P PAD GND 6"), PINCTRL_PIN(430, "P PAD VDD 6"), PINCTRL_PIN(431, "PIO ACC SDIO0 CMD"), PINCTRL_PIN(432, "PIO ACC SDIO0 CK"), PINCTRL_PIN(433, "PIO ACC SDIO0 D3"), PINCTRL_PIN(434, "PIO ACC SDIO0 D2"), PINCTRL_PIN(435, "PIO ACC SDIO0 D1"), PINCTRL_PIN(436, "PIO ACC SDIO0 D0"), PINCTRL_PIN(437, "PIO USB PU"), PINCTRL_PIN(438, "PIO USB SP"), PINCTRL_PIN(439, "PIO USB DAT VP"), PINCTRL_PIN(440, "PIO USB SE0 VM"), PINCTRL_PIN(441, "PIO USB OE"), PINCTRL_PIN(442, "PIO USB SUSP"), PINCTRL_PIN(443, "P PAD VSSIO 6"), PINCTRL_PIN(444, "P PAD VDDIO 6"), 
PINCTRL_PIN(445, "PIO USB PUEN"), PINCTRL_PIN(446, "PIO ACC UART0 RX"), PINCTRL_PIN(447, "PIO ACC UART0 TX"), PINCTRL_PIN(448, "PIO ACC UART0 CTS"), PINCTRL_PIN(449, "PIO ACC UART0 RTS"), PINCTRL_PIN(450, "PIO ACC UART3 RX"), PINCTRL_PIN(451, "PIO ACC UART3 TX"), PINCTRL_PIN(452, "PIO ACC UART3 CTS"), PINCTRL_PIN(453, "PIO ACC UART3 RTS"), PINCTRL_PIN(454, "PIO ACC IRDA TX"), PINCTRL_PIN(455, "P PAD VDDIO 7"), PINCTRL_PIN(456, "P PAD VSSIO 7"), PINCTRL_PIN(457, "P PAD GND 7"), PINCTRL_PIN(458, "P PAD VDD 7"), PINCTRL_PIN(459, "PIO ACC IRDA RX"), PINCTRL_PIN(460, "PIO ACC PCM I2S CLK"), PINCTRL_PIN(461, "PIO ACC PCM I2S WS"), PINCTRL_PIN(462, "PIO ACC PCM I2S DATA A"), PINCTRL_PIN(463, "PIO ACC PCM I2S DATA B"), PINCTRL_PIN(464, "PO SIM CLK"), PINCTRL_PIN(465, "PIO ACC IRDA SD"), PINCTRL_PIN(466, "PIO SIM DATA"), }; /** * @dev: a pointer back to containing device * @virtbase: the offset to the controller in virtual memory */ struct u300_pmx { struct device *dev; struct pinctrl_dev *pctl; void __iomem *virtbase; }; /** * u300_pmx_registers - the array of registers read/written for each pinmux * shunt setting */ const u32 u300_pmx_registers[] = { U300_SYSCON_PMC1LR, U300_SYSCON_PMC1HR, U300_SYSCON_PMC2R, U300_SYSCON_PMC3R, U300_SYSCON_PMC4R, }; /** * struct u300_pin_group - describes a U300 pin group * @name: the name of this specific pin group * @pins: an array of discrete physical pins used in this group, taken * from the driver-local pin enumeration space * @num_pins: the number of pins in this group array, i.e. 
the number of * elements in .pins so we can iterate over that array */ struct u300_pin_group { const char *name; const unsigned int *pins; const unsigned num_pins; }; /** * struct pmx_onmask - mask bits to enable/disable padmux * @mask: mask bits to disable * @val: mask bits to enable * * onmask lazy dog: * onmask = { * {"PMC1LR" mask, "PMC1LR" value}, * {"PMC1HR" mask, "PMC1HR" value}, * {"PMC2R" mask, "PMC2R" value}, * {"PMC3R" mask, "PMC3R" value}, * {"PMC4R" mask, "PMC4R" value} * } */ struct u300_pmx_mask { u16 mask; u16 bits; }; /* The chip power pins are VDD, GND, VDDIO and VSSIO */ static const unsigned power_pins[] = { 0, 1, 3, 31, 46, 47, 49, 50, 61, 62, 63, 64, 78, 79, 80, 81, 92, 93, 94, 95, 101, 102, 103, 104, 115, 116, 117, 118, 130, 131, 132, 133, 145, 146, 147, 148, 159, 160, 172, 173, 174, 175, 187, 188, 189, 190, 201, 202, 211, 212, 213, 214, 215, 218, 223, 224, 225, 226, 231, 232, 237, 238, 239, 240, 245, 246, 251, 252, 256, 257, 258, 259, 264, 265, 270, 271, 276, 277, 278, 279, 284, 285, 290, 291, 295, 296, 299, 300, 301, 302, 303, 309, 310, 311, 312, 319, 320, 321, 322, 329, 330, 331, 332, 341, 342, 343, 344, 358, 359, 360, 361, 372, 373, 374, 375, 388, 389, 390, 391, 402, 403, 404, 405, 413, 414, 415, 416, 427, 428, 429, 430, 443, 444, 455, 456, 457, 458 }; static const unsigned emif0_pins[] = { 355, 356, 357, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 393, 394, 395, 396, 397, 398, 406, 407, 410, 411, 412, 417, 418 }; static const unsigned emif1_pins[] = { 216, 217, 219, 220, 221, 222, 227, 228, 229, 230, 233, 234, 235, 236, 241, 242, 243, 244, 247, 248, 249, 250, 253, 254, 255, 260, 261, 262, 263, 266, 267, 268, 269, 272, 273, 274, 275, 280, 281, 282, 283, 286, 287, 288, 289, 292, 293, 294, 297, 298, 304, 305, 306, 307, 308, 313, 314, 315 }; static const unsigned uart0_pins[] = { 134, 135, 136, 137 }; static const unsigned mmc0_pins[] = { 166, 167, 168, 169, 170, 171, 176, 177 
}; static const unsigned spi0_pins[] = { 420, 421, 422, 423, 424, 425 }; static const struct u300_pmx_mask emif0_mask[] = { {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, }; static const struct u300_pmx_mask emif1_mask[] = { /* * This connects the SDRAM to CS2 and a NAND flash to * CS0 on the EMIF. */ { U300_SYSCON_PMC1LR_EMIF_1_CS2_MASK | U300_SYSCON_PMC1LR_EMIF_1_CS1_MASK | U300_SYSCON_PMC1LR_EMIF_1_CS0_MASK | U300_SYSCON_PMC1LR_EMIF_1_MASK, U300_SYSCON_PMC1LR_EMIF_1_CS2_SDRAM | U300_SYSCON_PMC1LR_EMIF_1_CS1_STATIC | U300_SYSCON_PMC1LR_EMIF_1_CS0_NFIF | U300_SYSCON_PMC1LR_EMIF_1_SDRAM0 }, {0, 0}, {0, 0}, {0, 0}, {0, 0}, }; static const struct u300_pmx_mask uart0_mask[] = { {0, 0}, { U300_SYSCON_PMC1HR_APP_UART0_1_MASK | U300_SYSCON_PMC1HR_APP_UART0_2_MASK, U300_SYSCON_PMC1HR_APP_UART0_1_UART0 | U300_SYSCON_PMC1HR_APP_UART0_2_UART0 }, {0, 0}, {0, 0}, {0, 0}, }; static const struct u300_pmx_mask mmc0_mask[] = { { U300_SYSCON_PMC1LR_MMCSD_MASK, U300_SYSCON_PMC1LR_MMCSD_MMCSD}, {0, 0}, {0, 0}, {0, 0}, { U300_SYSCON_PMC4R_APP_MISC_12_MASK, U300_SYSCON_PMC4R_APP_MISC_12_APP_GPIO } }; static const struct u300_pmx_mask spi0_mask[] = { {0, 0}, { U300_SYSCON_PMC1HR_APP_SPI_2_MASK | U300_SYSCON_PMC1HR_APP_SPI_CS_1_MASK | U300_SYSCON_PMC1HR_APP_SPI_CS_2_MASK, U300_SYSCON_PMC1HR_APP_SPI_2_SPI | U300_SYSCON_PMC1HR_APP_SPI_CS_1_SPI | U300_SYSCON_PMC1HR_APP_SPI_CS_2_SPI }, {0, 0}, {0, 0}, {0, 0} }; static const struct u300_pin_group u300_pin_groups[] = { { .name = "powergrp", .pins = power_pins, .num_pins = ARRAY_SIZE(power_pins), }, { .name = "emif0grp", .pins = emif0_pins, .num_pins = ARRAY_SIZE(emif0_pins), }, { .name = "emif1grp", .pins = emif1_pins, .num_pins = ARRAY_SIZE(emif1_pins), }, { .name = "uart0grp", .pins = uart0_pins, .num_pins = ARRAY_SIZE(uart0_pins), }, { .name = "mmc0grp", .pins = mmc0_pins, .num_pins = ARRAY_SIZE(mmc0_pins), }, { .name = "spi0grp", .pins = spi0_pins, .num_pins = ARRAY_SIZE(spi0_pins), }, }; static int u300_get_groups_count(struct pinctrl_dev 
*pctldev) { return ARRAY_SIZE(u300_pin_groups); } static const char *u300_get_group_name(struct pinctrl_dev *pctldev, unsigned selector) { return u300_pin_groups[selector].name; } static int u300_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector, const unsigned **pins, unsigned *num_pins) { *pins = u300_pin_groups[selector].pins; *num_pins = u300_pin_groups[selector].num_pins; return 0; } static void u300_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned offset) { seq_printf(s, " " DRIVER_NAME); } static const struct pinctrl_ops u300_pctrl_ops = { .get_groups_count = u300_get_groups_count, .get_group_name = u300_get_group_name, .get_group_pins = u300_get_group_pins, .pin_dbg_show = u300_pin_dbg_show, }; /* * Here we define the available functions and their corresponding pin groups */ /** * struct u300_pmx_func - describes U300 pinmux functions * @name: the name of this specific function * @groups: corresponding pin groups * @onmask: bits to set to enable this when doing pin muxing */ struct u300_pmx_func { const char *name; const char * const *groups; const unsigned num_groups; const struct u300_pmx_mask *mask; }; static const char * const powergrps[] = { "powergrp" }; static const char * const emif0grps[] = { "emif0grp" }; static const char * const emif1grps[] = { "emif1grp" }; static const char * const uart0grps[] = { "uart0grp" }; static const char * const mmc0grps[] = { "mmc0grp" }; static const char * const spi0grps[] = { "spi0grp" }; static const struct u300_pmx_func u300_pmx_functions[] = { { .name = "power", .groups = powergrps, .num_groups = ARRAY_SIZE(powergrps), /* Mask is N/A */ }, { .name = "emif0", .groups = emif0grps, .num_groups = ARRAY_SIZE(emif0grps), .mask = emif0_mask, }, { .name = "emif1", .groups = emif1grps, .num_groups = ARRAY_SIZE(emif1grps), .mask = emif1_mask, }, { .name = "uart0", .groups = uart0grps, .num_groups = ARRAY_SIZE(uart0grps), .mask = uart0_mask, }, { .name = "mmc0", .groups = mmc0grps, 
.num_groups = ARRAY_SIZE(mmc0grps), .mask = mmc0_mask, }, { .name = "spi0", .groups = spi0grps, .num_groups = ARRAY_SIZE(spi0grps), .mask = spi0_mask, }, }; static void u300_pmx_endisable(struct u300_pmx *upmx, unsigned selector, bool enable) { u16 regval, val, mask; int i; const struct u300_pmx_mask *upmx_mask; upmx_mask = u300_pmx_functions[selector].mask; for (i = 0; i < ARRAY_SIZE(u300_pmx_registers); i++) { if (enable) val = upmx_mask->bits; else val = 0; mask = upmx_mask->mask; if (mask != 0) { regval = readw(upmx->virtbase + u300_pmx_registers[i]); regval &= ~mask; regval |= val; writew(regval, upmx->virtbase + u300_pmx_registers[i]); } upmx_mask++; } } static int u300_pmx_set_mux(struct pinctrl_dev *pctldev, unsigned selector, unsigned group) { struct u300_pmx *upmx; /* There is nothing to do with the power pins */ if (selector == 0) return 0; upmx = pinctrl_dev_get_drvdata(pctldev); u300_pmx_endisable(upmx, selector, true); return 0; } static int u300_pmx_get_funcs_count(struct pinctrl_dev *pctldev) { return ARRAY_SIZE(u300_pmx_functions); } static const char *u300_pmx_get_func_name(struct pinctrl_dev *pctldev, unsigned selector) { return u300_pmx_functions[selector].name; } static int u300_pmx_get_groups(struct pinctrl_dev *pctldev, unsigned selector, const char * const **groups, unsigned * const num_groups) { *groups = u300_pmx_functions[selector].groups; *num_groups = u300_pmx_functions[selector].num_groups; return 0; } static const struct pinmux_ops u300_pmx_ops = { .get_functions_count = u300_pmx_get_funcs_count, .get_function_name = u300_pmx_get_func_name, .get_function_groups = u300_pmx_get_groups, .set_mux = u300_pmx_set_mux, }; static int u300_pin_config_get(struct pinctrl_dev *pctldev, unsigned pin, unsigned long *config) { struct pinctrl_gpio_range *range = pinctrl_find_gpio_range_from_pin(pctldev, pin); /* We get config for those pins we CAN get it for and that's it */ if (!range) return -ENOTSUPP; return u300_gpio_config_get(range->gc, (pin - 
range->pin_base + range->base), config); } static int u300_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin, unsigned long *configs, unsigned num_configs) { struct pinctrl_gpio_range *range = pinctrl_find_gpio_range_from_pin(pctldev, pin); int ret, i; if (!range) return -EINVAL; for (i = 0; i < num_configs; i++) { /* Note: none of these configurations take any argument */ ret = u300_gpio_config_set(range->gc, (pin - range->pin_base + range->base), pinconf_to_config_param(configs[i])); if (ret) return ret; } /* for each config */ return 0; } static const struct pinconf_ops u300_pconf_ops = { .is_generic = true, .pin_config_get = u300_pin_config_get, .pin_config_set = u300_pin_config_set, }; static struct pinctrl_desc u300_pmx_desc = { .name = DRIVER_NAME, .pins = u300_pads, .npins = ARRAY_SIZE(u300_pads), .pctlops = &u300_pctrl_ops, .pmxops = &u300_pmx_ops, .confops = &u300_pconf_ops, .owner = THIS_MODULE, }; static int u300_pmx_probe(struct platform_device *pdev) { struct u300_pmx *upmx; struct resource *res; /* Create state holders etc for this driver */ upmx = devm_kzalloc(&pdev->dev, sizeof(*upmx), GFP_KERNEL); if (!upmx) return -ENOMEM; upmx->dev = &pdev->dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); upmx->virtbase = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(upmx->virtbase)) return PTR_ERR(upmx->virtbase); upmx->pctl = pinctrl_register(&u300_pmx_desc, &pdev->dev, upmx); if (!upmx->pctl) { dev_err(&pdev->dev, "could not register U300 pinmux driver\n"); return -EINVAL; } platform_set_drvdata(pdev, upmx); dev_info(&pdev->dev, "initialized U300 pin control driver\n"); return 0; } static int u300_pmx_remove(struct platform_device *pdev) { struct u300_pmx *upmx = platform_get_drvdata(pdev); pinctrl_unregister(upmx->pctl); return 0; } static const struct of_device_id u300_pinctrl_match[] = { { .compatible = "stericsson,pinctrl-u300" }, {}, }; static struct platform_driver u300_pmx_driver = { .driver = { .name = DRIVER_NAME, .owner = 
THIS_MODULE, .of_match_table = u300_pinctrl_match, }, .probe = u300_pmx_probe, .remove = u300_pmx_remove, }; static int __init u300_pmx_init(void) { return platform_driver_register(&u300_pmx_driver); } arch_initcall(u300_pmx_init); static void __exit u300_pmx_exit(void) { platform_driver_unregister(&u300_pmx_driver); } module_exit(u300_pmx_exit); MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>"); MODULE_DESCRIPTION("U300 pin control driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
gongwan33/hiveboard_linux_with_sonix291_uvcdriver
sound/pci/sonicvibes.c
582
52995
/*
 *  Driver for S3 SonicVibes soundcard
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *
 *  BUGS:
 *    It looks like 86c617 rev 3 doesn't supports DDMA buffers above 16MB?
 *    Driver sometimes hangs... Nobody knows why at this moment...
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/gameport.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>

#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/info.h>
#include <sound/control.h>
#include <sound/mpu401.h>
#include <sound/opl3.h>
#include <sound/initval.h>

#include <asm/io.h>

MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("S3 SonicVibes PCI");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{S3,SonicVibes PCI}}");

#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
#define SUPPORT_JOYSTICK 1
#endif

/*
 * Module parameters.
 *
 * Note: enable/reverb/mge are registered below with
 * module_param_array(..., bool, ...), so their backing storage must be
 * bool, not int — module_param() type-checks the variable against the
 * declared parameter type at build time (see "module_param: make bool
 * parameters really bool" in the kernel).
 */
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;	/* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;	/* ID for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;	/* Enable this card */
static bool reverb[SNDRV_CARDS];	/* enable onboard SRAM reverb */
static bool mge[SNDRV_CARDS];		/* MIC gain enable */
static unsigned int dmaio = 0x7a00;	/* DDMA i/o address */
module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for S3 SonicVibes soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for S3 SonicVibes soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable S3 SonicVibes soundcard."); module_param_array(reverb, bool, NULL, 0444); MODULE_PARM_DESC(reverb, "Enable reverb (SRAM is present) for S3 SonicVibes soundcard."); module_param_array(mge, bool, NULL, 0444); MODULE_PARM_DESC(mge, "MIC Gain Enable for S3 SonicVibes soundcard."); module_param(dmaio, uint, 0444); MODULE_PARM_DESC(dmaio, "DDMA i/o base address for S3 SonicVibes soundcard."); /* * Enhanced port direct registers */ #define SV_REG(sonic, x) ((sonic)->enh_port + SV_REG_##x) #define SV_REG_CONTROL 0x00 /* R/W: CODEC/Mixer control register */ #define SV_ENHANCED 0x01 /* audio mode select - enhanced mode */ #define SV_TEST 0x02 /* test bit */ #define SV_REVERB 0x04 /* reverb enable */ #define SV_WAVETABLE 0x08 /* wavetable active / FM active if not set */ #define SV_INTA 0x20 /* INTA driving - should be always 1 */ #define SV_RESET 0x80 /* reset chip */ #define SV_REG_IRQMASK 0x01 /* R/W: CODEC/Mixer interrupt mask register */ #define SV_DMAA_MASK 0x01 /* mask DMA-A interrupt */ #define SV_DMAC_MASK 0x04 /* mask DMA-C interrupt */ #define SV_SPEC_MASK 0x08 /* special interrupt mask - should be always masked */ #define SV_UD_MASK 0x40 /* Up/Down button interrupt mask */ #define SV_MIDI_MASK 0x80 /* mask MIDI interrupt */ #define SV_REG_STATUS 0x02 /* R/O: CODEC/Mixer status register */ #define SV_DMAA_IRQ 0x01 /* DMA-A interrupt */ #define SV_DMAC_IRQ 0x04 /* DMA-C interrupt */ #define SV_SPEC_IRQ 0x08 /* special interrupt */ #define SV_UD_IRQ 0x40 /* Up/Down interrupt */ #define SV_MIDI_IRQ 0x80 /* MIDI interrupt */ #define SV_REG_INDEX 0x04 /* R/W: CODEC/Mixer index address register */ #define SV_MCE 0x40 /* mode change enable */ #define SV_TRD 0x80 /* DMA transfer 
request disabled */ #define SV_REG_DATA 0x05 /* R/W: CODEC/Mixer index data register */ /* * Enhanced port indirect registers */ #define SV_IREG_LEFT_ADC 0x00 /* Left ADC Input Control */ #define SV_IREG_RIGHT_ADC 0x01 /* Right ADC Input Control */ #define SV_IREG_LEFT_AUX1 0x02 /* Left AUX1 Input Control */ #define SV_IREG_RIGHT_AUX1 0x03 /* Right AUX1 Input Control */ #define SV_IREG_LEFT_CD 0x04 /* Left CD Input Control */ #define SV_IREG_RIGHT_CD 0x05 /* Right CD Input Control */ #define SV_IREG_LEFT_LINE 0x06 /* Left Line Input Control */ #define SV_IREG_RIGHT_LINE 0x07 /* Right Line Input Control */ #define SV_IREG_MIC 0x08 /* MIC Input Control */ #define SV_IREG_GAME_PORT 0x09 /* Game Port Control */ #define SV_IREG_LEFT_SYNTH 0x0a /* Left Synth Input Control */ #define SV_IREG_RIGHT_SYNTH 0x0b /* Right Synth Input Control */ #define SV_IREG_LEFT_AUX2 0x0c /* Left AUX2 Input Control */ #define SV_IREG_RIGHT_AUX2 0x0d /* Right AUX2 Input Control */ #define SV_IREG_LEFT_ANALOG 0x0e /* Left Analog Mixer Output Control */ #define SV_IREG_RIGHT_ANALOG 0x0f /* Right Analog Mixer Output Control */ #define SV_IREG_LEFT_PCM 0x10 /* Left PCM Input Control */ #define SV_IREG_RIGHT_PCM 0x11 /* Right PCM Input Control */ #define SV_IREG_DMA_DATA_FMT 0x12 /* DMA Data Format */ #define SV_IREG_PC_ENABLE 0x13 /* Playback/Capture Enable Register */ #define SV_IREG_UD_BUTTON 0x14 /* Up/Down Button Register */ #define SV_IREG_REVISION 0x15 /* Revision */ #define SV_IREG_ADC_OUTPUT_CTRL 0x16 /* ADC Output Control */ #define SV_IREG_DMA_A_UPPER 0x18 /* DMA A Upper Base Count */ #define SV_IREG_DMA_A_LOWER 0x19 /* DMA A Lower Base Count */ #define SV_IREG_DMA_C_UPPER 0x1c /* DMA C Upper Base Count */ #define SV_IREG_DMA_C_LOWER 0x1d /* DMA C Lower Base Count */ #define SV_IREG_PCM_RATE_LOW 0x1e /* PCM Sampling Rate Low Byte */ #define SV_IREG_PCM_RATE_HIGH 0x1f /* PCM Sampling Rate High Byte */ #define SV_IREG_SYNTH_RATE_LOW 0x20 /* Synthesizer Sampling Rate Low Byte */ #define 
SV_IREG_SYNTH_RATE_HIGH 0x21 /* Synthesizer Sampling Rate High Byte */ #define SV_IREG_ADC_CLOCK 0x22 /* ADC Clock Source Selection */ #define SV_IREG_ADC_ALT_RATE 0x23 /* ADC Alternative Sampling Rate Selection */ #define SV_IREG_ADC_PLL_M 0x24 /* ADC PLL M Register */ #define SV_IREG_ADC_PLL_N 0x25 /* ADC PLL N Register */ #define SV_IREG_SYNTH_PLL_M 0x26 /* Synthesizer PLL M Register */ #define SV_IREG_SYNTH_PLL_N 0x27 /* Synthesizer PLL N Register */ #define SV_IREG_MPU401 0x2a /* MPU-401 UART Operation */ #define SV_IREG_DRIVE_CTRL 0x2b /* Drive Control */ #define SV_IREG_SRS_SPACE 0x2c /* SRS Space Control */ #define SV_IREG_SRS_CENTER 0x2d /* SRS Center Control */ #define SV_IREG_WAVE_SOURCE 0x2e /* Wavetable Sample Source Select */ #define SV_IREG_ANALOG_POWER 0x30 /* Analog Power Down Control */ #define SV_IREG_DIGITAL_POWER 0x31 /* Digital Power Down Control */ #define SV_IREG_ADC_PLL SV_IREG_ADC_PLL_M #define SV_IREG_SYNTH_PLL SV_IREG_SYNTH_PLL_M /* * DMA registers */ #define SV_DMA_ADDR0 0x00 #define SV_DMA_ADDR1 0x01 #define SV_DMA_ADDR2 0x02 #define SV_DMA_ADDR3 0x03 #define SV_DMA_COUNT0 0x04 #define SV_DMA_COUNT1 0x05 #define SV_DMA_COUNT2 0x06 #define SV_DMA_MODE 0x0b #define SV_DMA_RESET 0x0d #define SV_DMA_MASK 0x0f /* * Record sources */ #define SV_RECSRC_RESERVED (0x00<<5) #define SV_RECSRC_CD (0x01<<5) #define SV_RECSRC_DAC (0x02<<5) #define SV_RECSRC_AUX2 (0x03<<5) #define SV_RECSRC_LINE (0x04<<5) #define SV_RECSRC_AUX1 (0x05<<5) #define SV_RECSRC_MIC (0x06<<5) #define SV_RECSRC_OUT (0x07<<5) /* * constants */ #define SV_FULLRATE 48000 #define SV_REFFREQUENCY 24576000 #define SV_ADCMULT 512 #define SV_MODE_PLAY 1 #define SV_MODE_CAPTURE 2 /* */ struct sonicvibes { unsigned long dma1size; unsigned long dma2size; int irq; unsigned long sb_port; unsigned long enh_port; unsigned long synth_port; unsigned long midi_port; unsigned long game_port; unsigned int dmaa_port; struct resource *res_dmaa; unsigned int dmac_port; struct resource *res_dmac; 
unsigned char enable; unsigned char irqmask; unsigned char revision; unsigned char format; unsigned char srs_space; unsigned char srs_center; unsigned char mpu_switch; unsigned char wave_source; unsigned int mode; struct pci_dev *pci; struct snd_card *card; struct snd_pcm *pcm; struct snd_pcm_substream *playback_substream; struct snd_pcm_substream *capture_substream; struct snd_rawmidi *rmidi; struct snd_hwdep *fmsynth; /* S3FM */ spinlock_t reg_lock; unsigned int p_dma_size; unsigned int c_dma_size; struct snd_kcontrol *master_mute; struct snd_kcontrol *master_volume; #ifdef SUPPORT_JOYSTICK struct gameport *gameport; #endif }; static struct pci_device_id snd_sonic_ids[] = { { PCI_VDEVICE(S3, 0xca00), 0, }, { 0, } }; MODULE_DEVICE_TABLE(pci, snd_sonic_ids); static struct snd_ratden sonicvibes_adc_clock = { .num_min = 4000 * 65536, .num_max = 48000UL * 65536, .num_step = 1, .den = 65536, }; static struct snd_pcm_hw_constraint_ratdens snd_sonicvibes_hw_constraints_adc_clock = { .nrats = 1, .rats = &sonicvibes_adc_clock, }; /* * common I/O routines */ static inline void snd_sonicvibes_setdmaa(struct sonicvibes * sonic, unsigned int addr, unsigned int count) { count--; outl(addr, sonic->dmaa_port + SV_DMA_ADDR0); outl(count, sonic->dmaa_port + SV_DMA_COUNT0); outb(0x18, sonic->dmaa_port + SV_DMA_MODE); #if 0 printk(KERN_DEBUG "program dmaa: addr = 0x%x, paddr = 0x%x\n", addr, inl(sonic->dmaa_port + SV_DMA_ADDR0)); #endif } static inline void snd_sonicvibes_setdmac(struct sonicvibes * sonic, unsigned int addr, unsigned int count) { /* note: dmac is working in word mode!!! 
*/ count >>= 1; count--; outl(addr, sonic->dmac_port + SV_DMA_ADDR0); outl(count, sonic->dmac_port + SV_DMA_COUNT0); outb(0x14, sonic->dmac_port + SV_DMA_MODE); #if 0 printk(KERN_DEBUG "program dmac: addr = 0x%x, paddr = 0x%x\n", addr, inl(sonic->dmac_port + SV_DMA_ADDR0)); #endif } static inline unsigned int snd_sonicvibes_getdmaa(struct sonicvibes * sonic) { return (inl(sonic->dmaa_port + SV_DMA_COUNT0) & 0xffffff) + 1; } static inline unsigned int snd_sonicvibes_getdmac(struct sonicvibes * sonic) { /* note: dmac is working in word mode!!! */ return ((inl(sonic->dmac_port + SV_DMA_COUNT0) & 0xffffff) + 1) << 1; } static void snd_sonicvibes_out1(struct sonicvibes * sonic, unsigned char reg, unsigned char value) { outb(reg, SV_REG(sonic, INDEX)); udelay(10); outb(value, SV_REG(sonic, DATA)); udelay(10); } static void snd_sonicvibes_out(struct sonicvibes * sonic, unsigned char reg, unsigned char value) { unsigned long flags; spin_lock_irqsave(&sonic->reg_lock, flags); outb(reg, SV_REG(sonic, INDEX)); udelay(10); outb(value, SV_REG(sonic, DATA)); udelay(10); spin_unlock_irqrestore(&sonic->reg_lock, flags); } static unsigned char snd_sonicvibes_in1(struct sonicvibes * sonic, unsigned char reg) { unsigned char value; outb(reg, SV_REG(sonic, INDEX)); udelay(10); value = inb(SV_REG(sonic, DATA)); udelay(10); return value; } static unsigned char snd_sonicvibes_in(struct sonicvibes * sonic, unsigned char reg) { unsigned long flags; unsigned char value; spin_lock_irqsave(&sonic->reg_lock, flags); outb(reg, SV_REG(sonic, INDEX)); udelay(10); value = inb(SV_REG(sonic, DATA)); udelay(10); spin_unlock_irqrestore(&sonic->reg_lock, flags); return value; } #if 0 static void snd_sonicvibes_debug(struct sonicvibes * sonic) { printk(KERN_DEBUG "SV REGS: INDEX = 0x%02x ", inb(SV_REG(sonic, INDEX))); printk(" STATUS = 0x%02x\n", inb(SV_REG(sonic, STATUS))); printk(KERN_DEBUG " 0x00: left input = 0x%02x ", snd_sonicvibes_in(sonic, 0x00)); printk(" 0x20: synth rate low = 0x%02x\n", 
snd_sonicvibes_in(sonic, 0x20)); printk(KERN_DEBUG " 0x01: right input = 0x%02x ", snd_sonicvibes_in(sonic, 0x01)); printk(" 0x21: synth rate high = 0x%02x\n", snd_sonicvibes_in(sonic, 0x21)); printk(KERN_DEBUG " 0x02: left AUX1 = 0x%02x ", snd_sonicvibes_in(sonic, 0x02)); printk(" 0x22: ADC clock = 0x%02x\n", snd_sonicvibes_in(sonic, 0x22)); printk(KERN_DEBUG " 0x03: right AUX1 = 0x%02x ", snd_sonicvibes_in(sonic, 0x03)); printk(" 0x23: ADC alt rate = 0x%02x\n", snd_sonicvibes_in(sonic, 0x23)); printk(KERN_DEBUG " 0x04: left CD = 0x%02x ", snd_sonicvibes_in(sonic, 0x04)); printk(" 0x24: ADC pll M = 0x%02x\n", snd_sonicvibes_in(sonic, 0x24)); printk(KERN_DEBUG " 0x05: right CD = 0x%02x ", snd_sonicvibes_in(sonic, 0x05)); printk(" 0x25: ADC pll N = 0x%02x\n", snd_sonicvibes_in(sonic, 0x25)); printk(KERN_DEBUG " 0x06: left line = 0x%02x ", snd_sonicvibes_in(sonic, 0x06)); printk(" 0x26: Synth pll M = 0x%02x\n", snd_sonicvibes_in(sonic, 0x26)); printk(KERN_DEBUG " 0x07: right line = 0x%02x ", snd_sonicvibes_in(sonic, 0x07)); printk(" 0x27: Synth pll N = 0x%02x\n", snd_sonicvibes_in(sonic, 0x27)); printk(KERN_DEBUG " 0x08: MIC = 0x%02x ", snd_sonicvibes_in(sonic, 0x08)); printk(" 0x28: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x28)); printk(KERN_DEBUG " 0x09: Game port = 0x%02x ", snd_sonicvibes_in(sonic, 0x09)); printk(" 0x29: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x29)); printk(KERN_DEBUG " 0x0a: left synth = 0x%02x ", snd_sonicvibes_in(sonic, 0x0a)); printk(" 0x2a: MPU401 = 0x%02x\n", snd_sonicvibes_in(sonic, 0x2a)); printk(KERN_DEBUG " 0x0b: right synth = 0x%02x ", snd_sonicvibes_in(sonic, 0x0b)); printk(" 0x2b: drive ctrl = 0x%02x\n", snd_sonicvibes_in(sonic, 0x2b)); printk(KERN_DEBUG " 0x0c: left AUX2 = 0x%02x ", snd_sonicvibes_in(sonic, 0x0c)); printk(" 0x2c: SRS space = 0x%02x\n", snd_sonicvibes_in(sonic, 0x2c)); printk(KERN_DEBUG " 0x0d: right AUX2 = 0x%02x ", snd_sonicvibes_in(sonic, 0x0d)); printk(" 0x2d: SRS center = 0x%02x\n", snd_sonicvibes_in(sonic, 
0x2d)); printk(KERN_DEBUG " 0x0e: left analog = 0x%02x ", snd_sonicvibes_in(sonic, 0x0e)); printk(" 0x2e: wave source = 0x%02x\n", snd_sonicvibes_in(sonic, 0x2e)); printk(KERN_DEBUG " 0x0f: right analog = 0x%02x ", snd_sonicvibes_in(sonic, 0x0f)); printk(" 0x2f: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x2f)); printk(KERN_DEBUG " 0x10: left PCM = 0x%02x ", snd_sonicvibes_in(sonic, 0x10)); printk(" 0x30: analog power = 0x%02x\n", snd_sonicvibes_in(sonic, 0x30)); printk(KERN_DEBUG " 0x11: right PCM = 0x%02x ", snd_sonicvibes_in(sonic, 0x11)); printk(" 0x31: analog power = 0x%02x\n", snd_sonicvibes_in(sonic, 0x31)); printk(KERN_DEBUG " 0x12: DMA data format = 0x%02x ", snd_sonicvibes_in(sonic, 0x12)); printk(" 0x32: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x32)); printk(KERN_DEBUG " 0x13: P/C enable = 0x%02x ", snd_sonicvibes_in(sonic, 0x13)); printk(" 0x33: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x33)); printk(KERN_DEBUG " 0x14: U/D button = 0x%02x ", snd_sonicvibes_in(sonic, 0x14)); printk(" 0x34: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x34)); printk(KERN_DEBUG " 0x15: revision = 0x%02x ", snd_sonicvibes_in(sonic, 0x15)); printk(" 0x35: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x35)); printk(KERN_DEBUG " 0x16: ADC output ctrl = 0x%02x ", snd_sonicvibes_in(sonic, 0x16)); printk(" 0x36: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x36)); printk(KERN_DEBUG " 0x17: --- = 0x%02x ", snd_sonicvibes_in(sonic, 0x17)); printk(" 0x37: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x37)); printk(KERN_DEBUG " 0x18: DMA A upper cnt = 0x%02x ", snd_sonicvibes_in(sonic, 0x18)); printk(" 0x38: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x38)); printk(KERN_DEBUG " 0x19: DMA A lower cnt = 0x%02x ", snd_sonicvibes_in(sonic, 0x19)); printk(" 0x39: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x39)); printk(KERN_DEBUG " 0x1a: --- = 0x%02x ", snd_sonicvibes_in(sonic, 0x1a)); printk(" 0x3a: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x3a)); printk(KERN_DEBUG " 0x1b: --- = 0x%02x ", 
snd_sonicvibes_in(sonic, 0x1b)); printk(" 0x3b: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x3b)); printk(KERN_DEBUG " 0x1c: DMA C upper cnt = 0x%02x ", snd_sonicvibes_in(sonic, 0x1c)); printk(" 0x3c: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x3c)); printk(KERN_DEBUG " 0x1d: DMA C upper cnt = 0x%02x ", snd_sonicvibes_in(sonic, 0x1d)); printk(" 0x3d: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x3d)); printk(KERN_DEBUG " 0x1e: PCM rate low = 0x%02x ", snd_sonicvibes_in(sonic, 0x1e)); printk(" 0x3e: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x3e)); printk(KERN_DEBUG " 0x1f: PCM rate high = 0x%02x ", snd_sonicvibes_in(sonic, 0x1f)); printk(" 0x3f: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x3f)); } #endif static void snd_sonicvibes_setfmt(struct sonicvibes * sonic, unsigned char mask, unsigned char value) { unsigned long flags; spin_lock_irqsave(&sonic->reg_lock, flags); outb(SV_MCE | SV_IREG_DMA_DATA_FMT, SV_REG(sonic, INDEX)); if (mask) { sonic->format = inb(SV_REG(sonic, DATA)); udelay(10); } sonic->format = (sonic->format & mask) | value; outb(sonic->format, SV_REG(sonic, DATA)); udelay(10); outb(0, SV_REG(sonic, INDEX)); udelay(10); spin_unlock_irqrestore(&sonic->reg_lock, flags); } static void snd_sonicvibes_pll(unsigned int rate, unsigned int *res_r, unsigned int *res_m, unsigned int *res_n) { unsigned int r, m = 0, n = 0; unsigned int xm, xn, xr, xd, metric = ~0U; if (rate < 625000 / SV_ADCMULT) rate = 625000 / SV_ADCMULT; if (rate > 150000000 / SV_ADCMULT) rate = 150000000 / SV_ADCMULT; /* slight violation of specs, needed for continuous sampling rates */ for (r = 0; rate < 75000000 / SV_ADCMULT; r += 0x20, rate <<= 1); for (xn = 3; xn < 33; xn++) /* 35 */ for (xm = 3; xm < 257; xm++) { xr = ((SV_REFFREQUENCY / SV_ADCMULT) * xm) / xn; if (xr >= rate) xd = xr - rate; else xd = rate - xr; if (xd < metric) { metric = xd; m = xm - 2; n = xn - 2; } } *res_r = r; *res_m = m; *res_n = n; #if 0 printk(KERN_DEBUG "metric = %i, xm = %i, xn = %i\n", metric, xm, xn); 
printk(KERN_DEBUG "pll: m = 0x%x, r = 0x%x, n = 0x%x\n", reg, m, r, n); #endif } static void snd_sonicvibes_setpll(struct sonicvibes * sonic, unsigned char reg, unsigned int rate) { unsigned long flags; unsigned int r, m, n; snd_sonicvibes_pll(rate, &r, &m, &n); if (sonic != NULL) { spin_lock_irqsave(&sonic->reg_lock, flags); snd_sonicvibes_out1(sonic, reg, m); snd_sonicvibes_out1(sonic, reg + 1, r | n); spin_unlock_irqrestore(&sonic->reg_lock, flags); } } static void snd_sonicvibes_set_adc_rate(struct sonicvibes * sonic, unsigned int rate) { unsigned long flags; unsigned int div; unsigned char clock; div = 48000 / rate; if (div > 8) div = 8; if ((48000 / div) == rate) { /* use the alternate clock */ clock = 0x10; } else { /* use the PLL source */ clock = 0x00; snd_sonicvibes_setpll(sonic, SV_IREG_ADC_PLL, rate); } spin_lock_irqsave(&sonic->reg_lock, flags); snd_sonicvibes_out1(sonic, SV_IREG_ADC_ALT_RATE, (div - 1) << 4); snd_sonicvibes_out1(sonic, SV_IREG_ADC_CLOCK, clock); spin_unlock_irqrestore(&sonic->reg_lock, flags); } static int snd_sonicvibes_hw_constraint_dac_rate(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { unsigned int rate, div, r, m, n; if (hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE)->min == hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE)->max) { rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE)->min; div = 48000 / rate; if (div > 8) div = 8; if ((48000 / div) == rate) { params->rate_num = rate; params->rate_den = 1; } else { snd_sonicvibes_pll(rate, &r, &m, &n); snd_BUG_ON(SV_REFFREQUENCY % 16); snd_BUG_ON(SV_ADCMULT % 512); params->rate_num = (SV_REFFREQUENCY/16) * (n+2) * r; params->rate_den = (SV_ADCMULT/512) * (m+2); } } return 0; } static void snd_sonicvibes_set_dac_rate(struct sonicvibes * sonic, unsigned int rate) { unsigned int div; unsigned long flags; div = (rate * 65536 + SV_FULLRATE / 2) / SV_FULLRATE; if (div > 65535) div = 65535; spin_lock_irqsave(&sonic->reg_lock, flags); 
snd_sonicvibes_out1(sonic, SV_IREG_PCM_RATE_HIGH, div >> 8);
	snd_sonicvibes_out1(sonic, SV_IREG_PCM_RATE_LOW, div);
	spin_unlock_irqrestore(&sonic->reg_lock, flags);
}

/*
 * Enable or disable a DMA engine via the Playback/Capture Enable register.
 * @what: bitmask selecting the engine(s) — bit 0 playback (DMA A),
 *        bit 1 capture (DMA C), mirroring sonic->enable
 * @cmd:  SNDRV_PCM_TRIGGER_START or SNDRV_PCM_TRIGGER_STOP; anything
 *        else returns -EINVAL
 * The cached sonic->enable is updated and written out only when the
 * requested state actually changes, under reg_lock.
 */
static int snd_sonicvibes_trigger(struct sonicvibes * sonic, int what, int cmd)
{
	int result = 0;

	spin_lock(&sonic->reg_lock);
	if (cmd == SNDRV_PCM_TRIGGER_START) {
		if (!(sonic->enable & what)) {
			sonic->enable |= what;
			snd_sonicvibes_out1(sonic, SV_IREG_PC_ENABLE, sonic->enable);
		}
	} else if (cmd == SNDRV_PCM_TRIGGER_STOP) {
		if (sonic->enable & what) {
			sonic->enable &= ~what;
			snd_sonicvibes_out1(sonic, SV_IREG_PC_ENABLE, sonic->enable);
		}
	} else {
		result = -EINVAL;
	}
	spin_unlock(&sonic->reg_lock);
	return result;
}

/*
 * Interrupt handler: dispatches DMA-A (playback) and DMA-C (capture)
 * period interrupts, MIDI interrupts, and the hardware volume
 * Up/Down-button interrupt.
 */
static irqreturn_t snd_sonicvibes_interrupt(int irq, void *dev_id)
{
	struct sonicvibes *sonic = dev_id;
	unsigned char status;

	status = inb(SV_REG(sonic, STATUS));
	/* none of the interrupt sources we handle is pending */
	if (!(status & (SV_DMAA_IRQ | SV_DMAC_IRQ | SV_MIDI_IRQ)))
		return IRQ_NONE;
	if (status == 0xff) {	/* failure */
		/* all-ones status reads as a dead chip: mask every source */
		outb(sonic->irqmask = ~0, SV_REG(sonic, IRQMASK));
		snd_printk(KERN_ERR "IRQ failure - interrupts disabled!!\n");
		return IRQ_HANDLED;
	}
	if (sonic->pcm) {
		if (status & SV_DMAA_IRQ)
			snd_pcm_period_elapsed(sonic->playback_substream);
		if (status & SV_DMAC_IRQ)
			snd_pcm_period_elapsed(sonic->capture_substream);
	}
	if (sonic->rmidi) {
		if (status & SV_MIDI_IRQ)
			snd_mpu401_uart_interrupt(irq, sonic->rmidi->private_data);
	}
	if (status & SV_UD_IRQ) {
		/*
		 * Hardware volume button: SV_IREG_UD_BUTTON encodes a
		 * 6-bit step (bits 0-5), direction in bit 0x40 (clear
		 * means "down", so vol is negated), and bit 0x80 which
		 * toggles bit 7 of both analog output registers
		 * (presumably the mute bit — NOTE(review): confirm
		 * against the SonicVibes datasheet).
		 */
		unsigned char udreg;
		int vol, oleft, oright, mleft, mright;

		spin_lock(&sonic->reg_lock);
		udreg = snd_sonicvibes_in1(sonic, SV_IREG_UD_BUTTON);
		vol = udreg & 0x3f;
		if (!(udreg & 0x40))
			vol = -vol;
		oleft = mleft = snd_sonicvibes_in1(sonic, SV_IREG_LEFT_ANALOG);
		oright = mright = snd_sonicvibes_in1(sonic, SV_IREG_RIGHT_ANALOG);
		/* volume is the low 5 bits; apply the step and clamp to 0..0x1f */
		oleft &= 0x1f;
		oright &= 0x1f;
		oleft += vol;
		if (oleft < 0)
			oleft = 0;
		if (oleft > 0x1f)
			oleft = 0x1f;
		oright += vol;
		if (oright < 0)
			oright = 0;
		if (oright > 0x1f)
			oright = 0x1f;
		if (udreg & 0x80) {
			mleft ^= 0x80;
			mright ^= 0x80;
		}
		/* recombine the (possibly toggled) top bit with the new level */
		oleft |= mleft & 0x80;
		oright |=
mright & 0x80; snd_sonicvibes_out1(sonic, SV_IREG_LEFT_ANALOG, oleft); snd_sonicvibes_out1(sonic, SV_IREG_RIGHT_ANALOG, oright); spin_unlock(&sonic->reg_lock); snd_ctl_notify(sonic->card, SNDRV_CTL_EVENT_MASK_VALUE, &sonic->master_mute->id); snd_ctl_notify(sonic->card, SNDRV_CTL_EVENT_MASK_VALUE, &sonic->master_volume->id); } return IRQ_HANDLED; } /* * PCM part */ static int snd_sonicvibes_playback_trigger(struct snd_pcm_substream *substream, int cmd) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); return snd_sonicvibes_trigger(sonic, 1, cmd); } static int snd_sonicvibes_capture_trigger(struct snd_pcm_substream *substream, int cmd) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); return snd_sonicvibes_trigger(sonic, 2, cmd); } static int snd_sonicvibes_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); } static int snd_sonicvibes_hw_free(struct snd_pcm_substream *substream) { return snd_pcm_lib_free_pages(substream); } static int snd_sonicvibes_playback_prepare(struct snd_pcm_substream *substream) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; unsigned char fmt = 0; unsigned int size = snd_pcm_lib_buffer_bytes(substream); unsigned int count = snd_pcm_lib_period_bytes(substream); sonic->p_dma_size = size; count--; if (runtime->channels > 1) fmt |= 1; if (snd_pcm_format_width(runtime->format) == 16) fmt |= 2; snd_sonicvibes_setfmt(sonic, ~3, fmt); snd_sonicvibes_set_dac_rate(sonic, runtime->rate); spin_lock_irq(&sonic->reg_lock); snd_sonicvibes_setdmaa(sonic, runtime->dma_addr, size); snd_sonicvibes_out1(sonic, SV_IREG_DMA_A_UPPER, count >> 8); snd_sonicvibes_out1(sonic, SV_IREG_DMA_A_LOWER, count); spin_unlock_irq(&sonic->reg_lock); return 0; } static int snd_sonicvibes_capture_prepare(struct snd_pcm_substream *substream) { struct sonicvibes *sonic = 
snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; unsigned char fmt = 0; unsigned int size = snd_pcm_lib_buffer_bytes(substream); unsigned int count = snd_pcm_lib_period_bytes(substream); sonic->c_dma_size = size; count >>= 1; count--; if (runtime->channels > 1) fmt |= 0x10; if (snd_pcm_format_width(runtime->format) == 16) fmt |= 0x20; snd_sonicvibes_setfmt(sonic, ~0x30, fmt); snd_sonicvibes_set_adc_rate(sonic, runtime->rate); spin_lock_irq(&sonic->reg_lock); snd_sonicvibes_setdmac(sonic, runtime->dma_addr, size); snd_sonicvibes_out1(sonic, SV_IREG_DMA_C_UPPER, count >> 8); snd_sonicvibes_out1(sonic, SV_IREG_DMA_C_LOWER, count); spin_unlock_irq(&sonic->reg_lock); return 0; } static snd_pcm_uframes_t snd_sonicvibes_playback_pointer(struct snd_pcm_substream *substream) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); size_t ptr; if (!(sonic->enable & 1)) return 0; ptr = sonic->p_dma_size - snd_sonicvibes_getdmaa(sonic); return bytes_to_frames(substream->runtime, ptr); } static snd_pcm_uframes_t snd_sonicvibes_capture_pointer(struct snd_pcm_substream *substream) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); size_t ptr; if (!(sonic->enable & 2)) return 0; ptr = sonic->c_dma_size - snd_sonicvibes_getdmac(sonic); return bytes_to_frames(substream->runtime, ptr); } static struct snd_pcm_hardware snd_sonicvibes_playback = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID), .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 4000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (128*1024), .period_bytes_min = 32, .period_bytes_max = (128*1024), .periods_min = 1, .periods_max = 1024, .fifo_size = 0, }; static struct snd_pcm_hardware snd_sonicvibes_capture = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | 
SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID), .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 4000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (128*1024), .period_bytes_min = 32, .period_bytes_max = (128*1024), .periods_min = 1, .periods_max = 1024, .fifo_size = 0, }; static int snd_sonicvibes_playback_open(struct snd_pcm_substream *substream) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; sonic->mode |= SV_MODE_PLAY; sonic->playback_substream = substream; runtime->hw = snd_sonicvibes_playback; snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, snd_sonicvibes_hw_constraint_dac_rate, NULL, SNDRV_PCM_HW_PARAM_RATE, -1); return 0; } static int snd_sonicvibes_capture_open(struct snd_pcm_substream *substream) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; sonic->mode |= SV_MODE_CAPTURE; sonic->capture_substream = substream; runtime->hw = snd_sonicvibes_capture; snd_pcm_hw_constraint_ratdens(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &snd_sonicvibes_hw_constraints_adc_clock); return 0; } static int snd_sonicvibes_playback_close(struct snd_pcm_substream *substream) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); sonic->playback_substream = NULL; sonic->mode &= ~SV_MODE_PLAY; return 0; } static int snd_sonicvibes_capture_close(struct snd_pcm_substream *substream) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); sonic->capture_substream = NULL; sonic->mode &= ~SV_MODE_CAPTURE; return 0; } static struct snd_pcm_ops snd_sonicvibes_playback_ops = { .open = snd_sonicvibes_playback_open, .close = snd_sonicvibes_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_sonicvibes_hw_params, .hw_free = snd_sonicvibes_hw_free, .prepare = snd_sonicvibes_playback_prepare, 
.trigger = snd_sonicvibes_playback_trigger, .pointer = snd_sonicvibes_playback_pointer, }; static struct snd_pcm_ops snd_sonicvibes_capture_ops = { .open = snd_sonicvibes_capture_open, .close = snd_sonicvibes_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_sonicvibes_hw_params, .hw_free = snd_sonicvibes_hw_free, .prepare = snd_sonicvibes_capture_prepare, .trigger = snd_sonicvibes_capture_trigger, .pointer = snd_sonicvibes_capture_pointer, }; static int __devinit snd_sonicvibes_pcm(struct sonicvibes * sonic, int device, struct snd_pcm ** rpcm) { struct snd_pcm *pcm; int err; if ((err = snd_pcm_new(sonic->card, "s3_86c617", device, 1, 1, &pcm)) < 0) return err; if (snd_BUG_ON(!pcm)) return -EINVAL; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_sonicvibes_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_sonicvibes_capture_ops); pcm->private_data = sonic; pcm->info_flags = 0; strcpy(pcm->name, "S3 SonicVibes"); sonic->pcm = pcm; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(sonic->pci), 64*1024, 128*1024); if (rpcm) *rpcm = pcm; return 0; } /* * Mixer part */ #define SONICVIBES_MUX(xname, xindex) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \ .info = snd_sonicvibes_info_mux, \ .get = snd_sonicvibes_get_mux, .put = snd_sonicvibes_put_mux } static int snd_sonicvibes_info_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { static char *texts[7] = { "CD", "PCM", "Aux1", "Line", "Aux0", "Mic", "Mix" }; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 2; uinfo->value.enumerated.items = 7; if (uinfo->value.enumerated.item >= 7) uinfo->value.enumerated.item = 6; strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); return 0; } static int snd_sonicvibes_get_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct sonicvibes *sonic = snd_kcontrol_chip(kcontrol); spin_lock_irq(&sonic->reg_lock); 
/* Tail of snd_sonicvibes_get_mux(): the capture source occupies bits
 * 5-7 of the ADC registers (SV_RECSRC_OUT); the hardware encoding is
 * 1-based while the ALSA enum is 0-based, hence the "- 1". */
	ucontrol->value.enumerated.item[0] = ((snd_sonicvibes_in1(sonic, SV_IREG_LEFT_ADC) & SV_RECSRC_OUT) >> 5) - 1;
	ucontrol->value.enumerated.item[1] = ((snd_sonicvibes_in1(sonic, SV_IREG_RIGHT_ADC) & SV_RECSRC_OUT) >> 5) - 1;
	spin_unlock_irq(&sonic->reg_lock);
	return 0;
}

/*
 * Set the capture (record) source for the left and right ADC
 * independently.  Items are validated against the 7-entry list
 * declared in snd_sonicvibes_info_mux().
 * Returns 1 if either register changed, 0 if nothing changed,
 * -EINVAL for an out-of-range item.
 */
static int snd_sonicvibes_put_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct sonicvibes *sonic = snd_kcontrol_chip(kcontrol);
	unsigned short left, right, oval1, oval2;
	int change;

	if (ucontrol->value.enumerated.item[0] >= 7 ||
	    ucontrol->value.enumerated.item[1] >= 7)
		return -EINVAL;
	/* convert back to the 1-based hardware encoding in bits 5-7 */
	left = (ucontrol->value.enumerated.item[0] + 1) << 5;
	right = (ucontrol->value.enumerated.item[1] + 1) << 5;
	spin_lock_irq(&sonic->reg_lock);
	oval1 = snd_sonicvibes_in1(sonic, SV_IREG_LEFT_ADC);
	oval2 = snd_sonicvibes_in1(sonic, SV_IREG_RIGHT_ADC);
	/* preserve the non-source bits of each ADC register */
	left = (oval1 & ~SV_RECSRC_OUT) | left;
	right = (oval2 & ~SV_RECSRC_OUT) | right;
	change = left != oval1 || right != oval2;
	snd_sonicvibes_out1(sonic, SV_IREG_LEFT_ADC, left);
	snd_sonicvibes_out1(sonic, SV_IREG_RIGHT_ADC, right);
	spin_unlock_irq(&sonic->reg_lock);
	return change;
}

/*
 * Template for a single-channel mixer control.  private_value packs:
 * reg | (shift << 8) | (mask << 16) | (invert << 24).
 */
#define SONICVIBES_SINGLE(xname, xindex, reg, shift, mask, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .info = snd_sonicvibes_info_single, \
  .get = snd_sonicvibes_get_single, .put = snd_sonicvibes_put_single, \
  .private_value = reg | (shift << 8) | (mask << 16) | (invert << 24) }

/* A mask of 1 makes the control a boolean switch, larger masks an
 * integer volume in the range [0, mask]. */
static int snd_sonicvibes_info_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	int mask = (kcontrol->private_value >> 16) & 0xff;

	uinfo->type = mask == 1 ?
SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = mask; return 0; } static int snd_sonicvibes_get_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct sonicvibes *sonic = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value & 0xff; int shift = (kcontrol->private_value >> 8) & 0xff; int mask = (kcontrol->private_value >> 16) & 0xff; int invert = (kcontrol->private_value >> 24) & 0xff; spin_lock_irq(&sonic->reg_lock); ucontrol->value.integer.value[0] = (snd_sonicvibes_in1(sonic, reg)>> shift) & mask; spin_unlock_irq(&sonic->reg_lock); if (invert) ucontrol->value.integer.value[0] = mask - ucontrol->value.integer.value[0]; return 0; } static int snd_sonicvibes_put_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct sonicvibes *sonic = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value & 0xff; int shift = (kcontrol->private_value >> 8) & 0xff; int mask = (kcontrol->private_value >> 16) & 0xff; int invert = (kcontrol->private_value >> 24) & 0xff; int change; unsigned short val, oval; val = (ucontrol->value.integer.value[0] & mask); if (invert) val = mask - val; val <<= shift; spin_lock_irq(&sonic->reg_lock); oval = snd_sonicvibes_in1(sonic, reg); val = (oval & ~(mask << shift)) | val; change = val != oval; snd_sonicvibes_out1(sonic, reg, val); spin_unlock_irq(&sonic->reg_lock); return change; } #define SONICVIBES_DOUBLE(xname, xindex, left_reg, right_reg, shift_left, shift_right, mask, invert) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \ .info = snd_sonicvibes_info_double, \ .get = snd_sonicvibes_get_double, .put = snd_sonicvibes_put_double, \ .private_value = left_reg | (right_reg << 8) | (shift_left << 16) | (shift_right << 19) | (mask << 24) | (invert << 22) } static int snd_sonicvibes_info_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int mask = 
(kcontrol->private_value >> 24) & 0xff; uinfo->type = mask == 1 ? SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = mask; return 0; } static int snd_sonicvibes_get_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct sonicvibes *sonic = snd_kcontrol_chip(kcontrol); int left_reg = kcontrol->private_value & 0xff; int right_reg = (kcontrol->private_value >> 8) & 0xff; int shift_left = (kcontrol->private_value >> 16) & 0x07; int shift_right = (kcontrol->private_value >> 19) & 0x07; int mask = (kcontrol->private_value >> 24) & 0xff; int invert = (kcontrol->private_value >> 22) & 1; spin_lock_irq(&sonic->reg_lock); ucontrol->value.integer.value[0] = (snd_sonicvibes_in1(sonic, left_reg) >> shift_left) & mask; ucontrol->value.integer.value[1] = (snd_sonicvibes_in1(sonic, right_reg) >> shift_right) & mask; spin_unlock_irq(&sonic->reg_lock); if (invert) { ucontrol->value.integer.value[0] = mask - ucontrol->value.integer.value[0]; ucontrol->value.integer.value[1] = mask - ucontrol->value.integer.value[1]; } return 0; } static int snd_sonicvibes_put_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct sonicvibes *sonic = snd_kcontrol_chip(kcontrol); int left_reg = kcontrol->private_value & 0xff; int right_reg = (kcontrol->private_value >> 8) & 0xff; int shift_left = (kcontrol->private_value >> 16) & 0x07; int shift_right = (kcontrol->private_value >> 19) & 0x07; int mask = (kcontrol->private_value >> 24) & 0xff; int invert = (kcontrol->private_value >> 22) & 1; int change; unsigned short val1, val2, oval1, oval2; val1 = ucontrol->value.integer.value[0] & mask; val2 = ucontrol->value.integer.value[1] & mask; if (invert) { val1 = mask - val1; val2 = mask - val2; } val1 <<= shift_left; val2 <<= shift_right; spin_lock_irq(&sonic->reg_lock); oval1 = snd_sonicvibes_in1(sonic, left_reg); oval2 = snd_sonicvibes_in1(sonic, right_reg); val1 = 
(oval1 & ~(mask << shift_left)) | val1; val2 = (oval2 & ~(mask << shift_right)) | val2; change = val1 != oval1 || val2 != oval2; snd_sonicvibes_out1(sonic, left_reg, val1); snd_sonicvibes_out1(sonic, right_reg, val2); spin_unlock_irq(&sonic->reg_lock); return change; } static struct snd_kcontrol_new snd_sonicvibes_controls[] __devinitdata = { SONICVIBES_DOUBLE("Capture Volume", 0, SV_IREG_LEFT_ADC, SV_IREG_RIGHT_ADC, 0, 0, 15, 0), SONICVIBES_DOUBLE("Aux Playback Switch", 0, SV_IREG_LEFT_AUX1, SV_IREG_RIGHT_AUX1, 7, 7, 1, 1), SONICVIBES_DOUBLE("Aux Playback Volume", 0, SV_IREG_LEFT_AUX1, SV_IREG_RIGHT_AUX1, 0, 0, 31, 1), SONICVIBES_DOUBLE("CD Playback Switch", 0, SV_IREG_LEFT_CD, SV_IREG_RIGHT_CD, 7, 7, 1, 1), SONICVIBES_DOUBLE("CD Playback Volume", 0, SV_IREG_LEFT_CD, SV_IREG_RIGHT_CD, 0, 0, 31, 1), SONICVIBES_DOUBLE("Line Playback Switch", 0, SV_IREG_LEFT_LINE, SV_IREG_RIGHT_LINE, 7, 7, 1, 1), SONICVIBES_DOUBLE("Line Playback Volume", 0, SV_IREG_LEFT_LINE, SV_IREG_RIGHT_LINE, 0, 0, 31, 1), SONICVIBES_SINGLE("Mic Playback Switch", 0, SV_IREG_MIC, 7, 1, 1), SONICVIBES_SINGLE("Mic Playback Volume", 0, SV_IREG_MIC, 0, 15, 1), SONICVIBES_SINGLE("Mic Boost", 0, SV_IREG_LEFT_ADC, 4, 1, 0), SONICVIBES_DOUBLE("Synth Playback Switch", 0, SV_IREG_LEFT_SYNTH, SV_IREG_RIGHT_SYNTH, 7, 7, 1, 1), SONICVIBES_DOUBLE("Synth Playback Volume", 0, SV_IREG_LEFT_SYNTH, SV_IREG_RIGHT_SYNTH, 0, 0, 31, 1), SONICVIBES_DOUBLE("Aux Playback Switch", 1, SV_IREG_LEFT_AUX2, SV_IREG_RIGHT_AUX2, 7, 7, 1, 1), SONICVIBES_DOUBLE("Aux Playback Volume", 1, SV_IREG_LEFT_AUX2, SV_IREG_RIGHT_AUX2, 0, 0, 31, 1), SONICVIBES_DOUBLE("Master Playback Switch", 0, SV_IREG_LEFT_ANALOG, SV_IREG_RIGHT_ANALOG, 7, 7, 1, 1), SONICVIBES_DOUBLE("Master Playback Volume", 0, SV_IREG_LEFT_ANALOG, SV_IREG_RIGHT_ANALOG, 0, 0, 31, 1), SONICVIBES_DOUBLE("PCM Playback Switch", 0, SV_IREG_LEFT_PCM, SV_IREG_RIGHT_PCM, 7, 7, 1, 1), SONICVIBES_DOUBLE("PCM Playback Volume", 0, SV_IREG_LEFT_PCM, SV_IREG_RIGHT_PCM, 0, 0, 63, 1), 
SONICVIBES_SINGLE("Loopback Capture Switch", 0, SV_IREG_ADC_OUTPUT_CTRL, 0, 1, 0), SONICVIBES_SINGLE("Loopback Capture Volume", 0, SV_IREG_ADC_OUTPUT_CTRL, 2, 63, 1), SONICVIBES_MUX("Capture Source", 0) }; static void snd_sonicvibes_master_free(struct snd_kcontrol *kcontrol) { struct sonicvibes *sonic = snd_kcontrol_chip(kcontrol); sonic->master_mute = NULL; sonic->master_volume = NULL; } static int __devinit snd_sonicvibes_mixer(struct sonicvibes * sonic) { struct snd_card *card; struct snd_kcontrol *kctl; unsigned int idx; int err; if (snd_BUG_ON(!sonic || !sonic->card)) return -EINVAL; card = sonic->card; strcpy(card->mixername, "S3 SonicVibes"); for (idx = 0; idx < ARRAY_SIZE(snd_sonicvibes_controls); idx++) { if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_sonicvibes_controls[idx], sonic))) < 0) return err; switch (idx) { case 0: case 1: kctl->private_free = snd_sonicvibes_master_free; break; } } return 0; } /* */ static void snd_sonicvibes_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct sonicvibes *sonic = entry->private_data; unsigned char tmp; tmp = sonic->srs_space & 0x0f; snd_iprintf(buffer, "SRS 3D : %s\n", sonic->srs_space & 0x80 ? "off" : "on"); snd_iprintf(buffer, "SRS Space : %s\n", tmp == 0x00 ? "100%" : tmp == 0x01 ? "75%" : tmp == 0x02 ? "50%" : tmp == 0x03 ? "25%" : "0%"); tmp = sonic->srs_center & 0x0f; snd_iprintf(buffer, "SRS Center : %s\n", tmp == 0x00 ? "100%" : tmp == 0x01 ? "75%" : tmp == 0x02 ? "50%" : tmp == 0x03 ? "25%" : "0%"); tmp = sonic->wave_source & 0x03; snd_iprintf(buffer, "WaveTable Source : %s\n", tmp == 0x00 ? "on-board ROM" : tmp == 0x01 ? "PCI bus" : "on-board ROM + PCI bus"); tmp = sonic->mpu_switch; snd_iprintf(buffer, "Onboard synth : %s\n", tmp & 0x01 ? "on" : "off"); snd_iprintf(buffer, "Ext. Rx to synth : %s\n", tmp & 0x02 ? "on" : "off"); snd_iprintf(buffer, "MIDI to ext. Tx : %s\n", tmp & 0x04 ? 
"on" : "off"); } static void __devinit snd_sonicvibes_proc_init(struct sonicvibes * sonic) { struct snd_info_entry *entry; if (! snd_card_proc_new(sonic->card, "sonicvibes", &entry)) snd_info_set_text_ops(entry, sonic, snd_sonicvibes_proc_read); } /* */ #ifdef SUPPORT_JOYSTICK static struct snd_kcontrol_new snd_sonicvibes_game_control __devinitdata = SONICVIBES_SINGLE("Joystick Speed", 0, SV_IREG_GAME_PORT, 1, 15, 0); static int __devinit snd_sonicvibes_create_gameport(struct sonicvibes *sonic) { struct gameport *gp; sonic->gameport = gp = gameport_allocate_port(); if (!gp) { printk(KERN_ERR "sonicvibes: cannot allocate memory for gameport\n"); return -ENOMEM; } gameport_set_name(gp, "SonicVibes Gameport"); gameport_set_phys(gp, "pci%s/gameport0", pci_name(sonic->pci)); gameport_set_dev_parent(gp, &sonic->pci->dev); gp->io = sonic->game_port; gameport_register_port(gp); snd_ctl_add(sonic->card, snd_ctl_new1(&snd_sonicvibes_game_control, sonic)); return 0; } static void snd_sonicvibes_free_gameport(struct sonicvibes *sonic) { if (sonic->gameport) { gameport_unregister_port(sonic->gameport); sonic->gameport = NULL; } } #else static inline int snd_sonicvibes_create_gameport(struct sonicvibes *sonic) { return -ENOSYS; } static inline void snd_sonicvibes_free_gameport(struct sonicvibes *sonic) { } #endif static int snd_sonicvibes_free(struct sonicvibes *sonic) { snd_sonicvibes_free_gameport(sonic); pci_write_config_dword(sonic->pci, 0x40, sonic->dmaa_port); pci_write_config_dword(sonic->pci, 0x48, sonic->dmac_port); if (sonic->irq >= 0) free_irq(sonic->irq, sonic); release_and_free_resource(sonic->res_dmaa); release_and_free_resource(sonic->res_dmac); pci_release_regions(sonic->pci); pci_disable_device(sonic->pci); kfree(sonic); return 0; } static int snd_sonicvibes_dev_free(struct snd_device *device) { struct sonicvibes *sonic = device->device_data; return snd_sonicvibes_free(sonic); } static int __devinit snd_sonicvibes_create(struct snd_card *card, struct pci_dev 
*pci, int reverb, int mge, struct sonicvibes ** rsonic) { struct sonicvibes *sonic; unsigned int dmaa, dmac; int err; static struct snd_device_ops ops = { .dev_free = snd_sonicvibes_dev_free, }; *rsonic = NULL; /* enable PCI device */ if ((err = pci_enable_device(pci)) < 0) return err; /* check, if we can restrict PCI DMA transfers to 24 bits */ if (pci_set_dma_mask(pci, DMA_BIT_MASK(24)) < 0 || pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(24)) < 0) { snd_printk(KERN_ERR "architecture does not support 24bit PCI busmaster DMA\n"); pci_disable_device(pci); return -ENXIO; } sonic = kzalloc(sizeof(*sonic), GFP_KERNEL); if (sonic == NULL) { pci_disable_device(pci); return -ENOMEM; } spin_lock_init(&sonic->reg_lock); sonic->card = card; sonic->pci = pci; sonic->irq = -1; if ((err = pci_request_regions(pci, "S3 SonicVibes")) < 0) { kfree(sonic); pci_disable_device(pci); return err; } sonic->sb_port = pci_resource_start(pci, 0); sonic->enh_port = pci_resource_start(pci, 1); sonic->synth_port = pci_resource_start(pci, 2); sonic->midi_port = pci_resource_start(pci, 3); sonic->game_port = pci_resource_start(pci, 4); if (request_irq(pci->irq, snd_sonicvibes_interrupt, IRQF_SHARED, "S3 SonicVibes", sonic)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); snd_sonicvibes_free(sonic); return -EBUSY; } sonic->irq = pci->irq; pci_read_config_dword(pci, 0x40, &dmaa); pci_read_config_dword(pci, 0x48, &dmac); dmaio &= ~0x0f; dmaa &= ~0x0f; dmac &= ~0x0f; if (!dmaa) { dmaa = dmaio; dmaio += 0x10; snd_printk(KERN_INFO "BIOS did not allocate DDMA channel A i/o, allocated at 0x%x\n", dmaa); } if (!dmac) { dmac = dmaio; dmaio += 0x10; snd_printk(KERN_INFO "BIOS did not allocate DDMA channel C i/o, allocated at 0x%x\n", dmac); } pci_write_config_dword(pci, 0x40, dmaa); pci_write_config_dword(pci, 0x48, dmac); if ((sonic->res_dmaa = request_region(dmaa, 0x10, "S3 SonicVibes DDMA-A")) == NULL) { snd_sonicvibes_free(sonic); snd_printk(KERN_ERR "unable to grab DDMA-A port at 
0x%x-0x%x\n", dmaa, dmaa + 0x10 - 1); return -EBUSY; } if ((sonic->res_dmac = request_region(dmac, 0x10, "S3 SonicVibes DDMA-C")) == NULL) { snd_sonicvibes_free(sonic); snd_printk(KERN_ERR "unable to grab DDMA-C port at 0x%x-0x%x\n", dmac, dmac + 0x10 - 1); return -EBUSY; } pci_read_config_dword(pci, 0x40, &sonic->dmaa_port); pci_read_config_dword(pci, 0x48, &sonic->dmac_port); sonic->dmaa_port &= ~0x0f; sonic->dmac_port &= ~0x0f; pci_write_config_dword(pci, 0x40, sonic->dmaa_port | 9); /* enable + enhanced */ pci_write_config_dword(pci, 0x48, sonic->dmac_port | 9); /* enable */ /* ok.. initialize S3 SonicVibes chip */ outb(SV_RESET, SV_REG(sonic, CONTROL)); /* reset chip */ udelay(100); outb(0, SV_REG(sonic, CONTROL)); /* release reset */ udelay(100); outb(SV_ENHANCED | SV_INTA | (reverb ? SV_REVERB : 0), SV_REG(sonic, CONTROL)); inb(SV_REG(sonic, STATUS)); /* clear IRQs */ #if 1 snd_sonicvibes_out(sonic, SV_IREG_DRIVE_CTRL, 0); /* drive current 16mA */ #else snd_sonicvibes_out(sonic, SV_IREG_DRIVE_CTRL, 0x40); /* drive current 8mA */ #endif snd_sonicvibes_out(sonic, SV_IREG_PC_ENABLE, sonic->enable = 0); /* disable playback & capture */ outb(sonic->irqmask = ~(SV_DMAA_MASK | SV_DMAC_MASK | SV_UD_MASK), SV_REG(sonic, IRQMASK)); inb(SV_REG(sonic, STATUS)); /* clear IRQs */ snd_sonicvibes_out(sonic, SV_IREG_ADC_CLOCK, 0); /* use PLL as clock source */ snd_sonicvibes_out(sonic, SV_IREG_ANALOG_POWER, 0); /* power up analog parts */ snd_sonicvibes_out(sonic, SV_IREG_DIGITAL_POWER, 0); /* power up digital parts */ snd_sonicvibes_setpll(sonic, SV_IREG_ADC_PLL, 8000); snd_sonicvibes_out(sonic, SV_IREG_SRS_SPACE, sonic->srs_space = 0x80); /* SRS space off */ snd_sonicvibes_out(sonic, SV_IREG_SRS_CENTER, sonic->srs_center = 0x00);/* SRS center off */ snd_sonicvibes_out(sonic, SV_IREG_MPU401, sonic->mpu_switch = 0x05); /* MPU-401 switch */ snd_sonicvibes_out(sonic, SV_IREG_WAVE_SOURCE, sonic->wave_source = 0x00); /* onboard ROM */ snd_sonicvibes_out(sonic, 
SV_IREG_PCM_RATE_LOW, (8000 * 65536 / SV_FULLRATE) & 0xff); snd_sonicvibes_out(sonic, SV_IREG_PCM_RATE_HIGH, ((8000 * 65536 / SV_FULLRATE) >> 8) & 0xff); snd_sonicvibes_out(sonic, SV_IREG_LEFT_ADC, mge ? 0xd0 : 0xc0); snd_sonicvibes_out(sonic, SV_IREG_RIGHT_ADC, 0xc0); snd_sonicvibes_out(sonic, SV_IREG_LEFT_AUX1, 0x9f); snd_sonicvibes_out(sonic, SV_IREG_RIGHT_AUX1, 0x9f); snd_sonicvibes_out(sonic, SV_IREG_LEFT_CD, 0x9f); snd_sonicvibes_out(sonic, SV_IREG_RIGHT_CD, 0x9f); snd_sonicvibes_out(sonic, SV_IREG_LEFT_LINE, 0x9f); snd_sonicvibes_out(sonic, SV_IREG_RIGHT_LINE, 0x9f); snd_sonicvibes_out(sonic, SV_IREG_MIC, 0x8f); snd_sonicvibes_out(sonic, SV_IREG_LEFT_SYNTH, 0x9f); snd_sonicvibes_out(sonic, SV_IREG_RIGHT_SYNTH, 0x9f); snd_sonicvibes_out(sonic, SV_IREG_LEFT_AUX2, 0x9f); snd_sonicvibes_out(sonic, SV_IREG_RIGHT_AUX2, 0x9f); snd_sonicvibes_out(sonic, SV_IREG_LEFT_ANALOG, 0x9f); snd_sonicvibes_out(sonic, SV_IREG_RIGHT_ANALOG, 0x9f); snd_sonicvibes_out(sonic, SV_IREG_LEFT_PCM, 0xbf); snd_sonicvibes_out(sonic, SV_IREG_RIGHT_PCM, 0xbf); snd_sonicvibes_out(sonic, SV_IREG_ADC_OUTPUT_CTRL, 0xfc); #if 0 snd_sonicvibes_debug(sonic); #endif sonic->revision = snd_sonicvibes_in(sonic, SV_IREG_REVISION); if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, sonic, &ops)) < 0) { snd_sonicvibes_free(sonic); return err; } snd_sonicvibes_proc_init(sonic); snd_card_set_dev(card, &pci->dev); *rsonic = sonic; return 0; } /* * MIDI section */ static struct snd_kcontrol_new snd_sonicvibes_midi_controls[] __devinitdata = { SONICVIBES_SINGLE("SonicVibes Wave Source RAM", 0, SV_IREG_WAVE_SOURCE, 0, 1, 0), SONICVIBES_SINGLE("SonicVibes Wave Source RAM+ROM", 0, SV_IREG_WAVE_SOURCE, 1, 1, 0), SONICVIBES_SINGLE("SonicVibes Onboard Synth", 0, SV_IREG_MPU401, 0, 1, 0), SONICVIBES_SINGLE("SonicVibes External Rx to Synth", 0, SV_IREG_MPU401, 1, 1, 0), SONICVIBES_SINGLE("SonicVibes External Tx", 0, SV_IREG_MPU401, 2, 1, 0) }; static int snd_sonicvibes_midi_input_open(struct snd_mpu401 * mpu) { 
struct sonicvibes *sonic = mpu->private_data; outb(sonic->irqmask &= ~SV_MIDI_MASK, SV_REG(sonic, IRQMASK)); return 0; } static void snd_sonicvibes_midi_input_close(struct snd_mpu401 * mpu) { struct sonicvibes *sonic = mpu->private_data; outb(sonic->irqmask |= SV_MIDI_MASK, SV_REG(sonic, IRQMASK)); } static int __devinit snd_sonicvibes_midi(struct sonicvibes * sonic, struct snd_rawmidi *rmidi) { struct snd_mpu401 * mpu = rmidi->private_data; struct snd_card *card = sonic->card; struct snd_rawmidi_str *dir; unsigned int idx; int err; mpu->private_data = sonic; mpu->open_input = snd_sonicvibes_midi_input_open; mpu->close_input = snd_sonicvibes_midi_input_close; dir = &rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT]; for (idx = 0; idx < ARRAY_SIZE(snd_sonicvibes_midi_controls); idx++) if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_sonicvibes_midi_controls[idx], sonic))) < 0) return err; return 0; } static int __devinit snd_sonic_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct sonicvibes *sonic; struct snd_rawmidi *midi_uart; struct snd_opl3 *opl3; int idx, err; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; for (idx = 0; idx < 5; idx++) { if (pci_resource_start(pci, idx) == 0 || !(pci_resource_flags(pci, idx) & IORESOURCE_IO)) { snd_card_free(card); return -ENODEV; } } if ((err = snd_sonicvibes_create(card, pci, reverb[dev] ? 1 : 0, mge[dev] ? 
1 : 0, &sonic)) < 0) { snd_card_free(card); return err; } strcpy(card->driver, "SonicVibes"); strcpy(card->shortname, "S3 SonicVibes"); sprintf(card->longname, "%s rev %i at 0x%llx, irq %i", card->shortname, sonic->revision, (unsigned long long)pci_resource_start(pci, 1), sonic->irq); if ((err = snd_sonicvibes_pcm(sonic, 0, NULL)) < 0) { snd_card_free(card); return err; } if ((err = snd_sonicvibes_mixer(sonic)) < 0) { snd_card_free(card); return err; } if ((err = snd_mpu401_uart_new(card, 0, MPU401_HW_SONICVIBES, sonic->midi_port, MPU401_INFO_INTEGRATED, sonic->irq, 0, &midi_uart)) < 0) { snd_card_free(card); return err; } snd_sonicvibes_midi(sonic, midi_uart); if ((err = snd_opl3_create(card, sonic->synth_port, sonic->synth_port + 2, OPL3_HW_OPL3_SV, 1, &opl3)) < 0) { snd_card_free(card); return err; } if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) { snd_card_free(card); return err; } snd_sonicvibes_create_gameport(sonic); if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); dev++; return 0; } static void __devexit snd_sonic_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } static struct pci_driver driver = { .name = "S3 SonicVibes", .id_table = snd_sonic_ids, .probe = snd_sonic_probe, .remove = __devexit_p(snd_sonic_remove), }; static int __init alsa_card_sonicvibes_init(void) { return pci_register_driver(&driver); } static void __exit alsa_card_sonicvibes_exit(void) { pci_unregister_driver(&driver); } module_init(alsa_card_sonicvibes_init) module_exit(alsa_card_sonicvibes_exit)
gpl-2.0
jiankangshiye/linux-2.6.32.63-mini2440
drivers/staging/comedi/drivers/addi-data/addi_common.c
582
57084
/** @verbatim Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module. ADDI-DATA GmbH Dieselstrasse 3 D-77833 Ottersweier Tel: +19(0)7223/9493-0 Fax: +49(0)7223/9493-92 http://www.addi-data-com info@addi-data.com This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA You shoud also find the complete GPL in the COPYING file accompanying this source code. @endverbatim */ /* +-----------------------------------------------------------------------+ | (C) ADDI-DATA GmbH Dieselstrasse 3 D-77833 Ottersweier | +-----------------------------------------------------------------------+ | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com | | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com | +-----------------------------------------------------------------------+ | Project : ADDI DATA | Compiler : GCC | | Modulname : addi_common.c | Version : 2.96 | +-------------------------------+---------------------------------------+ | Author : | Date : | +-----------------------------------------------------------------------+ | Description : ADDI COMMON Main Module | +-----------------------------------------------------------------------+ | CONFIG OPTIONS | | option[0] - PCI bus number - if bus number and slot number are 0, | | then driver search for first unused card | | option[1] - PCI slot number | | | | option[2] = 0 - DMA ENABLE | | = 1 
- DMA DISABLE | +----------+-----------+------------------------------------------------+ */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/timex.h> #include <linux/timer.h> #include <linux/pci.h> #include "../../comedidev.h" #include <asm/io.h> #if defined(CONFIG_APCI_1710) || defined(CONFIG_APCI_3200) || defined(CONFIG_APCI_3300) #include <asm/i387.h> #endif #include "../comedi_fc.h" #include "addi_common.h" #include "addi_amcc_s5933.h" /* Update-0.7.57->0.7.68MODULE_AUTHOR("ADDI-DATA GmbH <info@addi-data.com>"); */ /* Update-0.7.57->0.7.68MODULE_DESCRIPTION("Comedi ADDI-DATA module"); */ /* Update-0.7.57->0.7.68MODULE_LICENSE("GPL"); */ #define devpriv ((struct addi_private *)dev->private) #define this_board ((struct addi_board *)dev->board_ptr) #if defined(CONFIG_APCI_1710) || defined(CONFIG_APCI_3200) || defined(CONFIG_APCI_3300) /* BYTE b_SaveFPUReg [94]; */ void fpu_begin(void) { /* asm ("fstenv b_SaveFPUReg"); */ kernel_fpu_begin(); } void fpu_end(void) { /* asm ("frstor b_SaveFPUReg"); */ kernel_fpu_end(); } #endif #include "addi_eeprom.c" #if (defined (CONFIG_APCI_3120) || defined (CONFIG_APCI_3001)) #include "hwdrv_apci3120.c" #endif #ifdef CONFIG_APCI_1032 #include "hwdrv_apci1032.c" #endif #ifdef CONFIG_APCI_1516 #include "hwdrv_apci1516.c" #endif #ifdef CONFIG_APCI_2016 #include "hwdrv_apci2016.c" #endif #ifdef CONFIG_APCI_2032 #include "hwdrv_apci2032.c" #endif #ifdef CONFIG_APCI_2200 #include "hwdrv_apci2200.c" #endif #ifdef CONFIG_APCI_1564 #include "hwdrv_apci1564.c" #endif #ifdef CONFIG_APCI_1500 #include "hwdrv_apci1500.c" #endif #ifdef CONFIG_APCI_3501 #include "hwdrv_apci3501.c" #endif #ifdef CONFIG_APCI_035 #include "hwdrv_apci035.c" #endif #if (defined (CONFIG_APCI_3200) || defined (CONFIG_APCI_3300)) #include "hwdrv_apci3200.c" #endif #ifdef 
CONFIG_APCI_1710 #include "hwdrv_APCI1710.c" #endif #ifdef CONFIG_APCI_16XX #include "hwdrv_apci16xx.c" #endif #ifdef CONFIG_APCI_3XXX #include "hwdrv_apci3xxx.c" #endif #ifndef COMEDI_SUBD_TTLIO #define COMEDI_SUBD_TTLIO 11 /* Digital Input Output But TTL */ #endif static DEFINE_PCI_DEVICE_TABLE(addi_apci_tbl) = { #ifdef CONFIG_APCI_3120 {APCI3120_BOARD_VENDOR_ID, 0x818D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_1032 {APCI1032_BOARD_VENDOR_ID, 0x1003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_1516 {APCI1516_BOARD_VENDOR_ID, 0x1001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_2016 {APCI2016_BOARD_VENDOR_ID, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_2032 {APCI2032_BOARD_VENDOR_ID, 0x1004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_2200 {APCI2200_BOARD_VENDOR_ID, 0x1005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_1564 {APCI1564_BOARD_VENDOR_ID, 0x1006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_1500 {APCI1500_BOARD_VENDOR_ID, 0x80fc, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_3001 {APCI3120_BOARD_VENDOR_ID, 0x828D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_3501 {APCI3501_BOARD_VENDOR_ID, 0x3001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_035 {APCI035_BOARD_VENDOR_ID, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_3200 {APCI3200_BOARD_VENDOR_ID, 0x3000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_3300 {APCI3200_BOARD_VENDOR_ID, 0x3007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_1710 {APCI1710_BOARD_VENDOR_ID, APCI1710_BOARD_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_16XX {0x15B8, 0x1009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x100A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif #ifdef CONFIG_APCI_3XXX {0x15B8, 0x3010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x300F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 
{0x15B8, 0x300E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3015, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3016, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3017, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3018, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x301A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x301B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x301C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x301D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x301E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x301F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3023, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x300B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x15B8, 0x3024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, #endif {0} }; MODULE_DEVICE_TABLE(pci, addi_apci_tbl); static const struct addi_board boardtypes[] = { #ifdef CONFIG_APCI_3120 {"apci3120", APCI3120_BOARD_VENDOR_ID, 0x818D, AMCC_OP_REG_SIZE, APCI3120_ADDRESS_RANGE, 8, 0, ADDIDATA_NO_EEPROM, NULL, 16, 8, 16, 8, 0xffff, 0x3fff, &range_apci3120_ai, &range_apci3120_ao, 4, 4, 0x0f, 0, NULL, 1, 1, 1, 10000, 100000, v_APCI3120_Interrupt, i_APCI3120_Reset, i_APCI3120_InsnConfigAnalogInput, i_APCI3120_InsnReadAnalogInput, NULL, NULL, i_APCI3120_CommandTestAnalogInput, i_APCI3120_CommandAnalogInput, i_APCI3120_StopCyclicAcquisition, NULL, i_APCI3120_InsnWriteAnalogOutput, NULL, NULL, i_APCI3120_InsnReadDigitalInput, NULL, i_APCI3120_InsnBitsDigitalInput, i_APCI3120_InsnConfigDigitalOutput, i_APCI3120_InsnWriteDigitalOutput, i_APCI3120_InsnBitsDigitalOutput, NULL, 
i_APCI3120_InsnConfigTimer, i_APCI3120_InsnWriteTimer, i_APCI3120_InsnReadTimer, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_1032 {"apci1032", APCI1032_BOARD_VENDOR_ID, 0x1003, 4, APCI1032_ADDRESS_RANGE, 0, 0, ADDIDATA_EEPROM, ADDIDATA_93C76, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, NULL, 0, 0, 0, 0, 0, v_APCI1032_Interrupt, i_APCI1032_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI1032_ConfigDigitalInput, i_APCI1032_Read1DigitalInput, NULL, i_APCI1032_ReadMoreDigitalInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_1516 {"apci1516", APCI1516_BOARD_VENDOR_ID, 0x1001, 128, APCI1516_ADDRESS_RANGE, 32, 0, ADDIDATA_EEPROM, ADDIDATA_S5920, 0, 0, 0, 0, 0, 0, NULL, NULL, 8, 8, 0, 0, NULL, 0, 1, 0, 0, 0, NULL, i_APCI1516_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI1516_Read1DigitalInput, NULL, i_APCI1516_ReadMoreDigitalInput, i_APCI1516_ConfigDigitalOutput, i_APCI1516_WriteDigitalOutput, i_APCI1516_ReadDigitalOutput, NULL, i_APCI1516_ConfigWatchdog, i_APCI1516_StartStopWriteWatchdog, i_APCI1516_ReadWatchdog, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_2016 {"apci2016", APCI2016_BOARD_VENDOR_ID, 0x1002, 128, APCI2016_ADDRESS_RANGE, 32, 0, ADDIDATA_EEPROM, ADDIDATA_S5920, 0, 0, 0, 0, 0, 0, NULL, NULL, 0, 16, 0, 0, NULL, 0, 1, 0, 0, 0, NULL, i_APCI2016_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI2016_ConfigDigitalOutput, i_APCI2016_WriteDigitalOutput, i_APCI2016_BitsDigitalOutput, NULL, i_APCI2016_ConfigWatchdog, i_APCI2016_StartStopWriteWatchdog, i_APCI2016_ReadWatchdog, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_2032 {"apci2032", APCI2032_BOARD_VENDOR_ID, 0x1004, 4, APCI2032_ADDRESS_RANGE, 0, 0, ADDIDATA_EEPROM, ADDIDATA_93C76, 0, 0, 0, 0, 0, 0, NULL, NULL, 0, 32, 0xffffffff, 0, NULL, 0, 1, 0, 0, 0, v_APCI2032_Interrupt, i_APCI2032_Reset, NULL, NULL, NULL, NULL, NULL, 
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI2032_ConfigDigitalOutput, i_APCI2032_WriteDigitalOutput, i_APCI2032_ReadDigitalOutput, i_APCI2032_ReadInterruptStatus, i_APCI2032_ConfigWatchdog, i_APCI2032_StartStopWriteWatchdog, i_APCI2032_ReadWatchdog, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_2200 {"apci2200", APCI2200_BOARD_VENDOR_ID, 0x1005, 4, APCI2200_ADDRESS_RANGE, 0, 0, ADDIDATA_EEPROM, ADDIDATA_93C76, 0, 0, 0, 0, 0, 0, NULL, NULL, 8, 16, 0, 0, NULL, 0, 1, 0, 0, 0, NULL, i_APCI2200_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI2200_Read1DigitalInput, NULL, i_APCI2200_ReadMoreDigitalInput, i_APCI2200_ConfigDigitalOutput, i_APCI2200_WriteDigitalOutput, i_APCI2200_ReadDigitalOutput, NULL, i_APCI2200_ConfigWatchdog, i_APCI2200_StartStopWriteWatchdog, i_APCI2200_ReadWatchdog, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_1564 {"apci1564", APCI1564_BOARD_VENDOR_ID, 0x1006, 128, APCI1564_ADDRESS_RANGE, 0, 0, ADDIDATA_EEPROM, ADDIDATA_93C76, 0, 0, 0, 0, 0, 0, NULL, NULL, 32, 32, 0xffffffff, 0, NULL, 0, 1, 0, 0, 0, v_APCI1564_Interrupt, i_APCI1564_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI1564_ConfigDigitalInput, i_APCI1564_Read1DigitalInput, NULL, i_APCI1564_ReadMoreDigitalInput, i_APCI1564_ConfigDigitalOutput, i_APCI1564_WriteDigitalOutput, i_APCI1564_ReadDigitalOutput, i_APCI1564_ReadInterruptStatus, i_APCI1564_ConfigTimerCounterWatchdog, i_APCI1564_StartStopWriteTimerCounterWatchdog, i_APCI1564_ReadTimerCounterWatchdog, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_1500 {"apci1500", APCI1500_BOARD_VENDOR_ID, 0x80fc, 128, APCI1500_ADDRESS_RANGE, 4, 0, ADDIDATA_NO_EEPROM, NULL, 0, 0, 0, 0, 0, 0, NULL, NULL, 16, 16, 0xffff, 0, NULL, 0, 1, 0, 0, 0, v_APCI1500_Interrupt, i_APCI1500_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI1500_ConfigDigitalInputEvent, i_APCI1500_Initialisation, i_APCI1500_StartStopInputEvent, 
i_APCI1500_ReadMoreDigitalInput, i_APCI1500_ConfigDigitalOutputErrorInterrupt, i_APCI1500_WriteDigitalOutput, i_APCI1500_ConfigureInterrupt, NULL, i_APCI1500_ConfigCounterTimerWatchdog, i_APCI1500_StartStopTriggerTimerCounterWatchdog, i_APCI1500_ReadInterruptMask, i_APCI1500_ReadCounterTimerWatchdog, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_3001 {"apci3001", APCI3120_BOARD_VENDOR_ID, 0x828D, AMCC_OP_REG_SIZE, APCI3120_ADDRESS_RANGE, 8, 0, ADDIDATA_NO_EEPROM, NULL, 16, 8, 16, 0, 0xfff, 0, &range_apci3120_ai, NULL, 4, 4, 0x0f, 0, NULL, 1, 1, 1, 10000, 100000, v_APCI3120_Interrupt, i_APCI3120_Reset, i_APCI3120_InsnConfigAnalogInput, i_APCI3120_InsnReadAnalogInput, NULL, NULL, i_APCI3120_CommandTestAnalogInput, i_APCI3120_CommandAnalogInput, i_APCI3120_StopCyclicAcquisition, NULL, NULL, NULL, NULL, i_APCI3120_InsnReadDigitalInput, NULL, i_APCI3120_InsnBitsDigitalInput, i_APCI3120_InsnConfigDigitalOutput, i_APCI3120_InsnWriteDigitalOutput, i_APCI3120_InsnBitsDigitalOutput, NULL, i_APCI3120_InsnConfigTimer, i_APCI3120_InsnWriteTimer, i_APCI3120_InsnReadTimer, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_3501 {"apci3501", APCI3501_BOARD_VENDOR_ID, 0x3001, 64, APCI3501_ADDRESS_RANGE, 0, 0, ADDIDATA_EEPROM, ADDIDATA_S5933, 0, 0, 0, 8, 0, 16383, NULL, &range_apci3501_ao, 2, 2, 0x3, 0, NULL, 0, 1, 0, 0, 0, v_APCI3501_Interrupt, i_APCI3501_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3501_ConfigAnalogOutput, i_APCI3501_WriteAnalogOutput, NULL, NULL, NULL, NULL, i_APCI3501_ReadDigitalInput, i_APCI3501_ConfigDigitalOutput, i_APCI3501_WriteDigitalOutput, i_APCI3501_ReadDigitalOutput, NULL, i_APCI3501_ConfigTimerCounterWatchdog, i_APCI3501_StartStopWriteTimerCounterWatchdog, i_APCI3501_ReadTimerCounterWatchdog, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_035 {"apci035", APCI035_BOARD_VENDOR_ID, 0x0300, 127, APCI035_ADDRESS_RANGE, 0, 0, 1, ADDIDATA_S5920, 16, 8, 16, 0, 0xff, 0, &range_apci035_ai, NULL, 0, 0, 0, 0, NULL, 0, 1, 0, 10000, 
100000, v_APCI035_Interrupt, i_APCI035_Reset, i_APCI035_ConfigAnalogInput, i_APCI035_ReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI035_ConfigTimerWatchdog, i_APCI035_StartStopWriteTimerWatchdog, i_APCI035_ReadTimerWatchdog, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_3200 {"apci3200", APCI3200_BOARD_VENDOR_ID, 0x3000, 128, 256, 4, 4, ADDIDATA_EEPROM, ADDIDATA_S5920, 16, 8, 16, 0, 0x3ffff, 0, &range_apci3200_ai, NULL, 4, 4, 0, 0, NULL, 0, 0, 0, 10000, 100000, v_APCI3200_Interrupt, i_APCI3200_Reset, i_APCI3200_ConfigAnalogInput, i_APCI3200_ReadAnalogInput, i_APCI3200_InsnWriteReleaseAnalogInput, i_APCI3200_InsnBits_AnalogInput_Test, i_APCI3200_CommandTestAnalogInput, i_APCI3200_CommandAnalogInput, i_APCI3200_StopCyclicAcquisition, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3200_ReadDigitalInput, i_APCI3200_ConfigDigitalOutput, i_APCI3200_WriteDigitalOutput, i_APCI3200_ReadDigitalOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_3300 /* Begin JK .20.10.2004 = APCI-3300 integration */ {"apci3300", APCI3200_BOARD_VENDOR_ID, 0x3007, 128, 256, 4, 4, ADDIDATA_EEPROM, ADDIDATA_S5920, 0, 8, 8, 0, 0x3ffff, 0, &range_apci3300_ai, NULL, 4, 4, 0, 0, NULL, 0, 0, 0, 10000, 100000, v_APCI3200_Interrupt, i_APCI3200_Reset, i_APCI3200_ConfigAnalogInput, i_APCI3200_ReadAnalogInput, i_APCI3200_InsnWriteReleaseAnalogInput, i_APCI3200_InsnBits_AnalogInput_Test, i_APCI3200_CommandTestAnalogInput, i_APCI3200_CommandAnalogInput, i_APCI3200_StopCyclicAcquisition, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3200_ReadDigitalInput, i_APCI3200_ConfigDigitalOutput, i_APCI3200_WriteDigitalOutput, i_APCI3200_ReadDigitalOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_1710 {"apci1710", APCI1710_BOARD_VENDOR_ID, APCI1710_BOARD_DEVICE_ID, 128, 8, 256, 0, ADDIDATA_NO_EEPROM, NULL, 0, 0, 0, 0, 0, 0, NULL, NULL, 0, 0, 0, 0, NULL, 0, 0, 0, 0, 0, 
v_APCI1710_Interrupt, i_APCI1710_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, #endif #ifdef CONFIG_APCI_16XX {"apci1648", 0x15B8, 0x1009, 128, 0, 0, 0, ADDIDATA_NO_EEPROM, NULL, 0, 0, 0, 0, 0, 0, NULL, NULL, 0, 0, 0, 48, &range_apci16xx_ttl, 0, 0, 0, 0, 0, NULL, i_APCI16XX_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI16XX_InsnConfigInitTTLIO, i_APCI16XX_InsnBitsReadTTLIO, i_APCI16XX_InsnReadTTLIOAllPortValue, i_APCI16XX_InsnBitsWriteTTLIO}, {"apci1696", 0x15B8, 0x100A, 128, 0, 0, 0, ADDIDATA_NO_EEPROM, NULL, 0, 0, 0, 0, 0, 0, NULL, NULL, 0, 0, 0, 96, &range_apci16xx_ttl, 0, 0, 0, 0, 0, NULL, i_APCI16XX_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI16XX_InsnConfigInitTTLIO, i_APCI16XX_InsnBitsReadTTLIO, i_APCI16XX_InsnReadTTLIOAllPortValue, i_APCI16XX_InsnBitsWriteTTLIO}, #endif #ifdef CONFIG_APCI_3XXX {"apci3000-16", 0x15B8, 0x3010, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 16, 8, 16, 0, 4095, 0, &range_apci3XXX_ai, NULL, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3000-8", 0x15B8, 0x300F, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 8, 4, 8, 0, 4095, 0, &range_apci3XXX_ai, NULL, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3000-4", 0x15B8, 0x300E, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 4, 2, 4, 0, 4095, 0, &range_apci3XXX_ai, NULL, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3006-16", 0x15B8, 0x3013, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 16, 8, 16, 0, 65535, 0, &range_apci3XXX_ai, NULL, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3006-8", 0x15B8, 0x3014, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 8, 4, 8, 0, 65535, 0, &range_apci3XXX_ai, NULL, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3006-4", 0x15B8, 0x3015, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 4, 2, 4, 0, 65535, 0, &range_apci3XXX_ai, NULL, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, 
i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3010-16", 0x15B8, 0x3016, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 16, 8, 16, 0, 4095, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3010-8", 0x15B8, 0x3017, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 8, 4, 8, 0, 4095, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3010-4", 0x15B8, 0x3018, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 4, 2, 4, 0, 4095, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 
i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3016-16", 0x15B8, 0x3019, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 16, 8, 16, 0, 65535, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3016-8", 0x15B8, 0x301A, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 8, 4, 8, 0, 65535, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3016-4", 0x15B8, 0x301B, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 4, 2, 4, 0, 65535, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 
i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3100-16-4", 0x15B8, 0x301C, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 16, 8, 16, 4, 4095, 4095, &range_apci3XXX_ai, &range_apci3XXX_ao, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3100-8-4", 0x15B8, 0x301D, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 8, 4, 8, 4, 4095, 4095, &range_apci3XXX_ai, &range_apci3XXX_ao, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3106-16-4", 0x15B8, 0x301E, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 16, 8, 16, 4, 65535, 4095, &range_apci3XXX_ai, &range_apci3XXX_ao, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, 
i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3106-8-4", 0x15B8, 0x301F, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 8, 4, 8, 4, 65535, 4095, &range_apci3XXX_ai, &range_apci3XXX_ao, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3110-16-4", 0x15B8, 0x3020, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 16, 8, 16, 4, 4095, 4095, &range_apci3XXX_ai, &range_apci3XXX_ao, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3110-8-4", 0x15B8, 0x3021, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 8, 4, 8, 4, 4095, 4095, &range_apci3XXX_ai, &range_apci3XXX_ao, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, 
i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3116-16-4", 0x15B8, 0x3022, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 16, 8, 16, 4, 65535, 4095, &range_apci3XXX_ai, &range_apci3XXX_ao, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3116-8-4", 0x15B8, 0x3023, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 8, 4, 8, 4, 65535, 4095, &range_apci3XXX_ai, &range_apci3XXX_ao, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, {"apci3003", 0x15B8, 0x300B, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 0, 4, 4, 0, 65535, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 0, NULL, 0, 0, 7, 2500, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, 
NULL, NULL, NULL, NULL, NULL}, {"apci3002-16", 0x15B8, 0x3002, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 0, 16, 16, 0, 65535, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 0, NULL, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {"apci3002-8", 0x15B8, 0x3003, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 0, 8, 8, 0, 65535, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 0, NULL, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {"apci3002-4", 0x15B8, 0x3004, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 0, 4, 4, 0, 65535, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 0, NULL, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {"apci3500", 0x15B8, 0x3024, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 0, 0, 0, 4, 0, 4095, NULL, &range_apci3XXX_ao, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 0, 0, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, 
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, #endif }; #define n_boardtypes (sizeof(boardtypes)/sizeof(struct addi_board)) struct comedi_driver driver_addi = { .driver_name = "addi_common", .module = THIS_MODULE, .attach = i_ADDI_Attach, .detach = i_ADDI_Detach, .num_names = n_boardtypes, .board_name = &boardtypes[0].pc_DriverName, .offset = sizeof(struct addi_board), }; COMEDI_PCI_INITCLEANUP(driver_addi, addi_apci_tbl); /* +----------------------------------------------------------------------------+ | Function name :static int i_ADDI_Attach(struct comedi_device *dev, | | struct comedi_devconfig *it) | | | +----------------------------------------------------------------------------+ | Task :Detects the card. | | Configure the driver for a particular board. | | This function does all the initializations and memory | | allocation of data structures for the driver. 
| +----------------------------------------------------------------------------+ | Input Parameters :struct comedi_device *dev | | struct comedi_devconfig *it | | | +----------------------------------------------------------------------------+ | Return Value : 0 | | | +----------------------------------------------------------------------------+ */ static int i_ADDI_Attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; int ret, pages, i, n_subdevices; unsigned int dw_Dummy; resource_size_t io_addr[5]; unsigned int irq; resource_size_t iobase_a, iobase_main, iobase_addon, iobase_reserved; struct pcilst_struct *card = NULL; unsigned char pci_bus, pci_slot, pci_func; int i_Dma = 0; static char c_Identifier[150]; sprintf(c_Identifier, "Addi-Data GmbH Comedi %s", this_board->pc_DriverName); ret = alloc_private(dev, sizeof(struct addi_private)); if (ret < 0) return -ENOMEM; if (!pci_list_builded) { v_pci_card_list_init(this_board->i_VendorId, 1); /* 1 for displaying the list.. 
*/ pci_list_builded = 1; } /* printk("comedi%d: addi_common: board=%s",dev->minor,this_board->pc_DriverName); */ if ((this_board->i_Dma) && (it->options[2] == 0)) { i_Dma = 1; } card = ptr_select_and_alloc_pci_card(this_board->i_VendorId, this_board->i_DeviceId, it->options[0], it->options[1], i_Dma); if (card == NULL) return -EIO; devpriv->allocated = 1; if ((i_pci_card_data(card, &pci_bus, &pci_slot, &pci_func, &io_addr[0], &irq)) < 0) { i_pci_card_free(card); printk(" - Can't get AMCC data!\n"); return -EIO; } iobase_a = io_addr[0]; iobase_main = io_addr[1]; iobase_addon = io_addr[2]; iobase_reserved = io_addr[3]; printk("\nBus %d: Slot %d: Funct%d\nBase0: 0x%8llx\nBase1: 0x%8llx\nBase2: 0x%8llx\nBase3: 0x%8llx\n", pci_bus, pci_slot, pci_func, (unsigned long long)io_addr[0], (unsigned long long)io_addr[1], (unsigned long long)io_addr[2], (unsigned long long)io_addr[3]); if ((this_board->pc_EepromChip == NULL) || (strcmp(this_board->pc_EepromChip, ADDIDATA_9054) != 0)) { /************************************/ /* Test if more that 1 address used */ /************************************/ if (this_board->i_IorangeBase1 != 0) { dev->iobase = (unsigned long)iobase_main; /* DAQ base address... */ } else { dev->iobase = (unsigned long)iobase_a; /* DAQ base address... */ } dev->board_name = this_board->pc_DriverName; devpriv->amcc = card; devpriv->iobase = (int) dev->iobase; devpriv->i_IobaseAmcc = (int) iobase_a; /* AMCC base address... */ devpriv->i_IobaseAddon = (int) iobase_addon; /* ADD ON base address.... 
*/ devpriv->i_IobaseReserved = (int) iobase_reserved; devpriv->ps_BoardInfo = this_board; } else { dev->board_name = this_board->pc_DriverName; dev->iobase = (unsigned long)io_addr[2]; devpriv->amcc = card; devpriv->iobase = (int) io_addr[2]; devpriv->ps_BoardInfo = this_board; devpriv->i_IobaseReserved = (int) io_addr[3]; printk("\nioremap begin"); devpriv->dw_AiBase = (unsigned long) ioremap(io_addr[3], this_board->i_IorangeBase3); printk("\nioremap end"); } /* ## */ if (irq > 0) { if (request_irq(irq, v_ADDI_Interrupt, IRQF_SHARED, c_Identifier, dev) < 0) { printk(", unable to allocate IRQ %u, DISABLING IT", irq); irq = 0; /* Can't use IRQ */ } else { printk("\nirq=%u", irq); } } else { printk(", IRQ disabled"); } printk("\nOption %d %d %d\n", it->options[0], it->options[1], it->options[2]); dev->irq = irq; /* Read eepeom and fill addi_board Structure */ if (this_board->i_PCIEeprom) { printk("\nPCI Eeprom used"); if (!(strcmp(this_board->pc_EepromChip, "S5920"))) { /* Set 3 wait stait */ if (!(strcmp(this_board->pc_DriverName, "apci035"))) { outl(0x80808082, devpriv->i_IobaseAmcc + 0x60); } else { outl(0x83838383, devpriv->i_IobaseAmcc + 0x60); } /* Enable the interrupt for the controler */ dw_Dummy = inl(devpriv->i_IobaseAmcc + 0x38); outl(dw_Dummy | 0x2000, devpriv->i_IobaseAmcc + 0x38); printk("\nEnable the interrupt for the controler"); } printk("\nRead Eeprom"); i_EepromReadMainHeader(io_addr[0], this_board->pc_EepromChip, dev); } else { printk("\nPCI Eeprom unused"); } if (it->options[2] > 0) { devpriv->us_UseDma = ADDI_DISABLE; } else { devpriv->us_UseDma = ADDI_ENABLE; } if (this_board->i_Dma) { printk("\nDMA used"); if (devpriv->us_UseDma == ADDI_ENABLE) { /* alloc DMA buffers */ devpriv->b_DmaDoubleBuffer = 0; for (i = 0; i < 2; i++) { for (pages = 4; pages >= 0; pages--) { devpriv->ul_DmaBufferVirtual[i] = (void *) __get_free_pages(GFP_KERNEL, pages); if (devpriv->ul_DmaBufferVirtual[i]) break; } if (devpriv->ul_DmaBufferVirtual[i]) { 
devpriv->ui_DmaBufferPages[i] = pages; devpriv->ui_DmaBufferSize[i] = PAGE_SIZE * pages; devpriv->ui_DmaBufferSamples[i] = devpriv-> ui_DmaBufferSize[i] >> 1; devpriv->ul_DmaBufferHw[i] = virt_to_bus((void *)devpriv-> ul_DmaBufferVirtual[i]); } } if (!devpriv->ul_DmaBufferVirtual[0]) { printk (", Can't allocate DMA buffer, DMA disabled!"); devpriv->us_UseDma = ADDI_DISABLE; } if (devpriv->ul_DmaBufferVirtual[1]) { devpriv->b_DmaDoubleBuffer = 1; } } if ((devpriv->us_UseDma == ADDI_ENABLE)) { printk("\nDMA ENABLED\n"); } else { printk("\nDMA DISABLED\n"); } } if (!strcmp(this_board->pc_DriverName, "apci1710")) { #ifdef CONFIG_APCI_1710 i_ADDI_AttachPCI1710(dev); /* save base address */ devpriv->s_BoardInfos.ui_Address = io_addr[2]; #endif } else { /* Update-0.7.57->0.7.68dev->n_subdevices = 7; */ n_subdevices = 7; ret = alloc_subdevices(dev, n_subdevices); if (ret < 0) return ret; /* Allocate and Initialise AI Subdevice Structures */ s = dev->subdevices + 0; if ((this_board->i_NbrAiChannel) || (this_board->i_NbrAiChannelDiff)) { dev->read_subdev = s; s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_GROUND | SDF_DIFF; if (this_board->i_NbrAiChannel) { s->n_chan = this_board->i_NbrAiChannel; devpriv->b_SingelDiff = 0; } else { s->n_chan = this_board->i_NbrAiChannelDiff; devpriv->b_SingelDiff = 1; } s->maxdata = this_board->i_AiMaxdata; s->len_chanlist = this_board->i_AiChannelList; s->range_table = this_board->pr_AiRangelist; /* Set the initialisation flag */ devpriv->b_AiInitialisation = 1; s->insn_config = this_board->i_hwdrv_InsnConfigAnalogInput; s->insn_read = this_board->i_hwdrv_InsnReadAnalogInput; s->insn_write = this_board->i_hwdrv_InsnWriteAnalogInput; s->insn_bits = this_board->i_hwdrv_InsnBitsAnalogInput; s->do_cmdtest = this_board->i_hwdrv_CommandTestAnalogInput; s->do_cmd = this_board->i_hwdrv_CommandAnalogInput; s->cancel = this_board->i_hwdrv_CancelAnalogInput; } else { s->type = COMEDI_SUBD_UNUSED; } /* Allocate and 
Initialise AO Subdevice Structures */ s = dev->subdevices + 1; if (this_board->i_NbrAoChannel) { s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON; s->n_chan = this_board->i_NbrAoChannel; s->maxdata = this_board->i_AoMaxdata; s->len_chanlist = this_board->i_NbrAoChannel; s->range_table = this_board->pr_AoRangelist; s->insn_config = this_board->i_hwdrv_InsnConfigAnalogOutput; s->insn_write = this_board->i_hwdrv_InsnWriteAnalogOutput; } else { s->type = COMEDI_SUBD_UNUSED; } /* Allocate and Initialise DI Subdevice Structures */ s = dev->subdevices + 2; if (this_board->i_NbrDiChannel) { s->type = COMEDI_SUBD_DI; s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_COMMON; s->n_chan = this_board->i_NbrDiChannel; s->maxdata = 1; s->len_chanlist = this_board->i_NbrDiChannel; s->range_table = &range_digital; s->io_bits = 0; /* all bits input */ s->insn_config = this_board->i_hwdrv_InsnConfigDigitalInput; s->insn_read = this_board->i_hwdrv_InsnReadDigitalInput; s->insn_write = this_board->i_hwdrv_InsnWriteDigitalInput; s->insn_bits = this_board->i_hwdrv_InsnBitsDigitalInput; } else { s->type = COMEDI_SUBD_UNUSED; } /* Allocate and Initialise DO Subdevice Structures */ s = dev->subdevices + 3; if (this_board->i_NbrDoChannel) { s->type = COMEDI_SUBD_DO; s->subdev_flags = SDF_READABLE | SDF_WRITEABLE | SDF_GROUND | SDF_COMMON; s->n_chan = this_board->i_NbrDoChannel; s->maxdata = this_board->i_DoMaxdata; s->len_chanlist = this_board->i_NbrDoChannel; s->range_table = &range_digital; s->io_bits = 0xf; /* all bits output */ s->insn_config = this_board->i_hwdrv_InsnConfigDigitalOutput; /* for digital output memory.. 
*/ s->insn_write = this_board->i_hwdrv_InsnWriteDigitalOutput; s->insn_bits = this_board->i_hwdrv_InsnBitsDigitalOutput; s->insn_read = this_board->i_hwdrv_InsnReadDigitalOutput; } else { s->type = COMEDI_SUBD_UNUSED; } /* Allocate and Initialise Timer Subdevice Structures */ s = dev->subdevices + 4; if (this_board->i_Timer) { s->type = COMEDI_SUBD_TIMER; s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON; s->n_chan = 1; s->maxdata = 0; s->len_chanlist = 1; s->range_table = &range_digital; s->insn_write = this_board->i_hwdrv_InsnWriteTimer; s->insn_read = this_board->i_hwdrv_InsnReadTimer; s->insn_config = this_board->i_hwdrv_InsnConfigTimer; s->insn_bits = this_board->i_hwdrv_InsnBitsTimer; } else { s->type = COMEDI_SUBD_UNUSED; } /* Allocate and Initialise TTL */ s = dev->subdevices + 5; if (this_board->i_NbrTTLChannel) { s->type = COMEDI_SUBD_TTLIO; s->subdev_flags = SDF_WRITEABLE | SDF_READABLE | SDF_GROUND | SDF_COMMON; s->n_chan = this_board->i_NbrTTLChannel; s->maxdata = 1; s->io_bits = 0; /* all bits input */ s->len_chanlist = this_board->i_NbrTTLChannel; s->range_table = &range_digital; s->insn_config = this_board->i_hwdr_ConfigInitTTLIO; s->insn_bits = this_board->i_hwdr_ReadTTLIOBits; s->insn_read = this_board->i_hwdr_ReadTTLIOAllPortValue; s->insn_write = this_board->i_hwdr_WriteTTLIOChlOnOff; } else { s->type = COMEDI_SUBD_UNUSED; } /* EEPROM */ s = dev->subdevices + 6; if (this_board->i_PCIEeprom) { s->type = COMEDI_SUBD_MEMORY; s->subdev_flags = SDF_READABLE | SDF_INTERNAL; s->n_chan = 256; s->maxdata = 0xffff; s->insn_read = i_ADDIDATA_InsnReadEeprom; } else { s->type = COMEDI_SUBD_UNUSED; } } printk("\ni_ADDI_Attach end\n"); i_ADDI_Reset(dev); devpriv->b_ValidDriver = 1; return 0; } /* +----------------------------------------------------------------------------+ | Function name : static int i_ADDI_Detach(struct comedi_device *dev) | | | | | +----------------------------------------------------------------------------+ | Task : Deallocates 
resources of the addi_common driver | | Free the DMA buffers, unregister irq. | | | +----------------------------------------------------------------------------+ | Input Parameters : struct comedi_device *dev | | | | | +----------------------------------------------------------------------------+ | Return Value : 0 | | | +----------------------------------------------------------------------------+ */ static int i_ADDI_Detach(struct comedi_device *dev) { if (dev->private) { if (devpriv->b_ValidDriver) { i_ADDI_Reset(dev); } if (dev->irq) { free_irq(dev->irq, dev); } if ((devpriv->ps_BoardInfo->pc_EepromChip == NULL) || (strcmp(devpriv->ps_BoardInfo->pc_EepromChip, ADDIDATA_9054) != 0)) { if (devpriv->allocated) { i_pci_card_free(devpriv->amcc); } if (devpriv->ul_DmaBufferVirtual[0]) { free_pages((unsigned long)devpriv-> ul_DmaBufferVirtual[0], devpriv->ui_DmaBufferPages[0]); } if (devpriv->ul_DmaBufferVirtual[1]) { free_pages((unsigned long)devpriv-> ul_DmaBufferVirtual[1], devpriv->ui_DmaBufferPages[1]); } } else { iounmap((void *)devpriv->dw_AiBase); if (devpriv->allocated) { i_pci_card_free(devpriv->amcc); } } if (pci_list_builded) { /* v_pci_card_list_cleanup(PCI_VENDOR_ID_AMCC); */ v_pci_card_list_cleanup(this_board->i_VendorId); pci_list_builded = 0; } } return 0; } /* +----------------------------------------------------------------------------+ | Function name : static int i_ADDI_Reset(struct comedi_device *dev) | | | +----------------------------------------------------------------------------+ | Task : Disables all interrupts, Resets digital output to low, | | Set all analog output to low | | | +----------------------------------------------------------------------------+ | Input Parameters : struct comedi_device *dev | | | | | +----------------------------------------------------------------------------+ | Return Value : 0 | | | +----------------------------------------------------------------------------+ */ static int i_ADDI_Reset(struct comedi_device 
*dev) { this_board->i_hwdrv_Reset(dev); return 0; } /* Interrupt function */ /* +----------------------------------------------------------------------------+ | Function name : | |static void v_ADDI_Interrupt(int irq, void *d) | | | +----------------------------------------------------------------------------+ | Task : Registerd interrupt routine | | | +----------------------------------------------------------------------------+ | Input Parameters : int irq | | | | | +----------------------------------------------------------------------------+ | Return Value : | | | +----------------------------------------------------------------------------+ */ static irqreturn_t v_ADDI_Interrupt(int irq, void *d) { struct comedi_device *dev = d; this_board->v_hwdrv_Interrupt(irq, d); return IRQ_RETVAL(1); } /* EEPROM Read Function */ /* +----------------------------------------------------------------------------+ | Function name : | |INT i_ADDIDATA_InsnReadEeprom(struct comedi_device *dev,struct comedi_subdevice *s, struct comedi_insn *insn,unsigned int *data) | | +----------------------------------------------------------------------------+ | Task : Read 256 words from EEPROM | | | +----------------------------------------------------------------------------+ | Input Parameters :(struct comedi_device *dev,struct comedi_subdevice *s, struct comedi_insn *insn,unsigned int *data) | | | | | +----------------------------------------------------------------------------+ | Return Value : | | | +----------------------------------------------------------------------------+ */ static int i_ADDIDATA_InsnReadEeprom(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned short w_Data; unsigned short w_Address; w_Address = CR_CHAN(insn->chanspec); /* address to be read as 0,1,2,3...255 */ w_Data = w_EepromReadWord(devpriv->i_IobaseAmcc, this_board->pc_EepromChip, 0x100 + (2 * w_Address)); data[0] = w_Data; /* multiplied by 2 
bcozinput will be like 0,1,2...255 */ return insn->n; }
gpl-2.0
MichaelQQ/Linux-PE
drivers/iio/inkern.c
838
13165
/*
 * The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

/* One registered consumer mapping; lives on iio_map_list. */
struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

/*
 * iio_map_array_register() - add a NULL-terminated array of consumer
 * mappings for @indio_dev to the global map list.
 *
 * Returns 0 on success or -ENOMEM on allocation failure. On failure,
 * any entries from @maps that were already added are removed again so
 * the list is never left partially registered (the previous behaviour
 * left earlier entries on the list, which a subsequent unregister of a
 * never-registered device could not clean up).
 */
int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	int i = 0, ret = 0;
	struct iio_map_internal *mapi;

	if (maps == NULL)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name != NULL) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (mapi == NULL) {
			ret = -ENOMEM;
			goto error_unwind;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add(&mapi->l, &iio_map_list);
		i++;
	}
	mutex_unlock(&iio_map_list_lock);
	return 0;

error_unwind:
	/*
	 * We hold the lock, and list_add() prepends, so the first @i
	 * entries on the list are exactly the ones added above.
	 */
	while (i--) {
		mapi = list_first_entry(&iio_map_list,
					struct iio_map_internal, l);
		list_del(&mapi->l);
		kfree(mapi);
	}
	mutex_unlock(&iio_map_list_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);

/*
 * Remove all map entries associated with the given iio device.
 * Returns 0 if at least one entry was removed, -ENODEV otherwise.
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
	int ret = -ENODEV;
	struct iio_map_internal *mapi;
	struct list_head *pos, *tmp;

	mutex_lock(&iio_map_list_lock);
	list_for_each_safe(pos, tmp, &iio_map_list) {
		mapi = list_entry(pos, struct iio_map_internal, l);
		if (indio_dev == mapi->indio_dev) {
			list_del(&mapi->l);
			kfree(mapi);
			ret = 0;
		}
	}
	mutex_unlock(&iio_map_list_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);

/* Look up a channel spec by its datasheet name; NULL if not found. */
static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}

#ifdef CONFIG_OF

static int iio_dev_node_match(struct device *dev, void *data)
{
	return dev->of_node == data && dev->type == &iio_device_type;
}

/*
 * Resolve the @index-th "io-channels" phandle of @np into @channel.
 * Takes a reference on the provider device; the caller owns it on
 * success. Returns 0, -EPROBE_DEFER if the provider is not bound yet,
 * or a negative errno.
 */
static int __of_iio_channel_get(struct iio_channel *channel,
				struct device_node *np, int index)
{
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;
	struct of_phandle_args iiospec;

	err = of_parse_phandle_with_args(np, "io-channels",
					 "#io-channel-cells",
					 index, &iiospec);
	if (err)
		return err;

	idev = bus_find_device(&iio_bus_type, NULL, iiospec.np,
			       iio_dev_node_match);
	of_node_put(iiospec.np);
	if (idev == NULL)
		return -EPROBE_DEFER;

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	/* A zero-cell specifier means "channel 0" of the provider. */
	index = iiospec.args_count ? iiospec.args[0] : 0;
	if (index >= indio_dev->num_channels) {
		err = -EINVAL;
		goto err_put;
	}
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	iio_device_put(indio_dev);
	return err;
}

static struct iio_channel *of_iio_channel_get(struct device_node *np,
					      int index)
{
	struct iio_channel *channel;
	int err;

	if (index < 0)
		return ERR_PTR(-EINVAL);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL)
		return ERR_PTR(-ENOMEM);

	err = __of_iio_channel_get(channel, np, index);
	if (err)
		goto err_free_channel;

	return channel;

err_free_channel:
	kfree(channel);
	return ERR_PTR(err);
}

static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
						      const char *name)
{
	struct iio_channel *chan = NULL;

	/* Walk up the tree of devices looking for a matching iio channel */
	while (np) {
		int index = 0;

		/*
		 * For named iio channels, first look up the name in the
		 * "io-channel-names" property.  If it cannot be found, the
		 * index will be an error code, and of_iio_channel_get()
		 * will fail.
		 */
		if (name)
			index = of_property_match_string(np, "io-channel-names",
							 name);
		chan = of_iio_channel_get(np, index);
		if (!IS_ERR(chan))
			break;
		else if (name && index >= 0) {
			pr_err("ERROR: could not get IIO channel %s:%s(%i)\n",
				np->full_name, name ? name : "", index);
			return chan;
		}

		/*
		 * No matching IIO channel found on this node.
		 * If the parent node has a "io-channel-ranges" property,
		 * then we can try one of its channels.
		 */
		np = np->parent;
		if (np && !of_get_property(np, "io-channel-ranges", NULL))
			break;
	}
	return chan;
}

static struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *chans;
	int i, mapind, nummaps = 0;
	int ret;

	/* Count the "io-channels" entries by probing until failure. */
	do {
		ret = of_parse_phandle_with_args(dev->of_node,
						 "io-channels",
						 "#io-channel-cells",
						 nummaps, NULL);
		if (ret < 0)
			break;
	} while (++nummaps);

	if (nummaps == 0)	/* no error, return NULL to search map table */
		return NULL;

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL)
		return ERR_PTR(-ENOMEM);

	/* Search for OF matches */
	for (mapind = 0; mapind < nummaps; mapind++) {
		ret = __of_iio_channel_get(&chans[mapind], dev->of_node,
					   mapind);
		if (ret)
			goto error_free_chans;
	}
	return chans;

error_free_chans:
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
	return ERR_PTR(ret);
}

#else /* CONFIG_OF */

static inline struct iio_channel *
of_iio_channel_get_by_name(struct device_node *np, const char *name)
{
	return NULL;
}

static inline struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	return NULL;
}

#endif /* CONFIG_OF */

/*
 * Look a consumer channel up in the static map table by consumer
 * device name and/or channel name. Takes a reference on the provider
 * device, which the caller must drop via iio_channel_release().
 */
static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;
	int err;

	if (name == NULL && channel_name == NULL)
		return ERR_PTR(-ENODEV);

	/* first find matching entry the channel map */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		iio_device_get(c->indio_dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (c == NULL)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (channel->channel == NULL) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return channel;

error_no_chan:
	kfree(channel);
error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}

/* Try device tree first, then fall back to the static map table. */
struct iio_channel *iio_channel_get(struct device *dev,
				    const char *channel_name)
{
	const char *name = dev ? dev_name(dev) : NULL;
	struct iio_channel *channel;

	if (dev) {
		channel = of_iio_channel_get_by_name(dev->of_node,
						     channel_name);
		if (channel != NULL)
			return channel;
	}
	return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);

void iio_channel_release(struct iio_channel *channel)
{
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);

/*
 * iio_channel_get_all() - get every channel mapped to @dev.
 *
 * Returns a NULL-terminated array (freed with
 * iio_channel_release_all()), or an ERR_PTR. A device reference is
 * held for each returned entry.
 */
struct iio_channel *iio_channel_get_all(struct device *dev)
{
	const char *name;
	struct iio_channel *chans;
	struct iio_map_internal *c = NULL;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (dev == NULL)
		return ERR_PTR(-EINVAL);

	chans = of_iio_channel_get_all(dev);
	if (chans)
		return chans;

	name = dev_name(dev);

	mutex_lock(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		nummaps++;
	}

	if (nummaps == 0) {
		ret = -ENODEV;
		goto error_ret;
	}

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (chans[mapind].channel == NULL) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}
	mutex_unlock(&iio_map_list_lock);

	return chans;

error_free_chans:
	/*
	 * Only the first @mapind entries hold a device reference; the
	 * failing entry (if any) was assigned but never "got", and the
	 * rest are zeroed. Putting all @nummaps entries would underflow
	 * the provider refcounts.
	 */
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);

void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);

/* Common raw-read helper; val2 may be NULL when the caller wants one value. */
static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
			    enum iio_chan_info_enum info)
{
	int unused;

	if (val2 == NULL)
		val2 = &unused;

	return chan->indio_dev->info->read_raw(chan->indio_dev, chan->channel,
					       val, val2, info);
}

int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);

/*
 * Apply the channel's offset and scale to @raw, producing @processed
 * multiplied by @scale. Caller must hold info_exist_lock.
 */
static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
	int raw, int *processed, unsigned int scale)
{
	int scale_type, scale_val, scale_val2, offset;
	s64 raw64 = raw;
	int ret;

	/* Offset is optional; ignore the error if the channel has none. */
	ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
	if (ret >= 0)
		raw64 += offset;

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
					IIO_CHAN_INFO_SCALE);
	if (scale_type < 0)
		return scale_type;

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
	int *processed, unsigned int scale)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
						    scale);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);

int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			goto err_unlock;
		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val,
							    1);
	}

err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);

int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, val2, IIO_CHAN_INFO_SCALE);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);

int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	int ret = 0;
	/* Need to verify underlying driver has not gone away */

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	*type = chan->channel->type;
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);
gpl-2.0
friedrich420/SPRINT-Note-4-AEL-Kernel-Lollipop-Source
drivers/ata/sata_nv.c
2374
69757
/* * sata_nv.c - NVIDIA nForce SATA * * Copyright 2004 NVIDIA Corp. All rights reserved. * Copyright 2004 Andrew Chew * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * * * libata documentation is available via 'make {ps|pdf}docs', * as Documentation/DocBook/libata.* * * No hardware documentation available outside of NVIDIA. * This driver programs the NVIDIA SATA controller in a similar * fashion as with other PCI IDE BMDMA controllers, with a few * NV-specific details such as register offsets, SATA phy location, * hotplug info, etc. * * CK804/MCP04 controllers support an alternate programming interface * similar to the ADMA specification (with some modifications). * This allows the use of NCQ. Non-DMA-mapped ATA commands are still * sent through the legacy interface. 
* */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/gfp.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <linux/libata.h> #define DRV_NAME "sata_nv" #define DRV_VERSION "3.5" #define NV_ADMA_DMA_BOUNDARY 0xffffffffUL enum { NV_MMIO_BAR = 5, NV_PORTS = 2, NV_PIO_MASK = ATA_PIO4, NV_MWDMA_MASK = ATA_MWDMA2, NV_UDMA_MASK = ATA_UDMA6, NV_PORT0_SCR_REG_OFFSET = 0x00, NV_PORT1_SCR_REG_OFFSET = 0x40, /* INT_STATUS/ENABLE */ NV_INT_STATUS = 0x10, NV_INT_ENABLE = 0x11, NV_INT_STATUS_CK804 = 0x440, NV_INT_ENABLE_CK804 = 0x441, /* INT_STATUS/ENABLE bits */ NV_INT_DEV = 0x01, NV_INT_PM = 0x02, NV_INT_ADDED = 0x04, NV_INT_REMOVED = 0x08, NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */ NV_INT_ALL = 0x0f, NV_INT_MASK = NV_INT_DEV | NV_INT_ADDED | NV_INT_REMOVED, /* INT_CONFIG */ NV_INT_CONFIG = 0x12, NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI // For PCI config register 20 NV_MCP_SATA_CFG_20 = 0x50, NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04, NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17), NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16), NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14), NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12), NV_ADMA_MAX_CPBS = 32, NV_ADMA_CPB_SZ = 128, NV_ADMA_APRD_SZ = 16, NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) / NV_ADMA_APRD_SZ, NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5, NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ, NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS * (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ), /* BAR5 offset to ADMA general registers */ NV_ADMA_GEN = 0x400, NV_ADMA_GEN_CTL = 0x00, NV_ADMA_NOTIFIER_CLEAR = 0x30, /* BAR5 offset to ADMA ports */ NV_ADMA_PORT = 0x480, /* size of ADMA port register space */ NV_ADMA_PORT_SIZE = 0x100, /* ADMA port registers */ NV_ADMA_CTL = 0x40, NV_ADMA_CPB_COUNT = 0x42, NV_ADMA_NEXT_CPB_IDX = 0x43, NV_ADMA_STAT = 0x44, 
NV_ADMA_CPB_BASE_LOW = 0x48, NV_ADMA_CPB_BASE_HIGH = 0x4C, NV_ADMA_APPEND = 0x50, NV_ADMA_NOTIFIER = 0x68, NV_ADMA_NOTIFIER_ERROR = 0x6C, /* NV_ADMA_CTL register bits */ NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0), NV_ADMA_CTL_CHANNEL_RESET = (1 << 5), NV_ADMA_CTL_GO = (1 << 7), NV_ADMA_CTL_AIEN = (1 << 8), NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11), NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12), /* CPB response flag bits */ NV_CPB_RESP_DONE = (1 << 0), NV_CPB_RESP_ATA_ERR = (1 << 3), NV_CPB_RESP_CMD_ERR = (1 << 4), NV_CPB_RESP_CPB_ERR = (1 << 7), /* CPB control flag bits */ NV_CPB_CTL_CPB_VALID = (1 << 0), NV_CPB_CTL_QUEUE = (1 << 1), NV_CPB_CTL_APRD_VALID = (1 << 2), NV_CPB_CTL_IEN = (1 << 3), NV_CPB_CTL_FPDMA = (1 << 4), /* APRD flags */ NV_APRD_WRITE = (1 << 1), NV_APRD_END = (1 << 2), NV_APRD_CONT = (1 << 3), /* NV_ADMA_STAT flags */ NV_ADMA_STAT_TIMEOUT = (1 << 0), NV_ADMA_STAT_HOTUNPLUG = (1 << 1), NV_ADMA_STAT_HOTPLUG = (1 << 2), NV_ADMA_STAT_CPBERR = (1 << 4), NV_ADMA_STAT_SERROR = (1 << 5), NV_ADMA_STAT_CMD_COMPLETE = (1 << 6), NV_ADMA_STAT_IDLE = (1 << 8), NV_ADMA_STAT_LEGACY = (1 << 9), NV_ADMA_STAT_STOPPED = (1 << 10), NV_ADMA_STAT_DONE = (1 << 12), NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR | NV_ADMA_STAT_TIMEOUT, /* port flags */ NV_ADMA_PORT_REGISTER_MODE = (1 << 0), NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1), /* MCP55 reg offset */ NV_CTL_MCP55 = 0x400, NV_INT_STATUS_MCP55 = 0x440, NV_INT_ENABLE_MCP55 = 0x444, NV_NCQ_REG_MCP55 = 0x448, /* MCP55 */ NV_INT_ALL_MCP55 = 0xffff, NV_INT_PORT_SHIFT_MCP55 = 16, /* each port occupies 16 bits */ NV_INT_MASK_MCP55 = NV_INT_ALL_MCP55 & 0xfffd, /* SWNCQ ENABLE BITS*/ NV_CTL_PRI_SWNCQ = 0x02, NV_CTL_SEC_SWNCQ = 0x04, /* SW NCQ status bits*/ NV_SWNCQ_IRQ_DEV = (1 << 0), NV_SWNCQ_IRQ_PM = (1 << 1), NV_SWNCQ_IRQ_ADDED = (1 << 2), NV_SWNCQ_IRQ_REMOVED = (1 << 3), NV_SWNCQ_IRQ_BACKOUT = (1 << 4), NV_SWNCQ_IRQ_SDBFIS = (1 << 5), NV_SWNCQ_IRQ_DHREGFIS = (1 << 6), NV_SWNCQ_IRQ_DMASETUP = (1 << 7), NV_SWNCQ_IRQ_HOTPLUG = NV_SWNCQ_IRQ_ADDED 
| NV_SWNCQ_IRQ_REMOVED, }; /* ADMA Physical Region Descriptor - one SG segment */ struct nv_adma_prd { __le64 addr; __le32 len; u8 flags; u8 packet_len; __le16 reserved; }; enum nv_adma_regbits { CMDEND = (1 << 15), /* end of command list */ WNB = (1 << 14), /* wait-not-BSY */ IGN = (1 << 13), /* ignore this entry */ CS1n = (1 << (4 + 8)), /* std. PATA signals follow... */ DA2 = (1 << (2 + 8)), DA1 = (1 << (1 + 8)), DA0 = (1 << (0 + 8)), }; /* ADMA Command Parameter Block The first 5 SG segments are stored inside the Command Parameter Block itself. If there are more than 5 segments the remainder are stored in a separate memory area indicated by next_aprd. */ struct nv_adma_cpb { u8 resp_flags; /* 0 */ u8 reserved1; /* 1 */ u8 ctl_flags; /* 2 */ /* len is length of taskfile in 64 bit words */ u8 len; /* 3 */ u8 tag; /* 4 */ u8 next_cpb_idx; /* 5 */ __le16 reserved2; /* 6-7 */ __le16 tf[12]; /* 8-31 */ struct nv_adma_prd aprd[5]; /* 32-111 */ __le64 next_aprd; /* 112-119 */ __le64 reserved3; /* 120-127 */ }; struct nv_adma_port_priv { struct nv_adma_cpb *cpb; dma_addr_t cpb_dma; struct nv_adma_prd *aprd; dma_addr_t aprd_dma; void __iomem *ctl_block; void __iomem *gen_block; void __iomem *notifier_clear_block; u64 adma_dma_mask; u8 flags; int last_issue_ncq; }; struct nv_host_priv { unsigned long type; }; struct defer_queue { u32 defer_bits; unsigned int head; unsigned int tail; unsigned int tag[ATA_MAX_QUEUE]; }; enum ncq_saw_flag_list { ncq_saw_d2h = (1U << 0), ncq_saw_dmas = (1U << 1), ncq_saw_sdb = (1U << 2), ncq_saw_backout = (1U << 3), }; struct nv_swncq_port_priv { struct ata_bmdma_prd *prd; /* our SG list */ dma_addr_t prd_dma; /* and its DMA mapping */ void __iomem *sactive_block; void __iomem *irq_block; void __iomem *tag_block; u32 qc_active; unsigned int last_issue_tag; /* fifo circular queue to store deferral command */ struct defer_queue defer_queue; /* for NCQ interrupt analysis */ u32 dhfis_bits; u32 dmafis_bits; u32 sdbfis_bits; unsigned int 
ncq_flags; }; #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT))))) static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); #ifdef CONFIG_PM static int nv_pci_device_resume(struct pci_dev *pdev); #endif static void nv_ck804_host_stop(struct ata_host *host); static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance); static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance); static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance); static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); static int nv_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline); static void nv_nf2_freeze(struct ata_port *ap); static void nv_nf2_thaw(struct ata_port *ap); static void nv_ck804_freeze(struct ata_port *ap); static void nv_ck804_thaw(struct ata_port *ap); static int nv_adma_slave_config(struct scsi_device *sdev); static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc); static void nv_adma_qc_prep(struct ata_queued_cmd *qc); static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc); static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance); static void nv_adma_irq_clear(struct ata_port *ap); static int nv_adma_port_start(struct ata_port *ap); static void nv_adma_port_stop(struct ata_port *ap); #ifdef CONFIG_PM static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg); static int nv_adma_port_resume(struct ata_port *ap); #endif static void nv_adma_freeze(struct ata_port *ap); static void nv_adma_thaw(struct ata_port *ap); static void nv_adma_error_handler(struct ata_port *ap); static void nv_adma_host_stop(struct ata_host *host); static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc); static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf); static void nv_mcp55_thaw(struct ata_port *ap); static void nv_mcp55_freeze(struct 
ata_port *ap); static void nv_swncq_error_handler(struct ata_port *ap); static int nv_swncq_slave_config(struct scsi_device *sdev); static int nv_swncq_port_start(struct ata_port *ap); static void nv_swncq_qc_prep(struct ata_queued_cmd *qc); static void nv_swncq_fill_sg(struct ata_queued_cmd *qc); static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc); static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis); static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance); #ifdef CONFIG_PM static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg); static int nv_swncq_port_resume(struct ata_port *ap); #endif enum nv_host_type { GENERIC, NFORCE2, NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */ CK804, ADMA, MCP5x, SWNCQ, }; static const struct pci_device_id nv_pci_tbl[] = { { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 }, { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 }, { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 }, { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 }, { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 }, { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 }, { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 }, { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x }, { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x }, { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x }, { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x }, { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC }, { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC }, { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC }, { } /* terminate list */ }; static struct pci_driver nv_pci_driver = { .name = DRV_NAME, .id_table = nv_pci_tbl, .probe = nv_init_one, #ifdef CONFIG_PM 
.suspend = ata_pci_device_suspend, .resume = nv_pci_device_resume, #endif .remove = ata_pci_remove_one, }; static struct scsi_host_template nv_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct scsi_host_template nv_adma_sht = { ATA_NCQ_SHT(DRV_NAME), .can_queue = NV_ADMA_MAX_CPBS, .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN, .dma_boundary = NV_ADMA_DMA_BOUNDARY, .slave_configure = nv_adma_slave_config, }; static struct scsi_host_template nv_swncq_sht = { ATA_NCQ_SHT(DRV_NAME), .can_queue = ATA_MAX_QUEUE, .sg_tablesize = LIBATA_MAX_PRD, .dma_boundary = ATA_DMA_BOUNDARY, .slave_configure = nv_swncq_slave_config, }; /* * NV SATA controllers have various different problems with hardreset * protocol depending on the specific controller and device. * * GENERIC: * * bko11195 reports that link doesn't come online after hardreset on * generic nv's and there have been several other similar reports on * linux-ide. * * bko12351#c23 reports that warmplug on MCP61 doesn't work with * softreset. * * NF2/3: * * bko3352 reports nf2/3 controllers can't determine device signature * reliably after hardreset. The following thread reports detection * failure on cold boot with the standard debouncing timing. * * http://thread.gmane.org/gmane.linux.ide/34098 * * bko12176 reports that hardreset fails to bring up the link during * boot on nf2. * * CK804: * * For initial probing after boot and hot plugging, hardreset mostly * works fine on CK804 but curiously, reprobing on the initial port * by rescanning or rmmod/insmod fails to acquire the initial D2H Reg * FIS in somewhat undeterministic way. * * SWNCQ: * * bko12351 reports that when SWNCQ is enabled, for hotplug to work, * hardreset should be used and hardreset can't report proper * signature, which suggests that mcp5x is closer to nf2 as long as * reset quirkiness is concerned. * * bko12703 reports that boot probing fails for intel SSD with * hardreset. Link fails to come online. Softreset works fine. 
* * The failures are varied but the following patterns seem true for * all flavors. * * - Softreset during boot always works. * * - Hardreset during boot sometimes fails to bring up the link on * certain comibnations and device signature acquisition is * unreliable. * * - Hardreset is often necessary after hotplug. * * So, preferring softreset for boot probing and error handling (as * hardreset might bring down the link) but using hardreset for * post-boot probing should work around the above issues in most * cases. Define nv_hardreset() which only kicks in for post-boot * probing and use it for all variants. */ static struct ata_port_operations nv_generic_ops = { .inherits = &ata_bmdma_port_ops, .lost_interrupt = ATA_OP_NULL, .scr_read = nv_scr_read, .scr_write = nv_scr_write, .hardreset = nv_hardreset, }; static struct ata_port_operations nv_nf2_ops = { .inherits = &nv_generic_ops, .freeze = nv_nf2_freeze, .thaw = nv_nf2_thaw, }; static struct ata_port_operations nv_ck804_ops = { .inherits = &nv_generic_ops, .freeze = nv_ck804_freeze, .thaw = nv_ck804_thaw, .host_stop = nv_ck804_host_stop, }; static struct ata_port_operations nv_adma_ops = { .inherits = &nv_ck804_ops, .check_atapi_dma = nv_adma_check_atapi_dma, .sff_tf_read = nv_adma_tf_read, .qc_defer = ata_std_qc_defer, .qc_prep = nv_adma_qc_prep, .qc_issue = nv_adma_qc_issue, .sff_irq_clear = nv_adma_irq_clear, .freeze = nv_adma_freeze, .thaw = nv_adma_thaw, .error_handler = nv_adma_error_handler, .post_internal_cmd = nv_adma_post_internal_cmd, .port_start = nv_adma_port_start, .port_stop = nv_adma_port_stop, #ifdef CONFIG_PM .port_suspend = nv_adma_port_suspend, .port_resume = nv_adma_port_resume, #endif .host_stop = nv_adma_host_stop, }; static struct ata_port_operations nv_swncq_ops = { .inherits = &nv_generic_ops, .qc_defer = ata_std_qc_defer, .qc_prep = nv_swncq_qc_prep, .qc_issue = nv_swncq_qc_issue, .freeze = nv_mcp55_freeze, .thaw = nv_mcp55_thaw, .error_handler = nv_swncq_error_handler, #ifdef 
CONFIG_PM .port_suspend = nv_swncq_port_suspend, .port_resume = nv_swncq_port_resume, #endif .port_start = nv_swncq_port_start, }; struct nv_pi_priv { irq_handler_t irq_handler; struct scsi_host_template *sht; }; #define NV_PI_PRIV(_irq_handler, _sht) \ &(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht } static const struct ata_port_info nv_port_info[] = { /* generic */ { .flags = ATA_FLAG_SATA, .pio_mask = NV_PIO_MASK, .mwdma_mask = NV_MWDMA_MASK, .udma_mask = NV_UDMA_MASK, .port_ops = &nv_generic_ops, .private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht), }, /* nforce2/3 */ { .flags = ATA_FLAG_SATA, .pio_mask = NV_PIO_MASK, .mwdma_mask = NV_MWDMA_MASK, .udma_mask = NV_UDMA_MASK, .port_ops = &nv_nf2_ops, .private_data = NV_PI_PRIV(nv_nf2_interrupt, &nv_sht), }, /* ck804 */ { .flags = ATA_FLAG_SATA, .pio_mask = NV_PIO_MASK, .mwdma_mask = NV_MWDMA_MASK, .udma_mask = NV_UDMA_MASK, .port_ops = &nv_ck804_ops, .private_data = NV_PI_PRIV(nv_ck804_interrupt, &nv_sht), }, /* ADMA */ { .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ, .pio_mask = NV_PIO_MASK, .mwdma_mask = NV_MWDMA_MASK, .udma_mask = NV_UDMA_MASK, .port_ops = &nv_adma_ops, .private_data = NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht), }, /* MCP5x */ { .flags = ATA_FLAG_SATA, .pio_mask = NV_PIO_MASK, .mwdma_mask = NV_MWDMA_MASK, .udma_mask = NV_UDMA_MASK, .port_ops = &nv_generic_ops, .private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht), }, /* SWNCQ */ { .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ, .pio_mask = NV_PIO_MASK, .mwdma_mask = NV_MWDMA_MASK, .udma_mask = NV_UDMA_MASK, .port_ops = &nv_swncq_ops, .private_data = NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht), }, }; MODULE_AUTHOR("NVIDIA"); MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, nv_pci_tbl); MODULE_VERSION(DRV_VERSION); static bool adma_enabled; static bool swncq_enabled = 1; static bool msi_enabled; static void nv_adma_register_mode(struct ata_port *ap) { struct 
nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	/* Nothing to do if the port is already in register (legacy) mode. */
	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	/* Bounded poll (20 x 50ns) for the ADMA engine to report IDLE. */
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
			      status);

	/* Clear the GO bit to drop out of ADMA operation. */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	/* Bounded poll for the controller to confirm legacy mode. */
	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_warn(ap,
			      "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			      status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

/*
 * nv_adma_mode - switch a port from register (legacy) mode into ADMA mode.
 *
 * Sets the GO bit and polls (bounded, 20 x 50ns) until the LEGACY status
 * bit clears and IDLE asserts.  Must not be called while ATAPI setup has
 * pinned the port to legacy operation (WARN_ON below).
 * NOTE(review): callers appear to run with the port quiesced/locked --
 * confirm against call sites.
 */
static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	/* Already in ADMA mode?  Nothing to do. */
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	/* ATAPI setup forces legacy-only mode; switching to ADMA is a bug. */
	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	/* Wait for LEGACY to clear and IDLE to assert. */
	status = readw(mmio + NV_ADMA_STAT);
	while (((status & NV_ADMA_STAT_LEGACY) ||
	       !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_warn(ap,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

/*
 * nv_adma_slave_config - SCSI slave configuration hook for ADMA ports.
 *
 * On top of the generic ata_scsi_slave_config(), this restricts DMA
 * parameters to legacy-interface limits when an ATAPI device is attached
 * (ADMA cannot carry ATAPI commands, so those go through the legacy
 * 32-bit DMA path) and enables/disables ADMA for the port in PCI config
 * space accordingly.
 */
static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct nv_adma_port_priv *port0, *port1;
	struct scsi_device *sdev0, *sdev1;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	unsigned long segment_boundary, flags;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
/* Not a proper libata device, ignore */ return rc; spin_lock_irqsave(ap->lock, flags); if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) { /* * NVIDIA reports that ADMA mode does not support ATAPI commands. * Therefore ATAPI commands are sent through the legacy interface. * However, the legacy interface only supports 32-bit DMA. * Restrict DMA parameters as required by the legacy interface * when an ATAPI device is connected. */ segment_boundary = ATA_DMA_BOUNDARY; /* Subtract 1 since an extra entry may be needed for padding, see libata-scsi.c */ sg_tablesize = LIBATA_MAX_PRD - 1; /* Since the legacy DMA engine is in use, we need to disable ADMA on the port. */ adma_enable = 0; nv_adma_register_mode(ap); } else { segment_boundary = NV_ADMA_DMA_BOUNDARY; sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN; adma_enable = 1; } pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg); if (ap->port_no == 1) config_mask = NV_MCP_SATA_CFG_20_PORT1_EN | NV_MCP_SATA_CFG_20_PORT1_PWB_EN; else config_mask = NV_MCP_SATA_CFG_20_PORT0_EN | NV_MCP_SATA_CFG_20_PORT0_PWB_EN; if (adma_enable) { new_reg = current_reg | config_mask; pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE; } else { new_reg = current_reg & ~config_mask; pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE; } if (current_reg != new_reg) pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg); port0 = ap->host->ports[0]->private_data; port1 = ap->host->ports[1]->private_data; sdev0 = ap->host->ports[0]->link.device[0].sdev; sdev1 = ap->host->ports[1]->link.device[0].sdev; if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) || (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) { /** We have to set the DMA mask to 32-bit if either port is in ATAPI mode, since they are on the same PCI device which is used for DMA mapping. If we set the mask we also need to set the bounce limit on both ports to ensure that the block layer doesn't feed addresses that cause DMA mapping to choke. 
If either SCSI device is not allocated yet, it's OK since that port will discover its correct setting when it does get allocated. Note: Setting 32-bit mask should not fail. */ if (sdev0) blk_queue_bounce_limit(sdev0->request_queue, ATA_DMA_MASK); if (sdev1) blk_queue_bounce_limit(sdev1->request_queue, ATA_DMA_MASK); pci_set_dma_mask(pdev, ATA_DMA_MASK); } else { /** This shouldn't fail as it was set to this value before */ pci_set_dma_mask(pdev, pp->adma_dma_mask); if (sdev0) blk_queue_bounce_limit(sdev0->request_queue, pp->adma_dma_mask); if (sdev1) blk_queue_bounce_limit(sdev1->request_queue, pp->adma_dma_mask); } blk_queue_segment_boundary(sdev->request_queue, segment_boundary); blk_queue_max_segments(sdev->request_queue, sg_tablesize); ata_port_info(ap, "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n", (unsigned long long)*ap->host->dev->dma_mask, segment_boundary, sg_tablesize); spin_unlock_irqrestore(ap->lock, flags); return rc; } static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc) { struct nv_adma_port_priv *pp = qc->ap->private_data; return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE); } static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf) { /* Other than when internal or pass-through commands are executed, the only time this function will be called in ADMA mode will be if a command fails. In the failure case we don't care about going into register mode with ADMA commands pending, as the commands will all shortly be aborted anyway. We assume that NCQ commands are not issued via passthrough, which is the only way that switching into ADMA mode could abort outstanding commands. 
*/
	nv_adma_register_mode(ap);
	ata_sff_tf_read(ap, tf);
}

/*
 * nv_adma_tf_to_cpb - marshal an ATA taskfile into CPB register entries.
 *
 * Each entry is a 16-bit little-endian (register << 8 | value) pair.
 * LBA48 commands emit the HOB values first; WNB is OR'd into the first
 * entry and CMDEND into the command entry.  Unused slots are padded with
 * IGN up to the fixed CPB size of 12 entries.  Returns the number of
 * entries written (always 12 after padding).
 */
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if (tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			/* HOB (high-order byte) registers first for LBA48 */
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) |
						 tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) |
						 tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) |
						 tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) |
						 tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) |
						 tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) |
						 tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) |
						 tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	/* The command register goes last and terminates the sequence. */
	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	/* Pad remaining slots; the CPB taskfile area is 12 entries. */
	while (idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}

/*
 * nv_adma_check_cpb - inspect one CPB's response flags after an interrupt.
 *
 * Returns 1 if the command completed successfully, 0 if it is still in
 * flight, and -1 on error -- in which case the port has been frozen or
 * aborted and EH will determine what actually failed.  @force_err takes
 * the error path even when no error flag is set (notifier reported an
 * error for this tag).
 */
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, "ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, "CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, "CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags?
*/ ata_ehi_push_desc(ehi, "unknown"); ehi->err_mask |= AC_ERR_OTHER; freeze = 1; } /* Kill all commands. EH will determine what actually failed. */ if (freeze) ata_port_freeze(ap); else ata_port_abort(ap); return -1; } if (likely(flags & NV_CPB_RESP_DONE)) return 1; return 0; } static int nv_host_intr(struct ata_port *ap, u8 irq_stat) { struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); /* freeze if hotplugged */ if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) { ata_port_freeze(ap); return 1; } /* bail out if not our interrupt */ if (!(irq_stat & NV_INT_DEV)) return 0; /* DEV interrupt w/ no active qc? */ if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { ata_sff_check_status(ap); return 1; } /* handle interrupt */ return ata_bmdma_port_intr(ap, qc); } static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) { struct ata_host *host = dev_instance; int i, handled = 0; u32 notifier_clears[2]; spin_lock(&host->lock); for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; struct nv_adma_port_priv *pp = ap->private_data; void __iomem *mmio = pp->ctl_block; u16 status; u32 gen_ctl; u32 notifier, notifier_error; notifier_clears[i] = 0; /* if ADMA is disabled, use standard ata interrupt handler */ if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) { u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) >> (NV_INT_PORT_SHIFT * i); handled += nv_host_intr(ap, irq_stat); continue; } /* if in ATA register mode, check for standard interrupts */ if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) { u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) >> (NV_INT_PORT_SHIFT * i); if (ata_tag_valid(ap->link.active_tag)) /** NV_INT_DEV indication seems unreliable at times at least in ADMA mode. Force it on always when a command is active, to prevent losing interrupts. 
*/ irq_stat |= NV_INT_DEV; handled += nv_host_intr(ap, irq_stat); } notifier = readl(mmio + NV_ADMA_NOTIFIER); notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR); notifier_clears[i] = notifier | notifier_error; gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier && !notifier_error) /* Nothing to do */ continue; status = readw(mmio + NV_ADMA_STAT); /* * Clear status. Ensure the controller sees the * clearing before we start looking at any of the CPB * statuses, so that any CPB completions after this * point in the handler will raise another interrupt. */ writew(status, mmio + NV_ADMA_STAT); readw(mmio + NV_ADMA_STAT); /* flush posted write */ rmb(); handled++; /* irq handled if we got here */ /* freeze if hotplugged or controller error */ if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | NV_ADMA_STAT_HOTUNPLUG | NV_ADMA_STAT_TIMEOUT | NV_ADMA_STAT_SERROR))) { struct ata_eh_info *ehi = &ap->link.eh_info; ata_ehi_clear_desc(ehi); __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status); if (status & NV_ADMA_STAT_TIMEOUT) { ehi->err_mask |= AC_ERR_SYSTEM; ata_ehi_push_desc(ehi, "timeout"); } else if (status & NV_ADMA_STAT_HOTPLUG) { ata_ehi_hotplugged(ehi); ata_ehi_push_desc(ehi, "hotplug"); } else if (status & NV_ADMA_STAT_HOTUNPLUG) { ata_ehi_hotplugged(ehi); ata_ehi_push_desc(ehi, "hot unplug"); } else if (status & NV_ADMA_STAT_SERROR) { /* let EH analyze SError and figure out cause */ ata_ehi_push_desc(ehi, "SError"); } else ata_ehi_push_desc(ehi, "unknown"); ata_port_freeze(ap); continue; } if (status & (NV_ADMA_STAT_DONE | NV_ADMA_STAT_CPBERR | NV_ADMA_STAT_CMD_COMPLETE)) { u32 check_commands = notifier_clears[i]; u32 done_mask = 0; int pos, rc; if (status & NV_ADMA_STAT_CPBERR) { /* check all active commands */ if (ata_tag_valid(ap->link.active_tag)) check_commands = 1 << ap->link.active_tag; else check_commands = ap->link.sactive; } /* check CPBs for completed commands */ while ((pos = 
ffs(check_commands))) { pos--; rc = nv_adma_check_cpb(ap, pos, notifier_error & (1 << pos)); if (rc > 0) done_mask |= 1 << pos; else if (unlikely(rc < 0)) check_commands = 0; check_commands &= ~(1 << pos); } ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); } } if (notifier_clears[0] || notifier_clears[1]) { /* Note: Both notifier clear registers must be written if either is set, even if one is zero, according to NVIDIA. */ struct nv_adma_port_priv *pp = host->ports[0]->private_data; writel(notifier_clears[0], pp->notifier_clear_block); pp = host->ports[1]->private_data; writel(notifier_clears[1], pp->notifier_clear_block); } spin_unlock(&host->lock); return IRQ_RETVAL(handled); } static void nv_adma_freeze(struct ata_port *ap) { struct nv_adma_port_priv *pp = ap->private_data; void __iomem *mmio = pp->ctl_block; u16 tmp; nv_ck804_freeze(ap); if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) return; /* clear any outstanding CK804 notifications */ writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); /* Disable interrupt */ tmp = readw(mmio + NV_ADMA_CTL); writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN), mmio + NV_ADMA_CTL); readw(mmio + NV_ADMA_CTL); /* flush posted write */ } static void nv_adma_thaw(struct ata_port *ap) { struct nv_adma_port_priv *pp = ap->private_data; void __iomem *mmio = pp->ctl_block; u16 tmp; nv_ck804_thaw(ap); if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) return; /* Enable interrupt */ tmp = readw(mmio + NV_ADMA_CTL); writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN), mmio + NV_ADMA_CTL); readw(mmio + NV_ADMA_CTL); /* flush posted write */ } static void nv_adma_irq_clear(struct ata_port *ap) { struct nv_adma_port_priv *pp = ap->private_data; void __iomem *mmio = pp->ctl_block; u32 notifier_clears[2]; if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) { ata_bmdma_irq_clear(ap); return; } /* clear any outstanding CK804 notifications */ writeb(NV_INT_ALL << (ap->port_no * 
NV_INT_PORT_SHIFT), ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); /* clear ADMA status */ writew(0xffff, mmio + NV_ADMA_STAT); /* clear notifiers - note both ports need to be written with something even though we are only clearing on one */ if (ap->port_no == 0) { notifier_clears[0] = 0xFFFFFFFF; notifier_clears[1] = 0; } else { notifier_clears[0] = 0; notifier_clears[1] = 0xFFFFFFFF; } pp = ap->host->ports[0]->private_data; writel(notifier_clears[0], pp->notifier_clear_block); pp = ap->host->ports[1]->private_data; writel(notifier_clears[1], pp->notifier_clear_block); } static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc) { struct nv_adma_port_priv *pp = qc->ap->private_data; if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) ata_bmdma_post_internal_cmd(qc); } static int nv_adma_port_start(struct ata_port *ap) { struct device *dev = ap->host->dev; struct nv_adma_port_priv *pp; int rc; void *mem; dma_addr_t mem_dma; void __iomem *mmio; struct pci_dev *pdev = to_pci_dev(dev); u16 tmp; VPRINTK("ENTER\n"); /* Ensure DMA mask is set to 32-bit before allocating legacy PRD and pad buffers */ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) return rc; rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) return rc; /* we might fallback to bmdma, allocate bmdma resources */ rc = ata_bmdma_port_start(ap); if (rc) return rc; pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); if (!pp) return -ENOMEM; mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE; pp->ctl_block = mmio; pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN; pp->notifier_clear_block = pp->gen_block + NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no); /* Now that the legacy PRD and padding buffer are allocated we can safely raise the DMA mask to allocate the CPB/APRD table. These are allowed to fail since we store the value that ends up being used to set as the bounce limit in slave_config later if needed. 
*/ pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); pp->adma_dma_mask = *dev->dma_mask; mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL); if (!mem) return -ENOMEM; memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ); /* * First item in chunk of DMA memory: * 128-byte command parameter block (CPB) * one for each command tag */ pp->cpb = mem; pp->cpb_dma = mem_dma; writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW); writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH); mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ; mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ; /* * Second item: block of ADMA_SGTBL_LEN s/g entries */ pp->aprd = mem; pp->aprd_dma = mem_dma; ap->private_data = pp; /* clear any outstanding interrupt conditions */ writew(0xffff, mmio + NV_ADMA_STAT); /* initialize port variables */ pp->flags = NV_ADMA_PORT_REGISTER_MODE; /* clear CPB fetch count */ writew(0, mmio + NV_ADMA_CPB_COUNT); /* clear GO for register mode, enable interrupt */ tmp = readw(mmio + NV_ADMA_CTL); writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL); tmp = readw(mmio + NV_ADMA_CTL); writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); readw(mmio + NV_ADMA_CTL); /* flush posted write */ udelay(1); writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); readw(mmio + NV_ADMA_CTL); /* flush posted write */ return 0; } static void nv_adma_port_stop(struct ata_port *ap) { struct nv_adma_port_priv *pp = ap->private_data; void __iomem *mmio = pp->ctl_block; VPRINTK("ENTER\n"); writew(0, mmio + NV_ADMA_CTL); } #ifdef CONFIG_PM static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg) { struct nv_adma_port_priv *pp = ap->private_data; void __iomem *mmio = pp->ctl_block; /* Go to register mode - clears GO */ nv_adma_register_mode(ap); /* clear CPB fetch count */ writew(0, mmio + NV_ADMA_CPB_COUNT); /* disable interrupt, shut down port */ writew(0, mmio 
+ NV_ADMA_CTL); return 0; } static int nv_adma_port_resume(struct ata_port *ap) { struct nv_adma_port_priv *pp = ap->private_data; void __iomem *mmio = pp->ctl_block; u16 tmp; /* set CPB block location */ writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW); writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH); /* clear any outstanding interrupt conditions */ writew(0xffff, mmio + NV_ADMA_STAT); /* initialize port variables */ pp->flags |= NV_ADMA_PORT_REGISTER_MODE; /* clear CPB fetch count */ writew(0, mmio + NV_ADMA_CPB_COUNT); /* clear GO for register mode, enable interrupt */ tmp = readw(mmio + NV_ADMA_CTL); writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL); tmp = readw(mmio + NV_ADMA_CTL); writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); readw(mmio + NV_ADMA_CTL); /* flush posted write */ udelay(1); writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); readw(mmio + NV_ADMA_CTL); /* flush posted write */ return 0; } #endif static void nv_adma_setup_port(struct ata_port *ap) { void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; struct ata_ioports *ioport = &ap->ioaddr; VPRINTK("ENTER\n"); mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE; ioport->cmd_addr = mmio; ioport->data_addr = mmio + (ATA_REG_DATA * 4); ioport->error_addr = ioport->feature_addr = mmio + (ATA_REG_ERR * 4); ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4); ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4); ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4); ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4); ioport->device_addr = mmio + (ATA_REG_DEVICE * 4); ioport->status_addr = ioport->command_addr = mmio + (ATA_REG_STATUS * 4); ioport->altstatus_addr = ioport->ctl_addr = mmio + 0x20; } static int nv_adma_host_init(struct ata_host *host) { struct pci_dev *pdev = to_pci_dev(host->dev); unsigned int i; u32 tmp32; VPRINTK("ENTER\n"); /* enable ADMA on the ports */ pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, 
&tmp32); tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN | NV_MCP_SATA_CFG_20_PORT0_PWB_EN | NV_MCP_SATA_CFG_20_PORT1_EN | NV_MCP_SATA_CFG_20_PORT1_PWB_EN; pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32); for (i = 0; i < host->n_ports; i++) nv_adma_setup_port(host->ports[i]); return 0; } static void nv_adma_fill_aprd(struct ata_queued_cmd *qc, struct scatterlist *sg, int idx, struct nv_adma_prd *aprd) { u8 flags = 0; if (qc->tf.flags & ATA_TFLAG_WRITE) flags |= NV_APRD_WRITE; if (idx == qc->n_elem - 1) flags |= NV_APRD_END; else if (idx != 4) flags |= NV_APRD_CONT; aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg))); aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */ aprd->flags = flags; aprd->packet_len = 0; } static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb) { struct nv_adma_port_priv *pp = qc->ap->private_data; struct nv_adma_prd *aprd; struct scatterlist *sg; unsigned int si; VPRINTK("ENTER\n"); for_each_sg(qc->sg, sg, qc->n_elem, si) { aprd = (si < 5) ? &cpb->aprd[si] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)]; nv_adma_fill_aprd(qc, sg, si, aprd); } if (si > 5) cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag))); else cpb->next_aprd = cpu_to_le64(0); } static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc) { struct nv_adma_port_priv *pp = qc->ap->private_data; /* ADMA engine can only be used for non-ATAPI DMA commands, or interrupt-driven no-data commands. 
*/ if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) || (qc->tf.flags & ATA_TFLAG_POLLING)) return 1; if ((qc->flags & ATA_QCFLAG_DMAMAP) || (qc->tf.protocol == ATA_PROT_NODATA)) return 0; return 1; } static void nv_adma_qc_prep(struct ata_queued_cmd *qc) { struct nv_adma_port_priv *pp = qc->ap->private_data; struct nv_adma_cpb *cpb = &pp->cpb[qc->tag]; u8 ctl_flags = NV_CPB_CTL_CPB_VALID | NV_CPB_CTL_IEN; if (nv_adma_use_reg_mode(qc)) { BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) && (qc->flags & ATA_QCFLAG_DMAMAP)); nv_adma_register_mode(qc->ap); ata_bmdma_qc_prep(qc); return; } cpb->resp_flags = NV_CPB_RESP_DONE; wmb(); cpb->ctl_flags = 0; wmb(); cpb->len = 3; cpb->tag = qc->tag; cpb->next_cpb_idx = 0; /* turn on NCQ flags for NCQ commands */ if (qc->tf.protocol == ATA_PROT_NCQ) ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA; VPRINTK("qc->flags = 0x%lx\n", qc->flags); nv_adma_tf_to_cpb(&qc->tf, cpb->tf); if (qc->flags & ATA_QCFLAG_DMAMAP) { nv_adma_fill_sg(qc, cpb); ctl_flags |= NV_CPB_CTL_APRD_VALID; } else memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5); /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are finished filling in all of the contents */ wmb(); cpb->ctl_flags = ctl_flags; wmb(); cpb->resp_flags = 0; } static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) { struct nv_adma_port_priv *pp = qc->ap->private_data; void __iomem *mmio = pp->ctl_block; int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ); VPRINTK("ENTER\n"); /* We can't handle result taskfile with NCQ commands, since retrieving the taskfile switches us out of ADMA mode and would abort existing commands. 
*/ if (unlikely(qc->tf.protocol == ATA_PROT_NCQ && (qc->flags & ATA_QCFLAG_RESULT_TF))) { ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n"); return AC_ERR_SYSTEM; } if (nv_adma_use_reg_mode(qc)) { /* use ATA register mode */ VPRINTK("using ATA register mode: 0x%lx\n", qc->flags); BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) && (qc->flags & ATA_QCFLAG_DMAMAP)); nv_adma_register_mode(qc->ap); return ata_bmdma_qc_issue(qc); } else nv_adma_mode(qc->ap); /* write append register, command tag in lower 8 bits and (number of cpbs to append -1) in top 8 bits */ wmb(); if (curr_ncq != pp->last_issue_ncq) { /* Seems to need some delay before switching between NCQ and non-NCQ commands, else we get command timeouts and such. */ udelay(20); pp->last_issue_ncq = curr_ncq; } writew(qc->tag, mmio + NV_ADMA_APPEND); DPRINTK("Issued tag %u\n", qc->tag); return 0; } static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance) { struct ata_host *host = dev_instance; unsigned int i; unsigned int handled = 0; unsigned long flags; spin_lock_irqsave(&host->lock, flags); for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; struct ata_queued_cmd *qc; qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { handled += ata_bmdma_port_intr(ap, qc); } else { /* * No request pending? Clear interrupt status * anyway, in case there's one pending. 
*/
			ap->ops->sff_check_status(ap);
		}
	}
	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

/*
 * nv_do_interrupt - dispatch a shared interrupt-status byte to each port.
 *
 * Each port consumes NV_INT_PORT_SHIFT bits of @irq_stat; the byte is
 * shifted down between ports.  Returns IRQ_HANDLED if any port claimed
 * the interrupt.  Caller holds the host lock.
 */
static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		handled += nv_host_intr(host->ports[i], irq_stat);
		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}

/*
 * nv_nf2_interrupt - IRQ handler for the nForce2/3 flavor.
 *
 * nf2/3 expose the interrupt status byte in I/O space relative to
 * port 0's SCR base.
 */
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

/*
 * nv_ck804_interrupt - IRQ handler for the CK804 flavor.
 *
 * CK804 exposes the interrupt status byte through the MMIO BAR instead
 * of I/O space.
 */
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

/*
 * nv_scr_read - read a SATA SCR register.
 *
 * Registers beyond SCR_CONTROL are rejected with -EINVAL; SCRs are
 * 32-bit and laid out at 4-byte strides from scr_addr.
 */
static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

/*
 * nv_scr_write - write a SATA SCR register; same bounds as nv_scr_read().
 */
static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

/*
 * nv_hardreset - conditional hardreset, used by all controller flavors.
 *
 * Hardresets only during post-boot (hot plug) probing; see the large
 * comment above the port ops tables for the per-flavor quirks that
 * motivate this.
 */
static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;

	/* Do hardreset iff it's post-boot probing, please read the
	 * comment above port ops for details.
*/
	if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
	    !ata_dev_enabled(link->device))
		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
				    NULL, NULL);
	else {
		const unsigned long *timing = sata_ehc_deb_timing(ehc);
		int rc;

		if (!(ehc->i.flags & ATA_EHI_QUIET))
			ata_link_info(link,
				      "nv: skipping hardreset on occupied port\n");

		/* make sure the link is online */
		rc = sata_link_resume(link, timing, deadline);

		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_warn(link,
				      "failed to resume link (errno=%d)\n",
				      rc);
	}

	/* device signature acquisition is unreliable */
	return -EAGAIN;
}

/*
 * nv_nf2_freeze - mask this port's interrupt bits (nForce2/3, I/O space).
 */
static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

/*
 * nv_nf2_thaw - ack any pending interrupts, then unmask this port's bits.
 */
static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	/* write-1-to-clear anything pending before re-enabling */
	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

/*
 * nv_ck804_freeze - mask this port's interrupt bits (CK804, MMIO).
 */
static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

/*
 * nv_ck804_thaw - ack any pending interrupts, then unmask this port's
 * bits (CK804, MMIO).
 */
static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

/*
 * nv_mcp55_freeze - ack and mask this port's interrupt bits (MCP5x uses
 * 32-bit registers and a wider per-port shift).
 */
static void nv_mcp55_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift =
ap->port_no * NV_INT_PORT_SHIFT_MCP55; u32 mask; writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55); mask = readl(mmio_base + NV_INT_ENABLE_MCP55); mask &= ~(NV_INT_ALL_MCP55 << shift); writel(mask, mmio_base + NV_INT_ENABLE_MCP55); } static void nv_mcp55_thaw(struct ata_port *ap) { void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55; u32 mask; writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55); mask = readl(mmio_base + NV_INT_ENABLE_MCP55); mask |= (NV_INT_MASK_MCP55 << shift); writel(mask, mmio_base + NV_INT_ENABLE_MCP55); } static void nv_adma_error_handler(struct ata_port *ap) { struct nv_adma_port_priv *pp = ap->private_data; if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) { void __iomem *mmio = pp->ctl_block; int i; u16 tmp; if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) { u32 notifier = readl(mmio + NV_ADMA_NOTIFIER); u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR); u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); u32 status = readw(mmio + NV_ADMA_STAT); u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT); u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX); ata_port_err(ap, "EH in ADMA mode, notifier 0x%X " "notifier_error 0x%X gen_ctl 0x%X status 0x%X " "next cpb count 0x%X next cpb idx 0x%x\n", notifier, notifier_error, gen_ctl, status, cpb_count, next_cpb_idx); for (i = 0; i < NV_ADMA_MAX_CPBS; i++) { struct nv_adma_cpb *cpb = &pp->cpb[i]; if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) || ap->link.sactive & (1 << i)) ata_port_err(ap, "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n", i, cpb->ctl_flags, cpb->resp_flags); } } /* Push us back into port register mode for error handling. 
*/ nv_adma_register_mode(ap); /* Mark all of the CPBs as invalid to prevent them from being executed */ for (i = 0; i < NV_ADMA_MAX_CPBS; i++) pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID; /* clear CPB fetch count */ writew(0, mmio + NV_ADMA_CPB_COUNT); /* Reset channel */ tmp = readw(mmio + NV_ADMA_CTL); writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); readw(mmio + NV_ADMA_CTL); /* flush posted write */ udelay(1); writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); readw(mmio + NV_ADMA_CTL); /* flush posted write */ } ata_bmdma_error_handler(ap); } static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc) { struct nv_swncq_port_priv *pp = ap->private_data; struct defer_queue *dq = &pp->defer_queue; /* queue is full */ WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE); dq->defer_bits |= (1 << qc->tag); dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag; } static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap) { struct nv_swncq_port_priv *pp = ap->private_data; struct defer_queue *dq = &pp->defer_queue; unsigned int tag; if (dq->head == dq->tail) /* null queue */ return NULL; tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)]; dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON; WARN_ON(!(dq->defer_bits & (1 << tag))); dq->defer_bits &= ~(1 << tag); return ata_qc_from_tag(ap, tag); } static void nv_swncq_fis_reinit(struct ata_port *ap) { struct nv_swncq_port_priv *pp = ap->private_data; pp->dhfis_bits = 0; pp->dmafis_bits = 0; pp->sdbfis_bits = 0; pp->ncq_flags = 0; } static void nv_swncq_pp_reinit(struct ata_port *ap) { struct nv_swncq_port_priv *pp = ap->private_data; struct defer_queue *dq = &pp->defer_queue; dq->head = 0; dq->tail = 0; dq->defer_bits = 0; pp->qc_active = 0; pp->last_issue_tag = ATA_TAG_POISON; nv_swncq_fis_reinit(ap); } static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis) { struct nv_swncq_port_priv *pp = ap->private_data; writew(fis, pp->irq_block); } static void 
__ata_bmdma_stop(struct ata_port *ap) { struct ata_queued_cmd qc; qc.ap = ap; ata_bmdma_stop(&qc); } static void nv_swncq_ncq_stop(struct ata_port *ap) { struct nv_swncq_port_priv *pp = ap->private_data; unsigned int i; u32 sactive; u32 done_mask; ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n", ap->qc_active, ap->link.sactive); ata_port_err(ap, "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n " "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n", pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag, pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits); ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n", ap->ops->sff_check_status(ap), ioread8(ap->ioaddr.error_addr)); sactive = readl(pp->sactive_block); done_mask = pp->qc_active ^ sactive; ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n"); for (i = 0; i < ATA_MAX_QUEUE; i++) { u8 err = 0; if (pp->qc_active & (1 << i)) err = 0; else if (done_mask & (1 << i)) err = 1; else continue; ata_port_err(ap, "tag 0x%x: %01x %01x %01x %01x %s\n", i, (pp->dhfis_bits >> i) & 0x1, (pp->dmafis_bits >> i) & 0x1, (pp->sdbfis_bits >> i) & 0x1, (sactive >> i) & 0x1, (err ? "error! 
tag doesn't exit" : " ")); } nv_swncq_pp_reinit(ap); ap->ops->sff_irq_clear(ap); __ata_bmdma_stop(ap); nv_swncq_irq_clear(ap, 0xffff); } static void nv_swncq_error_handler(struct ata_port *ap) { struct ata_eh_context *ehc = &ap->link.eh_context; if (ap->link.sactive) { nv_swncq_ncq_stop(ap); ehc->i.action |= ATA_EH_RESET; } ata_bmdma_error_handler(ap); } #ifdef CONFIG_PM static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg) { void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; u32 tmp; /* clear irq */ writel(~0, mmio + NV_INT_STATUS_MCP55); /* disable irq */ writel(0, mmio + NV_INT_ENABLE_MCP55); /* disable swncq */ tmp = readl(mmio + NV_CTL_MCP55); tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ); writel(tmp, mmio + NV_CTL_MCP55); return 0; } static int nv_swncq_port_resume(struct ata_port *ap) { void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; u32 tmp; /* clear irq */ writel(~0, mmio + NV_INT_STATUS_MCP55); /* enable irq */ writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55); /* enable swncq */ tmp = readl(mmio + NV_CTL_MCP55); writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55); return 0; } #endif static void nv_swncq_host_init(struct ata_host *host) { u32 tmp; void __iomem *mmio = host->iomap[NV_MMIO_BAR]; struct pci_dev *pdev = to_pci_dev(host->dev); u8 regval; /* disable ECO 398 */ pci_read_config_byte(pdev, 0x7f, &regval); regval &= ~(1 << 7); pci_write_config_byte(pdev, 0x7f, regval); /* enable swncq */ tmp = readl(mmio + NV_CTL_MCP55); VPRINTK("HOST_CTL:0x%X\n", tmp); writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55); /* enable irq intr */ tmp = readl(mmio + NV_INT_ENABLE_MCP55); VPRINTK("HOST_ENABLE:0x%X\n", tmp); writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55); /* clear port irq */ writel(~0x0, mmio + NV_INT_STATUS_MCP55); } static int nv_swncq_slave_config(struct scsi_device *sdev) { struct ata_port *ap = ata_shost_to_port(sdev->host); struct pci_dev *pdev = to_pci_dev(ap->host->dev); struct 
ata_device *dev; int rc; u8 rev; u8 check_maxtor = 0; unsigned char model_num[ATA_ID_PROD_LEN + 1]; rc = ata_scsi_slave_config(sdev); if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun) /* Not a proper libata device, ignore */ return rc; dev = &ap->link.device[sdev->id]; if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI) return rc; /* if MCP51 and Maxtor, then disable ncq */ if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA || pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2) check_maxtor = 1; /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */ if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA || pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) { pci_read_config_byte(pdev, 0x8, &rev); if (rev <= 0xa2) check_maxtor = 1; } if (!check_maxtor) return rc; ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); if (strncmp(model_num, "Maxtor", 6) == 0) { ata_scsi_change_queue_depth(sdev, 1, SCSI_QDEPTH_DEFAULT); ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth); } return rc; } static int nv_swncq_port_start(struct ata_port *ap) { struct device *dev = ap->host->dev; void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; struct nv_swncq_port_priv *pp; int rc; /* we might fallback to bmdma, allocate bmdma resources */ rc = ata_bmdma_port_start(ap); if (rc) return rc; pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); if (!pp) return -ENOMEM; pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE, &pp->prd_dma, GFP_KERNEL); if (!pp->prd) return -ENOMEM; memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE); ap->private_data = pp; pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE; pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2; pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2; return 0; } static void nv_swncq_qc_prep(struct ata_queued_cmd *qc) { if (qc->tf.protocol != ATA_PROT_NCQ) { ata_bmdma_qc_prep(qc); return; } if (!(qc->flags & 
ATA_QCFLAG_DMAMAP)) return; nv_swncq_fill_sg(qc); } static void nv_swncq_fill_sg(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct scatterlist *sg; struct nv_swncq_port_priv *pp = ap->private_data; struct ata_bmdma_prd *prd; unsigned int si, idx; prd = pp->prd + ATA_MAX_PRD * qc->tag; idx = 0; for_each_sg(qc->sg, sg, qc->n_elem, si) { u32 addr, offset; u32 sg_len, len; addr = (u32)sg_dma_address(sg); sg_len = sg_dma_len(sg); while (sg_len) { offset = addr & 0xffff; len = sg_len; if ((offset + sg_len) > 0x10000) len = 0x10000 - offset; prd[idx].addr = cpu_to_le32(addr); prd[idx].flags_len = cpu_to_le32(len & 0xffff); idx++; sg_len -= len; addr += len; } } prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); } static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap, struct ata_queued_cmd *qc) { struct nv_swncq_port_priv *pp = ap->private_data; if (qc == NULL) return 0; DPRINTK("Enter\n"); writel((1 << qc->tag), pp->sactive_block); pp->last_issue_tag = qc->tag; pp->dhfis_bits &= ~(1 << qc->tag); pp->dmafis_bits &= ~(1 << qc->tag); pp->qc_active |= (0x1 << qc->tag); ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ ap->ops->sff_exec_command(ap, &qc->tf); DPRINTK("Issued tag %u\n", qc->tag); return 0; } static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct nv_swncq_port_priv *pp = ap->private_data; if (qc->tf.protocol != ATA_PROT_NCQ) return ata_bmdma_qc_issue(qc); DPRINTK("Enter\n"); if (!pp->qc_active) nv_swncq_issue_atacmd(ap, qc); else nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */ return 0; } static void nv_swncq_hotplug(struct ata_port *ap, u32 fis) { u32 serror; struct ata_eh_info *ehi = &ap->link.eh_info; ata_ehi_clear_desc(ehi); /* AHCI needs SError cleared; otherwise, it might lock up */ sata_scr_read(&ap->link, SCR_ERROR, &serror); sata_scr_write(&ap->link, SCR_ERROR, serror); /* analyze @irq_stat */ if (fis & NV_SWNCQ_IRQ_ADDED) ata_ehi_push_desc(ehi, "hot plug"); else if 
(fis & NV_SWNCQ_IRQ_REMOVED) ata_ehi_push_desc(ehi, "hot unplug"); ata_ehi_hotplugged(ehi); /* okay, let's hand over to EH */ ehi->serror |= serror; ata_port_freeze(ap); } static int nv_swncq_sdbfis(struct ata_port *ap) { struct ata_queued_cmd *qc; struct nv_swncq_port_priv *pp = ap->private_data; struct ata_eh_info *ehi = &ap->link.eh_info; u32 sactive; u32 done_mask; u8 host_stat; u8 lack_dhfis = 0; host_stat = ap->ops->bmdma_status(ap); if (unlikely(host_stat & ATA_DMA_ERR)) { /* error when transferring data to/from memory */ ata_ehi_clear_desc(ehi); ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); ehi->err_mask |= AC_ERR_HOST_BUS; ehi->action |= ATA_EH_RESET; return -EINVAL; } ap->ops->sff_irq_clear(ap); __ata_bmdma_stop(ap); sactive = readl(pp->sactive_block); done_mask = pp->qc_active ^ sactive; pp->qc_active &= ~done_mask; pp->dhfis_bits &= ~done_mask; pp->dmafis_bits &= ~done_mask; pp->sdbfis_bits |= done_mask; ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); if (!ap->qc_active) { DPRINTK("over\n"); nv_swncq_pp_reinit(ap); return 0; } if (pp->qc_active & pp->dhfis_bits) return 0; if ((pp->ncq_flags & ncq_saw_backout) || (pp->qc_active ^ pp->dhfis_bits)) /* if the controller can't get a device to host register FIS, * The driver needs to reissue the new command. 
*/ lack_dhfis = 1; DPRINTK("id 0x%x QC: qc_active 0x%x," "SWNCQ:qc_active 0x%X defer_bits %X " "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n", ap->print_id, ap->qc_active, pp->qc_active, pp->defer_queue.defer_bits, pp->dhfis_bits, pp->dmafis_bits, pp->last_issue_tag); nv_swncq_fis_reinit(ap); if (lack_dhfis) { qc = ata_qc_from_tag(ap, pp->last_issue_tag); nv_swncq_issue_atacmd(ap, qc); return 0; } if (pp->defer_queue.defer_bits) { /* send deferral queue command */ qc = nv_swncq_qc_from_dq(ap); WARN_ON(qc == NULL); nv_swncq_issue_atacmd(ap, qc); } return 0; } static inline u32 nv_swncq_tag(struct ata_port *ap) { struct nv_swncq_port_priv *pp = ap->private_data; u32 tag; tag = readb(pp->tag_block) >> 2; return (tag & 0x1f); } static void nv_swncq_dmafis(struct ata_port *ap) { struct ata_queued_cmd *qc; unsigned int rw; u8 dmactl; u32 tag; struct nv_swncq_port_priv *pp = ap->private_data; __ata_bmdma_stop(ap); tag = nv_swncq_tag(ap); DPRINTK("dma setup tag 0x%x\n", tag); qc = ata_qc_from_tag(ap, tag); if (unlikely(!qc)) return; rw = qc->tf.flags & ATA_TFLAG_WRITE; /* load PRD table addr. 
*/ iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); /* specify data direction, triple-check start bit is clear */ dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); dmactl &= ~ATA_DMA_WR; if (!rw) dmactl |= ATA_DMA_WR; iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); } static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis) { struct nv_swncq_port_priv *pp = ap->private_data; struct ata_queued_cmd *qc; struct ata_eh_info *ehi = &ap->link.eh_info; u32 serror; u8 ata_stat; ata_stat = ap->ops->sff_check_status(ap); nv_swncq_irq_clear(ap, fis); if (!fis) return; if (ap->pflags & ATA_PFLAG_FROZEN) return; if (fis & NV_SWNCQ_IRQ_HOTPLUG) { nv_swncq_hotplug(ap, fis); return; } if (!pp->qc_active) return; if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror)) return; ap->ops->scr_write(&ap->link, SCR_ERROR, serror); if (ata_stat & ATA_ERR) { ata_ehi_clear_desc(ehi); ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis); ehi->err_mask |= AC_ERR_DEV; ehi->serror |= serror; ehi->action |= ATA_EH_RESET; ata_port_freeze(ap); return; } if (fis & NV_SWNCQ_IRQ_BACKOUT) { /* If the IRQ is backout, driver must issue * the new command again some time later. */ pp->ncq_flags |= ncq_saw_backout; } if (fis & NV_SWNCQ_IRQ_SDBFIS) { pp->ncq_flags |= ncq_saw_sdb; DPRINTK("id 0x%x SWNCQ: qc_active 0x%X " "dhfis 0x%X dmafis 0x%X sactive 0x%X\n", ap->print_id, pp->qc_active, pp->dhfis_bits, pp->dmafis_bits, readl(pp->sactive_block)); if (nv_swncq_sdbfis(ap) < 0) goto irq_error; } if (fis & NV_SWNCQ_IRQ_DHREGFIS) { /* The interrupt indicates the new command * was transmitted correctly to the drive. 
*/ pp->dhfis_bits |= (0x1 << pp->last_issue_tag); pp->ncq_flags |= ncq_saw_d2h; if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) { ata_ehi_push_desc(ehi, "illegal fis transaction"); ehi->err_mask |= AC_ERR_HSM; ehi->action |= ATA_EH_RESET; goto irq_error; } if (!(fis & NV_SWNCQ_IRQ_DMASETUP) && !(pp->ncq_flags & ncq_saw_dmas)) { ata_stat = ap->ops->sff_check_status(ap); if (ata_stat & ATA_BUSY) goto irq_exit; if (pp->defer_queue.defer_bits) { DPRINTK("send next command\n"); qc = nv_swncq_qc_from_dq(ap); nv_swncq_issue_atacmd(ap, qc); } } } if (fis & NV_SWNCQ_IRQ_DMASETUP) { /* program the dma controller with appropriate PRD buffers * and start the DMA transfer for requested command. */ pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap)); pp->ncq_flags |= ncq_saw_dmas; nv_swncq_dmafis(ap); } irq_exit: return; irq_error: ata_ehi_push_desc(ehi, "fis:0x%x", fis); ata_port_freeze(ap); return; } static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance) { struct ata_host *host = dev_instance; unsigned int i; unsigned int handled = 0; unsigned long flags; u32 irq_stat; spin_lock_irqsave(&host->lock, flags); irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55); for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; if (ap->link.sactive) { nv_swncq_host_interrupt(ap, (u16)irq_stat); handled = 1; } else { if (irq_stat) /* reserve Hotplug */ nv_swncq_irq_clear(ap, 0xfff0); handled += nv_host_intr(ap, (u8)irq_stat); } irq_stat >>= NV_INT_PORT_SHIFT_MCP55; } spin_unlock_irqrestore(&host->lock, flags); return IRQ_RETVAL(handled); } static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct ata_port_info *ppi[] = { NULL, NULL }; struct nv_pi_priv *ipriv; struct ata_host *host; struct nv_host_priv *hpriv; int rc; u32 bar; void __iomem *base; unsigned long type = ent->driver_data; // Make sure this is a SATA controller by counting the number of bars // (NVIDIA SATA controllers will always have six bars). 
Otherwise, // it's an IDE controller and we ignore it. for (bar = 0; bar < 6; bar++) if (pci_resource_start(pdev, bar) == 0) return -ENODEV; ata_print_version_once(&pdev->dev, DRV_VERSION); rc = pcim_enable_device(pdev); if (rc) return rc; /* determine type and allocate host */ if (type == CK804 && adma_enabled) { dev_notice(&pdev->dev, "Using ADMA mode\n"); type = ADMA; } else if (type == MCP5x && swncq_enabled) { dev_notice(&pdev->dev, "Using SWNCQ mode\n"); type = SWNCQ; } ppi[0] = &nv_port_info[type]; ipriv = ppi[0]->private_data; rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); if (rc) return rc; hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); if (!hpriv) return -ENOMEM; hpriv->type = type; host->private_data = hpriv; /* request and iomap NV_MMIO_BAR */ rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME); if (rc) return rc; /* configure SCR access */ base = host->iomap[NV_MMIO_BAR]; host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET; host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET; /* enable SATA space for CK804 */ if (type >= CK804) { u8 regval; pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval); regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN; pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval); } /* init ADMA */ if (type == ADMA) { rc = nv_adma_host_init(host); if (rc) return rc; } else if (type == SWNCQ) nv_swncq_host_init(host); if (msi_enabled) { dev_notice(&pdev->dev, "Using MSI\n"); pci_enable_msi(pdev); } pci_set_master(pdev); return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht); } #ifdef CONFIG_PM static int nv_pci_device_resume(struct pci_dev *pdev) { struct ata_host *host = dev_get_drvdata(&pdev->dev); struct nv_host_priv *hpriv = host->private_data; int rc; rc = ata_pci_device_do_resume(pdev); if (rc) return rc; if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { if (hpriv->type >= CK804) { u8 regval; pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval); regval 
|= NV_MCP_SATA_CFG_20_SATA_SPACE_EN; pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval); } if (hpriv->type == ADMA) { u32 tmp32; struct nv_adma_port_priv *pp; /* enable/disable ADMA on the ports appropriately */ pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32); pp = host->ports[0]->private_data; if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN | NV_MCP_SATA_CFG_20_PORT0_PWB_EN); else tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN | NV_MCP_SATA_CFG_20_PORT0_PWB_EN); pp = host->ports[1]->private_data; if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN | NV_MCP_SATA_CFG_20_PORT1_PWB_EN); else tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN | NV_MCP_SATA_CFG_20_PORT1_PWB_EN); pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32); } } ata_host_resume(host); return 0; } #endif static void nv_ck804_host_stop(struct ata_host *host) { struct pci_dev *pdev = to_pci_dev(host->dev); u8 regval; /* disable SATA space for CK804 */ pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval); regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN; pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval); } static void nv_adma_host_stop(struct ata_host *host) { struct pci_dev *pdev = to_pci_dev(host->dev); u32 tmp32; /* disable ADMA on the ports */ pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32); tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN | NV_MCP_SATA_CFG_20_PORT0_PWB_EN | NV_MCP_SATA_CFG_20_PORT1_EN | NV_MCP_SATA_CFG_20_PORT1_PWB_EN); pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32); nv_ck804_host_stop(host); } module_pci_driver(nv_pci_driver); module_param_named(adma, adma_enabled, bool, 0444); MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)"); module_param_named(swncq, swncq_enabled, bool, 0444); MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)"); module_param_named(msi, msi_enabled, bool, 0444); MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
gpl-2.0
Xperia-P/android_kernel_sony_u8500
net/iucv/iucv.c
2374
54142
/* * IUCV base infrastructure. * * Copyright IBM Corp. 2001, 2009 * * Author(s): * Original source: * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000 * Xenia Tkatschow (xenia@us.ibm.com) * 2Gb awareness and general cleanup: * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) * Rewritten for af_iucv: * Martin Schwidefsky <schwidefsky@de.ibm.com> * PM functions: * Ursula Braun (ursula.braun@de.ibm.com) * * Documentation used: * The original source * CP Programming Service, IBM document # SC24-5760 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define KMSG_COMPONENT "iucv" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/kernel_stat.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/spinlock.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/device.h> #include <linux/cpu.h> #include <linux/reboot.h> #include <net/iucv/iucv.h> #include <asm/atomic.h> #include <asm/ebcdic.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/smp.h> /* * FLAGS: * All flags are defined in the field IPFLAGS1 of each function * and can be found in CP Programming Services. * IPSRCCLS - Indicates you have specified a source class. * IPTRGCLS - Indicates you have specified a target class. 
* IPFGPID - Indicates you have specified a pathid. * IPFGMID - Indicates you have specified a message ID. * IPNORPY - Indicates a one-way message. No reply expected. * IPALL - Indicates that all paths are affected. */ #define IUCV_IPSRCCLS 0x01 #define IUCV_IPTRGCLS 0x01 #define IUCV_IPFGPID 0x02 #define IUCV_IPFGMID 0x04 #define IUCV_IPNORPY 0x10 #define IUCV_IPALL 0x80 static int iucv_bus_match(struct device *dev, struct device_driver *drv) { return 0; } enum iucv_pm_states { IUCV_PM_INITIAL = 0, IUCV_PM_FREEZING = 1, IUCV_PM_THAWING = 2, IUCV_PM_RESTORING = 3, }; static enum iucv_pm_states iucv_pm_state; static int iucv_pm_prepare(struct device *); static void iucv_pm_complete(struct device *); static int iucv_pm_freeze(struct device *); static int iucv_pm_thaw(struct device *); static int iucv_pm_restore(struct device *); static const struct dev_pm_ops iucv_pm_ops = { .prepare = iucv_pm_prepare, .complete = iucv_pm_complete, .freeze = iucv_pm_freeze, .thaw = iucv_pm_thaw, .restore = iucv_pm_restore, }; struct bus_type iucv_bus = { .name = "iucv", .match = iucv_bus_match, .pm = &iucv_pm_ops, }; EXPORT_SYMBOL(iucv_bus); struct device *iucv_root; EXPORT_SYMBOL(iucv_root); static int iucv_available; /* General IUCV interrupt structure */ struct iucv_irq_data { u16 ippathid; u8 ipflags1; u8 iptype; u32 res2[8]; }; struct iucv_irq_list { struct list_head list; struct iucv_irq_data data; }; static struct iucv_irq_data *iucv_irq_data[NR_CPUS]; static cpumask_t iucv_buffer_cpumask = { CPU_BITS_NONE }; static cpumask_t iucv_irq_cpumask = { CPU_BITS_NONE }; /* * Queue of interrupt buffers lock for delivery via the tasklet * (fast but can't call smp_call_function). */ static LIST_HEAD(iucv_task_queue); /* * The tasklet for fast delivery of iucv interrupts. */ static void iucv_tasklet_fn(unsigned long); static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_fn,0); /* * Queue of interrupt buffers for delivery via a work queue * (slower but can call smp_call_function). 
*/ static LIST_HEAD(iucv_work_queue); /* * The work element to deliver path pending interrupts. */ static void iucv_work_fn(struct work_struct *work); static DECLARE_WORK(iucv_work, iucv_work_fn); /* * Spinlock protecting task and work queue. */ static DEFINE_SPINLOCK(iucv_queue_lock); enum iucv_command_codes { IUCV_QUERY = 0, IUCV_RETRIEVE_BUFFER = 2, IUCV_SEND = 4, IUCV_RECEIVE = 5, IUCV_REPLY = 6, IUCV_REJECT = 8, IUCV_PURGE = 9, IUCV_ACCEPT = 10, IUCV_CONNECT = 11, IUCV_DECLARE_BUFFER = 12, IUCV_QUIESCE = 13, IUCV_RESUME = 14, IUCV_SEVER = 15, IUCV_SETMASK = 16, IUCV_SETCONTROLMASK = 17, }; /* * Error messages that are used with the iucv_sever function. They get * converted to EBCDIC. */ static char iucv_error_no_listener[16] = "NO LISTENER"; static char iucv_error_no_memory[16] = "NO MEMORY"; static char iucv_error_pathid[16] = "INVALID PATHID"; /* * iucv_handler_list: List of registered handlers. */ static LIST_HEAD(iucv_handler_list); /* * iucv_path_table: an array of iucv_path structures. */ static struct iucv_path **iucv_path_table; static unsigned long iucv_max_pathid; /* * iucv_lock: spinlock protecting iucv_handler_list and iucv_pathid_table */ static DEFINE_SPINLOCK(iucv_table_lock); /* * iucv_active_cpu: contains the number of the cpu executing the tasklet * or the work handler. Needed for iucv_path_sever called from tasklet. */ static int iucv_active_cpu = -1; /* * Mutex and wait queue for iucv_register/iucv_unregister. */ static DEFINE_MUTEX(iucv_register_mutex); /* * Counter for number of non-smp capable handlers. */ static int iucv_nonsmp_handler; /* * IUCV control data structure. Used by iucv_path_accept, iucv_path_connect, * iucv_path_quiesce and iucv_path_sever. */ struct iucv_cmd_control { u16 ippathid; u8 ipflags1; u8 iprcode; u16 ipmsglim; u16 res1; u8 ipvmid[8]; u8 ipuser[16]; u8 iptarget[8]; } __attribute__ ((packed,aligned(8))); /* * Data in parameter list iucv structure. 
Used by iucv_message_send, * iucv_message_send2way and iucv_message_reply. */ struct iucv_cmd_dpl { u16 ippathid; u8 ipflags1; u8 iprcode; u32 ipmsgid; u32 iptrgcls; u8 iprmmsg[8]; u32 ipsrccls; u32 ipmsgtag; u32 ipbfadr2; u32 ipbfln2f; u32 res; } __attribute__ ((packed,aligned(8))); /* * Data in buffer iucv structure. Used by iucv_message_receive, * iucv_message_reject, iucv_message_send, iucv_message_send2way * and iucv_declare_cpu. */ struct iucv_cmd_db { u16 ippathid; u8 ipflags1; u8 iprcode; u32 ipmsgid; u32 iptrgcls; u32 ipbfadr1; u32 ipbfln1f; u32 ipsrccls; u32 ipmsgtag; u32 ipbfadr2; u32 ipbfln2f; u32 res; } __attribute__ ((packed,aligned(8))); /* * Purge message iucv structure. Used by iucv_message_purge. */ struct iucv_cmd_purge { u16 ippathid; u8 ipflags1; u8 iprcode; u32 ipmsgid; u8 ipaudit[3]; u8 res1[5]; u32 res2; u32 ipsrccls; u32 ipmsgtag; u32 res3[3]; } __attribute__ ((packed,aligned(8))); /* * Set mask iucv structure. Used by iucv_enable_cpu. */ struct iucv_cmd_set_mask { u8 ipmask; u8 res1[2]; u8 iprcode; u32 res2[9]; } __attribute__ ((packed,aligned(8))); union iucv_param { struct iucv_cmd_control ctrl; struct iucv_cmd_dpl dpl; struct iucv_cmd_db db; struct iucv_cmd_purge purge; struct iucv_cmd_set_mask set_mask; }; /* * Anchor for per-cpu IUCV command parameter block. */ static union iucv_param *iucv_param[NR_CPUS]; static union iucv_param *iucv_param_irq[NR_CPUS]; /** * iucv_call_b2f0 * @code: identifier of IUCV call to CP. * @parm: pointer to a struct iucv_parm block * * Calls CP to execute IUCV commands. * * Returns the result of the CP IUCV call. */ static inline int iucv_call_b2f0(int command, union iucv_param *parm) { register unsigned long reg0 asm ("0"); register unsigned long reg1 asm ("1"); int ccode; reg0 = command; reg1 = virt_to_phys(parm); asm volatile( " .long 0xb2f01000\n" " ipm %0\n" " srl %0,28\n" : "=d" (ccode), "=m" (*parm), "+d" (reg0), "+a" (reg1) : "m" (*parm) : "cc"); return (ccode == 1) ? 
parm->ctrl.iprcode : ccode; } /** * iucv_query_maxconn * * Determines the maximum number of connections that may be established. * * Returns the maximum number of connections or -EPERM is IUCV is not * available. */ static int iucv_query_maxconn(void) { register unsigned long reg0 asm ("0"); register unsigned long reg1 asm ("1"); void *param; int ccode; param = kzalloc(sizeof(union iucv_param), GFP_KERNEL|GFP_DMA); if (!param) return -ENOMEM; reg0 = IUCV_QUERY; reg1 = (unsigned long) param; asm volatile ( " .long 0xb2f01000\n" " ipm %0\n" " srl %0,28\n" : "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc"); if (ccode == 0) iucv_max_pathid = reg1; kfree(param); return ccode ? -EPERM : 0; } /** * iucv_allow_cpu * @data: unused * * Allow iucv interrupts on this cpu. */ static void iucv_allow_cpu(void *data) { int cpu = smp_processor_id(); union iucv_param *parm; /* * Enable all iucv interrupts. * ipmask contains bits for the different interrupts * 0x80 - Flag to allow nonpriority message pending interrupts * 0x40 - Flag to allow priority message pending interrupts * 0x20 - Flag to allow nonpriority message completion interrupts * 0x10 - Flag to allow priority message completion interrupts * 0x08 - Flag to allow IUCV control interrupts */ parm = iucv_param_irq[cpu]; memset(parm, 0, sizeof(union iucv_param)); parm->set_mask.ipmask = 0xf8; iucv_call_b2f0(IUCV_SETMASK, parm); /* * Enable all iucv control interrupts. * ipmask contains bits for the different interrupts * 0x80 - Flag to allow pending connections interrupts * 0x40 - Flag to allow connection complete interrupts * 0x20 - Flag to allow connection severed interrupts * 0x10 - Flag to allow connection quiesced interrupts * 0x08 - Flag to allow connection resumed interrupts */ memset(parm, 0, sizeof(union iucv_param)); parm->set_mask.ipmask = 0xf8; iucv_call_b2f0(IUCV_SETCONTROLMASK, parm); /* Set indication that iucv interrupts are allowed for this cpu. 
*/ cpumask_set_cpu(cpu, &iucv_irq_cpumask); } /** * iucv_block_cpu * @data: unused * * Block iucv interrupts on this cpu. */ static void iucv_block_cpu(void *data) { int cpu = smp_processor_id(); union iucv_param *parm; /* Disable all iucv interrupts. */ parm = iucv_param_irq[cpu]; memset(parm, 0, sizeof(union iucv_param)); iucv_call_b2f0(IUCV_SETMASK, parm); /* Clear indication that iucv interrupts are allowed for this cpu. */ cpumask_clear_cpu(cpu, &iucv_irq_cpumask); } /** * iucv_block_cpu_almost * @data: unused * * Allow connection-severed interrupts only on this cpu. */ static void iucv_block_cpu_almost(void *data) { int cpu = smp_processor_id(); union iucv_param *parm; /* Allow iucv control interrupts only */ parm = iucv_param_irq[cpu]; memset(parm, 0, sizeof(union iucv_param)); parm->set_mask.ipmask = 0x08; iucv_call_b2f0(IUCV_SETMASK, parm); /* Allow iucv-severed interrupt only */ memset(parm, 0, sizeof(union iucv_param)); parm->set_mask.ipmask = 0x20; iucv_call_b2f0(IUCV_SETCONTROLMASK, parm); /* Clear indication that iucv interrupts are allowed for this cpu. */ cpumask_clear_cpu(cpu, &iucv_irq_cpumask); } /** * iucv_declare_cpu * @data: unused * * Declare a interrupt buffer on this cpu. */ static void iucv_declare_cpu(void *data) { int cpu = smp_processor_id(); union iucv_param *parm; int rc; if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask)) return; /* Declare interrupt buffer. 
*/ parm = iucv_param_irq[cpu]; memset(parm, 0, sizeof(union iucv_param)); parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]); rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm); if (rc) { char *err = "Unknown"; switch (rc) { case 0x03: err = "Directory error"; break; case 0x0a: err = "Invalid length"; break; case 0x13: err = "Buffer already exists"; break; case 0x3e: err = "Buffer overlap"; break; case 0x5c: err = "Paging or storage error"; break; } pr_warning("Defining an interrupt buffer on CPU %i" " failed with 0x%02x (%s)\n", cpu, rc, err); return; } /* Set indication that an iucv buffer exists for this cpu. */ cpumask_set_cpu(cpu, &iucv_buffer_cpumask); if (iucv_nonsmp_handler == 0 || cpumask_empty(&iucv_irq_cpumask)) /* Enable iucv interrupts on this cpu. */ iucv_allow_cpu(NULL); else /* Disable iucv interrupts on this cpu. */ iucv_block_cpu(NULL); } /** * iucv_retrieve_cpu * @data: unused * * Retrieve interrupt buffer on this cpu. */ static void iucv_retrieve_cpu(void *data) { int cpu = smp_processor_id(); union iucv_param *parm; if (!cpumask_test_cpu(cpu, &iucv_buffer_cpumask)) return; /* Block iucv interrupts. */ iucv_block_cpu(NULL); /* Retrieve interrupt buffer. */ parm = iucv_param_irq[cpu]; iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm); /* Clear indication that an iucv buffer exists for this cpu. */ cpumask_clear_cpu(cpu, &iucv_buffer_cpumask); } /** * iucv_setmask_smp * * Allow iucv interrupts on all cpus. */ static void iucv_setmask_mp(void) { int cpu; get_online_cpus(); for_each_online_cpu(cpu) /* Enable all cpus with a declared buffer. */ if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask) && !cpumask_test_cpu(cpu, &iucv_irq_cpumask)) smp_call_function_single(cpu, iucv_allow_cpu, NULL, 1); put_online_cpus(); } /** * iucv_setmask_up * * Allow iucv interrupts on a single cpu. */ static void iucv_setmask_up(void) { cpumask_t cpumask; int cpu; /* Disable all cpu but the first in cpu_irq_cpumask. 
*/ cpumask_copy(&cpumask, &iucv_irq_cpumask); cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), &cpumask); for_each_cpu(cpu, &cpumask) smp_call_function_single(cpu, iucv_block_cpu, NULL, 1); } /** * iucv_enable * * This function makes iucv ready for use. It allocates the pathid * table, declares an iucv interrupt buffer and enables the iucv * interrupts. Called when the first user has registered an iucv * handler. */ static int iucv_enable(void) { size_t alloc_size; int cpu, rc; get_online_cpus(); rc = -ENOMEM; alloc_size = iucv_max_pathid * sizeof(struct iucv_path); iucv_path_table = kzalloc(alloc_size, GFP_KERNEL); if (!iucv_path_table) goto out; /* Declare per cpu buffers. */ rc = -EIO; for_each_online_cpu(cpu) smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); if (cpumask_empty(&iucv_buffer_cpumask)) /* No cpu could declare an iucv buffer. */ goto out; put_online_cpus(); return 0; out: kfree(iucv_path_table); iucv_path_table = NULL; put_online_cpus(); return rc; } /** * iucv_disable * * This function shuts down iucv. It disables iucv interrupts, retrieves * the iucv interrupt buffer and frees the pathid table. Called after the * last user unregister its iucv handler. 
*/ static void iucv_disable(void) { get_online_cpus(); on_each_cpu(iucv_retrieve_cpu, NULL, 1); kfree(iucv_path_table); iucv_path_table = NULL; put_online_cpus(); } static int __cpuinit iucv_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { cpumask_t cpumask; long cpu = (long) hcpu; switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data), GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); if (!iucv_irq_data[cpu]) return notifier_from_errno(-ENOMEM); iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); if (!iucv_param[cpu]) { kfree(iucv_irq_data[cpu]); iucv_irq_data[cpu] = NULL; return notifier_from_errno(-ENOMEM); } iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param), GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); if (!iucv_param_irq[cpu]) { kfree(iucv_param[cpu]); iucv_param[cpu] = NULL; kfree(iucv_irq_data[cpu]); iucv_irq_data[cpu] = NULL; return notifier_from_errno(-ENOMEM); } break; case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: case CPU_DEAD: case CPU_DEAD_FROZEN: kfree(iucv_param_irq[cpu]); iucv_param_irq[cpu] = NULL; kfree(iucv_param[cpu]); iucv_param[cpu] = NULL; kfree(iucv_irq_data[cpu]); iucv_irq_data[cpu] = NULL; break; case CPU_ONLINE: case CPU_ONLINE_FROZEN: case CPU_DOWN_FAILED: case CPU_DOWN_FAILED_FROZEN: if (!iucv_path_table) break; smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); break; case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: if (!iucv_path_table) break; cpumask_copy(&cpumask, &iucv_buffer_cpumask); cpumask_clear_cpu(cpu, &cpumask); if (cpumask_empty(&cpumask)) /* Can't offline last IUCV enabled cpu. 
*/ return notifier_from_errno(-EINVAL); smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1); if (cpumask_empty(&iucv_irq_cpumask)) smp_call_function_single( cpumask_first(&iucv_buffer_cpumask), iucv_allow_cpu, NULL, 1); break; } return NOTIFY_OK; } static struct notifier_block __refdata iucv_cpu_notifier = { .notifier_call = iucv_cpu_notify, }; /** * iucv_sever_pathid * @pathid: path identification number. * @userdata: 16-bytes of user data. * * Sever an iucv path to free up the pathid. Used internally. */ static int iucv_sever_pathid(u16 pathid, u8 userdata[16]) { union iucv_param *parm; parm = iucv_param_irq[smp_processor_id()]; memset(parm, 0, sizeof(union iucv_param)); if (userdata) memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); parm->ctrl.ippathid = pathid; return iucv_call_b2f0(IUCV_SEVER, parm); } /** * __iucv_cleanup_queue * @dummy: unused dummy argument * * Nop function called via smp_call_function to force work items from * pending external iucv interrupts to the work queue. */ static void __iucv_cleanup_queue(void *dummy) { } /** * iucv_cleanup_queue * * Function called after a path has been severed to find all remaining * work items for the now stale pathid. The caller needs to hold the * iucv_table_lock. */ static void iucv_cleanup_queue(void) { struct iucv_irq_list *p, *n; /* * When a path is severed, the pathid can be reused immediately * on a iucv connect or a connection pending interrupt. Remove * all entries from the task queue that refer to a stale pathid * (iucv_path_table[ix] == NULL). Only then do the iucv connect * or deliver the connection pending interrupt. To get all the * pending interrupts force them to the work queue by calling * an empty function on all cpus. */ smp_call_function(__iucv_cleanup_queue, NULL, 1); spin_lock_irq(&iucv_queue_lock); list_for_each_entry_safe(p, n, &iucv_task_queue, list) { /* Remove stale work items from the task queue. 
*/ if (iucv_path_table[p->data.ippathid] == NULL) { list_del(&p->list); kfree(p); } } spin_unlock_irq(&iucv_queue_lock); } /** * iucv_register: * @handler: address of iucv handler structure * @smp: != 0 indicates that the handler can deal with out of order messages * * Registers a driver with IUCV. * * Returns 0 on success, -ENOMEM if the memory allocation for the pathid * table failed, or -EIO if IUCV_DECLARE_BUFFER failed on all cpus. */ int iucv_register(struct iucv_handler *handler, int smp) { int rc; if (!iucv_available) return -ENOSYS; mutex_lock(&iucv_register_mutex); if (!smp) iucv_nonsmp_handler++; if (list_empty(&iucv_handler_list)) { rc = iucv_enable(); if (rc) goto out_mutex; } else if (!smp && iucv_nonsmp_handler == 1) iucv_setmask_up(); INIT_LIST_HEAD(&handler->paths); spin_lock_bh(&iucv_table_lock); list_add_tail(&handler->list, &iucv_handler_list); spin_unlock_bh(&iucv_table_lock); rc = 0; out_mutex: mutex_unlock(&iucv_register_mutex); return rc; } EXPORT_SYMBOL(iucv_register); /** * iucv_unregister * @handler: address of iucv handler structure * @smp: != 0 indicates that the handler can deal with out of order messages * * Unregister driver from IUCV. */ void iucv_unregister(struct iucv_handler *handler, int smp) { struct iucv_path *p, *n; mutex_lock(&iucv_register_mutex); spin_lock_bh(&iucv_table_lock); /* Remove handler from the iucv_handler_list. */ list_del_init(&handler->list); /* Sever all pathids still referring to the handler. 
*/ list_for_each_entry_safe(p, n, &handler->paths, list) { iucv_sever_pathid(p->pathid, NULL); iucv_path_table[p->pathid] = NULL; list_del(&p->list); iucv_path_free(p); } spin_unlock_bh(&iucv_table_lock); if (!smp) iucv_nonsmp_handler--; if (list_empty(&iucv_handler_list)) iucv_disable(); else if (!smp && iucv_nonsmp_handler == 0) iucv_setmask_mp(); mutex_unlock(&iucv_register_mutex); } EXPORT_SYMBOL(iucv_unregister); static int iucv_reboot_event(struct notifier_block *this, unsigned long event, void *ptr) { int i; get_online_cpus(); on_each_cpu(iucv_block_cpu, NULL, 1); preempt_disable(); for (i = 0; i < iucv_max_pathid; i++) { if (iucv_path_table[i]) iucv_sever_pathid(i, NULL); } preempt_enable(); put_online_cpus(); iucv_disable(); return NOTIFY_DONE; } static struct notifier_block iucv_reboot_notifier = { .notifier_call = iucv_reboot_event, }; /** * iucv_path_accept * @path: address of iucv path structure * @handler: address of iucv handler structure * @userdata: 16 bytes of data reflected to the communication partner * @private: private data passed to interrupt handlers for this path * * This function is issued after the user received a connection pending * external interrupt and now wishes to complete the IUCV communication path. * * Returns the result of the CP IUCV call. */ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler, u8 userdata[16], void *private) { union iucv_param *parm; int rc; local_bh_disable(); if (cpumask_empty(&iucv_buffer_cpumask)) { rc = -EIO; goto out; } /* Prepare parameter block. 
*/ parm = iucv_param[smp_processor_id()]; memset(parm, 0, sizeof(union iucv_param)); parm->ctrl.ippathid = path->pathid; parm->ctrl.ipmsglim = path->msglim; if (userdata) memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); parm->ctrl.ipflags1 = path->flags; rc = iucv_call_b2f0(IUCV_ACCEPT, parm); if (!rc) { path->private = private; path->msglim = parm->ctrl.ipmsglim; path->flags = parm->ctrl.ipflags1; } out: local_bh_enable(); return rc; } EXPORT_SYMBOL(iucv_path_accept); /** * iucv_path_connect * @path: address of iucv path structure * @handler: address of iucv handler structure * @userid: 8-byte user identification * @system: 8-byte target system identification * @userdata: 16 bytes of data reflected to the communication partner * @private: private data passed to interrupt handlers for this path * * This function establishes an IUCV path. Although the connect may complete * successfully, you are not able to use the path until you receive an IUCV * Connection Complete external interrupt. * * Returns the result of the CP IUCV call. 
 */
int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
		      u8 userid[8], u8 system[8], u8 userdata[16],
		      void *private)
{
	union iucv_param *parm;
	int rc;

	/*
	 * iucv_table_lock serializes against the tasklet/work handlers;
	 * iucv_cleanup_queue() purges stale work items so a reused pathid
	 * cannot match leftover interrupts from a severed path.
	 */
	spin_lock_bh(&iucv_table_lock);
	iucv_cleanup_queue();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		/* No cpu has declared an interrupt buffer - IUCV unusable. */
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->ctrl.ipmsglim = path->msglim;
	parm->ctrl.ipflags1 = path->flags;
	if (userid) {
		/* CP expects the vm id in upper-case EBCDIC. */
		memcpy(parm->ctrl.ipvmid, userid, sizeof(parm->ctrl.ipvmid));
		ASCEBC(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
		EBC_TOUPPER(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
	}
	if (system) {
		/* Target system id is converted the same way. */
		memcpy(parm->ctrl.iptarget, system,
		       sizeof(parm->ctrl.iptarget));
		ASCEBC(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
		EBC_TOUPPER(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
	}
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	rc = iucv_call_b2f0(IUCV_CONNECT, parm);
	if (!rc) {
		if (parm->ctrl.ippathid < iucv_max_pathid) {
			/* Adopt the values CP negotiated for this path. */
			path->pathid = parm->ctrl.ippathid;
			path->msglim = parm->ctrl.ipmsglim;
			path->flags = parm->ctrl.ipflags1;
			path->handler = handler;
			path->private = private;
			list_add_tail(&path->list, &handler->paths);
			iucv_path_table[path->pathid] = path;
		} else {
			/*
			 * CP handed back a pathid beyond our table; sever it
			 * again since we cannot track it.
			 */
			iucv_sever_pathid(parm->ctrl.ippathid,
					  iucv_error_pathid);
			rc = -EIO;
		}
	}
out:
	spin_unlock_bh(&iucv_table_lock);
	return rc;
}
EXPORT_SYMBOL(iucv_path_connect);

/**
 * iucv_path_quiesce:
 * @path: address of iucv path structure
 * @userdata: 16 bytes of data reflected to the communication partner
 *
 * This function temporarily suspends incoming messages on an IUCV path.
 * You can later reactivate the path by invoking the iucv_resume function.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16])
{
	union iucv_param *parm;
	int rc;

	/* Bottom halves are disabled so the per-cpu parm block is ours. */
	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		/* No cpu has declared an interrupt buffer - IUCV unusable. */
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ippathid = path->pathid;
	rc = iucv_call_b2f0(IUCV_QUIESCE, parm);
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_path_quiesce);

/**
 * iucv_path_resume:
 * @path: address of iucv path structure
 * @userdata: 16 bytes of data reflected to the communication partner
 *
 * This function resumes incoming messages on an IUCV path that has
 * been stopped with iucv_path_quiesce.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_path_resume(struct iucv_path *path, u8 userdata[16])
{
	union iucv_param *parm;
	int rc;

	/* Same per-cpu parameter-block protocol as iucv_path_quiesce. */
	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ippathid = path->pathid;
	rc = iucv_call_b2f0(IUCV_RESUME, parm);
out:
	local_bh_enable();
	return rc;
}

/**
 * iucv_path_sever
 * @path: address of iucv path structure
 * @userdata: 16 bytes of data reflected to the communication partner
 *
 * This function terminates an IUCV path.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
{
	int rc;

	preempt_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	/*
	 * Take iucv_table_lock unless we already hold it: the tasklet/work
	 * handlers record their cpu in iucv_active_cpu while holding the
	 * lock, so a sever issued from a callback must not deadlock here.
	 */
	if (iucv_active_cpu != smp_processor_id())
		spin_lock_bh(&iucv_table_lock);
	rc = iucv_sever_pathid(path->pathid, userdata);
	iucv_path_table[path->pathid] = NULL;
	list_del_init(&path->list);
	if (iucv_active_cpu != smp_processor_id())
		spin_unlock_bh(&iucv_table_lock);
out:
	preempt_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_path_sever);

/**
 * iucv_message_purge
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @srccls: source class of message
 *
 * Cancels a message you have sent.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
		       u32 srccls)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->purge.ippathid = path->pathid;
	parm->purge.ipmsgid = msg->id;
	parm->purge.ipsrccls = srccls;
	parm->purge.ipflags1 = IUCV_IPSRCCLS | IUCV_IPFGMID | IUCV_IPFGPID;
	rc = iucv_call_b2f0(IUCV_PURGE, parm);
	if (!rc) {
		/* ipaudit is a 3-byte field; drop the low pad byte. */
		msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8;
		msg->tag = parm->purge.ipmsgtag;
	}
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_purge);

/**
 * iucv_message_receive_iprmdata
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is received (IUCV_IPBUFLST)
 * @buffer: address of data buffer or address of struct iucv_array
 * @size: length of data buffer
 * @residual: presumably receives the count of message bytes that did not
 *	fit into @size - TODO confirm against callers
 *
 * Internal function used by iucv_message_receive and __iucv_message_receive
 * to receive RMDATA data stored in struct iucv_message.
 */
static int iucv_message_receive_iprmdata(struct iucv_path *path,
					 struct iucv_message *msg,
					 u8 flags, void *buffer, size_t size,
					 size_t *residual)
{
	struct iucv_array *array;
	u8 *rmmsg;
	size_t copy;

	/*
	 * Message is 8 bytes long and has been stored to the
	 * message descriptor itself.
	 */
	if (residual)
		/*
		 * NOTE(review): size is size_t, so size - 8 wraps for
		 * size < 8; abs() then truncates the wrapped value to int,
		 * which happens to give |size - 8| on this platform -
		 * confirm this is the intended contract for @residual.
		 */
		*residual = abs(size - 8);
	rmmsg = msg->rmmsg;
	if (flags & IUCV_IPBUFLST) {
		/* Copy to struct iucv_array. */
		size = (size < 8) ? size : 8;
		for (array = buffer; size > 0; array++) {
			/* Fill each scatter element up to its length. */
			copy = min_t(size_t, size, array->length);
			memcpy((u8 *)(addr_t) array->address, rmmsg, copy);
			rmmsg += copy;
			size -= copy;
		}
	} else {
		/* Copy to direct buffer; at most the 8 inline bytes. */
		memcpy(buffer, rmmsg, min_t(size_t, size, 8));
	}
	return 0;
}

/**
 * __iucv_message_receive
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is received (IUCV_IPBUFLST)
 * @buffer: address of data buffer or address of struct iucv_array
 * @size: length of data buffer
 * @residual:
 *
 * This function receives messages that are being sent to you over
 * established paths. This function will deal with RMDATA messages
 * embedded in struct iucv_message as well.
 *
 * Locking: no locking
 *
 * Returns the result from the CP IUCV call.
 */
int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
			   u8 flags, void *buffer, size_t size, size_t *residual)
{
	union iucv_param *parm;
	int rc;

	/*
	 * RMDATA messages carry their payload inside the message
	 * descriptor itself; no call to CP is needed for those.
	 */
	if (msg->flags & IUCV_IPRMDATA)
		return iucv_message_receive_iprmdata(path, msg, flags,
						     buffer, size, residual);
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ipbfadr1 = (u32)(addr_t) buffer;
	parm->db.ipbfln1f = (u32) size;
	parm->db.ipmsgid = msg->id;
	parm->db.ippathid = path->pathid;
	parm->db.iptrgcls = msg->class;
	parm->db.ipflags1 = (flags | IUCV_IPFGPID |
			     IUCV_IPFGMID | IUCV_IPTRGCLS);
	rc = iucv_call_b2f0(IUCV_RECEIVE, parm);
	/*
	 * NOTE(review): rc == 5 presumably means a partial receive (buffer
	 * too small) for which the returned flags and residual count are
	 * still valid - confirm against the CP IUCV documentation.
	 */
	if (!rc || rc == 5) {
		msg->flags = parm->db.ipflags1;
		if (residual)
			*residual = parm->db.ipbfln1f;
	}
out:
	return rc;
}
EXPORT_SYMBOL(__iucv_message_receive);

/**
 * iucv_message_receive
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is received (IUCV_IPBUFLST)
 * @buffer: address of data buffer or address of struct iucv_array
 * @size: length of data buffer
 * @residual:
 *
 * This function receives messages that are being sent to you over
 * established paths. This function will deal with RMDATA messages
 * embedded in struct iucv_message as well.
 *
 * Locking: local_bh_enable/local_bh_disable
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
			 u8 flags, void *buffer, size_t size, size_t *residual)
{
	int rc;

	/* RMDATA needs no CP call, hence no bh protection either. */
	if (msg->flags & IUCV_IPRMDATA)
		return iucv_message_receive_iprmdata(path, msg, flags,
						     buffer, size, residual);
	local_bh_disable();
	rc = __iucv_message_receive(path, msg, flags, buffer, size, residual);
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_receive);

/**
 * iucv_message_reject
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 *
 * The reject function refuses a specified message.
 * Between the time you
 * are notified of a message and the time that you complete the message,
 * the message may be rejected.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		/* No cpu has declared an interrupt buffer - IUCV unusable. */
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ippathid = path->pathid;
	parm->db.ipmsgid = msg->id;
	parm->db.iptrgcls = msg->class;
	parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID);
	rc = iucv_call_b2f0(IUCV_REJECT, parm);
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_reject);

/**
 * iucv_message_reply
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the reply is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
 * @reply: address of reply data buffer or address of struct iucv_array
 * @size: length of reply data buffer
 *
 * This function responds to the two-way messages that you receive. You
 * must identify completely the message to which you wish to reply. ie,
 * pathid, msgid, and trgcls. Prmmsg signifies the data is moved into
 * the parameter list.
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
		       u8 flags, void *reply, size_t size)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (flags & IUCV_IPRMDATA) {
		/* Up to 8 reply bytes travel inside the parameter list. */
		parm->dpl.ippathid = path->pathid;
		parm->dpl.ipflags1 = flags;
		parm->dpl.ipmsgid = msg->id;
		parm->dpl.iptrgcls = msg->class;
		memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8));
	} else {
		/* Larger replies are passed by buffer address and length. */
		parm->db.ipbfadr1 = (u32)(addr_t) reply;
		parm->db.ipbfln1f = (u32) size;
		parm->db.ippathid = path->pathid;
		parm->db.ipflags1 = flags;
		parm->db.ipmsgid = msg->id;
		parm->db.iptrgcls = msg->class;
	}
	rc = iucv_call_b2f0(IUCV_REPLY, parm);
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_reply);

/**
 * __iucv_message_send
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
 * @srccls: source class of message
 * @buffer: address of send buffer or address of struct iucv_array
 * @size: length of send buffer
 *
 * This function transmits data to another application. Data to be
 * transmitted is in a buffer and this is a one-way message and the
 * receiver will not reply to the message.
 *
 * Locking: no locking
 *
 * Returns the result from the CP IUCV call.
 */
int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
			u8 flags, u32 srccls, void *buffer, size_t size)
{
	union iucv_param *parm;
	int rc;

	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (flags & IUCV_IPRMDATA) {
		/* Message of 8 bytes can be placed into the parameter list.
		 */
		parm->dpl.ippathid = path->pathid;
		/* IUCV_IPNORPY marks the message as one-way (no reply). */
		parm->dpl.ipflags1 = flags | IUCV_IPNORPY;
		parm->dpl.iptrgcls = msg->class;
		parm->dpl.ipsrccls = srccls;
		parm->dpl.ipmsgtag = msg->tag;
		memcpy(parm->dpl.iprmmsg, buffer, 8);
	} else {
		parm->db.ipbfadr1 = (u32)(addr_t) buffer;
		parm->db.ipbfln1f = (u32) size;
		parm->db.ippathid = path->pathid;
		parm->db.ipflags1 = flags | IUCV_IPNORPY;
		parm->db.iptrgcls = msg->class;
		parm->db.ipsrccls = srccls;
		parm->db.ipmsgtag = msg->tag;
	}
	rc = iucv_call_b2f0(IUCV_SEND, parm);
	if (!rc)
		/* CP assigned a message id; hand it back to the caller. */
		msg->id = parm->db.ipmsgid;
out:
	return rc;
}
EXPORT_SYMBOL(__iucv_message_send);

/**
 * iucv_message_send
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
 * @srccls: source class of message
 * @buffer: address of send buffer or address of struct iucv_array
 * @size: length of send buffer
 *
 * This function transmits data to another application. Data to be
 * transmitted is in a buffer and this is a one-way message and the
 * receiver will not reply to the message.
 *
 * Locking: local_bh_enable/local_bh_disable
 *
 * Returns the result from the CP IUCV call.
 */
int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
		      u8 flags, u32 srccls, void *buffer, size_t size)
{
	int rc;

	/* Thin bh-protected wrapper around __iucv_message_send. */
	local_bh_disable();
	rc = __iucv_message_send(path, msg, flags, srccls, buffer, size);
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_send);

/**
 * iucv_message_send2way
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is sent and the reply is received
 *	   (IUCV_IPRMDATA, IUCV_IPBUFLST, IUCV_IPPRTY, IUCV_ANSLST)
 * @srccls: source class of message
 * @buffer: address of send buffer or address of struct iucv_array
 * @size: length of send buffer
 * @ansbuf: address of answer buffer or address of struct iucv_array
 * @asize: size of reply buffer
 *
 * This function transmits data to another application.
Data to be * transmitted is in a buffer. The receiver of the send is expected to * reply to the message and a buffer is provided into which IUCV moves * the reply to this message. * * Returns the result from the CP IUCV call. */ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg, u8 flags, u32 srccls, void *buffer, size_t size, void *answer, size_t asize, size_t *residual) { union iucv_param *parm; int rc; local_bh_disable(); if (cpumask_empty(&iucv_buffer_cpumask)) { rc = -EIO; goto out; } parm = iucv_param[smp_processor_id()]; memset(parm, 0, sizeof(union iucv_param)); if (flags & IUCV_IPRMDATA) { parm->dpl.ippathid = path->pathid; parm->dpl.ipflags1 = path->flags; /* priority message */ parm->dpl.iptrgcls = msg->class; parm->dpl.ipsrccls = srccls; parm->dpl.ipmsgtag = msg->tag; parm->dpl.ipbfadr2 = (u32)(addr_t) answer; parm->dpl.ipbfln2f = (u32) asize; memcpy(parm->dpl.iprmmsg, buffer, 8); } else { parm->db.ippathid = path->pathid; parm->db.ipflags1 = path->flags; /* priority message */ parm->db.iptrgcls = msg->class; parm->db.ipsrccls = srccls; parm->db.ipmsgtag = msg->tag; parm->db.ipbfadr1 = (u32)(addr_t) buffer; parm->db.ipbfln1f = (u32) size; parm->db.ipbfadr2 = (u32)(addr_t) answer; parm->db.ipbfln2f = (u32) asize; } rc = iucv_call_b2f0(IUCV_SEND, parm); if (!rc) msg->id = parm->db.ipmsgid; out: local_bh_enable(); return rc; } EXPORT_SYMBOL(iucv_message_send2way); /** * iucv_path_pending * @data: Pointer to external interrupt buffer * * Process connection pending work item. Called from tasklet while holding * iucv_table_lock. */ struct iucv_path_pending { u16 ippathid; u8 ipflags1; u8 iptype; u16 ipmsglim; u16 res1; u8 ipvmid[8]; u8 ipuser[16]; u32 res3; u8 ippollfg; u8 res4[3]; } __packed; static void iucv_path_pending(struct iucv_irq_data *data) { struct iucv_path_pending *ipp = (void *) data; struct iucv_handler *handler; struct iucv_path *path; char *error; BUG_ON(iucv_path_table[ipp->ippathid]); /* New pathid, handler found. 
Create a new path struct. */ error = iucv_error_no_memory; path = iucv_path_alloc(ipp->ipmsglim, ipp->ipflags1, GFP_ATOMIC); if (!path) goto out_sever; path->pathid = ipp->ippathid; iucv_path_table[path->pathid] = path; EBCASC(ipp->ipvmid, 8); /* Call registered handler until one is found that wants the path. */ list_for_each_entry(handler, &iucv_handler_list, list) { if (!handler->path_pending) continue; /* * Add path to handler to allow a call to iucv_path_sever * inside the path_pending function. If the handler returns * an error remove the path from the handler again. */ list_add(&path->list, &handler->paths); path->handler = handler; if (!handler->path_pending(path, ipp->ipvmid, ipp->ipuser)) return; list_del(&path->list); path->handler = NULL; } /* No handler wanted the path. */ iucv_path_table[path->pathid] = NULL; iucv_path_free(path); error = iucv_error_no_listener; out_sever: iucv_sever_pathid(ipp->ippathid, error); } /** * iucv_path_complete * @data: Pointer to external interrupt buffer * * Process connection complete work item. Called from tasklet while holding * iucv_table_lock. */ struct iucv_path_complete { u16 ippathid; u8 ipflags1; u8 iptype; u16 ipmsglim; u16 res1; u8 res2[8]; u8 ipuser[16]; u32 res3; u8 ippollfg; u8 res4[3]; } __packed; static void iucv_path_complete(struct iucv_irq_data *data) { struct iucv_path_complete *ipc = (void *) data; struct iucv_path *path = iucv_path_table[ipc->ippathid]; if (path) path->flags = ipc->ipflags1; if (path && path->handler && path->handler->path_complete) path->handler->path_complete(path, ipc->ipuser); } /** * iucv_path_severed * @data: Pointer to external interrupt buffer * * Process connection severed work item. Called from tasklet while holding * iucv_table_lock. 
*/ struct iucv_path_severed { u16 ippathid; u8 res1; u8 iptype; u32 res2; u8 res3[8]; u8 ipuser[16]; u32 res4; u8 ippollfg; u8 res5[3]; } __packed; static void iucv_path_severed(struct iucv_irq_data *data) { struct iucv_path_severed *ips = (void *) data; struct iucv_path *path = iucv_path_table[ips->ippathid]; if (!path || !path->handler) /* Already severed */ return; if (path->handler->path_severed) path->handler->path_severed(path, ips->ipuser); else { iucv_sever_pathid(path->pathid, NULL); iucv_path_table[path->pathid] = NULL; list_del(&path->list); iucv_path_free(path); } } /** * iucv_path_quiesced * @data: Pointer to external interrupt buffer * * Process connection quiesced work item. Called from tasklet while holding * iucv_table_lock. */ struct iucv_path_quiesced { u16 ippathid; u8 res1; u8 iptype; u32 res2; u8 res3[8]; u8 ipuser[16]; u32 res4; u8 ippollfg; u8 res5[3]; } __packed; static void iucv_path_quiesced(struct iucv_irq_data *data) { struct iucv_path_quiesced *ipq = (void *) data; struct iucv_path *path = iucv_path_table[ipq->ippathid]; if (path && path->handler && path->handler->path_quiesced) path->handler->path_quiesced(path, ipq->ipuser); } /** * iucv_path_resumed * @data: Pointer to external interrupt buffer * * Process connection resumed work item. Called from tasklet while holding * iucv_table_lock. */ struct iucv_path_resumed { u16 ippathid; u8 res1; u8 iptype; u32 res2; u8 res3[8]; u8 ipuser[16]; u32 res4; u8 ippollfg; u8 res5[3]; } __packed; static void iucv_path_resumed(struct iucv_irq_data *data) { struct iucv_path_resumed *ipr = (void *) data; struct iucv_path *path = iucv_path_table[ipr->ippathid]; if (path && path->handler && path->handler->path_resumed) path->handler->path_resumed(path, ipr->ipuser); } /** * iucv_message_complete * @data: Pointer to external interrupt buffer * * Process message complete work item. Called from tasklet while holding * iucv_table_lock. 
*/ struct iucv_message_complete { u16 ippathid; u8 ipflags1; u8 iptype; u32 ipmsgid; u32 ipaudit; u8 iprmmsg[8]; u32 ipsrccls; u32 ipmsgtag; u32 res; u32 ipbfln2f; u8 ippollfg; u8 res2[3]; } __packed; static void iucv_message_complete(struct iucv_irq_data *data) { struct iucv_message_complete *imc = (void *) data; struct iucv_path *path = iucv_path_table[imc->ippathid]; struct iucv_message msg; if (path && path->handler && path->handler->message_complete) { msg.flags = imc->ipflags1; msg.id = imc->ipmsgid; msg.audit = imc->ipaudit; memcpy(msg.rmmsg, imc->iprmmsg, 8); msg.class = imc->ipsrccls; msg.tag = imc->ipmsgtag; msg.length = imc->ipbfln2f; path->handler->message_complete(path, &msg); } } /** * iucv_message_pending * @data: Pointer to external interrupt buffer * * Process message pending work item. Called from tasklet while holding * iucv_table_lock. */ struct iucv_message_pending { u16 ippathid; u8 ipflags1; u8 iptype; u32 ipmsgid; u32 iptrgcls; union { u32 iprmmsg1_u32; u8 iprmmsg1[4]; } ln1msg1; union { u32 ipbfln1f; u8 iprmmsg2[4]; } ln1msg2; u32 res1[3]; u32 ipbfln2f; u8 ippollfg; u8 res2[3]; } __packed; static void iucv_message_pending(struct iucv_irq_data *data) { struct iucv_message_pending *imp = (void *) data; struct iucv_path *path = iucv_path_table[imp->ippathid]; struct iucv_message msg; if (path && path->handler && path->handler->message_pending) { msg.flags = imp->ipflags1; msg.id = imp->ipmsgid; msg.class = imp->iptrgcls; if (imp->ipflags1 & IUCV_IPRMDATA) { memcpy(msg.rmmsg, imp->ln1msg1.iprmmsg1, 8); msg.length = 8; } else msg.length = imp->ln1msg2.ipbfln1f; msg.reply_size = imp->ipbfln2f; path->handler->message_pending(path, &msg); } } /** * iucv_tasklet_fn: * * This tasklet loops over the queue of irq buffers created by * iucv_external_interrupt, calls the appropriate action handler * and then frees the buffer. 
*/ static void iucv_tasklet_fn(unsigned long ignored) { typedef void iucv_irq_fn(struct iucv_irq_data *); static iucv_irq_fn *irq_fn[] = { [0x02] = iucv_path_complete, [0x03] = iucv_path_severed, [0x04] = iucv_path_quiesced, [0x05] = iucv_path_resumed, [0x06] = iucv_message_complete, [0x07] = iucv_message_complete, [0x08] = iucv_message_pending, [0x09] = iucv_message_pending, }; LIST_HEAD(task_queue); struct iucv_irq_list *p, *n; /* Serialize tasklet, iucv_path_sever and iucv_path_connect. */ if (!spin_trylock(&iucv_table_lock)) { tasklet_schedule(&iucv_tasklet); return; } iucv_active_cpu = smp_processor_id(); spin_lock_irq(&iucv_queue_lock); list_splice_init(&iucv_task_queue, &task_queue); spin_unlock_irq(&iucv_queue_lock); list_for_each_entry_safe(p, n, &task_queue, list) { list_del_init(&p->list); irq_fn[p->data.iptype](&p->data); kfree(p); } iucv_active_cpu = -1; spin_unlock(&iucv_table_lock); } /** * iucv_work_fn: * * This work function loops over the queue of path pending irq blocks * created by iucv_external_interrupt, calls the appropriate action * handler and then frees the buffer. */ static void iucv_work_fn(struct work_struct *work) { LIST_HEAD(work_queue); struct iucv_irq_list *p, *n; /* Serialize tasklet, iucv_path_sever and iucv_path_connect. */ spin_lock_bh(&iucv_table_lock); iucv_active_cpu = smp_processor_id(); spin_lock_irq(&iucv_queue_lock); list_splice_init(&iucv_work_queue, &work_queue); spin_unlock_irq(&iucv_queue_lock); iucv_cleanup_queue(); list_for_each_entry_safe(p, n, &work_queue, list) { list_del_init(&p->list); iucv_path_pending(&p->data); kfree(p); } iucv_active_cpu = -1; spin_unlock_bh(&iucv_table_lock); } /** * iucv_external_interrupt * @code: irq code * * Handles external interrupts coming in from CP. * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn(). 
*/
static void iucv_external_interrupt(unsigned int ext_int_code,
				    unsigned int param32, unsigned long param64)
{
	struct iucv_irq_data *p;
	struct iucv_irq_list *work;

	kstat_cpu(smp_processor_id()).irqs[EXTINT_IUC]++;
	p = iucv_irq_data[smp_processor_id()];
	if (p->ippathid >= iucv_max_pathid) {
		/*
		 * Path id is outside the path table: warn and sever the
		 * path instead of indexing past the table.
		 */
		WARN_ON(p->ippathid >= iucv_max_pathid);
		iucv_sever_pathid(p->ippathid, iucv_error_no_listener);
		return;
	}
	BUG_ON(p->iptype < 0x01 || p->iptype > 0x09);
	work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC);
	if (!work) {
		/* Interrupt context: the buffer is dropped on OOM. */
		pr_warning("iucv_external_interrupt: out of memory\n");
		return;
	}
	memcpy(&work->data, p, sizeof(work->data));
	spin_lock(&iucv_queue_lock);
	if (p->iptype == 0x01) {
		/* Path pending interrupt. */
		list_add_tail(&work->list, &iucv_work_queue);
		schedule_work(&iucv_work);
	} else {
		/* The other interrupts. */
		list_add_tail(&work->list, &iucv_task_queue);
		tasklet_schedule(&iucv_tasklet);
	}
	spin_unlock(&iucv_queue_lock);
}

/* PM prepare callback: forward to the iucv-based driver, if it has one. */
static int iucv_pm_prepare(struct device *dev)
{
	int rc = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_INFO "iucv_pm_prepare\n");
#endif
	if (dev->driver && dev->driver->pm && dev->driver->pm->prepare)
		rc = dev->driver->pm->prepare(dev);
	return rc;
}

/* PM complete callback: forward to the iucv-based driver, if it has one. */
static void iucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_INFO "iucv_pm_complete\n");
#endif
	if (dev->driver && dev->driver->pm && dev->driver->pm->complete)
		dev->driver->pm->complete(dev);
}

/**
 * iucv_path_table_empty() - determine if iucv path table is empty
 *
 * Returns 0 if there are still iucv pathes defined
 *	   1 if there are no iucv pathes defined
 */
int iucv_path_table_empty(void)
{
	int i;

	for (i = 0; i < iucv_max_pathid; i++) {
		if (iucv_path_table[i])
			return 0;
	}
	return 1;
}

/**
 * iucv_pm_freeze() - Freeze PM callback
 * @dev: iucv-based device
 *
 * disable iucv interrupts
 * invoke callback function of the iucv-based driver
 * shut down iucv, if no iucv-pathes are established anymore
 */
static int iucv_pm_freeze(struct device *dev)
{
	int cpu;
	struct iucv_irq_list *p, *n;
	int rc = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "iucv_pm_freeze\n");
#endif
	if (iucv_pm_state != IUCV_PM_FREEZING) {
		/*
		 * First freeze call: block interrupts on all enabled cpus,
		 * flush the path-pending worker and sever any paths that
		 * are still waiting on the work queue.
		 */
		for_each_cpu(cpu, &iucv_irq_cpumask)
			smp_call_function_single(cpu, iucv_block_cpu_almost,
						 NULL, 1);
		cancel_work_sync(&iucv_work);
		list_for_each_entry_safe(p, n, &iucv_work_queue, list) {
			list_del_init(&p->list);
			iucv_sever_pathid(p->data.ippathid,
					  iucv_error_no_listener);
			kfree(p);
		}
	}
	iucv_pm_state = IUCV_PM_FREEZING;
	if (dev->driver && dev->driver->pm && dev->driver->pm->freeze)
		rc = dev->driver->pm->freeze(dev);
	if (iucv_path_table_empty())
		iucv_disable();
	return rc;
}

/**
 * iucv_pm_thaw() - Thaw PM callback
 * @dev: iucv-based device
 *
 * make iucv ready for use again: allocate path table, declare interrupt buffers
 * and enable iucv interrupts
 * invoke callback function of the iucv-based driver
 */
static int iucv_pm_thaw(struct device *dev)
{
	int rc = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "iucv_pm_thaw\n");
#endif
	iucv_pm_state = IUCV_PM_THAWING;
	if (!iucv_path_table) {
		rc = iucv_enable();
		if (rc)
			goto out;
	}
	if (cpumask_empty(&iucv_irq_cpumask)) {
		if (iucv_nonsmp_handler)
			/* enable interrupts on one cpu */
			iucv_allow_cpu(NULL);
		else
			/* enable interrupts on all cpus */
			iucv_setmask_mp();
	}
	if (dev->driver && dev->driver->pm && dev->driver->pm->thaw)
		rc = dev->driver->pm->thaw(dev);
out:
	return rc;
}

/**
 * iucv_pm_restore() - Restore PM callback
 * @dev: iucv-based device
 *
 * make iucv ready for use again: allocate path table, declare interrupt buffers
 * and enable iucv interrupts
 * invoke callback function of the iucv-based driver
 */
static int iucv_pm_restore(struct device *dev)
{
	int rc = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "iucv_pm_restore %p\n", iucv_path_table);
#endif
	if ((iucv_pm_state != IUCV_PM_RESTORING) && iucv_path_table)
		pr_warning("Suspending Linux did not completely close all IUCV "
			"connections\n");
	iucv_pm_state = IUCV_PM_RESTORING;
	if (cpumask_empty(&iucv_irq_cpumask)) {
		/*
		 * NOTE(review): the iucv_query_maxconn() result is
		 * immediately overwritten by iucv_enable(); its return
		 * code is effectively ignored here — confirm intent.
		 */
		rc = iucv_query_maxconn();
		rc = iucv_enable();
		if (rc)
			goto out;
	}
	if (dev->driver && dev->driver->pm && dev->driver->pm->restore)
		rc = dev->driver->pm->restore(dev);
out:
	return rc;
}

/**
 * iucv_init
 *
 * Allocates and initializes various data structures.
 */
static int __init iucv_init(void)
{
	int rc;
	int cpu;

	if (!MACHINE_IS_VM) {
		/* IUCV is only available when running under z/VM. */
		rc = -EPROTONOSUPPORT;
		goto out;
	}
	rc = iucv_query_maxconn();
	if (rc)
		goto out;
	rc = register_external_interrupt(0x4000, iucv_external_interrupt);
	if (rc)
		goto out;
	iucv_root = root_device_register("iucv");
	if (IS_ERR(iucv_root)) {
		rc = PTR_ERR(iucv_root);
		goto out_int;
	}

	for_each_online_cpu(cpu) {
		/* Note: GFP_DMA used to get memory below 2G */
		iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
				     GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
		if (!iucv_irq_data[cpu]) {
			rc = -ENOMEM;
			goto out_free;
		}

		/* Allocate parameter blocks. */
		iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
				  GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
		if (!iucv_param[cpu]) {
			rc = -ENOMEM;
			goto out_free;
		}

		iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
					GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
		if (!iucv_param_irq[cpu]) {
			rc = -ENOMEM;
			goto out_free;
		}
	}
	rc = register_hotcpu_notifier(&iucv_cpu_notifier);
	if (rc)
		goto out_free;
	rc = register_reboot_notifier(&iucv_reboot_notifier);
	if (rc)
		goto out_cpu;
	/* Convert the error strings to EBCDIC for the hardware interface. */
	ASCEBC(iucv_error_no_listener, 16);
	ASCEBC(iucv_error_no_memory, 16);
	ASCEBC(iucv_error_pathid, 16);
	iucv_available = 1;
	rc = bus_register(&iucv_bus);
	if (rc)
		goto out_reboot;
	return 0;

out_reboot:
	unregister_reboot_notifier(&iucv_reboot_notifier);
out_cpu:
	unregister_hotcpu_notifier(&iucv_cpu_notifier);
out_free:
	for_each_possible_cpu(cpu) {
		kfree(iucv_param_irq[cpu]);
		iucv_param_irq[cpu] = NULL;
		kfree(iucv_param[cpu]);
		iucv_param[cpu] = NULL;
		kfree(iucv_irq_data[cpu]);
		iucv_irq_data[cpu] = NULL;
	}
	root_device_unregister(iucv_root);
out_int:
	unregister_external_interrupt(0x4000, iucv_external_interrupt);
out:
	return rc;
}

/**
 * iucv_exit
 *
 * Frees everything allocated from iucv_init.
*/
static void __exit iucv_exit(void)
{
	struct iucv_irq_list *p, *n;
	int cpu;

	/* Drop any interrupt buffers still queued for the tasklet or the
	 * path-pending work function. */
	spin_lock_irq(&iucv_queue_lock);
	list_for_each_entry_safe(p, n, &iucv_task_queue, list)
		kfree(p);
	list_for_each_entry_safe(p, n, &iucv_work_queue, list)
		kfree(p);
	spin_unlock_irq(&iucv_queue_lock);
	unregister_reboot_notifier(&iucv_reboot_notifier);
	unregister_hotcpu_notifier(&iucv_cpu_notifier);
	/* Release the per-cpu buffers allocated by iucv_init(). */
	for_each_possible_cpu(cpu) {
		kfree(iucv_param_irq[cpu]);
		iucv_param_irq[cpu] = NULL;
		kfree(iucv_param[cpu]);
		iucv_param[cpu] = NULL;
		kfree(iucv_irq_data[cpu]);
		iucv_irq_data[cpu] = NULL;
	}
	root_device_unregister(iucv_root);
	bus_unregister(&iucv_bus);
	unregister_external_interrupt(0x4000, iucv_external_interrupt);
}

subsys_initcall(iucv_init);
module_exit(iucv_exit);

MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
MODULE_LICENSE("GPL");
gpl-2.0
subingangadharan/rpmsg
net/ipv4/netfilter/nf_nat_proto_unknown.c
3398
1523
/* The "unknown" protocol. This is what is used for protocols we * don't understand. It's returned by ip_ct_find_proto(). */ /* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/init.h> #include <linux/netfilter.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_rule.h> #include <net/netfilter/nf_nat_protocol.h> static bool unknown_in_range(const struct nf_conntrack_tuple *tuple, enum nf_nat_manip_type manip_type, const union nf_conntrack_man_proto *min, const union nf_conntrack_man_proto *max) { return true; } static void unknown_unique_tuple(struct nf_conntrack_tuple *tuple, const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { /* Sorry: we can't help you; if it's not unique, we can't frob anything. */ return; } static bool unknown_manip_pkt(struct sk_buff *skb, unsigned int iphdroff, const struct nf_conntrack_tuple *tuple, enum nf_nat_manip_type maniptype) { return true; } const struct nf_nat_protocol nf_nat_unknown_protocol = { /* .me isn't set: getting a ref to this cannot fail. */ .manip_pkt = unknown_manip_pkt, .in_range = unknown_in_range, .unique_tuple = unknown_unique_tuple, };
gpl-2.0
TeamGlade-Devices/android_kernel_sony_msm8930
drivers/dma/ste_dma40.c
3910
87945
/* * Copyright (C) Ericsson AB 2007-2008 * Copyright (C) ST-Ericsson SA 2008-2010 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson * License terms: GNU General Public License (GPL) version 2 */ #include <linux/dma-mapping.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/dmaengine.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/err.h> #include <linux/amba/bus.h> #include <linux/regulator/consumer.h> #include <plat/ste_dma40.h> #include "dmaengine.h" #include "ste_dma40_ll.h" #define D40_NAME "dma40" #define D40_PHY_CHAN -1 /* For masking out/in 2 bit channel positions */ #define D40_CHAN_POS(chan) (2 * (chan / 2)) #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan)) /* Maximum iterations taken before giving up suspending a channel */ #define D40_SUSPEND_MAX_IT 500 /* Milliseconds */ #define DMA40_AUTOSUSPEND_DELAY 100 /* Hardware requirement on LCLA alignment */ #define LCLA_ALIGNMENT 0x40000 /* Max number of links per event group */ #define D40_LCLA_LINK_PER_EVENT_GRP 128 #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP /* Attempts before giving up to trying to get pages that are aligned */ #define MAX_LCLA_ALLOC_ATTEMPTS 256 /* Bit markings for allocation map */ #define D40_ALLOC_FREE (1 << 31) #define D40_ALLOC_PHY (1 << 30) #define D40_ALLOC_LOG_FREE 0 /** * enum 40_command - The different commands and/or statuses. * * @D40_DMA_STOP: DMA channel command STOP or status STOPPED, * @D40_DMA_RUN: The DMA channel is RUNNING of the command RUN. * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible. * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED. 
*/ enum d40_command { D40_DMA_STOP = 0, D40_DMA_RUN = 1, D40_DMA_SUSPEND_REQ = 2, D40_DMA_SUSPENDED = 3 }; /* * enum d40_events - The different Event Enables for the event lines. * * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan. * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan. * @D40_SUSPEND_REQ_EVENTLINE: Requesting for suspending a event line. * @D40_ROUND_EVENTLINE: Status check for event line. */ enum d40_events { D40_DEACTIVATE_EVENTLINE = 0, D40_ACTIVATE_EVENTLINE = 1, D40_SUSPEND_REQ_EVENTLINE = 2, D40_ROUND_EVENTLINE = 3 }; /* * These are the registers that has to be saved and later restored * when the DMA hw is powered off. * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works. */ static u32 d40_backup_regs[] = { D40_DREG_LCPA, D40_DREG_LCLA, D40_DREG_PRMSE, D40_DREG_PRMSO, D40_DREG_PRMOE, D40_DREG_PRMOO, }; #define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs) /* TODO: Check if all these registers have to be saved/restored on dma40 v3 */ static u32 d40_backup_regs_v3[] = { D40_DREG_PSEG1, D40_DREG_PSEG2, D40_DREG_PSEG3, D40_DREG_PSEG4, D40_DREG_PCEG1, D40_DREG_PCEG2, D40_DREG_PCEG3, D40_DREG_PCEG4, D40_DREG_RSEG1, D40_DREG_RSEG2, D40_DREG_RSEG3, D40_DREG_RSEG4, D40_DREG_RCEG1, D40_DREG_RCEG2, D40_DREG_RCEG3, D40_DREG_RCEG4, }; #define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3) static u32 d40_backup_regs_chan[] = { D40_CHAN_REG_SSCFG, D40_CHAN_REG_SSELT, D40_CHAN_REG_SSPTR, D40_CHAN_REG_SSLNK, D40_CHAN_REG_SDCFG, D40_CHAN_REG_SDELT, D40_CHAN_REG_SDPTR, D40_CHAN_REG_SDLNK, }; /** * struct d40_lli_pool - Structure for keeping LLIs in memory * * @base: Pointer to memory area when the pre_alloc_lli's are not large * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if * pre_alloc_lli is used. * @dma_addr: DMA address, if mapped * @size: The size in bytes of the memory at base or the size of pre_alloc_lli. 
* @pre_alloc_lli: Pre allocated area for the most common case of transfers, * one buffer to one buffer. */ struct d40_lli_pool { void *base; int size; dma_addr_t dma_addr; /* Space for dst and src, plus an extra for padding */ u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; }; /** * struct d40_desc - A descriptor is one DMA job. * * @lli_phy: LLI settings for physical channel. Both src and dst= * points into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if * lli_len equals one. * @lli_log: Same as above but for logical channels. * @lli_pool: The pool with two entries pre-allocated. * @lli_len: Number of llis of current descriptor. * @lli_current: Number of transferred llis. * @lcla_alloc: Number of LCLA entries allocated. * @txd: DMA engine struct. Used for among other things for communication * during a transfer. * @node: List entry. * @is_in_client_list: true if the client owns this descriptor. * @cyclic: true if this is a cyclic job * * This descriptor is used for both logical and physical transfers. */ struct d40_desc { /* LLI physical */ struct d40_phy_lli_bidir lli_phy; /* LLI logical */ struct d40_log_lli_bidir lli_log; struct d40_lli_pool lli_pool; int lli_len; int lli_current; int lcla_alloc; struct dma_async_tx_descriptor txd; struct list_head node; bool is_in_client_list; bool cyclic; }; /** * struct d40_lcla_pool - LCLA pool settings and data. * * @base: The virtual address of LCLA. 18 bit aligned. * @base_unaligned: The orignal kmalloc pointer, if kmalloc is used. * This pointer is only there for clean-up on error. * @pages: The number of pages needed for all physical channels. * Only used later for clean-up on error * @lock: Lock to protect the content in this struct. * @alloc_map: big map over which LCLA entry is own by which job. 
*/ struct d40_lcla_pool { void *base; dma_addr_t dma_addr; void *base_unaligned; int pages; spinlock_t lock; struct d40_desc **alloc_map; }; /** * struct d40_phy_res - struct for handling eventlines mapped to physical * channels. * * @lock: A lock protection this entity. * @reserved: True if used by secure world or otherwise. * @num: The physical channel number of this entity. * @allocated_src: Bit mapped to show which src event line's are mapped to * this physical channel. Can also be free or physically allocated. * @allocated_dst: Same as for src but is dst. * allocated_dst and allocated_src uses the D40_ALLOC* defines as well as * event line number. */ struct d40_phy_res { spinlock_t lock; bool reserved; int num; u32 allocated_src; u32 allocated_dst; }; struct d40_base; /** * struct d40_chan - Struct that describes a channel. * * @lock: A spinlock to protect this struct. * @log_num: The logical number, if any of this channel. * @pending_tx: The number of pending transfers. Used between interrupt handler * and tasklet. * @busy: Set to true when transfer is ongoing on this channel. * @phy_chan: Pointer to physical channel which this instance runs on. If this * point is NULL, then the channel is not allocated. * @chan: DMA engine handle. * @tasklet: Tasklet that gets scheduled from interrupt context to complete a * transfer and call client callback. * @client: Cliented owned descriptor list. * @pending_queue: Submitted jobs, to be issued by issue_pending() * @active: Active descriptor. * @queue: Queued jobs. * @prepare_queue: Prepared jobs. * @dma_cfg: The client configuration of this dma channel. * @configured: whether the dma_cfg configuration is valid * @base: Pointer to the device instance struct. * @src_def_cfg: Default cfg register setting for src. * @dst_def_cfg: Default cfg register setting for dst. * @log_def: Default logical channel settings. * @lcpa: Pointer to dst and src lcpa settings. * @runtime_addr: runtime configured address. 
* @runtime_direction: runtime configured direction. * * This struct can either "be" a logical or a physical channel. */ struct d40_chan { spinlock_t lock; int log_num; int pending_tx; bool busy; struct d40_phy_res *phy_chan; struct dma_chan chan; struct tasklet_struct tasklet; struct list_head client; struct list_head pending_queue; struct list_head active; struct list_head queue; struct list_head prepare_queue; struct stedma40_chan_cfg dma_cfg; bool configured; struct d40_base *base; /* Default register configurations */ u32 src_def_cfg; u32 dst_def_cfg; struct d40_def_lcsp log_def; struct d40_log_lli_full *lcpa; /* Runtime reconfiguration */ dma_addr_t runtime_addr; enum dma_transfer_direction runtime_direction; }; /** * struct d40_base - The big global struct, one for each probe'd instance. * * @interrupt_lock: Lock used to make sure one interrupt is handle a time. * @execmd_lock: Lock for execute command usage since several channels share * the same physical register. * @dev: The device structure. * @virtbase: The virtual base address of the DMA's register. * @rev: silicon revision detected. * @clk: Pointer to the DMA clock structure. * @phy_start: Physical memory start of the DMA registers. * @phy_size: Size of the DMA register map. * @irq: The IRQ number. * @num_phy_chans: The number of physical channels. Read from HW. This * is the number of available channels for this driver, not counting "Secure * mode" allocated physical channels. * @num_log_chans: The number of logical channels. Calculated from * num_phy_chans. * @dma_both: dma_device channels that can do both memcpy and slave transfers. * @dma_slave: dma_device channels that can do only do slave transfers. * @dma_memcpy: dma_device channels that can do only do memcpy transfers. * @phy_chans: Room for all possible physical channels in system. * @log_chans: Room for all possible logical channels in system. * @lookup_log_chans: Used to map interrupt number to logical channel. Points * to log_chans entries. 
* @lookup_phy_chans: Used to map interrupt number to physical channel. Points * to phy_chans entries. * @plat_data: Pointer to provided platform_data which is the driver * configuration. * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla. * @phy_res: Vector containing all physical channels. * @lcla_pool: lcla pool settings and data. * @lcpa_base: The virtual mapped address of LCPA. * @phy_lcpa: The physical address of the LCPA. * @lcpa_size: The size of the LCPA area. * @desc_slab: cache for descriptors. * @reg_val_backup: Here the values of some hardware registers are stored * before the DMA is powered off. They are restored when the power is back on. * @reg_val_backup_v3: Backup of registers that only exits on dma40 v3 and * later. * @reg_val_backup_chan: Backup data for standard channel parameter registers. * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off. * @initialized: true if the dma has been initialized */ struct d40_base { spinlock_t interrupt_lock; spinlock_t execmd_lock; struct device *dev; void __iomem *virtbase; u8 rev:4; struct clk *clk; phys_addr_t phy_start; resource_size_t phy_size; int irq; int num_phy_chans; int num_log_chans; struct dma_device dma_both; struct dma_device dma_slave; struct dma_device dma_memcpy; struct d40_chan *phy_chans; struct d40_chan *log_chans; struct d40_chan **lookup_log_chans; struct d40_chan **lookup_phy_chans; struct stedma40_platform_data *plat_data; struct regulator *lcpa_regulator; /* Physical half channels */ struct d40_phy_res *phy_res; struct d40_lcla_pool lcla_pool; void *lcpa_base; dma_addr_t phy_lcpa; resource_size_t lcpa_size; struct kmem_cache *desc_slab; u32 reg_val_backup[BACKUP_REGS_SZ]; u32 reg_val_backup_v3[BACKUP_REGS_SZ_V3]; u32 *reg_val_backup_chan; u16 gcc_pwr_off_mask; bool initialized; }; /** * struct d40_interrupt_lookup - lookup table for interrupt handler * * @src: Interrupt mask register. * @clr: Interrupt clear register. 
* @is_error: true if this is an error interrupt. * @offset: start delta in the lookup_log_chans in d40_base. If equals to * D40_PHY_CHAN, the lookup_phy_chans shall be used instead. */ struct d40_interrupt_lookup { u32 src; u32 clr; bool is_error; int offset; }; /** * struct d40_reg_val - simple lookup struct * * @reg: The register. * @val: The value that belongs to the register in reg. */ struct d40_reg_val { unsigned int reg; unsigned int val; }; static struct device *chan2dev(struct d40_chan *d40c) { return &d40c->chan.dev->device; } static bool chan_is_physical(struct d40_chan *chan) { return chan->log_num == D40_PHY_CHAN; } static bool chan_is_logical(struct d40_chan *chan) { return !chan_is_physical(chan); } static void __iomem *chan_base(struct d40_chan *chan) { return chan->base->virtbase + D40_DREG_PCBASE + chan->phy_chan->num * D40_DREG_PCDELTA; } #define d40_err(dev, format, arg...) \ dev_err(dev, "[%s] " format, __func__, ## arg) #define chan_err(d40c, format, arg...) \ d40_err(chan2dev(d40c), format, ## arg) static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d, int lli_len) { bool is_log = chan_is_logical(d40c); u32 align; void *base; if (is_log) align = sizeof(struct d40_log_lli); else align = sizeof(struct d40_phy_lli); if (lli_len == 1) { base = d40d->lli_pool.pre_alloc_lli; d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); d40d->lli_pool.base = NULL; } else { d40d->lli_pool.size = lli_len * 2 * align; base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT); d40d->lli_pool.base = base; if (d40d->lli_pool.base == NULL) return -ENOMEM; } if (is_log) { d40d->lli_log.src = PTR_ALIGN(base, align); d40d->lli_log.dst = d40d->lli_log.src + lli_len; d40d->lli_pool.dma_addr = 0; } else { d40d->lli_phy.src = PTR_ALIGN(base, align); d40d->lli_phy.dst = d40d->lli_phy.src + lli_len; d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev, d40d->lli_phy.src, d40d->lli_pool.size, DMA_TO_DEVICE); if 
(dma_mapping_error(d40c->base->dev, d40d->lli_pool.dma_addr)) { kfree(d40d->lli_pool.base); d40d->lli_pool.base = NULL; d40d->lli_pool.dma_addr = 0; return -ENOMEM; } } return 0; } static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d) { if (d40d->lli_pool.dma_addr) dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr, d40d->lli_pool.size, DMA_TO_DEVICE); kfree(d40d->lli_pool.base); d40d->lli_pool.base = NULL; d40d->lli_pool.size = 0; d40d->lli_log.src = NULL; d40d->lli_log.dst = NULL; d40d->lli_phy.src = NULL; d40d->lli_phy.dst = NULL; } static int d40_lcla_alloc_one(struct d40_chan *d40c, struct d40_desc *d40d) { unsigned long flags; int i; int ret = -EINVAL; int p; spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP; /* * Allocate both src and dst at the same time, therefore the half * start on 1 since 0 can't be used since zero is used as end marker. */ for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { if (!d40c->base->lcla_pool.alloc_map[p + i]) { d40c->base->lcla_pool.alloc_map[p + i] = d40d; d40d->lcla_alloc++; ret = i; break; } } spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); return ret; } static int d40_lcla_free_all(struct d40_chan *d40c, struct d40_desc *d40d) { unsigned long flags; int i; int ret = -EINVAL; if (chan_is_physical(d40c)) return 0; spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) { d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL; d40d->lcla_alloc--; if (d40d->lcla_alloc == 0) { ret = 0; break; } } } spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); return ret; } static void d40_desc_remove(struct d40_desc *d40d) { list_del(&d40d->node); } static struct d40_desc *d40_desc_get(struct d40_chan *d40c) { struct d40_desc *desc = 
NULL; if (!list_empty(&d40c->client)) { struct d40_desc *d; struct d40_desc *_d; list_for_each_entry_safe(d, _d, &d40c->client, node) { if (async_tx_test_ack(&d->txd)) { d40_desc_remove(d); desc = d; memset(desc, 0, sizeof(*desc)); break; } } } if (!desc) desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT); if (desc) INIT_LIST_HEAD(&desc->node); return desc; } static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) { d40_pool_lli_free(d40c, d40d); d40_lcla_free_all(d40c, d40d); kmem_cache_free(d40c->base->desc_slab, d40d); } static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc) { list_add_tail(&desc->node, &d40c->active); } static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc) { struct d40_phy_lli *lli_dst = desc->lli_phy.dst; struct d40_phy_lli *lli_src = desc->lli_phy.src; void __iomem *base = chan_base(chan); writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG); writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT); writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR); writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK); writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG); writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT); writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR); writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK); } static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) { struct d40_lcla_pool *pool = &chan->base->lcla_pool; struct d40_log_lli_bidir *lli = &desc->lli_log; int lli_current = desc->lli_current; int lli_len = desc->lli_len; bool cyclic = desc->cyclic; int curr_lcla = -EINVAL; int first_lcla = 0; bool use_esram_lcla = chan->base->plat_data->use_esram_lcla; bool linkback; /* * We may have partially running cyclic transfers, in case we did't get * enough LCLA entries. 
*/ linkback = cyclic && lli_current == 0; /* * For linkback, we need one LCLA even with only one link, because we * can't link back to the one in LCPA space */ if (linkback || (lli_len - lli_current > 1)) { curr_lcla = d40_lcla_alloc_one(chan, desc); first_lcla = curr_lcla; } /* * For linkback, we normally load the LCPA in the loop since we need to * link it to the second LCLA and not the first. However, if we * couldn't even get a first LCLA, then we have to run in LCPA and * reload manually. */ if (!linkback || curr_lcla == -EINVAL) { unsigned int flags = 0; if (curr_lcla == -EINVAL) flags |= LLI_TERM_INT; d40_log_lli_lcpa_write(chan->lcpa, &lli->dst[lli_current], &lli->src[lli_current], curr_lcla, flags); lli_current++; } if (curr_lcla < 0) goto out; for (; lli_current < lli_len; lli_current++) { unsigned int lcla_offset = chan->phy_chan->num * 1024 + 8 * curr_lcla * 2; struct d40_log_lli *lcla = pool->base + lcla_offset; unsigned int flags = 0; int next_lcla; if (lli_current + 1 < lli_len) next_lcla = d40_lcla_alloc_one(chan, desc); else next_lcla = linkback ? first_lcla : -EINVAL; if (cyclic || next_lcla == -EINVAL) flags |= LLI_TERM_INT; if (linkback && curr_lcla == first_lcla) { /* First link goes in both LCPA and LCLA */ d40_log_lli_lcpa_write(chan->lcpa, &lli->dst[lli_current], &lli->src[lli_current], next_lcla, flags); } /* * One unused LCLA in the cyclic case if the very first * next_lcla fails... 
*/ d40_log_lli_lcla_write(lcla, &lli->dst[lli_current], &lli->src[lli_current], next_lcla, flags); /* * Cache maintenance is not needed if lcla is * mapped in esram */ if (!use_esram_lcla) { dma_sync_single_range_for_device(chan->base->dev, pool->dma_addr, lcla_offset, 2 * sizeof(struct d40_log_lli), DMA_TO_DEVICE); } curr_lcla = next_lcla; if (curr_lcla == -EINVAL || curr_lcla == first_lcla) { lli_current++; break; } } out: desc->lli_current = lli_current; } static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) { if (chan_is_physical(d40c)) { d40_phy_lli_load(d40c, d40d); d40d->lli_current = d40d->lli_len; } else d40_log_lli_to_lcxa(d40c, d40d); } static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) { struct d40_desc *d; if (list_empty(&d40c->active)) return NULL; d = list_first_entry(&d40c->active, struct d40_desc, node); return d; } /* remove desc from current queue and add it to the pending_queue */ static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) { d40_desc_remove(desc); desc->is_in_client_list = false; list_add_tail(&desc->node, &d40c->pending_queue); } static struct d40_desc *d40_first_pending(struct d40_chan *d40c) { struct d40_desc *d; if (list_empty(&d40c->pending_queue)) return NULL; d = list_first_entry(&d40c->pending_queue, struct d40_desc, node); return d; } static struct d40_desc *d40_first_queued(struct d40_chan *d40c) { struct d40_desc *d; if (list_empty(&d40c->queue)) return NULL; d = list_first_entry(&d40c->queue, struct d40_desc, node); return d; } static int d40_psize_2_burst_size(bool is_log, int psize) { if (is_log) { if (psize == STEDMA40_PSIZE_LOG_1) return 1; } else { if (psize == STEDMA40_PSIZE_PHY_1) return 1; } return 2 << psize; } /* * The dma only supports transmitting packages up to * STEDMA40_MAX_SEG_SIZE << data_width. 
Calculate the total number of DMA elements (LLI entries) required to send
 * the entire sg list, given the source and destination data widths.
 */
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
	int dmalen;
	u32 max_w = max(data_width1, data_width2);
	u32 min_w = min(data_width1, data_width2);
	/* Largest segment size that is a multiple of the wider width */
	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);

	/* ALIGN rounds up; step back one element if it overshot the HW max */
	if (seg_max > STEDMA40_MAX_SEG_SIZE)
		seg_max -= (1 << max_w);

	/* The transfer size must be a multiple of the wider data width */
	if (!IS_ALIGNED(size, 1 << max_w))
		return -EINVAL;

	if (size <= seg_max)
		dmalen = 1;
	else {
		/* Number of seg_max-sized chunks, rounded up */
		dmalen = size / seg_max;
		if (dmalen * seg_max < size)
			dmalen++;
	}
	return dmalen;
}

/*
 * Sum of DMA elements needed for every entry of an sg list.
 * Returns a negative errno if any entry has an unaligned size.
 */
static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
			   u32 data_width1, u32 data_width2)
{
	struct scatterlist *sg;
	int i;
	int len = 0;
	int ret;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = d40_size_2_dmalen(sg_dma_len(sg),
					data_width1, data_width2);
		if (ret < 0)
			return ret;
		len += ret;
	}
	return len;
}

#ifdef CONFIG_PM
/*
 * Copy @num registers at @baseaddr + regaddr[i] to/from @backup.
 * @save selects direction: true = registers -> backup, false = restore.
 */
static void dma40_backup(void __iomem *baseaddr, u32 *backup,
			 u32 *regaddr, int num, bool save)
{
	int i;

	for (i = 0; i < num; i++) {
		void __iomem *addr = baseaddr + regaddr[i];

		if (save)
			backup[i] = readl_relaxed(addr);
		else
			writel_relaxed(backup[i], addr);
	}
}

/*
 * Save or restore all controller registers around a power-down.
 * Skips reserved physical channels (used by the secure side).
 */
static void d40_save_restore_registers(struct d40_base *base, bool save)
{
	int i;

	/* Save/Restore channel specific registers */
	for (i = 0; i < base->num_phy_chans; i++) {
		void __iomem *addr;
		int idx;

		if (base->phy_res[i].reserved)
			continue;

		addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
		idx = i * ARRAY_SIZE(d40_backup_regs_chan);

		dma40_backup(addr, &base->reg_val_backup_chan[idx],
			     d40_backup_regs_chan,
			     ARRAY_SIZE(d40_backup_regs_chan),
			     save);
	}

	/* Save/Restore global registers */
	dma40_backup(base->virtbase, base->reg_val_backup,
		     d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
		     save);

	/* Save/Restore registers only existing on dma40 v3 and later */
	if (base->rev >= 3)
		dma40_backup(base->virtbase, base->reg_val_backup_v3,
			     d40_backup_regs_v3,
			     ARRAY_SIZE(d40_backup_regs_v3),
			     save);
}
#else
static void d40_save_restore_registers(struct d40_base *base, bool save)
{
}
#endif

/*
 * Execute @command on a physical channel by writing the ACTIVE/ACTIVO
 * register. A STOP is always preceded by a SUSPEND_REQ (recursive call),
 * and SUSPEND_REQ polls until the channel reports suspended/stopped.
 * Returns 0 or -EBUSY if the channel refuses to suspend.
 */
static int __d40_execute_command_phy(struct d40_chan *d40c,
				     enum d40_command command)
{
	u32 status;
	int i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	if (command == D40_DMA_STOP) {
		ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	/* Even channels live in ACTIVE, odd channels in ACTIVO */
	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		/* Already suspended/stopped: nothing to do */
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	/* Writing 1s to the other channels' bit positions leaves them alone */
	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			chan_err(d40c,
				"unable to suspend the chl %d (log: %d) status %x\n",
				d40c->phy_chan->num, d40c->log_num,
				status);
			dump_stack();
			ret = -EBUSY;
		}

	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}

/*
 * Release every descriptor owned by the channel: active, queued, pending,
 * client-owned and prepared-but-not-submitted, then clear pending_tx.
 * Caller must hold the channel lock and have stopped the hardware.
 */
static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	struct d40_desc *_d;

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release pending descriptors */
	while ((d40d = d40_first_pending(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		}

	/* Release descriptors in prepare queue */
	if (!list_empty(&d40c->prepare_queue))
		list_for_each_entry_safe(d40d, _d,
					 &d40c->prepare_queue, node) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		}

	d40c->pending_tx = 0;
}

/*
 * Drive one event line of a logical channel into the requested state by
 * writing the SSLNK/SDLNK register at @reg. Suspend polls for completion;
 * activate retries because the HW can miss the enable when both src and
 * dst event lines are active on the same logical channel.
 */
static void __d40_config_set_event(struct d40_chan *d40c,
				   enum d40_events event_type, u32 event,
				   int reg)
{
	void __iomem *addr = chan_base(d40c) + reg;
	int tries;
	u32 status;

	switch (event_type) {

	case D40_DEACTIVATE_EVENTLINE:

		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);
		break;

	case D40_SUSPEND_REQ_EVENTLINE:
		status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
			D40_EVENTLINE_POS(event);

		/* Already down or suspend already requested: done */
		if (status == D40_DEACTIVATE_EVENTLINE ||
		    status == D40_SUSPEND_REQ_EVENTLINE)
			break;

		writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);

		for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {

			status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
				D40_EVENTLINE_POS(event);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DEACTIVATE_EVENTLINE)
				break;
		}

		if (tries == D40_SUSPEND_MAX_IT) {
			/*
			 * NOTE(review): adjacent string literals concatenate
			 * without a space -> "(log: %d)status"; harmless but
			 * worth fixing upstream.
			 */
			chan_err(d40c,
				"unable to stop the event_line chl %d (log: %d)"
				"status %x\n", d40c->phy_chan->num,
				d40c->log_num, status);
		}
		break;

	case D40_ACTIVATE_EVENTLINE:
	/*
	 * The hardware sometimes doesn't register the enable when src and dst
	 * event lines are active on the same logical channel. Retry to ensure
	 * it does. Usually only one retry is sufficient.
	 */
		tries = 100;
		while (--tries) {
			writel((D40_ACTIVATE_EVENTLINE <<
				D40_EVENTLINE_POS(event)) |
			       ~D40_EVENTLINE_MASK(event), addr);

			if (readl(addr) & D40_EVENTLINE_MASK(event))
				break;
		}

		if (tries != 99)
			dev_dbg(chan2dev(d40c),
				"[%s] workaround enable S%cLNK (%d tries)\n",
				__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
				100 - tries);

		WARN_ON(!tries);
		break;

	case D40_ROUND_EVENTLINE:
		BUG();
		break;

	}
}

/*
 * Apply @event_type to the event line(s) implied by the channel's
 * transfer direction (src line, dst line, or both for periph-to-periph).
 */
static void d40_config_set_event(struct d40_chan *d40c,
				 enum d40_events event_type)
{
	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

		__d40_config_set_event(d40c, event_type, event,
				       D40_CHAN_REG_SSLNK);
	}

	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

		__d40_config_set_event(d40c, event_type, event,
				       D40_CHAN_REG_SDLNK);
	}
}

/* Non-zero iff any src or dst event line is still linked on the channel */
static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	u32 val;

	val = readl(chanbase + D40_CHAN_REG_SSLNK);
	val |= readl(chanbase + D40_CHAN_REG_SDLNK);

	return val;
}

/*
 * Execute @command on a logical channel via its event lines; falls
 * through to the physical-channel command when the whole physical
 * resource must change state (RUN, or STOP with no events left).
 */
static int __d40_execute_command_log(struct d40_chan *d40c,
				     enum d40_command command)
{
	unsigned long flags;
	int ret = 0;
	u32 active_status;
	void __iomem *active_reg;

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	switch (command) {
	case D40_DMA_STOP:
	case D40_DMA_SUSPEND_REQ:

		active_status = (readl(active_reg) &
				 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (active_status == D40_DMA_RUN)
			d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
		else
			d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);

		/* Last user gone: stop the physical channel too */
		if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
			ret = __d40_execute_command_phy(d40c, command);

		break;

	case D40_DMA_RUN:

		d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
		ret = __d40_execute_command_phy(d40c, command);
		break;

	case D40_DMA_SUSPENDED:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
	return ret;
}

/* Dispatch a channel command to the logical or physical implementation */
static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	if (chan_is_logical(d40c))
		return __d40_execute_command_log(d40c, command);
	else
		return __d40_execute_command_phy(d40c, command);
}

/* Map the configured mode option to its PRMO register field value */
static u32 d40_get_prmo(struct d40_chan *d40c)
{
	static const unsigned int phy_map[] = {
		[STEDMA40_PCHAN_BASIC_MODE]
			= D40_DREG_PRMO_PCHAN_BASIC,
		[STEDMA40_PCHAN_MODULO_MODE]
			= D40_DREG_PRMO_PCHAN_MODULO,
		[STEDMA40_PCHAN_DOUBLE_DST_MODE]
			= D40_DREG_PRMO_PCHAN_DOUBLE_DST,
	};
	static const unsigned int log_map[] = {
		[STEDMA40_LCHAN_SRC_PHY_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
		[STEDMA40_LCHAN_SRC_LOG_DST_PHY]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
		[STEDMA40_LCHAN_SRC_LOG_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
	};

	if (chan_is_physical(d40c))
		return phy_map[d40c->dma_cfg.mode_opt];
	else
		return log_map[d40c->dma_cfg.mode_opt];
}

/*
 * Write the channel's static configuration to the controller: mode
 * (logical/physical), mode option, and for logical channels the default
 * CFG values, the LIDX for LCLA and cleared LNK registers.
 */
static void d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(chan_is_logical(d40c)) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (chan_is_logical(d40c)) {
		int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
			& D40_SREG_ELEM_LOG_LIDX_MASK;
		void __iomem *chanbase = chan_base(d40c);

		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);

		/* Set LIDX for lcla */
		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
		writel(lidx, chanbase + D40_CHAN_REG_SDELT);

		/* Clear LNK which will be used by d40_chan_has_events() */
		writel(0, chanbase + D40_CHAN_REG_SSLNK);
		writel(0, chanbase + D40_CHAN_REG_SDLNK);
	}
}

/*
 * Bytes remaining in the current transfer, computed from the destination
 * element counter times the destination element width.
 */
static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (chan_is_logical(d40c))
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else {
		u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
		num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
			  >> D40_SREG_ELEM_PHY_ECNT_POS;
	}

	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}

/* True if the destination side still has a linked (chained) descriptor */
static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (chan_is_logical(d40c))
		is_link = readl(&d40c->lcpa->lcsp3) &  D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK) &
			  D40_SREG_LNK_PHYS_LNK_MASK;

	return is_link;
}

/*
 * dmaengine pause: suspend the channel if it is busy. Keeps the device
 * powered only for the duration of the command (runtime PM get/put pair).
 */
static int d40_pause(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	pm_runtime_get_sync(d40c->base->dev);
	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

/*
 * dmaengine resume: restart the channel, but only if there is actually
 * something left to do (residue or a linked descriptor).
 */
static int d40_resume(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	spin_lock_irqsave(&d40c->lock, flags);
	pm_runtime_get_sync(d40c->base->dev);

	/* If bytes left to transfer or linked tx resume job */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c))
		res = d40_channel_execute_command(d40c, D40_DMA_RUN);

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

/*
 * dmaengine tx_submit hook: assign a cookie and move the descriptor to
 * the channel's pending queue under the channel lock.
 */
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&d40c->lock, flags);
	cookie = dma_cookie_assign(tx);
	d40_desc_queue(d40c, d40d);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return cookie;
}

/* Kick the channel: issue a RUN command */
static int d40_start(struct d40_chan *d40c)
{
	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}

/*
 * Pop the first queued descriptor (if any), load it onto the hardware
 * and start it. Takes a runtime-PM reference on the idle->busy edge.
 * Returns the started descriptor or NULL.
 */
static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		if (!d40c->busy) {
			d40c->busy = true;
			pm_runtime_get_sync(d40c->base->dev);
		}

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}

/*
 * Terminal-count handler, called from interrupt context with the channel
 * lock held: advance partially-loaded descriptors, start the next queued
 * job, and schedule the completion tasklet.
 */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->cyclic) {
		/*
		 * If this was a paritially loaded list, we need to reloaded
		 * it, and only when the list is completed.  We need to check
		 * for done because the interrupt will hit for every link, and
		 * not just the last one.
		 */
		if (d40d->lli_current < d40d->lli_len
		    && !d40_tx_is_linked(d40c)
		    && !d40_residue(d40c)) {
			d40_lcla_free_all(d40c, d40d);
			d40_desc_load(d40c, d40d);
			(void) d40_start(d40c);

			/* Wrap around for the next cycle */
			if (d40d->lli_current == d40d->lli_len)
				d40d->lli_current = 0;
		}
	} else {
		d40_lcla_free_all(d40c, d40d);

		if (d40d->lli_current < d40d->lli_len) {
			d40_desc_load(d40c, d40d);
			/* Start dma job */
			(void) d40_start(d40c);
			return;
		}

		/* Nothing left to run: drop the busy PM reference */
		if (d40_queue_start(d40c) == NULL)
			d40c->busy = false;
		pm_runtime_mark_last_busy(d40c->base->dev);
		pm_runtime_put_autosuspend(d40c->base->dev);
	}

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);

}

/*
 * Completion tasklet: complete the cookie, invoke the client callback
 * outside the lock, and recycle or park the finished descriptor.
 */
static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);
	if (d40d == NULL)
		goto err;

	/* Cyclic descriptors never complete their cookie */
	if (!d40d->cyclic)
		dma_cookie_complete(&d40d->txd);

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs to return to the client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d->txd.callback;
	callback_param = d40d->txd.callback_param;

	if (!d40d->cyclic) {
		if (async_tx_test_ack(&d40d->txd)) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		} else {
			/* Not acked yet: park on the client list until it is */
			if (!d40d->is_in_client_list) {
				d40_desc_remove(d40d);
				d40_lcla_free_all(d40c, d40d);
				list_add_tail(&d40d->node, &d40c->client);
				d40d->is_in_client_list = true;
			}
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	/* Callback runs without the channel lock held */
	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
		callback(callback_param);

	return;

err:
	/* Rescue manoeuvre if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}

/*
 * IRQ handler: scan all logical and physical interrupt status registers,
 * ack each set bit and dispatch to the owning channel (TC -> handler,
 * error -> log).
 */
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	static const struct d40_interrupt_lookup il[] = {
		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
		{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
		{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
	};

	int i;
	u32 regs[ARRAY_SIZE(il)];
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < ARRAY_SIZE(il); i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {

		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		/* ACK interrupt */
		writel(1 << idx, base->virtbase + il[row].clr);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];
		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
				chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}

/*
 * Sanity-check a client-supplied channel configuration: direction,
 * device addresses, event groups, and the HW constraint that
 * src (burst x width) == dst (burst x width). Returns 0 or -EINVAL.
 */
static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;

	if (!conf->dir) {
		chan_err(d40c, "Invalid direction.\n");
		res = -EINVAL;
	}

	if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
	    d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {
		chan_err(d40c, "Invalid TX channel address (%d)\n",
			 conf->dst_dev_type);
		res = -EINVAL;
	}

	if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
	    d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {
		chan_err(d40c, "Invalid RX channel address (%d)\n",
			conf->src_dev_type);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
		chan_err(d40c, "Invalid dst\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
		chan_err(d40c, "Invalid src\n");
		res = -EINVAL;
	}

	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
		chan_err(d40c, "No event line\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
	    (src_event_group != dst_event_group)) {
		chan_err(d40c, "Invalid event group\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		chan_err(d40c, "periph to periph not supported\n");
		res = -EINVAL;
	}

	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
	    (1 << conf->src_info.data_width) !=
	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
	    (1 << conf->dst_info.data_width)) {
		/*
		 * The DMAC hardware only supports
		 * src (burst x width) == dst (burst x width)
		 */
		chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
		res = -EINVAL;
	}

	return res;
}

/*
 * Try to claim (part of) a physical channel: whole channel for physical
 * mode, a single event line for logical mode. *first_user tells the
 * caller whether this was the first allocation on the resource.
 * Returns true on success.
 */
static bool d40_alloc_mask_set(struct d40_phy_res *phy,
			       bool is_src, int log_event_line, bool is_log,
			       bool *first_user)
{
	unsigned long flags;
	spin_lock_irqsave(&phy->lock, flags);

	*first_user = ((phy->allocated_src | phy->allocated_dst)
			== D40_ALLOC_FREE);

	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
}

/*
 * Release an allocation made by d40_alloc_mask_set(). log_event_line == 0
 * means a physical (full-channel) allocation. Returns true when the
 * physical resource is completely free afterwards.
 */
static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
			       int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}

/*
 * Find and claim a physical channel (or an event line on one) matching
 * the channel's configuration, then register it in the lookup tables.
 * Logical allocations spread across event-group-local phy channels;
 * src scans forward and dst scans backward to balance load.
 */
static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
{
	int dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	bool is_src;
	bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;

	phys = d40c->base->phy_res;

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		dev_type = d40c->dma_cfg.src_dev_type;
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		dev_type = d40c->dma_cfg.dst_dev_type;
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			for (i = 0; i < d40c->base->num_phy_chans; i++) {

				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log,
						       first_phy_user))
					goto found_phy;
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j  + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log,
							       first_phy_user))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}
	if (dev_type == -1)
		return -EINVAL;

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;

		if (d40c->dma_cfg.use_fixed_channel) {
			i = d40c->dma_cfg.phy_channel;

			if ((i != phy_num) && (i != phy_num + 1)) {
				dev_err(chan2dev(d40c),
					"invalid fixed phy channel %d\n", i);
				return -EINVAL;
			}

			if (d40_alloc_mask_set(&phys[i], is_src, event_line,
					       is_log, first_phy_user))
				goto found_log;

			dev_err(chan2dev(d40c),
				"could not allocate fixed phy channel %d\n", i);
			return -EINVAL;
		}

		/*
		 * Spread logical channels across all available physical rather
		 * than pack every logical channel at the first available phy
		 * channels.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log,
						       first_phy_user))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log,
						       first_phy_user))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:

	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;

}

/*
 * Install the platform default memcpy configuration (logical or physical
 * depending on the channel's capability mask).
 */
static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
			memcpy[d40c->chan.chan_id];

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
	} else {
		chan_err(d40c, "No memcpy\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * Stop the channel, free all its descriptors, release its physical
 * resource/event line and drop the busy runtime-PM reference.
 */
static int d40_free_dma(struct d40_chan *d40c)
{

	int res = 0;
	u32 event;
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;

	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	if (phy == NULL) {
		chan_err(d40c, "phy == null\n");
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		chan_err(d40c, "channel already free\n");
		return -EINVAL;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		is_src = false;
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		is_src = true;
	} else {
		chan_err(d40c, "Unknown direction\n");
		return -EINVAL;
	}

	pm_runtime_get_sync(d40c->base->dev);
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		chan_err(d40c, "stop failed\n");
		goto out;
	}

	d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);

	if (chan_is_logical(d40c))
		d40c->base->lookup_log_chans[d40c->log_num] = NULL;
	else
		d40c->base->lookup_phy_chans[phy->num] = NULL;

	/* Busy channels hold an extra PM reference: drop it */
	if (d40c->busy) {
		pm_runtime_mark_last_busy(d40c->base->dev);
		pm_runtime_put_autosuspend(d40c->base->dev);
	}

	d40c->busy = false;
	d40c->phy_chan = NULL;
	d40c->configured = false;
out:

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	return res;
}

/*
 * True if the channel (physical ACTIVE state or the relevant logical
 * event line) is currently not running.
 */
static bool d40_is_paused(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event;

	spin_lock_irqsave(&d40c->lock, flags);

	if (chan_is_physical(d40c)) {
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		status = readl(chanbase + D40_CHAN_REG_SDLNK);
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		status = readl(chanbase + D40_CHAN_REG_SSLNK);
	} else {
		chan_err(d40c, "Unknown direction\n");
		goto _exit;
	}

	status = (status & D40_EVENTLINE_MASK(event)) >>
		D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;
_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;

}

/* Residue of the current transfer in bytes, taken under the channel lock */
static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}

/*
 * Fill a descriptor's logical-channel LLI arrays from the src and dst
 * sg lists.
 * NOTE(review): the return value of the first d40_log_sg_to_lli() call
 * is overwritten by the second, so a src-side error is silently lost --
 * verify against upstream before relying on the error path.
 */
static int
d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	int ret;

	ret = d40_log_sg_to_lli(sg_src, sg_len,
				src_dev_addr,
				desc->lli_log.src,
				chan->log_def.lcsp1,
				src_info->data_width,
				dst_info->data_width);

	ret = d40_log_sg_to_lli(sg_dst, sg_len,
				dst_dev_addr,
				desc->lli_log.dst,
				chan->log_def.lcsp3,
				dst_info->data_width,
				src_info->data_width);

	return ret < 0 ? ret : 0;
}

/*
 * Fill a descriptor's physical-channel LLI arrays and flush them to
 * memory for the DMA engine.
 * NOTE(review): same pattern as d40_prep_sg_log -- the src-side return
 * value is discarded.
 */
static int
d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	unsigned long flags = 0;
	int ret;

	if (desc->cyclic)
		flags |= LLI_CYCLIC | LLI_TERM_INT;

	ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
				desc->lli_phy.src,
				virt_to_phys(desc->lli_phy.src),
				chan->src_def_cfg,
				src_info, dst_info, flags);

	ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
				desc->lli_phy.dst,
				virt_to_phys(desc->lli_phy.dst),
				chan->dst_def_cfg,
				dst_info, src_info, flags);

	dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
				   desc->lli_pool.size, DMA_TO_DEVICE);

	return ret < 0 ? ret : 0;
}

/*
 * Allocate and initialise a descriptor for an sg transfer: compute the
 * LLI count, allocate the LLI pool and set up the async_tx descriptor.
 * Returns NULL on any failure.
 */
static struct d40_desc *
d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
	      unsigned int sg_len, unsigned long dma_flags)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct d40_desc *desc;
	int ret;

	desc = d40_desc_get(chan);
	if (!desc)
		return NULL;

	/* Negative lli_len here carries the -EINVAL from d40_sg_2_dmalen */
	desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
					cfg->dst_info.data_width);
	if (desc->lli_len < 0) {
		chan_err(chan, "Unaligned size\n");
		goto err;
	}

	ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
	if (ret < 0) {
		chan_err(chan, "Could not allocate lli\n");
		goto err;
	}

	desc->lli_current = 0;
	desc->txd.flags = dma_flags;
	desc->txd.tx_submit = d40_tx_submit;

	dma_async_tx_descriptor_init(&desc->txd, &chan->chan);

	return desc;

err:
	d40_desc_free(chan, desc);
	return NULL;
}

/*
 * Device-side bus address for a slave transfer: the runtime-configured
 * address if set, otherwise the platform table entry for the direction.
 */
static dma_addr_t
d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
{
	struct stedma40_platform_data *plat = chan->base->plat_data;
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	dma_addr_t addr = 0;

	if (chan->runtime_addr)
		return chan->runtime_addr;

	if (direction == DMA_DEV_TO_MEM)
		addr = plat->dev_rx[cfg->src_dev_type];
	else if (direction == DMA_MEM_TO_DEV)
		addr = plat->dev_tx[cfg->dst_dev_type];

	return addr;
}

/*
 * Common preparation path for all transfer types: build a descriptor,
 * detect cyclic sg lists (last entry chains back to the first), fill the
 * LLIs and park the descriptor on the prepare queue.
 */
static struct dma_async_tx_descriptor *
d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
	    struct scatterlist *sg_dst, unsigned int sg_len,
	    enum dma_transfer_direction direction, unsigned long dma_flags)
{
	struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
	dma_addr_t src_dev_addr = 0;
	dma_addr_t dst_dev_addr = 0;
	struct d40_desc *desc;
	unsigned long flags;
	int ret;

	if (!chan->phy_chan) {
		chan_err(chan, "Cannot prepare unallocated channel\n");
		return NULL;
	}

	spin_lock_irqsave(&chan->lock, flags);

	desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
	if (desc == NULL)
		goto err;

	/* A list whose tail links back to its head is cyclic */
	if (sg_next(&sg_src[sg_len - 1]) == sg_src)
		desc->cyclic = true;

	if (direction != DMA_TRANS_NONE) {
		dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);

		if (direction == DMA_DEV_TO_MEM)
			src_dev_addr = dev_addr;
		else if (direction == DMA_MEM_TO_DEV)
			dst_dev_addr = dev_addr;
	}

	if (chan_is_logical(chan))
		ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
				      sg_len, src_dev_addr, dst_dev_addr);
	else
		ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
				      sg_len, src_dev_addr, dst_dev_addr);

	if (ret) {
		chan_err(chan, "Failed to prepare %s sg job: %d\n",
			 chan_is_logical(chan) ? "log" : "phy", ret);
		goto err;
	}

	/*
	 * add descriptor to the prepare queue in order to be able
	 * to free them later in terminate_all
	 */
	list_add_tail(&desc->node, &chan->prepare_queue);

	spin_unlock_irqrestore(&chan->lock, flags);

	return &desc->txd;

err:
	if (desc)
		d40_desc_free(chan, desc);
	spin_unlock_irqrestore(&chan->lock, flags);
	return NULL;
}

/*
 * dma_request_channel() filter: validate and install the client config,
 * or fall back to the default memcpy configuration when @data is NULL.
 */
bool stedma40_filter(struct dma_chan *chan, void *data)
{
	struct stedma40_chan_cfg *info = data;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;

	if (data) {
		err = d40_validate_conf(d40c, info);
		if (!err)
			d40c->dma_cfg = *info;
	} else
		err = d40_config_memcpy(d40c);

	if (!err)
		d40c->configured = true;

	return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);

/*
 * Set the priority and realtime bits for one event line of the channel.
 * Destination event lines live in the upper halfword of the registers.
 */
static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
{
	bool realtime = d40c->dma_cfg.realtime;
	bool highprio = d40c->dma_cfg.high_priority;
	u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1;
	u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1;
	u32 event = D40_TYPE_TO_EVENT(dev_type);
	u32 group = D40_TYPE_TO_GROUP(dev_type);
	u32 bit = 1 << event;

	/* Destination event lines are stored in the upper halfword */
	if (!src)
		bit <<= 16;

	writel(bit, d40c->base->virtbase + prioreg + group * 4);
	writel(bit, d40c->base->virtbase + rtreg + group * 4);
}

/* Program prio/RT for the event line(s) in use; only exists on rev >= 3 */
static void d40_set_prio_realtime(struct d40_chan *d40c)
{
	if (d40c->base->rev < 3)
		return;

	if ((d40c->dma_cfg.dir ==  STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		__d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);

	if ((d40c->dma_cfg.dir ==  STEDMA40_MEM_TO_PERIPH) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		__d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
}

/* DMA ENGINE functions */

/*
 * dmaengine alloc_chan_resources: apply the (possibly default memcpy)
 * configuration, claim a physical resource, program CFG/priority, and
 * write the channel config if this is the first user of the resource.
 */
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
	int err;
	unsigned long flags;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	bool is_free_phy;
	spin_lock_irqsave(&d40c->lock, flags);

	dma_cookie_init(chan);

	/* If no dma configuration is set use default configuration (memcpy) */
	if (!d40c->configured) {
		err = d40_config_memcpy(d40c);
		if (err) {
			chan_err(d40c, "Failed to configure memcpy channel\n");
			goto fail;
		}
	}

	err = d40_allocate_channel(d40c, &is_free_phy);
	if (err) {
		chan_err(d40c, "Failed to allocate channel\n");
		d40c->configured = false;
		goto fail;
	}

	pm_runtime_get_sync(d40c->base->dev);
	/* Fill in basic CFG register values */
	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
		    &d40c->dst_def_cfg, chan_is_logical(d40c));

	d40_set_prio_realtime(d40c);

	if (chan_is_logical(d40c)) {
		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.src_dev_type *
				D40_LCPA_CHAN_SIZE;
		else
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.dst_dev_type *
				D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
	}

	dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
		 chan_is_logical(d40c) ? "logical" : "physical",
		 d40c->phy_chan->num,
		 d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");


	/*
	 * Only write channel configuration to the DMA if the physical
	 * resource is free. In case of multiple logical channels
	 * on the same physical resource, only the first write is necessary.
	 */
	if (is_free_phy)
		d40_config_write(d40c);
fail:
	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return err;
}

/* dmaengine free_chan_resources: tear down the channel allocation */
static void d40_free_chan_resources(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Cannot free unallocated channel\n");
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	err = d40_free_dma(d40c);

	if (err)
		chan_err(d40c, "Failed to free channel\n");
	spin_unlock_irqrestore(&d40c->lock, flags);
}

/*
 * dmaengine prep_dma_memcpy: wrap src/dst in single-entry sg lists on
 * the stack and reuse the common sg preparation path.
 */
static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
						       dma_addr_t dst,
						       dma_addr_t src,
						       size_t size,
						       unsigned long dma_flags)
{
	struct scatterlist dst_sg;
	struct scatterlist src_sg;

	sg_init_table(&dst_sg, 1);
	sg_init_table(&src_sg, 1);

	sg_dma_address(&dst_sg) = dst;
	sg_dma_address(&src_sg) = src;

	sg_dma_len(&dst_sg) = size;
	sg_dma_len(&src_sg) = size;

	return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
}

/* dmaengine prep_dma_sg: sg-to-sg copy; entry counts must match */
static struct dma_async_tx_descriptor *
d40_prep_memcpy_sg(struct dma_chan *chan,
		   struct scatterlist *dst_sg, unsigned int dst_nents,
		   struct scatterlist *src_sg, unsigned int src_nents,
		   unsigned long dma_flags)
{
	if (dst_nents != src_nents)
		return NULL;

	return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
}

/* dmaengine prep_slave_sg: device<->memory; same sg list for both sides */
static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
							 struct scatterlist *sgl,
							 unsigned int sg_len,
							 enum dma_transfer_direction direction,
							 unsigned long dma_flags,
							 void *context)
{
	if (direction != DMA_DEV_TO_MEM &&
	    direction != DMA_MEM_TO_DEV)
		return NULL;

	return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
}

/*
 * dmaengine prep_dma_cyclic: build a temporary sg list of @periods
 * entries whose tail chains back to the head (hand-rolled sg_chain),
 * so d40_prep_sg() detects it as cyclic.
 * NOTE(review): kcalloc() result is dereferenced without a NULL check --
 * a GFP_NOWAIT failure here would oops; later upstream adds a check.
 */
static struct dma_async_tx_descriptor *
dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
		     size_t buf_len, size_t period_len,
		     enum dma_transfer_direction direction, void *context)
{
	unsigned int periods = buf_len / period_len;
	struct dma_async_tx_descriptor *txd;
	struct scatterlist *sg;
	int i;

	sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
	for (i = 0; i < periods; i++) {
		sg_dma_address(&sg[i]) = dma_addr;
		sg_dma_len(&sg[i]) = period_len;
		dma_addr += period_len;
	}

	/* Terminator entry chains back to the start of the list */
	sg[periods].offset = 0;
	sg[periods].length = 0;
	sg[periods].page_link =
		((unsigned long)sg | 0x01) & ~0x02;

	txd = d40_prep_sg(chan, sg, sg, periods, direction,
			  DMA_PREP_INTERRUPT);

	kfree(sg);

	return txd;
}

/*
 * dmaengine tx_status: cookie status plus residue, and report PAUSED
 * when the channel is suspended.
 * NOTE(review): returns -EINVAL through an enum dma_status -- confirm
 * callers tolerate this before changing it.
 */
static enum dma_status d40_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	enum dma_status ret;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Cannot read status of unallocated channel\n");
		return -EINVAL;
	}

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS)
		dma_set_residue(txstate, stedma40_residue(chan));

	if (d40_is_paused(d40c))
		ret = DMA_PAUSED;

	return ret;
}

/*
 * dmaengine issue_pending: move pending descriptors to the run queue and
 * start the channel if it is idle.
 */
static void d40_issue_pending(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	list_splice_tail_init(&d40c->pending_queue, &d40c->queue);

	/* Busy means that queued jobs are already being processed */
	if (!d40c->busy)
		(void) d40_queue_start(d40c);

	spin_unlock_irqrestore(&d40c->lock, flags);
}

/*
 * Stop the channel and drop every descriptor it owns.
 * (Definition continues past the end of this chunk.)
 */
static void d40_terminate_all(struct dma_chan *chan)
{
	unsigned long flags;
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	int ret;

	spin_lock_irqsave(&d40c->lock, flags);

	pm_runtime_get_sync(d40c->base->dev);
	ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (ret)
		chan_err(d40c, "Failed to stop channel\n");

	d40_term_all(d40c);
	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	if (d40c->busy) {
		pm_runtime_mark_last_busy(d40c->base->dev);
		pm_runtime_put_autosuspend(d40c->base->dev);
	}
	d40c->busy
= false; spin_unlock_irqrestore(&d40c->lock, flags); } static int dma40_config_to_halfchannel(struct d40_chan *d40c, struct stedma40_half_channel_info *info, enum dma_slave_buswidth width, u32 maxburst) { enum stedma40_periph_data_width addr_width; int psize; switch (width) { case DMA_SLAVE_BUSWIDTH_1_BYTE: addr_width = STEDMA40_BYTE_WIDTH; break; case DMA_SLAVE_BUSWIDTH_2_BYTES: addr_width = STEDMA40_HALFWORD_WIDTH; break; case DMA_SLAVE_BUSWIDTH_4_BYTES: addr_width = STEDMA40_WORD_WIDTH; break; case DMA_SLAVE_BUSWIDTH_8_BYTES: addr_width = STEDMA40_DOUBLEWORD_WIDTH; break; default: dev_err(d40c->base->dev, "illegal peripheral address width " "requested (%d)\n", width); return -EINVAL; } if (chan_is_logical(d40c)) { if (maxburst >= 16) psize = STEDMA40_PSIZE_LOG_16; else if (maxburst >= 8) psize = STEDMA40_PSIZE_LOG_8; else if (maxburst >= 4) psize = STEDMA40_PSIZE_LOG_4; else psize = STEDMA40_PSIZE_LOG_1; } else { if (maxburst >= 16) psize = STEDMA40_PSIZE_PHY_16; else if (maxburst >= 8) psize = STEDMA40_PSIZE_PHY_8; else if (maxburst >= 4) psize = STEDMA40_PSIZE_PHY_4; else psize = STEDMA40_PSIZE_PHY_1; } info->data_width = addr_width; info->psize = psize; info->flow_ctrl = STEDMA40_NO_FLOW_CTRL; return 0; } /* Runtime reconfiguration extension */ static int d40_set_runtime_config(struct dma_chan *chan, struct dma_slave_config *config) { struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; enum dma_slave_buswidth src_addr_width, dst_addr_width; dma_addr_t config_addr; u32 src_maxburst, dst_maxburst; int ret; src_addr_width = config->src_addr_width; src_maxburst = config->src_maxburst; dst_addr_width = config->dst_addr_width; dst_maxburst = config->dst_maxburst; if (config->direction == DMA_DEV_TO_MEM) { dma_addr_t dev_addr_rx = d40c->base->plat_data->dev_rx[cfg->src_dev_type]; config_addr = config->src_addr; if (dev_addr_rx) dev_dbg(d40c->base->dev, "channel has a pre-wired RX address %08x " 
"overriding with %08x\n", dev_addr_rx, config_addr); if (cfg->dir != STEDMA40_PERIPH_TO_MEM) dev_dbg(d40c->base->dev, "channel was not configured for peripheral " "to memory transfer (%d) overriding\n", cfg->dir); cfg->dir = STEDMA40_PERIPH_TO_MEM; /* Configure the memory side */ if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) dst_addr_width = src_addr_width; if (dst_maxburst == 0) dst_maxburst = src_maxburst; } else if (config->direction == DMA_MEM_TO_DEV) { dma_addr_t dev_addr_tx = d40c->base->plat_data->dev_tx[cfg->dst_dev_type]; config_addr = config->dst_addr; if (dev_addr_tx) dev_dbg(d40c->base->dev, "channel has a pre-wired TX address %08x " "overriding with %08x\n", dev_addr_tx, config_addr); if (cfg->dir != STEDMA40_MEM_TO_PERIPH) dev_dbg(d40c->base->dev, "channel was not configured for memory " "to peripheral transfer (%d) overriding\n", cfg->dir); cfg->dir = STEDMA40_MEM_TO_PERIPH; /* Configure the memory side */ if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) src_addr_width = dst_addr_width; if (src_maxburst == 0) src_maxburst = dst_maxburst; } else { dev_err(d40c->base->dev, "unrecognized channel direction %d\n", config->direction); return -EINVAL; } if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) { dev_err(d40c->base->dev, "src/dst width/maxburst mismatch: %d*%d != %d*%d\n", src_maxburst, src_addr_width, dst_maxburst, dst_addr_width); return -EINVAL; } ret = dma40_config_to_halfchannel(d40c, &cfg->src_info, src_addr_width, src_maxburst); if (ret) return ret; ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info, dst_addr_width, dst_maxburst); if (ret) return ret; /* Fill in register values */ if (chan_is_logical(d40c)) d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); else d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg, false); /* These settings will take precedence later */ d40c->runtime_addr = config_addr; d40c->runtime_direction = config->direction; dev_dbg(d40c->base->dev, "configured channel %s for 
%s, data width %d/%d, " "maxburst %d/%d elements, LE, no flow control\n", dma_chan_name(chan), (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX", src_addr_width, dst_addr_width, src_maxburst, dst_maxburst); return 0; } static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) { struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); if (d40c->phy_chan == NULL) { chan_err(d40c, "Channel is not allocated!\n"); return -EINVAL; } switch (cmd) { case DMA_TERMINATE_ALL: d40_terminate_all(chan); return 0; case DMA_PAUSE: return d40_pause(d40c); case DMA_RESUME: return d40_resume(d40c); case DMA_SLAVE_CONFIG: return d40_set_runtime_config(chan, (struct dma_slave_config *) arg); default: break; } /* Other commands are unimplemented */ return -ENXIO; } /* Initialization functions */ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, struct d40_chan *chans, int offset, int num_chans) { int i = 0; struct d40_chan *d40c; INIT_LIST_HEAD(&dma->channels); for (i = offset; i < offset + num_chans; i++) { d40c = &chans[i]; d40c->base = base; d40c->chan.device = dma; spin_lock_init(&d40c->lock); d40c->log_num = D40_PHY_CHAN; INIT_LIST_HEAD(&d40c->active); INIT_LIST_HEAD(&d40c->queue); INIT_LIST_HEAD(&d40c->pending_queue); INIT_LIST_HEAD(&d40c->client); INIT_LIST_HEAD(&d40c->prepare_queue); tasklet_init(&d40c->tasklet, dma_tasklet, (unsigned long) d40c); list_add_tail(&d40c->chan.device_node, &dma->channels); } } static void d40_ops_init(struct d40_base *base, struct dma_device *dev) { if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) dev->device_prep_slave_sg = d40_prep_slave_sg; if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) { dev->device_prep_dma_memcpy = d40_prep_memcpy; /* * This controller can only access address at even * 32bit boundaries, i.e. 
2^2 */ dev->copy_align = 2; } if (dma_has_cap(DMA_SG, dev->cap_mask)) dev->device_prep_dma_sg = d40_prep_memcpy_sg; if (dma_has_cap(DMA_CYCLIC, dev->cap_mask)) dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic; dev->device_alloc_chan_resources = d40_alloc_chan_resources; dev->device_free_chan_resources = d40_free_chan_resources; dev->device_issue_pending = d40_issue_pending; dev->device_tx_status = d40_tx_status; dev->device_control = d40_control; dev->dev = base->dev; } static int __init d40_dmaengine_init(struct d40_base *base, int num_reserved_chans) { int err ; d40_chan_init(base, &base->dma_slave, base->log_chans, 0, base->num_log_chans); dma_cap_zero(base->dma_slave.cap_mask); dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); d40_ops_init(base, &base->dma_slave); err = dma_async_device_register(&base->dma_slave); if (err) { d40_err(base->dev, "Failed to register slave channels\n"); goto failure1; } d40_chan_init(base, &base->dma_memcpy, base->log_chans, base->num_log_chans, base->plat_data->memcpy_len); dma_cap_zero(base->dma_memcpy.cap_mask); dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask); d40_ops_init(base, &base->dma_memcpy); err = dma_async_device_register(&base->dma_memcpy); if (err) { d40_err(base->dev, "Failed to regsiter memcpy only channels\n"); goto failure2; } d40_chan_init(base, &base->dma_both, base->phy_chans, 0, num_reserved_chans); dma_cap_zero(base->dma_both.cap_mask); dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); dma_cap_set(DMA_SG, base->dma_both.cap_mask); dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); d40_ops_init(base, &base->dma_both); err = dma_async_device_register(&base->dma_both); if (err) { d40_err(base->dev, "Failed to register logical and physical capable channels\n"); goto failure3; } return 0; failure3: dma_async_device_unregister(&base->dma_memcpy); failure2: 
dma_async_device_unregister(&base->dma_slave); failure1: return err; } /* Suspend resume functionality */ #ifdef CONFIG_PM static int dma40_pm_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct d40_base *base = platform_get_drvdata(pdev); int ret = 0; if (!pm_runtime_suspended(dev)) return -EBUSY; if (base->lcpa_regulator) ret = regulator_disable(base->lcpa_regulator); return ret; } static int dma40_runtime_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct d40_base *base = platform_get_drvdata(pdev); d40_save_restore_registers(base, true); /* Don't disable/enable clocks for v1 due to HW bugs */ if (base->rev != 1) writel_relaxed(base->gcc_pwr_off_mask, base->virtbase + D40_DREG_GCC); return 0; } static int dma40_runtime_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct d40_base *base = platform_get_drvdata(pdev); if (base->initialized) d40_save_restore_registers(base, false); writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC); return 0; } static int dma40_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct d40_base *base = platform_get_drvdata(pdev); int ret = 0; if (base->lcpa_regulator) ret = regulator_enable(base->lcpa_regulator); return ret; } static const struct dev_pm_ops dma40_pm_ops = { .suspend = dma40_pm_suspend, .runtime_suspend = dma40_runtime_suspend, .runtime_resume = dma40_runtime_resume, .resume = dma40_resume, }; #define DMA40_PM_OPS (&dma40_pm_ops) #else #define DMA40_PM_OPS NULL #endif /* Initialization functions. 
*/ static int __init d40_phy_res_init(struct d40_base *base) { int i; int num_phy_chans_avail = 0; u32 val[2]; int odd_even_bit = -2; int gcc = D40_DREG_GCC_ENA; val[0] = readl(base->virtbase + D40_DREG_PRSME); val[1] = readl(base->virtbase + D40_DREG_PRSMO); for (i = 0; i < base->num_phy_chans; i++) { base->phy_res[i].num = i; odd_even_bit += 2 * ((i % 2) == 0); if (((val[i % 2] >> odd_even_bit) & 3) == 1) { /* Mark security only channels as occupied */ base->phy_res[i].allocated_src = D40_ALLOC_PHY; base->phy_res[i].allocated_dst = D40_ALLOC_PHY; base->phy_res[i].reserved = true; gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i), D40_DREG_GCC_SRC); gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i), D40_DREG_GCC_DST); } else { base->phy_res[i].allocated_src = D40_ALLOC_FREE; base->phy_res[i].allocated_dst = D40_ALLOC_FREE; base->phy_res[i].reserved = false; num_phy_chans_avail++; } spin_lock_init(&base->phy_res[i].lock); } /* Mark disabled channels as occupied */ for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) { int chan = base->plat_data->disabled_channels[i]; base->phy_res[chan].allocated_src = D40_ALLOC_PHY; base->phy_res[chan].allocated_dst = D40_ALLOC_PHY; base->phy_res[chan].reserved = true; gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan), D40_DREG_GCC_SRC); gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan), D40_DREG_GCC_DST); num_phy_chans_avail--; } dev_info(base->dev, "%d of %d physical DMA channels available\n", num_phy_chans_avail, base->num_phy_chans); /* Verify settings extended vs standard */ val[0] = readl(base->virtbase + D40_DREG_PRTYP); for (i = 0; i < base->num_phy_chans; i++) { if (base->phy_res[i].allocated_src == D40_ALLOC_FREE && (val[0] & 0x3) != 1) dev_info(base->dev, "[%s] INFO: channel %d is misconfigured (%d)\n", __func__, i, val[0] & 0x3); val[0] = val[0] >> 2; } /* * To keep things simple, Enable all clocks initially. * The clocks will get managed later post channel allocation. 
* The clocks for the event lines on which reserved channels exists * are not managed here. */ writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC); base->gcc_pwr_off_mask = gcc; return num_phy_chans_avail; } static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) { struct stedma40_platform_data *plat_data; struct clk *clk = NULL; void __iomem *virtbase = NULL; struct resource *res = NULL; struct d40_base *base = NULL; int num_log_chans = 0; int num_phy_chans; int i; u32 pid; u32 cid; u8 rev; clk = clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { d40_err(&pdev->dev, "No matching clock found\n"); goto failure; } clk_enable(clk); /* Get IO for DMAC base address */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); if (!res) goto failure; if (request_mem_region(res->start, resource_size(res), D40_NAME " I/O base") == NULL) goto failure; virtbase = ioremap(res->start, resource_size(res)); if (!virtbase) goto failure; /* This is just a regular AMBA PrimeCell ID actually */ for (pid = 0, i = 0; i < 4; i++) pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i) & 255) << (i * 8); for (cid = 0, i = 0; i < 4; i++) cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i) & 255) << (i * 8); if (cid != AMBA_CID) { d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n"); goto failure; } if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) { d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n", AMBA_MANF_BITS(pid), AMBA_VENDOR_ST); goto failure; } /* * HW revision: * DB8500ed has revision 0 * ? 
has revision 1 * DB8500v1 has revision 2 * DB8500v2 has revision 3 */ rev = AMBA_REV_BITS(pid); /* The number of physical channels on this HW */ num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", rev, res->start); if (rev < 2) { d40_err(&pdev->dev, "hardware revision: %d is not supported", rev); goto failure; } plat_data = pdev->dev.platform_data; /* Count the number of logical channels in use */ for (i = 0; i < plat_data->dev_len; i++) if (plat_data->dev_rx[i] != 0) num_log_chans++; for (i = 0; i < plat_data->dev_len; i++) if (plat_data->dev_tx[i] != 0) num_log_chans++; base = kzalloc(ALIGN(sizeof(struct d40_base), 4) + (num_phy_chans + num_log_chans + plat_data->memcpy_len) * sizeof(struct d40_chan), GFP_KERNEL); if (base == NULL) { d40_err(&pdev->dev, "Out of memory\n"); goto failure; } base->rev = rev; base->clk = clk; base->num_phy_chans = num_phy_chans; base->num_log_chans = num_log_chans; base->phy_start = res->start; base->phy_size = resource_size(res); base->virtbase = virtbase; base->plat_data = plat_data; base->dev = &pdev->dev; base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4); base->log_chans = &base->phy_chans[num_phy_chans]; base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res), GFP_KERNEL); if (!base->phy_res) goto failure; base->lookup_phy_chans = kzalloc(num_phy_chans * sizeof(struct d40_chan *), GFP_KERNEL); if (!base->lookup_phy_chans) goto failure; if (num_log_chans + plat_data->memcpy_len) { /* * The max number of logical channels are event lines for all * src devices and dst devices */ base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 * sizeof(struct d40_chan *), GFP_KERNEL); if (!base->lookup_log_chans) goto failure; } base->reg_val_backup_chan = kmalloc(base->num_phy_chans * sizeof(d40_backup_regs_chan), GFP_KERNEL); if (!base->reg_val_backup_chan) goto failure; base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(struct d40_desc 
*) * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL); if (!base->lcla_pool.alloc_map) goto failure; base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc), 0, SLAB_HWCACHE_ALIGN, NULL); if (base->desc_slab == NULL) goto failure; return base; failure: if (!IS_ERR(clk)) { clk_disable(clk); clk_put(clk); } if (virtbase) iounmap(virtbase); if (res) release_mem_region(res->start, resource_size(res)); if (virtbase) iounmap(virtbase); if (base) { kfree(base->lcla_pool.alloc_map); kfree(base->reg_val_backup_chan); kfree(base->lookup_log_chans); kfree(base->lookup_phy_chans); kfree(base->phy_res); kfree(base); } return NULL; } static void __init d40_hw_init(struct d40_base *base) { static struct d40_reg_val dma_init_reg[] = { /* Clock every part of the DMA block from start */ { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL}, /* Interrupts on all logical channels */ { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF}, { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF}, { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF}, { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF}, { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF}, { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF}, { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF}, { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF}, { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF}, { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF}, { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF} }; int i; u32 prmseo[2] = {0, 0}; u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF}; u32 pcmis = 0; u32 pcicr = 0; for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++) writel(dma_init_reg[i].val, base->virtbase + dma_init_reg[i].reg); /* Configure all our dma channels to default settings */ for (i = 0; i < base->num_phy_chans; i++) { activeo[i % 2] = activeo[i % 2] << 2; if (base->phy_res[base->num_phy_chans - i - 1].allocated_src == D40_ALLOC_PHY) { activeo[i % 2] |= 3; continue; } /* Enable interrupt # */ pcmis = (pcmis << 1) | 1; /* Clear interrupt # */ pcicr = (pcicr 
<< 1) | 1; /* Set channel to physical mode */ prmseo[i % 2] = prmseo[i % 2] << 2; prmseo[i % 2] |= 1; } writel(prmseo[1], base->virtbase + D40_DREG_PRMSE); writel(prmseo[0], base->virtbase + D40_DREG_PRMSO); writel(activeo[1], base->virtbase + D40_DREG_ACTIVE); writel(activeo[0], base->virtbase + D40_DREG_ACTIVO); /* Write which interrupt to enable */ writel(pcmis, base->virtbase + D40_DREG_PCMIS); /* Write which interrupt to clear */ writel(pcicr, base->virtbase + D40_DREG_PCICR); } static int __init d40_lcla_allocate(struct d40_base *base) { struct d40_lcla_pool *pool = &base->lcla_pool; unsigned long *page_list; int i, j; int ret = 0; /* * This is somewhat ugly. We need 8192 bytes that are 18 bit aligned, * To full fill this hardware requirement without wasting 256 kb * we allocate pages until we get an aligned one. */ page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS, GFP_KERNEL); if (!page_list) { ret = -ENOMEM; goto failure; } /* Calculating how many pages that are required */ base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE; for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) { page_list[i] = __get_free_pages(GFP_KERNEL, base->lcla_pool.pages); if (!page_list[i]) { d40_err(base->dev, "Failed to allocate %d pages.\n", base->lcla_pool.pages); for (j = 0; j < i; j++) free_pages(page_list[j], base->lcla_pool.pages); goto failure; } if ((virt_to_phys((void *)page_list[i]) & (LCLA_ALIGNMENT - 1)) == 0) break; } for (j = 0; j < i; j++) free_pages(page_list[j], base->lcla_pool.pages); if (i < MAX_LCLA_ALLOC_ATTEMPTS) { base->lcla_pool.base = (void *)page_list[i]; } else { /* * After many attempts and no succees with finding the correct * alignment, try with allocating a big buffer. 
*/ dev_warn(base->dev, "[%s] Failed to get %d pages @ 18 bit align.\n", __func__, base->lcla_pool.pages); base->lcla_pool.base_unaligned = kmalloc(SZ_1K * base->num_phy_chans + LCLA_ALIGNMENT, GFP_KERNEL); if (!base->lcla_pool.base_unaligned) { ret = -ENOMEM; goto failure; } base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned, LCLA_ALIGNMENT); } pool->dma_addr = dma_map_single(base->dev, pool->base, SZ_1K * base->num_phy_chans, DMA_TO_DEVICE); if (dma_mapping_error(base->dev, pool->dma_addr)) { pool->dma_addr = 0; ret = -ENOMEM; goto failure; } writel(virt_to_phys(base->lcla_pool.base), base->virtbase + D40_DREG_LCLA); failure: kfree(page_list); return ret; } static int __init d40_probe(struct platform_device *pdev) { int err; int ret = -ENOENT; struct d40_base *base; struct resource *res = NULL; int num_reserved_chans; u32 val; base = d40_hw_detect_init(pdev); if (!base) goto failure; num_reserved_chans = d40_phy_res_init(base); platform_set_drvdata(pdev, base); spin_lock_init(&base->interrupt_lock); spin_lock_init(&base->execmd_lock); /* Get IO for logical channel parameter address */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); if (!res) { ret = -ENOENT; d40_err(&pdev->dev, "No \"lcpa\" memory resource\n"); goto failure; } base->lcpa_size = resource_size(res); base->phy_lcpa = res->start; if (request_mem_region(res->start, resource_size(res), D40_NAME " I/O lcpa") == NULL) { ret = -EBUSY; d40_err(&pdev->dev, "Failed to request LCPA region 0x%x-0x%x\n", res->start, res->end); goto failure; } /* We make use of ESRAM memory for this. 
*/ val = readl(base->virtbase + D40_DREG_LCPA); if (res->start != val && val != 0) { dev_warn(&pdev->dev, "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n", __func__, val, res->start); } else writel(res->start, base->virtbase + D40_DREG_LCPA); base->lcpa_base = ioremap(res->start, resource_size(res)); if (!base->lcpa_base) { ret = -ENOMEM; d40_err(&pdev->dev, "Failed to ioremap LCPA region\n"); goto failure; } /* If lcla has to be located in ESRAM we don't need to allocate */ if (base->plat_data->use_esram_lcla) { res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla_esram"); if (!res) { ret = -ENOENT; d40_err(&pdev->dev, "No \"lcla_esram\" memory resource\n"); goto failure; } base->lcla_pool.base = ioremap(res->start, resource_size(res)); if (!base->lcla_pool.base) { ret = -ENOMEM; d40_err(&pdev->dev, "Failed to ioremap LCLA region\n"); goto failure; } writel(res->start, base->virtbase + D40_DREG_LCLA); } else { ret = d40_lcla_allocate(base); if (ret) { d40_err(&pdev->dev, "Failed to allocate LCLA area\n"); goto failure; } } spin_lock_init(&base->lcla_pool.lock); base->irq = platform_get_irq(pdev, 0); ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); if (ret) { d40_err(&pdev->dev, "No IRQ defined\n"); goto failure; } pm_runtime_irq_safe(base->dev); pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY); pm_runtime_use_autosuspend(base->dev); pm_runtime_enable(base->dev); pm_runtime_resume(base->dev); if (base->plat_data->use_esram_lcla) { base->lcpa_regulator = regulator_get(base->dev, "lcla_esram"); if (IS_ERR(base->lcpa_regulator)) { d40_err(&pdev->dev, "Failed to get lcpa_regulator\n"); base->lcpa_regulator = NULL; goto failure; } ret = regulator_enable(base->lcpa_regulator); if (ret) { d40_err(&pdev->dev, "Failed to enable lcpa_regulator\n"); regulator_put(base->lcpa_regulator); base->lcpa_regulator = NULL; goto failure; } } base->initialized = true; err = d40_dmaengine_init(base, num_reserved_chans); if (err) goto failure; 
d40_hw_init(base); dev_info(base->dev, "initialized\n"); return 0; failure: if (base) { if (base->desc_slab) kmem_cache_destroy(base->desc_slab); if (base->virtbase) iounmap(base->virtbase); if (base->lcla_pool.base && base->plat_data->use_esram_lcla) { iounmap(base->lcla_pool.base); base->lcla_pool.base = NULL; } if (base->lcla_pool.dma_addr) dma_unmap_single(base->dev, base->lcla_pool.dma_addr, SZ_1K * base->num_phy_chans, DMA_TO_DEVICE); if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) free_pages((unsigned long)base->lcla_pool.base, base->lcla_pool.pages); kfree(base->lcla_pool.base_unaligned); if (base->phy_lcpa) release_mem_region(base->phy_lcpa, base->lcpa_size); if (base->phy_start) release_mem_region(base->phy_start, base->phy_size); if (base->clk) { clk_disable(base->clk); clk_put(base->clk); } if (base->lcpa_regulator) { regulator_disable(base->lcpa_regulator); regulator_put(base->lcpa_regulator); } kfree(base->lcla_pool.alloc_map); kfree(base->lookup_log_chans); kfree(base->lookup_phy_chans); kfree(base->phy_res); kfree(base); } d40_err(&pdev->dev, "probe failed\n"); return ret; } static struct platform_driver d40_driver = { .driver = { .owner = THIS_MODULE, .name = D40_NAME, .pm = DMA40_PM_OPS, }, }; static int __init stedma40_init(void) { return platform_driver_probe(&d40_driver, d40_probe); } subsys_initcall(stedma40_init);
gpl-2.0
neobuddy89/falcon_kernel
arch/m68k/sun3x/config.c
4678
1528
/* * Setup kernel for a Sun3x machine * * (C) 1999 Thomas Bogendoerfer (tsbogend@alpha.franken.de) * * based on code from Oliver Jowett <oliver@jowett.manawatu.gen.nz> */ #include <linux/types.h> #include <linux/mm.h> #include <linux/seq_file.h> #include <linux/console.h> #include <linux/init.h> #include <asm/machdep.h> #include <asm/irq.h> #include <asm/sun3xprom.h> #include <asm/sun3ints.h> #include <asm/setup.h> #include <asm/oplib.h> #include "time.h" volatile char *clock_va; extern void sun3_get_model(char *model); void sun3_leds(unsigned int i) { } static void sun3x_get_hardware_list(struct seq_file *m) { seq_printf(m, "PROM Revision:\t%s\n", romvec->pv_monid); } /* * Setup the sun3x configuration info */ void __init config_sun3x(void) { sun3x_prom_init(); mach_max_dma_address = 0xffffffff; /* we can DMA anywhere, whee */ mach_sched_init = sun3x_sched_init; mach_init_IRQ = sun3_init_IRQ; mach_gettimeoffset = sun3x_gettimeoffset; mach_reset = sun3x_reboot; mach_hwclk = sun3x_hwclk; mach_get_model = sun3_get_model; mach_get_hardware_list = sun3x_get_hardware_list; sun3_intreg = (unsigned char *)SUN3X_INTREG; /* only the serial console is known to work anyway... */ #if 0 switch (*(unsigned char *)SUN3X_EEPROM_CONS) { case 0x10: serial_console = 1; conswitchp = NULL; break; case 0x11: serial_console = 2; conswitchp = NULL; break; default: serial_console = 0; conswitchp = &dummy_con; break; } #endif }
gpl-2.0
htc-msm8960/android_kernel_htc_m7
drivers/scsi/aic7xxx/aic79xx_core.c
4934
299698
/* * Core routines and tables shareable across OS platforms. * * Copyright (c) 1994-2002 Justin T. Gibbs. * Copyright (c) 2000-2003 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
* * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#250 $ */ #ifdef __linux__ #include "aic79xx_osm.h" #include "aic79xx_inline.h" #include "aicasm/aicasm_insformat.h" #else #include <dev/aic7xxx/aic79xx_osm.h> #include <dev/aic7xxx/aic79xx_inline.h> #include <dev/aic7xxx/aicasm/aicasm_insformat.h> #endif /***************************** Lookup Tables **********************************/ static const char *const ahd_chip_names[] = { "NONE", "aic7901", "aic7902", "aic7901A" }; static const u_int num_chip_names = ARRAY_SIZE(ahd_chip_names); /* * Hardware error codes. */ struct ahd_hard_error_entry { uint8_t errno; const char *errmesg; }; static const struct ahd_hard_error_entry ahd_hard_errors[] = { { DSCTMOUT, "Discard Timer has timed out" }, { ILLOPCODE, "Illegal Opcode in sequencer program" }, { SQPARERR, "Sequencer Parity Error" }, { DPARERR, "Data-path Parity Error" }, { MPARERR, "Scratch or SCB Memory Parity Error" }, { CIOPARERR, "CIOBUS Parity Error" }, }; static const u_int num_errors = ARRAY_SIZE(ahd_hard_errors); static const struct ahd_phase_table_entry ahd_phase_table[] = { { P_DATAOUT, MSG_NOOP, "in Data-out phase" }, { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" }, { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" }, { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" }, { P_COMMAND, MSG_NOOP, "in Command phase" }, { P_MESGOUT, MSG_NOOP, "in Message-out phase" }, { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" }, { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" }, { P_BUSFREE, MSG_NOOP, "while idle" }, { 0, MSG_NOOP, "in unknown phase" } }; /* * In most cases we only wish to itterate over real phases, so * exclude the last element from the count. 
*/ static const u_int num_phases = ARRAY_SIZE(ahd_phase_table) - 1; /* Our Sequencer Program */ #include "aic79xx_seq.h" /**************************** Function Declarations ***************************/ static void ahd_handle_transmission_error(struct ahd_softc *ahd); static void ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1); static int ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime); static int ahd_handle_nonpkt_busfree(struct ahd_softc *ahd); static void ahd_handle_proto_violation(struct ahd_softc *ahd); static void ahd_force_renegotiation(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static struct ahd_tmode_tstate* ahd_alloc_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel); #ifdef AHD_TARGET_MODE static void ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force); #endif static void ahd_devlimited_syncrate(struct ahd_softc *ahd, struct ahd_initiator_tinfo *, u_int *period, u_int *ppr_options, role_t role); static void ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct ahd_transinfo *tinfo); static void ahd_update_pending_scbs(struct ahd_softc *ahd); static void ahd_fetch_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static void ahd_scb_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb); static void ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb); static void ahd_build_transfer_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static void ahd_construct_sdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int period, u_int offset); static void ahd_construct_wdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int bus_width); static void ahd_construct_ppr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int period, u_int offset, u_int bus_width, u_int ppr_options); static void ahd_clear_msg_state(struct ahd_softc *ahd); static void 
ahd_handle_message_phase(struct ahd_softc *ahd); typedef enum { AHDMSG_1B, AHDMSG_2B, AHDMSG_EXT } ahd_msgtype; static int ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full); static int ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static int ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static void ahd_handle_ign_wide_residue(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static void ahd_reinitialize_dataptrs(struct ahd_softc *ahd); static void ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int lun, cam_status status, char *message, int verbose_level); #ifdef AHD_TARGET_MODE static void ahd_setup_target_msgin(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb); #endif static u_int ahd_sglist_size(struct ahd_softc *ahd); static u_int ahd_sglist_allocsize(struct ahd_softc *ahd); static bus_dmamap_callback_t ahd_dmamap_cb; static void ahd_initialize_hscbs(struct ahd_softc *ahd); static int ahd_init_scbdata(struct ahd_softc *ahd); static void ahd_fini_scbdata(struct ahd_softc *ahd); static void ahd_setup_iocell_workaround(struct ahd_softc *ahd); static void ahd_iocell_first_selection(struct ahd_softc *ahd); static void ahd_add_col_list(struct ahd_softc *ahd, struct scb *scb, u_int col_idx); static void ahd_rem_col_list(struct ahd_softc *ahd, struct scb *scb); static void ahd_chip_init(struct ahd_softc *ahd); static void ahd_qinfifo_requeue(struct ahd_softc *ahd, struct scb *prev_scb, struct scb *scb); static int ahd_qinfifo_count(struct ahd_softc *ahd); static int ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, role_t role, uint32_t status, ahd_search_action action, u_int *list_head, u_int *list_tail, u_int tid); static void ahd_stitch_tid_list(struct ahd_softc *ahd, u_int tid_prev, u_int tid_cur, u_int tid_next); static void ahd_add_scb_to_free_list(struct ahd_softc *ahd, u_int scbid); static u_int 
ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid, u_int prev, u_int next, u_int tid); static void ahd_reset_current_bus(struct ahd_softc *ahd); static ahd_callback_t ahd_stat_timer; #ifdef AHD_DUMP_SEQ static void ahd_dumpseq(struct ahd_softc *ahd); #endif static void ahd_loadseq(struct ahd_softc *ahd); static int ahd_check_patch(struct ahd_softc *ahd, const struct patch **start_patch, u_int start_instr, u_int *skip_addr); static u_int ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address); static void ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts); static int ahd_probe_stack_size(struct ahd_softc *ahd); static int ahd_scb_active_in_fifo(struct ahd_softc *ahd, struct scb *scb); static void ahd_run_data_fifo(struct ahd_softc *ahd, struct scb *scb); #ifdef AHD_TARGET_MODE static void ahd_queue_lstate_event(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate, u_int initiator_id, u_int event_type, u_int event_arg); static void ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask); static int ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd); #endif static int ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, role_t role, uint32_t status); static void ahd_alloc_scbs(struct ahd_softc *ahd); static void ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, u_int scbid); static void ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb); static void ahd_clear_critical_section(struct ahd_softc *ahd); static void ahd_clear_intstat(struct ahd_softc *ahd); static void ahd_enable_coalescing(struct ahd_softc *ahd, int enable); static u_int ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl); static void ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb); static void ahd_handle_scb_status(struct ahd_softc *ahd, struct scb *scb); static const struct ahd_phase_table_entry* ahd_lookup_phase_entry(int phase); static void ahd_shutdown(void *arg); static void 
ahd_update_coalescing_values(struct ahd_softc *ahd, u_int timer, u_int maxcmds, u_int mincmds); static int ahd_verify_vpd_cksum(struct vpd_config *vpd); static int ahd_wait_seeprom(struct ahd_softc *ahd); static int ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, int target, char channel, int lun, u_int tag, role_t role); static void ahd_reset_cmds_pending(struct ahd_softc *ahd); /*************************** Interrupt Services *******************************/ static void ahd_run_qoutfifo(struct ahd_softc *ahd); #ifdef AHD_TARGET_MODE static void ahd_run_tqinfifo(struct ahd_softc *ahd, int paused); #endif static void ahd_handle_hwerrint(struct ahd_softc *ahd); static void ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat); static void ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat); /************************ Sequencer Execution Control *************************/ void ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst) { if (ahd->src_mode == src && ahd->dst_mode == dst) return; #ifdef AHD_DEBUG if (ahd->src_mode == AHD_MODE_UNKNOWN || ahd->dst_mode == AHD_MODE_UNKNOWN) panic("Setting mode prior to saving it.\n"); if ((ahd_debug & AHD_SHOW_MODEPTR) != 0) printk("%s: Setting mode 0x%x\n", ahd_name(ahd), ahd_build_mode_state(ahd, src, dst)); #endif ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst)); ahd->src_mode = src; ahd->dst_mode = dst; } static void ahd_update_modes(struct ahd_softc *ahd) { ahd_mode_state mode_ptr; ahd_mode src; ahd_mode dst; mode_ptr = ahd_inb(ahd, MODE_PTR); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MODEPTR) != 0) printk("Reading mode 0x%x\n", mode_ptr); #endif ahd_extract_mode_state(ahd, mode_ptr, &src, &dst); ahd_known_modes(ahd, src, dst); } static void ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode, ahd_mode dstmode, const char *file, int line) { #ifdef AHD_DEBUG if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0 || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) { panic("%s:%s:%d: Mode assertion 
failed.\n", ahd_name(ahd), file, line); } #endif } #define AHD_ASSERT_MODES(ahd, source, dest) \ ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__); ahd_mode_state ahd_save_modes(struct ahd_softc *ahd) { if (ahd->src_mode == AHD_MODE_UNKNOWN || ahd->dst_mode == AHD_MODE_UNKNOWN) ahd_update_modes(ahd); return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode)); } void ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state) { ahd_mode src; ahd_mode dst; ahd_extract_mode_state(ahd, state, &src, &dst); ahd_set_modes(ahd, src, dst); } /* * Determine whether the sequencer has halted code execution. * Returns non-zero status if the sequencer is stopped. */ int ahd_is_paused(struct ahd_softc *ahd) { return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0); } /* * Request that the sequencer stop and wait, indefinitely, for it * to stop. The sequencer will only acknowledge that it is paused * once it has reached an instruction boundary and PAUSEDIS is * cleared in the SEQCTL register. The sequencer may use PAUSEDIS * for critical sections. */ void ahd_pause(struct ahd_softc *ahd) { ahd_outb(ahd, HCNTRL, ahd->pause); /* * Since the sequencer can disable pausing in a critical section, we * must loop until it actually stops. */ while (ahd_is_paused(ahd) == 0) ; } /* * Allow the sequencer to continue program execution. * We check here to ensure that no additional interrupt * sources that would cause the sequencer to halt have been * asserted. If, for example, a SCSI bus reset is detected * while we are fielding a different, pausing, interrupt type, * we don't want to release the sequencer before going back * into our interrupt handler and dealing with this new * condition. */ void ahd_unpause(struct ahd_softc *ahd) { /* * Automatically restore our modes to those saved * prior to the first change of the mode. 
*/ if (ahd->saved_src_mode != AHD_MODE_UNKNOWN && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) { if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0) ahd_reset_cmds_pending(ahd); ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode); } if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0) ahd_outb(ahd, HCNTRL, ahd->unpause); ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN); } /*********************** Scatter Gather List Handling *************************/ void * ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb, void *sgptr, dma_addr_t addr, bus_size_t len, int last) { scb->sg_count++; if (sizeof(dma_addr_t) > 4 && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg; sg = (struct ahd_dma64_seg *)sgptr; sg->addr = ahd_htole64(addr); sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0)); return (sg + 1); } else { struct ahd_dma_seg *sg; sg = (struct ahd_dma_seg *)sgptr; sg->addr = ahd_htole32(addr & 0xFFFFFFFF); sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000) | (last ? AHD_DMA_LAST_SEG : 0)); return (sg + 1); } } static void ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb) { /* XXX Handle target mode SCBs. */ scb->crc_retry_count = 0; if ((scb->flags & SCB_PACKETIZED) != 0) { /* XXX what about ACA?? It is type 4, but TAG_TYPE == 0x3. */ scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE; } else { if (ahd_get_transfer_length(scb) & 0x01) scb->hscb->task_attribute = SCB_XFERLEN_ODD; else scb->hscb->task_attribute = 0; } if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0) scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr = ahd_htole32(scb->sense_busaddr); } static void ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb) { /* * Copy the first SG into the "current" data ponter area. 
*/ if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg; sg = (struct ahd_dma64_seg *)scb->sg_list; scb->hscb->dataptr = sg->addr; scb->hscb->datacnt = sg->len; } else { struct ahd_dma_seg *sg; uint32_t *dataptr_words; sg = (struct ahd_dma_seg *)scb->sg_list; dataptr_words = (uint32_t*)&scb->hscb->dataptr; dataptr_words[0] = sg->addr; dataptr_words[1] = 0; if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) { uint64_t high_addr; high_addr = ahd_le32toh(sg->len) & 0x7F000000; scb->hscb->dataptr |= ahd_htole64(high_addr << 8); } scb->hscb->datacnt = sg->len; } /* * Note where to find the SG entries in bus space. * We also set the full residual flag which the * sequencer will clear as soon as a data transfer * occurs. */ scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID); } static void ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb) { scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL); scb->hscb->dataptr = 0; scb->hscb->datacnt = 0; } /************************** Memory mapping routines ***************************/ static void * ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr) { dma_addr_t sg_offset; /* sg_list_phys points to entry 1, not 0 */ sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd)); return ((uint8_t *)scb->sg_list + sg_offset); } static uint32_t ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg) { dma_addr_t sg_offset; /* sg_list_phys points to entry 1, not 0 */ sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list) - ahd_sg_size(ahd); return (scb->sg_list_busaddr + sg_offset); } static void ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op) { ahd_dmamap_sync(ahd, ahd->scb_data.hscb_dmat, scb->hscb_map->dmamap, /*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr, /*len*/sizeof(*scb->hscb), op); } void ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op) { if (scb->sg_count == 0) return; ahd_dmamap_sync(ahd, ahd->scb_data.sg_dmat, 
scb->sg_map->dmamap, /*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd), /*len*/ahd_sg_size(ahd) * scb->sg_count, op); } static void ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op) { ahd_dmamap_sync(ahd, ahd->scb_data.sense_dmat, scb->sense_map->dmamap, /*offset*/scb->sense_busaddr, /*len*/AHD_SENSE_BUFSIZE, op); } #ifdef AHD_TARGET_MODE static uint32_t ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index) { return (((uint8_t *)&ahd->targetcmds[index]) - (uint8_t *)ahd->qoutfifo); } #endif /*********************** Miscellaneous Support Functions ***********************/ /* * Return pointers to the transfer negotiation information * for the specified our_id/remote_id pair. */ struct ahd_initiator_tinfo * ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id, u_int remote_id, struct ahd_tmode_tstate **tstate) { /* * Transfer data structures are stored from the perspective * of the target role. Since the parameters for a connection * in the initiator role to a given target are the same as * when the roles are reversed, we pretend we are the target. */ if (channel == 'B') our_id += 8; *tstate = ahd->enabled_targets[our_id]; return (&(*tstate)->transinfo[remote_id]); } uint16_t ahd_inw(struct ahd_softc *ahd, u_int port) { /* * Read high byte first as some registers increment * or have other side effects when the low byte is * read. */ uint16_t r = ahd_inb(ahd, port+1) << 8; return r | ahd_inb(ahd, port); } void ahd_outw(struct ahd_softc *ahd, u_int port, u_int value) { /* * Write low byte first to accommodate registers * such as PRGMCNT where the order maters. 
*/ ahd_outb(ahd, port, value & 0xFF); ahd_outb(ahd, port+1, (value >> 8) & 0xFF); } uint32_t ahd_inl(struct ahd_softc *ahd, u_int port) { return ((ahd_inb(ahd, port)) | (ahd_inb(ahd, port+1) << 8) | (ahd_inb(ahd, port+2) << 16) | (ahd_inb(ahd, port+3) << 24)); } void ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value) { ahd_outb(ahd, port, (value) & 0xFF); ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF); ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF); ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF); } uint64_t ahd_inq(struct ahd_softc *ahd, u_int port) { return ((ahd_inb(ahd, port)) | (ahd_inb(ahd, port+1) << 8) | (ahd_inb(ahd, port+2) << 16) | (ahd_inb(ahd, port+3) << 24) | (((uint64_t)ahd_inb(ahd, port+4)) << 32) | (((uint64_t)ahd_inb(ahd, port+5)) << 40) | (((uint64_t)ahd_inb(ahd, port+6)) << 48) | (((uint64_t)ahd_inb(ahd, port+7)) << 56)); } void ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value) { ahd_outb(ahd, port, value & 0xFF); ahd_outb(ahd, port+1, (value >> 8) & 0xFF); ahd_outb(ahd, port+2, (value >> 16) & 0xFF); ahd_outb(ahd, port+3, (value >> 24) & 0xFF); ahd_outb(ahd, port+4, (value >> 32) & 0xFF); ahd_outb(ahd, port+5, (value >> 40) & 0xFF); ahd_outb(ahd, port+6, (value >> 48) & 0xFF); ahd_outb(ahd, port+7, (value >> 56) & 0xFF); } u_int ahd_get_scbptr(struct ahd_softc *ahd) { AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8)); } void ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr) { AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); ahd_outb(ahd, SCBPTR, scbptr & 0xFF); ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF); } #if 0 /* unused */ static u_int ahd_get_hnscb_qoff(struct ahd_softc *ahd) { return (ahd_inw_atomic(ahd, HNSCB_QOFF)); } #endif static void ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value) { ahd_outw_atomic(ahd, HNSCB_QOFF, value); } #if 0 /* 
unused */ static u_int ahd_get_hescb_qoff(struct ahd_softc *ahd) { return (ahd_inb(ahd, HESCB_QOFF)); } #endif static void ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value) { ahd_outb(ahd, HESCB_QOFF, value); } static u_int ahd_get_snscb_qoff(struct ahd_softc *ahd) { u_int oldvalue; AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); oldvalue = ahd_inw(ahd, SNSCB_QOFF); ahd_outw(ahd, SNSCB_QOFF, oldvalue); return (oldvalue); } static void ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value) { AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); ahd_outw(ahd, SNSCB_QOFF, value); } #if 0 /* unused */ static u_int ahd_get_sescb_qoff(struct ahd_softc *ahd) { AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); return (ahd_inb(ahd, SESCB_QOFF)); } #endif static void ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value) { AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); ahd_outb(ahd, SESCB_QOFF, value); } #if 0 /* unused */ static u_int ahd_get_sdscb_qoff(struct ahd_softc *ahd) { AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8)); } #endif static void ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value) { AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); ahd_outb(ahd, SDSCB_QOFF, value & 0xFF); ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF); } u_int ahd_inb_scbram(struct ahd_softc *ahd, u_int offset) { u_int value; /* * Workaround PCI-X Rev A. hardware bug. * After a host read of SCB memory, the chip * may become confused into thinking prefetch * was required. This starts the discard timer * running and can cause an unexpected discard * timer interrupt. The work around is to read * a normal register prior to the exhaustion of * the discard timer. The mode pointer register * has no side effects and so serves well for * this purpose. 
* * Razor #528 */ value = ahd_inb(ahd, offset); if ((ahd->bugs & AHD_PCIX_SCBRAM_RD_BUG) != 0) ahd_inb(ahd, MODE_PTR); return (value); } u_int ahd_inw_scbram(struct ahd_softc *ahd, u_int offset) { return (ahd_inb_scbram(ahd, offset) | (ahd_inb_scbram(ahd, offset+1) << 8)); } static uint32_t ahd_inl_scbram(struct ahd_softc *ahd, u_int offset) { return (ahd_inw_scbram(ahd, offset) | (ahd_inw_scbram(ahd, offset+2) << 16)); } static uint64_t ahd_inq_scbram(struct ahd_softc *ahd, u_int offset) { return (ahd_inl_scbram(ahd, offset) | ((uint64_t)ahd_inl_scbram(ahd, offset+4)) << 32); } struct scb * ahd_lookup_scb(struct ahd_softc *ahd, u_int tag) { struct scb* scb; if (tag >= AHD_SCB_MAX) return (NULL); scb = ahd->scb_data.scbindex[tag]; if (scb != NULL) ahd_sync_scb(ahd, scb, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); return (scb); } static void ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb) { struct hardware_scb *q_hscb; struct map_node *q_hscb_map; uint32_t saved_hscb_busaddr; /* * Our queuing method is a bit tricky. The card * knows in advance which HSCB (by address) to download, * and we can't disappoint it. To achieve this, the next * HSCB to download is saved off in ahd->next_queued_hscb. * When we are called to queue "an arbitrary scb", * we copy the contents of the incoming HSCB to the one * the sequencer knows about, swap HSCB pointers and * finally assign the SCB to the tag indexed location * in the scb_array. This makes sure that we can still * locate the correct SCB by SCB_TAG. */ q_hscb = ahd->next_queued_hscb; q_hscb_map = ahd->next_queued_hscb_map; saved_hscb_busaddr = q_hscb->hscb_busaddr; memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb)); q_hscb->hscb_busaddr = saved_hscb_busaddr; q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr; /* Now swap HSCB pointers. 
*/ ahd->next_queued_hscb = scb->hscb; ahd->next_queued_hscb_map = scb->hscb_map; scb->hscb = q_hscb; scb->hscb_map = q_hscb_map; /* Now define the mapping from tag to SCB in the scbindex */ ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb; } /* * Tell the sequencer about a new transaction to execute. */ void ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb) { ahd_swap_with_next_hscb(ahd, scb); if (SCBID_IS_NULL(SCB_GET_TAG(scb))) panic("Attempt to queue invalid SCB tag %x\n", SCB_GET_TAG(scb)); /* * Keep a history of SCBs we've downloaded in the qinfifo. */ ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb); ahd->qinfifonext++; if (scb->sg_count != 0) ahd_setup_data_scb(ahd, scb); else ahd_setup_noxfer_scb(ahd, scb); ahd_setup_scb_common(ahd, scb); /* * Make sure our data is consistent from the * perspective of the adapter. */ ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_QUEUE) != 0) { uint64_t host_dataptr; host_dataptr = ahd_le64toh(scb->hscb->dataptr); printk("%s: Queueing SCB %d:0x%x bus addr 0x%x - 0x%x%x/0x%x\n", ahd_name(ahd), SCB_GET_TAG(scb), scb->hscb->scsiid, ahd_le32toh(scb->hscb->hscb_busaddr), (u_int)((host_dataptr >> 32) & 0xFFFFFFFF), (u_int)(host_dataptr & 0xFFFFFFFF), ahd_le32toh(scb->hscb->datacnt)); } #endif /* Tell the adapter about the newly queued SCB */ ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); } /************************** Interrupt Processing ******************************/ static void ahd_sync_qoutfifo(struct ahd_softc *ahd, int op) { ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, /*offset*/0, /*len*/AHD_SCB_MAX * sizeof(struct ahd_completion), op); } static void ahd_sync_tqinfifo(struct ahd_softc *ahd, int op) { #ifdef AHD_TARGET_MODE if ((ahd->flags & AHD_TARGETROLE) != 0) { ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, ahd_targetcmd_offset(ahd, 0), sizeof(struct target_cmd) * AHD_TMODE_CMDS, op); } #endif 
} /* * See if the firmware has posted any completed commands * into our in-core command complete fifos. */ #define AHD_RUN_QOUTFIFO 0x1 #define AHD_RUN_TQINFIFO 0x2 static u_int ahd_check_cmdcmpltqueues(struct ahd_softc *ahd) { u_int retval; retval = 0; ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, /*offset*/ahd->qoutfifonext * sizeof(*ahd->qoutfifo), /*len*/sizeof(*ahd->qoutfifo), BUS_DMASYNC_POSTREAD); if (ahd->qoutfifo[ahd->qoutfifonext].valid_tag == ahd->qoutfifonext_valid_tag) retval |= AHD_RUN_QOUTFIFO; #ifdef AHD_TARGET_MODE if ((ahd->flags & AHD_TARGETROLE) != 0 && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) { ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, ahd_targetcmd_offset(ahd, ahd->tqinfifofnext), /*len*/sizeof(struct target_cmd), BUS_DMASYNC_POSTREAD); if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0) retval |= AHD_RUN_TQINFIFO; } #endif return (retval); } /* * Catch an interrupt from the adapter */ int ahd_intr(struct ahd_softc *ahd) { u_int intstat; if ((ahd->pause & INTEN) == 0) { /* * Our interrupt is not enabled on the chip * and may be disabled for re-entrancy reasons, * so just return. This is likely just a shared * interrupt. */ return (0); } /* * Instead of directly reading the interrupt status register, * infer the cause of the interrupt by checking our in-core * completion queues. This avoids a costly PCI bus read in * most cases. */ if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0 && (ahd_check_cmdcmpltqueues(ahd) != 0)) intstat = CMDCMPLT; else intstat = ahd_inb(ahd, INTSTAT); if ((intstat & INT_PEND) == 0) return (0); if (intstat & CMDCMPLT) { ahd_outb(ahd, CLRINT, CLRCMDINT); /* * Ensure that the chip sees that we've cleared * this interrupt before we walk the output fifo. * Otherwise, we may, due to posted bus writes, * clear the interrupt after we finish the scan, * and after the sequencer has added new entries * and asserted the interrupt again. 
*/ if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) { if (ahd_is_paused(ahd)) { /* * Potentially lost SEQINT. * If SEQINTCODE is non-zero, * simulate the SEQINT. */ if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT) intstat |= SEQINT; } } else { ahd_flush_device_writes(ahd); } ahd_run_qoutfifo(ahd); ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++; ahd->cmdcmplt_total++; #ifdef AHD_TARGET_MODE if ((ahd->flags & AHD_TARGETROLE) != 0) ahd_run_tqinfifo(ahd, /*paused*/FALSE); #endif } /* * Handle statuses that may invalidate our cached * copy of INTSTAT separately. */ if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) { /* Hot eject. Do nothing */ } else if (intstat & HWERRINT) { ahd_handle_hwerrint(ahd); } else if ((intstat & (PCIINT|SPLTINT)) != 0) { ahd->bus_intr(ahd); } else { if ((intstat & SEQINT) != 0) ahd_handle_seqint(ahd, intstat); if ((intstat & SCSIINT) != 0) ahd_handle_scsiint(ahd, intstat); } return (1); } /******************************** Private Inlines *****************************/ static inline void ahd_assert_atn(struct ahd_softc *ahd) { ahd_outb(ahd, SCSISIGO, ATNO); } /* * Determine if the current connection has a packetized * agreement. This does not necessarily mean that we * are currently in a packetized transfer. We could * just as easily be sending or receiving a message. */ static int ahd_currently_packetized(struct ahd_softc *ahd) { ahd_mode_state saved_modes; int packetized; saved_modes = ahd_save_modes(ahd); if ((ahd->bugs & AHD_PKTIZED_STATUS_BUG) != 0) { /* * The packetized bit refers to the last * connection, not the current one. Check * for non-zero LQISTATE instead. 
*/ ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); packetized = ahd_inb(ahd, LQISTATE) != 0; } else { ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); packetized = ahd_inb(ahd, LQISTAT2) & PACKETIZED; } ahd_restore_modes(ahd, saved_modes); return (packetized); } static inline int ahd_set_active_fifo(struct ahd_softc *ahd) { u_int active_fifo; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); active_fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO; switch (active_fifo) { case 0: case 1: ahd_set_modes(ahd, active_fifo, active_fifo); return (1); default: return (0); } } static inline void ahd_unbusy_tcl(struct ahd_softc *ahd, u_int tcl) { ahd_busy_tcl(ahd, tcl, SCB_LIST_NULL); } /* * Determine whether the sequencer reported a residual * for this SCB/transaction. */ static inline void ahd_update_residual(struct ahd_softc *ahd, struct scb *scb) { uint32_t sgptr; sgptr = ahd_le32toh(scb->hscb->sgptr); if ((sgptr & SG_STATUS_VALID) != 0) ahd_calc_residual(ahd, scb); } static inline void ahd_complete_scb(struct ahd_softc *ahd, struct scb *scb) { uint32_t sgptr; sgptr = ahd_le32toh(scb->hscb->sgptr); if ((sgptr & SG_STATUS_VALID) != 0) ahd_handle_scb_status(ahd, scb); else ahd_done(ahd, scb); } /************************* Sequencer Execution Control ************************/ /* * Restart the sequencer program from address zero */ static void ahd_restart(struct ahd_softc *ahd) { ahd_pause(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); /* No more pending messages */ ahd_clear_msg_state(ahd); ahd_outb(ahd, SCSISIGO, 0); /* De-assert BSY */ ahd_outb(ahd, MSG_OUT, MSG_NOOP); /* No message to send */ ahd_outb(ahd, SXFRCTL1, ahd_inb(ahd, SXFRCTL1) & ~BITBUCKET); ahd_outb(ahd, SEQINTCTL, 0); ahd_outb(ahd, LASTPHASE, P_BUSFREE); ahd_outb(ahd, SEQ_FLAGS, 0); ahd_outb(ahd, SAVED_SCSIID, 0xFF); ahd_outb(ahd, SAVED_LUN, 0xFF); /* * Ensure that the sequencer's idea of TQINPOS * matches our own. 
The sequencer increments TQINPOS * only after it sees a DMA complete and a reset could * occur before the increment leaving the kernel to believe * the command arrived but the sequencer to not. */ ahd_outb(ahd, TQINPOS, ahd->tqinfifonext); /* Always allow reselection */ ahd_outb(ahd, SCSISEQ1, ahd_inb(ahd, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP)); ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); /* * Clear any pending sequencer interrupt. It is no * longer relevant since we're resetting the Program * Counter. */ ahd_outb(ahd, CLRINT, CLRSEQINT); ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET); ahd_unpause(ahd); } static void ahd_clear_fifo(struct ahd_softc *ahd, u_int fifo) { ahd_mode_state saved_modes; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_FIFOS) != 0) printk("%s: Clearing FIFO %d\n", ahd_name(ahd), fifo); #endif saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, fifo, fifo); ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT); if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) ahd_outb(ahd, CCSGCTL, CCSGRESET); ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR); ahd_outb(ahd, SG_STATE, 0); ahd_restore_modes(ahd, saved_modes); } /************************* Input/Output Queues ********************************/ /* * Flush and completed commands that are sitting in the command * complete queues down on the chip but have yet to be dma'ed back up. */ static void ahd_flush_qoutfifo(struct ahd_softc *ahd) { struct scb *scb; ahd_mode_state saved_modes; u_int saved_scbptr; u_int ccscbctl; u_int scbid; u_int next_scbid; saved_modes = ahd_save_modes(ahd); /* * Flush the good status FIFO for completed packetized commands. 
*/ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); saved_scbptr = ahd_get_scbptr(ahd); while ((ahd_inb(ahd, LQISTAT2) & LQIGSAVAIL) != 0) { u_int fifo_mode; u_int i; scbid = ahd_inw(ahd, GSFIFO); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printk("%s: Warning - GSFIFO SCB %d invalid\n", ahd_name(ahd), scbid); continue; } /* * Determine if this transaction is still active in * any FIFO. If it is, we must flush that FIFO to * the host before completing the command. */ fifo_mode = 0; rescan_fifos: for (i = 0; i < 2; i++) { /* Toggle to the other mode. */ fifo_mode ^= 1; ahd_set_modes(ahd, fifo_mode, fifo_mode); if (ahd_scb_active_in_fifo(ahd, scb) == 0) continue; ahd_run_data_fifo(ahd, scb); /* * Running this FIFO may cause a CFG4DATA for * this same transaction to assert in the other * FIFO or a new snapshot SAVEPTRS interrupt * in this FIFO. Even running a FIFO may not * clear the transaction if we are still waiting * for data to drain to the host. We must loop * until the transaction is not active in either * FIFO just to be sure. Reset our loop counter * so we will visit both FIFOs again before * declaring this transaction finished. We * also delay a bit so that status has a chance * to change before we look at this FIFO again. */ ahd_delay(200); goto rescan_fifos; } ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_set_scbptr(ahd, scbid); if ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_LIST_NULL) == 0 && ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_FULL_RESID) != 0 || (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR) & SG_LIST_NULL) != 0)) { u_int comp_head; /* * The transfer completed with a residual. * Place this SCB on the complete DMA list * so that we update our in-core copy of the * SCB before completing the command. 
*/ ahd_outb(ahd, SCB_SCSI_STATUS, 0); ahd_outb(ahd, SCB_SGPTR, ahd_inb_scbram(ahd, SCB_SGPTR) | SG_STATUS_VALID); ahd_outw(ahd, SCB_TAG, scbid); ahd_outw(ahd, SCB_NEXT_COMPLETE, SCB_LIST_NULL); comp_head = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD); if (SCBID_IS_NULL(comp_head)) { ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, scbid); ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid); } else { u_int tail; tail = ahd_inw(ahd, COMPLETE_DMA_SCB_TAIL); ahd_set_scbptr(ahd, tail); ahd_outw(ahd, SCB_NEXT_COMPLETE, scbid); ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid); ahd_set_scbptr(ahd, scbid); } } else ahd_complete_scb(ahd, scb); } ahd_set_scbptr(ahd, saved_scbptr); /* * Setup for command channel portion of flush. */ ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); /* * Wait for any inprogress DMA to complete and clear DMA state * if this if for an SCB in the qinfifo. */ while (((ccscbctl = ahd_inb(ahd, CCSCBCTL)) & (CCARREN|CCSCBEN)) != 0) { if ((ccscbctl & (CCSCBDIR|CCARREN)) == (CCSCBDIR|CCARREN)) { if ((ccscbctl & ARRDONE) != 0) break; } else if ((ccscbctl & CCSCBDONE) != 0) break; ahd_delay(200); } /* * We leave the sequencer to cleanup in the case of DMA's to * update the qoutfifo. In all other cases (DMA's to the * chip or a push of an SCB from the COMPLETE_DMA_SCB list), * we disable the DMA engine so that the sequencer will not * attempt to handle the DMA completion. */ if ((ccscbctl & CCSCBDIR) != 0 || (ccscbctl & ARRDONE) != 0) ahd_outb(ahd, CCSCBCTL, ccscbctl & ~(CCARREN|CCSCBEN)); /* * Complete any SCBs that just finished * being DMA'ed into the qoutfifo. */ ahd_run_qoutfifo(ahd); saved_scbptr = ahd_get_scbptr(ahd); /* * Manually update/complete any completed SCBs that are waiting to be * DMA'ed back up to the host. 
*/ scbid = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD); while (!SCBID_IS_NULL(scbid)) { uint8_t *hscb_ptr; u_int i; ahd_set_scbptr(ahd, scbid); next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printk("%s: Warning - DMA-up and complete " "SCB %d invalid\n", ahd_name(ahd), scbid); continue; } hscb_ptr = (uint8_t *)scb->hscb; for (i = 0; i < sizeof(struct hardware_scb); i++) *hscb_ptr++ = ahd_inb_scbram(ahd, SCB_BASE + i); ahd_complete_scb(ahd, scb); scbid = next_scbid; } ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL); ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL); scbid = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD); while (!SCBID_IS_NULL(scbid)) { ahd_set_scbptr(ahd, scbid); next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printk("%s: Warning - Complete Qfrz SCB %d invalid\n", ahd_name(ahd), scbid); continue; } ahd_complete_scb(ahd, scb); scbid = next_scbid; } ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL); scbid = ahd_inw(ahd, COMPLETE_SCB_HEAD); while (!SCBID_IS_NULL(scbid)) { ahd_set_scbptr(ahd, scbid); next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printk("%s: Warning - Complete SCB %d invalid\n", ahd_name(ahd), scbid); continue; } ahd_complete_scb(ahd, scb); scbid = next_scbid; } ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL); /* * Restore state. */ ahd_set_scbptr(ahd, saved_scbptr); ahd_restore_modes(ahd, saved_modes); ahd->flags |= AHD_UPDATE_PEND_CMDS; } /* * Determine if an SCB for a packetized transaction * is active in a FIFO. */ static int ahd_scb_active_in_fifo(struct ahd_softc *ahd, struct scb *scb) { /* * The FIFO is only active for our transaction if * the SCBPTR matches the SCB's ID and the firmware * has installed a handler for the FIFO or we have * a pending SAVEPTRS or CFG4DATA interrupt. 
	 */
	if (ahd_get_scbptr(ahd) != SCB_GET_TAG(scb)
	 || ((ahd_inb(ahd, LONGJMP_ADDR+1) & INVALID_ADDR) != 0
	  && (ahd_inb(ahd, SEQINTSRC) & (CFG4DATA|SAVEPTRS)) == 0))
		return (0);

	return (1);
}

/*
 * Run a data fifo to completion for a transaction we know
 * has completed across the SCSI bus (good status has been
 * received). We are already set to the correct FIFO mode
 * on entry to this routine.
 *
 * This function attempts to operate exactly as the firmware
 * would when running this FIFO. Care must be taken to update
 * this routine any time the firmware's FIFO algorithm is
 * changed.
 */
static void
ahd_run_data_fifo(struct ahd_softc *ahd, struct scb *scb)
{
	u_int seqintsrc;

	seqintsrc = ahd_inb(ahd, SEQINTSRC);
	if ((seqintsrc & CFG4DATA) != 0) {
		uint32_t datacnt;
		uint32_t sgptr;

		/*
		 * Clear full residual flag.
		 */
		sgptr = ahd_inl_scbram(ahd, SCB_SGPTR) & ~SG_FULL_RESID;
		ahd_outb(ahd, SCB_SGPTR, sgptr);

		/*
		 * Load datacnt and address.
		 */
		datacnt = ahd_inl_scbram(ahd, SCB_DATACNT);
		if ((datacnt & AHD_DMA_LAST_SEG) != 0) {
			sgptr |= LAST_SEG;
			ahd_outb(ahd, SG_STATE, 0);
		} else
			ahd_outb(ahd, SG_STATE, LOADING_NEEDED);

		ahd_outq(ahd, HADDR, ahd_inq_scbram(ahd, SCB_DATAPTR));
		ahd_outl(ahd, HCNT, datacnt & AHD_SG_LEN_MASK);
		ahd_outb(ahd, SG_CACHE_PRE, sgptr);
		ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN);

		/*
		 * Initialize Residual Fields.
		 */
		ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, datacnt >> 24);
		ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr & SG_PTR_MASK);

		/*
		 * Mark the SCB as having a FIFO in use.
		 */
		ahd_outb(ahd, SCB_FIFO_USE_COUNT,
			 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) + 1);

		/*
		 * Install a "fake" handler for this FIFO.
		 */
		ahd_outw(ahd, LONGJMP_ADDR, 0);

		/*
		 * Notify the hardware that we have satisfied
		 * this sequencer interrupt.
		 */
		ahd_outb(ahd, CLRSEQINTSRC, CLRCFG4DATA);
	} else if ((seqintsrc & SAVEPTRS) != 0) {
		uint32_t sgptr;
		uint32_t resid;

		if ((ahd_inb(ahd, LONGJMP_ADDR+1)&INVALID_ADDR) != 0) {
			/*
			 * Snapshot Save Pointers. All that
			 * is necessary to clear the snapshot
			 * is a CLRCHN.
			 */
			goto clrchn;
		}

		/*
		 * Disable S/G fetch so the DMA engine
		 * is available to future users.
		 */
		if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0)
			ahd_outb(ahd, CCSGCTL, 0);
		ahd_outb(ahd, SG_STATE, 0);

		/*
		 * Flush the data FIFO. Strickly only
		 * necessary for Rev A parts.
		 */
		ahd_outb(ahd, DFCNTRL, ahd_inb(ahd, DFCNTRL) | FIFOFLUSH);

		/*
		 * Calculate residual.
		 */
		sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
		resid = ahd_inl(ahd, SHCNT);
		resid |= ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT+3) << 24;
		ahd_outl(ahd, SCB_RESIDUAL_DATACNT, resid);
		if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG) == 0) {
			/*
			 * Must back up to the correct S/G element.
			 * Typically this just means resetting our
			 * low byte to the offset in the SG_CACHE,
			 * but if we wrapped, we have to correct
			 * the other bytes of the sgptr too.
			 */
			if ((ahd_inb(ahd, SG_CACHE_SHADOW) & 0x80) != 0
			 && (sgptr & 0x80) == 0)
				sgptr -= 0x100;
			sgptr &= ~0xFF;
			sgptr |= ahd_inb(ahd, SG_CACHE_SHADOW)
			       & SG_ADDR_MASK;
			ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);
			ahd_outb(ahd, SCB_RESIDUAL_DATACNT + 3, 0);
		} else if ((resid & AHD_SG_LEN_MASK) == 0) {
			ahd_outb(ahd, SCB_RESIDUAL_SGPTR,
				 sgptr | SG_LIST_NULL);
		}
		/*
		 * Save Pointers.
		 */
		ahd_outq(ahd, SCB_DATAPTR, ahd_inq(ahd, SHADDR));
		ahd_outl(ahd, SCB_DATACNT, resid);
		ahd_outl(ahd, SCB_SGPTR, sgptr);
		ahd_outb(ahd, CLRSEQINTSRC, CLRSAVEPTRS);
		ahd_outb(ahd, SEQIMODE,
			 ahd_inb(ahd, SEQIMODE) | ENSAVEPTRS);
		/*
		 * If the data is to the SCSI bus, we are
		 * done, otherwise wait for FIFOEMP.
		 */
		if ((ahd_inb(ahd, DFCNTRL) & DIRECTION) != 0)
			goto clrchn;
	} else if ((ahd_inb(ahd, SG_STATE) & LOADING_NEEDED) != 0) {
		uint32_t sgptr;
		uint64_t data_addr;
		uint32_t data_len;
		u_int	 dfcntrl;

		/*
		 * Disable S/G fetch so the DMA engine
		 * is available to future users. We won't
		 * be using the DMA engine to load segments.
		 */
		if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) {
			ahd_outb(ahd, CCSGCTL, 0);
			ahd_outb(ahd, SG_STATE, LOADING_NEEDED);
		}

		/*
		 * Wait for the DMA engine to notice that the
		 * host transfer is enabled and that there is
		 * space in the S/G FIFO for new segments before
		 * loading more segments.
		 */
		if ((ahd_inb(ahd, DFSTATUS) & PRELOAD_AVAIL) != 0
		 && (ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0) {

			/*
			 * Determine the offset of the next S/G
			 * element to load.
			 */
			sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
			sgptr &= SG_PTR_MASK;
			if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
				struct ahd_dma64_seg *sg;

				sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
				data_addr = sg->addr;
				data_len = sg->len;
				sgptr += sizeof(*sg);
			} else {
				struct ahd_dma_seg *sg;

				sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
				/*
				 * 32-bit S/G format packs the high
				 * address byte into the length word.
				 */
				data_addr = sg->len & AHD_SG_HIGH_ADDR_MASK;
				data_addr <<= 8;
				data_addr |= sg->addr;
				data_len = sg->len;
				sgptr += sizeof(*sg);
			}

			/*
			 * Update residual information.
			 */
			ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3,
				 data_len >> 24);
			ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);

			/*
			 * Load the S/G.
			 */
			if (data_len & AHD_DMA_LAST_SEG) {
				sgptr |= LAST_SEG;
				ahd_outb(ahd, SG_STATE, 0);
			}
			ahd_outq(ahd, HADDR, data_addr);
			ahd_outl(ahd, HCNT, data_len & AHD_SG_LEN_MASK);
			ahd_outb(ahd, SG_CACHE_PRE, sgptr & 0xFF);

			/*
			 * Advertise the segment to the hardware.
			 */
			dfcntrl = ahd_inb(ahd, DFCNTRL)|PRELOADEN|HDMAEN;
			if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0) {
				/*
				 * Use SCSIENWRDIS so that SCSIEN
				 * is never modified by this
				 * operation.
				 */
				dfcntrl |= SCSIENWRDIS;
			}
			ahd_outb(ahd, DFCNTRL, dfcntrl);
		}
	} else if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG_DONE) != 0) {

		/*
		 * Transfer completed to the end of SG list
		 * and has flushed to the host.
		 */
		ahd_outb(ahd, SCB_SGPTR,
			 ahd_inb_scbram(ahd, SCB_SGPTR) | SG_LIST_NULL);
		goto clrchn;
	} else if ((ahd_inb(ahd, DFSTATUS) & FIFOEMP) != 0) {
clrchn:
		/*
		 * Clear any handler for this FIFO, decrement
		 * the FIFO use count for the SCB, and release
		 * the FIFO.
		 */
		ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
		ahd_outb(ahd, SCB_FIFO_USE_COUNT,
			 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) - 1);
		ahd_outb(ahd, DFFSXFRCTL, CLRCHN);
	}
}

/*
 * Look for entries in the QoutFIFO that have completed.
 * The valid_tag completion field indicates the validity
 * of the entry - the valid value toggles each time through
 * the queue. We use the sg_status field in the completion
 * entry to avoid referencing the hscb if the completion
 * occurred with no errors and no residual. sg_status is
 * a copy of the first byte (little endian) of the sgptr
 * hscb field.
 */
static void
ahd_run_qoutfifo(struct ahd_softc *ahd)
{
	struct ahd_completion *completion;
	struct scb *scb;
	u_int scb_index;

	/* Guard against re-entry; this routine is not reentrant. */
	if ((ahd->flags & AHD_RUNNING_QOUTFIFO) != 0)
		panic("ahd_run_qoutfifo recursion");
	ahd->flags |= AHD_RUNNING_QOUTFIFO;
	ahd_sync_qoutfifo(ahd, BUS_DMASYNC_POSTREAD);
	for (;;) {
		completion = &ahd->qoutfifo[ahd->qoutfifonext];

		/* Stop at the first entry the controller has not filled. */
		if (completion->valid_tag != ahd->qoutfifonext_valid_tag)
			break;

		scb_index = ahd_le16toh(completion->tag);
		scb = ahd_lookup_scb(ahd, scb_index);
		if (scb == NULL) {
			printk("%s: WARNING no command for scb %d "
			       "(cmdcmplt)\nQOUTPOS = %d\n",
			       ahd_name(ahd), scb_index,
			       ahd->qoutfifonext);
			ahd_dump_card_state(ahd);
		} else if ((completion->sg_status & SG_STATUS_VALID) != 0) {
			ahd_handle_scb_status(ahd, scb);
		} else {
			ahd_done(ahd, scb);
		}

		/* Advance; toggle the valid tag on each wrap of the ring. */
		ahd->qoutfifonext = (ahd->qoutfifonext+1) & (AHD_QOUT_SIZE-1);
		if (ahd->qoutfifonext == 0)
			ahd->qoutfifonext_valid_tag ^= QOUTFIFO_ENTRY_VALID;
	}
	ahd->flags &= ~AHD_RUNNING_QOUTFIFO;
}

/************************* Interrupt Handling *********************************/
static void
ahd_handle_hwerrint(struct ahd_softc *ahd)
{
	/*
	 * Some catastrophic hardware error has occurred.
	 * Print it for the user and disable the controller.
	 */
	int i;
	int error;

	error = ahd_inb(ahd, ERROR);
	for (i = 0; i < num_errors; i++) {
		if ((error & ahd_hard_errors[i].errno) != 0)
			printk("%s: hwerrint, %s\n",
			       ahd_name(ahd), ahd_hard_errors[i].errmesg);
	}

	ahd_dump_card_state(ahd);
	panic("BRKADRINT");

	/*
	 * NOTE(review): everything below is unreachable after the
	 * panic() above; it is retained teardown code.
	 */
	/* Tell everyone that this HBA is no longer available */
	ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS,
		       CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
		       CAM_NO_HBA);

	/* Tell the system that this controller has gone away. */
	ahd_free(ahd);
}

#ifdef AHD_DEBUG
/* Dump the S/G list of an SCB for debugging (AHD_DEBUG builds only). */
static void
ahd_dump_sglist(struct scb *scb)
{
	int i;

	if (scb->sg_count > 0) {
		if ((scb->ahd_softc->flags & AHD_64BIT_ADDRESSING) != 0) {
			struct ahd_dma64_seg *sg_list;

			sg_list = (struct ahd_dma64_seg*)scb->sg_list;
			for (i = 0; i < scb->sg_count; i++) {
				uint64_t addr;
				uint32_t len;

				addr = ahd_le64toh(sg_list[i].addr);
				len = ahd_le32toh(sg_list[i].len);
				printk("sg[%d] - Addr 0x%x%x : Length %d%s\n",
				       i,
				       (uint32_t)((addr >> 32) & 0xFFFFFFFF),
				       (uint32_t)(addr & 0xFFFFFFFF),
				       sg_list[i].len & AHD_SG_LEN_MASK,
				       (sg_list[i].len & AHD_DMA_LAST_SEG)
				     ? " Last" : "");
			}
		} else {
			struct ahd_dma_seg *sg_list;

			sg_list = (struct ahd_dma_seg*)scb->sg_list;
			for (i = 0; i < scb->sg_count; i++) {
				uint32_t len;

				len = ahd_le32toh(sg_list[i].len);
				printk("sg[%d] - Addr 0x%x%x : Length %d%s\n",
				       i,
				       (len & AHD_SG_HIGH_ADDR_MASK) >> 24,
				       ahd_le32toh(sg_list[i].addr),
				       len & AHD_SG_LEN_MASK,
				       len & AHD_DMA_LAST_SEG ? " Last" : "");
			}
		}
	}
}
#endif  /* AHD_DEBUG */

static void
ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
{
	u_int seqintcode;

	/*
	 * Save the sequencer interrupt code and clear the SEQINT
	 * bit. We will unpause the sequencer, if appropriate,
	 * after servicing the request.
	 */
	seqintcode = ahd_inb(ahd, SEQINTCODE);
	ahd_outb(ahd, CLRINT, CLRSEQINT);
	if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
		/*
		 * Unpause the sequencer and let it clear
		 * SEQINT by writing NO_SEQINT to it. This
		 * will cause the sequencer to be paused again,
		 * which is the expected state of this routine.
		 */
		ahd_unpause(ahd);
		while (!ahd_is_paused(ahd))
			;
		ahd_outb(ahd, CLRINT, CLRSEQINT);
	}
	ahd_update_modes(ahd);
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MISC) != 0)
		printk("%s: Handle Seqint Called for code %d\n",
		       ahd_name(ahd), seqintcode);
#endif
	switch (seqintcode) {
	case ENTERING_NONPACK:
	{
		struct scb *scb;
		u_int scbid;

		AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
				 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			/*
			 * Somehow need to know if this
			 * is from a selection or reselection.
			 * From that, we can determine target
			 * ID so we at least have an I_T nexus.
			 */
		} else {
			ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
			ahd_outb(ahd, SAVED_LUN, scb->hscb->lun);
			ahd_outb(ahd, SEQ_FLAGS, 0x0);
		}
		if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0
		 && (ahd_inb(ahd, SCSISIGO) & ATNO) != 0) {
			/*
			 * Phase change after read stream with
			 * CRC error with P0 asserted on last
			 * packet.
			 */
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
				printk("%s: Assuming LQIPHASE_NLQ with "
				       "P0 assertion\n", ahd_name(ahd));
#endif
		}
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
			printk("%s: Entering NONPACK\n", ahd_name(ahd));
#endif
		break;
	}
	case INVALID_SEQINT:
		printk("%s: Invalid Sequencer interrupt occurred, "
		       "resetting channel.\n",
		       ahd_name(ahd));
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
			ahd_dump_card_state(ahd);
#endif
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
		break;
	case STATUS_OVERRUN:
	{
		struct scb *scb;
		u_int scbid;

		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb != NULL)
			ahd_print_path(ahd, scb);
		else
			printk("%s: ", ahd_name(ahd));
		printk("SCB %d Packetized Status Overrun", scbid);
		ahd_dump_card_state(ahd);
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
		break;
	}
	case CFG4ISTAT_INTR:
	{
		struct scb *scb;
		u_int scbid;

		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			ahd_dump_card_state(ahd);
			printk("CFG4ISTAT: Free SCB %d referenced", scbid);
			panic("For safety");
		}
		/* Point the data FIFO at this SCB's sense buffer. */
		ahd_outq(ahd, HADDR, scb->sense_busaddr);
		ahd_outw(ahd, HCNT, AHD_SENSE_BUFSIZE);
		ahd_outb(ahd, HCNT + 2, 0);
		ahd_outb(ahd, SG_CACHE_PRE, SG_LAST_SEG);
		ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN);
		break;
	}
	case ILLEGAL_PHASE:
	{
		u_int bus_phase;

		bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
		printk("%s: ILLEGAL_PHASE 0x%x\n",
		       ahd_name(ahd), bus_phase);

		switch (bus_phase) {
		case P_DATAOUT:
		case P_DATAIN:
		case P_DATAOUT_DT:
		case P_DATAIN_DT:
		case P_MESGOUT:
		case P_STATUS:
		case P_MESGIN:
			ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
			printk("%s: Issued Bus Reset.\n", ahd_name(ahd));
			break;
		case P_COMMAND:
		{
			struct ahd_devinfo devinfo;
			struct scb *scb;
			struct ahd_initiator_tinfo *targ_info;
			struct ahd_tmode_tstate *tstate;
			struct ahd_transinfo *tinfo;
			u_int scbid;

			/*
			 * If a target takes us into the command phase
			 * assume that it has been externally reset and
			 * has thus lost our previous packetized negotiation
			 * agreement. Since we have not sent an identify
			 * message and may not have fully qualified the
			 * connection, we change our command to TUR, assert
			 * ATN and ABORT the task when we go to message in
			 * phase. The OSM will see the REQUEUE_REQUEST
			 * status and retry the command.
			 */
			scbid = ahd_get_scbptr(ahd);
			scb = ahd_lookup_scb(ahd, scbid);
			if (scb == NULL) {
				printk("Invalid phase with no valid SCB. "
				       "Resetting bus.\n");
				ahd_reset_channel(ahd, 'A',
						  /*Initiate Reset*/TRUE);
				break;
			}
			ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb),
					    SCB_GET_TARGET(ahd, scb),
					    SCB_GET_LUN(scb),
					    SCB_GET_CHANNEL(ahd, scb),
					    ROLE_INITIATOR);
			targ_info = ahd_fetch_transinfo(ahd,
							devinfo.channel,
							devinfo.our_scsiid,
							devinfo.target,
							&tstate);
			tinfo = &targ_info->curr;
			/* Drop back to 8-bit async while recovering. */
			ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
				      AHD_TRANS_ACTIVE, /*paused*/TRUE);
			ahd_set_syncrate(ahd, &devinfo, /*period*/0,
					 /*offset*/0, /*ppr_options*/0,
					 AHD_TRANS_ACTIVE, /*paused*/TRUE);
			/* Hand-craft TUR command */
			ahd_outb(ahd, SCB_CDB_STORE, 0);
			ahd_outb(ahd, SCB_CDB_STORE+1, 0);
			ahd_outb(ahd, SCB_CDB_STORE+2, 0);
			ahd_outb(ahd, SCB_CDB_STORE+3, 0);
			ahd_outb(ahd, SCB_CDB_STORE+4, 0);
			ahd_outb(ahd, SCB_CDB_STORE+5, 0);
			ahd_outb(ahd, SCB_CDB_LEN, 6);
			scb->hscb->control &= ~(TAG_ENB|SCB_TAG_TYPE);
			scb->hscb->control |= MK_MESSAGE;
			ahd_outb(ahd, SCB_CONTROL, scb->hscb->control);
			ahd_outb(ahd, MSG_OUT, HOST_MSG);
			ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
			/*
			 * The lun is 0, regardless of the SCB's lun
			 * as we have not sent an identify message.
			 */
			ahd_outb(ahd, SAVED_LUN, 0);
			ahd_outb(ahd, SEQ_FLAGS, 0);
			ahd_assert_atn(ahd);
			scb->flags &= ~SCB_PACKETIZED;
			scb->flags |= SCB_ABORT|SCB_EXTERNAL_RESET;
			ahd_freeze_devq(ahd, scb);
			ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
			ahd_freeze_scb(scb);

			/* Notify XPT */
			ahd_send_async(ahd, devinfo.channel, devinfo.target,
				       CAM_LUN_WILDCARD, AC_SENT_BDR);

			/*
			 * Allow the sequencer to continue with
			 * non-pack processing.
			 */
			ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
			ahd_outb(ahd, CLRLQOINT1, CLRLQOPHACHGINPKT);
			if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
				ahd_outb(ahd, CLRLQOINT1, 0);
			}
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
				ahd_print_path(ahd, scb);
				printk("Unexpected command phase from "
				       "packetized target\n");
			}
#endif
			break;
		}
		}
		break;
	}
	case CFG4OVERRUN:
	{
		struct scb *scb;
		u_int scb_index;

#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
			printk("%s: CFG4OVERRUN mode = %x\n", ahd_name(ahd),
			       ahd_inb(ahd, MODE_PTR));
		}
#endif
		scb_index = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scb_index);
		if (scb == NULL) {
			/*
			 * Attempt to transfer to an SCB that is
			 * not outstanding.
			 */
			ahd_assert_atn(ahd);
			ahd_outb(ahd, MSG_OUT, HOST_MSG);
			ahd->msgout_buf[0] = MSG_ABORT_TASK;
			ahd->msgout_len = 1;
			ahd->msgout_index = 0;
			ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
			/*
			 * Clear status received flag to prevent any
			 * attempt to complete this bogus SCB.
			 */
			ahd_outb(ahd, SCB_CONTROL,
				 ahd_inb_scbram(ahd, SCB_CONTROL)
				 & ~STATUS_RCVD);
		}
		break;
	}
	case DUMP_CARD_STATE:
	{
		ahd_dump_card_state(ahd);
		break;
	}
	case PDATA_REINIT:
	{
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
			printk("%s: PDATA_REINIT - DFCNTRL = 0x%x "
			       "SG_CACHE_SHADOW = 0x%x\n",
			       ahd_name(ahd), ahd_inb(ahd, DFCNTRL),
			       ahd_inb(ahd, SG_CACHE_SHADOW));
		}
#endif
		ahd_reinitialize_dataptrs(ahd);
		break;
	}
	case HOST_MSG_LOOP:
	{
		struct ahd_devinfo devinfo;

		/*
		 * The sequencer has encountered a message phase
		 * that requires host assistance for completion.
		 * While handling the message phase(s), we will be
		 * notified by the sequencer after each byte is
		 * transferred so we can track bus phase changes.
		 *
		 * If this is the first time we've seen a HOST_MSG_LOOP
		 * interrupt, initialize the state of the host message
		 * loop.
		 */
		ahd_fetch_devinfo(ahd, &devinfo);
		if (ahd->msg_type == MSG_TYPE_NONE) {
			struct scb *scb;
			u_int scb_index;
			u_int bus_phase;

			bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
			if (bus_phase != P_MESGIN
			 && bus_phase != P_MESGOUT) {
				printk("ahd_intr: HOST_MSG_LOOP bad "
				       "phase 0x%x\n", bus_phase);
				/*
				 * Probably transitioned to bus free before
				 * we got here. Just punt the message.
				 */
				ahd_dump_card_state(ahd);
				ahd_clear_intstat(ahd);
				ahd_restart(ahd);
				return;
			}

			scb_index = ahd_get_scbptr(ahd);
			scb = ahd_lookup_scb(ahd, scb_index);
			if (devinfo.role == ROLE_INITIATOR) {
				if (bus_phase == P_MESGOUT)
					ahd_setup_initiator_msgout(ahd,
								   &devinfo,
								   scb);
				else {
					ahd->msg_type =
					    MSG_TYPE_INITIATOR_MSGIN;
					ahd->msgin_index = 0;
				}
			}
#ifdef AHD_TARGET_MODE
			else {
				if (bus_phase == P_MESGOUT) {
					ahd->msg_type =
					    MSG_TYPE_TARGET_MSGOUT;
					ahd->msgin_index = 0;
				}
				else
					ahd_setup_target_msgin(ahd,
							       &devinfo,
							       scb);
			}
#endif
		}
		ahd_handle_message_phase(ahd);
		break;
	}
	case NO_MATCH:
	{
		/* Ensure we don't leave the selection hardware on */
		AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
		ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);

		printk("%s:%c:%d: no active SCB for reconnecting "
		       "target - issuing BUS DEVICE RESET\n",
		       ahd_name(ahd), 'A', ahd_inb(ahd, SELID) >> 4);
		printk("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
		       "REG0 == 0x%x ACCUM = 0x%x\n",
		       ahd_inb(ahd, SAVED_SCSIID), ahd_inb(ahd, SAVED_LUN),
		       ahd_inw(ahd, REG0), ahd_inb(ahd, ACCUM));
		printk("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
		       "SINDEX == 0x%x\n",
		       ahd_inb(ahd, SEQ_FLAGS), ahd_get_scbptr(ahd),
		       ahd_find_busy_tcl(ahd,
					 BUILD_TCL(ahd_inb(ahd, SAVED_SCSIID),
						   ahd_inb(ahd, SAVED_LUN))),
		       ahd_inw(ahd, SINDEX));
		printk("SELID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
		       "SCB_CONTROL == 0x%x\n",
		       ahd_inb(ahd, SELID), ahd_inb_scbram(ahd, SCB_SCSIID),
		       ahd_inb_scbram(ahd, SCB_LUN),
		       ahd_inb_scbram(ahd, SCB_CONTROL));
		printk("SCSIBUS[0] == 0x%x, SCSISIGI == 0x%x\n",
		       ahd_inb(ahd, SCSIBUS), ahd_inb(ahd, SCSISIGI));
		printk("SXFRCTL0 == 0x%x\n", ahd_inb(ahd, SXFRCTL0));
		printk("SEQCTL0 == 0x%x\n", ahd_inb(ahd, SEQCTL0));
		ahd_dump_card_state(ahd);
		ahd->msgout_buf[0] = MSG_BUS_DEV_RESET;
		ahd->msgout_len = 1;
		ahd->msgout_index = 0;
		ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
		ahd_outb(ahd, MSG_OUT, HOST_MSG);
		ahd_assert_atn(ahd);
		break;
	}
	case PROTO_VIOLATION:
	{
		ahd_handle_proto_violation(ahd);
		break;
	}
	case IGN_WIDE_RES:
	{
		struct ahd_devinfo devinfo;

		ahd_fetch_devinfo(ahd, &devinfo);
		ahd_handle_ign_wide_residue(ahd, &devinfo);
		break;
	}
	case BAD_PHASE:
	{
		u_int lastphase;

		lastphase = ahd_inb(ahd, LASTPHASE);
		printk("%s:%c:%d: unknown scsi bus phase %x, "
		       "lastphase = 0x%x. Attempting to continue\n",
		       ahd_name(ahd), 'A',
		       SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)),
		       lastphase, ahd_inb(ahd, SCSISIGI));
		break;
	}
	case MISSED_BUSFREE:
	{
		u_int lastphase;

		lastphase = ahd_inb(ahd, LASTPHASE);
		printk("%s:%c:%d: Missed busfree. "
		       "Lastphase = 0x%x, Curphase = 0x%x\n",
		       ahd_name(ahd), 'A',
		       SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)),
		       lastphase, ahd_inb(ahd, SCSISIGI));
		ahd_restart(ahd);
		return;
	}
	case DATA_OVERRUN:
	{
		/*
		 * When the sequencer detects an overrun, it
		 * places the controller in "BITBUCKET" mode
		 * and allows the target to complete its transfer.
		 * Unfortunately, none of the counters get updated
		 * when the controller is in this mode, so we have
		 * no way of knowing how large the overrun was.
		 */
		struct scb *scb;
		u_int scbindex;
#ifdef AHD_DEBUG
		u_int lastphase;
#endif

		scbindex = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbindex);
#ifdef AHD_DEBUG
		lastphase = ahd_inb(ahd, LASTPHASE);
		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
			ahd_print_path(ahd, scb);
			printk("data overrun detected %s. Tag == 0x%x.\n",
			       ahd_lookup_phase_entry(lastphase)->phasemsg,
			       SCB_GET_TAG(scb));
			ahd_print_path(ahd, scb);
			printk("%s seen Data Phase. Length = %ld. "
			       "NumSGs = %d.\n",
			       ahd_inb(ahd, SEQ_FLAGS) & DPHASE
			       ? "Have" : "Haven't",
			       ahd_get_transfer_length(scb), scb->sg_count);
			ahd_dump_sglist(scb);
		}
#endif

		/*
		 * Set this and it will take effect when the
		 * target does a command complete.
		 */
		ahd_freeze_devq(ahd, scb);
		ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR);
		ahd_freeze_scb(scb);
		break;
	}
	case MKMSG_FAILED:
	{
		struct ahd_devinfo devinfo;
		struct scb *scb;
		u_int scbid;

		ahd_fetch_devinfo(ahd, &devinfo);
		printk("%s:%c:%d:%d: Attempt to issue message failed\n",
		       ahd_name(ahd), devinfo.channel, devinfo.target,
		       devinfo.lun);
		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb != NULL
		 && (scb->flags & SCB_RECOVERY_SCB) != 0)
			/*
			 * Ensure that we didn't put a second instance of this
			 * SCB into the QINFIFO.
			 */
			ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
					   SCB_GET_CHANNEL(ahd, scb),
					   SCB_GET_LUN(scb), SCB_GET_TAG(scb),
					   ROLE_INITIATOR, /*status*/0,
					   SEARCH_REMOVE);
		ahd_outb(ahd, SCB_CONTROL,
			 ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE);
		break;
	}
	case TASKMGMT_FUNC_COMPLETE:
	{
		u_int scbid;
		struct scb *scb;

		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb != NULL) {
			u_int lun;
			u_int tag;
			cam_status error;

			ahd_print_path(ahd, scb);
			printk("Task Management Func 0x%x Complete\n",
			       scb->hscb->task_management);
			lun = CAM_LUN_WILDCARD;
			tag = SCB_LIST_NULL;

			switch (scb->hscb->task_management) {
			case SIU_TASKMGMT_ABORT_TASK:
				tag = SCB_GET_TAG(scb);
				/* fallthrough */
			case SIU_TASKMGMT_ABORT_TASK_SET:
			case SIU_TASKMGMT_CLEAR_TASK_SET:
				lun = scb->hscb->lun;
				error = CAM_REQ_ABORTED;
				ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
					       'A', lun, tag, ROLE_INITIATOR,
					       error);
				break;
			case SIU_TASKMGMT_LUN_RESET:
				lun = scb->hscb->lun;
				/* fallthrough */
			case SIU_TASKMGMT_TARGET_RESET:
			{
				struct ahd_devinfo devinfo;

				ahd_scb_devinfo(ahd, &devinfo, scb);
				error = CAM_BDR_SENT;
				ahd_handle_devreset(ahd, &devinfo, lun,
						    CAM_BDR_SENT,
						    lun != CAM_LUN_WILDCARD
						    ? "Lun Reset"
						    : "Target Reset",
						    /*verbose_level*/0);
				break;
			}
			default:
				panic("Unexpected TaskMgmt Func\n");
				break;
			}
		}
		break;
	}
	case TASKMGMT_CMD_CMPLT_OKAY:
	{
		u_int scbid;
		struct scb *scb;

		/*
		 * An ABORT TASK TMF failed to be delivered before
		 * the targeted command completed normally.
		 */
		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb != NULL) {
			/*
			 * Remove the second instance of this SCB from
			 * the QINFIFO if it is still there.
			 */
			ahd_print_path(ahd, scb);
			printk("SCB completes before TMF\n");
			/*
			 * Handle losing the race. Wait until any
			 * current selection completes. We will then
			 * set the TMF back to zero in this SCB so that
			 * the sequencer doesn't bother to issue another
			 * sequencer interrupt for its completion.
			 */
			while ((ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0
			    && (ahd_inb(ahd, SSTAT0) & SELDO) == 0
			    && (ahd_inb(ahd, SSTAT1) & SELTO) == 0)
				;
			ahd_outb(ahd, SCB_TASK_MANAGEMENT, 0);
			ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
					   SCB_GET_CHANNEL(ahd, scb),
					   SCB_GET_LUN(scb), SCB_GET_TAG(scb),
					   ROLE_INITIATOR, /*status*/0,
					   SEARCH_REMOVE);
		}
		break;
	}
	case TRACEPOINT0:
	case TRACEPOINT1:
	case TRACEPOINT2:
	case TRACEPOINT3:
		printk("%s: Tracepoint %d\n", ahd_name(ahd),
		       seqintcode - TRACEPOINT0);
		break;
	case NO_SEQINT:
		break;
	case SAW_HWERR:
		ahd_handle_hwerrint(ahd);
		break;
	default:
		printk("%s: Unexpected SEQINTCODE %d\n", ahd_name(ahd),
		       seqintcode);
		break;
	}
	/*
	 * The sequencer is paused immediately on
	 * a SEQINT, so we should restart it when
	 * we're done.
	 */
	ahd_unpause(ahd);
}

static void
ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
{
	struct scb *scb;
	u_int status0;
	u_int status3;
	u_int status;
	u_int lqistat1;
	u_int lqostat0;
	u_int scbid;
	u_int busfreetime;

	ahd_update_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	/* Latch all of the interrupt sources we care about up front. */
	status3 = ahd_inb(ahd, SSTAT3) & (NTRAMPERR|OSRAMPERR);
	status0 = ahd_inb(ahd, SSTAT0) & (IOERR|OVERRUN|SELDI|SELDO);
	status = ahd_inb(ahd, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
	lqistat1 = ahd_inb(ahd, LQISTAT1);
	lqostat0 = ahd_inb(ahd, LQOSTAT0);
	busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME;

	/*
	 * Ignore external resets after a bus reset.
	 */
	if (((status & SCSIRSTI) != 0) && (ahd->flags & AHD_BUS_RESET_ACTIVE)) {
		ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI);
		return;
	}

	/*
	 * Clear bus reset flag
	 */
	ahd->flags &= ~AHD_BUS_RESET_ACTIVE;

	if ((status0 & (SELDI|SELDO)) != 0) {
		u_int simode0;

		/* Mask selection status down to the enabled sources. */
		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
		simode0 = ahd_inb(ahd, SIMODE0);
		status0 &= simode0 & (IOERR|OVERRUN|SELDI|SELDO);
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	}
	scbid = ahd_get_scbptr(ahd);
	scb = ahd_lookup_scb(ahd, scbid);
	if (scb != NULL
	 && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
		scb = NULL;

	if ((status0 & IOERR) != 0) {
		u_int now_lvd;

		now_lvd = ahd_inb(ahd, SBLKCTL) & ENAB40;
		printk("%s: Transceiver State Has Changed to %s mode\n",
		       ahd_name(ahd), now_lvd ? "LVD" : "SE");
		ahd_outb(ahd, CLRSINT0, CLRIOERR);
		/*
		 * A change in I/O mode is equivalent to a bus reset.
		 */
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
		ahd_pause(ahd);
		ahd_setup_iocell_workaround(ahd);
		ahd_unpause(ahd);
	} else if ((status0 & OVERRUN) != 0) {

		printk("%s: SCSI offset overrun detected. Resetting bus.\n",
		       ahd_name(ahd));
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
	} else if ((status & SCSIRSTI) != 0) {

		printk("%s: Someone reset channel A\n", ahd_name(ahd));
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/FALSE);
	} else if ((status & SCSIPERR) != 0) {

		/* Make sure the sequencer is in a safe location. */
		ahd_clear_critical_section(ahd);

		ahd_handle_transmission_error(ahd);
	} else if (lqostat0 != 0) {

		printk("%s: lqostat0 == 0x%x!\n", ahd_name(ahd), lqostat0);
		ahd_outb(ahd, CLRLQOINT0, lqostat0);
		if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0)
			ahd_outb(ahd, CLRLQOINT1, 0);
	} else if ((status & SELTO) != 0) {
		/* Stop the selection */
		ahd_outb(ahd, SCSISEQ0, 0);

		/* Make sure the sequencer is in a safe location. */
		ahd_clear_critical_section(ahd);

		/* No more pending messages */
		ahd_clear_msg_state(ahd);

		/* Clear interrupt state */
		ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);

		/*
		 * Although the driver does not care about the
		 * 'Selection in Progress' status bit, the busy
		 * LED does. SELINGO is only cleared by a successful
		 * selection, so we must manually clear it to insure
		 * the LED turns off just incase no future successful
		 * selections occur (e.g. no devices on the bus).
		 */
		ahd_outb(ahd, CLRSINT0, CLRSELINGO);

		scbid = ahd_inw(ahd, WAITING_TID_HEAD);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printk("%s: ahd_intr - referenced scb not "
			       "valid during SELTO scb(0x%x)\n",
			       ahd_name(ahd), scbid);
			ahd_dump_card_state(ahd);
		} else {
			struct ahd_devinfo devinfo;
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_SELTO) != 0) {
				ahd_print_path(ahd, scb);
				printk("Saw Selection Timeout for SCB 0x%x\n",
				       scbid);
			}
#endif
			ahd_scb_devinfo(ahd, &devinfo, scb);
			ahd_set_transaction_status(scb, CAM_SEL_TIMEOUT);
			ahd_freeze_devq(ahd, scb);

			/*
			 * Cancel any pending transactions on the device
			 * now that it seems to be missing. This will
			 * also revert us to async/narrow transfers until
			 * we can renegotiate with the device.
			 */
			ahd_handle_devreset(ahd, &devinfo,
					    CAM_LUN_WILDCARD,
					    CAM_SEL_TIMEOUT,
					    "Selection Timeout",
					    /*verbose_level*/1);
		}
		ahd_outb(ahd, CLRINT, CLRSCSIINT);
		ahd_iocell_first_selection(ahd);
		ahd_unpause(ahd);
	} else if ((status0 & (SELDI|SELDO)) != 0) {

		ahd_iocell_first_selection(ahd);
		ahd_unpause(ahd);
	} else if (status3 != 0) {
		printk("%s: SCSI Cell parity error SSTAT3 == 0x%x\n",
		       ahd_name(ahd), status3);
		ahd_outb(ahd, CLRSINT3, status3);
	} else if ((lqistat1 & (LQIPHASE_LQ|LQIPHASE_NLQ)) != 0) {

		/* Make sure the sequencer is in a safe location. */
		ahd_clear_critical_section(ahd);

		ahd_handle_lqiphase_error(ahd, lqistat1);
	} else if ((lqistat1 & LQICRCI_NLQ) != 0) {
		/*
		 * This status can be delayed during some
		 * streaming operations. The SCSIPHASE
		 * handler has already dealt with this case
		 * so just clear the error.
		 */
		ahd_outb(ahd, CLRLQIINT1, CLRLQICRCI_NLQ);
	} else if ((status & BUSFREE) != 0
		|| (lqistat1 & LQOBUSFREE) != 0) {
		u_int lqostat1;
		int   restart;
		int   clear_fifo;
		int   packetized;
		u_int mode;

		/*
		 * Clear our selection hardware as soon as possible.
		 * We may have an entry in the waiting Q for this target,
		 * that is affected by this busfree and we don't want to
		 * go about selecting the target while we handle the event.
		 */
		ahd_outb(ahd, SCSISEQ0, 0);

		/* Make sure the sequencer is in a safe location. */
		ahd_clear_critical_section(ahd);

		/*
		 * Determine what we were up to at the time of
		 * the busfree.
		 */
		mode = AHD_MODE_SCSI;
		busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME;
		lqostat1 = ahd_inb(ahd, LQOSTAT1);
		switch (busfreetime) {
		case BUSFREE_DFF0:
		case BUSFREE_DFF1:
		{
			mode = busfreetime == BUSFREE_DFF0
			     ? AHD_MODE_DFF0 : AHD_MODE_DFF1;
			ahd_set_modes(ahd, mode, mode);
			scbid = ahd_get_scbptr(ahd);
			scb = ahd_lookup_scb(ahd, scbid);
			if (scb == NULL) {
				printk("%s: Invalid SCB %d in DFF%d "
				       "during unexpected busfree\n",
				       ahd_name(ahd), scbid, mode);
				packetized = 0;
			} else
				packetized = (scb->flags & SCB_PACKETIZED) != 0;
			clear_fifo = 1;
			break;
		}
		case BUSFREE_LQO:
			clear_fifo = 0;
			packetized = 1;
			break;
		default:
			clear_fifo = 0;
			packetized = (lqostat1 & LQOBUSFREE) != 0;
			if (!packetized
			 && ahd_inb(ahd, LASTPHASE) == P_BUSFREE
			 && (ahd_inb(ahd, SSTAT0) & SELDI) == 0
			 && ((ahd_inb(ahd, SSTAT0) & SELDO) == 0
			  || (ahd_inb(ahd, SCSISEQ0) & ENSELO) == 0))
				/*
				 * Assume packetized if we are not
				 * on the bus in a non-packetized
				 * capacity and any pending selection
				 * was a packetized selection.
				 */
				packetized = 1;
			break;
		}

#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MISC) != 0)
			printk("Saw Busfree. Busfreetime = 0x%x.\n",
			       busfreetime);
#endif
		/*
		 * Busfrees that occur in non-packetized phases are
		 * handled by the nonpkt_busfree handler.
		 */
		if (packetized && ahd_inb(ahd, LASTPHASE) == P_BUSFREE) {
			restart = ahd_handle_pkt_busfree(ahd, busfreetime);
		} else {
			packetized = 0;
			restart = ahd_handle_nonpkt_busfree(ahd);
		}
		/*
		 * Clear the busfree interrupt status. The setting of
		 * the interrupt is a pulse, so in a perfect world, we
		 * would not need to muck with the ENBUSFREE logic. This
		 * would ensure that if the bus moves on to another
		 * connection, busfree protection is still in force. If
		 * BUSFREEREV is broken, however, we must manually clear
		 * the ENBUSFREE if the busfree occurred during a non-pack
		 * connection so that we don't get false positives during
		 * future, packetized, connections.
		 */
		ahd_outb(ahd, CLRSINT1, CLRBUSFREE);
		if (packetized == 0
		 && (ahd->bugs & AHD_BUSFREEREV_BUG) != 0)
			ahd_outb(ahd, SIMODE1,
				 ahd_inb(ahd, SIMODE1) & ~ENBUSFREE);

		if (clear_fifo)
			ahd_clear_fifo(ahd, mode);

		ahd_clear_msg_state(ahd);
		ahd_outb(ahd, CLRINT, CLRSCSIINT);
		if (restart) {
			ahd_restart(ahd);
		} else {
			ahd_unpause(ahd);
		}
	} else {
		printk("%s: Missing case in ahd_handle_scsiint. status = %x\n",
		       ahd_name(ahd), status);
		ahd_dump_card_state(ahd);
		ahd_clear_intstat(ahd);
		ahd_unpause(ahd);
	}
}

static void
ahd_handle_transmission_error(struct ahd_softc *ahd)
{
	struct scb *scb;
	u_int scbid;
	u_int lqistat1;
	u_int lqistat2;
	u_int msg_out;
	u_int curphase;
	u_int lastphase;
	u_int perrdiag;
	u_int cur_col;
	int   silent;

	scb = NULL;
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	lqistat1 = ahd_inb(ahd, LQISTAT1) & ~(LQIPHASE_LQ|LQIPHASE_NLQ);
	/* NOTE(review): lqistat2 is read but not used in the visible code. */
	lqistat2 = ahd_inb(ahd, LQISTAT2);
	if ((lqistat1 & (LQICRCI_NLQ|LQICRCI_LQ)) == 0
	 && (ahd->bugs & AHD_NLQICRC_DELAYED_BUG) != 0) {
		u_int lqistate;

		/*
		 * Workaround: infer a delayed NLQ CRC error from the
		 * LQI state machine's current state.
		 */
		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
		lqistate = ahd_inb(ahd, LQISTATE);
		if ((lqistate >= 0x1E && lqistate <= 0x24)
		 || (lqistate == 0x29)) {
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
				printk("%s: NLQCRC found via LQISTATE\n",
				       ahd_name(ahd));
			}
#endif
			lqistat1 |= LQICRCI_NLQ;
		}
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	}

	ahd_outb(ahd, CLRLQIINT1, lqistat1);
	lastphase = ahd_inb(ahd, LASTPHASE);
	curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
	perrdiag = ahd_inb(ahd, PERRDIAG);
	msg_out = MSG_INITIATOR_DET_ERR;
	ahd_outb(ahd, CLRSINT1, CLRSCSIPERR);

	/*
	 * Try to find the SCB associated with this error.
	 */
	silent = FALSE;
	if (lqistat1 == 0
	 || (lqistat1 & LQICRCI_NLQ) != 0) {
		if ((lqistat1 & (LQICRCI_NLQ|LQIOVERI_NLQ)) != 0)
			ahd_set_active_fifo(ahd);
		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb != NULL && SCB_IS_SILENT(scb))
			silent = TRUE;
	}

	cur_col = 0;
	if (silent == FALSE) {
		printk("%s: Transmission error detected\n", ahd_name(ahd));
		ahd_lqistat1_print(lqistat1, &cur_col, 50);
		ahd_lastphase_print(lastphase, &cur_col, 50);
		ahd_scsisigi_print(curphase, &cur_col, 50);
		ahd_perrdiag_print(perrdiag, &cur_col, 50);
		printk("\n");
		ahd_dump_card_state(ahd);
	}

	if ((lqistat1 & (LQIOVERI_LQ|LQIOVERI_NLQ)) != 0) {
		if (silent == FALSE) {
			printk("%s: Gross protocol error during incoming "
			       "packet. lqistat1 == 0x%x. Resetting bus.\n",
			       ahd_name(ahd), lqistat1);
		}
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
		return;
	} else if ((lqistat1 & LQICRCI_LQ) != 0) {
		/*
		 * A CRC error has been detected on an incoming LQ.
		 * The bus is currently hung on the last ACK.
		 * Hit LQIRETRY to release the last ack, and
		 * wait for the sequencer to determine that ATNO
		 * is asserted while in message out to take us
		 * to our host message loop. No NONPACKREQ or
		 * LQIPHASE type errors will occur in this
		 * scenario. After this first LQIRETRY, the LQI
		 * manager will be in ISELO where it will
		 * happily sit until another packet phase begins.
		 * Unexpected bus free detection is enabled
		 * through any phases that occur after we release
		 * this last ack until the LQI manager sees a
		 * packet phase. This implies we may have to
		 * ignore a perfectly valid "unexected busfree"
		 * after our "initiator detected error" message is
		 * sent. A busfree is the expected response after
		 * we tell the target that it's L_Q was corrupted.
		 * (SPI4R09 10.7.3.3.3)
		 */
		ahd_outb(ahd, LQCTL2, LQIRETRY);
		printk("LQIRetry for LQICRCI_LQ to release ACK\n");
	} else if ((lqistat1 & LQICRCI_NLQ) != 0) {
		/*
		 * We detected a CRC error in a NON-LQ packet.
		 * The hardware has varying behavior in this situation
		 * depending on whether this packet was part of a
		 * stream or not.
		 *
		 * PKT by PKT mode:
		 * The hardware has already acked the complete packet.
		 * If the target honors our outstanding ATN condition,
		 * we should be (or soon will be) in MSGOUT phase.
		 * This will trigger the LQIPHASE_LQ status bit as the
		 * hardware was expecting another LQ. Unexpected
		 * busfree detection is enabled. Once LQIPHASE_LQ is
		 * true (first entry into host message loop is much
		 * the same), we must clear LQIPHASE_LQ and hit
		 * LQIRETRY so the hardware is ready to handle
		 * a future LQ. NONPACKREQ will not be asserted again
		 * once we hit LQIRETRY until another packet is
		 * processed. The target may either go busfree
		 * or start another packet in response to our message.
		 *
		 * Read Streaming P0 asserted:
		 * If we raise ATN and the target completes the entire
		 * stream (P0 asserted during the last packet), the
		 * hardware will ack all data and return to the ISTART
		 * state. When the target reponds to our ATN condition,
		 * LQIPHASE_LQ will be asserted. We should respond to
		 * this with an LQIRETRY to prepare for any future
		 * packets. NONPACKREQ will not be asserted again
		 * once we hit LQIRETRY until another packet is
		 * processed. The target may either go busfree or
		 * start another packet in response to our message.
		 * Busfree detection is enabled.
		 *
		 * Read Streaming P0 not asserted:
		 * If we raise ATN and the target transitions to
		 * MSGOUT in or after a packet where P0 is not
		 * asserted, the hardware will assert LQIPHASE_NLQ.
		 * We should respond to the LQIPHASE_NLQ with an
		 * LQIRETRY. Should the target stay in a non-pkt
		 * phase after we send our message, the hardware
		 * will assert LQIPHASE_LQ. Recovery is then just as
		 * listed above for the read streaming with P0 asserted.
		 * Busfree detection is enabled.
		 */
		if (silent == FALSE)
			printk("LQICRC_NLQ\n");
		if (scb == NULL) {
			printk("%s: No SCB valid for LQICRC_NLQ. 
" "Resetting bus\n", ahd_name(ahd)); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); return; } } else if ((lqistat1 & LQIBADLQI) != 0) { printk("Need to handle BADLQI!\n"); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); return; } else if ((perrdiag & (PARITYERR|PREVPHASE)) == PARITYERR) { if ((curphase & ~P_DATAIN_DT) != 0) { /* Ack the byte. So we can continue. */ if (silent == FALSE) printk("Acking %s to clear perror\n", ahd_lookup_phase_entry(curphase)->phasemsg); ahd_inb(ahd, SCSIDAT); } if (curphase == P_MESGIN) msg_out = MSG_PARITY_ERROR; } /* * We've set the hardware to assert ATN if we * get a parity error on "in" phases, so all we * need to do is stuff the message buffer with * the appropriate message. "In" phases have set * mesg_out to something other than MSG_NOP. */ ahd->send_msg_perror = msg_out; if (scb != NULL && msg_out == MSG_INITIATOR_DET_ERR) scb->flags |= SCB_TRANSMISSION_ERROR; ahd_outb(ahd, MSG_OUT, HOST_MSG); ahd_outb(ahd, CLRINT, CLRSCSIINT); ahd_unpause(ahd); } static void ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1) { /* * Clear the sources of the interrupts. */ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, CLRLQIINT1, lqistat1); /* * If the "illegal" phase changes were in response * to our ATN to flag a CRC error, AND we ended up * on packet boundaries, clear the error, restart the * LQI manager as appropriate, and go on our merry * way toward sending the message. Otherwise, reset * the bus to clear the error. 
*/ ahd_set_active_fifo(ahd); if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0 && (ahd_inb(ahd, MDFFSTAT) & DLZERO) != 0) { if ((lqistat1 & LQIPHASE_LQ) != 0) { printk("LQIRETRY for LQIPHASE_LQ\n"); ahd_outb(ahd, LQCTL2, LQIRETRY); } else if ((lqistat1 & LQIPHASE_NLQ) != 0) { printk("LQIRETRY for LQIPHASE_NLQ\n"); ahd_outb(ahd, LQCTL2, LQIRETRY); } else panic("ahd_handle_lqiphase_error: No phase errors\n"); ahd_dump_card_state(ahd); ahd_outb(ahd, CLRINT, CLRSCSIINT); ahd_unpause(ahd); } else { printk("Reseting Channel for LQI Phase error\n"); ahd_dump_card_state(ahd); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); } } /* * Packetized unexpected or expected busfree. * Entered in mode based on busfreetime. */ static int ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime) { u_int lqostat1; AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); lqostat1 = ahd_inb(ahd, LQOSTAT1); if ((lqostat1 & LQOBUSFREE) != 0) { struct scb *scb; u_int scbid; u_int saved_scbptr; u_int waiting_h; u_int waiting_t; u_int next; /* * The LQO manager detected an unexpected busfree * either: * * 1) During an outgoing LQ. * 2) After an outgoing LQ but before the first * REQ of the command packet. * 3) During an outgoing command packet. * * In all cases, CURRSCB is pointing to the * SCB that encountered the failure. Clean * up the queue, clear SELDO and LQOBUSFREE, * and allow the sequencer to restart the select * out at its lesure. */ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); scbid = ahd_inw(ahd, CURRSCB); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) panic("SCB not valid during LQOBUSFREE"); /* * Clear the status. */ ahd_outb(ahd, CLRLQOINT1, CLRLQOBUSFREE); if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) ahd_outb(ahd, CLRLQOINT1, 0); ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); ahd_flush_device_writes(ahd); ahd_outb(ahd, CLRSINT0, CLRSELDO); /* * Return the LQO manager to its idle loop. 
It will * not do this automatically if the busfree occurs * after the first REQ of either the LQ or command * packet or between the LQ and command packet. */ ahd_outb(ahd, LQCTL2, ahd_inb(ahd, LQCTL2) | LQOTOIDLE); /* * Update the waiting for selection queue so * we restart on the correct SCB. */ waiting_h = ahd_inw(ahd, WAITING_TID_HEAD); saved_scbptr = ahd_get_scbptr(ahd); if (waiting_h != scbid) { ahd_outw(ahd, WAITING_TID_HEAD, scbid); waiting_t = ahd_inw(ahd, WAITING_TID_TAIL); if (waiting_t == waiting_h) { ahd_outw(ahd, WAITING_TID_TAIL, scbid); next = SCB_LIST_NULL; } else { ahd_set_scbptr(ahd, waiting_h); next = ahd_inw_scbram(ahd, SCB_NEXT2); } ahd_set_scbptr(ahd, scbid); ahd_outw(ahd, SCB_NEXT2, next); } ahd_set_scbptr(ahd, saved_scbptr); if (scb->crc_retry_count < AHD_MAX_LQ_CRC_ERRORS) { if (SCB_IS_SILENT(scb) == FALSE) { ahd_print_path(ahd, scb); printk("Probable outgoing LQ CRC error. " "Retrying command\n"); } scb->crc_retry_count++; } else { ahd_set_transaction_status(scb, CAM_UNCOR_PARITY); ahd_freeze_scb(scb); ahd_freeze_devq(ahd, scb); } /* Return unpausing the sequencer. */ return (0); } else if ((ahd_inb(ahd, PERRDIAG) & PARITYERR) != 0) { /* * Ignore what are really parity errors that * occur on the last REQ of a free running * clock prior to going busfree. Some drives * do not properly active negate just before * going busfree resulting in a parity glitch. */ ahd_outb(ahd, CLRSINT1, CLRSCSIPERR|CLRBUSFREE); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MASKED_ERRORS) != 0) printk("%s: Parity on last REQ detected " "during busfree phase.\n", ahd_name(ahd)); #endif /* Return unpausing the sequencer. 
*/ return (0); } if (ahd->src_mode != AHD_MODE_SCSI) { u_int scbid; struct scb *scb; scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); ahd_print_path(ahd, scb); printk("Unexpected PKT busfree condition\n"); ahd_dump_card_state(ahd); ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 'A', SCB_GET_LUN(scb), SCB_GET_TAG(scb), ROLE_INITIATOR, CAM_UNEXP_BUSFREE); /* Return restarting the sequencer. */ return (1); } printk("%s: Unexpected PKT busfree condition\n", ahd_name(ahd)); ahd_dump_card_state(ahd); /* Restart the sequencer. */ return (1); } /* * Non-packetized unexpected or expected busfree. */ static int ahd_handle_nonpkt_busfree(struct ahd_softc *ahd) { struct ahd_devinfo devinfo; struct scb *scb; u_int lastphase; u_int saved_scsiid; u_int saved_lun; u_int target; u_int initiator_role_id; u_int scbid; u_int ppr_busfree; int printerror; /* * Look at what phase we were last in. If its message out, * chances are pretty good that the busfree was in response * to one of our abort requests. */ lastphase = ahd_inb(ahd, LASTPHASE); saved_scsiid = ahd_inb(ahd, SAVED_SCSIID); saved_lun = ahd_inb(ahd, SAVED_LUN); target = SCSIID_TARGET(ahd, saved_scsiid); initiator_role_id = SCSIID_OUR_ID(saved_scsiid); ahd_compile_devinfo(&devinfo, initiator_role_id, target, saved_lun, 'A', ROLE_INITIATOR); printerror = 1; scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb != NULL && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) scb = NULL; ppr_busfree = (ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0; if (lastphase == P_MESGOUT) { u_int tag; tag = SCB_LIST_NULL; if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT_TAG, TRUE) || ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT, TRUE)) { int found; int sent_msg; if (scb == NULL) { ahd_print_devinfo(ahd, &devinfo); printk("Abort for unidentified " "connection completed.\n"); /* restart the sequencer. 
*/ return (1); } sent_msg = ahd->msgout_buf[ahd->msgout_index - 1]; ahd_print_path(ahd, scb); printk("SCB %d - Abort%s Completed.\n", SCB_GET_TAG(scb), sent_msg == MSG_ABORT_TAG ? "" : " Tag"); if (sent_msg == MSG_ABORT_TAG) tag = SCB_GET_TAG(scb); if ((scb->flags & SCB_EXTERNAL_RESET) != 0) { /* * This abort is in response to an * unexpected switch to command phase * for a packetized connection. Since * the identify message was never sent, * "saved lun" is 0. We really want to * abort only the SCB that encountered * this error, which could have a different * lun. The SCB will be retried so the OS * will see the UA after renegotiating to * packetized. */ tag = SCB_GET_TAG(scb); saved_lun = scb->hscb->lun; } found = ahd_abort_scbs(ahd, target, 'A', saved_lun, tag, ROLE_INITIATOR, CAM_REQ_ABORTED); printk("found == 0x%x\n", found); printerror = 0; } else if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_BUS_DEV_RESET, TRUE)) { #ifdef __FreeBSD__ /* * Don't mark the user's request for this BDR * as completing with CAM_BDR_SENT. CAM3 * specifies CAM_REQ_CMP. */ if (scb != NULL && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV && ahd_match_scb(ahd, scb, target, 'A', CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_INITIATOR)) ahd_set_transaction_status(scb, CAM_REQ_CMP); #endif ahd_handle_devreset(ahd, &devinfo, CAM_LUN_WILDCARD, CAM_BDR_SENT, "Bus Device Reset", /*verbose_level*/0); printerror = 0; } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, FALSE) && ppr_busfree == 0) { struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; /* * PPR Rejected. * * If the previous negotiation was packetized, * this could be because the device has been * reset without our knowledge. Force our * current negotiation to async and retry the * negotiation. Otherwise retry the command * with non-ppr negotiation. 
*/ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printk("PPR negotiation rejected busfree.\n"); #endif tinfo = ahd_fetch_transinfo(ahd, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ)!=0) { ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_CUR, /*paused*/TRUE); ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_CUR, /*paused*/TRUE); /* * The expect PPR busfree handler below * will effect the retry and necessary * abort. */ } else { tinfo->curr.transport_version = 2; tinfo->goal.transport_version = 2; tinfo->goal.ppr_options = 0; if (scb != NULL) { /* * Remove any SCBs in the waiting * for selection queue that may * also be for this target so that * command ordering is preserved. */ ahd_freeze_devq(ahd, scb); ahd_qinfifo_requeue_tail(ahd, scb); } printerror = 0; } } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE) && ppr_busfree == 0) { /* * Negotiation Rejected. Go-narrow and * retry command. */ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printk("WDTR negotiation rejected busfree.\n"); #endif ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); if (scb != NULL) { /* * Remove any SCBs in the waiting for * selection queue that may also be for * this target so that command ordering * is preserved. */ ahd_freeze_devq(ahd, scb); ahd_qinfifo_requeue_tail(ahd, scb); } printerror = 0; } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE) && ppr_busfree == 0) { /* * Negotiation Rejected. Go-async and * retry command. 
*/ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printk("SDTR negotiation rejected busfree.\n"); #endif ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); if (scb != NULL) { /* * Remove any SCBs in the waiting for * selection queue that may also be for * this target so that command ordering * is preserved. */ ahd_freeze_devq(ahd, scb); ahd_qinfifo_requeue_tail(ahd, scb); } printerror = 0; } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0 && ahd_sent_msg(ahd, AHDMSG_1B, MSG_INITIATOR_DET_ERR, TRUE)) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printk("Expected IDE Busfree\n"); #endif printerror = 0; } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_QASREJ_BUSFREE) && ahd_sent_msg(ahd, AHDMSG_1B, MSG_MESSAGE_REJECT, TRUE)) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printk("Expected QAS Reject Busfree\n"); #endif printerror = 0; } } /* * The busfree required flag is honored at the end of * the message phases. We check it last in case we * had to send some other message that caused a busfree. 
*/ if (scb != NULL && printerror != 0 && (lastphase == P_MESGIN || lastphase == P_MESGOUT) && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) { ahd_freeze_devq(ahd, scb); ahd_set_transaction_status(scb, CAM_REQUEUE_REQ); ahd_freeze_scb(scb); if ((ahd->msg_flags & MSG_FLAG_IU_REQ_CHANGED) != 0) { ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), SCB_GET_CHANNEL(ahd, scb), SCB_GET_LUN(scb), SCB_LIST_NULL, ROLE_INITIATOR, CAM_REQ_ABORTED); } else { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printk("PPR Negotiation Busfree.\n"); #endif ahd_done(ahd, scb); } printerror = 0; } if (printerror != 0) { int aborted; aborted = 0; if (scb != NULL) { u_int tag; if ((scb->hscb->control & TAG_ENB) != 0) tag = SCB_GET_TAG(scb); else tag = SCB_LIST_NULL; ahd_print_path(ahd, scb); aborted = ahd_abort_scbs(ahd, target, 'A', SCB_GET_LUN(scb), tag, ROLE_INITIATOR, CAM_UNEXP_BUSFREE); } else { /* * We had not fully identified this connection, * so we cannot abort anything. */ printk("%s: ", ahd_name(ahd)); } printk("Unexpected busfree %s, %d SCBs aborted, " "PRGMCNT == 0x%x\n", ahd_lookup_phase_entry(lastphase)->phasemsg, aborted, ahd_inw(ahd, PRGMCNT)); ahd_dump_card_state(ahd); if (lastphase != P_BUSFREE) ahd_force_renegotiation(ahd, &devinfo); } /* Always restart the sequencer. */ return (1); } static void ahd_handle_proto_violation(struct ahd_softc *ahd) { struct ahd_devinfo devinfo; struct scb *scb; u_int scbid; u_int seq_flags; u_int curphase; u_int lastphase; int found; ahd_fetch_devinfo(ahd, &devinfo); scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); seq_flags = ahd_inb(ahd, SEQ_FLAGS); curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; lastphase = ahd_inb(ahd, LASTPHASE); if ((seq_flags & NOT_IDENTIFIED) != 0) { /* * The reconnecting target either did not send an * identify message, or did, but we didn't find an SCB * to match. */ ahd_print_devinfo(ahd, &devinfo); printk("Target did not send an IDENTIFY message. 
" "LASTPHASE = 0x%x.\n", lastphase); scb = NULL; } else if (scb == NULL) { /* * We don't seem to have an SCB active for this * transaction. Print an error and reset the bus. */ ahd_print_devinfo(ahd, &devinfo); printk("No SCB found during protocol violation\n"); goto proto_violation_reset; } else { ahd_set_transaction_status(scb, CAM_SEQUENCE_FAIL); if ((seq_flags & NO_CDB_SENT) != 0) { ahd_print_path(ahd, scb); printk("No or incomplete CDB sent to device.\n"); } else if ((ahd_inb_scbram(ahd, SCB_CONTROL) & STATUS_RCVD) == 0) { /* * The target never bothered to provide status to * us prior to completing the command. Since we don't * know the disposition of this command, we must attempt * to abort it. Assert ATN and prepare to send an abort * message. */ ahd_print_path(ahd, scb); printk("Completed command without status.\n"); } else { ahd_print_path(ahd, scb); printk("Unknown protocol violation.\n"); ahd_dump_card_state(ahd); } } if ((lastphase & ~P_DATAIN_DT) == 0 || lastphase == P_COMMAND) { proto_violation_reset: /* * Target either went directly to data * phase or didn't respond to our ATN. * The only safe thing to do is to blow * it away with a bus reset. */ found = ahd_reset_channel(ahd, 'A', TRUE); printk("%s: Issued Channel %c Bus Reset. " "%d SCBs aborted\n", ahd_name(ahd), 'A', found); } else { /* * Leave the selection hardware off in case * this abort attempt will affect yet to * be sent commands. */ ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); ahd_assert_atn(ahd); ahd_outb(ahd, MSG_OUT, HOST_MSG); if (scb == NULL) { ahd_print_devinfo(ahd, &devinfo); ahd->msgout_buf[0] = MSG_ABORT_TASK; ahd->msgout_len = 1; ahd->msgout_index = 0; ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; } else { ahd_print_path(ahd, scb); scb->flags |= SCB_ABORT; } printk("Protocol violation %s. Attempting to abort.\n", ahd_lookup_phase_entry(curphase)->phasemsg); } } /* * Force renegotiation to occur the next time we initiate * a command to the current device. 
 */
static void
ahd_force_renegotiation(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
	struct	ahd_initiator_tinfo *targ_info;
	struct	ahd_tmode_tstate *tstate;

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
		ahd_print_devinfo(ahd, devinfo);
		printk("Forcing renegotiation\n");
	}
#endif
	/* Fetch this target's transfer info, then flag it for negotiation. */
	targ_info = ahd_fetch_transinfo(ahd,
					devinfo->channel, devinfo->our_scsiid,
					devinfo->target, &tstate);
	ahd_update_neg_request(ahd, devinfo, tstate,
			       targ_info, AHD_NEG_IF_NON_ASYNC);
}

#define AHD_MAX_STEPS 2000
/*
 * Single-step the sequencer until its program counter (CURADDR) lies
 * outside every registered critical section.  While stepping, all
 * interrupt sources except busfree detection are masked and later
 * restored.  Bails out with a panic after AHD_MAX_STEPS steps.
 */
static void
ahd_clear_critical_section(struct ahd_softc *ahd)
{
	ahd_mode_state	saved_modes;
	int		stepping;
	int		steps;
	int		first_instr;
	u_int		simode0;
	u_int		simode1;
	u_int		simode3;
	u_int		lqimode0;
	u_int		lqimode1;
	u_int		lqomode0;
	u_int		lqomode1;

	if (ahd->num_critical_sections == 0)
		return;

	stepping = FALSE;
	steps = 0;
	first_instr = 0;
	simode0 = 0;
	simode1 = 0;
	simode3 = 0;
	lqimode0 = 0;
	lqimode1 = 0;
	lqomode0 = 0;
	lqomode1 = 0;
	saved_modes = ahd_save_modes(ahd);
	for (;;) {
		struct	cs *cs;
		u_int	seqaddr;
		u_int	i;

		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		seqaddr = ahd_inw(ahd, CURADDR);

		/* Are we currently inside any critical section? */
		cs = ahd->critical_sections;
		for (i = 0; i < ahd->num_critical_sections; i++, cs++) {
			if (cs->begin < seqaddr && cs->end >= seqaddr)
				break;
		}

		if (i == ahd->num_critical_sections)
			break;

		if (steps > AHD_MAX_STEPS) {
			printk("%s: Infinite loop in critical section\n"
			       "%s: First Instruction 0x%x now 0x%x\n",
			       ahd_name(ahd), ahd_name(ahd), first_instr,
			       seqaddr);
			ahd_dump_card_state(ahd);
			panic("critical section loop");
		}

		steps++;
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MISC) != 0)
			printk("%s: Single stepping at 0x%x\n", ahd_name(ahd),
			       seqaddr);
#endif
		if (stepping == FALSE) {

			/*
			 * First step: save and zero the interrupt masks so
			 * stepping cannot trigger spurious interrupts, then
			 * enable single-step mode (SEQCTL0.STEP).
			 */
			first_instr = seqaddr;
			ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
			simode0 = ahd_inb(ahd, SIMODE0);
			simode3 = ahd_inb(ahd, SIMODE3);
			lqimode0 = ahd_inb(ahd, LQIMODE0);
			lqimode1 = ahd_inb(ahd, LQIMODE1);
			lqomode0 = ahd_inb(ahd, LQOMODE0);
			lqomode1 = ahd_inb(ahd, LQOMODE1);
			ahd_outb(ahd, SIMODE0, 0);
			ahd_outb(ahd, SIMODE3, 0);
			ahd_outb(ahd, LQIMODE0, 0);
			ahd_outb(ahd, LQIMODE1, 0);
			ahd_outb(ahd, LQOMODE0, 0);
			ahd_outb(ahd, LQOMODE1, 0);
			ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
			simode1 = ahd_inb(ahd, SIMODE1);
			/*
			 * We don't clear ENBUSFREE.  Unfortunately
			 * we cannot re-enable busfree detection within
			 * the current connection, so we must leave it
			 * on while single stepping.
			 */
			ahd_outb(ahd, SIMODE1, simode1 & ENBUSFREE);
			ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) | STEP);
			stepping = TRUE;
		}
		ahd_outb(ahd, CLRSINT1, CLRBUSFREE);
		ahd_outb(ahd, CLRINT, CLRSCSIINT);
		ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
		/* Unpause for one instruction, then wait for re-pause. */
		ahd_outb(ahd, HCNTRL, ahd->unpause);
		while (!ahd_is_paused(ahd))
			ahd_delay(200);
		ahd_update_modes(ahd);
	}
	if (stepping) {
		/* Restore the interrupt masks saved above. */
		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
		ahd_outb(ahd, SIMODE0, simode0);
		ahd_outb(ahd, SIMODE3, simode3);
		ahd_outb(ahd, LQIMODE0, lqimode0);
		ahd_outb(ahd, LQIMODE1, lqimode1);
		ahd_outb(ahd, LQOMODE0, lqomode0);
		ahd_outb(ahd, LQOMODE1, lqomode1);
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) & ~STEP);
		ahd_outb(ahd, SIMODE1, simode1);
		/*
		 * SCSIINT seems to glitch occasionally when
		 * the interrupt masks are restored.  Clear SCSIINT
		 * one more time so that only persistent errors
		 * are seen as a real interrupt.
		 */
		ahd_outb(ahd, CLRINT, CLRSCSIINT);
	}
	ahd_restore_modes(ahd, saved_modes);
}

/*
 * Clear any pending interrupt status.
*/ static void ahd_clear_intstat(struct ahd_softc *ahd) { AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); /* Clear any interrupt conditions this may have caused */ ahd_outb(ahd, CLRLQIINT0, CLRLQIATNQAS|CLRLQICRCT1|CLRLQICRCT2 |CLRLQIBADLQT|CLRLQIATNLQ|CLRLQIATNCMD); ahd_outb(ahd, CLRLQIINT1, CLRLQIPHASE_LQ|CLRLQIPHASE_NLQ|CLRLIQABORT |CLRLQICRCI_LQ|CLRLQICRCI_NLQ|CLRLQIBADLQI |CLRLQIOVERI_LQ|CLRLQIOVERI_NLQ|CLRNONPACKREQ); ahd_outb(ahd, CLRLQOINT0, CLRLQOTARGSCBPERR|CLRLQOSTOPT2|CLRLQOATNLQ |CLRLQOATNPKT|CLRLQOTCRC); ahd_outb(ahd, CLRLQOINT1, CLRLQOINITSCBPERR|CLRLQOSTOPI2|CLRLQOBADQAS |CLRLQOBUSFREE|CLRLQOPHACHGINPKT); if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) { ahd_outb(ahd, CLRLQOINT0, 0); ahd_outb(ahd, CLRLQOINT1, 0); } ahd_outb(ahd, CLRSINT3, CLRNTRAMPERR|CLROSRAMPERR); ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI |CLRBUSFREE|CLRSCSIPERR|CLRREQINIT); ahd_outb(ahd, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO |CLRIOERR|CLROVERRUN); ahd_outb(ahd, CLRINT, CLRSCSIINT); } /**************************** Debugging Routines ******************************/ #ifdef AHD_DEBUG uint32_t ahd_debug = AHD_DEBUG_OPTS; #endif #if 0 void ahd_print_scb(struct scb *scb) { struct hardware_scb *hscb; int i; hscb = scb->hscb; printk("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n", (void *)scb, hscb->control, hscb->scsiid, hscb->lun, hscb->cdb_len); printk("Shared Data: "); for (i = 0; i < sizeof(hscb->shared_data.idata.cdb); i++) printk("%#02x", hscb->shared_data.idata.cdb[i]); printk(" dataptr:%#x%x datacnt:%#x sgptr:%#x tag:%#x\n", (uint32_t)((ahd_le64toh(hscb->dataptr) >> 32) & 0xFFFFFFFF), (uint32_t)(ahd_le64toh(hscb->dataptr) & 0xFFFFFFFF), ahd_le32toh(hscb->datacnt), ahd_le32toh(hscb->sgptr), SCB_GET_TAG(scb)); ahd_dump_sglist(scb); } #endif /* 0 */ /************************* Transfer Negotiation *******************************/ /* * Allocate per target mode instance (ID we respond to as a target) * 
transfer negotiation data structures. */ static struct ahd_tmode_tstate * ahd_alloc_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel) { struct ahd_tmode_tstate *master_tstate; struct ahd_tmode_tstate *tstate; int i; master_tstate = ahd->enabled_targets[ahd->our_id]; if (ahd->enabled_targets[scsi_id] != NULL && ahd->enabled_targets[scsi_id] != master_tstate) panic("%s: ahd_alloc_tstate - Target already allocated", ahd_name(ahd)); tstate = kmalloc(sizeof(*tstate), GFP_ATOMIC); if (tstate == NULL) return (NULL); /* * If we have allocated a master tstate, copy user settings from * the master tstate (taken from SRAM or the EEPROM) for this * channel, but reset our current and goal settings to async/narrow * until an initiator talks to us. */ if (master_tstate != NULL) { memcpy(tstate, master_tstate, sizeof(*tstate)); memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns)); for (i = 0; i < 16; i++) { memset(&tstate->transinfo[i].curr, 0, sizeof(tstate->transinfo[i].curr)); memset(&tstate->transinfo[i].goal, 0, sizeof(tstate->transinfo[i].goal)); } } else memset(tstate, 0, sizeof(*tstate)); ahd->enabled_targets[scsi_id] = tstate; return (tstate); } #ifdef AHD_TARGET_MODE /* * Free per target mode instance (ID we respond to as a target) * transfer negotiation data structures. */ static void ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force) { struct ahd_tmode_tstate *tstate; /* * Don't clean up our "master" tstate. * It has our default user settings. */ if (scsi_id == ahd->our_id && force == FALSE) return; tstate = ahd->enabled_targets[scsi_id]; if (tstate != NULL) kfree(tstate); ahd->enabled_targets[scsi_id] = NULL; } #endif /* * Called when we have an active connection to a target on the bus, * this function finds the nearest period to the input period limited * by the capabilities of the bus connectivity of and sync settings for * the target. 
 */
static void
ahd_devlimited_syncrate(struct ahd_softc *ahd,
			struct ahd_initiator_tinfo *tinfo,
			u_int *period, u_int *ppr_options, role_t role)
{
	struct	ahd_transinfo *transinfo;
	u_int	maxsync;

	/* Paced transfers are only possible on an LVD (ENAB40) bus that
	 * is not in an expander-active state; otherwise cap at Ultra. */
	if ((ahd_inb(ahd, SBLKCTL) & ENAB40) != 0
	 && (ahd_inb(ahd, SSTAT2) & EXP_ACTIVE) == 0) {
		maxsync = AHD_SYNCRATE_PACED;
	} else {
		maxsync = AHD_SYNCRATE_ULTRA;
		/* Can't do DT related options on an SE bus */
		*ppr_options &= MSG_EXT_PPR_QAS_REQ;
	}
	/*
	 * Never allow a value higher than our current goal
	 * period otherwise we may allow a target initiated
	 * negotiation to go above the limit as set by the
	 * user.  In the case of an initiator initiated
	 * sync negotiation, we limit based on the user
	 * setting.  This allows the system to still accept
	 * incoming negotiations even if target initiated
	 * negotiation is not performed.
	 */
	if (role == ROLE_TARGET)
		transinfo = &tinfo->user;
	else
		transinfo = &tinfo->goal;
	*ppr_options &= (transinfo->ppr_options|MSG_EXT_PPR_PCOMP_EN);
	/* A narrow link further restricts rate and forbids DT. */
	if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
		maxsync = max(maxsync, (u_int)AHD_SYNCRATE_ULTRA2);
		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
	}
	if (transinfo->period == 0) {
		/* Period 0 means async: no offset, no PPR options. */
		*period = 0;
		*ppr_options = 0;
	} else {
		*period = max(*period, (u_int)transinfo->period);
		ahd_find_syncrate(ahd, period, ppr_options, maxsync);
	}
}

/*
 * Look up the valid period to SCSIRATE conversion in our table.
 * Return the period and offset that should be sent to the target
 * if this was the beginning of an SDTR.
 */
void
ahd_find_syncrate(struct ahd_softc *ahd, u_int *period,
		  u_int *ppr_options, u_int maxsync)
{
	/* Larger period value == slower rate; clamp to our ceiling. */
	if (*period < maxsync)
		*period = maxsync;

	/* DT is not allowed at periods slower than the DT minimum. */
	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) != 0
	 && *period > AHD_SYNCRATE_MIN_DT)
		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;

	/* Slower than our absolute minimum rate: fall back to async. */
	if (*period > AHD_SYNCRATE_MIN)
		*period = 0;

	/* Honor PPR option conformance rules. */
	if (*period > AHD_SYNCRATE_PACED)
		*ppr_options &= ~MSG_EXT_PPR_RTI;

	if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0)
		*ppr_options &= (MSG_EXT_PPR_DT_REQ|MSG_EXT_PPR_QAS_REQ);

	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0)
		*ppr_options &= MSG_EXT_PPR_QAS_REQ;

	/* Skip all PACED only entries if IU is not available */
	if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0
	 && *period < AHD_SYNCRATE_DT)
		*period = AHD_SYNCRATE_DT;

	/* Skip all DT only entries if DT is not available */
	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
	 && *period < AHD_SYNCRATE_ULTRA2)
		*period = AHD_SYNCRATE_ULTRA2;
}

/*
 * Truncate the given synchronous offset to a value the
 * current adapter type and syncrate are capable of.
 */
static void
ahd_validate_offset(struct ahd_softc *ahd,
		    struct ahd_initiator_tinfo *tinfo,
		    u_int period, u_int *offset, int wide,
		    role_t role)
{
	u_int maxoffset;

	/* Limit offset to what we can do */
	if (period == 0)
		maxoffset = 0;	/* async: offset must be zero */
	else if (period <= AHD_SYNCRATE_PACED) {
		if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0)
			maxoffset = MAX_OFFSET_PACED_BUG;
		else
			maxoffset = MAX_OFFSET_PACED;
	} else
		maxoffset = MAX_OFFSET_NON_PACED;
	*offset = min(*offset, maxoffset);
	/* Further clamp by the user (target role) or goal settings. */
	if (tinfo != NULL) {
		if (role == ROLE_TARGET)
			*offset = min(*offset, (u_int)tinfo->user.offset);
		else
			*offset = min(*offset, (u_int)tinfo->goal.offset);
	}
}

/*
 * Truncate the given transfer width parameter to a value the
 * current adapter type is capable of.
*/ static void ahd_validate_width(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo, u_int *bus_width, role_t role) { switch (*bus_width) { default: if (ahd->features & AHD_WIDE) { /* Respond Wide */ *bus_width = MSG_EXT_WDTR_BUS_16_BIT; break; } /* FALLTHROUGH */ case MSG_EXT_WDTR_BUS_8_BIT: *bus_width = MSG_EXT_WDTR_BUS_8_BIT; break; } if (tinfo != NULL) { if (role == ROLE_TARGET) *bus_width = min((u_int)tinfo->user.width, *bus_width); else *bus_width = min((u_int)tinfo->goal.width, *bus_width); } } /* * Update the bitmask of targets for which the controller should * negotiate with at the next convenient opportunity. This currently * means the next time we send the initial identify messages for * a new transaction. */ int ahd_update_neg_request(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct ahd_tmode_tstate *tstate, struct ahd_initiator_tinfo *tinfo, ahd_neg_type neg_type) { u_int auto_negotiate_orig; auto_negotiate_orig = tstate->auto_negotiate; if (neg_type == AHD_NEG_ALWAYS) { /* * Force our "current" settings to be * unknown so that unless a bus reset * occurs the need to renegotiate is * recorded persistently. */ if ((ahd->features & AHD_WIDE) != 0) tinfo->curr.width = AHD_WIDTH_UNKNOWN; tinfo->curr.period = AHD_PERIOD_UNKNOWN; tinfo->curr.offset = AHD_OFFSET_UNKNOWN; } if (tinfo->curr.period != tinfo->goal.period || tinfo->curr.width != tinfo->goal.width || tinfo->curr.offset != tinfo->goal.offset || tinfo->curr.ppr_options != tinfo->goal.ppr_options || (neg_type == AHD_NEG_IF_NON_ASYNC && (tinfo->goal.offset != 0 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT || tinfo->goal.ppr_options != 0))) tstate->auto_negotiate |= devinfo->target_mask; else tstate->auto_negotiate &= ~devinfo->target_mask; return (auto_negotiate_orig != tstate->auto_negotiate); } /* * Update the user/goal/curr tables of synchronous negotiation * parameters as well as, in the case of a current or active update, * any data structures on the host controller. 
In the case of an
 * active update, the specified target is currently talking to us on
 * the bus, so the transfer parameter update must take effect
 * immediately.
 */
void
ahd_set_syncrate(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
		 u_int period, u_int offset, u_int ppr_options,
		 u_int type, int paused)
{
	struct	ahd_initiator_tinfo *tinfo;
	struct	ahd_tmode_tstate *tstate;
	u_int	old_period;
	u_int	old_offset;
	u_int	old_ppr;
	int	active;
	int	update_needed;

	active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE;
	update_needed = 0;

	/* Period and offset are only meaningful together; either being
	 * zero means async. */
	if (period == 0 || offset == 0) {
		period = 0;
		offset = 0;
	}

	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	if ((type & AHD_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
		tinfo->user.ppr_options = ppr_options;
	}

	if ((type & AHD_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
		tinfo->goal.ppr_options = ppr_options;
	}

	old_period = tinfo->curr.period;
	old_offset = tinfo->curr.offset;
	old_ppr	   = tinfo->curr.ppr_options;

	if ((type & AHD_TRANS_CUR) != 0
	 && (old_period != period
	  || old_offset != offset
	  || old_ppr != ppr_options)) {

		update_needed++;

		tinfo->curr.period = period;
		tinfo->curr.offset = offset;
		tinfo->curr.ppr_options = ppr_options;

		/* Notify the mid-layer of the transfer-negotiation event. */
		ahd_send_async(ahd, devinfo->channel, devinfo->target,
			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
		if (bootverbose) {
			if (offset != 0) {
				int options;

				printk("%s: target %d synchronous with "
				       "period = 0x%x, offset = 0x%x",
				       ahd_name(ahd), devinfo->target,
				       period, offset);
				options = 0;
				if ((ppr_options & MSG_EXT_PPR_RD_STRM) != 0) {
					printk("(RDSTRM");
					options++;
				}
				if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
					printk("%s", options ? "|DT" : "(DT");
					options++;
				}
				if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
					printk("%s", options ? "|IU" : "(IU");
					options++;
				}
				if ((ppr_options & MSG_EXT_PPR_RTI) != 0) {
					printk("%s", options ? "|RTI" : "(RTI");
					options++;
				}
				if ((ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) {
					printk("%s", options ? "|QAS" : "(QAS");
					options++;
				}
				if (options != 0)
					printk(")\n");
				else
					printk("\n");
			} else {
				printk("%s: target %d using "
				       "asynchronous transfers%s\n",
				       ahd_name(ahd), devinfo->target,
				       (ppr_options & MSG_EXT_PPR_QAS_REQ) != 0
				     ?  "(QAS)" : "");
			}
		}
	}
	/*
	 * Always refresh the neg-table to handle the case of the
	 * sequencer setting the ENATNO bit for a MK_MESSAGE request.
	 * We will always renegotiate in that case if this is a
	 * packetized request.  Also manage the busfree expected flag
	 * from this common routine so that we catch changes due to
	 * WDTR or SDTR messages.
	 */
	if ((type & AHD_TRANS_CUR) != 0) {
		/* The sequencer must be paused while the neg-table is
		 * rewritten. */
		if (!paused)
			ahd_pause(ahd);
		ahd_update_neg_table(ahd, devinfo, &tinfo->curr);
		if (!paused)
			ahd_unpause(ahd);
		if (ahd->msg_type != MSG_TYPE_NONE) {
			if ((old_ppr & MSG_EXT_PPR_IU_REQ)
			 != (ppr_options & MSG_EXT_PPR_IU_REQ)) {
#ifdef AHD_DEBUG
				if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
					ahd_print_devinfo(ahd, devinfo);
					printk("Expecting IU Change busfree\n");
				}
#endif
				ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE
					       |  MSG_FLAG_IU_REQ_CHANGED;
			}
			if ((old_ppr & MSG_EXT_PPR_IU_REQ) != 0) {
#ifdef AHD_DEBUG
				if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
					printk("PPR with IU_REQ outstanding\n");
#endif
				ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE;
			}
		}
	}

	update_needed += ahd_update_neg_request(ahd, devinfo, tstate,
						tinfo, AHD_NEG_TO_GOAL);

	if (update_needed && active)
		ahd_update_pending_scbs(ahd);
}

/*
 * Update the user/goal/curr tables of wide negotiation
 * parameters as well as, in the case of a current or active update,
 * any data structures on the host controller.  In the case of an
 * active update, the specified target is currently talking to us on
 * the bus, so the transfer parameter update must take effect
 * immediately.
 */
void
ahd_set_width(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
	      u_int width, u_int type, int paused)
{
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	u_int oldwidth;
	int active;
	int update_needed;

	active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE;
	update_needed = 0;
	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	if ((type & AHD_TRANS_USER) != 0)
		tinfo->user.width = width;

	if ((type & AHD_TRANS_GOAL) != 0)
		tinfo->goal.width = width;

	oldwidth = tinfo->curr.width;
	if ((type & AHD_TRANS_CUR) != 0 && oldwidth != width) {

		update_needed++;

		tinfo->curr.width = width;
		/* Notify the mid-layer that the negotiated width changed. */
		ahd_send_async(ahd, devinfo->channel, devinfo->target,
			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
		if (bootverbose) {
			printk("%s: target %d using %dbit transfers\n",
			       ahd_name(ahd), devinfo->target,
			       8 * (0x01 << width));
		}
	}

	if ((type & AHD_TRANS_CUR) != 0) {
		/* The chip must be paused while touching the neg table. */
		if (!paused)
			ahd_pause(ahd);
		ahd_update_neg_table(ahd, devinfo, &tinfo->curr);
		if (!paused)
			ahd_unpause(ahd);
	}

	update_needed += ahd_update_neg_request(ahd, devinfo, tstate,
						tinfo, AHD_NEG_TO_GOAL);
	if (update_needed && active)
		ahd_update_pending_scbs(ahd);
}

/*
 * Update the current state of tagged queuing for a given target.
 */
static void
ahd_set_tags(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
	     struct ahd_devinfo *devinfo, ahd_queue_alg alg)
{
	struct scsi_device *sdev = cmd->device;

	ahd_platform_set_tags(ahd, sdev, devinfo, alg);
	ahd_send_async(ahd, devinfo->channel, devinfo->target,
		       devinfo->lun, AC_TRANSFER_NEG);
}

/*
 * Push the "current" transfer settings for a target into the chip's
 * per-target negotiation table (period, offset, PPR options, iocell
 * tweaks and connection options), working around known silicon bugs.
 */
static void
ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
		     struct ahd_transinfo *tinfo)
{
	ahd_mode_state saved_modes;
	u_int period;
	u_int ppr_opts;
	u_int con_opts;
	u_int offset;
	u_int saved_negoaddr;
	uint8_t iocell_opts[sizeof(ahd->iocell_opts)];

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	/* NEGOADDR selects which target's neg-table entry is accessed. */
	saved_negoaddr = ahd_inb(ahd, NEGOADDR);
	ahd_outb(ahd, NEGOADDR, devinfo->target);
	period = tinfo->period;
	offset = tinfo->offset;
	memcpy(iocell_opts, ahd->iocell_opts, sizeof(ahd->iocell_opts));
	ppr_opts = tinfo->ppr_options & (MSG_EXT_PPR_QAS_REQ|MSG_EXT_PPR_DT_REQ
					|MSG_EXT_PPR_IU_REQ|MSG_EXT_PPR_RTI);
	con_opts = 0;
	if (period == 0)
		period = AHD_SYNCRATE_ASYNC;
	if (period == AHD_SYNCRATE_160) {

		if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) {
			/*
			 * When the SPI4 spec was finalized, PACE transfers
			 * was not made a configurable option in the PPR
			 * message.  Instead it is assumed to be enabled for
			 * any syncrate faster than 80MHz.  Nevertheless,
			 * Harpoon2A4 allows this to be configurable.
			 *
			 * Harpoon2A4 also assumes at most 2 data bytes per
			 * negotiated REQ/ACK offset.  Paced transfers take
			 * 4, so we must adjust our offset.
			 */
			ppr_opts |= PPROPT_PACE;
			offset *= 2;

			/*
			 * Harpoon2A assumed that there would be a
			 * fallback rate between 160MHz and 80MHz,
			 * so 7 is used as the period factor rather
			 * than 8 for 160MHz.
			 */
			period = AHD_SYNCRATE_REVA_160;
		}
		if ((tinfo->ppr_options & MSG_EXT_PPR_PCOMP_EN) == 0)
			iocell_opts[AHD_PRECOMP_SLEW_INDEX] &=
			    ~AHD_PRECOMP_MASK;
	} else {
		/*
		 * Precomp should be disabled for non-paced transfers.
		 */
		iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK;

		if ((ahd->features & AHD_NEW_IOCELL_OPTS) != 0
		 && (ppr_opts & MSG_EXT_PPR_DT_REQ) != 0
		 && (ppr_opts & MSG_EXT_PPR_IU_REQ) == 0) {
			/*
			 * Slow down our CRC interval to be
			 * compatible with non-packetized
			 * U160 devices that can't handle a
			 * CRC at full speed.
			 */
			con_opts |= ENSLOWCRC;
		}

		if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) {
			/*
			 * On H2A4, revert to a slower slewrate
			 * on non-paced transfers.
			 */
			iocell_opts[AHD_PRECOMP_SLEW_INDEX] &=
			    ~AHD_SLEWRATE_MASK;
		}
	}

	ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PRECOMP_SLEW);
	ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_PRECOMP_SLEW_INDEX]);
	ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_AMPLITUDE);
	ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_AMPLITUDE_INDEX]);

	ahd_outb(ahd, NEGPERIOD, period);
	ahd_outb(ahd, NEGPPROPTS, ppr_opts);
	ahd_outb(ahd, NEGOFFSET, offset);

	if (tinfo->width == MSG_EXT_WDTR_BUS_16_BIT)
		con_opts |= WIDEXFER;

	/*
	 * Slow down our CRC interval to be
	 * compatible with packetized U320 devices
	 * that can't handle a CRC at full speed
	 */
	if (ahd->features & AHD_AIC79XXB_SLOWCRC) {
		con_opts |= ENSLOWCRC;
	}

	/*
	 * During packetized transfers, the target will
	 * give us the opportunity to send command packets
	 * without us asserting attention.
	 */
	if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) == 0)
		con_opts |= ENAUTOATNO;
	ahd_outb(ahd, NEGCONOPTS, con_opts);
	ahd_outb(ahd, NEGOADDR, saved_negoaddr);
	ahd_restore_modes(ahd, saved_modes);
}

/*
 * When the transfer settings for a connection change, setup for
 * negotiation in pending SCBs to effect the change as quickly as
 * possible.  We also cancel any negotiations that are scheduled
 * for inflight SCBs that have not been started yet.
 */
static void
ahd_update_pending_scbs(struct ahd_softc *ahd)
{
	struct scb *pending_scb;
	int pending_scb_count;
	int paused;
	u_int saved_scbptr;
	ahd_mode_state saved_modes;

	/*
	 * Traverse the pending SCB list and ensure that all of the
	 * SCBs there have the proper settings.  We can only safely
	 * clear the negotiation required flag (setting requires the
	 * execution queue to be modified) and this is only possible
	 * if we are not already attempting to select out for this
	 * SCB.  For this reason, all callers only call this routine
	 * if we are changing the negotiation settings for the currently
	 * active transaction on the bus.
	 */
	pending_scb_count = 0;
	LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
		struct ahd_devinfo devinfo;
		struct ahd_initiator_tinfo *tinfo;
		struct ahd_tmode_tstate *tstate;

		ahd_scb_devinfo(ahd, &devinfo, pending_scb);
		tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		if ((tstate->auto_negotiate & devinfo.target_mask) == 0
		 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
			pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
			pending_scb->hscb->control &= ~MK_MESSAGE;
		}
		ahd_sync_scb(ahd, pending_scb,
			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		pending_scb_count++;
	}

	if (pending_scb_count == 0)
		return;

	if (ahd_is_paused(ahd)) {
		paused = 1;
	} else {
		paused = 0;
		ahd_pause(ahd);
	}

	/*
	 * Force the sequencer to reinitialize the selection for
	 * the command at the head of the execution queue if it
	 * has already been setup.  The negotiation changes may
	 * affect whether we select-out with ATN.  It is only
	 * safe to clear ENSELO when the bus is not free and no
	 * selection is in progress or completed.
	 */
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	if ((ahd_inb(ahd, SCSISIGI) & BSYI) != 0
	 && (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) == 0)
		ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
	saved_scbptr = ahd_get_scbptr(ahd);
	/* Ensure that the hscbs down on the card match the new information */
	LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
		u_int scb_tag;
		u_int control;

		scb_tag = SCB_GET_TAG(pending_scb);
		ahd_set_scbptr(ahd, scb_tag);
		control = ahd_inb_scbram(ahd, SCB_CONTROL);
		control &= ~MK_MESSAGE;
		control |= pending_scb->hscb->control & MK_MESSAGE;
		ahd_outb(ahd, SCB_CONTROL, control);
	}
	ahd_set_scbptr(ahd, saved_scbptr);
	ahd_restore_modes(ahd, saved_modes);

	if (paused == 0)
		ahd_unpause(ahd);
}

/**************************** Pathing Information *****************************/
/*
 * Derive devinfo (our id, target, lun, channel, role) for the connection
 * currently active on the bus by reading chip state.
 */
static void
ahd_fetch_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
	ahd_mode_state saved_modes;
	u_int saved_scsiid;
	role_t role;
	int our_id;

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	if (ahd_inb(ahd, SSTAT0) & TARGET)
		role = ROLE_TARGET;
	else
		role = ROLE_INITIATOR;

	if (role == ROLE_TARGET
	 && (ahd_inb(ahd, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) {
		/* We were selected, so pull our id from TARGIDIN */
		our_id = ahd_inb(ahd, TARGIDIN) & OID;
	} else if (role == ROLE_TARGET)
		our_id = ahd_inb(ahd, TOWNID);
	else
		our_id = ahd_inb(ahd, IOWNID);

	saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
	ahd_compile_devinfo(devinfo,
			    our_id,
			    SCSIID_TARGET(ahd, saved_scsiid),
			    ahd_inb(ahd, SAVED_LUN),
			    SCSIID_CHANNEL(ahd, saved_scsiid),
			    role);
	ahd_restore_modes(ahd, saved_modes);
}

/* Print a "<adapter>:<channel>:<target>:<lun>: " prefix for log messages. */
void
ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
	printk("%s:%c:%d:%d: ", ahd_name(ahd), 'A',
	       devinfo->target, devinfo->lun);
}

/*
 * Map a raw bus phase value to its table entry; unmatched phases fall
 * through to the table's final default entry.
 */
static const struct ahd_phase_table_entry*
ahd_lookup_phase_entry(int phase)
{
	const struct ahd_phase_table_entry *entry;
	const struct ahd_phase_table_entry *last_entry;

	/*
	 * num_phases doesn't include the default entry which
	 * will be returned if the phase doesn't match.
	 */
	last_entry = &ahd_phase_table[num_phases];
	for (entry = ahd_phase_table; entry < last_entry; entry++) {
		if (phase == entry->phase)
			break;
	}
	return (entry);
}

/*
 * Fill in a devinfo structure from its component parts, deriving the
 * per-target offset and selection mask.
 */
void
ahd_compile_devinfo(struct ahd_devinfo *devinfo, u_int our_id, u_int target,
		    u_int lun, char channel, role_t role)
{
	devinfo->our_scsiid = our_id;
	devinfo->target = target;
	devinfo->lun = lun;
	devinfo->target_offset = target;
	devinfo->channel = channel;
	devinfo->role = role;
	/* Channel B targets occupy the upper half of the target space. */
	if (channel == 'B')
		devinfo->target_offset += 8;
	devinfo->target_mask = (0x01 << devinfo->target_offset);
}

/* Build devinfo for the connection described by an SCB. */
static void
ahd_scb_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
		struct scb *scb)
{
	role_t role;
	int our_id;

	our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
	role = ROLE_INITIATOR;
	if ((scb->hscb->control & TARGET_SCB) != 0)
		role = ROLE_TARGET;
	ahd_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahd, scb),
			    SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahd, scb), role);
}

/************************ Message Phase Processing ****************************/
/*
 * When an initiator transaction with the MK_MESSAGE flag either reconnects
 * or enters the initial message out phase, we are interrupted.  Fill our
 * outgoing message buffer with the appropriate message and begin handling
 * the message phase(s) manually.
 */
static void
ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
			   struct scb *scb)
{
	/*
	 * To facilitate adding multiple messages together,
	 * each routine should increment the index and len
	 * variables instead of setting them explicitly.
	 */
	ahd->msgout_index = 0;
	ahd->msgout_len = 0;

	if (ahd_currently_packetized(ahd))
		ahd->msg_flags |= MSG_FLAG_PACKETIZED;

	if (ahd->send_msg_perror
	 && ahd_inb(ahd, MSG_OUT) == HOST_MSG) {
		/* A parity-error message takes precedence over everything. */
		ahd->msgout_buf[ahd->msgout_index++] = ahd->send_msg_perror;
		ahd->msgout_len++;
		ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
			printk("Setting up for Parity Error delivery\n");
#endif
		return;
	} else if (scb == NULL) {
		printk("%s: WARNING. No pending message for "
		       "I_T msgin. Issuing NO-OP\n", ahd_name(ahd));
		ahd->msgout_buf[ahd->msgout_index++] = MSG_NOOP;
		ahd->msgout_len++;
		ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
		return;
	}

	if ((scb->flags & SCB_DEVICE_RESET) == 0
	 && (scb->flags & SCB_PACKETIZED) == 0
	 && ahd_inb(ahd, MSG_OUT) == MSG_IDENTIFYFLAG) {
		u_int identify_msg;

		identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
		if ((scb->hscb->control & DISCENB) != 0)
			identify_msg |= MSG_IDENTIFY_DISCFLAG;
		ahd->msgout_buf[ahd->msgout_index++] = identify_msg;
		ahd->msgout_len++;

		if ((scb->hscb->control & TAG_ENB) != 0) {
			ahd->msgout_buf[ahd->msgout_index++] =
			    scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
			ahd->msgout_buf[ahd->msgout_index++] = SCB_GET_TAG(scb);
			ahd->msgout_len += 2;
		}
	}

	if (scb->flags & SCB_DEVICE_RESET) {
		ahd->msgout_buf[ahd->msgout_index++] = MSG_BUS_DEV_RESET;
		ahd->msgout_len++;
		ahd_print_path(ahd, scb);
		printk("Bus Device Reset Message Sent\n");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahd_outb(ahd, SCSISEQ0, 0);
	} else if ((scb->flags & SCB_ABORT) != 0) {

		if ((scb->hscb->control & TAG_ENB) != 0) {
			ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT_TAG;
		} else {
			ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT;
		}
		ahd->msgout_len++;
		ahd_print_path(ahd, scb);
		printk("Abort%s Message Sent\n",
		       (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahd_outb(ahd, SCSISEQ0, 0);
	} else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
		ahd_build_transfer_msg(ahd, devinfo);
		/*
		 * Clear our selection hardware in advance of potential
		 * PPR IU status change busfree.  We may have an entry in
		 * the waiting Q for this target, and we don't want to go
		 * about selecting while we handle the busfree and blow
		 * it away.
		 */
		ahd_outb(ahd, SCSISEQ0, 0);
	} else {
		printk("ahd_intr: AWAITING_MSG for an SCB that "
		       "does not have a waiting message\n");
		printk("SCSIID = %x, target_mask = %x\n",
		       scb->hscb->scsiid, devinfo->target_mask);
		panic("SCB = %d, SCB Control = %x:%x, MSG_OUT = %x "
		      "SCB flags = %x", SCB_GET_TAG(scb), scb->hscb->control,
		      ahd_inb_scbram(ahd, SCB_CONTROL),
		      ahd_inb(ahd, MSG_OUT), scb->flags);
	}

	/*
	 * Clear the MK_MESSAGE flag from the SCB so we aren't
	 * asked to send this message again.
	 */
	ahd_outb(ahd, SCB_CONTROL,
		 ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE);
	scb->hscb->control &= ~MK_MESSAGE;
	ahd->msgout_index = 0;
	ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
}

/*
 * Build an appropriate transfer negotiation message for the
 * currently active target.
 */
static void
ahd_build_transfer_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
	/*
	 * We need to initiate transfer negotiations.
	 * If our current and goal settings are identical,
	 * we want to renegotiate due to a check condition.
	 */
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	int dowide;
	int dosync;
	int doppr;
	u_int period;
	u_int ppr_options;
	u_int offset;

	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);
	/*
	 * Filter our period based on the current connection.
	 * If we can't perform DT transfers on this segment (not in LVD
	 * mode for instance), then our decision to issue a PPR message
	 * may change.
	 */
	period = tinfo->goal.period;
	offset = tinfo->goal.offset;
	ppr_options = tinfo->goal.ppr_options;
	/* Target initiated PPR is not allowed in the SCSI spec */
	if (devinfo->role == ROLE_TARGET)
		ppr_options = 0;
	ahd_devlimited_syncrate(ahd, tinfo, &period,
				&ppr_options, devinfo->role);
	dowide = tinfo->curr.width != tinfo->goal.width;
	dosync = tinfo->curr.offset != offset || tinfo->curr.period != period;
	/*
	 * Only use PPR if we have options that need it, even if the device
	 * claims to support it.  There might be an expander in the way
	 * that doesn't.
	 */
	doppr = ppr_options != 0;

	if (!dowide && !dosync && !doppr) {
		dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
		dosync = tinfo->goal.offset != 0;
	}

	if (!dowide && !dosync && !doppr) {
		/*
		 * Force async with a WDTR message if we have a wide bus,
		 * or just issue an SDTR with a 0 offset.
		 */
		if ((ahd->features & AHD_WIDE) != 0)
			dowide = 1;
		else
			dosync = 1;
		if (bootverbose) {
			ahd_print_devinfo(ahd, devinfo);
			printk("Ensuring async\n");
		}
	}
	/* Target initiated PPR is not allowed in the SCSI spec */
	if (devinfo->role == ROLE_TARGET)
		doppr = 0;

	/*
	 * Both the PPR message and SDTR message require the
	 * goal syncrate to be limited to what the target device
	 * is capable of handling (based on whether an LVD->SE
	 * expander is on the bus), so combine these two cases.
	 * Regardless, guarantee that if we are using WDTR and SDTR
	 * messages that WDTR comes first.
	 */
	if (doppr || (dosync && !dowide)) {

		offset = tinfo->goal.offset;
		ahd_validate_offset(ahd, tinfo, period, &offset,
				    doppr ? tinfo->goal.width
					  : tinfo->curr.width,
				    devinfo->role);
		if (doppr) {
			ahd_construct_ppr(ahd, devinfo, period, offset,
					  tinfo->goal.width, ppr_options);
		} else {
			ahd_construct_sdtr(ahd, devinfo, period, offset);
		}
	} else {
		ahd_construct_wdtr(ahd, devinfo, tinfo->goal.width);
	}
}

/*
 * Build a synchronous negotiation message in our message
 * buffer based on the input parameters.
 */
static void
ahd_construct_sdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
		   u_int period, u_int offset)
{
	if (offset == 0)
		period = AHD_ASYNC_XFER_PERIOD;
	ahd->msgout_index += spi_populate_sync_msg(
		ahd->msgout_buf + ahd->msgout_index, period, offset);
	ahd->msgout_len += 5;
	if (bootverbose) {
		printk("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
		       ahd_name(ahd), devinfo->channel, devinfo->target,
		       devinfo->lun, period, offset);
	}
}

/*
 * Build a wide negotiation message in our message
 * buffer based on the input parameters.
 */
static void
ahd_construct_wdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
		   u_int bus_width)
{
	ahd->msgout_index += spi_populate_width_msg(
		ahd->msgout_buf + ahd->msgout_index, bus_width);
	ahd->msgout_len += 4;
	if (bootverbose) {
		printk("(%s:%c:%d:%d): Sending WDTR %x\n",
		       ahd_name(ahd), devinfo->channel, devinfo->target,
		       devinfo->lun, bus_width);
	}
}

/*
 * Build a parallel protocol request message in our message
 * buffer based on the input parameters.
 */
static void
ahd_construct_ppr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
		  u_int period, u_int offset, u_int bus_width,
		  u_int ppr_options)
{
	/*
	 * Always request precompensation from
	 * the other target if we are running
	 * at paced syncrates.
	 */
	if (period <= AHD_SYNCRATE_PACED)
		ppr_options |= MSG_EXT_PPR_PCOMP_EN;
	if (offset == 0)
		period = AHD_ASYNC_XFER_PERIOD;
	ahd->msgout_index += spi_populate_ppr_msg(
		ahd->msgout_buf + ahd->msgout_index, period, offset,
		bus_width, ppr_options);
	ahd->msgout_len += 8;
	if (bootverbose) {
		printk("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
		       "offset %x, ppr_options %x\n", ahd_name(ahd),
		       devinfo->channel, devinfo->target, devinfo->lun,
		       bus_width, period, offset, ppr_options);
	}
}

/*
 * Clear any active message state.
 */
static void
ahd_clear_msg_state(struct ahd_softc *ahd)
{
	ahd_mode_state saved_modes;

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	ahd->send_msg_perror = 0;
	ahd->msg_flags = MSG_FLAG_NONE;
	ahd->msgout_len = 0;
	ahd->msgin_index = 0;
	ahd->msg_type = MSG_TYPE_NONE;
	if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0) {
		/*
		 * The target didn't care to respond to our
		 * message request, so clear ATN.
		 */
		ahd_outb(ahd, CLRSINT1, CLRATNO);
	}
	ahd_outb(ahd, MSG_OUT, MSG_NOOP);
	ahd_outb(ahd, SEQ_FLAGS2,
		 ahd_inb(ahd, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
	ahd_restore_modes(ahd, saved_modes);
}

/*
 * Manual message loop handler.
 */
static void
ahd_handle_message_phase(struct ahd_softc *ahd)
{
	struct ahd_devinfo devinfo;
	u_int bus_phase;
	int end_session;

	ahd_fetch_devinfo(ahd, &devinfo);
	end_session = FALSE;
	bus_phase = ahd_inb(ahd, LASTPHASE);

	if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0) {
		printk("LQIRETRY for LQIPHASE_OUTPKT\n");
		ahd_outb(ahd, LQCTL2, LQIRETRY);
	}
reswitch:
	switch (ahd->msg_type) {
	case MSG_TYPE_INITIATOR_MSGOUT:
	{
		int lastbyte;
		int phasemis;
		int msgdone;

		if (ahd->msgout_len == 0 && ahd->send_msg_perror == 0)
			panic("HOST_MSG_LOOP interrupt with no active message");

#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
			ahd_print_devinfo(ahd, &devinfo);
			printk("INITIATOR_MSG_OUT");
		}
#endif
		phasemis = bus_phase != P_MESGOUT;
		if (phasemis) {
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
				printk(" PHASEMIS %s\n",
				       ahd_lookup_phase_entry(bus_phase)
							     ->phasemsg);
			}
#endif
			if (bus_phase == P_MESGIN) {
				/*
				 * Change gears and see if
				 * this message is of interest to
				 * us or should be passed back to
				 * the sequencer.
				 */
				ahd_outb(ahd, CLRSINT1, CLRATNO);
				ahd->send_msg_perror = 0;
				ahd->msg_type = MSG_TYPE_INITIATOR_MSGIN;
				ahd->msgin_index = 0;
				goto reswitch;
			}
			end_session = TRUE;
			break;
		}

		if (ahd->send_msg_perror) {
			ahd_outb(ahd, CLRSINT1, CLRATNO);
			ahd_outb(ahd, CLRSINT1, CLRREQINIT);
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
				printk(" byte 0x%x\n", ahd->send_msg_perror);
#endif
			/*
			 * If we are notifying the target of a CRC error
			 * during packetized operations, the target is
			 * within its rights to acknowledge our message
			 * with a busfree.
			 */
			if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0
			 && ahd->send_msg_perror == MSG_INITIATOR_DET_ERR)
				ahd->msg_flags |= MSG_FLAG_EXPECT_IDE_BUSFREE;

			ahd_outb(ahd, RETURN_2, ahd->send_msg_perror);
			ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE);
			break;
		}

		msgdone = ahd->msgout_index == ahd->msgout_len;
		if (msgdone) {
			/*
			 * The target has requested a retry.
			 * Re-assert ATN, reset our message index to
			 * 0, and try again.
			 */
			ahd->msgout_index = 0;
			ahd_assert_atn(ahd);
		}

		lastbyte = ahd->msgout_index == (ahd->msgout_len - 1);
		if (lastbyte) {
			/* Last byte is signified by dropping ATN */
			ahd_outb(ahd, CLRSINT1, CLRATNO);
		}

		/*
		 * Clear our interrupt status and present
		 * the next byte on the bus.
		 */
		ahd_outb(ahd, CLRSINT1, CLRREQINIT);
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
			printk(" byte 0x%x\n",
			       ahd->msgout_buf[ahd->msgout_index]);
#endif
		ahd_outb(ahd, RETURN_2, ahd->msgout_buf[ahd->msgout_index++]);
		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE);
		break;
	}
	case MSG_TYPE_INITIATOR_MSGIN:
	{
		int phasemis;
		int message_done;

#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
			ahd_print_devinfo(ahd, &devinfo);
			printk("INITIATOR_MSG_IN");
		}
#endif
		phasemis = bus_phase != P_MESGIN;
		if (phasemis) {
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
				printk(" PHASEMIS %s\n",
				       ahd_lookup_phase_entry(bus_phase)
							     ->phasemsg);
			}
#endif
			ahd->msgin_index = 0;
			if (bus_phase == P_MESGOUT
			 && (ahd->send_msg_perror != 0
			  || (ahd->msgout_len != 0
			   && ahd->msgout_index == 0))) {
				ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
				goto reswitch;
			}
			end_session = TRUE;
			break;
		}

		/* Pull the byte in without acking it */
		ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIBUS);
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
			printk(" byte 0x%x\n",
			       ahd->msgin_buf[ahd->msgin_index]);
#endif

		message_done = ahd_parse_msg(ahd, &devinfo);

		if (message_done) {
			/*
			 * Clear our incoming message buffer in case there
			 * is another message following this one.
			 */
			ahd->msgin_index = 0;

			/*
			 * If this message elicited a response,
			 * assert ATN so the target takes us to the
			 * message out phase.
			 */
			if (ahd->msgout_len != 0) {
#ifdef AHD_DEBUG
				if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
					ahd_print_devinfo(ahd, &devinfo);
					printk("Asserting ATN for response\n");
				}
#endif
				ahd_assert_atn(ahd);
			}
		} else
			ahd->msgin_index++;

		if (message_done == MSGLOOP_TERMINATED) {
			end_session = TRUE;
		} else {
			/* Ack the byte */
			ahd_outb(ahd, CLRSINT1, CLRREQINIT);
			ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_READ);
		}
		break;
	}
	case MSG_TYPE_TARGET_MSGIN:
	{
		int msgdone;
		int msgout_request;

		/*
		 * By default, the message loop will continue.
		 */
		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);

		if (ahd->msgout_len == 0)
			panic("Target MSGIN with no active message");

		/*
		 * If we interrupted a mesgout session, the initiator
		 * will not know this until our first REQ.  So, we
		 * only honor mesgout requests after we've sent our
		 * first byte.
		 */
		if ((ahd_inb(ahd, SCSISIGI) & ATNI) != 0
		 && ahd->msgout_index > 0)
			msgout_request = TRUE;
		else
			msgout_request = FALSE;

		if (msgout_request) {
			/*
			 * Change gears and see if
			 * this message is of interest to
			 * us or should be passed back to
			 * the sequencer.
			 */
			ahd->msg_type = MSG_TYPE_TARGET_MSGOUT;
			ahd_outb(ahd, SCSISIGO, P_MESGOUT | BSYO);
			ahd->msgin_index = 0;
			/* Dummy read to REQ for first byte */
			ahd_inb(ahd, SCSIDAT);
			ahd_outb(ahd, SXFRCTL0,
				 ahd_inb(ahd, SXFRCTL0) | SPIOEN);
			break;
		}

		msgdone = ahd->msgout_index == ahd->msgout_len;
		if (msgdone) {
			ahd_outb(ahd, SXFRCTL0,
				 ahd_inb(ahd, SXFRCTL0) & ~SPIOEN);
			end_session = TRUE;
			break;
		}

		/*
		 * Present the next byte on the bus.
		 */
		ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN);
		ahd_outb(ahd, SCSIDAT, ahd->msgout_buf[ahd->msgout_index++]);
		break;
	}
	case MSG_TYPE_TARGET_MSGOUT:
	{
		int lastbyte;
		int msgdone;

		/*
		 * By default, the message loop will continue.
		 */
		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);

		/*
		 * The initiator signals that this is
		 * the last byte by dropping ATN.
		 */
		lastbyte = (ahd_inb(ahd, SCSISIGI) & ATNI) == 0;

		/*
		 * Read the latched byte, but turn off SPIOEN first
		 * so that we don't inadvertently cause a REQ for the
		 * next byte.
		 */
		ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) & ~SPIOEN);
		ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIDAT);
		msgdone = ahd_parse_msg(ahd, &devinfo);
		if (msgdone == MSGLOOP_TERMINATED) {
			/*
			 * The message is *really* done in that it caused
			 * us to go to bus free.  The sequencer has already
			 * been reset at this point, so pull the ejection
			 * handle.
			 */
			return;
		}

		ahd->msgin_index++;

		/*
		 * XXX Read spec about initiator dropping ATN too soon
		 *     and use msgdone to detect it.
		 */
		if (msgdone == MSGLOOP_MSGCOMPLETE) {
			ahd->msgin_index = 0;

			/*
			 * If this message elicited a response, transition
			 * to the Message in phase and send it.
			 */
			if (ahd->msgout_len != 0) {
				ahd_outb(ahd, SCSISIGO, P_MESGIN | BSYO);
				ahd_outb(ahd, SXFRCTL0,
					 ahd_inb(ahd, SXFRCTL0) | SPIOEN);
				ahd->msg_type = MSG_TYPE_TARGET_MSGIN;
				ahd->msgin_index = 0;
				break;
			}
		}

		if (lastbyte)
			end_session = TRUE;
		else {
			/* Ask for the next byte. */
			ahd_outb(ahd, SXFRCTL0,
				 ahd_inb(ahd, SXFRCTL0) | SPIOEN);
		}

		break;
	}
	default:
		panic("Unknown REQINIT message type");
	}

	if (end_session) {
		if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0) {
			printk("%s: Returning to Idle Loop\n",
			       ahd_name(ahd));
			ahd_clear_msg_state(ahd);

			/*
			 * Perform the equivalent of a clear_target_state.
			 */
			ahd_outb(ahd, LASTPHASE, P_BUSFREE);
			ahd_outb(ahd, SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT);
			ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET);
		} else {
			ahd_clear_msg_state(ahd);
			ahd_outb(ahd, RETURN_1, EXIT_MSG_LOOP);
		}
	}
}

/*
 * See if we sent a particular extended message to the target.
 * If "full" is true, return true only if the target saw the full
 * message.  If "full" is false, return true if the target saw at
 * least the first byte of the message.
 */
static int
ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full)
{
	int found;
	u_int index;

	found = FALSE;
	index = 0;

	/* Scan the outgoing message buffer one message at a time. */
	while (index < ahd->msgout_len) {
		if (ahd->msgout_buf[index] == MSG_EXTENDED) {
			u_int end_index;

			/* Byte after the opcode holds the extended length. */
			end_index = index + 1 + ahd->msgout_buf[index + 1];
			if (ahd->msgout_buf[index+2] == msgval
			 && type == AHDMSG_EXT) {

				if (full) {
					if (ahd->msgout_index > end_index)
						found = TRUE;
				} else if (ahd->msgout_index > index)
					found = TRUE;
			}
			index = end_index;
		} else if (ahd->msgout_buf[index] >= MSG_SIMPLE_TASK
			&& ahd->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {

			/* Skip tag type and tag id or residue param*/
			index += 2;
		} else {
			/* Single byte message */
			if (type == AHDMSG_1B
			 && ahd->msgout_index > index
			 && (ahd->msgout_buf[index] == msgval
			  || ((ahd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
			   && msgval == MSG_IDENTIFYFLAG)))
				found = TRUE;
			index++;
		}

		if (found)
			break;
	}
	return (found);
}

/*
 * Wait for a complete incoming message, parse it, and respond accordingly.
 */
static int
ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	int reject;
	int done;
	int response;

	done = MSGLOOP_IN_PROG;
	response = FALSE;
	reject = FALSE;
	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	/*
	 * Parse as much of the message as is available,
	 * rejecting it if we don't support it.  When
	 * the entire message is available and has been
	 * handled, return MSGLOOP_MSGCOMPLETE, indicating
	 * that we have parsed an entire message.
	 *
	 * In the case of extended messages, we accept the length
	 * byte outright and perform more checking once we know the
	 * extended message type.
	 */
	switch (ahd->msgin_buf[0]) {
	case MSG_DISCONNECT:
	case MSG_SAVEDATAPOINTER:
	case MSG_CMDCOMPLETE:
	case MSG_RESTOREPOINTERS:
	case MSG_IGN_WIDE_RESIDUE:
		/*
		 * End our message loop as these are messages
		 * the sequencer handles on its own.
*/ done = MSGLOOP_TERMINATED; break; case MSG_MESSAGE_REJECT: response = ahd_handle_msg_reject(ahd, devinfo); /* FALLTHROUGH */ case MSG_NOOP: done = MSGLOOP_MSGCOMPLETE; break; case MSG_EXTENDED: { /* Wait for enough of the message to begin validation */ if (ahd->msgin_index < 2) break; switch (ahd->msgin_buf[2]) { case MSG_EXT_SDTR: { u_int period; u_int ppr_options; u_int offset; u_int saved_offset; if (ahd->msgin_buf[1] != MSG_EXT_SDTR_LEN) { reject = TRUE; break; } /* * Wait until we have both args before validating * and acting on this message. * * Add one to MSG_EXT_SDTR_LEN to account for * the extended message preamble. */ if (ahd->msgin_index < (MSG_EXT_SDTR_LEN + 1)) break; period = ahd->msgin_buf[3]; ppr_options = 0; saved_offset = offset = ahd->msgin_buf[4]; ahd_devlimited_syncrate(ahd, tinfo, &period, &ppr_options, devinfo->role); ahd_validate_offset(ahd, tinfo, period, &offset, tinfo->curr.width, devinfo->role); if (bootverbose) { printk("(%s:%c:%d:%d): Received " "SDTR period %x, offset %x\n\t" "Filtered to period %x, offset %x\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, ahd->msgin_buf[3], saved_offset, period, offset); } ahd_set_syncrate(ahd, devinfo, period, offset, ppr_options, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); /* * See if we initiated Sync Negotiation * and didn't have to fall down to async * transfers. 
*/ if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, TRUE)) { /* We started it */ if (saved_offset != offset) { /* Went too low - force async */ reject = TRUE; } } else { /* * Send our own SDTR in reply */ if (bootverbose && devinfo->role == ROLE_INITIATOR) { printk("(%s:%c:%d:%d): Target " "Initiated SDTR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); } ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_construct_sdtr(ahd, devinfo, period, offset); ahd->msgout_index = 0; response = TRUE; } done = MSGLOOP_MSGCOMPLETE; break; } case MSG_EXT_WDTR: { u_int bus_width; u_int saved_width; u_int sending_reply; sending_reply = FALSE; if (ahd->msgin_buf[1] != MSG_EXT_WDTR_LEN) { reject = TRUE; break; } /* * Wait until we have our arg before validating * and acting on this message. * * Add one to MSG_EXT_WDTR_LEN to account for * the extended message preamble. */ if (ahd->msgin_index < (MSG_EXT_WDTR_LEN + 1)) break; bus_width = ahd->msgin_buf[3]; saved_width = bus_width; ahd_validate_width(ahd, tinfo, &bus_width, devinfo->role); if (bootverbose) { printk("(%s:%c:%d:%d): Received WDTR " "%x filtered to %x\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, saved_width, bus_width); } if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, TRUE)) { /* * Don't send a WDTR back to the * target, since we asked first. * If the width went higher than our * request, reject it. */ if (saved_width > bus_width) { reject = TRUE; printk("(%s:%c:%d:%d): requested %dBit " "transfers. 
Rejecting...\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, 8 * (0x01 << bus_width)); bus_width = 0; } } else { /* * Send our own WDTR in reply */ if (bootverbose && devinfo->role == ROLE_INITIATOR) { printk("(%s:%c:%d:%d): Target " "Initiated WDTR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); } ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_construct_wdtr(ahd, devinfo, bus_width); ahd->msgout_index = 0; response = TRUE; sending_reply = TRUE; } /* * After a wide message, we are async, but * some devices don't seem to honor this portion * of the spec. Force a renegotiation of the * sync component of our transfer agreement even * if our goal is async. By updating our width * after forcing the negotiation, we avoid * renegotiating for width. */ ahd_update_neg_request(ahd, devinfo, tstate, tinfo, AHD_NEG_ALWAYS); ahd_set_width(ahd, devinfo, bus_width, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); if (sending_reply == FALSE && reject == FALSE) { /* * We will always have an SDTR to send. */ ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_build_transfer_msg(ahd, devinfo); ahd->msgout_index = 0; response = TRUE; } done = MSGLOOP_MSGCOMPLETE; break; } case MSG_EXT_PPR: { u_int period; u_int offset; u_int bus_width; u_int ppr_options; u_int saved_width; u_int saved_offset; u_int saved_ppr_options; if (ahd->msgin_buf[1] != MSG_EXT_PPR_LEN) { reject = TRUE; break; } /* * Wait until we have all args before validating * and acting on this message. * * Add one to MSG_EXT_PPR_LEN to account for * the extended message preamble. */ if (ahd->msgin_index < (MSG_EXT_PPR_LEN + 1)) break; period = ahd->msgin_buf[3]; offset = ahd->msgin_buf[5]; bus_width = ahd->msgin_buf[6]; saved_width = bus_width; ppr_options = ahd->msgin_buf[7]; /* * According to the spec, a DT only * period factor with no DT option * set implies async. 
*/ if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0 && period <= 9) offset = 0; saved_ppr_options = ppr_options; saved_offset = offset; /* * Transfer options are only available if we * are negotiating wide. */ if (bus_width == 0) ppr_options &= MSG_EXT_PPR_QAS_REQ; ahd_validate_width(ahd, tinfo, &bus_width, devinfo->role); ahd_devlimited_syncrate(ahd, tinfo, &period, &ppr_options, devinfo->role); ahd_validate_offset(ahd, tinfo, period, &offset, bus_width, devinfo->role); if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, TRUE)) { /* * If we are unable to do any of the * requested options (we went too low), * then we'll have to reject the message. */ if (saved_width > bus_width || saved_offset != offset || saved_ppr_options != ppr_options) { reject = TRUE; period = 0; offset = 0; bus_width = 0; ppr_options = 0; } } else { if (devinfo->role != ROLE_TARGET) printk("(%s:%c:%d:%d): Target " "Initiated PPR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); else printk("(%s:%c:%d:%d): Initiator " "Initiated PPR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_construct_ppr(ahd, devinfo, period, offset, bus_width, ppr_options); ahd->msgout_index = 0; response = TRUE; } if (bootverbose) { printk("(%s:%c:%d:%d): Received PPR width %x, " "period %x, offset %x,options %x\n" "\tFiltered to width %x, period %x, " "offset %x, options %x\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, saved_width, ahd->msgin_buf[3], saved_offset, saved_ppr_options, bus_width, period, offset, ppr_options); } ahd_set_width(ahd, devinfo, bus_width, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); ahd_set_syncrate(ahd, devinfo, period, offset, ppr_options, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); done = MSGLOOP_MSGCOMPLETE; break; } default: /* Unknown extended message. Reject it. 
*/ reject = TRUE; break; } break; } #ifdef AHD_TARGET_MODE case MSG_BUS_DEV_RESET: ahd_handle_devreset(ahd, devinfo, CAM_LUN_WILDCARD, CAM_BDR_SENT, "Bus Device Reset Received", /*verbose_level*/0); ahd_restart(ahd); done = MSGLOOP_TERMINATED; break; case MSG_ABORT_TAG: case MSG_ABORT: case MSG_CLEAR_QUEUE: { int tag; /* Target mode messages */ if (devinfo->role != ROLE_TARGET) { reject = TRUE; break; } tag = SCB_LIST_NULL; if (ahd->msgin_buf[0] == MSG_ABORT_TAG) tag = ahd_inb(ahd, INITIATOR_TAG); ahd_abort_scbs(ahd, devinfo->target, devinfo->channel, devinfo->lun, tag, ROLE_TARGET, CAM_REQ_ABORTED); tstate = ahd->enabled_targets[devinfo->our_scsiid]; if (tstate != NULL) { struct ahd_tmode_lstate* lstate; lstate = tstate->enabled_luns[devinfo->lun]; if (lstate != NULL) { ahd_queue_lstate_event(ahd, lstate, devinfo->our_scsiid, ahd->msgin_buf[0], /*arg*/tag); ahd_send_lstate_events(ahd, lstate); } } ahd_restart(ahd); done = MSGLOOP_TERMINATED; break; } #endif case MSG_QAS_REQUEST: #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printk("%s: QAS request. SCSISIGI == 0x%x\n", ahd_name(ahd), ahd_inb(ahd, SCSISIGI)); #endif ahd->msg_flags |= MSG_FLAG_EXPECT_QASREJ_BUSFREE; /* FALLTHROUGH */ case MSG_TERM_IO_PROC: default: reject = TRUE; break; } if (reject) { /* * Setup to reject the message. */ ahd->msgout_index = 0; ahd->msgout_len = 1; ahd->msgout_buf[0] = MSG_MESSAGE_REJECT; done = MSGLOOP_MSGCOMPLETE; response = TRUE; } if (done != MSGLOOP_IN_PROG && !response) /* Clear the outgoing message buffer */ ahd->msgout_len = 0; return (done); } /* * Process a message reject message. */ static int ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { /* * What we care about here is if we had an * outstanding SDTR or WDTR message for this * target. If we did, this is a signal that * the target is refusing negotiation. 
 */
	struct	scb *scb;
	struct	ahd_initiator_tinfo *tinfo;
	struct	ahd_tmode_tstate *tstate;
	u_int	scb_index;
	u_int	last_msg;
	int	response = 0;

	scb_index = ahd_get_scbptr(ahd);
	scb = ahd_lookup_scb(ahd, scb_index);
	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel,
				    devinfo->our_scsiid,
				    devinfo->target, &tstate);
	/* Might be necessary */
	last_msg = ahd_inb(ahd, LAST_MSG);

	if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
		if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/TRUE)
		 && tinfo->goal.period <= AHD_SYNCRATE_PACED) {
			/*
			 * Target may not like our SPI-4 PPR Options.
			 * Attempt to negotiate 80MHz which will turn
			 * off these options.
			 */
			if (bootverbose) {
				printk("(%s:%c:%d:%d): PPR Rejected. "
				       "Trying simple U160 PPR\n",
				       ahd_name(ahd), devinfo->channel,
				       devinfo->target, devinfo->lun);
			}
			tinfo->goal.period = AHD_SYNCRATE_DT;
			tinfo->goal.ppr_options &= MSG_EXT_PPR_IU_REQ
						|  MSG_EXT_PPR_QAS_REQ
						|  MSG_EXT_PPR_DT_REQ;
		} else {
			/*
			 * Target does not support the PPR message.
			 * Attempt to negotiate SPI-2 style.
			 */
			if (bootverbose) {
				printk("(%s:%c:%d:%d): PPR Rejected. "
				       "Trying WDTR/SDTR\n",
				       ahd_name(ahd), devinfo->channel,
				       devinfo->target, devinfo->lun);
			}
			tinfo->goal.ppr_options = 0;
			tinfo->curr.transport_version = 2;
			tinfo->goal.transport_version = 2;
		}
		/* Restart negotiation with the downgraded goal. */
		ahd->msgout_index = 0;
		ahd->msgout_len = 0;
		ahd_build_transfer_msg(ahd, devinfo);
		ahd->msgout_index = 0;
		response = 1;
	} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {
		/* note 8bit xfers */
		printk("(%s:%c:%d:%d): refuses WIDE negotiation. Using "
		       "8bit transfers\n", ahd_name(ahd),
		       devinfo->channel, devinfo->target, devinfo->lun);
		ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
			      AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
			      /*paused*/TRUE);
		/*
		 * No need to clear the sync rate.  If the target
		 * did not accept the command, our syncrate is
		 * unaffected.  If the target started the negotiation,
		 * but rejected our response, we already cleared the
		 * sync rate before sending our WDTR.
		 */
		if (tinfo->goal.offset != tinfo->curr.offset) {
			/* Start the sync negotiation */
			ahd->msgout_index = 0;
			ahd->msgout_len = 0;
			ahd_build_transfer_msg(ahd, devinfo);
			ahd->msgout_index = 0;
			response = 1;
		}
	} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
		/* note asynch xfers and clear flag */
		ahd_set_syncrate(ahd, devinfo, /*period*/0,
				 /*offset*/0, /*ppr_options*/0,
				 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
				 /*paused*/TRUE);
		printk("(%s:%c:%d:%d): refuses synchronous negotiation. "
		       "Using asynchronous transfers\n",
		       ahd_name(ahd), devinfo->channel,
		       devinfo->target, devinfo->lun);
	} else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) {
		int tag_type;
		int mask;

		tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);

		if (tag_type == MSG_SIMPLE_TASK) {
			printk("(%s:%c:%d:%d): refuses tagged commands. "
			       "Performing non-tagged I/O\n", ahd_name(ahd),
			       devinfo->channel, devinfo->target, devinfo->lun);
			ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_NONE);
			mask = ~0x23;
		} else {
			printk("(%s:%c:%d:%d): refuses %s tagged commands. "
			       "Performing simple queue tagged I/O only\n",
			       ahd_name(ahd), devinfo->channel, devinfo->target,
			       devinfo->lun, tag_type == MSG_ORDERED_TASK
			       ? "ordered" : "head of queue");
			ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_BASIC);
			mask = ~0x03;
		}

		/*
		 * Resend the identify for this CCB as the target
		 * may believe that the selection is invalid otherwise.
		 * NOTE(review): the ~0x23/~0x03 masks strip the tag-related
		 * bits from SCB_CONTROL - see the SCB control definitions.
		 */
		ahd_outb(ahd, SCB_CONTROL,
			 ahd_inb_scbram(ahd, SCB_CONTROL) & mask);
		scb->hscb->control &= mask;
		ahd_set_transaction_tag(scb, /*enabled*/FALSE,
					/*type*/MSG_SIMPLE_TASK);
		ahd_outb(ahd, MSG_OUT, MSG_IDENTIFYFLAG);
		ahd_assert_atn(ahd);
		ahd_busy_tcl(ahd, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
			     SCB_GET_TAG(scb));

		/*
		 * Requeue all tagged commands for this target
		 * currently in our possession so they can be
		 * converted to untagged commands.
		 */
		ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
				   SCB_GET_CHANNEL(ahd, scb),
				   SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
				   SEARCH_COMPLETE);
	} else if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_IDENTIFYFLAG, TRUE)) {
		/*
		 * Most likely the device believes that we had
		 * previously negotiated packetized.
		 */
		ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE
			       |  MSG_FLAG_IU_REQ_CHANGED;

		ahd_force_renegotiation(ahd, devinfo);
		ahd->msgout_index = 0;
		ahd->msgout_len = 0;
		ahd_build_transfer_msg(ahd, devinfo);
		ahd->msgout_index = 0;
		response = 1;
	} else {
		/*
		 * Otherwise, we ignore it.
		 */
		printk("%s:%c:%d: Message reject for %x -- ignored\n",
		       ahd_name(ahd), devinfo->channel, devinfo->target,
		       last_msg);
	}
	return (response);
}

/*
 * Process an ignore wide residue message.
 */
static void
ahd_handle_ign_wide_residue(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
	u_int scb_index;
	struct scb *scb;

	scb_index = ahd_get_scbptr(ahd);
	scb = ahd_lookup_scb(ahd, scb_index);
	/*
	 * XXX Actually check data direction in the sequencer?
	 * Perhaps add datadir to some spare bits in the hscb?
	 */
	if ((ahd_inb(ahd, SEQ_FLAGS) & DPHASE) == 0
	 || ahd_get_transfer_dir(scb) != CAM_DIR_IN) {
		/*
		 * Ignore the message if we haven't
		 * seen an appropriate data phase yet.
		 */
	} else {
		/*
		 * If the residual occurred on the last
		 * transfer and the transfer request was
		 * expected to end on an odd count, do
		 * nothing.  Otherwise, subtract a byte
		 * and update the residual count accordingly.
		 */
		uint32_t sgptr;

		sgptr = ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR);
		if ((sgptr & SG_LIST_NULL) != 0
		 && (ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE)
		     & SCB_XFERLEN_ODD) != 0) {
			/*
			 * If the residual occurred on the last
			 * transfer and the transfer request was
			 * expected to end on an odd count, do
			 * nothing.
			 */
		} else {
			uint32_t data_cnt;
			uint64_t data_addr;
			uint32_t sglen;

			/* Pull in the rest of the sgptr */
			sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
			data_cnt = ahd_inl_scbram(ahd, SCB_RESIDUAL_DATACNT);
			if ((sgptr & SG_LIST_NULL) != 0) {
				/*
				 * The residual data count is not updated
				 * for the command run to completion case.
				 * Explicitly zero the count.
				 */
				data_cnt &= ~AHD_SG_LEN_MASK;
			}
			data_addr = ahd_inq(ahd, SHADDR);
			/* Back up one byte: the target reported a wide
			 * residue, so the last byte was not transferred. */
			data_cnt += 1;
			data_addr -= 1;
			sgptr &= SG_PTR_MASK;
			if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
				struct ahd_dma64_seg *sg;

				sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);

				/*
				 * The residual sg ptr points to the next S/G
				 * to load so we must go back one.
				 */
				sg--;
				sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK;
				if (sg != scb->sg_list
				 && sglen < (data_cnt & AHD_SG_LEN_MASK)) {
					/* Byte lies in the previous segment. */
					sg--;
					sglen = ahd_le32toh(sg->len);
					/*
					 * Preserve High Address and SG_LIST
					 * bits while setting the count to 1.
					 */
					data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK));
					data_addr = ahd_le64toh(sg->addr)
						  + (sglen & AHD_SG_LEN_MASK)
						  - 1;

					/*
					 * Increment sg so it points to the
					 * "next" sg.
					 */
					sg++;
					sgptr = ahd_sg_virt_to_bus(ahd, scb,
								   sg);
				}
			} else {
				struct ahd_dma_seg *sg;

				sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);

				/*
				 * The residual sg ptr points to the next S/G
				 * to load so we must go back one.
				 */
				sg--;
				sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK;
				if (sg != scb->sg_list
				 && sglen < (data_cnt & AHD_SG_LEN_MASK)) {
					/* Byte lies in the previous segment. */
					sg--;
					sglen = ahd_le32toh(sg->len);
					/*
					 * Preserve High Address and SG_LIST
					 * bits while setting the count to 1.
					 */
					data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK));
					data_addr = ahd_le32toh(sg->addr)
						  + (sglen & AHD_SG_LEN_MASK)
						  - 1;

					/*
					 * Increment sg so it points to the
					 * "next" sg.
					 */
					sg++;
					sgptr = ahd_sg_virt_to_bus(ahd, scb,
								   sg);
				}
			}
			/*
			 * Toggle the "oddness" of the transfer length
			 * to handle this mid-transfer ignore wide
			 * residue.  This ensures that the oddness is
			 * correct for subsequent data transfers.
			 */
			ahd_outb(ahd, SCB_TASK_ATTRIBUTE,
			    ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE)
			    ^ SCB_XFERLEN_ODD);

			ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);
			ahd_outl(ahd, SCB_RESIDUAL_DATACNT, data_cnt);
			/*
			 * The FIFO's pointers will be updated if/when the
			 * sequencer re-enters a data phase.
			 */
		}
	}
}

/*
 * Reinitialize the data pointers for the active transfer
 * based on its current residual.
 */
static void
ahd_reinitialize_dataptrs(struct ahd_softc *ahd)
{
	struct	 scb *scb;
	ahd_mode_state saved_modes;
	u_int	 scb_index;
	u_int	 wait;
	uint32_t sgptr;
	uint32_t resid;
	uint64_t dataptr;

	AHD_ASSERT_MODES(ahd, AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK,
			 AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK);

	scb_index = ahd_get_scbptr(ahd);
	scb = ahd_lookup_scb(ahd, scb_index);

	/*
	 * Release and reacquire the FIFO so we
	 * have a clean slate.
	 */
	ahd_outb(ahd, DFFSXFRCTL, CLRCHN);
	wait = 1000;
	while (--wait && !(ahd_inb(ahd, MDFFSTAT) & FIFOFREE))
		ahd_delay(100);
	if (wait == 0) {
		/* Timed out waiting for FIFOFREE; force the channel free. */
		ahd_print_path(ahd, scb);
		printk("ahd_reinitialize_dataptrs: Forcing FIFO free.\n");
		ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT);
	}
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	ahd_outb(ahd, DFFSTAT,
		 ahd_inb(ahd, DFFSTAT)
		| (saved_modes == 0x11 ? CURRFIFO_1 : CURRFIFO_0));

	/*
	 * Determine initial values for data_addr and data_cnt
	 * for resuming the data phase.
*/ sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); sgptr &= SG_PTR_MASK; resid = (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 2) << 16) | (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 1) << 8) | ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT); if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); /* The residual sg_ptr always points to the next sg */ sg--; dataptr = ahd_le64toh(sg->addr) + (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK) - resid; ahd_outl(ahd, HADDR + 4, dataptr >> 32); } else { struct ahd_dma_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); /* The residual sg_ptr always points to the next sg */ sg--; dataptr = ahd_le32toh(sg->addr) + (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK) - resid; ahd_outb(ahd, HADDR + 4, (ahd_le32toh(sg->len) & ~AHD_SG_LEN_MASK) >> 24); } ahd_outl(ahd, HADDR, dataptr); ahd_outb(ahd, HCNT + 2, resid >> 16); ahd_outb(ahd, HCNT + 1, resid >> 8); ahd_outb(ahd, HCNT, resid); } /* * Handle the effects of issuing a bus device reset message. */ static void ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int lun, cam_status status, char *message, int verbose_level) { #ifdef AHD_TARGET_MODE struct ahd_tmode_tstate* tstate; #endif int found; found = ahd_abort_scbs(ahd, devinfo->target, devinfo->channel, lun, SCB_LIST_NULL, devinfo->role, status); #ifdef AHD_TARGET_MODE /* * Send an immediate notify ccb to all target mord peripheral * drivers affected by this action. 
*/ tstate = ahd->enabled_targets[devinfo->our_scsiid]; if (tstate != NULL) { u_int cur_lun; u_int max_lun; if (lun != CAM_LUN_WILDCARD) { cur_lun = 0; max_lun = AHD_NUM_LUNS - 1; } else { cur_lun = lun; max_lun = lun; } for (;cur_lun <= max_lun; cur_lun++) { struct ahd_tmode_lstate* lstate; lstate = tstate->enabled_luns[cur_lun]; if (lstate == NULL) continue; ahd_queue_lstate_event(ahd, lstate, devinfo->our_scsiid, MSG_BUS_DEV_RESET, /*arg*/0); ahd_send_lstate_events(ahd, lstate); } } #endif /* * Go back to async/narrow transfers and renegotiate. */ ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_CUR, /*paused*/TRUE); ahd_set_syncrate(ahd, devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_CUR, /*paused*/TRUE); if (status != CAM_SEL_TIMEOUT) ahd_send_async(ahd, devinfo->channel, devinfo->target, CAM_LUN_WILDCARD, AC_SENT_BDR); if (message != NULL && bootverbose) printk("%s: %s on %c:%d. %d SCBs aborted\n", ahd_name(ahd), message, devinfo->channel, devinfo->target, found); } #ifdef AHD_TARGET_MODE static void ahd_setup_target_msgin(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb) { /* * To facilitate adding multiple messages together, * each routine should increment the index and len * variables instead of setting them explicitly. */ ahd->msgout_index = 0; ahd->msgout_len = 0; if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0) ahd_build_transfer_msg(ahd, devinfo); else panic("ahd_intr: AWAITING target message with no message"); ahd->msgout_index = 0; ahd->msg_type = MSG_TYPE_TARGET_MSGIN; } #endif /**************************** Initialization **********************************/ static u_int ahd_sglist_size(struct ahd_softc *ahd) { bus_size_t list_size; list_size = sizeof(struct ahd_dma_seg) * AHD_NSEG; if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) list_size = sizeof(struct ahd_dma64_seg) * AHD_NSEG; return (list_size); } /* * Calculate the optimum S/G List allocation size. 
S/G elements used * for a given transaction must be physically contiguous. Assume the * OS will allocate full pages to us, so it doesn't make sense to request * less than a page. */ static u_int ahd_sglist_allocsize(struct ahd_softc *ahd) { bus_size_t sg_list_increment; bus_size_t sg_list_size; bus_size_t max_list_size; bus_size_t best_list_size; /* Start out with the minimum required for AHD_NSEG. */ sg_list_increment = ahd_sglist_size(ahd); sg_list_size = sg_list_increment; /* Get us as close as possible to a page in size. */ while ((sg_list_size + sg_list_increment) <= PAGE_SIZE) sg_list_size += sg_list_increment; /* * Try to reduce the amount of wastage by allocating * multiple pages. */ best_list_size = sg_list_size; max_list_size = roundup(sg_list_increment, PAGE_SIZE); if (max_list_size < 4 * PAGE_SIZE) max_list_size = 4 * PAGE_SIZE; if (max_list_size > (AHD_SCB_MAX_ALLOC * sg_list_increment)) max_list_size = (AHD_SCB_MAX_ALLOC * sg_list_increment); while ((sg_list_size + sg_list_increment) <= max_list_size && (sg_list_size % PAGE_SIZE) != 0) { bus_size_t new_mod; bus_size_t best_mod; sg_list_size += sg_list_increment; new_mod = sg_list_size % PAGE_SIZE; best_mod = best_list_size % PAGE_SIZE; if (new_mod > best_mod || new_mod == 0) { best_list_size = sg_list_size; } } return (best_list_size); } /* * Allocate a controller structure for a new device * and perform initial initializion. 
*/ struct ahd_softc * ahd_alloc(void *platform_arg, char *name) { struct ahd_softc *ahd; #ifndef __FreeBSD__ ahd = kmalloc(sizeof(*ahd), GFP_ATOMIC); if (!ahd) { printk("aic7xxx: cannot malloc softc!\n"); kfree(name); return NULL; } #else ahd = device_get_softc((device_t)platform_arg); #endif memset(ahd, 0, sizeof(*ahd)); ahd->seep_config = kmalloc(sizeof(*ahd->seep_config), GFP_ATOMIC); if (ahd->seep_config == NULL) { #ifndef __FreeBSD__ kfree(ahd); #endif kfree(name); return (NULL); } LIST_INIT(&ahd->pending_scbs); /* We don't know our unit number until the OSM sets it */ ahd->name = name; ahd->unit = -1; ahd->description = NULL; ahd->bus_description = NULL; ahd->channel = 'A'; ahd->chip = AHD_NONE; ahd->features = AHD_FENONE; ahd->bugs = AHD_BUGNONE; ahd->flags = AHD_SPCHK_ENB_A|AHD_RESET_BUS_A|AHD_TERM_ENB_A | AHD_EXTENDED_TRANS_A|AHD_STPWLEVEL_A; ahd_timer_init(&ahd->reset_timer); ahd_timer_init(&ahd->stat_timer); ahd->int_coalescing_timer = AHD_INT_COALESCING_TIMER_DEFAULT; ahd->int_coalescing_maxcmds = AHD_INT_COALESCING_MAXCMDS_DEFAULT; ahd->int_coalescing_mincmds = AHD_INT_COALESCING_MINCMDS_DEFAULT; ahd->int_coalescing_threshold = AHD_INT_COALESCING_THRESHOLD_DEFAULT; ahd->int_coalescing_stop_threshold = AHD_INT_COALESCING_STOP_THRESHOLD_DEFAULT; if (ahd_platform_alloc(ahd, platform_arg) != 0) { ahd_free(ahd); ahd = NULL; } #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MEMORY) != 0) { printk("%s: scb size = 0x%x, hscb size = 0x%x\n", ahd_name(ahd), (u_int)sizeof(struct scb), (u_int)sizeof(struct hardware_scb)); } #endif return (ahd); } int ahd_softc_init(struct ahd_softc *ahd) { ahd->unpause = 0; ahd->pause = PAUSE; return (0); } void ahd_set_unit(struct ahd_softc *ahd, int unit) { ahd->unit = unit; } void ahd_set_name(struct ahd_softc *ahd, char *name) { if (ahd->name != NULL) kfree(ahd->name); ahd->name = name; } void ahd_free(struct ahd_softc *ahd) { int i; switch (ahd->init_level) { default: case 5: ahd_shutdown(ahd); /* FALLTHROUGH */ case 4: 
ahd_dmamap_unload(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap); /* FALLTHROUGH */ case 3: ahd_dmamem_free(ahd, ahd->shared_data_dmat, ahd->qoutfifo, ahd->shared_data_map.dmamap); ahd_dmamap_destroy(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap); /* FALLTHROUGH */ case 2: ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat); case 1: #ifndef __linux__ ahd_dma_tag_destroy(ahd, ahd->buffer_dmat); #endif break; case 0: break; } #ifndef __linux__ ahd_dma_tag_destroy(ahd, ahd->parent_dmat); #endif ahd_platform_free(ahd); ahd_fini_scbdata(ahd); for (i = 0; i < AHD_NUM_TARGETS; i++) { struct ahd_tmode_tstate *tstate; tstate = ahd->enabled_targets[i]; if (tstate != NULL) { #ifdef AHD_TARGET_MODE int j; for (j = 0; j < AHD_NUM_LUNS; j++) { struct ahd_tmode_lstate *lstate; lstate = tstate->enabled_luns[j]; if (lstate != NULL) { xpt_free_path(lstate->path); kfree(lstate); } } #endif kfree(tstate); } } #ifdef AHD_TARGET_MODE if (ahd->black_hole != NULL) { xpt_free_path(ahd->black_hole->path); kfree(ahd->black_hole); } #endif if (ahd->name != NULL) kfree(ahd->name); if (ahd->seep_config != NULL) kfree(ahd->seep_config); if (ahd->saved_stack != NULL) kfree(ahd->saved_stack); #ifndef __FreeBSD__ kfree(ahd); #endif return; } static void ahd_shutdown(void *arg) { struct ahd_softc *ahd; ahd = (struct ahd_softc *)arg; /* * Stop periodic timer callbacks. */ ahd_timer_stop(&ahd->reset_timer); ahd_timer_stop(&ahd->stat_timer); /* This will reset most registers to 0, but not all */ ahd_reset(ahd, /*reinit*/FALSE); } /* * Reset the controller and record some information about it * that is only available just after a reset. If "reinit" is * non-zero, this reset occurred after initial configuration * and the caller requests that the chip be fully reinitialized * to a runable state. Chip interrupts are *not* enabled after * a reinitialization. The caller must enable interrupts via * ahd_intr_enable(). 
 */
int
ahd_reset(struct ahd_softc *ahd, int reinit)
{
	u_int	 sxfrctl1;
	int	 wait;
	uint32_t cmd;

	/*
	 * Preserve the value of the SXFRCTL1 register for all channels.
	 * It contains settings that affect termination and we don't want
	 * to disturb the integrity of the bus.
	 */
	ahd_pause(ahd);
	ahd_update_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	sxfrctl1 = ahd_inb(ahd, SXFRCTL1);

	cmd = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2);
	if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) {
		uint32_t mod_cmd;

		/*
		 * A4 Razor #632
		 * During the assertion of CHIPRST, the chip
		 * does not disable its parity logic prior to
		 * the start of the reset.  This may cause a
		 * parity error to be detected and thus a
		 * spurious SERR or PERR assertion.  Disable
		 * PERR and SERR responses during the CHIPRST.
		 */
		mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN);
		ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND,
				     mod_cmd, /*bytes*/2);
	}
	ahd_outb(ahd, HCNTRL, CHIPRST | ahd->pause);

	/*
	 * Ensure that the reset has finished.  We delay 1000us
	 * prior to reading the register to make sure the chip
	 * has sufficiently completed its reset to handle register
	 * accesses.
	 */
	wait = 1000;
	do {
		ahd_delay(1000);
	} while (--wait && !(ahd_inb(ahd, HCNTRL) & CHIPRSTACK));

	if (wait == 0) {
		printk("%s: WARNING - Failed chip reset! "
		       "Trying to initialize anyway.\n", ahd_name(ahd));
	}
	ahd_outb(ahd, HCNTRL, ahd->pause);

	if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) {
		/*
		 * Clear any latched PCI error status and restore
		 * previous SERR and PERR response enables.
		 */
		ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1,
				     0xFF, /*bytes*/1);
		ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND,
				     cmd, /*bytes*/2);
	}

	/*
	 * Mode should be SCSI after a chip reset, but lets
	 * set it just to be safe.  We touch the MODE_PTR
	 * register directly so as to bypass the lazy update
	 * code in ahd_set_modes().
	 */
	ahd_known_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	ahd_outb(ahd, MODE_PTR,
		 ahd_build_mode_state(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI));

	/*
	 * Restore SXFRCTL1.
	 *
	 * We must always initialize STPWEN to 1 before we
	 * restore the saved values.  STPWEN is initialized
	 * to a tri-state condition which can only be cleared
	 * by turning it on.
	 */
	ahd_outb(ahd, SXFRCTL1, sxfrctl1|STPWEN);
	ahd_outb(ahd, SXFRCTL1, sxfrctl1);

	/* Determine chip configuration */
	ahd->features &= ~AHD_WIDE;
	if ((ahd_inb(ahd, SBLKCTL) & SELWIDE) != 0)
		ahd->features |= AHD_WIDE;

	/*
	 * If a recovery action has forced a chip reset,
	 * re-initialize the chip to our liking.
	 */
	if (reinit != 0)
		ahd_chip_init(ahd);

	return (0);
}

/*
 * Determine the number of SCBs available on the controller
 * by writing each SCB's tag into its SCB RAM slot and reading
 * it back; the probe stops when the readback no longer matches
 * (SCB RAM wrapped) or AHD_SCB_MAX is reached.
 */
static int
ahd_probe_scbs(struct ahd_softc *ahd) {
	int i;

	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
	for (i = 0; i < AHD_SCB_MAX; i++) {
		int j;

		ahd_set_scbptr(ahd, i);
		ahd_outw(ahd, SCB_BASE, i);
		for (j = 2; j < 64; j++)
			ahd_outb(ahd, SCB_BASE+j, 0);
		/* Start out life as unallocated (needing an abort) */
		ahd_outb(ahd, SCB_CONTROL, MK_MESSAGE);
		if (ahd_inw_scbram(ahd, SCB_BASE) != i)
			break;
		/* Re-check slot 0: aliasing there also indicates a wrap. */
		ahd_set_scbptr(ahd, 0);
		if (ahd_inw_scbram(ahd, SCB_BASE) != 0)
			break;
	}
	return (i);
}

/* Busdma callback: record the single segment's bus address. */
static void
ahd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	dma_addr_t *baddr;

	baddr = (dma_addr_t *)arg;
	*baddr = segs->ds_addr;
}

/* Place each hardware SCB into a known, quiescent state. */
static void
ahd_initialize_hscbs(struct ahd_softc *ahd)
{
	int i;

	for (i = 0; i < ahd->scb_data.maxhscbs; i++) {
		ahd_set_scbptr(ahd, i);

		/* Clear the control byte.
 */
		ahd_outb(ahd, SCB_CONTROL, 0);

		/* Set the next pointer */
		ahd_outw(ahd, SCB_NEXT, SCB_LIST_NULL);
	}
}

/*
 * Initialize the SCB data structures: probe the hardware SCB count,
 * set up the free-list bookkeeping, create the DMA tags for hardware
 * SCBs, S/G lists and sense buffers, and perform the initial SCB
 * allocation.  Returns 0 on success or an errno-style code.
 */
static int
ahd_init_scbdata(struct ahd_softc *ahd)
{
	struct scb_data *scb_data;
	int i;

	scb_data = &ahd->scb_data;
	TAILQ_INIT(&scb_data->free_scbs);
	for (i = 0; i < AHD_NUM_TARGETS * AHD_NUM_LUNS_NONPKT; i++)
		LIST_INIT(&scb_data->free_scb_lists[i]);
	LIST_INIT(&scb_data->any_dev_free_scb_list);
	SLIST_INIT(&scb_data->hscb_maps);
	SLIST_INIT(&scb_data->sg_maps);
	SLIST_INIT(&scb_data->sense_maps);

	/* Determine the number of hardware SCBs and initialize them */
	scb_data->maxhscbs = ahd_probe_scbs(ahd);
	if (scb_data->maxhscbs == 0) {
		printk("%s: No SCB space found\n", ahd_name(ahd));
		return (ENXIO);
	}

	ahd_initialize_hscbs(ahd);

	/*
	 * Create our DMA tags.  These tags define the kinds of device
	 * accessible memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 */

	/* DMA tag for our hardware scb structures */
	if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       PAGE_SIZE, /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->hscb_dmat) != 0) {
		goto error_exit;
	}

	/* init_level tracks how far we got, for ahd_fini_scbdata(). */
	scb_data->init_level++;

	/* DMA tag for our S/G structures.
 */
	if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/8,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       ahd_sglist_allocsize(ahd), /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sg_dmat) != 0) {
		goto error_exit;
	}
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MEMORY) != 0)
		printk("%s: ahd_sglist_allocsize = 0x%x\n", ahd_name(ahd),
		       ahd_sglist_allocsize(ahd));
#endif

	scb_data->init_level++;

	/* DMA tag for our sense buffers.  We allocate in page sized chunks */
	if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       PAGE_SIZE, /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sense_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Perform initial CCB allocation */
	ahd_alloc_scbs(ahd);

	if (scb_data->numscbs == 0) {
		printk("%s: ahd_init_scbdata - "
		       "Unable to allocate initial scbs\n",
		       ahd_name(ahd));
		goto error_exit;
	}

	/*
	 * Note that we were successful
	 */
	return (0);

error_exit:
	/* Partial teardown is handled later via ahd_fini_scbdata(). */
	return (ENOMEM);
}

/*
 * Locate the SCB carrying the given tag, searching the pending list,
 * the per-device collision free lists, and the generic free list.
 */
static struct scb *
ahd_find_scb_by_tag(struct ahd_softc *ahd, u_int tag)
{
	struct scb *scb;

	/*
	 * Look on the pending list.
	 */
	LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
		if (SCB_GET_TAG(scb) == tag)
			return (scb);
	}

	/*
	 * Then on all of the collision free lists.
	 */
	TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
		struct scb *list_scb;

		list_scb = scb;
		do {
			if (SCB_GET_TAG(list_scb) == tag)
				return (list_scb);
			list_scb = LIST_NEXT(list_scb, collision_links);
		} while (list_scb);
	}

	/*
	 * And finally on the generic free list.
	 */
	LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) {
		if (SCB_GET_TAG(scb) == tag)
			return (scb);
	}

	return (NULL);
}

/*
 * Release SCB resources in reverse order of their creation.
 * init_level records how far ahd_init_scbdata() progressed;
 * each case falls through to undo the earlier stages.
 */
static void
ahd_fini_scbdata(struct ahd_softc *ahd)
{
	struct scb_data *scb_data;

	scb_data = &ahd->scb_data;
	/*
	 * NOTE(review): scb_data is the address of an embedded member and
	 * can never be NULL here; this check appears to be vestigial.
	 */
	if (scb_data == NULL)
		return;

	switch (scb_data->init_level) {
	default:
	case 7:
	{
		struct map_node *sns_map;

		while ((sns_map = SLIST_FIRST(&scb_data->sense_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&scb_data->sense_maps, links);
			ahd_dmamap_unload(ahd, scb_data->sense_dmat,
					  sns_map->dmamap);
			ahd_dmamem_free(ahd, scb_data->sense_dmat,
					sns_map->vaddr, sns_map->dmamap);
			kfree(sns_map);
		}
		ahd_dma_tag_destroy(ahd, scb_data->sense_dmat);
		/* FALLTHROUGH */
	}
	case 6:
	{
		struct map_node *sg_map;

		while ((sg_map = SLIST_FIRST(&scb_data->sg_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
			ahd_dmamap_unload(ahd, scb_data->sg_dmat,
					  sg_map->dmamap);
			ahd_dmamem_free(ahd, scb_data->sg_dmat,
					sg_map->vaddr, sg_map->dmamap);
			kfree(sg_map);
		}
		ahd_dma_tag_destroy(ahd, scb_data->sg_dmat);
		/* FALLTHROUGH */
	}
	case 5:
	{
		struct map_node *hscb_map;

		while ((hscb_map = SLIST_FIRST(&scb_data->hscb_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&scb_data->hscb_maps, links);
			ahd_dmamap_unload(ahd, scb_data->hscb_dmat,
					  hscb_map->dmamap);
			ahd_dmamem_free(ahd, scb_data->hscb_dmat,
					hscb_map->vaddr, hscb_map->dmamap);
			kfree(hscb_map);
		}
		ahd_dma_tag_destroy(ahd, scb_data->hscb_dmat);
		/* FALLTHROUGH */
	}
	case 4:
	case 3:
	case 2:
	case 1:
	case 0:
		break;
	}
}

/*
 * DSP filter Bypass must be enabled until the first selection
 * after a change in bus mode (Razor #491 and #493).
 */
static void
ahd_setup_iocell_workaround(struct ahd_softc *ahd)
{
	ahd_mode_state saved_modes;

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
	/* Enable the DSP bypass and disable offset cancellation until
	 * the first selection tells us the actual bus mode. */
	ahd_outb(ahd, DSPDATACTL, ahd_inb(ahd, DSPDATACTL)
	       | BYPASSENAB | RCVROFFSTDIS | XMITOFFSTDIS);
	/* Interrupt on selection in/out so we notice that first selection. */
	ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) | (ENSELDO|ENSELDI));
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MISC) != 0)
		printk("%s: Setting up iocell workaround\n", ahd_name(ahd));
#endif
	ahd_restore_modes(ahd, saved_modes);
	ahd->flags &= ~AHD_HAD_FIRST_SEL;
}

/*
 * Complete the iocell workaround on the first selection: drop the
 * DSP bypass when running in LVD (ENAB40) mode and stop watching
 * for selection events.  Idempotent via AHD_HAD_FIRST_SEL.
 */
static void
ahd_iocell_first_selection(struct ahd_softc *ahd)
{
	ahd_mode_state	saved_modes;
	u_int		sblkctl;

	if ((ahd->flags & AHD_HAD_FIRST_SEL) != 0)
		return;
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	sblkctl = ahd_inb(ahd, SBLKCTL);
	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MISC) != 0)
		printk("%s: iocell first selection\n", ahd_name(ahd));
#endif
	if ((sblkctl & ENAB40) != 0) {
		ahd_outb(ahd, DSPDATACTL,
			 ahd_inb(ahd, DSPDATACTL) & ~BYPASSENAB);
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MISC) != 0)
			printk("%s: BYPASS now disabled\n", ahd_name(ahd));
#endif
	}
	ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) & ~(ENSELDO|ENSELDI));
	ahd_outb(ahd, CLRINT, CLRSCSIINT);
	ahd_restore_modes(ahd, saved_modes);
	ahd->flags |= AHD_HAD_FIRST_SEL;
}

/*************************** SCB Management ***********************************/
/*
 * Put an SCB on the collision free list for col_idx.  Only the head
 * of each collision list is linked onto the shared free_scbs tailq.
 */
static void
ahd_add_col_list(struct ahd_softc *ahd, struct scb *scb, u_int col_idx)
{
	struct scb_list *free_list;
	struct scb_tailq *free_tailq;
	struct scb *first_scb;

	scb->flags |= SCB_ON_COL_LIST;
	AHD_SET_SCB_COL_IDX(scb, col_idx);
	free_list = &ahd->scb_data.free_scb_lists[col_idx];
	free_tailq = &ahd->scb_data.free_scbs;
	first_scb = LIST_FIRST(free_list);
	if (first_scb != NULL) {
		LIST_INSERT_AFTER(first_scb, scb, collision_links);
	} else {
		LIST_INSERT_HEAD(free_list, scb, collision_links);
		TAILQ_INSERT_TAIL(free_tailq, scb, links.tqe);
	}
}
/*
 * Remove an SCB from its collision free list, promoting the next SCB
 * on that list (if any) to the representative slot on the generic
 * free tailq so the column remains allocatable.
 */
static void
ahd_rem_col_list(struct ahd_softc *ahd, struct scb *scb)
{
	struct scb_list *free_list;
	struct scb_tailq *free_tailq;
	struct scb *first_scb;
	u_int col_idx;

	scb->flags &= ~SCB_ON_COL_LIST;
	col_idx = AHD_GET_SCB_COL_IDX(ahd, scb);
	free_list = &ahd->scb_data.free_scb_lists[col_idx];
	free_tailq = &ahd->scb_data.free_scbs;
	first_scb = LIST_FIRST(free_list);
	if (first_scb == scb) {
		struct scb *next_scb;

		/*
		 * Maintain order in the collision free
		 * lists for fairness if this device has
		 * other colliding tags active.
		 */
		next_scb = LIST_NEXT(scb, collision_links);
		if (next_scb != NULL) {
			TAILQ_INSERT_AFTER(free_tailq,
					   scb, next_scb, links.tqe);
		}
		TAILQ_REMOVE(free_tailq, scb, links.tqe);
	}
	LIST_REMOVE(scb, collision_links);
}

/*
 * Get a free scb. If there are none, see if we can allocate a new SCB.
 */
struct scb *
ahd_get_scb(struct ahd_softc *ahd, u_int col_idx)
{
	struct scb *scb;
	int tries;

	tries = 0;
look_again:
	/* Prefer an SCB whose column does not collide with col_idx. */
	TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
		if (AHD_GET_SCB_COL_IDX(ahd, scb) != col_idx) {
			ahd_rem_col_list(ahd, scb);
			goto found;
		}
	}
	if ((scb = LIST_FIRST(&ahd->scb_data.any_dev_free_scb_list)) == NULL) {

		/* Allocate more SCBs at most once per call. */
		if (tries++ != 0)
			return (NULL);
		ahd_alloc_scbs(ahd);
		goto look_again;
	}
	LIST_REMOVE(scb, links.le);
	/* If our colliding partner is free, park it on the collision
	 * list for col_idx so it is skipped while we are active. */
	if (col_idx != AHD_NEVER_COL_IDX
	 && (scb->col_scb != NULL)
	 && (scb->col_scb->flags & SCB_ACTIVE) == 0) {
		LIST_REMOVE(scb->col_scb, links.le);
		ahd_add_col_list(ahd, scb->col_scb, col_idx);
	}
found:
	scb->flags |= SCB_ACTIVE;
	return (scb);
}

/*
 * Return an SCB resource to the free list.
 */
void
ahd_free_scb(struct ahd_softc *ahd, struct scb *scb)
{
	/* Clean up for the next user */
	scb->flags = SCB_FLAG_NONE;
	scb->hscb->control = 0;
	ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = NULL;

	if (scb->col_scb == NULL) {

		/*
		 * No collision possible.  Just free normally.
		 */
		LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
				 scb, links.le);
	} else if ((scb->col_scb->flags & SCB_ON_COL_LIST) != 0) {

		/*
		 * The SCB we might have collided with is on
		 * a free collision list.  Put both SCBs on
		 * the generic list.
		 */
		ahd_rem_col_list(ahd, scb->col_scb);
		LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
				 scb, links.le);
		LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
				 scb->col_scb, links.le);
	} else if ((scb->col_scb->flags
		  & (SCB_PACKETIZED|SCB_ACTIVE)) == SCB_ACTIVE
		&& (scb->col_scb->hscb->control & TAG_ENB) != 0) {

		/*
		 * The SCB we might collide with on the next allocation
		 * is still active in a non-packetized, tagged, context.
		 * Put us on the SCB collision list.
		 */
		ahd_add_col_list(ahd, scb,
				 AHD_GET_SCB_COL_IDX(ahd, scb->col_scb));
	} else {
		/*
		 * The SCB we might collide with on the next allocation
		 * is either active in a packetized context, or free.
		 * Since we can't collide, put this SCB on the generic
		 * free list.
		 */
		LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
				 scb, links.le);
	}

	ahd_platform_scb_free(ahd, scb);
}

/*
 * Grow the SCB pool.  Carves new SCBs out of page-sized DMA chunks of
 * hardware SCBs, S/G lists and sense buffers, allocating a fresh chunk
 * of each kind only when the previous one is exhausted.  Allocation
 * failures simply leave the pool at its current size.
 */
static void
ahd_alloc_scbs(struct ahd_softc *ahd)
{
	struct scb_data *scb_data;
	struct scb	*next_scb;
	struct hardware_scb *hscb;
	struct map_node *hscb_map;
	struct map_node *sg_map;
	struct map_node *sense_map;
	uint8_t		*segs;
	uint8_t		*sense_data;
	dma_addr_t	 hscb_busaddr;
	dma_addr_t	 sg_busaddr;
	dma_addr_t	 sense_busaddr;
	int		 newcount;
	int		 i;

	scb_data = &ahd->scb_data;
	if (scb_data->numscbs >= AHD_SCB_MAX_ALLOC)
		/* Can't allocate any more */
		return;

	if (scb_data->scbs_left != 0) {
		int offset;

		/* Continue carving from the current hardware SCB page. */
		offset = (PAGE_SIZE / sizeof(*hscb)) - scb_data->scbs_left;
		hscb_map = SLIST_FIRST(&scb_data->hscb_maps);
		hscb = &((struct hardware_scb *)hscb_map->vaddr)[offset];
		hscb_busaddr = hscb_map->physaddr + (offset * sizeof(*hscb));
	} else {
		hscb_map = kmalloc(sizeof(*hscb_map), GFP_ATOMIC);

		if (hscb_map == NULL)
			return;

		/* Allocate the next batch of hardware SCBs */
		if (ahd_dmamem_alloc(ahd, scb_data->hscb_dmat, (void
**)&hscb_map->vaddr,
				     BUS_DMA_NOWAIT, &hscb_map->dmamap) != 0) {
			kfree(hscb_map);
			return;
		}

		SLIST_INSERT_HEAD(&scb_data->hscb_maps, hscb_map, links);

		ahd_dmamap_load(ahd, scb_data->hscb_dmat, hscb_map->dmamap,
				hscb_map->vaddr, PAGE_SIZE, ahd_dmamap_cb,
				&hscb_map->physaddr, /*flags*/0);

		hscb = (struct hardware_scb *)hscb_map->vaddr;
		hscb_busaddr = hscb_map->physaddr;
		scb_data->scbs_left = PAGE_SIZE / sizeof(*hscb);
	}

	if (scb_data->sgs_left != 0) {
		int offset;

		/* Continue carving from the current S/G chunk. */
		offset = ((ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd))
			  - scb_data->sgs_left) * ahd_sglist_size(ahd);
		sg_map = SLIST_FIRST(&scb_data->sg_maps);
		segs = sg_map->vaddr + offset;
		sg_busaddr = sg_map->physaddr + offset;
	} else {
		sg_map = kmalloc(sizeof(*sg_map), GFP_ATOMIC);

		if (sg_map == NULL)
			return;

		/* Allocate the next batch of S/G lists */
		if (ahd_dmamem_alloc(ahd, scb_data->sg_dmat,
				     (void **)&sg_map->vaddr,
				     BUS_DMA_NOWAIT, &sg_map->dmamap) != 0) {
			kfree(sg_map);
			return;
		}

		SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);

		ahd_dmamap_load(ahd, scb_data->sg_dmat, sg_map->dmamap,
				sg_map->vaddr, ahd_sglist_allocsize(ahd),
				ahd_dmamap_cb, &sg_map->physaddr, /*flags*/0);

		segs = sg_map->vaddr;
		sg_busaddr = sg_map->physaddr;
		scb_data->sgs_left =
		    ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd);
#ifdef AHD_DEBUG
		if (ahd_debug & AHD_SHOW_MEMORY)
			printk("Mapped SG data\n");
#endif
	}

	if (scb_data->sense_left != 0) {
		int offset;

		/* Continue carving from the current sense buffer page. */
		offset = PAGE_SIZE - (AHD_SENSE_BUFSIZE * scb_data->sense_left);
		sense_map = SLIST_FIRST(&scb_data->sense_maps);
		sense_data = sense_map->vaddr + offset;
		sense_busaddr = sense_map->physaddr + offset;
	} else {
		sense_map = kmalloc(sizeof(*sense_map), GFP_ATOMIC);

		if (sense_map == NULL)
			return;

		/* Allocate the next batch of sense buffers */
		if (ahd_dmamem_alloc(ahd, scb_data->sense_dmat,
				     (void **)&sense_map->vaddr,
				     BUS_DMA_NOWAIT, &sense_map->dmamap) != 0) {
			kfree(sense_map);
			return;
		}

		SLIST_INSERT_HEAD(&scb_data->sense_maps, sense_map, links);

		ahd_dmamap_load(ahd, scb_data->sense_dmat, sense_map->dmamap,
				sense_map->vaddr, PAGE_SIZE, ahd_dmamap_cb,
				&sense_map->physaddr, /*flags*/0);

		sense_data = sense_map->vaddr;
		sense_busaddr = sense_map->physaddr;
		scb_data->sense_left = PAGE_SIZE / AHD_SENSE_BUFSIZE;
#ifdef AHD_DEBUG
		if (ahd_debug & AHD_SHOW_MEMORY)
			printk("Mapped sense data\n");
#endif
	}

	/* The number of SCBs we can build is bounded by whichever
	 * resource pool (sense, hscb, S/G, tag space) runs out first. */
	newcount = min(scb_data->sense_left, scb_data->scbs_left);
	newcount = min(newcount, scb_data->sgs_left);
	newcount = min(newcount, (AHD_SCB_MAX_ALLOC - scb_data->numscbs));
	for (i = 0; i < newcount; i++) {
		struct scb_platform_data *pdata;
		u_int col_tag;
#ifndef __linux__
		int error;
#endif

		next_scb = kmalloc(sizeof(*next_scb), GFP_ATOMIC);
		if (next_scb == NULL)
			break;

		pdata = kmalloc(sizeof(*pdata), GFP_ATOMIC);
		if (pdata == NULL) {
			kfree(next_scb);
			break;
		}
		next_scb->platform_data = pdata;
		next_scb->hscb_map = hscb_map;
		next_scb->sg_map = sg_map;
		next_scb->sense_map = sense_map;
		next_scb->sg_list = segs;
		next_scb->sense_data = sense_data;
		next_scb->sense_busaddr = sense_busaddr;
		memset(hscb, 0, sizeof(*hscb));
		next_scb->hscb = hscb;
		hscb->hscb_busaddr = ahd_htole32(hscb_busaddr);

		/*
		 * The sequencer always starts with the second entry.
		 * The first entry is embedded in the scb.
		 */
		next_scb->sg_list_busaddr = sg_busaddr;
		if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
			next_scb->sg_list_busaddr
			    += sizeof(struct ahd_dma64_seg);
		else
			next_scb->sg_list_busaddr += sizeof(struct ahd_dma_seg);
		next_scb->ahd_softc = ahd;
		next_scb->flags = SCB_FLAG_NONE;
#ifndef __linux__
		error = ahd_dmamap_create(ahd, ahd->buffer_dmat, /*flags*/0,
					  &next_scb->dmamap);
		if (error != 0) {
			kfree(next_scb);
			kfree(pdata);
			break;
		}
#endif
		next_scb->hscb->tag = ahd_htole16(scb_data->numscbs);
		/* Pair each SCB with the one whose tag differs only in
		 * bit 8; these two collide in the sequencer's lookup. */
		col_tag = scb_data->numscbs ^ 0x100;
		next_scb->col_scb = ahd_find_scb_by_tag(ahd, col_tag);
		if (next_scb->col_scb != NULL)
			next_scb->col_scb->col_scb = next_scb;
		/* Freeing the new SCB links it onto the free lists. */
		ahd_free_scb(ahd, next_scb);
		hscb++;
		hscb_busaddr += sizeof(*hscb);
		segs += ahd_sglist_size(ahd);
		sg_busaddr += ahd_sglist_size(ahd);
		sense_data += AHD_SENSE_BUFSIZE;
		sense_busaddr += AHD_SENSE_BUFSIZE;
		scb_data->numscbs++;
		scb_data->sense_left--;
		scb_data->scbs_left--;
		scb_data->sgs_left--;
	}
}

/*
 * Format a one-line human readable description of the controller
 * (chip name, bus width, SCSI id, bus description, SCB count) into buf.
 * NOTE(review): buf is assumed large enough for the full string --
 * callers must size it accordingly.
 */
void
ahd_controller_info(struct ahd_softc *ahd, char *buf)
{
	const char *speed;
	const char *type;
	int len;

	len = sprintf(buf, "%s: ",
		      ahd_chip_names[ahd->chip & AHD_CHIPID_MASK]);
	buf += len;

	speed = "Ultra320 ";
	if ((ahd->features & AHD_WIDE) != 0) {
		type = "Wide ";
	} else {
		type = "Single ";
	}
	len = sprintf(buf, "%s%sChannel %c, SCSI Id=%d, ",
		      speed, type, ahd->channel, ahd->our_id);
	buf += len;

	sprintf(buf, "%s, %d SCBs", ahd->bus_description,
		ahd->scb_data.maxhscbs);
}

/* Human-readable names for the flexport termination status fields. */
static const char *channel_strings[] = {
	"Primary Low",
	"Primary High",
	"Secondary Low",
	"Secondary High"
};

static const char *termstat_strings[] = {
	"Terminated Correctly",
	"Over Terminated",
	"Under Terminated",
	"Not Configured"
};

/***************************** Timer Facilities *******************************/
#define ahd_timer_init init_timer
#define ahd_timer_stop del_timer_sync
typedef void ahd_linux_callback_t (u_long);

/*
 * (Re)arm a kernel timer to fire after usec microseconds, invoking
 * func(arg).  Any pending instance of the timer is cancelled first.
 */
static void
ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg)
{
	struct ahd_softc *ahd;

	/* NOTE(review): ahd is assigned but otherwise unused here. */
	ahd = (struct ahd_softc
*)arg;
	del_timer(timer);
	timer->data = (u_long)arg;
	/* Convert microseconds to jiffies. */
	timer->expires = jiffies + (usec * HZ)/1000000;
	timer->function = (ahd_linux_callback_t*)func;
	add_timer(timer);
}

/*
 * Start the board, ready for normal operation
 */
int
ahd_init(struct ahd_softc *ahd)
{
	uint8_t  *next_vaddr;
	dma_addr_t next_baddr;
	size_t	 driver_data_size;
	int	 i;
	int	 error;
	u_int	 warn_user;
	uint8_t	 current_sensing;
	uint8_t	 fstat;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);

	/* Scratch space used to save/restore the sequencer stack. */
	ahd->stack_size = ahd_probe_stack_size(ahd);
	ahd->saved_stack = kmalloc(ahd->stack_size * sizeof(uint16_t),
				   GFP_ATOMIC);
	if (ahd->saved_stack == NULL)
		return (ENOMEM);

	/*
	 * Verify that the compiler hasn't over-aggressively
	 * padded important structures.
	 */
	if (sizeof(struct hardware_scb) != 64)
		panic("Hardware SCB size is incorrect");

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_DEBUG_SEQUENCER) != 0)
		ahd->flags |= AHD_SEQUENCER_DEBUG;
#endif

	/*
	 * Default to allowing initiator operations.
	 */
	ahd->flags |= AHD_INITIATORROLE;

	/*
	 * Only allow target mode features if this unit has them enabled.
	 */
	if ((AHD_TMODE_ENABLE & (0x1 << ahd->unit)) == 0)
		ahd->features &= ~AHD_TARGETMODE;

#ifndef __linux__
	/* DMA tag for mapping buffers into device visible space. */
	if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/ahd->flags & AHD_39BIT_ADDRESSING
					? (dma_addr_t)0x7FFFFFFFFFULL
					: BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/(AHD_NSEG - 1) * PAGE_SIZE,
			       /*nsegments*/AHD_NSEG,
			       /*maxsegsz*/AHD_MAXTRANSFER_SIZE,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &ahd->buffer_dmat) != 0) {
		return (ENOMEM);
	}
#endif

	ahd->init_level++;

	/*
	 * DMA tag for our command fifos and other data in system memory
	 * the card's sequencer must be able to access.  For initiator
	 * roles, we need to allocate space for the qoutfifo.  When providing
	 * for the target mode role, we must additionally provide space for
	 * the incoming target command fifo.
	 */
	driver_data_size = AHD_SCB_MAX * sizeof(*ahd->qoutfifo)
			 + sizeof(struct hardware_scb);
	if ((ahd->features & AHD_TARGETMODE) != 0)
		driver_data_size += AHD_TMODE_CMDS * sizeof(struct target_cmd);
	if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0)
		driver_data_size += PKT_OVERRUN_BUFSIZE;
	if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       driver_data_size,
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &ahd->shared_data_dmat) != 0) {
		return (ENOMEM);
	}

	ahd->init_level++;

	/* Allocation of driver data */
	if (ahd_dmamem_alloc(ahd, ahd->shared_data_dmat,
			     (void **)&ahd->shared_data_map.vaddr,
			     BUS_DMA_NOWAIT,
			     &ahd->shared_data_map.dmamap) != 0) {
		return (ENOMEM);
	}

	ahd->init_level++;

	/* And permanently map it in */
	ahd_dmamap_load(ahd, ahd->shared_data_dmat,
			ahd->shared_data_map.dmamap,
			ahd->shared_data_map.vaddr, driver_data_size,
			ahd_dmamap_cb, &ahd->shared_data_map.physaddr,
			/*flags*/0);
	/* Carve the shared DMA chunk: qoutfifo first, then (optionally)
	 * the target command fifo and packet overrun buffer. */
	ahd->qoutfifo = (struct ahd_completion *)ahd->shared_data_map.vaddr;
	next_vaddr = (uint8_t *)&ahd->qoutfifo[AHD_QOUT_SIZE];
	next_baddr = ahd->shared_data_map.physaddr
		   + AHD_QOUT_SIZE*sizeof(struct ahd_completion);
	if ((ahd->features & AHD_TARGETMODE) != 0) {
		ahd->targetcmds = (struct target_cmd *)next_vaddr;
		next_vaddr += AHD_TMODE_CMDS * sizeof(struct target_cmd);
		next_baddr += AHD_TMODE_CMDS * sizeof(struct target_cmd);
	}

	if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) {
		ahd->overrun_buf = next_vaddr;
		next_vaddr += PKT_OVERRUN_BUFSIZE;
		next_baddr += PKT_OVERRUN_BUFSIZE;
	}

	/*
	 * We need one SCB to serve as the "next SCB".  Since the
	 * tag identifier in this SCB will never be used, there is
	 * no point in using a valid HSCB tag from an SCB pulled from
	 * the standard free pool.  So, we allocate this "sentinel"
	 * specially from the DMA safe memory chunk used for the QOUTFIFO.
	 */
	ahd->next_queued_hscb = (struct hardware_scb *)next_vaddr;
	ahd->next_queued_hscb_map = &ahd->shared_data_map;
	ahd->next_queued_hscb->hscb_busaddr = ahd_htole32(next_baddr);

	ahd->init_level++;

	/* Allocate SCB data now that buffer_dmat is initialized */
	if (ahd_init_scbdata(ahd) != 0)
		return (ENOMEM);

	if ((ahd->flags & AHD_INITIATORROLE) == 0)
		ahd->flags &= ~AHD_RESET_BUS_A;

	/*
	 * Before committing these settings to the chip, give
	 * the OSM one last chance to modify our configuration.
	 */
	ahd_platform_init(ahd);

	/* Bring up the chip. */
	ahd_chip_init(ahd);

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);

	if ((ahd->flags & AHD_CURRENT_SENSING) == 0)
		goto init_done;

	/*
	 * Verify termination based on current draw and
	 * warn user if the bus is over/under terminated.
	 */
	error = ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL,
				   CURSENSE_ENB);
	if (error != 0) {
		printk("%s: current sensing timeout 1\n", ahd_name(ahd));
		goto init_done;
	}
	/* Poll (up to 20 iterations) for the flexport to go idle. */
	for (i = 20, fstat = FLX_FSTAT_BUSY;
	     (fstat & FLX_FSTAT_BUSY) != 0 && i; i--) {
		error = ahd_read_flexport(ahd, FLXADDR_FLEXSTAT, &fstat);
		if (error != 0) {
			printk("%s: current sensing timeout 2\n",
			       ahd_name(ahd));
			goto init_done;
		}
	}
	if (i == 0) {
		printk("%s: Timedout during current-sensing test\n",
		       ahd_name(ahd));
		goto init_done;
	}

	/* Latch Current Sensing status. */
	error = ahd_read_flexport(ahd, FLXADDR_CURRENT_STAT, &current_sensing);
	if (error != 0) {
		printk("%s: current sensing timeout 3\n", ahd_name(ahd));
		goto init_done;
	}

	/* Disable current sensing.
	 */
	ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, 0);

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_TERMCTL) != 0) {
		printk("%s: current_sensing == 0x%x\n",
		       ahd_name(ahd), current_sensing);
	}
#endif
	/* Examine the 2-bit termination status of each of the four
	 * channel segments packed into current_sensing. */
	warn_user = 0;
	for (i = 0; i < 4; i++, current_sensing >>= FLX_CSTAT_SHIFT) {
		u_int term_stat;

		term_stat = (current_sensing & FLX_CSTAT_MASK);
		switch (term_stat) {
		case FLX_CSTAT_OVER:
		case FLX_CSTAT_UNDER:
			warn_user++;
			/* FALLTHROUGH */
		case FLX_CSTAT_INVALID:
		case FLX_CSTAT_OKAY:
			if (warn_user == 0 && bootverbose == 0)
				break;
			printk("%s: %s Channel %s\n", ahd_name(ahd),
			       channel_strings[i], termstat_strings[term_stat]);
			break;
		}
	}
	if (warn_user) {
		printk("%s: WARNING. Termination is not configured correctly.\n"
		       "%s: WARNING. SCSI bus operations may FAIL.\n",
		       ahd_name(ahd), ahd_name(ahd));
	}
init_done:
	ahd_restart(ahd);
	ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US,
			ahd_stat_timer, ahd);
	return (0);
}

/*
 * (Re)initialize chip state after a chip reset.
 */
static void
ahd_chip_init(struct ahd_softc *ahd)
{
	uint32_t busaddr;
	u_int	 sxfrctl1;
	u_int	 scsiseq_template;
	u_int	 wait;
	u_int	 i;
	u_int	 target;

	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	/*
	 * Take the LED out of diagnostic mode
	 */
	ahd_outb(ahd, SBLKCTL, ahd_inb(ahd, SBLKCTL) & ~(DIAGLEDEN|DIAGLEDON));

	/*
	 * Return HS_MAILBOX to its default value.
	 */
	ahd->hs_mailbox = 0;
	ahd_outb(ahd, HS_MAILBOX, 0);

	/* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1. */
	ahd_outb(ahd, IOWNID, ahd->our_id);
	ahd_outb(ahd, TOWNID, ahd->our_id);
	sxfrctl1 = (ahd->flags & AHD_TERM_ENB_A) != 0 ? STPWEN : 0;
	sxfrctl1 |= (ahd->flags & AHD_SPCHK_ENB_A) != 0 ? ENSPCHK : 0;
	if ((ahd->bugs & AHD_LONG_SETIMO_BUG)
	 && (ahd->seltime != STIMESEL_MIN)) {
		/*
		 * The selection timer duration is twice as long
		 * as it should be.  Halve it by adding "1" to
		 * the user specified setting.
		 */
		sxfrctl1 |= ahd->seltime + STIMESEL_BUG_ADJ;
	} else {
		sxfrctl1 |= ahd->seltime;
	}

	ahd_outb(ahd, SXFRCTL0, DFON);
	ahd_outb(ahd, SXFRCTL1, sxfrctl1|ahd->seltime|ENSTIMER|ACTNEGEN);
	ahd_outb(ahd, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);

	/*
	 * Now that termination is set, wait for up
	 * to 500ms for our transceivers to settle.  If
	 * the adapter does not have a cable attached,
	 * the transceivers may never settle, so don't
	 * complain if we fail here.
	 */
	for (wait = 10000;
	     (ahd_inb(ahd, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
	     wait--)
		ahd_delay(100);

	/* Clear any false bus resets due to the transceivers settling */
	ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI);
	ahd_outb(ahd, CLRINT, CLRSCSIINT);

	/* Initialize mode specific S/G state (one pass per data FIFO). */
	for (i = 0; i < 2; i++) {
		ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i);
		ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
		ahd_outb(ahd, SG_STATE, 0);
		ahd_outb(ahd, CLRSEQINTSRC, 0xFF);
		ahd_outb(ahd, SEQIMODE,
			 ENSAVEPTRS|ENCFG4DATA|ENCFG4ISTAT
			|ENCFG4TSTAT|ENCFG4ICMD|ENCFG4TCMD);
	}

	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
	ahd_outb(ahd, DSCOMMAND0, ahd_inb(ahd, DSCOMMAND0)|MPARCKEN|CACHETHEN);
	ahd_outb(ahd, DFF_THRSH, RD_DFTHRSH_75|WR_DFTHRSH_75);
	ahd_outb(ahd, SIMODE0, ENIOERR|ENOVERRUN);
	ahd_outb(ahd, SIMODE3, ENNTRAMPERR|ENOSRAMPERR);
	if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) {
		ahd_outb(ahd, OPTIONMODE, AUTOACKEN|AUTO_MSGOUT_DE);
	} else {
		ahd_outb(ahd, OPTIONMODE, AUTOACKEN|BUSFREEREV|AUTO_MSGOUT_DE);
	}
	ahd_outb(ahd, SCSCHKN, CURRFIFODEF|WIDERESEN|SHVALIDSTDIS);
	if ((ahd->chip & AHD_BUS_MASK) == AHD_PCIX)
		/*
		 * Do not issue a target abort when a split completion
		 * error occurs.  Let our PCIX interrupt handler deal
		 * with it instead. H2A4 Razor #625
		 */
		ahd_outb(ahd, PCIXCTL, ahd_inb(ahd, PCIXCTL) | SPLTSTADIS);

	if ((ahd->bugs & AHD_LQOOVERRUN_BUG) != 0)
		ahd_outb(ahd, LQOSCSCTL, LQONOCHKOVER);

	/*
	 * Tweak IOCELL settings.
	 */
	if ((ahd->flags & AHD_HP_BOARD) != 0) {
		for (i = 0; i < NUMDSPS; i++) {
			ahd_outb(ahd, DSPSELECT, i);
			ahd_outb(ahd, WRTBIASCTL, WRTBIASCTL_HP_DEFAULT);
		}
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MISC) != 0)
			printk("%s: WRTBIASCTL now 0x%x\n", ahd_name(ahd),
			       WRTBIASCTL_HP_DEFAULT);
#endif
	}
	ahd_setup_iocell_workaround(ahd);

	/*
	 * Enable LQI Manager interrupts.
	 */
	ahd_outb(ahd, LQIMODE1, ENLQIPHASE_LQ|ENLQIPHASE_NLQ|ENLIQABORT
			      | ENLQICRCI_LQ|ENLQICRCI_NLQ|ENLQIBADLQI
			      | ENLQIOVERI_LQ|ENLQIOVERI_NLQ);
	ahd_outb(ahd, LQOMODE0, ENLQOATNLQ|ENLQOATNPKT|ENLQOTCRC);
	/*
	 * We choose to have the sequencer catch LQOPHCHGINPKT errors
	 * manually for the command phase at the start of a packetized
	 * selection case.  ENLQOBUSFREE should be made redundant by
	 * the BUSFREE interrupt, but it seems that some LQOBUSFREE
	 * events fail to assert the BUSFREE interrupt so we must
	 * also enable LQOBUSFREE interrupts.
	 */
	ahd_outb(ahd, LQOMODE1, ENLQOBUSFREE);

	/*
	 * Setup sequencer interrupt handlers.
	 */
	ahd_outw(ahd, INTVEC1_ADDR, ahd_resolve_seqaddr(ahd, LABEL_seq_isr));
	ahd_outw(ahd, INTVEC2_ADDR, ahd_resolve_seqaddr(ahd, LABEL_timer_isr));

	/*
	 * Setup SCB Offset registers.
	 */
	if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) {
		ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb,
					       pkt_long_lun));
	} else {
		ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb, lun));
	}
	ahd_outb(ahd, CMDLENPTR, offsetof(struct hardware_scb, cdb_len));
	ahd_outb(ahd, ATTRPTR, offsetof(struct hardware_scb, task_attribute));
	ahd_outb(ahd, FLAGPTR, offsetof(struct hardware_scb, task_management));
	ahd_outb(ahd, CMDPTR, offsetof(struct hardware_scb,
				       shared_data.idata.cdb));
	ahd_outb(ahd, QNEXTPTR,
		 offsetof(struct hardware_scb, next_hscb_busaddr));
	ahd_outb(ahd, ABRTBITPTR, MK_MESSAGE_BIT_OFFSET);
	ahd_outb(ahd, ABRTBYTEPTR, offsetof(struct hardware_scb, control));
	if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) {
		ahd_outb(ahd, LUNLEN,
			 sizeof(ahd->next_queued_hscb->pkt_long_lun) - 1);
	} else {
		ahd_outb(ahd, LUNLEN, LUNLEN_SINGLE_LEVEL_LUN);
	}
	ahd_outb(ahd, CDBLIMIT, SCB_CDB_LEN_PTR - 1);
	ahd_outb(ahd, MAXCMD, 0xFF);
	ahd_outb(ahd, SCBAUTOPTR,
		 AUSCBPTR_EN | offsetof(struct hardware_scb, tag));

	/* We haven't been enabled for target mode yet. */
	ahd_outb(ahd, MULTARGID, 0);
	ahd_outb(ahd, MULTARGID + 1, 0);

	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	/* Initialize the negotiation table. */
	if ((ahd->features & AHD_NEW_IOCELL_OPTS) == 0) {
		/*
		 * Clear the spare bytes in the neg table to avoid
		 * spurious parity errors.
		 */
		for (target = 0; target < AHD_NUM_TARGETS; target++) {
			ahd_outb(ahd, NEGOADDR, target);
			ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PER_DEV0);
			for (i = 0; i < AHD_NUM_PER_DEV_ANNEXCOLS; i++)
				ahd_outb(ahd, ANNEXDAT, 0);
		}
	}
	/* Load each target's current transfer settings into the chip. */
	for (target = 0; target < AHD_NUM_TARGETS; target++) {
		struct	 ahd_devinfo devinfo;
		struct	 ahd_initiator_tinfo *tinfo;
		struct	 ahd_tmode_tstate *tstate;

		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
					    target, &tstate);
		ahd_compile_devinfo(&devinfo, ahd->our_id,
				    target, CAM_LUN_WILDCARD,
				    'A', ROLE_INITIATOR);
		ahd_update_neg_table(ahd, &devinfo, &tinfo->curr);
	}

	ahd_outb(ahd, CLRSINT3, NTRAMPERR|OSRAMPERR);
	ahd_outb(ahd, CLRINT, CLRSCSIINT);

#ifdef NEEDS_MORE_TESTING
	/*
	 * Always enable abort on incoming L_Qs if this feature is
	 * supported.  We use this to catch invalid SCB references.
	 */
	if ((ahd->bugs & AHD_ABORT_LQI_BUG) == 0)
		ahd_outb(ahd, LQCTL1, ABORTPENDING);
	else
#endif
		ahd_outb(ahd, LQCTL1, 0);

	/* All of our queues are empty */
	ahd->qoutfifonext = 0;
	ahd->qoutfifonext_valid_tag = QOUTFIFO_ENTRY_VALID;
	ahd_outb(ahd, QOUTFIFO_ENTRY_VALID_TAG, QOUTFIFO_ENTRY_VALID);
	for (i = 0; i < AHD_QOUT_SIZE; i++)
		ahd->qoutfifo[i].valid_tag = 0;
	ahd_sync_qoutfifo(ahd, BUS_DMASYNC_PREREAD);

	ahd->qinfifonext = 0;
	for (i = 0; i < AHD_QIN_SIZE; i++)
		ahd->qinfifo[i] = SCB_LIST_NULL;

	if ((ahd->features & AHD_TARGETMODE) != 0) {
		/* All target command blocks start out invalid. */
		for (i = 0; i < AHD_TMODE_CMDS; i++)
			ahd->targetcmds[i].cmd_valid = 0;
		ahd_sync_tqinfifo(ahd, BUS_DMASYNC_PREREAD);
		ahd->tqinfifonext = 1;
		ahd_outb(ahd, KERNEL_TQINPOS, ahd->tqinfifonext - 1);
		ahd_outb(ahd, TQINPOS, ahd->tqinfifonext);
	}

	/* Initialize Scratch Ram.
	 */
	ahd_outb(ahd, SEQ_FLAGS, 0);
	ahd_outb(ahd, SEQ_FLAGS2, 0);

	/* We don't have any waiting selections */
	ahd_outw(ahd, WAITING_TID_HEAD, SCB_LIST_NULL);
	ahd_outw(ahd, WAITING_TID_TAIL, SCB_LIST_NULL);
	ahd_outw(ahd, MK_MESSAGE_SCB, SCB_LIST_NULL);
	ahd_outw(ahd, MK_MESSAGE_SCSIID, 0xFF);
	for (i = 0; i < AHD_NUM_TARGETS; i++)
		ahd_outw(ahd, WAITING_SCB_TAILS + (2 * i), SCB_LIST_NULL);

	/*
	 * Nobody is waiting to be DMAed into the QOUTFIFO.
	 */
	ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL);
	ahd_outw(ahd, COMPLETE_SCB_DMAINPROG_HEAD, SCB_LIST_NULL);
	ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL);
	ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL);
	ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL);

	/*
	 * The Freeze Count is 0.
	 */
	ahd->qfreeze_cnt = 0;
	ahd_outw(ahd, QFREEZE_COUNT, 0);
	ahd_outw(ahd, KERNEL_QFREEZE_COUNT, 0);

	/*
	 * Tell the sequencer where it can find our arrays in memory.
	 */
	busaddr = ahd->shared_data_map.physaddr;
	ahd_outl(ahd, SHARED_DATA_ADDR, busaddr);
	ahd_outl(ahd, QOUTFIFO_NEXT_ADDR, busaddr);

	/*
	 * Setup the allowed SCSI Sequences based on operational mode.
	 * If we are a target, we'll enable select in operations once
	 * we've had a lun enabled.
	 */
	scsiseq_template = ENAUTOATNP;
	if ((ahd->flags & AHD_INITIATORROLE) != 0)
		scsiseq_template |= ENRSELI;
	ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq_template);

	/* There are no busy SCBs yet. */
	for (target = 0; target < AHD_NUM_TARGETS; target++) {
		int lun;

		for (lun = 0; lun < AHD_NUM_LUNS_NONPKT; lun++)
			ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(target, 'A', lun));
	}

	/*
	 * Initialize the group code to command length table.
	 * Vendor Unique codes are set to 0 so we only capture
	 * the first byte of the cdb.  These can be overridden
	 * when target mode is enabled.
	 */
	ahd_outb(ahd, CMDSIZE_TABLE, 5);
	ahd_outb(ahd, CMDSIZE_TABLE + 1, 9);
	ahd_outb(ahd, CMDSIZE_TABLE + 2, 9);
	ahd_outb(ahd, CMDSIZE_TABLE + 3, 0);
	ahd_outb(ahd, CMDSIZE_TABLE + 4, 15);
	ahd_outb(ahd, CMDSIZE_TABLE + 5, 11);
	ahd_outb(ahd, CMDSIZE_TABLE + 6, 0);
	ahd_outb(ahd, CMDSIZE_TABLE + 7, 0);

	/* Tell the sequencer of our initial queue positions */
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
	ahd_outb(ahd, QOFF_CTLSTA, SCB_QSIZE_512);
	ahd->qinfifonext = 0;
	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
	ahd_set_hescb_qoff(ahd, 0);
	ahd_set_snscb_qoff(ahd, 0);
	ahd_set_sescb_qoff(ahd, 0);
	ahd_set_sdscb_qoff(ahd, 0);

	/*
	 * Tell the sequencer which SCB will be the next one it receives.
	 */
	busaddr = ahd_le32toh(ahd->next_queued_hscb->hscb_busaddr);
	ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr);

	/*
	 * Default to coalescing disabled.
	 */
	ahd_outw(ahd, INT_COALESCING_CMDCOUNT, 0);
	ahd_outw(ahd, CMDS_PENDING, 0);
	ahd_update_coalescing_values(ahd, ahd->int_coalescing_timer,
				     ahd->int_coalescing_maxcmds,
				     ahd->int_coalescing_mincmds);
	ahd_enable_coalescing(ahd, FALSE);

	ahd_loadseq(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	if (ahd->features & AHD_AIC79XXB_SLOWCRC) {
		u_int negodat3 = ahd_inb(ahd, NEGCONOPTS);

		negodat3 |= ENSLOWCRC;
		ahd_outb(ahd, NEGCONOPTS, negodat3);
		/* Read back to confirm the bit actually stuck. */
		negodat3 = ahd_inb(ahd, NEGCONOPTS);
		if (!(negodat3 & ENSLOWCRC))
			printk("aic79xx: failed to set the SLOWCRC bit\n");
		else
			printk("aic79xx: SLOWCRC bit set\n");
	}
}

/*
 * Setup default device and controller settings.
 * This should only be called if our probe has
 * determined that no configuration data is available.
 */
int
ahd_default_config(struct ahd_softc *ahd)
{
	int	targ;

	ahd->our_id = 7;

	/*
	 * Allocate a tstate to house information for our
	 * initiator presence on the bus as well as the user
	 * data for any target mode initiator.
	 */
	if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) {
		printk("%s: unable to allocate ahd_tmode_tstate. 
" "Failing attach\n", ahd_name(ahd));
		return (ENOMEM);
	}

	/* Give every target aggressive-but-safe default transfer goals. */
	for (targ = 0; targ < AHD_NUM_TARGETS; targ++) {
		struct	 ahd_devinfo devinfo;
		struct	 ahd_initiator_tinfo *tinfo;
		struct	 ahd_tmode_tstate *tstate;
		uint16_t target_mask;

		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
					    targ, &tstate);
		/*
		 * We support SPC2 and SPI4.
		 */
		tinfo->user.protocol_version = 4;
		tinfo->user.transport_version = 4;

		target_mask = 0x01 << targ;
		ahd->user_discenable |= target_mask;
		tstate->discenable |= target_mask;
		ahd->user_tagenable |= target_mask;
#ifdef AHD_FORCE_160
		tinfo->user.period = AHD_SYNCRATE_DT;
#else
		tinfo->user.period = AHD_SYNCRATE_160;
#endif
		tinfo->user.offset = MAX_OFFSET;
		tinfo->user.ppr_options = MSG_EXT_PPR_RD_STRM
					| MSG_EXT_PPR_WR_FLOW
					| MSG_EXT_PPR_HOLD_MCS
					| MSG_EXT_PPR_IU_REQ
					| MSG_EXT_PPR_QAS_REQ
					| MSG_EXT_PPR_DT_REQ;
		if ((ahd->features & AHD_RTI) != 0)
			tinfo->user.ppr_options |= MSG_EXT_PPR_RTI;

		tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;

		/*
		 * Start out Async/Narrow/Untagged and with
		 * conservative protocol support.
		 */
		tinfo->goal.protocol_version = 2;
		tinfo->goal.transport_version = 2;
		tinfo->curr.protocol_version = 2;
		tinfo->curr.transport_version = 2;
		ahd_compile_devinfo(&devinfo, ahd->our_id,
				    targ, CAM_LUN_WILDCARD,
				    'A', ROLE_INITIATOR);
		tstate->tagenable &= ~target_mask;
		ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
			      AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE);
		ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
				 /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL,
				 /*paused*/TRUE);
	}
	return (0);
}

/*
 * Parse device configuration information.
 */
int
ahd_parse_cfgdata(struct ahd_softc *ahd, struct seeprom_config *sc)
{
	int targ;
	int max_targ;

	max_targ = sc->max_targets & CFMAXTARG;
	ahd->our_id = sc->brtime_id & CFSCSIID;

	/*
	 * Allocate a tstate to house information for our
	 * initiator presence on the bus as well as the user
	 * data for any target mode initiator.
	 */
	if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) {
		printk("%s: unable to allocate ahd_tmode_tstate. "
		       "Failing attach\n", ahd_name(ahd));
		return (ENOMEM);
	}

	/* Translate each target's SEEPROM flags into transfer settings. */
	for (targ = 0; targ < max_targ; targ++) {
		struct	 ahd_devinfo devinfo;
		struct	 ahd_initiator_tinfo *tinfo;
		struct	 ahd_transinfo *user_tinfo;
		struct	 ahd_tmode_tstate *tstate;
		uint16_t target_mask;

		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
					    targ, &tstate);
		user_tinfo = &tinfo->user;

		/*
		 * We support SPC2 and SPI4.
		 */
		tinfo->user.protocol_version = 4;
		tinfo->user.transport_version = 4;

		target_mask = 0x01 << targ;
		ahd->user_discenable &= ~target_mask;
		tstate->discenable &= ~target_mask;
		ahd->user_tagenable &= ~target_mask;
		if (sc->device_flags[targ] & CFDISC) {
			tstate->discenable |= target_mask;
			ahd->user_discenable |= target_mask;
			ahd->user_tagenable |= target_mask;
		} else {
			/*
			 * Cannot be packetized without disconnection.
			 */
			sc->device_flags[targ] &= ~CFPACKETIZED;
		}

		user_tinfo->ppr_options = 0;
		user_tinfo->period = (sc->device_flags[targ] & CFXFER);
		if (user_tinfo->period < CFXFER_ASYNC) {
			/* Rates faster than 10MHz require DT transfers. */
			if (user_tinfo->period <= AHD_PERIOD_10MHz)
				user_tinfo->ppr_options |= MSG_EXT_PPR_DT_REQ;
			user_tinfo->offset = MAX_OFFSET;
		} else {
			user_tinfo->offset = 0;
			user_tinfo->period = AHD_ASYNC_XFER_PERIOD;
		}
#ifdef AHD_FORCE_160
		if (user_tinfo->period <= AHD_SYNCRATE_160)
			user_tinfo->period = AHD_SYNCRATE_DT;
#endif

		if ((sc->device_flags[targ] & CFPACKETIZED) != 0) {
			user_tinfo->ppr_options |= MSG_EXT_PPR_RD_STRM
						|  MSG_EXT_PPR_WR_FLOW
						|  MSG_EXT_PPR_HOLD_MCS
						|  MSG_EXT_PPR_IU_REQ;
			if ((ahd->features & AHD_RTI) != 0)
				user_tinfo->ppr_options |= MSG_EXT_PPR_RTI;
		}

		if ((sc->device_flags[targ] & CFQAS) != 0)
			user_tinfo->ppr_options |= MSG_EXT_PPR_QAS_REQ;

		if ((sc->device_flags[targ] & CFWIDEB) != 0)
			user_tinfo->width = MSG_EXT_WDTR_BUS_16_BIT;
		else
			user_tinfo->width = MSG_EXT_WDTR_BUS_8_BIT;
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MISC) != 0)
			printk("(%d): %x:%x:%x:%x\n", targ, user_tinfo->width,
			       user_tinfo->period, user_tinfo->offset,
			       user_tinfo->ppr_options);
#endif
		/*
		 * Start out Async/Narrow/Untagged and with
		 * conservative protocol support.
		 */
		tstate->tagenable &= ~target_mask;
		tinfo->goal.protocol_version = 2;
		tinfo->goal.transport_version = 2;
		tinfo->curr.protocol_version = 2;
		tinfo->curr.transport_version = 2;
		ahd_compile_devinfo(&devinfo, ahd->our_id,
				    targ, CAM_LUN_WILDCARD,
				    'A', ROLE_INITIATOR);
		ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
			      AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE);
		ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
				 /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL,
				 /*paused*/TRUE);
	}

	/* Translate global BIOS/adapter control bits into softc flags. */
	ahd->flags &= ~AHD_SPCHK_ENB_A;
	if (sc->bios_control & CFSPARITY)
		ahd->flags |= AHD_SPCHK_ENB_A;

	ahd->flags &= ~AHD_RESET_BUS_A;
	if (sc->bios_control & CFRESETB)
		ahd->flags |= AHD_RESET_BUS_A;

	ahd->flags &= ~AHD_EXTENDED_TRANS_A;
	if (sc->bios_control & CFEXTEND)
		ahd->flags |= AHD_EXTENDED_TRANS_A;

	ahd->flags &= ~AHD_BIOS_ENABLED;
	if ((sc->bios_control & CFBIOSSTATE) == CFBS_ENABLED)
		ahd->flags |= AHD_BIOS_ENABLED;

	ahd->flags &= ~AHD_STPWLEVEL_A;
	if ((sc->adapter_control & CFSTPWLEVEL) != 0)
		ahd->flags |= AHD_STPWLEVEL_A;

	return (0);
}

/*
 * Parse vital product data (VPD) configuration information.
 */
int
ahd_parse_vpddata(struct ahd_softc *ahd, struct vpd_config *vpd)
{
	int error;

	/* ahd_verify_vpd_cksum() returns zero for a bad checksum. */
	error = ahd_verify_vpd_cksum(vpd);
	if (error == 0)
		return (EINVAL);
	if ((vpd->bios_flags & VPDBOOTHOST) != 0)
		ahd->flags |= AHD_BOOT_CHANNEL;
	return (0);
}

/*
 * Enable or disable chip interrupt delivery, keeping the cached
 * pause/unpause HCNTRL values in sync with the INTEN state.
 */
void
ahd_intr_enable(struct ahd_softc *ahd, int enable)
{
	u_int hcntrl;

	hcntrl = ahd_inb(ahd, HCNTRL);
	hcntrl &= ~INTEN;
	ahd->pause &= ~INTEN;
	ahd->unpause &= ~INTEN;
	if (enable) {
		hcntrl |= INTEN;
		ahd->pause |= INTEN;
		ahd->unpause |= INTEN;
	}
	ahd_outb(ahd, HCNTRL, hcntrl);
}

/*
 * Clamp and program the interrupt coalescing parameters.  The max/min
 * command counts are written as negated values, matching what the
 * sequencer expects in those registers.
 */
static void
ahd_update_coalescing_values(struct ahd_softc *ahd, u_int timer, u_int maxcmds,
			     u_int mincmds)
{
	if (timer > AHD_TIMER_MAX_US)
		timer = AHD_TIMER_MAX_US;
	ahd->int_coalescing_timer = timer;

	if (maxcmds > AHD_INT_COALESCING_MAXCMDS_MAX)
		maxcmds = AHD_INT_COALESCING_MAXCMDS_MAX;
	if (mincmds > AHD_INT_COALESCING_MINCMDS_MAX)
		mincmds = AHD_INT_COALESCING_MINCMDS_MAX;
	ahd->int_coalescing_maxcmds = maxcmds;
	ahd_outw(ahd, INT_COALESCING_TIMER, timer / AHD_TIMER_US_PER_TICK);
	ahd_outb(ahd, INT_COALESCING_MAXCMDS, -maxcmds);
	ahd_outb(ahd, INT_COALESCING_MINCMDS, -mincmds);
}

/*
 * Toggle interrupt coalescing via HS_MAILBOX, then drain the qoutfifo
 * so no completions are stranded by the mode change.
 */
static void
ahd_enable_coalescing(struct ahd_softc *ahd, int enable)
{

	ahd->hs_mailbox &= ~ENINT_COALESCE;
	if (enable)
		ahd->hs_mailbox |= ENINT_COALESCE;
	ahd_outb(ahd, HS_MAILBOX, ahd->hs_mailbox);
	ahd_flush_device_writes(ahd);
	ahd_run_qoutfifo(ahd);
}

/*
 * Ensure that the card is paused in a location
 * outside of all critical sections and that all
 * pending work is completed prior to returning.
 * This routine should only be called from outside
 * an interrupt context.
 */
void
ahd_pause_and_flushwork(struct ahd_softc *ahd)
{
	u_int intstat;
	u_int maxloops;

	maxloops = 1000;
	ahd->flags |= AHD_ALL_INTERRUPTS;
	ahd_pause(ahd);
	/*
	 * Freeze the outgoing selections.  We do this only
	 * until we are safely paused without further selections
	 * pending.
*/ ahd->qfreeze_cnt--; ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt); ahd_outb(ahd, SEQ_FLAGS2, ahd_inb(ahd, SEQ_FLAGS2) | SELECTOUT_QFROZEN); do { ahd_unpause(ahd); /* * Give the sequencer some time to service * any active selections. */ ahd_delay(500); ahd_intr(ahd); ahd_pause(ahd); intstat = ahd_inb(ahd, INTSTAT); if ((intstat & INT_PEND) == 0) { ahd_clear_critical_section(ahd); intstat = ahd_inb(ahd, INTSTAT); } } while (--maxloops && (intstat != 0xFF || (ahd->features & AHD_REMOVABLE) == 0) && ((intstat & INT_PEND) != 0 || (ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0 || (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) != 0)); if (maxloops == 0) { printk("Infinite interrupt loop, INTSTAT = %x", ahd_inb(ahd, INTSTAT)); } ahd->qfreeze_cnt++; ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt); ahd_flush_qoutfifo(ahd); ahd->flags &= ~AHD_ALL_INTERRUPTS; } #ifdef CONFIG_PM int ahd_suspend(struct ahd_softc *ahd) { ahd_pause_and_flushwork(ahd); if (LIST_FIRST(&ahd->pending_scbs) != NULL) { ahd_unpause(ahd); return (EBUSY); } ahd_shutdown(ahd); return (0); } void ahd_resume(struct ahd_softc *ahd) { ahd_reset(ahd, /*reinit*/TRUE); ahd_intr_enable(ahd, TRUE); ahd_restart(ahd); } #endif /************************** Busy Target Table *********************************/ /* * Set SCBPTR to the SCB that contains the busy * table entry for TCL. Return the offset into * the SCB that contains the entry for TCL. * saved_scbid is dereferenced and set to the * scbid that should be restored once manipualtion * of the TCL entry is complete. */ static inline u_int ahd_index_busy_tcl(struct ahd_softc *ahd, u_int *saved_scbid, u_int tcl) { /* * Index to the SCB that contains the busy entry. */ AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); *saved_scbid = ahd_get_scbptr(ahd); ahd_set_scbptr(ahd, TCL_LUN(tcl) | ((TCL_TARGET_OFFSET(tcl) & 0xC) << 4)); /* * And now calculate the SCB offset to the entry. * Each entry is 2 bytes wide, hence the * multiplication by 2. 
	 */
	return (((TCL_TARGET_OFFSET(tcl) & 0x3) << 1) + SCB_DISCONNECTED_LISTS);
}

/*
 * Return the untagged transaction id for a given target/channel lun.
 */
static u_int
ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl)
{
	u_int scbid;
	u_int scb_offset;
	u_int saved_scbptr;

	scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl);
	scbid = ahd_inw_scbram(ahd, scb_offset);
	/* Restore the caller's SCBPTR before returning. */
	ahd_set_scbptr(ahd, saved_scbptr);
	return (scbid);
}

/*
 * Record scbid as the untagged transaction for the given TCL
 * in the on-chip busy target table.
 */
static void
ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, u_int scbid)
{
	u_int scb_offset;
	u_int saved_scbptr;

	scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl);
	ahd_outw(ahd, scb_offset, scbid);
	ahd_set_scbptr(ahd, saved_scbptr);
}

/************************** SCB and SCB queue management **********************/
/*
 * Return non-zero if the SCB matches the target/channel/lun/tag
 * description.  Wildcards (ALL_CHANNELS, CAM_TARGET_WILDCARD,
 * CAM_LUN_WILDCARD, SCB_LIST_NULL) match anything in their slot.
 */
static int
ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, int target,
	      char channel, int lun, u_int tag, role_t role)
{
	int targ = SCB_GET_TARGET(ahd, scb);
	char chan = SCB_GET_CHANNEL(ahd, scb);
	int slun = SCB_GET_LUN(scb);
	int match;

	match = ((chan == channel) || (channel == ALL_CHANNELS));
	if (match != 0)
		match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
	if (match != 0)
		match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
	if (match != 0) {
#ifdef AHD_TARGET_MODE
		int group;

		/* Tag comparison depends on initiator vs. target role. */
		group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
		if (role == ROLE_INITIATOR) {
			match = (group != XPT_FC_GROUP_TMODE)
			      && ((tag == SCB_GET_TAG(scb))
			       || (tag == SCB_LIST_NULL));
		} else if (role == ROLE_TARGET) {
			match = (group == XPT_FC_GROUP_TMODE)
			      && ((tag == scb->io_ctx->csio.tag_id)
			       || (tag == SCB_LIST_NULL));
		}
#else /* !AHD_TARGET_MODE */
		match = ((tag == SCB_GET_TAG(scb)) || (tag == SCB_LIST_NULL));
#endif /* AHD_TARGET_MODE */
	}

	return match;
}

/*
 * Freeze the device queue associated with this SCB: requeue any
 * qinfifo entries for the same device and let the platform layer
 * freeze its queue.
 */
static void
ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb)
{
	int	target;
	char	channel;
	int	lun;

	target = SCB_GET_TARGET(ahd, scb);
	lun = SCB_GET_LUN(scb);
	channel = SCB_GET_CHANNEL(ahd, scb);

	ahd_search_qinfifo(ahd, target, channel, lun,
			   /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
			   CAM_REQUEUE_REQ, SEARCH_COMPLETE);

	ahd_platform_freeze_devq(ahd, scb);
}

/*
 * Place an SCB at the tail of the qinfifo, linking it after the
 * most recently queued entry (if any).  Must link the previous
 * tail's hscb to this one so the sequencer DMA chain stays intact.
 */
void
ahd_qinfifo_requeue_tail(struct ahd_softc *ahd, struct scb *scb)
{
	struct scb *prev_scb;
	ahd_mode_state saved_modes;

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
	prev_scb = NULL;
	if (ahd_qinfifo_count(ahd) != 0) {
		u_int prev_tag;
		u_int prev_pos;

		prev_pos = AHD_QIN_WRAP(ahd->qinfifonext - 1);
		prev_tag = ahd->qinfifo[prev_pos];
		prev_scb = ahd_lookup_scb(ahd, prev_tag);
	}
	ahd_qinfifo_requeue(ahd, prev_scb, scb);
	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
	ahd_restore_modes(ahd, saved_modes);
}

/*
 * Chain scb into the qinfifo after prev_scb (or at the head when
 * prev_scb is NULL), updating the hardware next-SCB address and
 * syncing the touched hscbs for DMA.
 */
static void
ahd_qinfifo_requeue(struct ahd_softc *ahd, struct scb *prev_scb,
		    struct scb *scb)
{
	if (prev_scb == NULL) {
		uint32_t busaddr;

		busaddr = ahd_le32toh(scb->hscb->hscb_busaddr);
		ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr);
	} else {
		prev_scb->hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
		ahd_sync_scb(ahd, prev_scb,
			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
	ahd->qinfifonext++;
	scb->hscb->next_hscb_busaddr = ahd->next_queued_hscb->hscb_busaddr;
	ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}

/*
 * Number of entries between the sequencer's consumer index and our
 * producer index in the qinfifo, accounting for ring wrap.
 */
static int
ahd_qinfifo_count(struct ahd_softc *ahd)
{
	u_int qinpos;
	u_int wrap_qinpos;
	u_int wrap_qinfifonext;

	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	qinpos = ahd_get_snscb_qoff(ahd);
	wrap_qinpos = AHD_QIN_WRAP(qinpos);
	wrap_qinfifonext = AHD_QIN_WRAP(ahd->qinfifonext);

	if (wrap_qinfifonext >= wrap_qinpos)
		return (wrap_qinfifonext - wrap_qinpos);
	else
		return (wrap_qinfifonext
		      + ARRAY_SIZE(ahd->qinfifo) - wrap_qinpos);
}

/*
 * Recompute the sequencer's CMDS_PENDING count from the host-side
 * pending SCB list, excluding entries still sitting in the qinfifo.
 */
static void
ahd_reset_cmds_pending(struct ahd_softc *ahd)
{
	struct scb *scb;
	ahd_mode_state saved_modes;
	u_int pending_cmds;

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);

	/*
	 * Don't count any commands as outstanding that the
	 * sequencer has already marked for completion.
	 */
	ahd_flush_qoutfifo(ahd);

	pending_cmds = 0;
	LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
		pending_cmds++;
	}
	ahd_outw(ahd, CMDS_PENDING, pending_cmds - ahd_qinfifo_count(ahd));
	ahd_restore_modes(ahd, saved_modes);
	ahd->flags &= ~AHD_UPDATE_PEND_CMDS;
}

/*
 * Complete an SCB with the given status, setting the status only
 * if it is still CAM_REQ_INPROG and freezing the SCB on any
 * non-CAM_REQ_CMP outcome.
 */
static void
ahd_done_with_status(struct ahd_softc *ahd, struct scb *scb, uint32_t status)
{
	cam_status ostat;
	cam_status cstat;

	ostat = ahd_get_transaction_status(scb);
	if (ostat == CAM_REQ_INPROG)
		ahd_set_transaction_status(scb, status);
	cstat = ahd_get_transaction_status(scb);
	if (cstat != CAM_REQ_CMP)
		ahd_freeze_scb(scb);
	ahd_done(ahd, scb);
}

/*
 * Walk the qinfifo and the sequencer's waiting-for-selection lists
 * looking for SCBs that match target/channel/lun/tag/role, applying
 * the requested action (complete, remove, print, or count) to each.
 * Returns the number of matches.  The sequencer must be paused.
 */
int
ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
		   int lun, u_int tag, role_t role, uint32_t status,
		   ahd_search_action action)
{
	struct scb	*scb;
	struct scb	*mk_msg_scb;
	struct scb	*prev_scb;
	ahd_mode_state	 saved_modes;
	u_int		 qinstart;
	u_int		 qinpos;
	u_int		 qintail;
	u_int		 tid_next;
	u_int		 tid_prev;
	u_int		 scbid;
	u_int		 seq_flags2;
	u_int		 savedscbptr;
	uint32_t	 busaddr;
	int		 found;
	int		 targets;

	/* Must be in CCHAN mode */
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);

	/*
	 * Halt any pending SCB DMA.  The sequencer will reinitiate
	 * this dma if the qinfifo is not empty once we unpause.
	 */
	if ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN|CCSCBDIR))
	 == (CCARREN|CCSCBEN|CCSCBDIR)) {
		ahd_outb(ahd, CCSCBCTL,
			 ahd_inb(ahd, CCSCBCTL) & ~(CCARREN|CCSCBEN));
		while ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN)) != 0)
			;
	}
	/* Determine sequencer's position in the qinfifo. */
	qintail = AHD_QIN_WRAP(ahd->qinfifonext);
	qinstart = ahd_get_snscb_qoff(ahd);
	qinpos = AHD_QIN_WRAP(qinstart);
	found = 0;
	prev_scb = NULL;

	if (action == SEARCH_PRINT) {
		printk("qinstart = %d qinfifonext = %d\nQINFIFO:",
		       qinstart, ahd->qinfifonext);
	}

	/*
	 * Start with an empty queue.  Entries that are not chosen
	 * for removal will be re-added to the queue as we go.
	 */
	ahd->qinfifonext = qinstart;
	busaddr = ahd_le32toh(ahd->next_queued_hscb->hscb_busaddr);
	ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr);

	while (qinpos != qintail) {
		scb = ahd_lookup_scb(ahd, ahd->qinfifo[qinpos]);
		if (scb == NULL) {
			printk("qinpos = %d, SCB index = %d\n",
				qinpos, ahd->qinfifo[qinpos]);
			panic("Loop 1\n");
		}

		if (ahd_match_scb(ahd, scb, target, channel, lun, tag, role)) {
			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
				if ((scb->flags & SCB_ACTIVE) == 0)
					printk("Inactive SCB in qinfifo\n");
				ahd_done_with_status(ahd, scb, status);
				/* FALLTHROUGH */
			case SEARCH_REMOVE:
				break;
			case SEARCH_PRINT:
				printk(" 0x%x", ahd->qinfifo[qinpos]);
				/* FALLTHROUGH */
			case SEARCH_COUNT:
				ahd_qinfifo_requeue(ahd, prev_scb, scb);
				prev_scb = scb;
				break;
			}
		} else {
			/* Non-matching entries are re-queued unchanged. */
			ahd_qinfifo_requeue(ahd, prev_scb, scb);
			prev_scb = scb;
		}
		qinpos = AHD_QIN_WRAP(qinpos+1);
	}

	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);

	if (action == SEARCH_PRINT)
		printk("\nWAITING_TID_QUEUES:\n");

	/*
	 * Search waiting for selection lists.  We traverse the
	 * list of "their ids" waiting for selection and, if
	 * appropriate, traverse the SCBs of each "their id"
	 * looking for matches.
	 */
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	seq_flags2 = ahd_inb(ahd, SEQ_FLAGS2);
	if ((seq_flags2 & PENDING_MK_MESSAGE) != 0) {
		scbid = ahd_inw(ahd, MK_MESSAGE_SCB);
		mk_msg_scb = ahd_lookup_scb(ahd, scbid);
	} else
		mk_msg_scb = NULL;
	savedscbptr = ahd_get_scbptr(ahd);
	tid_next = ahd_inw(ahd, WAITING_TID_HEAD);
	tid_prev = SCB_LIST_NULL;
	targets = 0;
	for (scbid = tid_next; !SCBID_IS_NULL(scbid); scbid = tid_next) {
		u_int tid_head;
		u_int tid_tail;

		targets++;
		if (targets > AHD_NUM_TARGETS)
			panic("TID LIST LOOP");

		if (scbid >= ahd->scb_data.numscbs) {
			printk("%s: Waiting TID List inconsistency. "
			       "SCB index == 0x%x, yet numscbs == 0x%x.",
			       ahd_name(ahd), scbid, ahd->scb_data.numscbs);
			ahd_dump_card_state(ahd);
			panic("for safety");
		}
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printk("%s: SCB = 0x%x Not Active!\n",
			       ahd_name(ahd), scbid);
			panic("Waiting TID List traversal\n");
		}
		ahd_set_scbptr(ahd, scbid);
		tid_next = ahd_inw_scbram(ahd, SCB_NEXT2);
		if (ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD,
				  SCB_LIST_NULL, ROLE_UNKNOWN) == 0) {
			tid_prev = scbid;
			continue;
		}

		/*
		 * We found a list of scbs that needs to be searched.
		 */
		if (action == SEARCH_PRINT)
			printk(" %d ( ", SCB_GET_TARGET(ahd, scb));
		tid_head = scbid;
		found += ahd_search_scb_list(ahd, target, channel,
					     lun, tag, role, status,
					     action, &tid_head, &tid_tail,
					     SCB_GET_TARGET(ahd, scb));
		/*
		 * Check any MK_MESSAGE SCB that is still waiting to
		 * enter this target's waiting for selection queue.
		 */
		if (mk_msg_scb != NULL
		 && ahd_match_scb(ahd, mk_msg_scb, target, channel,
				  lun, tag, role)) {

			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
				if ((mk_msg_scb->flags & SCB_ACTIVE) == 0)
					printk("Inactive SCB pending MK_MSG\n");
				ahd_done_with_status(ahd, mk_msg_scb, status);
				/* FALLTHROUGH */
			case SEARCH_REMOVE:
			{
				u_int tail_offset;

				printk("Removing MK_MSG scb\n");

				/*
				 * Reset our tail to the tail of the
				 * main per-target list.
				 */
				tail_offset = WAITING_SCB_TAILS
				    + (2 * SCB_GET_TARGET(ahd, mk_msg_scb));
				ahd_outw(ahd, tail_offset, tid_tail);

				seq_flags2 &= ~PENDING_MK_MESSAGE;
				ahd_outb(ahd, SEQ_FLAGS2, seq_flags2);
				ahd_outw(ahd, CMDS_PENDING,
					 ahd_inw(ahd, CMDS_PENDING)-1);
				mk_msg_scb = NULL;
				break;
			}
			case SEARCH_PRINT:
				printk(" 0x%x", SCB_GET_TAG(scb));
				/* FALLTHROUGH */
			case SEARCH_COUNT:
				break;
			}
		}

		if (mk_msg_scb != NULL
		 && SCBID_IS_NULL(tid_head)
		 && ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD,
				  SCB_LIST_NULL, ROLE_UNKNOWN)) {

			/*
			 * When removing the last SCB for a target
			 * queue with a pending MK_MESSAGE scb, we
			 * must queue the MK_MESSAGE scb.
			 */
			printk("Queueing mk_msg_scb\n");
			tid_head = ahd_inw(ahd, MK_MESSAGE_SCB);
			seq_flags2 &= ~PENDING_MK_MESSAGE;
			ahd_outb(ahd, SEQ_FLAGS2, seq_flags2);
			mk_msg_scb = NULL;
		}
		if (tid_head != scbid)
			ahd_stitch_tid_list(ahd, tid_prev, tid_head, tid_next);
		if (!SCBID_IS_NULL(tid_head))
			tid_prev = tid_head;
		if (action == SEARCH_PRINT)
			printk(")\n");
	}

	/* Restore saved state. */
	ahd_set_scbptr(ahd, savedscbptr);
	ahd_restore_modes(ahd, saved_modes);
	return (found);
}

/*
 * Search one target's waiting-for-selection SCB list for matches,
 * applying the requested action.  *list_head/*list_tail are updated
 * to reflect any removals.  Returns the number of matches.
 */
static int
ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
		    int lun, u_int tag, role_t role, uint32_t status,
		    ahd_search_action action, u_int *list_head,
		    u_int *list_tail, u_int tid)
{
	struct scb *scb;
	u_int scbid;
	u_int next;
	u_int prev;
	int found;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	found = 0;
	prev = SCB_LIST_NULL;
	next = *list_head;
	*list_tail = SCB_LIST_NULL;
	for (scbid = next; !SCBID_IS_NULL(scbid); scbid = next) {
		if (scbid >= ahd->scb_data.numscbs) {
			printk("%s:SCB List inconsistency. "
			       "SCB == 0x%x, yet numscbs == 0x%x.",
			       ahd_name(ahd), scbid, ahd->scb_data.numscbs);
			ahd_dump_card_state(ahd);
			panic("for safety");
		}
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printk("%s: SCB = %d Not Active!\n",
			       ahd_name(ahd), scbid);
			panic("Waiting List traversal\n");
		}
		ahd_set_scbptr(ahd, scbid);
		*list_tail = scbid;
		next = ahd_inw_scbram(ahd, SCB_NEXT);
		if (ahd_match_scb(ahd, scb, target, channel,
				  lun, SCB_LIST_NULL, role) == 0) {
			prev = scbid;
			continue;
		}
		found++;
		switch (action) {
		case SEARCH_COMPLETE:
			if ((scb->flags & SCB_ACTIVE) == 0)
				printk("Inactive SCB in Waiting List\n");
			ahd_done_with_status(ahd, scb, status);
			/* FALLTHROUGH */
		case SEARCH_REMOVE:
			ahd_rem_wscb(ahd, scbid, prev, next, tid);
			*list_tail = prev;
			if (SCBID_IS_NULL(prev))
				*list_head = next;
			break;
		case SEARCH_PRINT:
			printk("0x%x ", scbid);
			/* FALLTHROUGH */
		case SEARCH_COUNT:
			prev = scbid;
			break;
		}
		if (found > AHD_SCB_MAX)
			panic("SCB LIST LOOP");
	}
	if (action == SEARCH_COMPLETE
	 || action == SEARCH_REMOVE)
		ahd_outw(ahd, CMDS_PENDING,
			 ahd_inw(ahd, CMDS_PENDING) - found);
	return (found);
}

/*
 * Splice the TID list so that tid_prev links to tid_cur (or to
 * tid_next when tid_cur is null, bypassing the removed list),
 * maintaining WAITING_TID_HEAD/TAIL as needed.
 */
static void
ahd_stitch_tid_list(struct ahd_softc *ahd, u_int tid_prev,
		    u_int tid_cur, u_int tid_next)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);

	if (SCBID_IS_NULL(tid_cur)) {

		/* Bypass current TID list */
		if (SCBID_IS_NULL(tid_prev)) {
			ahd_outw(ahd, WAITING_TID_HEAD, tid_next);
		} else {
			ahd_set_scbptr(ahd, tid_prev);
			ahd_outw(ahd, SCB_NEXT2, tid_next);
		}
		if (SCBID_IS_NULL(tid_next))
			ahd_outw(ahd, WAITING_TID_TAIL, tid_prev);
	} else {

		/* Stitch through tid_cur */
		if (SCBID_IS_NULL(tid_prev)) {
			ahd_outw(ahd, WAITING_TID_HEAD, tid_cur);
		} else {
			ahd_set_scbptr(ahd, tid_prev);
			ahd_outw(ahd, SCB_NEXT2, tid_cur);
		}
		ahd_set_scbptr(ahd, tid_cur);
		ahd_outw(ahd, SCB_NEXT2, tid_next);

		if (SCBID_IS_NULL(tid_next))
			ahd_outw(ahd, WAITING_TID_TAIL, tid_cur);
	}
}

/*
 * Manipulate the waiting for selection list and return the
 * scb that follows the one that we remove.
 */
static u_int
ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
	     u_int prev, u_int next, u_int tid)
{
	u_int tail_offset;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	if (!SCBID_IS_NULL(prev)) {
		ahd_set_scbptr(ahd, prev);
		ahd_outw(ahd, SCB_NEXT, next);
	}

	/*
	 * SCBs that have MK_MESSAGE set in them may
	 * cause the tail pointer to be updated without
	 * setting the next pointer of the previous tail.
	 * Only clear the tail if the removed SCB was
	 * the tail.
	 */
	tail_offset = WAITING_SCB_TAILS + (2 * tid);
	if (SCBID_IS_NULL(next)
	 && ahd_inw(ahd, tail_offset) == scbid)
		ahd_outw(ahd, tail_offset, prev);

	ahd_add_scb_to_free_list(ahd, scbid);
	return (next);
}

/*
 * Add the SCB as selected by SCBPTR onto the on chip list of
 * free hardware SCBs.  This list is empty/unused if we are not
 * performing SCB paging.
 */
static void
ahd_add_scb_to_free_list(struct ahd_softc *ahd, u_int scbid)
{
/* XXX Need some other mechanism to designate "free". */
	/*
	 * Invalidate the tag so that our abort
	 * routines don't think it's active.
	ahd_outb(ahd, SCB_TAG, SCB_LIST_NULL);
	 */
}

/******************************** Error Handling ******************************/
/*
 * Abort all SCBs that match the given description (target/channel/lun/tag),
 * setting their status to the passed in status if the status has not already
 * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
 * is paused before it is called.
 */
static int
ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel,
	       int lun, u_int tag, role_t role, uint32_t status)
{
	struct scb *scbp;
	struct scb *scbp_next;
	u_int i, j;
	u_int maxtarget;
	u_int minlun;
	u_int maxlun;
	int found;
	ahd_mode_state saved_modes;

	/* restore this when we're done */
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	found = ahd_search_qinfifo(ahd, target, channel, lun, SCB_LIST_NULL,
				   role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);

	/*
	 * Clean out the busy target table for any untagged commands.
	 */
	i = 0;
	maxtarget = 16;
	if (target != CAM_TARGET_WILDCARD) {
		i = target;
		if (channel == 'B')
			i += 8;
		maxtarget = i + 1;
	}

	if (lun == CAM_LUN_WILDCARD) {
		minlun = 0;
		maxlun = AHD_NUM_LUNS_NONPKT;
	} else if (lun >= AHD_NUM_LUNS_NONPKT) {
		/* lun out of untagged-table range: scan nothing */
		minlun = maxlun = 0;
	} else {
		minlun = lun;
		maxlun = lun + 1;
	}

	if (role != ROLE_TARGET) {
		for (;i < maxtarget; i++) {
			for (j = minlun;j < maxlun; j++) {
				u_int scbid;
				u_int tcl;

				tcl = BUILD_TCL_RAW(i, 'A', j);
				scbid = ahd_find_busy_tcl(ahd, tcl);
				scbp = ahd_lookup_scb(ahd, scbid);
				if (scbp == NULL
				 || ahd_match_scb(ahd, scbp, target, channel,
						  lun, tag, role) == 0)
					continue;
				ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(i, 'A', j));
			}
		}
	}

	/*
	 * Don't abort commands that have already completed,
	 * but haven't quite made it up to the host yet.
	 */
	ahd_flush_qoutfifo(ahd);

	/*
	 * Go through the pending CCB list and look for
	 * commands for this target that are still active.
	 * These are other tagged commands that were
	 * disconnected when the reset occurred.
	 */
	scbp_next = LIST_FIRST(&ahd->pending_scbs);
	while (scbp_next != NULL) {
		scbp = scbp_next;
		scbp_next = LIST_NEXT(scbp, pending_links);
		if (ahd_match_scb(ahd, scbp, target, channel, lun, tag, role)) {
			cam_status ostat;

			ostat = ahd_get_transaction_status(scbp);
			if (ostat == CAM_REQ_INPROG)
				ahd_set_transaction_status(scbp, status);
			if (ahd_get_transaction_status(scbp) != CAM_REQ_CMP)
				ahd_freeze_scb(scbp);
			if ((scbp->flags & SCB_ACTIVE) == 0)
				printk("Inactive SCB on pending list\n");
			ahd_done(ahd, scbp);
			found++;
		}
	}
	ahd_restore_modes(ahd, saved_modes);
	ahd_platform_abort_scbs(ahd, target, channel, lun, tag, role, status);
	ahd->flags |= AHD_UPDATE_PEND_CMDS;
	return found;
}

/*
 * Assert SCSIRSTO to reset the current SCSI bus.  Requires the
 * SCSI mode; interrupts for the reset are masked while we hold
 * the line.
 */
static void
ahd_reset_current_bus(struct ahd_softc *ahd)
{
	uint8_t scsiseq;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) & ~ENSCSIRST);
	scsiseq = ahd_inb(ahd, SCSISEQ0) & ~(ENSELO|ENARBO|SCSIRSTO);
	ahd_outb(ahd, SCSISEQ0, scsiseq | SCSIRSTO);
	ahd_flush_device_writes(ahd);
	ahd_delay(AHD_BUSRESET_DELAY);
	/* Turn off the bus reset */
	ahd_outb(ahd, SCSISEQ0, scsiseq);
	ahd_flush_device_writes(ahd);
	ahd_delay(AHD_BUSRESET_DELAY);
	if ((ahd->bugs & AHD_SCSIRST_BUG) != 0) {
		/*
		 * 2A Razor #474
		 * Certain chip state is not cleared for
		 * SCSI bus resets that we initiate, so
		 * we must reset the chip.
		 */
		ahd_reset(ahd, /*reinit*/TRUE);
		ahd_intr_enable(ahd, /*enable*/TRUE);
		AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	}

	ahd_clear_intstat(ahd);
}

/*
 * Reset the given SCSI channel: quiesce the sequencer and DMA
 * engines, optionally drive a bus reset, abort all pending SCBs
 * with CAM_SCSI_BUS_RESET, and drop negotiations back to
 * async/narrow.  Returns the number of SCBs aborted.
 */
int
ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
{
	struct ahd_devinfo caminfo;
	u_int	initiator;
	u_int	target;
	u_int	max_scsiid;
	int	found;
	u_int	fifo;
	u_int	next_fifo;
	uint8_t scsiseq;

	/*
	 * Check if the last bus reset is cleared
	 */
	if (ahd->flags & AHD_BUS_RESET_ACTIVE) {
		printk("%s: bus reset still active\n",
		       ahd_name(ahd));
		return 0;
	}
	ahd->flags |= AHD_BUS_RESET_ACTIVE;

	ahd->pending_device = NULL;

	ahd_compile_devinfo(&caminfo,
			    CAM_TARGET_WILDCARD,
			    CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD,
			    channel, ROLE_UNKNOWN);
	ahd_pause(ahd);

	/* Make sure the sequencer is in a safe location. */
	ahd_clear_critical_section(ahd);

	/*
	 * Run our command complete fifos to ensure that we perform
	 * completion processing on any commands that 'completed'
	 * before the reset occurred.
	 */
	ahd_run_qoutfifo(ahd);
#ifdef AHD_TARGET_MODE
	if ((ahd->flags & AHD_TARGETROLE) != 0) {
		ahd_run_tqinfifo(ahd, /*paused*/TRUE);
	}
#endif
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	/*
	 * Disable selections so no automatic hardware
	 * functions will modify chip state.
	 */
	ahd_outb(ahd, SCSISEQ0, 0);
	ahd_outb(ahd, SCSISEQ1, 0);

	/*
	 * Safely shut down our DMA engines.  Always start with
	 * the FIFO that is not currently active (if any are
	 * actively connected).
	 */
	next_fifo = fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO;
	if (next_fifo > CURRFIFO_1)
		/* If disconneced, arbitrarily start with FIFO1. */
		next_fifo = fifo = 0;
	do {
		next_fifo ^= CURRFIFO_1;
		ahd_set_modes(ahd, next_fifo, next_fifo);
		ahd_outb(ahd, DFCNTRL,
			 ahd_inb(ahd, DFCNTRL) & ~(SCSIEN|HDMAEN));
		while ((ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0)
			ahd_delay(10);
		/*
		 * Set CURRFIFO to the now inactive channel.
		 */
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		ahd_outb(ahd, DFFSTAT, next_fifo);
	} while (next_fifo != fifo);

	/*
	 * Reset the bus if we are initiating this reset
	 */
	ahd_clear_msg_state(ahd);
	ahd_outb(ahd, SIMODE1,
		 ahd_inb(ahd, SIMODE1) & ~(ENBUSFREE|ENSCSIRST));

	if (initiate_reset)
		ahd_reset_current_bus(ahd);

	ahd_clear_intstat(ahd);

	/*
	 * Clean up all the state information for the
	 * pending transactions on this bus.
	 */
	found = ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, channel,
			       CAM_LUN_WILDCARD, SCB_LIST_NULL,
			       ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);

	/*
	 * Cleanup anything left in the FIFOs.
	 */
	ahd_clear_fifo(ahd, 0);
	ahd_clear_fifo(ahd, 1);

	/*
	 * Clear SCSI interrupt status
	 */
	ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI);

	/*
	 * Reenable selections
	 */
	ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) | ENSCSIRST);
	scsiseq = ahd_inb(ahd, SCSISEQ_TEMPLATE);
	ahd_outb(ahd, SCSISEQ1, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));

	max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7;
#ifdef AHD_TARGET_MODE
	/*
	 * Send an immediate notify ccb to all target more peripheral
	 * drivers affected by this action.
	 */
	for (target = 0; target <= max_scsiid; target++) {
		struct ahd_tmode_tstate* tstate;
		u_int lun;

		tstate = ahd->enabled_targets[target];
		if (tstate == NULL)
			continue;
		for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
			struct ahd_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[lun];
			if (lstate == NULL)
				continue;

			ahd_queue_lstate_event(ahd, lstate, CAM_TARGET_WILDCARD,
					       EVENT_TYPE_BUS_RESET, /*arg*/0);
			ahd_send_lstate_events(ahd, lstate);
		}
	}
#endif
	/*
	 * Revert to async/narrow transfers until we renegotiate.
	 */
	for (target = 0; target <= max_scsiid; target++) {

		if (ahd->enabled_targets[target] == NULL)
			continue;
		for (initiator = 0; initiator <= max_scsiid; initiator++) {
			struct ahd_devinfo devinfo;

			ahd_compile_devinfo(&devinfo, target,
					    initiator,
					    CAM_LUN_WILDCARD,
					    'A', ROLE_UNKNOWN);
			ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
				      AHD_TRANS_CUR, /*paused*/TRUE);
			ahd_set_syncrate(ahd, &devinfo, /*period*/0,
					 /*offset*/0, /*ppr_options*/0,
					 AHD_TRANS_CUR, /*paused*/TRUE);
		}
	}

	/* Notify the XPT that a bus reset occurred */
	ahd_send_async(ahd, caminfo.channel, CAM_TARGET_WILDCARD,
		       CAM_LUN_WILDCARD, AC_BUS_RESET);

	ahd_restart(ahd);

	return (found);
}

/**************************** Statistics Processing ***************************/
/*
 * Periodic timer: enable interrupt coalescing when the recent
 * completion rate crosses int_coalescing_threshold and disable it
 * below int_coalescing_stop_threshold, then age the completion-
 * count bucket ring and rearm the timer.
 */
static void
ahd_stat_timer(void *arg)
{
	struct ahd_softc *ahd = arg;
	u_long s;
	int enint_coal;

	ahd_lock(ahd, &s);

	enint_coal = ahd->hs_mailbox & ENINT_COALESCE;
	if (ahd->cmdcmplt_total > ahd->int_coalescing_threshold)
		enint_coal |= ENINT_COALESCE;
	else if (ahd->cmdcmplt_total < ahd->int_coalescing_stop_threshold)
		enint_coal &= ~ENINT_COALESCE;

	if (enint_coal != (ahd->hs_mailbox & ENINT_COALESCE)) {
		ahd_enable_coalescing(ahd, enint_coal);
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_INT_COALESCING) != 0)
			printk("%s: Interrupt coalescing "
			       "now %sabled. Cmds %d\n",
			       ahd_name(ahd),
			       (enint_coal & ENINT_COALESCE) ? "en" : "dis",
			       ahd->cmdcmplt_total);
#endif
	}

	ahd->cmdcmplt_bucket = (ahd->cmdcmplt_bucket+1) & (AHD_STAT_BUCKETS-1);
	ahd->cmdcmplt_total -= ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket];
	ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket] = 0;
	ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US,
			ahd_stat_timer, ahd);
	ahd_unlock(ahd, &s);
}

/****************************** Status Processing *****************************/
/*
 * Handle a non-zero SCSI status returned by a target, issuing
 * autosense (REQUEST SENSE) for check conditions where appropriate.
 */
static void
ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
{
	struct hardware_scb *hscb;
	int paused;

	/*
	 * The sequencer freezes its select-out queue
	 * anytime a SCSI status error occurs.
	 * We must
	 * handle the error and increment our qfreeze count
	 * to allow the sequencer to continue.  We don't
	 * bother clearing critical sections here since all
	 * operations are on data structures that the sequencer
	 * is not touching once the queue is frozen.
	 */
	hscb = scb->hscb;

	if (ahd_is_paused(ahd)) {
		paused = 1;
	} else {
		paused = 0;
		ahd_pause(ahd);
	}

	/* Freeze the queue until the client sees the error. */
	ahd_freeze_devq(ahd, scb);
	ahd_freeze_scb(scb);
	ahd->qfreeze_cnt++;
	ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt);

	if (paused == 0)
		ahd_unpause(ahd);

	/* Don't want to clobber the original sense code */
	if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * Clear the SCB_SENSE Flag and perform
		 * a normal command completion.
		 */
		scb->flags &= ~SCB_SENSE;
		ahd_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
		ahd_done(ahd, scb);
		return;
	}
	ahd_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR);
	ahd_set_scsi_status(scb, hscb->shared_data.istatus.scsi_status);
	switch (hscb->shared_data.istatus.scsi_status) {
	case STATUS_PKT_SENSE:
	{
		struct scsi_status_iu_header *siu;

		ahd_sync_sense(ahd, scb, BUS_DMASYNC_POSTREAD);
		siu = (struct scsi_status_iu_header *)scb->sense_data;
		ahd_set_scsi_status(scb, siu->status);
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_SENSE) != 0) {
			ahd_print_path(ahd, scb);
			printk("SCB 0x%x Received PKT Status of 0x%x\n",
			       SCB_GET_TAG(scb), siu->status);
			printk("\tflags = 0x%x, sense len = 0x%x, "
			       "pktfail = 0x%x\n",
			       siu->flags, scsi_4btoul(siu->sense_length),
			       scsi_4btoul(siu->pkt_failures_length));
		}
#endif
		if ((siu->flags & SIU_RSPVALID) != 0) {
			ahd_print_path(ahd, scb);
			if (scsi_4btoul(siu->pkt_failures_length) < 4) {
				printk("Unable to parse pkt_failures\n");
			} else {
				switch (SIU_PKTFAIL_CODE(siu)) {
				case SIU_PFC_NONE:
					printk("No packet failure found\n");
					break;
				case SIU_PFC_CIU_FIELDS_INVALID:
					printk("Invalid Command IU Field\n");
					break;
				case SIU_PFC_TMF_NOT_SUPPORTED:
					printk("TMF not supported\n");
					break;
				case SIU_PFC_TMF_FAILED:
					printk("TMF failed\n");
					break;
				case SIU_PFC_INVALID_TYPE_CODE:
					printk("Invalid L_Q Type code\n");
					break;
				case SIU_PFC_ILLEGAL_REQUEST:
					printk("Illegal request\n");
					/* FALLTHROUGH -- falls into default's break */
				default:
					break;
				}
			}
			if (siu->status == SCSI_STATUS_OK)
				ahd_set_transaction_status(scb,
							   CAM_REQ_CMP_ERR);
		}
		if ((siu->flags & SIU_SNSVALID) != 0) {
			scb->flags |= SCB_PKT_SENSE;
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_SENSE) != 0)
				printk("Sense data available\n");
#endif
		}
		ahd_done(ahd, scb);
		break;
	}
	case SCSI_STATUS_CMD_TERMINATED:
	case SCSI_STATUS_CHECK_COND:
	{
		struct ahd_devinfo devinfo;
		struct ahd_dma_seg *sg;
		struct scsi_sense *sc;
		struct ahd_initiator_tinfo *targ_info;
		struct ahd_tmode_tstate *tstate;
		struct ahd_transinfo *tinfo;
#ifdef AHD_DEBUG
		if (ahd_debug & AHD_SHOW_SENSE) {
			ahd_print_path(ahd, scb);
			printk("SCB %d: requests Check Status\n",
			       SCB_GET_TAG(scb));
		}
#endif
		if (ahd_perform_autosense(scb) == 0)
			break;

		ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb),
				    SCB_GET_TARGET(ahd, scb),
				    SCB_GET_LUN(scb),
				    SCB_GET_CHANNEL(ahd, scb),
				    ROLE_INITIATOR);
		targ_info = ahd_fetch_transinfo(ahd,
						devinfo.channel,
						devinfo.our_scsiid,
						devinfo.target,
						&tstate);
		tinfo = &targ_info->curr;
		sg = scb->sg_list;
		/* Reuse the SCB's CDB area to build the REQUEST SENSE. */
		sc = (struct scsi_sense *)hscb->shared_data.idata.cdb;
		/*
		 * Save off the residual if there is one.
		 */
		ahd_update_residual(ahd, scb);
#ifdef AHD_DEBUG
		if (ahd_debug & AHD_SHOW_SENSE) {
			ahd_print_path(ahd, scb);
			printk("Sending Sense\n");
		}
#endif
		scb->sg_count = 0;
		sg = ahd_sg_setup(ahd, scb, sg, ahd_get_sense_bufaddr(ahd, scb),
				  ahd_get_sense_bufsize(ahd, scb),
				  /*last*/TRUE);
		sc->opcode = REQUEST_SENSE;
		sc->byte2 = 0;
		/* Old (SCSI-2) devices encode the LUN in CDB byte 2. */
		if (tinfo->protocol_version <= SCSI_REV_2
		 && SCB_GET_LUN(scb) < 8)
			sc->byte2 = SCB_GET_LUN(scb) << 5;
		sc->unused[0] = 0;
		sc->unused[1] = 0;
		sc->length = ahd_get_sense_bufsize(ahd, scb);
		sc->control = 0;

		/*
		 * We can't allow the target to disconnect.
		 * This will be an untagged transaction and
		 * having the target disconnect will make this
		 * transaction indestinguishable from outstanding
		 * tagged transactions.
		 */
		hscb->control = 0;

		/*
		 * This request sense could be because the
		 * the device lost power or in some other
		 * way has lost our transfer negotiations.
		 * Renegotiate if appropriate.  Unit attention
		 * errors will be reported before any data
		 * phases occur.
		 */
		if (ahd_get_residual(scb) == ahd_get_transfer_length(scb)) {
			ahd_update_neg_request(ahd, &devinfo,
					       tstate, targ_info,
					       AHD_NEG_IF_NON_ASYNC);
		}
		if (tstate->auto_negotiate & devinfo.target_mask) {
			hscb->control |= MK_MESSAGE;
			scb->flags &=
			    ~(SCB_NEGOTIATE|SCB_ABORT|SCB_DEVICE_RESET);
			scb->flags |= SCB_AUTO_NEGOTIATE;
		}
		hscb->cdb_len = sizeof(*sc);
		ahd_setup_data_scb(ahd, scb);
		scb->flags |= SCB_SENSE;
		ahd_queue_scb(ahd, scb);
		break;
	}
	case SCSI_STATUS_OK:
		printk("%s: Interrupted for status of 0???\n",
		       ahd_name(ahd));
		/* FALLTHROUGH */
	default:
		ahd_done(ahd, scb);
		break;
	}
}

/*
 * Dispatch a completed SCB: status errors go to
 * ahd_handle_scsi_status(); clean completions get their residual
 * computed and are finished normally.
 */
static void
ahd_handle_scb_status(struct ahd_softc *ahd, struct scb *scb)
{
	if (scb->hscb->shared_data.istatus.scsi_status != 0) {
		ahd_handle_scsi_status(ahd, scb);
	} else {
		ahd_calc_residual(ahd, scb);
		ahd_done(ahd, scb);
	}
}

/*
 * Calculate the residual for a just completed SCB.
 */
static void
ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct initiator_status *spkt;
	uint32_t sgptr;
	uint32_t resid_sgptr;
	uint32_t resid;

	/*
	 * 5 cases.
	 * 1) No residual.
	 *    SG_STATUS_VALID clear in sgptr.
	 * 2) Transferless command
	 * 3) Never performed any transfers.
	 *    sgptr has SG_FULL_RESID set.
	 * 4) No residual but target did not
	 *    save data pointers after the
	 *    last transfer, so sgptr was
	 *    never updated.
	 * 5) We have a partial residual.
	 *    Use residual_sgptr to determine
	 *    where we are.
	 */
	hscb = scb->hscb;
	sgptr = ahd_le32toh(hscb->sgptr);
	if ((sgptr & SG_STATUS_VALID) == 0)
		/* Case 1 */
		return;
	sgptr &= ~SG_STATUS_VALID;

	if ((sgptr & SG_LIST_NULL) != 0)
		/* Case 2 */
		return;

	/*
	 * Residual fields are the same in both
	 * target and initiator status packets,
	 * so we can always use the initiator fields
	 * regardless of the role for this SCB.
	 */
	spkt = &hscb->shared_data.istatus;
	resid_sgptr = ahd_le32toh(spkt->residual_sgptr);
	if ((sgptr & SG_FULL_RESID) != 0) {
		/* Case 3 */
		resid = ahd_get_transfer_length(scb);
	} else if ((resid_sgptr & SG_LIST_NULL) != 0) {
		/* Case 4 */
		return;
	} else if ((resid_sgptr & SG_OVERRUN_RESID) != 0) {
		ahd_print_path(ahd, scb);
		printk("data overrun detected Tag == 0x%x.\n",
		       SCB_GET_TAG(scb));
		ahd_freeze_devq(ahd, scb);
		ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR);
		ahd_freeze_scb(scb);
		return;
	} else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
		panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
		/* NOTREACHED */
	} else {
		struct ahd_dma_seg *sg;

		/*
		 * Remainder of the SG where the transfer
		 * stopped.
		 */
		resid = ahd_le32toh(spkt->residual_datacnt) & AHD_SG_LEN_MASK;
		sg = ahd_sg_bus_to_virt(ahd, scb, resid_sgptr & SG_PTR_MASK);

		/* The residual sg_ptr always points to the next sg */
		sg--;

		/*
		 * Add up the contents of all residual
		 * SG segments that are after the SG where
		 * the transfer stopped.
		 */
		while ((ahd_le32toh(sg->len) & AHD_DMA_LAST_SEG) == 0) {
			sg++;
			resid += ahd_le32toh(sg->len) & AHD_SG_LEN_MASK;
		}
	}
	/* During autosense the residual belongs to the sense transfer. */
	if ((scb->flags & SCB_SENSE) == 0)
		ahd_set_residual(scb, resid);
	else
		ahd_set_sense_residual(scb, resid);

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MISC) != 0) {
		ahd_print_path(ahd, scb);
		printk("Handled %sResidual of %d bytes\n",
		       (scb->flags & SCB_SENSE) ? "Sense " : "", resid);
	}
#endif
}

/******************************* Target Mode **********************************/
#ifdef AHD_TARGET_MODE
/*
 * Add a target mode event to this lun's queue
 */
static void
ahd_queue_lstate_event(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate,
		       u_int initiator_id, u_int event_type, u_int event_arg)
{
	struct ahd_tmode_event *event;
	int pending;

	xpt_freeze_devq(lstate->path, /*count*/1);
	if (lstate->event_w_idx >= lstate->event_r_idx)
		pending = lstate->event_w_idx - lstate->event_r_idx;
	else
		pending = AHD_TMODE_EVENT_BUFFER_SIZE + 1
			- (lstate->event_r_idx - lstate->event_w_idx);

	if (event_type == EVENT_TYPE_BUS_RESET
	 || event_type == MSG_BUS_DEV_RESET) {
		/*
		 * Any earlier events are irrelevant, so reset our buffer.
		 * This has the effect of allowing us to deal with reset
		 * floods (an external device holding down the reset line)
		 * without losing the event that is really interesting.
		 */
		lstate->event_r_idx = 0;
		lstate->event_w_idx = 0;
		xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
	}

	if (pending == AHD_TMODE_EVENT_BUFFER_SIZE) {
		/* Ring full: drop the oldest event to make room. */
		xpt_print_path(lstate->path);
		printk("immediate event %x:%x lost\n",
		       lstate->event_buffer[lstate->event_r_idx].event_type,
		       lstate->event_buffer[lstate->event_r_idx].event_arg);
		lstate->event_r_idx++;
		if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE)
			lstate->event_r_idx = 0;
		xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
	}

	event = &lstate->event_buffer[lstate->event_w_idx];
	event->initiator_id = initiator_id;
	event->event_type = event_type;
	event->event_arg = event_arg;
	lstate->event_w_idx++;
	if (lstate->event_w_idx == AHD_TMODE_EVENT_BUFFER_SIZE)
		lstate->event_w_idx = 0;
}

/*
 * Send any target mode events queued up waiting
 * for immediate notify resources.
 */
void
ahd_send_lstate_events(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate)
{
	struct ccb_hdr *ccbh;
	struct ccb_immed_notify *inot;

	/*
	 * Pair queued events with available immediate-notify CCBs
	 * until either the event buffer or the CCB list is exhausted.
	 */
	while (lstate->event_r_idx != lstate->event_w_idx
	    && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
		struct ahd_tmode_event *event;

		event = &lstate->event_buffer[lstate->event_r_idx];
		SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
		inot = (struct ccb_immed_notify *)ccbh;
		switch (event->event_type) {
		case EVENT_TYPE_BUS_RESET:
			ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
			break;
		default:
			/* All other events are delivered as received messages. */
			ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
			inot->message_args[0] = event->event_type;
			inot->message_args[1] = event->event_arg;
			break;
		}
		inot->initiator_id = event->initiator_id;
		inot->sense_len = 0;
		xpt_done((union ccb *)inot);
		lstate->event_r_idx++;
		if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE)
			lstate->event_r_idx = 0;
	}
}
#endif

/******************** Sequencer Program Patching/Download *********************/

#ifdef AHD_DUMP_SEQ
/*
 * Debug aid: dump the sequencer program RAM, one 32bit
 * instruction per line, to the console.
 */
void
ahd_dumpseq(struct ahd_softc* ahd)
{
	int i;
	int max_prog;

	max_prog = 2048;

	ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahd_outw(ahd, PRGMCNT, 0);
	for (i = 0; i < max_prog; i++) {
		uint8_t ins_bytes[4];

		ahd_insb(ahd, SEQRAM, ins_bytes, 4);
		printk("0x%08x\n", ins_bytes[0] << 24
				 | ins_bytes[1] << 16
				 | ins_bytes[2] << 8
				 | ins_bytes[3]);
	}
}
#endif

/*
 * Download the sequencer program into the chip, applying
 * firmware patches as appropriate for this controller and
 * recording the critical sections that apply to this load.
 */
static void
ahd_loadseq(struct ahd_softc *ahd)
{
	struct	cs cs_table[num_critical_sections];
	u_int	begin_set[num_critical_sections];
	u_int	end_set[num_critical_sections];
	const struct patch *cur_patch;
	u_int	cs_count;
	u_int	cur_cs;
	u_int	i;
	int	downloaded;
	u_int	skip_addr;
	u_int	sg_prefetch_cnt;
	u_int	sg_prefetch_cnt_limit;
	u_int	sg_prefetch_align;
	u_int	sg_size;
	u_int	cacheline_mask;
	uint8_t	download_consts[DOWNLOAD_CONST_COUNT];

	if (bootverbose)
		printk("%s: Downloading Sequencer Program...",
		       ahd_name(ahd));

#if DOWNLOAD_CONST_COUNT != 8
#error "Download Const Mismatch"
#endif
	/*
	 * Start out with 0 critical sections
	 *
	 * that apply to this firmware load.
	 */
	cs_count = 0;
	cur_cs = 0;
	memset(begin_set, 0, sizeof(begin_set));
	memset(end_set, 0, sizeof(end_set));

	/*
	 * Setup downloadable constant table.
	 *
	 * The computation for the S/G prefetch variables is
	 * a bit complicated.  We would like to always fetch
	 * in terms of cachelined sized increments.  However,
	 * if the cacheline is not an even multiple of the
	 * SG element size or is larger than our SG RAM, using
	 * just the cache size might leave us with only a portion
	 * of an SG element at the tail of a prefetch.  If the
	 * cacheline is larger than our S/G prefetch buffer less
	 * the size of an SG element, we may round down to a cacheline
	 * that doesn't contain any or all of the S/G of interest
	 * within the bounds of our S/G ram.  Provide variables to
	 * the sequencer that will allow it to handle these edge
	 * cases.
	 */
	/* Start by aligning to the nearest cacheline. */
	sg_prefetch_align = ahd->pci_cachesize;
	if (sg_prefetch_align == 0)
		sg_prefetch_align = 8;
	/* Round down to the nearest power of 2. */
	while (powerof2(sg_prefetch_align) == 0)
		sg_prefetch_align--;

	cacheline_mask = sg_prefetch_align - 1;

	/*
	 * If the cacheline boundary is greater than half our prefetch RAM
	 * we risk not being able to fetch even a single complete S/G
	 * segment if we align to that boundary.
	 */
	if (sg_prefetch_align > CCSGADDR_MAX/2)
		sg_prefetch_align = CCSGADDR_MAX/2;
	/* Start by fetching a single cacheline. */
	sg_prefetch_cnt = sg_prefetch_align;
	/*
	 * Increment the prefetch count by cachelines until
	 * at least one S/G element will fit.
	 */
	sg_size = sizeof(struct ahd_dma_seg);
	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
		sg_size = sizeof(struct ahd_dma64_seg);
	while (sg_prefetch_cnt < sg_size)
		sg_prefetch_cnt += sg_prefetch_align;
	/*
	 * If the cacheline is not an even multiple of
	 * the S/G size, we may only get a partial S/G when
	 * we align. Add a cacheline if this is the case.
	 */
	if ((sg_prefetch_align % sg_size) != 0
	 && (sg_prefetch_cnt < CCSGADDR_MAX))
		sg_prefetch_cnt += sg_prefetch_align;
	/*
	 * Lastly, compute a value that the sequencer can use
	 * to determine if the remainder of the CCSGRAM buffer
	 * has a full S/G element in it.
	 */
	sg_prefetch_cnt_limit = -(sg_prefetch_cnt - sg_size + 1);
	/* Populate the constant table consumed by the firmware. */
	download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
	download_consts[SG_PREFETCH_CNT_LIMIT] = sg_prefetch_cnt_limit;
	download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_align - 1);
	download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_align - 1);
	download_consts[SG_SIZEOF] = sg_size;
	download_consts[PKT_OVERRUN_BUFOFFSET] =
		(ahd->overrun_buf - (uint8_t *)ahd->qoutfifo) / 256;
	download_consts[SCB_TRANSFER_SIZE] = SCB_TRANSFER_SIZE_1BYTE_LUN;
	download_consts[CACHELINE_MASK] = cacheline_mask;
	cur_patch = patches;
	downloaded = 0;
	skip_addr = 0;
	/* Place the sequencer in program-load mode and rewind its PC. */
	ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahd_outw(ahd, PRGMCNT, 0);

	for (i = 0; i < sizeof(seqprog)/4; i++) {
		if (ahd_check_patch(ahd, &cur_patch, i, &skip_addr) == 0) {
			/*
			 * Don't download this instruction as it
			 * is in a patch that was removed.
			 */
			continue;
		}
		/*
		 * Move through the CS table until we find a CS
		 * that might apply to this instruction.
		 */
		for (; cur_cs < num_critical_sections; cur_cs++) {
			if (critical_sections[cur_cs].end <= i) {
				/*
				 * Passed the end of this CS; close it out
				 * if we had opened it, recording addresses
				 * in downloaded (post-patch) instruction
				 * space rather than source space.
				 */
				if (begin_set[cs_count] == TRUE
				 && end_set[cs_count] == FALSE) {
					cs_table[cs_count].end = downloaded;
					end_set[cs_count] = TRUE;
					cs_count++;
				}
				continue;
			}
			if (critical_sections[cur_cs].begin <= i
			 && begin_set[cs_count] == FALSE) {
				cs_table[cs_count].begin = downloaded;
				begin_set[cs_count] = TRUE;
			}
			break;
		}
		ahd_download_instr(ahd, i, download_consts);
		downloaded++;
	}

	ahd->num_critical_sections = cs_count;
	if (cs_count != 0) {
		/* Keep a private copy of the applicable critical sections. */
		cs_count *= sizeof(struct cs);
		ahd->critical_sections = kmalloc(cs_count, GFP_ATOMIC);
		if (ahd->critical_sections == NULL)
			panic("ahd_loadseq: Could not malloc");
		memcpy(ahd->critical_sections, cs_table, cs_count);
	}
	/* Leave program-load mode. */
	ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE);

	if (bootverbose) {
		printk(" %d instructions downloaded\n", downloaded);
		printk("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n",
		       ahd_name(ahd), ahd->features, ahd->bugs, ahd->flags);
	}
}

/*
 * Determine whether the instruction at start_instr should be
 * downloaded, advancing *start_patch and *skip_addr as patches
 * are accepted or rejected.  Returns 1 to download, 0 to skip.
 */
static int
ahd_check_patch(struct ahd_softc *ahd, const struct patch **start_patch,
		u_int start_instr, u_int *skip_addr)
{
	const struct patch *cur_patch;
	const struct patch *last_patch;
	u_int num_patches;

	num_patches = ARRAY_SIZE(patches);
	last_patch = &patches[num_patches];
	cur_patch = *start_patch;

	while (cur_patch < last_patch && start_instr == cur_patch->begin) {
		if (cur_patch->patch_func(ahd) == 0) {
			/* Start rejecting code */
			*skip_addr = start_instr + cur_patch->skip_instr;
			cur_patch += cur_patch->skip_patch;
		} else {
			/* Accepted this patch.  Advance to the next
			 * one and wait for our instruction pointer to
			 * hit this point.
			 */
			cur_patch++;
		}
	}

	*start_patch = cur_patch;
	if (start_instr < *skip_addr)
		/* Still skipping */
		return (0);

	return (1);
}

/*
 * Translate a sequencer source address into its downloaded
 * (post-patch) address by subtracting the count of instructions
 * skipped by rejected patches before that address.
 */
static u_int
ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address)
{
	const struct patch *cur_patch;
	int address_offset;
	u_int skip_addr;
	u_int i;

	address_offset = 0;
	cur_patch = patches;
	skip_addr = 0;

	for (i = 0; i < address;) {
		ahd_check_patch(ahd, &cur_patch, i, &skip_addr);

		if (skip_addr > i) {
			int end_addr;

			end_addr = min(address, skip_addr);
			address_offset += end_addr - i;
			i = skip_addr;
		} else {
			i++;
		}
	}
	return (address - address_offset);
}

/*
 * Fix up a single firmware instruction (resolve jump targets,
 * substitute download constants, compute parity) and write it
 * into sequencer RAM.
 */
static void
ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts)
{
	union	ins_formats instr;
	struct	ins_format1 *fmt1_ins;
	struct	ins_format3 *fmt3_ins;
	u_int	opcode;

	/*
	 * The firmware is always compiled into a little endian format.
	 */
	instr.integer = ahd_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);

	fmt1_ins = &instr.format1;
	fmt3_ins = NULL;

	/* Pull the opcode */
	opcode = instr.format1.opcode;
	switch (opcode) {
	case AIC_OP_JMP:
	case AIC_OP_JC:
	case AIC_OP_JNC:
	case AIC_OP_CALL:
	case AIC_OP_JNE:
	case AIC_OP_JNZ:
	case AIC_OP_JE:
	case AIC_OP_JZ:
	{
		/* Branch targets must be rewritten into patched space. */
		fmt3_ins = &instr.format3;
		fmt3_ins->address = ahd_resolve_seqaddr(ahd, fmt3_ins->address);
		/* FALLTHROUGH */
	}
	case AIC_OP_OR:
	case AIC_OP_AND:
	case AIC_OP_XOR:
	case AIC_OP_ADD:
	case AIC_OP_ADC:
	case AIC_OP_BMOV:
		/*
		 * A set parity bit in the source encoding flags an
		 * immediate that indexes the download-constant table.
		 */
		if (fmt1_ins->parity != 0) {
			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
		}
		fmt1_ins->parity = 0;
		/* FALLTHROUGH */
	case AIC_OP_ROL:
	{
		int i, count;

		/* Calculate odd parity for the instruction */
		for (i = 0, count = 0; i < 31; i++) {
			uint32_t mask;

			mask = 0x01 << i;
			if ((instr.integer & mask) != 0)
				count++;
		}
		if ((count & 0x01) == 0)
			instr.format1.parity = 1;

		/* The sequencer is a little endian cpu */
		instr.integer = ahd_htole32(instr.integer);
		ahd_outsb(ahd, SEQRAM, instr.bytes, 4);
		break;
	}
	default:
		panic("Unknown opcode encountered in seq program");
		break;
	}
}

/*
 * Empirically determine the depth of the sequencer's stack
 * by writing test patterns and reading them back.
 */
static int
ahd_probe_stack_size(struct ahd_softc *ahd)
{
	int last_probe;

	last_probe = 0;
	while (1) {
		int i;

		/*
		 * We avoid using 0 as a pattern to avoid
		 * confusion if the stack implementation
		 * "back-fills" with zeros when "popping"
		 * entries.
		 */
		for (i = 1; i <= last_probe+1; i++) {
			ahd_outb(ahd, STACK, i & 0xFF);
			ahd_outb(ahd, STACK, (i >> 8) & 0xFF);
		}

		/* Verify */
		for (i = last_probe+1; i > 0; i--) {
			u_int stack_entry;

			stack_entry = ahd_inb(ahd, STACK)
				    |(ahd_inb(ahd, STACK) << 8);
			if (stack_entry != i)
				goto sized;
		}
		last_probe++;
	}
sized:
	return (last_probe);
}

/*
 * Print a register value followed by the symbolic names of any
 * bits set in it, wrapping the output at wrap_point columns.
 * Returns the number of characters printed.
 */
int
ahd_print_register(const ahd_reg_parse_entry_t *table, u_int num_entries,
		   const char *name, u_int address, u_int value,
		   u_int *cur_column, u_int wrap_point)
{
	int	printed;
	u_int	printed_mask;

	if (cur_column != NULL && *cur_column >= wrap_point) {
		printk("\n");
		*cur_column = 0;
	}
	printed = printk("%s[0x%x]", name, value);
	if (table == NULL) {
		printed += printk(" ");
		*cur_column += printed;
		return (printed);
	}
	printed_mask = 0;
	while (printed_mask != 0xFF) {
		int entry;

		for (entry = 0; entry < num_entries; entry++) {
			/*
			 * Skip entries that don't match the value or
			 * whose bits have already been reported.
			 */
			if (((value & table[entry].mask)
			  != table[entry].value)
			 || ((printed_mask & table[entry].mask)
			  == table[entry].mask))
				continue;

			printed += printk("%s%s",
					  printed_mask == 0 ?
					  ":(" : "|",
					  table[entry].name);
			printed_mask |= table[entry].mask;
			break;
		}
		if (entry >= num_entries)
			break;
	}
	if (printed_mask != 0)
		printed += printk(") ");
	else
		printed += printk(" ");
	if (cur_column != NULL)
		*cur_column += printed;
	return (printed);
}

/*
 * Dump a snapshot of the controller's register, queue, and SCB
 * state to the console (diagnostic aid).  Pauses the chip if it
 * is not already paused and restores its run state on exit.
 */
void
ahd_dump_card_state(struct ahd_softc *ahd)
{
	struct scb	*scb;
	ahd_mode_state	 saved_modes;
	u_int		 dffstat;
	int		 paused;
	u_int		 scb_index;
	u_int		 saved_scb_index;
	u_int		 cur_col;
	int		 i;

	if (ahd_is_paused(ahd)) {
		paused = 1;
	} else {
		paused = 0;
		ahd_pause(ahd);
	}
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	printk(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n"
	       "%s: Dumping Card State at program address 0x%x Mode 0x%x\n",
	       ahd_name(ahd),
	       ahd_inw(ahd, CURADDR),
	       ahd_build_mode_state(ahd, ahd->saved_src_mode,
				    ahd->saved_dst_mode));
	if (paused)
		printk("Card was paused\n");

	if (ahd_check_cmdcmpltqueues(ahd))
		printk("Completions are pending\n");

	/*
	 * Mode independent registers.
	 */
	cur_col = 0;
	ahd_intstat_print(ahd_inb(ahd, INTSTAT), &cur_col, 50);
	ahd_seloid_print(ahd_inb(ahd, SELOID), &cur_col, 50);
	ahd_selid_print(ahd_inb(ahd, SELID), &cur_col, 50);
	ahd_hs_mailbox_print(ahd_inb(ahd, LOCAL_HS_MAILBOX), &cur_col, 50);
	ahd_intctl_print(ahd_inb(ahd, INTCTL), &cur_col, 50);
	ahd_seqintstat_print(ahd_inb(ahd, SEQINTSTAT), &cur_col, 50);
	ahd_saved_mode_print(ahd_inb(ahd, SAVED_MODE), &cur_col, 50);
	ahd_dffstat_print(ahd_inb(ahd, DFFSTAT), &cur_col, 50);
	ahd_scsisigi_print(ahd_inb(ahd, SCSISIGI), &cur_col, 50);
	ahd_scsiphase_print(ahd_inb(ahd, SCSIPHASE), &cur_col, 50);
	ahd_scsibus_print(ahd_inb(ahd, SCSIBUS), &cur_col, 50);
	ahd_lastphase_print(ahd_inb(ahd, LASTPHASE), &cur_col, 50);
	ahd_scsiseq0_print(ahd_inb(ahd, SCSISEQ0), &cur_col, 50);
	ahd_scsiseq1_print(ahd_inb(ahd, SCSISEQ1), &cur_col, 50);
	ahd_seqctl0_print(ahd_inb(ahd, SEQCTL0), &cur_col, 50);
	ahd_seqintctl_print(ahd_inb(ahd, SEQINTCTL), &cur_col, 50);
	ahd_seq_flags_print(ahd_inb(ahd, SEQ_FLAGS), &cur_col, 50);
	ahd_seq_flags2_print(ahd_inb(ahd, SEQ_FLAGS2), &cur_col, 50);
	ahd_qfreeze_count_print(ahd_inw(ahd, QFREEZE_COUNT), &cur_col, 50);
	ahd_kernel_qfreeze_count_print(ahd_inw(ahd, KERNEL_QFREEZE_COUNT),
				       &cur_col, 50);
	ahd_mk_message_scb_print(ahd_inw(ahd, MK_MESSAGE_SCB), &cur_col, 50);
	ahd_mk_message_scsiid_print(ahd_inb(ahd, MK_MESSAGE_SCSIID),
				    &cur_col, 50);
	ahd_sstat0_print(ahd_inb(ahd, SSTAT0), &cur_col, 50);
	ahd_sstat1_print(ahd_inb(ahd, SSTAT1), &cur_col, 50);
	ahd_sstat2_print(ahd_inb(ahd, SSTAT2), &cur_col, 50);
	ahd_sstat3_print(ahd_inb(ahd, SSTAT3), &cur_col, 50);
	ahd_perrdiag_print(ahd_inb(ahd, PERRDIAG), &cur_col, 50);
	ahd_simode1_print(ahd_inb(ahd, SIMODE1), &cur_col, 50);
	ahd_lqistat0_print(ahd_inb(ahd, LQISTAT0), &cur_col, 50);
	ahd_lqistat1_print(ahd_inb(ahd, LQISTAT1), &cur_col, 50);
	ahd_lqistat2_print(ahd_inb(ahd, LQISTAT2), &cur_col, 50);
	ahd_lqostat0_print(ahd_inb(ahd, LQOSTAT0), &cur_col, 50);
	ahd_lqostat1_print(ahd_inb(ahd, LQOSTAT1), &cur_col, 50);
	ahd_lqostat2_print(ahd_inb(ahd, LQOSTAT2), &cur_col, 50);
	printk("\n");
	printk("\nSCB Count = %d CMDS_PENDING = %d LASTSCB 0x%x "
	       "CURRSCB 0x%x NEXTSCB 0x%x\n",
	       ahd->scb_data.numscbs, ahd_inw(ahd, CMDS_PENDING),
	       ahd_inw(ahd, LASTSCB), ahd_inw(ahd, CURRSCB),
	       ahd_inw(ahd, NEXTSCB));
	cur_col = 0;
	/* QINFIFO */
	ahd_search_qinfifo(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS,
			   CAM_LUN_WILDCARD, SCB_LIST_NULL,
			   ROLE_UNKNOWN, /*status*/0, SEARCH_PRINT);
	saved_scb_index = ahd_get_scbptr(ahd);
	printk("Pending list:");
	i = 0;
	LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
		if (i++ > AHD_SCB_MAX)
			break;
		cur_col = printk("\n%3d FIFO_USE[0x%x] ", SCB_GET_TAG(scb),
				 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT));
		ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
		ahd_scb_control_print(ahd_inb_scbram(ahd, SCB_CONTROL),
				      &cur_col, 60);
		ahd_scb_scsiid_print(ahd_inb_scbram(ahd, SCB_SCSIID),
				     &cur_col, 60);
	}
	printk("\nTotal %d\n", i);

	printk("Kernel Free SCB list: ");
	i = 0;
	TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
		struct scb *list_scb;

		/* Walk each collision chain hanging off this entry. */
		list_scb = scb;
		do {
			printk("%d ", SCB_GET_TAG(list_scb));
			list_scb = LIST_NEXT(list_scb, collision_links);
		} while (list_scb && i++ < AHD_SCB_MAX);
	}

	LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) {
		if (i++ > AHD_SCB_MAX)
			break;
		printk("%d ", SCB_GET_TAG(scb));
	}
	printk("\n");

	printk("Sequencer Complete DMA-inprog list: ");
	scb_index = ahd_inw(ahd, COMPLETE_SCB_DMAINPROG_HEAD);
	i = 0;
	while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
		ahd_set_scbptr(ahd, scb_index);
		printk("%d ", scb_index);
		scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
	}
	printk("\n");

	printk("Sequencer Complete list: ");
	scb_index = ahd_inw(ahd, COMPLETE_SCB_HEAD);
	i = 0;
	while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
		ahd_set_scbptr(ahd, scb_index);
		printk("%d ", scb_index);
		scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
	}
	printk("\n");

	printk("Sequencer DMA-Up and Complete list: ");
	scb_index = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
	i = 0;
	while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
		ahd_set_scbptr(ahd, scb_index);
		printk("%d ", scb_index);
		scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
	}
	printk("\n");
	printk("Sequencer On QFreeze and Complete list: ");
	scb_index = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD);
	i = 0;
	while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
		ahd_set_scbptr(ahd, scb_index);
		printk("%d ", scb_index);
		scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
	}
	printk("\n");
	ahd_set_scbptr(ahd, saved_scb_index);
	dffstat = ahd_inb(ahd, DFFSTAT);
	/* Dump the state of both data FIFOs. */
	for (i = 0; i < 2; i++) {
#ifdef AHD_DEBUG
		struct scb *fifo_scb;
#endif
		u_int fifo_scbptr;

		ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i);
		fifo_scbptr = ahd_get_scbptr(ahd);
		printk("\n\n%s: FIFO%d %s, LONGJMP == 0x%x, SCB 0x%x\n",
		       ahd_name(ahd), i,
		       (dffstat & (FIFO0FREE << i)) ? "Free" : "Active",
		       ahd_inw(ahd, LONGJMP_ADDR), fifo_scbptr);
		cur_col = 0;
		ahd_seqimode_print(ahd_inb(ahd, SEQIMODE), &cur_col, 50);
		ahd_seqintsrc_print(ahd_inb(ahd, SEQINTSRC), &cur_col, 50);
		ahd_dfcntrl_print(ahd_inb(ahd, DFCNTRL), &cur_col, 50);
		ahd_dfstatus_print(ahd_inb(ahd, DFSTATUS), &cur_col, 50);
		ahd_sg_cache_shadow_print(ahd_inb(ahd, SG_CACHE_SHADOW),
					  &cur_col, 50);
		ahd_sg_state_print(ahd_inb(ahd, SG_STATE), &cur_col, 50);
		ahd_dffsxfrctl_print(ahd_inb(ahd, DFFSXFRCTL), &cur_col, 50);
		ahd_soffcnt_print(ahd_inb(ahd, SOFFCNT), &cur_col, 50);
		ahd_mdffstat_print(ahd_inb(ahd, MDFFSTAT), &cur_col, 50);
		if (cur_col > 50) {
			printk("\n");
			cur_col = 0;
		}
		cur_col += printk("SHADDR = 0x%x%x, SHCNT = 0x%x ",
				  ahd_inl(ahd, SHADDR+4),
				  ahd_inl(ahd, SHADDR),
				  (ahd_inb(ahd, SHCNT)
				| (ahd_inb(ahd, SHCNT + 1) << 8)
				| (ahd_inb(ahd, SHCNT + 2) << 16)));
		if (cur_col > 50) {
			printk("\n");
			cur_col = 0;
		}
		cur_col += printk("HADDR = 0x%x%x, HCNT = 0x%x ",
				  ahd_inl(ahd, HADDR+4),
				  ahd_inl(ahd, HADDR),
				  (ahd_inb(ahd, HCNT)
				| (ahd_inb(ahd, HCNT + 1) << 8)
				| (ahd_inb(ahd, HCNT + 2) << 16)));
		ahd_ccsgctl_print(ahd_inb(ahd, CCSGCTL), &cur_col, 50);
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_SG) != 0) {
			fifo_scb = ahd_lookup_scb(ahd, fifo_scbptr);
			if (fifo_scb != NULL)
				ahd_dump_sglist(fifo_scb);
		}
#endif
	}
	printk("\nLQIN: ");
	for (i = 0; i < 20; i++)
		printk("0x%x ", ahd_inb(ahd, LQIN + i));
	printk("\n");
	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
	printk("%s: LQISTATE = 0x%x, LQOSTATE = 0x%x, OPTIONMODE = 0x%x\n",
	       ahd_name(ahd), ahd_inb(ahd, LQISTATE), ahd_inb(ahd, LQOSTATE),
	       ahd_inb(ahd, OPTIONMODE));
	printk("%s: OS_SPACE_CNT = 0x%x MAXCMDCNT = 0x%x\n",
	       ahd_name(ahd), ahd_inb(ahd, OS_SPACE_CNT),
	       ahd_inb(ahd, MAXCMDCNT));
	printk("%s: SAVED_SCSIID = 0x%x SAVED_LUN = 0x%x\n",
	       ahd_name(ahd), ahd_inb(ahd, SAVED_SCSIID),
	       ahd_inb(ahd, SAVED_LUN));
	ahd_simode0_print(ahd_inb(ahd, SIMODE0), &cur_col, 50);
	printk("\n");
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
	cur_col = 0;
	ahd_ccscbctl_print(ahd_inb(ahd, CCSCBCTL), &cur_col, 50);
	printk("\n");
	ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
	printk("%s: REG0 == 0x%x, SINDEX = 0x%x, DINDEX = 0x%x\n",
	       ahd_name(ahd), ahd_inw(ahd, REG0), ahd_inw(ahd, SINDEX),
	       ahd_inw(ahd, DINDEX));
	printk("%s: SCBPTR == 0x%x, SCB_NEXT == 0x%x, SCB_NEXT2 == 0x%x\n",
	       ahd_name(ahd), ahd_get_scbptr(ahd),
	       ahd_inw_scbram(ahd, SCB_NEXT),
	       ahd_inw_scbram(ahd, SCB_NEXT2));
	printk("CDB %x %x %x %x %x %x\n",
	       ahd_inb_scbram(ahd, SCB_CDB_STORE),
	       ahd_inb_scbram(ahd, SCB_CDB_STORE+1),
	       ahd_inb_scbram(ahd, SCB_CDB_STORE+2),
	       ahd_inb_scbram(ahd, SCB_CDB_STORE+3),
	       ahd_inb_scbram(ahd, SCB_CDB_STORE+4),
	       ahd_inb_scbram(ahd, SCB_CDB_STORE+5));
	printk("STACK:");
	/*
	 * Reading the stack pops it, so save the entries and
	 * push them back afterwards to leave the chip unchanged.
	 */
	for (i = 0; i < ahd->stack_size; i++) {
		ahd->saved_stack[i] =
		    ahd_inb(ahd, STACK)|(ahd_inb(ahd, STACK) << 8);
		printk(" 0x%x", ahd->saved_stack[i]);
	}
	for (i = ahd->stack_size-1; i >= 0; i--) {
		ahd_outb(ahd, STACK, ahd->saved_stack[i] & 0xFF);
		ahd_outb(ahd, STACK, (ahd->saved_stack[i] >> 8) & 0xFF);
	}
	printk("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n");
	ahd_restore_modes(ahd, saved_modes);
	if (paused == 0)
		ahd_unpause(ahd);
}

#if 0
/* Debug aid: dump key fields of every SCB in SCB RAM. */
void
ahd_dump_scbs(struct ahd_softc *ahd)
{
	ahd_mode_state saved_modes;
	u_int	       saved_scb_index;
	int	       i;

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	saved_scb_index = ahd_get_scbptr(ahd);
	for (i = 0; i < AHD_SCB_MAX; i++) {
		ahd_set_scbptr(ahd, i);
		printk("%3d", i);
		printk("(CTRL 0x%x ID 0x%x N 0x%x N2 0x%x SG 0x%x, RSG 0x%x)\n",
		       ahd_inb_scbram(ahd, SCB_CONTROL),
		       ahd_inb_scbram(ahd, SCB_SCSIID),
		       ahd_inw_scbram(ahd, SCB_NEXT),
		       ahd_inw_scbram(ahd, SCB_NEXT2),
		       ahd_inl_scbram(ahd, SCB_SGPTR),
		       ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR));
	}
	printk("\n");
	ahd_set_scbptr(ahd, saved_scb_index);
	ahd_restore_modes(ahd, saved_modes);
}
#endif  /* 0 */

/**************************** Flexport Logic **********************************/
/*
 * Read count 16bit words from 16bit word address start_addr
 * from the
 * SEEPROM attached to the controller, into buf, using the controller's
 * SEEPROM reading state machine.  Optionally treat the data as a byte
 * stream in terms of byte order.
 */
int
ahd_read_seeprom(struct ahd_softc *ahd, uint16_t *buf,
		 u_int start_addr, u_int count, int bytestream)
{
	u_int cur_addr;
	u_int end_addr;
	int   error;

	/*
	 * If we never make it through the loop even once,
	 * we were passed invalid arguments.
	 */
	error = EINVAL;
	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	end_addr = start_addr + count;
	for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) {

		ahd_outb(ahd, SEEADR, cur_addr);
		ahd_outb(ahd, SEECTL, SEEOP_READ | SEESTART);
		
		error = ahd_wait_seeprom(ahd);
		if (error)
			break;
		if (bytestream != 0) {
			uint8_t *bytestream_ptr;

			/* Preserve on-device byte order for the caller. */
			bytestream_ptr = (uint8_t *)buf;
			*bytestream_ptr++ = ahd_inb(ahd, SEEDAT);
			*bytestream_ptr = ahd_inb(ahd, SEEDAT+1);
		} else {
			/*
			 * ahd_inw() already handles machine byte order.
			 */
			*buf = ahd_inw(ahd, SEEDAT);
		}
		buf++;
	}
	return (error);
}

/*
 * Write count 16bit words from buf, into SEEPROM attached to the
 * controller starting at 16bit word address start_addr, using the
 * controller's SEEPROM writing state machine.
 */
int
ahd_write_seeprom(struct ahd_softc *ahd, uint16_t *buf,
		  u_int start_addr, u_int count)
{
	u_int cur_addr;
	u_int end_addr;
	int   error;
	int   retval;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	error = ENOENT;

	/* Place the chip into write-enable mode */
	ahd_outb(ahd, SEEADR, SEEOP_EWEN_ADDR);
	ahd_outb(ahd, SEECTL, SEEOP_EWEN | SEESTART);
	error = ahd_wait_seeprom(ahd);
	if (error)
		return (error);

	/*
	 * Write the data.  If we don't get through the loop at
	 * least once, the arguments were invalid.
	 */
	retval = EINVAL;
	end_addr = start_addr + count;
	for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) {
		ahd_outw(ahd, SEEDAT, *buf++);
		ahd_outb(ahd, SEEADR, cur_addr);
		ahd_outb(ahd, SEECTL, SEEOP_WRITE | SEESTART);
		
		retval = ahd_wait_seeprom(ahd);
		if (retval)
			break;
	}

	/*
	 * Disable writes.
	 */
	ahd_outb(ahd, SEEADR, SEEOP_EWDS_ADDR);
	ahd_outb(ahd, SEECTL, SEEOP_EWDS | SEESTART);
	error = ahd_wait_seeprom(ahd);
	if (error)
		return (error);
	return (retval);
}

/*
 * Wait ~100us for the serial eeprom to satisfy our request.
 */
static int
ahd_wait_seeprom(struct ahd_softc *ahd)
{
	int cnt;

	/* Poll in 5us steps until the SEEPROM engine goes idle. */
	cnt = 5000;
	while ((ahd_inb(ahd, SEESTAT) & (SEEARBACK|SEEBUSY)) != 0 && --cnt)
		ahd_delay(5);

	if (cnt == 0)
		return (ETIMEDOUT);
	return (0);
}

/*
 * Validate the two checksums in the per_channel
 * vital product data struct.  Returns 1 on success, 0 on failure.
 */
static int
ahd_verify_vpd_cksum(struct vpd_config *vpd)
{
	int i;
	int maxaddr;
	uint32_t checksum;
	uint8_t *vpdarray;

	vpdarray = (uint8_t *)vpd;
	/* First checksum covers resource_type up to vpd_checksum. */
	maxaddr = offsetof(struct vpd_config, vpd_checksum);
	checksum = 0;
	for (i = offsetof(struct vpd_config, resource_type); i < maxaddr; i++)
		checksum = checksum + vpdarray[i];
	if (checksum == 0
	 || (-checksum & 0xFF) != vpd->vpd_checksum)
		return (0);

	/* Second checksum covers default_target_flags up to checksum. */
	checksum = 0;
	maxaddr = offsetof(struct vpd_config, checksum);
	for (i = offsetof(struct vpd_config, default_target_flags);
	     i < maxaddr; i++)
		checksum = checksum + vpdarray[i];
	if (checksum == 0
	 || (-checksum & 0xFF) != vpd->checksum)
		return (0);
	return (1);
}

/*
 * Verify the additive 16bit checksum over a seeprom_config image.
 * Returns 1 when valid, 0 otherwise.
 */
int
ahd_verify_cksum(struct seeprom_config *sc)
{
	int i;
	int maxaddr;
	uint32_t checksum;
	uint16_t *scarray;

	maxaddr = (sizeof(*sc)/2) - 1;
	checksum = 0;
	scarray = (uint16_t *)sc;

	for (i = 0; i < maxaddr; i++)
		checksum = checksum + scarray[i];
	if (checksum == 0
	 || (checksum & 0xFFFF) != sc->checksum) {
		return (0);
	} else {
		return (1);
	}
}

int
ahd_acquire_seeprom(struct ahd_softc *ahd)
{
	/*
	 * We should be able to determine the SEEPROM type
	 * from the flexport logic, but unfortunately not
	 * all implementations have this logic
	 * and there is
	 * no programmatic method for determining if the logic
	 * is present.
	 */
	return (1);
#if 0
	uint8_t	seetype;
	int	error;

	error = ahd_read_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, &seetype);
	if (error != 0
	 || ((seetype & FLX_ROMSTAT_SEECFG) == FLX_ROMSTAT_SEE_NONE))
		return (0);
	return (1);
#endif
}

void
ahd_release_seeprom(struct ahd_softc *ahd)
{
	/* Currently a no-op */
}

/*
 * Wait at most 2 seconds for flexport arbitration to succeed.
 */
static int
ahd_wait_flexport(struct ahd_softc *ahd)
{
	int cnt;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	/* 2 seconds in 5us polling steps. */
	cnt = 1000000 * 2 / 5;
	while ((ahd_inb(ahd, BRDCTL) & FLXARBACK) == 0 && --cnt)
		ahd_delay(5);

	if (cnt == 0)
		return (ETIMEDOUT);
	return (0);
}

/*
 * Write value to one of the controller's eight flexport
 * registers.  Returns 0 on success or an errno-style code.
 */
int
ahd_write_flexport(struct ahd_softc *ahd, u_int addr, u_int value)
{
	int error;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	if (addr > 7)
		panic("ahd_write_flexport: address out of range");
	ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3));
	error = ahd_wait_flexport(ahd);
	if (error != 0)
		return (error);
	ahd_outb(ahd, BRDDAT, value);
	ahd_flush_device_writes(ahd);
	/* Strobe the data into the selected register. */
	ahd_outb(ahd, BRDCTL, BRDSTB|BRDEN|(addr << 3));
	ahd_flush_device_writes(ahd);
	ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3));
	ahd_flush_device_writes(ahd);
	ahd_outb(ahd, BRDCTL, 0);
	ahd_flush_device_writes(ahd);
	return (0);
}

/*
 * Read one of the controller's eight flexport registers into
 * *value.  Returns 0 on success or an errno-style code.
 */
int
ahd_read_flexport(struct ahd_softc *ahd, u_int addr, uint8_t *value)
{
	int	error;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	if (addr > 7)
		panic("ahd_read_flexport: address out of range");
	ahd_outb(ahd, BRDCTL, BRDRW|BRDEN|(addr << 3));
	error = ahd_wait_flexport(ahd);
	if (error != 0)
		return (error);
	*value = ahd_inb(ahd, BRDDAT);
	ahd_outb(ahd, BRDCTL, 0);
	ahd_flush_device_writes(ahd);
	return (0);
}

/************************* Target Mode ****************************************/
#ifdef AHD_TARGET_MODE
cam_status
ahd_find_tmode_devs(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb,
		    struct ahd_tmode_tstate **tstate,
		    struct ahd_tmode_lstate
		    **lstate,
		    int notfound_failure)
{

	if ((ahd->features & AHD_TARGETMODE) == 0)
		return (CAM_REQ_INVALID);

	/*
	 * Handle the 'black hole' device that sucks up
	 * requests to unattached luns on enabled targets.
	 */
	if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
	 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
		*tstate = NULL;
		*lstate = ahd->black_hole;
	} else {
		u_int max_id;

		/* Validate target and lun against controller limits. */
		max_id = (ahd->features & AHD_WIDE) ? 16 : 8;
		if (ccb->ccb_h.target_id >= max_id)
			return (CAM_TID_INVALID);

		if (ccb->ccb_h.target_lun >= AHD_NUM_LUNS)
			return (CAM_LUN_INVALID);

		*tstate = ahd->enabled_targets[ccb->ccb_h.target_id];
		*lstate = NULL;
		if (*tstate != NULL)
			*lstate =
			    (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
	}

	if (notfound_failure != 0 && *lstate == NULL)
		return (CAM_PATH_INVALID);

	return (CAM_REQ_CMP);
}

/*
 * Enable or disable target mode on a lun in response to a
 * CAM XPT_EN_LUN request.  Entire body is compiled out until
 * target mode support is completed (NOT_YET).
 */
void
ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
{
#if NOT_YET
	struct	   ahd_tmode_tstate *tstate;
	struct	   ahd_tmode_lstate *lstate;
	struct	   ccb_en_lun *cel;
	cam_status status;
	u_int	   target;
	u_int	   lun;
	u_int	   target_mask;
	u_long	   s;
	char	   channel;

	status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate, &lstate,
				     /*notfound_failure*/FALSE);

	if (status != CAM_REQ_CMP) {
		ccb->ccb_h.status = status;
		return;
	}

	if ((ahd->features & AHD_MULTIROLE) != 0) {
		u_int	   our_id;

		our_id = ahd->our_id;
		if (ccb->ccb_h.target_id != our_id) {
			if ((ahd->features & AHD_MULTI_TID) != 0
			 && (ahd->flags & AHD_INITIATORROLE) != 0) {
				/*
				 * Only allow additional targets if
				 * the initiator role is disabled.
				 * The hardware cannot handle a re-select-in
				 * on the initiator id during a re-select-out
				 * on a different target id.
				 */
				status = CAM_TID_INVALID;
			} else if ((ahd->flags & AHD_INITIATORROLE) != 0
				|| ahd->enabled_luns > 0) {
				/*
				 * Only allow our target id to change
				 * if the initiator role is not configured
				 * and there are no enabled luns which
				 * are attached to the currently registered
				 * scsi id.
				 */
				status = CAM_TID_INVALID;
			}
		}
	}

	if (status != CAM_REQ_CMP) {
		ccb->ccb_h.status = status;
		return;
	}

	/*
	 * We now have an id that is valid.
	 * If we aren't in target mode, switch modes.
	 */
	if ((ahd->flags & AHD_TARGETROLE) == 0
	 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
		u_long	s;

		printk("Configuring Target Mode\n");
		ahd_lock(ahd, &s);
		if (LIST_FIRST(&ahd->pending_scbs) != NULL) {
			ccb->ccb_h.status = CAM_BUSY;
			ahd_unlock(ahd, &s);
			return;
		}
		ahd->flags |= AHD_TARGETROLE;
		if ((ahd->features & AHD_MULTIROLE) == 0)
			ahd->flags &= ~AHD_INITIATORROLE;
		ahd_pause(ahd);
		/* Reload the firmware for the new role mix. */
		ahd_loadseq(ahd);
		ahd_restart(ahd);
		ahd_unlock(ahd, &s);
	}
	cel = &ccb->cel;
	target = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;
	channel = SIM_CHANNEL(ahd, sim);
	target_mask = 0x01 << target;
	if (channel == 'B')
		target_mask <<= 8;

	if (cel->enable != 0) {
		u_int scsiseq1;

		/* Are we already enabled?? */
		if (lstate != NULL) {
			xpt_print_path(ccb->ccb_h.path);
			printk("Lun already enabled\n");
			ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
			return;
		}

		if (cel->grp6_len != 0
		 || cel->grp7_len != 0) {
			/*
			 * Don't (yet?) support vendor
			 * specific commands.
			 */
			ccb->ccb_h.status = CAM_REQ_INVALID;
			printk("Non-zero Group Codes\n");
			return;
		}

		/*
		 * Seems to be okay.
		 * Setup our data structures.
		 */
		if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
			tstate = ahd_alloc_tstate(ahd, target, channel);
			if (tstate == NULL) {
				xpt_print_path(ccb->ccb_h.path);
				printk("Couldn't allocate tstate\n");
				ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
				return;
			}
		}
		lstate = kmalloc(sizeof(*lstate), GFP_ATOMIC);
		if (lstate == NULL) {
			xpt_print_path(ccb->ccb_h.path);
			printk("Couldn't allocate lstate\n");
			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			return;
		}
		memset(lstate, 0, sizeof(*lstate));
		status = xpt_create_path(&lstate->path, /*periph*/NULL,
					 xpt_path_path_id(ccb->ccb_h.path),
					 xpt_path_target_id(ccb->ccb_h.path),
					 xpt_path_lun_id(ccb->ccb_h.path));
		if (status != CAM_REQ_CMP) {
			kfree(lstate);
			xpt_print_path(ccb->ccb_h.path);
			printk("Couldn't allocate path\n");
			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			return;
		}
		SLIST_INIT(&lstate->accept_tios);
		SLIST_INIT(&lstate->immed_notifies);
		ahd_lock(ahd, &s);
		ahd_pause(ahd);
		if (target != CAM_TARGET_WILDCARD) {
			tstate->enabled_luns[lun] = lstate;
			ahd->enabled_luns++;

			if ((ahd->features & AHD_MULTI_TID) != 0) {
				u_int targid_mask;

				/* Add this id to the selection-in mask. */
				targid_mask = ahd_inw(ahd, TARGID);
				targid_mask |= target_mask;
				ahd_outw(ahd, TARGID, targid_mask);
				ahd_update_scsiid(ahd, targid_mask);
			} else {
				u_int our_id;
				char  channel;

				channel = SIM_CHANNEL(ahd, sim);
				our_id = SIM_SCSI_ID(ahd, sim);

				/*
				 * This can only happen if selections
				 * are not enabled
				 */
				if (target != our_id) {
					u_int sblkctl;
					char  cur_channel;
					int   swap;

					sblkctl = ahd_inb(ahd, SBLKCTL);
					cur_channel = (sblkctl & SELBUSB)
						    ? 'B' : 'A';
					if ((ahd->features & AHD_TWIN) == 0)
						cur_channel = 'A';
					swap = cur_channel != channel;
					ahd->our_id = target;

					if (swap)
						ahd_outb(ahd, SBLKCTL,
							 sblkctl ^ SELBUSB);

					ahd_outb(ahd, SCSIID, target);

					if (swap)
						ahd_outb(ahd, SBLKCTL, sblkctl);
				}
			}
		} else
			ahd->black_hole = lstate;
		/* Allow select-in operations */
		if (ahd->black_hole != NULL && ahd->enabled_luns > 0) {
			scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE);
			scsiseq1 |= ENSELI;
			ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1);
			scsiseq1 = ahd_inb(ahd, SCSISEQ1);
			scsiseq1 |= ENSELI;
			ahd_outb(ahd, SCSISEQ1, scsiseq1);
		}
		ahd_unpause(ahd);
		ahd_unlock(ahd, &s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_print_path(ccb->ccb_h.path);
		printk("Lun now enabled for target mode\n");
	} else {
		struct scb *scb;
		int i, empty;

		if (lstate == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return;
		}

		ahd_lock(ahd, &s);
		
		ccb->ccb_h.status = CAM_REQ_CMP;
		/* Refuse to disable while target I/O is outstanding. */
		LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
			struct ccb_hdr *ccbh;

			ccbh = &scb->io_ctx->ccb_h;
			if (ccbh->func_code == XPT_CONT_TARGET_IO
			 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
				printk("CTIO pending\n");
				ccb->ccb_h.status = CAM_REQ_INVALID;
				ahd_unlock(ahd, &s);
				return;
			}
		}

		if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
			printk("ATIOs pending\n");
			ccb->ccb_h.status = CAM_REQ_INVALID;
		}

		if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
			printk("INOTs pending\n");
			ccb->ccb_h.status = CAM_REQ_INVALID;
		}

		if (ccb->ccb_h.status != CAM_REQ_CMP) {
			ahd_unlock(ahd, &s);
			return;
		}

		xpt_print_path(ccb->ccb_h.path);
		printk("Target mode disabled\n");
		xpt_free_path(lstate->path);
		kfree(lstate);

		ahd_pause(ahd);
		/* Can we clean up the target too? */
		if (target != CAM_TARGET_WILDCARD) {
			tstate->enabled_luns[lun] = NULL;
			ahd->enabled_luns--;
			for (empty = 1, i = 0; i < 8; i++)
				if (tstate->enabled_luns[i] != NULL) {
					empty = 0;
					break;
				}

			if (empty) {
				ahd_free_tstate(ahd, target, channel,
						/*force*/FALSE);
				if (ahd->features & AHD_MULTI_TID) {
					u_int targid_mask;

					targid_mask = ahd_inw(ahd, TARGID);
					targid_mask &= ~target_mask;
					ahd_outw(ahd, TARGID, targid_mask);
					ahd_update_scsiid(ahd, targid_mask);
				}
			}
		} else {

			ahd->black_hole = NULL;

			/*
			 * We can't allow selections without
			 * our black hole device.
			 */
			empty = TRUE;
		}
		if (ahd->enabled_luns == 0) {
			/* Disallow select-in */
			u_int scsiseq1;

			scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE);
			scsiseq1 &= ~ENSELI;
			ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1);
			scsiseq1 = ahd_inb(ahd, SCSISEQ1);
			scsiseq1 &= ~ENSELI;
			ahd_outb(ahd, SCSISEQ1, scsiseq1);

			if ((ahd->features & AHD_MULTIROLE) == 0) {
				printk("Configuring Initiator Mode\n");
				ahd->flags &= ~AHD_TARGETROLE;
				ahd->flags |= AHD_INITIATORROLE;
				ahd_pause(ahd);
				ahd_loadseq(ahd);
				ahd_restart(ahd);
				/*
				 * Unpaused.  The extra unpause
				 * that follows is harmless.
				 */
			}
		}
		ahd_unpause(ahd);
		ahd_unlock(ahd, &s);
	}
#endif
}

/*
 * Keep the SCSIID register's own-id field consistent with the
 * TARGID selection-enable mask.  Compiled out (NOT_YET).
 */
static void
ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask)
{
#if NOT_YET
	u_int scsiid_mask;
	u_int scsiid;

	if ((ahd->features & AHD_MULTI_TID) == 0)
		panic("ahd_update_scsiid called on non-multitid unit\n");

	/*
	 * Since we will rely on the TARGID mask
	 * for selection enables, ensure that OID
	 * in SCSIID is not set to some other ID
	 * that we don't want to allow selections on.
*/ if ((ahd->features & AHD_ULTRA2) != 0) scsiid = ahd_inb(ahd, SCSIID_ULTRA2); else scsiid = ahd_inb(ahd, SCSIID); scsiid_mask = 0x1 << (scsiid & OID); if ((targid_mask & scsiid_mask) == 0) { u_int our_id; /* ffs counts from 1 */ our_id = ffs(targid_mask); if (our_id == 0) our_id = ahd->our_id; else our_id--; scsiid &= TID; scsiid |= our_id; } if ((ahd->features & AHD_ULTRA2) != 0) ahd_outb(ahd, SCSIID_ULTRA2, scsiid); else ahd_outb(ahd, SCSIID, scsiid); #endif } static void ahd_run_tqinfifo(struct ahd_softc *ahd, int paused) { struct target_cmd *cmd; ahd_sync_tqinfifo(ahd, BUS_DMASYNC_POSTREAD); while ((cmd = &ahd->targetcmds[ahd->tqinfifonext])->cmd_valid != 0) { /* * Only advance through the queue if we * have the resources to process the command. */ if (ahd_handle_target_cmd(ahd, cmd) != 0) break; cmd->cmd_valid = 0; ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, ahd_targetcmd_offset(ahd, ahd->tqinfifonext), sizeof(struct target_cmd), BUS_DMASYNC_PREREAD); ahd->tqinfifonext++; /* * Lazily update our position in the target mode incoming * command queue as seen by the sequencer. */ if ((ahd->tqinfifonext & (HOST_TQINPOS - 1)) == 1) { u_int hs_mailbox; hs_mailbox = ahd_inb(ahd, HS_MAILBOX); hs_mailbox &= ~HOST_TQINPOS; hs_mailbox |= ahd->tqinfifonext & HOST_TQINPOS; ahd_outb(ahd, HS_MAILBOX, hs_mailbox); } } } static int ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd) { struct ahd_tmode_tstate *tstate; struct ahd_tmode_lstate *lstate; struct ccb_accept_tio *atio; uint8_t *byte; int initiator; int target; int lun; initiator = SCSIID_TARGET(ahd, cmd->scsiid); target = SCSIID_OUR_ID(cmd->scsiid); lun = (cmd->identify & MSG_IDENTIFY_LUNMASK); byte = cmd->bytes; tstate = ahd->enabled_targets[target]; lstate = NULL; if (tstate != NULL) lstate = tstate->enabled_luns[lun]; /* * Commands for disabled luns go to the black hole driver. 
*/ if (lstate == NULL) lstate = ahd->black_hole; atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); if (atio == NULL) { ahd->flags |= AHD_TQINFIFO_BLOCKED; /* * Wait for more ATIOs from the peripheral driver for this lun. */ return (1); } else ahd->flags &= ~AHD_TQINFIFO_BLOCKED; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_TQIN) != 0) printk("Incoming command from %d for %d:%d%s\n", initiator, target, lun, lstate == ahd->black_hole ? "(Black Holed)" : ""); #endif SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); if (lstate == ahd->black_hole) { /* Fill in the wildcards */ atio->ccb_h.target_id = target; atio->ccb_h.target_lun = lun; } /* * Package it up and send it off to * whomever has this lun enabled. */ atio->sense_len = 0; atio->init_id = initiator; if (byte[0] != 0xFF) { /* Tag was included */ atio->tag_action = *byte++; atio->tag_id = *byte++; atio->ccb_h.flags = CAM_TAG_ACTION_VALID; } else { atio->ccb_h.flags = 0; } byte++; /* Okay. Now determine the cdb size based on the command code */ switch (*byte >> CMD_GROUP_CODE_SHIFT) { case 0: atio->cdb_len = 6; break; case 1: case 2: atio->cdb_len = 10; break; case 4: atio->cdb_len = 16; break; case 5: atio->cdb_len = 12; break; case 3: default: /* Only copy the opcode. */ atio->cdb_len = 1; printk("Reserved or VU command code type encountered\n"); break; } memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len); atio->ccb_h.status |= CAM_CDB_RECVD; if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { /* * We weren't allowed to disconnect. * We're hanging on the bus until a * continue target I/O comes in response * to this accept tio. */ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_TQIN) != 0) printk("Received Immediate Command %d:%d:%d - %p\n", initiator, target, lun, ahd->pending_device); #endif ahd->pending_device = lstate; ahd_freeze_ccb((union ccb *)atio); atio->ccb_h.flags |= CAM_DIS_DISCONNECT; } xpt_done((union ccb*)atio); return (0); } #endif
gpl-2.0
sndnvaps/G718c_kernel
drivers/mmc/host/sdricoh_cs.c
5446
14822
/* * sdricoh_cs.c - driver for Ricoh Secure Digital Card Readers that can be * found on some Ricoh RL5c476 II cardbus bridge * * Copyright (C) 2006 - 2008 Sascha Sommer <saschasommer@freenet.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ /* #define DEBUG #define VERBOSE_DEBUG */ #include <linux/delay.h> #include <linux/highmem.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/ioport.h> #include <linux/scatterlist.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include <linux/io.h> #include <linux/mmc/host.h> #define DRIVER_NAME "sdricoh_cs" static unsigned int switchlocked; /* i/o region */ #define SDRICOH_PCI_REGION 0 #define SDRICOH_PCI_REGION_SIZE 0x1000 /* registers */ #define R104_VERSION 0x104 #define R200_CMD 0x200 #define R204_CMD_ARG 0x204 #define R208_DATAIO 0x208 #define R20C_RESP 0x20c #define R21C_STATUS 0x21c #define R2E0_INIT 0x2e0 #define R2E4_STATUS_RESP 0x2e4 #define R2F0_RESET 0x2f0 #define R224_MODE 0x224 #define R226_BLOCKSIZE 0x226 #define R228_POWER 0x228 #define R230_DATA 0x230 /* flags for the R21C_STATUS register */ #define STATUS_CMD_FINISHED 0x00000001 #define STATUS_TRANSFER_FINISHED 0x00000004 #define STATUS_CARD_INSERTED 0x00000020 #define STATUS_CARD_LOCKED 0x00000080 #define STATUS_CMD_TIMEOUT 0x00400000 #define STATUS_READY_TO_READ 0x01000000 #define STATUS_READY_TO_WRITE 
0x02000000 #define STATUS_BUSY 0x40000000 /* timeouts */ #define INIT_TIMEOUT 100 #define CMD_TIMEOUT 100000 #define TRANSFER_TIMEOUT 100000 #define BUSY_TIMEOUT 32767 /* list of supported pcmcia devices */ static const struct pcmcia_device_id pcmcia_ids[] = { /* vendor and device strings followed by their crc32 hashes */ PCMCIA_DEVICE_PROD_ID12("RICOH", "Bay1Controller", 0xd9f522ed, 0xc3901202), PCMCIA_DEVICE_PROD_ID12("RICOH", "Bay Controller", 0xd9f522ed, 0xace80909), PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, pcmcia_ids); /* mmc privdata */ struct sdricoh_host { struct device *dev; struct mmc_host *mmc; /* MMC structure */ unsigned char __iomem *iobase; struct pci_dev *pci_dev; int app_cmd; }; /***************** register i/o helper functions *****************************/ static inline unsigned int sdricoh_readl(struct sdricoh_host *host, unsigned int reg) { unsigned int value = readl(host->iobase + reg); dev_vdbg(host->dev, "rl %x 0x%x\n", reg, value); return value; } static inline void sdricoh_writel(struct sdricoh_host *host, unsigned int reg, unsigned int value) { writel(value, host->iobase + reg); dev_vdbg(host->dev, "wl %x 0x%x\n", reg, value); } static inline unsigned int sdricoh_readw(struct sdricoh_host *host, unsigned int reg) { unsigned int value = readw(host->iobase + reg); dev_vdbg(host->dev, "rb %x 0x%x\n", reg, value); return value; } static inline void sdricoh_writew(struct sdricoh_host *host, unsigned int reg, unsigned short value) { writew(value, host->iobase + reg); dev_vdbg(host->dev, "ww %x 0x%x\n", reg, value); } static inline unsigned int sdricoh_readb(struct sdricoh_host *host, unsigned int reg) { unsigned int value = readb(host->iobase + reg); dev_vdbg(host->dev, "rb %x 0x%x\n", reg, value); return value; } static int sdricoh_query_status(struct sdricoh_host *host, unsigned int wanted, unsigned int timeout){ unsigned int loop; unsigned int status = 0; struct device *dev = host->dev; for (loop = 0; loop < timeout; loop++) { 
status = sdricoh_readl(host, R21C_STATUS); sdricoh_writel(host, R2E4_STATUS_RESP, status); if (status & wanted) break; } if (loop == timeout) { dev_err(dev, "query_status: timeout waiting for %x\n", wanted); return -ETIMEDOUT; } /* do not do this check in the loop as some commands fail otherwise */ if (status & 0x7F0000) { dev_err(dev, "waiting for status bit %x failed\n", wanted); return -EINVAL; } return 0; } static int sdricoh_mmc_cmd(struct sdricoh_host *host, unsigned char opcode, unsigned int arg) { unsigned int status; int result = 0; unsigned int loop = 0; /* reset status reg? */ sdricoh_writel(host, R21C_STATUS, 0x18); /* fill parameters */ sdricoh_writel(host, R204_CMD_ARG, arg); sdricoh_writel(host, R200_CMD, (0x10000 << 8) | opcode); /* wait for command completion */ if (opcode) { for (loop = 0; loop < CMD_TIMEOUT; loop++) { status = sdricoh_readl(host, R21C_STATUS); sdricoh_writel(host, R2E4_STATUS_RESP, status); if (status & STATUS_CMD_FINISHED) break; } /* don't check for timeout in the loop it is not always reset correctly */ if (loop == CMD_TIMEOUT || status & STATUS_CMD_TIMEOUT) result = -ETIMEDOUT; } return result; } static int sdricoh_reset(struct sdricoh_host *host) { dev_dbg(host->dev, "reset\n"); sdricoh_writel(host, R2F0_RESET, 0x10001); sdricoh_writel(host, R2E0_INIT, 0x10000); if (sdricoh_readl(host, R2E0_INIT) != 0x10000) return -EIO; sdricoh_writel(host, R2E0_INIT, 0x10007); sdricoh_writel(host, R224_MODE, 0x2000000); sdricoh_writel(host, R228_POWER, 0xe0); /* status register ? 
*/ sdricoh_writel(host, R21C_STATUS, 0x18); return 0; } static int sdricoh_blockio(struct sdricoh_host *host, int read, u8 *buf, int len) { int size; u32 data = 0; /* wait until the data is available */ if (read) { if (sdricoh_query_status(host, STATUS_READY_TO_READ, TRANSFER_TIMEOUT)) return -ETIMEDOUT; sdricoh_writel(host, R21C_STATUS, 0x18); /* read data */ while (len) { data = sdricoh_readl(host, R230_DATA); size = min(len, 4); len -= size; while (size) { *buf = data & 0xFF; buf++; data >>= 8; size--; } } } else { if (sdricoh_query_status(host, STATUS_READY_TO_WRITE, TRANSFER_TIMEOUT)) return -ETIMEDOUT; sdricoh_writel(host, R21C_STATUS, 0x18); /* write data */ while (len) { size = min(len, 4); len -= size; while (size) { data >>= 8; data |= (u32)*buf << 24; buf++; size--; } sdricoh_writel(host, R230_DATA, data); } } if (len) return -EIO; return 0; } static void sdricoh_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct sdricoh_host *host = mmc_priv(mmc); struct mmc_command *cmd = mrq->cmd; struct mmc_data *data = cmd->data; struct device *dev = host->dev; unsigned char opcode = cmd->opcode; int i; dev_dbg(dev, "=============================\n"); dev_dbg(dev, "sdricoh_request opcode=%i\n", opcode); sdricoh_writel(host, R21C_STATUS, 0x18); /* MMC_APP_CMDs need some special handling */ if (host->app_cmd) { opcode |= 64; host->app_cmd = 0; } else if (opcode == 55) host->app_cmd = 1; /* read/write commands seem to require this */ if (data) { sdricoh_writew(host, R226_BLOCKSIZE, data->blksz); sdricoh_writel(host, R208_DATAIO, 0); } cmd->error = sdricoh_mmc_cmd(host, opcode, cmd->arg); /* read response buffer */ if (cmd->flags & MMC_RSP_PRESENT) { if (cmd->flags & MMC_RSP_136) { /* CRC is stripped so we need to do some shifting. 
*/ for (i = 0; i < 4; i++) { cmd->resp[i] = sdricoh_readl(host, R20C_RESP + (3 - i) * 4) << 8; if (i != 3) cmd->resp[i] |= sdricoh_readb(host, R20C_RESP + (3 - i) * 4 - 1); } } else cmd->resp[0] = sdricoh_readl(host, R20C_RESP); } /* transfer data */ if (data && cmd->error == 0) { dev_dbg(dev, "transfer: blksz %i blocks %i sg_len %i " "sg length %i\n", data->blksz, data->blocks, data->sg_len, data->sg->length); /* enter data reading mode */ sdricoh_writel(host, R21C_STATUS, 0x837f031e); for (i = 0; i < data->blocks; i++) { size_t len = data->blksz; u8 *buf; struct page *page; int result; page = sg_page(data->sg); buf = kmap(page) + data->sg->offset + (len * i); result = sdricoh_blockio(host, data->flags & MMC_DATA_READ, buf, len); kunmap(page); flush_dcache_page(page); if (result) { dev_err(dev, "sdricoh_request: cmd %i " "block transfer failed\n", cmd->opcode); cmd->error = result; break; } else data->bytes_xfered += len; } sdricoh_writel(host, R208_DATAIO, 1); if (sdricoh_query_status(host, STATUS_TRANSFER_FINISHED, TRANSFER_TIMEOUT)) { dev_err(dev, "sdricoh_request: transfer end error\n"); cmd->error = -EINVAL; } } /* FIXME check busy flag */ mmc_request_done(mmc, mrq); dev_dbg(dev, "=============================\n"); } static void sdricoh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct sdricoh_host *host = mmc_priv(mmc); dev_dbg(host->dev, "set_ios\n"); if (ios->power_mode == MMC_POWER_ON) { sdricoh_writel(host, R228_POWER, 0xc0e0); if (ios->bus_width == MMC_BUS_WIDTH_4) { sdricoh_writel(host, R224_MODE, 0x2000300); sdricoh_writel(host, R228_POWER, 0x40e0); } else { sdricoh_writel(host, R224_MODE, 0x2000340); } } else if (ios->power_mode == MMC_POWER_UP) { sdricoh_writel(host, R224_MODE, 0x2000320); sdricoh_writel(host, R228_POWER, 0xe0); } } static int sdricoh_get_ro(struct mmc_host *mmc) { struct sdricoh_host *host = mmc_priv(mmc); unsigned int status; status = sdricoh_readl(host, R21C_STATUS); sdricoh_writel(host, R2E4_STATUS_RESP, status); /* 
some notebooks seem to have the locked flag switched */ if (switchlocked) return !(status & STATUS_CARD_LOCKED); return (status & STATUS_CARD_LOCKED); } static struct mmc_host_ops sdricoh_ops = { .request = sdricoh_request, .set_ios = sdricoh_set_ios, .get_ro = sdricoh_get_ro, }; /* initialize the control and register it to the mmc framework */ static int sdricoh_init_mmc(struct pci_dev *pci_dev, struct pcmcia_device *pcmcia_dev) { int result = 0; void __iomem *iobase = NULL; struct mmc_host *mmc = NULL; struct sdricoh_host *host = NULL; struct device *dev = &pcmcia_dev->dev; /* map iomem */ if (pci_resource_len(pci_dev, SDRICOH_PCI_REGION) != SDRICOH_PCI_REGION_SIZE) { dev_dbg(dev, "unexpected pci resource len\n"); return -ENODEV; } iobase = pci_iomap(pci_dev, SDRICOH_PCI_REGION, SDRICOH_PCI_REGION_SIZE); if (!iobase) { dev_err(dev, "unable to map iobase\n"); return -ENODEV; } /* check version? */ if (readl(iobase + R104_VERSION) != 0x4000) { dev_dbg(dev, "no supported mmc controller found\n"); result = -ENODEV; goto err; } /* allocate privdata */ mmc = pcmcia_dev->priv = mmc_alloc_host(sizeof(struct sdricoh_host), &pcmcia_dev->dev); if (!mmc) { dev_err(dev, "mmc_alloc_host failed\n"); result = -ENOMEM; goto err; } host = mmc_priv(mmc); host->iobase = iobase; host->dev = dev; host->pci_dev = pci_dev; mmc->ops = &sdricoh_ops; /* FIXME: frequency and voltage handling is done by the controller */ mmc->f_min = 450000; mmc->f_max = 24000000; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; mmc->caps |= MMC_CAP_4_BIT_DATA; mmc->max_seg_size = 1024 * 512; mmc->max_blk_size = 512; /* reset the controller */ if (sdricoh_reset(host)) { dev_dbg(dev, "could not reset\n"); result = -EIO; goto err; } result = mmc_add_host(mmc); if (!result) { dev_dbg(dev, "mmc host registered\n"); return 0; } err: if (iobase) pci_iounmap(pci_dev, iobase); if (mmc) mmc_free_host(mmc); return result; } /* search for supported mmc controllers */ static int sdricoh_pcmcia_probe(struct pcmcia_device 
*pcmcia_dev) { struct pci_dev *pci_dev = NULL; dev_info(&pcmcia_dev->dev, "Searching MMC controller for pcmcia device" " %s %s ...\n", pcmcia_dev->prod_id[0], pcmcia_dev->prod_id[1]); /* search pci cardbus bridge that contains the mmc controller */ /* the io region is already claimed by yenta_socket... */ while ((pci_dev = pci_get_device(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, pci_dev))) { /* try to init the device */ if (!sdricoh_init_mmc(pci_dev, pcmcia_dev)) { dev_info(&pcmcia_dev->dev, "MMC controller found\n"); return 0; } } dev_err(&pcmcia_dev->dev, "No MMC controller was found.\n"); return -ENODEV; } static void sdricoh_pcmcia_detach(struct pcmcia_device *link) { struct mmc_host *mmc = link->priv; dev_dbg(&link->dev, "detach\n"); /* remove mmc host */ if (mmc) { struct sdricoh_host *host = mmc_priv(mmc); mmc_remove_host(mmc); pci_iounmap(host->pci_dev, host->iobase); pci_dev_put(host->pci_dev); mmc_free_host(mmc); } pcmcia_disable_device(link); } #ifdef CONFIG_PM static int sdricoh_pcmcia_suspend(struct pcmcia_device *link) { struct mmc_host *mmc = link->priv; dev_dbg(&link->dev, "suspend\n"); mmc_suspend_host(mmc); return 0; } static int sdricoh_pcmcia_resume(struct pcmcia_device *link) { struct mmc_host *mmc = link->priv; dev_dbg(&link->dev, "resume\n"); sdricoh_reset(mmc_priv(mmc)); mmc_resume_host(mmc); return 0; } #else #define sdricoh_pcmcia_suspend NULL #define sdricoh_pcmcia_resume NULL #endif static struct pcmcia_driver sdricoh_driver = { .name = DRIVER_NAME, .probe = sdricoh_pcmcia_probe, .remove = sdricoh_pcmcia_detach, .id_table = pcmcia_ids, .suspend = sdricoh_pcmcia_suspend, .resume = sdricoh_pcmcia_resume, }; /*****************************************************************************\ * * * Driver init/exit * * * \*****************************************************************************/ static int __init sdricoh_drv_init(void) { return pcmcia_register_driver(&sdricoh_driver); } static void __exit sdricoh_drv_exit(void) { 
pcmcia_unregister_driver(&sdricoh_driver); } module_init(sdricoh_drv_init); module_exit(sdricoh_drv_exit); module_param(switchlocked, uint, 0444); MODULE_AUTHOR("Sascha Sommer <saschasommer@freenet.de>"); MODULE_DESCRIPTION("Ricoh PCMCIA Secure Digital Interface driver"); MODULE_LICENSE("GPL"); MODULE_PARM_DESC(switchlocked, "Switch the cards locked status." "Use this when unlocked cards are shown readonly (default 0)");
gpl-2.0
mpokwsths/mpokang_kernel
lib/lzo/lzo1x_compress.c
6214
6295
/* * LZO1X Compressor from LZO * * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com> * * The full LZO package can be found at: * http://www.oberhumer.com/opensource/lzo/ * * Changed for Linux kernel use by: * Nitin Gupta <nitingupta910@gmail.com> * Richard Purdie <rpurdie@openedhand.com> */ #include <linux/module.h> #include <linux/kernel.h> #include <asm/unaligned.h> #include <linux/lzo.h> #include "lzodefs.h" static noinline size_t lzo1x_1_do_compress(const unsigned char *in, size_t in_len, unsigned char *out, size_t *out_len, size_t ti, void *wrkmem) { const unsigned char *ip; unsigned char *op; const unsigned char * const in_end = in + in_len; const unsigned char * const ip_end = in + in_len - 20; const unsigned char *ii; lzo_dict_t * const dict = (lzo_dict_t *) wrkmem; op = out; ip = in; ii = ip; ip += ti < 4 ? 4 - ti : 0; for (;;) { const unsigned char *m_pos; size_t t, m_len, m_off; u32 dv; literal: ip += 1 + ((ip - ii) >> 5); next: if (unlikely(ip >= ip_end)) break; dv = get_unaligned_le32(ip); t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK; m_pos = in + dict[t]; dict[t] = (lzo_dict_t) (ip - in); if (unlikely(dv != get_unaligned_le32(m_pos))) goto literal; ii -= ti; ti = 0; t = ip - ii; if (t != 0) { if (t <= 3) { op[-2] |= t; COPY4(op, ii); op += t; } else if (t <= 16) { *op++ = (t - 3); COPY8(op, ii); COPY8(op + 8, ii + 8); op += t; } else { if (t <= 18) { *op++ = (t - 3); } else { size_t tt = t - 18; *op++ = 0; while (unlikely(tt > 255)) { tt -= 255; *op++ = 0; } *op++ = tt; } do { COPY8(op, ii); COPY8(op + 8, ii + 8); op += 16; ii += 16; t -= 16; } while (t >= 16); if (t > 0) do { *op++ = *ii++; } while (--t > 0); } } m_len = 4; { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ64) u64 v; v = get_unaligned((const u64 *) (ip + m_len)) ^ get_unaligned((const u64 *) (m_pos + m_len)); if (unlikely(v == 0)) { do { m_len += 8; v = get_unaligned((const u64 *) (ip + m_len)) ^ get_unaligned((const u64 *) (m_pos + 
m_len)); if (unlikely(ip + m_len >= ip_end)) goto m_len_done; } while (v == 0); } # if defined(__LITTLE_ENDIAN) m_len += (unsigned) __builtin_ctzll(v) / 8; # elif defined(__BIG_ENDIAN) m_len += (unsigned) __builtin_clzll(v) / 8; # else # error "missing endian definition" # endif #elif defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ32) u32 v; v = get_unaligned((const u32 *) (ip + m_len)) ^ get_unaligned((const u32 *) (m_pos + m_len)); if (unlikely(v == 0)) { do { m_len += 4; v = get_unaligned((const u32 *) (ip + m_len)) ^ get_unaligned((const u32 *) (m_pos + m_len)); if (v != 0) break; m_len += 4; v = get_unaligned((const u32 *) (ip + m_len)) ^ get_unaligned((const u32 *) (m_pos + m_len)); if (unlikely(ip + m_len >= ip_end)) goto m_len_done; } while (v == 0); } # if defined(__LITTLE_ENDIAN) m_len += (unsigned) __builtin_ctz(v) / 8; # elif defined(__BIG_ENDIAN) m_len += (unsigned) __builtin_clz(v) / 8; # else # error "missing endian definition" # endif #else if (unlikely(ip[m_len] == m_pos[m_len])) { do { m_len += 1; if (ip[m_len] != m_pos[m_len]) break; m_len += 1; if (ip[m_len] != m_pos[m_len]) break; m_len += 1; if (ip[m_len] != m_pos[m_len]) break; m_len += 1; if (ip[m_len] != m_pos[m_len]) break; m_len += 1; if (ip[m_len] != m_pos[m_len]) break; m_len += 1; if (ip[m_len] != m_pos[m_len]) break; m_len += 1; if (ip[m_len] != m_pos[m_len]) break; m_len += 1; if (unlikely(ip + m_len >= ip_end)) goto m_len_done; } while (ip[m_len] == m_pos[m_len]); } #endif } m_len_done: m_off = ip - m_pos; ip += m_len; ii = ip; if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) { m_off -= 1; *op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2)); *op++ = (m_off >> 3); } else if (m_off <= M3_MAX_OFFSET) { m_off -= 1; if (m_len <= M3_MAX_LEN) *op++ = (M3_MARKER | (m_len - 2)); else { m_len -= M3_MAX_LEN; *op++ = M3_MARKER | 0; while (unlikely(m_len > 255)) { m_len -= 255; *op++ = 0; } *op++ = (m_len); } *op++ = (m_off << 2); *op++ = (m_off >> 6); } else { m_off -= 
0x4000; if (m_len <= M4_MAX_LEN) *op++ = (M4_MARKER | ((m_off >> 11) & 8) | (m_len - 2)); else { m_len -= M4_MAX_LEN; *op++ = (M4_MARKER | ((m_off >> 11) & 8)); while (unlikely(m_len > 255)) { m_len -= 255; *op++ = 0; } *op++ = (m_len); } *op++ = (m_off << 2); *op++ = (m_off >> 6); } goto next; } *out_len = op - out; return in_end - (ii - ti); } int lzo1x_1_compress(const unsigned char *in, size_t in_len, unsigned char *out, size_t *out_len, void *wrkmem) { const unsigned char *ip = in; unsigned char *op = out; size_t l = in_len; size_t t = 0; while (l > 20) { size_t ll = l <= (M4_MAX_OFFSET + 1) ? l : (M4_MAX_OFFSET + 1); uintptr_t ll_end = (uintptr_t) ip + ll; if ((ll_end + ((t + ll) >> 5)) <= ll_end) break; BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS); memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t)); t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem); ip += ll; op += *out_len; l -= ll; } t += l; if (t > 0) { const unsigned char *ii = in + in_len - t; if (op == out && t <= 238) { *op++ = (17 + t); } else if (t <= 3) { op[-2] |= t; } else if (t <= 18) { *op++ = (t - 3); } else { size_t tt = t - 18; *op++ = 0; while (tt > 255) { tt -= 255; *op++ = 0; } *op++ = tt; } if (t >= 16) do { COPY8(op, ii); COPY8(op + 8, ii + 8); op += 16; ii += 16; t -= 16; } while (t >= 16); if (t > 0) do { *op++ = *ii++; } while (--t > 0); } *op++ = M4_MARKER | 1; *op++ = 0; *op++ = 0; *out_len = op - out; return LZO_E_OK; } EXPORT_SYMBOL_GPL(lzo1x_1_compress); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LZO1X-1 Compressor");
gpl-2.0
JustAkan/F220K_Stock_Kernel
drivers/char/agp/isoch.c
14662
13204
/* * Setup routines for AGP 3.5 compliant bridges. */ #include <linux/list.h> #include <linux/pci.h> #include <linux/agp_backend.h> #include <linux/module.h> #include <linux/slab.h> #include "agp.h" /* Generic AGP 3.5 enabling routines */ struct agp_3_5_dev { struct list_head list; u8 capndx; u32 maxbw; struct pci_dev *dev; }; static void agp_3_5_dev_list_insert(struct list_head *head, struct list_head *new) { struct agp_3_5_dev *cur, *n = list_entry(new, struct agp_3_5_dev, list); struct list_head *pos; list_for_each(pos, head) { cur = list_entry(pos, struct agp_3_5_dev, list); if (cur->maxbw > n->maxbw) break; } list_add_tail(new, pos); } static void agp_3_5_dev_list_sort(struct agp_3_5_dev *list, unsigned int ndevs) { struct agp_3_5_dev *cur; struct pci_dev *dev; struct list_head *pos, *tmp, *head = &list->list, *start = head->next; u32 nistat; INIT_LIST_HEAD(head); for (pos=start; pos!=head; ) { cur = list_entry(pos, struct agp_3_5_dev, list); dev = cur->dev; pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &nistat); cur->maxbw = (nistat >> 16) & 0xff; tmp = pos; pos = pos->next; agp_3_5_dev_list_insert(head, tmp); } } /* * Initialize all isochronous transfer parameters for an AGP 3.0 * node (i.e. a host bridge in combination with the adapters * lying behind it...) */ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge, struct agp_3_5_dev *dev_list, unsigned int ndevs) { /* * Convenience structure to make the calculations clearer * here. The field names come straight from the AGP 3.0 spec. 
*/ struct isoch_data { u32 maxbw; u32 n; u32 y; u32 l; u32 rq; struct agp_3_5_dev *dev; }; struct pci_dev *td = bridge->dev, *dev; struct list_head *head = &dev_list->list, *pos; struct agp_3_5_dev *cur; struct isoch_data *master, target; unsigned int cdev = 0; u32 mnistat, tnistat, tstatus, mcmd; u16 tnicmd, mnicmd; u8 mcapndx; u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async; u32 step, rem, rem_isoch, rem_async; int ret = 0; /* * We'll work with an array of isoch_data's (one for each * device in dev_list) throughout this function. */ if ((master = kmalloc(ndevs * sizeof(*master), GFP_KERNEL)) == NULL) { ret = -ENOMEM; goto get_out; } /* * Sort the device list by maxbw. We need to do this because the * spec suggests that the devices with the smallest requirements * have their resources allocated first, with all remaining resources * falling to the device with the largest requirement. * * We don't exactly do this, we divide target resources by ndevs * and split them amongst the AGP 3.0 devices. The remainder of such * division operations are dropped on the last device, sort of like * the spec mentions it should be done. * * We can't do this sort when we initially construct the dev_list * because we don't know until this function whether isochronous * transfers are enabled and consequently whether maxbw will mean * anything. */ agp_3_5_dev_list_sort(dev_list, ndevs); pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat); pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus); /* Extract power-on defaults from the target */ target.maxbw = (tnistat >> 16) & 0xff; target.n = (tnistat >> 8) & 0xff; target.y = (tnistat >> 6) & 0x3; target.l = (tnistat >> 3) & 0x7; target.rq = (tstatus >> 24) & 0xff; y_max = target.y; /* * Extract power-on defaults for each device in dev_list. Along * the way, calculate the total isochronous bandwidth required * by these devices and the largest requested payload size. 
*/ list_for_each(pos, head) { cur = list_entry(pos, struct agp_3_5_dev, list); dev = cur->dev; mcapndx = cur->capndx; pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat); master[cdev].maxbw = (mnistat >> 16) & 0xff; master[cdev].n = (mnistat >> 8) & 0xff; master[cdev].y = (mnistat >> 6) & 0x3; master[cdev].dev = cur; tot_bw += master[cdev].maxbw; y_max = max(y_max, master[cdev].y); cdev++; } /* Check if this configuration has any chance of working */ if (tot_bw > target.maxbw) { dev_err(&td->dev, "isochronous bandwidth required " "by AGP 3.0 devices exceeds that which is supported by " "the AGP 3.0 bridge!\n"); ret = -ENODEV; goto free_and_exit; } target.y = y_max; /* * Write the calculated payload size into the target's NICMD * register. Doing this directly effects the ISOCH_N value * in the target's NISTAT register, so we need to do this now * to get an accurate value for ISOCH_N later. */ pci_read_config_word(td, bridge->capndx+AGPNICMD, &tnicmd); tnicmd &= ~(0x3 << 6); tnicmd |= target.y << 6; pci_write_config_word(td, bridge->capndx+AGPNICMD, tnicmd); /* Reread the target's ISOCH_N */ pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat); target.n = (tnistat >> 8) & 0xff; /* Calculate the minimum ISOCH_N needed by each master */ for (cdev=0; cdev<ndevs; cdev++) { master[cdev].y = target.y; master[cdev].n = master[cdev].maxbw / (master[cdev].y + 1); tot_n += master[cdev].n; } /* Exit if the minimal ISOCH_N allocation among the masters is more * than the target can handle. */ if (tot_n > target.n) { dev_err(&td->dev, "number of isochronous " "transactions per period required by AGP 3.0 devices " "exceeds that which is supported by the AGP 3.0 " "bridge!\n"); ret = -ENODEV; goto free_and_exit; } /* Calculate left over ISOCH_N capability in the target. We'll give * this to the hungriest device (as per the spec) */ rem = target.n - tot_n; /* * Calculate the minimum isochronous RQ depth needed by each master. 
* Along the way, distribute the extra ISOCH_N capability calculated * above. */ for (cdev=0; cdev<ndevs; cdev++) { /* * This is a little subtle. If ISOCH_Y > 64B, then ISOCH_Y * byte isochronous writes will be broken into 64B pieces. * This means we need to budget more RQ depth to account for * these kind of writes (each isochronous write is actually * many writes on the AGP bus). */ master[cdev].rq = master[cdev].n; if (master[cdev].y > 0x1) master[cdev].rq *= (1 << (master[cdev].y - 1)); tot_rq += master[cdev].rq; } master[ndevs-1].n += rem; /* Figure the number of isochronous and asynchronous RQ slots the * target is providing. */ rq_isoch = (target.y > 0x1) ? target.n * (1 << (target.y - 1)) : target.n; rq_async = target.rq - rq_isoch; /* Exit if the minimal RQ needs of the masters exceeds what the target * can provide. */ if (tot_rq > rq_isoch) { dev_err(&td->dev, "number of request queue slots " "required by the isochronous bandwidth requested by " "AGP 3.0 devices exceeds the number provided by the " "AGP 3.0 bridge!\n"); ret = -ENODEV; goto free_and_exit; } /* Calculate asynchronous RQ capability in the target (per master) as * well as the total number of leftover isochronous RQ slots. */ step = rq_async / ndevs; rem_async = step + (rq_async % ndevs); rem_isoch = rq_isoch - tot_rq; /* Distribute the extra RQ slots calculated above and write our * isochronous settings out to the actual devices. */ for (cdev=0; cdev<ndevs; cdev++) { cur = master[cdev].dev; dev = cur->dev; mcapndx = cur->capndx; master[cdev].rq += (cdev == ndevs - 1) ? 
(rem_async + rem_isoch) : step; pci_read_config_word(dev, cur->capndx+AGPNICMD, &mnicmd); pci_read_config_dword(dev, cur->capndx+AGPCMD, &mcmd); mnicmd &= ~(0xff << 8); mnicmd &= ~(0x3 << 6); mcmd &= ~(0xff << 24); mnicmd |= master[cdev].n << 8; mnicmd |= master[cdev].y << 6; mcmd |= master[cdev].rq << 24; pci_write_config_dword(dev, cur->capndx+AGPCMD, mcmd); pci_write_config_word(dev, cur->capndx+AGPNICMD, mnicmd); } free_and_exit: kfree(master); get_out: return ret; } /* * This function basically allocates request queue slots among the * AGP 3.0 systems in nonisochronous nodes. The algorithm is * pretty stupid, divide the total number of RQ slots provided by the * target by ndevs. Distribute this many slots to each AGP 3.0 device, * giving any left over slots to the last device in dev_list. */ static void agp_3_5_nonisochronous_node_enable(struct agp_bridge_data *bridge, struct agp_3_5_dev *dev_list, unsigned int ndevs) { struct agp_3_5_dev *cur; struct list_head *head = &dev_list->list, *pos; u32 tstatus, mcmd; u32 trq, mrq, rem; unsigned int cdev = 0; pci_read_config_dword(bridge->dev, bridge->capndx+AGPSTAT, &tstatus); trq = (tstatus >> 24) & 0xff; mrq = trq / ndevs; rem = mrq + (trq % ndevs); for (pos=head->next; cdev<ndevs; cdev++, pos=pos->next) { cur = list_entry(pos, struct agp_3_5_dev, list); pci_read_config_dword(cur->dev, cur->capndx+AGPCMD, &mcmd); mcmd &= ~(0xff << 24); mcmd |= ((cdev == ndevs - 1) ? rem : mrq) << 24; pci_write_config_dword(cur->dev, cur->capndx+AGPCMD, mcmd); } } /* * Fully configure and enable an AGP 3.0 host bridge and all the devices * lying behind it. 
*/
/*
 * agp_3_5_enable() - configure an AGP 3.5 host bridge and the AGP 3.x
 * devices behind it.
 *
 * Walks the global PCI device list collecting AGP-capable display and
 * multimedia devices into a temporary list, verifies each one is an
 * AGP 3.x device operating in 3.x mode, then divides the target's
 * request-queue (and, if possible, isochronous) resources among them.
 *
 * Returns 0 on success, -ENODEV if the target lacks isochronous support
 * or a non-conforming device sits behind the bridge, -ENOMEM on
 * allocation failure.
 */
int agp_3_5_enable(struct agp_bridge_data *bridge)
{
	struct pci_dev *td = bridge->dev, *dev = NULL;
	u8 mcapndx;
	u32 isoch, arqsz;
	u32 tstatus, mstatus, ncapid;
	u32 mmajor;
	u16 mpstat;
	struct agp_3_5_dev *dev_list, *cur;
	struct list_head *head, *pos;
	unsigned int ndevs = 0;
	int ret = 0;

	/* Extract some power-on defaults from the target */
	pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus);
	isoch = (tstatus >> 17) & 0x1;
	if (isoch == 0)	/* isoch xfers not available, bail out. */
		return -ENODEV;

	/*
	 * Async request queue size. Read here but not used in this
	 * function — presumably consumed by the isochronous helper via
	 * its own AGPSTAT read; TODO confirm against the rest of the
	 * file.
	 */
	arqsz = (tstatus >> 13) & 0x7;

	/*
	 * Allocate a head for our AGP 3.5 device list
	 * (multiple AGP v3 devices are allowed behind a single bridge).
	 */
	if ((dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL)) == NULL) {
		ret = -ENOMEM;
		goto get_out;
	}
	head = &dev_list->list;
	INIT_LIST_HEAD(head);

	/* Find all AGP devices, and add them to dev_list. */
	for_each_pci_dev(dev) {
		mcapndx = pci_find_capability(dev, PCI_CAP_ID_AGP);
		if (mcapndx == 0)
			continue;

		/*
		 * NOTE(review): the mask clears the low byte, so only the
		 * PCI base-class byte (bits 8-15) survives; "case 0x0001"
		 * below can therefore never match, and unclassified
		 * devices actually fall through to "default". Looks like
		 * a long-standing quirk — confirm before changing.
		 */
		switch ((dev->class >>8) & 0xff00) {
			case 0x0600: /* Bridge */
				/* Skip bridges. We should call this function for each one. */
				continue;

			case 0x0001: /* Unclassified device */
				/* Don't know what this is, but log it for investigation. */
				/* (mcapndx != 0 is always true here — we continued above
				 * when it was zero.) */
				if (mcapndx != 0) {
					dev_info(&td->dev, "wacky, found unclassified AGP device %s [%04x/%04x]\n",
						 pci_name(dev),
						 dev->vendor, dev->device);
				}
				continue;

			case 0x0300: /* Display controller */
			case 0x0400: /* Multimedia controller */
				if ((cur = kmalloc(sizeof(*cur), GFP_KERNEL)) == NULL) {
					ret = -ENOMEM;
					goto free_and_exit;
				}
				cur->dev = dev;

				pos = &cur->list;
				list_add(pos, head);
				ndevs++;
				continue;

			default:
				continue;
		}
	}

	/*
	 * Take an initial pass through the devices lying behind our host
	 * bridge. Make sure each one is actually an AGP 3.0 device, otherwise
	 * exit with an error message. Along the way store the AGP 3.0
	 * cap_ptr for each device
	 */
	list_for_each(pos, head) {
		cur = list_entry(pos, struct agp_3_5_dev, list);
		dev = cur->dev;

		pci_read_config_word(dev, PCI_STATUS, &mpstat);
		if ((mpstat & PCI_STATUS_CAP_LIST) == 0)
			continue;

		/* Walk the capability list by hand to find the AGP
		 * capability (ID 2) and remember its offset. */
		pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &mcapndx);
		if (mcapndx != 0) {
			do {
				pci_read_config_dword(dev, mcapndx, &ncapid);
				if ((ncapid & 0xff) != 2)
					mcapndx = (ncapid >> 8) & 0xff;
			} while (((ncapid & 0xff) != 2) && (mcapndx != 0));
		}

		if (mcapndx == 0) {
			dev_err(&td->dev, "woah! Non-AGP device %s on "
				"secondary bus of AGP 3.5 bridge!\n",
				pci_name(dev));
			ret = -ENODEV;
			goto free_and_exit;
		}

		/* Major AGP revision lives in the capability header dword
		 * just read; anything below 3 cannot run on 3.0
		 * electricals. */
		mmajor = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
		if (mmajor < 3) {
			dev_err(&td->dev, "woah! AGP 2.0 device %s on "
				"secondary bus of AGP 3.5 bridge operating "
				"with AGP 3.0 electricals!\n", pci_name(dev));
			ret = -ENODEV;
			goto free_and_exit;
		}

		cur->capndx = mcapndx;

		pci_read_config_dword(dev, cur->capndx+AGPSTAT, &mstatus);

		/* Bit 3 of AGPSTAT signals AGP 3.x operating mode —
		 * presumably the AGP_MODE_3_0 bit; verify against the
		 * AGP 3.0 spec. */
		if (((mstatus >> 3) & 0x1) == 0) {
			dev_err(&td->dev, "woah! AGP 3.x device %s not "
				"operating in AGP 3.x mode on secondary bus "
				"of AGP 3.5 bridge operating with AGP 3.0 "
				"electricals!\n", pci_name(dev));
			ret = -ENODEV;
			goto free_and_exit;
		}
	}

	/*
	 * Call functions to divide target resources amongst the AGP 3.0
	 * masters. This process is dramatically different depending on
	 * whether isochronous transfers are supported.
	 */
	if (isoch) {
		ret = agp_3_5_isochronous_node_enable(bridge, dev_list, ndevs);
		if (ret) {
			dev_info(&td->dev, "something bad happened setting "
				 "up isochronous xfers; falling back to "
				 "non-isochronous xfer mode\n");
		} else {
			goto free_and_exit;
		}
	}
	agp_3_5_nonisochronous_node_enable(bridge, dev_list, ndevs);

free_and_exit:
	/* Be sure to free the dev_list */
	for (pos = head->next; pos != head; ) {
		cur = list_entry(pos, struct agp_3_5_dev, list);

		pos = pos->next;
		kfree(cur);
	}
	kfree(dev_list);

get_out:
	return ret;
}
gpl-2.0
davilla/meatgrinder
lib/enca/lib/ctype.c
71
3660
/* @(#) $Id: ctype.c,v 1.1 2004/05/11 16:14:02 yeti Exp $
   convert charset and surface names to internal representation and back

   Copyright (C) 2000-2003 David Necas (Yeti) <yeti@physics.muni.cz>
   Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
   and the GLib team.

   This program is free software; you can redistribute it and/or modify it
   under the terms of version 2 of the GNU General Public License as published
   by the Free Software Foundation.

   This program is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
   more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif /* HAVE_CONFIG_H */

#include "internal.h"

/* Basically taken from GLib, some more flags were added.
 * Note glibc says isspace('\v'), GLib says it is not.
 *
 * 256-entry character-classification table, indexed by byte value.
 * Each entry is a bitmask of class flags; the flag bit meanings are
 * defined elsewhere (presumably in internal.h — confirm there).
 *
 * Observable structure of the table (from the values alone):
 *  - control bytes (0x00-0x1f, 0x7f) share 0x1004, with the usual
 *    whitespace controls (TAB, LF, FF, CR, ...) getting 0x0104 instead;
 *  - '0'-'9' are 0x0c59; 'A'-'F'/'a'-'f' differ from the other letters
 *    by an extra 0x0400 bit — presumably a hex-digit flag, TODO confirm;
 *  - all bytes >= 0x80 are a bare 0x2000, a bit also present in every
 *    letter entry — NOTE(review): looks like an "extended/alpha-ish"
 *    marker, verify against the flag definitions.
 */
const short int enca_ctype_data[0x100] = {
  0x1004, 0x1004, 0x1004, 0x1004, 0x1004, 0x1004, 0x1004, 0x1004, /* 0x00 */
  0x1004, 0x0104, 0x0104, 0x1004, 0x0104, 0x0104, 0x1004, 0x1004, /* 0x08 */
  0x1004, 0x1004, 0x1004, 0x1004, 0x1004, 0x1004, 0x1004, 0x1004, /* 0x10 */
  0x1004, 0x1004, 0x1004, 0x1004, 0x1004, 0x1004, 0x1004, 0x1004, /* 0x18 */
  0x0140, 0x00d0, 0x00d0, 0x00d0, 0x00d0, 0x00d0, 0x00d0, 0x00d0, /* 0x20 */
  0x00d0, 0x00d0, 0x00d0, 0x00d0, 0x00d0, 0x08d0, 0x08d0, 0x08d0, /* 0x28 */
  0x0c59, 0x0c59, 0x0c59, 0x0c59, 0x0c59, 0x0c59, 0x0c59, 0x0c59, /* 0x30 */
  0x0c59, 0x0c59, 0x08d0, 0x00d0, 0x00d0, 0x00d0, 0x00d0, 0x00d0, /* 0x38 */
  0x00d0, 0x2e53, 0x2e53, 0x2e53, 0x2e53, 0x2e53, 0x2e53, 0x2a53, /* 0x40 */
  0x2a53, 0x2a53, 0x2a53, 0x2a53, 0x2a53, 0x2a53, 0x2a53, 0x2a53, /* 0x48 */
  0x2a53, 0x2a53, 0x2a53, 0x2a53, 0x2a53, 0x2a53, 0x2a53, 0x2a53, /* 0x50 */
  0x2a53, 0x2a53, 0x2a53, 0x00d0, 0x00d0, 0x00d0, 0x00d0, 0x08d0, /* 0x58 */
  0x00d0, 0x2c73, 0x2c73, 0x2c73, 0x2c73, 0x2c73, 0x2c73, 0x2873, /* 0x60 */
  0x2873, 0x2873, 0x2873, 0x2873, 0x2873, 0x2873, 0x2873, 0x2873, /* 0x68 */
  0x2873, 0x2873, 0x2873, 0x2873, 0x2873, 0x2873, 0x2873, 0x2873, /* 0x70 */
  0x2873, 0x2873, 0x2873, 0x00d0, 0x00d0, 0x00d0, 0x00d0, 0x1004, /* 0x78 */
  0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, /* 0x80 */
  0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, /* 0x88 */
  0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, /* 0x90 */
  0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, /* 0x98 */
  0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, /* 0xa0 */
  0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, /* 0xa8 */
  0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, /* 0xb0 */
  0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, /* 0xb8 */
  0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, /* 0xc0 */
  0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, /* 0xc8 */
  0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, /* 0xd0 */
  0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, /* 0xd8 */
  0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, /* 0xe0 */
  0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, /* 0xe8 */
  0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, /* 0xf0 */
  0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, 0x2000, /* 0xf8 */
};
gpl-2.0
raveslave/ti-linux-3.14.26
drivers/iio/buffer_cb.c
327
3156
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/iio/buffer.h>
#include <linux/iio/consumer.h>

/*
 * Callback-driven consumer buffer: wraps a struct iio_buffer so that each
 * datum pushed into the buffer is delivered to a user-supplied callback
 * instead of being queued for read().
 */
struct iio_cb_buffer {
	struct iio_buffer buffer;	/* embedded IIO buffer (must be first for container_of) */
	int (*cb)(const void *data, void *private);	/* consumer callback, invoked per datum */
	void *private;			/* opaque cookie passed back to cb */
	struct iio_channel *channels;	/* NULL-terminated array from iio_channel_get_all() */
};

/* Map an embedded struct iio_buffer back to its wrapping iio_cb_buffer. */
static struct iio_cb_buffer *buffer_to_cb_buffer(struct iio_buffer *buffer)
{
	return container_of(buffer, struct iio_cb_buffer, buffer);
}

/*
 * ->store_to hook: instead of buffering, forward the datum straight to the
 * consumer's callback. The callback's return value is propagated to the core.
 */
static int iio_buffer_cb_store_to(struct iio_buffer *buffer, const void *data)
{
	struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);
	return cb_buff->cb(data, cb_buff->private);
}

/*
 * ->release hook: called when the buffer's last reference is dropped;
 * frees the scan mask and the wrapper itself.
 */
static void iio_buffer_cb_release(struct iio_buffer *buffer)
{
	struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);
	kfree(cb_buff->buffer.scan_mask);
	kfree(cb_buff);
}

static const struct iio_buffer_access_funcs iio_cb_access = {
	.store_to = &iio_buffer_cb_store_to,
	.release = &iio_buffer_cb_release,
};

/*
 * iio_channel_get_all_cb() - allocate a callback buffer covering every IIO
 * channel mapped to @dev.
 * @dev:     consumer device whose channel map is looked up
 * @cb:      callback invoked for each stored datum
 * @private: opaque pointer handed back to @cb
 *
 * All mapped channels must belong to the same underlying iio_dev; the scan
 * mask is built from their scan indices. Returns the new wrapper or an
 * ERR_PTR on failure. Caller frees with iio_channel_release_all_cb().
 */
struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
					     int (*cb)(const void *data,
						       void *private),
					     void *private)
{
	int ret;
	struct iio_cb_buffer *cb_buff;
	struct iio_dev *indio_dev;
	struct iio_channel *chan;

	cb_buff = kzalloc(sizeof(*cb_buff), GFP_KERNEL);
	if (cb_buff == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	iio_buffer_init(&cb_buff->buffer);

	cb_buff->private = private;
	cb_buff->cb = cb;
	cb_buff->buffer.access = &iio_cb_access;
	INIT_LIST_HEAD(&cb_buff->buffer.demux_list);

	/* iio_channel_get_all() returns ERR_PTR when no channels map. */
	cb_buff->channels = iio_channel_get_all(dev);
	if (IS_ERR(cb_buff->channels)) {
		ret = PTR_ERR(cb_buff->channels);
		goto error_free_cb_buff;
	}

	indio_dev = cb_buff->channels[0].indio_dev;
	cb_buff->buffer.scan_mask
		= kcalloc(BITS_TO_LONGS(indio_dev->masklength), sizeof(long),
			  GFP_KERNEL);
	if (cb_buff->buffer.scan_mask == NULL) {
		ret = -ENOMEM;
		goto error_release_channels;
	}
	/* Walk the NULL-terminated channel array: reject channels from a
	 * different iio_dev, and set each channel's bit in the scan mask. */
	chan = &cb_buff->channels[0];
	while (chan->indio_dev) {
		if (chan->indio_dev != indio_dev) {
			ret = -EINVAL;
			goto error_free_scan_mask;
		}
		set_bit(chan->channel->scan_index,
			cb_buff->buffer.scan_mask);
		chan++;
	}

	return cb_buff;

error_free_scan_mask:
	kfree(cb_buff->buffer.scan_mask);
error_release_channels:
	iio_channel_release_all(cb_buff->channels);
error_free_cb_buff:
	kfree(cb_buff);
error_ret:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);

/*
 * iio_channel_start_all_cb() - attach the callback buffer to its device,
 * starting delivery of data to the callback.
 */
int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff)
{
	return iio_update_buffers(cb_buff->channels[0].indio_dev,
				  &cb_buff->buffer, NULL);
}
EXPORT_SYMBOL_GPL(iio_channel_start_all_cb);

/*
 * iio_channel_stop_all_cb() - detach the callback buffer, stopping delivery.
 */
void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff)
{
	iio_update_buffers(cb_buff->channels[0].indio_dev,
			   NULL, &cb_buff->buffer);
}
EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb);

/*
 * iio_channel_release_all_cb() - release the channels and drop the buffer
 * reference; the wrapper itself is freed by the ->release hook once the
 * last buffer reference goes away.
 */
void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff)
{
	iio_channel_release_all(cb_buff->channels);
	iio_buffer_put(&cb_buff->buffer);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all_cb);

/*
 * iio_channel_cb_get_channels() - expose the underlying channel array so a
 * consumer can inspect the channels it is receiving data for.
 */
struct iio_channel
*iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer)
{
	return cb_buffer->channels;
}
EXPORT_SYMBOL_GPL(iio_channel_cb_get_channels);
gpl-2.0
makarandk/linux
drivers/gpu/drm/nouveau/core/subdev/fb/ramnv1a.c
1095
2207
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "priv.h"

/*
 * nv1a_ram_create() - probe the amount of stolen system memory on
 * nv1a/nv1f IGPs.
 *
 * These integrated GPUs have no dedicated VRAM; the amount of system
 * memory stolen for the GPU is reported by the host/PCI bridge at
 * bus 0, devfn 00.1, so we read it from that device's config space
 * rather than from the GPU itself. Register offset and field layout
 * differ between chipset 0x1a and the rest (presumably 0x1f) —
 * see the two branches below.
 *
 * Returns 0 on success, -ENODEV if the bridge device is missing, or
 * the error from nouveau_ram_create().
 */
static int
nv1a_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nouveau_fb *pfb = nouveau_fb(parent);
	struct nouveau_ram *ram;
	struct pci_dev *bridge;
	u32 mem, mib;
	int ret;

	bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
	if (!bridge) {
		nv_fatal(pfb, "no bridge device\n");
		return -ENODEV;
	}

	ret = nouveau_ram_create(parent, engine, oclass, &ram);
	*pobject = nv_object(ram);
	if (ret)
		goto out_put;

	if (nv_device(pfb)->chipset == 0x1a) {
		pci_read_config_dword(bridge, 0x7c, &mem);
		mib = ((mem >> 6) & 31) + 1;
	} else {
		pci_read_config_dword(bridge, 0x84, &mem);
		mib = ((mem >> 4) & 127) + 1;
	}

	ram->type = NV_MEM_TYPE_STOLEN;
	ram->size = mib * 1024 * 1024;
	ret = 0;

out_put:
	/* pci_get_bus_and_slot() took a reference on the bridge device;
	 * drop it now that we are done with its config space.  The
	 * original code leaked this reference on every path. */
	pci_dev_put(bridge);
	return ret;
}

struct nouveau_oclass
nv1a_ram_oclass = {
	.handle = 0,
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv1a_ram_create,
		.dtor = _nouveau_ram_dtor,
		.init = _nouveau_ram_init,
		.fini = _nouveau_ram_fini,
	}
};
gpl-2.0
Talustus/dreamkernel_ics_sghi777
kernel/relay.c
1095
33536
/* * Public API and common code for kernel->userspace relay file support. * * See Documentation/filesystems/relay.txt for an overview. * * Copyright (C) 2002-2005 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp * Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com) * * Moved to kernel/relay.c by Paul Mundt, 2006. * November 2006 - CPU hotplug support by Mathieu Desnoyers * (mathieu.desnoyers@polymtl.ca) * * This file is released under the GPL. */ #include <linux/errno.h> #include <linux/stddef.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/string.h> #include <linux/relay.h> #include <linux/vmalloc.h> #include <linux/mm.h> #include <linux/cpu.h> #include <linux/splice.h> /* list of open channels, for cpu hotplug */ static DEFINE_MUTEX(relay_channels_mutex); static LIST_HEAD(relay_channels); /* * close() vm_op implementation for relay file mapping. */ static void relay_file_mmap_close(struct vm_area_struct *vma) { struct rchan_buf *buf = vma->vm_private_data; buf->chan->cb->buf_unmapped(buf, vma->vm_file); } /* * fault() vm_op implementation for relay file mapping. */ static int relay_buf_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct page *page; struct rchan_buf *buf = vma->vm_private_data; pgoff_t pgoff = vmf->pgoff; if (!buf) return VM_FAULT_OOM; page = vmalloc_to_page(buf->start + (pgoff << PAGE_SHIFT)); if (!page) return VM_FAULT_SIGBUS; get_page(page); vmf->page = page; return 0; } /* * vm_ops for relay file mappings. 
*/ static const struct vm_operations_struct relay_file_mmap_ops = { .fault = relay_buf_fault, .close = relay_file_mmap_close, }; /* * allocate an array of pointers of struct page */ static struct page **relay_alloc_page_array(unsigned int n_pages) { const size_t pa_size = n_pages * sizeof(struct page *); if (pa_size > PAGE_SIZE) return vzalloc(pa_size); return kzalloc(pa_size, GFP_KERNEL); } /* * free an array of pointers of struct page */ static void relay_free_page_array(struct page **array) { if (is_vmalloc_addr(array)) vfree(array); else kfree(array); } /** * relay_mmap_buf: - mmap channel buffer to process address space * @buf: relay channel buffer * @vma: vm_area_struct describing memory to be mapped * * Returns 0 if ok, negative on error * * Caller should already have grabbed mmap_sem. */ static int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma) { unsigned long length = vma->vm_end - vma->vm_start; struct file *filp = vma->vm_file; if (!buf) return -EBADF; if (length != (unsigned long)buf->chan->alloc_size) return -EINVAL; vma->vm_ops = &relay_file_mmap_ops; vma->vm_flags |= VM_DONTEXPAND; vma->vm_private_data = buf; buf->chan->cb->buf_mapped(buf, filp); return 0; } /** * relay_alloc_buf - allocate a channel buffer * @buf: the buffer struct * @size: total size of the buffer * * Returns a pointer to the resulting buffer, %NULL if unsuccessful. The * passed in size will get page aligned, if it isn't already. 
*/ static void *relay_alloc_buf(struct rchan_buf *buf, size_t *size) { void *mem; unsigned int i, j, n_pages; *size = PAGE_ALIGN(*size); n_pages = *size >> PAGE_SHIFT; buf->page_array = relay_alloc_page_array(n_pages); if (!buf->page_array) return NULL; for (i = 0; i < n_pages; i++) { buf->page_array[i] = alloc_page(GFP_KERNEL); if (unlikely(!buf->page_array[i])) goto depopulate; set_page_private(buf->page_array[i], (unsigned long)buf); } mem = vmap(buf->page_array, n_pages, VM_MAP, PAGE_KERNEL); if (!mem) goto depopulate; memset(mem, 0, *size); buf->page_count = n_pages; return mem; depopulate: for (j = 0; j < i; j++) __free_page(buf->page_array[j]); relay_free_page_array(buf->page_array); return NULL; } /** * relay_create_buf - allocate and initialize a channel buffer * @chan: the relay channel * * Returns channel buffer if successful, %NULL otherwise. */ static struct rchan_buf *relay_create_buf(struct rchan *chan) { struct rchan_buf *buf; if (chan->n_subbufs > UINT_MAX / sizeof(size_t *)) return NULL; buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL); if (!buf) return NULL; buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL); if (!buf->padding) goto free_buf; buf->start = relay_alloc_buf(buf, &chan->alloc_size); if (!buf->start) goto free_buf; buf->chan = chan; kref_get(&buf->chan->kref); return buf; free_buf: kfree(buf->padding); kfree(buf); return NULL; } /** * relay_destroy_channel - free the channel struct * @kref: target kernel reference that contains the relay channel * * Should only be called from kref_put(). 
*/ static void relay_destroy_channel(struct kref *kref) { struct rchan *chan = container_of(kref, struct rchan, kref); kfree(chan); } /** * relay_destroy_buf - destroy an rchan_buf struct and associated buffer * @buf: the buffer struct */ static void relay_destroy_buf(struct rchan_buf *buf) { struct rchan *chan = buf->chan; unsigned int i; if (likely(buf->start)) { vunmap(buf->start); for (i = 0; i < buf->page_count; i++) __free_page(buf->page_array[i]); relay_free_page_array(buf->page_array); } chan->buf[buf->cpu] = NULL; kfree(buf->padding); kfree(buf); kref_put(&chan->kref, relay_destroy_channel); } /** * relay_remove_buf - remove a channel buffer * @kref: target kernel reference that contains the relay buffer * * Removes the file from the fileystem, which also frees the * rchan_buf_struct and the channel buffer. Should only be called from * kref_put(). */ static void relay_remove_buf(struct kref *kref) { struct rchan_buf *buf = container_of(kref, struct rchan_buf, kref); buf->chan->cb->remove_buf_file(buf->dentry); relay_destroy_buf(buf); } /** * relay_buf_empty - boolean, is the channel buffer empty? * @buf: channel buffer * * Returns 1 if the buffer is empty, 0 otherwise. */ static int relay_buf_empty(struct rchan_buf *buf) { return (buf->subbufs_produced - buf->subbufs_consumed) ? 0 : 1; } /** * relay_buf_full - boolean, is the channel buffer full? * @buf: channel buffer * * Returns 1 if the buffer is full, 0 otherwise. */ int relay_buf_full(struct rchan_buf *buf) { size_t ready = buf->subbufs_produced - buf->subbufs_consumed; return (ready >= buf->chan->n_subbufs) ? 1 : 0; } EXPORT_SYMBOL_GPL(relay_buf_full); /* * High-level relay kernel API and associated functions. */ /* * rchan_callback implementations defining default channel behavior. Used * in place of corresponding NULL values in client callback struct. */ /* * subbuf_start() default callback. Does nothing. 
*/ static int subbuf_start_default_callback (struct rchan_buf *buf, void *subbuf, void *prev_subbuf, size_t prev_padding) { if (relay_buf_full(buf)) return 0; return 1; } /* * buf_mapped() default callback. Does nothing. */ static void buf_mapped_default_callback(struct rchan_buf *buf, struct file *filp) { } /* * buf_unmapped() default callback. Does nothing. */ static void buf_unmapped_default_callback(struct rchan_buf *buf, struct file *filp) { } /* * create_buf_file_create() default callback. Does nothing. */ static struct dentry *create_buf_file_default_callback(const char *filename, struct dentry *parent, int mode, struct rchan_buf *buf, int *is_global) { return NULL; } /* * remove_buf_file() default callback. Does nothing. */ static int remove_buf_file_default_callback(struct dentry *dentry) { return -EINVAL; } /* relay channel default callbacks */ static struct rchan_callbacks default_channel_callbacks = { .subbuf_start = subbuf_start_default_callback, .buf_mapped = buf_mapped_default_callback, .buf_unmapped = buf_unmapped_default_callback, .create_buf_file = create_buf_file_default_callback, .remove_buf_file = remove_buf_file_default_callback, }; /** * wakeup_readers - wake up readers waiting on a channel * @data: contains the channel buffer * * This is the timer function used to defer reader waking. */ static void wakeup_readers(unsigned long data) { struct rchan_buf *buf = (struct rchan_buf *)data; wake_up_interruptible(&buf->read_wait); } /** * __relay_reset - reset a channel buffer * @buf: the channel buffer * @init: 1 if this is a first-time initialization * * See relay_reset() for description of effect. 
*/ static void __relay_reset(struct rchan_buf *buf, unsigned int init) { size_t i; if (init) { init_waitqueue_head(&buf->read_wait); kref_init(&buf->kref); setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf); } else del_timer_sync(&buf->timer); buf->subbufs_produced = 0; buf->subbufs_consumed = 0; buf->bytes_consumed = 0; buf->finalized = 0; buf->data = buf->start; buf->offset = 0; for (i = 0; i < buf->chan->n_subbufs; i++) buf->padding[i] = 0; buf->chan->cb->subbuf_start(buf, buf->data, NULL, 0); } /** * relay_reset - reset the channel * @chan: the channel * * This has the effect of erasing all data from all channel buffers * and restarting the channel in its initial state. The buffers * are not freed, so any mappings are still in effect. * * NOTE. Care should be taken that the channel isn't actually * being used by anything when this call is made. */ void relay_reset(struct rchan *chan) { unsigned int i; if (!chan) return; if (chan->is_global && chan->buf[0]) { __relay_reset(chan->buf[0], 0); return; } mutex_lock(&relay_channels_mutex); for_each_possible_cpu(i) if (chan->buf[i]) __relay_reset(chan->buf[i], 0); mutex_unlock(&relay_channels_mutex); } EXPORT_SYMBOL_GPL(relay_reset); static inline void relay_set_buf_dentry(struct rchan_buf *buf, struct dentry *dentry) { buf->dentry = dentry; buf->dentry->d_inode->i_size = buf->early_bytes; } static struct dentry *relay_create_buf_file(struct rchan *chan, struct rchan_buf *buf, unsigned int cpu) { struct dentry *dentry; char *tmpname; tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL); if (!tmpname) return NULL; snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu); /* Create file in fs */ dentry = chan->cb->create_buf_file(tmpname, chan->parent, S_IRUSR, buf, &chan->is_global); kfree(tmpname); return dentry; } /* * relay_open_buf - create a new relay channel buffer * * used by relay_open() and CPU hotplug. 
*/ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu) { struct rchan_buf *buf = NULL; struct dentry *dentry; if (chan->is_global) return chan->buf[0]; buf = relay_create_buf(chan); if (!buf) return NULL; if (chan->has_base_filename) { dentry = relay_create_buf_file(chan, buf, cpu); if (!dentry) goto free_buf; relay_set_buf_dentry(buf, dentry); } buf->cpu = cpu; __relay_reset(buf, 1); if(chan->is_global) { chan->buf[0] = buf; buf->cpu = 0; } return buf; free_buf: relay_destroy_buf(buf); return NULL; } /** * relay_close_buf - close a channel buffer * @buf: channel buffer * * Marks the buffer finalized and restores the default callbacks. * The channel buffer and channel buffer data structure are then freed * automatically when the last reference is given up. */ static void relay_close_buf(struct rchan_buf *buf) { buf->finalized = 1; del_timer_sync(&buf->timer); kref_put(&buf->kref, relay_remove_buf); } static void setup_callbacks(struct rchan *chan, struct rchan_callbacks *cb) { if (!cb) { chan->cb = &default_channel_callbacks; return; } if (!cb->subbuf_start) cb->subbuf_start = subbuf_start_default_callback; if (!cb->buf_mapped) cb->buf_mapped = buf_mapped_default_callback; if (!cb->buf_unmapped) cb->buf_unmapped = buf_unmapped_default_callback; if (!cb->create_buf_file) cb->create_buf_file = create_buf_file_default_callback; if (!cb->remove_buf_file) cb->remove_buf_file = remove_buf_file_default_callback; chan->cb = cb; } /** * relay_hotcpu_callback - CPU hotplug callback * @nb: notifier block * @action: hotplug action to take * @hcpu: CPU number * * Returns the success/failure of the operation. 
(%NOTIFY_OK, %NOTIFY_BAD) */ static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) { unsigned int hotcpu = (unsigned long)hcpu; struct rchan *chan; switch(action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: mutex_lock(&relay_channels_mutex); list_for_each_entry(chan, &relay_channels, list) { if (chan->buf[hotcpu]) continue; chan->buf[hotcpu] = relay_open_buf(chan, hotcpu); if(!chan->buf[hotcpu]) { printk(KERN_ERR "relay_hotcpu_callback: cpu %d buffer " "creation failed\n", hotcpu); mutex_unlock(&relay_channels_mutex); return notifier_from_errno(-ENOMEM); } } mutex_unlock(&relay_channels_mutex); break; case CPU_DEAD: case CPU_DEAD_FROZEN: /* No need to flush the cpu : will be flushed upon * final relay_flush() call. */ break; } return NOTIFY_OK; } /** * relay_open - create a new relay channel * @base_filename: base name of files to create, %NULL for buffering only * @parent: dentry of parent directory, %NULL for root directory or buffer * @subbuf_size: size of sub-buffers * @n_subbufs: number of sub-buffers * @cb: client callback functions * @private_data: user-defined data * * Returns channel pointer if successful, %NULL otherwise. * * Creates a channel buffer for each cpu using the sizes and * attributes specified. The created channel buffer files * will be named base_filename0...base_filenameN-1. File * permissions will be %S_IRUSR. 
*/ struct rchan *relay_open(const char *base_filename, struct dentry *parent, size_t subbuf_size, size_t n_subbufs, struct rchan_callbacks *cb, void *private_data) { unsigned int i; struct rchan *chan; if (!(subbuf_size && n_subbufs)) return NULL; if (subbuf_size > UINT_MAX / n_subbufs) return NULL; chan = kzalloc(sizeof(struct rchan), GFP_KERNEL); if (!chan) return NULL; chan->version = RELAYFS_CHANNEL_VERSION; chan->n_subbufs = n_subbufs; chan->subbuf_size = subbuf_size; chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs); chan->parent = parent; chan->private_data = private_data; if (base_filename) { chan->has_base_filename = 1; strlcpy(chan->base_filename, base_filename, NAME_MAX); } setup_callbacks(chan, cb); kref_init(&chan->kref); mutex_lock(&relay_channels_mutex); for_each_online_cpu(i) { chan->buf[i] = relay_open_buf(chan, i); if (!chan->buf[i]) goto free_bufs; } list_add(&chan->list, &relay_channels); mutex_unlock(&relay_channels_mutex); return chan; free_bufs: for_each_possible_cpu(i) { if (chan->buf[i]) relay_close_buf(chan->buf[i]); } kref_put(&chan->kref, relay_destroy_channel); mutex_unlock(&relay_channels_mutex); return NULL; } EXPORT_SYMBOL_GPL(relay_open); struct rchan_percpu_buf_dispatcher { struct rchan_buf *buf; struct dentry *dentry; }; /* Called in atomic context. */ static void __relay_set_buf_dentry(void *info) { struct rchan_percpu_buf_dispatcher *p = info; relay_set_buf_dentry(p->buf, p->dentry); } /** * relay_late_setup_files - triggers file creation * @chan: channel to operate on * @base_filename: base name of files to create * @parent: dentry of parent directory, %NULL for root directory * * Returns 0 if successful, non-zero otherwise. * * Use to setup files for a previously buffer-only channel. * Useful to do early tracing in kernel, before VFS is up, for example. 
*/ int relay_late_setup_files(struct rchan *chan, const char *base_filename, struct dentry *parent) { int err = 0; unsigned int i, curr_cpu; unsigned long flags; struct dentry *dentry; struct rchan_percpu_buf_dispatcher disp; if (!chan || !base_filename) return -EINVAL; strlcpy(chan->base_filename, base_filename, NAME_MAX); mutex_lock(&relay_channels_mutex); /* Is chan already set up? */ if (unlikely(chan->has_base_filename)) { mutex_unlock(&relay_channels_mutex); return -EEXIST; } chan->has_base_filename = 1; chan->parent = parent; curr_cpu = get_cpu(); /* * The CPU hotplug notifier ran before us and created buffers with * no files associated. So it's safe to call relay_setup_buf_file() * on all currently online CPUs. */ for_each_online_cpu(i) { if (unlikely(!chan->buf[i])) { WARN_ONCE(1, KERN_ERR "CPU has no buffer!\n"); err = -EINVAL; break; } dentry = relay_create_buf_file(chan, chan->buf[i], i); if (unlikely(!dentry)) { err = -EINVAL; break; } if (curr_cpu == i) { local_irq_save(flags); relay_set_buf_dentry(chan->buf[i], dentry); local_irq_restore(flags); } else { disp.buf = chan->buf[i]; disp.dentry = dentry; smp_mb(); /* relay_channels_mutex must be held, so wait. */ err = smp_call_function_single(i, __relay_set_buf_dentry, &disp, 1); } if (unlikely(err)) break; } put_cpu(); mutex_unlock(&relay_channels_mutex); return err; } /** * relay_switch_subbuf - switch to a new sub-buffer * @buf: channel buffer * @length: size of current event * * Returns either the length passed in or 0 if full. * * Performs sub-buffer-switch tasks such as invoking callbacks, * updating padding counts, waking up readers, etc. 
*/ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length) { void *old, *new; size_t old_subbuf, new_subbuf; if (unlikely(length > buf->chan->subbuf_size)) goto toobig; if (buf->offset != buf->chan->subbuf_size + 1) { buf->prev_padding = buf->chan->subbuf_size - buf->offset; old_subbuf = buf->subbufs_produced % buf->chan->n_subbufs; buf->padding[old_subbuf] = buf->prev_padding; buf->subbufs_produced++; if (buf->dentry) buf->dentry->d_inode->i_size += buf->chan->subbuf_size - buf->padding[old_subbuf]; else buf->early_bytes += buf->chan->subbuf_size - buf->padding[old_subbuf]; smp_mb(); if (waitqueue_active(&buf->read_wait)) /* * Calling wake_up_interruptible() from here * will deadlock if we happen to be logging * from the scheduler (trying to re-grab * rq->lock), so defer it. */ mod_timer(&buf->timer, jiffies + 1); } old = buf->data; new_subbuf = buf->subbufs_produced % buf->chan->n_subbufs; new = buf->start + new_subbuf * buf->chan->subbuf_size; buf->offset = 0; if (!buf->chan->cb->subbuf_start(buf, new, old, buf->prev_padding)) { buf->offset = buf->chan->subbuf_size + 1; return 0; } buf->data = new; buf->padding[new_subbuf] = 0; if (unlikely(length + buf->offset > buf->chan->subbuf_size)) goto toobig; return length; toobig: buf->chan->last_toobig = length; return 0; } EXPORT_SYMBOL_GPL(relay_switch_subbuf); /** * relay_subbufs_consumed - update the buffer's sub-buffers-consumed count * @chan: the channel * @cpu: the cpu associated with the channel buffer to update * @subbufs_consumed: number of sub-buffers to add to current buf's count * * Adds to the channel buffer's consumed sub-buffer count. * subbufs_consumed should be the number of sub-buffers newly consumed, * not the total consumed. * * NOTE. Kernel clients don't need to call this function if the channel * mode is 'overwrite'. 
*/ void relay_subbufs_consumed(struct rchan *chan, unsigned int cpu, size_t subbufs_consumed) { struct rchan_buf *buf; if (!chan) return; if (cpu >= NR_CPUS || !chan->buf[cpu] || subbufs_consumed > chan->n_subbufs) return; buf = chan->buf[cpu]; if (subbufs_consumed > buf->subbufs_produced - buf->subbufs_consumed) buf->subbufs_consumed = buf->subbufs_produced; else buf->subbufs_consumed += subbufs_consumed; } EXPORT_SYMBOL_GPL(relay_subbufs_consumed); /** * relay_close - close the channel * @chan: the channel * * Closes all channel buffers and frees the channel. */ void relay_close(struct rchan *chan) { unsigned int i; if (!chan) return; mutex_lock(&relay_channels_mutex); if (chan->is_global && chan->buf[0]) relay_close_buf(chan->buf[0]); else for_each_possible_cpu(i) if (chan->buf[i]) relay_close_buf(chan->buf[i]); if (chan->last_toobig) printk(KERN_WARNING "relay: one or more items not logged " "[item size (%Zd) > sub-buffer size (%Zd)]\n", chan->last_toobig, chan->subbuf_size); list_del(&chan->list); kref_put(&chan->kref, relay_destroy_channel); mutex_unlock(&relay_channels_mutex); } EXPORT_SYMBOL_GPL(relay_close); /** * relay_flush - close the channel * @chan: the channel * * Flushes all channel buffers, i.e. forces buffer switch. */ void relay_flush(struct rchan *chan) { unsigned int i; if (!chan) return; if (chan->is_global && chan->buf[0]) { relay_switch_subbuf(chan->buf[0], 0); return; } mutex_lock(&relay_channels_mutex); for_each_possible_cpu(i) if (chan->buf[i]) relay_switch_subbuf(chan->buf[i], 0); mutex_unlock(&relay_channels_mutex); } EXPORT_SYMBOL_GPL(relay_flush); /** * relay_file_open - open file op for relay files * @inode: the inode * @filp: the file * * Increments the channel buffer refcount. 
*/ static int relay_file_open(struct inode *inode, struct file *filp) { struct rchan_buf *buf = inode->i_private; kref_get(&buf->kref); filp->private_data = buf; return nonseekable_open(inode, filp); } /** * relay_file_mmap - mmap file op for relay files * @filp: the file * @vma: the vma describing what to map * * Calls upon relay_mmap_buf() to map the file into user space. */ static int relay_file_mmap(struct file *filp, struct vm_area_struct *vma) { struct rchan_buf *buf = filp->private_data; return relay_mmap_buf(buf, vma); } /** * relay_file_poll - poll file op for relay files * @filp: the file * @wait: poll table * * Poll implemention. */ static unsigned int relay_file_poll(struct file *filp, poll_table *wait) { unsigned int mask = 0; struct rchan_buf *buf = filp->private_data; if (buf->finalized) return POLLERR; if (filp->f_mode & FMODE_READ) { poll_wait(filp, &buf->read_wait, wait); if (!relay_buf_empty(buf)) mask |= POLLIN | POLLRDNORM; } return mask; } /** * relay_file_release - release file op for relay files * @inode: the inode * @filp: the file * * Decrements the channel refcount, as the filesystem is * no longer using it. 
*/
static int relay_file_release(struct inode *inode, struct file *filp)
{
	struct rchan_buf *buf = filp->private_data;

	/* Drop the reference taken in relay_file_open(); the buffer is
	 * destroyed via relay_remove_buf() when this was the last user. */
	kref_put(&buf->kref, relay_remove_buf);

	return 0;
}

/*
 * relay_file_read_consume - update the consumed count for the buffer
 *
 * Accounts @bytes_consumed more bytes as read at file position
 * @read_pos, retiring whole sub-buffers via relay_subbufs_consumed()
 * as their payload is exhausted.
 */
static void relay_file_read_consume(struct rchan_buf *buf,
				    size_t read_pos,
				    size_t bytes_consumed)
{
	size_t subbuf_size = buf->chan->subbuf_size;
	size_t n_subbufs = buf->chan->n_subbufs;
	size_t read_subbuf;

	/* Nothing has been produced beyond what was already consumed:
	 * there is no accounting to do. */
	if (buf->subbufs_produced == buf->subbufs_consumed &&
	    buf->offset == buf->bytes_consumed)
		return;

	/* This consume crosses a sub-buffer boundary: retire the current
	 * sub-buffer before restarting the in-subbuf byte count. */
	if (buf->bytes_consumed + bytes_consumed > subbuf_size) {
		relay_subbufs_consumed(buf->chan, buf->cpu, 1);
		buf->bytes_consumed = 0;
	}

	buf->bytes_consumed += bytes_consumed;

	/* read_pos == 0 means "wherever consumption last stopped";
	 * otherwise derive the sub-buffer index from the position. */
	if (!read_pos)
		read_subbuf = buf->subbufs_consumed % n_subbufs;
	else
		read_subbuf = read_pos / buf->chan->subbuf_size;

	/* The sub-buffer's payload (subbuf_size - padding) is fully
	 * consumed: retire it -- unless it is the sub-buffer currently
	 * being written and it sits exactly at the full mark, in which
	 * case the writer still owns it. */
	if (buf->bytes_consumed + buf->padding[read_subbuf] == subbuf_size) {
		if ((read_subbuf == buf->subbufs_produced % n_subbufs) &&
		    (buf->offset == subbuf_size))
			return;
		relay_subbufs_consumed(buf->chan, buf->cpu, 1);
		buf->bytes_consumed = 0;
	}
}

/*
 * relay_file_read_avail - boolean, are there unconsumed bytes available?
*/
static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos)
{
	size_t subbuf_size = buf->chan->subbuf_size;
	size_t n_subbufs = buf->chan->n_subbufs;
	size_t produced = buf->subbufs_produced;
	size_t consumed = buf->subbufs_consumed;

	/* Zero-byte consume: only retires sub-buffers whose payload was
	 * already fully read, bringing the consumed counters up to date. */
	relay_file_read_consume(buf, read_pos, 0);

	consumed = buf->subbufs_consumed;

	/* offset > subbuf_size marks an in-progress sub-buffer switch by
	 * the writer; fall back to a coarse produced/consumed compare. */
	if (unlikely(buf->offset > subbuf_size)) {
		if (produced == consumed)
			return 0;
		return 1;
	}

	/* The writer lapped the reader: data was overwritten.  Jump the
	 * consumed counter to the oldest sub-buffer that still survives. */
	if (unlikely(produced - consumed >= n_subbufs)) {
		consumed = produced - n_subbufs + 1;
		buf->subbufs_consumed = consumed;
		buf->bytes_consumed = 0;
	}

	/* Convert both counters to absolute byte positions within one
	 * ring (n_subbufs * subbuf_size bytes) for comparison. */
	produced = (produced % n_subbufs) * subbuf_size + buf->offset;
	consumed = (consumed % n_subbufs) * subbuf_size + buf->bytes_consumed;

	if (consumed > produced)
		produced += n_subbufs * subbuf_size;

	if (consumed == produced) {
		/* Equal byte positions can still hide a whole unconsumed
		 * sub-buffer when the write offset sits exactly at the
		 * sub-buffer boundary. */
		if (buf->offset == subbuf_size &&
		    buf->subbufs_produced > buf->subbufs_consumed)
			return 1;
		return 0;
	}

	return 1;
}

/**
 * relay_file_read_subbuf_avail - return bytes available in sub-buffer
 * @read_pos: file read position
 * @buf: relay channel buffer
 *
 * Returns how many unread payload bytes remain in the sub-buffer
 * containing @read_pos, excluding that sub-buffer's trailing padding.
 */
static size_t relay_file_read_subbuf_avail(size_t read_pos,
					   struct rchan_buf *buf)
{
	size_t padding, avail = 0;
	size_t read_subbuf, read_offset, write_subbuf, write_offset;
	size_t subbuf_size = buf->chan->subbuf_size;

	/* buf->data points at the sub-buffer currently being written. */
	write_subbuf = (buf->data - buf->start) / subbuf_size;
	write_offset = buf->offset > subbuf_size ? subbuf_size : buf->offset;
	read_subbuf = read_pos / subbuf_size;
	read_offset = read_pos % subbuf_size;
	padding = buf->padding[read_subbuf];

	if (read_subbuf == write_subbuf) {
		/* Reading the in-flight sub-buffer: only bytes up to the
		 * current write offset are valid. */
		if (read_offset + padding < write_offset)
			avail = write_offset - (read_offset + padding);
	} else
		/* A completed sub-buffer: everything up to the padding. */
		avail = (subbuf_size - padding) - read_offset;

	return avail;
}

/**
 * relay_file_read_start_pos - find the first available byte to read
 * @read_pos: file read position
 * @buf: relay channel buffer
 *
 * If the @read_pos is in the middle of padding, return the
 * position of the first actually available byte, otherwise
 * return the original value.
*/
static size_t relay_file_read_start_pos(size_t read_pos,
					struct rchan_buf *buf)
{
	size_t read_subbuf, padding, padding_start, padding_end;
	size_t subbuf_size = buf->chan->subbuf_size;
	size_t n_subbufs = buf->chan->n_subbufs;
	size_t consumed = buf->subbufs_consumed % n_subbufs;

	/* read_pos == 0 means "resume where consumption last stopped". */
	if (!read_pos)
		read_pos = consumed * subbuf_size + buf->bytes_consumed;
	read_subbuf = read_pos / subbuf_size;
	padding = buf->padding[read_subbuf];
	/* Padding occupies the tail of each sub-buffer:
	 * [padding_start, padding_end) holds no payload. */
	padding_start = (read_subbuf + 1) * subbuf_size - padding;
	padding_end = (read_subbuf + 1) * subbuf_size;
	if (read_pos >= padding_start && read_pos < padding_end) {
		/* Skip over the padding to the start of the next
		 * sub-buffer (wrapping around the ring). */
		read_subbuf = (read_subbuf + 1) % n_subbufs;
		read_pos = read_subbuf * subbuf_size;
	}

	return read_pos;
}

/**
 * relay_file_read_end_pos - return the new read position
 * @read_pos: file read position
 * @buf: relay channel buffer
 * @count: number of bytes to be read
 *
 * Advances @read_pos by @count, jumping over trailing padding when the
 * read exactly exhausts a sub-buffer's payload, and wrapping to 0 at
 * the end of the ring.
 */
static size_t relay_file_read_end_pos(struct rchan_buf *buf,
				      size_t read_pos,
				      size_t count)
{
	size_t read_subbuf, padding, end_pos;
	size_t subbuf_size = buf->chan->subbuf_size;
	size_t n_subbufs = buf->chan->n_subbufs;

	read_subbuf = read_pos / subbuf_size;
	padding = buf->padding[read_subbuf];
	/* Payload exhausted exactly: land on the next sub-buffer
	 * boundary instead of inside the padding. */
	if (read_pos % subbuf_size + count + padding == subbuf_size)
		end_pos = (read_subbuf + 1) * subbuf_size;
	else
		end_pos = read_pos + count;
	if (end_pos >= subbuf_size * n_subbufs)
		end_pos = 0;

	return end_pos;
}

/*
 * subbuf_read_actor - read up to one subbuf's worth of data
 *
 * Copies @avail bytes starting at @read_start straight to user space
 * and updates the read descriptor.  Returns the number of bytes
 * copied, or 0 with desc->error set on a failed copy_to_user().
 * The @actor argument is unused here.
 */
static int subbuf_read_actor(size_t read_start,
			     struct rchan_buf *buf,
			     size_t avail,
			     read_descriptor_t *desc,
			     read_actor_t actor)
{
	void *from;
	int ret = 0;

	from = buf->start + read_start;
	ret = avail;
	if (copy_to_user(desc->arg.buf, from, avail)) {
		desc->error = -EFAULT;
		ret = 0;
	}
	desc->arg.data += ret;
	desc->written += ret;
	desc->count -= ret;

	return ret;
}

/* Pluggable per-subbuf copy routine used by relay_file_read_subbufs(). */
typedef int (*subbuf_actor_t) (size_t read_start,
			       struct rchan_buf *buf,
			       size_t avail,
			       read_descriptor_t *desc,
			       read_actor_t actor);

/*
 * relay_file_read_subbufs - read count bytes, bridging subbuf boundaries
*/
static ssize_t relay_file_read_subbufs(struct file *filp, loff_t *ppos,
					subbuf_actor_t subbuf_actor,
					read_actor_t actor,
					read_descriptor_t *desc)
{
	struct rchan_buf *buf = filp->private_data;
	size_t read_start, avail;
	int ret;

	if (!desc->count)
		return 0;

	/* i_mutex serializes concurrent readers of this buffer. */
	mutex_lock(&filp->f_path.dentry->d_inode->i_mutex);
	do {
		if (!relay_file_read_avail(buf, *ppos))
			break;

		/* Resolve the position past any padding, then see how
		 * much of the current sub-buffer can be read. */
		read_start = relay_file_read_start_pos(*ppos, buf);
		avail = relay_file_read_subbuf_avail(read_start, buf);
		if (!avail)
			break;

		avail = min(desc->count, avail);
		ret = subbuf_actor(read_start, buf, avail, desc, actor);
		if (desc->error < 0)
			break;

		if (ret) {
			relay_file_read_consume(buf, read_start, ret);
			*ppos = relay_file_read_end_pos(buf, read_start, ret);
		}
	} while (desc->count && ret);
	mutex_unlock(&filp->f_path.dentry->d_inode->i_mutex);

	return desc->written;
}

/* read(2) entry point: wraps the request in a read descriptor and
 * drives relay_file_read_subbufs() with the copy-to-user actor. */
static ssize_t relay_file_read(struct file *filp,
			       char __user *buffer,
			       size_t count,
			       loff_t *ppos)
{
	read_descriptor_t desc;
	desc.written = 0;
	desc.count = count;
	desc.arg.buf = buffer;
	desc.error = 0;
	return relay_file_read_subbufs(filp, ppos, subbuf_read_actor,
				       NULL, &desc);
}

/* Account bytes consumed via splice; retires a sub-buffer once a full
 * subbuf_size worth of bytes has been drained. */
static void relay_consume_bytes(struct rchan_buf *rbuf, int bytes_consumed)
{
	rbuf->bytes_consumed += bytes_consumed;

	if (rbuf->bytes_consumed >= rbuf->chan->subbuf_size) {
		relay_subbufs_consumed(rbuf->chan, rbuf->cpu, 1);
		rbuf->bytes_consumed %= rbuf->chan->subbuf_size;
	}
}

/* Pipe-buffer release: the owning rchan_buf was stashed in the page's
 * private field, and buf->private holds the byte count (including any
 * padding) set up by subbuf_splice_actor(). */
static void relay_pipe_buf_release(struct pipe_inode_info *pipe,
				   struct pipe_buffer *buf)
{
	struct rchan_buf *rbuf;

	rbuf = (struct rchan_buf *)page_private(buf->page);
	relay_consume_bytes(rbuf, buf->private);
}

static const struct pipe_buf_operations relay_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = relay_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

/* Pages are owned by the relay buffer, not the splice descriptor:
 * nothing to release here. */
static void relay_page_release(struct splice_pipe_desc *spd, unsigned int i)
{
}

/*
 *	subbuf_splice_actor - splice up to one
subbuf's worth of data */ static ssize_t subbuf_splice_actor(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags, int *nonpad_ret) { unsigned int pidx, poff, total_len, subbuf_pages, nr_pages; struct rchan_buf *rbuf = in->private_data; unsigned int subbuf_size = rbuf->chan->subbuf_size; uint64_t pos = (uint64_t) *ppos; uint32_t alloc_size = (uint32_t) rbuf->chan->alloc_size; size_t read_start = (size_t) do_div(pos, alloc_size); size_t read_subbuf = read_start / subbuf_size; size_t padding = rbuf->padding[read_subbuf]; size_t nonpad_end = read_subbuf * subbuf_size + subbuf_size - padding; struct page *pages[PIPE_DEF_BUFFERS]; struct partial_page partial[PIPE_DEF_BUFFERS]; struct splice_pipe_desc spd = { .pages = pages, .nr_pages = 0, .partial = partial, .flags = flags, .ops = &relay_pipe_buf_ops, .spd_release = relay_page_release, }; ssize_t ret; if (rbuf->subbufs_produced == rbuf->subbufs_consumed) return 0; if (splice_grow_spd(pipe, &spd)) return -ENOMEM; /* * Adjust read len, if longer than what is available */ if (len > (subbuf_size - read_start % subbuf_size)) len = subbuf_size - read_start % subbuf_size; subbuf_pages = rbuf->chan->alloc_size >> PAGE_SHIFT; pidx = (read_start / PAGE_SIZE) % subbuf_pages; poff = read_start & ~PAGE_MASK; nr_pages = min_t(unsigned int, subbuf_pages, pipe->buffers); for (total_len = 0; spd.nr_pages < nr_pages; spd.nr_pages++) { unsigned int this_len, this_end, private; unsigned int cur_pos = read_start + total_len; if (!len) break; this_len = min_t(unsigned long, len, PAGE_SIZE - poff); private = this_len; spd.pages[spd.nr_pages] = rbuf->page_array[pidx]; spd.partial[spd.nr_pages].offset = poff; this_end = cur_pos + this_len; if (this_end >= nonpad_end) { this_len = nonpad_end - cur_pos; private = this_len + padding; } spd.partial[spd.nr_pages].len = this_len; spd.partial[spd.nr_pages].private = private; len -= this_len; total_len += this_len; poff = 0; pidx = (pidx + 1) % subbuf_pages; if 
(this_end >= nonpad_end) { spd.nr_pages++; break; } } ret = 0; if (!spd.nr_pages) goto out; ret = *nonpad_ret = splice_to_pipe(pipe, &spd); if (ret < 0 || ret < total_len) goto out; if (read_start + ret == nonpad_end) ret += padding; out: splice_shrink_spd(pipe, &spd); return ret; } static ssize_t relay_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { ssize_t spliced; int ret; int nonpad_ret = 0; ret = 0; spliced = 0; while (len && !spliced) { ret = subbuf_splice_actor(in, ppos, pipe, len, flags, &nonpad_ret); if (ret < 0) break; else if (!ret) { if (flags & SPLICE_F_NONBLOCK) ret = -EAGAIN; break; } *ppos += ret; if (ret > len) len = 0; else len -= ret; spliced += nonpad_ret; nonpad_ret = 0; } if (spliced) return spliced; return ret; } const struct file_operations relay_file_operations = { .open = relay_file_open, .poll = relay_file_poll, .mmap = relay_file_mmap, .read = relay_file_read, .llseek = no_llseek, .release = relay_file_release, .splice_read = relay_file_splice_read, }; EXPORT_SYMBOL_GPL(relay_file_operations); static __init int relay_init(void) { hotcpu_notifier(relay_hotcpu_callback, 0); return 0; } early_initcall(relay_init);
gpl-2.0
sangrokhan/linux
arch/arm/mach-sa1100/assabet.c
1863
15546
/* * linux/arch/arm/mach-sa1100/assabet.c * * Author: Nicolas Pitre * * This file contains all Assabet-specific tweaks. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/platform_data/sa11x0-serial.h> #include <linux/serial_core.h> #include <linux/platform_device.h> #include <linux/mfd/ucb1x00.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/delay.h> #include <linux/mm.h> #include <linux/leds.h> #include <linux/slab.h> #include <video/sa1100fb.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/setup.h> #include <asm/page.h> #include <asm/pgtable-hwdef.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> #include <asm/mach/arch.h> #include <asm/mach/flash.h> #include <asm/mach/irda.h> #include <asm/mach/map.h> #include <mach/assabet.h> #include <linux/platform_data/mfd-mcp-sa11x0.h> #include <mach/irqs.h> #include "generic.h" #define ASSABET_BCR_DB1110 \ (ASSABET_BCR_SPK_OFF | \ ASSABET_BCR_LED_GREEN | ASSABET_BCR_LED_RED | \ ASSABET_BCR_RS232EN | ASSABET_BCR_LCD_12RGB | \ ASSABET_BCR_IRDA_MD0) #define ASSABET_BCR_DB1111 \ (ASSABET_BCR_SPK_OFF | \ ASSABET_BCR_LED_GREEN | ASSABET_BCR_LED_RED | \ ASSABET_BCR_RS232EN | ASSABET_BCR_LCD_12RGB | \ ASSABET_BCR_CF_BUS_OFF | ASSABET_BCR_STEREO_LB | \ ASSABET_BCR_IRDA_MD0 | ASSABET_BCR_CF_RST) unsigned long SCR_value = ASSABET_SCR_INIT; EXPORT_SYMBOL(SCR_value); static unsigned long BCR_value = ASSABET_BCR_DB1110; void ASSABET_BCR_frob(unsigned int mask, unsigned int val) { unsigned long flags; local_irq_save(flags); BCR_value = (BCR_value & ~mask) | val; ASSABET_BCR = BCR_value; local_irq_restore(flags); } EXPORT_SYMBOL(ASSABET_BCR_frob); static void assabet_ucb1x00_reset(enum ucb1x00_reset 
state) { if (state == UCB_RST_PROBE) ASSABET_BCR_set(ASSABET_BCR_CODEC_RST); } /* * Assabet flash support code. */ #ifdef ASSABET_REV_4 /* * Phase 4 Assabet has two 28F160B3 flash parts in bank 0: */ static struct mtd_partition assabet_partitions[] = { { .name = "bootloader", .size = 0x00020000, .offset = 0, .mask_flags = MTD_WRITEABLE, }, { .name = "bootloader params", .size = 0x00020000, .offset = MTDPART_OFS_APPEND, .mask_flags = MTD_WRITEABLE, }, { .name = "jffs", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, } }; #else /* * Phase 5 Assabet has two 28F128J3A flash parts in bank 0: */ static struct mtd_partition assabet_partitions[] = { { .name = "bootloader", .size = 0x00040000, .offset = 0, .mask_flags = MTD_WRITEABLE, }, { .name = "bootloader params", .size = 0x00040000, .offset = MTDPART_OFS_APPEND, .mask_flags = MTD_WRITEABLE, }, { .name = "jffs", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, } }; #endif static struct flash_platform_data assabet_flash_data = { .map_name = "cfi_probe", .parts = assabet_partitions, .nr_parts = ARRAY_SIZE(assabet_partitions), }; static struct resource assabet_flash_resources[] = { DEFINE_RES_MEM(SA1100_CS0_PHYS, SZ_32M), DEFINE_RES_MEM(SA1100_CS1_PHYS, SZ_32M), }; /* * Assabet IrDA support code. 
*/ static int assabet_irda_set_power(struct device *dev, unsigned int state) { static unsigned int bcr_state[4] = { ASSABET_BCR_IRDA_MD0, ASSABET_BCR_IRDA_MD1|ASSABET_BCR_IRDA_MD0, ASSABET_BCR_IRDA_MD1, 0 }; if (state < 4) { state = bcr_state[state]; ASSABET_BCR_clear(state ^ (ASSABET_BCR_IRDA_MD1| ASSABET_BCR_IRDA_MD0)); ASSABET_BCR_set(state); } return 0; } static void assabet_irda_set_speed(struct device *dev, unsigned int speed) { if (speed < 4000000) ASSABET_BCR_clear(ASSABET_BCR_IRDA_FSEL); else ASSABET_BCR_set(ASSABET_BCR_IRDA_FSEL); } static struct irda_platform_data assabet_irda_data = { .set_power = assabet_irda_set_power, .set_speed = assabet_irda_set_speed, }; static struct ucb1x00_plat_data assabet_ucb1x00_data = { .reset = assabet_ucb1x00_reset, .gpio_base = -1, }; static struct mcp_plat_data assabet_mcp_data = { .mccr0 = MCCR0_ADM, .sclk_rate = 11981000, .codec_pdata = &assabet_ucb1x00_data, }; static void assabet_lcd_set_visual(u32 visual) { u_int is_true_color = visual == FB_VISUAL_TRUECOLOR; if (machine_is_assabet()) { #if 1 // phase 4 or newer Assabet's if (is_true_color) ASSABET_BCR_set(ASSABET_BCR_LCD_12RGB); else ASSABET_BCR_clear(ASSABET_BCR_LCD_12RGB); #else // older Assabet's if (is_true_color) ASSABET_BCR_clear(ASSABET_BCR_LCD_12RGB); else ASSABET_BCR_set(ASSABET_BCR_LCD_12RGB); #endif } } #ifndef ASSABET_PAL_VIDEO static void assabet_lcd_backlight_power(int on) { if (on) ASSABET_BCR_set(ASSABET_BCR_LIGHT_ON); else ASSABET_BCR_clear(ASSABET_BCR_LIGHT_ON); } /* * Turn on/off the backlight. When turning the backlight on, we wait * 500us after turning it on so we don't cause the supplies to droop * when we enable the LCD controller (and cause a hard reset.) */ static void assabet_lcd_power(int on) { if (on) { ASSABET_BCR_set(ASSABET_BCR_LCD_ON); udelay(500); } else ASSABET_BCR_clear(ASSABET_BCR_LCD_ON); } /* * The assabet uses a sharp LQ039Q2DS54 LCD module. 
It is actually * takes an RGB666 signal, but we provide it with an RGB565 signal * instead (def_rgb_16). */ static struct sa1100fb_mach_info lq039q2ds54_info = { .pixclock = 171521, .bpp = 16, .xres = 320, .yres = 240, .hsync_len = 5, .vsync_len = 1, .left_margin = 61, .upper_margin = 3, .right_margin = 9, .lower_margin = 0, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, .lccr0 = LCCR0_Color | LCCR0_Sngl | LCCR0_Act, .lccr3 = LCCR3_OutEnH | LCCR3_PixRsEdg | LCCR3_ACBsDiv(2), .backlight_power = assabet_lcd_backlight_power, .lcd_power = assabet_lcd_power, .set_visual = assabet_lcd_set_visual, }; #else static void assabet_pal_backlight_power(int on) { ASSABET_BCR_clear(ASSABET_BCR_LIGHT_ON); } static void assabet_pal_power(int on) { ASSABET_BCR_clear(ASSABET_BCR_LCD_ON); } static struct sa1100fb_mach_info pal_info = { .pixclock = 67797, .bpp = 16, .xres = 640, .yres = 512, .hsync_len = 64, .vsync_len = 6, .left_margin = 125, .upper_margin = 70, .right_margin = 115, .lower_margin = 36, .lccr0 = LCCR0_Color | LCCR0_Sngl | LCCR0_Act, .lccr3 = LCCR3_OutEnH | LCCR3_PixRsEdg | LCCR3_ACBsDiv(512), .backlight_power = assabet_pal_backlight_power, .lcd_power = assabet_pal_power, .set_visual = assabet_lcd_set_visual, }; #endif #ifdef CONFIG_ASSABET_NEPONSET static struct resource neponset_resources[] = { DEFINE_RES_MEM(0x10000000, 0x08000000), DEFINE_RES_MEM(0x18000000, 0x04000000), DEFINE_RES_MEM(0x40000000, SZ_8K), DEFINE_RES_IRQ(IRQ_GPIO25), }; #endif static void __init assabet_init(void) { /* * Ensure that the power supply is in "high power" mode. */ GPSR = GPIO_GPIO16; GPDR |= GPIO_GPIO16; /* * Ensure that these pins are set as outputs and are driving * logic 0. This ensures that we won't inadvertently toggle * the WS latch in the CPLD, and we don't float causing * excessive power drain. 
--rmk */ GPCR = GPIO_SSP_TXD | GPIO_SSP_SCLK | GPIO_SSP_SFRM; GPDR |= GPIO_SSP_TXD | GPIO_SSP_SCLK | GPIO_SSP_SFRM; /* * Also set GPIO27 as an output; this is used to clock UART3 * via the FPGA and as otherwise has no pullups or pulldowns, * so stop it floating. */ GPCR = GPIO_GPIO27; GPDR |= GPIO_GPIO27; /* * Set up registers for sleep mode. */ PWER = PWER_GPIO0; PGSR = 0; PCFR = 0; PSDR = 0; PPDR |= PPC_TXD3 | PPC_TXD1; PPSR |= PPC_TXD3 | PPC_TXD1; sa11x0_ppc_configure_mcp(); if (machine_has_neponset()) { /* * Angel sets this, but other bootloaders may not. * * This must precede any driver calls to BCR_set() * or BCR_clear(). */ ASSABET_BCR = BCR_value = ASSABET_BCR_DB1111; #ifndef CONFIG_ASSABET_NEPONSET printk( "Warning: Neponset detected but full support " "hasn't been configured in the kernel\n" ); #else platform_device_register_simple("neponset", 0, neponset_resources, ARRAY_SIZE(neponset_resources)); #endif } #ifndef ASSABET_PAL_VIDEO sa11x0_register_lcd(&lq039q2ds54_info); #else sa11x0_register_lcd(&pal_video); #endif sa11x0_register_mtd(&assabet_flash_data, assabet_flash_resources, ARRAY_SIZE(assabet_flash_resources)); sa11x0_register_irda(&assabet_irda_data); sa11x0_register_mcp(&assabet_mcp_data); } /* * On Assabet, we must probe for the Neponset board _before_ * paging_init() has occurred to actually determine the amount * of RAM available. To do so, we map the appropriate IO section * in the page table here in order to access GPIO registers. 
*/ static void __init map_sa1100_gpio_regs( void ) { unsigned long phys = __PREG(GPLR) & PMD_MASK; unsigned long virt = (unsigned long)io_p2v(phys); int prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO); pmd_t *pmd; pmd = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt); *pmd = __pmd(phys | prot); flush_pmd_entry(pmd); } /* * Read System Configuration "Register" * (taken from "Intel StrongARM SA-1110 Microprocessor Development Board * User's Guide", section 4.4.1) * * This same scan is performed in arch/arm/boot/compressed/head-sa1100.S * to set up the serial port for decompression status messages. We * repeat it here because the kernel may not be loaded as a zImage, and * also because it's a hassle to communicate the SCR value to the kernel * from the decompressor. * * Note that IRQs are guaranteed to be disabled. */ static void __init get_assabet_scr(void) { unsigned long uninitialized_var(scr), i; GPDR |= 0x3fc; /* Configure GPIO 9:2 as outputs */ GPSR = 0x3fc; /* Write 0xFF to GPIO 9:2 */ GPDR &= ~(0x3fc); /* Configure GPIO 9:2 as inputs */ for(i = 100; i--; ) /* Read GPIO 9:2 */ scr = GPLR; GPDR |= 0x3fc; /* restore correct pin direction */ scr &= 0x3fc; /* save as system configuration byte. */ SCR_value = scr; } static void __init fixup_assabet(struct tag *tags, char **cmdline, struct meminfo *mi) { /* This must be done before any call to machine_has_neponset() */ map_sa1100_gpio_regs(); get_assabet_scr(); if (machine_has_neponset()) printk("Neponset expansion board detected\n"); } static void assabet_uart_pm(struct uart_port *port, u_int state, u_int oldstate) { if (port->mapbase == _Ser1UTCR0) { if (state) ASSABET_BCR_clear(ASSABET_BCR_RS232EN | ASSABET_BCR_COM_RTS | ASSABET_BCR_COM_DTR); else ASSABET_BCR_set(ASSABET_BCR_RS232EN | ASSABET_BCR_COM_RTS | ASSABET_BCR_COM_DTR); } } /* * Assabet uses COM_RTS and COM_DTR for both UART1 (com port) * and UART3 (radio module). We only handle them for UART1 here. 
*/ static void assabet_set_mctrl(struct uart_port *port, u_int mctrl) { if (port->mapbase == _Ser1UTCR0) { u_int set = 0, clear = 0; if (mctrl & TIOCM_RTS) clear |= ASSABET_BCR_COM_RTS; else set |= ASSABET_BCR_COM_RTS; if (mctrl & TIOCM_DTR) clear |= ASSABET_BCR_COM_DTR; else set |= ASSABET_BCR_COM_DTR; ASSABET_BCR_clear(clear); ASSABET_BCR_set(set); } } static u_int assabet_get_mctrl(struct uart_port *port) { u_int ret = 0; u_int bsr = ASSABET_BSR; /* need 2 reads to read current value */ bsr = ASSABET_BSR; if (port->mapbase == _Ser1UTCR0) { if (bsr & ASSABET_BSR_COM_DCD) ret |= TIOCM_CD; if (bsr & ASSABET_BSR_COM_CTS) ret |= TIOCM_CTS; if (bsr & ASSABET_BSR_COM_DSR) ret |= TIOCM_DSR; } else if (port->mapbase == _Ser3UTCR0) { if (bsr & ASSABET_BSR_RAD_DCD) ret |= TIOCM_CD; if (bsr & ASSABET_BSR_RAD_CTS) ret |= TIOCM_CTS; if (bsr & ASSABET_BSR_RAD_DSR) ret |= TIOCM_DSR; if (bsr & ASSABET_BSR_RAD_RI) ret |= TIOCM_RI; } else { ret = TIOCM_CD | TIOCM_CTS | TIOCM_DSR; } return ret; } static struct sa1100_port_fns assabet_port_fns __initdata = { .set_mctrl = assabet_set_mctrl, .get_mctrl = assabet_get_mctrl, .pm = assabet_uart_pm, }; static struct map_desc assabet_io_desc[] __initdata = { { /* Board Control Register */ .virtual = 0xf1000000, .pfn = __phys_to_pfn(0x12000000), .length = 0x00100000, .type = MT_DEVICE }, { /* MQ200 */ .virtual = 0xf2800000, .pfn = __phys_to_pfn(0x4b800000), .length = 0x00800000, .type = MT_DEVICE } }; static void __init assabet_map_io(void) { sa1100_map_io(); iotable_init(assabet_io_desc, ARRAY_SIZE(assabet_io_desc)); /* * Set SUS bit in SDCR0 so serial port 1 functions. * Its called GPCLKR0 in my SA1110 manual. */ Ser1SDCR0 |= SDCR0_SUS; MSC1 = (MSC1 & ~0xffff) | MSC_NonBrst | MSC_32BitStMem | MSC_RdAcc(2) | MSC_WrAcc(2) | MSC_Rec(0); if (!machine_has_neponset()) sa1100_register_uart_fns(&assabet_port_fns); /* * When Neponset is attached, the first UART should be * UART3. That's what Angel is doing and many documents * are stating this. 
* * We do the Neponset mapping even if Neponset support * isn't compiled in so the user will still get something on * the expected physical serial port. * * We no longer do this; not all boot loaders support it, * and UART3 appears to be somewhat unreliable with blob. */ sa1100_register_uart(0, 1); sa1100_register_uart(2, 3); } /* LEDs */ #if defined(CONFIG_NEW_LEDS) && defined(CONFIG_LEDS_CLASS) struct assabet_led { struct led_classdev cdev; u32 mask; }; /* * The triggers lines up below will only be used if the * LED triggers are compiled in. */ static const struct { const char *name; const char *trigger; } assabet_leds[] = { { "assabet:red", "cpu0",}, { "assabet:green", "heartbeat", }, }; /* * The LED control in Assabet is reversed: * - setting bit means turn off LED * - clearing bit means turn on LED */ static void assabet_led_set(struct led_classdev *cdev, enum led_brightness b) { struct assabet_led *led = container_of(cdev, struct assabet_led, cdev); if (b != LED_OFF) ASSABET_BCR_clear(led->mask); else ASSABET_BCR_set(led->mask); } static enum led_brightness assabet_led_get(struct led_classdev *cdev) { struct assabet_led *led = container_of(cdev, struct assabet_led, cdev); return (ASSABET_BCR & led->mask) ? LED_OFF : LED_FULL; } static int __init assabet_leds_init(void) { int i; if (!machine_is_assabet()) return -ENODEV; for (i = 0; i < ARRAY_SIZE(assabet_leds); i++) { struct assabet_led *led; led = kzalloc(sizeof(*led), GFP_KERNEL); if (!led) break; led->cdev.name = assabet_leds[i].name; led->cdev.brightness_set = assabet_led_set; led->cdev.brightness_get = assabet_led_get; led->cdev.default_trigger = assabet_leds[i].trigger; if (!i) led->mask = ASSABET_BCR_LED_RED; else led->mask = ASSABET_BCR_LED_GREEN; if (led_classdev_register(NULL, &led->cdev) < 0) { kfree(led); break; } } return 0; } /* * Since we may have triggers on any subsystem, defer registration * until after subsystem_init. 
*/ fs_initcall(assabet_leds_init); #endif MACHINE_START(ASSABET, "Intel-Assabet") .atag_offset = 0x100, .fixup = fixup_assabet, .map_io = assabet_map_io, .nr_irqs = SA1100_NR_IRQS, .init_irq = sa1100_init_irq, .init_time = sa1100_timer_init, .init_machine = assabet_init, .init_late = sa11x0_init_late, #ifdef CONFIG_SA1111 .dma_zone_size = SZ_1M, #endif .restart = sa11x0_restart, MACHINE_END
gpl-2.0
daniabo/kernel-HuaweiP2-6011.3.0.8
drivers/platform/x86/dell-laptop.c
2375
17469
/* * Driver for Dell laptop extras * * Copyright (c) Red Hat <mjg@redhat.com> * * Based on documentation in the libsmbios package, Copyright (C) 2005 Dell * Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/backlight.h> #include <linux/err.h> #include <linux/dmi.h> #include <linux/io.h> #include <linux/rfkill.h> #include <linux/power_supply.h> #include <linux/acpi.h> #include <linux/mm.h> #include <linux/i8042.h> #include <linux/slab.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include "../../firmware/dcdbas.h" #define BRIGHTNESS_TOKEN 0x7d /* This structure will be modified by the firmware when we enter * system management mode, hence the volatiles */ struct calling_interface_buffer { u16 class; u16 select; volatile u32 input[4]; volatile u32 output[4]; } __packed; struct calling_interface_token { u16 tokenID; u16 location; union { u16 value; u16 stringlength; }; }; struct calling_interface_structure { struct dmi_header header; u16 cmdIOAddress; u8 cmdIOCode; u32 supportedCmds; struct calling_interface_token tokens[]; } __packed; static int da_command_address; static int da_command_code; static int da_num_tokens; static struct calling_interface_token *da_tokens; static struct platform_driver platform_driver = { .driver = { .name = "dell-laptop", .owner = THIS_MODULE, } }; static struct platform_device *platform_device; static struct backlight_device *dell_backlight_device; static struct rfkill *wifi_rfkill; static struct rfkill *bluetooth_rfkill; static struct rfkill *wwan_rfkill; static const struct dmi_system_id __initdata dell_device_table[] = { { .ident = "Dell laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 
DMI_MATCH(DMI_CHASSIS_TYPE, "8"), }, }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /*Laptop*/ }, }, { .ident = "Dell Computer Corporation", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_CHASSIS_TYPE, "8"), }, }, { } }; static struct dmi_system_id __devinitdata dell_blacklist[] = { /* Supported by compal-laptop */ { .ident = "Dell Mini 9", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 910"), }, }, { .ident = "Dell Mini 10", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1010"), }, }, { .ident = "Dell Mini 10v", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1011"), }, }, { .ident = "Dell Mini 1012", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"), }, }, { .ident = "Dell Inspiron 11z", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1110"), }, }, { .ident = "Dell Mini 12", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1210"), }, }, {} }; static struct calling_interface_buffer *buffer; static struct page *bufferpage; static DEFINE_MUTEX(buffer_mutex); static int hwswitch_state; static void get_buffer(void) { mutex_lock(&buffer_mutex); memset(buffer, 0, sizeof(struct calling_interface_buffer)); } static void release_buffer(void) { mutex_unlock(&buffer_mutex); } static void __init parse_da_table(const struct dmi_header *dm) { /* Final token is a terminator, so we don't want to copy it */ int tokens = (dm->length-11)/sizeof(struct calling_interface_token)-1; struct calling_interface_structure *table = container_of(dm, struct calling_interface_structure, header); /* 4 bytes of table header, plus 7 bytes of Dell header, plus at least 6 bytes of entry */ if (dm->length < 17) return; da_command_address = table->cmdIOAddress; 
da_command_code = table->cmdIOCode; da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) * sizeof(struct calling_interface_token), GFP_KERNEL); if (!da_tokens) return; memcpy(da_tokens+da_num_tokens, table->tokens, sizeof(struct calling_interface_token) * tokens); da_num_tokens += tokens; } static void __init find_tokens(const struct dmi_header *dm, void *dummy) { switch (dm->type) { case 0xd4: /* Indexed IO */ break; case 0xd5: /* Protected Area Type 1 */ break; case 0xd6: /* Protected Area Type 2 */ break; case 0xda: /* Calling interface */ parse_da_table(dm); break; } } static int find_token_location(int tokenid) { int i; for (i = 0; i < da_num_tokens; i++) { if (da_tokens[i].tokenID == tokenid) return da_tokens[i].location; } return -1; } static struct calling_interface_buffer * dell_send_request(struct calling_interface_buffer *buffer, int class, int select) { struct smi_cmd command; command.magic = SMI_CMD_MAGIC; command.command_address = da_command_address; command.command_code = da_command_code; command.ebx = virt_to_phys(buffer); command.ecx = 0x42534931; buffer->class = class; buffer->select = select; dcdbas_smi_request(&command); return buffer; } /* Derived from information in DellWirelessCtl.cpp: Class 17, select 11 is radio control. It returns an array of 32-bit values. 
Input byte 0 = 0: Wireless information result[0]: return code result[1]: Bit 0: Hardware switch supported Bit 1: Wifi locator supported Bit 2: Wifi is supported Bit 3: Bluetooth is supported Bit 4: WWAN is supported Bit 5: Wireless keyboard supported Bits 6-7: Reserved Bit 8: Wifi is installed Bit 9: Bluetooth is installed Bit 10: WWAN is installed Bits 11-15: Reserved Bit 16: Hardware switch is on Bit 17: Wifi is blocked Bit 18: Bluetooth is blocked Bit 19: WWAN is blocked Bits 20-31: Reserved result[2]: NVRAM size in bytes result[3]: NVRAM format version number Input byte 0 = 2: Wireless switch configuration result[0]: return code result[1]: Bit 0: Wifi controlled by switch Bit 1: Bluetooth controlled by switch Bit 2: WWAN controlled by switch Bits 3-6: Reserved Bit 7: Wireless switch config locked Bit 8: Wifi locator enabled Bits 9-14: Reserved Bit 15: Wifi locator setting locked Bits 16-31: Reserved */ static int dell_rfkill_set(void *data, bool blocked) { int disable = blocked ? 1 : 0; unsigned long radio = (unsigned long)data; int hwswitch_bit = (unsigned long)data - 1; int ret = 0; get_buffer(); dell_send_request(buffer, 17, 11); /* If the hardware switch controls this radio, and the hardware switch is disabled, don't allow changing the software state */ if ((hwswitch_state & BIT(hwswitch_bit)) && !(buffer->output[1] & BIT(16))) { ret = -EINVAL; goto out; } buffer->input[0] = (1 | (radio<<8) | (disable << 16)); dell_send_request(buffer, 17, 11); out: release_buffer(); return ret; } static void dell_rfkill_query(struct rfkill *rfkill, void *data) { int status; int bit = (unsigned long)data + 16; int hwswitch_bit = (unsigned long)data - 1; get_buffer(); dell_send_request(buffer, 17, 11); status = buffer->output[1]; release_buffer(); rfkill_set_sw_state(rfkill, !!(status & BIT(bit))); if (hwswitch_state & (BIT(hwswitch_bit))) rfkill_set_hw_state(rfkill, !(status & BIT(16))); } static const struct rfkill_ops dell_rfkill_ops = { .set_block = dell_rfkill_set, 
	.query = dell_rfkill_query,
};

/* debugfs directory /sys/kernel/debug/dell_laptop (created in dell_init). */
static struct dentry *dell_laptop_dir;

/*
 * debugfs "rfkill" file: dump the raw class 17/select 11 wireless
 * status word and the cached hwswitch_state, one decoded bit per line.
 */
static int dell_debugfs_show(struct seq_file *s, void *data)
{
	int status;

	get_buffer();
	dell_send_request(buffer, 17, 11);
	status = buffer->output[1];
	release_buffer();

	seq_printf(s, "status:\t0x%X\n", status);
	seq_printf(s, "Bit 0 : Hardware switch supported:   %lu\n",
		   status & BIT(0));
	seq_printf(s, "Bit 1 : Wifi locator supported:      %lu\n",
		  (status & BIT(1)) >> 1);
	seq_printf(s, "Bit 2 : Wifi is supported:           %lu\n",
		  (status & BIT(2)) >> 2);
	seq_printf(s, "Bit 3 : Bluetooth is supported:      %lu\n",
		  (status & BIT(3)) >> 3);
	seq_printf(s, "Bit 4 : WWAN is supported:           %lu\n",
		  (status & BIT(4)) >> 4);
	seq_printf(s, "Bit 5 : Wireless keyboard supported: %lu\n",
		  (status & BIT(5)) >> 5);
	seq_printf(s, "Bit 8 : Wifi is installed:           %lu\n",
		  (status & BIT(8)) >> 8);
	seq_printf(s, "Bit 9 : Bluetooth is installed:      %lu\n",
		  (status & BIT(9)) >> 9);
	seq_printf(s, "Bit 10: WWAN is installed:           %lu\n",
		  (status & BIT(10)) >> 10);
	seq_printf(s, "Bit 16: Hardware switch is on:       %lu\n",
		  (status & BIT(16)) >> 16);
	seq_printf(s, "Bit 17: Wifi is blocked:             %lu\n",
		  (status & BIT(17)) >> 17);
	seq_printf(s, "Bit 18: Bluetooth is blocked:        %lu\n",
		  (status & BIT(18)) >> 18);
	seq_printf(s, "Bit 19: WWAN is blocked:             %lu\n",
		  (status & BIT(19)) >> 19);

	seq_printf(s, "\nhwswitch_state:\t0x%X\n", hwswitch_state);
	seq_printf(s, "Bit 0 : Wifi controlled by switch:      %lu\n",
		   hwswitch_state & BIT(0));
	seq_printf(s, "Bit 1 : Bluetooth controlled by switch: %lu\n",
		   (hwswitch_state & BIT(1)) >> 1);
	seq_printf(s, "Bit 2 : WWAN controlled by switch:      %lu\n",
		   (hwswitch_state & BIT(2)) >> 2);
	seq_printf(s, "Bit 7 : Wireless switch config locked:  %lu\n",
		   (hwswitch_state & BIT(7)) >> 7);
	seq_printf(s, "Bit 8 : Wifi locator enabled:           %lu\n",
		   (hwswitch_state & BIT(8)) >> 8);
	seq_printf(s, "Bit 15: Wifi locator setting locked:    %lu\n",
		   (hwswitch_state & BIT(15)) >> 15);

	return 0;
}

static int dell_debugfs_open(struct inode *inode, struct file *file)
{
	return
single_open(file, dell_debugfs_show, inode->i_private);
}

static const struct file_operations dell_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = dell_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/*
 * Delayed-work handler: re-query every registered radio.  Scheduled
 * from the i8042 filter when the wireless hotkey scancode is seen.
 */
static void dell_update_rfkill(struct work_struct *ignored)
{
	if (wifi_rfkill)
		dell_rfkill_query(wifi_rfkill, (void *)1);
	if (bluetooth_rfkill)
		dell_rfkill_query(bluetooth_rfkill, (void *)2);
	if (wwan_rfkill)
		dell_rfkill_query(wwan_rfkill, (void *)3);
}
static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);


/*
 * Probe wireless support/installed bits and register an rfkill device
 * for each present radio.  Returns 0 when rfkill is disabled (blacklist)
 * or on success; a negative errno unwinds any partial registration.
 */
static int __init dell_setup_rfkill(void)
{
	int status;
	int ret;

	if (dmi_check_system(dell_blacklist)) {
		pr_info("Blacklisted hardware detected - not enabling rfkill\n");
		return 0;
	}

	get_buffer();
	dell_send_request(buffer, 17, 11);
	status = buffer->output[1];
	/* Input byte 2 fetches the wireless switch configuration. */
	buffer->input[0] = 0x2;
	dell_send_request(buffer, 17, 11);
	hwswitch_state = buffer->output[1];
	release_buffer();

	/* Register each radio only when both "supported" and "installed"
	 * bits are set (e.g. bits 2 and 8 for wifi). */
	if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) {
		wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev,
					   RFKILL_TYPE_WLAN,
					   &dell_rfkill_ops, (void *) 1);
		if (!wifi_rfkill) {
			ret = -ENOMEM;
			goto err_wifi;
		}
		ret = rfkill_register(wifi_rfkill);
		if (ret)
			goto err_wifi;
	}

	if ((status & (1<<3|1<<9)) == (1<<3|1<<9)) {
		bluetooth_rfkill = rfkill_alloc("dell-bluetooth",
						&platform_device->dev,
						RFKILL_TYPE_BLUETOOTH,
						&dell_rfkill_ops, (void *) 2);
		if (!bluetooth_rfkill) {
			ret = -ENOMEM;
			goto err_bluetooth;
		}
		ret = rfkill_register(bluetooth_rfkill);
		if (ret)
			goto err_bluetooth;
	}

	if ((status & (1<<4|1<<10)) == (1<<4|1<<10)) {
		wwan_rfkill = rfkill_alloc("dell-wwan",
					   &platform_device->dev,
					   RFKILL_TYPE_WWAN,
					   &dell_rfkill_ops, (void *) 3);
		if (!wwan_rfkill) {
			ret = -ENOMEM;
			goto err_wwan;
		}
		ret = rfkill_register(wwan_rfkill);
		if (ret)
			goto err_wwan;
	}

	return 0;
/* Unwind in reverse order; *_destroy is a no-op on NULL pointers. */
err_wwan:
	rfkill_destroy(wwan_rfkill);
	if (bluetooth_rfkill)
		rfkill_unregister(bluetooth_rfkill);
err_bluetooth:
	rfkill_destroy(bluetooth_rfkill);
	if (wifi_rfkill)
		rfkill_unregister(wifi_rfkill);
err_wifi:
rfkill_destroy(wifi_rfkill); return ret; } static void dell_cleanup_rfkill(void) { if (wifi_rfkill) { rfkill_unregister(wifi_rfkill); rfkill_destroy(wifi_rfkill); } if (bluetooth_rfkill) { rfkill_unregister(bluetooth_rfkill); rfkill_destroy(bluetooth_rfkill); } if (wwan_rfkill) { rfkill_unregister(wwan_rfkill); rfkill_destroy(wwan_rfkill); } } static int dell_send_intensity(struct backlight_device *bd) { int ret = 0; get_buffer(); buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN); buffer->input[1] = bd->props.brightness; if (buffer->input[0] == -1) { ret = -ENODEV; goto out; } if (power_supply_is_system_supplied() > 0) dell_send_request(buffer, 1, 2); else dell_send_request(buffer, 1, 1); out: release_buffer(); return 0; } static int dell_get_intensity(struct backlight_device *bd) { int ret = 0; get_buffer(); buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN); if (buffer->input[0] == -1) { ret = -ENODEV; goto out; } if (power_supply_is_system_supplied() > 0) dell_send_request(buffer, 0, 2); else dell_send_request(buffer, 0, 1); ret = buffer->output[1]; out: release_buffer(); return ret; } static const struct backlight_ops dell_ops = { .get_brightness = dell_get_intensity, .update_status = dell_send_intensity, }; static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str, struct serio *port) { static bool extended; if (str & 0x20) return false; if (unlikely(data == 0xe0)) { extended = true; return false; } else if (unlikely(extended)) { switch (data) { case 0x8: schedule_delayed_work(&dell_rfkill_work, round_jiffies_relative(HZ)); break; } extended = false; } return false; } static int __init dell_init(void) { int max_intensity = 0; int ret; if (!dmi_check_system(dell_device_table)) return -ENODEV; dmi_walk(find_tokens, NULL); if (!da_tokens) { pr_info("Unable to find dmi tokens\n"); return -ENODEV; } ret = platform_driver_register(&platform_driver); if (ret) goto fail_platform_driver; platform_device = 
platform_device_alloc("dell-laptop", -1); if (!platform_device) { ret = -ENOMEM; goto fail_platform_device1; } ret = platform_device_add(platform_device); if (ret) goto fail_platform_device2; /* * Allocate buffer below 4GB for SMI data--only 32-bit physical addr * is passed to SMI handler. */ bufferpage = alloc_page(GFP_KERNEL | GFP_DMA32); if (!bufferpage) goto fail_buffer; buffer = page_address(bufferpage); mutex_init(&buffer_mutex); ret = dell_setup_rfkill(); if (ret) { pr_warn("Unable to setup rfkill\n"); goto fail_rfkill; } ret = i8042_install_filter(dell_laptop_i8042_filter); if (ret) { pr_warn("Unable to install key filter\n"); goto fail_filter; } dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL); if (dell_laptop_dir != NULL) debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL, &dell_debugfs_fops); #ifdef CONFIG_ACPI /* In the event of an ACPI backlight being available, don't * register the platform controller. */ if (acpi_video_backlight_support()) return 0; #endif get_buffer(); buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN); if (buffer->input[0] != -1) { dell_send_request(buffer, 0, 2); max_intensity = buffer->output[3]; } release_buffer(); if (max_intensity) { struct backlight_properties props; memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_PLATFORM; props.max_brightness = max_intensity; dell_backlight_device = backlight_device_register("dell_backlight", &platform_device->dev, NULL, &dell_ops, &props); if (IS_ERR(dell_backlight_device)) { ret = PTR_ERR(dell_backlight_device); dell_backlight_device = NULL; goto fail_backlight; } dell_backlight_device->props.brightness = dell_get_intensity(dell_backlight_device); backlight_update_status(dell_backlight_device); } return 0; fail_backlight: i8042_remove_filter(dell_laptop_i8042_filter); cancel_delayed_work_sync(&dell_rfkill_work); fail_filter: dell_cleanup_rfkill(); fail_rfkill: free_page((unsigned long)bufferpage); fail_buffer: 
	/* Continuation of dell_init()'s unwind chain (fail_buffer: above). */
	platform_device_del(platform_device);
fail_platform_device2:
	platform_device_put(platform_device);
fail_platform_device1:
	platform_driver_unregister(&platform_driver);
fail_platform_driver:
	kfree(da_tokens);
	return ret;
}

/* Module exit: tear down everything dell_init() registered, in reverse. */
static void __exit dell_exit(void)
{
	debugfs_remove_recursive(dell_laptop_dir);
	i8042_remove_filter(dell_laptop_i8042_filter);
	cancel_delayed_work_sync(&dell_rfkill_work);
	backlight_device_unregister(dell_backlight_device);
	dell_cleanup_rfkill();
	if (platform_device) {
		platform_device_unregister(platform_device);
		platform_driver_unregister(&platform_driver);
	}
	kfree(da_tokens);
	free_page((unsigned long)buffer);
}

module_init(dell_init);
module_exit(dell_exit);

MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
MODULE_DESCRIPTION("Dell laptop driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("dmi:*svnDellInc.:*:ct8:*");
MODULE_ALIAS("dmi:*svnDellInc.:*:ct9:*");
MODULE_ALIAS("dmi:*svnDellComputerCorporation.:*:ct8:*");
gpl-2.0
digitaleric-google/GCG-2.6.39
drivers/char/ipmi/ipmi_bt_sm.c
2631
20796
/* * ipmi_bt_sm.c * * The state machine for an Open IPMI BT sub-driver under ipmi_si.c, part * of the driver architecture at http://sourceforge.net/projects/openipmi * * Author: Rocky Craig <first.last@hp.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> /* For printk. 
 */

#include <linux/string.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ipmi_msgdefs.h>		/* for completion codes */
#include "ipmi_si_sm.h"

/* Debug bitmask values; combine them in the bt_debug module parameter. */
#define BT_DEBUG_OFF	0	/* Used in production */
#define BT_DEBUG_ENABLE	1	/* Generic messages */
#define BT_DEBUG_MSG	2	/* Prints all request/response buffers */
#define BT_DEBUG_STATES	4	/* Verbose look at state changes */
/*
 * BT_DEBUG_OFF must be zero to correspond to the default uninitialized
 * value
 */
static int bt_debug; /* 0 == BT_DEBUG_OFF */

module_param(bt_debug, int, 0644);
MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states");

/*
 * Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds,
 * and 64 byte buffers.  However, one HP implementation wants 255 bytes of
 * buffer (with a documented message of 160 bytes) so go for the max.
 * Since the Open IPMI architecture is single-message oriented at this
 * stage, the queue depth of BT is of no concern.
 */
#define BT_NORMAL_TIMEOUT	5	/* seconds */
#define BT_NORMAL_RETRY_LIMIT	2
#define BT_RESET_DELAY		6	/* seconds after warm reset */

/*
 * States are written in chronological order and usually cover
 * multiple rows of the state table discussion in the IPMI spec.
 */
enum bt_states {
	BT_STATE_IDLE = 0,	/* Order is critical in this list */
	BT_STATE_XACTION_START,
	BT_STATE_WRITE_BYTES,
	BT_STATE_WRITE_CONSUME,
	BT_STATE_READ_WAIT,
	BT_STATE_CLEAR_B2H,
	BT_STATE_READ_BYTES,
	BT_STATE_RESET1,	/* These must come last */
	BT_STATE_RESET2,
	BT_STATE_RESET3,
	BT_STATE_RESTART,
	BT_STATE_PRINTME,
	BT_STATE_CAPABILITIES_BEGIN,
	BT_STATE_CAPABILITIES_END,
	BT_STATE_LONG_BUSY	/* BT doesn't get hosed :-) */
};

/*
 * Macros seen at the end of state "case" blocks.  They help with legibility
 * and debugging.
 */
#define BT_STATE_CHANGE(X, Y) { bt->state = X; return Y; }

#define BT_SI_SM_RETURN(Y)   { last_printed = BT_STATE_PRINTME; return Y; }

/* Per-interface BT state; one instance per registered BT interface. */
struct si_sm_data {
	enum bt_states	state;
	unsigned char	seq;		/* BT sequence number */
	struct si_sm_io	*io;
	unsigned char	write_data[IPMI_MAX_MSG_LENGTH];
	int		write_count;
	unsigned char	read_data[IPMI_MAX_MSG_LENGTH];
	int		read_count;
	int		truncated;
	long		timeout;	/* microseconds countdown */
	int		error_retries;	/* end of "common" fields */
	int		nonzero_status;	/* hung BMCs stay all 0 */
	enum bt_states	complete;	/* to divert the state machine */
	int		BT_CAP_outreqs;
	long		BT_CAP_req2rsp;
	int		BT_CAP_retries;	/* Recommended retries */
};

#define BT_CLR_WR_PTR	0x01	/* See IPMI 1.5 table 11.6.4 */
#define BT_CLR_RD_PTR	0x02
#define BT_H2B_ATN	0x04
#define BT_B2H_ATN	0x08
#define BT_SMS_ATN	0x10
#define BT_OEM0		0x20
#define BT_H_BUSY	0x40
#define BT_B_BUSY	0x80

/*
 * Some bits are toggled on each write: write once to set it, once
 * more to clear it; writing a zero does nothing.  To absolutely
 * clear it, check its state and write if set.  This avoids the "get
 * current then use as mask" scheme to modify one bit.  Note that the
 * variable "bt" is hardcoded into these macros.
 */
#define BT_STATUS	bt->io->inputb(bt->io, 0)
#define BT_CONTROL(x)	bt->io->outputb(bt->io, 0, x)

#define BMC2HOST	bt->io->inputb(bt->io, 1)
#define HOST2BMC(x)	bt->io->outputb(bt->io, 1, x)

#define BT_INTMASK_R	bt->io->inputb(bt->io, 2)
#define BT_INTMASK_W(x)	bt->io->outputb(bt->io, 2, x)

/*
 * Convenience routines for debugging.  These are not multi-open safe!
 * Note the macros have hardcoded variables in them.
 */

/* Map a bt_states value to a short human-readable tag for log lines. */
static char *state2txt(unsigned char state)
{
	switch (state) {
	case BT_STATE_IDLE:		return("IDLE");
	case BT_STATE_XACTION_START:	return("XACTION");
	case BT_STATE_WRITE_BYTES:	return("WR_BYTES");
	case BT_STATE_WRITE_CONSUME:	return("WR_CONSUME");
	case BT_STATE_READ_WAIT:	return("RD_WAIT");
	case BT_STATE_CLEAR_B2H:	return("CLEAR_B2H");
	case BT_STATE_READ_BYTES:	return("RD_BYTES");
	case BT_STATE_RESET1:		return("RESET1");
	case BT_STATE_RESET2:		return("RESET2");
	case BT_STATE_RESET3:		return("RESET3");
	case BT_STATE_RESTART:		return("RESTART");
	case BT_STATE_LONG_BUSY:	return("LONG_BUSY");
	case BT_STATE_CAPABILITIES_BEGIN: return("CAP_BEGIN");
	case BT_STATE_CAPABILITIES_END:	return("CAP_END");
	}
	return("BAD STATE");
}
#define STATE2TXT state2txt(bt->state)

/* Render the BT status byte as a list of asserted flag names. */
static char *status2txt(unsigned char status)
{
	/*
	 * This cannot be called by two threads at the same time and
	 * the buffer is always consumed immediately, so the static is
	 * safe to use.
	 */
	static char buf[40];

	strcpy(buf, "[ ");
	if (status & BT_B_BUSY)
		strcat(buf, "B_BUSY ");
	if (status & BT_H_BUSY)
		strcat(buf, "H_BUSY ");
	if (status & BT_OEM0)
		strcat(buf, "OEM0 ");
	if (status & BT_SMS_ATN)
		strcat(buf, "SMS ");
	if (status & BT_B2H_ATN)
		strcat(buf, "B2H ");
	if (status & BT_H2B_ATN)
		strcat(buf, "H2B ");
	strcat(buf, "]");
	return buf;
}
#define STATUS2TXT status2txt(status)

/* called externally at insmod time, and internally on cleanup */
static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
{
	memset(bt, 0, sizeof(struct si_sm_data));
	if (bt->io != io) {
		/* external: one-time only things */
		bt->io = io;
		bt->seq = 0;
	}
	bt->state = BT_STATE_IDLE;	/* start here */
	bt->complete = BT_STATE_IDLE;	/* end here */
	bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * 1000000;
	bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT;
	/* BT_CAP_outreqs == zero is a flag to read BT Capabilities */
	return 3; /* We claim 3 bytes of space; ought to check SPMI table */
}

/* Jam a completion code (probably an error) into a response */
/*
 * Synthesize a minimal 5-byte response in read_data[] carrying the given
 * completion code, mirroring the request's NetFn/seq/command bytes.
 */
static void force_result(struct si_sm_data *bt, unsigned char completion_code)
{
	bt->read_data[0] = 4;				/* # following bytes */
	bt->read_data[1] = bt->write_data[1] | 4;	/* Odd NetFn/LUN */
	bt->read_data[2] = bt->write_data[2];		/* seq (ignored) */
	bt->read_data[3] = bt->write_data[3];		/* Command */
	bt->read_data[4] = completion_code;
	bt->read_count = 5;
}

/* The upper state machine starts here */
static int bt_start_transaction(struct si_sm_data *bt,
				unsigned char *data,
				unsigned int size)
{
	unsigned int i;

	/* Validate the request size; data[] is NetFn/LUN + cmd + payload. */
	if (size < 2)
		return IPMI_REQ_LEN_INVALID_ERR;
	if (size > IPMI_MAX_MSG_LENGTH)
		return IPMI_REQ_LEN_EXCEEDED_ERR;

	if (bt->state == BT_STATE_LONG_BUSY)
		return IPMI_NODE_BUSY_ERR;

	if (bt->state != BT_STATE_IDLE)
		return IPMI_NOT_IN_MY_STATE_ERR;

	if (bt_debug & BT_DEBUG_MSG) {
		printk(KERN_WARNING "BT: +++++++++++++++++ New command\n");
		printk(KERN_WARNING "BT: NetFn/LUN CMD [%d data]:", size - 2);
		for (i = 0; i < size; i ++)
			printk(" %02x", data[i]);
		printk("\n");
	}

	/* Frame: length, NetFn/LUN, our sequence number, then the payload. */
	bt->write_data[0] = size + 1;	/* all data plus seq byte */
	bt->write_data[1] = *data;	/* NetFn/LUN */
	bt->write_data[2] = bt->seq++;
	memcpy(bt->write_data + 3, data + 1, size - 1);
	bt->write_count = size + 2;
	bt->error_retries = 0;
	bt->nonzero_status = 0;
	bt->truncated = 0;
	bt->state = BT_STATE_XACTION_START;
	bt->timeout = bt->BT_CAP_req2rsp;
	/* Pre-load an error response in case the transaction never completes. */
	force_result(bt, IPMI_ERR_UNSPECIFIED);
	return 0;
}

/*
 * After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE
 * it calls this.  Strip out the length and seq bytes.
 */
static int bt_get_result(struct si_sm_data *bt,
			 unsigned char *data,
			 unsigned int length)
{
	int i, msg_len;

	msg_len = bt->read_count - 2;		/* account for length & seq */
	/* Sanity-check; fall back to a forced error response if bogus. */
	if (msg_len < 3 || msg_len > IPMI_MAX_MSG_LENGTH) {
		force_result(bt, IPMI_ERR_UNSPECIFIED);
		msg_len = 3;
	}
	data[0] = bt->read_data[1];
	data[1] = bt->read_data[3];
	if (length < msg_len || bt->truncated) {
		data[2] = IPMI_ERR_MSG_TRUNCATED;
		msg_len = 3;
	} else
		memcpy(data + 2, bt->read_data + 4, msg_len - 2);

	if (bt_debug & BT_DEBUG_MSG) {
		printk(KERN_WARNING "BT: result %d bytes:", msg_len);
		for (i = 0; i < msg_len; i++)
			printk(" %02x", data[i]);
		printk("\n");
	}
	return msg_len;
}

/* This bit's functionality is optional */
#define BT_BMC_HWRST	0x80

/* Reset the BT interface flags to a known-clean state. */
static void reset_flags(struct si_sm_data *bt)
{
	if (bt_debug)
		printk(KERN_WARNING "IPMI BT: flag reset %s\n",
					status2txt(BT_STATUS));
	if (BT_STATUS & BT_H_BUSY)
		BT_CONTROL(BT_H_BUSY);	/* force clear */
	BT_CONTROL(BT_CLR_WR_PTR);	/* always reset */
	BT_CONTROL(BT_SMS_ATN);		/* always clear */
	BT_INTMASK_W(BT_BMC_HWRST);
}

/*
 * Get rid of an unwanted/stale response.  This should only be needed for
 * BMCs that support multiple outstanding requests.
 */
static void drain_BMC2HOST(struct si_sm_data *bt)
{
	int i, size;

	if (!(BT_STATUS & BT_B2H_ATN)) 	/* Not signalling a response */
		return;

	/* Register write order below is significant (toggle semantics). */
	BT_CONTROL(BT_H_BUSY);		/* now set */
	BT_CONTROL(BT_B2H_ATN);		/* always clear */
	BT_STATUS;			/* pause */
	BT_CONTROL(BT_B2H_ATN);		/* some BMCs are stubborn */
	BT_CONTROL(BT_CLR_RD_PTR);	/* always reset */
	if (bt_debug)
		printk(KERN_WARNING "IPMI BT: stale response %s; ",
			status2txt(BT_STATUS));
	size = BMC2HOST;
	for (i = 0; i < size ; i++)
		BMC2HOST;
	BT_CONTROL(BT_H_BUSY);		/* now clear */
	if (bt_debug)
		printk("drained %d bytes\n", size + 1);
}

/* Push the framed request in write_data[] to the BMC data port. */
static inline void write_all_bytes(struct si_sm_data *bt)
{
	int i;

	if (bt_debug & BT_DEBUG_MSG) {
		printk(KERN_WARNING "BT: write %d bytes seq=0x%02X",
			bt->write_count, bt->seq);
		for (i = 0; i < bt->write_count; i++)
			printk(" %02x", bt->write_data[i]);
		printk("\n");
	}
	for (i = 0; i < bt->write_count; i++)
		HOST2BMC(bt->write_data[i]);
}

/* Pull a response into read_data[]; returns 1 on a plausible frame. */
static inline int read_all_bytes(struct si_sm_data *bt)
{
	unsigned char i;

	/*
	 * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
	 * Keep layout of first four bytes aligned with write_data[]
	 */
	bt->read_data[0] = BMC2HOST;
	bt->read_count = bt->read_data[0];

	if (bt->read_count < 4 || bt->read_count >= IPMI_MAX_MSG_LENGTH) {
		if (bt_debug & BT_DEBUG_MSG)
			printk(KERN_WARNING "BT: bad raw rsp len=%d\n",
				bt->read_count);
		bt->truncated = 1;
		return 1;	/* let next XACTION START clean it up */
	}
	for (i = 1; i <= bt->read_count; i++)
		bt->read_data[i] = BMC2HOST;
	bt->read_count++;	/* Account internally for length byte */

	if (bt_debug & BT_DEBUG_MSG) {
		int max = bt->read_count;

		printk(KERN_WARNING "BT: got %d bytes seq=0x%02X",
			max, bt->read_data[2]);
		if (max > 16)
			max = 16;
		for (i = 0; i < max; i++)
			printk(KERN_CONT " %02x", bt->read_data[i]);
		printk(KERN_CONT "%s\n", bt->read_count == max ?
"" : " ..."); } /* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */ if ((bt->read_data[3] == bt->write_data[3]) && (bt->read_data[2] == bt->write_data[2]) && ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8))) return 1; if (bt_debug & BT_DEBUG_MSG) printk(KERN_WARNING "IPMI BT: bad packet: " "want 0x(%02X, %02X, %02X) got (%02X, %02X, %02X)\n", bt->write_data[1] | 0x04, bt->write_data[2], bt->write_data[3], bt->read_data[1], bt->read_data[2], bt->read_data[3]); return 0; } /* Restart if retries are left, or return an error completion code */ static enum si_sm_result error_recovery(struct si_sm_data *bt, unsigned char status, unsigned char cCode) { char *reason; bt->timeout = bt->BT_CAP_req2rsp; switch (cCode) { case IPMI_TIMEOUT_ERR: reason = "timeout"; break; default: reason = "internal error"; break; } printk(KERN_WARNING "IPMI BT: %s in %s %s ", /* open-ended line */ reason, STATE2TXT, STATUS2TXT); /* * Per the IPMI spec, retries are based on the sequence number * known only to this module, so manage a restart here. */ (bt->error_retries)++; if (bt->error_retries < bt->BT_CAP_retries) { printk("%d retries left\n", bt->BT_CAP_retries - bt->error_retries); bt->state = BT_STATE_RESTART; return SI_SM_CALL_WITHOUT_DELAY; } printk(KERN_WARNING "failed %d retries, sending error response\n", bt->BT_CAP_retries); if (!bt->nonzero_status) printk(KERN_ERR "IPMI BT: stuck, try power cycle\n"); /* this is most likely during insmod */ else if (bt->seq <= (unsigned char)(bt->BT_CAP_retries & 0xFF)) { printk(KERN_WARNING "IPMI: BT reset (takes 5 secs)\n"); bt->state = BT_STATE_RESET1; return SI_SM_CALL_WITHOUT_DELAY; } /* * Concoct a useful error message, set up the next state, and * be done with this sequence. 
	 */

	bt->state = BT_STATE_IDLE;
	switch (cCode) {
	case IPMI_TIMEOUT_ERR:
		/* A busy BMC turns a timeout into "node busy" and parks the
		 * state machine in LONG_BUSY until B_BUSY drops. */
		if (status & BT_B_BUSY) {
			cCode = IPMI_NODE_BUSY_ERR;
			bt->state = BT_STATE_LONG_BUSY;
		}
		break;
	default:
		break;
	}
	force_result(bt, cCode);
	return SI_SM_TRANSACTION_COMPLETE;
}

/* Check status and (usually) take action and change this state machine. */
static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
{
	unsigned char status, BT_CAP[8];
	static enum bt_states last_printed = BT_STATE_PRINTME;
	int i;

	status = BT_STATUS;
	bt->nonzero_status |= status;
	if ((bt_debug & BT_DEBUG_STATES) && (bt->state != last_printed)) {
		printk(KERN_WARNING "BT: %s %s TO=%ld - %ld \n",
			STATE2TXT,
			STATUS2TXT,
			bt->timeout,
			time);
		last_printed = bt->state;
	}

	/*
	 * Commands that time out may still (eventually) provide a response.
	 * This stale response will get in the way of a new response so remove
	 * it if possible (hopefully during IDLE).  Even if it comes up later
	 * it will be rejected by its (now-forgotten) seq number.
	 */
	if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) {
		drain_BMC2HOST(bt);
		BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
	}

	if ((bt->state != BT_STATE_IDLE) &&
	    (bt->state <  BT_STATE_PRINTME)) {
		/* check timeout */
		bt->timeout -= time;
		if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1))
			return error_recovery(bt,
					      status,
					      IPMI_TIMEOUT_ERR);
	}

	switch (bt->state) {

	/*
	 * Idle state first checks for asynchronous messages from another
	 * channel, then does some opportunistic housekeeping.
	 */
	case BT_STATE_IDLE:
		if (status & BT_SMS_ATN) {
			BT_CONTROL(BT_SMS_ATN);	/* clear it */
			return SI_SM_ATTN;
		}

		if (status & BT_H_BUSY)		/* clear a leftover H_BUSY */
			BT_CONTROL(BT_H_BUSY);

		/* Read BT capabilities if it hasn't been done yet */
		if (!bt->BT_CAP_outreqs)
			BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
					SI_SM_CALL_WITHOUT_DELAY);
		bt->timeout = bt->BT_CAP_req2rsp;
		BT_SI_SM_RETURN(SI_SM_IDLE);

	/* NOTE(review): cases below deliberately fall through when the
	 * BT_STATE_CHANGE macro's return is not taken on this pass. */
	case BT_STATE_XACTION_START:
		if (status & (BT_B_BUSY | BT_H2B_ATN))
			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
		if (BT_STATUS & BT_H_BUSY)
			BT_CONTROL(BT_H_BUSY);	/* force clear */
		BT_STATE_CHANGE(BT_STATE_WRITE_BYTES,
				SI_SM_CALL_WITHOUT_DELAY);

	case BT_STATE_WRITE_BYTES:
		if (status & BT_H_BUSY)
			BT_CONTROL(BT_H_BUSY);	/* clear */
		BT_CONTROL(BT_CLR_WR_PTR);
		write_all_bytes(bt);
		BT_CONTROL(BT_H2B_ATN);	/* can clear too fast to catch */
		BT_STATE_CHANGE(BT_STATE_WRITE_CONSUME,
				SI_SM_CALL_WITHOUT_DELAY);

	case BT_STATE_WRITE_CONSUME:
		if (status & (BT_B_BUSY | BT_H2B_ATN))
			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
		BT_STATE_CHANGE(BT_STATE_READ_WAIT,
				SI_SM_CALL_WITHOUT_DELAY);

	/* Spinning hard can suppress B2H_ATN and force a timeout */

	case BT_STATE_READ_WAIT:
		if (!(status & BT_B2H_ATN))
			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
		BT_CONTROL(BT_H_BUSY);		/* set */

		/*
		 * Uncached, ordered writes should just proceeed serially but
		 * some BMCs don't clear B2H_ATN with one hit.  Fast-path a
		 * workaround without too much penalty to the general case.
		 */

		BT_CONTROL(BT_B2H_ATN);		/* clear it to ACK the BMC */
		BT_STATE_CHANGE(BT_STATE_CLEAR_B2H,
				SI_SM_CALL_WITHOUT_DELAY);

	case BT_STATE_CLEAR_B2H:
		if (status & BT_B2H_ATN) {
			/* keep hitting it */
			BT_CONTROL(BT_B2H_ATN);
			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
		}
		BT_STATE_CHANGE(BT_STATE_READ_BYTES,
				SI_SM_CALL_WITHOUT_DELAY);

	case BT_STATE_READ_BYTES:
		if (!(status & BT_H_BUSY))
			/* check in case of retry */
			BT_CONTROL(BT_H_BUSY);
		BT_CONTROL(BT_CLR_RD_PTR);	/* start of BMC2HOST buffer */
		i = read_all_bytes(bt);		/* true == packet seq match */
		BT_CONTROL(BT_H_BUSY);		/* NOW clear */
		if (!i) 			/* Not my message */
			BT_STATE_CHANGE(BT_STATE_READ_WAIT,
					SI_SM_CALL_WITHOUT_DELAY);
		bt->state = bt->complete;
		return bt->state == BT_STATE_IDLE ?	/* where to next? */
			SI_SM_TRANSACTION_COMPLETE :	/* normal */
			SI_SM_CALL_WITHOUT_DELAY;	/* Startup magic */

	case BT_STATE_LONG_BUSY:	/* For example: after FW update */
		if (!(status & BT_B_BUSY)) {
			reset_flags(bt);	/* next state is now IDLE */
			bt_init_data(bt, bt->io);
		}
		return SI_SM_CALL_WITH_DELAY;	/* No repeat printing */

	case BT_STATE_RESET1:
		reset_flags(bt);
		drain_BMC2HOST(bt);
		BT_STATE_CHANGE(BT_STATE_RESET2,
				SI_SM_CALL_WITH_DELAY);

	case BT_STATE_RESET2:		/* Send a soft reset */
		BT_CONTROL(BT_CLR_WR_PTR);
		HOST2BMC(3);		/* number of bytes following */
		HOST2BMC(0x18);		/* NetFn/LUN == Application, LUN 0 */
		HOST2BMC(42);		/* Sequence number */
		HOST2BMC(3);		/* Cmd == Soft reset */
		BT_CONTROL(BT_H2B_ATN);
		bt->timeout = BT_RESET_DELAY * 1000000;
		BT_STATE_CHANGE(BT_STATE_RESET3,
				SI_SM_CALL_WITH_DELAY);

	case BT_STATE_RESET3:		/* Hold off everything for a bit */
		if (bt->timeout > 0)
			return SI_SM_CALL_WITH_DELAY;
		drain_BMC2HOST(bt);
		BT_STATE_CHANGE(BT_STATE_RESTART,
				SI_SM_CALL_WITH_DELAY);

	case BT_STATE_RESTART:		/* don't reset retries or seq! */
		bt->read_count = 0;
		bt->nonzero_status = 0;
		bt->timeout = bt->BT_CAP_req2rsp;
		BT_STATE_CHANGE(BT_STATE_XACTION_START,
				SI_SM_CALL_WITH_DELAY);

	/*
	 * Get BT Capabilities, using timing of upper level state machine.
	 * Set outreqs to prevent infinite loop on timeout.
	 */
	case BT_STATE_CAPABILITIES_BEGIN:
		bt->BT_CAP_outreqs = 1;
		{
			unsigned char GetBT_CAP[] = { 0x18, 0x36 };
			bt->state = BT_STATE_IDLE;
			bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
		}
		bt->complete = BT_STATE_CAPABILITIES_END;
		BT_STATE_CHANGE(BT_STATE_XACTION_START,
				SI_SM_CALL_WITH_DELAY);

	case BT_STATE_CAPABILITIES_END:
		i = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
		bt_init_data(bt, bt->io);
		/* Adopt the BMC's advertised values only on a clean 8-byte
		 * response with a zero completion code. */
		if ((i == 8) && !BT_CAP[2]) {
			bt->BT_CAP_outreqs = BT_CAP[3];
			bt->BT_CAP_req2rsp = BT_CAP[6] * 1000000;
			bt->BT_CAP_retries = BT_CAP[7];
		} else
			printk(KERN_WARNING "IPMI BT: using default values\n");
		if (!bt->BT_CAP_outreqs)
			bt->BT_CAP_outreqs = 1;
		printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n",
			bt->BT_CAP_req2rsp / 1000000L, bt->BT_CAP_retries);
		bt->timeout = bt->BT_CAP_req2rsp;
		return SI_SM_CALL_WITHOUT_DELAY;

	default:	/* should never occur */
		return error_recovery(bt,
				      status,
				      IPMI_ERR_UNSPECIFIED);
	}
	return SI_SM_CALL_WITH_DELAY;
}

static int bt_detect(struct si_sm_data *bt)
{
	/*
	 * It's impossible for the BT status and interrupt registers to be
	 * all 1's, (assuming a properly functioning, self-initialized BMC)
	 * but that's what you get from reading a bogus address, so we
	 * test that first.  The calling routine uses negative logic.
	 */
	if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
		return 1;
	reset_flags(bt);
	return 0;
}

/* No per-interface resources to release. */
static void bt_cleanup(struct si_sm_data *bt)
{
}

static int bt_size(void)
{
	return sizeof(struct si_sm_data);
}

/* Handler table exported to ipmi_si.c. */
struct si_sm_handlers bt_smi_handlers = {
	.init_data		= bt_init_data,
	.start_transaction	= bt_start_transaction,
	.get_result		= bt_get_result,
	.event			= bt_event,
	.detect			= bt_detect,
	.cleanup		= bt_cleanup,
	.size			= bt_size,
};
gpl-2.0
SOKP/kernel_motorola_msm8610
net/ipv4/tcp_cubic.c
3655
14313
/* * TCP CUBIC: Binary Increase Congestion control for TCP v2.3 * Home page: * http://netsrv.csc.ncsu.edu/twiki/bin/view/Main/BIC * This is from the implementation of CUBIC TCP in * Sangtae Ha, Injong Rhee and Lisong Xu, * "CUBIC: A New TCP-Friendly High-Speed TCP Variant" * in ACM SIGOPS Operating System Review, July 2008. * Available from: * http://netsrv.csc.ncsu.edu/export/cubic_a_new_tcp_2008.pdf * * CUBIC integrates a new slow start algorithm, called HyStart. * The details of HyStart are presented in * Sangtae Ha and Injong Rhee, * "Taming the Elephants: New TCP Slow Start", NCSU TechReport 2008. * Available from: * http://netsrv.csc.ncsu.edu/export/hystart_techreport_2008.pdf * * All testing results are available from: * http://netsrv.csc.ncsu.edu/wiki/index.php/TCP_Testing * * Unless CUBIC is enabled and congestion window is large * this behaves the same as the original Reno. */ #include <linux/mm.h> #include <linux/module.h> #include <linux/math64.h> #include <net/tcp.h> #define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation * max_cwnd = snd_cwnd * beta */ #define BICTCP_HZ 10 /* BIC HZ 2^10 = 1024 */ /* Two methods of hybrid slow start */ #define HYSTART_ACK_TRAIN 0x1 #define HYSTART_DELAY 0x2 /* Number of delay samples for detecting the increase of delay */ #define HYSTART_MIN_SAMPLES 8 #define HYSTART_DELAY_MIN (4U<<3) #define HYSTART_DELAY_MAX (16U<<3) #define HYSTART_DELAY_THRESH(x) clamp(x, HYSTART_DELAY_MIN, HYSTART_DELAY_MAX) static int fast_convergence __read_mostly = 1; static int beta __read_mostly = 717; /* = 717/1024 (BICTCP_BETA_SCALE) */ static int initial_ssthresh __read_mostly; static int bic_scale __read_mostly = 41; static int tcp_friendliness __read_mostly = 1; static int hystart __read_mostly = 1; static int hystart_detect __read_mostly = HYSTART_ACK_TRAIN | HYSTART_DELAY; static int hystart_low_window __read_mostly = 16; static int hystart_ack_delta __read_mostly = 2; static u32 cube_rtt_scale __read_mostly; static u32 
beta_scale __read_mostly;
static u64 cube_factor __read_mostly;

/* Note parameters that are used for precomputing scale factors are read-only */
module_param(fast_convergence, int, 0644);
MODULE_PARM_DESC(fast_convergence, "turn on/off fast convergence");
module_param(beta, int, 0644);
MODULE_PARM_DESC(beta, "beta for multiplicative increase");
module_param(initial_ssthresh, int, 0644);
MODULE_PARM_DESC(initial_ssthresh, "initial value of slow start threshold");
module_param(bic_scale, int, 0444);
MODULE_PARM_DESC(bic_scale, "scale (scaled by 1024) value for bic function (bic_scale/1024)");
module_param(tcp_friendliness, int, 0644);
MODULE_PARM_DESC(tcp_friendliness, "turn on/off tcp friendliness");
module_param(hystart, int, 0644);
MODULE_PARM_DESC(hystart, "turn on/off hybrid slow start algorithm");
module_param(hystart_detect, int, 0644);
MODULE_PARM_DESC(hystart_detect, "hyrbrid slow start detection mechanisms"
		 " 1: packet-train 2: delay 3: both packet-train and delay");
module_param(hystart_low_window, int, 0644);
MODULE_PARM_DESC(hystart_low_window, "lower bound cwnd for hybrid slow start");
module_param(hystart_ack_delta, int, 0644);
MODULE_PARM_DESC(hystart_ack_delta, "spacing between ack's indicating train (msecs)");

/* BIC TCP Parameters */
struct bictcp {
	u32	cnt;		/* increase cwnd by 1 after ACKs */
	u32 	last_max_cwnd;	/* last maximum snd_cwnd */
	u32	loss_cwnd;	/* congestion window at last loss */
	u32	last_cwnd;	/* the last snd_cwnd */
	u32	last_time;	/* time when updated last_cwnd */
	u32	bic_origin_point;/* origin point of bic function */
	u32	bic_K;		/* time to origin point from the beginning of the current epoch */
	u32	delay_min;	/* min delay (msec << 3) */
	u32	epoch_start;	/* beginning of an epoch */
	u32	ack_cnt;	/* number of acks */
	u32	tcp_cwnd;	/* estimated tcp cwnd */
#define ACK_RATIO_SHIFT	4
#define ACK_RATIO_LIMIT	(32u << ACK_RATIO_SHIFT)
	u16	delayed_ack;	/* estimate the ratio of Packets/ACKs << 4 */
	u8	sample_cnt;	/* number of samples to decide
curr_rtt */ u8 found; /* the exit point is found? */ u32 round_start; /* beginning of each round */ u32 end_seq; /* end_seq of the round */ u32 last_ack; /* last time when the ACK spacing is close */ u32 curr_rtt; /* the minimum rtt of current round */ }; static inline void bictcp_reset(struct bictcp *ca) { ca->cnt = 0; ca->last_max_cwnd = 0; ca->last_cwnd = 0; ca->last_time = 0; ca->bic_origin_point = 0; ca->bic_K = 0; ca->delay_min = 0; ca->epoch_start = 0; ca->delayed_ack = 2 << ACK_RATIO_SHIFT; ca->ack_cnt = 0; ca->tcp_cwnd = 0; ca->found = 0; } static inline u32 bictcp_clock(void) { #if HZ < 1000 return ktime_to_ms(ktime_get_real()); #else return jiffies_to_msecs(jiffies); #endif } static inline void bictcp_hystart_reset(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct bictcp *ca = inet_csk_ca(sk); ca->round_start = ca->last_ack = bictcp_clock(); ca->end_seq = tp->snd_nxt; ca->curr_rtt = 0; ca->sample_cnt = 0; } static void bictcp_init(struct sock *sk) { struct bictcp *ca = inet_csk_ca(sk); bictcp_reset(ca); ca->loss_cwnd = 0; if (hystart) bictcp_hystart_reset(sk); if (!hystart && initial_ssthresh) tcp_sk(sk)->snd_ssthresh = initial_ssthresh; } /* calculate the cubic root of x using a table lookup followed by one * Newton-Raphson iteration. * Avg err ~= 0.195% */ static u32 cubic_root(u64 a) { u32 x, b, shift; /* * cbrt(x) MSB values for x MSB values in [0..63]. 
* Precomputed then refined by hand - Willy Tarreau * * For x in [0..63], * v = cbrt(x << 18) - 1 * cbrt(x) = (v[x] + 10) >> 6 */ static const u8 v[] = { /* 0x00 */ 0, 54, 54, 54, 118, 118, 118, 118, /* 0x08 */ 123, 129, 134, 138, 143, 147, 151, 156, /* 0x10 */ 157, 161, 164, 168, 170, 173, 176, 179, /* 0x18 */ 181, 185, 187, 190, 192, 194, 197, 199, /* 0x20 */ 200, 202, 204, 206, 209, 211, 213, 215, /* 0x28 */ 217, 219, 221, 222, 224, 225, 227, 229, /* 0x30 */ 231, 232, 234, 236, 237, 239, 240, 242, /* 0x38 */ 244, 245, 246, 248, 250, 251, 252, 254, }; b = fls64(a); if (b < 7) { /* a in [0..63] */ return ((u32)v[(u32)a] + 35) >> 6; } b = ((b * 84) >> 8) - 1; shift = (a >> (b * 3)); x = ((u32)(((u32)v[shift] + 10) << b)) >> 6; /* * Newton-Raphson iteration * 2 * x = ( 2 * x + a / x ) / 3 * k+1 k k */ x = (2 * x + (u32)div64_u64(a, (u64)x * (u64)(x - 1))); x = ((x * 341) >> 10); return x; } /* * Compute congestion window to use. */ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) { u64 offs; u32 delta, t, bic_target, max_cnt; ca->ack_cnt++; /* count the number of ACKs */ if (ca->last_cwnd == cwnd && (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32) return; ca->last_cwnd = cwnd; ca->last_time = tcp_time_stamp; if (ca->epoch_start == 0) { ca->epoch_start = tcp_time_stamp; /* record the beginning of an epoch */ ca->ack_cnt = 1; /* start counting */ ca->tcp_cwnd = cwnd; /* syn with cubic */ if (ca->last_max_cwnd <= cwnd) { ca->bic_K = 0; ca->bic_origin_point = cwnd; } else { /* Compute new K based on * (wmax-cwnd) * (srtt>>3 / HZ) / c * 2^(3*bictcp_HZ) */ ca->bic_K = cubic_root(cube_factor * (ca->last_max_cwnd - cwnd)); ca->bic_origin_point = ca->last_max_cwnd; } } /* cubic function - calc*/ /* calculate c * time^3 / rtt, * while considering overflow in calculation of time^3 * (so time^3 is done by using 64 bit) * and without the support of division of 64bit numbers * (so all divisions are done by using 32 bit) * also NOTE the unit of those veriables * time 
= (t - K) / 2^bictcp_HZ * c = bic_scale >> 10 * rtt = (srtt >> 3) / HZ * !!! The following code does not have overflow problems, * if the cwnd < 1 million packets !!! */ /* change the unit from HZ to bictcp_HZ */ t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3) - ca->epoch_start) << BICTCP_HZ) / HZ; if (t < ca->bic_K) /* t - K */ offs = ca->bic_K - t; else offs = t - ca->bic_K; /* c/rtt * (t-K)^3 */ delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ); if (t < ca->bic_K) /* below origin*/ bic_target = ca->bic_origin_point - delta; else /* above origin*/ bic_target = ca->bic_origin_point + delta; /* cubic function - calc bictcp_cnt*/ if (bic_target > cwnd) { ca->cnt = cwnd / (bic_target - cwnd); } else { ca->cnt = 100 * cwnd; /* very small increment*/ } /* * The initial growth of cubic function may be too conservative * when the available bandwidth is still unknown. */ if (ca->last_max_cwnd == 0 && ca->cnt > 20) ca->cnt = 20; /* increase cwnd 5% per RTT */ /* TCP Friendly */ if (tcp_friendliness) { u32 scale = beta_scale; delta = (cwnd * scale) >> 3; while (ca->ack_cnt > delta) { /* update tcp cwnd */ ca->ack_cnt -= delta; ca->tcp_cwnd++; } if (ca->tcp_cwnd > cwnd){ /* if bic is slower than tcp */ delta = ca->tcp_cwnd - cwnd; max_cnt = cwnd / delta; if (ca->cnt > max_cnt) ca->cnt = max_cnt; } } ca->cnt = (ca->cnt << ACK_RATIO_SHIFT) / ca->delayed_ack; if (ca->cnt == 0) /* cannot be zero */ ca->cnt = 1; } static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) { struct tcp_sock *tp = tcp_sk(sk); struct bictcp *ca = inet_csk_ca(sk); if (!tcp_is_cwnd_limited(sk, in_flight)) return; if (tp->snd_cwnd <= tp->snd_ssthresh) { if (hystart && after(ack, ca->end_seq)) bictcp_hystart_reset(sk); tcp_slow_start(tp); } else { bictcp_update(ca, tp->snd_cwnd); tcp_cong_avoid_ai(tp, ca->cnt); } } static u32 bictcp_recalc_ssthresh(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); struct bictcp *ca = inet_csk_ca(sk); ca->epoch_start = 0; /* 
end of epoch */ /* Wmax and fast convergence */ if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence) ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta)) / (2 * BICTCP_BETA_SCALE); else ca->last_max_cwnd = tp->snd_cwnd; ca->loss_cwnd = tp->snd_cwnd; return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U); } static u32 bictcp_undo_cwnd(struct sock *sk) { struct bictcp *ca = inet_csk_ca(sk); return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd); } static void bictcp_state(struct sock *sk, u8 new_state) { if (new_state == TCP_CA_Loss) { bictcp_reset(inet_csk_ca(sk)); bictcp_hystart_reset(sk); } } static void hystart_update(struct sock *sk, u32 delay) { struct tcp_sock *tp = tcp_sk(sk); struct bictcp *ca = inet_csk_ca(sk); if (!(ca->found & hystart_detect)) { u32 now = bictcp_clock(); /* first detection parameter - ack-train detection */ if ((s32)(now - ca->last_ack) <= hystart_ack_delta) { ca->last_ack = now; if ((s32)(now - ca->round_start) > ca->delay_min >> 4) ca->found |= HYSTART_ACK_TRAIN; } /* obtain the minimum delay of more than sampling packets */ if (ca->sample_cnt < HYSTART_MIN_SAMPLES) { if (ca->curr_rtt == 0 || ca->curr_rtt > delay) ca->curr_rtt = delay; ca->sample_cnt++; } else { if (ca->curr_rtt > ca->delay_min + HYSTART_DELAY_THRESH(ca->delay_min>>4)) ca->found |= HYSTART_DELAY; } /* * Either one of two conditions are met, * we exit from slow start immediately. 
*/ if (ca->found & hystart_detect) tp->snd_ssthresh = tp->snd_cwnd; } } /* Track delayed acknowledgment ratio using sliding window * ratio = (15*ratio + sample) / 16 */ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us) { const struct inet_connection_sock *icsk = inet_csk(sk); const struct tcp_sock *tp = tcp_sk(sk); struct bictcp *ca = inet_csk_ca(sk); u32 delay; if (icsk->icsk_ca_state == TCP_CA_Open) { u32 ratio = ca->delayed_ack; ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT; ratio += cnt; ca->delayed_ack = min(ratio, ACK_RATIO_LIMIT); } /* Some calls are for duplicates without timetamps */ if (rtt_us < 0) return; /* Discard delay samples right after fast recovery */ if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ) return; delay = (rtt_us << 3) / USEC_PER_MSEC; if (delay == 0) delay = 1; /* first time call or link delay decreases */ if (ca->delay_min == 0 || ca->delay_min > delay) ca->delay_min = delay; /* hystart triggers when cwnd is larger than some threshold */ if (hystart && tp->snd_cwnd <= tp->snd_ssthresh && tp->snd_cwnd >= hystart_low_window) hystart_update(sk, delay); } static struct tcp_congestion_ops cubictcp __read_mostly = { .init = bictcp_init, .ssthresh = bictcp_recalc_ssthresh, .cong_avoid = bictcp_cong_avoid, .set_state = bictcp_state, .undo_cwnd = bictcp_undo_cwnd, .pkts_acked = bictcp_acked, .owner = THIS_MODULE, .name = "cubic", }; static int __init cubictcp_register(void) { BUILD_BUG_ON(sizeof(struct bictcp) > ICSK_CA_PRIV_SIZE); /* Precompute a bunch of the scaling factors that are used per-packet * based on SRTT of 100ms */ beta_scale = 8*(BICTCP_BETA_SCALE+beta)/ 3 / (BICTCP_BETA_SCALE - beta); cube_rtt_scale = (bic_scale * 10); /* 1024*c/rtt */ /* calculate the "K" for (wmax-cwnd) = c/rtt * K^3 * so K = cubic_root( (wmax-cwnd)*rtt/c ) * the unit of K is bictcp_HZ=2^10, not HZ * * c = bic_scale >> 10 * rtt = 100ms * * the following code has been designed and tested for * cwnd < 1 million packets * RTT < 100 seconds * HZ < 
1,000,00 (corresponding to 10 nano-second) */ /* 1/c * 2^2*bictcp_HZ * srtt */ cube_factor = 1ull << (10+3*BICTCP_HZ); /* 2^40 */ /* divide by bic_scale and by constant Srtt (100ms) */ do_div(cube_factor, bic_scale * 10); /* hystart needs ms clock resolution */ if (hystart && HZ < 1000) cubictcp.flags |= TCP_CONG_RTT_STAMP; return tcp_register_congestion_control(&cubictcp); } static void __exit cubictcp_unregister(void) { tcp_unregister_congestion_control(&cubictcp); } module_init(cubictcp_register); module_exit(cubictcp_unregister); MODULE_AUTHOR("Sangtae Ha, Stephen Hemminger"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CUBIC TCP"); MODULE_VERSION("2.3");
gpl-2.0
Krylon360/SGS4G_Kernel_GB
arch/frv/mb93090-mb00/pci-frv.c
3911
6691
/* pci-frv.c: low-level PCI access routines * * Copyright (C) 2003-5 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * - Derived from the i386 equivalent stuff * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/errno.h> #include "pci-frv.h" /* * We need to avoid collisions with `mirrored' VGA ports * and other strange ISA hardware, so we always want the * addresses to be allocated in the 0x000-0x0ff region * modulo 0x400. * * Why? Because some silly external IO cards only decode * the low 10 bits of the IO address. The 0x00-0xff region * is reserved for motherboard devices that decode all 16 * bits, so it's ok to allocate at, say, 0x2800-0x28ff, * but we want to try to avoid allocating at 0x2900-0x2bff * which might have be mirrored at 0x0100-0x03ff.. */ resource_size_t pcibios_align_resource(void *data, const struct resource *res, resource_size_t size, resource_size_t align) { resource_size_t start = res->start; if ((res->flags & IORESOURCE_IO) && (start & 0x300)) start = (start + 0x3ff) & ~0x3ff; return start; } /* * Handle resources of PCI devices. If the world were perfect, we could * just allocate all the resource regions and do nothing more. It isn't. * On the other hand, we cannot just re-allocate all devices, as it would * require us to know lots of host bridge internals. So we attempt to * keep as much of the original configuration as possible, but tweak it * when it's found to be wrong. 
* * Known BIOS problems we have to work around: * - I/O or memory regions not configured * - regions configured, but not enabled in the command register * - bogus I/O addresses above 64K used * - expansion ROMs left enabled (this may sound harmless, but given * the fact the PCI specs explicitly allow address decoders to be * shared between expansion ROMs and other resource regions, it's * at least dangerous) * * Our solution: * (1) Allocate resources for all buses behind PCI-to-PCI bridges. * This gives us fixed barriers on where we can allocate. * (2) Allocate resources for all enabled devices. If there is * a collision, just mark the resource as unallocated. Also * disable expansion ROMs during this step. * (3) Try to allocate resources for disabled devices. If the * resources were assigned correctly, everything goes well, * if they weren't, they won't disturb allocation of other * resources. * (4) Assign new addresses to resources which were either * not configured at all or misconfigured. If explicitly * requested by the user, configure expansion ROM address * as well. 
*/ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list) { struct list_head *ln; struct pci_bus *bus; struct pci_dev *dev; int idx; struct resource *r; /* Depth-First Search on bus tree */ for (ln=bus_list->next; ln != bus_list; ln=ln->next) { bus = pci_bus_b(ln); if ((dev = bus->self)) { for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) { r = &dev->resource[idx]; if (!r->start) continue; pci_claim_resource(dev, idx); } } pcibios_allocate_bus_resources(&bus->children); } } static void __init pcibios_allocate_resources(int pass) { struct pci_dev *dev = NULL; int idx, disabled; u16 command; struct resource *r; for_each_pci_dev(dev) { pci_read_config_word(dev, PCI_COMMAND, &command); for(idx = 0; idx < 6; idx++) { r = &dev->resource[idx]; if (r->parent) /* Already allocated */ continue; if (!r->start) /* Address not assigned at all */ continue; if (r->flags & IORESOURCE_IO) disabled = !(command & PCI_COMMAND_IO); else disabled = !(command & PCI_COMMAND_MEMORY); if (pass == disabled) { DBG("PCI: Resource %08lx-%08lx (f=%lx, d=%d, p=%d)\n", r->start, r->end, r->flags, disabled, pass); if (pci_claim_resource(dev, idx) < 0) { /* We'll assign a new address later */ r->end -= r->start; r->start = 0; } } } if (!pass) { r = &dev->resource[PCI_ROM_RESOURCE]; if (r->flags & IORESOURCE_ROM_ENABLE) { /* Turn the ROM off, leave the resource region, but keep it unregistered. 
*/ u32 reg; DBG("PCI: Switching off ROM of %s\n", pci_name(dev)); r->flags &= ~IORESOURCE_ROM_ENABLE; pci_read_config_dword(dev, dev->rom_base_reg, &reg); pci_write_config_dword(dev, dev->rom_base_reg, reg & ~PCI_ROM_ADDRESS_ENABLE); } } } } static void __init pcibios_assign_resources(void) { struct pci_dev *dev = NULL; int idx; struct resource *r; for_each_pci_dev(dev) { int class = dev->class >> 8; /* Don't touch classless devices and host bridges */ if (!class || class == PCI_CLASS_BRIDGE_HOST) continue; for(idx=0; idx<6; idx++) { r = &dev->resource[idx]; /* * Don't touch IDE controllers and I/O ports of video cards! */ if ((class == PCI_CLASS_STORAGE_IDE && idx < 4) || (class == PCI_CLASS_DISPLAY_VGA && (r->flags & IORESOURCE_IO))) continue; /* * We shall assign a new address to this resource, either because * the BIOS forgot to do so or because we have decided the old * address was unusable for some reason. */ if (!r->start && r->end) pci_assign_resource(dev, idx); } if (pci_probe & PCI_ASSIGN_ROMS) { r = &dev->resource[PCI_ROM_RESOURCE]; r->end -= r->start; r->start = 0; if (r->end) pci_assign_resource(dev, PCI_ROM_RESOURCE); } } } void __init pcibios_resource_survey(void) { DBG("PCI: Allocating resources\n"); pcibios_allocate_bus_resources(&pci_root_buses); pcibios_allocate_resources(0); pcibios_allocate_resources(1); pcibios_assign_resources(); } /* * If we set up a device for bus mastering, we need to check the latency * timer as certain crappy BIOSes forget to set it properly. */ unsigned int pcibios_max_latency = 255; void pcibios_set_master(struct pci_dev *dev) { u8 lat; pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat); if (lat < 16) lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency; else if (lat > pcibios_max_latency) lat = pcibios_max_latency; else return; printk(KERN_DEBUG "PCI: Setting latency timer of device %s to %d\n", pci_name(dev), lat); pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); }
gpl-2.0
bgcngm/802Xtreem
arch/x86/ia32/sys_ia32.c
4679
14129
/*
 * sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Based on
 *             sys_sparc32
 *
 * Copyright (C) 2000		VA Linux Co
 * Copyright (C) 2000		Don Dugger <n0ano@valinux.com>
 * Copyright (C) 1999 		Arun Sharma <arun.sharma@intel.com>
 * Copyright (C) 1997,1998 	Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 		David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 2000		Hewlett-Packard Co.
 * Copyright (C) 2000		David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2000,2001,2002	Andi Kleen, SuSE Labs (x86-64 port)
 *
 * These routines maintain argument size conversion between 32bit and 64bit
 * environment. In 2.5 most of this should be moved to a generic directory.
 *
 * This file assumes that there is a hole at the end of user address space.
 *
 * Some of the functions are LE specific currently. These are
 * hopefully all marked.  This should be fixed.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/utsname.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/poll.h>
#include <linux/personality.h>
#include <linux/stat.h>
#include <linux/rwsem.h>
#include <linux/compat.h>
#include <linux/vfs.h>
#include <linux/ptrace.h>
#include <linux/highuid.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <asm/mman.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <asm/vgtod.h>
#include <asm/sys_ia32.h>

/* Widen a 32-bit value to unsigned long without sign extension. */
#define AA(__x)		((unsigned long)(__x))


/* Reassemble a 64-bit length from the two 32-bit halves the ia32 ABI
 * passes, then hand off to the native syscall. */
asmlinkage long sys32_truncate64(const char __user *filename,
				 unsigned long offset_low,
				 unsigned long offset_high)
{
       return sys_truncate(filename, ((loff_t) offset_high << 32) | offset_low);
}

asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
				  unsigned long offset_high)
{
       return sys_ftruncate(fd, ((loff_t) offset_high << 32) | offset_low);
}

/*
 * Another set for IA32/LFS -- x86_64 struct stat is different due to
 * support for 64bit inode numbers.
 */
/* Copy a kernel kstat into the ia32 struct stat64 user layout. */
static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
{
	typeof(ubuf->st_uid) uid = 0;
	typeof(ubuf->st_gid) gid = 0;
	SET_UID(uid, stat->uid);
	SET_GID(gid, stat->gid);
	if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
	    __put_user(huge_encode_dev(stat->dev), &ubuf->st_dev) ||
	    __put_user(stat->ino, &ubuf->__st_ino) ||
	    __put_user(stat->ino, &ubuf->st_ino) ||
	    __put_user(stat->mode, &ubuf->st_mode) ||
	    __put_user(stat->nlink, &ubuf->st_nlink) ||
	    __put_user(uid, &ubuf->st_uid) ||
	    __put_user(gid, &ubuf->st_gid) ||
	    __put_user(huge_encode_dev(stat->rdev), &ubuf->st_rdev) ||
	    __put_user(stat->size, &ubuf->st_size) ||
	    __put_user(stat->atime.tv_sec, &ubuf->st_atime) ||
	    __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec) ||
	    __put_user(stat->mtime.tv_sec, &ubuf->st_mtime) ||
	    __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec) ||
	    __put_user(stat->ctime.tv_sec, &ubuf->st_ctime) ||
	    __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec) ||
	    __put_user(stat->blksize, &ubuf->st_blksize) ||
	    __put_user(stat->blocks, &ubuf->st_blocks))
		return -EFAULT;
	return 0;
}

asmlinkage long sys32_stat64(const char __user *filename,
			     struct stat64 __user *statbuf)
{
	struct kstat stat;
	int ret = vfs_stat(filename, &stat);

	if (!ret)
		ret = cp_stat64(statbuf, &stat);
	return ret;
}

asmlinkage long sys32_lstat64(const char __user *filename,
			      struct stat64 __user *statbuf)
{
	struct kstat stat;
	int ret = vfs_lstat(filename, &stat);
	if (!ret)
		ret = cp_stat64(statbuf, &stat);
	return ret;
}

asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
{
	struct kstat stat;
	int ret = vfs_fstat(fd, &stat);
	if (!ret)
		ret = cp_stat64(statbuf, &stat);
	return ret;
}

asmlinkage long sys32_fstatat(unsigned int dfd, const char __user *filename,
			      struct stat64 __user *statbuf, int flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_stat64(statbuf, &stat);
}

/*
 * Linux/i386 didn't use to be able to handle more than
 * 4 system call parameters, so these system calls used a memory
 * block for parameter passing..
 */
struct mmap_arg_struct32 {
	unsigned int addr;
	unsigned int len;
	unsigned int prot;
	unsigned int flags;
	unsigned int fd;
	unsigned int offset;
};

/* Old-style mmap taking its six arguments via a user-space struct. */
asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg)
{
	struct mmap_arg_struct32 a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;

	if (a.offset & ~PAGE_MASK)
		return -EINVAL;

	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			       a.offset>>PAGE_SHIFT);
}

asmlinkage long sys32_mprotect(unsigned long start, size_t len,
			       unsigned long prot)
{
	return sys_mprotect(start, len, prot);
}

/* Convert a 32-bit sigaction (compat pointers, split sigset words) to
 * and from the native representation around do_sigaction(). */
asmlinkage long sys32_rt_sigaction(int sig, struct sigaction32 __user *act,
				   struct sigaction32 __user *oact,
				   unsigned int sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_sigset_t set32;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler, restorer;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __copy_from_user(&set32, &act->sa_mask,
				     sizeof(compat_sigset_t)))
			return -EFAULT;
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);

		/*
		 * FIXME: here we rely on _COMPAT_NSIG_WORS to be >=
		 * than _NSIG_WORDS << 1
		 */
		switch (_NSIG_WORDS) {
		case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6]
				| (((long)set32.sig[7]) << 32);
			/* fall through */
		case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4]
				| (((long)set32.sig[5]) << 32);
			/* fall through */
		case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2]
				| (((long)set32.sig[3]) << 32);
			/* fall through */
		case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0]
				| (((long)set32.sig[1]) << 32);
		}
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		/*
		 * FIXME: here we rely on _COMPAT_NSIG_WORS to be >=
		 * than _NSIG_WORDS << 1
		 */
		switch (_NSIG_WORDS) {
		case 4:
			set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32);
			set32.sig[6] = old_ka.sa.sa_mask.sig[3];
			/* fall through */
		case 3:
			set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32);
			set32.sig[4] = old_ka.sa.sa_mask.sig[2];
			/* fall through */
		case 2:
			set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32);
			set32.sig[2] = old_ka.sa.sa_mask.sig[1];
			/* fall through */
		case 1:
			set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32);
			set32.sig[0] = old_ka.sa.sa_mask.sig[0];
		}
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __copy_to_user(&oact->sa_mask, &set32,
				   sizeof(compat_sigset_t)))
			return -EFAULT;
	}

	return ret;
}

/* Old (pre-rt) sigaction with a single-word sigset. */
asmlinkage long sys32_sigaction(int sig, struct old_sigaction32 __user *act,
				struct old_sigaction32 __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		compat_old_sigset_t mask;
		compat_uptr_t handler, restorer;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);

		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}

asmlinkage long sys32_alarm(unsigned int seconds)
{
	return alarm_setitimer(seconds);
}

asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
			      int options)
{
	return compat_sys_wait4(pid, stat_addr, options, NULL);
}

/* 32-bit timeval and related flotsam.  */

asmlinkage long sys32_sysfs(int option, u32 arg1, u32 arg2)
{
	return sys_sysfs(option, arg1, arg2);
}

/* Call the native syscall into a kernel-space timespec (via set_fs),
 * then convert the result to the 32-bit layout. */
asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
					    struct compat_timespec __user *interval)
{
	struct timespec t;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
	set_fs(old_fs);
	if (put_compat_timespec(&t, interval))
		return -EFAULT;
	return ret;
}

/* As above: fetch the native sigset in kernel space, then split each
 * 64-bit word into two 32-bit compat words for userspace. */
asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
				    compat_size_t sigsetsize)
{
	sigset_t s;
	compat_sigset_t s32;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
	set_fs(old_fs);
	if (!ret) {
		switch (_NSIG_WORDS) {
		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
			/* fall through */
		case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
			/* fall through */
		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
			/* fall through */
		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
		}
		if (copy_to_user(set, &s32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return ret;
}

asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
				      compat_siginfo_t __user *uinfo)
{
	siginfo_t info;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;
	set_fs(KERNEL_DS);
	ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
	set_fs(old_fs);
	return ret;
}

/* warning: next two assume little endian */
asmlinkage long sys32_pread(unsigned int fd, char __user *ubuf, u32 count,
			    u32 poslo, u32 poshi)
{
	return sys_pread64(fd, ubuf, count,
			 ((loff_t)AA(poshi) << 32) | AA(poslo));
}

asmlinkage long sys32_pwrite(unsigned int fd, const char __user *ubuf,
			     u32 count, u32 poslo, u32 poshi)
{
	return sys_pwrite64(fd, ubuf, count,
			  ((loff_t)AA(poshi) << 32) | AA(poslo));
}


/* Keep PER_LINUX32 tasks on PER_LINUX32 when they ask for PER_LINUX,
 * and report PER_LINUX back to them so the compat view is consistent. */
asmlinkage long sys32_personality(unsigned long personality)
{
	int ret;

	if (personality(current->personality) == PER_LINUX32 &&
		personality == PER_LINUX)
		personality = PER_LINUX32;
	ret = sys_personality(personality);
	if (ret == PER_LINUX32)
		ret = PER_LINUX;
	return ret;
}

/* Bounce the 32-bit off_t through a kernel-space off_t for the native
 * sendfile, copying the updated offset back out afterwards. */
asmlinkage long sys32_sendfile(int out_fd, int in_fd,
			       compat_off_t __user *offset, s32 count)
{
	mm_segment_t old_fs = get_fs();
	int ret;
	off_t of;

	if (offset && get_user(of, offset))
		return -EFAULT;

	set_fs(KERNEL_DS);
	ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
			   count);
	set_fs(old_fs);

	if (offset && put_user(of, offset))
		return -EFAULT;
	return ret;
}

asmlinkage long sys32_execve(const char __user *name, compat_uptr_t __user *argv,
			     compat_uptr_t __user *envp, struct pt_regs *regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = compat_do_execve(filename, argv, envp, regs);
	putname(filename);
	return error;
}

/* ia32 clone: TID pointers come from the compat register layout
 * (dx/di); a NULL child stack means "share the parent's sp". */
asmlinkage long sys32_clone(unsigned int clone_flags, unsigned int newsp,
			    struct pt_regs *regs)
{
	void __user *parent_tid = (void __user *)regs->dx;
	void __user *child_tid = (void __user *)regs->di;

	if (!newsp)
		newsp = regs->sp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * Some system calls that need sign extended arguments. This could be
 * done by a generic wrapper.
 */
long sys32_lseek(unsigned int fd, int offset, unsigned int whence)
{
	return sys_lseek(fd, offset, whence);
}

long sys32_kill(int pid, int sig)
{
	return sys_kill(pid, sig);
}

long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
			__u32 len_low, __u32 len_high, int advice)
{
	return sys_fadvise64_64(fd,
			       (((u64)offset_high)<<32) | offset_low,
			       (((u64)len_high)<<32) | len_low,
				advice);
}

/* vm86 is unavailable on 64-bit kernels; warn once per command name. */
long sys32_vm86_warning(void)
{
	struct task_struct *me = current;
	static char lastcomm[sizeof(me->comm)];

	if (strncmp(lastcomm, me->comm, sizeof(lastcomm))) {
		compat_printk(KERN_INFO
			      "%s: vm86 mode not supported on 64 bit kernel\n",
			      me->comm);
		strncpy(lastcomm, me->comm, sizeof(lastcomm));
	}
	return -ENOSYS;
}

long sys32_lookup_dcookie(u32 addr_low, u32 addr_high,
			  char __user *buf, size_t len)
{
	return sys_lookup_dcookie(((u64)addr_high << 32) | addr_low, buf, len);
}

asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi,
				   size_t count)
{
	return sys_readahead(fd, ((u64)off_hi << 32) | off_lo, count);
}

asmlinkage long sys32_sync_file_range(int fd, unsigned off_low, unsigned off_hi,
				      unsigned n_low, unsigned n_hi, int flags)
{
	return sys_sync_file_range(fd,
				   ((u64)off_hi << 32) | off_low,
				   ((u64)n_hi << 32) | n_low, flags);
}

asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi,
				size_t len, int advice)
{
	return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo,
				len, advice);
}

asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
				unsigned offset_hi, unsigned len_lo,
				unsigned len_hi)
{
	return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
			     ((u64)len_hi << 32) | len_lo);
}

asmlinkage long sys32_fanotify_mark(int fanotify_fd, unsigned int flags,
				    u32 mask_lo, u32 mask_hi,
				    int fd, const char  __user *pathname)
{
	return sys_fanotify_mark(fanotify_fd, flags,
				 ((u64)mask_hi << 32) | mask_lo,
				 fd, pathname);
}
gpl-2.0
davidmueller13/Gear_S_Kernel
drivers/i2c/busses/i2c-sis96x.c
4935
9041
/*
    Copyright (c) 2003 Mark M. Hoffman <mhoffman@lightlink.com>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

/*
    This module must be considered BETA unless and until the chipset
    manufacturer releases a datasheet.
    The register definitions are based on the SiS630.

    This module relies on quirk_sis_96x_smbus (drivers/pci/quirks.c)
    for just about every machine for which users have reported.
    If this module isn't detecting your 96x south bridge, have a
    look there.

    We assume there can only be one SiS96x with one SMBus interface.
*/

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>

/* base address register in PCI config space */
#define SIS96x_BAR 0x04

/* SiS96x SMBus registers */
#define SMB_STS      0x00
#define SMB_EN       0x01
#define SMB_CNT      0x02
#define SMB_HOST_CNT 0x03
#define SMB_ADDR     0x04
#define SMB_CMD      0x05
#define SMB_PCOUNT   0x06
#define SMB_COUNT    0x07
#define SMB_BYTE     0x08
#define SMB_DEV_ADDR 0x10
#define SMB_DB0      0x11
#define SMB_DB1      0x12
#define SMB_SAA      0x13

/* register count for request_region */
#define SMB_IOSIZE 0x20

/* Other settings */
#define MAX_TIMEOUT 500

/* SiS96x SMBus constants */
#define SIS96x_QUICK      0x00
#define SIS96x_BYTE       0x01
#define SIS96x_BYTE_DATA  0x02
#define SIS96x_WORD_DATA  0x03
#define SIS96x_PROC_CALL  0x04
#define SIS96x_BLOCK_DATA 0x05

static struct pci_driver sis96x_driver;
static struct i2c_adapter sis96x_adapter;
/* I/O base of the (single supported) SMBus controller; 0 = not probed */
static u16 sis96x_smbus_base;

/* Read one SMBus controller register relative to the probed I/O base. */
static inline u8 sis96x_read(u8 reg)
{
	return inb(sis96x_smbus_base + reg) ;
}

/* Write one SMBus controller register relative to the probed I/O base. */
static inline void sis96x_write(u8 reg, u8 data)
{
	outb(data, sis96x_smbus_base + reg) ;
}

/* Execute a SMBus transaction.
   int size is from SIS96x_QUICK to SIS96x_BLOCK_DATA */
static int sis96x_transaction(int size)
{
	int temp;
	int result = 0;
	int timeout = 0;

	dev_dbg(&sis96x_adapter.dev, "SMBus transaction %d\n", size);

	/* Make sure the SMBus host is ready to start transmitting */
	if (((temp = sis96x_read(SMB_CNT)) & 0x03) != 0x00) {

		dev_dbg(&sis96x_adapter.dev, "SMBus busy (0x%02x). "
			"Resetting...\n", temp);

		/* kill the transaction */
		sis96x_write(SMB_HOST_CNT, 0x20);

		/* check it again */
		if (((temp = sis96x_read(SMB_CNT)) & 0x03) != 0x00) {
			dev_dbg(&sis96x_adapter.dev, "Failed (0x%02x)\n", temp);
			return -EBUSY;
		} else {
			dev_dbg(&sis96x_adapter.dev, "Successful\n");
		}
	}

	/* Turn off timeout interrupts, set fast host clock */
	sis96x_write(SMB_CNT, 0x20);

	/* clear all (sticky) status flags */
	temp = sis96x_read(SMB_STS);
	sis96x_write(SMB_STS, temp & 0x1e);

	/* start the transaction by setting bit 4 and size bits */
	sis96x_write(SMB_HOST_CNT, 0x10 | (size & 0x07));

	/* We will always wait for a fraction of a second! */
	do {
		msleep(1);
		temp = sis96x_read(SMB_STS);
	} while (!(temp & 0x0e) && (timeout++ < MAX_TIMEOUT));

	/* If the SMBus is still busy, we give up */
	if (timeout > MAX_TIMEOUT) {
		dev_dbg(&sis96x_adapter.dev, "SMBus Timeout! (0x%02x)\n", temp);
		result = -ETIMEDOUT;
	}

	/* device error - probably missing ACK */
	if (temp & 0x02) {
		dev_dbg(&sis96x_adapter.dev, "Failed bus transaction!\n");
		result = -ENXIO;
	}

	/* bus collision */
	if (temp & 0x04) {
		dev_dbg(&sis96x_adapter.dev, "Bus collision!\n");
		result = -EIO;
	}

	/* Finish up by resetting the bus */
	sis96x_write(SMB_STS, temp);
	if ((temp = sis96x_read(SMB_STS))) {
		dev_dbg(&sis96x_adapter.dev, "Failed reset at "
			"end of transaction! (0x%02x)\n", temp);
	}

	return result;
}

/* Return negative errno on error.
 */
/* SMBus protocol handler: loads the controller registers for the requested
 * transaction type, runs it, and reads back results for read transfers.
 * flags is unused by this adapter. Returns 0 or a negative errno. */
static s32 sis96x_access(struct i2c_adapter * adap, u16 addr,
			 unsigned short flags, char read_write,
			 u8 command, int size, union i2c_smbus_data * data)
{
	int status;

	switch (size) {
	case I2C_SMBUS_QUICK:
		/* 7-bit address shifted left; LSB carries the R/W bit */
		sis96x_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
		size = SIS96x_QUICK;
		break;

	case I2C_SMBUS_BYTE:
		sis96x_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
		if (read_write == I2C_SMBUS_WRITE)
			sis96x_write(SMB_CMD, command);
		size = SIS96x_BYTE;
		break;

	case I2C_SMBUS_BYTE_DATA:
		sis96x_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
		sis96x_write(SMB_CMD, command);
		if (read_write == I2C_SMBUS_WRITE)
			sis96x_write(SMB_BYTE, data->byte);
		size = SIS96x_BYTE_DATA;
		break;

	case I2C_SMBUS_PROC_CALL:
	case I2C_SMBUS_WORD_DATA:
		sis96x_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
		sis96x_write(SMB_CMD, command);
		if (read_write == I2C_SMBUS_WRITE) {
			/* word goes out low byte first */
			sis96x_write(SMB_BYTE, data->word & 0xff);
			sis96x_write(SMB_BYTE + 1, (data->word & 0xff00) >> 8);
		}
		size = (size == I2C_SMBUS_PROC_CALL ?
			SIS96x_PROC_CALL : SIS96x_WORD_DATA);
		break;

	default:
		dev_warn(&adap->dev, "Unsupported transaction %d\n", size);
		return -EOPNOTSUPP;
	}

	status = sis96x_transaction(size);
	if (status)
		return status;

	/* Nothing to read back for writes (except PROC_CALL) or QUICK. */
	if ((size != SIS96x_PROC_CALL) &&
	    ((read_write == I2C_SMBUS_WRITE) || (size == SIS96x_QUICK)))
		return 0;

	switch (size) {
	case SIS96x_BYTE:
	case SIS96x_BYTE_DATA:
		data->byte = sis96x_read(SMB_BYTE);
		break;

	case SIS96x_WORD_DATA:
	case SIS96x_PROC_CALL:
		/* word comes back low byte first */
		data->word = sis96x_read(SMB_BYTE) +
			     (sis96x_read(SMB_BYTE + 1) << 8);
		break;
	}

	return 0;
}

/* Advertise the SMBus transaction types implemented by sis96x_access(). */
static u32 sis96x_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
	       I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
	       I2C_FUNC_SMBUS_PROC_CALL;
}

static const struct i2c_algorithm smbus_algorithm = {
	.smbus_xfer	= sis96x_access,
	.functionality	= sis96x_func,
};

static struct i2c_adapter sis96x_adapter = {
	.owner		= THIS_MODULE,
	.class		= I2C_CLASS_HWMON | I2C_CLASS_SPD,
	.algo		= &smbus_algorithm,
};

static DEFINE_PCI_DEVICE_TABLE(sis96x_ids) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_SMBUS) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, sis96x_ids);

/* Bind to the SMBus PCI function: validate the device class, pick up the
 * I/O base from BAR 4, reserve the region and register the i2c adapter.
 * Only one device is supported (state lives in file-scope globals). */
static int __devinit sis96x_probe(struct pci_dev *dev,
				  const struct pci_device_id *id)
{
	u16 ww = 0;
	int retval;

	if (sis96x_smbus_base) {
		dev_err(&dev->dev, "Only one device supported.\n");
		return -EBUSY;
	}

	pci_read_config_word(dev, PCI_CLASS_DEVICE, &ww);
	if (PCI_CLASS_SERIAL_SMBUS != ww) {
		dev_err(&dev->dev, "Unsupported device class 0x%04x!\n", ww);
		return -ENODEV;
	}

	sis96x_smbus_base = pci_resource_start(dev, SIS96x_BAR);
	if (!sis96x_smbus_base) {
		dev_err(&dev->dev, "SiS96x SMBus base address "
			"not initialized!\n");
		return -EINVAL;
	}
	dev_info(&dev->dev, "SiS96x SMBus base address: 0x%04x\n",
		 sis96x_smbus_base);

	/* Bail out if ACPI claims the same I/O range. */
	retval = acpi_check_resource_conflict(&dev->resource[SIS96x_BAR]);
	if (retval)
		return -ENODEV;

	/* Everything is happy, let's grab the memory and set things up. */
	if (!request_region(sis96x_smbus_base, SMB_IOSIZE,
			    sis96x_driver.name)) {
		dev_err(&dev->dev, "SMBus registers 0x%04x-0x%04x "
			"already in use!\n", sis96x_smbus_base,
			sis96x_smbus_base + SMB_IOSIZE - 1);
		sis96x_smbus_base = 0;
		return -EINVAL;
	}

	/* set up the sysfs linkage to our parent device */
	sis96x_adapter.dev.parent = &dev->dev;

	snprintf(sis96x_adapter.name, sizeof(sis96x_adapter.name),
		 "SiS96x SMBus adapter at 0x%04x", sis96x_smbus_base);

	if ((retval = i2c_add_adapter(&sis96x_adapter))) {
		dev_err(&dev->dev, "Couldn't register adapter!\n");
		release_region(sis96x_smbus_base, SMB_IOSIZE);
		sis96x_smbus_base = 0;
	}

	return retval;
}

/* Undo probe(): unregister the adapter and release the I/O region. */
static void __devexit sis96x_remove(struct pci_dev *dev)
{
	if (sis96x_smbus_base) {
		i2c_del_adapter(&sis96x_adapter);
		release_region(sis96x_smbus_base, SMB_IOSIZE);
		sis96x_smbus_base = 0;
	}
}

static struct pci_driver sis96x_driver = {
	.name		= "sis96x_smbus",
	.id_table	= sis96x_ids,
	.probe		= sis96x_probe,
	.remove		= __devexit_p(sis96x_remove),
};

static int __init i2c_sis96x_init(void)
{
	return pci_register_driver(&sis96x_driver);
}

static void __exit i2c_sis96x_exit(void)
{
	pci_unregister_driver(&sis96x_driver);
}

MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>");
MODULE_DESCRIPTION("SiS96x SMBus driver");
MODULE_LICENSE("GPL");

/* Register initialization functions using helper macros */
module_init(i2c_sis96x_init);
module_exit(i2c_sis96x_exit);
gpl-2.0
golden-guy/android_kernel_asus_grouper
arch/m68k/sun3/prom/init.c
9287
1774
/* * init.c: Initialize internal variables used by the PROM * library functions. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) */ #include <linux/kernel.h> #include <linux/init.h> #include <asm/openprom.h> #include <asm/oplib.h> struct linux_romvec *romvec; enum prom_major_version prom_vers; unsigned int prom_rev, prom_prev; /* The root node of the prom device tree. */ int prom_root_node; /* Pointer to the device tree operations structure. */ struct linux_nodeops *prom_nodeops; /* You must call prom_init() before you attempt to use any of the * routines in the prom library. It returns 0 on success, 1 on * failure. It gets passed the pointer to the PROM vector. */ extern void prom_meminit(void); extern void prom_ranges_init(void); void __init prom_init(struct linux_romvec *rp) { romvec = rp; #ifndef CONFIG_SUN3 switch(romvec->pv_romvers) { case 0: prom_vers = PROM_V0; break; case 2: prom_vers = PROM_V2; break; case 3: prom_vers = PROM_V3; break; case 4: prom_vers = PROM_P1275; prom_printf("PROMLIB: Sun IEEE Prom not supported yet\n"); prom_halt(); break; default: prom_printf("PROMLIB: Bad PROM version %d\n", romvec->pv_romvers); prom_halt(); break; }; prom_rev = romvec->pv_plugin_revision; prom_prev = romvec->pv_printrev; prom_nodeops = romvec->pv_nodeops; prom_root_node = prom_getsibling(0); if((prom_root_node == 0) || (prom_root_node == -1)) prom_halt(); if((((unsigned long) prom_nodeops) == 0) || (((unsigned long) prom_nodeops) == -1)) prom_halt(); prom_meminit(); prom_ranges_init(); #endif // printk("PROMLIB: Sun Boot Prom Version %d Revision %d\n", // romvec->pv_romvers, prom_rev); /* Initialization successful. */ return; }
gpl-2.0
wimpknocker/lge-kernel-lproj
drivers/media/rc/keymaps/rc-terratec-slim.c
9543
2365
/*
 * TerraTec remote controller keytable
 *
 * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/* TerraTec slim remote, 7 rows, 4 columns. */
/* Uses NEC extended 0x02bd. */
/* Scancode layout: NEC extended address 0x02bd in the upper bits,
 * command byte in the low byte. */
static struct rc_map_table terratec_slim[] = {
	{ 0x02bd00, KEY_1 },
	{ 0x02bd01, KEY_2 },
	{ 0x02bd02, KEY_3 },
	{ 0x02bd03, KEY_4 },
	{ 0x02bd04, KEY_5 },
	{ 0x02bd05, KEY_6 },
	{ 0x02bd06, KEY_7 },
	{ 0x02bd07, KEY_8 },
	{ 0x02bd08, KEY_9 },
	{ 0x02bd09, KEY_0 },
	{ 0x02bd0a, KEY_MUTE },
	{ 0x02bd0b, KEY_NEW }, /* symbol: PIP */
	{ 0x02bd0e, KEY_VOLUMEDOWN },
	{ 0x02bd0f, KEY_PLAYPAUSE },
	{ 0x02bd10, KEY_RIGHT },
	{ 0x02bd11, KEY_LEFT },
	{ 0x02bd12, KEY_UP },
	{ 0x02bd13, KEY_DOWN },
	{ 0x02bd15, KEY_OK },
	{ 0x02bd16, KEY_STOP },
	{ 0x02bd17, KEY_CAMERA }, /* snapshot */
	{ 0x02bd18, KEY_CHANNELUP },
	{ 0x02bd19, KEY_RECORD },
	{ 0x02bd1a, KEY_CHANNELDOWN },
	{ 0x02bd1c, KEY_ESC },
	{ 0x02bd1f, KEY_VOLUMEUP },
	{ 0x02bd44, KEY_EPG },
	{ 0x02bd45, KEY_POWER2 }, /* [red power button] */
};

/* rc-core registration record wrapping the table above. */
static struct rc_map_list terratec_slim_map = {
	.map = {
		.scan    = terratec_slim,
		.size    = ARRAY_SIZE(terratec_slim),
		.rc_type = RC_TYPE_NEC,
		.name    = RC_MAP_TERRATEC_SLIM,
	}
};

/* Register the keymap with rc-core on module load. */
static int __init init_rc_map_terratec_slim(void)
{
	return rc_map_register(&terratec_slim_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_map_terratec_slim(void)
{
	rc_map_unregister(&terratec_slim_map);
}

module_init(init_rc_map_terratec_slim)
module_exit(exit_rc_map_terratec_slim)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
gpl-2.0
multirom-htc/kernel_htc_m8gpe
drivers/media/rc/keymaps/rc-imon-pad.c
9543
4426
/* rc5-imon-pad.c - Keytable for SoundGraph iMON PAD and Antec Veris
 * RM-200 Remote Control
 *
 * Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/*
 * standard imon remote key table, which isn't really entirely
 * "standard", as different receivers decode the same key on the
 * same remote to different hex codes, and the silkscreened names
 * vary a bit between the SoundGraph and Antec remotes... ugh.
 *
 * That is why many KEY_* values below appear more than once, each
 * bound to a different raw scancode.
 */
static struct rc_map_table imon_pad[] = {
	/* keys sorted mostly by frequency of use to optimize lookups */
	{ 0x2a8195b7, KEY_REWIND },
	{ 0x298315b7, KEY_REWIND },
	{ 0x2b8115b7, KEY_FASTFORWARD },
	{ 0x2b8315b7, KEY_FASTFORWARD },
	{ 0x2b9115b7, KEY_PREVIOUS },
	{ 0x298195b7, KEY_NEXT },

	{ 0x2a8115b7, KEY_PLAY },
	{ 0x2a8315b7, KEY_PLAY },
	{ 0x2a9115b7, KEY_PAUSE },
	{ 0x2b9715b7, KEY_STOP },
	{ 0x298115b7, KEY_RECORD },

	{ 0x01008000, KEY_UP },
	{ 0x01007f00, KEY_DOWN },
	{ 0x01000080, KEY_LEFT },
	{ 0x0100007f, KEY_RIGHT },

	{ 0x2aa515b7, KEY_UP },
	{ 0x289515b7, KEY_DOWN },
	{ 0x29a515b7, KEY_LEFT },
	{ 0x2ba515b7, KEY_RIGHT },

	{ 0x0200002c, KEY_SPACE }, /* Select/Space */
	{ 0x2a9315b7, KEY_SPACE }, /* Select/Space */
	{ 0x02000028, KEY_ENTER },
	{ 0x28a195b7, KEY_ENTER },
	{ 0x288195b7, KEY_EXIT },
	{ 0x02000029, KEY_ESC },
	{ 0x2bb715b7, KEY_ESC },
	{ 0x0200002a, KEY_BACKSPACE },
	{ 0x28a115b7, KEY_BACKSPACE },

	{ 0x2b9595b7, KEY_MUTE },
	{ 0x28a395b7, KEY_VOLUMEUP },
	{ 0x28a595b7, KEY_VOLUMEDOWN },
	{ 0x289395b7, KEY_CHANNELUP },
	{ 0x288795b7, KEY_CHANNELDOWN },

	{ 0x0200001e, KEY_NUMERIC_1 },
	{ 0x0200001f, KEY_NUMERIC_2 },
	{ 0x02000020, KEY_NUMERIC_3 },
	{ 0x02000021, KEY_NUMERIC_4 },
	{ 0x02000022, KEY_NUMERIC_5 },
	{ 0x02000023, KEY_NUMERIC_6 },
	{ 0x02000024, KEY_NUMERIC_7 },
	{ 0x02000025, KEY_NUMERIC_8 },
	{ 0x02000026, KEY_NUMERIC_9 },
	{ 0x02000027, KEY_NUMERIC_0 },

	{ 0x28b595b7, KEY_NUMERIC_1 },
	{ 0x2bb195b7, KEY_NUMERIC_2 },
	{ 0x28b195b7, KEY_NUMERIC_3 },
	{ 0x2a8595b7, KEY_NUMERIC_4 },
	{ 0x299595b7, KEY_NUMERIC_5 },
	{ 0x2aa595b7, KEY_NUMERIC_6 },
	{ 0x2b9395b7, KEY_NUMERIC_7 },
	{ 0x2a8515b7, KEY_NUMERIC_8 },
	{ 0x2aa115b7, KEY_NUMERIC_9 },
	{ 0x2ba595b7, KEY_NUMERIC_0 },

	{ 0x02200025, KEY_NUMERIC_STAR },
	{ 0x28b515b7, KEY_NUMERIC_STAR },
	{ 0x02200020, KEY_NUMERIC_POUND },
	{ 0x29a115b7, KEY_NUMERIC_POUND },

	{ 0x2b8515b7, KEY_VIDEO },
	{ 0x299195b7, KEY_AUDIO },
	{ 0x2ba115b7, KEY_IMAGES },
	{ 0x28a515b7, KEY_TV },
	{ 0x29a395b7, KEY_DVD },
	{ 0x29a295b7, KEY_DVD },

	/* the Menu key between DVD and Subtitle on the RM-200... */
	{ 0x2ba385b7, KEY_MENU },
	{ 0x2ba395b7, KEY_MENU },

	{ 0x288515b7, KEY_BOOKMARKS },
	{ 0x2ab715b7, KEY_CAMERA }, /* Thumbnail */
	{ 0x298595b7, KEY_SUBTITLE },
	{ 0x2b8595b7, KEY_LANGUAGE },

	{ 0x29a595b7, KEY_ZOOM },
	{ 0x2aa395b7, KEY_SCREEN }, /* FullScreen */

	{ 0x299115b7, KEY_KEYBOARD },
	{ 0x299135b7, KEY_KEYBOARD },

	{ 0x01010000, BTN_LEFT },
	{ 0x01020000, BTN_RIGHT },
	{ 0x01010080, BTN_LEFT },
	{ 0x01020080, BTN_RIGHT },
	{ 0x688301b7, BTN_LEFT },
	{ 0x688481b7, BTN_RIGHT },

	{ 0x2a9395b7, KEY_CYCLEWINDOWS }, /* TaskSwitcher */
	{ 0x2b8395b7, KEY_TIME }, /* Timer */

	{ 0x289115b7, KEY_POWER },
	{ 0x29b195b7, KEY_EJECTCD }, /* the one next to play */
	{ 0x299395b7, KEY_EJECTCLOSECD }, /* eject (by TaskSw) */

	{ 0x02800000, KEY_CONTEXT_MENU }, /* Left Menu */
	{ 0x2b8195b7, KEY_CONTEXT_MENU }, /* Left Menu*/
	{ 0x02000065, KEY_COMPOSE }, /* RightMenu */
	{ 0x28b715b7, KEY_COMPOSE }, /* RightMenu */

	{ 0x2ab195b7, KEY_MEDIA }, /* Go or MultiMon */
	{ 0x29b715b7, KEY_DASHBOARD }, /* AppLauncher */
};

/* rc-core registration record wrapping the table above. */
static struct rc_map_list imon_pad_map = {
	.map = {
		.scan    = imon_pad,
		.size    = ARRAY_SIZE(imon_pad),
		/* actual protocol details unknown, hardware decoder */
		.rc_type = RC_TYPE_OTHER,
		.name    = RC_MAP_IMON_PAD,
	}
};

/* Register the keymap with rc-core on module load. */
static int __init init_rc_map_imon_pad(void)
{
	return rc_map_register(&imon_pad_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_map_imon_pad(void)
{
	rc_map_unregister(&imon_pad_map);
}

module_init(init_rc_map_imon_pad)
module_exit(exit_rc_map_imon_pad)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
gpl-2.0
NathanAtSamraksh/dart-linux
drivers/media/rc/keymaps/rc-terratec-slim.c
9543
2365
/*
 * TerraTec remote controller keytable
 *
 * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/* TerraTec slim remote, 7 rows, 4 columns. */
/* Uses NEC extended 0x02bd. */
/* Scancode layout: NEC extended address 0x02bd in the upper bits,
 * command byte in the low byte. */
static struct rc_map_table terratec_slim[] = {
	{ 0x02bd00, KEY_1 },
	{ 0x02bd01, KEY_2 },
	{ 0x02bd02, KEY_3 },
	{ 0x02bd03, KEY_4 },
	{ 0x02bd04, KEY_5 },
	{ 0x02bd05, KEY_6 },
	{ 0x02bd06, KEY_7 },
	{ 0x02bd07, KEY_8 },
	{ 0x02bd08, KEY_9 },
	{ 0x02bd09, KEY_0 },
	{ 0x02bd0a, KEY_MUTE },
	{ 0x02bd0b, KEY_NEW }, /* symbol: PIP */
	{ 0x02bd0e, KEY_VOLUMEDOWN },
	{ 0x02bd0f, KEY_PLAYPAUSE },
	{ 0x02bd10, KEY_RIGHT },
	{ 0x02bd11, KEY_LEFT },
	{ 0x02bd12, KEY_UP },
	{ 0x02bd13, KEY_DOWN },
	{ 0x02bd15, KEY_OK },
	{ 0x02bd16, KEY_STOP },
	{ 0x02bd17, KEY_CAMERA }, /* snapshot */
	{ 0x02bd18, KEY_CHANNELUP },
	{ 0x02bd19, KEY_RECORD },
	{ 0x02bd1a, KEY_CHANNELDOWN },
	{ 0x02bd1c, KEY_ESC },
	{ 0x02bd1f, KEY_VOLUMEUP },
	{ 0x02bd44, KEY_EPG },
	{ 0x02bd45, KEY_POWER2 }, /* [red power button] */
};

/* rc-core registration record wrapping the table above. */
static struct rc_map_list terratec_slim_map = {
	.map = {
		.scan    = terratec_slim,
		.size    = ARRAY_SIZE(terratec_slim),
		.rc_type = RC_TYPE_NEC,
		.name    = RC_MAP_TERRATEC_SLIM,
	}
};

/* Register the keymap with rc-core on module load. */
static int __init init_rc_map_terratec_slim(void)
{
	return rc_map_register(&terratec_slim_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_map_terratec_slim(void)
{
	rc_map_unregister(&terratec_slim_map);
}

module_init(init_rc_map_terratec_slim)
module_exit(exit_rc_map_terratec_slim)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
gpl-2.0
KylinUI/android_kernel_lge_mako
arch/arm/mach-omap2/mux2420.c
10567
25393
/* * Copyright (C) 2010 Nokia * Copyright (C) 2010 Texas Instruments * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include "mux.h" #ifdef CONFIG_OMAP_MUX #define _OMAP2420_MUXENTRY(M0, g, m0, m1, m2, m3, m4, m5, m6, m7) \ { \ .reg_offset = (OMAP2420_CONTROL_PADCONF_##M0##_OFFSET), \ .gpio = (g), \ .muxnames = { m0, m1, m2, m3, m4, m5, m6, m7 }, \ } #else #define _OMAP2420_MUXENTRY(M0, g, m0, m1, m2, m3, m4, m5, m6, m7) \ { \ .reg_offset = (OMAP2420_CONTROL_PADCONF_##M0##_OFFSET), \ .gpio = (g), \ } #endif #define _OMAP2420_BALLENTRY(M0, bb, bt) \ { \ .reg_offset = (OMAP2420_CONTROL_PADCONF_##M0##_OFFSET), \ .balls = { bb, bt }, \ } /* * Superset of all mux modes for omap2420 */ static struct omap_mux __initdata omap2420_muxmodes[] = { _OMAP2420_MUXENTRY(CAM_D0, 54, "cam_d0", "hw_dbg2", "sti_dout", "gpio_54", NULL, NULL, "etk_d2", NULL), _OMAP2420_MUXENTRY(CAM_D1, 53, "cam_d1", "hw_dbg3", "sti_din", "gpio_53", NULL, NULL, "etk_d3", NULL), _OMAP2420_MUXENTRY(CAM_D2, 52, "cam_d2", "hw_dbg4", "mcbsp1_clkx", "gpio_52", NULL, NULL, "etk_d4", NULL), _OMAP2420_MUXENTRY(CAM_D3, 51, "cam_d3", "hw_dbg5", "mcbsp1_dr", "gpio_51", NULL, NULL, "etk_d5", NULL), _OMAP2420_MUXENTRY(CAM_D4, 50, "cam_d4", "hw_dbg6", "mcbsp1_fsr", "gpio_50", NULL, NULL, "etk_d6", NULL), _OMAP2420_MUXENTRY(CAM_D5, 49, "cam_d5", "hw_dbg7", "mcbsp1_clkr", "gpio_49", NULL, NULL, "etk_d7", NULL), _OMAP2420_MUXENTRY(CAM_D6, 0, "cam_d6", "hw_dbg8", NULL, NULL, NULL, NULL, "etk_d8", NULL), _OMAP2420_MUXENTRY(CAM_D7, 0, "cam_d7", "hw_dbg9", NULL, NULL, NULL, NULL, "etk_d9", NULL), _OMAP2420_MUXENTRY(CAM_D8, 54, "cam_d8", "hw_dbg10", NULL, "gpio_54", NULL, NULL, "etk_d10", NULL), _OMAP2420_MUXENTRY(CAM_D9, 53, "cam_d9", "hw_dbg11", NULL, "gpio_53", NULL, NULL, "etk_d11", NULL), _OMAP2420_MUXENTRY(CAM_HS, 55, "cam_hs", 
"hw_dbg1", "mcbsp1_dx", "gpio_55", NULL, NULL, "etk_d1", NULL), _OMAP2420_MUXENTRY(CAM_LCLK, 57, "cam_lclk", NULL, "mcbsp_clks", "gpio_57", NULL, NULL, "etk_c1", NULL), _OMAP2420_MUXENTRY(CAM_VS, 56, "cam_vs", "hw_dbg0", "mcbsp1_fsx", "gpio_56", NULL, NULL, "etk_d0", NULL), _OMAP2420_MUXENTRY(CAM_XCLK, 0, "cam_xclk", NULL, "sti_clk", NULL, NULL, NULL, "etk_c2", NULL), _OMAP2420_MUXENTRY(DSS_ACBIAS, 48, "dss_acbias", NULL, "mcbsp2_fsx", "gpio_48", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(DSS_DATA10, 40, "dss_data10", NULL, NULL, "gpio_40", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(DSS_DATA11, 41, "dss_data11", NULL, NULL, "gpio_41", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(DSS_DATA12, 42, "dss_data12", NULL, NULL, "gpio_42", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(DSS_DATA13, 43, "dss_data13", NULL, NULL, "gpio_43", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(DSS_DATA14, 44, "dss_data14", NULL, NULL, "gpio_44", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(DSS_DATA15, 45, "dss_data15", NULL, NULL, "gpio_45", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(DSS_DATA16, 46, "dss_data16", NULL, NULL, "gpio_46", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(DSS_DATA17, 47, "dss_data17", NULL, NULL, "gpio_47", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(DSS_DATA8, 38, "dss_data8", NULL, NULL, "gpio_38", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(DSS_DATA9, 39, "dss_data9", NULL, NULL, "gpio_39", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(EAC_AC_DIN, 115, "eac_ac_din", "mcbsp2_dr", NULL, "gpio_115", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(EAC_AC_DOUT, 116, "eac_ac_dout", "mcbsp2_dx", NULL, "gpio_116", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(EAC_AC_FS, 114, "eac_ac_fs", "mcbsp2_fsx", NULL, "gpio_114", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(EAC_AC_MCLK, 117, "eac_ac_mclk", NULL, NULL, "gpio_117", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(EAC_AC_RST, 118, "eac_ac_rst", "eac_bt_din", NULL, "gpio_118", NULL, NULL, NULL, NULL), 
_OMAP2420_MUXENTRY(EAC_AC_SCLK, 113, "eac_ac_sclk", "mcbsp2_clkx", NULL, "gpio_113", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(EAC_BT_DIN, 73, "eac_bt_din", NULL, NULL, "gpio_73", NULL, NULL, "etk_d9", NULL), _OMAP2420_MUXENTRY(EAC_BT_DOUT, 74, "eac_bt_dout", NULL, "sti_clk", "gpio_74", NULL, NULL, "etk_d8", NULL), _OMAP2420_MUXENTRY(EAC_BT_FS, 72, "eac_bt_fs", NULL, NULL, "gpio_72", NULL, NULL, "etk_d10", NULL), _OMAP2420_MUXENTRY(EAC_BT_SCLK, 71, "eac_bt_sclk", NULL, NULL, "gpio_71", NULL, NULL, "etk_d11", NULL), _OMAP2420_MUXENTRY(GPIO_119, 119, "gpio_119", NULL, "sti_din", "gpio_119", NULL, "sys_boot0", "etk_d12", NULL), _OMAP2420_MUXENTRY(GPIO_120, 120, "gpio_120", NULL, "sti_dout", "gpio_120", "cam_d9", "sys_boot1", "etk_d13", NULL), _OMAP2420_MUXENTRY(GPIO_121, 121, "gpio_121", NULL, NULL, "gpio_121", "jtag_emu2", "sys_boot2", "etk_d14", NULL), _OMAP2420_MUXENTRY(GPIO_122, 122, "gpio_122", NULL, NULL, "gpio_122", "jtag_emu3", "sys_boot3", "etk_d15", NULL), _OMAP2420_MUXENTRY(GPIO_124, 124, "gpio_124", NULL, NULL, "gpio_124", NULL, "sys_boot5", NULL, NULL), _OMAP2420_MUXENTRY(GPIO_125, 125, "gpio_125", "sys_jtagsel1", "sys_jtagsel2", "gpio_125", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPIO_36, 36, "gpio_36", NULL, NULL, "gpio_36", NULL, "sys_boot4", NULL, NULL), _OMAP2420_MUXENTRY(GPIO_62, 62, "gpio_62", "uart1_rx", "usb1_dat", "gpio_62", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPIO_6, 6, "gpio_6", "tv_detpulse", NULL, "gpio_6", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_A10, 3, "gpmc_a10", NULL, "sys_ndmareq5", "gpio_3", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_A1, 12, "gpmc_a1", "dss_data18", NULL, "gpio_12", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_A2, 11, "gpmc_a2", "dss_data19", NULL, "gpio_11", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_A3, 10, "gpmc_a3", "dss_data20", NULL, "gpio_10", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_A4, 9, "gpmc_a4", "dss_data21", NULL, "gpio_9", NULL, NULL, NULL, NULL), 
_OMAP2420_MUXENTRY(GPMC_A5, 8, "gpmc_a5", "dss_data22", NULL, "gpio_8", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_A6, 7, "gpmc_a6", "dss_data23", NULL, "gpio_7", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_A7, 6, "gpmc_a7", NULL, "sys_ndmareq2", "gpio_6", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_A8, 5, "gpmc_a8", NULL, "sys_ndmareq3", "gpio_5", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_A9, 4, "gpmc_a9", NULL, "sys_ndmareq4", "gpio_4", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_CLK, 21, "gpmc_clk", NULL, NULL, "gpio_21", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_D10, 18, "gpmc_d10", "ssi2_rdy_rx", NULL, "gpio_18", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_D11, 17, "gpmc_d11", "ssi2_flag_rx", NULL, "gpio_17", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_D12, 16, "gpmc_d12", "ssi2_dat_rx", NULL, "gpio_16", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_D13, 15, "gpmc_d13", "ssi2_rdy_tx", NULL, "gpio_15", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_D14, 14, "gpmc_d14", "ssi2_flag_tx", NULL, "gpio_14", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_D15, 13, "gpmc_d15", "ssi2_dat_tx", NULL, "gpio_13", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_D8, 20, "gpmc_d8", NULL, NULL, "gpio_20", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_D9, 19, "gpmc_d9", "ssi2_wake", NULL, "gpio_19", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_NBE0, 29, "gpmc_nbe0", NULL, NULL, "gpio_29", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_NBE1, 30, "gpmc_nbe1", NULL, NULL, "gpio_30", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_NCS1, 22, "gpmc_ncs1", NULL, NULL, "gpio_22", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_NCS2, 23, "gpmc_ncs2", NULL, NULL, "gpio_23", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_NCS3, 24, "gpmc_ncs3", "gpmc_io_dir", NULL, "gpio_24", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_NCS4, 25, "gpmc_ncs4", NULL, NULL, "gpio_25", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_NCS5, 26, 
"gpmc_ncs5", NULL, NULL, "gpio_26", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_NCS6, 27, "gpmc_ncs6", NULL, NULL, "gpio_27", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_NCS7, 28, "gpmc_ncs7", "gpmc_io_dir", "gpio_28", NULL, NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_NWP, 31, "gpmc_nwp", NULL, NULL, "gpio_31", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_WAIT1, 33, "gpmc_wait1", NULL, NULL, "gpio_33", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_WAIT2, 34, "gpmc_wait2", NULL, NULL, "gpio_34", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(GPMC_WAIT3, 35, "gpmc_wait3", NULL, NULL, "gpio_35", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(HDQ_SIO, 101, "hdq_sio", "usb2_tllse0", "sys_altclk", "gpio_101", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(I2C2_SCL, 99, "i2c2_scl", NULL, "gpt9_pwm_evt", "gpio_99", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(I2C2_SDA, 100, "i2c2_sda", NULL, "spi2_ncs1", "gpio_100", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(JTAG_EMU0, 127, "jtag_emu0", NULL, NULL, "gpio_127", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(JTAG_EMU1, 126, "jtag_emu1", NULL, NULL, "gpio_126", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(MCBSP1_CLKR, 92, "mcbsp1_clkr", "ssi2_dat_tx", "vlynq_tx1", "gpio_92", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(MCBSP1_CLKX, 98, "mcbsp1_clkx", "ssi2_wake", "vlynq_nla", "gpio_98", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(MCBSP1_DR, 95, "mcbsp1_dr", "ssi2_dat_rx", "vlynq_rx1", "gpio_95", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(MCBSP1_DX, 94, "mcbsp1_dx", "ssi2_rdy_tx", "vlynq_clk", "gpio_94", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(MCBSP1_FSR, 93, "mcbsp1_fsr", "ssi2_flag_tx", "vlynq_tx0", "gpio_93", "spi2_ncs1", NULL, NULL, NULL), _OMAP2420_MUXENTRY(MCBSP1_FSX, 97, "mcbsp1_fsx", "ssi2_rdy_rx", NULL, "gpio_97", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(MCBSP2_CLKX, 12, "mcbsp2_clkx", NULL, "dss_data23", "gpio_12", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(MCBSP2_DR, 11, "mcbsp2_dr", NULL, 
"dss_data22", "gpio_11", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(MCBSP_CLKS, 96, "mcbsp_clks", "ssi2_flag_rx", "vlynq_rx0", "gpio_96", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(MMC_CLKI, 59, "sdmmc_clki", "ms_clki", NULL, "gpio_59", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(MMC_CLKO, 0, "sdmmc_clko", "ms_clko", NULL, NULL, NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(MMC_CMD_DIR, 8, "sdmmc_cmd_dir", NULL, NULL, "gpio_8", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(MMC_CMD, 0, "sdmmc_cmd", "ms_bs", NULL, NULL, NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(MMC_DAT_DIR0, 7, "sdmmc_dat_dir0", "ms_dat0_dir", NULL, "gpio_7", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(MMC_DAT0, 0, "sdmmc_dat0", "ms_dat0", NULL, NULL, NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(MMC_DAT_DIR1, 78, "sdmmc_dat_dir1", "ms_datu_dir", "uart2_rts", "gpio_78", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(MMC_DAT1, 75, "sdmmc_dat1", "ms_dat1", NULL, "gpio_75", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(MMC_DAT_DIR2, 79, "sdmmc_dat_dir2", "ms_datu_dir", "uart2_tx", "gpio_79", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(MMC_DAT2, 76, "sdmmc_dat2", "ms_dat2", "uart2_cts", "gpio_76", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(MMC_DAT_DIR3, 80, "sdmmc_dat_dir3", "ms_datu_dir", "uart2_rx", "gpio_80", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(MMC_DAT3, 77, "sdmmc_dat3", "ms_dat3", NULL, "gpio_77", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SDRC_A12, 2, "sdrc_a12", NULL, NULL, "gpio_2", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SDRC_A13, 1, "sdrc_a13", NULL, NULL, "gpio_1", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SDRC_A14, 0, "sdrc_a14", NULL, NULL, "gpio_0", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SDRC_CKE1, 38, "sdrc_cke1", NULL, NULL, "gpio_38", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SDRC_NCS1, 37, "sdrc_ncs1", NULL, NULL, "gpio_37", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SPI1_CLK, 81, "spi1_clk", NULL, NULL, "gpio_81", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SPI1_NCS0, 
84, "spi1_ncs0", NULL, NULL, "gpio_84", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SPI1_NCS1, 85, "spi1_ncs1", NULL, NULL, "gpio_85", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SPI1_NCS2, 86, "spi1_ncs2", NULL, NULL, "gpio_86", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SPI1_NCS3, 87, "spi1_ncs3", NULL, NULL, "gpio_87", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SPI1_SIMO, 82, "spi1_simo", NULL, NULL, "gpio_82", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SPI1_SOMI, 83, "spi1_somi", NULL, NULL, "gpio_83", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SPI2_CLK, 88, "spi2_clk", NULL, NULL, "gpio_88", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SPI2_NCS0, 91, "spi2_ncs0", "gpt12_pwm_evt", NULL, "gpio_91", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SPI2_SIMO, 89, "spi2_simo", "gpt10_pwm_evt", NULL, "gpio_89", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SPI2_SOMI, 90, "spi2_somi", "gpt11_pwm_evt", NULL, "gpio_90", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SSI1_DAT_RX, 63, "ssi1_dat_rx", "eac_md_sclk", NULL, "gpio_63", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SSI1_DAT_TX, 59, "ssi1_dat_tx", "uart1_tx", "usb1_se0", "gpio_59", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SSI1_FLAG_RX, 64, "ssi1_flag_rx", "eac_md_din", NULL, "gpio_64", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SSI1_FLAG_TX, 25, "ssi1_flag_tx", "uart1_rts", "usb1_rcv", "gpio_25", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SSI1_RDY_RX, 65, "ssi1_rdy_rx", "eac_md_dout", NULL, "gpio_65", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SSI1_RDY_TX, 61, "ssi1_rdy_tx", "uart1_cts", "usb1_txen", "gpio_61", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SSI1_WAKE, 66, "ssi1_wake", "eac_md_fs", NULL, "gpio_66", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SYS_CLKOUT, 123, "sys_clkout", NULL, NULL, "gpio_123", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SYS_CLKREQ, 52, "sys_clkreq", NULL, NULL, "gpio_52", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(SYS_NIRQ, 60, "sys_nirq", NULL, NULL, "gpio_60", NULL, NULL, NULL, 
NULL), _OMAP2420_MUXENTRY(UART1_CTS, 32, "uart1_cts", NULL, "dss_data18", "gpio_32", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(UART1_RTS, 8, "uart1_rts", NULL, "dss_data19", "gpio_8", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(UART1_RX, 10, "uart1_rx", NULL, "dss_data21", "gpio_10", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(UART1_TX, 9, "uart1_tx", NULL, "dss_data20", "gpio_9", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(UART2_CTS, 67, "uart2_cts", "usb1_rcv", "gpt9_pwm_evt", "gpio_67", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(UART2_RTS, 68, "uart2_rts", "usb1_txen", "gpt10_pwm_evt", "gpio_68", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(UART2_RX, 70, "uart2_rx", "usb1_dat", "gpt12_pwm_evt", "gpio_70", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(UART2_TX, 69, "uart2_tx", "usb1_se0", "gpt11_pwm_evt", "gpio_69", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(UART3_CTS_RCTX, 102, "uart3_cts_rctx", "uart3_rx_irrx", NULL, "gpio_102", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(UART3_RTS_SD, 103, "uart3_rts_sd", "uart3_tx_irtx", NULL, "gpio_103", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(UART3_RX_IRRX, 105, "uart3_rx_irrx", NULL, NULL, "gpio_105", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(UART3_TX_IRTX, 104, "uart3_tx_irtx", "uart3_cts_rctx", NULL, "gpio_104", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(USB0_DAT, 112, "usb0_dat", "uart3_rx_irrx", "uart2_rx", "gpio_112", "uart2_tx", NULL, NULL, NULL), _OMAP2420_MUXENTRY(USB0_PUEN, 106, "usb0_puen", "mcbsp2_dx", NULL, "gpio_106", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(USB0_RCV, 109, "usb0_rcv", "mcbsp2_fsx", NULL, "gpio_109", "uart2_cts", NULL, NULL, NULL), _OMAP2420_MUXENTRY(USB0_SE0, 111, "usb0_se0", "uart3_tx_irtx", "uart2_tx", "gpio_111", "uart2_rx", NULL, NULL, NULL), _OMAP2420_MUXENTRY(USB0_TXEN, 110, "usb0_txen", "uart3_cts_rctx", "uart2_cts", "gpio_110", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(USB0_VM, 108, "usb0_vm", "mcbsp2_clkx", NULL, "gpio_108", "uart2_rx", NULL, NULL, NULL), 
_OMAP2420_MUXENTRY(USB0_VP, 107, "usb0_vp", "mcbsp2_dr", NULL, "gpio_107", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(VLYNQ_CLK, 13, "vlynq_clk", "usb2_se0", "sys_ndmareq0", "gpio_13", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(VLYNQ_NLA, 58, "vlynq_nla", NULL, NULL, "gpio_58", "cam_d6", NULL, NULL, NULL), _OMAP2420_MUXENTRY(VLYNQ_RX0, 15, "vlynq_rx0", "usb2_tllse0", NULL, "gpio_15", "cam_d7", NULL, NULL, NULL), _OMAP2420_MUXENTRY(VLYNQ_RX1, 14, "vlynq_rx1", "usb2_rcv", "sys_ndmareq1", "gpio_14", "cam_d8", NULL, NULL, NULL), _OMAP2420_MUXENTRY(VLYNQ_TX0, 17, "vlynq_tx0", "usb2_txen", NULL, "gpio_17", NULL, NULL, NULL, NULL), _OMAP2420_MUXENTRY(VLYNQ_TX1, 16, "vlynq_tx1", "usb2_dat", "sys_clkout2", "gpio_16", NULL, NULL, NULL, NULL), { .reg_offset = OMAP_MUX_TERMINATOR }, }; /* * Balls for 447-pin POP package */ #ifdef CONFIG_DEBUG_FS static struct omap_ball __initdata omap2420_pop_ball[] = { _OMAP2420_BALLENTRY(CAM_D0, "y4", NULL), _OMAP2420_BALLENTRY(CAM_D1, "y3", NULL), _OMAP2420_BALLENTRY(CAM_D2, "u7", NULL), _OMAP2420_BALLENTRY(CAM_D3, "ab3", NULL), _OMAP2420_BALLENTRY(CAM_D4, "v2", NULL), _OMAP2420_BALLENTRY(CAM_D5, "ad3", NULL), _OMAP2420_BALLENTRY(CAM_D6, "aa4", NULL), _OMAP2420_BALLENTRY(CAM_D7, "ab4", NULL), _OMAP2420_BALLENTRY(CAM_D8, "ac6", NULL), _OMAP2420_BALLENTRY(CAM_D9, "ac7", NULL), _OMAP2420_BALLENTRY(CAM_HS, "v4", NULL), _OMAP2420_BALLENTRY(CAM_LCLK, "ad6", NULL), _OMAP2420_BALLENTRY(CAM_VS, "p7", NULL), _OMAP2420_BALLENTRY(CAM_XCLK, "w4", NULL), _OMAP2420_BALLENTRY(DSS_ACBIAS, "ae8", NULL), _OMAP2420_BALLENTRY(DSS_DATA10, "ac12", NULL), _OMAP2420_BALLENTRY(DSS_DATA11, "ae11", NULL), _OMAP2420_BALLENTRY(DSS_DATA12, "ae13", NULL), _OMAP2420_BALLENTRY(DSS_DATA13, "ad13", NULL), _OMAP2420_BALLENTRY(DSS_DATA14, "ac13", NULL), _OMAP2420_BALLENTRY(DSS_DATA15, "y12", NULL), _OMAP2420_BALLENTRY(DSS_DATA16, "ad14", NULL), _OMAP2420_BALLENTRY(DSS_DATA17, "y13", NULL), _OMAP2420_BALLENTRY(DSS_DATA8, "ad11", NULL), _OMAP2420_BALLENTRY(DSS_DATA9, "ad12", 
NULL), _OMAP2420_BALLENTRY(EAC_AC_DIN, "ad19", NULL), _OMAP2420_BALLENTRY(EAC_AC_DOUT, "af22", NULL), _OMAP2420_BALLENTRY(EAC_AC_FS, "ad16", NULL), _OMAP2420_BALLENTRY(EAC_AC_MCLK, "y17", NULL), _OMAP2420_BALLENTRY(EAC_AC_RST, "ae22", NULL), _OMAP2420_BALLENTRY(EAC_AC_SCLK, "ac18", NULL), _OMAP2420_BALLENTRY(EAC_BT_DIN, "u8", NULL), _OMAP2420_BALLENTRY(EAC_BT_DOUT, "ad5", NULL), _OMAP2420_BALLENTRY(EAC_BT_FS, "w7", NULL), _OMAP2420_BALLENTRY(EAC_BT_SCLK, "ad4", NULL), _OMAP2420_BALLENTRY(GPIO_119, "af6", NULL), _OMAP2420_BALLENTRY(GPIO_120, "af4", NULL), _OMAP2420_BALLENTRY(GPIO_121, "ae6", NULL), _OMAP2420_BALLENTRY(GPIO_122, "w3", NULL), _OMAP2420_BALLENTRY(GPIO_124, "y19", NULL), _OMAP2420_BALLENTRY(GPIO_125, "ae24", NULL), _OMAP2420_BALLENTRY(GPIO_36, "y18", NULL), _OMAP2420_BALLENTRY(GPIO_6, "d6", NULL), _OMAP2420_BALLENTRY(GPIO_62, "ad18", NULL), _OMAP2420_BALLENTRY(GPMC_A1, "m8", NULL), _OMAP2420_BALLENTRY(GPMC_A10, "d5", NULL), _OMAP2420_BALLENTRY(GPMC_A2, "w9", NULL), _OMAP2420_BALLENTRY(GPMC_A3, "af10", NULL), _OMAP2420_BALLENTRY(GPMC_A4, "w8", NULL), _OMAP2420_BALLENTRY(GPMC_A5, "ae16", NULL), _OMAP2420_BALLENTRY(GPMC_A6, "af9", NULL), _OMAP2420_BALLENTRY(GPMC_A7, "e4", NULL), _OMAP2420_BALLENTRY(GPMC_A8, "j7", NULL), _OMAP2420_BALLENTRY(GPMC_A9, "ae18", NULL), _OMAP2420_BALLENTRY(GPMC_CLK, "p1", "l1"), _OMAP2420_BALLENTRY(GPMC_D10, "t1", "n1"), _OMAP2420_BALLENTRY(GPMC_D11, "u2", "p2"), _OMAP2420_BALLENTRY(GPMC_D12, "u1", "p1"), _OMAP2420_BALLENTRY(GPMC_D13, "p2", "m1"), _OMAP2420_BALLENTRY(GPMC_D14, "h2", "j2"), _OMAP2420_BALLENTRY(GPMC_D15, "h1", "k2"), _OMAP2420_BALLENTRY(GPMC_D8, "v1", "r1"), _OMAP2420_BALLENTRY(GPMC_D9, "y1", "t1"), _OMAP2420_BALLENTRY(GPMC_NBE0, "af12", "aa10"), _OMAP2420_BALLENTRY(GPMC_NBE1, "u3", NULL), _OMAP2420_BALLENTRY(GPMC_NCS1, "af14", "w1"), _OMAP2420_BALLENTRY(GPMC_NCS2, "g4", NULL), _OMAP2420_BALLENTRY(GPMC_NCS3, "t8", NULL), _OMAP2420_BALLENTRY(GPMC_NCS4, "h8", NULL), _OMAP2420_BALLENTRY(GPMC_NCS5, "k3", NULL), 
_OMAP2420_BALLENTRY(GPMC_NCS6, "m7", NULL), _OMAP2420_BALLENTRY(GPMC_NCS7, "p3", NULL), _OMAP2420_BALLENTRY(GPMC_NWP, "ae15", "y5"), _OMAP2420_BALLENTRY(GPMC_WAIT1, "ae20", "y8"), _OMAP2420_BALLENTRY(GPMC_WAIT2, "n2", NULL), _OMAP2420_BALLENTRY(GPMC_WAIT3, "t4", NULL), _OMAP2420_BALLENTRY(HDQ_SIO, "t23", NULL), _OMAP2420_BALLENTRY(I2C2_SCL, "l2", NULL), _OMAP2420_BALLENTRY(I2C2_SDA, "k19", NULL), _OMAP2420_BALLENTRY(JTAG_EMU0, "n24", NULL), _OMAP2420_BALLENTRY(JTAG_EMU1, "ac22", NULL), _OMAP2420_BALLENTRY(MCBSP1_CLKR, "y24", NULL), _OMAP2420_BALLENTRY(MCBSP1_CLKX, "t19", NULL), _OMAP2420_BALLENTRY(MCBSP1_DR, "u23", NULL), _OMAP2420_BALLENTRY(MCBSP1_DX, "r24", NULL), _OMAP2420_BALLENTRY(MCBSP1_FSR, "r20", NULL), _OMAP2420_BALLENTRY(MCBSP1_FSX, "r23", NULL), _OMAP2420_BALLENTRY(MCBSP2_CLKX, "t24", NULL), _OMAP2420_BALLENTRY(MCBSP2_DR, "p20", NULL), _OMAP2420_BALLENTRY(MCBSP_CLKS, "p23", NULL), _OMAP2420_BALLENTRY(MMC_CLKI, "c23", NULL), _OMAP2420_BALLENTRY(MMC_CLKO, "h23", NULL), _OMAP2420_BALLENTRY(MMC_CMD, "j23", NULL), _OMAP2420_BALLENTRY(MMC_CMD_DIR, "j24", NULL), _OMAP2420_BALLENTRY(MMC_DAT0, "h17", NULL), _OMAP2420_BALLENTRY(MMC_DAT_DIR0, "f23", NULL), _OMAP2420_BALLENTRY(MMC_DAT1, "g19", NULL), _OMAP2420_BALLENTRY(MMC_DAT_DIR1, "d23", NULL), _OMAP2420_BALLENTRY(MMC_DAT2, "h20", NULL), _OMAP2420_BALLENTRY(MMC_DAT_DIR2, "g23", NULL), _OMAP2420_BALLENTRY(MMC_DAT3, "d24", NULL), _OMAP2420_BALLENTRY(MMC_DAT_DIR3, "e23", NULL), _OMAP2420_BALLENTRY(SDRC_A12, "w26", "r21"), _OMAP2420_BALLENTRY(SDRC_A13, "w25", "aa15"), _OMAP2420_BALLENTRY(SDRC_A14, "aa26", "y12"), _OMAP2420_BALLENTRY(SDRC_CKE1, "ae25", "y13"), _OMAP2420_BALLENTRY(SDRC_NCS1, "y25", "t20"), _OMAP2420_BALLENTRY(SPI1_CLK, "y23", NULL), _OMAP2420_BALLENTRY(SPI1_NCS0, "w24", NULL), _OMAP2420_BALLENTRY(SPI1_NCS1, "w23", NULL), _OMAP2420_BALLENTRY(SPI1_NCS2, "v23", NULL), _OMAP2420_BALLENTRY(SPI1_NCS3, "u20", NULL), _OMAP2420_BALLENTRY(SPI1_SIMO, "h10", NULL), _OMAP2420_BALLENTRY(SPI1_SOMI, "v19", NULL), 
_OMAP2420_BALLENTRY(SPI2_CLK, "v24", NULL), _OMAP2420_BALLENTRY(SPI2_NCS0, "aa24", NULL), _OMAP2420_BALLENTRY(SPI2_SIMO, "u24", NULL), _OMAP2420_BALLENTRY(SPI2_SOMI, "v25", NULL), _OMAP2420_BALLENTRY(SSI1_DAT_RX, "w15", NULL), _OMAP2420_BALLENTRY(SSI1_DAT_TX, "w13", NULL), _OMAP2420_BALLENTRY(SSI1_FLAG_RX, "af11", NULL), _OMAP2420_BALLENTRY(SSI1_FLAG_TX, "ac15", NULL), _OMAP2420_BALLENTRY(SSI1_RDY_RX, "ac16", NULL), _OMAP2420_BALLENTRY(SSI1_RDY_TX, "af15", NULL), _OMAP2420_BALLENTRY(SSI1_WAKE, "ad15", NULL), _OMAP2420_BALLENTRY(SYS_CLKOUT, "ae19", NULL), _OMAP2420_BALLENTRY(SYS_CLKREQ, "ad20", NULL), _OMAP2420_BALLENTRY(SYS_NIRQ, "y20", NULL), _OMAP2420_BALLENTRY(UART1_CTS, "g20", NULL), _OMAP2420_BALLENTRY(UART1_RTS, "k20", NULL), _OMAP2420_BALLENTRY(UART1_RX, "t20", NULL), _OMAP2420_BALLENTRY(UART1_TX, "h12", NULL), _OMAP2420_BALLENTRY(UART2_CTS, "ac24", NULL), _OMAP2420_BALLENTRY(UART2_RTS, "w20", NULL), _OMAP2420_BALLENTRY(UART2_RX, "ad24", NULL), _OMAP2420_BALLENTRY(UART2_TX, "ab24", NULL), _OMAP2420_BALLENTRY(UART3_CTS_RCTX, "k24", NULL), _OMAP2420_BALLENTRY(UART3_RTS_SD, "m20", NULL), _OMAP2420_BALLENTRY(UART3_RX_IRRX, "h24", NULL), _OMAP2420_BALLENTRY(UART3_TX_IRTX, "g24", NULL), _OMAP2420_BALLENTRY(USB0_DAT, "j25", NULL), _OMAP2420_BALLENTRY(USB0_PUEN, "l23", NULL), _OMAP2420_BALLENTRY(USB0_RCV, "k23", NULL), _OMAP2420_BALLENTRY(USB0_SE0, "l24", NULL), _OMAP2420_BALLENTRY(USB0_TXEN, "m24", NULL), _OMAP2420_BALLENTRY(USB0_VM, "n23", NULL), _OMAP2420_BALLENTRY(USB0_VP, "m23", NULL), _OMAP2420_BALLENTRY(VLYNQ_CLK, "w12", NULL), _OMAP2420_BALLENTRY(VLYNQ_NLA, "ae10", NULL), _OMAP2420_BALLENTRY(VLYNQ_RX0, "ad7", NULL), _OMAP2420_BALLENTRY(VLYNQ_RX1, "w10", NULL), _OMAP2420_BALLENTRY(VLYNQ_TX0, "y15", NULL), _OMAP2420_BALLENTRY(VLYNQ_TX1, "w14", NULL), { .reg_offset = OMAP_MUX_TERMINATOR }, }; #else #define omap2420_pop_ball NULL #endif int __init omap2420_mux_init(struct omap_board_mux *board_subset, int flags) { struct omap_ball *package_balls = NULL; switch 
(flags & OMAP_PACKAGE_MASK) { case OMAP_PACKAGE_ZAC: package_balls = omap2420_pop_ball; break; case OMAP_PACKAGE_ZAF: /* REVISIT: Please add data */ default: pr_warning("%s: No ball data available for omap2420 package\n", __func__); } return omap_mux_init("core", OMAP_MUX_REG_8BIT | OMAP_MUX_GPIO_IN_MODE3, OMAP2420_CONTROL_PADCONF_MUX_PBASE, OMAP2420_CONTROL_PADCONF_MUX_SIZE, omap2420_muxmodes, NULL, board_subset, package_balls); }
gpl-2.0
CyanHacker-Lollipop/kernel_sony_msm8974pro
drivers/input/keyboard/atakbd.c
11335
6416
/* * atakbd.c * * Copyright (c) 2005 Michael Schmitz * * Based on amikbd.c, which is * * Copyright (c) 2000-2001 Vojtech Pavlik * * Based on the work of: * Hamish Macdonald */ /* * Atari keyboard driver for Linux/m68k * * The low level init and interrupt stuff is handled in arch/mm68k/atari/atakeyb.c * (the keyboard ACIA also handles the mouse and joystick data, and the keyboard * interrupt is shared with the MIDI ACIA so MIDI data also get handled there). * This driver only deals with handing key events off to the input layer. */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/module.h> #include <linux/init.h> #include <linux/input.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <asm/atariints.h> #include <asm/atarihw.h> #include <asm/atarikb.h> #include <asm/irq.h> MODULE_AUTHOR("Michael Schmitz <schmitz@biophys.uni-duesseldorf.de>"); MODULE_DESCRIPTION("Atari keyboard driver"); MODULE_LICENSE("GPL"); /* 0x47: KP_7 71 0x48: KP_8 72 0x49: KP_9 73 0x62: KP_/ 98 0x4b: KP_4 75 0x4c: KP_5 76 0x4d: KP_6 77 0x37: KP_* 55 0x4f: KP_1 79 0x50: KP_2 80 0x51: KP_3 81 0x4a: KP_- 74 0x52: KP_0 82 0x53: KP_. 83 0x4e: KP_+ 78 0x67: Up 103 0x6c: Down 108 0x69: Left 105 0x6a: Right 106 */ static unsigned char atakbd_keycode[0x72] = { /* American layout */ [0] = KEY_GRAVE, [1] = KEY_ESC, [2] = KEY_1, [3] = KEY_2, [4] = KEY_3, [5] = KEY_4, [6] = KEY_5, [7] = KEY_6, [8] = KEY_7, [9] = KEY_8, [10] = KEY_9, [11] = KEY_0, [12] = KEY_MINUS, [13] = KEY_EQUAL, [14] = KEY_BACKSPACE, [15] = KEY_TAB, [16] = KEY_Q, [17] = KEY_W, [18] = KEY_E, [19] = KEY_R, [20] = KEY_T, [21] = KEY_Y, [22] = KEY_U, [23] = KEY_I, [24] = KEY_O, [25] = KEY_P, [26] = KEY_LEFTBRACE, [27] = KEY_RIGHTBRACE, [28] = KEY_ENTER, [29] = KEY_LEFTCTRL, [30] = KEY_A, [31] = KEY_S, [32] = KEY_D, [33] = KEY_F, [34] = KEY_G, [35] = KEY_H, [36] = KEY_J, [37] = KEY_K, [38] = KEY_L, [39] = KEY_SEMICOLON, [40] = KEY_APOSTROPHE, [41] = KEY_BACKSLASH, /* FIXME, '#' */ [42] = KEY_LEFTSHIFT, [43] = KEY_GRAVE, /* FIXME: '~' */ [44] = KEY_Z, [45] = KEY_X, [46] = KEY_C, [47] = KEY_V, [48] = KEY_B, [49] = KEY_N, [50] = KEY_M, [51] = KEY_COMMA, [52] = KEY_DOT, [53] = 
KEY_SLASH, [54] = KEY_RIGHTSHIFT, [55] = KEY_KPASTERISK, [56] = KEY_LEFTALT, [57] = KEY_SPACE, [58] = KEY_CAPSLOCK, [59] = KEY_F1, [60] = KEY_F2, [61] = KEY_F3, [62] = KEY_F4, [63] = KEY_F5, [64] = KEY_F6, [65] = KEY_F7, [66] = KEY_F8, [67] = KEY_F9, [68] = KEY_F10, [69] = KEY_ESC, [70] = KEY_DELETE, [71] = KEY_KP7, [72] = KEY_KP8, [73] = KEY_KP9, [74] = KEY_KPMINUS, [75] = KEY_KP4, [76] = KEY_KP5, [77] = KEY_KP6, [78] = KEY_KPPLUS, [79] = KEY_KP1, [80] = KEY_KP2, [81] = KEY_KP3, [82] = KEY_KP0, [83] = KEY_KPDOT, [90] = KEY_KPLEFTPAREN, [91] = KEY_KPRIGHTPAREN, [92] = KEY_KPASTERISK, /* FIXME */ [93] = KEY_KPASTERISK, [94] = KEY_KPPLUS, [95] = KEY_HELP, [96] = KEY_BACKSLASH, /* FIXME: '<' */ [97] = KEY_KPASTERISK, /* FIXME */ [98] = KEY_KPSLASH, [99] = KEY_KPLEFTPAREN, [100] = KEY_KPRIGHTPAREN, [101] = KEY_KPSLASH, [102] = KEY_KPASTERISK, [103] = KEY_UP, [104] = KEY_KPASTERISK, /* FIXME */ [105] = KEY_LEFT, [106] = KEY_RIGHT, [107] = KEY_KPASTERISK, /* FIXME */ [108] = KEY_DOWN, [109] = KEY_KPASTERISK, /* FIXME */ [110] = KEY_KPASTERISK, /* FIXME */ [111] = KEY_KPASTERISK, /* FIXME */ [112] = KEY_KPASTERISK, /* FIXME */ [113] = KEY_KPASTERISK /* FIXME */ }; static struct input_dev *atakbd_dev; static void atakbd_interrupt(unsigned char scancode, char down) { if (scancode < 0x72) { /* scancodes < 0xf2 are keys */ // report raw events here? 
scancode = atakbd_keycode[scancode]; if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */ input_report_key(atakbd_dev, scancode, 1); input_report_key(atakbd_dev, scancode, 0); input_sync(atakbd_dev); } else { input_report_key(atakbd_dev, scancode, down); input_sync(atakbd_dev); } } else /* scancodes >= 0xf2 are mouse data, most likely */ printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode); return; } static int __init atakbd_init(void) { int i, error; if (!MACH_IS_ATARI || !ATARIHW_PRESENT(ST_MFP)) return -ENODEV; // need to init core driver if not already done so error = atari_keyb_init(); if (error) return error; atakbd_dev = input_allocate_device(); if (!atakbd_dev) return -ENOMEM; atakbd_dev->name = "Atari Keyboard"; atakbd_dev->phys = "atakbd/input0"; atakbd_dev->id.bustype = BUS_HOST; atakbd_dev->id.vendor = 0x0001; atakbd_dev->id.product = 0x0001; atakbd_dev->id.version = 0x0100; atakbd_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); atakbd_dev->keycode = atakbd_keycode; atakbd_dev->keycodesize = sizeof(unsigned char); atakbd_dev->keycodemax = ARRAY_SIZE(atakbd_keycode); for (i = 1; i < 0x72; i++) { set_bit(atakbd_keycode[i], atakbd_dev->keybit); } /* error check */ error = input_register_device(atakbd_dev); if (error) { input_free_device(atakbd_dev); return error; } atari_input_keyboard_interrupt_hook = atakbd_interrupt; return 0; } static void __exit atakbd_exit(void) { atari_input_keyboard_interrupt_hook = NULL; input_unregister_device(atakbd_dev); } module_init(atakbd_init); module_exit(atakbd_exit);
gpl-2.0
KylinUI/android_kernel_samsung_hlte
drivers/media/video/cx18/cx18-audio.c
14407
2589
/* * cx18 audio-related functions * * Derived from ivtv-audio.c * * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA * 02111-1307 USA */ #include "cx18-driver.h" #include "cx18-io.h" #include "cx18-cards.h" #include "cx18-audio.h" #define CX18_AUDIO_ENABLE 0xc72014 #define CX18_AI1_MUX_MASK 0x30 #define CX18_AI1_MUX_I2S1 0x00 #define CX18_AI1_MUX_I2S2 0x10 #define CX18_AI1_MUX_843_I2S 0x20 /* Selects the audio input and output according to the current settings. 
*/ int cx18_audio_set_io(struct cx18 *cx) { const struct cx18_card_audio_input *in; u32 u, v; int err; /* Determine which input to use */ if (test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) in = &cx->card->radio_input; else in = &cx->card->audio_inputs[cx->audio_input]; /* handle muxer chips */ v4l2_subdev_call(cx->sd_extmux, audio, s_routing, (u32) in->muxer_input, 0, 0); err = cx18_call_hw_err(cx, cx->card->hw_audio_ctrl, audio, s_routing, in->audio_input, 0, 0); if (err) return err; /* FIXME - this internal mux should be abstracted to a subdev */ u = cx18_read_reg(cx, CX18_AUDIO_ENABLE); v = u & ~CX18_AI1_MUX_MASK; switch (in->audio_input) { case CX18_AV_AUDIO_SERIAL1: v |= CX18_AI1_MUX_I2S1; break; case CX18_AV_AUDIO_SERIAL2: v |= CX18_AI1_MUX_I2S2; break; default: v |= CX18_AI1_MUX_843_I2S; break; } if (v == u) { /* force a toggle of some AI1 MUX control bits */ u &= ~CX18_AI1_MUX_MASK; switch (in->audio_input) { case CX18_AV_AUDIO_SERIAL1: u |= CX18_AI1_MUX_843_I2S; break; case CX18_AV_AUDIO_SERIAL2: u |= CX18_AI1_MUX_843_I2S; break; default: u |= CX18_AI1_MUX_I2S1; break; } cx18_write_reg_expect(cx, u | 0xb00, CX18_AUDIO_ENABLE, u, CX18_AI1_MUX_MASK); } cx18_write_reg_expect(cx, v | 0xb00, CX18_AUDIO_ENABLE, v, CX18_AI1_MUX_MASK); return 0; }
gpl-2.0
davidmueller13/kernel_samsung_trelte
drivers/media/pci/cx18/cx18-audio.c
14407
2589
/* * cx18 audio-related functions * * Derived from ivtv-audio.c * * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA * 02111-1307 USA */ #include "cx18-driver.h" #include "cx18-io.h" #include "cx18-cards.h" #include "cx18-audio.h" #define CX18_AUDIO_ENABLE 0xc72014 #define CX18_AI1_MUX_MASK 0x30 #define CX18_AI1_MUX_I2S1 0x00 #define CX18_AI1_MUX_I2S2 0x10 #define CX18_AI1_MUX_843_I2S 0x20 /* Selects the audio input and output according to the current settings. 
*/ int cx18_audio_set_io(struct cx18 *cx) { const struct cx18_card_audio_input *in; u32 u, v; int err; /* Determine which input to use */ if (test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) in = &cx->card->radio_input; else in = &cx->card->audio_inputs[cx->audio_input]; /* handle muxer chips */ v4l2_subdev_call(cx->sd_extmux, audio, s_routing, (u32) in->muxer_input, 0, 0); err = cx18_call_hw_err(cx, cx->card->hw_audio_ctrl, audio, s_routing, in->audio_input, 0, 0); if (err) return err; /* FIXME - this internal mux should be abstracted to a subdev */ u = cx18_read_reg(cx, CX18_AUDIO_ENABLE); v = u & ~CX18_AI1_MUX_MASK; switch (in->audio_input) { case CX18_AV_AUDIO_SERIAL1: v |= CX18_AI1_MUX_I2S1; break; case CX18_AV_AUDIO_SERIAL2: v |= CX18_AI1_MUX_I2S2; break; default: v |= CX18_AI1_MUX_843_I2S; break; } if (v == u) { /* force a toggle of some AI1 MUX control bits */ u &= ~CX18_AI1_MUX_MASK; switch (in->audio_input) { case CX18_AV_AUDIO_SERIAL1: u |= CX18_AI1_MUX_843_I2S; break; case CX18_AV_AUDIO_SERIAL2: u |= CX18_AI1_MUX_843_I2S; break; default: u |= CX18_AI1_MUX_I2S1; break; } cx18_write_reg_expect(cx, u | 0xb00, CX18_AUDIO_ENABLE, u, CX18_AI1_MUX_MASK); } cx18_write_reg_expect(cx, v | 0xb00, CX18_AUDIO_ENABLE, v, CX18_AI1_MUX_MASK); return 0; }
gpl-2.0
rodrigues-daniel/linux
drivers/media/pci/cx18/cx18-audio.c
14407
2589
/* * cx18 audio-related functions * * Derived from ivtv-audio.c * * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA * 02111-1307 USA */ #include "cx18-driver.h" #include "cx18-io.h" #include "cx18-cards.h" #include "cx18-audio.h" #define CX18_AUDIO_ENABLE 0xc72014 #define CX18_AI1_MUX_MASK 0x30 #define CX18_AI1_MUX_I2S1 0x00 #define CX18_AI1_MUX_I2S2 0x10 #define CX18_AI1_MUX_843_I2S 0x20 /* Selects the audio input and output according to the current settings. 
*/ int cx18_audio_set_io(struct cx18 *cx) { const struct cx18_card_audio_input *in; u32 u, v; int err; /* Determine which input to use */ if (test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) in = &cx->card->radio_input; else in = &cx->card->audio_inputs[cx->audio_input]; /* handle muxer chips */ v4l2_subdev_call(cx->sd_extmux, audio, s_routing, (u32) in->muxer_input, 0, 0); err = cx18_call_hw_err(cx, cx->card->hw_audio_ctrl, audio, s_routing, in->audio_input, 0, 0); if (err) return err; /* FIXME - this internal mux should be abstracted to a subdev */ u = cx18_read_reg(cx, CX18_AUDIO_ENABLE); v = u & ~CX18_AI1_MUX_MASK; switch (in->audio_input) { case CX18_AV_AUDIO_SERIAL1: v |= CX18_AI1_MUX_I2S1; break; case CX18_AV_AUDIO_SERIAL2: v |= CX18_AI1_MUX_I2S2; break; default: v |= CX18_AI1_MUX_843_I2S; break; } if (v == u) { /* force a toggle of some AI1 MUX control bits */ u &= ~CX18_AI1_MUX_MASK; switch (in->audio_input) { case CX18_AV_AUDIO_SERIAL1: u |= CX18_AI1_MUX_843_I2S; break; case CX18_AV_AUDIO_SERIAL2: u |= CX18_AI1_MUX_843_I2S; break; default: u |= CX18_AI1_MUX_I2S1; break; } cx18_write_reg_expect(cx, u | 0xb00, CX18_AUDIO_ENABLE, u, CX18_AI1_MUX_MASK); } cx18_write_reg_expect(cx, v | 0xb00, CX18_AUDIO_ENABLE, v, CX18_AI1_MUX_MASK); return 0; }
gpl-2.0
akopytov/percona-xtrabackup
extra/yassl/examples/client/client.cpp
72
4413
/* Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING. If not, write to the
   Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
   MA  02110-1301  USA. */

/* client.cpp */

#include "../../testsuite/test.hpp"

#include <cstdio>   /* snprintf */
#include <cstring>  /* strlen */

//#define TEST_RESUME


/* Release the SSL context/session, close the socket and abort with msg. */
void ClientError(SSL_CTX* ctx, SSL* ssl, SOCKET_T& sockfd, const char* msg)
{
    SSL_CTX_free(ctx);
    SSL_free(ssl);
    tcp_close(sockfd);
    err_sys(msg);
}


#ifdef NON_BLOCKING
/* Retry SSL_connect on a non-blocking socket until it completes or fails
 * with an error other than WANT_READ/WANT_WRITE.  Polls once per second. */
void NonBlockingSSL_Connect(SSL* ssl, SSL_CTX* ctx, SOCKET_T& sockfd)
{
    int ret = SSL_connect(ssl);
    int err = SSL_get_error(ssl, 0);

    while (ret != SSL_SUCCESS &&
           (err == SSL_ERROR_WANT_READ || err == SSL_ERROR_WANT_WRITE)) {
        if (err == SSL_ERROR_WANT_READ)
            printf("... client would read block\n");
        else
            printf("... client would write block\n");
#ifdef _WIN32
        Sleep(1000);
#else
        sleep(1);
#endif
        ret = SSL_connect(ssl);
        err = SSL_get_error(ssl, 0);
    }

    if (ret != SSL_SUCCESS)
        ClientError(ctx, ssl, sockfd, "SSL_connect failed");
}
#endif


/*
 * Example TLS client: connect, print peer and cipher information,
 * exchange one message with the echo server, optionally test session
 * resumption, and store the exit status in args->return_code.
 */
void client_test(void* args)
{
#ifdef _WIN32
    WSADATA wsd;
    WSAStartup(0x0002, &wsd);
#endif

    SOCKET_T sockfd = 0;
    int      argc = 0;
    char**   argv = 0;

    set_args(argc, argv, *static_cast<func_args*>(args));
    tcp_connect(sockfd);
#ifdef NON_BLOCKING
    tcp_set_nonblocking(sockfd);
#endif

    SSL_METHOD* method = TLSv1_client_method();
    SSL_CTX*    ctx = SSL_CTX_new(method);

    set_certs(ctx);
    SSL* ssl = SSL_new(ctx);

    SSL_set_fd(ssl, sockfd);

#ifdef NON_BLOCKING
    NonBlockingSSL_Connect(ssl, ctx, sockfd);
#else
    // if you get an error here see note at top of README
    if (SSL_connect(ssl) != SSL_SUCCESS)
        ClientError(ctx, ssl, sockfd, "SSL_connect failed");
#endif
    showPeer(ssl);

    const char* cipher = 0;
    int         index = 0;
    char        list[1024];
    size_t      used = 0;

    /* Build "cipherlist:<suite>:<suite>..." with explicit bounds checks.
     * BUGFIX: the previous strncat(list, cipher, strlen(cipher) + 1)
     * appended without regard for the remaining room in list[], which
     * could overflow the buffer for a long cipher suite list. */
    used += static_cast<size_t>(snprintf(list, sizeof(list), "cipherlist"));
    while ( (cipher = SSL_get_cipher_list(ssl, index++)) ) {
        int n = snprintf(list + used, sizeof(list) - used, ":%s", cipher);
        if (n < 0 || static_cast<size_t>(n) >= sizeof(list) - used)
            break;      /* out of room: stop instead of overflowing */
        used += static_cast<size_t>(n);
    }
    printf("%s\n", list);
    printf("Using Cipher Suite: %s\n", SSL_get_cipher(ssl));

    char msg[] = "hello yassl!";
    if (SSL_write(ssl, msg, sizeof(msg)) != sizeof(msg))
        ClientError(ctx, ssl, sockfd, "SSL_write failed");

    char reply[1024];
    /* BUGFIX: read at most sizeof(reply) - 1 so the NUL terminator we
     * append below cannot write one byte past the end of the buffer. */
    int input = SSL_read(ssl, reply, sizeof(reply) - 1);
    if (input > 0) {
        reply[input] = 0;
        printf("Server response: %s\n", reply);
    }

#ifdef TEST_RESUME
    SSL_SESSION* session   = SSL_get_session(ssl);
    SSL*         sslResume = SSL_new(ctx);
#endif

    SSL_shutdown(ssl);
    SSL_free(ssl);
    tcp_close(sockfd);

#ifdef TEST_RESUME
    tcp_connect(sockfd);
    SSL_set_fd(sslResume, sockfd);
    SSL_set_session(sslResume, session);

    if (SSL_connect(sslResume) != SSL_SUCCESS)
        ClientError(ctx, sslResume, sockfd, "SSL_resume failed");
    showPeer(sslResume);

    if (SSL_write(sslResume, msg, sizeof(msg)) != sizeof(msg))
        ClientError(ctx, sslResume, sockfd, "SSL_write failed");

    /* same bounded read as above */
    input = SSL_read(sslResume, reply, sizeof(reply) - 1);
    if (input > 0) {
        reply[input] = 0;
        printf("Server response: %s\n", reply);
    }

    SSL_shutdown(sslResume);
    SSL_free(sslResume);
    tcp_close(sockfd);
#endif // TEST_RESUME

    SSL_CTX_free(ctx);
    ((func_args*)args)->return_code = 0;
}


#ifndef NO_MAIN_DRIVER

int main(int argc, char** argv)
{
    func_args args;

    args.argc = argc;
    args.argv = argv;

    client_test(&args);
    yaSSL_CleanUp();

    return args.return_code;
}

#endif // NO_MAIN_DRIVER
gpl-2.0
jiankangshiye/U-Boot-1.3.4-9G45
board/siemens/CCM/flash.c
72
13732
/*
 * (C) Copyright 2001
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <common.h>
#include <mpc8xx.h>

flash_info_t	flash_info[CFG_MAX_FLASH_BANKS]; /* info for FLASH chips	*/

/*-----------------------------------------------------------------------
 * Functions
 */
static ulong flash_get_size (vu_long *addr, flash_info_t *info);
static int write_word (flash_info_t *info, ulong dest, ulong data);
static void flash_get_offsets (ulong base, flash_info_t *info);

/*-----------------------------------------------------------------------
 * Probe both flash banks, program the memory controller for the real
 * sizes, protect the monitor region, and return the total flash size.
 */
unsigned long flash_init (void)
{
	volatile immap_t     *immap  = (immap_t *)CFG_IMMR;
	volatile memctl8xx_t *memctl = &immap->im_memctl;
	unsigned long size_b0, size_b1;
	int i;

	/* Init: no FLASHes known */
	for (i=0; i<CFG_MAX_FLASH_BANKS; ++i) {
		flash_info[i].flash_id = FLASH_UNKNOWN;
	}

	/* Static FLASH Bank configuration here - FIXME XXX */

	size_b0 = flash_get_size((vu_long *)FLASH_BASE0_PRELIM, &flash_info[0]);

	if (flash_info[0].flash_id == FLASH_UNKNOWN) {
		/* BUGFIX: bytes -> MB is a right shift; "<< 20" printed
		 * garbage sizes (cf. flash_print_info which uses >> 20) */
		printf ("## Unknown FLASH on Bank 0 - Size = 0x%08lx = %ld MB\n",
			size_b0, size_b0 >> 20);
	}

	size_b1 = flash_get_size((vu_long *)FLASH_BASE1_PRELIM, &flash_info[1]);

	if (size_b1 > size_b0) {
		printf ("## ERROR: "
			"Bank 1 (0x%08lx = %ld MB) > Bank 0 (0x%08lx = %ld MB)\n",
			size_b1, size_b1 >> 20,
			size_b0, size_b0 >> 20
		);
		flash_info[0].flash_id	= FLASH_UNKNOWN;
		flash_info[1].flash_id	= FLASH_UNKNOWN;
		flash_info[0].sector_count	= -1;
		flash_info[1].sector_count	= -1;
		flash_info[0].size	= 0;
		flash_info[1].size	= 0;
		return (0);
	}

	/* Remap FLASH according to real size */
	memctl->memc_or0 = CFG_OR_TIMING_FLASH | (-size_b0 & 0xFFFF8000);
	memctl->memc_br0 = (CFG_FLASH_BASE & BR_BA_MSK) | BR_MS_GPCM | BR_V;

	/* Re-do sizing to get full correct info */
	size_b0 = flash_get_size((vu_long *)CFG_FLASH_BASE, &flash_info[0]);

	flash_get_offsets (CFG_FLASH_BASE, &flash_info[0]);

#if CFG_MONITOR_BASE >= CFG_FLASH_BASE
	/* monitor protection ON by default */
	flash_protect(FLAG_PROTECT_SET,
		      CFG_MONITOR_BASE,
		      CFG_MONITOR_BASE+monitor_flash_len-1,
		      &flash_info[0]);
#endif

	if (size_b1) {
		memctl->memc_or1 = CFG_OR_TIMING_FLASH | (-size_b1 & 0xFFFF8000);
		memctl->memc_br1 = ((CFG_FLASH_BASE + size_b0) & BR_BA_MSK) |
				    BR_MS_GPCM | BR_V;

		/* Re-do sizing to get full correct info */
		size_b1 = flash_get_size((vu_long *)(CFG_FLASH_BASE + size_b0),
					  &flash_info[1]);

		flash_get_offsets (CFG_FLASH_BASE + size_b0, &flash_info[1]);

#if CFG_MONITOR_BASE >= CFG_FLASH_BASE
		/* monitor protection ON by default */
		flash_protect(FLAG_PROTECT_SET,
			      CFG_MONITOR_BASE,
			      CFG_MONITOR_BASE+monitor_flash_len-1,
			      &flash_info[1]);
#endif
	} else {
		memctl->memc_br1 = 0;	/* invalidate bank */

		flash_info[1].flash_id = FLASH_UNKNOWN;
		flash_info[1].sector_count = -1;
	}

	flash_info[0].size = size_b0;
	flash_info[1].size = size_b1;

	return (size_b0 + size_b1);
}

/*-----------------------------------------------------------------------
 * Fill info->start[] with the sector base addresses, handling both
 * bottom- and top-boot-block chip layouts.
 */
static void flash_get_offsets (ulong base, flash_info_t *info)
{
	int i;

	/* set up sector start address table */
	if (info->flash_id & FLASH_BTYPE) {
		/* set sector offsets for bottom boot block type	*/
		info->start[0] = base + 0x00000000;
		info->start[1] = base + 0x00008000;
		info->start[2] = base + 0x0000C000;
		info->start[3] = base + 0x00010000;
		for (i = 4; i < info->sector_count; i++) {
			info->start[i] = base + (i * 0x00020000) - 0x00060000;
		}
	} else {
		/* set sector offsets for top boot block type		*/
		i = info->sector_count - 1;
		info->start[i--] = base + info->size - 0x00008000;
		info->start[i--] = base + info->size - 0x0000C000;
		info->start[i--] = base + info->size - 0x00010000;
		for (; i >= 0; i--) {
			info->start[i] = base + i * 0x00020000;
		}
	}
}

/*-----------------------------------------------------------------------
 * Print vendor, device type, size and sector map of a flash bank.
 */
void flash_print_info  (flash_info_t *info)
{
	int i;

	if (info->flash_id == FLASH_UNKNOWN) {
		printf ("missing or unknown FLASH type\n");
		return;
	}

	switch (info->flash_id & FLASH_VENDMASK) {
	case FLASH_MAN_AMD:	printf ("AMD ");		break;
	case FLASH_MAN_FUJ:	printf ("FUJITSU ");		break;
	default:		printf ("Unknown Vendor ");	break;
	}

	switch (info->flash_id & FLASH_TYPEMASK) {
	case FLASH_AM400B:	printf ("AM29LV400B (4 Mbit, bottom boot sect)\n");
				break;
	case FLASH_AM400T:	printf ("AM29LV400T (4 Mbit, top boot sector)\n");
				break;
	case FLASH_AM800B:	printf ("AM29LV800B (8 Mbit, bottom boot sect)\n");
				break;
	case FLASH_AM800T:	printf ("AM29LV800T (8 Mbit, top boot sector)\n");
				break;
	case FLASH_AM160B:	printf ("AM29LV160B (16 Mbit, bottom boot sect)\n");
				break;
	case FLASH_AM160T:	printf ("AM29LV160T (16 Mbit, top boot sector)\n");
				break;
	case FLASH_AM320B:	printf ("AM29LV320B (32 Mbit, bottom boot sect)\n");
				break;
	case FLASH_AM320T:	printf ("AM29LV320T (32 Mbit, top boot sector)\n");
				break;
	default:		printf ("Unknown Chip Type\n");
				break;
	}

	printf ("  Size: %ld MB in %d Sectors\n",
		info->size >> 20, info->sector_count);

	printf ("  Sector Start Addresses:");
	for (i=0; i<info->sector_count; ++i) {
		if ((i % 5) == 0)
			printf ("\n   ");
		printf (" %08lX%s",
			info->start[i],
			info->protect[i] ? " (RO)" : "     "
		);
	}
	printf ("\n");
	return;
}

/*-----------------------------------------------------------------------
 */


/*-----------------------------------------------------------------------
 */

/*
 * The following code cannot be run from FLASH!
 *
 * Issue the JEDEC autoselect sequence to identify the chip, fill in
 * info (id, sector count, size, per-sector protection) and return the
 * bank size in bytes, or 0 for an unknown chip.
 */
static ulong flash_get_size (vu_long *addr, flash_info_t *info)
{
	short i;
	ulong value;
	ulong base = (ulong)addr;

	/* Write auto select command: read Manufacturer ID */
	addr[0x0555] = 0x00AA00AA;
	addr[0x02AA] = 0x00550055;
	addr[0x0555] = 0x00900090;

	value = addr[0];

	switch (value) {
	case AMD_MANUFACT:
		info->flash_id = FLASH_MAN_AMD;
		break;
	case FUJ_MANUFACT:
		info->flash_id = FLASH_MAN_FUJ;
		break;
	default:
		info->flash_id = FLASH_UNKNOWN;
		info->sector_count = 0;
		info->size = 0;
		return (0);			/* no or unknown flash	*/
	}

	value = addr[1];			/* device ID		*/

	switch (value) {
	case AMD_ID_LV400T:
		info->flash_id += FLASH_AM400T;
		info->sector_count = 11;
		info->size = 0x00100000;
		break;				/* => 1 MB		*/

	case AMD_ID_LV400B:
		info->flash_id += FLASH_AM400B;
		info->sector_count = 11;
		info->size = 0x00100000;
		break;				/* => 1 MB		*/

	case AMD_ID_LV800T:
		info->flash_id += FLASH_AM800T;
		info->sector_count = 19;
		info->size = 0x00200000;
		break;				/* => 2 MB		*/

	case AMD_ID_LV800B:
		info->flash_id += FLASH_AM800B;
		info->sector_count = 19;
		info->size = 0x00200000;
		break;				/* => 2 MB		*/

	case AMD_ID_LV160T:
		info->flash_id += FLASH_AM160T;
		info->sector_count = 35;
		info->size = 0x00400000;
		break;				/* => 4 MB		*/

	case AMD_ID_LV160B:
		info->flash_id += FLASH_AM160B;
		info->sector_count = 35;
		info->size = 0x00400000;
		break;				/* => 4 MB		*/
#if 0	/* enable when device IDs are available */
	case AMD_ID_LV320T:
		info->flash_id += FLASH_AM320T;
		info->sector_count = 67;
		info->size = 0x00800000;
		break;				/* => 8 MB		*/

	case AMD_ID_LV320B:
		info->flash_id += FLASH_AM320B;
		info->sector_count = 67;
		info->size = 0x00800000;
		break;				/* => 8 MB		*/
#endif
	default:
		info->flash_id = FLASH_UNKNOWN;
		return (0);			/* => no or unknown flash */
	}

	/* set up sector start address table */
	if (info->flash_id & FLASH_BTYPE) {
		/* set sector offsets for bottom boot block type	*/
		info->start[0] = base + 0x00000000;
		info->start[1] = base + 0x00008000;
		info->start[2] = base + 0x0000C000;
		info->start[3] = base + 0x00010000;
		for (i = 4; i < info->sector_count; i++) {
			info->start[i] = base + (i * 0x00020000) - 0x00060000;
		}
	} else {
		/* set sector offsets for top boot block type		*/
		i = info->sector_count - 1;
		info->start[i--] = base + info->size - 0x00008000;
		info->start[i--] = base + info->size - 0x0000C000;
		info->start[i--] = base + info->size - 0x00010000;
		for (; i >= 0; i--) {
			info->start[i] = base + i * 0x00020000;
		}
	}

	/* check for protected sectors */
	for (i = 0; i < info->sector_count; i++) {
		/* read sector protection at sector address, (A7 .. A0) = 0x02 */
		/* D0 = 1 if protected */
		addr = (volatile unsigned long *)(info->start[i]);
		info->protect[i] = addr[2] & 1;
	}

	/*
	 * Prevent writes to uninitialized FLASH.
	 */
	if (info->flash_id != FLASH_UNKNOWN) {
		addr = (volatile unsigned long *)info->start[0];
		*addr = 0x00F000F0;	/* reset bank */
	}

	return (info->size);
}


/*-----------------------------------------------------------------------
 * Erase sectors s_first..s_last (skipping protected ones) and poll DQ7
 * on the last started sector until done or timeout.
 * Returns 0 on success, 1 on error/timeout.
 */
int	flash_erase (flash_info_t *info, int s_first, int s_last)
{
	vu_long *addr = (vu_long*)(info->start[0]);
	int flag, prot, sect, l_sect;
	ulong start, now, last;

	if ((s_first < 0) || (s_first > s_last)) {
		if (info->flash_id == FLASH_UNKNOWN) {
			printf ("- missing\n");
		} else {
			printf ("- no sectors to erase\n");
		}
		return 1;
	}

	if ((info->flash_id == FLASH_UNKNOWN) ||
	    (info->flash_id > FLASH_AMD_COMP)) {
		printf ("Can't erase unknown flash type %08lx - aborted\n",
			info->flash_id);
		return 1;
	}

	prot = 0;
	for (sect=s_first; sect<=s_last; ++sect) {
		if (info->protect[sect]) {
			prot++;
		}
	}

	if (prot) {
		printf ("- Warning: %d protected sectors will not be erased!\n",
			prot);
	} else {
		printf ("\n");
	}

	l_sect = -1;

	/* Disable interrupts which might cause a timeout here */
	flag = disable_interrupts();

	addr[0x0555] = 0x00AA00AA;
	addr[0x02AA] = 0x00550055;
	addr[0x0555] = 0x00800080;
	addr[0x0555] = 0x00AA00AA;
	addr[0x02AA] = 0x00550055;

	/* Start erase on unprotected sectors */
	for (sect = s_first; sect<=s_last; sect++) {
		if (info->protect[sect] == 0) {	/* not protected */
			addr = (vu_long*)(info->start[sect]);
			addr[0] = 0x00300030;
			l_sect = sect;
		}
	}

	/* re-enable interrupts if necessary */
	if (flag)
		enable_interrupts();

	/* wait at least 80us - let's wait 1 ms */
	udelay (1000);

	/*
	 * We wait for the last triggered sector
	 */
	if (l_sect < 0)
		goto DONE;

	start = get_timer (0);
	last  = start;
	addr = (vu_long*)(info->start[l_sect]);
	while ((addr[0] & 0x00800080) != 0x00800080) {
		if ((now = get_timer(start)) > CFG_FLASH_ERASE_TOUT) {
			printf ("Timeout\n");
			return 1;
		}
		/* show that we're waiting */
		if ((now - last) > 1000) {	/* every second */
			putc ('.');
			last = now;
		}
	}

DONE:
	/* reset to read mode */
	addr = (volatile unsigned long *)info->start[0];
	addr[0] = 0x00F000F0;	/* reset bank */

	printf (" done\n");
	return 0;
}

/*-----------------------------------------------------------------------
 * Copy memory to flash, returns:
 * 0 - OK
 * 1 - write timeout
 * 2 - Flash not erased
 */
int write_buff (flash_info_t *info, uchar *src, ulong addr, ulong cnt)
{
	ulong cp, wp, data;
	int i, l, rc;

	wp = (addr & ~3);	/* get lower word aligned address */

	/*
	 * handle unaligned start bytes
	 */
	if ((l = addr - wp) != 0) {
		data = 0;
		for (i=0, cp=wp; i<l; ++i, ++cp) {
			data = (data << 8) | (*(uchar *)cp);
		}
		for (; i<4 && cnt>0; ++i) {
			data = (data << 8) | *src++;
			--cnt;
			++cp;
		}
		for (; cnt==0 && i<4; ++i, ++cp) {
			data = (data << 8) | (*(uchar *)cp);
		}

		if ((rc = write_word(info, wp, data)) != 0) {
			return (rc);
		}
		wp += 4;
	}

	/*
	 * handle word aligned part
	 */
	while (cnt >= 4) {
		data = 0;
		for (i=0; i<4; ++i) {
			data = (data << 8) | *src++;
		}
		if ((rc = write_word(info, wp, data)) != 0) {
			return (rc);
		}
		wp  += 4;
		cnt -= 4;
	}

	if (cnt == 0) {
		return (0);
	}

	/*
	 * handle unaligned tail bytes
	 */
	data = 0;
	for (i=0, cp=wp; i<4 && cnt>0; ++i, ++cp) {
		data = (data << 8) | *src++;
		--cnt;
	}
	for (; i<4; ++i, ++cp) {
		data = (data << 8) | (*(uchar *)cp);
	}

	return (write_word(info, wp, data));
}

/*-----------------------------------------------------------------------
 * Write a word to Flash, returns:
 * 0 - OK
 * 1 - write timeout
 * 2 - Flash not erased
 */
static int write_word (flash_info_t *info, ulong dest, ulong data)
{
	vu_long *addr = (vu_long*)(info->start[0]);
	ulong start;
	int flag;

	/* Check if Flash is (sufficiently) erased */
	if ((*((vu_long *)dest) & data) != data) {
		return (2);
	}
	/* Disable interrupts which might cause a timeout here */
	flag = disable_interrupts();

	addr[0x0555] = 0x00AA00AA;
	addr[0x02AA] = 0x00550055;
	addr[0x0555] = 0x00A000A0;

	*((vu_long *)dest) = data;

	/* re-enable interrupts if necessary */
	if (flag)
		enable_interrupts();

	/* data polling for D7 */
	start = get_timer (0);
	while ((*((vu_long *)dest) & 0x00800080) != (data & 0x00800080)) {
		if (get_timer(start) > CFG_FLASH_WRITE_TOUT) {
			return (1);
		}
	}
	return (0);
}

/*-----------------------------------------------------------------------
 */
gpl-2.0
male-puppies/linux-3.18.pps
drivers/staging/octeon/ethernet-rx.c
328
15358
/********************************************************************** * Author: Cavium Networks * * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * * Copyright (c) 2003-2010 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. 
* Contact Cavium Networks for more information **********************************************************************/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/cache.h> #include <linux/cpumask.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ip.h> #include <linux/string.h> #include <linux/prefetch.h> #include <linux/ratelimit.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <net/dst.h> #ifdef CONFIG_XFRM #include <linux/xfrm.h> #include <net/xfrm.h> #endif /* CONFIG_XFRM */ #include <linux/atomic.h> #include <asm/octeon/octeon.h> #include "ethernet-defines.h" #include "ethernet-mem.h" #include "ethernet-rx.h" #include "octeon-ethernet.h" #include "ethernet-util.h" #include <asm/octeon/cvmx-helper.h> #include <asm/octeon/cvmx-wqe.h> #include <asm/octeon/cvmx-fau.h> #include <asm/octeon/cvmx-pow.h> #include <asm/octeon/cvmx-pip.h> #include <asm/octeon/cvmx-scratch.h> #include <asm/octeon/cvmx-gmxx-defs.h> struct cvm_napi_wrapper { struct napi_struct napi; } ____cacheline_aligned_in_smp; static struct cvm_napi_wrapper cvm_oct_napi[NR_CPUS] __cacheline_aligned_in_smp; struct cvm_oct_core_state { int baseline_cores; /* * The number of additional cores that could be processing * input packets. */ atomic_t available_cores; cpumask_t cpu_state; } ____cacheline_aligned_in_smp; static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp; static int cvm_irq_cpu; static void cvm_oct_enable_napi(void *_) { int cpu = smp_processor_id(); napi_schedule(&cvm_oct_napi[cpu].napi); } static void cvm_oct_enable_one_cpu(void) { int v; int cpu; /* Check to see if more CPUs are available for receive processing... */ v = atomic_sub_if_positive(1, &core_state.available_cores); if (v < 0) return; /* ... if a CPU is available, Turn on NAPI polling for that CPU. 
*/ for_each_online_cpu(cpu) { if (!cpu_test_and_set(cpu, core_state.cpu_state)) { v = smp_call_function_single(cpu, cvm_oct_enable_napi, NULL, 0); if (v) panic("Can't enable NAPI."); break; } } } static void cvm_oct_no_more_work(void) { int cpu = smp_processor_id(); if (cpu == cvm_irq_cpu) { enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group); return; } cpu_clear(cpu, core_state.cpu_state); atomic_add(1, &core_state.available_cores); } /** * cvm_oct_do_interrupt - interrupt handler. * * The interrupt occurs whenever the POW has packets in our group. * */ static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id) { /* Disable the IRQ and start napi_poll. */ disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group); cvm_irq_cpu = smp_processor_id(); cvm_oct_enable_napi(NULL); return IRQ_HANDLED; } /** * cvm_oct_check_rcv_error - process receive errors * @work: Work queue entry pointing to the packet. * * Returns Non-zero if the packet can be dropped, zero otherwise. */ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work) { if ((work->word2.snoip.err_code == 10) && (work->len <= 64)) { /* * Ignore length errors on min size packets. Some * equipment incorrectly pads packets to 64+4FCS * instead of 60+4FCS. Note these packets still get * counted as frame errors. */ } else if (USE_10MBPS_PREAMBLE_WORKAROUND && ((work->word2.snoip.err_code == 5) || (work->word2.snoip.err_code == 7))) { /* * We received a packet with either an alignment error * or a FCS error. This may be signalling that we are * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK] * off. If this is the case we need to parse the * packet to determine if we can remove a non spec * preamble and generate a correct packet. 
*/ int interface = cvmx_helper_get_interface_num(work->ipprt); int index = cvmx_helper_get_interface_index_num(work->ipprt); union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl; gmxx_rxx_frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface)); if (gmxx_rxx_frm_ctl.s.pre_chk == 0) { uint8_t *ptr = cvmx_phys_to_ptr(work->packet_ptr.s.addr); int i = 0; while (i < work->len - 1) { if (*ptr != 0x55) break; ptr++; i++; } if (*ptr == 0xd5) { /* printk_ratelimited("Port %d received 0xd5 preamble\n", work->ipprt); */ work->packet_ptr.s.addr += i + 1; work->len -= i + 5; } else if ((*ptr & 0xf) == 0xd) { /* printk_ratelimited("Port %d received 0x?d preamble\n", work->ipprt); */ work->packet_ptr.s.addr += i; work->len -= i + 4; for (i = 0; i < work->len; i++) { *ptr = ((*ptr & 0xf0) >> 4) | ((*(ptr + 1) & 0xf) << 4); ptr++; } } else { printk_ratelimited("Port %d unknown preamble, packet dropped\n", work->ipprt); /* cvmx_helper_dump_packet(work); */ cvm_oct_free_work(work); return 1; } } } else { printk_ratelimited("Port %d receive error code %d, packet dropped\n", work->ipprt, work->word2.snoip.err_code); cvm_oct_free_work(work); return 1; } return 0; } /** * cvm_oct_napi_poll - the NAPI poll function. * @napi: The NAPI instance, or null if called from cvm_oct_poll_controller * @budget: Maximum number of packets to receive. * * Returns the number of packets processed. 
*/ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) { const int coreid = cvmx_get_core_num(); uint64_t old_group_mask; uint64_t old_scratch; int rx_count = 0; int did_work_request = 0; int packet_not_copied; /* Prefetch cvm_oct_device since we know we need it soon */ prefetch(cvm_oct_device); if (USE_ASYNC_IOBDMA) { /* Save scratch in case userspace is using it */ CVMX_SYNCIOBDMA; old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH); } /* Only allow work for our group (and preserve priorities) */ old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid)); cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group); if (USE_ASYNC_IOBDMA) { cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT); did_work_request = 1; } while (rx_count < budget) { struct sk_buff *skb = NULL; struct sk_buff **pskb = NULL; int skb_in_hw; cvmx_wqe_t *work; if (USE_ASYNC_IOBDMA && did_work_request) work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH); else work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT); prefetch(work); did_work_request = 0; if (work == NULL) { union cvmx_pow_wq_int wq_int; wq_int.u64 = 0; wq_int.s.iq_dis = 1 << pow_receive_group; wq_int.s.wq_int = 1 << pow_receive_group; cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64); break; } pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *)); prefetch(pskb); if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) { cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT); did_work_request = 1; } if (rx_count == 0) { /* * First time through, see if there is enough * work waiting to merit waking another * CPU. 
*/ union cvmx_pow_wq_int_cntx counts; int backlog; int cores_in_use = core_state.baseline_cores - atomic_read(&core_state.available_cores); counts.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_CNTX(pow_receive_group)); backlog = counts.s.iq_cnt + counts.s.ds_cnt; if (backlog > budget * cores_in_use && napi != NULL) cvm_oct_enable_one_cpu(); } rx_count++; skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1; if (likely(skb_in_hw)) { skb = *pskb; prefetch(&skb->head); prefetch(&skb->len); } prefetch(cvm_oct_device[work->ipprt]); /* Immediately throw away all packets with receive errors */ if (unlikely(work->word2.snoip.rcv_error)) { if (cvm_oct_check_rcv_error(work)) continue; } /* * We can only use the zero copy path if skbuffs are * in the FPA pool and the packet fits in a single * buffer. */ if (likely(skb_in_hw)) { skb->data = skb->head + work->packet_ptr.s.addr - cvmx_ptr_to_phys(skb->head); prefetch(skb->data); skb->len = work->len; skb_set_tail_pointer(skb, skb->len); packet_not_copied = 1; } else { /* * We have to copy the packet. First allocate * an skbuff for it. */ skb = dev_alloc_skb(work->len); if (!skb) { cvm_oct_free_work(work); continue; } /* * Check if we've received a packet that was * entirely stored in the work entry. */ if (unlikely(work->word2.s.bufs == 0)) { uint8_t *ptr = work->packet_data; if (likely(!work->word2.s.not_IP)) { /* * The beginning of the packet * moves for IP packets. */ if (work->word2.s.is_v6) ptr += 2; else ptr += 6; } memcpy(skb_put(skb, work->len), ptr, work->len); /* No packet buffers to free */ } else { int segments = work->word2.s.bufs; union cvmx_buf_ptr segment_ptr = work->packet_ptr; int len = work->len; while (segments--) { union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)cvmx_phys_to_ptr(segment_ptr.s.addr - 8); /* * Octeon Errata PKI-100: The segment size is * wrong. Until it is fixed, calculate the * segment size based on the packet pool * buffer size. 
When it is fixed, the * following line should be replaced with this * one: int segment_size = * segment_ptr.s.size; */ int segment_size = CVMX_FPA_PACKET_POOL_SIZE - (segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7)); /* * Don't copy more than what * is left in the packet. */ if (segment_size > len) segment_size = len; /* Copy the data into the packet */ memcpy(skb_put(skb, segment_size), cvmx_phys_to_ptr(segment_ptr.s.addr), segment_size); len -= segment_size; segment_ptr = next_ptr; } } packet_not_copied = 0; } if (likely((work->ipprt < TOTAL_NUMBER_OF_PORTS) && cvm_oct_device[work->ipprt])) { struct net_device *dev = cvm_oct_device[work->ipprt]; struct octeon_ethernet *priv = netdev_priv(dev); /* * Only accept packets for devices that are * currently up. */ if (likely(dev->flags & IFF_UP)) { skb->protocol = eth_type_trans(skb, dev); skb->dev = dev; if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error || !work->word2.s.tcp_or_udp)) skb->ip_summed = CHECKSUM_NONE; else skb->ip_summed = CHECKSUM_UNNECESSARY; /* Increment RX stats for virtual ports */ if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) { #ifdef CONFIG_64BIT atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets); atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes); #else atomic_add(1, (atomic_t *)&priv->stats.rx_packets); atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes); #endif } netif_receive_skb(skb); } else { /* Drop any packet received for a device that isn't up */ /* printk_ratelimited("%s: Device not up, packet dropped\n", dev->name); */ #ifdef CONFIG_64BIT atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped); #else atomic_add(1, (atomic_t *)&priv->stats.rx_dropped); #endif dev_kfree_skb_irq(skb); } } else { /* * Drop any packet received for a device that * doesn't exist. 
*/ printk_ratelimited("Port %d not controlled by Linux, packet dropped\n", work->ipprt); dev_kfree_skb_irq(skb); } /* * Check to see if the skbuff and work share the same * packet buffer. */ if (USE_SKBUFFS_IN_HW && likely(packet_not_copied)) { /* * This buffer needs to be replaced, increment * the number of buffers we need to free by * one. */ cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 1); cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1)); } else { cvm_oct_free_work(work); } } /* Restore the original POW group mask */ cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask); if (USE_ASYNC_IOBDMA) { /* Restore the scratch area */ cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch); } cvm_oct_rx_refill_pool(0); if (rx_count < budget && napi != NULL) { /* No more work */ napi_complete(napi); cvm_oct_no_more_work(); } return rx_count; } #ifdef CONFIG_NET_POLL_CONTROLLER /** * cvm_oct_poll_controller - poll for receive packets * device. * * @dev: Device to poll. Unused */ void cvm_oct_poll_controller(struct net_device *dev) { cvm_oct_napi_poll(NULL, 16); } #endif void cvm_oct_rx_initialize(void) { int i; struct net_device *dev_for_napi = NULL; union cvmx_pow_wq_int_thrx int_thr; union cvmx_pow_wq_int_pc int_pc; for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) { if (cvm_oct_device[i]) { dev_for_napi = cvm_oct_device[i]; break; } } if (NULL == dev_for_napi) panic("No net_devices were allocated."); if (max_rx_cpus >= 1 && max_rx_cpus < num_online_cpus()) atomic_set(&core_state.available_cores, max_rx_cpus); else atomic_set(&core_state.available_cores, num_online_cpus()); core_state.baseline_cores = atomic_read(&core_state.available_cores); core_state.cpu_state = CPU_MASK_NONE; for_each_possible_cpu(i) { netif_napi_add(dev_for_napi, &cvm_oct_napi[i].napi, cvm_oct_napi_poll, rx_napi_weight); napi_enable(&cvm_oct_napi[i].napi); } /* Register an IRQ handler to receive POW interrupts */ i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, 
cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device); if (i) panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_WORKQ0 + pow_receive_group); disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group); int_thr.u64 = 0; int_thr.s.tc_en = 1; int_thr.s.tc_thr = 1; /* Enable POW interrupt when our port has at least one packet */ cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), int_thr.u64); int_pc.u64 = 0; int_pc.s.pc_thr = 5; cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64); /* Scheduld NAPI now. This will indirectly enable interrupts. */ cvm_oct_enable_one_cpu(); } void cvm_oct_rx_shutdown(void) { int i; /* Shutdown all of the NAPIs */ for_each_possible_cpu(i) netif_napi_del(&cvm_oct_napi[i].napi); }
gpl-2.0
rkollataj/linux-can-next
drivers/pinctrl/freescale/pinctrl-imx1-core.c
328
17210
/* * Core driver for the imx pin controller in imx1/21/27 * * Copyright (C) 2013 Pengutronix * Author: Markus Pargmann <mpa@pengutronix.de> * * Based on pinctrl-imx.c: * Author: Dong Aisheng <dong.aisheng@linaro.org> * Copyright (C) 2012 Freescale Semiconductor, Inc. * Copyright (C) 2012 Linaro Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/bitops.h> #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/machine.h> #include <linux/pinctrl/pinconf.h> #include <linux/pinctrl/pinctrl.h> #include <linux/pinctrl/pinmux.h> #include <linux/slab.h> #include "../core.h" #include "pinctrl-imx1.h" struct imx1_pinctrl { struct device *dev; struct pinctrl_dev *pctl; void __iomem *base; const struct imx1_pinctrl_soc_info *info; }; /* * MX1 register offsets */ #define MX1_DDIR 0x00 #define MX1_OCR 0x04 #define MX1_ICONFA 0x0c #define MX1_ICONFB 0x14 #define MX1_GIUS 0x20 #define MX1_GPR 0x38 #define MX1_PUEN 0x40 #define MX1_PORT_STRIDE 0x100 /* * MUX_ID format defines */ #define MX1_MUX_FUNCTION(val) (BIT(0) & val) #define MX1_MUX_GPIO(val) ((BIT(1) & val) >> 1) #define MX1_MUX_DIR(val) ((BIT(2) & val) >> 2) #define MX1_MUX_OCONF(val) (((BIT(4) | BIT(5)) & val) >> 4) #define MX1_MUX_ICONFA(val) (((BIT(8) | BIT(9)) & val) >> 8) #define MX1_MUX_ICONFB(val) (((BIT(10) | BIT(11)) & val) >> 10) /* * IMX1 IOMUXC manages the pins based on ports. Each port has 32 pins. IOMUX * control register are seperated into function, output configuration, input * configuration A, input configuration B, GPIO in use and data direction. * * Those controls that are represented by 1 bit have a direct mapping between * bit position and pin id. 
If they are represented by 2 bit, the lower 16 pins * are in the first register and the upper 16 pins in the second (next) * register. pin_id is stored in bit (pin_id%16)*2 and the bit above. */ /* * Calculates the register offset from a pin_id */ static void __iomem *imx1_mem(struct imx1_pinctrl *ipctl, unsigned int pin_id) { unsigned int port = pin_id / 32; return ipctl->base + port * MX1_PORT_STRIDE; } /* * Write to a register with 2 bits per pin. The function will automatically * use the next register if the pin is managed in the second register. */ static void imx1_write_2bit(struct imx1_pinctrl *ipctl, unsigned int pin_id, u32 value, u32 reg_offset) { void __iomem *reg = imx1_mem(ipctl, pin_id) + reg_offset; int offset = (pin_id % 16) * 2; /* offset, regardless of register used */ int mask = ~(0x3 << offset); /* Mask for 2 bits at offset */ u32 old_val; u32 new_val; /* Use the next register if the pin's port pin number is >=16 */ if (pin_id % 32 >= 16) reg += 0x04; dev_dbg(ipctl->dev, "write: register 0x%p offset %d value 0x%x\n", reg, offset, value); /* Get current state of pins */ old_val = readl(reg); old_val &= mask; new_val = value & 0x3; /* Make sure value is really 2 bit */ new_val <<= offset; new_val |= old_val;/* Set new state for pin_id */ writel(new_val, reg); } static void imx1_write_bit(struct imx1_pinctrl *ipctl, unsigned int pin_id, u32 value, u32 reg_offset) { void __iomem *reg = imx1_mem(ipctl, pin_id) + reg_offset; int offset = pin_id % 32; int mask = ~BIT_MASK(offset); u32 old_val; u32 new_val; /* Get current state of pins */ old_val = readl(reg); old_val &= mask; new_val = value & 0x1; /* Make sure value is really 1 bit */ new_val <<= offset; new_val |= old_val;/* Set new state for pin_id */ writel(new_val, reg); } static int imx1_read_2bit(struct imx1_pinctrl *ipctl, unsigned int pin_id, u32 reg_offset) { void __iomem *reg = imx1_mem(ipctl, pin_id) + reg_offset; int offset = (pin_id % 16) * 2; /* Use the next register if the pin's port 
pin number is >=16 */ if (pin_id % 32 >= 16) reg += 0x04; return (readl(reg) & (BIT(offset) | BIT(offset+1))) >> offset; } static int imx1_read_bit(struct imx1_pinctrl *ipctl, unsigned int pin_id, u32 reg_offset) { void __iomem *reg = imx1_mem(ipctl, pin_id) + reg_offset; int offset = pin_id % 32; return !!(readl(reg) & BIT(offset)); } static inline const struct imx1_pin_group *imx1_pinctrl_find_group_by_name( const struct imx1_pinctrl_soc_info *info, const char *name) { const struct imx1_pin_group *grp = NULL; int i; for (i = 0; i < info->ngroups; i++) { if (!strcmp(info->groups[i].name, name)) { grp = &info->groups[i]; break; } } return grp; } static int imx1_get_groups_count(struct pinctrl_dev *pctldev) { struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); const struct imx1_pinctrl_soc_info *info = ipctl->info; return info->ngroups; } static const char *imx1_get_group_name(struct pinctrl_dev *pctldev, unsigned selector) { struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); const struct imx1_pinctrl_soc_info *info = ipctl->info; return info->groups[selector].name; } static int imx1_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector, const unsigned int **pins, unsigned *npins) { struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); const struct imx1_pinctrl_soc_info *info = ipctl->info; if (selector >= info->ngroups) return -EINVAL; *pins = info->groups[selector].pin_ids; *npins = info->groups[selector].npins; return 0; } static void imx1_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned offset) { struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); seq_printf(s, "GPIO %d, function %d, direction %d, oconf %d, iconfa %d, iconfb %d", imx1_read_bit(ipctl, offset, MX1_GIUS), imx1_read_bit(ipctl, offset, MX1_GPR), imx1_read_bit(ipctl, offset, MX1_DDIR), imx1_read_2bit(ipctl, offset, MX1_OCR), imx1_read_2bit(ipctl, offset, MX1_ICONFA), imx1_read_2bit(ipctl, offset, MX1_ICONFB)); } static int 
imx1_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node *np, struct pinctrl_map **map, unsigned *num_maps) { struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); const struct imx1_pinctrl_soc_info *info = ipctl->info; const struct imx1_pin_group *grp; struct pinctrl_map *new_map; struct device_node *parent; int map_num = 1; int i, j; /* * first find the group of this node and check if we need create * config maps for pins */ grp = imx1_pinctrl_find_group_by_name(info, np->name); if (!grp) { dev_err(info->dev, "unable to find group for node %s\n", np->name); return -EINVAL; } for (i = 0; i < grp->npins; i++) map_num++; new_map = kmalloc(sizeof(struct pinctrl_map) * map_num, GFP_KERNEL); if (!new_map) return -ENOMEM; *map = new_map; *num_maps = map_num; /* create mux map */ parent = of_get_parent(np); if (!parent) { kfree(new_map); return -EINVAL; } new_map[0].type = PIN_MAP_TYPE_MUX_GROUP; new_map[0].data.mux.function = parent->name; new_map[0].data.mux.group = np->name; of_node_put(parent); /* create config map */ new_map++; for (i = j = 0; i < grp->npins; i++) { new_map[j].type = PIN_MAP_TYPE_CONFIGS_PIN; new_map[j].data.configs.group_or_pin = pin_get_name(pctldev, grp->pins[i].pin_id); new_map[j].data.configs.configs = &grp->pins[i].config; new_map[j].data.configs.num_configs = 1; j++; } dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n", (*map)->data.mux.function, (*map)->data.mux.group, map_num); return 0; } static void imx1_dt_free_map(struct pinctrl_dev *pctldev, struct pinctrl_map *map, unsigned num_maps) { kfree(map); } static const struct pinctrl_ops imx1_pctrl_ops = { .get_groups_count = imx1_get_groups_count, .get_group_name = imx1_get_group_name, .get_group_pins = imx1_get_group_pins, .pin_dbg_show = imx1_pin_dbg_show, .dt_node_to_map = imx1_dt_node_to_map, .dt_free_map = imx1_dt_free_map, }; static int imx1_pmx_set(struct pinctrl_dev *pctldev, unsigned selector, unsigned group) { struct imx1_pinctrl *ipctl = 
pinctrl_dev_get_drvdata(pctldev); const struct imx1_pinctrl_soc_info *info = ipctl->info; const struct imx1_pin *pins; unsigned int npins; int i; /* * Configure the mux mode for each pin in the group for a specific * function. */ pins = info->groups[group].pins; npins = info->groups[group].npins; WARN_ON(!pins || !npins); dev_dbg(ipctl->dev, "enable function %s group %s\n", info->functions[selector].name, info->groups[group].name); for (i = 0; i < npins; i++) { unsigned int mux = pins[i].mux_id; unsigned int pin_id = pins[i].pin_id; unsigned int afunction = MX1_MUX_FUNCTION(mux); unsigned int gpio_in_use = MX1_MUX_GPIO(mux); unsigned int direction = MX1_MUX_DIR(mux); unsigned int gpio_oconf = MX1_MUX_OCONF(mux); unsigned int gpio_iconfa = MX1_MUX_ICONFA(mux); unsigned int gpio_iconfb = MX1_MUX_ICONFB(mux); dev_dbg(pctldev->dev, "%s, pin 0x%x, function %d, gpio %d, direction %d, oconf %d, iconfa %d, iconfb %d\n", __func__, pin_id, afunction, gpio_in_use, direction, gpio_oconf, gpio_iconfa, gpio_iconfb); imx1_write_bit(ipctl, pin_id, gpio_in_use, MX1_GIUS); imx1_write_bit(ipctl, pin_id, direction, MX1_DDIR); if (gpio_in_use) { imx1_write_2bit(ipctl, pin_id, gpio_oconf, MX1_OCR); imx1_write_2bit(ipctl, pin_id, gpio_iconfa, MX1_ICONFA); imx1_write_2bit(ipctl, pin_id, gpio_iconfb, MX1_ICONFB); } else { imx1_write_bit(ipctl, pin_id, afunction, MX1_GPR); } } return 0; } static int imx1_pmx_get_funcs_count(struct pinctrl_dev *pctldev) { struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); const struct imx1_pinctrl_soc_info *info = ipctl->info; return info->nfunctions; } static const char *imx1_pmx_get_func_name(struct pinctrl_dev *pctldev, unsigned selector) { struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); const struct imx1_pinctrl_soc_info *info = ipctl->info; return info->functions[selector].name; } static int imx1_pmx_get_groups(struct pinctrl_dev *pctldev, unsigned selector, const char * const **groups, unsigned * const num_groups) { struct 
imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); const struct imx1_pinctrl_soc_info *info = ipctl->info; *groups = info->functions[selector].groups; *num_groups = info->functions[selector].num_groups; return 0; } static const struct pinmux_ops imx1_pmx_ops = { .get_functions_count = imx1_pmx_get_funcs_count, .get_function_name = imx1_pmx_get_func_name, .get_function_groups = imx1_pmx_get_groups, .set_mux = imx1_pmx_set, }; static int imx1_pinconf_get(struct pinctrl_dev *pctldev, unsigned pin_id, unsigned long *config) { struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); *config = imx1_read_bit(ipctl, pin_id, MX1_PUEN); return 0; } static int imx1_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin_id, unsigned long *configs, unsigned num_configs) { struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); int i; for (i = 0; i != num_configs; ++i) { imx1_write_bit(ipctl, pin_id, configs[i] & 0x01, MX1_PUEN); dev_dbg(ipctl->dev, "pinconf set pullup pin %s\n", pin_desc_get(pctldev, pin_id)->name); } return 0; } static void imx1_pinconf_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned pin_id) { unsigned long config; imx1_pinconf_get(pctldev, pin_id, &config); seq_printf(s, "0x%lx", config); } static void imx1_pinconf_group_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned group) { struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); const struct imx1_pinctrl_soc_info *info = ipctl->info; struct imx1_pin_group *grp; unsigned long config; const char *name; int i, ret; if (group > info->ngroups) return; seq_puts(s, "\n"); grp = &info->groups[group]; for (i = 0; i < grp->npins; i++) { name = pin_get_name(pctldev, grp->pins[i].pin_id); ret = imx1_pinconf_get(pctldev, grp->pins[i].pin_id, &config); if (ret) return; seq_printf(s, "%s: 0x%lx", name, config); } } static const struct pinconf_ops imx1_pinconf_ops = { .pin_config_get = imx1_pinconf_get, .pin_config_set = imx1_pinconf_set, .pin_config_dbg_show = 
imx1_pinconf_dbg_show, .pin_config_group_dbg_show = imx1_pinconf_group_dbg_show, }; static struct pinctrl_desc imx1_pinctrl_desc = { .pctlops = &imx1_pctrl_ops, .pmxops = &imx1_pmx_ops, .confops = &imx1_pinconf_ops, .owner = THIS_MODULE, }; static int imx1_pinctrl_parse_groups(struct device_node *np, struct imx1_pin_group *grp, struct imx1_pinctrl_soc_info *info, u32 index) { int size; const __be32 *list; int i; dev_dbg(info->dev, "group(%d): %s\n", index, np->name); /* Initialise group */ grp->name = np->name; /* * the binding format is fsl,pins = <PIN MUX_ID CONFIG> */ list = of_get_property(np, "fsl,pins", &size); /* we do not check return since it's safe node passed down */ if (!size || size % 12) { dev_notice(info->dev, "Not a valid fsl,pins property (%s)\n", np->name); return -EINVAL; } grp->npins = size / 12; grp->pins = devm_kzalloc(info->dev, grp->npins * sizeof(struct imx1_pin), GFP_KERNEL); grp->pin_ids = devm_kzalloc(info->dev, grp->npins * sizeof(unsigned int), GFP_KERNEL); if (!grp->pins || !grp->pin_ids) return -ENOMEM; for (i = 0; i < grp->npins; i++) { grp->pins[i].pin_id = be32_to_cpu(*list++); grp->pins[i].mux_id = be32_to_cpu(*list++); grp->pins[i].config = be32_to_cpu(*list++); grp->pin_ids[i] = grp->pins[i].pin_id; } return 0; } static int imx1_pinctrl_parse_functions(struct device_node *np, struct imx1_pinctrl_soc_info *info, u32 index) { struct device_node *child; struct imx1_pmx_func *func; struct imx1_pin_group *grp; int ret; static u32 grp_index; u32 i = 0; dev_dbg(info->dev, "parse function(%d): %s\n", index, np->name); func = &info->functions[index]; /* Initialise function */ func->name = np->name; func->num_groups = of_get_child_count(np); if (func->num_groups == 0) return -EINVAL; func->groups = devm_kzalloc(info->dev, func->num_groups * sizeof(char *), GFP_KERNEL); if (!func->groups) return -ENOMEM; for_each_child_of_node(np, child) { func->groups[i] = child->name; grp = &info->groups[grp_index++]; ret = 
imx1_pinctrl_parse_groups(child, grp, info, i++); if (ret == -ENOMEM) { of_node_put(child); return ret; } } return 0; } static int imx1_pinctrl_parse_dt(struct platform_device *pdev, struct imx1_pinctrl *pctl, struct imx1_pinctrl_soc_info *info) { struct device_node *np = pdev->dev.of_node; struct device_node *child; int ret; u32 nfuncs = 0; u32 ngroups = 0; u32 ifunc = 0; if (!np) return -ENODEV; for_each_child_of_node(np, child) { ++nfuncs; ngroups += of_get_child_count(child); } if (!nfuncs) { dev_err(&pdev->dev, "No pin functions defined\n"); return -EINVAL; } info->nfunctions = nfuncs; info->functions = devm_kzalloc(&pdev->dev, nfuncs * sizeof(struct imx1_pmx_func), GFP_KERNEL); info->ngroups = ngroups; info->groups = devm_kzalloc(&pdev->dev, ngroups * sizeof(struct imx1_pin_group), GFP_KERNEL); if (!info->functions || !info->groups) return -ENOMEM; for_each_child_of_node(np, child) { ret = imx1_pinctrl_parse_functions(child, info, ifunc++); if (ret == -ENOMEM) { of_node_put(child); return -ENOMEM; } } return 0; } int imx1_pinctrl_core_probe(struct platform_device *pdev, struct imx1_pinctrl_soc_info *info) { struct imx1_pinctrl *ipctl; struct resource *res; struct pinctrl_desc *pctl_desc; int ret; if (!info || !info->pins || !info->npins) { dev_err(&pdev->dev, "wrong pinctrl info\n"); return -EINVAL; } info->dev = &pdev->dev; /* Create state holders etc for this driver */ ipctl = devm_kzalloc(&pdev->dev, sizeof(*ipctl), GFP_KERNEL); if (!ipctl) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENOENT; ipctl->base = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res)); if (!ipctl->base) return -ENOMEM; pctl_desc = &imx1_pinctrl_desc; pctl_desc->name = dev_name(&pdev->dev); pctl_desc->pins = info->pins; pctl_desc->npins = info->npins; ret = imx1_pinctrl_parse_dt(pdev, ipctl, info); if (ret) { dev_err(&pdev->dev, "fail to probe dt properties\n"); return ret; } ipctl->info = info; ipctl->dev = info->dev; 
platform_set_drvdata(pdev, ipctl); ipctl->pctl = devm_pinctrl_register(&pdev->dev, pctl_desc, ipctl); if (IS_ERR(ipctl->pctl)) { dev_err(&pdev->dev, "could not register IMX pinctrl driver\n"); return PTR_ERR(ipctl->pctl); } ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); if (ret) { pinctrl_unregister(ipctl->pctl); dev_err(&pdev->dev, "Failed to populate subdevices\n"); return ret; } dev_info(&pdev->dev, "initialized IMX pinctrl driver\n"); return 0; }
gpl-2.0
cr1exe/android_kernel_sony_taoshan
drivers/staging/prima/CORE/SYS/legacy/src/utils/src/macTrace.c
328
29067
/* * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * Copyright (c) 2012, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /**========================================================================= * Copyright (c) 2013 Qualcomm Atheros, Inc. * All Rights Reserved. * Qualcomm Atheros Confidential and Proprietary. 
\file macTrace.c \brief implementation for trace related APIs \author Sunit Bhatia Copyright 2008 (c) Qualcomm, Incorporated. All Rights Reserved. Qualcomm Confidential and Proprietary. ========================================================================*/ /*-------------------------------------------------------------------------- Include Files ------------------------------------------------------------------------*/ #include "macTrace.h" #include "wlan_qct_wda.h" #include "vos_trace.h" #ifdef TRACE_RECORD tANI_U8* macTraceGetSmeMsgString( tANI_U16 smeMsg ) { switch( smeMsg ) { CASE_RETURN_STRING(eWNI_SME_START_REQ); CASE_RETURN_STRING(eWNI_SME_START_RSP); CASE_RETURN_STRING(eWNI_SME_SYS_READY_IND); CASE_RETURN_STRING(eWNI_SME_SCAN_REQ); CASE_RETURN_STRING(eWNI_SME_SCAN_ABORT_IND); CASE_RETURN_STRING(eWNI_SME_SCAN_RSP); #ifdef FEATURE_OEM_DATA_SUPPORT CASE_RETURN_STRING(eWNI_SME_OEM_DATA_REQ); CASE_RETURN_STRING(eWNI_SME_OEM_DATA_RSP); #endif CASE_RETURN_STRING(eWNI_SME_JOIN_REQ); CASE_RETURN_STRING(eWNI_SME_JOIN_RSP); CASE_RETURN_STRING(eWNI_SME_SETCONTEXT_REQ); CASE_RETURN_STRING(eWNI_SME_SETCONTEXT_RSP); CASE_RETURN_STRING(eWNI_SME_REASSOC_REQ); CASE_RETURN_STRING(eWNI_SME_REASSOC_RSP); CASE_RETURN_STRING(eWNI_SME_AUTH_REQ); CASE_RETURN_STRING(eWNI_SME_AUTH_RSP); CASE_RETURN_STRING(eWNI_SME_DISASSOC_REQ); CASE_RETURN_STRING(eWNI_SME_DISASSOC_RSP); CASE_RETURN_STRING(eWNI_SME_DISASSOC_IND); CASE_RETURN_STRING(eWNI_SME_DISASSOC_CNF); CASE_RETURN_STRING(eWNI_SME_DEAUTH_REQ); CASE_RETURN_STRING(eWNI_SME_DEAUTH_RSP); CASE_RETURN_STRING(eWNI_SME_DEAUTH_IND); CASE_RETURN_STRING(eWNI_SME_WM_STATUS_CHANGE_NTF); CASE_RETURN_STRING(eWNI_SME_IBSS_NEW_PEER_IND); CASE_RETURN_STRING(eWNI_SME_IBSS_PEER_DEPARTED_IND); CASE_RETURN_STRING(eWNI_SME_START_BSS_REQ); CASE_RETURN_STRING(eWNI_SME_START_BSS_RSP); CASE_RETURN_STRING(eWNI_SME_AUTH_IND); CASE_RETURN_STRING(eWNI_SME_ASSOC_IND); CASE_RETURN_STRING(eWNI_SME_ASSOC_CNF); CASE_RETURN_STRING(eWNI_SME_REASSOC_IND); 
CASE_RETURN_STRING(eWNI_SME_REASSOC_CNF); CASE_RETURN_STRING(eWNI_SME_SWITCH_CHL_REQ); CASE_RETURN_STRING(eWNI_SME_SWITCH_CHL_RSP); CASE_RETURN_STRING(eWNI_SME_STOP_BSS_REQ); CASE_RETURN_STRING(eWNI_SME_STOP_BSS_RSP); CASE_RETURN_STRING(eWNI_SME_DEL_BA_PEER_IND); CASE_RETURN_STRING(eWNI_SME_DEFINE_QOS_REQ); CASE_RETURN_STRING(eWNI_SME_DEFINE_QOS_RSP); CASE_RETURN_STRING(eWNI_SME_DELETE_QOS_REQ); CASE_RETURN_STRING(eWNI_SME_DELETE_QOS_RSP); CASE_RETURN_STRING(eWNI_SME_PROMISCUOUS_MODE_REQ); CASE_RETURN_STRING(eWNI_SME_PROMISCUOUS_MODE_RSP); CASE_RETURN_STRING(eWNI_SME_LINK_TEST_START_REQ); CASE_RETURN_STRING(eWNI_SME_LINK_TEST_START_RSP); CASE_RETURN_STRING(eWNI_SME_LINK_TEST_STOP_REQ); CASE_RETURN_STRING(eWNI_SME_LINK_TEST_STOP_RSP); CASE_RETURN_STRING(eWNI_SME_LINK_TEST_REPORT_IND); CASE_RETURN_STRING(eWNI_SME_NEIGHBOR_BSS_IND); CASE_RETURN_STRING(eWNI_SME_MEASUREMENT_REQ); CASE_RETURN_STRING(eWNI_SME_MEASUREMENT_RSP); CASE_RETURN_STRING(eWNI_SME_MEASUREMENT_IND); CASE_RETURN_STRING(eWNI_SME_SET_WDS_INFO_REQ); CASE_RETURN_STRING(eWNI_SME_SET_WDS_INFO_RSP); CASE_RETURN_STRING(eWNI_SME_WDS_INFO_IND); CASE_RETURN_STRING(eWNI_SME_SET_POWER_REQ); CASE_RETURN_STRING(eWNI_SME_SET_POWER_RSP); CASE_RETURN_STRING(eWNI_SME_CLIENT_SIDE_LOAD_BALANCE_REQ); CASE_RETURN_STRING(eWNI_SME_CLIENT_SIDE_LOAD_BALANCE_RSP); CASE_RETURN_STRING(eWNI_SME_SELECT_CHANNEL_REQ); CASE_RETURN_STRING(eWNI_SME_SELECT_CHANNEL_RSP); CASE_RETURN_STRING(eWNI_SME_SET_PROPRIETARY_IE_REQ); CASE_RETURN_STRING(eWNI_SME_SET_PROPRIETARY_IE_RSP); // #endif CASE_RETURN_STRING(eWNI_SME_DISCARD_SKB_NTF); // Used to cleanup SKBs by HDD CASE_RETURN_STRING(eWNI_SME_DEAUTH_CNF); CASE_RETURN_STRING(eWNI_SME_MIC_FAILURE_IND); CASE_RETURN_STRING(eWNI_SME_ADDTS_REQ); CASE_RETURN_STRING(eWNI_SME_ADDTS_RSP); CASE_RETURN_STRING(eWNI_SME_ADDTS_CNF); CASE_RETURN_STRING(eWNI_SME_ADDTS_IND); CASE_RETURN_STRING(eWNI_SME_DELTS_REQ); CASE_RETURN_STRING(eWNI_SME_DELTS_RSP); CASE_RETURN_STRING(eWNI_SME_DELTS_IND); 
CASE_RETURN_STRING(eWNI_SME_SET_BACKGROUND_SCAN_MODE_REQ); CASE_RETURN_STRING(eWNI_SME_SWITCH_CHL_CB_PRIMARY_REQ); CASE_RETURN_STRING(eWNI_SME_SWITCH_CHL_CB_PRIMARY_RSP); CASE_RETURN_STRING(eWNI_SME_SWITCH_CHL_CB_SECONDARY_REQ); CASE_RETURN_STRING(eWNI_SME_SWITCH_CHL_CB_SECONDARY_RSP); CASE_RETURN_STRING(eWNI_SME_PROBE_REQ); CASE_RETURN_STRING(eWNI_SME_STA_STAT_REQ); CASE_RETURN_STRING(eWNI_SME_STA_STAT_RSP); CASE_RETURN_STRING(eWNI_SME_AGGR_STAT_REQ); CASE_RETURN_STRING(eWNI_SME_AGGR_STAT_RSP); CASE_RETURN_STRING(eWNI_SME_GLOBAL_STAT_REQ); CASE_RETURN_STRING(eWNI_SME_GLOBAL_STAT_RSP); CASE_RETURN_STRING(eWNI_SME_STAT_SUMM_REQ); CASE_RETURN_STRING(eWNI_SME_STAT_SUMM_RSP); CASE_RETURN_STRING(eWNI_SME_REMOVEKEY_REQ); CASE_RETURN_STRING(eWNI_SME_REMOVEKEY_RSP); CASE_RETURN_STRING(eWNI_SME_GET_SCANNED_CHANNEL_REQ); CASE_RETURN_STRING(eWNI_SME_GET_SCANNED_CHANNEL_RSP); CASE_RETURN_STRING(eWNI_SME_SET_TX_POWER_REQ); CASE_RETURN_STRING(eWNI_SME_SET_TX_POWER_RSP); CASE_RETURN_STRING(eWNI_SME_GET_TX_POWER_REQ); CASE_RETURN_STRING(eWNI_SME_GET_TX_POWER_RSP); CASE_RETURN_STRING(eWNI_SME_GET_NOISE_REQ); CASE_RETURN_STRING(eWNI_SME_GET_NOISE_RSP); CASE_RETURN_STRING(eWNI_SME_LOW_RSSI_IND); CASE_RETURN_STRING(eWNI_SME_GET_STATISTICS_REQ); CASE_RETURN_STRING(eWNI_SME_GET_STATISTICS_RSP); CASE_RETURN_STRING(eWNI_SME_GET_RSSI_REQ); CASE_RETURN_STRING(eWNI_SME_GET_ASSOC_STAS_REQ); CASE_RETURN_STRING(eWNI_SME_TKIP_CNTR_MEAS_REQ); CASE_RETURN_STRING(eWNI_SME_UPDATE_APWPSIE_REQ); CASE_RETURN_STRING(eWNI_SME_GET_WPSPBC_SESSION_REQ); CASE_RETURN_STRING(eWNI_SME_WPS_PBC_PROBE_REQ_IND); CASE_RETURN_STRING(eWNI_SME_SET_APWPARSNIEs_REQ); CASE_RETURN_STRING(eWNI_SME_UPPER_LAYER_ASSOC_CNF); CASE_RETURN_STRING(eWNI_SME_HIDE_SSID_REQ); CASE_RETURN_STRING(eWNI_SME_REMAIN_ON_CHANNEL_REQ); CASE_RETURN_STRING(eWNI_SME_REMAIN_ON_CHN_IND); CASE_RETURN_STRING(eWNI_SME_REMAIN_ON_CHN_RSP); CASE_RETURN_STRING(eWNI_SME_MGMT_FRM_IND); CASE_RETURN_STRING(eWNI_SME_REMAIN_ON_CHN_RDY_IND); 
CASE_RETURN_STRING(eWNI_SME_SEND_ACTION_FRAME_IND); CASE_RETURN_STRING(eWNI_SME_ACTION_FRAME_SEND_CNF); CASE_RETURN_STRING(eWNI_SME_ABORT_REMAIN_ON_CHAN_IND); CASE_RETURN_STRING(eWNI_SME_UPDATE_NOA); CASE_RETURN_STRING(eWNI_SME_CLEAR_DFS_CHANNEL_LIST); CASE_RETURN_STRING(eWNI_SME_PRE_CHANNEL_SWITCH_FULL_POWER); CASE_RETURN_STRING(eWNI_PMC_MSG_TYPES_BEGIN); //General Power Save Messages CASE_RETURN_STRING(eWNI_PMC_PWR_SAVE_CFG); //BMPS Messages CASE_RETURN_STRING(eWNI_PMC_ENTER_BMPS_REQ); CASE_RETURN_STRING(eWNI_PMC_ENTER_BMPS_RSP); CASE_RETURN_STRING(eWNI_PMC_EXIT_BMPS_REQ); CASE_RETURN_STRING(eWNI_PMC_EXIT_BMPS_RSP); CASE_RETURN_STRING(eWNI_PMC_EXIT_BMPS_IND); //IMPS Messages. CASE_RETURN_STRING(eWNI_PMC_ENTER_IMPS_REQ); CASE_RETURN_STRING(eWNI_PMC_ENTER_IMPS_RSP); CASE_RETURN_STRING(eWNI_PMC_EXIT_IMPS_REQ); CASE_RETURN_STRING(eWNI_PMC_EXIT_IMPS_RSP); //UAPSD Messages CASE_RETURN_STRING(eWNI_PMC_ENTER_UAPSD_REQ); CASE_RETURN_STRING(eWNI_PMC_ENTER_UAPSD_RSP); CASE_RETURN_STRING(eWNI_PMC_EXIT_UAPSD_REQ); CASE_RETURN_STRING(eWNI_PMC_EXIT_UAPSD_RSP); CASE_RETURN_STRING(eWNI_PMC_SMPS_STATE_IND); CASE_RETURN_STRING(eWNI_PMC_WOWL_ADD_BCAST_PTRN); CASE_RETURN_STRING(eWNI_PMC_WOWL_DEL_BCAST_PTRN); CASE_RETURN_STRING(eWNI_PMC_ENTER_WOWL_REQ); CASE_RETURN_STRING(eWNI_PMC_ENTER_WOWL_RSP); CASE_RETURN_STRING(eWNI_PMC_EXIT_WOWL_REQ); CASE_RETURN_STRING(eWNI_PMC_EXIT_WOWL_RSP); #ifdef WLAN_FEATURE_PACKET_FILTERING CASE_RETURN_STRING(eWNI_PMC_PACKET_COALESCING_FILTER_MATCH_COUNT_RSP); #endif // WLAN_FEATURE_PACKET_FILTERING #if defined WLAN_FEATURE_VOWIFI CASE_RETURN_STRING(eWNI_SME_RRM_MSG_TYPE_BEGIN); CASE_RETURN_STRING(eWNI_SME_NEIGHBOR_REPORT_REQ_IND); CASE_RETURN_STRING(eWNI_SME_NEIGHBOR_REPORT_IND); CASE_RETURN_STRING(eWNI_SME_BEACON_REPORT_REQ_IND); CASE_RETURN_STRING(eWNI_SME_BEACON_REPORT_RESP_XMIT_IND); #endif CASE_RETURN_STRING(eWNI_SME_ADD_STA_SELF_REQ); CASE_RETURN_STRING(eWNI_SME_ADD_STA_SELF_RSP); CASE_RETURN_STRING(eWNI_SME_DEL_STA_SELF_REQ); 
CASE_RETURN_STRING(eWNI_SME_DEL_STA_SELF_RSP); #if defined WLAN_FEATURE_VOWIFI_11R CASE_RETURN_STRING(eWNI_SME_FT_PRE_AUTH_REQ); CASE_RETURN_STRING(eWNI_SME_FT_PRE_AUTH_RSP); CASE_RETURN_STRING(eWNI_SME_FT_UPDATE_KEY); CASE_RETURN_STRING(eWNI_SME_FT_AGGR_QOS_REQ); CASE_RETURN_STRING(eWNI_SME_FT_AGGR_QOS_RSP); #endif #if defined FEATURE_WLAN_CCX CASE_RETURN_STRING(eWNI_SME_CCX_ADJACENT_AP_REPORT); #endif CASE_RETURN_STRING(eWNI_SME_REGISTER_MGMT_FRAME_REQ); CASE_RETURN_STRING(eWNI_SME_COEX_IND); #ifdef FEATURE_WLAN_SCAN_PNO CASE_RETURN_STRING(eWNI_SME_PREF_NETWORK_FOUND_IND); #endif // FEATURE_WLAN_SCAN_PNO CASE_RETURN_STRING(eWNI_SME_TX_PER_HIT_IND); CASE_RETURN_STRING(eWNI_SME_CHANGE_COUNTRY_CODE); CASE_RETURN_STRING(eWNI_SME_PRE_SWITCH_CHL_IND); CASE_RETURN_STRING(eWNI_SME_POST_SWITCH_CHL_IND); CASE_RETURN_STRING(eWNI_SME_MAX_ASSOC_EXCEEDED); CASE_RETURN_STRING(eWNI_SME_BTAMP_LOG_LINK_IND);//to serialize the create/accpet LL req from HCI #ifdef WLAN_FEATURE_GTK_OFFLOAD CASE_RETURN_STRING(eWNI_PMC_GTK_OFFLOAD_GETINFO_RSP); #endif // WLAN_FEATURE_GTK_OFFLOAD #ifdef WLAN_WAKEUP_EVENTS CASE_RETURN_STRING(eWNI_SME_WAKE_REASON_IND); #endif // WLAN_WAKEUP_EVENTS CASE_RETURN_STRING(eWNI_SME_EXCLUDE_UNENCRYPTED); CASE_RETURN_STRING(eWNI_SME_RSSI_IND); //RSSI indication from TL to be serialized on MC thread CASE_RETURN_STRING(eWNI_SME_MSG_TYPES_END); CASE_RETURN_STRING(eWNI_SME_GET_ROAM_RSSI_REQ); CASE_RETURN_STRING(eWNI_SME_GET_ROAM_RSSI_RSP); default: return( (tANI_U8*)"UNKNOWN" ); break; } } tANI_U8* macTraceGetWdaMsgString( tANI_U16 wdaMsg ) { switch( wdaMsg ) { CASE_RETURN_STRING(WDA_APP_SETUP_NTF); CASE_RETURN_STRING(WDA_NIC_OPER_NTF); CASE_RETURN_STRING(WDA_INIT_START_REQ); CASE_RETURN_STRING(WDA_RESET_REQ); CASE_RETURN_STRING(WDA_HDD_ADDBA_REQ); CASE_RETURN_STRING(WDA_HDD_ADDBA_RSP); CASE_RETURN_STRING(WDA_DELETEBA_IND); CASE_RETURN_STRING(WDA_BA_FAIL_IND); CASE_RETURN_STRING(WDA_TL_FLUSH_AC_REQ); CASE_RETURN_STRING(WDA_TL_FLUSH_AC_RSP); 
CASE_RETURN_STRING(WDA_ITC_MSG_TYPES_BEGIN); CASE_RETURN_STRING(WDA_WDT_KAM_RSP); CASE_RETURN_STRING(WDA_TIMER_TEMP_MEAS_REQ); CASE_RETURN_STRING(WDA_TIMER_PERIODIC_STATS_COLLECT_REQ); CASE_RETURN_STRING(WDA_CAL_REQ_NTF); CASE_RETURN_STRING(WDA_MNT_OPEN_TPC_TEMP_MEAS_REQ); CASE_RETURN_STRING(WDA_CCA_MONITOR_INTERVAL_TO); CASE_RETURN_STRING(WDA_CCA_MONITOR_DURATION_TO); CASE_RETURN_STRING(WDA_CCA_MONITOR_START); CASE_RETURN_STRING(WDA_CCA_MONITOR_STOP); CASE_RETURN_STRING(WDA_CCA_CHANGE_MODE); CASE_RETURN_STRING(WDA_TIMER_WRAP_AROUND_STATS_COLLECT_REQ); CASE_RETURN_STRING(WDA_ADD_STA_REQ); CASE_RETURN_STRING(WDA_ADD_STA_RSP); CASE_RETURN_STRING(WDA_ADD_STA_SELF_RSP); CASE_RETURN_STRING(WDA_DEL_STA_SELF_RSP); CASE_RETURN_STRING(WDA_DELETE_STA_REQ); CASE_RETURN_STRING(WDA_DELETE_STA_RSP); CASE_RETURN_STRING(WDA_ADD_BSS_REQ); CASE_RETURN_STRING(WDA_ADD_BSS_RSP); CASE_RETURN_STRING(WDA_DELETE_BSS_REQ); CASE_RETURN_STRING(WDA_DELETE_BSS_RSP); CASE_RETURN_STRING(WDA_INIT_SCAN_REQ); CASE_RETURN_STRING(WDA_INIT_SCAN_RSP); CASE_RETURN_STRING(WDA_START_SCAN_REQ); CASE_RETURN_STRING(WDA_START_SCAN_RSP); CASE_RETURN_STRING(WDA_END_SCAN_REQ); CASE_RETURN_STRING(WDA_END_SCAN_RSP); CASE_RETURN_STRING(WDA_FINISH_SCAN_REQ); CASE_RETURN_STRING(WDA_FINISH_SCAN_RSP); CASE_RETURN_STRING(WDA_SEND_BEACON_REQ); CASE_RETURN_STRING(WDA_SEND_BEACON_RSP); CASE_RETURN_STRING(WDA_INIT_CFG_REQ); CASE_RETURN_STRING(WDA_INIT_CFG_RSP); CASE_RETURN_STRING(WDA_INIT_WM_CFG_REQ); CASE_RETURN_STRING(WDA_INIT_WM_CFG_RSP); CASE_RETURN_STRING(WDA_SET_BSSKEY_REQ); CASE_RETURN_STRING(WDA_SET_BSSKEY_RSP); CASE_RETURN_STRING(WDA_SET_STAKEY_REQ); CASE_RETURN_STRING(WDA_SET_STAKEY_RSP); CASE_RETURN_STRING(WDA_DPU_STATS_REQ); CASE_RETURN_STRING(WDA_DPU_STATS_RSP); CASE_RETURN_STRING(WDA_GET_DPUINFO_REQ); CASE_RETURN_STRING(WDA_GET_DPUINFO_RSP); CASE_RETURN_STRING(WDA_UPDATE_EDCA_PROFILE_IND); CASE_RETURN_STRING(WDA_UPDATE_STARATEINFO_REQ); CASE_RETURN_STRING(WDA_UPDATE_STARATEINFO_RSP); 
CASE_RETURN_STRING(WDA_UPDATE_BEACON_IND); CASE_RETURN_STRING(WDA_UPDATE_CF_IND); CASE_RETURN_STRING(WDA_CHNL_SWITCH_REQ); CASE_RETURN_STRING(WDA_ADD_TS_REQ); CASE_RETURN_STRING(WDA_DEL_TS_REQ); CASE_RETURN_STRING(WDA_SOFTMAC_TXSTAT_REPORT); CASE_RETURN_STRING(WDA_MBOX_SENDMSG_COMPLETE_IND); CASE_RETURN_STRING(WDA_EXIT_BMPS_REQ); CASE_RETURN_STRING(WDA_EXIT_BMPS_RSP); CASE_RETURN_STRING(WDA_EXIT_BMPS_IND); CASE_RETURN_STRING(WDA_ENTER_BMPS_REQ); CASE_RETURN_STRING(WDA_ENTER_BMPS_RSP); CASE_RETURN_STRING(WDA_BMPS_STATUS_IND); CASE_RETURN_STRING(WDA_MISSED_BEACON_IND); CASE_RETURN_STRING(WDA_CFG_RXP_FILTER_REQ); CASE_RETURN_STRING(WDA_CFG_RXP_FILTER_RSP); CASE_RETURN_STRING(WDA_SWITCH_CHANNEL_RSP); CASE_RETURN_STRING(WDA_P2P_NOA_ATTR_IND); CASE_RETURN_STRING(WDA_P2P_NOA_START_IND); CASE_RETURN_STRING(WDA_PWR_SAVE_CFG); CASE_RETURN_STRING(WDA_REGISTER_PE_CALLBACK); CASE_RETURN_STRING(WDA_SOFTMAC_MEM_READREQUEST); CASE_RETURN_STRING(WDA_SOFTMAC_MEM_WRITEREQUEST); CASE_RETURN_STRING(WDA_SOFTMAC_MEM_READRESPONSE); CASE_RETURN_STRING(WDA_SOFTMAC_BULKREGWRITE_CONFIRM); CASE_RETURN_STRING(WDA_SOFTMAC_BULKREGREAD_RESPONSE); CASE_RETURN_STRING(WDA_SOFTMAC_HOSTMESG_MSGPROCESSRESULT); CASE_RETURN_STRING(WDA_ADDBA_REQ); CASE_RETURN_STRING(WDA_ADDBA_RSP); CASE_RETURN_STRING(WDA_DELBA_IND); CASE_RETURN_STRING(WDA_DEL_BA_IND); CASE_RETURN_STRING(WDA_MIC_FAILURE_IND); CASE_RETURN_STRING(WDA_DELBA_REQ); CASE_RETURN_STRING(WDA_IBSS_STA_ADD); CASE_RETURN_STRING(WDA_TIMER_ADJUST_ADAPTIVE_THRESHOLD_IND); CASE_RETURN_STRING(WDA_SET_LINK_STATE); CASE_RETURN_STRING(WDA_SET_LINK_STATE_RSP); CASE_RETURN_STRING(WDA_ENTER_IMPS_REQ); CASE_RETURN_STRING(WDA_ENTER_IMPS_RSP); CASE_RETURN_STRING(WDA_EXIT_IMPS_RSP); CASE_RETURN_STRING(WDA_EXIT_IMPS_REQ); CASE_RETURN_STRING(WDA_SOFTMAC_HOSTMESG_PS_STATUS_IND); CASE_RETURN_STRING(WDA_POSTPONE_ENTER_IMPS_RSP); CASE_RETURN_STRING(WDA_STA_STAT_REQ); CASE_RETURN_STRING(WDA_GLOBAL_STAT_REQ); CASE_RETURN_STRING(WDA_AGGR_STAT_REQ); 
CASE_RETURN_STRING(WDA_STA_STAT_RSP); CASE_RETURN_STRING(WDA_GLOBAL_STAT_RSP); CASE_RETURN_STRING(WDA_AGGR_STAT_RSP); CASE_RETURN_STRING(WDA_STAT_SUMM_REQ); CASE_RETURN_STRING(WDA_STAT_SUMM_RSP); CASE_RETURN_STRING(WDA_REMOVE_BSSKEY_REQ); CASE_RETURN_STRING(WDA_REMOVE_BSSKEY_RSP); CASE_RETURN_STRING(WDA_REMOVE_STAKEY_REQ); CASE_RETURN_STRING(WDA_REMOVE_STAKEY_RSP); CASE_RETURN_STRING(WDA_SET_STA_BCASTKEY_REQ); CASE_RETURN_STRING(WDA_SET_STA_BCASTKEY_RSP); CASE_RETURN_STRING(WDA_REMOVE_STA_BCASTKEY_REQ); CASE_RETURN_STRING(WDA_REMOVE_STA_BCASTKEY_RSP); CASE_RETURN_STRING(WDA_ADD_TS_RSP); CASE_RETURN_STRING(WDA_DPU_MIC_ERROR); CASE_RETURN_STRING(WDA_TIMER_BA_ACTIVITY_REQ); CASE_RETURN_STRING(WDA_TIMER_CHIP_MONITOR_TIMEOUT); CASE_RETURN_STRING(WDA_TIMER_TRAFFIC_ACTIVITY_REQ); CASE_RETURN_STRING(WDA_TIMER_ADC_RSSI_STATS); #ifdef FEATURE_WLAN_CCX CASE_RETURN_STRING(WDA_TSM_STATS_REQ); CASE_RETURN_STRING(WDA_TSM_STATS_RSP); #endif CASE_RETURN_STRING(WDA_UPDATE_UAPSD_IND); CASE_RETURN_STRING(WDA_SET_MIMOPS_REQ); CASE_RETURN_STRING(WDA_SET_MIMOPS_RSP); CASE_RETURN_STRING(WDA_SYS_READY_IND ); CASE_RETURN_STRING(WDA_SET_TX_POWER_REQ); CASE_RETURN_STRING(WDA_SET_TX_POWER_RSP); CASE_RETURN_STRING(WDA_GET_TX_POWER_REQ); CASE_RETURN_STRING(WDA_GET_TX_POWER_RSP); CASE_RETURN_STRING(WDA_GET_NOISE_REQ ); CASE_RETURN_STRING(WDA_GET_NOISE_RSP); CASE_RETURN_STRING(WDA_SET_TX_PER_TRACKING_REQ); CASE_RETURN_STRING(WDA_TRANSMISSION_CONTROL_IND); CASE_RETURN_STRING(WDA_INIT_RADAR_IND); CASE_RETURN_STRING(WDA_BEACON_PRE_IND ); CASE_RETURN_STRING(WDA_ENTER_UAPSD_REQ); CASE_RETURN_STRING(WDA_ENTER_UAPSD_RSP); CASE_RETURN_STRING(WDA_EXIT_UAPSD_REQ ); CASE_RETURN_STRING(WDA_EXIT_UAPSD_RSP ); CASE_RETURN_STRING(WDA_LOW_RSSI_IND ); CASE_RETURN_STRING(WDA_BEACON_FILTER_IND); CASE_RETURN_STRING(WDA_WOWL_ADD_BCAST_PTRN); CASE_RETURN_STRING(WDA_WOWL_DEL_BCAST_PTRN); CASE_RETURN_STRING(WDA_WOWL_ENTER_REQ); CASE_RETURN_STRING(WDA_WOWL_ENTER_RSP); CASE_RETURN_STRING(WDA_WOWL_EXIT_REQ ); 
CASE_RETURN_STRING(WDA_WOWL_EXIT_RSP ); CASE_RETURN_STRING(WDA_TX_COMPLETE_IND); CASE_RETURN_STRING(WDA_TIMER_RA_COLLECT_AND_ADAPT); CASE_RETURN_STRING(WDA_GET_STATISTICS_REQ); CASE_RETURN_STRING(WDA_GET_STATISTICS_RSP); CASE_RETURN_STRING(WDA_SET_KEY_DONE); CASE_RETURN_STRING(WDA_BTC_SET_CFG); CASE_RETURN_STRING(WDA_SIGNAL_BT_EVENT); CASE_RETURN_STRING(WDA_HANDLE_FW_MBOX_RSP); CASE_RETURN_STRING(WDA_UPDATE_PROBE_RSP_TEMPLATE_IND); CASE_RETURN_STRING(WDA_SIGNAL_BTAMP_EVENT); #ifdef FEATURE_OEM_DATA_SUPPORT CASE_RETURN_STRING(WDA_START_OEM_DATA_REQ ); CASE_RETURN_STRING(WDA_START_OEM_DATA_RSP); CASE_RETURN_STRING(WDA_FINISH_OEM_DATA_REQ); #endif //SUPPORT_BEACON_FILTER CASE_RETURN_STRING(WDA_SET_MAX_TX_POWER_REQ); CASE_RETURN_STRING(WDA_SET_MAX_TX_POWER_RSP); CASE_RETURN_STRING(WDA_SEND_MSG_COMPLETE); CASE_RETURN_STRING(WDA_SET_HOST_OFFLOAD); CASE_RETURN_STRING(WDA_SET_KEEP_ALIVE); #ifdef WLAN_NS_OFFLOAD CASE_RETURN_STRING(WDA_SET_NS_OFFLOAD); #endif //WLAN_NS_OFFLOAD CASE_RETURN_STRING(WDA_ADD_STA_SELF_REQ); CASE_RETURN_STRING(WDA_DEL_STA_SELF_REQ); CASE_RETURN_STRING(WDA_SET_P2P_GO_NOA_REQ); CASE_RETURN_STRING(WDA_TX_COMPLETE_TIMEOUT_IND); CASE_RETURN_STRING(WDA_WLAN_SUSPEND_IND); CASE_RETURN_STRING(WDA_WLAN_RESUME_REQ); CASE_RETURN_STRING(WDA_MSG_TYPES_END); CASE_RETURN_STRING(WDA_MMH_TXMB_READY_EVT); CASE_RETURN_STRING(WDA_MMH_RXMB_DONE_EVT); CASE_RETURN_STRING(WDA_MMH_MSGQ_NE_EVT); #ifdef WLAN_FEATURE_VOWIFI_11R CASE_RETURN_STRING(WDA_AGGR_QOS_REQ); CASE_RETURN_STRING(WDA_AGGR_QOS_RSP); #endif /* WLAN_FEATURE_VOWIFI_11R */ CASE_RETURN_STRING(WDA_FTM_CMD_REQ); CASE_RETURN_STRING(WDA_FTM_CMD_RSP); #ifdef FEATURE_WLAN_SCAN_PNO CASE_RETURN_STRING(WDA_SET_PNO_REQ); CASE_RETURN_STRING(WDA_SET_RSSI_FILTER_REQ); CASE_RETURN_STRING(WDA_UPDATE_SCAN_PARAMS_REQ); CASE_RETURN_STRING(WDA_SET_PNO_CHANGED_IND); #endif // FEATURE_WLAN_SCAN_PNO #ifdef WLAN_FEATURE_ROAM_SCAN_OFFLOAD CASE_RETURN_STRING(WDA_ROAM_SCAN_OFFLOAD_REQ); #endif #ifdef WLAN_WAKEUP_EVENTS 
CASE_RETURN_STRING(WDA_WAKE_REASON_IND); #endif // WLAN_WAKEUP_EVENTS #ifdef WLAN_FEATURE_PACKET_FILTERING CASE_RETURN_STRING(WDA_8023_MULTICAST_LIST_REQ); CASE_RETURN_STRING(WDA_RECEIVE_FILTER_SET_FILTER_REQ); CASE_RETURN_STRING(WDA_PACKET_COALESCING_FILTER_MATCH_COUNT_REQ); CASE_RETURN_STRING(WDA_PACKET_COALESCING_FILTER_MATCH_COUNT_RSP); CASE_RETURN_STRING(WDA_RECEIVE_FILTER_CLEAR_FILTER_REQ); #endif // WLAN_FEATURE_PACKET_FILTERING CASE_RETURN_STRING(WDA_SET_POWER_PARAMS_REQ); #ifdef WLAN_FEATURE_GTK_OFFLOAD CASE_RETURN_STRING(WDA_GTK_OFFLOAD_REQ); CASE_RETURN_STRING(WDA_GTK_OFFLOAD_GETINFO_REQ); CASE_RETURN_STRING(WDA_GTK_OFFLOAD_GETINFO_RSP); #endif //WLAN_FEATURE_GTK_OFFLOAD CASE_RETURN_STRING(WDA_SET_TM_LEVEL_REQ); #ifdef WLAN_FEATURE_11AC CASE_RETURN_STRING(WDA_UPDATE_OP_MODE); #endif default: return((tANI_U8*) "UNKNOWN" ); break; } } tANI_U8* macTraceGetLimMsgString( tANI_U16 limMsg ) { switch( limMsg ) { CASE_RETURN_STRING(SIR_LIM_RETRY_INTERRUPT_MSG); CASE_RETURN_STRING(SIR_BB_XPORT_MGMT_MSG ); CASE_RETURN_STRING(SIR_LIM_INV_KEY_INTERRUPT_MSG ); CASE_RETURN_STRING(SIR_LIM_KEY_ID_INTERRUPT_MSG ); CASE_RETURN_STRING(SIR_LIM_REPLAY_THRES_INTERRUPT_MSG ); CASE_RETURN_STRING(SIR_LIM_TD_DUMMY_CALLBACK_MSG ); CASE_RETURN_STRING(SIR_LIM_SCH_CLEAN_MSG ); CASE_RETURN_STRING(SIR_LIM_RADAR_DETECT_IND); CASE_RETURN_STRING(SIR_LIM_DEL_TS_IND); CASE_RETURN_STRING(SIR_LIM_ADD_BA_IND ); CASE_RETURN_STRING(SIR_LIM_DEL_BA_ALL_IND); CASE_RETURN_STRING(SIR_LIM_DELETE_STA_CONTEXT_IND); CASE_RETURN_STRING(SIR_LIM_DEL_BA_IND ); CASE_RETURN_STRING(SIR_LIM_UPDATE_BEACON); CASE_RETURN_STRING(SIR_LIM_MIN_CHANNEL_TIMEOUT); CASE_RETURN_STRING(SIR_LIM_MAX_CHANNEL_TIMEOUT); CASE_RETURN_STRING(SIR_LIM_JOIN_FAIL_TIMEOUT ); CASE_RETURN_STRING(SIR_LIM_AUTH_FAIL_TIMEOUT ); CASE_RETURN_STRING(SIR_LIM_AUTH_RSP_TIMEOUT); CASE_RETURN_STRING(SIR_LIM_ASSOC_FAIL_TIMEOUT); CASE_RETURN_STRING(SIR_LIM_REASSOC_FAIL_TIMEOUT); CASE_RETURN_STRING(SIR_LIM_HEART_BEAT_TIMEOUT); 
CASE_RETURN_STRING(SIR_LIM_CHANNEL_SCAN_TIMEOUT ); CASE_RETURN_STRING(SIR_LIM_PROBE_HB_FAILURE_TIMEOUT); CASE_RETURN_STRING(SIR_LIM_ADDTS_RSP_TIMEOUT ); CASE_RETURN_STRING(SIR_LIM_LINK_TEST_DURATION_TIMEOUT ); CASE_RETURN_STRING(SIR_LIM_HASH_MISS_THRES_TIMEOUT ); CASE_RETURN_STRING(SIR_LIM_CNF_WAIT_TIMEOUT ); CASE_RETURN_STRING(SIR_LIM_KEEPALIVE_TIMEOUT ); CASE_RETURN_STRING(SIR_LIM_UPDATE_OLBC_CACHEL_TIMEOUT ); CASE_RETURN_STRING(SIR_LIM_CHANNEL_SWITCH_TIMEOUT ); CASE_RETURN_STRING(SIR_LIM_QUIET_TIMEOUT ); CASE_RETURN_STRING(SIR_LIM_QUIET_BSS_TIMEOUT ); CASE_RETURN_STRING(SIR_LIM_WPS_OVERLAP_TIMEOUT); #ifdef WLAN_FEATURE_VOWIFI_11R CASE_RETURN_STRING(SIR_LIM_FT_PREAUTH_RSP_TIMEOUT); #endif CASE_RETURN_STRING(SIR_LIM_REMAIN_CHN_TIMEOUT); CASE_RETURN_STRING(SIR_LIM_INSERT_SINGLESHOT_NOA_TIMEOUT); CASE_RETURN_STRING(SIR_LIM_CONVERT_ACTIVE_CHANNEL_TO_PASSIVE); #ifdef WMM_APSD CASE_RETURN_STRING(SIR_LIM_WMM_APSD_SP_START_MSG_TYPE ); CASE_RETURN_STRING(SIR_LIM_WMM_APSD_SP_END_MSG_TYPE ); #endif CASE_RETURN_STRING(SIR_LIM_BEACON_GEN_IND ); CASE_RETURN_STRING(SIR_LIM_PERIODIC_PROBE_REQ_TIMEOUT); #ifdef FEATURE_WLAN_CCX CASE_RETURN_STRING(SIR_LIM_CCX_TSM_TIMEOUT); #endif CASE_RETURN_STRING(SIR_LIM_DISASSOC_ACK_TIMEOUT); CASE_RETURN_STRING(SIR_LIM_DEAUTH_ACK_TIMEOUT); CASE_RETURN_STRING(SIR_LIM_PERIODIC_JOIN_PROBE_REQ_TIMEOUT); CASE_RETURN_STRING(SIR_LIM_MSG_TYPES_END); default: return( (tANI_U8*)"UNKNOWN" ); break; } } tANI_U8* macTraceGetCfgMsgString( tANI_U16 cfgMsg ) { switch( cfgMsg ) { CASE_RETURN_STRING(WNI_CFG_PARAM_UPDATE_IND); CASE_RETURN_STRING(WNI_CFG_DNLD_REQ); CASE_RETURN_STRING(WNI_CFG_DNLD_CNF); CASE_RETURN_STRING(WNI_CFG_GET_RSP); CASE_RETURN_STRING(WNI_CFG_SET_CNF); CASE_RETURN_STRING(SIR_CFG_PARAM_UPDATE_IND); CASE_RETURN_STRING(SIR_CFG_DOWNLOAD_COMPLETE_IND); CASE_RETURN_STRING(WNI_CFG_SET_REQ_NO_RSP); default: return( (tANI_U8*)"UNKNOWN" ); break; } } tANI_U8* macTraceGetInfoLogString( tANI_U16 infoLog ) { switch( infoLog ) { 
CASE_RETURN_STRING(eLOG_NODROP_MISSED_BEACON_SCENARIO); CASE_RETURN_STRING(eLOG_PROC_DEAUTH_FRAME_SCENARIO); default: return( (tANI_U8*)"UNKNOWN" ); break; } } tANI_U8* macTraceGetModuleString( tANI_U8 moduleId ) { return ((tANI_U8*)"PE"); //return gVosTraceInfo[moduleId].moduleNameStr; } void macTraceReset(tpAniSirGlobal pMac) { } void macTrace(tpAniSirGlobal pMac, tANI_U8 code, tANI_U8 session, tANI_U32 data) { //Today macTrace is being invoked by PE only, need to remove this function once PE is migrated to using new trace API. macTraceNew(pMac, VOS_MODULE_ID_PE, code, session, data); } void macTraceNew(tpAniSirGlobal pMac, tANI_U8 module, tANI_U8 code, tANI_U8 session, tANI_U32 data) { vos_trace(module, code, session, data); } tANI_U8* macTraceMsgString(tpAniSirGlobal pMac, tANI_U32 msgType) { tANI_U16 msgId = (tANI_U16)MAC_TRACE_GET_MSG_ID(msgType); tANI_U8 moduleId = (tANI_U8)MAC_TRACE_GET_MODULE_ID(msgType); switch(moduleId) { case SIR_LIM_MODULE_ID: if(msgId >= SIR_LIM_ITC_MSG_TYPES_BEGIN) return macTraceGetLimMsgString((tANI_U16)msgType); else return macTraceGetSmeMsgString((tANI_U16)msgType); break; case SIR_WDA_MODULE_ID: return macTraceGetWdaMsgString((tANI_U16)msgType); case SIR_CFG_MODULE_ID: return macTraceGetCfgMsgString((tANI_U16)msgType); default: return ((tANI_U8*)"Unknown MsgType"); } } #endif
gpl-2.0
kratos1988/operating_systems
arch/arm/mach-at91/at91rm9200_time.c
584
6137
/* * linux/arch/arm/mach-at91/at91rm9200_time.c * * Copyright (C) 2003 SAN People * Copyright (C) 2003 ATMEL * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/clockchips.h> #include <asm/mach/time.h> #include <mach/at91_st.h> static unsigned long last_crtr; static u32 irqmask; static struct clock_event_device clkevt; /* * The ST_CRTR is updated asynchronously to the master clock ... but * the updates as seen by the CPU don't seem to be strictly monotonic. * Waiting until we read the same value twice avoids glitching. */ static inline unsigned long read_CRTR(void) { unsigned long x1, x2; x1 = at91_sys_read(AT91_ST_CRTR); do { x2 = at91_sys_read(AT91_ST_CRTR); if (x1 == x2) break; x1 = x2; } while (1); return x1; } /* * IRQ handler for the timer. */ static irqreturn_t at91rm9200_timer_interrupt(int irq, void *dev_id) { u32 sr = at91_sys_read(AT91_ST_SR) & irqmask; /* simulate "oneshot" timer with alarm */ if (sr & AT91_ST_ALMS) { clkevt.event_handler(&clkevt); return IRQ_HANDLED; } /* periodic mode should handle delayed ticks */ if (sr & AT91_ST_PITS) { u32 crtr = read_CRTR(); while (((crtr - last_crtr) & AT91_ST_CRTV) >= LATCH) { last_crtr += LATCH; clkevt.event_handler(&clkevt); } return IRQ_HANDLED; } /* this irq is shared ... 
*/ return IRQ_NONE; } static struct irqaction at91rm9200_timer_irq = { .name = "at91_tick", .flags = IRQF_SHARED | IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .handler = at91rm9200_timer_interrupt }; static cycle_t read_clk32k(struct clocksource *cs) { return read_CRTR(); } static struct clocksource clk32k = { .name = "32k_counter", .rating = 150, .read = read_clk32k, .mask = CLOCKSOURCE_MASK(20), .shift = 10, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; static void clkevt32k_mode(enum clock_event_mode mode, struct clock_event_device *dev) { /* Disable and flush pending timer interrupts */ at91_sys_write(AT91_ST_IDR, AT91_ST_PITS | AT91_ST_ALMS); (void) at91_sys_read(AT91_ST_SR); last_crtr = read_CRTR(); switch (mode) { case CLOCK_EVT_MODE_PERIODIC: /* PIT for periodic irqs; fixed rate of 1/HZ */ irqmask = AT91_ST_PITS; at91_sys_write(AT91_ST_PIMR, LATCH); break; case CLOCK_EVT_MODE_ONESHOT: /* ALM for oneshot irqs, set by next_event() * before 32 seconds have passed */ irqmask = AT91_ST_ALMS; at91_sys_write(AT91_ST_RTAR, last_crtr); break; case CLOCK_EVT_MODE_SHUTDOWN: case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_RESUME: irqmask = 0; break; } at91_sys_write(AT91_ST_IER, irqmask); } static int clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev) { unsigned long flags; u32 alm; int status = 0; BUG_ON(delta < 2); /* Use "raw" primitives so we behave correctly on RT kernels. */ raw_local_irq_save(flags); /* * According to Thomas Gleixner irqs are already disabled here. Simply * removing raw_local_irq_save above (and the matching * raw_local_irq_restore) was not accepted. See * http://thread.gmane.org/gmane.linux.ports.arm.kernel/41174 * So for now (2008-11-20) just warn once if irqs were not disabled ... */ WARN_ON_ONCE(!raw_irqs_disabled_flags(flags)); /* The alarm IRQ uses absolute time (now+delta), not the relative * time (delta) in our calling convention. Like all clockevents * using such "match" hardware, we have a race to defend against. 
* * Our defense here is to have set up the clockevent device so the * delta is at least two. That way we never end up writing RTAR * with the value then held in CRTR ... which would mean the match * wouldn't trigger until 32 seconds later, after CRTR wraps. */ alm = read_CRTR(); /* Cancel any pending alarm; flush any pending IRQ */ at91_sys_write(AT91_ST_RTAR, alm); (void) at91_sys_read(AT91_ST_SR); /* Schedule alarm by writing RTAR. */ alm += delta; at91_sys_write(AT91_ST_RTAR, alm); raw_local_irq_restore(flags); return status; } static struct clock_event_device clkevt = { .name = "at91_tick", .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .shift = 32, .rating = 150, .set_next_event = clkevt32k_next_event, .set_mode = clkevt32k_mode, }; /* * ST (system timer) module supports both clockevents and clocksource. */ void __init at91rm9200_timer_init(void) { /* Disable all timer interrupts, and clear any pending ones */ at91_sys_write(AT91_ST_IDR, AT91_ST_PITS | AT91_ST_WDOVF | AT91_ST_RTTINC | AT91_ST_ALMS); (void) at91_sys_read(AT91_ST_SR); /* Make IRQs happen for the system timer */ setup_irq(AT91_ID_SYS, &at91rm9200_timer_irq); /* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used * directly for the clocksource and all clockevents, after adjusting * its prescaler from the 1 Hz default. */ at91_sys_write(AT91_ST_RTMR, 1); /* Setup timer clockevent, with minimum of two ticks (important!!) */ clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift); clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt); clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1; clkevt.cpumask = cpumask_of(0); clockevents_register_device(&clkevt); /* register clocksource */ clk32k.mult = clocksource_hz2mult(AT91_SLOW_CLOCK, clk32k.shift); clocksource_register(&clk32k); } struct sys_timer at91rm9200_timer = { .init = at91rm9200_timer_init, };
gpl-2.0
attn1/cm-kernel-vivo-2.6.35
drivers/staging/usbip/usbip_event.c
584
3232
/*
 * Copyright (C) 2003-2008 Takahiro Hirofuchi
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
 * USA.
 */

#include "usbip_common.h"
#include <linux/kthread.h>

/*
 * Drain the events pending on @ud, dispatching the matching eh_ops
 * callback and clearing each handled bit from ud->event.
 *
 * Returns 0 when the pending events are drained, -1 when the error
 * handler must stop (USBIP_EH_BYE or an unknown event bit).
 *
 * NOTE(review): ud->event is read and cleared here without holding
 * ud->lock, while usbip_event_add() sets bits under the lock —
 * presumably tolerable because only this thread clears bits, but
 * worth confirming.
 */
static int event_handler(struct usbip_device *ud)
{
	usbip_dbg_eh("enter\n");

	/*
	 * Events are handled by only this thread.
	 */
	while (usbip_event_happened(ud)) {
		usbip_dbg_eh("pending event %lx\n", ud->event);

		/*
		 * NOTE: shutdown must come first.
		 * Shutdown the device.
		 */
		if (ud->event & USBIP_EH_SHUTDOWN) {
			ud->eh_ops.shutdown(ud);

			ud->event &= ~USBIP_EH_SHUTDOWN;

			break;
		}

		/* Stop the error handler. */
		if (ud->event & USBIP_EH_BYE)
			return -1;

		/* Reset the device. */
		if (ud->event & USBIP_EH_RESET) {
			ud->eh_ops.reset(ud);

			ud->event &= ~USBIP_EH_RESET;

			break;
		}

		/* Mark the device as unusable. */
		if (ud->event & USBIP_EH_UNUSABLE) {
			ud->eh_ops.unusable(ud);

			ud->event &= ~USBIP_EH_UNUSABLE;

			break;
		}

		/* NOTREACHED */
		printk(KERN_ERR "%s: unknown event\n", __func__);
		return -1;
	}

	return 0;
}

/*
 * Body of the event-handler kernel thread: handle pending events, then
 * sleep on ud->eh_waitq until usbip_event_add() posts a new one.  Exits
 * when a signal is pending or event_handler() reports failure/BYE.
 */
static void event_handler_loop(struct usbip_task *ut)
{
	struct usbip_device *ud = container_of(ut, struct usbip_device, eh);

	while (1) {
		if (signal_pending(current)) {
			usbip_dbg_eh("signal catched!\n");
			break;
		}

		if (event_handler(ud) < 0)
			break;

		wait_event_interruptible(ud->eh_waitq,
					 usbip_event_happened(ud));
		usbip_dbg_eh("wakeup\n");
	}
}

/*
 * Start the event-handler thread for @ud.
 *
 * Initializes the wait queue and clears the event mask, spawns a kthread
 * running event_handler_loop(), and blocks on eh->thread_done until the
 * thread has started.
 *
 * Returns 0 on success, or the PTR_ERR() of the failed kthread_run().
 */
int usbip_start_eh(struct usbip_device *ud)
{
	struct usbip_task *eh = &ud->eh;
	struct task_struct *th;

	init_waitqueue_head(&ud->eh_waitq);
	ud->event = 0;

	usbip_task_init(eh, "usbip_eh", event_handler_loop);

	th = kthread_run(usbip_thread, (void *)eh, "usbip");
	if (IS_ERR(th)) {
		printk(KERN_WARNING "Unable to start control thread\n");
		return PTR_ERR(th);
	}

	/* wait until the spawned thread has announced itself */
	wait_for_completion(&eh->thread_done);
	return 0;
}
EXPORT_SYMBOL_GPL(usbip_start_eh);

/*
 * Wait for @ud's event-handler thread to terminate.  A no-op when called
 * from the handler thread itself, since waiting for one's own completion
 * would deadlock.
 */
void usbip_stop_eh(struct usbip_device *ud)
{
	struct usbip_task *eh = &ud->eh;

	if (eh->thread == current)
		return; /* do not wait for myself */

	wait_for_completion(&eh->thread_done);
	usbip_dbg_eh("usbip_eh has finished\n");
}
EXPORT_SYMBOL_GPL(usbip_stop_eh);

/*
 * Post @event (a USBIP_EH_* bitmask) on @ud under ud->lock and wake the
 * event-handler thread.
 */
void usbip_event_add(struct usbip_device *ud, unsigned long event)
{
	spin_lock(&ud->lock);

	ud->event |= event;

	wake_up(&ud->eh_waitq);

	spin_unlock(&ud->lock);
}
EXPORT_SYMBOL_GPL(usbip_event_add);

/*
 * Return 1 if any event bit is pending on @ud, 0 otherwise.  The event
 * word is sampled under ud->lock.
 */
int usbip_event_happened(struct usbip_device *ud)
{
	int happened = 0;

	spin_lock(&ud->lock);

	if (ud->event != 0)
		happened = 1;

	spin_unlock(&ud->lock);

	return happened;
}
EXPORT_SYMBOL_GPL(usbip_event_happened);
gpl-2.0
ShinySide/G530P_Permissive
drivers/platform/msm/pft.c
584
46027
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Per-File-Tagger (PFT).
 *
 * This driver tags enterprise files for encryption/decryption,
 * as part of the Per-File-Encryption (PFE) feature.
 *
 * Enterprise registered applications are identified by their UID.
 *
 * The PFT exposes a character-device interface to the user-space
 * application, to handle the following commands:
 * 1. Update registered applications list
 * 2. Encryption (in-place) of a file that was created before.
 * 3. Set State - update the state.
 *
 * The PFT exposes kernel API hooks that are intercepting file operations
 * like create/open/read/write for tagging files and also for access control.
 * It utilizes the existing security framework hooks
 * that call the selinux hooks.
 *
 * The PFT exposes kernel API to the dm-req-crypt driver to provide the info
 * if a file is tagged or not. The dm-req-crypt driver is doing the
 * actual encryption/decryption.
 *
 * Tagging the file:
 * 1. Non-volatile tagging on storage using file extra-attribute (xattr).
 * 2. Volatile tagging on the file's inode, for fast access.
 */

/* Uncomment the line below to enable debug messages */
/* #define DEBUG 1 */

#define pr_fmt(fmt) "pft [%s]: " fmt, __func__

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/cred.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/fdtable.h>
#include <linux/selinux.h>
#include <linux/security.h>
#include <linux/pft.h>
#include <uapi/linux/msm_pft.h>

#include "objsec.h"

/* File tagging as encrypted/non-encrypted is valid */
#define PFT_TAG_MAGIC ((u32)(0xABC00000))

/* File tagged as encrypted */
#define PFT_TAG_ENCRYPTED BIT(16)

/* Tag layout: [magic:12][flags:4][key index:16] */
#define PFT_TAG_MAGIC_MASK 0xFFF00000
#define PFT_TAG_FLAGS_MASK 0x000F0000
#define PFT_TAG_KEY_MASK 0x0000FFFF

/* The default encryption key index */
#define PFT_DEFAULT_KEY_INDEX 1

/* The default key index for non-encrypted files */
#define PFT_NO_KEY 0

/* PFT extended attribute name */
#define XATTR_NAME_PFE "security.pfe"

/* PFT driver requested major number */
#define PFT_REQUESTED_MAJOR 213

/* PFT driver name */
#define DEVICE_NAME "pft"

/* Maximum registered applications */
#define PFT_MAX_APPS 1000

/* Maximum command size */
#define PFT_MAX_COMMAND_SIZE (PAGE_SIZE)

/* Current Process ID */
#define current_pid() ((u32)(current->pid))

/* Human-readable state names, indexed by enum pft_state value */
static const char *pft_state_name[PFT_STATE_MAX_INDEX] = {
	"deactivated",
	"deactivating",
	"key_removed",
	"removing_key",
	"key_loaded",
};

/**
 * struct pft_file_info - pft file node info.
 * @file: pointer to file stucture.
 * @pid: process ID.
 * @list: next list item.
 *
 * A node in the list of the current open encrypted files.
 */
struct pft_file_info {
	struct file *file;
	pid_t pid;
	struct list_head list;
};

/**
 * struct pft_device - device state structure.
 *
 * @open_count: device open count.
 * @major: device major number.
 * @state: Per-File-Encryption state.
 * @response: command response.
 * @pfm_pid: PFM process id.
 * @inplace_file: file for in-place encryption.
 * @uid_table: registered application array (UID).
 * @uid_count: number of registered applications.
 * @open_file_list: open encrypted file list.
 * @lock: lock protect list access.
 *
 * The open_count purpose is to ensure that only one user space
 * application uses this driver.
 * The open_file_list is used to close open encrypted files
 * after the key is removed from the encryption hardware.
 */
struct pft_device {
	struct cdev cdev;
	dev_t device_no;
	struct class *driver_class;
	int open_count;
	int major;
	enum pft_state state;
	struct pft_command_response response;
	u32 pfm_pid;
	struct file *inplace_file;
	u32 *uid_table;
	u32 uid_count;
	struct list_head open_file_list;
	struct mutex lock;
	bool is_chosen_lsm;
};

/* Device Driver State */
static struct pft_device *pft_dev;

static struct inode *pft_bio_get_inode(struct bio *bio);

/*
 * LSM hook: allocate the per-inode security blob used to cache the
 * PFE tag (see pft_get_inode_tag()).
 */
static int pft_inode_alloc_security(struct inode *inode)
{
	struct inode_security_struct *i_sec = NULL;

	i_sec = kzalloc(sizeof(*i_sec), GFP_KERNEL);

	if (i_sec == NULL) {
		pr_err("i_security malloc failure\n");
		return -ENOMEM;
	}

	inode->i_security = i_sec;

	return 0;
}

/* LSM hook: free the per-inode security blob. kzfree() also zeroizes it. */
static void pft_inode_free_security(struct inode *inode)
{
	kzfree(inode->i_security);
}

/*
 * PFT security hooks; registered only when PFT is the chosen LSM
 * (see pft_lsm_init()). The hook functions are defined later in this
 * file and declared via linux/pft.h.
 */
static struct security_operations pft_security_ops = {
	.name			= "pft",

	.inode_create		= pft_inode_create,
	.inode_post_create	= pft_inode_post_create,
	.inode_unlink		= pft_inode_unlink,
	.inode_mknod		= pft_inode_mknod,
	.inode_rename		= pft_inode_rename,
	.inode_setxattr		= pft_inode_set_xattr,
	.inode_alloc_security	= pft_inode_alloc_security,
	.inode_free_security	= pft_inode_free_security,

	.file_open		= pft_file_open,
	.file_permission	= pft_file_permission,
	.file_close		= pft_file_close,
	.allow_merge_bio	= pft_allow_merge_bio,
};

/*
 * Register PFT as an LSM when it is the chosen security module.
 * NOTE(review): registration failure deliberately returns 0 (driver
 * keeps working without LSM hooks) — confirm this is the intended
 * degradation mode.
 */
static int __init pft_lsm_init(struct pft_device *dev)
{
	int ret;

	/* Check if PFT is the chosen lsm via security_module_enable() */
	if (security_module_enable(&pft_security_ops)) {
		/* replace null callbacks with empty callbacks */
		security_fixup_ops(&pft_security_ops);
		ret = register_security(&pft_security_ops);
		if (ret) {
			pr_err("pft lsm registeration failed, ret=%d.\n", ret);
			return 0;
		}
		dev->is_chosen_lsm = true;
		pr_debug("pft is the chosen lsm, registered sucessfully !\n");
	} else {
		pr_debug("pft is not the chosen lsm.\n");
	}
	return 0;
}

/**
 * pft_is_ready() - driver is initialized and ready.
 *
 * Return: true if the driver is ready.
 */
static bool pft_is_ready(void)
{
	return  (pft_dev != NULL);
}

/**
 * file_to_filename() - get the filename from file pointer.
 * @filp: file pointer
 *
 * it is used for debug prints.
 *
 * Return: filename string or "unknown".
 */
static char *file_to_filename(struct file *filp)
{
	struct dentry *dentry = NULL;
	char *filename = NULL;

	if (!filp || !filp->f_dentry)
		return "unknown";

	dentry = filp->f_dentry;
	filename = dentry->d_iname;

	return filename;
}

/**
 * inode_to_filename() - get the filename from inode pointer.
 * @inode: inode pointer
 *
 * it is used for debug prints.
 *
 * Return: filename string or "unknown".
 */
static char *inode_to_filename(struct inode *inode)
{
	struct dentry *dentry = NULL;
	char *filename = NULL;

	if (hlist_empty(&inode->i_dentry))
		return "unknown";

	dentry = hlist_entry(inode->i_dentry.first,
		struct dentry, d_alias);
	filename = dentry->d_iname;

	return filename;
}

/**
 * pft_set_response() - set response error code.
 *
 * @error_code: The error code to return on response.
 */
static inline void pft_set_response(u32 error_code)
{
	pft_dev->response.error_code = error_code;
}

/**
 * pft_add_file()- Add the file to the list of opened encrypted
 * files.
 * @filp: file to add.
 *
 * Return: 0 on successful operation, negative value otherwise.
 */
static int pft_add_file(struct file *filp)
{
	struct pft_file_info *node = NULL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node) {
		pr_err("malloc failure\n");
		return -ENOMEM;
	}

	node->file = filp;
	INIT_LIST_HEAD(&node->list);

	/* pft_dev->lock protects open_file_list */
	mutex_lock(&pft_dev->lock);
	list_add(&node->list, &pft_dev->open_file_list);
	pr_debug("adding file %s to open list.\n", file_to_filename(filp));
	mutex_unlock(&pft_dev->lock);

	return 0;
}

/**
 * pft_remove_file()- Remove the given file from the list of
 * open encrypted files.
 * @filp: file to remove.
 *
 * Return: 0 on success, negative value on failure.
 */
static int pft_remove_file(struct file *filp)
{
	int ret = -ENOENT;
	struct pft_file_info *tmp = NULL;
	struct list_head *pos = NULL;
	struct list_head *next = NULL;
	bool found = false;

	mutex_lock(&pft_dev->lock);
	list_for_each_safe(pos, next, &pft_dev->open_file_list) {
		tmp = list_entry(pos, struct pft_file_info, list);
		if (filp == tmp->file) {
			found = true;
			break;
		}
	}

	if (found) {
		pr_debug("remove file %s. from open list.\n ",
			 file_to_filename(filp));
		list_del(&tmp->list);
		kfree(tmp);
		ret = 0;
	}
	mutex_unlock(&pft_dev->lock);

	return ret;
}

/**
 * pft_is_current_process_registered()- Check if current process
 * is registered.
 *
 * Scans the registered-UID table (under pft_dev->lock) for the
 * current task's UID.
 *
 * Return: true if current process is registered.
 */
static bool pft_is_current_process_registered(void)
{
	int is_registered = false;
	int i;
	u32 uid = current_uid();

	mutex_lock(&pft_dev->lock);
	for (i = 0; i < pft_dev->uid_count; i++) {
		if (pft_dev->uid_table[i] == uid) {
			pr_debug("current UID [%u] is registerd.\n", uid);
			is_registered = true;
			break;
		}
	}
	mutex_unlock(&pft_dev->lock);

	return is_registered;
}

/**
 * pft_is_xattr_supported() - Check if the filesystem supports
 * extended attributes.
 * @inode: pointer to the file inode
 *
 * Return: true if supported, false if not.
 */
static bool pft_is_xattr_supported(struct inode *inode)
{
	if (inode == NULL) {
		pr_err("invalid argument inode passed as NULL");
		return false;
	}

	if (inode->i_security == NULL) {
		pr_debug("i_security is NULL, not ready yet\n");
		return false;
	}

	if (inode->i_op == NULL) {
		pr_debug("i_op is NULL\n");
		return false;
	}

	if (inode->i_op->getxattr == NULL) {
		pr_debug_once("getxattr() not supported , filename=%s\n",
			      inode_to_filename(inode));
		return false;
	}

	if (inode->i_op->setxattr == NULL) {
		pr_debug("setxattr() not supported\n");
		return false;
	}

	return true;
}

/**
 * pft_get_inode_tag() - get the file tag.
 * @inode: pointer to the file inode
 *
 * Reads the cached tag from the inode's security blob;
 * 0 when no blob is attached.
 *
 * Return: tag
 */
static u32 pft_get_inode_tag(struct inode *inode)
{
	struct inode_security_struct *isec = inode->i_security;

	if (isec == NULL)
		return 0;

	return isec->tag;
}

/**
 * pft_get_inode_key_index() - get the file key.
 * @inode: pointer to the file inode
 *
 * Return: key index (low 16 bits of the tag)
 */
static inline u32 pft_get_inode_key_index(struct inode *inode)
{
	return pft_get_inode_tag(inode) & PFT_TAG_KEY_MASK;
}

/**
 * pft_is_tag_valid() - is the tag valid
 * @inode: pointer to the file inode
 *
 * The tagging is set to valid when an enterprise file is created
 * or when an file is opened first time after power up and the
 * xattr was checked to see if the file is encrypted or not.
 *
 * Return: true if the tag is valid.
 */
static inline bool pft_is_tag_valid(struct inode *inode)
{
	struct inode_security_struct *isec = inode->i_security;

	if (isec == NULL)
		return false;

	return ((isec->tag & PFT_TAG_MAGIC_MASK) == PFT_TAG_MAGIC) ?
		true : false;
}

/**
 * pft_is_file_encrypted() - is inode tagged as encrypted.
 *
 * @tag: holds the key index and tagging flags.
 *
 * Return: true if the file is encrypted.
 */
static inline bool pft_is_file_encrypted(u32 tag)
{
	return (tag & PFT_TAG_ENCRYPTED) ? true : false;
}

/**
 * pft_tag_inode_non_encrypted() - Tag the inode as
 * non-encrypted.
* @indoe: pointer to the file inode * * Tag file as non-encrypted, only the valid bit is set, * the encrypted bit is not set. */ static inline void pft_tag_inode_non_encrypted(struct inode *inode) { struct inode_security_struct *isec = inode->i_security; isec->tag = (u32)(PFT_TAG_MAGIC); } /** * pft_tag_inode_encrypted() - Tag the inode as encrypted. * @indoe: pointer to the file inode * * Set the valid bit, the encrypted bit, and the key index. */ static void pft_tag_inode_encrypted(struct inode *inode, u32 key_index) { struct inode_security_struct *isec = inode->i_security; isec->tag = key_index | PFT_TAG_ENCRYPTED | PFT_TAG_MAGIC; } /** * pft_get_file_tag()- get the file tag. * @dentry: pointer to file dentry. * @tag_ptr: pointer to tag. * * This is the major function for detecting tag files. * Get the tag from the inode if tag is valid, * or from the xattr if this is the 1st time after power up. * * Return: 0 on successe, negative value on failure. */ static int pft_get_file_tag(struct dentry *dentry, u32 *tag_ptr) { ssize_t size = 0; struct inode *inode; const char *xattr_name = XATTR_NAME_PFE; u32 key; if (!dentry || !dentry->d_inode || !tag_ptr) { pr_err("invalid param"); return -EINVAL; } inode = dentry->d_inode; if (pft_is_tag_valid(inode)) { *tag_ptr = pft_get_inode_tag(inode); return 0; } /* * For the first time reading the tag, the tag is not valid, hence * get xattr. */ size = inode->i_op->getxattr(dentry, xattr_name, &key, sizeof(key)); if (size == -ENODATA || size == -EOPNOTSUPP) { pft_tag_inode_non_encrypted(inode); *tag_ptr = pft_get_inode_tag(inode); } else if (size > 0) { pr_debug("First time file %s opened, found xattr = %u.\n", inode_to_filename(inode), key); pft_tag_inode_encrypted(inode, key); *tag_ptr = pft_get_inode_tag(inode); } else { pr_err("getxattr() failure, ret=%zu\n", size); return -EINVAL; } return 0; } /** * pft_tag_file() - Tag the file saving the key_index. * @dentry: file dentry. * @key_index: encryption key index. 
* * This is the major fuction for tagging a file. * Tag the file on both the xattr and the inode. * * Return: 0 on successe, negative value on failure. */ static int pft_tag_file(struct dentry *dentry, u32 key_index) { int size = 0; const char *xattr_name = XATTR_NAME_PFE; if (!dentry || !dentry->d_inode) { pr_err("invalid NULL param"); return -EINVAL; } if (!pft_is_xattr_supported(dentry->d_inode)) { pr_err("set xattr for file %s is not support.\n", dentry->d_iname); return -EINVAL; } size = dentry->d_inode->i_op->setxattr(dentry, xattr_name, &key_index, sizeof(key_index), 0); if (size < 0) { pr_err("failed to set xattr for file %s, ret =%d.\n", dentry->d_iname, size); return -EFAULT; } pft_tag_inode_encrypted(dentry->d_inode, key_index); pr_debug("file %s tagged encrypted\n", dentry->d_iname); return 0; } /** * pft_get_app_key_index() - get the application key index. * @uid: registered application UID * * Get key index based on the given registered application UID. * Currently only one key is supported. * * Return: encryption key index. */ static inline u32 pft_get_app_key_index(u32 uid) { return PFT_DEFAULT_KEY_INDEX; } /** * pft_is_encrypted_file() - is the file encrypted. * @dentry: file pointer. * * Return: true if the file is encrypted, false otherwise. */ static bool pft_is_encrypted_file(struct dentry *dentry) { int rc; u32 tag; if (!pft_is_ready()) return false; if (!pft_is_xattr_supported(dentry->d_inode)) return false; rc = pft_get_file_tag(dentry, &tag); if (rc < 0) return false; return pft_is_file_encrypted(tag); } /** * pft_is_inplace_inode() - is this the inode of file for * in-place encryption. * @inode: inode of file to check. * * Return: true if this file is being encrypted, false * otherwise. 
 */
static bool pft_is_inplace_inode(struct inode *inode)
{
	if (!pft_dev->inplace_file || !pft_dev->inplace_file->f_path.dentry)
		return false;

	return (pft_dev->inplace_file->f_path.dentry->d_inode == inode);
}

/**
 * pft_is_inplace_file() - is this the file for in-place
 * encryption.
 * @filp: file to check.
 *
 * A file struct might be allocated per process, inode should be
 * only one.
 *
 * Return: true if this file is being encrypted, false
 * otherwise.
 */
static inline bool pft_is_inplace_file(struct file *filp)
{
	if (!filp || !filp->f_path.dentry || !filp->f_path.dentry->d_inode)
		return false;

	return pft_is_inplace_inode(filp->f_path.dentry->d_inode);
}

/**
 * pft_get_key_index() - get the key index and other indications
 * @bio: Pointer to the bio to inspect
 * @key_index: Pointer to the return value of key index
 * @is_encrypted: Pointer to the return value.
 * @is_inplace: Pointer to the return value.
 *
 * Provides the given inode's encryption key index, and well as
 * indications whether the file is encrypted or is it currently
 * being in-placed encrypted.
 * This API is called by the dm-req-crypt to decide if to
 * encrypt/decrypt the file.
 * File tagging depends on the hooks to be called from selinux,
 * so if selinux is disabled then tagging is also not
 * valid.
 *
 * Return: 0 on success, negative value on failure.
 */
int pft_get_key_index(struct bio *bio, u32 *key_index,
		      bool *is_encrypted, bool *is_inplace)
{
	u32 tag = 0;
	struct inode *inode = NULL;

	if (!pft_is_ready())
		return -ENODEV;

	/* Tagging hooks run only when selinux or the PFT LSM is active */
	if (!selinux_is_enabled() && !pft_dev->is_chosen_lsm)
		return -ENODEV;

	if (!bio)
		return -EPERM;

	if (!is_encrypted) {
		pr_err("is_encrypted is NULL\n");
		return -EPERM;
	}

	if (!is_inplace) {
		pr_err("is_inplace is NULL\n");
		return -EPERM;
	}

	if (!key_index) {
		pr_err("key_index is NULL\n");
		return -EPERM;
	}

	inode = pft_bio_get_inode(bio);
	if (!inode)
		return -EINVAL;

	if (!pft_is_tag_valid(inode)) {
		pr_debug("file %s, Tag not valid\n", inode_to_filename(inode));
		return -EINVAL;
	}

	if (!pft_is_xattr_supported(inode)) {
		/* Filesystem cannot carry the tag: report plain-text */
		*is_encrypted = false;
		*is_inplace = false;
		*key_index = 0;
		return 0;
	}

	tag = pft_get_inode_tag(inode);
	*is_encrypted = pft_is_file_encrypted(tag);
	*key_index = pft_get_inode_key_index(inode);
	*is_inplace = pft_is_inplace_inode(inode);

	if (*is_encrypted)
		pr_debug("file %s is encrypted\n", inode_to_filename(inode));

	return 0;
}
EXPORT_SYMBOL(pft_get_key_index);

/**
 * pft_bio_get_inode() - get the inode from a bio.
 * @bio: Pointer to BIO structure.
 *
 * Walk the bio struct links to get the inode.
 *
 * NOTE(review): only the first io_vec of the bio is examined —
 * presumably all pages of one bio belong to the same inode; confirm
 * against the block-layer merge rules.
 *
 * Return: pointer to the inode struct if successful, or NULL otherwise.
 */
static struct inode *pft_bio_get_inode(struct bio *bio)
{
	if (!bio)
		return NULL;

	/* check bio vec count > 0 before using the bio->bi_io_vec[] array */
	if (!bio->bi_vcnt)
		return NULL;

	if (!bio->bi_io_vec)
		return NULL;
	if (!bio->bi_io_vec->bv_page)
		return NULL;

	if (PageAnon(bio->bi_io_vec->bv_page)) {
		struct inode *inode;

		/* Using direct-io (O_DIRECT) without page cache */
		inode = dio_bio_get_inode(bio);
		pr_debug("inode on direct-io, inode = 0x%p.\n", inode);
		return inode;
	}

	if (!bio->bi_io_vec->bv_page->mapping)
		return NULL;

	if (!bio->bi_io_vec->bv_page->mapping->host)
		return NULL;

	return bio->bi_io_vec->bv_page->mapping->host;
}

/**
 * pft_allow_merge_bio()- Check if 2 BIOs can be merged.
 * @bio1: Pointer to first BIO structure.
 * @bio2: Pointer to second BIO structure.
 *
 * Prevent merging of BIOs from encrypted and non-encrypted
 * files, or files encrypted with different key.
 * This API is called by the file system block layer.
 *
 * Return: true if the BIOs allowed to be merged, false
 * otherwise.
 */
bool pft_allow_merge_bio(struct bio *bio1, struct bio *bio2)
{
	u32 key_index1 = 0, key_index2 = 0;
	bool is_encrypted1 = false, is_encrypted2 = false;
	bool allow = false;
	bool is_inplace = false; /* N.A. */
	int ret;

	if (!pft_is_ready())
		return true;

	/*
	 * Encrypted BIOs are created only when file encryption is enabled,
	 * which happens only when key is loaded.
	 */
	if (pft_dev->state != PFT_STATE_KEY_LOADED)
		return true;

	/* On lookup failure the bio is treated as non-encrypted */
	ret = pft_get_key_index(bio1, &key_index1,
				&is_encrypted1, &is_inplace);
	if (ret)
		is_encrypted1 = false;

	ret = pft_get_key_index(bio2, &key_index2,
				&is_encrypted2, &is_inplace);
	if (ret)
		is_encrypted2 = false;

	/* Merge only when both sides share encryption status and key */
	allow = ((is_encrypted1 == is_encrypted2) &&
		 (key_index1 == key_index2));

	return allow;
}
EXPORT_SYMBOL(pft_allow_merge_bio);

/**
 * pft_inode_create() - file creation callback.
 * @dir: directory inode pointer
 * @dentry: file dentry pointer
 * @mode: flags
 *
 * This hook is called when file is created by VFS.
 * This hook is called from the selinux driver.
 * This hooks check file creation permission for enterprise
 * applications.
 * Call path:
 * vfs_create()->security_inode_create()->selinux_inode_create()
 *
 * Return: 0 on success, negative value on failure.
 */
int pft_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	if (!dir || !dentry)
		return 0;

	if (!pft_is_ready())
		return 0;

	switch (pft_dev->state) {
	case PFT_STATE_DEACTIVATED:
	case PFT_STATE_KEY_LOADED:
		break;
	case PFT_STATE_KEY_REMOVED:
	case PFT_STATE_DEACTIVATING:
	case PFT_STATE_REMOVING_KEY:
		/* At this state no new encrypted files can be created */
		if (pft_is_current_process_registered()) {
			pr_debug("key removed, registered uid %u is denied from creating new file %s\n",
				 current_uid(), dentry->d_iname);
			return -EACCES;
		}
		break;
	default:
		BUG(); /* State is set by "set state" command */
		break;
	}

	return 0;
}
EXPORT_SYMBOL(pft_inode_create);

/**
 * pft_inode_post_create() - file creation callback.
 * @dir: directory inode pointer
 * @dentry: file dentry pointer
 * @mode: flags
 *
 * This hook is called when file is created by VFS.
 * This hook is called from the selinux driver.
 * This hooks tags new files as encrypted when created by
 * enterprise applications.
 * Call path:
 * vfs_create()->security_inode_post_create()->selinux_inode_post_create()
 *
 * Return: 0 on success, negative value on failure.
 */
int pft_inode_post_create(struct inode *dir, struct dentry *dentry,
			  umode_t mode)
{
	int ret;

	if (!dir || !dentry)
		return 0;

	if (!pft_is_ready())
		return 0;

	switch (pft_dev->state) {
	case PFT_STATE_DEACTIVATED:
	case PFT_STATE_KEY_REMOVED:
	case PFT_STATE_DEACTIVATING:
	case PFT_STATE_REMOVING_KEY:
		break;
	case PFT_STATE_KEY_LOADED:
		/* Check whether the new file should be encrypted */
		if (pft_is_current_process_registered()) {
			u32 key_index = pft_get_app_key_index(current_uid());
			ret = pft_tag_file(dentry, key_index);
			if (ret == 0)
				pr_debug("key loaded, pid [%u] uid [%d] is creating file %s\n",
					 current_pid(), current_uid(),
					 dentry->d_iname);
			else {
				pr_err("Failed to tag file %s by pid %d\n",
				       dentry->d_iname, current_pid());
				return -EFAULT;
			}
		}
		break;
	default:
		BUG(); /* State is set by "set state" command */
		break;
	}

	return 0;
}
EXPORT_SYMBOL(pft_inode_post_create);

/**
 * pft_inode_mknod() - mknode file hook (callback)
 * @dir: directory inode pointer
 * @dentry: file dentry pointer
 * @mode: flags
 * @dev: device number
 *
 * This hook checks encrypted file access permission by
 * enterprise application.
 * Call path:
 * vfs_mknod()->security_inode_mknod()->selinux_inode_mknod()->pft_inode_mknod()
 *
 * Return: 0 on successful operation, negative value otherwise.
 */
int pft_inode_mknod(struct inode *dir, struct dentry *dentry,
		    umode_t mode, dev_t dev)
{
	int rc;

	/* Check if allowed to create new encrypted files */
	rc = pft_inode_create(dir, dentry, mode);

	return rc;
}
EXPORT_SYMBOL(pft_inode_mknod);

/**
 * pft_inode_rename() - file rename hook.
 * @inode: directory inode
 * @dentry: file dentry
 * @new_inode: target directory inode
 * @new_dentry: target file dentry
 *
 * Block attempt to rename enterprise file.
 *
 * Return: 0 on allowed operation, negative value otherwise.
 */
int pft_inode_rename(struct inode *inode, struct dentry *dentry,
		     struct inode *new_inode, struct dentry *new_dentry)
{
	if (!inode || !dentry || !new_inode || !new_dentry || !dentry->d_inode)
		return 0;

	if (!pft_is_ready())
		return 0;

	/* do nothing for non-encrypted files */
	if (!pft_is_encrypted_file(dentry))
		return 0;

	pr_debug("attempt to rename encrypted file [%s]\n", dentry->d_iname);

	if (pft_is_inplace_inode(dentry->d_inode)) {
		pr_err("access in-place-encryption file %s by uid [%d] pid [%d] is blocked.\n",
		       inode_to_filename(inode), current_uid(), current_pid());
		return -EACCES;
	}

	if (!pft_is_current_process_registered()) {
		pr_err("unregistered app (uid %u pid %u) is trying to access encrypted file %s\n",
		       current_uid(), current_pid(), dentry->d_iname);
		return -EACCES;
	} else
		pr_debug("rename file %s\n", dentry->d_iname);

	return 0;
}
EXPORT_SYMBOL(pft_inode_rename);

/**
 * pft_file_open() - file open hook (callback).
 * @filp: file pointer
 * @cred: credentials pointer
 *
 * This hook is called when file is opened by VFS.
 * It is called from the selinux driver.
 * It checks enterprise file xattr when first opened.
 * It adds encrypted file to the list of open files.
 * Call path:
 * do_filp_open()->security_dentry_open()->selinux_dentry_open()
 *
 * Return: 0 on success, negative value on failure.
 */
int pft_file_open(struct file *filp, const struct cred *cred)
{
	int ret;

	if (!filp || !filp->f_path.dentry)
		return 0;

	if (!pft_is_ready())
		return 0;

	if (filp->f_flags & O_DIRECT)
		pr_debug("file %s using O_DIRECT.\n", file_to_filename(filp));

	/* do nothing for non-encrypted files */
	if (!pft_is_encrypted_file(filp->f_dentry))
		return 0;

	/*
	 * Only PFM allowed to access in-place-encryption-file
	 * during in-place-encryption process
	 */
	if (pft_is_inplace_file(filp) && current_pid() != pft_dev->pfm_pid) {
		pr_err("Access in-place-encryption file %s by uid %d pid %d is blocked.\n",
		       file_to_filename(filp), current_uid(), current_pid());
		return -EACCES;
	}

	switch (pft_dev->state) {
	case PFT_STATE_DEACTIVATED:
	case PFT_STATE_KEY_REMOVED:
	case PFT_STATE_DEACTIVATING:
	case PFT_STATE_REMOVING_KEY:
		/* Block any access for encrypted files when key not loaded */
		pr_debug("key not loaded. uid (%u) can not access file %s\n",
			 current_uid(), file_to_filename(filp));
		return -EACCES;
	case PFT_STATE_KEY_LOADED:
		/* Only registered apps may access encrypted files. */
		if (!pft_is_current_process_registered()) {
			pr_err("unregistered app (uid %u pid %u) is trying to access encrypted file %s\n",
			       current_uid(), current_pid(),
			       file_to_filename(filp));
			return -EACCES;
		}

		/* Track the open so it can be force-closed on key removal */
		ret = pft_add_file(filp);
		if (ret) {
			pr_err("failed to add file %s to the list.\n",
			       file_to_filename(filp));
			return -EFAULT;
		}
		break;
	default:
		BUG(); /* State is set by "set state" command */
		break;
	}

	return 0;
}
EXPORT_SYMBOL(pft_file_open);

/**
 * pft_file_permission() - check file access permission.
 * @filp: file pointer
 * @mask: flags
 *
 * This hook is called when file is read/write by VFS.
 * This hook is called from the selinux driver.
 * This hook checks encrypted file access permission by
 * enterprise application.
 * Call path:
 * vfs_read()->security_file_permission()->selinux_file_permission()
 *
 * Return: 0 on success, negative value on failure.
 */
int pft_file_permission(struct file *filp, int mask)
{
	if (!filp)
		return 0;

	if (!pft_is_ready())
		return 0;

	/* do nothing for non-encrypted files */
	if (!pft_is_encrypted_file(filp->f_dentry))
		return 0;

	/*
	 * Only PFM allowed to access in-place-encryption-file
	 * during in-place encryption process
	 */
	if (pft_is_inplace_file(filp)) {
		if (current_pid() == pft_dev->pfm_pid) {
			/* mask MAY_WRITE=2 / MAY_READ=4 */
			pr_debug("r/w [mask 0x%x] in-place-encryption file %s by PFM (UID %d, PID %d).\n",
				 mask, file_to_filename(filp),
				 current_uid(), current_pid());
			return 0;
		} else {
			pr_err("Access in-place-encryption file %s by App (UID %d, PID %d) is blocked.\n",
			       file_to_filename(filp),
			       current_uid(), current_pid());
			return -EACCES;
		}
	}

	switch (pft_dev->state) {
	case PFT_STATE_DEACTIVATED:
	case PFT_STATE_KEY_REMOVED:
	case PFT_STATE_DEACTIVATING:
	case PFT_STATE_REMOVING_KEY:
		/* Block any access for encrypted files when key not loaded */
		pr_debug("key not loaded. uid (%u) can not access file %s\n",
			 current_uid(), file_to_filename(filp));
		return -EACCES;
	case PFT_STATE_KEY_LOADED:
		/* Only registered apps can access encrypted files. */
		if (!pft_is_current_process_registered()) {
			pr_err("unregistered app (uid %u pid %u) is trying to access encrypted file %s\n",
			       current_uid(), current_pid(),
			       file_to_filename(filp));
			return -EACCES;
		}
		break;
	default:
		BUG(); /* State is set by "set state" command */
		break;
	}

	return 0;
}
EXPORT_SYMBOL(pft_file_permission);

/**
 * pft_sync_file() - sync the file.
 * @filp: file pointer
 *
 * Complete writing any pending write request of encrypted data
 * before key is removed, to avoid writing garbage to
 * enterprise files.
*/ static void pft_sync_file(struct file *filp) { int ret; ret = vfs_fsync(filp, false); if (ret) pr_debug("failed to sync file %s, ret = %d.\n", file_to_filename(filp), ret); else pr_debug("Sync file %s ok.\n", file_to_filename(filp)); } /** * pft_file_close()- handle file close event * @filp: file pointer * * This hook is called when file is closed by VFS. * This hook is called from the selinux driver. * * Return: 0 on successful operation, negative value otherwise. */ int pft_file_close(struct file *filp) { if (!filp) return 0; if (!pft_is_ready()) return 0; /* do nothing for non-encrypted files */ if (!pft_is_encrypted_file(filp->f_dentry)) return 0; if (pft_is_inplace_file(filp)) { pr_debug("pid [%u] uid [%u] is closing in-place-encryption file %s\n", current_pid(), current_uid(), file_to_filename(filp)); pft_dev->inplace_file = NULL; } switch (pft_dev->state) { case PFT_STATE_DEACTIVATING: case PFT_STATE_REMOVING_KEY: /* * Do not allow apps to close file when * pft_close_opened_enc_files() is closing files. * Normally, all enterprise apps are closed by PFM * before getting to this state, so the apps files are * norammly closed by now. * pft_close_opened_enc_files() is running in PFM context. */ if (current_pid() != pft_dev->pfm_pid) return -EACCES; case PFT_STATE_DEACTIVATED: case PFT_STATE_KEY_LOADED: case PFT_STATE_KEY_REMOVED: break; default: BUG(); /* State is set by "set state" command */ break; } pft_sync_file(filp); pft_remove_file(filp); return 0; } EXPORT_SYMBOL(pft_file_close); /** * pft_inode_unlink() - Delete file hook. * @dir: directory inode pointer * @dentry: file dentry pointer * * call path: vfs_unlink()->security_inode_unlink(). * * Return: 0 on successful operation, negative value otherwise. 
*/ int pft_inode_unlink(struct inode *dir, struct dentry *dentry) { struct inode *inode = NULL; if (!dir || !dentry || !dentry->d_inode) return 0; if (!pft_is_ready()) return 0; inode = dentry->d_inode; /* do nothing for non-encrypted files */ if (!pft_is_encrypted_file(dentry)) return 0; if (pft_is_inplace_inode(inode)) { pr_err("block delete in-place-encryption file %s by uid [%d] pid [%d], while encryption in progress.\n", inode_to_filename(inode), current_uid(), current_pid()); return -EBUSY; } if (!pft_is_current_process_registered()) { pr_err("unregistered app (uid %u pid %u) is trying to access encrypted file %s\n", current_uid(), current_pid(), inode_to_filename(inode)); return -EACCES; } else pr_debug("delete file %s\n", inode_to_filename(inode)); return 0; } EXPORT_SYMBOL(pft_inode_unlink); /** * pft_inode_set_xattr() - set/remove xattr callback. * @dentry: file dentry pointer * @name: xattr name. * * This hook checks attempt to set/remove PFE xattr. * Only this kernel driver allows to set the PFE xattr, so block * any attempt to do it from user space. Allow access for other * xattr. * * Return: 0 on successful operation, negative value otherwise. */ int pft_inode_set_xattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { struct inode *inode = NULL; if (!dentry || !dentry->d_inode) return 0; inode = dentry->d_inode; if (strcmp(name, XATTR_NAME_PFE) != 0) { pr_debug("xattr name=%s file %s\n", name, inode_to_filename(inode)); return 0; /* Not PFE xattr so it is ok */ } pr_err("Attemp to set/remove PFE xattr for file %s\n", inode_to_filename(inode)); /* Only PFT kernel driver allows to set the PFE xattr */ return -EACCES; } EXPORT_SYMBOL(pft_inode_set_xattr); /** * pft_close_opened_enc_files() - Close all the currently open * encrypted files * * Close all open encrypted file when removing key or * deactivating. 
 */
static void pft_close_opened_enc_files(void)
{
	struct pft_file_info *tmp = NULL;
	struct list_head *pos = NULL;
	struct list_head *next = NULL;

	/*
	 * NOTE(review): the list is walked without pft_dev->lock held;
	 * filp_close() below re-enters pft_file_close(), which removes the
	 * entry - hence list_for_each_safe. Confirm no concurrent mutator
	 * can run in these states.
	 */
	list_for_each_safe(pos, next, &pft_dev->open_file_list) {
		struct file *filp;
		tmp = list_entry(pos, struct pft_file_info, list);
		filp = tmp->file;
		pr_debug("closing file %s.\n", file_to_filename(filp));
		/* filp_close() eventually calls pft_file_close() */
		filp_close(filp, NULL);
	}
}

/**
 * pft_set_state() - Handle "Set State" command
 * @command: command buffer.
 * @size: size of command buffer.
 *
 * The command execution status is reported by the response.
 *
 * Return: 0 on successful operation, negative value otherwise.
 */
static int pft_set_state(struct pft_command *command, int size)
{
	u32 state = command->set_state.state;
	int expected_size = sizeof(command->opcode) +
		sizeof(command->set_state);

	/* Reject short/long buffers before touching the payload. */
	if (size != expected_size) {
		pr_err("Invalid buffer size\n");
		pft_set_response(PFT_CMD_RESP_INVALID_CMD_PARAMS);
		return -EINVAL;
	}

	/* Out-of-range state: report via response, not via return code. */
	if (state >= PFT_STATE_MAX_INDEX) {
		pr_err("Invalid state %d\n", command->set_state.state);
		pft_set_response(PFT_CMD_RESP_INVALID_STATE);
		return 0;
	}

	pr_debug("Set State %d [%s].\n", state, pft_state_name[state]);

	switch (command->set_state.state) {
	case PFT_STATE_DEACTIVATING:
	case PFT_STATE_REMOVING_KEY:
		/* Transitional states force-close all open encrypted files. */
		pft_close_opened_enc_files();
		/* Fall through */
	case PFT_STATE_DEACTIVATED:
	case PFT_STATE_KEY_LOADED:
	case PFT_STATE_KEY_REMOVED:
		pft_dev->state = command->set_state.state;
		pft_set_response(PFT_CMD_RESP_SUCCESS);
		break;
	default:
		pr_err("Invalid state %d\n", command->set_state.state);
		pft_set_response(PFT_CMD_RESP_INVALID_STATE);
		break;
	}

	return 0;
}

/**
 * pft_get_process_open_file() - get file pointer using file
 * descriptor index.
 * @index: file descriptor index.
 *
 * Return: file pointer on success, NULL on failure.
 */
static struct file *pft_get_process_open_file(int index)
{
	struct fdtable *files_table;

	/*
	 * NOTE(review): files_fdtable() is an RCU-protected structure; this
	 * lookup takes neither rcu_read_lock() nor current->files->file_lock,
	 * and does not take a reference (get_file()) on the returned struct
	 * file. Safe only because it runs in the context of the process that
	 * owns the fd, and fragile even then - confirm/fix with fget().
	 */
	files_table = files_fdtable(current->files);
	if (files_table == NULL)
		return NULL;

	if (index >= files_table->max_fds)
		return NULL;
	else
		return files_table->fd[index];
}

/**
 * pft_set_inplace_file() - handle "inplace file encryption"
 * command.
 * @command: command buffer.
 * @size: size of command buffer.
 *
 * The command execution status is reported by the response.
 *
 * Return: 0 if command is valid, negative value otherwise.
 */
static int pft_set_inplace_file(struct pft_command *command, int size)
{
	int expected_size;
	u32 fd;
	int rc;
	struct file *filp = NULL;
	struct inode *inode = NULL;
	int writecount;

	expected_size = sizeof(command->opcode) +
		sizeof(command->preform_in_place_file_enc.file_descriptor);

	/* Malformed buffer is the only case reported via return code. */
	if (size != expected_size) {
		pr_err("invalid command size %d expected %d.\n",
		       size, expected_size);
		pft_set_response(PFT_CMD_RESP_INVALID_CMD_PARAMS);
		return -EINVAL;
	}

	/* In-place encryption is meaningful only once a key is loaded. */
	if (pft_dev->state != (u32) PFT_STATE_KEY_LOADED) {
		pr_err("Key not loaded, state [%d], In-place-encryption is not allowed.\n",
		       pft_dev->state);
		pft_set_response(PFT_CMD_RESP_GENERAL_ERROR);
		return 0;
	}

	/* allow only one in-place file encryption at a time */
	if (pft_dev->inplace_file != NULL) {
		pr_err("file %s in-place-encryption in progress.\n",
		       file_to_filename(pft_dev->inplace_file));
		/* @todo - use new error code */
		pft_set_response(PFT_CMD_RESP_INPLACE_FILE_IS_OPEN);
		return 0;
	}

	/* fd is resolved in the caller's (PFM's) own fd table. */
	fd = command->preform_in_place_file_enc.file_descriptor;
	filp = pft_get_process_open_file(fd);
	if (filp == NULL) {
		pr_err("failed to find file by fd %d.\n", fd);
		pft_set_response(PFT_CMD_RESP_GENERAL_ERROR);
		return 0;
	}

	/* Verify the file is not already open by other than PFM */
	if (!filp->f_path.dentry || !filp->f_path.dentry->d_inode) {
		pr_err("failed to get inode of inplace-file.\n");
		pft_set_response(PFT_CMD_RESP_GENERAL_ERROR);
		return 0;
	}
	inode = filp->f_path.dentry->d_inode;
	/* i_writecount > 1 means someone besides PFM has it open for write. */
	writecount = atomic_read(&inode->i_writecount);
	if (writecount > 1) {
		pr_err("file %s is opened %d times for write.\n",
		       file_to_filename(filp), writecount);
		pft_set_response(PFT_CMD_RESP_INPLACE_FILE_IS_OPEN);
		return 0;
	}

	/*
	 * Check if the file was already encrypted.
	 * In practice, it is unlikely to happen,
	 * because PFM is not an enterprise application
	 * it won't be able to open encrypted file.
	 */
	if (pft_is_encrypted_file(filp->f_dentry)) {
		pr_err("file %s is already encrypted.\n",
		       file_to_filename(filp));
		pft_set_response(PFT_CMD_RESP_GENERAL_ERROR);
		return 0;
	}

	/* Update the current in-place-encryption file */
	pft_dev->inplace_file = filp;

	/*
	 * Now, any new access to this file is allowed only to PFM.
	 * Lets make sure that all pending writes are completed
	 * before encrypting the file.
	 */
	pft_sync_file(filp);

	/* Tag with the key index derived from PFM's uid. */
	rc = pft_tag_file(pft_dev->inplace_file->f_dentry,
			  pft_get_app_key_index(current_uid()));
	if (!rc) {
		pr_debug("tagged file %s to be encrypted.\n",
			 file_to_filename(pft_dev->inplace_file));
		pft_set_response(PFT_CMD_RESP_SUCCESS);
	} else {
		pr_err("failed to tag file %s for encryption.\n",
		       file_to_filename(pft_dev->inplace_file));
		pft_set_response(PFT_CMD_RESP_GENERAL_ERROR);
	}
	return 0;
}

/**
 * pft_update_reg_apps() - Update the registered application
 * list.
 * @command: command buffer.
 * @size: size of command buffer.
 *
 * The command execution status is reported by the response.
 *
 * Return: 0 on successful operation, negative value otherwise.
*/ static int pft_update_reg_apps(struct pft_command *command, int size) { int i; int expected_size; void *buf; int buf_size; u32 items_count = command->update_app_list.items_count; if (items_count > PFT_MAX_APPS) { pr_err("Number of apps [%d] > max apps [%d]\n", items_count , PFT_MAX_APPS); pft_set_response(PFT_CMD_RESP_INVALID_CMD_PARAMS); return -EINVAL; } expected_size = sizeof(command->opcode) + sizeof(command->update_app_list.items_count) + (command->update_app_list.items_count * sizeof(u32)); if (size != expected_size) { pr_err("invalid command size %d expected %d.\n", size, expected_size); pft_set_response(PFT_CMD_RESP_INVALID_CMD_PARAMS); return -EINVAL; } mutex_lock(&pft_dev->lock); /* Free old table */ kfree(pft_dev->uid_table); pft_dev->uid_table = NULL; pft_dev->uid_count = 0; if (items_count == 0) { pr_info("empty app list - clear list.\n"); mutex_unlock(&pft_dev->lock); return 0; } buf_size = command->update_app_list.items_count * sizeof(u32); buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) { pr_err("malloc failure\n"); pft_set_response(PFT_CMD_RESP_GENERAL_ERROR); mutex_unlock(&pft_dev->lock); return 0; } pft_dev->uid_table = buf; pft_dev->uid_count = command->update_app_list.items_count; pr_debug("uid_count = %d\n", pft_dev->uid_count); for (i = 0; i < pft_dev->uid_count; i++) pft_dev->uid_table[i] = command->update_app_list.table[i]; pft_set_response(PFT_CMD_RESP_SUCCESS); mutex_unlock(&pft_dev->lock); return 0; } /** * pft_handle_command() - Handle user space app commands. * @buf: command buffer. * @buf_size: command buffer size. * * Return: 0 on successful operation, negative value otherwise. 
 */
static int pft_handle_command(void *buf, int buf_size)
{
	int ret = 0;
	struct pft_command *command = NULL;

	/* opcode field is the minimum length of command */
	if (buf_size < sizeof(command->opcode)) {
		pr_err("Invalid argument used buffer size\n");
		return -EINVAL;
	}

	command = (struct pft_command *)buf;

	/* Echo the opcode back so userspace can match response to command. */
	pft_dev->response.command_id = command->opcode;

	switch (command->opcode) {
	case PFT_CMD_OPCODE_SET_STATE:
		ret = pft_set_state(command, buf_size);
		break;
	case PFT_CMD_OPCODE_UPDATE_REG_APP_UID:
		ret = pft_update_reg_apps(command, buf_size);
		break;
	case PFT_CMD_OPCODE_PERFORM_IN_PLACE_FILE_ENC:
		ret = pft_set_inplace_file(command, buf_size);
		break;
	default:
		pr_err("Invalid command_op_code %u\n", command->opcode);
		pft_set_response(PFT_CMD_RESP_INVALID_COMMAND);
		return 0;
	}

	return ret;
}

/* Single-open device: the opener becomes the trusted PFM process. */
static int pft_device_open(struct inode *inode, struct file *file)
{
	int ret;

	mutex_lock(&pft_dev->lock);
	if (pft_dev->open_count > 0) {
		pr_err("PFT device is already opened (%d)\n",
		       pft_dev->open_count);
		ret = -EBUSY;
	} else {
		pft_dev->open_count++;
		/* Remember PFM's pid; hooks compare against it for access. */
		pft_dev->pfm_pid = current_pid();
		pr_debug("PFT device opened by %d (%d)\n",
			 pft_dev->pfm_pid, pft_dev->open_count);
		ret = 0;
	}
	mutex_unlock(&pft_dev->lock);

	pr_debug("device opened, count %d\n", pft_dev->open_count);

	return ret;
}

/* Release resets pfm_pid so no process is treated as PFM anymore. */
static int pft_device_release(struct inode *inode, struct file *file)
{
	mutex_lock(&pft_dev->lock);
	if (pft_dev->open_count > 0)
		pft_dev->open_count--;
	pft_dev->pfm_pid = UINT_MAX;
	mutex_unlock(&pft_dev->lock);

	pr_debug("device released, count %d\n", pft_dev->open_count);

	return 0;
}

/**
 * pft_device_write() - Get commands from user space.
 *
 * Return: number of bytes to write on success to get the
 * command buffer, negative value on failure.
 * The error code for handling the command should be retrieved by
 * reading the response.
 * Note: any return value of 0..size-1 will cause retry by the
 * OS, so avoid it.
*/ static ssize_t pft_device_write(struct file *filp, const char __user *user_buff, size_t size, loff_t *f_pos) { int ret; char *cmd_buf; if (size > PFT_MAX_COMMAND_SIZE || !user_buff || !f_pos) { pr_err("inavlid parameters.\n"); return -EINVAL; } cmd_buf = kzalloc(size, GFP_KERNEL); if (cmd_buf == NULL) { pr_err("malloc failure for command buffer\n"); return -ENOMEM; } ret = copy_from_user(cmd_buf, user_buff, size); if (ret) { pr_err("Unable to copy from user (err %d)\n", ret); kfree(cmd_buf); return -EFAULT; } ret = pft_handle_command(cmd_buf, size); if (ret) { kfree(cmd_buf); return -EFAULT; } kfree(cmd_buf); return size; } /** * pft_device_read() - return response of last command. * * Return: number of bytes to read on success, negative value on * failure. */ static ssize_t pft_device_read(struct file *filp, char __user *buffer, size_t length, loff_t *f_pos) { int ret = 0; if (!buffer || !f_pos || length < sizeof(pft_dev->response)) { pr_err("inavlid parameters.\n"); return -EFAULT; } ret = copy_to_user(buffer, &(pft_dev->response), sizeof(pft_dev->response)); if (ret) { pr_err("Unable to copy to user, err = %d.\n", ret); return -EINVAL; } return sizeof(pft_dev->response); } static const struct file_operations fops = { .owner = THIS_MODULE, .read = pft_device_read, .write = pft_device_write, .open = pft_device_open, .release = pft_device_release, }; static int __init pft_register_chardev(void) { int rc; unsigned baseminor = 0; unsigned count = 1; struct device *class_dev; rc = alloc_chrdev_region(&pft_dev->device_no, baseminor, count, DEVICE_NAME); if (rc < 0) { pr_err("alloc_chrdev_region failed %d\n", rc); return rc; } pft_dev->driver_class = class_create(THIS_MODULE, DEVICE_NAME); if (IS_ERR(pft_dev->driver_class)) { rc = -ENOMEM; pr_err("class_create failed %d\n", rc); goto exit_unreg_chrdev_region; } class_dev = device_create(pft_dev->driver_class, NULL, pft_dev->device_no, NULL, DEVICE_NAME); if (!class_dev) { pr_err("class_device_create failed %d\n", 
rc); rc = -ENOMEM; goto exit_destroy_class; } cdev_init(&pft_dev->cdev, &fops); pft_dev->cdev.owner = THIS_MODULE; rc = cdev_add(&pft_dev->cdev, MKDEV(MAJOR(pft_dev->device_no), 0), 1); if (rc < 0) { pr_err("cdev_add failed %d\n", rc); goto exit_destroy_device; } return 0; exit_destroy_device: device_destroy(pft_dev->driver_class, pft_dev->device_no); exit_destroy_class: class_destroy(pft_dev->driver_class); exit_unreg_chrdev_region: unregister_chrdev_region(pft_dev->device_no, 1); return rc; } static void __exit pft_unregister_chrdev(void) { cdev_del(&pft_dev->cdev); device_destroy(pft_dev->driver_class, pft_dev->device_no); class_destroy(pft_dev->driver_class); unregister_chrdev_region(pft_dev->device_no, 1); } static void __exit pft_free_open_files_list(void) { struct pft_file_info *tmp = NULL; struct list_head *pos = NULL; struct list_head *next = NULL; mutex_lock(&pft_dev->lock); list_for_each_safe(pos, next, &pft_dev->open_file_list) { tmp = list_entry(pos, struct pft_file_info, list); list_del(&tmp->list); kfree(tmp); } mutex_unlock(&pft_dev->lock); } static void __exit pft_exit(void) { if (pft_dev == NULL) return; pft_unregister_chrdev(); pft_free_open_files_list(); kfree(pft_dev->uid_table); kfree(pft_dev); pft_dev = NULL; } static int __init pft_init(void) { int ret; struct pft_device *dev = NULL; dev = kzalloc(sizeof(struct pft_device), GFP_KERNEL); if (dev == NULL) { pr_err("No memory for device structr\n"); return -ENOMEM; } pft_dev = dev; dev->state = PFT_STATE_DEACTIVATED; dev->pfm_pid = UINT_MAX; INIT_LIST_HEAD(&dev->open_file_list); mutex_init(&dev->lock); ret = pft_register_chardev(); if (ret) { pr_err("create character device failed.\n"); goto fail; } pft_lsm_init(dev); pr_info("Drivr initialized successfully %s %s.n", __DATE__, __TIME__); return 0; fail: pr_err("Failed to init driver.\n"); kfree(dev); pft_dev = NULL; return -ENODEV; } module_init(pft_init); module_exit(pft_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Per-File-Tagger 
driver");
gpl-2.0
oxforever/linux-4.1
drivers/infiniband/core/agent.c
840
6392
/* * Copyright (c) 2004, 2005 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved. * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved. * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved. * Copyright (c) 2004-2007 Voltaire Corporation. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
 *
 */

#include <linux/slab.h>
#include <linux/string.h>

#include "agent.h"
#include "smi.h"
#include "mad_priv.h"

#define SPFX "ib_agent: "

/* Per-port pair of send-only MAD agents: [0] = SMI QP, [1] = GSI QP. */
struct ib_agent_port_private {
	struct list_head port_list;
	struct ib_mad_agent *agent[2];
};

static DEFINE_SPINLOCK(ib_agent_port_list_lock);
static LIST_HEAD(ib_agent_port_list);

/* Caller must hold ib_agent_port_list_lock. Matches on the GSI agent
 * (agent[1]) because it is registered for every port, unlike agent[0]. */
static struct ib_agent_port_private *
__ib_get_agent_port(struct ib_device *device, int port_num)
{
	struct ib_agent_port_private *entry;

	list_for_each_entry(entry, &ib_agent_port_list, port_list) {
		if (entry->agent[1]->device == device &&
		    entry->agent[1]->port_num == port_num)
			return entry;
	}
	return NULL;
}

/* Locked wrapper around __ib_get_agent_port(). */
static struct ib_agent_port_private *
ib_get_agent_port(struct ib_device *device, int port_num)
{
	struct ib_agent_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
	entry = __ib_get_agent_port(device, port_num);
	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
	return entry;
}

/*
 * Send a copy of @mad back out on the wire on behalf of the SMA/PMA.
 * @qpn selects the SMI (0) or GSI (1) agent. Errors are logged and the
 * response is silently dropped.
 */
void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
			 struct ib_wc *wc, struct ib_device *device,
			 int port_num, int qpn)
{
	struct ib_agent_port_private *port_priv;
	struct ib_mad_agent *agent;
	struct ib_mad_send_buf *send_buf;
	struct ib_ah *ah;
	struct ib_mad_send_wr_private *mad_send_wr;

	/* A switch has a single management port agent registered as port 0. */
	if (device->node_type == RDMA_NODE_IB_SWITCH)
		port_priv = ib_get_agent_port(device, 0);
	else
		port_priv = ib_get_agent_port(device, port_num);

	if (!port_priv) {
		dev_err(&device->dev, "Unable to find port agent\n");
		return;
	}

	agent = port_priv->agent[qpn];
	/* Address handle derived from the incoming completion (reply path). */
	ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num);
	if (IS_ERR(ah)) {
		dev_err(&device->dev, "ib_create_ah_from_wc error %ld\n",
			PTR_ERR(ah));
		return;
	}

	send_buf = ib_create_send_mad(agent, wc->src_qp, wc->pkey_index, 0,
				      IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
				      GFP_KERNEL);
	if (IS_ERR(send_buf)) {
		dev_err(&device->dev, "ib_create_send_mad error\n");
		goto err1;
	}

	memcpy(send_buf->mad, mad, sizeof *mad);
	send_buf->ah = ah;

	/* On a switch the outgoing port must be set explicitly in the WR. */
	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_send_wr->send_wr.wr.ud.port_num = port_num;
	}

	if (ib_post_send_mad(send_buf, NULL)) {
		dev_err(&device->dev, "ib_post_send_mad error\n");
		goto err2;
	}
	return;
err2:
	ib_free_send_mad(send_buf);
err1:
	ib_destroy_ah(ah);
}

/* Send completion: release the AH and send buffer allocated above. */
static void agent_send_handler(struct ib_mad_agent *mad_agent,
			       struct ib_mad_send_wc *mad_send_wc)
{
	ib_destroy_ah(mad_send_wc->send_buf->ah);
	ib_free_send_mad(mad_send_wc->send_buf);
}

/* Register the per-port send-only MAD agents and publish the entry. */
int ib_agent_port_open(struct ib_device *device, int port_num)
{
	struct ib_agent_port_private *port_priv;
	unsigned long flags;
	int ret;

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		dev_err(&device->dev, "No memory for ib_agent_port_private\n");
		ret = -ENOMEM;
		goto error1;
	}

	/* SMI QP exists only on InfiniBand link-layer ports. */
	if (rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND) {
		/* Obtain send only MAD agent for SMI QP */
		port_priv->agent[0] = ib_register_mad_agent(device, port_num,
							    IB_QPT_SMI, NULL, 0,
							    &agent_send_handler,
							    NULL, NULL, 0);
		if (IS_ERR(port_priv->agent[0])) {
			ret = PTR_ERR(port_priv->agent[0]);
			goto error2;
		}
	}

	/* Obtain send only MAD agent for GSI QP */
	port_priv->agent[1] = ib_register_mad_agent(device, port_num,
						    IB_QPT_GSI, NULL, 0,
						    &agent_send_handler,
						    NULL, NULL, 0);
	if (IS_ERR(port_priv->agent[1])) {
		ret = PTR_ERR(port_priv->agent[1]);
		goto error3;
	}

	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_agent_port_list);
	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);

	return 0;

error3:
	if (port_priv->agent[0])
		ib_unregister_mad_agent(port_priv->agent[0]);
error2:
	kfree(port_priv);
error1:
	return ret;
}

/* Unpublish the entry, then unregister the agents in reverse order. */
int ib_agent_port_close(struct ib_device *device, int port_num)
{
	struct ib_agent_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
	port_priv = __ib_get_agent_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
		dev_err(&device->dev, "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);

	ib_unregister_mad_agent(port_priv->agent[1]);
	if (port_priv->agent[0])
		ib_unregister_mad_agent(port_priv->agent[0]);

	kfree(port_priv);
	return 0;
}
gpl-2.0
bigzz/f2fs-stable
net/bluetooth/selftest.c
1096
8082
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2014 Intel Corporation This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ #include <linux/debugfs.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "ecc.h" #include "smp.h" #include "selftest.h" #if IS_ENABLED(CONFIG_BT_SELFTEST_ECDH) static const u8 priv_a_1[32] __initconst = { 0xbd, 0x1a, 0x3c, 0xcd, 0xa6, 0xb8, 0x99, 0x58, 0x99, 0xb7, 0x40, 0xeb, 0x7b, 0x60, 0xff, 0x4a, 0x50, 0x3f, 0x10, 0xd2, 0xe3, 0xb3, 0xc9, 0x74, 0x38, 0x5f, 0xc5, 0xa3, 0xd4, 0xf6, 0x49, 0x3f, }; static const u8 priv_b_1[32] __initconst = { 0xfd, 0xc5, 0x7f, 0xf4, 0x49, 0xdd, 0x4f, 0x6b, 0xfb, 0x7c, 0x9d, 0xf1, 0xc2, 0x9a, 0xcb, 0x59, 0x2a, 0xe7, 0xd4, 0xee, 0xfb, 0xfc, 0x0a, 0x90, 0x9a, 0xbb, 0xf6, 0x32, 0x3d, 0x8b, 0x18, 0x55, }; static const u8 pub_a_1[64] __initconst = { 0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc, 0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef, 0xb9, 0xa5, 0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e, 0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20, 0x8b, 0xd2, 0x89, 0x15, 0xd0, 0x8e, 0x1c, 0x74, 0x24, 0x30, 0xed, 0x8f, 0xc2, 0x45, 0x63, 
0x76, 0x5c, 0x15, 0x52, 0x5a, 0xbf, 0x9a, 0x32, 0x63, 0x6d, 0xeb, 0x2a, 0x65, 0x49, 0x9c, 0x80, 0xdc, }; static const u8 pub_b_1[64] __initconst = { 0x90, 0xa1, 0xaa, 0x2f, 0xb2, 0x77, 0x90, 0x55, 0x9f, 0xa6, 0x15, 0x86, 0xfd, 0x8a, 0xb5, 0x47, 0x00, 0x4c, 0x9e, 0xf1, 0x84, 0x22, 0x59, 0x09, 0x96, 0x1d, 0xaf, 0x1f, 0xf0, 0xf0, 0xa1, 0x1e, 0x4a, 0x21, 0xb1, 0x15, 0xf9, 0xaf, 0x89, 0x5f, 0x76, 0x36, 0x8e, 0xe2, 0x30, 0x11, 0x2d, 0x47, 0x60, 0x51, 0xb8, 0x9a, 0x3a, 0x70, 0x56, 0x73, 0x37, 0xad, 0x9d, 0x42, 0x3e, 0xf3, 0x55, 0x4c, }; static const u8 dhkey_1[32] __initconst = { 0x98, 0xa6, 0xbf, 0x73, 0xf3, 0x34, 0x8d, 0x86, 0xf1, 0x66, 0xf8, 0xb4, 0x13, 0x6b, 0x79, 0x99, 0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34, 0x05, 0xad, 0xc8, 0x57, 0xa3, 0x34, 0x02, 0xec, }; static const u8 priv_a_2[32] __initconst = { 0x63, 0x76, 0x45, 0xd0, 0xf7, 0x73, 0xac, 0xb7, 0xff, 0xdd, 0x03, 0x72, 0xb9, 0x72, 0x85, 0xb4, 0x41, 0xb6, 0x5d, 0x0c, 0x5d, 0x54, 0x84, 0x60, 0x1a, 0xa3, 0x9a, 0x3c, 0x69, 0x16, 0xa5, 0x06, }; static const u8 priv_b_2[32] __initconst = { 0xba, 0x30, 0x55, 0x50, 0x19, 0xa2, 0xca, 0xa3, 0xa5, 0x29, 0x08, 0xc6, 0xb5, 0x03, 0x88, 0x7e, 0x03, 0x2b, 0x50, 0x73, 0xd4, 0x2e, 0x50, 0x97, 0x64, 0xcd, 0x72, 0x0d, 0x67, 0xa0, 0x9a, 0x52, }; static const u8 pub_a_2[64] __initconst = { 0xdd, 0x78, 0x5c, 0x74, 0x03, 0x9b, 0x7e, 0x98, 0xcb, 0x94, 0x87, 0x4a, 0xad, 0xfa, 0xf8, 0xd5, 0x43, 0x3e, 0x5c, 0xaf, 0xea, 0xb5, 0x4c, 0xf4, 0x9e, 0x80, 0x79, 0x57, 0x7b, 0xa4, 0x31, 0x2c, 0x4f, 0x5d, 0x71, 0x43, 0x77, 0x43, 0xf8, 0xea, 0xd4, 0x3e, 0xbd, 0x17, 0x91, 0x10, 0x21, 0xd0, 0x1f, 0x87, 0x43, 0x8e, 0x40, 0xe2, 0x52, 0xcd, 0xbe, 0xdf, 0x98, 0x38, 0x18, 0x12, 0x95, 0x91, }; static const u8 pub_b_2[64] __initconst = { 0xcc, 0x00, 0x65, 0xe1, 0xf5, 0x6c, 0x0d, 0xcf, 0xec, 0x96, 0x47, 0x20, 0x66, 0xc9, 0xdb, 0x84, 0x81, 0x75, 0xa8, 0x4d, 0xc0, 0xdf, 0xc7, 0x9d, 0x1b, 0x3f, 0x3d, 0xf2, 0x3f, 0xe4, 0x65, 0xf4, 0x79, 0xb2, 0xec, 0xd8, 0xca, 0x55, 0xa1, 0xa8, 0x43, 0x4d, 0x6b, 0xca, 0x10, 
0xb0, 0xc2, 0x01, 0xc2, 0x33, 0x4e, 0x16, 0x24, 0xc4, 0xef, 0xee, 0x99, 0xd8, 0xbb, 0xbc, 0x48, 0xd0, 0x01, 0x02, }; static const u8 dhkey_2[32] __initconst = { 0x69, 0xeb, 0x21, 0x32, 0xf2, 0xc6, 0x05, 0x41, 0x60, 0x19, 0xcd, 0x5e, 0x94, 0xe1, 0xe6, 0x5f, 0x33, 0x07, 0xe3, 0x38, 0x4b, 0x68, 0xe5, 0x62, 0x3f, 0x88, 0x6d, 0x2f, 0x3a, 0x84, 0x85, 0xab, }; static const u8 priv_a_3[32] __initconst = { 0xbd, 0x1a, 0x3c, 0xcd, 0xa6, 0xb8, 0x99, 0x58, 0x99, 0xb7, 0x40, 0xeb, 0x7b, 0x60, 0xff, 0x4a, 0x50, 0x3f, 0x10, 0xd2, 0xe3, 0xb3, 0xc9, 0x74, 0x38, 0x5f, 0xc5, 0xa3, 0xd4, 0xf6, 0x49, 0x3f, }; static const u8 pub_a_3[64] __initconst = { 0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc, 0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef, 0xb9, 0xa5, 0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e, 0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20, 0x8b, 0xd2, 0x89, 0x15, 0xd0, 0x8e, 0x1c, 0x74, 0x24, 0x30, 0xed, 0x8f, 0xc2, 0x45, 0x63, 0x76, 0x5c, 0x15, 0x52, 0x5a, 0xbf, 0x9a, 0x32, 0x63, 0x6d, 0xeb, 0x2a, 0x65, 0x49, 0x9c, 0x80, 0xdc, }; static const u8 dhkey_3[32] __initconst = { 0x2d, 0xab, 0x00, 0x48, 0xcb, 0xb3, 0x7b, 0xda, 0x55, 0x7b, 0x8b, 0x72, 0xa8, 0x57, 0x87, 0xc3, 0x87, 0x27, 0x99, 0x32, 0xfc, 0x79, 0x5f, 0xae, 0x7c, 0x1c, 0xf9, 0x49, 0xe6, 0xd7, 0xaa, 0x70, }; static int __init test_ecdh_sample(const u8 priv_a[32], const u8 priv_b[32], const u8 pub_a[64], const u8 pub_b[64], const u8 dhkey[32]) { u8 dhkey_a[32], dhkey_b[32]; ecdh_shared_secret(pub_b, priv_a, dhkey_a); ecdh_shared_secret(pub_a, priv_b, dhkey_b); if (memcmp(dhkey_a, dhkey, 32)) return -EINVAL; if (memcmp(dhkey_b, dhkey, 32)) return -EINVAL; return 0; } static char test_ecdh_buffer[32]; static ssize_t test_ecdh_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { return simple_read_from_buffer(user_buf, count, ppos, test_ecdh_buffer, strlen(test_ecdh_buffer)); } static const struct file_operations test_ecdh_fops = { .open = simple_open, .read = test_ecdh_read, .llseek = default_llseek, }; 
/*
 * Run the three ECDH known-answer samples, time them, and expose the
 * PASS/FAIL result via the "selftest_ecdh" debugfs file.
 */
static int __init test_ecdh(void)
{
	ktime_t calltime, delta, rettime;
	unsigned long long duration;	/* valid only when all samples pass */
	int err;

	calltime = ktime_get();

	err = test_ecdh_sample(priv_a_1, priv_b_1, pub_a_1, pub_b_1, dhkey_1);
	if (err) {
		BT_ERR("ECDH sample 1 failed");
		goto done;
	}

	err = test_ecdh_sample(priv_a_2, priv_b_2, pub_a_2, pub_b_2, dhkey_2);
	if (err) {
		BT_ERR("ECDH sample 2 failed");
		goto done;
	}

	/* Sample 3 deliberately uses the same key pair for both sides. */
	err = test_ecdh_sample(priv_a_3, priv_a_3, pub_a_3, pub_a_3, dhkey_3);
	if (err) {
		BT_ERR("ECDH sample 3 failed");
		goto done;
	}

	rettime = ktime_get();
	delta = ktime_sub(rettime, calltime);
	duration = (unsigned long long) ktime_to_ns(delta) >> 10;

	BT_INFO("ECDH test passed in %llu usecs", duration);

done:
	if (!err)
		snprintf(test_ecdh_buffer, sizeof(test_ecdh_buffer),
			 "PASS (%llu usecs)\n", duration);
	else
		snprintf(test_ecdh_buffer, sizeof(test_ecdh_buffer), "FAIL\n");

	debugfs_create_file("selftest_ecdh", 0444, bt_debugfs, NULL,
			    &test_ecdh_fops);

	return err;
}

#else

/* ECDH self-test disabled in config: report success. */
static inline int test_ecdh(void)
{
	return 0;
}

#endif

/* Run all enabled self-tests; stops at the first failure. */
static int __init run_selftest(void)
{
	int err;

	BT_INFO("Starting self testing");

	err = test_ecdh();
	if (err)
		goto done;

	err = bt_selftest_smp();

done:
	BT_INFO("Finished self testing");

	return err;
}

#if IS_MODULE(CONFIG_BT)

/* This is run when CONFIG_BT_SELFTEST=y and CONFIG_BT=m and is just a
 * wrapper to allow running this at module init.
 *
 * If CONFIG_BT_SELFTEST=n, then this code is not compiled at all.
 */
int __init bt_selftest(void)
{
	return run_selftest();
}

#else

/* This is run when CONFIG_BT_SELFTEST=y and CONFIG_BT=y and is run
 * via late_initcall() as last item in the initialization sequence.
 *
 * If CONFIG_BT_SELFTEST=n, then this code is not compiled at all.
 */
static int __init bt_selftest_init(void)
{
	return run_selftest();
}
late_initcall(bt_selftest_init);

#endif
gpl-2.0
AndroidGX/SimpleGX-MM-6.0_H815_20i
drivers/infiniband/ulp/ipoib/ipoib_ib.c
2120
28106
/* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/delay.h> #include <linux/moduleparam.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/ip.h> #include <linux/tcp.h> #include "ipoib.h" #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA static int data_debug_level; module_param(data_debug_level, int, 0644); MODULE_PARM_DESC(data_debug_level, "Enable data path debug tracing if > 0"); #endif static DEFINE_MUTEX(pkey_mutex); struct ipoib_ah *ipoib_create_ah(struct net_device *dev, struct ib_pd *pd, struct ib_ah_attr *attr) { struct ipoib_ah *ah; struct ib_ah *vah; ah = kmalloc(sizeof *ah, GFP_KERNEL); if (!ah) return ERR_PTR(-ENOMEM); ah->dev = dev; ah->last_send = 0; kref_init(&ah->ref); vah = ib_create_ah(pd, attr); if (IS_ERR(vah)) { kfree(ah); ah = (struct ipoib_ah *)vah; } else { ah->ah = vah; ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah); } return ah; } void ipoib_free_ah(struct kref *kref) { struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref); struct ipoib_dev_priv *priv = netdev_priv(ah->dev); unsigned long flags; spin_lock_irqsave(&priv->lock, flags); list_add_tail(&ah->list, &priv->dead_ahs); spin_unlock_irqrestore(&priv->lock, flags); } static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv, u64 mapping[IPOIB_UD_RX_SG]) { if (ipoib_ud_need_sg(priv->max_ib_mtu)) { ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE, DMA_FROM_DEVICE); ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE, DMA_FROM_DEVICE); } else ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_BUF_SIZE(priv->max_ib_mtu), DMA_FROM_DEVICE); } static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv, struct sk_buff *skb, unsigned int length) { if (ipoib_ud_need_sg(priv->max_ib_mtu)) { skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; unsigned int size; /* * There is only two buffers needed for max_payload = 4K, * first buf size is IPOIB_UD_HEAD_SIZE */ skb->tail += IPOIB_UD_HEAD_SIZE; skb->len += length; size = length - IPOIB_UD_HEAD_SIZE; skb_frag_size_set(frag, size); 
skb->data_len += size; skb->truesize += PAGE_SIZE; } else skb_put(skb, length); } static int ipoib_ib_post_receive(struct net_device *dev, int id) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ib_recv_wr *bad_wr; int ret; priv->rx_wr.wr_id = id | IPOIB_OP_RECV; priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0]; priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1]; ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr); if (unlikely(ret)) { ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret); ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping); dev_kfree_skb_any(priv->rx_ring[id].skb); priv->rx_ring[id].skb = NULL; } return ret; } static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct sk_buff *skb; int buf_size; int tailroom; u64 *mapping; if (ipoib_ud_need_sg(priv->max_ib_mtu)) { buf_size = IPOIB_UD_HEAD_SIZE; tailroom = 128; /* reserve some tailroom for IP/TCP headers */ } else { buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu); tailroom = 0; } skb = dev_alloc_skb(buf_size + tailroom + 4); if (unlikely(!skb)) return NULL; /* * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte * header. So we need 4 more bytes to get to 48 and align the * IP header to a multiple of 16. 
*/ skb_reserve(skb, 4); mapping = priv->rx_ring[id].mapping; mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size, DMA_FROM_DEVICE); if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) goto error; if (ipoib_ud_need_sg(priv->max_ib_mtu)) { struct page *page = alloc_page(GFP_ATOMIC); if (!page) goto partial_error; skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE); mapping[1] = ib_dma_map_page(priv->ca, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1]))) goto partial_error; } priv->rx_ring[id].skb = skb; return skb; partial_error: ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE); error: dev_kfree_skb_any(skb); return NULL; } static int ipoib_ib_post_receives(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); int i; for (i = 0; i < ipoib_recvq_size; ++i) { if (!ipoib_alloc_rx_skb(dev, i)) { ipoib_warn(priv, "failed to allocate receive buffer %d\n", i); return -ENOMEM; } if (ipoib_ib_post_receive(dev, i)) { ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i); return -EIO; } } return 0; } static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) { struct ipoib_dev_priv *priv = netdev_priv(dev); unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV; struct sk_buff *skb; u64 mapping[IPOIB_UD_RX_SG]; union ib_gid *dgid; ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n", wr_id, wc->status); if (unlikely(wr_id >= ipoib_recvq_size)) { ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n", wr_id, ipoib_recvq_size); return; } skb = priv->rx_ring[wr_id].skb; if (unlikely(wc->status != IB_WC_SUCCESS)) { if (wc->status != IB_WC_WR_FLUSH_ERR) ipoib_warn(priv, "failed recv event " "(status=%d, wrid=%d vend_err %x)\n", wc->status, wr_id, wc->vendor_err); ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping); dev_kfree_skb_any(skb); priv->rx_ring[wr_id].skb = NULL; return; } /* * Drop packets that this interface sent, ie multicast packets * 
that the HCA has replicated. */ if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) goto repost; memcpy(mapping, priv->rx_ring[wr_id].mapping, IPOIB_UD_RX_SG * sizeof *mapping); /* * If we can't allocate a new RX buffer, dump * this packet and reuse the old buffer. */ if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) { ++dev->stats.rx_dropped; goto repost; } ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n", wc->byte_len, wc->slid); ipoib_ud_dma_unmap_rx(priv, mapping); ipoib_ud_skb_put_frags(priv, skb, wc->byte_len); /* First byte of dgid signals multicast when 0xff */ dgid = &((struct ib_grh *)skb->data)->dgid; if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff) skb->pkt_type = PACKET_HOST; else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0) skb->pkt_type = PACKET_BROADCAST; else skb->pkt_type = PACKET_MULTICAST; skb_pull(skb, IB_GRH_BYTES); skb->protocol = ((struct ipoib_header *) skb->data)->proto; skb_reset_mac_header(skb); skb_pull(skb, IPOIB_ENCAP_LEN); ++dev->stats.rx_packets; dev->stats.rx_bytes += skb->len; skb->dev = dev; if ((dev->features & NETIF_F_RXCSUM) && likely(wc->wc_flags & IB_WC_IP_CSUM_OK)) skb->ip_summed = CHECKSUM_UNNECESSARY; napi_gro_receive(&priv->napi, skb); repost: if (unlikely(ipoib_ib_post_receive(dev, wr_id))) ipoib_warn(priv, "ipoib_ib_post_receive failed " "for buf %d\n", wr_id); } static int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req) { struct sk_buff *skb = tx_req->skb; u64 *mapping = tx_req->mapping; int i; int off; if (skb_headlen(skb)) { mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb), DMA_TO_DEVICE); if (unlikely(ib_dma_mapping_error(ca, mapping[0]))) return -EIO; off = 1; } else off = 0; for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; mapping[i + off] = ib_dma_map_page(ca, skb_frag_page(frag), frag->page_offset, skb_frag_size(frag), DMA_TO_DEVICE); if (unlikely(ib_dma_mapping_error(ca, 
mapping[i + off]))) goto partial_error; } return 0; partial_error: for (; i > 0; --i) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE); } if (off) ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE); return -EIO; } static void ipoib_dma_unmap_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req) { struct sk_buff *skb = tx_req->skb; u64 *mapping = tx_req->mapping; int i; int off; if (skb_headlen(skb)) { ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE); off = 1; } else off = 0; for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; ib_dma_unmap_page(ca, mapping[i + off], skb_frag_size(frag), DMA_TO_DEVICE); } } static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) { struct ipoib_dev_priv *priv = netdev_priv(dev); unsigned int wr_id = wc->wr_id; struct ipoib_tx_buf *tx_req; ipoib_dbg_data(priv, "send completion: id %d, status: %d\n", wr_id, wc->status); if (unlikely(wr_id >= ipoib_sendq_size)) { ipoib_warn(priv, "send completion event with wrid %d (> %d)\n", wr_id, ipoib_sendq_size); return; } tx_req = &priv->tx_ring[wr_id]; ipoib_dma_unmap_tx(priv->ca, tx_req); ++dev->stats.tx_packets; dev->stats.tx_bytes += tx_req->skb->len; dev_kfree_skb_any(tx_req->skb); ++priv->tx_tail; if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) && netif_queue_stopped(dev) && test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) netif_wake_queue(dev); if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR) ipoib_warn(priv, "failed send event " "(status=%d, wrid=%d vend_err %x)\n", wc->status, wr_id, wc->vendor_err); } static int poll_tx(struct ipoib_dev_priv *priv) { int n, i; n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc); for (i = 0; i < n; ++i) ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i); return n == MAX_SEND_CQE; } int ipoib_poll(struct napi_struct *napi, int 
budget) { struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi); struct net_device *dev = priv->dev; int done; int t; int n, i; done = 0; poll_more: while (done < budget) { int max = (budget - done); t = min(IPOIB_NUM_WC, max); n = ib_poll_cq(priv->recv_cq, t, priv->ibwc); for (i = 0; i < n; i++) { struct ib_wc *wc = priv->ibwc + i; if (wc->wr_id & IPOIB_OP_RECV) { ++done; if (wc->wr_id & IPOIB_OP_CM) ipoib_cm_handle_rx_wc(dev, wc); else ipoib_ib_handle_rx_wc(dev, wc); } else ipoib_cm_handle_tx_wc(priv->dev, wc); } if (n != t) break; } if (done < budget) { napi_complete(napi); if (unlikely(ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)) && napi_reschedule(napi)) goto poll_more; } return done; } void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr) { struct net_device *dev = dev_ptr; struct ipoib_dev_priv *priv = netdev_priv(dev); napi_schedule(&priv->napi); } static void drain_tx_cq(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); netif_tx_lock(dev); while (poll_tx(priv)) ; /* nothing */ if (netif_queue_stopped(dev)) mod_timer(&priv->poll_timer, jiffies + 1); netif_tx_unlock(dev); } void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr) { struct ipoib_dev_priv *priv = netdev_priv(dev_ptr); mod_timer(&priv->poll_timer, jiffies); } static inline int post_send(struct ipoib_dev_priv *priv, unsigned int wr_id, struct ib_ah *address, u32 qpn, struct ipoib_tx_buf *tx_req, void *head, int hlen) { struct ib_send_wr *bad_wr; int i, off; struct sk_buff *skb = tx_req->skb; skb_frag_t *frags = skb_shinfo(skb)->frags; int nr_frags = skb_shinfo(skb)->nr_frags; u64 *mapping = tx_req->mapping; if (skb_headlen(skb)) { priv->tx_sge[0].addr = mapping[0]; priv->tx_sge[0].length = skb_headlen(skb); off = 1; } else off = 0; for (i = 0; i < nr_frags; ++i) { priv->tx_sge[i + off].addr = mapping[i + off]; priv->tx_sge[i + off].length = skb_frag_size(&frags[i]); } priv->tx_wr.num_sge = nr_frags + off; 
priv->tx_wr.wr_id = wr_id; priv->tx_wr.wr.ud.remote_qpn = qpn; priv->tx_wr.wr.ud.ah = address; if (head) { priv->tx_wr.wr.ud.mss = skb_shinfo(skb)->gso_size; priv->tx_wr.wr.ud.header = head; priv->tx_wr.wr.ud.hlen = hlen; priv->tx_wr.opcode = IB_WR_LSO; } else priv->tx_wr.opcode = IB_WR_SEND; return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr); } void ipoib_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_ah *address, u32 qpn) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_tx_buf *tx_req; int hlen, rc; void *phead; if (skb_is_gso(skb)) { hlen = skb_transport_offset(skb) + tcp_hdrlen(skb); phead = skb->data; if (unlikely(!skb_pull(skb, hlen))) { ipoib_warn(priv, "linear data too small\n"); ++dev->stats.tx_dropped; ++dev->stats.tx_errors; dev_kfree_skb_any(skb); return; } } else { if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) { ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n", skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN); ++dev->stats.tx_dropped; ++dev->stats.tx_errors; ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu); return; } phead = NULL; hlen = 0; } ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n", skb->len, address, qpn); /* * We put the skb into the tx_ring _before_ we call post_send() * because it's entirely possible that the completion handler will * run before we execute anything after the post_send(). That * means we have to make sure everything is properly recorded and * our state is consistent before we call post_send(). 
*/ tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)]; tx_req->skb = skb; if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) { ++dev->stats.tx_errors; dev_kfree_skb_any(skb); return; } if (skb->ip_summed == CHECKSUM_PARTIAL) priv->tx_wr.send_flags |= IB_SEND_IP_CSUM; else priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM; if (++priv->tx_outstanding == ipoib_sendq_size) { ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n"); if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP)) ipoib_warn(priv, "request notify on send CQ failed\n"); netif_stop_queue(dev); } skb_orphan(skb); skb_dst_drop(skb); rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1), address->ah, qpn, tx_req, phead, hlen); if (unlikely(rc)) { ipoib_warn(priv, "post_send failed, error %d\n", rc); ++dev->stats.tx_errors; --priv->tx_outstanding; ipoib_dma_unmap_tx(priv->ca, tx_req); dev_kfree_skb_any(skb); if (netif_queue_stopped(dev)) netif_wake_queue(dev); } else { dev->trans_start = jiffies; address->last_send = priv->tx_head; ++priv->tx_head; } if (unlikely(priv->tx_outstanding > MAX_SEND_CQE)) while (poll_tx(priv)) ; /* nothing */ } static void __ipoib_reap_ah(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_ah *ah, *tah; LIST_HEAD(remove_list); unsigned long flags; netif_tx_lock_bh(dev); spin_lock_irqsave(&priv->lock, flags); list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list) if ((int) priv->tx_tail - (int) ah->last_send >= 0) { list_del(&ah->list); ib_destroy_ah(ah->ah); kfree(ah); } spin_unlock_irqrestore(&priv->lock, flags); netif_tx_unlock_bh(dev); } void ipoib_reap_ah(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, ah_reap_task.work); struct net_device *dev = priv->dev; __ipoib_reap_ah(dev); if (!test_bit(IPOIB_STOP_REAPER, &priv->flags)) queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, round_jiffies_relative(HZ)); } static void ipoib_ib_tx_timer_func(unsigned long ctx) { 
drain_tx_cq((struct net_device *)ctx); } int ipoib_ib_dev_open(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); int ret; if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) { ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey); clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); return -1; } set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); ret = ipoib_init_qp(dev); if (ret) { ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret); return -1; } ret = ipoib_ib_post_receives(dev); if (ret) { ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret); ipoib_ib_dev_stop(dev, 1); return -1; } ret = ipoib_cm_dev_open(dev); if (ret) { ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret); ipoib_ib_dev_stop(dev, 1); return -1; } clear_bit(IPOIB_STOP_REAPER, &priv->flags); queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, round_jiffies_relative(HZ)); if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) napi_enable(&priv->napi); return 0; } static void ipoib_pkey_dev_check_presence(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); u16 pkey_index = 0; if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index)) clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); else set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); } int ipoib_ib_dev_up(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); ipoib_pkey_dev_check_presence(dev); if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) { ipoib_dbg(priv, "PKEY is not assigned.\n"); return 0; } set_bit(IPOIB_FLAG_OPER_UP, &priv->flags); return ipoib_mcast_start_thread(dev); } int ipoib_ib_dev_down(struct net_device *dev, int flush) { struct ipoib_dev_priv *priv = netdev_priv(dev); ipoib_dbg(priv, "downing ib_dev\n"); clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags); netif_carrier_off(dev); /* Shutdown the P_Key thread if still active */ if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) { mutex_lock(&pkey_mutex); set_bit(IPOIB_PKEY_STOP, &priv->flags); 
cancel_delayed_work(&priv->pkey_poll_task); mutex_unlock(&pkey_mutex); if (flush) flush_workqueue(ipoib_workqueue); } ipoib_mcast_stop_thread(dev, flush); ipoib_mcast_dev_flush(dev); ipoib_flush_paths(dev); return 0; } static int recvs_pending(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); int pending = 0; int i; for (i = 0; i < ipoib_recvq_size; ++i) if (priv->rx_ring[i].skb) ++pending; return pending; } void ipoib_drain_cq(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); int i, n; /* * We call completion handling routines that expect to be * called from the BH-disabled NAPI poll context, so disable * BHs here too. */ local_bh_disable(); do { n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc); for (i = 0; i < n; ++i) { /* * Convert any successful completions to flush * errors to avoid passing packets up the * stack after bringing the device down. */ if (priv->ibwc[i].status == IB_WC_SUCCESS) priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR; if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) { if (priv->ibwc[i].wr_id & IPOIB_OP_CM) ipoib_cm_handle_rx_wc(dev, priv->ibwc + i); else ipoib_ib_handle_rx_wc(dev, priv->ibwc + i); } else ipoib_cm_handle_tx_wc(dev, priv->ibwc + i); } } while (n == IPOIB_NUM_WC); while (poll_tx(priv)) ; /* nothing */ local_bh_enable(); } int ipoib_ib_dev_stop(struct net_device *dev, int flush) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ib_qp_attr qp_attr; unsigned long begin; struct ipoib_tx_buf *tx_req; int i; if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) napi_disable(&priv->napi); ipoib_cm_dev_stop(dev); /* * Move our QP to the error state and then reinitialize in * when all work requests have completed or have been flushed. 
*/ qp_attr.qp_state = IB_QPS_ERR; if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) ipoib_warn(priv, "Failed to modify QP to ERROR state\n"); /* Wait for all sends and receives to complete */ begin = jiffies; while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) { if (time_after(jiffies, begin + 5 * HZ)) { ipoib_warn(priv, "timing out; %d sends %d receives not completed\n", priv->tx_head - priv->tx_tail, recvs_pending(dev)); /* * assume the HW is wedged and just free up * all our pending work requests. */ while ((int) priv->tx_tail - (int) priv->tx_head < 0) { tx_req = &priv->tx_ring[priv->tx_tail & (ipoib_sendq_size - 1)]; ipoib_dma_unmap_tx(priv->ca, tx_req); dev_kfree_skb_any(tx_req->skb); ++priv->tx_tail; --priv->tx_outstanding; } for (i = 0; i < ipoib_recvq_size; ++i) { struct ipoib_rx_buf *rx_req; rx_req = &priv->rx_ring[i]; if (!rx_req->skb) continue; ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[i].mapping); dev_kfree_skb_any(rx_req->skb); rx_req->skb = NULL; } goto timeout; } ipoib_drain_cq(dev); msleep(1); } ipoib_dbg(priv, "All sends and receives done.\n"); timeout: del_timer_sync(&priv->poll_timer); qp_attr.qp_state = IB_QPS_RESET; if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) ipoib_warn(priv, "Failed to modify QP to RESET state\n"); /* Wait for all AHs to be reaped */ set_bit(IPOIB_STOP_REAPER, &priv->flags); cancel_delayed_work(&priv->ah_reap_task); if (flush) flush_workqueue(ipoib_workqueue); begin = jiffies; while (!list_empty(&priv->dead_ahs)) { __ipoib_reap_ah(dev); if (time_after(jiffies, begin + HZ)) { ipoib_warn(priv, "timing out; will leak address handles\n"); break; } msleep(1); } ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP); return 0; } int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port) { struct ipoib_dev_priv *priv = netdev_priv(dev); priv->ca = ca; priv->port = port; priv->qp = NULL; if (ipoib_transport_dev_init(dev, ca)) { printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name); 
return -ENODEV; } setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func, (unsigned long) dev); if (dev->flags & IFF_UP) { if (ipoib_ib_dev_open(dev)) { ipoib_transport_dev_cleanup(dev); return -ENODEV; } } return 0; } static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, enum ipoib_flush_level level) { struct ipoib_dev_priv *cpriv; struct net_device *dev = priv->dev; u16 new_index; mutex_lock(&priv->vlan_mutex); /* * Flush any child interfaces too -- they might be up even if * the parent is down. */ list_for_each_entry(cpriv, &priv->child_intfs, list) __ipoib_ib_dev_flush(cpriv, level); mutex_unlock(&priv->vlan_mutex); if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) { ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); return; } if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n"); return; } if (level == IPOIB_FLUSH_HEAVY) { if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) { clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); ipoib_ib_dev_down(dev, 0); ipoib_ib_dev_stop(dev, 0); if (ipoib_pkey_dev_delay_open(dev)) return; } /* restart QP only if P_Key index is changed */ if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) && new_index == priv->pkey_index) { ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n"); return; } priv->pkey_index = new_index; } if (level == IPOIB_FLUSH_LIGHT) { ipoib_mark_paths_invalid(dev); ipoib_mcast_dev_flush(dev); } if (level >= IPOIB_FLUSH_NORMAL) ipoib_ib_dev_down(dev, 0); if (level == IPOIB_FLUSH_HEAVY) { ipoib_ib_dev_stop(dev, 0); ipoib_ib_dev_open(dev); } /* * The device could have been brought down between the start and when * we get here, don't bring it back up if it's not configured up */ if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { if (level >= IPOIB_FLUSH_NORMAL) ipoib_ib_dev_up(dev); ipoib_mcast_restart_task(&priv->restart_task); } } void ipoib_ib_dev_flush_light(struct work_struct *work) { struct 
ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, flush_light); __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT); } void ipoib_ib_dev_flush_normal(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, flush_normal); __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL); } void ipoib_ib_dev_flush_heavy(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, flush_heavy); __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY); } void ipoib_ib_dev_cleanup(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); ipoib_dbg(priv, "cleaning up ib_dev\n"); ipoib_mcast_stop_thread(dev, 1); ipoib_mcast_dev_flush(dev); ipoib_transport_dev_cleanup(dev); } /* * Delayed P_Key Assigment Interim Support * * The following is initial implementation of delayed P_Key assigment * mechanism. It is using the same approach implemented for the multicast * group join. The single goal of this implementation is to quickly address * Bug #2507. This implementation will probably be removed when the P_Key * change async notification is available. 
*/ void ipoib_pkey_poll(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, pkey_poll_task.work); struct net_device *dev = priv->dev; ipoib_pkey_dev_check_presence(dev); if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) ipoib_open(dev); else { mutex_lock(&pkey_mutex); if (!test_bit(IPOIB_PKEY_STOP, &priv->flags)) queue_delayed_work(ipoib_workqueue, &priv->pkey_poll_task, HZ); mutex_unlock(&pkey_mutex); } } int ipoib_pkey_dev_delay_open(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); /* Look for the interface pkey value in the IB Port P_Key table and */ /* set the interface pkey assigment flag */ ipoib_pkey_dev_check_presence(dev); /* P_Key value not assigned yet - start polling */ if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) { mutex_lock(&pkey_mutex); clear_bit(IPOIB_PKEY_STOP, &priv->flags); queue_delayed_work(ipoib_workqueue, &priv->pkey_poll_task, HZ); mutex_unlock(&pkey_mutex); return 1; } return 0; }
gpl-2.0
quancao/kernel-imx-controlboard
drivers/staging/media/as102/as102_usb_drv.c
2376
12135
/* * Abilis Systems Single DVB-T Receiver * Copyright (C) 2008 Pierrick Hascoet <pierrick.hascoet@abilis.com> * Copyright (C) 2010 Devin Heitmueller <dheitmueller@kernellabs.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/usb.h> #include "as102_drv.h" #include "as102_usb_drv.h" #include "as102_fw.h" static void as102_usb_disconnect(struct usb_interface *interface); static int as102_usb_probe(struct usb_interface *interface, const struct usb_device_id *id); static int as102_usb_start_stream(struct as102_dev_t *dev); static void as102_usb_stop_stream(struct as102_dev_t *dev); static int as102_open(struct inode *inode, struct file *file); static int as102_release(struct inode *inode, struct file *file); static struct usb_device_id as102_usb_id_table[] = { { USB_DEVICE(AS102_USB_DEVICE_VENDOR_ID, AS102_USB_DEVICE_PID_0001) }, { USB_DEVICE(PCTV_74E_USB_VID, PCTV_74E_USB_PID) }, { USB_DEVICE(ELGATO_EYETV_DTT_USB_VID, ELGATO_EYETV_DTT_USB_PID) }, { USB_DEVICE(NBOX_DVBT_DONGLE_USB_VID, NBOX_DVBT_DONGLE_USB_PID) }, { USB_DEVICE(SKY_IT_DIGITAL_KEY_USB_VID, SKY_IT_DIGITAL_KEY_USB_PID) }, { } /* Terminating entry */ }; /* Note that this table must always have the same number of entries as the as102_usb_id_table struct */ static const char * const 
as102_device_names[] = { AS102_REFERENCE_DESIGN, AS102_PCTV_74E, AS102_ELGATO_EYETV_DTT_NAME, AS102_NBOX_DVBT_DONGLE_NAME, AS102_SKY_IT_DIGITAL_KEY_NAME, NULL /* Terminating entry */ }; /* eLNA configuration: devices built on the reference design work best with 0xA0, while custom designs seem to require 0xC0 */ static uint8_t const as102_elna_cfg[] = { 0xA0, 0xC0, 0xC0, 0xA0, 0xA0, 0x00 /* Terminating entry */ }; struct usb_driver as102_usb_driver = { .name = DRIVER_FULL_NAME, .probe = as102_usb_probe, .disconnect = as102_usb_disconnect, .id_table = as102_usb_id_table }; static const struct file_operations as102_dev_fops = { .owner = THIS_MODULE, .open = as102_open, .release = as102_release, }; static struct usb_class_driver as102_usb_class_driver = { .name = "aton2-%d", .fops = &as102_dev_fops, .minor_base = AS102_DEVICE_MAJOR, }; static int as102_usb_xfer_cmd(struct as10x_bus_adapter_t *bus_adap, unsigned char *send_buf, int send_buf_len, unsigned char *recv_buf, int recv_buf_len) { int ret = 0; ENTER(); if (send_buf != NULL) { ret = usb_control_msg(bus_adap->usb_dev, usb_sndctrlpipe(bus_adap->usb_dev, 0), AS102_USB_DEVICE_TX_CTRL_CMD, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, bus_adap->cmd_xid, /* value */ 0, /* index */ send_buf, send_buf_len, USB_CTRL_SET_TIMEOUT /* 200 */); if (ret < 0) { dprintk(debug, "usb_control_msg(send) failed, err %i\n", ret); return ret; } if (ret != send_buf_len) { dprintk(debug, "only wrote %d of %d bytes\n", ret, send_buf_len); return -1; } } if (recv_buf != NULL) { #ifdef TRACE dprintk(debug, "want to read: %d bytes\n", recv_buf_len); #endif ret = usb_control_msg(bus_adap->usb_dev, usb_rcvctrlpipe(bus_adap->usb_dev, 0), AS102_USB_DEVICE_RX_CTRL_CMD, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, bus_adap->cmd_xid, /* value */ 0, /* index */ recv_buf, recv_buf_len, USB_CTRL_GET_TIMEOUT /* 200 */); if (ret < 0) { dprintk(debug, "usb_control_msg(recv) failed, err %i\n", ret); return ret; } #ifdef TRACE dprintk(debug, "read 
%d bytes\n", recv_buf_len); #endif } LEAVE(); return ret; } static int as102_send_ep1(struct as10x_bus_adapter_t *bus_adap, unsigned char *send_buf, int send_buf_len, int swap32) { int ret = 0, actual_len; ret = usb_bulk_msg(bus_adap->usb_dev, usb_sndbulkpipe(bus_adap->usb_dev, 1), send_buf, send_buf_len, &actual_len, 200); if (ret) { dprintk(debug, "usb_bulk_msg(send) failed, err %i\n", ret); return ret; } if (actual_len != send_buf_len) { dprintk(debug, "only wrote %d of %d bytes\n", actual_len, send_buf_len); return -1; } return ret ? ret : actual_len; } static int as102_read_ep2(struct as10x_bus_adapter_t *bus_adap, unsigned char *recv_buf, int recv_buf_len) { int ret = 0, actual_len; if (recv_buf == NULL) return -EINVAL; ret = usb_bulk_msg(bus_adap->usb_dev, usb_rcvbulkpipe(bus_adap->usb_dev, 2), recv_buf, recv_buf_len, &actual_len, 200); if (ret) { dprintk(debug, "usb_bulk_msg(recv) failed, err %i\n", ret); return ret; } if (actual_len != recv_buf_len) { dprintk(debug, "only read %d of %d bytes\n", actual_len, recv_buf_len); return -1; } return ret ? 
ret : actual_len; } struct as102_priv_ops_t as102_priv_ops = { .upload_fw_pkt = as102_send_ep1, .xfer_cmd = as102_usb_xfer_cmd, .as102_read_ep2 = as102_read_ep2, .start_stream = as102_usb_start_stream, .stop_stream = as102_usb_stop_stream, }; static int as102_submit_urb_stream(struct as102_dev_t *dev, struct urb *urb) { int err; usb_fill_bulk_urb(urb, dev->bus_adap.usb_dev, usb_rcvbulkpipe(dev->bus_adap.usb_dev, 0x2), urb->transfer_buffer, AS102_USB_BUF_SIZE, as102_urb_stream_irq, dev); err = usb_submit_urb(urb, GFP_ATOMIC); if (err) dprintk(debug, "%s: usb_submit_urb failed\n", __func__); return err; } void as102_urb_stream_irq(struct urb *urb) { struct as102_dev_t *as102_dev = urb->context; if (urb->actual_length > 0) { dvb_dmx_swfilter(&as102_dev->dvb_dmx, urb->transfer_buffer, urb->actual_length); } else { if (urb->actual_length == 0) memset(urb->transfer_buffer, 0, AS102_USB_BUF_SIZE); } /* is not stopped, re-submit urb */ if (as102_dev->streaming) as102_submit_urb_stream(as102_dev, urb); } static void as102_free_usb_stream_buffer(struct as102_dev_t *dev) { int i; ENTER(); for (i = 0; i < MAX_STREAM_URB; i++) usb_free_urb(dev->stream_urb[i]); usb_free_coherent(dev->bus_adap.usb_dev, MAX_STREAM_URB * AS102_USB_BUF_SIZE, dev->stream, dev->dma_addr); LEAVE(); } static int as102_alloc_usb_stream_buffer(struct as102_dev_t *dev) { int i, ret = 0; ENTER(); dev->stream = usb_alloc_coherent(dev->bus_adap.usb_dev, MAX_STREAM_URB * AS102_USB_BUF_SIZE, GFP_KERNEL, &dev->dma_addr); if (!dev->stream) { dprintk(debug, "%s: usb_buffer_alloc failed\n", __func__); return -ENOMEM; } memset(dev->stream, 0, MAX_STREAM_URB * AS102_USB_BUF_SIZE); /* init urb buffers */ for (i = 0; i < MAX_STREAM_URB; i++) { struct urb *urb; urb = usb_alloc_urb(0, GFP_ATOMIC); if (urb == NULL) { dprintk(debug, "%s: usb_alloc_urb failed\n", __func__); as102_free_usb_stream_buffer(dev); return -ENOMEM; } urb->transfer_buffer = dev->stream + (i * AS102_USB_BUF_SIZE); urb->transfer_dma = dev->dma_addr + 
(i * AS102_USB_BUF_SIZE); urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP; urb->transfer_buffer_length = AS102_USB_BUF_SIZE; dev->stream_urb[i] = urb; } LEAVE(); return ret; } static void as102_usb_stop_stream(struct as102_dev_t *dev) { int i; for (i = 0; i < MAX_STREAM_URB; i++) usb_kill_urb(dev->stream_urb[i]); } static int as102_usb_start_stream(struct as102_dev_t *dev) { int i, ret = 0; for (i = 0; i < MAX_STREAM_URB; i++) { ret = as102_submit_urb_stream(dev, dev->stream_urb[i]); if (ret) { as102_usb_stop_stream(dev); return ret; } } return 0; } static void as102_usb_release(struct kref *kref) { struct as102_dev_t *as102_dev; ENTER(); as102_dev = container_of(kref, struct as102_dev_t, kref); if (as102_dev != NULL) { usb_put_dev(as102_dev->bus_adap.usb_dev); kfree(as102_dev); } LEAVE(); } static void as102_usb_disconnect(struct usb_interface *intf) { struct as102_dev_t *as102_dev; ENTER(); /* extract as102_dev_t from usb_device private data */ as102_dev = usb_get_intfdata(intf); /* unregister dvb layer */ as102_dvb_unregister(as102_dev); /* free usb buffers */ as102_free_usb_stream_buffer(as102_dev); usb_set_intfdata(intf, NULL); /* usb unregister device */ usb_deregister_dev(intf, &as102_usb_class_driver); /* decrement usage counter */ kref_put(&as102_dev->kref, as102_usb_release); pr_info("%s: device has been disconnected\n", DRIVER_NAME); LEAVE(); } static int as102_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { int ret; struct as102_dev_t *as102_dev; int i; ENTER(); /* This should never actually happen */ if (ARRAY_SIZE(as102_usb_id_table) != (sizeof(as102_device_names) / sizeof(const char *))) { pr_err("Device names table invalid size"); return -EINVAL; } as102_dev = kzalloc(sizeof(struct as102_dev_t), GFP_KERNEL); if (as102_dev == NULL) return -ENOMEM; /* Assign the user-friendly device name */ for (i = 0; i < ARRAY_SIZE(as102_usb_id_table); i++) { if (id == &as102_usb_id_table[i]) { as102_dev->name = as102_device_names[i]; 
as102_dev->elna_cfg = as102_elna_cfg[i]; } } if (as102_dev->name == NULL) as102_dev->name = "Unknown AS102 device"; /* set private callback functions */ as102_dev->bus_adap.ops = &as102_priv_ops; /* init cmd token for usb bus */ as102_dev->bus_adap.cmd = &as102_dev->bus_adap.token.usb.c; as102_dev->bus_adap.rsp = &as102_dev->bus_adap.token.usb.r; /* init kernel device reference */ kref_init(&as102_dev->kref); /* store as102 device to usb_device private data */ usb_set_intfdata(intf, (void *) as102_dev); /* store in as102 device the usb_device pointer */ as102_dev->bus_adap.usb_dev = usb_get_dev(interface_to_usbdev(intf)); /* we can register the device now, as it is ready */ ret = usb_register_dev(intf, &as102_usb_class_driver); if (ret < 0) { /* something prevented us from registering this driver */ dev_err(&intf->dev, "%s: usb_register_dev() failed (errno = %d)\n", __func__, ret); goto failed; } pr_info("%s: device has been detected\n", DRIVER_NAME); /* request buffer allocation for streaming */ ret = as102_alloc_usb_stream_buffer(as102_dev); if (ret != 0) goto failed; /* register dvb layer */ ret = as102_dvb_register(as102_dev); LEAVE(); return ret; failed: usb_set_intfdata(intf, NULL); kfree(as102_dev); return ret; } static int as102_open(struct inode *inode, struct file *file) { int ret = 0, minor = 0; struct usb_interface *intf = NULL; struct as102_dev_t *dev = NULL; ENTER(); /* read minor from inode */ minor = iminor(inode); /* fetch device from usb interface */ intf = usb_find_interface(&as102_usb_driver, minor); if (intf == NULL) { pr_err("%s: can't find device for minor %d\n", __func__, minor); ret = -ENODEV; goto exit; } /* get our device */ dev = usb_get_intfdata(intf); if (dev == NULL) { ret = -EFAULT; goto exit; } /* save our device object in the file's private structure */ file->private_data = dev; /* increment our usage count for the device */ kref_get(&dev->kref); exit: LEAVE(); return ret; } static int as102_release(struct inode *inode, struct file 
*file) { int ret = 0; struct as102_dev_t *dev = NULL; ENTER(); dev = file->private_data; if (dev != NULL) { /* decrement the count on our device */ kref_put(&dev->kref, as102_usb_release); } LEAVE(); return ret; } MODULE_DEVICE_TABLE(usb, as102_usb_id_table);
gpl-2.0
BigBot96/android_kernel_samsung_konawifixx
drivers/net/mv643xx_eth.c
2376
72351
/* * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com> * * Based on the 64360 driver from: * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il> * Rabeeh Khoury <rabeeh@marvell.com> * * Copyright (C) 2003 PMC-Sierra, Inc., * written by Manish Lachwani * * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org> * * Copyright (C) 2004-2006 MontaVista Software, Inc. * Dale Farnsworth <dale@farnsworth.org> * * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com> * <sjhill@realitydiluted.com> * * Copyright (C) 2007-2008 Marvell Semiconductor * Lennert Buytenhek <buytenh@marvell.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/dma-mapping.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/etherdevice.h> #include <linux/delay.h> #include <linux/ethtool.h> #include <linux/platform_device.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/workqueue.h> #include <linux/phy.h> #include <linux/mv643xx_eth.h> #include <linux/io.h> #include <linux/types.h> #include <linux/inet_lro.h> #include <linux/slab.h> #include <asm/system.h> static char mv643xx_eth_driver_name[] = "mv643xx_eth"; static char mv643xx_eth_driver_version[] = "1.4"; /* * Registers shared between all ports. */ #define PHY_ADDR 0x0000 #define SMI_REG 0x0004 #define SMI_BUSY 0x10000000 #define SMI_READ_VALID 0x08000000 #define SMI_OPCODE_READ 0x04000000 #define SMI_OPCODE_WRITE 0x00000000 #define ERR_INT_CAUSE 0x0080 #define ERR_INT_SMI_DONE 0x00000010 #define ERR_INT_MASK 0x0084 #define WINDOW_BASE(w) (0x0200 + ((w) << 3)) #define WINDOW_SIZE(w) (0x0204 + ((w) << 3)) #define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2)) #define WINDOW_BAR_ENABLE 0x0290 #define WINDOW_PROTECT(w) (0x0294 + ((w) << 4)) /* * Main per-port registers. These live at offset 0x0400 for * port #0, 0x0800 for port #1, and 0x0c00 for port #2. 
*/ #define PORT_CONFIG 0x0000 #define UNICAST_PROMISCUOUS_MODE 0x00000001 #define PORT_CONFIG_EXT 0x0004 #define MAC_ADDR_LOW 0x0014 #define MAC_ADDR_HIGH 0x0018 #define SDMA_CONFIG 0x001c #define TX_BURST_SIZE_16_64BIT 0x01000000 #define TX_BURST_SIZE_4_64BIT 0x00800000 #define BLM_TX_NO_SWAP 0x00000020 #define BLM_RX_NO_SWAP 0x00000010 #define RX_BURST_SIZE_16_64BIT 0x00000008 #define RX_BURST_SIZE_4_64BIT 0x00000004 #define PORT_SERIAL_CONTROL 0x003c #define SET_MII_SPEED_TO_100 0x01000000 #define SET_GMII_SPEED_TO_1000 0x00800000 #define SET_FULL_DUPLEX_MODE 0x00200000 #define MAX_RX_PACKET_9700BYTE 0x000a0000 #define DISABLE_AUTO_NEG_SPEED_GMII 0x00002000 #define DO_NOT_FORCE_LINK_FAIL 0x00000400 #define SERIAL_PORT_CONTROL_RESERVED 0x00000200 #define DISABLE_AUTO_NEG_FOR_FLOW_CTRL 0x00000008 #define DISABLE_AUTO_NEG_FOR_DUPLEX 0x00000004 #define FORCE_LINK_PASS 0x00000002 #define SERIAL_PORT_ENABLE 0x00000001 #define PORT_STATUS 0x0044 #define TX_FIFO_EMPTY 0x00000400 #define TX_IN_PROGRESS 0x00000080 #define PORT_SPEED_MASK 0x00000030 #define PORT_SPEED_1000 0x00000010 #define PORT_SPEED_100 0x00000020 #define PORT_SPEED_10 0x00000000 #define FLOW_CONTROL_ENABLED 0x00000008 #define FULL_DUPLEX 0x00000004 #define LINK_UP 0x00000002 #define TXQ_COMMAND 0x0048 #define TXQ_FIX_PRIO_CONF 0x004c #define TX_BW_RATE 0x0050 #define TX_BW_MTU 0x0058 #define TX_BW_BURST 0x005c #define INT_CAUSE 0x0060 #define INT_TX_END 0x07f80000 #define INT_TX_END_0 0x00080000 #define INT_RX 0x000003fc #define INT_RX_0 0x00000004 #define INT_EXT 0x00000002 #define INT_CAUSE_EXT 0x0064 #define INT_EXT_LINK_PHY 0x00110000 #define INT_EXT_TX 0x000000ff #define INT_MASK 0x0068 #define INT_MASK_EXT 0x006c #define TX_FIFO_URGENT_THRESHOLD 0x0074 #define TXQ_FIX_PRIO_CONF_MOVED 0x00dc #define TX_BW_RATE_MOVED 0x00e0 #define TX_BW_MTU_MOVED 0x00e8 #define TX_BW_BURST_MOVED 0x00ec #define RXQ_CURRENT_DESC_PTR(q) (0x020c + ((q) << 4)) #define RXQ_COMMAND 0x0280 #define TXQ_CURRENT_DESC_PTR(q) 
(0x02c0 + ((q) << 2)) #define TXQ_BW_TOKENS(q) (0x0300 + ((q) << 4)) #define TXQ_BW_CONF(q) (0x0304 + ((q) << 4)) #define TXQ_BW_WRR_CONF(q) (0x0308 + ((q) << 4)) /* * Misc per-port registers. */ #define MIB_COUNTERS(p) (0x1000 + ((p) << 7)) #define SPECIAL_MCAST_TABLE(p) (0x1400 + ((p) << 10)) #define OTHER_MCAST_TABLE(p) (0x1500 + ((p) << 10)) #define UNICAST_TABLE(p) (0x1600 + ((p) << 10)) /* * SDMA configuration register default value. */ #if defined(__BIG_ENDIAN) #define PORT_SDMA_CONFIG_DEFAULT_VALUE \ (RX_BURST_SIZE_4_64BIT | \ TX_BURST_SIZE_4_64BIT) #elif defined(__LITTLE_ENDIAN) #define PORT_SDMA_CONFIG_DEFAULT_VALUE \ (RX_BURST_SIZE_4_64BIT | \ BLM_RX_NO_SWAP | \ BLM_TX_NO_SWAP | \ TX_BURST_SIZE_4_64BIT) #else #error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined #endif /* * Misc definitions. */ #define DEFAULT_RX_QUEUE_SIZE 128 #define DEFAULT_TX_QUEUE_SIZE 256 #define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) /* * RX/TX descriptors. */ #if defined(__BIG_ENDIAN) struct rx_desc { u16 byte_cnt; /* Descriptor buffer byte count */ u16 buf_size; /* Buffer size */ u32 cmd_sts; /* Descriptor command status */ u32 next_desc_ptr; /* Next descriptor pointer */ u32 buf_ptr; /* Descriptor buffer pointer */ }; struct tx_desc { u16 byte_cnt; /* buffer byte count */ u16 l4i_chk; /* CPU provided TCP checksum */ u32 cmd_sts; /* Command/status field */ u32 next_desc_ptr; /* Pointer to next descriptor */ u32 buf_ptr; /* pointer to buffer for this descriptor*/ }; #elif defined(__LITTLE_ENDIAN) struct rx_desc { u32 cmd_sts; /* Descriptor command status */ u16 buf_size; /* Buffer size */ u16 byte_cnt; /* Descriptor buffer byte count */ u32 buf_ptr; /* Descriptor buffer pointer */ u32 next_desc_ptr; /* Next descriptor pointer */ }; struct tx_desc { u32 cmd_sts; /* Command/status field */ u16 l4i_chk; /* CPU provided TCP checksum */ u16 byte_cnt; /* buffer byte count */ u32 buf_ptr; /* pointer to buffer for this descriptor*/ u32 next_desc_ptr; /* 
Pointer to next descriptor */ }; #else #error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined #endif /* RX & TX descriptor command */ #define BUFFER_OWNED_BY_DMA 0x80000000 /* RX & TX descriptor status */ #define ERROR_SUMMARY 0x00000001 /* RX descriptor status */ #define LAYER_4_CHECKSUM_OK 0x40000000 #define RX_ENABLE_INTERRUPT 0x20000000 #define RX_FIRST_DESC 0x08000000 #define RX_LAST_DESC 0x04000000 #define RX_IP_HDR_OK 0x02000000 #define RX_PKT_IS_IPV4 0x01000000 #define RX_PKT_IS_ETHERNETV2 0x00800000 #define RX_PKT_LAYER4_TYPE_MASK 0x00600000 #define RX_PKT_LAYER4_TYPE_TCP_IPV4 0x00000000 #define RX_PKT_IS_VLAN_TAGGED 0x00080000 /* TX descriptor command */ #define TX_ENABLE_INTERRUPT 0x00800000 #define GEN_CRC 0x00400000 #define TX_FIRST_DESC 0x00200000 #define TX_LAST_DESC 0x00100000 #define ZERO_PADDING 0x00080000 #define GEN_IP_V4_CHECKSUM 0x00040000 #define GEN_TCP_UDP_CHECKSUM 0x00020000 #define UDP_FRAME 0x00010000 #define MAC_HDR_EXTRA_4_BYTES 0x00008000 #define MAC_HDR_EXTRA_8_BYTES 0x00000200 #define TX_IHL_SHIFT 11 /* global *******************************************************************/ struct mv643xx_eth_shared_private { /* * Ethernet controller base address. */ void __iomem *base; /* * Points at the right SMI instance to use. */ struct mv643xx_eth_shared_private *smi; /* * Provides access to local SMI interface. */ struct mii_bus *smi_bus; /* * If we have access to the error interrupt pin (which is * somewhat misnamed as it not only reflects internal errors * but also reflects SMI completion), use that to wait for * SMI access completion instead of polling the SMI busy bit. */ int err_interrupt; wait_queue_head_t smi_busy_wait; /* * Per-port MBUS window access register value. */ u32 win_protect; /* * Hardware-specific parameters. 
*/ unsigned int t_clk; int extended_rx_coal_limit; int tx_bw_control; int tx_csum_limit; }; #define TX_BW_CONTROL_ABSENT 0 #define TX_BW_CONTROL_OLD_LAYOUT 1 #define TX_BW_CONTROL_NEW_LAYOUT 2 static int mv643xx_eth_open(struct net_device *dev); static int mv643xx_eth_stop(struct net_device *dev); /* per-port *****************************************************************/ struct mib_counters { u64 good_octets_received; u32 bad_octets_received; u32 internal_mac_transmit_err; u32 good_frames_received; u32 bad_frames_received; u32 broadcast_frames_received; u32 multicast_frames_received; u32 frames_64_octets; u32 frames_65_to_127_octets; u32 frames_128_to_255_octets; u32 frames_256_to_511_octets; u32 frames_512_to_1023_octets; u32 frames_1024_to_max_octets; u64 good_octets_sent; u32 good_frames_sent; u32 excessive_collision; u32 multicast_frames_sent; u32 broadcast_frames_sent; u32 unrec_mac_control_received; u32 fc_sent; u32 good_fc_received; u32 bad_fc_received; u32 undersize_received; u32 fragments_received; u32 oversize_received; u32 jabber_received; u32 mac_receive_error; u32 bad_crc_event; u32 collision; u32 late_collision; }; struct lro_counters { u32 lro_aggregated; u32 lro_flushed; u32 lro_no_desc; }; struct rx_queue { int index; int rx_ring_size; int rx_desc_count; int rx_curr_desc; int rx_used_desc; struct rx_desc *rx_desc_area; dma_addr_t rx_desc_dma; int rx_desc_area_size; struct sk_buff **rx_skb; struct net_lro_mgr lro_mgr; struct net_lro_desc lro_arr[8]; }; struct tx_queue { int index; int tx_ring_size; int tx_desc_count; int tx_curr_desc; int tx_used_desc; struct tx_desc *tx_desc_area; dma_addr_t tx_desc_dma; int tx_desc_area_size; struct sk_buff_head tx_skb; unsigned long tx_packets; unsigned long tx_bytes; unsigned long tx_dropped; }; struct mv643xx_eth_private { struct mv643xx_eth_shared_private *shared; void __iomem *base; int port_num; struct net_device *dev; struct phy_device *phy; struct timer_list mib_counters_timer; spinlock_t 
mib_counters_lock; struct mib_counters mib_counters; struct lro_counters lro_counters; struct work_struct tx_timeout_task; struct napi_struct napi; u32 int_mask; u8 oom; u8 work_link; u8 work_tx; u8 work_tx_end; u8 work_rx; u8 work_rx_refill; int skb_size; struct sk_buff_head rx_recycle; /* * RX state. */ int rx_ring_size; unsigned long rx_desc_sram_addr; int rx_desc_sram_size; int rxq_count; struct timer_list rx_oom; struct rx_queue rxq[8]; /* * TX state. */ int tx_ring_size; unsigned long tx_desc_sram_addr; int tx_desc_sram_size; int txq_count; struct tx_queue txq[8]; }; /* port register accessors **************************************************/ static inline u32 rdl(struct mv643xx_eth_private *mp, int offset) { return readl(mp->shared->base + offset); } static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset) { return readl(mp->base + offset); } static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data) { writel(data, mp->shared->base + offset); } static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data) { writel(data, mp->base + offset); } /* rxq/txq helper functions *************************************************/ static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq) { return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]); } static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq) { return container_of(txq, struct mv643xx_eth_private, txq[txq->index]); } static void rxq_enable(struct rx_queue *rxq) { struct mv643xx_eth_private *mp = rxq_to_mp(rxq); wrlp(mp, RXQ_COMMAND, 1 << rxq->index); } static void rxq_disable(struct rx_queue *rxq) { struct mv643xx_eth_private *mp = rxq_to_mp(rxq); u8 mask = 1 << rxq->index; wrlp(mp, RXQ_COMMAND, mask << 8); while (rdlp(mp, RXQ_COMMAND) & mask) udelay(10); } static void txq_reset_hw_ptr(struct tx_queue *txq) { struct mv643xx_eth_private *mp = txq_to_mp(txq); u32 addr; addr = (u32)txq->tx_desc_dma; addr += txq->tx_curr_desc * 
sizeof(struct tx_desc); wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr); } static void txq_enable(struct tx_queue *txq) { struct mv643xx_eth_private *mp = txq_to_mp(txq); wrlp(mp, TXQ_COMMAND, 1 << txq->index); } static void txq_disable(struct tx_queue *txq) { struct mv643xx_eth_private *mp = txq_to_mp(txq); u8 mask = 1 << txq->index; wrlp(mp, TXQ_COMMAND, mask << 8); while (rdlp(mp, TXQ_COMMAND) & mask) udelay(10); } static void txq_maybe_wake(struct tx_queue *txq) { struct mv643xx_eth_private *mp = txq_to_mp(txq); struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); if (netif_tx_queue_stopped(nq)) { __netif_tx_lock(nq, smp_processor_id()); if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1) netif_tx_wake_queue(nq); __netif_tx_unlock(nq); } } /* rx napi ******************************************************************/ static int mv643xx_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph, u64 *hdr_flags, void *priv) { unsigned long cmd_sts = (unsigned long)priv; /* * Make sure that this packet is Ethernet II, is not VLAN * tagged, is IPv4, has a valid IP header, and is TCP. 
*/ if ((cmd_sts & (RX_IP_HDR_OK | RX_PKT_IS_IPV4 | RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_MASK | RX_PKT_IS_VLAN_TAGGED)) != (RX_IP_HDR_OK | RX_PKT_IS_IPV4 | RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_TCP_IPV4)) return -1; skb_reset_network_header(skb); skb_set_transport_header(skb, ip_hdrlen(skb)); *iphdr = ip_hdr(skb); *tcph = tcp_hdr(skb); *hdr_flags = LRO_IPV4 | LRO_TCP; return 0; } static int rxq_process(struct rx_queue *rxq, int budget) { struct mv643xx_eth_private *mp = rxq_to_mp(rxq); struct net_device_stats *stats = &mp->dev->stats; int lro_flush_needed; int rx; lro_flush_needed = 0; rx = 0; while (rx < budget && rxq->rx_desc_count) { struct rx_desc *rx_desc; unsigned int cmd_sts; struct sk_buff *skb; u16 byte_cnt; rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc]; cmd_sts = rx_desc->cmd_sts; if (cmd_sts & BUFFER_OWNED_BY_DMA) break; rmb(); skb = rxq->rx_skb[rxq->rx_curr_desc]; rxq->rx_skb[rxq->rx_curr_desc] = NULL; rxq->rx_curr_desc++; if (rxq->rx_curr_desc == rxq->rx_ring_size) rxq->rx_curr_desc = 0; dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr, rx_desc->buf_size, DMA_FROM_DEVICE); rxq->rx_desc_count--; rx++; mp->work_rx_refill |= 1 << rxq->index; byte_cnt = rx_desc->byte_cnt; /* * Update statistics. * * Note that the descriptor byte count includes 2 dummy * bytes automatically inserted by the hardware at the * start of the packet (which we don't count), and a 4 * byte CRC at the end of the packet (which we do count). */ stats->rx_packets++; stats->rx_bytes += byte_cnt - 2; /* * In case we received a packet without first / last bits * on, or the error summary bit is set, the packet needs * to be dropped. 
*/ if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY)) != (RX_FIRST_DESC | RX_LAST_DESC)) goto err; /* * The -4 is for the CRC in the trailer of the * received packet */ skb_put(skb, byte_cnt - 2 - 4); if (cmd_sts & LAYER_4_CHECKSUM_OK) skb->ip_summed = CHECKSUM_UNNECESSARY; skb->protocol = eth_type_trans(skb, mp->dev); if (skb->dev->features & NETIF_F_LRO && skb->ip_summed == CHECKSUM_UNNECESSARY) { lro_receive_skb(&rxq->lro_mgr, skb, (void *)cmd_sts); lro_flush_needed = 1; } else netif_receive_skb(skb); continue; err: stats->rx_dropped++; if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != (RX_FIRST_DESC | RX_LAST_DESC)) { if (net_ratelimit()) netdev_err(mp->dev, "received packet spanning multiple descriptors\n"); } if (cmd_sts & ERROR_SUMMARY) stats->rx_errors++; dev_kfree_skb(skb); } if (lro_flush_needed) lro_flush_all(&rxq->lro_mgr); if (rx < budget) mp->work_rx &= ~(1 << rxq->index); return rx; } static int rxq_refill(struct rx_queue *rxq, int budget) { struct mv643xx_eth_private *mp = rxq_to_mp(rxq); int refilled; refilled = 0; while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) { struct sk_buff *skb; int rx; struct rx_desc *rx_desc; int size; skb = __skb_dequeue(&mp->rx_recycle); if (skb == NULL) skb = dev_alloc_skb(mp->skb_size); if (skb == NULL) { mp->oom = 1; goto oom; } if (SKB_DMA_REALIGN) skb_reserve(skb, SKB_DMA_REALIGN); refilled++; rxq->rx_desc_count++; rx = rxq->rx_used_desc++; if (rxq->rx_used_desc == rxq->rx_ring_size) rxq->rx_used_desc = 0; rx_desc = rxq->rx_desc_area + rx; size = skb->end - skb->data; rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data, size, DMA_FROM_DEVICE); rx_desc->buf_size = size; rxq->rx_skb[rx] = skb; wmb(); rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT; wmb(); /* * The hardware automatically prepends 2 bytes of * dummy data to each received packet, so that the * IP header ends up 16-byte aligned. 
*/ skb_reserve(skb, 2); } if (refilled < budget) mp->work_rx_refill &= ~(1 << rxq->index); oom: return refilled; } /* tx ***********************************************************************/ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb) { int frag; for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; if (fragp->size <= 8 && fragp->page_offset & 7) return 1; } return 0; } static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) { struct mv643xx_eth_private *mp = txq_to_mp(txq); int nr_frags = skb_shinfo(skb)->nr_frags; int frag; for (frag = 0; frag < nr_frags; frag++) { skb_frag_t *this_frag; int tx_index; struct tx_desc *desc; this_frag = &skb_shinfo(skb)->frags[frag]; tx_index = txq->tx_curr_desc++; if (txq->tx_curr_desc == txq->tx_ring_size) txq->tx_curr_desc = 0; desc = &txq->tx_desc_area[tx_index]; /* * The last fragment will generate an interrupt * which will free the skb on TX completion. 
*/ if (frag == nr_frags - 1) { desc->cmd_sts = BUFFER_OWNED_BY_DMA | ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT; } else { desc->cmd_sts = BUFFER_OWNED_BY_DMA; } desc->l4i_chk = 0; desc->byte_cnt = this_frag->size; desc->buf_ptr = dma_map_page(mp->dev->dev.parent, this_frag->page, this_frag->page_offset, this_frag->size, DMA_TO_DEVICE); } } static inline __be16 sum16_as_be(__sum16 sum) { return (__force __be16)sum; } static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb) { struct mv643xx_eth_private *mp = txq_to_mp(txq); int nr_frags = skb_shinfo(skb)->nr_frags; int tx_index; struct tx_desc *desc; u32 cmd_sts; u16 l4i_chk; int length; cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA; l4i_chk = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) { int hdr_len; int tag_bytes; BUG_ON(skb->protocol != htons(ETH_P_IP) && skb->protocol != htons(ETH_P_8021Q)); hdr_len = (void *)ip_hdr(skb) - (void *)skb->data; tag_bytes = hdr_len - ETH_HLEN; if (skb->len - hdr_len > mp->shared->tx_csum_limit || unlikely(tag_bytes & ~12)) { if (skb_checksum_help(skb) == 0) goto no_csum; kfree_skb(skb); return 1; } if (tag_bytes & 4) cmd_sts |= MAC_HDR_EXTRA_4_BYTES; if (tag_bytes & 8) cmd_sts |= MAC_HDR_EXTRA_8_BYTES; cmd_sts |= GEN_TCP_UDP_CHECKSUM | GEN_IP_V4_CHECKSUM | ip_hdr(skb)->ihl << TX_IHL_SHIFT; switch (ip_hdr(skb)->protocol) { case IPPROTO_UDP: cmd_sts |= UDP_FRAME; l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check)); break; case IPPROTO_TCP: l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check)); break; default: BUG(); } } else { no_csum: /* Errata BTS #50, IHL must be 5 if no HW checksum */ cmd_sts |= 5 << TX_IHL_SHIFT; } tx_index = txq->tx_curr_desc++; if (txq->tx_curr_desc == txq->tx_ring_size) txq->tx_curr_desc = 0; desc = &txq->tx_desc_area[tx_index]; if (nr_frags) { txq_submit_frag_skb(txq, skb); length = skb_headlen(skb); } else { cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT; length = skb->len; } desc->l4i_chk = l4i_chk; desc->byte_cnt = 
length; desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data, length, DMA_TO_DEVICE); __skb_queue_tail(&txq->tx_skb, skb); /* ensure all other descriptors are written before first cmd_sts */ wmb(); desc->cmd_sts = cmd_sts; /* clear TX_END status */ mp->work_tx_end &= ~(1 << txq->index); /* ensure all descriptors are written before poking hardware */ wmb(); txq_enable(txq); txq->tx_desc_count += nr_frags + 1; return 0; } static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev) { struct mv643xx_eth_private *mp = netdev_priv(dev); int queue; struct tx_queue *txq; struct netdev_queue *nq; queue = skb_get_queue_mapping(skb); txq = mp->txq + queue; nq = netdev_get_tx_queue(dev, queue); if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) { txq->tx_dropped++; netdev_printk(KERN_DEBUG, dev, "failed to linearize skb with tiny unaligned fragment\n"); return NETDEV_TX_BUSY; } if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) { if (net_ratelimit()) netdev_err(dev, "tx queue full?!\n"); kfree_skb(skb); return NETDEV_TX_OK; } if (!txq_submit_skb(txq, skb)) { int entries_left; txq->tx_bytes += skb->len; txq->tx_packets++; entries_left = txq->tx_ring_size - txq->tx_desc_count; if (entries_left < MAX_SKB_FRAGS + 1) netif_tx_stop_queue(nq); } return NETDEV_TX_OK; } /* tx napi ******************************************************************/ static void txq_kick(struct tx_queue *txq) { struct mv643xx_eth_private *mp = txq_to_mp(txq); struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); u32 hw_desc_ptr; u32 expected_ptr; __netif_tx_lock(nq, smp_processor_id()); if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index)) goto out; hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index)); expected_ptr = (u32)txq->tx_desc_dma + txq->tx_curr_desc * sizeof(struct tx_desc); if (hw_desc_ptr != expected_ptr) txq_enable(txq); out: __netif_tx_unlock(nq); mp->work_tx_end &= ~(1 << txq->index); } static int txq_reclaim(struct tx_queue *txq, 
int budget, int force) { struct mv643xx_eth_private *mp = txq_to_mp(txq); struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); int reclaimed; __netif_tx_lock(nq, smp_processor_id()); reclaimed = 0; while (reclaimed < budget && txq->tx_desc_count > 0) { int tx_index; struct tx_desc *desc; u32 cmd_sts; struct sk_buff *skb; tx_index = txq->tx_used_desc; desc = &txq->tx_desc_area[tx_index]; cmd_sts = desc->cmd_sts; if (cmd_sts & BUFFER_OWNED_BY_DMA) { if (!force) break; desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA; } txq->tx_used_desc = tx_index + 1; if (txq->tx_used_desc == txq->tx_ring_size) txq->tx_used_desc = 0; reclaimed++; txq->tx_desc_count--; skb = NULL; if (cmd_sts & TX_LAST_DESC) skb = __skb_dequeue(&txq->tx_skb); if (cmd_sts & ERROR_SUMMARY) { netdev_info(mp->dev, "tx error\n"); mp->dev->stats.tx_errors++; } if (cmd_sts & TX_FIRST_DESC) { dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, desc->byte_cnt, DMA_TO_DEVICE); } else { dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr, desc->byte_cnt, DMA_TO_DEVICE); } if (skb != NULL) { if (skb_queue_len(&mp->rx_recycle) < mp->rx_ring_size && skb_recycle_check(skb, mp->skb_size)) __skb_queue_head(&mp->rx_recycle, skb); else dev_kfree_skb(skb); } } __netif_tx_unlock(nq); if (reclaimed < budget) mp->work_tx &= ~(1 << txq->index); return reclaimed; } /* tx rate control **********************************************************/ /* * Set total maximum TX rate (shared by all TX queues for this port) * to 'rate' bits per second, with a maximum burst of 'burst' bytes. 
*/ static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst) { int token_rate; int mtu; int bucket_size; token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000); if (token_rate > 1023) token_rate = 1023; mtu = (mp->dev->mtu + 255) >> 8; if (mtu > 63) mtu = 63; bucket_size = (burst + 255) >> 8; if (bucket_size > 65535) bucket_size = 65535; switch (mp->shared->tx_bw_control) { case TX_BW_CONTROL_OLD_LAYOUT: wrlp(mp, TX_BW_RATE, token_rate); wrlp(mp, TX_BW_MTU, mtu); wrlp(mp, TX_BW_BURST, bucket_size); break; case TX_BW_CONTROL_NEW_LAYOUT: wrlp(mp, TX_BW_RATE_MOVED, token_rate); wrlp(mp, TX_BW_MTU_MOVED, mtu); wrlp(mp, TX_BW_BURST_MOVED, bucket_size); break; } } static void txq_set_rate(struct tx_queue *txq, int rate, int burst) { struct mv643xx_eth_private *mp = txq_to_mp(txq); int token_rate; int bucket_size; token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000); if (token_rate > 1023) token_rate = 1023; bucket_size = (burst + 255) >> 8; if (bucket_size > 65535) bucket_size = 65535; wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14); wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate); } static void txq_set_fixed_prio_mode(struct tx_queue *txq) { struct mv643xx_eth_private *mp = txq_to_mp(txq); int off; u32 val; /* * Turn on fixed priority mode. 
*/ off = 0; switch (mp->shared->tx_bw_control) { case TX_BW_CONTROL_OLD_LAYOUT: off = TXQ_FIX_PRIO_CONF; break; case TX_BW_CONTROL_NEW_LAYOUT: off = TXQ_FIX_PRIO_CONF_MOVED; break; } if (off) { val = rdlp(mp, off); val |= 1 << txq->index; wrlp(mp, off, val); } } /* mii management interface *************************************************/ static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id) { struct mv643xx_eth_shared_private *msp = dev_id; if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) { writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE); wake_up(&msp->smi_busy_wait); return IRQ_HANDLED; } return IRQ_NONE; } static int smi_is_done(struct mv643xx_eth_shared_private *msp) { return !(readl(msp->base + SMI_REG) & SMI_BUSY); } static int smi_wait_ready(struct mv643xx_eth_shared_private *msp) { if (msp->err_interrupt == NO_IRQ) { int i; for (i = 0; !smi_is_done(msp); i++) { if (i == 10) return -ETIMEDOUT; msleep(10); } return 0; } if (!smi_is_done(msp)) { wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp), msecs_to_jiffies(100)); if (!smi_is_done(msp)) return -ETIMEDOUT; } return 0; } static int smi_bus_read(struct mii_bus *bus, int addr, int reg) { struct mv643xx_eth_shared_private *msp = bus->priv; void __iomem *smi_reg = msp->base + SMI_REG; int ret; if (smi_wait_ready(msp)) { pr_warn("SMI bus busy timeout\n"); return -ETIMEDOUT; } writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg); if (smi_wait_ready(msp)) { pr_warn("SMI bus busy timeout\n"); return -ETIMEDOUT; } ret = readl(smi_reg); if (!(ret & SMI_READ_VALID)) { pr_warn("SMI bus read not valid\n"); return -ENODEV; } return ret & 0xffff; } static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val) { struct mv643xx_eth_shared_private *msp = bus->priv; void __iomem *smi_reg = msp->base + SMI_REG; if (smi_wait_ready(msp)) { pr_warn("SMI bus busy timeout\n"); return -ETIMEDOUT; } writel(SMI_OPCODE_WRITE | (reg << 21) | (addr << 16) | (val & 0xffff), smi_reg); if 
(smi_wait_ready(msp)) { pr_warn("SMI bus busy timeout\n"); return -ETIMEDOUT; } return 0; } /* statistics ***************************************************************/ static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev) { struct mv643xx_eth_private *mp = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; unsigned long tx_packets = 0; unsigned long tx_bytes = 0; unsigned long tx_dropped = 0; int i; for (i = 0; i < mp->txq_count; i++) { struct tx_queue *txq = mp->txq + i; tx_packets += txq->tx_packets; tx_bytes += txq->tx_bytes; tx_dropped += txq->tx_dropped; } stats->tx_packets = tx_packets; stats->tx_bytes = tx_bytes; stats->tx_dropped = tx_dropped; return stats; } static void mv643xx_eth_grab_lro_stats(struct mv643xx_eth_private *mp) { u32 lro_aggregated = 0; u32 lro_flushed = 0; u32 lro_no_desc = 0; int i; for (i = 0; i < mp->rxq_count; i++) { struct rx_queue *rxq = mp->rxq + i; lro_aggregated += rxq->lro_mgr.stats.aggregated; lro_flushed += rxq->lro_mgr.stats.flushed; lro_no_desc += rxq->lro_mgr.stats.no_desc; } mp->lro_counters.lro_aggregated = lro_aggregated; mp->lro_counters.lro_flushed = lro_flushed; mp->lro_counters.lro_no_desc = lro_no_desc; } static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset) { return rdl(mp, MIB_COUNTERS(mp->port_num) + offset); } static void mib_counters_clear(struct mv643xx_eth_private *mp) { int i; for (i = 0; i < 0x80; i += 4) mib_read(mp, i); } static void mib_counters_update(struct mv643xx_eth_private *mp) { struct mib_counters *p = &mp->mib_counters; spin_lock_bh(&mp->mib_counters_lock); p->good_octets_received += mib_read(mp, 0x00); p->bad_octets_received += mib_read(mp, 0x08); p->internal_mac_transmit_err += mib_read(mp, 0x0c); p->good_frames_received += mib_read(mp, 0x10); p->bad_frames_received += mib_read(mp, 0x14); p->broadcast_frames_received += mib_read(mp, 0x18); p->multicast_frames_received += mib_read(mp, 0x1c); p->frames_64_octets += mib_read(mp, 0x20); 
p->frames_65_to_127_octets += mib_read(mp, 0x24); p->frames_128_to_255_octets += mib_read(mp, 0x28); p->frames_256_to_511_octets += mib_read(mp, 0x2c); p->frames_512_to_1023_octets += mib_read(mp, 0x30); p->frames_1024_to_max_octets += mib_read(mp, 0x34); p->good_octets_sent += mib_read(mp, 0x38); p->good_frames_sent += mib_read(mp, 0x40); p->excessive_collision += mib_read(mp, 0x44); p->multicast_frames_sent += mib_read(mp, 0x48); p->broadcast_frames_sent += mib_read(mp, 0x4c); p->unrec_mac_control_received += mib_read(mp, 0x50); p->fc_sent += mib_read(mp, 0x54); p->good_fc_received += mib_read(mp, 0x58); p->bad_fc_received += mib_read(mp, 0x5c); p->undersize_received += mib_read(mp, 0x60); p->fragments_received += mib_read(mp, 0x64); p->oversize_received += mib_read(mp, 0x68); p->jabber_received += mib_read(mp, 0x6c); p->mac_receive_error += mib_read(mp, 0x70); p->bad_crc_event += mib_read(mp, 0x74); p->collision += mib_read(mp, 0x78); p->late_collision += mib_read(mp, 0x7c); spin_unlock_bh(&mp->mib_counters_lock); mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); } static void mib_counters_timer_wrapper(unsigned long _mp) { struct mv643xx_eth_private *mp = (void *)_mp; mib_counters_update(mp); } /* interrupt coalescing *****************************************************/ /* * Hardware coalescing parameters are set in units of 64 t_clk * cycles. I.e.: * * coal_delay_in_usec = 64000000 * register_value / t_clk_rate * * register_value = coal_delay_in_usec * t_clk_rate / 64000000 * * In the ->set*() methods, we round the computed register value * to the nearest integer. 
*/ static unsigned int get_rx_coal(struct mv643xx_eth_private *mp) { u32 val = rdlp(mp, SDMA_CONFIG); u64 temp; if (mp->shared->extended_rx_coal_limit) temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7); else temp = (val & 0x003fff00) >> 8; temp *= 64000000; do_div(temp, mp->shared->t_clk); return (unsigned int)temp; } static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec) { u64 temp; u32 val; temp = (u64)usec * mp->shared->t_clk; temp += 31999999; do_div(temp, 64000000); val = rdlp(mp, SDMA_CONFIG); if (mp->shared->extended_rx_coal_limit) { if (temp > 0xffff) temp = 0xffff; val &= ~0x023fff80; val |= (temp & 0x8000) << 10; val |= (temp & 0x7fff) << 7; } else { if (temp > 0x3fff) temp = 0x3fff; val &= ~0x003fff00; val |= (temp & 0x3fff) << 8; } wrlp(mp, SDMA_CONFIG, val); } static unsigned int get_tx_coal(struct mv643xx_eth_private *mp) { u64 temp; temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4; temp *= 64000000; do_div(temp, mp->shared->t_clk); return (unsigned int)temp; } static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec) { u64 temp; temp = (u64)usec * mp->shared->t_clk; temp += 31999999; do_div(temp, 64000000); if (temp > 0x3fff) temp = 0x3fff; wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4); } /* ethtool ******************************************************************/ struct mv643xx_eth_stats { char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; int netdev_off; int mp_off; }; #define SSTAT(m) \ { #m, FIELD_SIZEOF(struct net_device_stats, m), \ offsetof(struct net_device, stats.m), -1 } #define MIBSTAT(m) \ { #m, FIELD_SIZEOF(struct mib_counters, m), \ -1, offsetof(struct mv643xx_eth_private, mib_counters.m) } #define LROSTAT(m) \ { #m, FIELD_SIZEOF(struct lro_counters, m), \ -1, offsetof(struct mv643xx_eth_private, lro_counters.m) } static const struct mv643xx_eth_stats mv643xx_eth_stats[] = { SSTAT(rx_packets), SSTAT(tx_packets), SSTAT(rx_bytes), SSTAT(tx_bytes), SSTAT(rx_errors), 
SSTAT(tx_errors), SSTAT(rx_dropped), SSTAT(tx_dropped), MIBSTAT(good_octets_received), MIBSTAT(bad_octets_received), MIBSTAT(internal_mac_transmit_err), MIBSTAT(good_frames_received), MIBSTAT(bad_frames_received), MIBSTAT(broadcast_frames_received), MIBSTAT(multicast_frames_received), MIBSTAT(frames_64_octets), MIBSTAT(frames_65_to_127_octets), MIBSTAT(frames_128_to_255_octets), MIBSTAT(frames_256_to_511_octets), MIBSTAT(frames_512_to_1023_octets), MIBSTAT(frames_1024_to_max_octets), MIBSTAT(good_octets_sent), MIBSTAT(good_frames_sent), MIBSTAT(excessive_collision), MIBSTAT(multicast_frames_sent), MIBSTAT(broadcast_frames_sent), MIBSTAT(unrec_mac_control_received), MIBSTAT(fc_sent), MIBSTAT(good_fc_received), MIBSTAT(bad_fc_received), MIBSTAT(undersize_received), MIBSTAT(fragments_received), MIBSTAT(oversize_received), MIBSTAT(jabber_received), MIBSTAT(mac_receive_error), MIBSTAT(bad_crc_event), MIBSTAT(collision), MIBSTAT(late_collision), LROSTAT(lro_aggregated), LROSTAT(lro_flushed), LROSTAT(lro_no_desc), }; static int mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp, struct ethtool_cmd *cmd) { int err; err = phy_read_status(mp->phy); if (err == 0) err = phy_ethtool_gset(mp->phy, cmd); /* * The MAC does not support 1000baseT_Half. */ cmd->supported &= ~SUPPORTED_1000baseT_Half; cmd->advertising &= ~ADVERTISED_1000baseT_Half; return err; } static int mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp, struct ethtool_cmd *cmd) { u32 port_status; port_status = rdlp(mp, PORT_STATUS); cmd->supported = SUPPORTED_MII; cmd->advertising = ADVERTISED_MII; switch (port_status & PORT_SPEED_MASK) { case PORT_SPEED_10: ethtool_cmd_speed_set(cmd, SPEED_10); break; case PORT_SPEED_100: ethtool_cmd_speed_set(cmd, SPEED_100); break; case PORT_SPEED_1000: ethtool_cmd_speed_set(cmd, SPEED_1000); break; default: cmd->speed = -1; break; } cmd->duplex = (port_status & FULL_DUPLEX) ? 
DUPLEX_FULL : DUPLEX_HALF; cmd->port = PORT_MII; cmd->phy_address = 0; cmd->transceiver = XCVR_INTERNAL; cmd->autoneg = AUTONEG_DISABLE; cmd->maxtxpkt = 1; cmd->maxrxpkt = 1; return 0; } static int mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct mv643xx_eth_private *mp = netdev_priv(dev); if (mp->phy != NULL) return mv643xx_eth_get_settings_phy(mp, cmd); else return mv643xx_eth_get_settings_phyless(mp, cmd); } static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct mv643xx_eth_private *mp = netdev_priv(dev); if (mp->phy == NULL) return -EINVAL; /* * The MAC does not support 1000baseT_Half. */ cmd->advertising &= ~ADVERTISED_1000baseT_Half; return phy_ethtool_sset(mp->phy, cmd); } static void mv643xx_eth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32); strncpy(drvinfo->version, mv643xx_eth_driver_version, 32); strncpy(drvinfo->fw_version, "N/A", 32); strncpy(drvinfo->bus_info, "platform", 32); drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats); } static int mv643xx_eth_nway_reset(struct net_device *dev) { struct mv643xx_eth_private *mp = netdev_priv(dev); if (mp->phy == NULL) return -EINVAL; return genphy_restart_aneg(mp->phy); } static int mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) { struct mv643xx_eth_private *mp = netdev_priv(dev); ec->rx_coalesce_usecs = get_rx_coal(mp); ec->tx_coalesce_usecs = get_tx_coal(mp); return 0; } static int mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) { struct mv643xx_eth_private *mp = netdev_priv(dev); set_rx_coal(mp, ec->rx_coalesce_usecs); set_tx_coal(mp, ec->tx_coalesce_usecs); return 0; } static void mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er) { struct mv643xx_eth_private *mp = netdev_priv(dev); er->rx_max_pending = 4096; er->tx_max_pending = 4096; er->rx_mini_max_pending = 0; 
er->rx_jumbo_max_pending = 0; er->rx_pending = mp->rx_ring_size; er->tx_pending = mp->tx_ring_size; er->rx_mini_pending = 0; er->rx_jumbo_pending = 0; } static int mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er) { struct mv643xx_eth_private *mp = netdev_priv(dev); if (er->rx_mini_pending || er->rx_jumbo_pending) return -EINVAL; mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096; mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096; if (netif_running(dev)) { mv643xx_eth_stop(dev); if (mv643xx_eth_open(dev)) { netdev_err(dev, "fatal error on re-opening device after ring param change\n"); return -ENOMEM; } } return 0; } static int mv643xx_eth_set_features(struct net_device *dev, u32 features) { struct mv643xx_eth_private *mp = netdev_priv(dev); u32 rx_csum = features & NETIF_F_RXCSUM; wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000); return 0; } static void mv643xx_eth_get_strings(struct net_device *dev, uint32_t stringset, uint8_t *data) { int i; if (stringset == ETH_SS_STATS) { for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { memcpy(data + i * ETH_GSTRING_LEN, mv643xx_eth_stats[i].stat_string, ETH_GSTRING_LEN); } } } static void mv643xx_eth_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, uint64_t *data) { struct mv643xx_eth_private *mp = netdev_priv(dev); int i; mv643xx_eth_get_stats(dev); mib_counters_update(mp); mv643xx_eth_grab_lro_stats(mp); for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { const struct mv643xx_eth_stats *stat; void *p; stat = mv643xx_eth_stats + i; if (stat->netdev_off >= 0) p = ((void *)mp->dev) + stat->netdev_off; else p = ((void *)mp) + stat->mp_off; data[i] = (stat->sizeof_stat == 8) ? 
*(uint64_t *)p : *(uint32_t *)p; } } static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset) { if (sset == ETH_SS_STATS) return ARRAY_SIZE(mv643xx_eth_stats); return -EOPNOTSUPP; } static const struct ethtool_ops mv643xx_eth_ethtool_ops = { .get_settings = mv643xx_eth_get_settings, .set_settings = mv643xx_eth_set_settings, .get_drvinfo = mv643xx_eth_get_drvinfo, .nway_reset = mv643xx_eth_nway_reset, .get_link = ethtool_op_get_link, .get_coalesce = mv643xx_eth_get_coalesce, .set_coalesce = mv643xx_eth_set_coalesce, .get_ringparam = mv643xx_eth_get_ringparam, .set_ringparam = mv643xx_eth_set_ringparam, .get_strings = mv643xx_eth_get_strings, .get_ethtool_stats = mv643xx_eth_get_ethtool_stats, .get_sset_count = mv643xx_eth_get_sset_count, }; /* address handling *********************************************************/ static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr) { unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH); unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW); addr[0] = (mac_h >> 24) & 0xff; addr[1] = (mac_h >> 16) & 0xff; addr[2] = (mac_h >> 8) & 0xff; addr[3] = mac_h & 0xff; addr[4] = (mac_l >> 8) & 0xff; addr[5] = mac_l & 0xff; } static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr) { wrlp(mp, MAC_ADDR_HIGH, (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]); wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]); } static u32 uc_addr_filter_mask(struct net_device *dev) { struct netdev_hw_addr *ha; u32 nibbles; if (dev->flags & IFF_PROMISC) return 0; nibbles = 1 << (dev->dev_addr[5] & 0x0f); netdev_for_each_uc_addr(ha, dev) { if (memcmp(dev->dev_addr, ha->addr, 5)) return 0; if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0) return 0; nibbles |= 1 << (ha->addr[5] & 0x0f); } return nibbles; } static void mv643xx_eth_program_unicast_filter(struct net_device *dev) { struct mv643xx_eth_private *mp = netdev_priv(dev); u32 port_config; u32 nibbles; int i; uc_addr_set(mp, dev->dev_addr); port_config = 
rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE; nibbles = uc_addr_filter_mask(dev); if (!nibbles) { port_config |= UNICAST_PROMISCUOUS_MODE; nibbles = 0xffff; } for (i = 0; i < 16; i += 4) { int off = UNICAST_TABLE(mp->port_num) + i; u32 v; v = 0; if (nibbles & 1) v |= 0x00000001; if (nibbles & 2) v |= 0x00000100; if (nibbles & 4) v |= 0x00010000; if (nibbles & 8) v |= 0x01000000; nibbles >>= 4; wrl(mp, off, v); } wrlp(mp, PORT_CONFIG, port_config); } static int addr_crc(unsigned char *addr) { int crc = 0; int i; for (i = 0; i < 6; i++) { int j; crc = (crc ^ addr[i]) << 8; for (j = 7; j >= 0; j--) { if (crc & (0x100 << j)) crc ^= 0x107 << j; } } return crc; } static void mv643xx_eth_program_multicast_filter(struct net_device *dev) { struct mv643xx_eth_private *mp = netdev_priv(dev); u32 *mc_spec; u32 *mc_other; struct netdev_hw_addr *ha; int i; if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { int port_num; u32 accept; oom: port_num = mp->port_num; accept = 0x01010101; for (i = 0; i < 0x100; i += 4) { wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept); wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept); } return; } mc_spec = kmalloc(0x200, GFP_ATOMIC); if (mc_spec == NULL) goto oom; mc_other = mc_spec + (0x100 >> 2); memset(mc_spec, 0, 0x100); memset(mc_other, 0, 0x100); netdev_for_each_mc_addr(ha, dev) { u8 *a = ha->addr; u32 *table; int entry; if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) { table = mc_spec; entry = a[5]; } else { table = mc_other; entry = addr_crc(a); } table[entry >> 2] |= 1 << (8 * (entry & 3)); } for (i = 0; i < 0x100; i += 4) { wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]); wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]); } kfree(mc_spec); } static void mv643xx_eth_set_rx_mode(struct net_device *dev) { mv643xx_eth_program_unicast_filter(dev); mv643xx_eth_program_multicast_filter(dev); } static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *sa = addr; if 
(!is_valid_ether_addr(sa->sa_data)) return -EINVAL; memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); netif_addr_lock_bh(dev); mv643xx_eth_program_unicast_filter(dev); netif_addr_unlock_bh(dev); return 0; } /* rx/tx queue initialisation ***********************************************/ static int rxq_init(struct mv643xx_eth_private *mp, int index) { struct rx_queue *rxq = mp->rxq + index; struct rx_desc *rx_desc; int size; int i; rxq->index = index; rxq->rx_ring_size = mp->rx_ring_size; rxq->rx_desc_count = 0; rxq->rx_curr_desc = 0; rxq->rx_used_desc = 0; size = rxq->rx_ring_size * sizeof(struct rx_desc); if (index == 0 && size <= mp->rx_desc_sram_size) { rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr, mp->rx_desc_sram_size); rxq->rx_desc_dma = mp->rx_desc_sram_addr; } else { rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, size, &rxq->rx_desc_dma, GFP_KERNEL); } if (rxq->rx_desc_area == NULL) { netdev_err(mp->dev, "can't allocate rx ring (%d bytes)\n", size); goto out; } memset(rxq->rx_desc_area, 0, size); rxq->rx_desc_area_size = size; rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb), GFP_KERNEL); if (rxq->rx_skb == NULL) { netdev_err(mp->dev, "can't allocate rx skb ring\n"); goto out_free; } rx_desc = (struct rx_desc *)rxq->rx_desc_area; for (i = 0; i < rxq->rx_ring_size; i++) { int nexti; nexti = i + 1; if (nexti == rxq->rx_ring_size) nexti = 0; rx_desc[i].next_desc_ptr = rxq->rx_desc_dma + nexti * sizeof(struct rx_desc); } rxq->lro_mgr.dev = mp->dev; memset(&rxq->lro_mgr.stats, 0, sizeof(rxq->lro_mgr.stats)); rxq->lro_mgr.features = LRO_F_NAPI; rxq->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; rxq->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; rxq->lro_mgr.max_desc = ARRAY_SIZE(rxq->lro_arr); rxq->lro_mgr.max_aggr = 32; rxq->lro_mgr.frag_align_pad = 0; rxq->lro_mgr.lro_arr = rxq->lro_arr; rxq->lro_mgr.get_skb_header = mv643xx_get_skb_header; memset(&rxq->lro_arr, 0, sizeof(rxq->lro_arr)); return 0; out_free: if (index == 0 && size <= 
mp->rx_desc_sram_size) iounmap(rxq->rx_desc_area); else dma_free_coherent(mp->dev->dev.parent, size, rxq->rx_desc_area, rxq->rx_desc_dma); out: return -ENOMEM; } static void rxq_deinit(struct rx_queue *rxq) { struct mv643xx_eth_private *mp = rxq_to_mp(rxq); int i; rxq_disable(rxq); for (i = 0; i < rxq->rx_ring_size; i++) { if (rxq->rx_skb[i]) { dev_kfree_skb(rxq->rx_skb[i]); rxq->rx_desc_count--; } } if (rxq->rx_desc_count) { netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n", rxq->rx_desc_count); } if (rxq->index == 0 && rxq->rx_desc_area_size <= mp->rx_desc_sram_size) iounmap(rxq->rx_desc_area); else dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size, rxq->rx_desc_area, rxq->rx_desc_dma); kfree(rxq->rx_skb); } static int txq_init(struct mv643xx_eth_private *mp, int index) { struct tx_queue *txq = mp->txq + index; struct tx_desc *tx_desc; int size; int i; txq->index = index; txq->tx_ring_size = mp->tx_ring_size; txq->tx_desc_count = 0; txq->tx_curr_desc = 0; txq->tx_used_desc = 0; size = txq->tx_ring_size * sizeof(struct tx_desc); if (index == 0 && size <= mp->tx_desc_sram_size) { txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr, mp->tx_desc_sram_size); txq->tx_desc_dma = mp->tx_desc_sram_addr; } else { txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, size, &txq->tx_desc_dma, GFP_KERNEL); } if (txq->tx_desc_area == NULL) { netdev_err(mp->dev, "can't allocate tx ring (%d bytes)\n", size); return -ENOMEM; } memset(txq->tx_desc_area, 0, size); txq->tx_desc_area_size = size; tx_desc = (struct tx_desc *)txq->tx_desc_area; for (i = 0; i < txq->tx_ring_size; i++) { struct tx_desc *txd = tx_desc + i; int nexti; nexti = i + 1; if (nexti == txq->tx_ring_size) nexti = 0; txd->cmd_sts = 0; txd->next_desc_ptr = txq->tx_desc_dma + nexti * sizeof(struct tx_desc); } skb_queue_head_init(&txq->tx_skb); return 0; } static void txq_deinit(struct tx_queue *txq) { struct mv643xx_eth_private *mp = txq_to_mp(txq); txq_disable(txq); txq_reclaim(txq, 
txq->tx_ring_size, 1); BUG_ON(txq->tx_used_desc != txq->tx_curr_desc); if (txq->index == 0 && txq->tx_desc_area_size <= mp->tx_desc_sram_size) iounmap(txq->tx_desc_area); else dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, txq->tx_desc_area, txq->tx_desc_dma); } /* netdev ops and related ***************************************************/ static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp) { u32 int_cause; u32 int_cause_ext; int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask; if (int_cause == 0) return 0; int_cause_ext = 0; if (int_cause & INT_EXT) { int_cause &= ~INT_EXT; int_cause_ext = rdlp(mp, INT_CAUSE_EXT); } if (int_cause) { wrlp(mp, INT_CAUSE, ~int_cause); mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) & ~(rdlp(mp, TXQ_COMMAND) & 0xff); mp->work_rx |= (int_cause & INT_RX) >> 2; } int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX; if (int_cause_ext) { wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext); if (int_cause_ext & INT_EXT_LINK_PHY) mp->work_link = 1; mp->work_tx |= int_cause_ext & INT_EXT_TX; } return 1; } static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct mv643xx_eth_private *mp = netdev_priv(dev); if (unlikely(!mv643xx_eth_collect_events(mp))) return IRQ_NONE; wrlp(mp, INT_MASK, 0); napi_schedule(&mp->napi); return IRQ_HANDLED; } static void handle_link_event(struct mv643xx_eth_private *mp) { struct net_device *dev = mp->dev; u32 port_status; int speed; int duplex; int fc; port_status = rdlp(mp, PORT_STATUS); if (!(port_status & LINK_UP)) { if (netif_carrier_ok(dev)) { int i; netdev_info(dev, "link down\n"); netif_carrier_off(dev); for (i = 0; i < mp->txq_count; i++) { struct tx_queue *txq = mp->txq + i; txq_reclaim(txq, txq->tx_ring_size, 1); txq_reset_hw_ptr(txq); } } return; } switch (port_status & PORT_SPEED_MASK) { case PORT_SPEED_10: speed = 10; break; case PORT_SPEED_100: speed = 100; break; case PORT_SPEED_1000: speed = 1000; break; default: speed = 
-1; break; } duplex = (port_status & FULL_DUPLEX) ? 1 : 0; fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0; netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n", speed, duplex ? "full" : "half", fc ? "en" : "dis"); if (!netif_carrier_ok(dev)) netif_carrier_on(dev); } static int mv643xx_eth_poll(struct napi_struct *napi, int budget) { struct mv643xx_eth_private *mp; int work_done; mp = container_of(napi, struct mv643xx_eth_private, napi); if (unlikely(mp->oom)) { mp->oom = 0; del_timer(&mp->rx_oom); } work_done = 0; while (work_done < budget) { u8 queue_mask; int queue; int work_tbd; if (mp->work_link) { mp->work_link = 0; handle_link_event(mp); work_done++; continue; } queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx; if (likely(!mp->oom)) queue_mask |= mp->work_rx_refill; if (!queue_mask) { if (mv643xx_eth_collect_events(mp)) continue; break; } queue = fls(queue_mask) - 1; queue_mask = 1 << queue; work_tbd = budget - work_done; if (work_tbd > 16) work_tbd = 16; if (mp->work_tx_end & queue_mask) { txq_kick(mp->txq + queue); } else if (mp->work_tx & queue_mask) { work_done += txq_reclaim(mp->txq + queue, work_tbd, 0); txq_maybe_wake(mp->txq + queue); } else if (mp->work_rx & queue_mask) { work_done += rxq_process(mp->rxq + queue, work_tbd); } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) { work_done += rxq_refill(mp->rxq + queue, work_tbd); } else { BUG(); } } if (work_done < budget) { if (mp->oom) mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); napi_complete(napi); wrlp(mp, INT_MASK, mp->int_mask); } return work_done; } static inline void oom_timer_wrapper(unsigned long data) { struct mv643xx_eth_private *mp = (void *)data; napi_schedule(&mp->napi); } static void phy_reset(struct mv643xx_eth_private *mp) { int data; data = phy_read(mp->phy, MII_BMCR); if (data < 0) return; data |= BMCR_RESET; if (phy_write(mp->phy, MII_BMCR, data) < 0) return; do { data = phy_read(mp->phy, MII_BMCR); } while (data >= 0 && data & BMCR_RESET); } 
static void port_start(struct mv643xx_eth_private *mp) { u32 pscr; int i; /* * Perform PHY reset, if there is a PHY. */ if (mp->phy != NULL) { struct ethtool_cmd cmd; mv643xx_eth_get_settings(mp->dev, &cmd); phy_reset(mp); mv643xx_eth_set_settings(mp->dev, &cmd); } /* * Configure basic link parameters. */ pscr = rdlp(mp, PORT_SERIAL_CONTROL); pscr |= SERIAL_PORT_ENABLE; wrlp(mp, PORT_SERIAL_CONTROL, pscr); pscr |= DO_NOT_FORCE_LINK_FAIL; if (mp->phy == NULL) pscr |= FORCE_LINK_PASS; wrlp(mp, PORT_SERIAL_CONTROL, pscr); /* * Configure TX path and queues. */ tx_set_rate(mp, 1000000000, 16777216); for (i = 0; i < mp->txq_count; i++) { struct tx_queue *txq = mp->txq + i; txq_reset_hw_ptr(txq); txq_set_rate(txq, 1000000000, 16777216); txq_set_fixed_prio_mode(txq); } /* * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast * frames to RX queue #0, and include the pseudo-header when * calculating receive checksums. */ mv643xx_eth_set_features(mp->dev, mp->dev->features); /* * Treat BPDUs as normal multicasts, and disable partition mode. */ wrlp(mp, PORT_CONFIG_EXT, 0x00000000); /* * Add configured unicast addresses to address filter table. */ mv643xx_eth_program_unicast_filter(mp->dev); /* * Enable the receive queues. */ for (i = 0; i < mp->rxq_count; i++) { struct rx_queue *rxq = mp->rxq + i; u32 addr; addr = (u32)rxq->rx_desc_dma; addr += rxq->rx_curr_desc * sizeof(struct rx_desc); wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr); rxq_enable(rxq); } } static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp) { int skb_size; /* * Reserve 2+14 bytes for an ethernet header (the hardware * automatically prepends 2 bytes of dummy data to each * received packet), 16 bytes for up to four VLAN tags, and * 4 bytes for the trailing FCS -- 36 bytes total. */ skb_size = mp->dev->mtu + 36; /* * Make sure that the skb size is a multiple of 8 bytes, as * the lower three bits of the receive descriptor's buffer * size field are ignored by the hardware. 
*/ mp->skb_size = (skb_size + 7) & ~7; /* * If NET_SKB_PAD is smaller than a cache line, * netdev_alloc_skb() will cause skb->data to be misaligned * to a cache line boundary. If this is the case, include * some extra space to allow re-aligning the data area. */ mp->skb_size += SKB_DMA_REALIGN; } static int mv643xx_eth_open(struct net_device *dev) { struct mv643xx_eth_private *mp = netdev_priv(dev); int err; int i; wrlp(mp, INT_CAUSE, 0); wrlp(mp, INT_CAUSE_EXT, 0); rdlp(mp, INT_CAUSE_EXT); err = request_irq(dev->irq, mv643xx_eth_irq, IRQF_SHARED, dev->name, dev); if (err) { netdev_err(dev, "can't assign irq\n"); return -EAGAIN; } mv643xx_eth_recalc_skb_size(mp); napi_enable(&mp->napi); skb_queue_head_init(&mp->rx_recycle); mp->int_mask = INT_EXT; for (i = 0; i < mp->rxq_count; i++) { err = rxq_init(mp, i); if (err) { while (--i >= 0) rxq_deinit(mp->rxq + i); goto out; } rxq_refill(mp->rxq + i, INT_MAX); mp->int_mask |= INT_RX_0 << i; } if (mp->oom) { mp->rx_oom.expires = jiffies + (HZ / 10); add_timer(&mp->rx_oom); } for (i = 0; i < mp->txq_count; i++) { err = txq_init(mp, i); if (err) { while (--i >= 0) txq_deinit(mp->txq + i); goto out_free; } mp->int_mask |= INT_TX_END_0 << i; } port_start(mp); wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX); wrlp(mp, INT_MASK, mp->int_mask); return 0; out_free: for (i = 0; i < mp->rxq_count; i++) rxq_deinit(mp->rxq + i); out: free_irq(dev->irq, dev); return err; } static void port_reset(struct mv643xx_eth_private *mp) { unsigned int data; int i; for (i = 0; i < mp->rxq_count; i++) rxq_disable(mp->rxq + i); for (i = 0; i < mp->txq_count; i++) txq_disable(mp->txq + i); while (1) { u32 ps = rdlp(mp, PORT_STATUS); if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY) break; udelay(10); } /* Reset the Enable bit in the Configuration Register */ data = rdlp(mp, PORT_SERIAL_CONTROL); data &= ~(SERIAL_PORT_ENABLE | DO_NOT_FORCE_LINK_FAIL | FORCE_LINK_PASS); wrlp(mp, PORT_SERIAL_CONTROL, data); } static int 
mv643xx_eth_stop(struct net_device *dev) { struct mv643xx_eth_private *mp = netdev_priv(dev); int i; wrlp(mp, INT_MASK_EXT, 0x00000000); wrlp(mp, INT_MASK, 0x00000000); rdlp(mp, INT_MASK); napi_disable(&mp->napi); del_timer_sync(&mp->rx_oom); netif_carrier_off(dev); free_irq(dev->irq, dev); port_reset(mp); mv643xx_eth_get_stats(dev); mib_counters_update(mp); del_timer_sync(&mp->mib_counters_timer); skb_queue_purge(&mp->rx_recycle); for (i = 0; i < mp->rxq_count; i++) rxq_deinit(mp->rxq + i); for (i = 0; i < mp->txq_count; i++) txq_deinit(mp->txq + i); return 0; } static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct mv643xx_eth_private *mp = netdev_priv(dev); if (mp->phy != NULL) return phy_mii_ioctl(mp->phy, ifr, cmd); return -EOPNOTSUPP; } static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) { struct mv643xx_eth_private *mp = netdev_priv(dev); if (new_mtu < 64 || new_mtu > 9500) return -EINVAL; dev->mtu = new_mtu; mv643xx_eth_recalc_skb_size(mp); tx_set_rate(mp, 1000000000, 16777216); if (!netif_running(dev)) return 0; /* * Stop and then re-open the interface. This will allocate RX * skbs of the new MTU. * There is a possible danger that the open will not succeed, * due to memory being full. 
*/ mv643xx_eth_stop(dev); if (mv643xx_eth_open(dev)) { netdev_err(dev, "fatal error on re-opening device after MTU change\n"); } return 0; } static void tx_timeout_task(struct work_struct *ugly) { struct mv643xx_eth_private *mp; mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task); if (netif_running(mp->dev)) { netif_tx_stop_all_queues(mp->dev); port_reset(mp); port_start(mp); netif_tx_wake_all_queues(mp->dev); } } static void mv643xx_eth_tx_timeout(struct net_device *dev) { struct mv643xx_eth_private *mp = netdev_priv(dev); netdev_info(dev, "tx timeout\n"); schedule_work(&mp->tx_timeout_task); } #ifdef CONFIG_NET_POLL_CONTROLLER static void mv643xx_eth_netpoll(struct net_device *dev) { struct mv643xx_eth_private *mp = netdev_priv(dev); wrlp(mp, INT_MASK, 0x00000000); rdlp(mp, INT_MASK); mv643xx_eth_irq(dev->irq, dev); wrlp(mp, INT_MASK, mp->int_mask); } #endif /* platform glue ************************************************************/ static void mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp, struct mbus_dram_target_info *dram) { void __iomem *base = msp->base; u32 win_enable; u32 win_protect; int i; for (i = 0; i < 6; i++) { writel(0, base + WINDOW_BASE(i)); writel(0, base + WINDOW_SIZE(i)); if (i < 4) writel(0, base + WINDOW_REMAP_HIGH(i)); } win_enable = 0x3f; win_protect = 0; for (i = 0; i < dram->num_cs; i++) { struct mbus_dram_window *cs = dram->cs + i; writel((cs->base & 0xffff0000) | (cs->mbus_attr << 8) | dram->mbus_dram_target_id, base + WINDOW_BASE(i)); writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); win_enable &= ~(1 << i); win_protect |= 3 << (2 * i); } writel(win_enable, base + WINDOW_BAR_ENABLE); msp->win_protect = win_protect; } static void infer_hw_params(struct mv643xx_eth_shared_private *msp) { /* * Check whether we have a 14-bit coal limit field in bits * [21:8], or a 16-bit coal limit in bits [25,21:7] of the * SDMA config register. 
*/ writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG); if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000) msp->extended_rx_coal_limit = 1; else msp->extended_rx_coal_limit = 0; /* * Check whether the MAC supports TX rate control, and if * yes, whether its associated registers are in the old or * the new place. */ writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED); if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) { msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT; } else { writel(7, msp->base + 0x0400 + TX_BW_RATE); if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7) msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT; else msp->tx_bw_control = TX_BW_CONTROL_ABSENT; } } static int mv643xx_eth_shared_probe(struct platform_device *pdev) { static int mv643xx_eth_version_printed; struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; struct mv643xx_eth_shared_private *msp; struct resource *res; int ret; if (!mv643xx_eth_version_printed++) pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n", mv643xx_eth_driver_version); ret = -EINVAL; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) goto out; ret = -ENOMEM; msp = kzalloc(sizeof(*msp), GFP_KERNEL); if (msp == NULL) goto out; msp->base = ioremap(res->start, res->end - res->start + 1); if (msp->base == NULL) goto out_free; /* * Set up and register SMI bus. 
*/ if (pd == NULL || pd->shared_smi == NULL) { msp->smi_bus = mdiobus_alloc(); if (msp->smi_bus == NULL) goto out_unmap; msp->smi_bus->priv = msp; msp->smi_bus->name = "mv643xx_eth smi"; msp->smi_bus->read = smi_bus_read; msp->smi_bus->write = smi_bus_write, snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id); msp->smi_bus->parent = &pdev->dev; msp->smi_bus->phy_mask = 0xffffffff; if (mdiobus_register(msp->smi_bus) < 0) goto out_free_mii_bus; msp->smi = msp; } else { msp->smi = platform_get_drvdata(pd->shared_smi); } msp->err_interrupt = NO_IRQ; init_waitqueue_head(&msp->smi_busy_wait); /* * Check whether the error interrupt is hooked up. */ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (res != NULL) { int err; err = request_irq(res->start, mv643xx_eth_err_irq, IRQF_SHARED, "mv643xx_eth", msp); if (!err) { writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK); msp->err_interrupt = res->start; } } /* * (Re-)program MBUS remapping windows if we are asked to. */ if (pd != NULL && pd->dram != NULL) mv643xx_eth_conf_mbus_windows(msp, pd->dram); /* * Detect hardware parameters. */ msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000; msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? 
pd->tx_csum_limit : 9 * 1024; infer_hw_params(msp); platform_set_drvdata(pdev, msp); return 0; out_free_mii_bus: mdiobus_free(msp->smi_bus); out_unmap: iounmap(msp->base); out_free: kfree(msp); out: return ret; } static int mv643xx_eth_shared_remove(struct platform_device *pdev) { struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; if (pd == NULL || pd->shared_smi == NULL) { mdiobus_unregister(msp->smi_bus); mdiobus_free(msp->smi_bus); } if (msp->err_interrupt != NO_IRQ) free_irq(msp->err_interrupt, msp); iounmap(msp->base); kfree(msp); return 0; } static struct platform_driver mv643xx_eth_shared_driver = { .probe = mv643xx_eth_shared_probe, .remove = mv643xx_eth_shared_remove, .driver = { .name = MV643XX_ETH_SHARED_NAME, .owner = THIS_MODULE, }, }; static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr) { int addr_shift = 5 * mp->port_num; u32 data; data = rdl(mp, PHY_ADDR); data &= ~(0x1f << addr_shift); data |= (phy_addr & 0x1f) << addr_shift; wrl(mp, PHY_ADDR, data); } static int phy_addr_get(struct mv643xx_eth_private *mp) { unsigned int data; data = rdl(mp, PHY_ADDR); return (data >> (5 * mp->port_num)) & 0x1f; } static void set_params(struct mv643xx_eth_private *mp, struct mv643xx_eth_platform_data *pd) { struct net_device *dev = mp->dev; if (is_valid_ether_addr(pd->mac_addr)) memcpy(dev->dev_addr, pd->mac_addr, 6); else uc_addr_get(mp, dev->dev_addr); mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE; if (pd->rx_queue_size) mp->rx_ring_size = pd->rx_queue_size; mp->rx_desc_sram_addr = pd->rx_sram_addr; mp->rx_desc_sram_size = pd->rx_sram_size; mp->rxq_count = pd->rx_queue_count ? : 1; mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE; if (pd->tx_queue_size) mp->tx_ring_size = pd->tx_queue_size; mp->tx_desc_sram_addr = pd->tx_sram_addr; mp->tx_desc_sram_size = pd->tx_sram_size; mp->txq_count = pd->tx_queue_count ? 
: 1; } static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, int phy_addr) { struct mii_bus *bus = mp->shared->smi->smi_bus; struct phy_device *phydev; int start; int num; int i; if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) { start = phy_addr_get(mp) & 0x1f; num = 32; } else { start = phy_addr & 0x1f; num = 1; } phydev = NULL; for (i = 0; i < num; i++) { int addr = (start + i) & 0x1f; if (bus->phy_map[addr] == NULL) mdiobus_scan(bus, addr); if (phydev == NULL) { phydev = bus->phy_map[addr]; if (phydev != NULL) phy_addr_set(mp, addr); } } return phydev; } static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex) { struct phy_device *phy = mp->phy; phy_reset(mp); phy_attach(mp->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_GMII); if (speed == 0) { phy->autoneg = AUTONEG_ENABLE; phy->speed = 0; phy->duplex = 0; phy->advertising = phy->supported | ADVERTISED_Autoneg; } else { phy->autoneg = AUTONEG_DISABLE; phy->advertising = 0; phy->speed = speed; phy->duplex = duplex; } phy_start_aneg(phy); } static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex) { u32 pscr; pscr = rdlp(mp, PORT_SERIAL_CONTROL); if (pscr & SERIAL_PORT_ENABLE) { pscr &= ~SERIAL_PORT_ENABLE; wrlp(mp, PORT_SERIAL_CONTROL, pscr); } pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED; if (mp->phy == NULL) { pscr |= DISABLE_AUTO_NEG_SPEED_GMII; if (speed == SPEED_1000) pscr |= SET_GMII_SPEED_TO_1000; else if (speed == SPEED_100) pscr |= SET_MII_SPEED_TO_100; pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL; pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX; if (duplex == DUPLEX_FULL) pscr |= SET_FULL_DUPLEX_MODE; } wrlp(mp, PORT_SERIAL_CONTROL, pscr); } static const struct net_device_ops mv643xx_eth_netdev_ops = { .ndo_open = mv643xx_eth_open, .ndo_stop = mv643xx_eth_stop, .ndo_start_xmit = mv643xx_eth_xmit, .ndo_set_rx_mode = mv643xx_eth_set_rx_mode, .ndo_set_mac_address = mv643xx_eth_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = 
mv643xx_eth_ioctl, .ndo_change_mtu = mv643xx_eth_change_mtu, .ndo_set_features = mv643xx_eth_set_features, .ndo_tx_timeout = mv643xx_eth_tx_timeout, .ndo_get_stats = mv643xx_eth_get_stats, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = mv643xx_eth_netpoll, #endif }; static int mv643xx_eth_probe(struct platform_device *pdev) { struct mv643xx_eth_platform_data *pd; struct mv643xx_eth_private *mp; struct net_device *dev; struct resource *res; int err; pd = pdev->dev.platform_data; if (pd == NULL) { dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n"); return -ENODEV; } if (pd->shared == NULL) { dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n"); return -ENODEV; } dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8); if (!dev) return -ENOMEM; mp = netdev_priv(dev); platform_set_drvdata(pdev, mp); mp->shared = platform_get_drvdata(pd->shared); mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10); mp->port_num = pd->port_number; mp->dev = dev; set_params(mp, pd); netif_set_real_num_tx_queues(dev, mp->txq_count); netif_set_real_num_rx_queues(dev, mp->rxq_count); if (pd->phy_addr != MV643XX_ETH_PHY_NONE) mp->phy = phy_scan(mp, pd->phy_addr); if (mp->phy != NULL) phy_init(mp, pd->speed, pd->duplex); SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops); init_pscr(mp, pd->speed, pd->duplex); mib_counters_clear(mp); init_timer(&mp->mib_counters_timer); mp->mib_counters_timer.data = (unsigned long)mp; mp->mib_counters_timer.function = mib_counters_timer_wrapper; mp->mib_counters_timer.expires = jiffies + 30 * HZ; add_timer(&mp->mib_counters_timer); spin_lock_init(&mp->mib_counters_lock); INIT_WORK(&mp->tx_timeout_task, tx_timeout_task); netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128); init_timer(&mp->rx_oom); mp->rx_oom.data = (unsigned long)mp; mp->rx_oom.function = oom_timer_wrapper; res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); BUG_ON(!res); dev->irq = res->start; dev->netdev_ops = &mv643xx_eth_netdev_ops; dev->watchdog_timeo 
= 2 * HZ; dev->base_addr = 0; dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_LRO; dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM; dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM; SET_NETDEV_DEV(dev, &pdev->dev); if (mp->shared->win_protect) wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect); netif_carrier_off(dev); wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE); set_rx_coal(mp, 250); set_tx_coal(mp, 0); err = register_netdev(dev); if (err) goto out; netdev_notice(dev, "port %d with MAC address %pM\n", mp->port_num, dev->dev_addr); if (mp->tx_desc_sram_size > 0) netdev_notice(dev, "configured with sram\n"); return 0; out: free_netdev(dev); return err; } static int mv643xx_eth_remove(struct platform_device *pdev) { struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); unregister_netdev(mp->dev); if (mp->phy != NULL) phy_detach(mp->phy); cancel_work_sync(&mp->tx_timeout_task); free_netdev(mp->dev); platform_set_drvdata(pdev, NULL); return 0; } static void mv643xx_eth_shutdown(struct platform_device *pdev) { struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); /* Mask all interrupts on ethernet port */ wrlp(mp, INT_MASK, 0); rdlp(mp, INT_MASK); if (netif_running(mp->dev)) port_reset(mp); } static struct platform_driver mv643xx_eth_driver = { .probe = mv643xx_eth_probe, .remove = mv643xx_eth_remove, .shutdown = mv643xx_eth_shutdown, .driver = { .name = MV643XX_ETH_NAME, .owner = THIS_MODULE, }, }; static int __init mv643xx_eth_init_module(void) { int rc; rc = platform_driver_register(&mv643xx_eth_shared_driver); if (!rc) { rc = platform_driver_register(&mv643xx_eth_driver); if (rc) platform_driver_unregister(&mv643xx_eth_shared_driver); } return rc; } module_init(mv643xx_eth_init_module); static void __exit mv643xx_eth_cleanup_module(void) { platform_driver_unregister(&mv643xx_eth_driver); platform_driver_unregister(&mv643xx_eth_shared_driver); } module_exit(mv643xx_eth_cleanup_module); 
MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, " "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek"); MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME); MODULE_ALIAS("platform:" MV643XX_ETH_NAME);
gpl-2.0
cmartinbaughman/android_kernel_htc_msm8660-bt
arch/cris/arch-v10/drivers/ds1302.c
3144
12080
/*!*************************************************************************** *! *! FILE NAME : ds1302.c *! *! DESCRIPTION: Implements an interface for the DS1302 RTC through Etrax I/O *! *! Functions exported: ds1302_readreg, ds1302_writereg, ds1302_init *! *! --------------------------------------------------------------------------- *! *! (C) Copyright 1999-2007 Axis Communications AB, LUND, SWEDEN *! *!***************************************************************************/ #include <linux/fs.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/miscdevice.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/bcd.h> #include <linux/capability.h> #include <asm/uaccess.h> #include <asm/system.h> #include <arch/svinto.h> #include <asm/io.h> #include <asm/rtc.h> #include <arch/io_interface_mux.h> #include "i2c.h" #define RTC_MAJOR_NR 121 /* local major, change later */ static DEFINE_MUTEX(ds1302_mutex); static const char ds1302_name[] = "ds1302"; /* The DS1302 might be connected to different bits on different products. * It has three signals - SDA, SCL and RST. RST and SCL are always outputs, * but SDA can have a selected direction. * For now, only PORT_PB is hardcoded. */ /* The RST bit may be on either the Generic Port or Port PB. 
*/ #ifdef CONFIG_ETRAX_DS1302_RST_ON_GENERIC_PORT #define TK_RST_OUT(x) REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow, CONFIG_ETRAX_DS1302_RSTBIT, x) #define TK_RST_DIR(x) #else #define TK_RST_OUT(x) REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_DS1302_RSTBIT, x) #define TK_RST_DIR(x) REG_SHADOW_SET(R_PORT_PB_DIR, port_pb_dir_shadow, CONFIG_ETRAX_DS1302_RSTBIT, x) #endif #define TK_SDA_OUT(x) REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_DS1302_SDABIT, x) #define TK_SCL_OUT(x) REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_DS1302_SCLBIT, x) #define TK_SDA_IN() ((*R_PORT_PB_READ >> CONFIG_ETRAX_DS1302_SDABIT) & 1) /* 1 is out, 0 is in */ #define TK_SDA_DIR(x) REG_SHADOW_SET(R_PORT_PB_DIR, port_pb_dir_shadow, CONFIG_ETRAX_DS1302_SDABIT, x) #define TK_SCL_DIR(x) REG_SHADOW_SET(R_PORT_PB_DIR, port_pb_dir_shadow, CONFIG_ETRAX_DS1302_SCLBIT, x) /* * The reason for tempudelay and not udelay is that loops_per_usec * (used in udelay) is not set when functions here are called from time.c */ static void tempudelay(int usecs) { volatile int loops; for(loops = usecs * 12; loops > 0; loops--) /* nothing */; } /* Send 8 bits. */ static void out_byte(unsigned char x) { int i; TK_SDA_DIR(1); for (i = 8; i--;) { /* The chip latches incoming bits on the rising edge of SCL. */ TK_SCL_OUT(0); TK_SDA_OUT(x & 1); tempudelay(1); TK_SCL_OUT(1); tempudelay(1); x >>= 1; } TK_SDA_DIR(0); } static unsigned char in_byte(void) { unsigned char x = 0; int i; /* Read byte. Bits come LSB first, on the falling edge of SCL. * Assume SDA is in input direction already. */ TK_SDA_DIR(0); for (i = 8; i--;) { TK_SCL_OUT(0); tempudelay(1); x >>= 1; x |= (TK_SDA_IN() << 7); TK_SCL_OUT(1); tempudelay(1); } return x; } /* Prepares for a transaction by de-activating RST (active-low). */ static void start(void) { TK_SCL_OUT(0); tempudelay(1); TK_RST_OUT(0); tempudelay(5); TK_RST_OUT(1); } /* Ends a transaction by taking RST active again. 
*/ static void stop(void) { tempudelay(2); TK_RST_OUT(0); } /* Enable writing. */ static void ds1302_wenable(void) { start(); out_byte(0x8e); /* Write control register */ out_byte(0x00); /* Disable write protect bit 7 = 0 */ stop(); } /* Disable writing. */ static void ds1302_wdisable(void) { start(); out_byte(0x8e); /* Write control register */ out_byte(0x80); /* Disable write protect bit 7 = 0 */ stop(); } /* Read a byte from the selected register in the DS1302. */ unsigned char ds1302_readreg(int reg) { unsigned char x; start(); out_byte(0x81 | (reg << 1)); /* read register */ x = in_byte(); stop(); return x; } /* Write a byte to the selected register. */ void ds1302_writereg(int reg, unsigned char val) { #ifndef CONFIG_ETRAX_RTC_READONLY int do_writereg = 1; #else int do_writereg = 0; if (reg == RTC_TRICKLECHARGER) do_writereg = 1; #endif if (do_writereg) { ds1302_wenable(); start(); out_byte(0x80 | (reg << 1)); /* write register */ out_byte(val); stop(); ds1302_wdisable(); } } void get_rtc_time(struct rtc_time *rtc_tm) { unsigned long flags; local_irq_save(flags); rtc_tm->tm_sec = CMOS_READ(RTC_SECONDS); rtc_tm->tm_min = CMOS_READ(RTC_MINUTES); rtc_tm->tm_hour = CMOS_READ(RTC_HOURS); rtc_tm->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH); rtc_tm->tm_mon = CMOS_READ(RTC_MONTH); rtc_tm->tm_year = CMOS_READ(RTC_YEAR); local_irq_restore(flags); rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec); rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min); rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour); rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday); rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon); rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year); /* * Account for differences between how the RTC uses the values * and how they are defined in a struct rtc_time; */ if (rtc_tm->tm_year <= 69) rtc_tm->tm_year += 100; rtc_tm->tm_mon--; } static unsigned char days_in_mo[] = {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; /* ioctl that supports RTC_RD_TIME and RTC_SET_TIME (read and set time/date). 
*/ static int rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { unsigned long flags; switch(cmd) { case RTC_RD_TIME: /* read the time/date from RTC */ { struct rtc_time rtc_tm; memset(&rtc_tm, 0, sizeof (struct rtc_time)); get_rtc_time(&rtc_tm); if (copy_to_user((struct rtc_time*)arg, &rtc_tm, sizeof(struct rtc_time))) return -EFAULT; return 0; } case RTC_SET_TIME: /* set the RTC */ { struct rtc_time rtc_tm; unsigned char mon, day, hrs, min, sec, leap_yr; unsigned int yrs; if (!capable(CAP_SYS_TIME)) return -EPERM; if (copy_from_user(&rtc_tm, (struct rtc_time*)arg, sizeof(struct rtc_time))) return -EFAULT; yrs = rtc_tm.tm_year + 1900; mon = rtc_tm.tm_mon + 1; /* tm_mon starts at zero */ day = rtc_tm.tm_mday; hrs = rtc_tm.tm_hour; min = rtc_tm.tm_min; sec = rtc_tm.tm_sec; if ((yrs < 1970) || (yrs > 2069)) return -EINVAL; leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400)); if ((mon > 12) || (day == 0)) return -EINVAL; if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr))) return -EINVAL; if ((hrs >= 24) || (min >= 60) || (sec >= 60)) return -EINVAL; if (yrs >= 2000) yrs -= 2000; /* RTC (0, 1, ... 69) */ else yrs -= 1900; /* RTC (70, 71, ... 99) */ sec = bin2bcd(sec); min = bin2bcd(min); hrs = bin2bcd(hrs); day = bin2bcd(day); mon = bin2bcd(mon); yrs = bin2bcd(yrs); local_irq_save(flags); CMOS_WRITE(yrs, RTC_YEAR); CMOS_WRITE(mon, RTC_MONTH); CMOS_WRITE(day, RTC_DAY_OF_MONTH); CMOS_WRITE(hrs, RTC_HOURS); CMOS_WRITE(min, RTC_MINUTES); CMOS_WRITE(sec, RTC_SECONDS); local_irq_restore(flags); /* Notice that at this point, the RTC is updated but * the kernel is still running with the old time. * You need to set that separately with settimeofday * or adjtimex. 
*/ return 0; } case RTC_SET_CHARGE: /* set the RTC TRICKLE CHARGE register */ { int tcs_val; if (!capable(CAP_SYS_TIME)) return -EPERM; if(copy_from_user(&tcs_val, (int*)arg, sizeof(int))) return -EFAULT; tcs_val = RTC_TCR_PATTERN | (tcs_val & 0x0F); ds1302_writereg(RTC_TRICKLECHARGER, tcs_val); return 0; } case RTC_VL_READ: { /* TODO: * Implement voltage low detection support */ printk(KERN_WARNING "DS1302: RTC Voltage Low detection" " is not supported\n"); return 0; } case RTC_VL_CLR: { /* TODO: * Nothing to do since Voltage Low detection is not supported */ return 0; } default: return -ENOIOCTLCMD; } } static long rtc_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret; mutex_lock(&ds1302_mutex); ret = rtc_ioctl(file, cmd, arg); mutex_unlock(&ds1302_mutex); return ret; } static void print_rtc_status(void) { struct rtc_time tm; get_rtc_time(&tm); /* * There is no way to tell if the luser has the RTC set for local * time or for Universal Standard Time (GMT). Probably local though. */ printk(KERN_INFO "rtc_time\t: %02d:%02d:%02d\n", tm.tm_hour, tm.tm_min, tm.tm_sec); printk(KERN_INFO "rtc_date\t: %04d-%02d-%02d\n", tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday); } /* The various file operations we support. */ static const struct file_operations rtc_fops = { .owner = THIS_MODULE, .unlocked_ioctl = rtc_unlocked_ioctl, .llseek = noop_llseek, }; /* Probe for the chip by writing something to its RAM and try reading it back. */ #define MAGIC_PATTERN 0x42 static int __init ds1302_probe(void) { int retval, res; TK_RST_DIR(1); TK_SCL_DIR(1); TK_SDA_DIR(0); /* Try to talk to timekeeper. 
*/ ds1302_wenable(); start(); out_byte(0xc0); /* write RAM byte 0 */ out_byte(MAGIC_PATTERN); /* write something magic */ start(); out_byte(0xc1); /* read RAM byte 0 */ if((res = in_byte()) == MAGIC_PATTERN) { stop(); ds1302_wdisable(); printk(KERN_INFO "%s: RTC found.\n", ds1302_name); printk(KERN_INFO "%s: SDA, SCL, RST on PB%i, PB%i, %s%i\n", ds1302_name, CONFIG_ETRAX_DS1302_SDABIT, CONFIG_ETRAX_DS1302_SCLBIT, #ifdef CONFIG_ETRAX_DS1302_RST_ON_GENERIC_PORT "GENIO", #else "PB", #endif CONFIG_ETRAX_DS1302_RSTBIT); print_rtc_status(); retval = 1; } else { stop(); retval = 0; } return retval; } /* Just probe for the RTC and register the device to handle the ioctl needed. */ int __init ds1302_init(void) { #ifdef CONFIG_ETRAX_I2C i2c_init(); #endif if (!ds1302_probe()) { #ifdef CONFIG_ETRAX_DS1302_RST_ON_GENERIC_PORT #if CONFIG_ETRAX_DS1302_RSTBIT == 27 /* * The only way to set g27 to output is to enable ATA. * * Make sure that R_GEN_CONFIG is setup correct. */ /* Allocating the ATA interface will grab almost all * pins in I/O groups a, b, c and d. A consequence of * allocating the ATA interface is that the fixed * interfaces shared RAM, parallel port 0, parallel * port 1, parallel port W, SCSI-8 port 0, SCSI-8 port * 1, SCSI-W, serial port 2, serial port 3, * synchronous serial port 3 and USB port 2 and almost * all GPIO pins on port g cannot be used. */ if (cris_request_io_interface(if_ata, "ds1302/ATA")) { printk(KERN_WARNING "ds1302: Failed to get IO interface\n"); return -1; } #elif CONFIG_ETRAX_DS1302_RSTBIT == 0 if (cris_io_interface_allocate_pins(if_gpio_grp_a, 'g', CONFIG_ETRAX_DS1302_RSTBIT, CONFIG_ETRAX_DS1302_RSTBIT)) { printk(KERN_WARNING "ds1302: Failed to get IO interface\n"); return -1; } /* Set the direction of this bit to out. 
*/ genconfig_shadow = ((genconfig_shadow & ~IO_MASK(R_GEN_CONFIG, g0dir)) | (IO_STATE(R_GEN_CONFIG, g0dir, out))); *R_GEN_CONFIG = genconfig_shadow; #endif if (!ds1302_probe()) { printk(KERN_WARNING "%s: RTC not found.\n", ds1302_name); return -1; } #else printk(KERN_WARNING "%s: RTC not found.\n", ds1302_name); return -1; #endif } /* Initialise trickle charger */ ds1302_writereg(RTC_TRICKLECHARGER, RTC_TCR_PATTERN |(CONFIG_ETRAX_DS1302_TRICKLE_CHARGE & 0x0F)); /* Start clock by resetting CLOCK_HALT */ ds1302_writereg(RTC_SECONDS, (ds1302_readreg(RTC_SECONDS) & 0x7F)); return 0; } static int __init ds1302_register(void) { ds1302_init(); if (register_chrdev(RTC_MAJOR_NR, ds1302_name, &rtc_fops)) { printk(KERN_INFO "%s: unable to get major %d for rtc\n", ds1302_name, RTC_MAJOR_NR); return -1; } return 0; } module_init(ds1302_register);
gpl-2.0
LorDClockaN/LorDNeo_2639
arch/x86/mm/pgtable_32.c
4168
3260
#include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/nmi.h> #include <linux/swap.h> #include <linux/smp.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/spinlock.h> #include <linux/module.h> #include <asm/system.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/fixmap.h> #include <asm/e820.h> #include <asm/tlb.h> #include <asm/tlbflush.h> #include <asm/io.h> unsigned int __VMALLOC_RESERVE = 128 << 20; /* * Associate a virtual page frame with a given physical page frame * and protection flags for that frame. */ void set_pte_vaddr(unsigned long vaddr, pte_t pteval) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; pgd = swapper_pg_dir + pgd_index(vaddr); if (pgd_none(*pgd)) { BUG(); return; } pud = pud_offset(pgd, vaddr); if (pud_none(*pud)) { BUG(); return; } pmd = pmd_offset(pud, vaddr); if (pmd_none(*pmd)) { BUG(); return; } pte = pte_offset_kernel(pmd, vaddr); if (pte_val(pteval)) set_pte_at(&init_mm, vaddr, pte, pteval); else pte_clear(&init_mm, vaddr, pte); /* * It's enough to flush this one mapping. * (PGE mappings get flushed as well) */ __flush_tlb_one(vaddr); } /* * Associate a large virtual page frame with a given physical page frame * and protection flags for that frame. pfn is for the base of the page, * vaddr is what the page gets mapped to - both must be properly aligned. * The pmd must already be instantiated. Assumes PAE mode. 
*/ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; if (vaddr & (PMD_SIZE-1)) { /* vaddr is misaligned */ printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n"); return; /* BUG(); */ } if (pfn & (PTRS_PER_PTE-1)) { /* pfn is misaligned */ printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n"); return; /* BUG(); */ } pgd = swapper_pg_dir + pgd_index(vaddr); if (pgd_none(*pgd)) { printk(KERN_WARNING "set_pmd_pfn: pgd_none\n"); return; /* BUG(); */ } pud = pud_offset(pgd, vaddr); pmd = pmd_offset(pud, vaddr); set_pmd(pmd, pfn_pmd(pfn, flags)); /* * It's enough to flush this one mapping. * (PGE mappings get flushed as well) */ __flush_tlb_one(vaddr); } unsigned long __FIXADDR_TOP = 0xfffff000; EXPORT_SYMBOL(__FIXADDR_TOP); /* * vmalloc=size forces the vmalloc area to be exactly 'size' * bytes. This can be used to increase (or decrease) the * vmalloc area - the default is 128m. */ static int __init parse_vmalloc(char *arg) { if (!arg) return -EINVAL; /* Add VMALLOC_OFFSET to the parsed value due to vm area guard hole*/ __VMALLOC_RESERVE = memparse(arg, &arg) + VMALLOC_OFFSET; return 0; } early_param("vmalloc", parse_vmalloc); /* * reservetop=size reserves a hole at the top of the kernel address space which * a hypervisor can load into later. Needed for dynamically loaded hypervisors, * so relocating the fixmap can be done before paging initialization. */ static int __init parse_reservetop(char *arg) { unsigned long address; if (!arg) return -EINVAL; address = memparse(arg, &arg); reserve_top_address(address); fixup_early_ioremap(); return 0; } early_param("reservetop", parse_reservetop);
gpl-2.0
dpoman76/android_kernel_lge_msm8226
arch/sh/kernel/cpu/sh4a/clock-sh7366.c
4424
9369
/* * arch/sh/kernel/cpu/sh4a/clock-sh7366.c * * SH7366 clock framework support * * Copyright (C) 2009 Magnus Damm * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/io.h> #include <linux/clkdev.h> #include <asm/clock.h> /* SH7366 registers */ #define FRQCR 0xa4150000 #define VCLKCR 0xa4150004 #define SCLKACR 0xa4150008 #define SCLKBCR 0xa415000c #define PLLCR 0xa4150024 #define MSTPCR0 0xa4150030 #define MSTPCR1 0xa4150034 #define MSTPCR2 0xa4150038 #define DLLFRQ 0xa4150050 /* Fixed 32 KHz root clock for RTC and Power Management purposes */ static struct clk r_clk = { .rate = 32768, }; /* * Default rate for the root input clock, reset this with clk_set_rate() * from the platform code. 
*/ struct clk extal_clk = { .rate = 33333333, }; /* The dll block multiplies the 32khz r_clk, may be used instead of extal */ static unsigned long dll_recalc(struct clk *clk) { unsigned long mult; if (__raw_readl(PLLCR) & 0x1000) mult = __raw_readl(DLLFRQ); else mult = 0; return clk->parent->rate * mult; } static struct sh_clk_ops dll_clk_ops = { .recalc = dll_recalc, }; static struct clk dll_clk = { .ops = &dll_clk_ops, .parent = &r_clk, .flags = CLK_ENABLE_ON_INIT, }; static unsigned long pll_recalc(struct clk *clk) { unsigned long mult = 1; unsigned long div = 1; if (__raw_readl(PLLCR) & 0x4000) mult = (((__raw_readl(FRQCR) >> 24) & 0x1f) + 1); else div = 2; return (clk->parent->rate * mult) / div; } static struct sh_clk_ops pll_clk_ops = { .recalc = pll_recalc, }; static struct clk pll_clk = { .ops = &pll_clk_ops, .flags = CLK_ENABLE_ON_INIT, }; struct clk *main_clks[] = { &r_clk, &extal_clk, &dll_clk, &pll_clk, }; static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 }; static struct clk_div_mult_table div4_div_mult_table = { .divisors = divisors, .nr_divisors = ARRAY_SIZE(divisors), .multipliers = multipliers, .nr_multipliers = ARRAY_SIZE(multipliers), }; static struct clk_div4_table div4_table = { .div_mult_table = &div4_div_mult_table, }; enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, DIV4_SIUA, DIV4_SIUB, DIV4_NR }; #define DIV4(_reg, _bit, _mask, _flags) \ SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags) struct clk div4_clks[DIV4_NR] = { [DIV4_I] = DIV4(FRQCR, 20, 0x1fef, CLK_ENABLE_ON_INIT), [DIV4_U] = DIV4(FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT), [DIV4_SH] = DIV4(FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT), [DIV4_B] = DIV4(FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT), [DIV4_B3] = DIV4(FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT), [DIV4_P] = DIV4(FRQCR, 0, 0x1fff, 0), [DIV4_SIUA] = DIV4(SCLKACR, 0, 0x1fff, 0), [DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x1fff, 0), }; enum { DIV6_V, DIV6_NR }; 
struct clk div6_clks[DIV6_NR] = { [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0), }; #define MSTP(_parent, _reg, _bit, _flags) \ SH_CLK_MSTP32(_parent, _reg, _bit, _flags) enum { MSTP031, MSTP030, MSTP029, MSTP028, MSTP026, MSTP023, MSTP022, MSTP021, MSTP020, MSTP019, MSTP018, MSTP017, MSTP016, MSTP015, MSTP014, MSTP013, MSTP012, MSTP011, MSTP010, MSTP007, MSTP006, MSTP005, MSTP002, MSTP001, MSTP109, MSTP100, MSTP227, MSTP226, MSTP224, MSTP223, MSTP222, MSTP218, MSTP217, MSTP211, MSTP207, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, MSTP_NR }; static struct clk mstp_clks[MSTP_NR] = { /* See page 52 of Datasheet V0.40: Overview -> Block Diagram */ [MSTP031] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT), [MSTP030] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT), [MSTP029] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT), [MSTP028] = MSTP(&div4_clks[DIV4_SH], MSTPCR0, 28, CLK_ENABLE_ON_INIT), [MSTP026] = MSTP(&div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT), [MSTP023] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 23, 0), [MSTP022] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 22, 0), [MSTP021] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 21, 0), [MSTP020] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 20, 0), [MSTP019] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 19, 0), [MSTP017] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 17, 0), [MSTP015] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 15, 0), [MSTP014] = MSTP(&r_clk, MSTPCR0, 14, 0), [MSTP013] = MSTP(&r_clk, MSTPCR0, 13, 0), [MSTP011] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 11, 0), [MSTP010] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 10, 0), [MSTP007] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 7, 0), [MSTP006] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 6, 0), [MSTP005] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 5, 0), [MSTP002] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 2, 0), [MSTP001] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 1, 0), [MSTP109] = MSTP(&div4_clks[DIV4_P], MSTPCR1, 9, 0), [MSTP227] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 27, 0), [MSTP226] = MSTP(&div4_clks[DIV4_P], 
MSTPCR2, 26, 0), [MSTP224] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 24, 0), [MSTP223] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 23, 0), [MSTP222] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 22, 0), [MSTP218] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 18, 0), [MSTP217] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 17, 0), [MSTP211] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 11, 0), [MSTP207] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 7, CLK_ENABLE_ON_INIT), [MSTP205] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 5, 0), [MSTP204] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 4, 0), [MSTP203] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 3, 0), [MSTP202] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT), [MSTP201] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT), [MSTP200] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 0, 0), }; static struct clk_lookup lookups[] = { /* main clocks */ CLKDEV_CON_ID("rclk", &r_clk), CLKDEV_CON_ID("extal", &extal_clk), CLKDEV_CON_ID("dll_clk", &dll_clk), CLKDEV_CON_ID("pll_clk", &pll_clk), /* DIV4 clocks */ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]), CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]), CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]), CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]), CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]), CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]), CLKDEV_CON_ID("siua_clk", &div4_clks[DIV4_SIUA]), CLKDEV_CON_ID("siub_clk", &div4_clks[DIV4_SIUB]), /* DIV6 clocks */ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]), /* MSTP32 clocks */ CLKDEV_CON_ID("tlb0", &mstp_clks[MSTP031]), CLKDEV_CON_ID("ic0", &mstp_clks[MSTP030]), CLKDEV_CON_ID("oc0", &mstp_clks[MSTP029]), CLKDEV_CON_ID("rsmem0", &mstp_clks[MSTP028]), CLKDEV_CON_ID("xymem0", &mstp_clks[MSTP026]), CLKDEV_CON_ID("intc3", &mstp_clks[MSTP023]), CLKDEV_CON_ID("intc0", &mstp_clks[MSTP022]), CLKDEV_CON_ID("dmac0", &mstp_clks[MSTP021]), CLKDEV_CON_ID("sh0", &mstp_clks[MSTP020]), CLKDEV_CON_ID("hudi0", &mstp_clks[MSTP019]), CLKDEV_CON_ID("ubc0", &mstp_clks[MSTP017]), CLKDEV_CON_ID("tmu_fck", &mstp_clks[MSTP015]), 
CLKDEV_CON_ID("cmt_fck", &mstp_clks[MSTP014]), CLKDEV_CON_ID("rwdt0", &mstp_clks[MSTP013]), CLKDEV_CON_ID("mfi0", &mstp_clks[MSTP011]), CLKDEV_CON_ID("flctl0", &mstp_clks[MSTP010]), CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP007]), CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP006]), CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP005]), CLKDEV_CON_ID("msiof0", &mstp_clks[MSTP002]), CLKDEV_CON_ID("sbr0", &mstp_clks[MSTP001]), CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP109]), CLKDEV_CON_ID("icb0", &mstp_clks[MSTP227]), CLKDEV_CON_ID("meram0", &mstp_clks[MSTP226]), CLKDEV_CON_ID("dacy1", &mstp_clks[MSTP224]), CLKDEV_CON_ID("dacy0", &mstp_clks[MSTP223]), CLKDEV_CON_ID("tsif0", &mstp_clks[MSTP222]), CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP218]), CLKDEV_CON_ID("mmcif0", &mstp_clks[MSTP217]), CLKDEV_CON_ID("usbf0", &mstp_clks[MSTP211]), CLKDEV_CON_ID("veu1", &mstp_clks[MSTP207]), CLKDEV_CON_ID("vou0", &mstp_clks[MSTP205]), CLKDEV_CON_ID("beu0", &mstp_clks[MSTP204]), CLKDEV_CON_ID("ceu0", &mstp_clks[MSTP203]), CLKDEV_CON_ID("veu0", &mstp_clks[MSTP202]), CLKDEV_CON_ID("vpu0", &mstp_clks[MSTP201]), CLKDEV_CON_ID("lcdc0", &mstp_clks[MSTP200]), }; int __init arch_clk_init(void) { int k, ret = 0; /* autodetect extal or dll configuration */ if (__raw_readl(PLLCR) & 0x1000) pll_clk.parent = &dll_clk; else pll_clk.parent = &extal_clk; for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++) ret = clk_register(main_clks[k]); clkdev_add_table(lookups, ARRAY_SIZE(lookups)); if (!ret) ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table); if (!ret) ret = sh_clk_div6_register(div6_clks, DIV6_NR); if (!ret) ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR); return ret; }
gpl-2.0
syhost/android_kernel_zte_msm8974
arch/powerpc/kernel/kprobes.c
4680
15878
/* * Kernel Probes (KProbes) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) IBM Corporation, 2002, 2004 * * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel * Probes initial implementation ( includes contributions from * Rusty Russell). * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes * interface to access function arguments. 
* 2004-Nov Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port * for PPC64 */ #include <linux/kprobes.h> #include <linux/ptrace.h> #include <linux/preempt.h> #include <linux/module.h> #include <linux/kdebug.h> #include <linux/slab.h> #include <asm/cacheflush.h> #include <asm/sstep.h> #include <asm/uaccess.h> #ifdef CONFIG_PPC_ADV_DEBUG_REGS #define MSR_SINGLESTEP (MSR_DE) #else #define MSR_SINGLESTEP (MSR_SE) #endif DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}}; int __kprobes arch_prepare_kprobe(struct kprobe *p) { int ret = 0; kprobe_opcode_t insn = *p->addr; if ((unsigned long)p->addr & 0x03) { printk("Attempt to register kprobe at an unaligned address\n"); ret = -EINVAL; } else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) { printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n"); ret = -EINVAL; } /* insn must be on a special executable page on ppc64. 
This is * not explicitly required on ppc32 (right now), but it doesn't hurt */ if (!ret) { p->ainsn.insn = get_insn_slot(); if (!p->ainsn.insn) ret = -ENOMEM; } if (!ret) { memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); p->opcode = *p->addr; flush_icache_range((unsigned long)p->ainsn.insn, (unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t)); } p->ainsn.boostable = 0; return ret; } void __kprobes arch_arm_kprobe(struct kprobe *p) { *p->addr = BREAKPOINT_INSTRUCTION; flush_icache_range((unsigned long) p->addr, (unsigned long) p->addr + sizeof(kprobe_opcode_t)); } void __kprobes arch_disarm_kprobe(struct kprobe *p) { *p->addr = p->opcode; flush_icache_range((unsigned long) p->addr, (unsigned long) p->addr + sizeof(kprobe_opcode_t)); } void __kprobes arch_remove_kprobe(struct kprobe *p) { if (p->ainsn.insn) { free_insn_slot(p->ainsn.insn, 0); p->ainsn.insn = NULL; } } static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) { /* We turn off async exceptions to ensure that the single step will * be for the instruction we have the kprobe on, if we dont its * possible we'd get the single step reported for an exception handler * like Decrementer or External Interrupt */ regs->msr &= ~MSR_EE; regs->msr |= MSR_SINGLESTEP; #ifdef CONFIG_PPC_ADV_DEBUG_REGS regs->msr &= ~MSR_CE; mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM); #ifdef CONFIG_PPC_47x isync(); #endif #endif /* * On powerpc we should single step on the original * instruction even if the probed insn is a trap * variant as values in regs could play a part in * if the trap is taken or not */ regs->nip = (unsigned long)p->ainsn.insn; } static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) { kcb->prev_kprobe.kp = kprobe_running(); kcb->prev_kprobe.status = kcb->kprobe_status; kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr; } static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) { __get_cpu_var(current_kprobe) = 
kcb->prev_kprobe.kp; kcb->kprobe_status = kcb->prev_kprobe.status; kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr; } static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { __get_cpu_var(current_kprobe) = p; kcb->kprobe_saved_msr = regs->msr; } void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *)regs->link; /* Replace the return addr with trampoline addr */ regs->link = (unsigned long)kretprobe_trampoline; } static int __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p; int ret = 0; unsigned int *addr = (unsigned int *)regs->nip; struct kprobe_ctlblk *kcb; /* * We don't want to be preempted for the entire * duration of kprobe processing */ preempt_disable(); kcb = get_kprobe_ctlblk(); /* Check we're not actually recursing */ if (kprobe_running()) { p = get_kprobe(addr); if (p) { kprobe_opcode_t insn = *p->ainsn.insn; if (kcb->kprobe_status == KPROBE_HIT_SS && is_trap(insn)) { /* Turn off 'trace' bits */ regs->msr &= ~MSR_SINGLESTEP; regs->msr |= kcb->kprobe_saved_msr; goto no_kprobe; } /* We have reentered the kprobe_handler(), since * another probe was hit while within the handler. * We here save the original kprobes variables and * just single step on the instruction of the new probe * without calling any user handlers. 
*/ save_previous_kprobe(kcb); set_current_kprobe(p, regs, kcb); kcb->kprobe_saved_msr = regs->msr; kprobes_inc_nmissed_count(p); prepare_singlestep(p, regs); kcb->kprobe_status = KPROBE_REENTER; return 1; } else { if (*addr != BREAKPOINT_INSTRUCTION) { /* If trap variant, then it belongs not to us */ kprobe_opcode_t cur_insn = *addr; if (is_trap(cur_insn)) goto no_kprobe; /* The breakpoint instruction was removed by * another cpu right after we hit, no further * handling of this interrupt is appropriate */ ret = 1; goto no_kprobe; } p = __get_cpu_var(current_kprobe); if (p->break_handler && p->break_handler(p, regs)) { goto ss_probe; } } goto no_kprobe; } p = get_kprobe(addr); if (!p) { if (*addr != BREAKPOINT_INSTRUCTION) { /* * PowerPC has multiple variants of the "trap" * instruction. If the current instruction is a * trap variant, it could belong to someone else */ kprobe_opcode_t cur_insn = *addr; if (is_trap(cur_insn)) goto no_kprobe; /* * The breakpoint instruction was removed right * after we hit it. Another cpu has removed * either a probepoint or a debugger breakpoint * at this address. In either case, no further * handling of this interrupt is appropriate. 
*/ ret = 1; } /* Not one of ours: let kernel handle it */ goto no_kprobe; } kcb->kprobe_status = KPROBE_HIT_ACTIVE; set_current_kprobe(p, regs, kcb); if (p->pre_handler && p->pre_handler(p, regs)) /* handler has already set things up, so skip ss setup */ return 1; ss_probe: if (p->ainsn.boostable >= 0) { unsigned int insn = *p->ainsn.insn; /* regs->nip is also adjusted if emulate_step returns 1 */ ret = emulate_step(regs, insn); if (ret > 0) { /* * Once this instruction has been boosted * successfully, set the boostable flag */ if (unlikely(p->ainsn.boostable == 0)) p->ainsn.boostable = 1; if (p->post_handler) p->post_handler(p, regs, 0); kcb->kprobe_status = KPROBE_HIT_SSDONE; reset_current_kprobe(); preempt_enable_no_resched(); return 1; } else if (ret < 0) { /* * We don't allow kprobes on mtmsr(d)/rfi(d), etc. * So, we should never get here... but, its still * good to catch them, just in case... */ printk("Can't step on instruction %x\n", insn); BUG(); } else if (ret == 0) /* This instruction can't be boosted */ p->ainsn.boostable = -1; } prepare_singlestep(p, regs); kcb->kprobe_status = KPROBE_HIT_SS; return 1; no_kprobe: preempt_enable_no_resched(); return ret; } /* * Function return probe trampoline: * - init_kprobes() establishes a probepoint here * - When the probed function returns, this probe * causes the handlers to fire */ static void __used kretprobe_trampoline_holder(void) { asm volatile(".global kretprobe_trampoline\n" "kretprobe_trampoline:\n" "nop\n"); } /* * Called when the probe at kretprobe trampoline is hit */ static int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { struct kretprobe_instance *ri = NULL; struct hlist_head *head, empty_rp; struct hlist_node *node, *tmp; unsigned long flags, orig_ret_address = 0; unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; INIT_HLIST_HEAD(&empty_rp); kretprobe_hash_lock(current, &head, &flags); /* * It is possible to have multiple instances associated with 
a given * task either because an multiple functions in the call path * have a return probe installed on them, and/or more than one return * return probe was registered for a target function. * * We can handle this because: * - instances are always inserted at the head of the list * - when multiple return probes are registered for the same * function, the first instance's ret_addr will point to the * real return address, and all the rest will point to * kretprobe_trampoline */ hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { if (ri->task != current) /* another task is sharing our hash bucket */ continue; if (ri->rp && ri->rp->handler) ri->rp->handler(ri, regs); orig_ret_address = (unsigned long)ri->ret_addr; recycle_rp_inst(ri, &empty_rp); if (orig_ret_address != trampoline_address) /* * This is the real return address. Any other * instances associated with this task are for * other calls deeper on the call stack */ break; } kretprobe_assert(ri, orig_ret_address, trampoline_address); regs->nip = orig_ret_address; reset_current_kprobe(); kretprobe_hash_unlock(current, &flags); preempt_enable_no_resched(); hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); kfree(ri); } /* * By returning a non-zero value, we are telling * kprobe_handler() that we don't want the post_handler * to run (and have re-enabled preemption) */ return 1; } /* * Called after single-stepping. p->addr is the address of the * instruction whose first byte has been replaced by the "breakpoint" * instruction. To avoid the SMP problems that can occur when we * temporarily put back the original opcode to single-step, we * single-stepped a copy of the instruction. The address of this * copy is p->ainsn.insn. 
*/ static int __kprobes post_kprobe_handler(struct pt_regs *regs) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); if (!cur) return 0; /* make sure we got here for instruction we have a kprobe on */ if (((unsigned long)cur->ainsn.insn + 4) != regs->nip) return 0; if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { kcb->kprobe_status = KPROBE_HIT_SSDONE; cur->post_handler(cur, regs, 0); } /* Adjust nip to after the single-stepped instruction */ regs->nip = (unsigned long)cur->addr + 4; regs->msr |= kcb->kprobe_saved_msr; /*Restore back the original saved kprobes variables and continue. */ if (kcb->kprobe_status == KPROBE_REENTER) { restore_previous_kprobe(kcb); goto out; } reset_current_kprobe(); out: preempt_enable_no_resched(); /* * if somebody else is singlestepping across a probe point, msr * will have DE/SE set, in which case, continue the remaining processing * of do_debug, as if this is not a probe hit. */ if (regs->msr & MSR_SINGLESTEP) return 0; return 1; } int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); const struct exception_table_entry *entry; switch(kcb->kprobe_status) { case KPROBE_HIT_SS: case KPROBE_REENTER: /* * We are here because the instruction being single * stepped caused a page fault. We reset the current * kprobe and the nip points back to the probe address * and allow the page fault handler to continue as a * normal page fault. */ regs->nip = (unsigned long)cur->addr; regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */ regs->msr |= kcb->kprobe_saved_msr; if (kcb->kprobe_status == KPROBE_REENTER) restore_previous_kprobe(kcb); else reset_current_kprobe(); preempt_enable_no_resched(); break; case KPROBE_HIT_ACTIVE: case KPROBE_HIT_SSDONE: /* * We increment the nmissed count for accounting, * we can also use npre/npostfault count for accouting * these specific fault cases. 
*/ kprobes_inc_nmissed_count(cur); /* * We come here because instructions in the pre/post * handler caused the page_fault, this could happen * if handler tries to access user space by * copy_from_user(), get_user() etc. Let the * user-specified handler try to fix it first. */ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) return 1; /* * In case the user-specified fault handler returned * zero, try to fix up. */ if ((entry = search_exception_tables(regs->nip)) != NULL) { regs->nip = entry->fixup; return 1; } /* * fixup_exception() could not handle it, * Let do_page_fault() fix it. */ break; default: break; } return 0; } /* * Wrapper routine to for handling exceptions. */ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data) { struct die_args *args = (struct die_args *)data; int ret = NOTIFY_DONE; if (args->regs && user_mode(args->regs)) return ret; switch (val) { case DIE_BPT: if (kprobe_handler(args->regs)) ret = NOTIFY_STOP; break; case DIE_SSTEP: if (post_kprobe_handler(args->regs)) ret = NOTIFY_STOP; break; default: break; } return ret; } #ifdef CONFIG_PPC64 unsigned long arch_deref_entry_point(void *entry) { return ((func_descr_t *)entry)->entry; } #endif int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) { struct jprobe *jp = container_of(p, struct jprobe, kp); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs)); /* setup return addr to the jprobe handler routine */ regs->nip = arch_deref_entry_point(jp->entry); #ifdef CONFIG_PPC64 regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc); #endif return 1; } void __used __kprobes jprobe_return(void) { asm volatile("trap" ::: "memory"); } static void __used __kprobes jprobe_return_end(void) { }; int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); /* * FIXME - we should ideally be 
validating that we got here 'cos * of the "trap" in jprobe_return() above, before restoring the * saved regs... */ memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs)); preempt_enable_no_resched(); return 1; } static struct kprobe trampoline_p = { .addr = (kprobe_opcode_t *) &kretprobe_trampoline, .pre_handler = trampoline_probe_handler }; int __init arch_init_kprobes(void) { return register_kprobe(&trampoline_p); } int __kprobes arch_trampoline_kprobe(struct kprobe *p) { if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline) return 1; return 0; }
gpl-2.0
dankocher/android_kernel_lge_w7ds
arch/powerpc/kernel/kprobes.c
4680
15878
/* * Kernel Probes (KProbes) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) IBM Corporation, 2002, 2004 * * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel * Probes initial implementation ( includes contributions from * Rusty Russell). * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes * interface to access function arguments. 
* 2004-Nov Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port * for PPC64 */ #include <linux/kprobes.h> #include <linux/ptrace.h> #include <linux/preempt.h> #include <linux/module.h> #include <linux/kdebug.h> #include <linux/slab.h> #include <asm/cacheflush.h> #include <asm/sstep.h> #include <asm/uaccess.h> #ifdef CONFIG_PPC_ADV_DEBUG_REGS #define MSR_SINGLESTEP (MSR_DE) #else #define MSR_SINGLESTEP (MSR_SE) #endif DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}}; int __kprobes arch_prepare_kprobe(struct kprobe *p) { int ret = 0; kprobe_opcode_t insn = *p->addr; if ((unsigned long)p->addr & 0x03) { printk("Attempt to register kprobe at an unaligned address\n"); ret = -EINVAL; } else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) { printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n"); ret = -EINVAL; } /* insn must be on a special executable page on ppc64. 
This is * not explicitly required on ppc32 (right now), but it doesn't hurt */ if (!ret) { p->ainsn.insn = get_insn_slot(); if (!p->ainsn.insn) ret = -ENOMEM; } if (!ret) { memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); p->opcode = *p->addr; flush_icache_range((unsigned long)p->ainsn.insn, (unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t)); } p->ainsn.boostable = 0; return ret; } void __kprobes arch_arm_kprobe(struct kprobe *p) { *p->addr = BREAKPOINT_INSTRUCTION; flush_icache_range((unsigned long) p->addr, (unsigned long) p->addr + sizeof(kprobe_opcode_t)); } void __kprobes arch_disarm_kprobe(struct kprobe *p) { *p->addr = p->opcode; flush_icache_range((unsigned long) p->addr, (unsigned long) p->addr + sizeof(kprobe_opcode_t)); } void __kprobes arch_remove_kprobe(struct kprobe *p) { if (p->ainsn.insn) { free_insn_slot(p->ainsn.insn, 0); p->ainsn.insn = NULL; } } static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) { /* We turn off async exceptions to ensure that the single step will * be for the instruction we have the kprobe on, if we dont its * possible we'd get the single step reported for an exception handler * like Decrementer or External Interrupt */ regs->msr &= ~MSR_EE; regs->msr |= MSR_SINGLESTEP; #ifdef CONFIG_PPC_ADV_DEBUG_REGS regs->msr &= ~MSR_CE; mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM); #ifdef CONFIG_PPC_47x isync(); #endif #endif /* * On powerpc we should single step on the original * instruction even if the probed insn is a trap * variant as values in regs could play a part in * if the trap is taken or not */ regs->nip = (unsigned long)p->ainsn.insn; } static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) { kcb->prev_kprobe.kp = kprobe_running(); kcb->prev_kprobe.status = kcb->kprobe_status; kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr; } static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) { __get_cpu_var(current_kprobe) = 
kcb->prev_kprobe.kp; kcb->kprobe_status = kcb->prev_kprobe.status; kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr; } static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { __get_cpu_var(current_kprobe) = p; kcb->kprobe_saved_msr = regs->msr; } void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *)regs->link; /* Replace the return addr with trampoline addr */ regs->link = (unsigned long)kretprobe_trampoline; } static int __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p; int ret = 0; unsigned int *addr = (unsigned int *)regs->nip; struct kprobe_ctlblk *kcb; /* * We don't want to be preempted for the entire * duration of kprobe processing */ preempt_disable(); kcb = get_kprobe_ctlblk(); /* Check we're not actually recursing */ if (kprobe_running()) { p = get_kprobe(addr); if (p) { kprobe_opcode_t insn = *p->ainsn.insn; if (kcb->kprobe_status == KPROBE_HIT_SS && is_trap(insn)) { /* Turn off 'trace' bits */ regs->msr &= ~MSR_SINGLESTEP; regs->msr |= kcb->kprobe_saved_msr; goto no_kprobe; } /* We have reentered the kprobe_handler(), since * another probe was hit while within the handler. * We here save the original kprobes variables and * just single step on the instruction of the new probe * without calling any user handlers. 
*/ save_previous_kprobe(kcb); set_current_kprobe(p, regs, kcb); kcb->kprobe_saved_msr = regs->msr; kprobes_inc_nmissed_count(p); prepare_singlestep(p, regs); kcb->kprobe_status = KPROBE_REENTER; return 1; } else { if (*addr != BREAKPOINT_INSTRUCTION) { /* If trap variant, then it belongs not to us */ kprobe_opcode_t cur_insn = *addr; if (is_trap(cur_insn)) goto no_kprobe; /* The breakpoint instruction was removed by * another cpu right after we hit, no further * handling of this interrupt is appropriate */ ret = 1; goto no_kprobe; } p = __get_cpu_var(current_kprobe); if (p->break_handler && p->break_handler(p, regs)) { goto ss_probe; } } goto no_kprobe; } p = get_kprobe(addr); if (!p) { if (*addr != BREAKPOINT_INSTRUCTION) { /* * PowerPC has multiple variants of the "trap" * instruction. If the current instruction is a * trap variant, it could belong to someone else */ kprobe_opcode_t cur_insn = *addr; if (is_trap(cur_insn)) goto no_kprobe; /* * The breakpoint instruction was removed right * after we hit it. Another cpu has removed * either a probepoint or a debugger breakpoint * at this address. In either case, no further * handling of this interrupt is appropriate. 
*/ ret = 1; } /* Not one of ours: let kernel handle it */ goto no_kprobe; } kcb->kprobe_status = KPROBE_HIT_ACTIVE; set_current_kprobe(p, regs, kcb); if (p->pre_handler && p->pre_handler(p, regs)) /* handler has already set things up, so skip ss setup */ return 1; ss_probe: if (p->ainsn.boostable >= 0) { unsigned int insn = *p->ainsn.insn; /* regs->nip is also adjusted if emulate_step returns 1 */ ret = emulate_step(regs, insn); if (ret > 0) { /* * Once this instruction has been boosted * successfully, set the boostable flag */ if (unlikely(p->ainsn.boostable == 0)) p->ainsn.boostable = 1; if (p->post_handler) p->post_handler(p, regs, 0); kcb->kprobe_status = KPROBE_HIT_SSDONE; reset_current_kprobe(); preempt_enable_no_resched(); return 1; } else if (ret < 0) { /* * We don't allow kprobes on mtmsr(d)/rfi(d), etc. * So, we should never get here... but, its still * good to catch them, just in case... */ printk("Can't step on instruction %x\n", insn); BUG(); } else if (ret == 0) /* This instruction can't be boosted */ p->ainsn.boostable = -1; } prepare_singlestep(p, regs); kcb->kprobe_status = KPROBE_HIT_SS; return 1; no_kprobe: preempt_enable_no_resched(); return ret; } /* * Function return probe trampoline: * - init_kprobes() establishes a probepoint here * - When the probed function returns, this probe * causes the handlers to fire */ static void __used kretprobe_trampoline_holder(void) { asm volatile(".global kretprobe_trampoline\n" "kretprobe_trampoline:\n" "nop\n"); } /* * Called when the probe at kretprobe trampoline is hit */ static int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { struct kretprobe_instance *ri = NULL; struct hlist_head *head, empty_rp; struct hlist_node *node, *tmp; unsigned long flags, orig_ret_address = 0; unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; INIT_HLIST_HEAD(&empty_rp); kretprobe_hash_lock(current, &head, &flags); /* * It is possible to have multiple instances associated with 
a given * task either because an multiple functions in the call path * have a return probe installed on them, and/or more than one return * return probe was registered for a target function. * * We can handle this because: * - instances are always inserted at the head of the list * - when multiple return probes are registered for the same * function, the first instance's ret_addr will point to the * real return address, and all the rest will point to * kretprobe_trampoline */ hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { if (ri->task != current) /* another task is sharing our hash bucket */ continue; if (ri->rp && ri->rp->handler) ri->rp->handler(ri, regs); orig_ret_address = (unsigned long)ri->ret_addr; recycle_rp_inst(ri, &empty_rp); if (orig_ret_address != trampoline_address) /* * This is the real return address. Any other * instances associated with this task are for * other calls deeper on the call stack */ break; } kretprobe_assert(ri, orig_ret_address, trampoline_address); regs->nip = orig_ret_address; reset_current_kprobe(); kretprobe_hash_unlock(current, &flags); preempt_enable_no_resched(); hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); kfree(ri); } /* * By returning a non-zero value, we are telling * kprobe_handler() that we don't want the post_handler * to run (and have re-enabled preemption) */ return 1; } /* * Called after single-stepping. p->addr is the address of the * instruction whose first byte has been replaced by the "breakpoint" * instruction. To avoid the SMP problems that can occur when we * temporarily put back the original opcode to single-step, we * single-stepped a copy of the instruction. The address of this * copy is p->ainsn.insn. 
*/ static int __kprobes post_kprobe_handler(struct pt_regs *regs) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); if (!cur) return 0; /* make sure we got here for instruction we have a kprobe on */ if (((unsigned long)cur->ainsn.insn + 4) != regs->nip) return 0; if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { kcb->kprobe_status = KPROBE_HIT_SSDONE; cur->post_handler(cur, regs, 0); } /* Adjust nip to after the single-stepped instruction */ regs->nip = (unsigned long)cur->addr + 4; regs->msr |= kcb->kprobe_saved_msr; /*Restore back the original saved kprobes variables and continue. */ if (kcb->kprobe_status == KPROBE_REENTER) { restore_previous_kprobe(kcb); goto out; } reset_current_kprobe(); out: preempt_enable_no_resched(); /* * if somebody else is singlestepping across a probe point, msr * will have DE/SE set, in which case, continue the remaining processing * of do_debug, as if this is not a probe hit. */ if (regs->msr & MSR_SINGLESTEP) return 0; return 1; } int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); const struct exception_table_entry *entry; switch(kcb->kprobe_status) { case KPROBE_HIT_SS: case KPROBE_REENTER: /* * We are here because the instruction being single * stepped caused a page fault. We reset the current * kprobe and the nip points back to the probe address * and allow the page fault handler to continue as a * normal page fault. */ regs->nip = (unsigned long)cur->addr; regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */ regs->msr |= kcb->kprobe_saved_msr; if (kcb->kprobe_status == KPROBE_REENTER) restore_previous_kprobe(kcb); else reset_current_kprobe(); preempt_enable_no_resched(); break; case KPROBE_HIT_ACTIVE: case KPROBE_HIT_SSDONE: /* * We increment the nmissed count for accounting, * we can also use npre/npostfault count for accouting * these specific fault cases. 
*/ kprobes_inc_nmissed_count(cur); /* * We come here because instructions in the pre/post * handler caused the page_fault, this could happen * if handler tries to access user space by * copy_from_user(), get_user() etc. Let the * user-specified handler try to fix it first. */ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) return 1; /* * In case the user-specified fault handler returned * zero, try to fix up. */ if ((entry = search_exception_tables(regs->nip)) != NULL) { regs->nip = entry->fixup; return 1; } /* * fixup_exception() could not handle it, * Let do_page_fault() fix it. */ break; default: break; } return 0; } /* * Wrapper routine to for handling exceptions. */ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data) { struct die_args *args = (struct die_args *)data; int ret = NOTIFY_DONE; if (args->regs && user_mode(args->regs)) return ret; switch (val) { case DIE_BPT: if (kprobe_handler(args->regs)) ret = NOTIFY_STOP; break; case DIE_SSTEP: if (post_kprobe_handler(args->regs)) ret = NOTIFY_STOP; break; default: break; } return ret; } #ifdef CONFIG_PPC64 unsigned long arch_deref_entry_point(void *entry) { return ((func_descr_t *)entry)->entry; } #endif int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) { struct jprobe *jp = container_of(p, struct jprobe, kp); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs)); /* setup return addr to the jprobe handler routine */ regs->nip = arch_deref_entry_point(jp->entry); #ifdef CONFIG_PPC64 regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc); #endif return 1; } void __used __kprobes jprobe_return(void) { asm volatile("trap" ::: "memory"); } static void __used __kprobes jprobe_return_end(void) { }; int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); /* * FIXME - we should ideally be 
validating that we got here 'cos * of the "trap" in jprobe_return() above, before restoring the * saved regs... */ memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs)); preempt_enable_no_resched(); return 1; } static struct kprobe trampoline_p = { .addr = (kprobe_opcode_t *) &kretprobe_trampoline, .pre_handler = trampoline_probe_handler }; int __init arch_init_kprobes(void) { return register_kprobe(&trampoline_p); } int __kprobes arch_trampoline_kprobe(struct kprobe *p) { if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline) return 1; return 0; }
gpl-2.0
schqiushui/kernel_sense_a5dug
drivers/mtd/nand/cmx270_nand.c
4936
5642
/* * linux/drivers/mtd/nand/cmx270-nand.c * * Copyright (C) 2006 Compulab, Ltd. * Mike Rapoport <mike@compulab.co.il> * * Derived from drivers/mtd/nand/h1910.c * Copyright (C) 2002 Marius Gröger (mag@sysgo.de) * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de) * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Overview: * This is a device driver for the NAND flash device found on the * CM-X270 board. */ #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <linux/slab.h> #include <linux/gpio.h> #include <linux/module.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/mach-types.h> #include <mach/pxa2xx-regs.h> #define GPIO_NAND_CS (11) #define GPIO_NAND_RB (89) /* MTD structure for CM-X270 board */ static struct mtd_info *cmx270_nand_mtd; /* remaped IO address of the device */ static void __iomem *cmx270_nand_io; /* * Define static partitions for flash device */ static struct mtd_partition partition_info[] = { [0] = { .name = "cmx270-0", .offset = 0, .size = MTDPART_SIZ_FULL } }; #define NUM_PARTITIONS (ARRAY_SIZE(partition_info)) static u_char cmx270_read_byte(struct mtd_info *mtd) { struct nand_chip *this = mtd->priv; return (readl(this->IO_ADDR_R) >> 16); } static void cmx270_write_buf(struct mtd_info *mtd, const u_char *buf, int len) { int i; struct nand_chip *this = mtd->priv; for (i=0; i<len; i++) writel((*buf++ << 16), this->IO_ADDR_W); } static void cmx270_read_buf(struct mtd_info *mtd, u_char *buf, int len) { int i; struct nand_chip *this = mtd->priv; for (i=0; i<len; i++) *buf++ = readl(this->IO_ADDR_R) >> 16; } static int cmx270_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) { int i; struct nand_chip *this = mtd->priv; for (i=0; i<len; i++) if (buf[i] != (u_char)(readl(this->IO_ADDR_R) >> 16)) return -EFAULT; return 0; } static inline void nand_cs_on(void) { 
gpio_set_value(GPIO_NAND_CS, 0); } static void nand_cs_off(void) { dsb(); gpio_set_value(GPIO_NAND_CS, 1); } /* * hardware specific access to control-lines */ static void cmx270_hwcontrol(struct mtd_info *mtd, int dat, unsigned int ctrl) { struct nand_chip* this = mtd->priv; unsigned int nandaddr = (unsigned int)this->IO_ADDR_W; dsb(); if (ctrl & NAND_CTRL_CHANGE) { if ( ctrl & NAND_ALE ) nandaddr |= (1 << 3); else nandaddr &= ~(1 << 3); if ( ctrl & NAND_CLE ) nandaddr |= (1 << 2); else nandaddr &= ~(1 << 2); if ( ctrl & NAND_NCE ) nand_cs_on(); else nand_cs_off(); } dsb(); this->IO_ADDR_W = (void __iomem*)nandaddr; if (dat != NAND_CMD_NONE) writel((dat << 16), this->IO_ADDR_W); dsb(); } /* * read device ready pin */ static int cmx270_device_ready(struct mtd_info *mtd) { dsb(); return (gpio_get_value(GPIO_NAND_RB)); } /* * Main initialization routine */ static int __init cmx270_init(void) { struct nand_chip *this; int ret; if (!(machine_is_armcore() && cpu_is_pxa27x())) return -ENODEV; ret = gpio_request(GPIO_NAND_CS, "NAND CS"); if (ret) { pr_warning("CM-X270: failed to request NAND CS gpio\n"); return ret; } gpio_direction_output(GPIO_NAND_CS, 1); ret = gpio_request(GPIO_NAND_RB, "NAND R/B"); if (ret) { pr_warning("CM-X270: failed to request NAND R/B gpio\n"); goto err_gpio_request; } gpio_direction_input(GPIO_NAND_RB); /* Allocate memory for MTD device structure and private data */ cmx270_nand_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); if (!cmx270_nand_mtd) { pr_debug("Unable to allocate CM-X270 NAND MTD device structure.\n"); ret = -ENOMEM; goto err_kzalloc; } cmx270_nand_io = ioremap(PXA_CS1_PHYS, 12); if (!cmx270_nand_io) { pr_debug("Unable to ioremap NAND device\n"); ret = -EINVAL; goto err_ioremap; } /* Get pointer to private data */ this = (struct nand_chip *)(&cmx270_nand_mtd[1]); /* Link the private data with the MTD structure */ cmx270_nand_mtd->owner = THIS_MODULE; cmx270_nand_mtd->priv = this; /* insert callbacks */ 
this->IO_ADDR_R = cmx270_nand_io; this->IO_ADDR_W = cmx270_nand_io; this->cmd_ctrl = cmx270_hwcontrol; this->dev_ready = cmx270_device_ready; /* 15 us command delay time */ this->chip_delay = 20; this->ecc.mode = NAND_ECC_SOFT; /* read/write functions */ this->read_byte = cmx270_read_byte; this->read_buf = cmx270_read_buf; this->write_buf = cmx270_write_buf; this->verify_buf = cmx270_verify_buf; /* Scan to find existence of the device */ if (nand_scan (cmx270_nand_mtd, 1)) { pr_notice("No NAND device\n"); ret = -ENXIO; goto err_scan; } /* Register the partitions */ ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, NULL, partition_info, NUM_PARTITIONS); if (ret) goto err_scan; /* Return happy */ return 0; err_scan: iounmap(cmx270_nand_io); err_ioremap: kfree(cmx270_nand_mtd); err_kzalloc: gpio_free(GPIO_NAND_RB); err_gpio_request: gpio_free(GPIO_NAND_CS); return ret; } module_init(cmx270_init); /* * Clean up routine */ static void __exit cmx270_cleanup(void) { /* Release resources, unregister device */ nand_release(cmx270_nand_mtd); gpio_free(GPIO_NAND_RB); gpio_free(GPIO_NAND_CS); iounmap(cmx270_nand_io); /* Free the MTD device structure */ kfree (cmx270_nand_mtd); } module_exit(cmx270_cleanup); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>"); MODULE_DESCRIPTION("NAND flash driver for Compulab CM-X270 Module");
gpl-2.0
acuicultor/android_kernel_oneplus_msm8974-1
drivers/scsi/bfa/bfa_core.c
4936
49822
/* * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * * Linux driver for Brocade Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as * published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include "bfad_drv.h" #include "bfa_modules.h" #include "bfi_reg.h" BFA_TRC_FILE(HAL, CORE); /* * BFA module list terminated by NULL */ static struct bfa_module_s *hal_mods[] = { &hal_mod_fcdiag, &hal_mod_sgpg, &hal_mod_fcport, &hal_mod_fcxp, &hal_mod_lps, &hal_mod_uf, &hal_mod_rport, &hal_mod_fcp, &hal_mod_dconf, NULL }; /* * Message handlers for various modules. */ static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = { bfa_isr_unhandled, /* NONE */ bfa_isr_unhandled, /* BFI_MC_IOC */ bfa_fcdiag_intr, /* BFI_MC_DIAG */ bfa_isr_unhandled, /* BFI_MC_FLASH */ bfa_isr_unhandled, /* BFI_MC_CEE */ bfa_fcport_isr, /* BFI_MC_FCPORT */ bfa_isr_unhandled, /* BFI_MC_IOCFC */ bfa_isr_unhandled, /* BFI_MC_LL */ bfa_uf_isr, /* BFI_MC_UF */ bfa_fcxp_isr, /* BFI_MC_FCXP */ bfa_lps_isr, /* BFI_MC_LPS */ bfa_rport_isr, /* BFI_MC_RPORT */ bfa_itn_isr, /* BFI_MC_ITN */ bfa_isr_unhandled, /* BFI_MC_IOIM_READ */ bfa_isr_unhandled, /* BFI_MC_IOIM_WRITE */ bfa_isr_unhandled, /* BFI_MC_IOIM_IO */ bfa_ioim_isr, /* BFI_MC_IOIM */ bfa_ioim_good_comp_isr, /* BFI_MC_IOIM_IOCOM */ bfa_tskim_isr, /* BFI_MC_TSKIM */ bfa_isr_unhandled, /* BFI_MC_SBOOT */ bfa_isr_unhandled, /* BFI_MC_IPFC */ bfa_isr_unhandled, /* BFI_MC_PORT */ bfa_isr_unhandled, /* --------- */ bfa_isr_unhandled, /* --------- */ bfa_isr_unhandled, /* --------- */ bfa_isr_unhandled, /* --------- */ bfa_isr_unhandled, /* --------- */ 
bfa_isr_unhandled, /* --------- */ bfa_isr_unhandled, /* --------- */ bfa_isr_unhandled, /* --------- */ bfa_isr_unhandled, /* --------- */ bfa_isr_unhandled, /* --------- */ }; /* * Message handlers for mailbox command classes */ static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = { NULL, NULL, /* BFI_MC_IOC */ NULL, /* BFI_MC_DIAG */ NULL, /* BFI_MC_FLASH */ NULL, /* BFI_MC_CEE */ NULL, /* BFI_MC_PORT */ bfa_iocfc_isr, /* BFI_MC_IOCFC */ NULL, }; static void bfa_com_port_attach(struct bfa_s *bfa) { struct bfa_port_s *port = &bfa->modules.port; struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa); bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod); bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp); } /* * ablk module attach */ static void bfa_com_ablk_attach(struct bfa_s *bfa) { struct bfa_ablk_s *ablk = &bfa->modules.ablk; struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa); bfa_ablk_attach(ablk, &bfa->ioc); bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp); } static void bfa_com_cee_attach(struct bfa_s *bfa) { struct bfa_cee_s *cee = &bfa->modules.cee; struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa); cee->trcmod = bfa->trcmod; bfa_cee_attach(cee, &bfa->ioc, bfa); bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp); } static void bfa_com_sfp_attach(struct bfa_s *bfa) { struct bfa_sfp_s *sfp = BFA_SFP_MOD(bfa); struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa); bfa_sfp_attach(sfp, &bfa->ioc, bfa, bfa->trcmod); bfa_sfp_memclaim(sfp, sfp_dma->kva_curp, sfp_dma->dma_curp); } static void bfa_com_flash_attach(struct bfa_s *bfa, bfa_boolean_t mincfg) { struct bfa_flash_s *flash = BFA_FLASH(bfa); struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa); bfa_flash_attach(flash, &bfa->ioc, bfa, bfa->trcmod, mincfg); bfa_flash_memclaim(flash, flash_dma->kva_curp, flash_dma->dma_curp, mincfg); } static void bfa_com_diag_attach(struct bfa_s *bfa) { struct bfa_diag_s *diag = BFA_DIAG_MOD(bfa); struct bfa_mem_dma_s 
*diag_dma = BFA_MEM_DIAG_DMA(bfa); bfa_diag_attach(diag, &bfa->ioc, bfa, bfa_fcport_beacon, bfa->trcmod); bfa_diag_memclaim(diag, diag_dma->kva_curp, diag_dma->dma_curp); } static void bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg) { struct bfa_phy_s *phy = BFA_PHY(bfa); struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa); bfa_phy_attach(phy, &bfa->ioc, bfa, bfa->trcmod, mincfg); bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg); } /* * BFA IOC FC related definitions */ /* * IOC local definitions */ #define BFA_IOCFC_TOV 5000 /* msecs */ enum { BFA_IOCFC_ACT_NONE = 0, BFA_IOCFC_ACT_INIT = 1, BFA_IOCFC_ACT_STOP = 2, BFA_IOCFC_ACT_DISABLE = 3, BFA_IOCFC_ACT_ENABLE = 4, }; #define DEF_CFG_NUM_FABRICS 1 #define DEF_CFG_NUM_LPORTS 256 #define DEF_CFG_NUM_CQS 4 #define DEF_CFG_NUM_IOIM_REQS (BFA_IOIM_MAX) #define DEF_CFG_NUM_TSKIM_REQS 128 #define DEF_CFG_NUM_FCXP_REQS 64 #define DEF_CFG_NUM_UF_BUFS 64 #define DEF_CFG_NUM_RPORTS 1024 #define DEF_CFG_NUM_ITNIMS (DEF_CFG_NUM_RPORTS) #define DEF_CFG_NUM_TINS 256 #define DEF_CFG_NUM_SGPGS 2048 #define DEF_CFG_NUM_REQQ_ELEMS 256 #define DEF_CFG_NUM_RSPQ_ELEMS 64 #define DEF_CFG_NUM_SBOOT_TGTS 16 #define DEF_CFG_NUM_SBOOT_LUNS 16 /* * IOCFC state machine definitions/declarations */ bfa_fsm_state_decl(bfa_iocfc, stopped, struct bfa_iocfc_s, enum iocfc_event); bfa_fsm_state_decl(bfa_iocfc, initing, struct bfa_iocfc_s, enum iocfc_event); bfa_fsm_state_decl(bfa_iocfc, dconf_read, struct bfa_iocfc_s, enum iocfc_event); bfa_fsm_state_decl(bfa_iocfc, init_cfg_wait, struct bfa_iocfc_s, enum iocfc_event); bfa_fsm_state_decl(bfa_iocfc, init_cfg_done, struct bfa_iocfc_s, enum iocfc_event); bfa_fsm_state_decl(bfa_iocfc, operational, struct bfa_iocfc_s, enum iocfc_event); bfa_fsm_state_decl(bfa_iocfc, dconf_write, struct bfa_iocfc_s, enum iocfc_event); bfa_fsm_state_decl(bfa_iocfc, stopping, struct bfa_iocfc_s, enum iocfc_event); bfa_fsm_state_decl(bfa_iocfc, enabling, struct bfa_iocfc_s, enum iocfc_event); 
bfa_fsm_state_decl(bfa_iocfc, cfg_wait, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, disabling, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, disabled, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, failed, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, init_failed, struct bfa_iocfc_s, enum iocfc_event);

/*
 * forward declaration for IOC FC functions
 */
static void bfa_iocfc_start_submod(struct bfa_s *bfa);
static void bfa_iocfc_disable_submod(struct bfa_s *bfa);
static void bfa_iocfc_send_cfg(void *bfa_arg);
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
static void bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete);
static void bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl);
static void bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl);
static void bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl);

/* stopped: nothing to do on entry. */
static void
bfa_iocfc_sm_stopped_entry(struct bfa_iocfc_s *iocfc)
{
}

/* stopped: INIT or ENABLE kicks off IOC bring-up via the initing state. */
static void
bfa_iocfc_sm_stopped(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_INIT:
	case IOCFC_E_ENABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_initing);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

/* initing: ask the IOC layer to enable the adapter. */
static void
bfa_iocfc_sm_initing_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_ioc_enable(&iocfc->bfa->ioc);
}

/* initing: wait for IOC enable; success reads dconf, failure aborts init. */
static void
bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_IOC_ENABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

/* dconf_read: start reading the driver config (dconf) module. */
static void
bfa_iocfc_sm_dconf_read_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_dconf_modinit(iocfc->bfa);
}

/* dconf_read: once dconf is in, send the IOCFC config to firmware. */
static void
bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_DCONF_DONE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

/* init_cfg_wait: push the config request down the mailbox. */
static void
bfa_iocfc_sm_init_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_iocfc_send_cfg(iocfc->bfa);
}

/* init_cfg_wait: wait for the firmware's CFG reply. */
static void
bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_CFG_DONE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

/* init_cfg_done: report successful init back to the driver layer. */
static void
bfa_iocfc_sm_init_cfg_done_entry(struct bfa_iocfc_s *iocfc)
{
	iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
		     bfa_iocfc_init_cb, iocfc->bfa);
}

/* init_cfg_done: initialized; wait for start/stop/disable from driver. */
static void
bfa_iocfc_sm_init_cfg_done(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_START:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
		break;
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

/* operational: bring up the FC port and start all BFA submodules. */
static void
bfa_iocfc_sm_operational_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_fcport_init(iocfc->bfa);
	bfa_iocfc_start_submod(iocfc->bfa);
}

/* operational: normal running state; STOP flushes dconf first. */
static void
bfa_iocfc_sm_operational(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

/* dconf_write: flush the dconf module on the way down. */
static void
bfa_iocfc_sm_dconf_write_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_dconf_modexit(iocfc->bfa);
}

/* dconf_write: proceed to stopping whether the flush completed or failed. */
static void
bfa_iocfc_sm_dconf_write(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_DCONF_DONE:
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

/* stopping: ask the IOC layer to disable the adapter. */
static void
bfa_iocfc_sm_stopping_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_ioc_disable(&iocfc->bfa->ioc);
}

/* stopping: on IOC disable, tear down ISRs/submodules and report stop. */
static void
bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_IOC_DISABLED:
		bfa_isr_disable(iocfc->bfa);
		bfa_iocfc_disable_submod(iocfc->bfa);
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe,
			     bfa_iocfc_stop_cb, iocfc->bfa);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

/* enabling: re-enable the IOC after a prior disable. */
static void
bfa_iocfc_sm_enabling_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_ioc_enable(&iocfc->bfa->ioc);
}

/*
 * enabling: wait for IOC enable.  On failure, complete the driver's
 * pending enable callback (if one was requested via cb_reqd).
 */
static void
bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_IOC_ENABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);

		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
			break;

		iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
			     bfa_iocfc_enable_cb, iocfc->bfa);
		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

/* cfg_wait: resend the IOCFC config to firmware after re-enable. */
static void
bfa_iocfc_sm_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_iocfc_send_cfg(iocfc->bfa);
}

/*
 * cfg_wait: wait for the CFG reply; complete any pending enable callback
 * with the appropriate status either way.
 */
static void
bfa_iocfc_sm_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_CFG_DONE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
			break;

		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
			     bfa_iocfc_enable_cb, iocfc->bfa);
		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
			break;

		iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
			     bfa_iocfc_enable_cb, iocfc->bfa);
		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

/* disabling: ask the IOC layer to disable the adapter. */
static void
bfa_iocfc_sm_disabling_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_ioc_disable(&iocfc->bfa->ioc);
}

/* disabling: wait for IOC disable completion. */
static void
bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_IOC_DISABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

/* disabled: tear down ISRs/submodules and report disable completion. */
static void
bfa_iocfc_sm_disabled_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_isr_disable(iocfc->bfa);
	bfa_iocfc_disable_submod(iocfc->bfa);
	iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
		     bfa_iocfc_disable_cb, iocfc->bfa);
}

/* disabled: can be re-enabled or stopped (via dconf flush). */
static void
bfa_iocfc_sm_disabled(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;
	case IOCFC_E_ENABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_enabling);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

/* failed: quiesce interrupts and submodules after a runtime IOC failure. */
static void
bfa_iocfc_sm_failed_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_isr_disable(iocfc->bfa);
	bfa_iocfc_disable_submod(iocfc->bfa);
}

/* failed: recoverable -- IOC_ENABLED re-runs the config handshake. */
static void
bfa_iocfc_sm_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;
	case IOCFC_E_IOC_ENABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
		break;
	case IOCFC_E_IOC_FAILED:
		/* already failed; ignore repeated failure notifications */
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

/* init_failed: report the failed initialization to the driver layer. */
static void
bfa_iocfc_sm_init_failed_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_isr_disable(iocfc->bfa);
	iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
		     bfa_iocfc_init_cb, iocfc->bfa);
}

/* init_failed: IOC may still recover (IOC_ENABLED) or be torn down. */
static void
bfa_iocfc_sm_init_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;
	case IOCFC_E_DISABLE:
		bfa_ioc_disable(&iocfc->bfa->ioc);
		break;
	case IOCFC_E_IOC_ENABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
		break;
	case IOCFC_E_IOC_DISABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
			     bfa_iocfc_disable_cb, iocfc->bfa);
		break;
	case IOCFC_E_IOC_FAILED:
		/* already failed; nothing further to do */
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

/*
 * BFA Interrupt handling functions
 */

/* Resume queued requesters waiting for room on request queue @qid. */
static void
bfa_reqq_resume(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq, *qe, *qen;
	struct bfa_reqq_wait_s *wqe;

	waitq = bfa_reqq(bfa, qid);
	list_for_each_safe(qe, qen, waitq) {
		/*
		 * Callback only as long as there is room in request queue
		 */
		if (bfa_reqq_full(bfa, qid))
			break;

		list_del(qe);
		wqe = (struct bfa_reqq_wait_s *) qe;
		wqe->qresume(wqe->cbarg);
	}
}

/*
 * Drain response queue @qid, dispatching each message to its class ISR.
 * Returns BFA_TRUE if any completion was processed.
 */
bfa_boolean_t
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32 pi, ci;
	struct list_head *waitq;
	bfa_boolean_t ret;

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);
	ret = (ci != pi);

	while (ci != pi) {
		m = bfa_rspq_elem(bfa, qid, ci);
		WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);
		bfa_isrs[m->mhdr.msg_class] (bfa, m);
		CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
	}

	/*
	 * acknowledge RME completions and update CI
	 */
	bfa_isr_rspq_ack(bfa, qid, ci);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);

	return ret;
}

/* Acknowledge a request-queue interrupt and resume any waiters on it. */
static inline void
bfa_isr_reqq(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq;

	bfa_isr_reqq_ack(bfa, qid);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}

/* Single-vector MSI-X handler: service RME, CPE, then error interrupts. */
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	u32	intr, qintr;
	int	queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return;

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_rspq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}
	intr &= ~qintr;
	if (!intr)
		return;

	bfa_msix_lpu_err(bfa, intr);
}

/*
 * INTx handler.  Returns BFA_TRUE if this device raised the interrupt
 * (shared-line disambiguation for the caller).
 */
bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;
	bfa_boolean_t rspq_comp = BFA_FALSE;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
	if (qintr)
		writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	/*
	 * Unconditional RME completion queue interrupt
	 */
	if (bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			if (bfa_isr_rspq(bfa, queue))
				rspq_comp = BFA_TRUE;
	}

	if (!intr)
		return (qintr | rspq_comp) ? BFA_TRUE : BFA_FALSE;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}

/* Install MSI-X control vector and unmask per-ASIC interrupt sources. */
void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 umsk;
	int pci_func = bfa_ioc_pcifn(&bfa->ioc);

	bfa_trc(bfa, pci_func);

	bfa_msix_ctrl_install(bfa);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		umsk = __HFN_INT_ERR_MASK_CT2;
		umsk |= pci_func == 0 ?
			__HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
	} else {
		umsk = __HFN_INT_ERR_MASK;
		umsk |= pci_func == 0 ?
			__HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
	}

	writel(umsk, bfa->iocfc.bfa_regs.intr_status);
	writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
	bfa->iocfc.intr_mask = ~umsk;
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}

/* Mask all interrupts and uninstall MSI-X handlers. */
void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa_isr_mode_set(bfa, BFA_FALSE);
	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
	bfa_msix_uninstall(bfa);
}

/* Per-queue MSI-X request queue vector handler. */
void
bfa_msix_reqq(struct bfa_s *bfa, int vec)
{
	bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
}

/* Trace and flag an unexpected firmware message class/id. */
void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	WARN_ON(1);
	bfa_trc_stop(bfa->trcmod);
}

/* Per-queue MSI-X response queue vector handler. */
void
bfa_msix_rspq(struct bfa_s *bfa, int vec)
{
	bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
}

/* Handle LPU mailbox and error interrupts (halt / PSS / generic error). */
void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;
	bfa_boolean_t lpu_isr, halt_isr, pss_isr;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
		pss_isr = intr & __HFN_INT_ERR_PSS_CT2;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0_CT2 |
				  __HFN_INT_MBOX_LPU1_CT2);
		intr &= __HFN_INT_ERR_MASK_CT2;
	} else {
		halt_isr = bfa_asic_id_ct(bfa->ioc.pcidev.device_id) ?
					  (intr & __HFN_INT_LL_HALT) : 0;
		pss_isr = intr & __HFN_INT_ERR_PSS;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
		intr &= __HFN_INT_ERR_MASK;
	}

	if (lpu_isr)
		bfa_ioc_mbox_isr(&bfa->ioc);

	if (intr) {
		if (halt_isr) {
			/*
			 * If LL_HALT bit is set then FW Init Halt LL Port
			 * Register needs to be cleared as well so Interrupt
			 * Status Register will be cleared.
			 */
			curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
		}

		if (pss_isr) {
			/*
			 * ERR_PSS bit needs to be cleared as well in case
			 * interrupts are shared so driver's interrupt handler
			 * is still called even though it is already masked
			 * out.
			 */
			curr_value = readl(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			writel(curr_value,
			       bfa->ioc.ioc_regs.pss_err_status_reg);
		}

		writel(intr, bfa->iocfc.bfa_regs.intr_status);
		bfa_ioc_error_isr(&bfa->ioc);
	}
}

/*
 * BFA IOC FC related functions
 */

/*
 *  BFA IOC private functions
 */

/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
	int i;

	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->single_msix_vec = 0;
	if (bfa->msix.nvecs == 1)
		cfg_info->single_msix_vec = 1;
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;
	cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs);
	cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing if it is driver init path
	 * and not ioc disable/enable path.
	 */
	if (bfa_fsm_cmp_state(iocfc, bfa_iocfc_sm_init_cfg_wait))
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_fn_lpu(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}

/*
 * Initialize IOCFC software state and select the chip-specific (CB / CT /
 * CT2 ASIC) hardware interface function table.
 */
static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	bfa->bfad = bfad;
	iocfc->bfa = bfa;
	iocfc->cfg = *cfg;

	/*
	 * Initialize chip specific handlers.
	 */
	if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
	} else {
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_reqq_ack = NULL;
		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
	}

	/* CT2 overrides a subset of the CT handlers selected above. */
	if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
		iocfc->hwif.hw_isr_mode_set = NULL;
		iocfc->hwif.hw_rspq_ack = bfa_hwct2_rspq_ack;
	}

	iocfc->hwif.hw_reginit(bfa);
	bfa->msix.nvecs = 0;
}

/*
 * Carve up the pre-allocated DMA and KVA memory between the IOC, the
 * request/response rings, the shadow CI/PI, the config info/response
 * pages and the firmware-trace debug buffer.
 */
static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
{
	u8 *dm_kva = NULL;
	u64 dm_pa = 0;
	int i, per_reqq_sz, per_rspq_sz, dbgsz;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_dma_s *reqq_dma, *rspq_dma;

	/* First allocate dma memory for IOC */
	bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma),
			  bfa_mem_dma_phys(ioc_dma));

	/* Claim DMA-able memory for the request/response queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		reqq_dma = BFA_MEM_REQQ_DMA(bfa, i);
		iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma);
		iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma);
		memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz);

		rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i);
		iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma);
		iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma);
		memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz);
	}

	/* Claim IOCFC dma memory - for shadow CI/PI */
	dm_kva = bfa_mem_dma_virt(iocfc_dma);
	dm_pa = bfa_mem_dma_phys(iocfc_dma);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/* Claim IOCFC dma memory - for the config info page */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/* Claim IOCFC dma memory - for the config response */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			      BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			     BFA_CACHELINE_SZ);

	/* Claim IOCFC kva memory */
	dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
	if (dbgsz > 0) {
		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
		bfa_mem_kva_curp(iocfc) += dbgsz;
	}
}

/*
 * Start BFA submodules.
 */
static void
bfa_iocfc_start_submod(struct bfa_s *bfa)
{
	int i;

	bfa->queue_process = BFA_TRUE;
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i));

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->start(bfa);

	bfa->iocfc.submod_enabled = BFA_TRUE;
}

/*
 * Disable BFA submodules.
 */
static void
bfa_iocfc_disable_submod(struct bfa_s *bfa)
{
	int i;

	/* idempotent: skip if the submodules were never started */
	if (bfa->iocfc.submod_enabled == BFA_FALSE)
		return;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->iocdisable(bfa);

	bfa->iocfc.submod_enabled = BFA_FALSE;
}

/* Deferred callback: report init completion status to the driver. */
static void
bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
	struct bfa_s *bfa = bfa_arg;

	if (complete)
		bfa_cb_init(bfa->bfad, bfa->iocfc.op_status);
}

/* Deferred callback: wake the driver thread waiting in bfa_stop(). */
static void
bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->comp);
}

/* Deferred callback: wake the driver thread waiting for IOC enable. */
static void
bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->enable_comp);
}

/* Deferred callback: wake the driver thread waiting for IOC disable. */
static void
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->disable_comp);
}

/**
 * configure queue registers from firmware response
 */
static void
bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
{
	int i;
	struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs;
	void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);

	/* firmware reports register offsets relative to BAR0 */
	for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
		bfa->iocfc.hw_qid[i] = qreg->hw_qid[i];
		r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
		r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
		r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
		r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
		r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
		r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
	}
}

/* Re-size per-module resource pools to the counts firmware granted. */
static void
bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
{
	bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
	bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
	bfa_rport_res_recfg(bfa, fwcfg->num_rports);
	bfa_fcp_res_recfg(bfa, fwcfg->num_ioim_reqs);
	bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
}

/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_iocfc_cfgrsp(struct bfa_s *bfa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;

	/*
	 * Self-assignment below is an intentional no-op placeholder in the
	 * byte-swap list -- presumably num_cqs is a single-byte field that
	 * needs no endian conversion (TODO confirm against the struct).
	 */
	fwcfg->num_cqs = fwcfg->num_cqs;
	fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
	fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
	fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
	fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
	fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
	fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);

	/*
	 * configure queue register offsets as learnt from firmware
	 */
	bfa_iocfc_qreg(bfa, &cfgrsp->qreg);

	/*
	 * Re-configure resources as learnt from Firmware
	 */
	bfa_iocfc_res_recfg(bfa, fwcfg);

	/*
	 * Install MSIX queue handlers
	 */
	bfa_msix_queue_install(bfa);

	/*
	 * CFG_DONE is raised here only when the pre-boot pwwn is already
	 * known; otherwise it is raised from bfa_iocfc_process_faa_addr()
	 * once firmware reports the fabric-assigned address.
	 */
	if (bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn != 0) {
		bfa->ioc.attr->pwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn;
		bfa->ioc.attr->nwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_nwwn;
		bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
	}
}

/* Reset all request/response queue producer and consumer indices. */
void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
	int q;

	for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
		bfa_reqq_ci(bfa, q) = 0;
		bfa_reqq_pi(bfa, q) = 0;
		bfa_rspq_ci(bfa, q) = 0;
		bfa_rspq_pi(bfa, q) = 0;
	}
}

/*
 * Process FAA pwwn msg from fw.
 */
static void
bfa_iocfc_process_faa_addr(struct bfa_s *bfa, struct bfi_faa_addr_msg_s *msg)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	cfgrsp->pbc_cfg.pbc_pwwn = msg->pwwn;
	cfgrsp->pbc_cfg.pbc_nwwn = msg->nwwn;

	bfa->ioc.attr->pwwn = msg->pwwn;
	bfa->ioc.attr->nwwn = msg->nwwn;
	bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
}

/* Fabric Assigned Address specific functions */

/*
 *	Check whether IOC is ready before sending command down
 */
static bfa_status_t
bfa_faa_validate_request(struct bfa_s *bfa)
{
	enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
	u32 card_type = bfa->ioc.attr->card_type;

	/* FAA is supported only on operational, non-mezzanine FC IOCs */
	if (bfa_ioc_is_operational(&bfa->ioc)) {
		if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
			return BFA_STATUS_FEATURE_NOT_SUPPORTED;
	} else {
		return BFA_STATUS_IOC_NON_OP;
	}

	return BFA_STATUS_OK;
}

/*
 * Issue an asynchronous FAA attribute query to firmware; @cbfn/@cbarg are
 * invoked from bfa_faa_query_reply() when the response arrives.
 */
bfa_status_t
bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
	      bfa_cb_iocfc_t cbfn, void *cbarg)
{
	struct bfi_faa_query_s faa_attr_req;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	bfa_status_t status;

	iocfc->faa_args.faa_attr = attr;
	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	/* only one query may be outstanding at a time */
	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	iocfc->faa_args.busy = BFA_TRUE;
	memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));
	bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC,
		    BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req,
			  sizeof(struct bfi_faa_query_s));

	return BFA_STATUS_OK;
}

/*
 *	FAA query response
 */
static void
bfa_faa_query_reply(struct bfa_iocfc_s *iocfc,
		    bfi_faa_query_rsp_t *rsp)
{
	void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;

	if (iocfc->faa_args.faa_attr) {
		iocfc->faa_args.faa_attr->faa = rsp->faa;
		iocfc->faa_args.faa_attr->faa_state = rsp->faa_status;
		iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source;
	}

	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);

	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK);
	iocfc->faa_args.busy = BFA_FALSE;
}

/*
 * IOC enable request is complete
 */
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
	struct bfa_s *bfa = bfa_arg;

	if (status == BFA_STATUS_OK)
		bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_ENABLED);
	else
		bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
}

/*
 * IOC disable request is complete
 */
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_DISABLED);
}

/*
 * Notify sub-modules of hardware failure.
 */
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa->queue_process = BFA_FALSE;
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
}

/*
 * Actions on chip-reset completion.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}

/*
 * Query IOC memory requirement information.
 */
void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		  struct bfa_s *bfa)
{
	int q, per_reqq_sz, per_rspq_sz;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa);
	u32 dm_len = 0;

	/* dma memory setup for IOC */
	bfa_mem_dma_setup(meminfo, ioc_dma,
			  BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s),
				      BFA_DMA_ALIGN_SZ));

	/* dma memory setup for REQ/RSP queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (q = 0; q < cfg->fwcfg.num_cqs; q++) {
		bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q),
				  per_reqq_sz);
		bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q),
				  per_rspq_sz);
	}

	/* IOCFC dma memory - calculate Shadow CI/PI size */
	for (q = 0; q < cfg->fwcfg.num_cqs; q++)
		dm_len += (2 * BFA_CACHELINE_SZ);

	/* IOCFC dma memory - calculate config info / rsp size */
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			      BFA_CACHELINE_SZ);

	/* dma memory setup for IOCFC */
	bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);

	/* kva memory setup for IOCFC */
	bfa_mem_kva_setup(meminfo, iocfc_kva,
			  ((bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0));
}

/*
 * Attach the IOCFC module: register IOC callbacks, initialize software
 * state, claim memory, and put the state machine in the stopped state.
 * (Original comment here was a stale copy of the meminfo one.)
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_ioc_s *ioc = &bfa->ioc;

	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

	ioc->trcmod = bfa->trcmod;
	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);

	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
	bfa_iocfc_mem_claim(bfa, cfg);
	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);

	INIT_LIST_HEAD(&bfa->comp_q);
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);

	bfa->iocfc.cb_reqd = BFA_FALSE;
	bfa->iocfc.op_status = BFA_STATUS_OK;
	bfa->iocfc.submod_enabled = BFA_FALSE;

	bfa_fsm_set_state(&bfa->iocfc, bfa_iocfc_sm_stopped);
}

/*
 * Kick off IOCFC initialization via the state machine.
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_INIT);
}

/*
 * IOC start called from bfa_start(). Called to start IOC operations
 * at driver instantiation for this instance.
 */
void
bfa_iocfc_start(struct bfa_s *bfa)
{
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_START);
}

/*
 * IOC stop called from bfa_stop(). Called only when driver is unloaded
 * for this instance.
 */
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
	bfa->queue_process = BFA_FALSE;
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_STOP);
}

/* Dispatch IOCFC-class firmware mailbox messages by message id. */
void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
	struct bfa_s *bfa = bfaarg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	union bfi_iocfc_i2h_msg_u *msg;

	msg = (union bfi_iocfc_i2h_msg_u *) m;
	bfa_trc(bfa, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_IOCFC_I2H_CFG_REPLY:
		bfa_iocfc_cfgrsp(bfa);
		break;
	case BFI_IOCFC_I2H_UPDATEQ_RSP:
		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
		break;
	case BFI_IOCFC_I2H_ADDR_MSG:
		bfa_iocfc_process_faa_addr(bfa,
				(struct bfi_faa_addr_msg_s *)msg);
		break;
	case BFI_IOCFC_I2H_FAA_QUERY_RSP:
		bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
		break;
	default:
		WARN_ON(1);
	}
}

/*
 * Report current IOCFC attributes; driver-set interrupt values take
 * precedence over the firmware defaults.
 */
void
bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;

	attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
				be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
				be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);

	attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
be16_to_cpu(iocfc->cfginfo->intr_attr.latency) : be16_to_cpu(iocfc->cfgrsp->intr_attr.latency); attr->config = iocfc->cfg; } bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; struct bfi_iocfc_set_intr_req_s *m; iocfc->cfginfo->intr_attr.coalesce = attr->coalesce; iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay); iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency); if (!bfa_iocfc_is_operational(bfa)) return BFA_STATUS_OK; m = bfa_reqq_next(bfa, BFA_REQQ_IOC); if (!m) return BFA_STATUS_DEVBUSY; bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ, bfa_fn_lpu(bfa)); m->coalesce = iocfc->cfginfo->intr_attr.coalesce; m->delay = iocfc->cfginfo->intr_attr.delay; m->latency = iocfc->cfginfo->intr_attr.latency; bfa_trc(bfa, attr->delay); bfa_trc(bfa, attr->latency); bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh); return BFA_STATUS_OK; } void bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1); bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa); } /* * Enable IOC after it is disabled. */ void bfa_iocfc_enable(struct bfa_s *bfa) { bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, "IOC Enable"); bfa->iocfc.cb_reqd = BFA_TRUE; bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_ENABLE); } void bfa_iocfc_disable(struct bfa_s *bfa) { bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, "IOC Disable"); bfa->queue_process = BFA_FALSE; bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DISABLE); } bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa) { return bfa_ioc_is_operational(&bfa->ioc) && bfa_fsm_cmp_state(&bfa->iocfc, bfa_iocfc_sm_operational); } /* * Return boot target port wwns -- read from boot information in flash. 
 */
/*
 * Pre-boot (PBC) boot LUNs take precedence over the flash boot-wwn list:
 * if PBC boot is enabled, report the PBC target pwwns, otherwise copy the
 * flash-resident bootwwns table.  @nwwns receives the count, @wwns the
 * wwn array (caller-provided storage).
 */
void
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	int i;

	if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
		bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
		*nwwns = cfgrsp->pbc_cfg.nbluns;
		for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
			wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;
		return;
	}

	*nwwns = cfgrsp->bootwwns.nwwns;
	memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
}

/*
 * Copy the pre-boot vport table from the firmware config response into
 * @pbc_vport (must be large enough for the whole table) and return the
 * number of pre-boot vports.
 */
int
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
	return cfgrsp->pbc_cfg.nvports;
}

/*
 * Use this function to query the memory requirement of the BFA library.
 * This function needs to be called before bfa_attach() to get the
 * memory required of the BFA layer for a given driver configuration.
 *
 * This call will fail, if the cap is out of range compared to pre-defined
 * values within the BFA library
 *
 * @param[in] cfg -	pointer to bfa_ioc_cfg_t. Driver layer should indicate
 *			its configuration in this structure.
 *			The default values for struct bfa_iocfc_cfg_s can be
 *			fetched using bfa_cfg_get_default() API.
 *
 *			If cap's boundary check fails, the library will use
 *			the default bfa_cap_t values (and log a warning msg).
 *
 * @param[out] meminfo - pointer to bfa_meminfo_t. This content
 *			indicates the memory type (see bfa_mem_type_t) and
 *			amount of memory required.
 *
 *			Driver should allocate the memory, populate the
 *			starting address for each block and provide the same
 *			structure as input parameter to bfa_attach() call.
 *
 * @param[in] bfa -	pointer to the bfa structure, used while fetching the
 *			dma, kva memory information of the bfa sub-modules.
 *
 * @return void
 *
 * Special Considerations: @note
 */
void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		    struct bfa_s *bfa)
{
	int i;
	struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
	struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
	struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
	struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
	struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
	struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
	struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));

	/* Initialize the DMA & KVA meminfo queues */
	INIT_LIST_HEAD(&meminfo->dma_info.qe);
	INIT_LIST_HEAD(&meminfo->kva_info.qe);

	/* let IOCFC and every HAL sub-module queue its own requirements */
	bfa_iocfc_meminfo(cfg, meminfo, bfa);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->meminfo(cfg, meminfo, bfa);

	/* dma info setup */
	bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo());
	bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo());
	bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo());
	bfa_mem_dma_setup(meminfo, sfp_dma, bfa_sfp_meminfo());
	bfa_mem_dma_setup(meminfo, flash_dma,
			  bfa_flash_meminfo(cfg->drvcfg.min_cfg));
	bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo());
	bfa_mem_dma_setup(meminfo, phy_dma,
			  bfa_phy_meminfo(cfg->drvcfg.min_cfg));
}

/*
 * Use this function to do attach the driver instance with the BFA
 * library. This function will not trigger any HW initialization
 * process (which will be done in bfa_init() call)
 *
 * This call will fail, if the cap is out of range compared to
 * pre-defined values within the BFA library
 *
 * @param[out]	bfa	Pointer to bfa_t.
 * @param[in]	bfad	Opaque handle back to the driver's IOC structure
 * @param[in]	cfg	Pointer to bfa_ioc_cfg_t. Should be same structure
 *			that was used in bfa_cfg_get_meminfo().
 * @param[in]	meminfo	Pointer to bfa_meminfo_t.
The driver should * use the bfa_cfg_get_meminfo() call to * find the memory blocks required, allocate the * required memory and provide the starting addresses. * @param[in] pcidev pointer to struct bfa_pcidev_s * * @return * void * * Special Considerations: * * @note * */ void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) { int i; struct bfa_mem_dma_s *dma_info, *dma_elem; struct bfa_mem_kva_s *kva_info, *kva_elem; struct list_head *dm_qe, *km_qe; bfa->fcs = BFA_FALSE; WARN_ON((cfg == NULL) || (meminfo == NULL)); /* Initialize memory pointers for iterative allocation */ dma_info = &meminfo->dma_info; dma_info->kva_curp = dma_info->kva; dma_info->dma_curp = dma_info->dma; kva_info = &meminfo->kva_info; kva_info->kva_curp = kva_info->kva; list_for_each(dm_qe, &dma_info->qe) { dma_elem = (struct bfa_mem_dma_s *) dm_qe; dma_elem->kva_curp = dma_elem->kva; dma_elem->dma_curp = dma_elem->dma; } list_for_each(km_qe, &kva_info->qe) { kva_elem = (struct bfa_mem_kva_s *) km_qe; kva_elem->kva_curp = kva_elem->kva; } bfa_iocfc_attach(bfa, bfad, cfg, pcidev); for (i = 0; hal_mods[i]; i++) hal_mods[i]->attach(bfa, bfad, cfg, pcidev); bfa_com_port_attach(bfa); bfa_com_ablk_attach(bfa); bfa_com_cee_attach(bfa); bfa_com_sfp_attach(bfa); bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg); bfa_com_diag_attach(bfa); bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg); } /* * Use this function to delete a BFA IOC. IOC should be stopped (by * calling bfa_stop()) before this function call. * * @param[in] bfa - pointer to bfa_t. 
* * @return * void * * Special Considerations: * * @note */ void bfa_detach(struct bfa_s *bfa) { int i; for (i = 0; hal_mods[i]; i++) hal_mods[i]->detach(bfa); bfa_ioc_detach(&bfa->ioc); } void bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q) { INIT_LIST_HEAD(comp_q); list_splice_tail_init(&bfa->comp_q, comp_q); } void bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q) { struct list_head *qe; struct list_head *qen; struct bfa_cb_qe_s *hcb_qe; bfa_cb_cbfn_status_t cbfn; list_for_each_safe(qe, qen, comp_q) { hcb_qe = (struct bfa_cb_qe_s *) qe; if (hcb_qe->pre_rmv) { /* qe is invalid after return, dequeue before cbfn() */ list_del(qe); cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn); cbfn(hcb_qe->cbarg, hcb_qe->fw_status); } else hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE); } } void bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q) { struct list_head *qe; struct bfa_cb_qe_s *hcb_qe; while (!list_empty(comp_q)) { bfa_q_deq(comp_q, &qe); hcb_qe = (struct bfa_cb_qe_s *) qe; WARN_ON(hcb_qe->pre_rmv); hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE); } } /* * Return the list of PCI vendor/device id lists supported by this * BFA instance. */ void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids) { static struct bfa_pciid_s __pciids[] = { {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P}, {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P}, {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT}, {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC}, }; *npciids = sizeof(__pciids) / sizeof(__pciids[0]); *pciids = __pciids; } /* * Use this function query the default struct bfa_iocfc_cfg_s value (compiled * into BFA layer). The OS driver can then turn back and overwrite entries that * have been configured by the user. 
* * @param[in] cfg - pointer to bfa_ioc_cfg_t * * @return * void * * Special Considerations: * note */ void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg) { cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS; cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS; cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS; cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS; cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS; cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS; cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS; cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS; cfg->fwcfg.num_fwtio_reqs = 0; cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS; cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS; cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS; cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS; cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS; cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF; cfg->drvcfg.ioc_recover = BFA_FALSE; cfg->drvcfg.delay_comp = BFA_FALSE; } void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg) { bfa_cfg_get_default(cfg); cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN; cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN; cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN; cfg->fwcfg.num_uf_bufs = BFA_UF_MIN; cfg->fwcfg.num_rports = BFA_RPORT_MIN; cfg->fwcfg.num_fwtio_reqs = 0; cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN; cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN; cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN; cfg->drvcfg.min_cfg = BFA_TRUE; }
gpl-2.0
TheBootloader/android_kernel_samsung_msm8930-common
drivers/scsi/bfa/bfa_svc.c
4936
136999
/* * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * * Linux driver for Brocade Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as * published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include "bfad_drv.h" #include "bfad_im.h" #include "bfa_plog.h" #include "bfa_cs.h" #include "bfa_modules.h" BFA_TRC_FILE(HAL, FCXP); BFA_MODULE(fcdiag); BFA_MODULE(fcxp); BFA_MODULE(sgpg); BFA_MODULE(lps); BFA_MODULE(fcport); BFA_MODULE(rport); BFA_MODULE(uf); /* * LPS related definitions */ #define BFA_LPS_MIN_LPORTS (1) #define BFA_LPS_MAX_LPORTS (256) /* * Maximum Vports supported per physical port or vf. 
*/ #define BFA_LPS_MAX_VPORTS_SUPP_CB 255 #define BFA_LPS_MAX_VPORTS_SUPP_CT 190 /* * FC PORT related definitions */ /* * The port is considered disabled if corresponding physical port or IOC are * disabled explicitly */ #define BFA_PORT_IS_DISABLED(bfa) \ ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \ (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE)) /* * BFA port state machine events */ enum bfa_fcport_sm_event { BFA_FCPORT_SM_START = 1, /* start port state machine */ BFA_FCPORT_SM_STOP = 2, /* stop port state machine */ BFA_FCPORT_SM_ENABLE = 3, /* enable port */ BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */ BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */ BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */ BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkup down */ BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */ BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */ }; /* * BFA port link notification state machine events */ enum bfa_fcport_ln_sm_event { BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */ BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */ BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */ }; /* * RPORT related definitions */ #define bfa_rport_offline_cb(__rp) do { \ if ((__rp)->bfa->fcs) \ bfa_cb_rport_offline((__rp)->rport_drv); \ else { \ bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \ __bfa_cb_rport_offline, (__rp)); \ } \ } while (0) #define bfa_rport_online_cb(__rp) do { \ if ((__rp)->bfa->fcs) \ bfa_cb_rport_online((__rp)->rport_drv); \ else { \ bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \ __bfa_cb_rport_online, (__rp)); \ } \ } while (0) /* * forward declarations FCXP related functions */ static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete); static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_rsp_s *fcxp_rsp); static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp, struct fchs_s *fchs); static void bfa_fcxp_qresume(void *cbarg); static 
void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req); /* * forward declarations for LPS functions */ static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, struct bfa_s *bfa); static void bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev); static void bfa_lps_detach(struct bfa_s *bfa); static void bfa_lps_start(struct bfa_s *bfa); static void bfa_lps_stop(struct bfa_s *bfa); static void bfa_lps_iocdisable(struct bfa_s *bfa); static void bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp); static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count); static void bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp); static void bfa_lps_reqq_resume(void *lps_arg); static void bfa_lps_free(struct bfa_lps_s *lps); static void bfa_lps_send_login(struct bfa_lps_s *lps); static void bfa_lps_send_logout(struct bfa_lps_s *lps); static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps); static void bfa_lps_login_comp(struct bfa_lps_s *lps); static void bfa_lps_logout_comp(struct bfa_lps_s *lps); static void bfa_lps_cvl_event(struct bfa_lps_s *lps); /* * forward declaration for LPS state machine */ static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event); static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event); static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event); static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event); static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event); static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event); static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event); /* * forward declaration for FC Port functions */ static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport); static bfa_boolean_t 
bfa_fcport_send_disable(struct bfa_fcport_s *fcport); static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport); static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport); static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport); static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete); static void bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event, bfa_boolean_t trunk); static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event); static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete); static void bfa_fcport_stats_get_timeout(void *cbarg); static void bfa_fcport_stats_clr_timeout(void *cbarg); static void bfa_trunk_iocdisable(struct bfa_s *bfa); /* * forward declaration for FC PORT state machine */ static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); 
static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event); static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event); static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event); static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event); static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event); static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event); static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event); static struct bfa_sm_table_s hal_port_sm_table[] = { {BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT}, {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT}, {BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING}, {BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN}, {BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP}, {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT}, {BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT}, {BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING}, {BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED}, {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED}, {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN}, {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN}, }; /* * forward declaration for RPORT related functions */ static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod); static void bfa_rport_free(struct bfa_rport_s *rport); static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp); static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp); static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp); static void __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete); static void __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete); /* * 
forward declaration for RPORT state machine
 */
static void	bfa_rport_sm_uninit(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void	bfa_rport_sm_created(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void	bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void	bfa_rport_sm_online(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void	bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void	bfa_rport_sm_offline(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void	bfa_rport_sm_deleting(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void	bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void	bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void	bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void	bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void	bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void	bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
					enum bfa_rport_event event);

/*
 * PLOG related definitions
 */

/*
 * Validate a port-log record: reject unknown log types, and for non-INT
 * records reject out-of-range integer counts.  Returns 1 on bad record,
 * 0 if ok.
 *
 * NOTE(review): the second check applies log_num_ints validation only to
 * non-INT records, which reads as if `==` was intended; this matches the
 * upstream driver, so it is left unchanged -- confirm with maintainers.
 */
static int
plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
{
	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
	    (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
		return 1;

	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
	    (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
		return 1;

	return 0;
}

/*
 * Timestamp source for port-log records: wall-clock time truncated to
 * whole seconds.
 */
static u64
bfa_get_log_time(void)
{
	u64 system_time = 0;
	struct timeval tv;

	do_gettimeofday(&tv);

	/* We are interested in seconds only. */
	system_time = tv.tv_sec;
	return system_time;
}

/*
 * Append a validated record to the circular port log. When the buffer is
 * full (head catches tail) the oldest record is overwritten.
 */
static void
bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
{
	u16 tail;
	struct bfa_plog_rec_s *pl_recp;

	if (plog->plog_enabled == 0)
		return;

	if (plkd_validate_logrec(pl_rec)) {
		WARN_ON(1);
		return;
	}

	tail = plog->tail;

	pl_recp = &(plog->plog_recs[tail]);

	memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));

	pl_recp->tv = bfa_get_log_time();
	BFA_PL_LOG_REC_INCR(plog->tail);

	/* buffer wrapped: advance head to drop the oldest record */
	if (plog->head == plog->tail)
		BFA_PL_LOG_REC_INCR(plog->head);
}

/*
 * Initialize (and enable) an empty port log, stamping it with the
 * plog signature.
 */
void
bfa_plog_init(struct bfa_plog_s *plog)
{
	memset((char *)plog, 0, sizeof(struct bfa_plog_s));

	memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
	plog->head = plog->tail = 0;
	plog->plog_enabled = 1;
}

/*
 * Log a NUL-terminated string record (truncated to the plog string size).
 */
void
bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
		enum bfa_plog_eid event,
		u16 misc, char *log_str)
{
	struct bfa_plog_rec_s  lp;

	if (plog->plog_enabled) {
		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
		lp.mid = mid;
		lp.eid = event;
		lp.log_type = BFA_PL_LOG_TYPE_STRING;
		lp.misc = misc;
		strncpy(lp.log_entry.string_log, log_str,
			BFA_PL_STRING_LOG_SZ - 1);
		/* strncpy does not guarantee termination -- force it */
		lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
		bfa_plog_add(plog, &lp);
	}
}

/*
 * Log an integer-array record; @num_ints is clamped to BFA_PL_INT_LOG_SZ.
 */
void
bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
		enum bfa_plog_eid event,
		u16 misc, u32 *intarr, u32 num_ints)
{
	struct bfa_plog_rec_s  lp;
	u32 i;

	if (num_ints > BFA_PL_INT_LOG_SZ)
		num_ints = BFA_PL_INT_LOG_SZ;

	if (plog->plog_enabled) {
		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
		lp.mid = mid;
		lp.eid = event;
		lp.log_type = BFA_PL_LOG_TYPE_INT;
		lp.misc = misc;

		for (i = 0; i < num_ints; i++)
			lp.log_entry.int_log[i] = intarr[i];

		lp.log_num_ints = (u8) num_ints;

		bfa_plog_add(plog, &lp);
	}
}

/*
 * Log selected words of an FC frame header (words 0, 1 and 4 of the raw
 * header) as an integer-array record.
 */
void
bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
			enum bfa_plog_eid event,
			u16 misc, struct fchs_s *fchdr)
{
	struct bfa_plog_rec_s  lp;
	u32	*tmp_int = (u32 *) fchdr;
	u32	ints[BFA_PL_INT_LOG_SZ];

	if (plog->plog_enabled) {
		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));

		ints[0] = tmp_int[0];
		ints[1] = tmp_int[1];
		ints[2] = tmp_int[4];

		bfa_plog_intarr(plog, mid, event, misc, ints, 3);
	}
}

/*
 * Same as bfa_plog_fchdr() but additionally records the first payload
 * word (@pld_w0).
 */
void
bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
		      enum bfa_plog_eid event,
		      u16 misc, struct fchs_s *fchdr, u32 pld_w0)
{
	struct bfa_plog_rec_s  lp;
	u32	*tmp_int = (u32 *) fchdr;
	u32	ints[BFA_PL_INT_LOG_SZ];

	if (plog->plog_enabled) {
		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));

		ints[0] = tmp_int[0];
		ints[1] = tmp_int[1];
		ints[2] = tmp_int[4];
		ints[3] = pld_w0;

		bfa_plog_intarr(plog, mid, event, misc, ints, 4);
	}
}


/*
 *  fcxp_pvt BFA FCXP private functions
 */

/*
 * Carve the FCXP array out of the module's KVA block, zero it, and place
 * every element on the free queue with its request-queue wait element
 * initialized.
 */
static void
claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
{
	u16	i;
	struct bfa_fcxp_s *fcxp;

	fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
	memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);

	INIT_LIST_HEAD(&mod->fcxp_free_q);
	INIT_LIST_HEAD(&mod->fcxp_active_q);
	INIT_LIST_HEAD(&mod->fcxp_unused_q);

	mod->fcxp_list = fcxp;

	for (i = 0; i < mod->num_fcxps; i++) {
		fcxp->fcxp_mod = mod;
		fcxp->fcxp_tag = i;

		list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
		bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
		fcxp->reqq_waiting = BFA_FALSE;

		fcxp = fcxp + 1;
	}

	/* advance the module's KVA cursor past the claimed array */
	bfa_mem_kva_curp(mod) = (void *)fcxp;
}

/*
 * Register the FCXP module's DMA and KVA memory requirements. DMA space
 * is split across BFI segments; per-FCXP payload size depends on min_cfg.
 */
static void
bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
	struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_fcxp;
	u16	num_fcxps = cfg->fwcfg.num_fcxp_reqs;
	u32	per_fcxp_sz;

	if (num_fcxps == 0)
		return;

	if (cfg->drvcfg.min_cfg)
		per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
	else
		per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;

	/* dma memory */
	nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
	per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);

	bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
		if (num_fcxps >= per_seg_fcxp) {
			num_fcxps -= per_seg_fcxp;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_fcxp * per_fcxp_sz);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_fcxps * per_fcxp_sz);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, fcxp_kva,
		cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
}

/*
 * Module attach: record config, set request/response payload sizes and
 * claim the FCXP pool memory.
 */
static void
bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	mod->bfa = bfa;
	mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;

	/*
	 * Initialize FCXP request and response payload sizes.
	 */
	mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
	if (!cfg->drvcfg.min_cfg)
		mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;

	INIT_LIST_HEAD(&mod->wait_q);

	claim_fcxps_mem(mod);
}

/* FCXP module lifecycle hooks -- nothing to do for these stages. */
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}

static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}

static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}

/*
 * IOC failure handling: fail every active FCXP.  Direct (FCS) callers are
 * called back inline and the FCXP freed; others get a deferred completion
 * with IOC_FAILURE status.
 */
static void
bfa_fcxp_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s *fcxp;
	struct list_head *qe, *qen;

	/* Enqueue unused fcxp resources to free_q */
	list_splice_tail_init(&mod->fcxp_unused_q, &mod->fcxp_free_q);

	list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
		fcxp = (struct bfa_fcxp_s *) qe;
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
			bfa_fcxp_free(fcxp);
		} else {
			fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
			bfa_cb_queue(bfa, &fcxp->hcb_qe,
				     __bfa_fcxp_send_cbfn, fcxp);
		}
	}
}

/*
 * Pop an FCXP from the free queue and move it to the active queue.
 * Returns NULL when the pool is exhausted.
 */
static struct bfa_fcxp_s *
bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
{
	struct bfa_fcxp_s *fcxp;

	bfa_q_deq(&fm->fcxp_free_q, &fcxp);

	if (fcxp)
		list_add_tail(&fcxp->qe, &fm->fcxp_active_q);

	return fcxp;
}

/*
 * Set up one direction (request or response) of an FCXP: either use the
 * internal buffer (n_sgles == 0) or record the caller's SG address/length
 * callbacks.  Only inline SG lists (<= BFI_SGE_INLINE) are supported.
 */
static void
bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
	       struct bfa_s *bfa,
	       u8 *use_ibuf,
	       u32 *nr_sgles,
	       bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
	       bfa_fcxp_get_sglen_t *r_sglen_cbfn,
	       struct list_head *r_sgpg_q,
	       int n_sgles,
	       bfa_fcxp_get_sgaddr_t sga_cbfn,
	       bfa_fcxp_get_sglen_t sglen_cbfn)
{
	WARN_ON(bfa == NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	if (n_sgles == 0) {
		*use_ibuf = 1;
	} else {
		WARN_ON(*sga_cbfn == NULL);
		WARN_ON(*sglen_cbfn == NULL);

		*use_ibuf = 0;
		*r_sga_cbfn = sga_cbfn;
		*r_sglen_cbfn = sglen_cbfn;

		*nr_sgles = n_sgles;

		/*
		 * alloc required sgpgs
		 */
		if (n_sgles > BFI_SGE_INLINE)
			WARN_ON(1);
	}

}

/*
 * Initialize an FCXP for a caller: record the owner and configure both the
 * request and response SG setups via bfa_fcxp_init_reqrsp().
 */
static void
bfa_fcxp_init(struct bfa_fcxp_s *fcxp, void *caller,
	      struct bfa_s *bfa, int nreq_sgles,
	      int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	      bfa_fcxp_get_sglen_t req_sglen_cbfn,
	      bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	      bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{

	WARN_ON(bfa == NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	fcxp->caller = caller;

	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
		&fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
		nreq_sgles, req_sga_cbfn, req_sglen_cbfn);

	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
		&fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
		nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);

}

/*
 * Release an FCXP.  If an allocation waiter is queued, hand the FCXP
 * directly to it (re-initialized from the waiter's parameters); otherwise
 * return it to the free queue.
 */
static void
bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
	struct bfa_fcxp_wqe_s *wqe;

	bfa_q_deq(&mod->wait_q, &wqe);
	if (wqe) {
		bfa_trc(mod->bfa, fcxp->fcxp_tag);

		bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
			wqe->nrsp_sgles, wqe->req_sga_cbfn,
			wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
			wqe->rsp_sglen_cbfn);

		wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
		return;
	}

	WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
	list_del(&fcxp->qe);
	list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
}

/*
 * No-op completion used when an FCXP's result is to be discarded.
 */
static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		   bfa_status_t req_status, u32 rsp_len,
		   u32 resid_len, struct fchs_s *rsp_fchs)
{
	/* discarded fcxp completion */
}

/*
 * Deferred send-completion: on "complete" invoke the caller's callback
 * with the cached response fields; otherwise (cleanup path) just free
 * the FCXP.
 */
static void
__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcxp_s *fcxp = cbarg;

	if (complete) {
		fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
				fcxp->rsp_status, fcxp->rsp_len,
				fcxp->residue_len, &fcxp->rsp_fchs);
	} else {
		bfa_fcxp_free(fcxp);
	}
}

/*
 * Firmware send-response handler: byte-swap the response fields, locate
 * the FCXP by tag, log the RX frame, then complete either inline (FCS
 * caller) or via the deferred callback queue.
 */
static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s	*fcxp;
	u16	fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);

	bfa_trc(bfa, fcxp_tag);

	fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);

	/*
	 * @todo f/w should not set residue to non-0 when everything
	 *	 is received.
	 */
	if (fcxp_rsp->req_status == BFA_STATUS_OK)
		fcxp_rsp->residue_len = 0;
	else
		fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);

	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

	WARN_ON(fcxp->send_cbfn == NULL);

	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

	if (fcxp->send_cbfn != NULL) {
		bfa_trc(mod->bfa, (NULL == fcxp->caller));
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
			/*
			 * fcxp automatically freed on return from the callback
			 */
			bfa_fcxp_free(fcxp);
		} else {
			/* cache the response for the deferred callback */
			fcxp->rsp_status = fcxp_rsp->req_status;
			fcxp->rsp_len = fcxp_rsp->rsp_len;
			fcxp->residue_len = fcxp_rsp->residue_len;
			fcxp->rsp_fchs = fcxp_rsp->fchs;

			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	} else {
		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
	}
}

/*
 * Port-log a transmitted FCXP frame header (plus first payload word when
 * the internal request buffer is in use).
 */
static void
hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
		 struct fchs_s *fchs)
{
	/*
	 * TODO: TX ox_id
	 */
	if (reqlen > 0) {
		if (fcxp->use_ireqbuf) {
			u32	pld_w0 =
				*((u32 *) BFA_FCXP_REQ_PLD(fcxp));

			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
					BFA_PL_EID_TX,
					reqlen + sizeof(struct fchs_s), fchs,
					pld_w0);
		} else {
			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
					BFA_PL_EID_TX,
					reqlen + sizeof(struct fchs_s),
					fchs);
		}
	} else {
		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
			       reqlen + sizeof(struct fchs_s), fchs);
	}
}

/*
 * Port-log a received FCXP response frame header (plus first payload word
 * when the internal response buffer is in use).
 */
static void
hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
		 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	if (fcxp_rsp->rsp_len > 0) {
		if (fcxp->use_irspbuf) {
			u32	pld_w0 =
				*((u32 *) BFA_FCXP_RSP_PLD(fcxp));

			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
					      BFA_PL_EID_RX,
					      (u16) fcxp_rsp->rsp_len,
					      &fcxp_rsp->fchs, pld_w0);
		} else {
			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
				       BFA_PL_EID_RX, (u16) fcxp_rsp->rsp_len,
				       &fcxp_rsp->fchs);
		}
	} else {
		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
			       (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
	}
}

/*
 * Handler to resume sending fcxp when space in available in cpe queue.
 *
 * NOTE(review): bfa_reqq_next() is used unchecked here -- presumably the
 * reqq wait machinery only invokes this callback once queue space is
 * available; matches upstream, confirm before changing.
 */
static void
bfa_fcxp_qresume(void *cbarg)
{
	struct bfa_fcxp_s		*fcxp = cbarg;
	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
	struct bfi_fcxp_send_req_s	*send_req;

	fcxp->reqq_waiting = BFA_FALSE;
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	bfa_fcxp_queue(fcxp, send_req);
}

/*
 * Queue fcxp send request to firmware.  Builds the BFI send request
 * (CPU-to-big-endian for multi-byte fields), filling rport, lport,
 * class-of-service, timeout and request/response SG descriptors.
 */
static void
bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
{
	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
	struct bfa_rport_s		*rport = reqi->bfa_rport;

	bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
		    bfa_fn_lpu(bfa));

	send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
	if (rport) {
		send_req->rport_fw_hndl = rport->fw_handle;
		send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
		if (send_req->max_frmsz == 0)
			send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
	} else {
		send_req->rport_fw_hndl = 0;
		send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
	}

	send_req->vf_id = cpu_to_be16(reqi->vf_id);
	send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
	send_req->class = reqi->class;
	send_req->rsp_timeout = rspi->rsp_timeout;
	send_req->cts = reqi->cts;
	send_req->fchs = reqi->fchs;

	send_req->req_len = cpu_to_be32(reqi->req_tot_len);
	send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);

	/*
	 * setup req sgles
	 */
	if (fcxp->use_ireqbuf == 1) {
		bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
					BFA_FCXP_REQ_PLD_PA(fcxp));
	} else {
		if (fcxp->nreq_sgles > 0) {
			WARN_ON(fcxp->nreq_sgles != 1);
			bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
				fcxp->req_sga_cbfn(fcxp->caller,
0)); } else { WARN_ON(reqi->req_tot_len != 0); bfa_alen_set(&send_req->rsp_alen, 0, 0); } } /* * setup rsp sgles */ if (fcxp->use_irspbuf == 1) { WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ); bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen, BFA_FCXP_RSP_PLD_PA(fcxp)); } else { if (fcxp->nrsp_sgles > 0) { WARN_ON(fcxp->nrsp_sgles != 1); bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen, fcxp->rsp_sga_cbfn(fcxp->caller, 0)); } else { WARN_ON(rspi->rsp_maxlen != 0); bfa_alen_set(&send_req->rsp_alen, 0, 0); } } hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs); bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh); bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP)); bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP)); } /* * Allocate an FCXP instance to send a response or to send a request * that has a response. Request/response buffers are allocated by caller. * * @param[in] bfa BFA bfa instance * @param[in] nreq_sgles Number of SG elements required for request * buffer. 0, if fcxp internal buffers are used. * Use bfa_fcxp_get_reqbuf() to get the * internal req buffer. * @param[in] req_sgles SG elements describing request buffer. Will be * copied in by BFA and hence can be freed on * return from this function. * @param[in] get_req_sga function ptr to be called to get a request SG * Address (given the sge index). * @param[in] get_req_sglen function ptr to be called to get a request SG * len (given the sge index). * @param[in] get_rsp_sga function ptr to be called to get a response SG * Address (given the sge index). * @param[in] get_rsp_sglen function ptr to be called to get a response SG * len (given the sge index). * * @return FCXP instance. NULL on failure. 
*/ struct bfa_fcxp_s * bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles, int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn, bfa_fcxp_get_sglen_t req_sglen_cbfn, bfa_fcxp_get_sgaddr_t rsp_sga_cbfn, bfa_fcxp_get_sglen_t rsp_sglen_cbfn) { struct bfa_fcxp_s *fcxp = NULL; WARN_ON(bfa == NULL); fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa)); if (fcxp == NULL) return NULL; bfa_trc(bfa, fcxp->fcxp_tag); bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn, req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn); return fcxp; } /* * Get the internal request buffer pointer * * @param[in] fcxp BFA fcxp pointer * * @return pointer to the internal request buffer */ void * bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp) { struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; void *reqbuf; WARN_ON(fcxp->use_ireqbuf != 1); reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag, mod->req_pld_sz + mod->rsp_pld_sz); return reqbuf; } u32 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp) { struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; return mod->req_pld_sz; } /* * Get the internal response buffer pointer * * @param[in] fcxp BFA fcxp pointer * * @return pointer to the internal request buffer */ void * bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp) { struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; void *fcxp_buf; WARN_ON(fcxp->use_irspbuf != 1); fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag, mod->req_pld_sz + mod->rsp_pld_sz); /* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */ return ((u8 *) fcxp_buf) + mod->req_pld_sz; } /* * Free the BFA FCXP * * @param[in] fcxp BFA fcxp pointer * * @return void */ void bfa_fcxp_free(struct bfa_fcxp_s *fcxp) { struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; WARN_ON(fcxp == NULL); bfa_trc(mod->bfa, fcxp->fcxp_tag); bfa_fcxp_put(fcxp); } /* * Send a FCXP request * * @param[in] fcxp BFA fcxp pointer * @param[in] rport BFA rport pointer. 
Could be left NULL for WKA rports * @param[in] vf_id virtual Fabric ID * @param[in] lp_tag lport tag * @param[in] cts use Continuous sequence * @param[in] cos fc Class of Service * @param[in] reqlen request length, does not include FCHS length * @param[in] fchs fc Header Pointer. The header content will be copied * in by BFA. * * @param[in] cbfn call back function to be called on receiving * the response * @param[in] cbarg arg for cbfn * @param[in] rsp_timeout * response timeout * * @return bfa_status_t */ void bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport, u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos, u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn, void *cbarg, u32 rsp_maxlen, u8 rsp_timeout) { struct bfa_s *bfa = fcxp->fcxp_mod->bfa; struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info; struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info; struct bfi_fcxp_send_req_s *send_req; bfa_trc(bfa, fcxp->fcxp_tag); /* * setup request/response info */ reqi->bfa_rport = rport; reqi->vf_id = vf_id; reqi->lp_tag = lp_tag; reqi->class = cos; rspi->rsp_timeout = rsp_timeout; reqi->cts = cts; reqi->fchs = *fchs; reqi->req_tot_len = reqlen; rspi->rsp_maxlen = rsp_maxlen; fcxp->send_cbfn = cbfn ? 
cbfn : bfa_fcxp_null_comp; fcxp->send_cbarg = cbarg; /* * If no room in CPE queue, wait for space in request queue */ send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP); if (!send_req) { bfa_trc(bfa, fcxp->fcxp_tag); fcxp->reqq_waiting = BFA_TRUE; bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe); return; } bfa_fcxp_queue(fcxp, send_req); } /* * Abort a BFA FCXP * * @param[in] fcxp BFA fcxp pointer * * @return void */ bfa_status_t bfa_fcxp_abort(struct bfa_fcxp_s *fcxp) { bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag); WARN_ON(1); return BFA_STATUS_OK; } void bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe, bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg, void *caller, int nreq_sgles, int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn, bfa_fcxp_get_sglen_t req_sglen_cbfn, bfa_fcxp_get_sgaddr_t rsp_sga_cbfn, bfa_fcxp_get_sglen_t rsp_sglen_cbfn) { struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); WARN_ON(!list_empty(&mod->fcxp_free_q)); wqe->alloc_cbfn = alloc_cbfn; wqe->alloc_cbarg = alloc_cbarg; wqe->caller = caller; wqe->bfa = bfa; wqe->nreq_sgles = nreq_sgles; wqe->nrsp_sgles = nrsp_sgles; wqe->req_sga_cbfn = req_sga_cbfn; wqe->req_sglen_cbfn = req_sglen_cbfn; wqe->rsp_sga_cbfn = rsp_sga_cbfn; wqe->rsp_sglen_cbfn = rsp_sglen_cbfn; list_add_tail(&wqe->qe, &mod->wait_q); } void bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe) { struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); WARN_ON(!bfa_q_is_on_q(&mod->wait_q, wqe)); list_del(&wqe->qe); } void bfa_fcxp_discard(struct bfa_fcxp_s *fcxp) { /* * If waiting for room in request queue, cancel reqq wait * and free fcxp. 
*/ if (fcxp->reqq_waiting) { fcxp->reqq_waiting = BFA_FALSE; bfa_reqq_wcancel(&fcxp->reqq_wqe); bfa_fcxp_free(fcxp); return; } fcxp->send_cbfn = bfa_fcxp_null_comp; } void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) { switch (msg->mhdr.msg_id) { case BFI_FCXP_I2H_SEND_RSP: hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg); break; default: bfa_trc(bfa, msg->mhdr.msg_id); WARN_ON(1); } } u32 bfa_fcxp_get_maxrsp(struct bfa_s *bfa) { struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); return mod->rsp_pld_sz; } void bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw) { struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); struct list_head *qe; int i; for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) { bfa_q_deq_tail(&mod->fcxp_free_q, &qe); list_add_tail(qe, &mod->fcxp_unused_q); } } /* * BFA LPS state machine functions */ /* * Init state -- no login */ static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event) { bfa_trc(lps->bfa, lps->bfa_tag); bfa_trc(lps->bfa, event); switch (event) { case BFA_LPS_SM_LOGIN: if (bfa_reqq_full(lps->bfa, lps->reqq)) { bfa_sm_set_state(lps, bfa_lps_sm_loginwait); bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe); } else { bfa_sm_set_state(lps, bfa_lps_sm_login); bfa_lps_send_login(lps); } if (lps->fdisc) bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, BFA_PL_EID_LOGIN, 0, "FDISC Request"); else bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, BFA_PL_EID_LOGIN, 0, "FLOGI Request"); break; case BFA_LPS_SM_LOGOUT: bfa_lps_logout_comp(lps); break; case BFA_LPS_SM_DELETE: bfa_lps_free(lps); break; case BFA_LPS_SM_RX_CVL: case BFA_LPS_SM_OFFLINE: break; case BFA_LPS_SM_FWRSP: /* * Could happen when fabric detects loopback and discards * the lps request. 
Fw will eventually sent out the timeout * Just ignore */ break; default: bfa_sm_fault(lps->bfa, event); } } /* * login is in progress -- awaiting response from firmware */ static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event) { bfa_trc(lps->bfa, lps->bfa_tag); bfa_trc(lps->bfa, event); switch (event) { case BFA_LPS_SM_FWRSP: if (lps->status == BFA_STATUS_OK) { bfa_sm_set_state(lps, bfa_lps_sm_online); if (lps->fdisc) bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, BFA_PL_EID_LOGIN, 0, "FDISC Accept"); else bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, BFA_PL_EID_LOGIN, 0, "FLOGI Accept"); /* If N2N, send the assigned PID to FW */ bfa_trc(lps->bfa, lps->fport); bfa_trc(lps->bfa, lps->lp_pid); if (!lps->fport && lps->lp_pid) bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID); } else { bfa_sm_set_state(lps, bfa_lps_sm_init); if (lps->fdisc) bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, BFA_PL_EID_LOGIN, 0, "FDISC Fail (RJT or timeout)"); else bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, BFA_PL_EID_LOGIN, 0, "FLOGI Fail (RJT or timeout)"); } bfa_lps_login_comp(lps); break; case BFA_LPS_SM_OFFLINE: case BFA_LPS_SM_DELETE: bfa_sm_set_state(lps, bfa_lps_sm_init); break; case BFA_LPS_SM_SET_N2N_PID: bfa_trc(lps->bfa, lps->fport); bfa_trc(lps->bfa, lps->lp_pid); break; default: bfa_sm_fault(lps->bfa, event); } } /* * login pending - awaiting space in request queue */ static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event) { bfa_trc(lps->bfa, lps->bfa_tag); bfa_trc(lps->bfa, event); switch (event) { case BFA_LPS_SM_RESUME: bfa_sm_set_state(lps, bfa_lps_sm_login); bfa_lps_send_login(lps); break; case BFA_LPS_SM_OFFLINE: case BFA_LPS_SM_DELETE: bfa_sm_set_state(lps, bfa_lps_sm_init); bfa_reqq_wcancel(&lps->wqe); break; case BFA_LPS_SM_RX_CVL: /* * Login was not even sent out; so when getting out * of this state, it will appear like a login retry * after Clear virtual link */ break; default: bfa_sm_fault(lps->bfa, event); } } /* * login 
complete */ static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event) { bfa_trc(lps->bfa, lps->bfa_tag); bfa_trc(lps->bfa, event); switch (event) { case BFA_LPS_SM_LOGOUT: if (bfa_reqq_full(lps->bfa, lps->reqq)) { bfa_sm_set_state(lps, bfa_lps_sm_logowait); bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe); } else { bfa_sm_set_state(lps, bfa_lps_sm_logout); bfa_lps_send_logout(lps); } bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, BFA_PL_EID_LOGO, 0, "Logout"); break; case BFA_LPS_SM_RX_CVL: bfa_sm_set_state(lps, bfa_lps_sm_init); /* Let the vport module know about this event */ bfa_lps_cvl_event(lps); bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx"); break; case BFA_LPS_SM_SET_N2N_PID: if (bfa_reqq_full(lps->bfa, lps->reqq)) { bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait); bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe); } else bfa_lps_send_set_n2n_pid(lps); break; case BFA_LPS_SM_OFFLINE: case BFA_LPS_SM_DELETE: bfa_sm_set_state(lps, bfa_lps_sm_init); break; default: bfa_sm_fault(lps->bfa, event); } } /* * login complete */ static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event) { bfa_trc(lps->bfa, lps->bfa_tag); bfa_trc(lps->bfa, event); switch (event) { case BFA_LPS_SM_RESUME: bfa_sm_set_state(lps, bfa_lps_sm_online); bfa_lps_send_set_n2n_pid(lps); break; case BFA_LPS_SM_LOGOUT: bfa_sm_set_state(lps, bfa_lps_sm_logowait); bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, BFA_PL_EID_LOGO, 0, "Logout"); break; case BFA_LPS_SM_RX_CVL: bfa_sm_set_state(lps, bfa_lps_sm_init); bfa_reqq_wcancel(&lps->wqe); /* Let the vport module know about this event */ bfa_lps_cvl_event(lps); bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. 
Link Rx"); break; case BFA_LPS_SM_OFFLINE: case BFA_LPS_SM_DELETE: bfa_sm_set_state(lps, bfa_lps_sm_init); bfa_reqq_wcancel(&lps->wqe); break; default: bfa_sm_fault(lps->bfa, event); } } /* * logout in progress - awaiting firmware response */ static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event) { bfa_trc(lps->bfa, lps->bfa_tag); bfa_trc(lps->bfa, event); switch (event) { case BFA_LPS_SM_FWRSP: bfa_sm_set_state(lps, bfa_lps_sm_init); bfa_lps_logout_comp(lps); break; case BFA_LPS_SM_OFFLINE: case BFA_LPS_SM_DELETE: bfa_sm_set_state(lps, bfa_lps_sm_init); break; default: bfa_sm_fault(lps->bfa, event); } } /* * logout pending -- awaiting space in request queue */ static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event) { bfa_trc(lps->bfa, lps->bfa_tag); bfa_trc(lps->bfa, event); switch (event) { case BFA_LPS_SM_RESUME: bfa_sm_set_state(lps, bfa_lps_sm_logout); bfa_lps_send_logout(lps); break; case BFA_LPS_SM_OFFLINE: case BFA_LPS_SM_DELETE: bfa_sm_set_state(lps, bfa_lps_sm_init); bfa_reqq_wcancel(&lps->wqe); break; default: bfa_sm_fault(lps->bfa, event); } } /* * lps_pvt BFA LPS private functions */ /* * return memory requirement */ static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, struct bfa_s *bfa) { struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa); if (cfg->drvcfg.min_cfg) bfa_mem_kva_setup(minfo, lps_kva, sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS); else bfa_mem_kva_setup(minfo, lps_kva, sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS); } /* * bfa module attach at initialization time */ static void bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) { struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); struct bfa_lps_s *lps; int i; mod->num_lps = BFA_LPS_MAX_LPORTS; if (cfg->drvcfg.min_cfg) mod->num_lps = BFA_LPS_MIN_LPORTS; else mod->num_lps = BFA_LPS_MAX_LPORTS; mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod); 
bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s); INIT_LIST_HEAD(&mod->lps_free_q); INIT_LIST_HEAD(&mod->lps_active_q); INIT_LIST_HEAD(&mod->lps_login_q); for (i = 0; i < mod->num_lps; i++, lps++) { lps->bfa = bfa; lps->bfa_tag = (u8) i; lps->reqq = BFA_REQQ_LPS; bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps); list_add_tail(&lps->qe, &mod->lps_free_q); } } static void bfa_lps_detach(struct bfa_s *bfa) { } static void bfa_lps_start(struct bfa_s *bfa) { } static void bfa_lps_stop(struct bfa_s *bfa) { } /* * IOC in disabled state -- consider all lps offline */ static void bfa_lps_iocdisable(struct bfa_s *bfa) { struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); struct bfa_lps_s *lps; struct list_head *qe, *qen; list_for_each_safe(qe, qen, &mod->lps_active_q) { lps = (struct bfa_lps_s *) qe; bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE); } list_for_each_safe(qe, qen, &mod->lps_login_q) { lps = (struct bfa_lps_s *) qe; bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE); } list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q); } /* * Firmware login response */ static void bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp) { struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); struct bfa_lps_s *lps; WARN_ON(rsp->bfa_tag >= mod->num_lps); lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag); lps->status = rsp->status; switch (rsp->status) { case BFA_STATUS_OK: lps->fw_tag = rsp->fw_tag; lps->fport = rsp->f_port; if (lps->fport) lps->lp_pid = rsp->lp_pid; lps->npiv_en = rsp->npiv_en; lps->pr_bbcred = be16_to_cpu(rsp->bb_credit); lps->pr_pwwn = rsp->port_name; lps->pr_nwwn = rsp->node_name; lps->auth_req = rsp->auth_req; lps->lp_mac = rsp->lp_mac; lps->brcd_switch = rsp->brcd_switch; lps->fcf_mac = rsp->fcf_mac; lps->pr_bbscn = rsp->bb_scn; break; case BFA_STATUS_FABRIC_RJT: lps->lsrjt_rsn = rsp->lsrjt_rsn; lps->lsrjt_expl = rsp->lsrjt_expl; break; case BFA_STATUS_EPROTOCOL: lps->ext_status = rsp->ext_status; break; case BFA_STATUS_VPORT_MAX: if 
(rsp->ext_status) bfa_lps_no_res(lps, rsp->ext_status); break; default: /* Nothing to do with other status */ break; } list_del(&lps->qe); list_add_tail(&lps->qe, &mod->lps_active_q); bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); } static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count) { struct bfa_s *bfa = first_lps->bfa; struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); struct list_head *qe, *qe_next; struct bfa_lps_s *lps; bfa_trc(bfa, count); qe = bfa_q_next(first_lps); while (count && qe) { qe_next = bfa_q_next(qe); lps = (struct bfa_lps_s *)qe; bfa_trc(bfa, lps->bfa_tag); lps->status = first_lps->status; list_del(&lps->qe); list_add_tail(&lps->qe, &mod->lps_active_q); bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); qe = qe_next; count--; } } /* * Firmware logout response */ static void bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp) { struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); struct bfa_lps_s *lps; WARN_ON(rsp->bfa_tag >= mod->num_lps); lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag); bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); } /* * Firmware received a Clear virtual link request (for FCoE) */ static void bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl) { struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); struct bfa_lps_s *lps; lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag); bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL); } /* * Space is available in request queue, resume queueing request to firmware. 
 */
static void
bfa_lps_reqq_resume(void *lps_arg)
{
	struct bfa_lps_s	*lps = lps_arg;

	bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
}

/*
 * lps is freed -- triggered by vport delete
 */
static void
bfa_lps_free(struct bfa_lps_s *lps)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(lps->bfa);

	lps->lp_pid = 0;
	/* move the lps from whichever queue it is on back to the free list */
	list_del(&lps->qe);
	list_add_tail(&lps->qe, &mod->lps_free_q);
}

/*
 * send login request to firmware
 */
static void
bfa_lps_send_login(struct bfa_lps_s *lps)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(lps->bfa);
	struct bfi_lps_login_req_s	*m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);	/* state machine only sends when reqq has space */

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
		bfa_fn_lpu(lps->bfa));

	m->bfa_tag	= lps->bfa_tag;
	m->alpa		= lps->alpa;
	m->pdu_size	= cpu_to_be16(lps->pdusz);
	m->pwwn		= lps->pwwn;
	m->nwwn		= lps->nwwn;
	m->fdisc	= lps->fdisc;
	m->auth_en	= lps->auth_en;
	m->bb_scn	= lps->bb_scn;

	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
	/* track the in-flight login on the module's login queue */
	list_del(&lps->qe);
	list_add_tail(&lps->qe, &mod->lps_login_q);
}

/*
 * send logout request to firmware
 */
static void
bfa_lps_send_logout(struct bfa_lps_s *lps)
{
	struct bfi_lps_logout_req_s *m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);	/* state machine only sends when reqq has space */

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
		bfa_fn_lpu(lps->bfa));

	m->fw_tag = lps->fw_tag;
	m->port_name = lps->pwwn;
	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
}

/*
 * send n2n pid set request to firmware
 */
static void
bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
{
	struct bfi_lps_n2n_pid_req_s *m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);	/* state machine only sends when reqq has space */

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
		bfa_fn_lpu(lps->bfa));

	m->fw_tag = lps->fw_tag;
	m->lp_pid = lps->lp_pid;
	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
}

/*
 * Indirect login completion handler for non-fcs
 */
static void
bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
{
	struct bfa_lps_s	*lps	= arg;

	if (!complete)
		return;

	/* FDISC and FLOGI completions go to different driver callbacks */
	if (lps->fdisc)
		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
	else
		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
}

/*
 * Login completion handler -- direct call for fcs, queue for others
 */
static void
bfa_lps_login_comp(struct bfa_lps_s *lps)
{
	if (!lps->bfa->fcs) {
		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
			lps);
		return;
	}

	if (lps->fdisc)
		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
	else
		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
}

/*
 * Indirect logout completion handler for non-fcs
 */
static void
bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
{
	struct bfa_lps_s	*lps	= arg;

	if (!complete)
		return;

	/* only FDISC logouts are reported back to the driver */
	if (lps->fdisc)
		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
}

/*
 * Logout completion handler -- direct call for fcs, queue for others
 */
static void
bfa_lps_logout_comp(struct bfa_lps_s *lps)
{
	if (!lps->bfa->fcs) {
		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
			lps);
		return;
	}
	if (lps->fdisc)
		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
}

/*
 * Clear virtual link completion handler for non-fcs
 */
static void
bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
{
	struct bfa_lps_s *lps	= arg;

	if (!complete)
		return;

	/* Clear virtual link to base port will result in link down */
	if (lps->fdisc)
		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
}

/*
 * Received Clear virtual link event -- direct call for fcs,
 * queue for others
 */
static void
bfa_lps_cvl_event(struct bfa_lps_s *lps)
{
	if (!lps->bfa->fcs) {
		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
			lps);
		return;
	}

	/* Clear virtual link to base port will result in link down */
	if (lps->fdisc)
		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
}

/*
 * lps_public BFA LPS public functions
 */
u32
bfa_lps_get_max_vport(struct bfa_s *bfa)
{
	/* the vport limit differs between CT and CB device ids */
	if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
		return BFA_LPS_MAX_VPORTS_SUPP_CT;
	else
		return BFA_LPS_MAX_VPORTS_SUPP_CB;
}

/*
 * Allocate a lport service tag.
 */
struct bfa_lps_s *
bfa_lps_alloc(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps = NULL;

	bfa_q_deq(&mod->lps_free_q, &lps);

	if (lps == NULL)
		return NULL;

	/* newly-allocated lps starts out on the active queue, in init state */
	list_add_tail(&lps->qe, &mod->lps_active_q);

	bfa_sm_set_state(lps, bfa_lps_sm_init);
	return lps;
}

/*
 * Free lport service tag. This can be called anytime after an alloc.
 * No need to wait for any pending login/logout completions.
 */
void
bfa_lps_delete(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
}

/*
 * Initiate a lport login.
 */
void
bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
	wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en, uint8_t bb_scn)
{
	lps->uarg	= uarg;
	lps->alpa	= alpa;
	lps->pdusz	= pdusz;
	lps->pwwn	= pwwn;
	lps->nwwn	= nwwn;
	lps->fdisc	= BFA_FALSE;
	lps->auth_en	= auth_en;
	lps->bb_scn	= bb_scn;
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}

/*
 * Initiate a lport fdisc login.
 */
void
bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
	wwn_t nwwn)
{
	lps->uarg	= uarg;
	/* FDISC is issued with ALPA 0 and authentication disabled */
	lps->alpa	= 0;
	lps->pdusz	= pdusz;
	lps->pwwn	= pwwn;
	lps->nwwn	= nwwn;
	lps->fdisc	= BFA_TRUE;
	lps->auth_en	= BFA_FALSE;
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}

/*
 * Initiate a lport FDISC logout.
 */
void
bfa_lps_fdisclogo(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}

u8
bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);

	return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
}

/*
 * Return lport services tag given the pid
 */
u8
bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;
	int			i;

	for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
		if (lps->lp_pid == pid)
			return lps->bfa_tag;
	}

	/* Return base port tag anyway */
	return 0;
}

/*
 * return port id assigned to the base lport
 */
u32
bfa_lps_get_base_pid(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);

	/* tag 0 is the base lport */
	return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
}

/*
 * Set PID in case of n2n (which is assigned during PLOGI)
 */
void
bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, n2n_pid);

	lps->lp_pid = n2n_pid;
	bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
}

/*
 * LPS firmware message class handler.
*/ void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m) { union bfi_lps_i2h_msg_u msg; bfa_trc(bfa, m->mhdr.msg_id); msg.msg = m; switch (m->mhdr.msg_id) { case BFI_LPS_I2H_LOGIN_RSP: bfa_lps_login_rsp(bfa, msg.login_rsp); break; case BFI_LPS_I2H_LOGOUT_RSP: bfa_lps_logout_rsp(bfa, msg.logout_rsp); break; case BFI_LPS_I2H_CVL_EVENT: bfa_lps_rx_cvl_event(bfa, msg.cvl_event); break; default: bfa_trc(bfa, m->mhdr.msg_id); WARN_ON(1); } } static void bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event) { struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; struct bfa_aen_entry_s *aen_entry; bfad_get_aen_entry(bfad, aen_entry); if (!aen_entry) return; aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa); aen_entry->aen_data.port.pwwn = fcport->pwwn; /* Send the AEN notification */ bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq, BFA_AEN_CAT_PORT, event); } /* * FC PORT state machine functions */ static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_START: /* * Start event after IOC is configured and BFA is started. */ fcport->use_flash_cfg = BFA_TRUE; if (bfa_fcport_send_enable(fcport)) { bfa_trc(fcport->bfa, BFA_TRUE); bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); } else { bfa_trc(fcport->bfa, BFA_FALSE); bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); } break; case BFA_FCPORT_SM_ENABLE: /* * Port is persistently configured to be in enabled state. Do * not change state. Port enabling is done when START event is * received. */ break; case BFA_FCPORT_SM_DISABLE: /* * If a port is persistently configured to be disabled, the * first event will a port disable request. 
*/ bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); break; case BFA_FCPORT_SM_HWFAIL: bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); break; default: bfa_sm_fault(fcport->bfa, event); } } static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { char pwwn_buf[BFA_STRING_32]; struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_QRESUME: bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); bfa_fcport_send_enable(fcport); break; case BFA_FCPORT_SM_STOP: bfa_reqq_wcancel(&fcport->reqq_wait); bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); break; case BFA_FCPORT_SM_ENABLE: /* * Already enable is in progress. */ break; case BFA_FCPORT_SM_DISABLE: /* * Just send disable request to firmware when room becomes * available in request queue. */ bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); bfa_reqq_wcancel(&fcport->reqq_wait); bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); wwn2str(pwwn_buf, fcport->pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port disabled: WWN = %s\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); break; case BFA_FCPORT_SM_LINKUP: case BFA_FCPORT_SM_LINKDOWN: /* * Possible to get link events when doing back-to-back * enable/disables. 
*/ break; case BFA_FCPORT_SM_HWFAIL: bfa_reqq_wcancel(&fcport->reqq_wait); bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); break; default: bfa_sm_fault(fcport->bfa, event); } } static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { char pwwn_buf[BFA_STRING_32]; struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_FWRSP: case BFA_FCPORT_SM_LINKDOWN: bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown); break; case BFA_FCPORT_SM_LINKUP: bfa_fcport_update_linkinfo(fcport); bfa_sm_set_state(fcport, bfa_fcport_sm_linkup); WARN_ON(!fcport->event_cbfn); bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE); break; case BFA_FCPORT_SM_ENABLE: /* * Already being enabled. */ break; case BFA_FCPORT_SM_DISABLE: if (bfa_fcport_send_disable(fcport)) bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); else bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait); bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); wwn2str(pwwn_buf, fcport->pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port disabled: WWN = %s\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); break; case BFA_FCPORT_SM_STOP: bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); break; case BFA_FCPORT_SM_HWFAIL: bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); break; default: bfa_sm_fault(fcport->bfa, event); } } static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event; char pwwn_buf[BFA_STRING_32]; struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_LINKUP: bfa_fcport_update_linkinfo(fcport); bfa_sm_set_state(fcport, bfa_fcport_sm_linkup); WARN_ON(!fcport->event_cbfn); bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup"); if 
(!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { bfa_trc(fcport->bfa, pevent->link_state.vc_fcf.fcf.fipenabled); bfa_trc(fcport->bfa, pevent->link_state.vc_fcf.fcf.fipfailed); if (pevent->link_state.vc_fcf.fcf.fipfailed) bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_FIP_FCF_DISC, 0, "FIP FCF Discovery Failed"); else bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_FIP_FCF_DISC, 0, "FIP FCF Discovered"); } bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE); wwn2str(pwwn_buf, fcport->pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port online: WWN = %s\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE); /* If QoS is enabled and it is not online, send AEN */ if (fcport->cfg.qos_enabled && fcport->qos_attr.state != BFA_QOS_ONLINE) bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG); break; case BFA_FCPORT_SM_LINKDOWN: /* * Possible to get link down event. */ break; case BFA_FCPORT_SM_ENABLE: /* * Already enabled. */ break; case BFA_FCPORT_SM_DISABLE: if (bfa_fcport_send_disable(fcport)) bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); else bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait); bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); wwn2str(pwwn_buf, fcport->pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port disabled: WWN = %s\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); break; case BFA_FCPORT_SM_STOP: bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); break; case BFA_FCPORT_SM_HWFAIL: bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); break; default: bfa_sm_fault(fcport->bfa, event); } } static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { char pwwn_buf[BFA_STRING_32]; struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_ENABLE: /* * Already enabled. 
*/ break; case BFA_FCPORT_SM_DISABLE: if (bfa_fcport_send_disable(fcport)) bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); else bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait); bfa_fcport_reset_linkinfo(fcport); bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); wwn2str(pwwn_buf, fcport->pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port offline: WWN = %s\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port disabled: WWN = %s\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); break; case BFA_FCPORT_SM_LINKDOWN: bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown); bfa_fcport_reset_linkinfo(fcport); bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown"); wwn2str(pwwn_buf, fcport->pwwn); if (BFA_PORT_IS_DISABLED(fcport->bfa)) { BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port offline: WWN = %s\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); } else { BFA_LOG(KERN_ERR, bfad, bfa_log_level, "Base port (WWN = %s) " "lost fabric connectivity\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); } break; case BFA_FCPORT_SM_STOP: bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); bfa_fcport_reset_linkinfo(fcport); wwn2str(pwwn_buf, fcport->pwwn); if (BFA_PORT_IS_DISABLED(fcport->bfa)) { BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port offline: WWN = %s\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); } else { BFA_LOG(KERN_ERR, bfad, bfa_log_level, "Base port (WWN = %s) " "lost fabric connectivity\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); } break; case BFA_FCPORT_SM_HWFAIL: bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); bfa_fcport_reset_linkinfo(fcport); bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); 
wwn2str(pwwn_buf, fcport->pwwn); if (BFA_PORT_IS_DISABLED(fcport->bfa)) { BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port offline: WWN = %s\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); } else { BFA_LOG(KERN_ERR, bfad, bfa_log_level, "Base port (WWN = %s) " "lost fabric connectivity\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); } break; default: bfa_sm_fault(fcport->bfa, event); } } static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_QRESUME: bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); bfa_fcport_send_disable(fcport); break; case BFA_FCPORT_SM_STOP: bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); bfa_reqq_wcancel(&fcport->reqq_wait); break; case BFA_FCPORT_SM_ENABLE: bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait); break; case BFA_FCPORT_SM_DISABLE: /* * Already being disabled. */ break; case BFA_FCPORT_SM_LINKUP: case BFA_FCPORT_SM_LINKDOWN: /* * Possible to get link events when doing back-to-back * enable/disables. 
*/ break; case BFA_FCPORT_SM_HWFAIL: bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); bfa_reqq_wcancel(&fcport->reqq_wait); break; default: bfa_sm_fault(fcport->bfa, event); } } static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_QRESUME: bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); bfa_fcport_send_disable(fcport); if (bfa_fcport_send_enable(fcport)) bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); else bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); break; case BFA_FCPORT_SM_STOP: bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); bfa_reqq_wcancel(&fcport->reqq_wait); break; case BFA_FCPORT_SM_ENABLE: break; case BFA_FCPORT_SM_DISABLE: bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait); break; case BFA_FCPORT_SM_LINKUP: case BFA_FCPORT_SM_LINKDOWN: /* * Possible to get link events when doing back-to-back * enable/disables. */ break; case BFA_FCPORT_SM_HWFAIL: bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); bfa_reqq_wcancel(&fcport->reqq_wait); break; default: bfa_sm_fault(fcport->bfa, event); } } static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { char pwwn_buf[BFA_STRING_32]; struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_FWRSP: bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); break; case BFA_FCPORT_SM_DISABLE: /* * Already being disabled. 
*/ break; case BFA_FCPORT_SM_ENABLE: if (bfa_fcport_send_enable(fcport)) bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); else bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_ENABLE, 0, "Port Enable"); wwn2str(pwwn_buf, fcport->pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port enabled: WWN = %s\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE); break; case BFA_FCPORT_SM_STOP: bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); break; case BFA_FCPORT_SM_LINKUP: case BFA_FCPORT_SM_LINKDOWN: /* * Possible to get link events when doing back-to-back * enable/disables. */ break; case BFA_FCPORT_SM_HWFAIL: bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); break; default: bfa_sm_fault(fcport->bfa, event); } } static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { char pwwn_buf[BFA_STRING_32]; struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_START: /* * Ignore start event for a port that is disabled. */ break; case BFA_FCPORT_SM_STOP: bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); break; case BFA_FCPORT_SM_ENABLE: if (bfa_fcport_send_enable(fcport)) bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); else bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_ENABLE, 0, "Port Enable"); wwn2str(pwwn_buf, fcport->pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port enabled: WWN = %s\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE); break; case BFA_FCPORT_SM_DISABLE: /* * Already disabled. 
*/ break; case BFA_FCPORT_SM_HWFAIL: bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); break; default: bfa_sm_fault(fcport->bfa, event); } } static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_START: if (bfa_fcport_send_enable(fcport)) bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); else bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); break; default: /* * Ignore all other events. */ ; } } /* * Port is enabled. IOC is down/failed. */ static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_START: if (bfa_fcport_send_enable(fcport)) bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); else bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); break; default: /* * Ignore all events. */ ; } } /* * Port is disabled. IOC is down/failed. */ static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_START: bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); break; case BFA_FCPORT_SM_ENABLE: bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); break; default: /* * Ignore all events. 
*/ ; } } /* * Link state is down */ static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event) { bfa_trc(ln->fcport->bfa, event); switch (event) { case BFA_FCPORT_LN_SM_LINKUP: bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf); bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP); break; default: bfa_sm_fault(ln->fcport->bfa, event); } } /* * Link state is waiting for down notification */ static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event) { bfa_trc(ln->fcport->bfa, event); switch (event) { case BFA_FCPORT_LN_SM_LINKUP: bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf); break; case BFA_FCPORT_LN_SM_NOTIFICATION: bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn); break; default: bfa_sm_fault(ln->fcport->bfa, event); } } /* * Link state is waiting for down notification and there is a pending up */ static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event) { bfa_trc(ln->fcport->bfa, event); switch (event) { case BFA_FCPORT_LN_SM_LINKDOWN: bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf); break; case BFA_FCPORT_LN_SM_NOTIFICATION: bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf); bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP); break; default: bfa_sm_fault(ln->fcport->bfa, event); } } /* * Link state is up */ static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event) { bfa_trc(ln->fcport->bfa, event); switch (event) { case BFA_FCPORT_LN_SM_LINKDOWN: bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf); bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN); break; default: bfa_sm_fault(ln->fcport->bfa, event); } } /* * Link state is waiting for up notification */ static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event) { bfa_trc(ln->fcport->bfa, event); switch (event) { case BFA_FCPORT_LN_SM_LINKDOWN: bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf); break; case BFA_FCPORT_LN_SM_NOTIFICATION: bfa_sm_set_state(ln, 
bfa_fcport_ln_sm_up); break; default: bfa_sm_fault(ln->fcport->bfa, event); } } /* * Link state is waiting for up notification and there is a pending down */ static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event) { bfa_trc(ln->fcport->bfa, event); switch (event) { case BFA_FCPORT_LN_SM_LINKUP: bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf); break; case BFA_FCPORT_LN_SM_NOTIFICATION: bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf); bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN); break; default: bfa_sm_fault(ln->fcport->bfa, event); } } /* * Link state is waiting for up notification and there are pending down and up */ static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event) { bfa_trc(ln->fcport->bfa, event); switch (event) { case BFA_FCPORT_LN_SM_LINKDOWN: bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf); break; case BFA_FCPORT_LN_SM_NOTIFICATION: bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf); bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN); break; default: bfa_sm_fault(ln->fcport->bfa, event); } } static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete) { struct bfa_fcport_ln_s *ln = cbarg; if (complete) ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event); else bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION); } /* * Send SCN notification to upper layers. 
* trunk - false if caller is fcport to ignore fcport event in trunked mode */ static void bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event, bfa_boolean_t trunk) { if (fcport->cfg.trunked && !trunk) return; switch (event) { case BFA_PORT_LINKUP: bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP); break; case BFA_PORT_LINKDOWN: bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN); break; default: WARN_ON(1); } } static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event) { struct bfa_fcport_s *fcport = ln->fcport; if (fcport->bfa->fcs) { fcport->event_cbfn(fcport->event_cbarg, event); bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION); } else { ln->ln_event = event; bfa_cb_queue(fcport->bfa, &ln->ln_qe, __bfa_cb_fcport_event, ln); } } #define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \ BFA_CACHELINE_SZ)) static void bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, struct bfa_s *bfa) { struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa); bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ); } static void bfa_fcport_qresume(void *cbarg) { struct bfa_fcport_s *fcport = cbarg; bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME); } static void bfa_fcport_mem_claim(struct bfa_fcport_s *fcport) { struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma; fcport->stats_kva = bfa_mem_dma_virt(fcport_dma); fcport->stats_pa = bfa_mem_dma_phys(fcport_dma); fcport->stats = (union bfa_fcport_stats_u *) bfa_mem_dma_virt(fcport_dma); } /* * Memory initialization. 
*/ static void bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); struct bfa_port_cfg_s *port_cfg = &fcport->cfg; struct bfa_fcport_ln_s *ln = &fcport->ln; struct timeval tv; fcport->bfa = bfa; ln->fcport = fcport; bfa_fcport_mem_claim(fcport); bfa_sm_set_state(fcport, bfa_fcport_sm_uninit); bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn); /* * initialize time stamp for stats reset */ do_gettimeofday(&tv); fcport->stats_reset_time = tv.tv_sec; /* * initialize and set default configuration */ port_cfg->topology = BFA_PORT_TOPOLOGY_P2P; port_cfg->speed = BFA_PORT_SPEED_AUTO; port_cfg->trunked = BFA_FALSE; port_cfg->maxfrsize = 0; port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS; INIT_LIST_HEAD(&fcport->stats_pending_q); INIT_LIST_HEAD(&fcport->statsclr_pending_q); bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport); } static void bfa_fcport_detach(struct bfa_s *bfa) { } /* * Called when IOC is ready. */ static void bfa_fcport_start(struct bfa_s *bfa) { bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START); } /* * Called before IOC is stopped. */ static void bfa_fcport_stop(struct bfa_s *bfa) { bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP); bfa_trunk_iocdisable(bfa); } /* * Called when IOC failure is detected. 
*/ static void bfa_fcport_iocdisable(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL); bfa_trunk_iocdisable(bfa); } static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport) { struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event; struct bfa_fcport_trunk_s *trunk = &fcport->trunk; fcport->speed = pevent->link_state.speed; fcport->topology = pevent->link_state.topology; if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) fcport->myalpa = 0; /* QoS Details */ fcport->qos_attr = pevent->link_state.qos_attr; fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr; /* * update trunk state if applicable */ if (!fcport->cfg.trunked) trunk->attr.state = BFA_TRUNK_DISABLED; /* update FCoE specific */ fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan); bfa_trc(fcport->bfa, fcport->speed); bfa_trc(fcport->bfa, fcport->topology); } static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport) { fcport->speed = BFA_PORT_SPEED_UNKNOWN; fcport->topology = BFA_PORT_TOPOLOGY_NONE; fcport->bbsc_op_state = BFA_FALSE; } /* * Send port enable message to firmware. */ static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport) { struct bfi_fcport_enable_req_s *m; /* * Increment message tag before queue check, so that responses to old * requests are discarded. 
*/ fcport->msgtag++; /* * check for room in queue to send request now */ m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); if (!m) { bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, &fcport->reqq_wait); return BFA_FALSE; } bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ, bfa_fn_lpu(fcport->bfa)); m->nwwn = fcport->nwwn; m->pwwn = fcport->pwwn; m->port_cfg = fcport->cfg; m->msgtag = fcport->msgtag; m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize); m->use_flash_cfg = fcport->use_flash_cfg; bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa); bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo); bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi); /* * queue I/O message to firmware */ bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh); return BFA_TRUE; } /* * Send port disable message to firmware. */ static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport) { struct bfi_fcport_req_s *m; /* * Increment message tag before queue check, so that responses to old * requests are discarded. 
*/ fcport->msgtag++; /* * check for room in queue to send request now */ m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); if (!m) { bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, &fcport->reqq_wait); return BFA_FALSE; } bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ, bfa_fn_lpu(fcport->bfa)); m->msgtag = fcport->msgtag; /* * queue I/O message to firmware */ bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh); return BFA_TRUE; } static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport) { fcport->pwwn = fcport->bfa->ioc.attr->pwwn; fcport->nwwn = fcport->bfa->ioc.attr->nwwn; bfa_trc(fcport->bfa, fcport->pwwn); bfa_trc(fcport->bfa, fcport->nwwn); } static void bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d, struct bfa_qos_stats_s *s) { u32 *dip = (u32 *) d; __be32 *sip = (__be32 *) s; int i; /* Now swap the 32 bit fields */ for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i) dip[i] = be32_to_cpu(sip[i]); } static void bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d, struct bfa_fcoe_stats_s *s) { u32 *dip = (u32 *) d; __be32 *sip = (__be32 *) s; int i; for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32)); i = i + 2) { #ifdef __BIG_ENDIAN dip[i] = be32_to_cpu(sip[i]); dip[i + 1] = be32_to_cpu(sip[i + 1]); #else dip[i] = be32_to_cpu(sip[i + 1]); dip[i + 1] = be32_to_cpu(sip[i]); #endif } } static void __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete) { struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg; struct bfa_cb_pending_q_s *cb; struct list_head *qe, *qen; union bfa_fcport_stats_u *ret; if (complete) { struct timeval tv; if (fcport->stats_status == BFA_STATUS_OK) do_gettimeofday(&tv); list_for_each_safe(qe, qen, &fcport->stats_pending_q) { bfa_q_deq(&fcport->stats_pending_q, &qe); cb = (struct bfa_cb_pending_q_s *)qe; if (fcport->stats_status == BFA_STATUS_OK) { ret = (union bfa_fcport_stats_u *)cb->data; /* Swap FC QoS or FCoE stats */ if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) 
bfa_fcport_qos_stats_swap(&ret->fcqos, &fcport->stats->fcqos); else { bfa_fcport_fcoe_stats_swap(&ret->fcoe, &fcport->stats->fcoe); ret->fcoe.secs_reset = tv.tv_sec - fcport->stats_reset_time; } } bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe, fcport->stats_status); } fcport->stats_status = BFA_STATUS_OK; } else { INIT_LIST_HEAD(&fcport->stats_pending_q); fcport->stats_status = BFA_STATUS_OK; } } static void bfa_fcport_stats_get_timeout(void *cbarg) { struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; bfa_trc(fcport->bfa, fcport->stats_qfull); if (fcport->stats_qfull) { bfa_reqq_wcancel(&fcport->stats_reqq_wait); fcport->stats_qfull = BFA_FALSE; } fcport->stats_status = BFA_STATUS_ETIMER; __bfa_cb_fcport_stats_get(fcport, BFA_TRUE); } static void bfa_fcport_send_stats_get(void *cbarg) { struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; struct bfi_fcport_req_s *msg; msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); if (!msg) { fcport->stats_qfull = BFA_TRUE; bfa_reqq_winit(&fcport->stats_reqq_wait, bfa_fcport_send_stats_get, fcport); bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, &fcport->stats_reqq_wait); return; } fcport->stats_qfull = BFA_FALSE; memset(msg, 0, sizeof(struct bfi_fcport_req_s)); bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ, bfa_fn_lpu(fcport->bfa)); bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh); } static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete) { struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; struct bfa_cb_pending_q_s *cb; struct list_head *qe, *qen; if (complete) { struct timeval tv; /* * re-initialize time stamp for stats reset */ do_gettimeofday(&tv); fcport->stats_reset_time = tv.tv_sec; list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) { bfa_q_deq(&fcport->statsclr_pending_q, &qe); cb = (struct bfa_cb_pending_q_s *)qe; bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe, fcport->stats_status); } fcport->stats_status = BFA_STATUS_OK; } else { 
INIT_LIST_HEAD(&fcport->statsclr_pending_q); fcport->stats_status = BFA_STATUS_OK; } } static void bfa_fcport_stats_clr_timeout(void *cbarg) { struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; bfa_trc(fcport->bfa, fcport->stats_qfull); if (fcport->stats_qfull) { bfa_reqq_wcancel(&fcport->stats_reqq_wait); fcport->stats_qfull = BFA_FALSE; } fcport->stats_status = BFA_STATUS_ETIMER; __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE); } static void bfa_fcport_send_stats_clear(void *cbarg) { struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; struct bfi_fcport_req_s *msg; msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); if (!msg) { fcport->stats_qfull = BFA_TRUE; bfa_reqq_winit(&fcport->stats_reqq_wait, bfa_fcport_send_stats_clear, fcport); bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, &fcport->stats_reqq_wait); return; } fcport->stats_qfull = BFA_FALSE; memset(msg, 0, sizeof(struct bfi_fcport_req_s)); bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ, bfa_fn_lpu(fcport->bfa)); bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh); } /* * Handle trunk SCN event from firmware. 
*/ static void bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn) { struct bfa_fcport_trunk_s *trunk = &fcport->trunk; struct bfi_fcport_trunk_link_s *tlink; struct bfa_trunk_link_attr_s *lattr; enum bfa_trunk_state state_prev; int i; int link_bm = 0; bfa_trc(fcport->bfa, fcport->cfg.trunked); WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE && scn->trunk_state != BFA_TRUNK_OFFLINE); bfa_trc(fcport->bfa, trunk->attr.state); bfa_trc(fcport->bfa, scn->trunk_state); bfa_trc(fcport->bfa, scn->trunk_speed); /* * Save off new state for trunk attribute query */ state_prev = trunk->attr.state; if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED)) trunk->attr.state = scn->trunk_state; trunk->attr.speed = scn->trunk_speed; for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) { lattr = &trunk->attr.link_attr[i]; tlink = &scn->tlink[i]; lattr->link_state = tlink->state; lattr->trunk_wwn = tlink->trunk_wwn; lattr->fctl = tlink->fctl; lattr->speed = tlink->speed; lattr->deskew = be32_to_cpu(tlink->deskew); if (tlink->state == BFA_TRUNK_LINK_STATE_UP) { fcport->speed = tlink->speed; fcport->topology = BFA_PORT_TOPOLOGY_P2P; link_bm |= 1 << i; } bfa_trc(fcport->bfa, lattr->link_state); bfa_trc(fcport->bfa, lattr->trunk_wwn); bfa_trc(fcport->bfa, lattr->fctl); bfa_trc(fcport->bfa, lattr->speed); bfa_trc(fcport->bfa, lattr->deskew); } switch (link_bm) { case 3: bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)"); break; case 2: bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)"); break; case 1: bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)"); break; default: bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_TRUNK_SCN, 0, "Trunk down"); } /* * Notify upper layers if trunk state changed. 
*/ if ((state_prev != trunk->attr.state) || (scn->trunk_state == BFA_TRUNK_OFFLINE)) { bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ? BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE); } } static void bfa_trunk_iocdisable(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); int i = 0; /* * In trunked mode, notify upper layers that link is down */ if (fcport->cfg.trunked) { if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE) bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE); fcport->trunk.attr.state = BFA_TRUNK_OFFLINE; fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN; for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) { fcport->trunk.attr.link_attr[i].trunk_wwn = 0; fcport->trunk.attr.link_attr[i].fctl = BFA_TRUNK_LINK_FCTL_NORMAL; fcport->trunk.attr.link_attr[i].link_state = BFA_TRUNK_LINK_STATE_DN_LINKDN; fcport->trunk.attr.link_attr[i].speed = BFA_PORT_SPEED_UNKNOWN; fcport->trunk.attr.link_attr[i].deskew = 0; } } } /* * Called to initialize port attributes */ void bfa_fcport_init(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); /* * Initialize port attributes from IOC hardware data. */ bfa_fcport_set_wwns(fcport); if (fcport->cfg.maxfrsize == 0) fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc); fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc); fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc); if (bfa_fcport_is_pbcdisabled(bfa)) bfa->modules.port.pbc_disabled = BFA_TRUE; WARN_ON(!fcport->cfg.maxfrsize); WARN_ON(!fcport->cfg.rx_bbcredit); WARN_ON(!fcport->speed_sup); } /* * Firmware message handler. 
*/ void bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); union bfi_fcport_i2h_msg_u i2hmsg; i2hmsg.msg = msg; fcport->event_arg.i2hmsg = i2hmsg; bfa_trc(bfa, msg->mhdr.msg_id); bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm)); switch (msg->mhdr.msg_id) { case BFI_FCPORT_I2H_ENABLE_RSP: if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) { if (fcport->use_flash_cfg) { fcport->cfg = i2hmsg.penable_rsp->port_cfg; fcport->cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize); fcport->cfg.path_tov = cpu_to_be16(fcport->cfg.path_tov); fcport->cfg.q_depth = cpu_to_be16(fcport->cfg.q_depth); if (fcport->cfg.trunked) fcport->trunk.attr.state = BFA_TRUNK_OFFLINE; else fcport->trunk.attr.state = BFA_TRUNK_DISABLED; fcport->use_flash_cfg = BFA_FALSE; } if (fcport->cfg.qos_enabled) fcport->qos_attr.state = BFA_QOS_OFFLINE; else fcport->qos_attr.state = BFA_QOS_DISABLED; bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); } break; case BFI_FCPORT_I2H_DISABLE_RSP: if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); break; case BFI_FCPORT_I2H_EVENT: if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP) bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP); else bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN); break; case BFI_FCPORT_I2H_TRUNK_SCN: bfa_trunk_scn(fcport, i2hmsg.trunk_scn); break; case BFI_FCPORT_I2H_STATS_GET_RSP: /* * check for timer pop before processing the rsp */ if (list_empty(&fcport->stats_pending_q) || (fcport->stats_status == BFA_STATUS_ETIMER)) break; bfa_timer_stop(&fcport->timer); fcport->stats_status = i2hmsg.pstatsget_rsp->status; __bfa_cb_fcport_stats_get(fcport, BFA_TRUE); break; case BFI_FCPORT_I2H_STATS_CLEAR_RSP: /* * check for timer pop before processing the rsp */ if (list_empty(&fcport->statsclr_pending_q) || (fcport->stats_status == BFA_STATUS_ETIMER)) break; bfa_timer_stop(&fcport->timer); fcport->stats_status = BFA_STATUS_OK; 
__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE); break; case BFI_FCPORT_I2H_ENABLE_AEN: bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE); break; case BFI_FCPORT_I2H_DISABLE_AEN: bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE); break; default: WARN_ON(1); break; } } /* * Registered callback for port events. */ void bfa_fcport_event_register(struct bfa_s *bfa, void (*cbfn) (void *cbarg, enum bfa_port_linkstate event), void *cbarg) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); fcport->event_cbfn = cbfn; fcport->event_cbarg = cbarg; } bfa_status_t bfa_fcport_enable(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); if (bfa_fcport_is_pbcdisabled(bfa)) return BFA_STATUS_PBC; if (bfa_ioc_is_disabled(&bfa->ioc)) return BFA_STATUS_IOC_DISABLED; if (fcport->diag_busy) return BFA_STATUS_DIAG_BUSY; bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE); return BFA_STATUS_OK; } bfa_status_t bfa_fcport_disable(struct bfa_s *bfa) { if (bfa_fcport_is_pbcdisabled(bfa)) return BFA_STATUS_PBC; if (bfa_ioc_is_disabled(&bfa->ioc)) return BFA_STATUS_IOC_DISABLED; bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE); return BFA_STATUS_OK; } /* If PBC is disabled on port, return error */ bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); struct bfa_iocfc_s *iocfc = &bfa->iocfc; struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) { bfa_trc(bfa, fcport->pwwn); return BFA_STATUS_PBC; } return BFA_STATUS_OK; } /* * Configure port speed. 
*/ bfa_status_t bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); bfa_trc(bfa, speed); if (fcport->cfg.trunked == BFA_TRUE) return BFA_STATUS_TRUNK_ENABLED; if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) { bfa_trc(bfa, fcport->speed_sup); return BFA_STATUS_UNSUPP_SPEED; } /* Port speed entered needs to be checked */ if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) { /* For CT2, 1G is not supported */ if ((speed == BFA_PORT_SPEED_1GBPS) && (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) return BFA_STATUS_UNSUPP_SPEED; /* Already checked for Auto Speed and Max Speed supp */ if (!(speed == BFA_PORT_SPEED_1GBPS || speed == BFA_PORT_SPEED_2GBPS || speed == BFA_PORT_SPEED_4GBPS || speed == BFA_PORT_SPEED_8GBPS || speed == BFA_PORT_SPEED_16GBPS || speed == BFA_PORT_SPEED_AUTO)) return BFA_STATUS_UNSUPP_SPEED; } else { if (speed != BFA_PORT_SPEED_10GBPS) return BFA_STATUS_UNSUPP_SPEED; } fcport->cfg.speed = speed; return BFA_STATUS_OK; } /* * Get current speed. */ enum bfa_port_speed bfa_fcport_get_speed(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); return fcport->speed; } /* * Configure port topology. */ bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); bfa_trc(bfa, topology); bfa_trc(bfa, fcport->cfg.topology); switch (topology) { case BFA_PORT_TOPOLOGY_P2P: case BFA_PORT_TOPOLOGY_LOOP: case BFA_PORT_TOPOLOGY_AUTO: break; default: return BFA_STATUS_EINVAL; } fcport->cfg.topology = topology; return BFA_STATUS_OK; } /* * Get current topology. 
 */
enum bfa_port_topology
bfa_fcport_get_topology(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/* currently-negotiated topology, not the configured one */
	return fcport->topology;
}

/*
 * Configure a hard (fixed) AL_PA for loop operation.
 */
bfa_status_t
bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, alpa);
	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
	bfa_trc(bfa, fcport->cfg.hardalpa);

	fcport->cfg.cfg_hardalpa = BFA_TRUE;
	fcport->cfg.hardalpa = alpa;

	return BFA_STATUS_OK;
}

/*
 * Clear the hard AL_PA setting; the previous alpa value is left behind
 * but ignored once cfg_hardalpa is BFA_FALSE.
 */
bfa_status_t
bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
	bfa_trc(bfa, fcport->cfg.hardalpa);

	fcport->cfg.cfg_hardalpa = BFA_FALSE;
	return BFA_STATUS_OK;
}

/*
 * Fetch the configured hard AL_PA through *alpa; returns whether the
 * hard AL_PA feature is enabled.
 */
bfa_boolean_t
bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	*alpa = fcport->cfg.hardalpa;
	return fcport->cfg.cfg_hardalpa;
}

/* AL_PA the port actually acquired on the loop */
u8
bfa_fcport_get_myalpa(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	return fcport->myalpa;
}

/*
 * Configure maximum receive frame size. Must lie within
 * [FC_MIN_PDUSZ, FC_MAX_PDUSZ] and be a power of two unless it is
 * exactly the FC maximum (2112).
 */
bfa_status_t
bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, maxfrsize);
	bfa_trc(bfa, fcport->cfg.maxfrsize);

	/* with in range */
	if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
		return BFA_STATUS_INVLD_DFSZ;

	/* power of 2, if not the max frame size of 2112 */
	if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
		return BFA_STATUS_INVLD_DFSZ;

	fcport->cfg.maxfrsize = maxfrsize;
	return BFA_STATUS_OK;
}

u16
bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	return fcport->cfg.maxfrsize;
}

/* configured receive buffer-to-buffer credit count */
u8
bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	return fcport->cfg.rx_bbcredit;
}

/*
 * Set transmit BB-credit and BB_SCN; a non-zero bb_scn also marks the
 * BB-credit-recovery feature operational (continues on next statement).
 */
void
bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/* NOTE(review): tx_bbcredit is truncated to 8 bits here */
	fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
	fcport->cfg.bb_scn = bb_scn;
	if (bb_scn)
		fcport->bbsc_op_state = BFA_TRUE;
}

/*
 * Get port attributes.
 */
wwn_t
bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/* node WWN when 'node' is true, otherwise port WWN */
	if (node)
		return fcport->nwwn;
	else
		return fcport->pwwn;
}

/*
 * Fill *attr with a snapshot of the port: WWNs, configuration copy,
 * speed/topology/beacon state and an externally-visible port state that
 * folds in PBC-disable, IOC-disable and firmware-mismatch conditions.
 */
void
bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	memset(attr, 0, sizeof(struct bfa_port_attr_s));

	attr->nwwn = fcport->nwwn;
	attr->pwwn = fcport->pwwn;

	attr->factorypwwn = bfa->ioc.attr->mfg_pwwn;
	attr->factorynwwn = bfa->ioc.attr->mfg_nwwn;

	memcpy(&attr->pport_cfg, &fcport->cfg,
		sizeof(struct bfa_port_cfg_s));

	/* speed attributes */
	attr->pport_cfg.speed = fcport->cfg.speed;
	attr->speed_supported = fcport->speed_sup;
	attr->speed = fcport->speed;
	attr->cos_supported = FC_CLASS_3;

	/* topology attributes */
	attr->pport_cfg.topology = fcport->cfg.topology;
	attr->topology = fcport->topology;
	attr->pport_cfg.trunked = fcport->cfg.trunked;

	/* beacon attributes */
	attr->beacon = fcport->beacon;
	attr->link_e2e_beacon = fcport->link_e2e_beacon;

	attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
	attr->bbsc_op_status = fcport->bbsc_op_state;

	/* PBC Disabled State overrides the state-machine-derived state */
	if (bfa_fcport_is_pbcdisabled(bfa))
		attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
	else {
		if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
			attr->port_state = BFA_PORT_ST_IOCDIS;
		else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
			attr->port_state = BFA_PORT_ST_FWMISMATCH;
	}

	/* FCoE vlan */
	attr->fcoe_vlan = fcport->fcoe_vlan;
}

/* timeout (msec — presumably; matches bfa_timer_start usage below) */
#define BFA_FCPORT_STATS_TOV			1000

/*
 * Fetch port statistics (FCQoS or FCoE).
 */
bfa_status_t
bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (bfa_ioc_is_disabled(&bfa->ioc))
		return BFA_STATUS_IOC_DISABLED;

	/* a clear operation in flight blocks fetches */
	if (!list_empty(&fcport->statsclr_pending_q))
		return BFA_STATUS_DEVBUSY;

	if (list_empty(&fcport->stats_pending_q)) {
		/* first requester triggers the firmware fetch + timeout */
		list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
		bfa_fcport_send_stats_get(fcport);
		bfa_timer_start(bfa, &fcport->timer,
				bfa_fcport_stats_get_timeout,
				fcport, BFA_FCPORT_STATS_TOV);
	} else
		/* piggy-back on the fetch already in progress */
		list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);

	return BFA_STATUS_OK;
}

/*
 * Reset port statistics (FCQoS or FCoE).
 */
bfa_status_t
bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/* a fetch in flight blocks clears */
	if (!list_empty(&fcport->stats_pending_q))
		return BFA_STATUS_DEVBUSY;

	if (list_empty(&fcport->statsclr_pending_q)) {
		/* first requester triggers the firmware clear + timeout */
		list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
		bfa_fcport_send_stats_clear(fcport);
		bfa_timer_start(bfa, &fcport->timer,
				bfa_fcport_stats_clr_timeout,
				fcport, BFA_FCPORT_STATS_TOV);
	} else
		list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);

	return BFA_STATUS_OK;
}

/*
 * Fetch port attributes.
 */
bfa_boolean_t
bfa_fcport_is_disabled(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
		BFA_PORT_ST_DISABLED;

}

bfa_boolean_t
bfa_fcport_is_ratelim(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	return fcport->cfg.ratelimit ?
		BFA_TRUE : BFA_FALSE;
}

/*
 * Enable/Disable FAA feature in port config
 */
void
bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, state);

	/* takes effect on the next port enable — TODO confirm */
	fcport->cfg.faa_state = state;
}

/*
 * Get default minimum ratelim speed
 */
enum bfa_port_speed
bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, fcport->cfg.trl_def_speed);
	return fcport->cfg.trl_def_speed;

}

/*
 * Update beacon states. 'dev' is a struct bfa_s *; this signature matches
 * the generic beacon callback registered elsewhere.
 */
void
bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
		  bfa_boolean_t link_e2e_beacon)
{
	struct bfa_s *bfa = dev;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, beacon);
	bfa_trc(bfa, link_e2e_beacon);
	bfa_trc(bfa, fcport->beacon);
	bfa_trc(bfa, fcport->link_e2e_beacon);

	fcport->beacon = beacon;
	fcport->link_e2e_beacon = link_e2e_beacon;
}

/*
 * Link is up when either the non-trunked port reached the linkup state
 * or the trunk as a whole is online.
 */
bfa_boolean_t
bfa_fcport_is_linkup(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	return	(!fcport->cfg.trunked &&
		 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
		(fcport->cfg.trunked &&
		 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
}

bfa_boolean_t
bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	return fcport->cfg.qos_enabled;
}

bfa_boolean_t
bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	return fcport->cfg.trunked;
}

/*
 * Rport State machine functions
 */
/*
 * Beginning state, only online event expected.
 */
static void
bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_CREATE:
		bfa_stats(rp, sm_un_cr);
		bfa_sm_set_state(rp, bfa_rport_sm_created);
		break;

	default:
		bfa_stats(rp, sm_un_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}

/*
 * Created but not yet online. ONLINE pushes the create to firmware
 * (or parks in the qfull state when the request queue is full).
 */
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_cr_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_cr_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}

/*
 * Waiting for rport create response from firmware.
 */
static void
bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwc_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_online);
		bfa_rport_online_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* cannot delete until the create response arrives */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_OFFLINE:
		/* cannot go offline until the create response arrives */
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}

/*
 * Request queue is full, awaiting queue resume to send create request.
 */
static void
bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* queue has room again — retry the create */
		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		bfa_rport_send_fwcreate(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* nothing reached firmware yet, so free directly */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}

/*
 * Online state - normal parking state.
 */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	struct bfi_rport_qos_scn_s *qos_scn;

	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_on_off);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_on_del);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_on_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_SET_SPEED:
		bfa_rport_send_fwspeed(rp);
		break;

	case BFA_RPORT_SM_QOS_SCN:
		/* QoS state-change notification delivered via fw_msg */
		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
		rp->qos_attr = qos_scn->new_qos_attr;
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

		/* flow ids arrive big-endian; convert before comparing */
		qos_scn->old_qos_attr.qos_flow_id =
			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
		qos_scn->new_qos_attr.qos_flow_id =
			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);

		if (qos_scn->old_qos_attr.qos_flow_id !=
			qos_scn->new_qos_attr.qos_flow_id)
			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
						    qos_scn->old_qos_attr,
						    qos_scn->new_qos_attr);

		if (qos_scn->old_qos_attr.qos_priority !=
			qos_scn->new_qos_attr.qos_priority)
			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
						  qos_scn->old_qos_attr,
						  qos_scn->new_qos_attr);
		break;

	default:
		bfa_stats(rp, sm_on_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}

/*
 * Firmware rport is being deleted - awaiting f/w response.
 */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwd_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}

/* as above, but the delete request is still waiting for queue space */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}

/*
 * Offline state.
 */
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_off_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_off_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_off_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_off_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}

/*
 * Rport is deleted, waiting for firmware response to delete.
 */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}

/* deleting, but the fw delete request is still waiting for queue space */
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}

/*
 * Waiting for rport create response from firmware. A delete is pending.
*/ static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp, enum bfa_rport_event event) { bfa_trc(rp->bfa, rp->rport_tag); bfa_trc(rp->bfa, event); switch (event) { case BFA_RPORT_SM_FWRSP: bfa_stats(rp, sm_delp_fwrsp); if (bfa_rport_send_fwdelete(rp)) bfa_sm_set_state(rp, bfa_rport_sm_deleting); else bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull); break; case BFA_RPORT_SM_HWFAIL: bfa_stats(rp, sm_delp_hwf); bfa_sm_set_state(rp, bfa_rport_sm_uninit); bfa_rport_free(rp); break; default: bfa_stats(rp, sm_delp_unexp); bfa_sm_fault(rp->bfa, event); } } /* * Waiting for rport create response from firmware. Rport offline is pending. */ static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp, enum bfa_rport_event event) { bfa_trc(rp->bfa, rp->rport_tag); bfa_trc(rp->bfa, event); switch (event) { case BFA_RPORT_SM_FWRSP: bfa_stats(rp, sm_offp_fwrsp); if (bfa_rport_send_fwdelete(rp)) bfa_sm_set_state(rp, bfa_rport_sm_fwdelete); else bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull); break; case BFA_RPORT_SM_DELETE: bfa_stats(rp, sm_offp_del); bfa_sm_set_state(rp, bfa_rport_sm_delete_pending); break; case BFA_RPORT_SM_HWFAIL: bfa_stats(rp, sm_offp_hwf); bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); break; default: bfa_stats(rp, sm_offp_unexp); bfa_sm_fault(rp->bfa, event); } } /* * IOC h/w failed. 
 */
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		/* stay in iocdisable; just notify the owner */
		bfa_stats(rp, sm_iocd_off);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_iocd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		/* IOC came back — re-create the firmware rport */
		bfa_stats(rp, sm_iocd_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		break;

	default:
		bfa_stats(rp, sm_iocd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}

/*
 *  bfa_rport_private BFA rport private functions
 */

/* deferred-callback shim: invoke the driver's online callback */
static void
__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_rport_s *rp = cbarg;

	if (complete)
		bfa_cb_rport_online(rp->rport_drv);
}

/* deferred-callback shim: invoke the driver's offline callback */
static void
__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_rport_s *rp = cbarg;

	if (complete)
		bfa_cb_rport_offline(rp->rport_drv);
}

/* request queue has space again — resume the waiting state machine */
static void
bfa_rport_qresume(void *cbarg)
{
	struct bfa_rport_s	*rp = cbarg;

	bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
}

/* report kva memory needed for the rport table */
static void
bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);

	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
		cfg->fwcfg.num_rports = BFA_RPORT_MIN;

	/* kva memory */
	bfa_mem_kva_setup(minfo, rport_kva,
		cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
}

/* carve the rport array out of the pre-allocated kva block */
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16 i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);
	INIT_LIST_HEAD(&mod->rp_unused_q);

	rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
	mod->rps_list = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	/* rport count must be a non-zero power of two */
	WARN_ON(!mod->num_rports ||
		   (mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/*
		 * tag 0 is kept off the free list (reserved — presumably an
		 * invalid-tag sentinel; confirm against BFA_RPORT_FROM_TAG use)
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/*
	 * consume memory
	 */
	bfa_mem_kva_curp(mod) = (u8 *) rp;
}

static void
bfa_rport_detach(struct bfa_s *bfa)
{
}

static void
bfa_rport_start(struct bfa_s *bfa)
{
}

static void
bfa_rport_stop(struct bfa_s *bfa)
{
}

/* IOC failure: fail every active rport, recycle unused resources */
static void
bfa_rport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rport;
	struct list_head *qe, *qen;

	/* Enqueue unused rport resources to free_q */
	list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);

	list_for_each_safe(qe, qen, &mod->rp_active_q) {
		rport = (struct bfa_rport_s *) qe;
		bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
	}
}

/* pop an rport off the free list and put it on the active list */
static struct bfa_rport_s *
bfa_rport_alloc(struct bfa_rport_mod_s *mod)
{
	struct bfa_rport_s *rport;

	bfa_q_deq(&mod->rp_free_q, &rport);
	if (rport)
		list_add_tail(&rport->qe, &mod->rp_active_q);

	return rport;
}

/* return an active rport to the free list */
static void
bfa_rport_free(struct bfa_rport_s *rport)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);

	WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
	list_del(&rport->qe);
	list_add_tail(&rport->qe, &mod->rp_free_q);
}

/*
 * Post a CREATE request to firmware. Returns BFA_FALSE (and queues a
 * resume wait) when the request queue is full.
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
	struct bfi_rport_create_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
			bfa_fn_lpu(rp->bfa));
	m->bfa_handle = rp->rport_tag;
	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
	m->pid = rp->rport_info.pid;
	m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
	m->local_pid = rp->rport_info.local_pid;
	m->fc_class = rp->rport_info.fc_class;
	m->vf_en = rp->rport_info.vf_en;
	m->vf_id = rp->rport_info.vf_id;
	m->cisc = rp->rport_info.cisc;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}

/* post a DELETE request; BFA_FALSE + resume wait when queue is full */
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
	struct bfi_rport_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
			bfa_fn_lpu(rp->bfa));
	m->fw_handle = rp->fw_handle;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}

/*
 * Post a SET_SPEED request. Note: no resume wait on queue-full — the
 * speed update is simply dropped (best effort).
 */
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
	struct bfa_rport_speed_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_trc(rp->bfa, rp->rport_info.speed);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
			bfa_fn_lpu(rp->bfa));
	m->fw_handle = rp->fw_handle;
	m->speed = (u8)rp->rport_info.speed;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}



/*
 *  bfa_rport_public
 */

/*
 * Rport interrupt processing.
 */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_rport_i2h_msg_u msg;
	struct bfa_rport_s *rp;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_RPORT_I2H_CREATE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
		rp->fw_handle = msg.create_rsp->fw_handle;
		rp->qos_attr = msg.create_rsp->qos_attr;
		bfa_rport_set_lunmask(bfa, rp);
		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_DELETE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
		bfa_rport_unset_lunmask(bfa, rp);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_QOS_SCN:
		/* hand the raw message to the state machine via event_arg */
		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
		rp->event_arg.fw_msg = msg.qos_scn_evt;
		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}

/*
 * Firmware supports fewer rports than configured: park the surplus on
 * the unused queue so they are never handed out.
 */
void
bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
{
	struct bfa_rport_mod_s	*mod = BFA_RPORT_MOD(bfa);
	struct list_head	*qe;
	int	i;

	for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
		bfa_q_deq_tail(&mod->rp_free_q, &qe);
		list_add_tail(qe, &mod->rp_unused_q);
	}
}

/*
 *  bfa_rport_api
 */

/* allocate an rport and kick its state machine; NULL when exhausted */
struct bfa_rport_s *
bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
{
	struct bfa_rport_s *rp;

	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));

	if (rp == NULL)
		return NULL;

	rp->bfa = bfa;
	rp->rport_drv = rport_drv;
	memset(&rp->stats, 0, sizeof(rp->stats));

	WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);

	return rp;
}

void
bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
{
	WARN_ON(rport_info->max_frmsz == 0);

	/*
	 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
	 * responses. Default to minimum size.
	 */
	if (rport_info->max_frmsz == 0) {
		bfa_trc(rport->bfa, rport->rport_tag);
		rport_info->max_frmsz = FC_MIN_PDUSZ;
	}

	rport->rport_info = *rport_info;
	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
}

/* push a new operating speed to firmware (AUTO is not a valid value) */
void
bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
{
	WARN_ON(speed == 0);
	WARN_ON(speed == BFA_PORT_SPEED_AUTO);

	rport->rport_info.speed = speed;
	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
}

/* Set Rport LUN Mask */
void
bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
{
	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
	wwn_t	lp_wwn, rp_wwn;
	u8 lp_tag = (u8)rp->rport_info.lp_tag;

	/* NOTE(review): rport_drv is assumed to be a bfa_fcs_rport_s here */
	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;

	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
					rp->lun_mask = BFA_TRUE;
	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
}

/* Unset Rport LUN mask */
void
bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
{
	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
	wwn_t	lp_wwn, rp_wwn;

	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;

	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
					rp->lun_mask = BFA_FALSE;
	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
			BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
}

/*
 * SGPG related functions
 */

/*
 * Compute and return memory needed by FCP(im) module.
*/ static void bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, struct bfa_s *bfa) { struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa); struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa); struct bfa_mem_dma_s *seg_ptr; u16 nsegs, idx, per_seg_sgpg, num_sgpg; u32 sgpg_sz = sizeof(struct bfi_sgpg_s); if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN) cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN; else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX) cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX; num_sgpg = cfg->drvcfg.num_sgpgs; nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz); per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz); bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) { if (num_sgpg >= per_seg_sgpg) { num_sgpg -= per_seg_sgpg; bfa_mem_dma_setup(minfo, seg_ptr, per_seg_sgpg * sgpg_sz); } else bfa_mem_dma_setup(minfo, seg_ptr, num_sgpg * sgpg_sz); } /* kva memory */ bfa_mem_kva_setup(minfo, sgpg_kva, cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s)); } static void bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) { struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); struct bfa_sgpg_s *hsgpg; struct bfi_sgpg_s *sgpg; u64 align_len; struct bfa_mem_dma_s *seg_ptr; u32 sgpg_sz = sizeof(struct bfi_sgpg_s); u16 i, idx, nsegs, per_seg_sgpg, num_sgpg; union { u64 pa; union bfi_addr_u addr; } sgpg_pa, sgpg_pa_tmp; INIT_LIST_HEAD(&mod->sgpg_q); INIT_LIST_HEAD(&mod->sgpg_wait_q); bfa_trc(bfa, cfg->drvcfg.num_sgpgs); mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs; num_sgpg = cfg->drvcfg.num_sgpgs; nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz); /* dma/kva mem claim */ hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod); bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) { if (!bfa_mem_dma_virt(seg_ptr)) break; align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) - bfa_mem_dma_phys(seg_ptr); sgpg = (struct bfi_sgpg_s *) (((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len); sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len; 
WARN_ON(sgpg_pa.pa & (sgpg_sz - 1)); per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz; for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) { memset(hsgpg, 0, sizeof(*hsgpg)); memset(sgpg, 0, sizeof(*sgpg)); hsgpg->sgpg = sgpg; sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa); hsgpg->sgpg_pa = sgpg_pa_tmp.addr; list_add_tail(&hsgpg->qe, &mod->sgpg_q); sgpg++; hsgpg++; sgpg_pa.pa += sgpg_sz; } } bfa_mem_kva_curp(mod) = (u8 *) hsgpg; } static void bfa_sgpg_detach(struct bfa_s *bfa) { } static void bfa_sgpg_start(struct bfa_s *bfa) { } static void bfa_sgpg_stop(struct bfa_s *bfa) { } static void bfa_sgpg_iocdisable(struct bfa_s *bfa) { } bfa_status_t bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs) { struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); struct bfa_sgpg_s *hsgpg; int i; if (mod->free_sgpgs < nsgpgs) return BFA_STATUS_ENOMEM; for (i = 0; i < nsgpgs; i++) { bfa_q_deq(&mod->sgpg_q, &hsgpg); WARN_ON(!hsgpg); list_add_tail(&hsgpg->qe, sgpg_q); } mod->free_sgpgs -= nsgpgs; return BFA_STATUS_OK; } void bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg) { struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); struct bfa_sgpg_wqe_s *wqe; mod->free_sgpgs += nsgpg; WARN_ON(mod->free_sgpgs > mod->num_sgpgs); list_splice_tail_init(sgpg_q, &mod->sgpg_q); if (list_empty(&mod->sgpg_wait_q)) return; /* * satisfy as many waiting requests as possible */ do { wqe = bfa_q_first(&mod->sgpg_wait_q); if (mod->free_sgpgs < wqe->nsgpg) nsgpg = mod->free_sgpgs; else nsgpg = wqe->nsgpg; bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg); wqe->nsgpg -= nsgpg; if (wqe->nsgpg == 0) { list_del(&wqe->qe); wqe->cbfn(wqe->cbarg); } } while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q)); } void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg) { struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); WARN_ON(nsgpg <= 0); WARN_ON(nsgpg <= mod->free_sgpgs); wqe->nsgpg_total = wqe->nsgpg = nsgpg; /* * allocate any left to this one 
first */ if (mod->free_sgpgs) { /* * no one else is waiting for SGPG */ WARN_ON(!list_empty(&mod->sgpg_wait_q)); list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q); wqe->nsgpg -= mod->free_sgpgs; mod->free_sgpgs = 0; } list_add_tail(&wqe->qe, &mod->sgpg_wait_q); } void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe) { struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe)); list_del(&wqe->qe); if (wqe->nsgpg_total != wqe->nsgpg) bfa_sgpg_mfree(bfa, &wqe->sgpg_q, wqe->nsgpg_total - wqe->nsgpg); } void bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg), void *cbarg) { INIT_LIST_HEAD(&wqe->sgpg_q); wqe->cbfn = cbfn; wqe->cbarg = cbarg; } /* * UF related functions */ /* ***************************************************************************** * Internal functions ***************************************************************************** */ static void __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete) { struct bfa_uf_s *uf = cbarg; struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa); if (complete) ufm->ufrecv(ufm->cbarg, uf); } static void claim_uf_post_msgs(struct bfa_uf_mod_s *ufm) { struct bfi_uf_buf_post_s *uf_bp_msg; u16 i; u16 buf_len; ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm); uf_bp_msg = ufm->uf_buf_posts; for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs; i++, uf_bp_msg++) { memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s)); uf_bp_msg->buf_tag = i; buf_len = sizeof(struct bfa_uf_buf_s); uf_bp_msg->buf_len = cpu_to_be16(buf_len); bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST, bfa_fn_lpu(ufm->bfa)); bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i)); } /* * advance pointer beyond consumed memory */ bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg; } static void claim_ufs(struct bfa_uf_mod_s *ufm) { u16 i; struct bfa_uf_s *uf; /* * Claim block of memory for UF list */ ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm); /* * 
Initialize UFs and queue it in UF free queue */
	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
		memset(uf, 0, sizeof(struct bfa_uf_s));
		uf->bfa = ufm->bfa;
		uf->uf_tag = i;
		uf->pb_len = BFA_PER_UF_DMA_SZ;
		uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
		uf->buf_pa = ufm_pbs_pa(ufm, i);
		list_add_tail(&uf->qe, &ufm->uf_free_q);
	}

	/*
	 * advance memory pointer past the uf_list array just initialized
	 */
	bfa_mem_kva_curp(ufm) = (u8 *) uf;
}

/* Claim KVA memory for the UF list and the pre-built buffer-post messages. */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm)
{
	claim_ufs(ufm);
	claim_uf_post_msgs(ufm);
}

/*
 * Compute DMA and KVA memory requirements for num_uf_bufs unsolicited
 * frame buffers and record them in @minfo.
 */
static void
bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
	u32 num_ufs = cfg->fwcfg.num_uf_bufs;
	struct bfa_mem_dma_s *seg_ptr;
	u16 nsegs, idx, per_seg_uf = 0;

	nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
	per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);

	/* spread the UF buffers across the DMA segments; the last segment
	 * may carry fewer than per_seg_uf buffers */
	bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
		if (num_ufs >= per_seg_uf) {
			num_ufs -= per_seg_uf;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_uf * BFA_PER_UF_DMA_SZ);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_ufs * BFA_PER_UF_DMA_SZ);
	}

	/* kva memory: one bfa_uf_s and one pre-built post message per buffer */
	bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
		(sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
}

/* Attach-time initialization of the UF module: set up queues, claim memory. */
static void
bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	ufm->bfa = bfa;
	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
	INIT_LIST_HEAD(&ufm->uf_free_q);
	INIT_LIST_HEAD(&ufm->uf_posted_q);
	INIT_LIST_HEAD(&ufm->uf_unused_q);

	uf_mem_claim(ufm);
}

/* Nothing to undo at detach; memory is owned by the iocfc framework. */
static void
bfa_uf_detach(struct bfa_s *bfa)
{
}

/* Dequeue one UF from the free queue; returns NULL when the queue is empty. */
static struct bfa_uf_s *
bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
{
	struct bfa_uf_s *uf;

	bfa_q_deq(&uf_mod->uf_free_q, &uf);
	return uf;
}

/* Return a UF to the free queue. */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}

/*
 * Post one UF buffer to firmware on the FCXP request queue and move it to
 * the posted queue.  Fails with BFA_STATUS_FAILED when no reqq entry is
 * available; the UF is then left off both queues (caller re-queues it).
 */
static bfa_status_t
bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
{
	struct bfi_uf_buf_post_s *uf_post_msg;

	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
	if (!uf_post_msg)
		return BFA_STATUS_FAILED;

	/* the post message was pre-built at claim time; just copy it in */
	memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
		    sizeof(struct bfi_uf_buf_post_s));
	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);

	bfa_trc(ufm->bfa, uf->uf_tag);

	list_add_tail(&uf->qe, &ufm->uf_posted_q);
	return BFA_STATUS_OK;
}

/* Post every free UF to firmware until the free queue or reqq is exhausted. */
static void
bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
{
	struct bfa_uf_s *uf;

	while ((uf = bfa_uf_get(uf_mod)) != NULL) {
		if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
			break;
	}
}

/*
 * Handle a BFI_UF_I2H_FRM_RCVD event: fix up byte order, detach the UF
 * from the posted queue and hand the frame to the receive callback
 * (directly when running in FCS context, otherwise via the callback queue).
 */
static void
uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	u16 uf_tag = m->buf_tag;
	struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
	struct bfa_uf_buf_s *uf_buf;
	uint8_t *buf;
	struct fchs_s *fchs;

	uf_buf = (struct bfa_uf_buf_s *)
			bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
	buf = &uf_buf->d[0];

	/* firmware reports lengths big-endian */
	m->frm_len = be16_to_cpu(m->frm_len);
	m->xfr_len = be16_to_cpu(m->xfr_len);

	fchs = (struct fchs_s *)uf_buf;

	list_del(&uf->qe);	/* dequeue from posted queue */

	uf->data_ptr = buf;
	uf->data_len = m->xfr_len;

	WARN_ON(uf->data_len < sizeof(struct fchs_s));

	/* log header only, or header plus first payload word */
	if (uf->data_len == sizeof(struct fchs_s)) {
		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
			       uf->data_len, (struct fchs_s *)buf);
	} else {
		u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
		bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
				      BFA_PL_EID_RX, uf->data_len,
				      (struct fchs_s *)buf, pld_w0);
	}

	if (bfa->fcs)
		__bfa_cb_uf_recv(uf, BFA_TRUE);
	else
		bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
}

/* No stop-time work for the UF module. */
static void
bfa_uf_stop(struct bfa_s *bfa)
{
}

/*
 * IOC-disable handling: reclaim unused UFs into the free queue and pull
 * every posted UF back, since firmware will never complete them now.
 */
static void
bfa_uf_iocdisable(struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	struct bfa_uf_s *uf;
	struct list_head *qe, *qen;

	/* Enqueue unused uf resources to free_q */
	list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);

	list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
		uf = (struct bfa_uf_s *) qe;
		list_del(&uf->qe);
		bfa_uf_put(ufm, uf);
	}
}

/* Start-time work: hand all free UF buffers to firmware. */
static void
bfa_uf_start(struct bfa_s *bfa)
{
	bfa_uf_post_all(BFA_UF_MOD(bfa));
}

/*
 * Register handler for all unsolicited receive frames.
 *
 * @param[in]	bfa		BFA instance
 * @param[in]	ufrecv	receive handler function
 * @param[in]	cbarg	receive handler arg
 */
void
bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	ufm->ufrecv = ufrecv;
	ufm->cbarg = cbarg;
}

/*
 * Free an unsolicited frame back to BFA.
 *
 * @param[in]		uf		unsolicited frame to be freed
 *
 * @return None
 */
void
bfa_uf_free(struct bfa_uf_s *uf)
{
	bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
	/* try to re-post freed (and any other free) buffers to firmware */
	bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
}

/*
 *  uf_pub BFA uf module public functions
 */

/* Dispatch firmware-to-host UF messages; only FRM_RCVD is expected. */
void
bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	bfa_trc(bfa, msg->mhdr.msg_id);

	switch (msg->mhdr.msg_id) {
	case BFI_UF_I2H_FRM_RCVD:
		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
		break;

	default:
		bfa_trc(bfa, msg->mhdr.msg_id);
		WARN_ON(1);
	}
}

/*
 * Reconfigure UF resources after firmware reports a smaller buffer count:
 * park the excess buffers on the unused queue so they are never posted.
 */
void
bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
{
	struct bfa_uf_mod_s	*mod = BFA_UF_MOD(bfa);
	struct list_head	*qe;
	int	i;

	for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
		bfa_q_deq_tail(&mod->uf_free_q, &qe);
		list_add_tail(qe, &mod->uf_unused_q);
	}
}

/*
 * BFA fcdiag module
 */
#define BFA_DIAG_QTEST_TOV	1000    /* msec */

/*
 * Set port status to busy (mirrors the loopback lock into the fcport so
 * normal port operations are refused while a loopback test runs).
 */
static void
bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);

	if (fcdiag->lb.lock)
		fcport->diag_busy = BFA_TRUE;
	else
		fcport->diag_busy = BFA_FALSE;
}

/* fcdiag claims no memory of its own. */
static void
bfa_fcdiag_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		struct bfa_s *bfa)
{
}

static void
bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	fcdiag->bfa             = bfa;
	fcdiag->trcmod  = bfa->trcmod;
	/* The common DIAG attach bfa_diag_attach() will do all memory claim */
}

/*
 * IOC-disable handling: if a loopback test was in flight, complete it
 * to the caller with BFA_STATUS_IOC_FAILURE and release the lock.
 */
static void
bfa_fcdiag_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	bfa_trc(fcdiag, fcdiag->lb.lock);
	if (fcdiag->lb.lock) {
		fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
		fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
		fcdiag->lb.lock = 0;
		bfa_fcdiag_set_busy_status(fcdiag);
	}
}

static void
bfa_fcdiag_detach(struct bfa_s *bfa)
{
}

static void
bfa_fcdiag_start(struct bfa_s *bfa)
{
}

static void
bfa_fcdiag_stop(struct bfa_s *bfa)
{
}

/*
 * Queue-test timer expiry: report BFA_STATUS_ETIMER to the caller and
 * release the qtest lock.
 */
static void
bfa_fcdiag_queuetest_timeout(void *cbarg)
{
	struct bfa_fcdiag_s *fcdiag = cbarg;
	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;

	bfa_trc(fcdiag, fcdiag->qtest.all);
	bfa_trc(fcdiag, fcdiag->qtest.count);

	fcdiag->qtest.timer_active = 0;
	res->status = BFA_STATUS_ETIMER;
	res->count  = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
	if (fcdiag->qtest.all)
		res->queue  = fcdiag->qtest.all;

	bfa_trc(fcdiag, BFA_STATUS_ETIMER);
	fcdiag->qtest.status = BFA_STATUS_ETIMER;
	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
	fcdiag->qtest.lock = 0;
}

/*
 * Send one queue-test request, its payload filled with QTEST_PAT_DEFAULT,
 * on the queue currently under test.
 */
static bfa_status_t
bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
{
	u32	i;
	struct bfi_diag_qtest_req_s *req;

	req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
	if (!req)
		return BFA_STATUS_DEVBUSY;

	/* build host command */
	bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
			bfa_fn_lpu(fcdiag->bfa));

	for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
		req->data[i] = QTEST_PAT_DEFAULT;

	bfa_trc(fcdiag, fcdiag->qtest.queue);
	/* ring door bell */
	bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
	return BFA_STATUS_OK;
}

/*
 * Queue-test response handler: firmware is expected to echo the payload
 * bit-inverted.  Re-sends until the per-queue count is exhausted, then
 * (in "all" mode) moves to the next queue; otherwise completes the test.
 */
static void
bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
			bfi_diag_qtest_rsp_t *rsp)
{
	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
	bfa_status_t status = BFA_STATUS_OK;
	int i;

	/* Check timer, should still be active; a late response after the
	 * timeout already completed the test is ignored */
	if (!fcdiag->qtest.timer_active) {
		bfa_trc(fcdiag, fcdiag->qtest.timer_active);
		return;
	}

	/* update count */
	fcdiag->qtest.count--;

	/* Check result: every word must be the inverted test pattern */
	for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
		if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
			res->status = BFA_STATUS_DATACORRUPTED;
			break;
		}
	}

	if (res->status == BFA_STATUS_OK) {
		if (fcdiag->qtest.count > 0) {
			status = bfa_fcdiag_queuetest_send(fcdiag);
			if (status == BFA_STATUS_OK)
				return;
			else
				res->status = status;
		} else if (fcdiag->qtest.all > 0 &&
			fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
			/* advance to the next queue in all-queues mode */
			fcdiag->qtest.count = QTEST_CNT_DEFAULT;
			fcdiag->qtest.queue++;
			status = bfa_fcdiag_queuetest_send(fcdiag);
			if (status == BFA_STATUS_OK)
				return;
			else
				res->status = status;
		}
	}

	/* Stop timer when we comp all queue */
	if (fcdiag->qtest.timer_active) {
		bfa_timer_stop(&fcdiag->qtest.timer);
		fcdiag->qtest.timer_active = 0;
	}
	res->queue = fcdiag->qtest.queue;
	res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
	bfa_trc(fcdiag, res->count);
	bfa_trc(fcdiag, res->status);
	fcdiag->qtest.status = res->status;
	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
	fcdiag->qtest.lock = 0;
}

/*
 * Loopback response handler: copy the (big-endian) firmware counters into
 * the caller's result structure, invoke the completion callback and drop
 * the loopback lock / diag-busy status.
 */
static void
bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
			struct bfi_diag_lb_rsp_s *rsp)
{
	struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;

	res->numtxmfrm  = be32_to_cpu(rsp->res.numtxmfrm);
	res->numosffrm  = be32_to_cpu(rsp->res.numosffrm);
	res->numrcvfrm  = be32_to_cpu(rsp->res.numrcvfrm);
	res->badfrminf  = be32_to_cpu(rsp->res.badfrminf);
	res->badfrmnum  = be32_to_cpu(rsp->res.badfrmnum);
	res->status     = rsp->res.status;
	fcdiag->lb.status = rsp->res.status;
	bfa_trc(fcdiag, fcdiag->lb.status);
	fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
	fcdiag->lb.lock = 0;
	bfa_fcdiag_set_busy_status(fcdiag);
}

/* Build and send a loopback request to firmware on the DIAG request queue. */
static bfa_status_t
bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
			struct bfa_diag_loopback_s *loopback)
{
	struct bfi_diag_lb_req_s *lb_req;

	lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
	if (!lb_req)
		return BFA_STATUS_DEVBUSY;

	/* build host command */
	bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
			bfa_fn_lpu(fcdiag->bfa));

	lb_req->lb_mode = loopback->lb_mode;
	lb_req->speed = loopback->speed;
	lb_req->loopcnt = loopback->loopcnt;
	lb_req->pattern = loopback->pattern;

	/* ring door bell */
	bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);

	bfa_trc(fcdiag, loopback->lb_mode);
	bfa_trc(fcdiag, loopback->speed);
	bfa_trc(fcdiag, loopback->loopcnt);
	bfa_trc(fcdiag, loopback->pattern);
	return BFA_STATUS_OK;
}

/*
 *	cpe/rme intr handler
 */
void
bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	switch (msg->mhdr.msg_id) {
	case BFI_DIAG_I2H_LOOPBACK:
		bfa_fcdiag_loopback_comp(fcdiag,
				(struct bfi_diag_lb_rsp_s *) msg);
		break;
	case BFI_DIAG_I2H_QTEST:
		bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
		break;
	default:
		bfa_trc(fcdiag, msg->mhdr.msg_id);
		WARN_ON(1);
	}
}

/*
 *	Loopback test
 *
 *   @param[in] *bfa            - bfa data struct
 *   @param[in] opmode          - port operation mode
 *   @param[in] speed           - port speed
 *   @param[in] lpcnt           - loop count
 *   @param[in] pat                     - pattern to build packet
 *   @param[in] *result         - pt to bfa_diag_loopback_result_t data struct
 *   @param[in] cbfn            - callback function
 *   @param[in] cbarg           - callback function arg
 *
 *   @param[out]
 */
bfa_status_t
bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
		enum bfa_port_speed speed, u32 lpcnt, u32 pat,
		struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn,
		void *cbarg)
{
	struct  bfa_diag_loopback_s loopback;
	struct bfa_port_attr_s attr;
	bfa_status_t status;
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);

	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_IOC_NON_OP;

	/* if port is PBC disabled, return error */
	if (bfa_fcport_is_pbcdisabled(bfa)) {
		bfa_trc(fcdiag, BFA_STATUS_PBC);
		return BFA_STATUS_PBC;
	}

	/* the port must be administratively disabled to run loopback */
	if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
		bfa_trc(fcdiag, opmode);
		return BFA_STATUS_PORT_NOT_DISABLED;
	}

	/*
	 * Check if input speed is supported by the port mode
	 */
	if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
		if (!(speed == BFA_PORT_SPEED_1GBPS ||
		      speed == BFA_PORT_SPEED_2GBPS ||
		      speed == BFA_PORT_SPEED_4GBPS ||
		      speed == BFA_PORT_SPEED_8GBPS ||
		      speed == BFA_PORT_SPEED_16GBPS ||
		      speed == BFA_PORT_SPEED_AUTO)) {
			bfa_trc(fcdiag, speed);
			return BFA_STATUS_UNSUPP_SPEED;
		}
		bfa_fcport_get_attr(bfa, &attr);
		bfa_trc(fcdiag, attr.speed_supported);
		if (speed > attr.speed_supported)
			return BFA_STATUS_UNSUPP_SPEED;
	} else {
		/* non-FC (CNA) ports only support 10G */
		if (speed != BFA_PORT_SPEED_10GBPS) {
			bfa_trc(fcdiag, speed);
			return BFA_STATUS_UNSUPP_SPEED;
		}
	}

	/* For Mezz card, port speed entered needs to be checked */
	if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
		if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
			/* 1G is not supported on CT2 ASIC mezz cards */
			if ((speed == BFA_PORT_SPEED_1GBPS) &&
			    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
				return BFA_STATUS_UNSUPP_SPEED;
			if (!(speed == BFA_PORT_SPEED_1GBPS ||
			      speed == BFA_PORT_SPEED_2GBPS ||
			      speed == BFA_PORT_SPEED_4GBPS ||
			      speed == BFA_PORT_SPEED_8GBPS ||
			      speed == BFA_PORT_SPEED_16GBPS ||
			      speed == BFA_PORT_SPEED_AUTO))
				return BFA_STATUS_UNSUPP_SPEED;
		} else {
			if (speed != BFA_PORT_SPEED_10GBPS)
				return BFA_STATUS_UNSUPP_SPEED;
		}
	}

	/* check to see if there is another destructive diag cmd running */
	if (fcdiag->lb.lock) {
		bfa_trc(fcdiag, fcdiag->lb.lock);
		return BFA_STATUS_DEVBUSY;
	}

	fcdiag->lb.lock = 1;
	loopback.lb_mode = opmode;
	loopback.speed = speed;
	loopback.loopcnt = lpcnt;
	loopback.pattern = pat;
	fcdiag->lb.result = result;
	fcdiag->lb.cbfn = cbfn;
	fcdiag->lb.cbarg = cbarg;
	memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
	bfa_fcdiag_set_busy_status(fcdiag);

	/* Send msg to fw */
	status = bfa_fcdiag_loopback_send(fcdiag, &loopback);

	return status;
}

/*
 *	DIAG queue test command
 *
 *   @param[in] *bfa            - bfa data struct
 *   @param[in] force           - 1: don't do ioc op checking
 *   @param[in] queue           - queue no. to test
 *   @param[in] *result         - pt to bfa_diag_qtest_result_t data struct
 *   @param[in] cbfn            - callback function
 *   @param[in] *cbarg          - callback function arg
 *
 *   @param[out]
 */
bfa_status_t
bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
		struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn,
		void *cbarg)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	bfa_status_t status;
	bfa_trc(fcdiag, force);
	bfa_trc(fcdiag, queue);

	if (!force && !bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_IOC_NON_OP;

	/* check to see if there is another destructive diag cmd running */
	if (fcdiag->qtest.lock) {
		bfa_trc(fcdiag, fcdiag->qtest.lock);
		return BFA_STATUS_DEVBUSY;
	}

	/* Initialization */
	fcdiag->qtest.lock = 1;
	fcdiag->qtest.cbfn = cbfn;
	fcdiag->qtest.cbarg = cbarg;
	fcdiag->qtest.result = result;
	fcdiag->qtest.count = QTEST_CNT_DEFAULT;

	/* Init test results */
	fcdiag->qtest.result->status = BFA_STATUS_OK;
	fcdiag->qtest.result->count  = 0;

	/* send: a queue index >= BFI_IOC_MAX_CQS means "test all queues" */
	if (queue < BFI_IOC_MAX_CQS) {
		fcdiag->qtest.result->queue  = (u8)queue;
		fcdiag->qtest.queue = (u8)queue;
		fcdiag->qtest.all   = 0;
	} else {
		fcdiag->qtest.result->queue  = 0;
		fcdiag->qtest.queue = 0;
		fcdiag->qtest.all   = 1;
	}
	status = bfa_fcdiag_queuetest_send(fcdiag);

	/* Start a timer */
	if (status == BFA_STATUS_OK) {
		bfa_timer_start(bfa, &fcdiag->qtest.timer,
				bfa_fcdiag_queuetest_timeout, fcdiag,
				BFA_DIAG_QTEST_TOV);
		fcdiag->qtest.timer_active = 1;
	}
	return status;
}

/*
 * DIAG PLB is running
 *
 *   @param[in] *bfa	- bfa data struct
 *
 *   @param[out]
 */
bfa_status_t
bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	return fcdiag->lb.lock ?  BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
}
gpl-2.0
GuneetAtwal/kernel_g900f
drivers/pcmcia/sa1100_generic.c
8008
3973
/*====================================================================== Device driver for the PCMCIA control functionality of StrongARM SA-1100 microprocessors. The contents of this file are subject to the Mozilla Public License Version 1.1 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.mozilla.org/MPL/ Software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the specific language governing rights and limitations under the License. The initial developer of the original code is John G. Dorsey <john+@cs.cmu.edu>. Portions created by John G. Dorsey are Copyright (C) 1999 John G. Dorsey. All Rights Reserved. Alternatively, the contents of this file may be used under the terms of the GNU Public License version 2 (the "GPL"), in which case the provisions of the GPL are applicable instead of the above. If you wish to allow the use of your version of this file only under the terms of the GPL and not to allow others to use your version of this file under the MPL, indicate your decision by deleting the provisions above and replace them with the notice and other provisions required by the GPL. If you do not delete the provisions above, a recipient may use your version of this file under either the MPL or the GPL. 
======================================================================*/

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/platform_device.h>

#include <pcmcia/ss.h>

#include <asm/hardware/scoop.h>

#include "sa1100_generic.h"

int __init pcmcia_collie_init(struct device *dev);

/*
 * Table of board-specific socket init hooks; the set of entries is
 * selected at build time by the board Kconfig options.  Exactly one
 * entry is expected to succeed on any given machine.
 */
static int (*sa11x0_pcmcia_hw_init[])(struct device *dev) __devinitdata = {
#ifdef CONFIG_SA1100_ASSABET
	pcmcia_assabet_init,
#endif
#ifdef CONFIG_SA1100_CERF
	pcmcia_cerf_init,
#endif
#if defined(CONFIG_SA1100_H3100) || defined(CONFIG_SA1100_H3600)
	pcmcia_h3600_init,
#endif
#ifdef CONFIG_SA1100_NANOENGINE
	pcmcia_nanoengine_init,
#endif
#ifdef CONFIG_SA1100_SHANNON
	pcmcia_shannon_init,
#endif
#ifdef CONFIG_SA1100_SIMPAD
	pcmcia_simpad_init,
#endif
#ifdef CONFIG_SA1100_COLLIE
	pcmcia_collie_init,
#endif
};

/*
 * Probe: try each configured board init hook in turn until one succeeds.
 * Returns 0 on success or the last hook's error (-ENODEV when the table
 * is empty).
 */
static int __devinit sa11x0_drv_pcmcia_probe(struct platform_device *dev)
{
	int i, ret = -ENODEV;

	/*
	 * Initialise any "on-board" PCMCIA sockets.
	 */
	for (i = 0; i < ARRAY_SIZE(sa11x0_pcmcia_hw_init); i++) {
		ret = sa11x0_pcmcia_hw_init[i](&dev->dev);
		if (ret == 0)
			break;
	}

	return ret;
}

/* Remove: tear down every socket registered by the probe-time init hook. */
static int sa11x0_drv_pcmcia_remove(struct platform_device *dev)
{
	struct skt_dev_info *sinfo = platform_get_drvdata(dev);
	int i;

	platform_set_drvdata(dev, NULL);

	for (i = 0; i < sinfo->nskt; i++)
		soc_pcmcia_remove_one(&sinfo->skt[i]);

	kfree(sinfo);
	return 0;
}

static struct platform_driver sa11x0_pcmcia_driver = {
	.driver = {
		.name		= "sa11x0-pcmcia",
		.owner		= THIS_MODULE,
	},
	.probe		= sa11x0_drv_pcmcia_probe,
	.remove		= sa11x0_drv_pcmcia_remove,
};

/* sa11x0_pcmcia_init()
 * ^^^^^^^^^^^^^^^^^^^^
 *
 * This routine performs low-level PCMCIA initialization and then
 * registers this socket driver with Card Services.
 *
 * Returns: 0 on success, -ve error code on failure
 */
static int __init sa11x0_pcmcia_init(void)
{
	return platform_driver_register(&sa11x0_pcmcia_driver);
}

/* sa11x0_pcmcia_exit()
 * ^^^^^^^^^^^^^^^^^^^^
 * Invokes the low-level kernel service to free IRQs associated with this
 * socket controller and reset GPIO edge detection.
 */
static void __exit sa11x0_pcmcia_exit(void)
{
	platform_driver_unregister(&sa11x0_pcmcia_driver);
}

MODULE_AUTHOR("John Dorsey <john+@cs.cmu.edu>");
MODULE_DESCRIPTION("Linux PCMCIA Card Services: SA-11x0 Socket Controller");
MODULE_LICENSE("Dual MPL/GPL");

/* fs_initcall (not module_init): sockets must exist before card drivers */
fs_initcall(sa11x0_pcmcia_init);
module_exit(sa11x0_pcmcia_exit);
gpl-2.0
rassillon/android_kernel_samsung_grandneove3g
arch/arm/mach-iop32x/irq.c
11848
1541
/* * arch/arm/mach-iop32x/irq.c * * Generic IOP32X IRQ handling functionality * * Author: Rory Bolt <rorybolt@pacbell.net> * Copyright (C) 2002 Rory Bolt * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/list.h> #include <asm/mach/irq.h> #include <asm/irq.h> #include <mach/hardware.h> #include <asm/mach-types.h> static u32 iop32x_mask; static void intctl_write(u32 val) { asm volatile("mcr p6, 0, %0, c0, c0, 0" : : "r" (val)); } static void intstr_write(u32 val) { asm volatile("mcr p6, 0, %0, c4, c0, 0" : : "r" (val)); } static void iop32x_irq_mask(struct irq_data *d) { iop32x_mask &= ~(1 << d->irq); intctl_write(iop32x_mask); } static void iop32x_irq_unmask(struct irq_data *d) { iop32x_mask |= 1 << d->irq; intctl_write(iop32x_mask); } struct irq_chip ext_chip = { .name = "IOP32x", .irq_ack = iop32x_irq_mask, .irq_mask = iop32x_irq_mask, .irq_unmask = iop32x_irq_unmask, }; void __init iop32x_init_irq(void) { int i; iop_init_cp6_handler(); intctl_write(0); intstr_write(0); if (machine_is_glantank() || machine_is_iq80321() || machine_is_iq31244() || machine_is_n2100() || machine_is_em7210()) *IOP3XX_PCIIRSR = 0x0f; for (i = 0; i < NR_IRQS; i++) { irq_set_chip_and_handler(i, &ext_chip, handle_level_irq); set_irq_flags(i, IRQF_VALID | IRQF_PROBE); } }
gpl-2.0
antonblanchard/linux
drivers/staging/rtl8192u/r8192U_wx.c
73
25837
// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
 *
 * This file contains wireless extension handlers.
 *
 * This is part of rtl8180 OpenSource driver.
 * Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
 *
 * Parts of this driver are based on the GPL part
 * of the official realtek driver.
 *
 * Parts of this driver are based on the rtl8180 driver skeleton
 * from Patric Schenke & Andres Salomon.
 *
 * Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver.
 *
 * We want to thank the Authors of those projects and the Ndiswrapper
 * project Authors.
 *
 *****************************************************************************/
#include <linux/string.h>
#include "r8192U.h"
#include "r8192U_hw.h"

#include "ieee80211/dot11d.h"
#include "r8192U_wx.h"

#define RATE_COUNT 12
/* Supported bit rates in bit/s: 802.11b rates then 802.11g OFDM rates. */
static const u32 rtl8180_rates[] = {1000000, 2000000, 5500000, 11000000,
	6000000, 9000000, 12000000, 18000000, 24000000, 36000000, 48000000, 54000000};

#ifndef ENETDOWN
#define ENETDOWN 1
#endif

static int r8192_wx_get_freq(struct net_device *dev,
			     struct iw_request_info *a,
			     union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	return ieee80211_wx_get_freq(priv->ieee80211, a, wrqu, b);
}

static int r8192_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
			     union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	return ieee80211_wx_get_mode(priv->ieee80211, a, wrqu, b);
}

static int r8192_wx_get_rate(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	return ieee80211_wx_get_rate(priv->ieee80211, info, wrqu, extra);
}

/* Setters serialize on wx_mutex; getters delegate directly. */
static int r8192_wx_set_rate(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	int ret;
	struct r8192_priv *priv = ieee80211_priv(dev);

	mutex_lock(&priv->wx_mutex);

	ret = ieee80211_wx_set_rate(priv->ieee80211, info, wrqu, extra);

	mutex_unlock(&priv->wx_mutex);

	return ret;
}

static int r8192_wx_set_rts(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	int ret;
	struct r8192_priv *priv = ieee80211_priv(dev);

	mutex_lock(&priv->wx_mutex);

	ret = ieee80211_wx_set_rts(priv->ieee80211, info, wrqu, extra);

	mutex_unlock(&priv->wx_mutex);

	return ret;
}

static int r8192_wx_get_rts(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	return ieee80211_wx_get_rts(priv->ieee80211, info, wrqu, extra);
}

static int r8192_wx_set_power(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	int ret;
	struct r8192_priv *priv = ieee80211_priv(dev);

	mutex_lock(&priv->wx_mutex);

	ret = ieee80211_wx_set_power(priv->ieee80211, info, wrqu, extra);

	mutex_unlock(&priv->wx_mutex);

	return ret;
}

static int r8192_wx_get_power(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	return ieee80211_wx_get_power(priv->ieee80211, info, wrqu, extra);
}

/* Private ioctl: request a forced adapter reset (value taken from *extra). */
static int r8192_wx_force_reset(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	mutex_lock(&priv->wx_mutex);

	netdev_dbg(dev, "%s(): force reset ! extra is %d\n", __func__, *extra);
	priv->force_reset = *extra;
	mutex_unlock(&priv->wx_mutex);
	return 0;
}

static int r8192_wx_set_rawtx(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	int ret;

	mutex_lock(&priv->wx_mutex);

	ret = ieee80211_wx_set_rawtx(priv->ieee80211, info, wrqu, extra);

	mutex_unlock(&priv->wx_mutex);

	return ret;
}

/* Private ioctl: accept (1) or reject (0) frames with bad CRC in monitor mode. */
static int r8192_wx_set_crcmon(struct net_device *dev,
			       struct iw_request_info *info,
			       union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	int *parms = (int *)extra;
	int enable = (parms[0] > 0);

	mutex_lock(&priv->wx_mutex);

	if (enable)
		priv->crcmon = 1;
	else
		priv->crcmon = 0;

	DMESG("bad CRC in monitor mode are %s",
	      priv->crcmon ? "accepted" : "rejected");

	mutex_unlock(&priv->wx_mutex);

	return 0;
}

static int r8192_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
			     union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	int ret;

	mutex_lock(&priv->wx_mutex);

	ret = ieee80211_wx_set_mode(priv->ieee80211, a, wrqu, b);

	/* mode change affects which frames we accept */
	rtl8192_set_rxconf(dev);

	mutex_unlock(&priv->wx_mutex);
	return ret;
}

/*
 * Layout-compatible overlay of struct iw_range up to (and including) the
 * scan_capa field, used to set scan_capa without a newer kernel header.
 */
struct  iw_range_with_scan_capa {
	/* Informative stuff (to choose between different interface) */
	__u32           throughput;     /* To give an idea... */
	/* In theory this value should be the maximum benchmarked
	 * TCP/IP throughput, because with most of these devices the
	 * bit rate is meaningless (overhead and co) to estimate how
	 * fast the connection will go and pick the fastest one.
	 * I suggest people to play with Netperf or any benchmark...
	 */

	/* NWID (or domain id) */
	__u32           min_nwid;       /* Minimal NWID we are able to set */
	__u32           max_nwid;       /* Maximal NWID we are able to set */

	/* Old Frequency (backward compat - moved lower ) */
	__u16           old_num_channels;
	__u8            old_num_frequency;

	/* Scan capabilities */
	__u8            scan_capa;
};

/* SIOCGIWRANGE: report driver capabilities (rates, channels, quality, enc). */
static int rtl8180_wx_get_range(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *wrqu, char *extra)
{
	struct iw_range *range = (struct iw_range *)extra;
	struct iw_range_with_scan_capa *tmp = (struct iw_range_with_scan_capa *)range;
	struct r8192_priv *priv = ieee80211_priv(dev);
	u16 val;
	int i;

	wrqu->data.length = sizeof(*range);
	memset(range, 0, sizeof(*range));

	/* Let's try to keep this struct in the same order as in
	 * linux/include/wireless.h
	 */

	/* TODO: See what values we can set, and remove the ones we can't
	 * set, or fill them with some default data.
	 */

	/* ~5 Mb/s real (802.11b) */
	range->throughput = 5 * 1000 * 1000;

	/* TODO: Not used in 802.11b? */
	/* range->min_nwid; */	/* Minimal NWID we are able to set */
	/* TODO: Not used in 802.11b? */
	/* range->max_nwid; */	/* Maximal NWID we are able to set */

	/* Old Frequency (backward compat - moved lower ) */
	/* range->old_num_channels; */
	/* range->old_num_frequency; */
	/* range->old_freq[6]; */ /* Filler to keep "version" at the same offset */
	if (priv->rf_set_sens != NULL)
		range->sensitivity = priv->max_sens;	/* signal level threshold range */

	range->max_qual.qual = 100;
	/* TODO: Find real max RSSI and stick here */
	range->max_qual.level = 0;
	range->max_qual.noise = 0x100 - 98;
	range->max_qual.updated = 7; /* Updated all three */

	range->avg_qual.qual = 92; /* > 8% missed beacons is 'bad' */
	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
	range->avg_qual.level = 0x100 - 78;
	range->avg_qual.noise = 0;
	range->avg_qual.updated = 7; /* Updated all three */

	range->num_bitrates = RATE_COUNT;

	for (i = 0; i < RATE_COUNT && i < IW_MAX_BITRATES; i++)
		range->bitrate[i] = rtl8180_rates[i];

	range->min_frag = MIN_FRAG_THRESHOLD;
	range->max_frag = MAX_FRAG_THRESHOLD;

	range->min_pmp = 0;
	range->max_pmp = 5000000;
	range->min_pmt = 0;
	range->max_pmt = 65535*1000;
	range->pmp_flags = IW_POWER_PERIOD;
	range->pmt_flags = IW_POWER_TIMEOUT;
	range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_ALL_R;

	range->we_version_compiled = WIRELESS_EXT;
	range->we_version_source = 16;

	/* range->retry_capa; */	/* What retry options are supported */
	/* range->retry_flags; */	/* How to decode max/min retry limit */
	/* range->r_time_flags; */	/* How to decode max/min retry life */
	/* range->min_retry; */		/* Minimal number of retries */
	/* range->max_retry; */		/* Maximal number of retries */
	/* range->min_r_time; */	/* Minimal retry lifetime */
	/* range->max_r_time; */	/* Maximal retry lifetime */

	/* advertise only channels allowed by the current 802.11d map */
	for (i = 0, val = 0; i < 14; i++) {
		/* Include only legal frequencies for some countries */
		if ((GET_DOT11D_INFO(priv->ieee80211)->channel_map)[i+1]) {
			range->freq[val].i = i + 1;
			range->freq[val].m = ieee80211_wlan_frequencies[i] * 100000;
			range->freq[val].e = 1;
			val++;
		} else {
			/* FIXME: do we need to set anything for channels */
			/* we don't use ? */
		}

		if (val == IW_MAX_FREQUENCIES)
			break;
	}
	range->num_frequency = val;
	range->num_channels = val;
	range->enc_capa = IW_ENC_CAPA_WPA|IW_ENC_CAPA_WPA2|
			  IW_ENC_CAPA_CIPHER_TKIP|IW_ENC_CAPA_CIPHER_CCMP;
	tmp->scan_capa = 0x01;
	return 0;
}

/*
 * SIOCSIWSCAN: start a scan.  Refused while the interface is down or
 * traffic is busy; when not associated, a synchronous softmac scan is
 * performed instead of the generic one.
 */
static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
			     union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	struct ieee80211_device *ieee = priv->ieee80211;
	int ret = 0;

	if (!priv->up)
		return -ENETDOWN;

	if (priv->ieee80211->LinkDetectInfo.bBusyTraffic)
		return -EAGAIN;
	if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
		struct iw_scan_req *req = (struct iw_scan_req *)b;

		if (req->essid_len) {
			ieee->current_network.ssid_len = req->essid_len;
			memcpy(ieee->current_network.ssid, req->essid, req->essid_len);
		}
	}

	mutex_lock(&priv->wx_mutex);
	if (priv->ieee80211->state != IEEE80211_LINKED) {
		priv->ieee80211->scanning = 0;
		ieee80211_softmac_scan_syncro(priv->ieee80211);
		ret = 0;
	} else {
		ret = ieee80211_wx_set_scan(priv->ieee80211, a, wrqu, b);
	}
	mutex_unlock(&priv->wx_mutex);
	return ret;
}

static int r8192_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
			     union iwreq_data *wrqu, char *b)
{
	int ret;
	struct r8192_priv *priv = ieee80211_priv(dev);

	if (!priv->up)
		return -ENETDOWN;

	mutex_lock(&priv->wx_mutex);

	ret = ieee80211_wx_get_scan(priv->ieee80211, a, wrqu, b);

	mutex_unlock(&priv->wx_mutex);

	return ret;
}

static int r8192_wx_set_essid(struct net_device *dev,
			      struct iw_request_info *a,
			      union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	int ret;

	mutex_lock(&priv->wx_mutex);

	ret = ieee80211_wx_set_essid(priv->ieee80211, a, wrqu, b);

	mutex_unlock(&priv->wx_mutex);

	return ret;
}

static int r8192_wx_get_essid(struct net_device *dev,
			      struct iw_request_info *a,
			      union iwreq_data *wrqu, char *b)
{
	int ret;
	struct r8192_priv *priv = ieee80211_priv(dev);

	mutex_lock(&priv->wx_mutex);

	ret = ieee80211_wx_get_essid(priv->ieee80211, a, wrqu, b);

	mutex_unlock(&priv->wx_mutex);

	return ret;
}

static int r8192_wx_set_freq(struct net_device *dev, struct iw_request_info *a,
			     union iwreq_data *wrqu, char *b)
{
	int ret;
	struct r8192_priv *priv = ieee80211_priv(dev);

	mutex_lock(&priv->wx_mutex);

	ret = ieee80211_wx_set_freq(priv->ieee80211, a, wrqu, b);

	mutex_unlock(&priv->wx_mutex);
	return ret;
}

static int r8192_wx_get_name(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	return ieee80211_wx_get_name(priv->ieee80211, info, wrqu, extra);
}

/* SIOCSIWFRAG: set fragmentation threshold (forced even, range-checked). */
static int r8192_wx_set_frag(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	if (wrqu->frag.disabled)
		priv->ieee80211->fts = DEFAULT_FRAG_THRESHOLD;
	else {
		if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
		    wrqu->frag.value > MAX_FRAG_THRESHOLD)
			return -EINVAL;

		priv->ieee80211->fts = wrqu->frag.value & ~0x1;
	}

	return 0;
}

static int r8192_wx_get_frag(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	wrqu->frag.value = priv->ieee80211->fts;
	wrqu->frag.fixed = 0;	/* no auto select */
	wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FRAG_THRESHOLD);

	return 0;
}

static int r8192_wx_set_wap(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *awrq, char *extra)
{
	int ret;
	struct r8192_priv *priv = ieee80211_priv(dev);
	/* struct sockaddr *temp = (struct sockaddr *)awrq; */
	mutex_lock(&priv->wx_mutex);

	ret = ieee80211_wx_set_wap(priv->ieee80211, info, awrq, extra);

	mutex_unlock(&priv->wx_mutex);

	return ret;
}

static int r8192_wx_get_wap(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	return ieee80211_wx_get_wap(priv->ieee80211, info, wrqu, extra);
}

static int r8192_wx_get_enc(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *key)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	return ieee80211_wx_get_encode(priv->ieee80211, info, wrqu, key);
}

/*
 * SIOCSIWENCODE: set a WEP key.  After the software-layer setup, packs the
 * key bytes into 32-bit words and, for 40/104-bit keys, programs the
 * hardware CAM entry as well.
 */
static int r8192_wx_set_enc(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *key)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	struct ieee80211_device *ieee = priv->ieee80211;
	int ret;
	u32 hwkey[4] = {0, 0, 0, 0};
	u8 mask = 0xff;
	u32 key_idx = 0;
	/* per-index dummy MAC addresses used for the default-key CAM entries */
	u8 zero_addr[4][6] = {	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
				{0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
				{0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
				{0x00, 0x00, 0x00, 0x00, 0x00, 0x03} };
	int i;

	if (!priv->up)
		return -ENETDOWN;

	mutex_lock(&priv->wx_mutex);

	RT_TRACE(COMP_SEC, "Setting SW wep key");
	ret = ieee80211_wx_set_encode(priv->ieee80211, info, wrqu, key);

	mutex_unlock(&priv->wx_mutex);

	/* sometimes, the length is zero while we do not type key value */
	if (wrqu->encoding.length != 0) {
		/* pack key bytes little-endian into words, zero-padding
		 * past the key length via the mask */
		for (i = 0; i < 4; i++) {
			hwkey[i] |= key[4*i+0]&mask;
			if (i == 1 && (4*i+1) == wrqu->encoding.length)
				mask = 0x00;
			if (i == 3 && (4*i+1) == wrqu->encoding.length)
				mask = 0x00;
			hwkey[i] |= (key[4*i+1]&mask)<<8;
			hwkey[i] |= (key[4*i+2]&mask)<<16;
			hwkey[i] |= (key[4*i+3]&mask)<<24;
		}

		#define CONF_WEP40  0x4
		#define CONF_WEP104 0x14

		switch (wrqu->encoding.flags & IW_ENCODE_INDEX) {
		case 0:
			key_idx = ieee->tx_keyidx;
			break;
		case 1:
			key_idx = 0;
			break;
		case 2:
			key_idx = 1;
			break;
		case 3:
			key_idx = 2;
			break;
		case 4:
			key_idx = 3;
			break;
		default:
			break;
		}

		if (wrqu->encoding.length == 0x5) {
			/* 5 bytes -> WEP-40 */
			ieee->pairwise_key_type = KEY_TYPE_WEP40;
			EnableHWSecurityConfig8192(dev);

			setKey(dev,
			       key_idx,                /* EntryNo */
			       key_idx,                /* KeyIndex */
			       KEY_TYPE_WEP40,         /* KeyType */
			       zero_addr[key_idx],
			       0,                      /* DefaultKey */
			       hwkey);                 /* KeyContent */

		}

		else if (wrqu->encoding.length == 0xd) {
			/* 13 bytes -> WEP-104 */
			ieee->pairwise_key_type = KEY_TYPE_WEP104;
			EnableHWSecurityConfig8192(dev);

			setKey(dev,
			       key_idx,                /* EntryNo */
			       key_idx,                /* KeyIndex */
			       KEY_TYPE_WEP104,        /* KeyType */
			       zero_addr[key_idx],
			       0,                      /* DefaultKey */
			       hwkey);                 /* KeyContent */

		} else {
			netdev_warn(dev, "wrong type in WEP, not WEP40 and WEP104\n");
		}
	}

	return ret;
}

/* Private ioctl: select active (1) vs passive (0) scanning. */
static int r8192_wx_set_scan_type(struct net_device *dev,
				  struct iw_request_info *aa,
				  union iwreq_data *wrqu, char *p)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	int *parms = (int *)p;
	int mode = parms[0];

	priv->ieee80211->active_scan = mode;

	return 1;
}

/* SIOCSIWRETRY: set retry limits (lifetime not supported; commits to hw). */
static int r8192_wx_set_retry(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	int err = 0;

	mutex_lock(&priv->wx_mutex);

	if (wrqu->retry.flags & IW_RETRY_LIFETIME ||
	    wrqu->retry.disabled){
		err = -EINVAL;
		goto exit;
	}
	if (!(wrqu->retry.flags & IW_RETRY_LIMIT)) {
		err = -EINVAL;
		goto exit;
	}

	if (wrqu->retry.value > R8180_MAX_RETRY) {
		err = -EINVAL;
		goto exit;
	}
	if (wrqu->retry.flags & IW_RETRY_MAX) {
		priv->retry_rts = wrqu->retry.value;
		DMESG("Setting retry for RTS/CTS data to %d", wrqu->retry.value);

	} else {
		priv->retry_data = wrqu->retry.value;
		DMESG("Setting retry for non RTS/CTS data to %d", wrqu->retry.value);
	}

	/* FIXME !
	 * We might try to write directly the TX config register
	 * or to restart just the (R)TX process.
	 * I'm unsure if whole reset is really needed
	 */

	rtl8192_commit(dev);
exit:
	mutex_unlock(&priv->wx_mutex);

	return err;
}

static int r8192_wx_get_retry(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	wrqu->retry.disabled = 0; /* can't be disabled */

	if ((wrqu->retry.flags & IW_RETRY_TYPE) ==
	    IW_RETRY_LIFETIME)
		return -EINVAL;

	if (wrqu->retry.flags & IW_RETRY_MAX) {
		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
		wrqu->retry.value = priv->retry_rts;
	} else {
		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MIN;
		wrqu->retry.value = priv->retry_data;
	}

	return 0;
}

static int r8192_wx_get_sens(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	if (priv->rf_set_sens == NULL)
		return -1; /* we have not this support for this radio */
	wrqu->sens.value = priv->sens;
	return 0;
}

/* SIOCSIWSENS: set radio sensitivity via the RF-specific hook, if any. */
static int r8192_wx_set_sens(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	short err = 0;

	mutex_lock(&priv->wx_mutex);
	if (priv->rf_set_sens == NULL) {
		err = -1; /* we have not this support for this radio */
		goto exit;
	}
	if (priv->rf_set_sens(dev, wrqu->sens.value) == 0)
		priv->sens = wrqu->sens.value;
	else
		err = -EINVAL;

exit:
	mutex_unlock(&priv->wx_mutex);

	return err;
}

/* hw security need to reorganized.
*/ static int r8192_wx_set_enc_ext(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret = 0; struct r8192_priv *priv = ieee80211_priv(dev); struct ieee80211_device *ieee = priv->ieee80211; mutex_lock(&priv->wx_mutex); ret = ieee80211_wx_set_encode_ext(priv->ieee80211, info, wrqu, extra); { u8 broadcast_addr[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; u8 zero[6] = {0}; u32 key[4] = {0}; struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; struct iw_point *encoding = &wrqu->encoding; u8 idx = 0, alg = 0, group = 0; if ((encoding->flags & IW_ENCODE_DISABLED) || ext->alg == IW_ENCODE_ALG_NONE) /* none is not allowed to use hwsec WB 2008.07.01 */ goto end_hw_sec; /* as IW_ENCODE_ALG_CCMP is defined to be 3 and KEY_TYPE_CCMP is defined to 4; */ alg = (ext->alg == IW_ENCODE_ALG_CCMP)?KEY_TYPE_CCMP:ext->alg; idx = encoding->flags & IW_ENCODE_INDEX; if (idx) idx--; group = ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY; if ((!group) || (IW_MODE_ADHOC == ieee->iw_mode) || (alg == KEY_TYPE_WEP40)) { if ((ext->key_len == 13) && (alg == KEY_TYPE_WEP40)) alg = KEY_TYPE_WEP104; ieee->pairwise_key_type = alg; EnableHWSecurityConfig8192(dev); } memcpy((u8 *)key, ext->key, 16); /* we only get 16 bytes key.why? 
WB 2008.7.1 */ if ((alg & KEY_TYPE_WEP40) && (ieee->auth_mode != 2)) { setKey(dev, idx, /* EntryNao */ idx, /* KeyIndex */ alg, /* KeyType */ zero, /* MacAddr */ 0, /* DefaultKey */ key); /* KeyContent */ } else if (group) { ieee->group_key_type = alg; setKey(dev, idx, /* EntryNo */ idx, /* KeyIndex */ alg, /* KeyType */ broadcast_addr, /* MacAddr */ 0, /* DefaultKey */ key); /* KeyContent */ } else { /* pairwise key */ setKey(dev, 4, /* EntryNo */ idx, /* KeyIndex */ alg, /* KeyType */ (u8 *)ieee->ap_mac_addr,/* MacAddr */ 0, /* DefaultKey */ key); /* KeyContent */ } } end_hw_sec: mutex_unlock(&priv->wx_mutex); return ret; } static int r8192_wx_set_auth(struct net_device *dev, struct iw_request_info *info, union iwreq_data *data, char *extra) { int ret = 0; struct r8192_priv *priv = ieee80211_priv(dev); mutex_lock(&priv->wx_mutex); ret = ieee80211_wx_set_auth(priv->ieee80211, info, &(data->param), extra); mutex_unlock(&priv->wx_mutex); return ret; } static int r8192_wx_set_mlme(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret = 0; struct r8192_priv *priv = ieee80211_priv(dev); mutex_lock(&priv->wx_mutex); ret = ieee80211_wx_set_mlme(priv->ieee80211, info, wrqu, extra); mutex_unlock(&priv->wx_mutex); return ret; } static int r8192_wx_set_gen_ie(struct net_device *dev, struct iw_request_info *info, union iwreq_data *data, char *extra) { int ret = 0; struct r8192_priv *priv = ieee80211_priv(dev); mutex_lock(&priv->wx_mutex); ret = ieee80211_wx_set_gen_ie(priv->ieee80211, extra, data->data.length); mutex_unlock(&priv->wx_mutex); return ret; } static int dummy(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { return -1; } static iw_handler r8192_wx_handlers[] = { NULL, /* SIOCSIWCOMMIT */ r8192_wx_get_name, /* SIOCGIWNAME */ dummy, /* SIOCSIWNWID */ dummy, /* SIOCGIWNWID */ r8192_wx_set_freq, /* SIOCSIWFREQ */ r8192_wx_get_freq, /* SIOCGIWFREQ */ r8192_wx_set_mode, /* SIOCSIWMODE */ 
r8192_wx_get_mode, /* SIOCGIWMODE */ r8192_wx_set_sens, /* SIOCSIWSENS */ r8192_wx_get_sens, /* SIOCGIWSENS */ NULL, /* SIOCSIWRANGE */ rtl8180_wx_get_range, /* SIOCGIWRANGE */ NULL, /* SIOCSIWPRIV */ NULL, /* SIOCGIWPRIV */ NULL, /* SIOCSIWSTATS */ NULL, /* SIOCGIWSTATS */ dummy, /* SIOCSIWSPY */ dummy, /* SIOCGIWSPY */ NULL, /* SIOCGIWTHRSPY */ NULL, /* SIOCWIWTHRSPY */ r8192_wx_set_wap, /* SIOCSIWAP */ r8192_wx_get_wap, /* SIOCGIWAP */ r8192_wx_set_mlme, /* MLME-- */ dummy, /* SIOCGIWAPLIST -- deprecated */ r8192_wx_set_scan, /* SIOCSIWSCAN */ r8192_wx_get_scan, /* SIOCGIWSCAN */ r8192_wx_set_essid, /* SIOCSIWESSID */ r8192_wx_get_essid, /* SIOCGIWESSID */ dummy, /* SIOCSIWNICKN */ dummy, /* SIOCGIWNICKN */ NULL, /* -- hole -- */ NULL, /* -- hole -- */ r8192_wx_set_rate, /* SIOCSIWRATE */ r8192_wx_get_rate, /* SIOCGIWRATE */ r8192_wx_set_rts, /* SIOCSIWRTS */ r8192_wx_get_rts, /* SIOCGIWRTS */ r8192_wx_set_frag, /* SIOCSIWFRAG */ r8192_wx_get_frag, /* SIOCGIWFRAG */ dummy, /* SIOCSIWTXPOW */ dummy, /* SIOCGIWTXPOW */ r8192_wx_set_retry, /* SIOCSIWRETRY */ r8192_wx_get_retry, /* SIOCGIWRETRY */ r8192_wx_set_enc, /* SIOCSIWENCODE */ r8192_wx_get_enc, /* SIOCGIWENCODE */ r8192_wx_set_power, /* SIOCSIWPOWER */ r8192_wx_get_power, /* SIOCGIWPOWER */ NULL, /*---hole---*/ NULL, /*---hole---*/ r8192_wx_set_gen_ie, /* NULL, */ /* SIOCSIWGENIE */ NULL, /* SIOCSIWGENIE */ r8192_wx_set_auth,/* NULL, */ /* SIOCSIWAUTH */ NULL,/* r8192_wx_get_auth, */ /* NULL, */ /* SIOCSIWAUTH */ r8192_wx_set_enc_ext, /* SIOCSIWENCODEEXT */ NULL,/* r8192_wx_get_enc_ext, *//* NULL, */ /* SIOCSIWENCODEEXT */ NULL, /* SIOCSIWPMKSA */ NULL, /*---hole---*/ }; static const struct iw_priv_args r8192_private_args[] = { { SIOCIWFIRSTPRIV + 0x0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "badcrc" }, { SIOCIWFIRSTPRIV + 0x1, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "activescan" }, { SIOCIWFIRSTPRIV + 0x2, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rawtx" }, { SIOCIWFIRSTPRIV + 0x3, 
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "forcereset" } }; static iw_handler r8192_private_handler[] = { r8192_wx_set_crcmon, r8192_wx_set_scan_type, r8192_wx_set_rawtx, r8192_wx_force_reset, }; struct iw_statistics *r8192_get_wireless_stats(struct net_device *dev) { struct r8192_priv *priv = ieee80211_priv(dev); struct ieee80211_device *ieee = priv->ieee80211; struct iw_statistics *wstats = &priv->wstats; int tmp_level = 0; int tmp_qual = 0; int tmp_noise = 0; if (ieee->state < IEEE80211_LINKED) { wstats->qual.qual = 0; wstats->qual.level = 0; wstats->qual.noise = 0; wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; return wstats; } tmp_level = (&ieee->current_network)->stats.rssi; tmp_qual = (&ieee->current_network)->stats.signal; tmp_noise = (&ieee->current_network)->stats.noise; wstats->qual.level = tmp_level; wstats->qual.qual = tmp_qual; wstats->qual.noise = tmp_noise; wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; return wstats; } const struct iw_handler_def r8192_wx_handlers_def = { .standard = r8192_wx_handlers, .num_standard = ARRAY_SIZE(r8192_wx_handlers), .private = r8192_private_handler, .num_private = ARRAY_SIZE(r8192_private_handler), .num_private_args = sizeof(r8192_private_args) / sizeof(struct iw_priv_args), .get_wireless_stats = r8192_get_wireless_stats, .private_args = (struct iw_priv_args *)r8192_private_args, };
gpl-2.0
fanfuqiang/iec_old
clang-3.2.src/test/SemaTemplate/self-comparison.cpp
73
1108
// RUN: %clang_cc1 -fsyntax-only -verify %s template <int A, int B> void foo() { (void)(A == A); // expected-warning {{self-comparison always evaluates to true}} (void)(A == B); } template <int A, int B> struct S1 { void foo() { (void)(A == A); // expected-warning {{self-comparison always evaluates to true}} (void)(A == B); } }; template <int A, int B> struct S2 { template <typename T> T foo() { (void)(A == A); // expected-warning {{self-comparison always evaluates to true}} (void)(A == B); } }; struct S3 { template <int A, int B> void foo() { (void)(A == A); // expected-warning {{self-comparison always evaluates to true}} (void)(A == B); } }; template <int A> struct S4 { template <int B> void foo() { (void)(A == A); // expected-warning {{self-comparison always evaluates to true}} (void)(A == B); } }; const int N = 42; template <int X> void foo2() { (void)(X == N); (void)(N == X); } void test() { foo<1, 1>(); S1<1, 1> s1; s1.foo(); S2<1, 1> s2; s2.foo<void>(); S3 s3; s3.foo<1, 1>(); S4<1> s4; s4.foo<1>(); foo2<N>(); }
gpl-2.0
pjsports/kernel-2.6.32.9-A88
arch/arm/mach-msm/qdsp6/audiov2/aac_in.c
1609
6176
/* * Copyright (C) 2009 Google, Inc. * Copyright (C) 2009 HTC Corporation * Copyright (c) 2009, Code Aurora Forum. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/fs.h> #include <linux/module.h> #include <linux/miscdevice.h> #include <linux/mutex.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/uaccess.h> #include <linux/msm_audio_aac.h> #include <mach/msm_qdsp6_audiov2.h> #include "dal_audio.h" #include "dal_audio_format.h" struct aac { struct mutex lock; struct msm_audio_aac_enc_config cfg; struct msm_audio_stream_config str_cfg; struct audio_client *audio_client; }; static long q6_aac_in_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct aac *aac = file->private_data; struct adsp_open_command rpc; int sample_rate; int audio_object_type; int index = sizeof(u32); int rc = 0; u32 *aac_type = NULL; mutex_lock(&aac->lock); switch (cmd) { case AUDIO_START: if (aac->audio_client) { rc = -EBUSY; break; } else { tx_clk_freq = 48000; aac->audio_client = q6audio_open(AUDIO_FLAG_READ, aac->str_cfg.buffer_size); if (aac->audio_client < 0) { tx_clk_freq = 8000; rc = -ENOMEM; break; } } memset(&rpc, 0, sizeof(rpc)); rpc.format_block.binary.format = ADSP_AUDIO_FORMAT_MPEG4_AAC; /* only 48k sample rate is supported */ sample_rate = 3; /* AAC OBJECT LC */ audio_object_type = 2; aac_type = (u32 *)rpc.format_block.binary.data; switch (aac->cfg.stream_format) { case AUDIO_AAC_FORMAT_ADTS: /* AAC Encoder expect MPEG4_ADTS media type */ *aac_type = ADSP_AUDIO_AAC_MPEG4_ADTS; break; case AUDIO_AAC_FORMAT_RAW: /* 
for ADIF recording */ *aac_type = ADSP_AUDIO_AAC_RAW; break; } rpc.format_block.binary.data[index++] = (u8)( ((audio_object_type & 0x1F) << 3) | ((sample_rate >> 1) & 0x7)); rpc.format_block.binary.data[index] = (u8)( ((sample_rate & 0x1) << 7) | ((aac->cfg.channels & 0x7) << 3)); rpc.format_block.binary.num_bytes = index + 1; rpc.hdr.opcode = ADSP_AUDIO_IOCTL_CMD_OPEN_READ; rpc.device = ADSP_AUDIO_DEVICE_ID_DEFAULT; rpc.stream_context = ADSP_AUDIO_DEVICE_CONTEXT_RECORD; rpc.buf_max_size = aac->str_cfg.buffer_size; rpc.config.aac.bit_rate = aac->cfg.bit_rate; rpc.config.aac.encoder_mode = ADSP_AUDIO_ENC_AAC_LC_ONLY_MODE; q6audio_start(aac->audio_client, &rpc, sizeof(rpc)); break; case AUDIO_STOP: break; case AUDIO_FLUSH: break; case AUDIO_SET_VOLUME: break; case AUDIO_GET_STREAM_CONFIG: if (copy_to_user((void *)arg, &aac->str_cfg, sizeof(struct msm_audio_stream_config))) rc = -EFAULT; break; case AUDIO_SET_STREAM_CONFIG: if (copy_from_user(&aac->str_cfg, (void *)arg, sizeof(struct msm_audio_stream_config))) { rc = -EFAULT; break; } if (aac->str_cfg.buffer_size < 519) { pr_err("Buffer size too small\n"); rc = -EINVAL; break; } if (aac->str_cfg.buffer_count != 2) pr_info("Buffer count set to 2\n"); break; case AUDIO_SET_AAC_ENC_CONFIG: if (copy_from_user(&aac->cfg, (void *) arg, sizeof(struct msm_audio_aac_enc_config))) { rc = -EFAULT; } if (aac->cfg.channels != 1) { pr_err("only mono is supported\n"); rc = -EINVAL; } if (aac->cfg.sample_rate != 48000) { pr_err("only 48KHz is supported\n"); rc = -EINVAL; } if (aac->cfg.stream_format != AUDIO_AAC_FORMAT_RAW && aac->cfg.stream_format != AUDIO_AAC_FORMAT_ADTS) { pr_err("unsupported AAC format\n"); rc = -EINVAL; } break; case AUDIO_GET_AAC_ENC_CONFIG: if (copy_to_user((void *) arg, &aac->cfg, sizeof(struct msm_audio_aac_enc_config))) { rc = -EFAULT; } break; default: rc = -EINVAL; } mutex_unlock(&aac->lock); return rc; } static int q6_aac_in_open(struct inode *inode, struct file *file) { struct aac *aac; aac = 
kmalloc(sizeof(struct aac), GFP_KERNEL); if (aac == NULL) { pr_err("Could not allocate memory for aac driver\n"); return -ENOMEM; } mutex_init(&aac->lock); file->private_data = aac; aac->audio_client = NULL; aac->str_cfg.buffer_size = 519; aac->str_cfg.buffer_count = 2; aac->cfg.channels = 1; aac->cfg.bit_rate = 192000; aac->cfg.stream_format = AUDIO_AAC_FORMAT_ADTS; aac->cfg.sample_rate = 48000; return 0; } static ssize_t q6_aac_in_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { struct audio_client *ac; struct audio_buffer *ab; const char __user *start = buf; struct aac *aac = file->private_data; int xfer = 0; int res; mutex_lock(&aac->lock); ac = aac->audio_client; if (!ac) { res = -ENODEV; goto fail; } while (count > xfer) { ab = ac->buf + ac->cpu_buf; if (ab->used) wait_event(ac->wait, (ab->used == 0)); xfer = ab->actual_size; if (copy_to_user(buf, ab->data, xfer)) { res = -EFAULT; goto fail; } buf += xfer; count -= xfer; ab->used = 1; q6audio_read(ac, ab); ac->cpu_buf ^= 1; } res = buf - start; fail: mutex_unlock(&aac->lock); return res; } static int q6_aac_in_release(struct inode *inode, struct file *file) { int rc = 0; struct aac *aac = file->private_data; mutex_lock(&aac->lock); if (aac->audio_client) rc = q6audio_close(aac->audio_client); mutex_unlock(&aac->lock); kfree(aac); tx_clk_freq = 8000; return rc; } static const struct file_operations q6_aac_in_fops = { .owner = THIS_MODULE, .open = q6_aac_in_open, .read = q6_aac_in_read, .release = q6_aac_in_release, .unlocked_ioctl = q6_aac_in_ioctl, }; struct miscdevice q6_aac_in_misc = { .minor = MISC_DYNAMIC_MINOR, .name = "msm_aac_in", .fops = &q6_aac_in_fops, }; static int __init q6_aac_in_init(void) { return misc_register(&q6_aac_in_misc); } device_initcall(q6_aac_in_init);
gpl-2.0
attn1/android_kernel_pantech_p9070
drivers/net/chelsio/sge.c
2377
59624
/***************************************************************************** * * * File: sge.c * * $Revision: 1.26 $ * * $Date: 2005/06/21 18:29:48 $ * * Description: * * DMA engine. * * part of the Chelsio 10Gb Ethernet Driver. * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License, version 2, as * * published by the Free Software Foundation. * * * * You should have received a copy of the GNU General Public License along * * with this program; if not, write to the Free Software Foundation, Inc., * * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * * * http://www.chelsio.com * * * * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * * All rights reserved. * * * * Maintainers: maintainers@chelsio.com * * * * Authors: Dimitrios Michailidis <dm@chelsio.com> * * Tina Yang <tainay@chelsio.com> * * Felix Marti <felix@chelsio.com> * * Scott Bardone <sbardone@chelsio.com> * * Kurt Ottaway <kottaway@chelsio.com> * * Frank DiMambro <frank@chelsio.com> * * * * History: * * * ****************************************************************************/ #include "common.h" #include <linux/types.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/ktime.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/if_vlan.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/tcp.h> #include <linux/ip.h> #include <linux/in.h> #include <linux/if_arp.h> #include <linux/slab.h> #include <linux/prefetch.h> #include "cpl5_cmd.h" #include "sge.h" #include "regs.h" #include "espi.h" /* This belongs in if_ether.h */ #define ETH_P_CPL5 0xf #define SGE_CMDQ_N 2 #define SGE_FREELQ_N 2 #define SGE_CMDQ0_E_N 1024 #define 
SGE_CMDQ1_E_N 128 #define SGE_FREEL_SIZE 4096 #define SGE_JUMBO_FREEL_SIZE 512 #define SGE_FREEL_REFILL_THRESH 16 #define SGE_RESPQ_E_N 1024 #define SGE_INTRTIMER_NRES 1000 #define SGE_RX_SM_BUF_SIZE 1536 #define SGE_TX_DESC_MAX_PLEN 16384 #define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4) /* * Period of the TX buffer reclaim timer. This timer does not need to run * frequently as TX buffers are usually reclaimed by new TX packets. */ #define TX_RECLAIM_PERIOD (HZ / 4) #define M_CMD_LEN 0x7fffffff #define V_CMD_LEN(v) (v) #define G_CMD_LEN(v) ((v) & M_CMD_LEN) #define V_CMD_GEN1(v) ((v) << 31) #define V_CMD_GEN2(v) (v) #define F_CMD_DATAVALID (1 << 1) #define F_CMD_SOP (1 << 2) #define V_CMD_EOP(v) ((v) << 3) /* * Command queue, receive buffer list, and response queue descriptors. */ #if defined(__BIG_ENDIAN_BITFIELD) struct cmdQ_e { u32 addr_lo; u32 len_gen; u32 flags; u32 addr_hi; }; struct freelQ_e { u32 addr_lo; u32 len_gen; u32 gen2; u32 addr_hi; }; struct respQ_e { u32 Qsleeping : 4; u32 Cmdq1CreditReturn : 5; u32 Cmdq1DmaComplete : 5; u32 Cmdq0CreditReturn : 5; u32 Cmdq0DmaComplete : 5; u32 FreelistQid : 2; u32 CreditValid : 1; u32 DataValid : 1; u32 Offload : 1; u32 Eop : 1; u32 Sop : 1; u32 GenerationBit : 1; u32 BufferLength; }; #elif defined(__LITTLE_ENDIAN_BITFIELD) struct cmdQ_e { u32 len_gen; u32 addr_lo; u32 addr_hi; u32 flags; }; struct freelQ_e { u32 len_gen; u32 addr_lo; u32 addr_hi; u32 gen2; }; struct respQ_e { u32 BufferLength; u32 GenerationBit : 1; u32 Sop : 1; u32 Eop : 1; u32 Offload : 1; u32 DataValid : 1; u32 CreditValid : 1; u32 FreelistQid : 2; u32 Cmdq0DmaComplete : 5; u32 Cmdq0CreditReturn : 5; u32 Cmdq1DmaComplete : 5; u32 Cmdq1CreditReturn : 5; u32 Qsleeping : 4; } ; #endif /* * SW Context Command and Freelist Queue Descriptors */ struct cmdQ_ce { struct sk_buff *skb; DEFINE_DMA_UNMAP_ADDR(dma_addr); DEFINE_DMA_UNMAP_LEN(dma_len); }; struct freelQ_ce { struct sk_buff *skb; DEFINE_DMA_UNMAP_ADDR(dma_addr); 
DEFINE_DMA_UNMAP_LEN(dma_len); }; /* * SW command, freelist and response rings */ struct cmdQ { unsigned long status; /* HW DMA fetch status */ unsigned int in_use; /* # of in-use command descriptors */ unsigned int size; /* # of descriptors */ unsigned int processed; /* total # of descs HW has processed */ unsigned int cleaned; /* total # of descs SW has reclaimed */ unsigned int stop_thres; /* SW TX queue suspend threshold */ u16 pidx; /* producer index (SW) */ u16 cidx; /* consumer index (HW) */ u8 genbit; /* current generation (=valid) bit */ u8 sop; /* is next entry start of packet? */ struct cmdQ_e *entries; /* HW command descriptor Q */ struct cmdQ_ce *centries; /* SW command context descriptor Q */ dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */ spinlock_t lock; /* Lock to protect cmdQ enqueuing */ }; struct freelQ { unsigned int credits; /* # of available RX buffers */ unsigned int size; /* free list capacity */ u16 pidx; /* producer index (SW) */ u16 cidx; /* consumer index (HW) */ u16 rx_buffer_size; /* Buffer size on this free list */ u16 dma_offset; /* DMA offset to align IP headers */ u16 recycleq_idx; /* skb recycle q to use */ u8 genbit; /* current generation (=valid) bit */ struct freelQ_e *entries; /* HW freelist descriptor Q */ struct freelQ_ce *centries; /* SW freelist context descriptor Q */ dma_addr_t dma_addr; /* DMA addr HW freelist descriptor Q */ }; struct respQ { unsigned int credits; /* credits to be returned to SGE */ unsigned int size; /* # of response Q descriptors */ u16 cidx; /* consumer index (SW) */ u8 genbit; /* current generation(=valid) bit */ struct respQ_e *entries; /* HW response descriptor Q */ dma_addr_t dma_addr; /* DMA addr HW response descriptor Q */ }; /* Bit flags for cmdQ.status */ enum { CMDQ_STAT_RUNNING = 1, /* fetch engine is running */ CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */ }; /* T204 TX SW scheduler */ /* Per T204 TX port */ struct sched_port { unsigned int avail; /* available 
bits - quota */ unsigned int drain_bits_per_1024ns; /* drain rate */ unsigned int speed; /* drain rate, mbps */ unsigned int mtu; /* mtu size */ struct sk_buff_head skbq; /* pending skbs */ }; /* Per T204 device */ struct sched { ktime_t last_updated; /* last time quotas were computed */ unsigned int max_avail; /* max bits to be sent to any port */ unsigned int port; /* port index (round robin ports) */ unsigned int num; /* num skbs in per port queues */ struct sched_port p[MAX_NPORTS]; struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */ }; static void restart_sched(unsigned long); /* * Main SGE data structure * * Interrupts are handled by a single CPU and it is likely that on a MP system * the application is migrated to another CPU. In that scenario, we try to * separate the RX(in irq context) and TX state in order to decrease memory * contention. */ struct sge { struct adapter *adapter; /* adapter backpointer */ struct net_device *netdev; /* netdevice backpointer */ struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */ struct respQ respQ; /* response Q */ unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */ unsigned int rx_pkt_pad; /* RX padding for L2 packets */ unsigned int jumbo_fl; /* jumbo freelist Q index */ unsigned int intrtimer_nres; /* no-resource interrupt timer */ unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */ struct timer_list tx_reclaim_timer; /* reclaims TX buffers */ struct timer_list espibug_timer; unsigned long espibug_timeout; struct sk_buff *espibug_skb[MAX_NPORTS]; u32 sge_control; /* shadow value of sge control reg */ struct sge_intr_counts stats; struct sge_port_stats __percpu *port_stats[MAX_NPORTS]; struct sched *tx_sched; struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp; }; static const u8 ch_mac_addr[ETH_ALEN] = { 0x0, 0x7, 0x43, 0x0, 0x0, 0x0 }; /* * stop tasklet and free all pending skb's */ static void tx_sched_stop(struct sge *sge) { struct sched *s = sge->tx_sched; int 
i; tasklet_kill(&s->sched_tsk); for (i = 0; i < MAX_NPORTS; i++) __skb_queue_purge(&s->p[s->port].skbq); } /* * t1_sched_update_parms() is called when the MTU or link speed changes. It * re-computes scheduler parameters to scope with the change. */ unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port, unsigned int mtu, unsigned int speed) { struct sched *s = sge->tx_sched; struct sched_port *p = &s->p[port]; unsigned int max_avail_segs; pr_debug("t1_sched_update_params mtu=%d speed=%d\n", mtu, speed); if (speed) p->speed = speed; if (mtu) p->mtu = mtu; if (speed || mtu) { unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40); do_div(drain, (p->mtu + 50) * 1000); p->drain_bits_per_1024ns = (unsigned int) drain; if (p->speed < 1000) p->drain_bits_per_1024ns = 90 * p->drain_bits_per_1024ns / 100; } if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) { p->drain_bits_per_1024ns -= 16; s->max_avail = max(4096U, p->mtu + 16 + 14 + 4); max_avail_segs = max(1U, 4096 / (p->mtu - 40)); } else { s->max_avail = 16384; max_avail_segs = max(1U, 9000 / (p->mtu - 40)); } pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u " "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu, p->speed, s->max_avail, max_avail_segs, p->drain_bits_per_1024ns); return max_avail_segs * (p->mtu - 40); } #if 0 /* * t1_sched_max_avail_bytes() tells the scheduler the maximum amount of * data that can be pushed per port. */ void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val) { struct sched *s = sge->tx_sched; unsigned int i; s->max_avail = val; for (i = 0; i < MAX_NPORTS; i++) t1_sched_update_parms(sge, i, 0, 0); } /* * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port * is draining. 
*/ void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port, unsigned int val) { struct sched *s = sge->tx_sched; struct sched_port *p = &s->p[port]; p->drain_bits_per_1024ns = val * 1024 / 1000; t1_sched_update_parms(sge, port, 0, 0); } #endif /* 0 */ /* * get_clock() implements a ns clock (see ktime_get) */ static inline ktime_t get_clock(void) { struct timespec ts; ktime_get_ts(&ts); return timespec_to_ktime(ts); } /* * tx_sched_init() allocates resources and does basic initialization. */ static int tx_sched_init(struct sge *sge) { struct sched *s; int i; s = kzalloc(sizeof (struct sched), GFP_KERNEL); if (!s) return -ENOMEM; pr_debug("tx_sched_init\n"); tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge); sge->tx_sched = s; for (i = 0; i < MAX_NPORTS; i++) { skb_queue_head_init(&s->p[i].skbq); t1_sched_update_parms(sge, i, 1500, 1000); } return 0; } /* * sched_update_avail() computes the delta since the last time it was called * and updates the per port quota (number of bits that can be sent to the any * port). */ static inline int sched_update_avail(struct sge *sge) { struct sched *s = sge->tx_sched; ktime_t now = get_clock(); unsigned int i; long long delta_time_ns; delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated)); pr_debug("sched_update_avail delta=%lld\n", delta_time_ns); if (delta_time_ns < 15000) return 0; for (i = 0; i < MAX_NPORTS; i++) { struct sched_port *p = &s->p[i]; unsigned int delta_avail; delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13; p->avail = min(p->avail + delta_avail, s->max_avail); } s->last_updated = now; return 1; } /* * sched_skb() is called from two different places. In the tx path, any * packet generating load on an output port will call sched_skb() * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq * context (skb == NULL). * The scheduler only returns a skb (which will then be sent) if the * length of the skb is <= the current quota of the output port. 
*/ static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb, unsigned int credits) { struct sched *s = sge->tx_sched; struct sk_buff_head *skbq; unsigned int i, len, update = 1; pr_debug("sched_skb %p\n", skb); if (!skb) { if (!s->num) return NULL; } else { skbq = &s->p[skb->dev->if_port].skbq; __skb_queue_tail(skbq, skb); s->num++; skb = NULL; } if (credits < MAX_SKB_FRAGS + 1) goto out; again: for (i = 0; i < MAX_NPORTS; i++) { s->port = (s->port + 1) & (MAX_NPORTS - 1); skbq = &s->p[s->port].skbq; skb = skb_peek(skbq); if (!skb) continue; len = skb->len; if (len <= s->p[s->port].avail) { s->p[s->port].avail -= len; s->num--; __skb_unlink(skb, skbq); goto out; } skb = NULL; } if (update-- && sched_update_avail(sge)) goto again; out: /* If there are more pending skbs, we use the hardware to schedule us * again. */ if (s->num && !skb) { struct cmdQ *q = &sge->cmdQ[0]; clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL); } } pr_debug("sched_skb ret %p\n", skb); return skb; } /* * PIO to indicate that memory mapped Q contains valid descriptor(s). */ static inline void doorbell_pio(struct adapter *adapter, u32 val) { wmb(); writel(val, adapter->regs + A_SG_DOORBELL); } /* * Frees all RX buffers on the freelist Q. The caller must make sure that * the SGE is turned off before calling this function. */ static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q) { unsigned int cidx = q->cidx; while (q->credits--) { struct freelQ_ce *ce = &q->centries[cidx]; pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr), dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); dev_kfree_skb(ce->skb); ce->skb = NULL; if (++cidx == q->size) cidx = 0; } } /* * Free RX free list and response queue resources. 
*/ static void free_rx_resources(struct sge *sge) { struct pci_dev *pdev = sge->adapter->pdev; unsigned int size, i; if (sge->respQ.entries) { size = sizeof(struct respQ_e) * sge->respQ.size; pci_free_consistent(pdev, size, sge->respQ.entries, sge->respQ.dma_addr); } for (i = 0; i < SGE_FREELQ_N; i++) { struct freelQ *q = &sge->freelQ[i]; if (q->centries) { free_freelQ_buffers(pdev, q); kfree(q->centries); } if (q->entries) { size = sizeof(struct freelQ_e) * q->size; pci_free_consistent(pdev, size, q->entries, q->dma_addr); } } } /* * Allocates basic RX resources, consisting of memory mapped freelist Qs and a * response queue. */ static int alloc_rx_resources(struct sge *sge, struct sge_params *p) { struct pci_dev *pdev = sge->adapter->pdev; unsigned int size, i; for (i = 0; i < SGE_FREELQ_N; i++) { struct freelQ *q = &sge->freelQ[i]; q->genbit = 1; q->size = p->freelQ_size[i]; q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN; size = sizeof(struct freelQ_e) * q->size; q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr); if (!q->entries) goto err_no_mem; size = sizeof(struct freelQ_ce) * q->size; q->centries = kzalloc(size, GFP_KERNEL); if (!q->centries) goto err_no_mem; } /* * Calculate the buffer sizes for the two free lists. FL0 accommodates * regular sized Ethernet frames, FL1 is sized not to exceed 16K, * including all the sk_buff overhead. * * Note: For T2 FL0 and FL1 are reversed. */ sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data) + sge->freelQ[!sge->jumbo_fl].dma_offset; size = (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); sge->freelQ[sge->jumbo_fl].rx_buffer_size = size; /* * Setup which skb recycle Q should be used when recycling buffers from * each free list. 
*/ sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0; sge->freelQ[sge->jumbo_fl].recycleq_idx = 1; sge->respQ.genbit = 1; sge->respQ.size = SGE_RESPQ_E_N; sge->respQ.credits = 0; size = sizeof(struct respQ_e) * sge->respQ.size; sge->respQ.entries = pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr); if (!sge->respQ.entries) goto err_no_mem; return 0; err_no_mem: free_rx_resources(sge); return -ENOMEM; } /* * Reclaims n TX descriptors and frees the buffers associated with them. */ static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n) { struct cmdQ_ce *ce; struct pci_dev *pdev = sge->adapter->pdev; unsigned int cidx = q->cidx; q->in_use -= n; ce = &q->centries[cidx]; while (n--) { if (likely(dma_unmap_len(ce, dma_len))) { pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr), dma_unmap_len(ce, dma_len), PCI_DMA_TODEVICE); if (q->sop) q->sop = 0; } if (ce->skb) { dev_kfree_skb_any(ce->skb); q->sop = 1; } ce++; if (++cidx == q->size) { cidx = 0; ce = q->centries; } } q->cidx = cidx; } /* * Free TX resources. * * Assumes that SGE is stopped and all interrupts are disabled. */ static void free_tx_resources(struct sge *sge) { struct pci_dev *pdev = sge->adapter->pdev; unsigned int size, i; for (i = 0; i < SGE_CMDQ_N; i++) { struct cmdQ *q = &sge->cmdQ[i]; if (q->centries) { if (q->in_use) free_cmdQ_buffers(sge, q, q->in_use); kfree(q->centries); } if (q->entries) { size = sizeof(struct cmdQ_e) * q->size; pci_free_consistent(pdev, size, q->entries, q->dma_addr); } } } /* * Allocates basic TX resources, consisting of memory mapped command Qs. 
*/ static int alloc_tx_resources(struct sge *sge, struct sge_params *p) { struct pci_dev *pdev = sge->adapter->pdev; unsigned int size, i; for (i = 0; i < SGE_CMDQ_N; i++) { struct cmdQ *q = &sge->cmdQ[i]; q->genbit = 1; q->sop = 1; q->size = p->cmdQ_size[i]; q->in_use = 0; q->status = 0; q->processed = q->cleaned = 0; q->stop_thres = 0; spin_lock_init(&q->lock); size = sizeof(struct cmdQ_e) * q->size; q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr); if (!q->entries) goto err_no_mem; size = sizeof(struct cmdQ_ce) * q->size; q->centries = kzalloc(size, GFP_KERNEL); if (!q->centries) goto err_no_mem; } /* * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE * only. For queue 0 set the stop threshold so we can handle one more * packet from each port, plus reserve an additional 24 entries for * Ethernet packets only. Queue 1 never suspends nor do we reserve * space for Ethernet packets. */ sge->cmdQ[0].stop_thres = sge->adapter->params.nports * (MAX_SKB_FRAGS + 1); return 0; err_no_mem: free_tx_resources(sge); return -ENOMEM; } static inline void setup_ring_params(struct adapter *adapter, u64 addr, u32 size, int base_reg_lo, int base_reg_hi, int size_reg) { writel((u32)addr, adapter->regs + base_reg_lo); writel(addr >> 32, adapter->regs + base_reg_hi); writel(size, adapter->regs + size_reg); } /* * Enable/disable VLAN acceleration. */ void t1_set_vlan_accel(struct adapter *adapter, int on_off) { struct sge *sge = adapter->sge; sge->sge_control &= ~F_VLAN_XTRACT; if (on_off) sge->sge_control |= F_VLAN_XTRACT; if (adapter->open_device_map) { writel(sge->sge_control, adapter->regs + A_SG_CONTROL); readl(adapter->regs + A_SG_CONTROL); /* flush */ } } /* * Programs the various SGE registers. However, the engine is not yet enabled, * but sge->sge_control is setup and ready to go. 
 */
static void configure_sge(struct sge *sge, struct sge_params *p)
{
	struct adapter *ap = sge->adapter;

	/* Keep the engine disabled while programming the rings */
	writel(0, ap->regs + A_SG_CONTROL);
	setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
			  A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
	setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
			  A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
	setup_ring_params(ap, sge->freelQ[0].dma_addr,
			  sge->freelQ[0].size, A_SG_FL0BASELWR,
			  A_SG_FL0BASEUPR, A_SG_FL0SIZE);
	setup_ring_params(ap, sge->freelQ[1].dma_addr,
			  sge->freelQ[1].size, A_SG_FL1BASELWR,
			  A_SG_FL1BASEUPR, A_SG_FL1SIZE);

	/* The threshold comparison uses <. */
	writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);

	setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
			  A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
	writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);

	sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
		F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
		V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
		V_RX_PKT_OFFSET(sge->rx_pkt_pad);

#if defined(__BIG_ENDIAN_BITFIELD)
	sge->sge_control |= F_ENABLE_BIG_ENDIAN;
#endif

	/* Initialize no-resource timer */
	sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);

	t1_sge_set_coalesce_params(sge, p);
}

/*
 * Return the payload capacity of the jumbo free-list buffers.
 */
static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
{
	return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
		sge->freelQ[sge->jumbo_fl].dma_offset -
		sizeof(struct cpl_rx_data);
}

/*
 * Frees all SGE related resources and the sge structure itself
 */
void t1_sge_destroy(struct sge *sge)
{
	int i;

	for_each_port(sge->adapter, i)
		free_percpu(sge->port_stats[i]);

	kfree(sge->tx_sched);
	free_tx_resources(sge);
	free_rx_resources(sge);
	kfree(sge);
}

/*
 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
 * context Q) until the Q is full or alloc_skb fails.
 *
 * It is possible that the generation bits already match, indicating that the
 * buffer is already valid and nothing needs to be done. This happens when we
 * copied a received buffer into a new sk_buff during the interrupt processing.
 *
 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
 * we specify a RX_OFFSET in order to make sure that the IP header is 4B
 * aligned.
 */
static void refill_free_list(struct sge *sge, struct freelQ *q)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	struct freelQ_ce *ce = &q->centries[q->pidx];
	struct freelQ_e *e = &q->entries[q->pidx];
	unsigned int dma_len = q->rx_buffer_size - q->dma_offset;

	while (q->credits < q->size) {
		struct sk_buff *skb;
		dma_addr_t mapping;

		/* GFP_ATOMIC: may be called from interrupt context */
		skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
		if (!skb)
			break;

		skb_reserve(skb, q->dma_offset);
		mapping = pci_map_single(pdev, skb->data, dma_len,
					 PCI_DMA_FROMDEVICE);
		skb_reserve(skb, sge->rx_pkt_pad);

		ce->skb = skb;
		dma_unmap_addr_set(ce, dma_addr, mapping);
		dma_unmap_len_set(ce, dma_len, dma_len);
		e->addr_lo = (u32)mapping;
		e->addr_hi = (u64)mapping >> 32;
		e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
		/* Descriptor fields must be visible before gen2 hands it to HW */
		wmb();
		e->gen2 = V_CMD_GEN2(q->genbit);

		e++;
		ce++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->genbit ^= 1;
			ce = q->centries;
			e = q->entries;
		}
		q->credits++;
	}
}

/*
 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
 * of both rings, we go into 'few interrupt mode' in order to give the system
 * time to free up resources.
*/ static void freelQs_empty(struct sge *sge) { struct adapter *adapter = sge->adapter; u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE); u32 irqholdoff_reg; refill_free_list(sge, &sge->freelQ[0]); refill_free_list(sge, &sge->freelQ[1]); if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) && sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) { irq_reg |= F_FL_EXHAUSTED; irqholdoff_reg = sge->fixed_intrtimer; } else { /* Clear the F_FL_EXHAUSTED interrupts for now */ irq_reg &= ~F_FL_EXHAUSTED; irqholdoff_reg = sge->intrtimer_nres; } writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER); writel(irq_reg, adapter->regs + A_SG_INT_ENABLE); /* We reenable the Qs to force a freelist GTS interrupt later */ doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE); } #define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA) #define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH) #define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \ F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH) /* * Disable SGE Interrupts */ void t1_sge_intr_disable(struct sge *sge) { u32 val = readl(sge->adapter->regs + A_PL_ENABLE); writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE); writel(0, sge->adapter->regs + A_SG_INT_ENABLE); } /* * Enable SGE interrupts. */ void t1_sge_intr_enable(struct sge *sge) { u32 en = SGE_INT_ENABLE; u32 val = readl(sge->adapter->regs + A_PL_ENABLE); if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO) en &= ~F_PACKET_TOO_BIG; writel(en, sge->adapter->regs + A_SG_INT_ENABLE); writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE); } /* * Clear SGE interrupts. 
*/ void t1_sge_intr_clear(struct sge *sge) { writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE); writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE); } /* * SGE 'Error' interrupt handler */ int t1_sge_intr_error_handler(struct sge *sge) { struct adapter *adapter = sge->adapter; u32 cause = readl(adapter->regs + A_SG_INT_CAUSE); if (adapter->port[0].dev->hw_features & NETIF_F_TSO) cause &= ~F_PACKET_TOO_BIG; if (cause & F_RESPQ_EXHAUSTED) sge->stats.respQ_empty++; if (cause & F_RESPQ_OVERFLOW) { sge->stats.respQ_overflow++; pr_alert("%s: SGE response queue overflow\n", adapter->name); } if (cause & F_FL_EXHAUSTED) { sge->stats.freelistQ_empty++; freelQs_empty(sge); } if (cause & F_PACKET_TOO_BIG) { sge->stats.pkt_too_big++; pr_alert("%s: SGE max packet size exceeded\n", adapter->name); } if (cause & F_PACKET_MISMATCH) { sge->stats.pkt_mismatch++; pr_alert("%s: SGE packet mismatch\n", adapter->name); } if (cause & SGE_INT_FATAL) t1_fatal_err(adapter); writel(cause, adapter->regs + A_SG_INT_CAUSE); return 0; } const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge) { return &sge->stats; } void t1_sge_get_port_stats(const struct sge *sge, int port, struct sge_port_stats *ss) { int cpu; memset(ss, 0, sizeof(*ss)); for_each_possible_cpu(cpu) { struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu); ss->rx_cso_good += st->rx_cso_good; ss->tx_cso += st->tx_cso; ss->tx_tso += st->tx_tso; ss->tx_need_hdrroom += st->tx_need_hdrroom; ss->vlan_xtract += st->vlan_xtract; ss->vlan_insert += st->vlan_insert; } } /** * recycle_fl_buf - recycle a free list buffer * @fl: the free list * @idx: index of buffer to recycle * * Recycles the specified buffer on the given free list by adding it at * the next available slot on the list. 
*/ static void recycle_fl_buf(struct freelQ *fl, int idx) { struct freelQ_e *from = &fl->entries[idx]; struct freelQ_e *to = &fl->entries[fl->pidx]; fl->centries[fl->pidx] = fl->centries[idx]; to->addr_lo = from->addr_lo; to->addr_hi = from->addr_hi; to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit); wmb(); to->gen2 = V_CMD_GEN2(fl->genbit); fl->credits++; if (++fl->pidx == fl->size) { fl->pidx = 0; fl->genbit ^= 1; } } static int copybreak __read_mostly = 256; module_param(copybreak, int, 0); MODULE_PARM_DESC(copybreak, "Receive copy threshold"); /** * get_packet - return the next ingress packet buffer * @pdev: the PCI device that received the packet * @fl: the SGE free list holding the packet * @len: the actual packet length, excluding any SGE padding * * Get the next packet from a free list and complete setup of the * sk_buff. If the packet is small we make a copy and recycle the * original buffer, otherwise we use the original buffer itself. If a * positive drop threshold is supplied packets are dropped and their * buffers recycled if (a) the number of remaining buffers is under the * threshold and the packet is too big to copy, or (b) the packet should * be copied but there is no memory for the copy. 
*/ static inline struct sk_buff *get_packet(struct pci_dev *pdev, struct freelQ *fl, unsigned int len) { struct sk_buff *skb; const struct freelQ_ce *ce = &fl->centries[fl->cidx]; if (len < copybreak) { skb = alloc_skb(len + 2, GFP_ATOMIC); if (!skb) goto use_orig_buf; skb_reserve(skb, 2); /* align IP header */ skb_put(skb, len); pci_dma_sync_single_for_cpu(pdev, dma_unmap_addr(ce, dma_addr), dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); skb_copy_from_linear_data(ce->skb, skb->data, len); pci_dma_sync_single_for_device(pdev, dma_unmap_addr(ce, dma_addr), dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); recycle_fl_buf(fl, fl->cidx); return skb; } use_orig_buf: if (fl->credits < 2) { recycle_fl_buf(fl, fl->cidx); return NULL; } pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr), dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); skb = ce->skb; prefetch(skb->data); skb_put(skb, len); return skb; } /** * unexpected_offload - handle an unexpected offload packet * @adapter: the adapter * @fl: the free list that received the packet * * Called when we receive an unexpected offload packet (e.g., the TOE * function is disabled or the card is a NIC). Prints a message and * recycles the buffer. */ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl) { struct freelQ_ce *ce = &fl->centries[fl->cidx]; struct sk_buff *skb = ce->skb; pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr), dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); pr_err("%s: unexpected offload packet, cmd %u\n", adapter->name, *skb->data); recycle_fl_buf(fl, fl->cidx); } /* * T1/T2 SGE limits the maximum DMA size per TX descriptor to * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner. * Note that the *_large_page_tx_descs stuff will be optimized out when * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN. 
* * compute_large_page_descs() computes how many additional descriptors are * required to break down the stack's request. */ static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb) { unsigned int count = 0; if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) { unsigned int nfrags = skb_shinfo(skb)->nr_frags; unsigned int i, len = skb_headlen(skb); while (len > SGE_TX_DESC_MAX_PLEN) { count++; len -= SGE_TX_DESC_MAX_PLEN; } for (i = 0; nfrags--; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; len = frag->size; while (len > SGE_TX_DESC_MAX_PLEN) { count++; len -= SGE_TX_DESC_MAX_PLEN; } } } return count; } /* * Write a cmdQ entry. * * Since this function writes the 'flags' field, it must not be used to * write the first cmdQ entry. */ static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping, unsigned int len, unsigned int gen, unsigned int eop) { BUG_ON(len > SGE_TX_DESC_MAX_PLEN); e->addr_lo = (u32)mapping; e->addr_hi = (u64)mapping >> 32; e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen); e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen); } /* * See comment for previous function. * * write_tx_descs_large_page() writes additional SGE tx descriptors if * *desc_len exceeds HW's capability. 
 */
static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
						     struct cmdQ_e **e,
						     struct cmdQ_ce **ce,
						     unsigned int *gen,
						     dma_addr_t *desc_mapping,
						     unsigned int *desc_len,
						     unsigned int nfrags,
						     struct cmdQ *q)
{
	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
		struct cmdQ_e *e1 = *e;
		struct cmdQ_ce *ce1 = *ce;

		/* Emit one 16KB descriptor per chunk; the final (< 16KB)
		 * remainder, if any, is left for the caller to write. */
		while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
			*desc_len -= SGE_TX_DESC_MAX_PLEN;
			write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
				      *gen, nfrags == 0 && *desc_len == 0);
			ce1->skb = NULL;
			dma_unmap_len_set(ce1, dma_len, 0);
			*desc_mapping += SGE_TX_DESC_MAX_PLEN;
			if (*desc_len) {
				ce1++;
				e1++;
				if (++pidx == q->size) {
					pidx = 0;
					*gen ^= 1;
					ce1 = q->centries;
					e1 = q->entries;
				}
			}
		}
		*e = e1;
		*ce = ce1;
	}
	return pidx;
}

/*
 * Write the command descriptors to transmit the given skb starting at
 * descriptor pidx with the given generation.  The first descriptor's flags
 * are written last (after wmb()) so the HW does not see a partially
 * constructed chain.
 */
static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
				  unsigned int pidx, unsigned int gen,
				  struct cmdQ *q)
{
	dma_addr_t mapping, desc_mapping;
	struct cmdQ_e *e, *e1;
	struct cmdQ_ce *ce;
	unsigned int i, flags, first_desc_len, desc_len,
		nfrags = skb_shinfo(skb)->nr_frags;

	e = e1 = &q->entries[pidx];
	ce = &q->centries[pidx];

	mapping = pci_map_single(adapter->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	desc_mapping = mapping;
	desc_len = skb_headlen(skb);

	flags = F_CMD_DATAVALID | F_CMD_SOP |
		V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
		V_CMD_GEN2(gen);
	first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
		desc_len : SGE_TX_DESC_MAX_PLEN;

	e->addr_lo = (u32)desc_mapping;
	e->addr_hi = (u64)desc_mapping >> 32;
	e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
	ce->skb = NULL;
	dma_unmap_len_set(ce, dma_len, 0);

	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
	    desc_len > SGE_TX_DESC_MAX_PLEN) {
		desc_mapping += first_desc_len;
		desc_len -= first_desc_len;
		e1++;
		ce++;
		if (++pidx == q->size) {
			pidx = 0;
			gen ^= 1;
			e1 = q->entries;
			ce = q->centries;
		}
		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
						 &desc_mapping, &desc_len,
						 nfrags, q);

		if (likely(desc_len))
			write_tx_desc(e1, desc_mapping, desc_len, gen,
				      nfrags == 0);
	}

	/* Record unmap info on the last descriptor of the head segment */
	ce->skb = NULL;
	dma_unmap_addr_set(ce, dma_addr, mapping);
	dma_unmap_len_set(ce, dma_len, skb_headlen(skb));

	for (i = 0; nfrags--; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		e1++;
		ce++;
		if (++pidx == q->size) {
			pidx = 0;
			gen ^= 1;
			e1 = q->entries;
			ce = q->centries;
		}

		mapping = pci_map_page(adapter->pdev, frag->page,
				       frag->page_offset, frag->size,
				       PCI_DMA_TODEVICE);
		desc_mapping = mapping;
		desc_len = frag->size;

		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
						 &desc_mapping, &desc_len,
						 nfrags, q);
		if (likely(desc_len))
			write_tx_desc(e1, desc_mapping, desc_len, gen,
				      nfrags == 0);
		ce->skb = NULL;
		dma_unmap_addr_set(ce, dma_addr, mapping);
		dma_unmap_len_set(ce, dma_len, frag->size);
	}
	/* skb is attached to the final descriptor; freed on reclaim */
	ce->skb = skb;
	wmb();
	/* Writing flags on the first descriptor hands the chain to HW */
	e->flags = flags;
}

/*
 * Clean up completed Tx buffers.
 */
static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	if (reclaim) {
		pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
			 q->processed, q->cleaned);
		free_cmdQ_buffers(sge, q, reclaim);
		q->cleaned += reclaim;
	}
}

/*
 * Called from tasklet. Checks the scheduler for any
 * pending skbs that can be sent.
*/ static void restart_sched(unsigned long arg) { struct sge *sge = (struct sge *) arg; struct adapter *adapter = sge->adapter; struct cmdQ *q = &sge->cmdQ[0]; struct sk_buff *skb; unsigned int credits, queued_skb = 0; spin_lock(&q->lock); reclaim_completed_tx(sge, q); credits = q->size - q->in_use; pr_debug("restart_sched credits=%d\n", credits); while ((skb = sched_skb(sge, NULL, credits)) != NULL) { unsigned int genbit, pidx, count; count = 1 + skb_shinfo(skb)->nr_frags; count += compute_large_page_tx_descs(skb); q->in_use += count; genbit = q->genbit; pidx = q->pidx; q->pidx += count; if (q->pidx >= q->size) { q->pidx -= q->size; q->genbit ^= 1; } write_tx_descs(adapter, skb, pidx, genbit, q); credits = q->size - q->in_use; queued_skb = 1; } if (queued_skb) { clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); } } spin_unlock(&q->lock); } /** * sge_rx - process an ingress ethernet packet * @sge: the sge structure * @fl: the free list that contains the packet buffer * @len: the packet length * * Process an ingress ethernet pakcet and deliver it to the stack. 
*/ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) { struct sk_buff *skb; const struct cpl_rx_pkt *p; struct adapter *adapter = sge->adapter; struct sge_port_stats *st; struct net_device *dev; skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad); if (unlikely(!skb)) { sge->stats.rx_drops++; return; } p = (const struct cpl_rx_pkt *) skb->data; if (p->iff >= adapter->params.nports) { kfree_skb(skb); return; } __skb_pull(skb, sizeof(*p)); st = this_cpu_ptr(sge->port_stats[p->iff]); dev = adapter->port[p->iff].dev; skb->protocol = eth_type_trans(skb, dev); if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff && skb->protocol == htons(ETH_P_IP) && (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) { ++st->rx_cso_good; skb->ip_summed = CHECKSUM_UNNECESSARY; } else skb_checksum_none_assert(skb); if (unlikely(adapter->vlan_grp && p->vlan_valid)) { st->vlan_xtract++; vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, ntohs(p->vlan)); } else netif_receive_skb(skb); } /* * Returns true if a command queue has enough available descriptors that * we can resume Tx operation after temporarily disabling its packet queue. */ static inline int enough_free_Tx_descs(const struct cmdQ *q) { unsigned int r = q->processed - q->cleaned; return q->in_use - r < (q->size >> 1); } /* * Called when sufficient space has become available in the SGE command queues * after the Tx packet schedulers have been suspended to restart the Tx path. */ static void restart_tx_queues(struct sge *sge) { struct adapter *adap = sge->adapter; int i; if (!enough_free_Tx_descs(&sge->cmdQ[0])) return; for_each_port(adap, i) { struct net_device *nd = adap->port[i].dev; if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) && netif_running(nd)) { sge->stats.cmdQ_restarted[2]++; netif_wake_queue(nd); } } } /* * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0 * information. 
 */
static unsigned int update_tx_info(struct adapter *adapter,
				   unsigned int flags,
				   unsigned int pr0)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *cmdq = &sge->cmdQ[0];

	cmdq->processed += pr0;
	if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
		freelQs_empty(sge);
		flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
	}
	if (flags & F_CMDQ0_ENABLE) {
		clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);

		if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
		    !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
			/* Work still pending: re-arm the queue */
			set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
		if (sge->tx_sched)
			tasklet_hi_schedule(&sge->tx_sched->sched_tsk);

		flags &= ~F_CMDQ0_ENABLE;
	}

	if (unlikely(sge->stopped_tx_queues != 0))
		restart_tx_queues(sge);

	return flags;
}

/*
 * Process SGE responses, up to the supplied budget.  Returns the number of
 * responses processed.  A negative budget is effectively unlimited.
 */
static int process_responses(struct adapter *adapter, int budget)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	struct respQ_e *e = &q->entries[q->cidx];
	int done = 0;
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	while (done < budget && e->GenerationBit == q->genbit) {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		/* We batch updates to the TX side to avoid cacheline
		 * ping-pong of TX state information on MP where the sender
		 * might run on a different CPU than this function...
		 */
		if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
			flags = update_tx_info(adapter, flags, cmdq_processed[0]);
			cmdq_processed[0] = 0;
		}

		if (unlikely(cmdq_processed[1] > 16)) {
			sge->cmdQ[1].processed += cmdq_processed[1];
			cmdq_processed[1] = 0;
		}

		if (likely(e->DataValid)) {
			struct freelQ *fl = &sge->freelQ[e->FreelistQid];

			BUG_ON(!e->Sop || !e->Eop);
			if (unlikely(e->Offload))
				unexpected_offload(adapter, fl);
			else
				sge_rx(sge, fl, e->BufferLength);

			++done;

			/*
			 * Note: this depends on each packet consuming a
			 * single free-list buffer; cf. the BUG above.
			 */
			if (++fl->cidx == fl->size)
				fl->cidx = 0;
			prefetch(fl->centries[fl->cidx].skb);

			if (unlikely(--fl->credits <
				     fl->size - SGE_FREEL_REFILL_THRESH))
				refill_free_list(sge, fl);
		} else
			sge->stats.pure_rsps++;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

		/* Return response-queue credits to the HW in batches */
		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
	}

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	return done;
}

/* True iff the next response-queue entry has been written by the HW. */
static inline int responses_pending(const struct adapter *adapter)
{
	const struct respQ *Q = &adapter->sge->respQ;
	const struct respQ_e *e = &Q->entries[Q->cidx];

	return e->GenerationBit == Q->genbit;
}

/*
 * A simpler version of process_responses() that handles only pure (i.e.,
 * non data-carrying) responses.  Such responses are too light-weight to
 * justify calling a softirq when using NAPI, so we handle them specially in
 * hard interrupt context.  The function is called with a pointer to a
 * response, which the caller must ensure is a valid pure response.  Returns
 * 1 if it encounters a valid data-carrying response, 0 otherwise.
*/ static int process_pure_responses(struct adapter *adapter) { struct sge *sge = adapter->sge; struct respQ *q = &sge->respQ; struct respQ_e *e = &q->entries[q->cidx]; const struct freelQ *fl = &sge->freelQ[e->FreelistQid]; unsigned int flags = 0; unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; prefetch(fl->centries[fl->cidx].skb); if (e->DataValid) return 1; do { flags |= e->Qsleeping; cmdq_processed[0] += e->Cmdq0CreditReturn; cmdq_processed[1] += e->Cmdq1CreditReturn; e++; if (unlikely(++q->cidx == q->size)) { q->cidx = 0; q->genbit ^= 1; e = q->entries; } prefetch(e); if (++q->credits > SGE_RESPQ_REPLENISH_THRES) { writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT); q->credits = 0; } sge->stats.pure_rsps++; } while (e->GenerationBit == q->genbit && !e->DataValid); flags = update_tx_info(adapter, flags, cmdq_processed[0]); sge->cmdQ[1].processed += cmdq_processed[1]; return e->GenerationBit == q->genbit; } /* * Handler for new data events when using NAPI. This does not need any locking * or protection from interrupts as data interrupts are off at this point and * other adapter interrupts do not interfere. 
*/ int t1_poll(struct napi_struct *napi, int budget) { struct adapter *adapter = container_of(napi, struct adapter, napi); int work_done = process_responses(adapter, budget); if (likely(work_done < budget)) { napi_complete(napi); writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); } return work_done; } irqreturn_t t1_interrupt(int irq, void *data) { struct adapter *adapter = data; struct sge *sge = adapter->sge; int handled; if (likely(responses_pending(adapter))) { writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); if (napi_schedule_prep(&adapter->napi)) { if (process_pure_responses(adapter)) __napi_schedule(&adapter->napi); else { /* no data, no NAPI needed */ writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); /* undo schedule_prep */ napi_enable(&adapter->napi); } } return IRQ_HANDLED; } spin_lock(&adapter->async_lock); handled = t1_slow_intr_handler(adapter); spin_unlock(&adapter->async_lock); if (!handled) sge->stats.unhandled_irqs++; return IRQ_RETVAL(handled != 0); } /* * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it. * * The code figures out how many entries the sk_buff will require in the * cmdQ and updates the cmdQ data structure with the state once the enqueue * has complete. Then, it doesn't access the global structure anymore, but * uses the corresponding fields on the stack. In conjunction with a spinlock * around that code, we can make the function reentrant without holding the * lock when we actually enqueue (which might be expensive, especially on * architectures with IO MMUs). * * This runs with softirqs disabled. 
 */
static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
		     unsigned int qid, struct net_device *dev)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *q = &sge->cmdQ[qid];
	unsigned int credits, pidx, genbit, count, use_sched_skb = 0;

	if (!spin_trylock(&q->lock))
		return NETDEV_TX_LOCKED;

	reclaim_completed_tx(sge, q);

	pidx = q->pidx;
	credits = q->size - q->in_use;
	count = 1 + skb_shinfo(skb)->nr_frags;
	count += compute_large_page_tx_descs(skb);

	/* Ethernet packet */
	if (unlikely(credits < count)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			set_bit(dev->if_port, &sge->stopped_tx_queues);
			sge->stats.cmdQ_full[2]++;
			pr_err("%s: Tx ring full while queue awake!\n",
			       adapter->name);
		}
		spin_unlock(&q->lock);
		return NETDEV_TX_BUSY;
	}

	/* Stop early so there is still room for one packet per port */
	if (unlikely(credits - count < q->stop_thres)) {
		netif_stop_queue(dev);
		set_bit(dev->if_port, &sge->stopped_tx_queues);
		sge->stats.cmdQ_full[2]++;
	}

	/* T204 cmdQ0 skbs that are destined for a certain port have to go
	 * through the scheduler.
	 */
	if (sge->tx_sched && !qid && skb->dev) {
use_sched:
		use_sched_skb = 1;
		/* Note that the scheduler might return a different skb than
		 * the one passed in.
		 */
		skb = sched_skb(sge, skb, credits);
		if (!skb) {
			spin_unlock(&q->lock);
			return NETDEV_TX_OK;
		}
		pidx = q->pidx;
		count = 1 + skb_shinfo(skb)->nr_frags;
		count += compute_large_page_tx_descs(skb);
	}

	q->in_use += count;
	genbit = q->genbit;
	pidx = q->pidx;
	q->pidx += count;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->genbit ^= 1;
	}
	spin_unlock(&q->lock);

	/* Descriptor writes happen outside the lock; the slot range
	 * [pidx, pidx+count) was reserved above */
	write_tx_descs(adapter, skb, pidx, genbit, q);

	/*
	 * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring
	 * the doorbell if the Q is asleep. There is a natural race, where
	 * the hardware is going to sleep just after we checked, however,
	 * then the interrupt handler will detect the outstanding TX packet
	 * and ring the doorbell for us.
	 */
	if (qid)
		doorbell_pio(adapter, F_CMDQ1_ENABLE);
	else {
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
	}

	if (use_sched_skb) {
		/* Drain further scheduler-queued skbs if the lock is free */
		if (spin_trylock(&q->lock)) {
			credits = q->size - q->in_use;
			skb = NULL;
			goto use_sched;
		}
	}
	return NETDEV_TX_OK;
}

#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))

/*
 *	eth_hdr_len - return the length of an Ethernet header
 *	@data: pointer to the start of the Ethernet header
 *
 *	Returns the length of an Ethernet header, including optional VLAN tag.
 */
static inline int eth_hdr_len(const void *data)
{
	const struct ethhdr *e = data;

	return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
}

/*
 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
 */
netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct sge *sge = adapter->sge;
	struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
	struct cpl_tx_pkt *cpl;
	struct sk_buff *orig_skb = skb;
	int ret;

	if (skb->protocol == htons(ETH_P_CPL5))
		goto send;

	/*
	 * We are using a non-standard hard_header_len.
	 * Allocate more header room in the rare cases it is not big enough.
	 */
	if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
		skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
		++st->tx_need_hdrroom;
		dev_kfree_skb_any(orig_skb);
		if (!skb)
			return NETDEV_TX_OK;
	}

	if (skb_shinfo(skb)->gso_size) {
		int eth_type;
		struct cpl_tx_pkt_lso *hdr;

		++st->tx_tso;

		eth_type = skb_network_offset(skb) == ETH_HLEN ?
			CPL_ETH_II : CPL_ETH_II_VLAN;

		hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
		hdr->opcode = CPL_TX_PKT_LSO;
		hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
		hdr->ip_hdr_words = ip_hdr(skb)->ihl;
		hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
							  skb_shinfo(skb)->gso_size));
		hdr->len = htonl(skb->len - sizeof(*hdr));
		cpl = (struct cpl_tx_pkt *)hdr;
	} else {
		/*
		 * Packets shorter than ETH_HLEN can break the MAC, drop them
		 * early. Also, we may get oversized packets because some
		 * parts of the kernel don't handle our unusual
		 * hard_header_len right, drop those too.
		 */
		if (unlikely(skb->len < ETH_HLEN ||
			     skb->len > dev->mtu + eth_hdr_len(skb->data))) {
			pr_debug("%s: packet size %d hdr %d mtu%d\n", dev->name,
				 skb->len, eth_hdr_len(skb->data), dev->mtu);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    ip_hdr(skb)->protocol == IPPROTO_UDP) {
			/* HW cannot checksum UDP; do it in software */
			if (unlikely(skb_checksum_help(skb))) {
				pr_debug("%s: unable to do udp checksum\n",
					 dev->name);
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}

		/* Hmmm, assuming to catch the gratuitous ARP... and we'll use
		 * it to flush out stuck espi packets...
		 */
		if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
			if (skb->protocol == htons(ETH_P_ARP) &&
			    arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
				adapter->sge->espibug_skb[dev->if_port] = skb;
				/* We want to re-use this skb later. We
				 * simply bump the reference count and it
				 * will not be freed...
				 */
				skb = skb_get(skb);
			}
		}

		cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
		cpl->opcode = CPL_TX_PKT;
		cpl->ip_csum_dis = 1;	/* SW calculates IP csum */
		cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
		/* the length field isn't used so don't bother setting it */

		st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
	}
	cpl->iff = dev->if_port;

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	if (vlan_tx_tag_present(skb)) {
		cpl->vlan_valid = 1;
		cpl->vlan = htons(vlan_tx_tag_get(skb));
		st->vlan_insert++;
	} else
#endif
		cpl->vlan_valid = 0;

send:
	ret = t1_sge_tx(skb, adapter, 0, dev);

	/* If transmit busy, and we reallocated skb's due to headroom limit,
	 * then silently discard to avoid leak.
	 */
	if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
		dev_kfree_skb_any(skb);
		ret = NETDEV_TX_OK;
	}
	return ret;
}

/*
 * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled.
 */
static void sge_tx_reclaim_cb(unsigned long data)
{
	int i;
	struct sge *sge = (struct sge *)data;

	for (i = 0; i < SGE_CMDQ_N; ++i) {
		struct cmdQ *q = &sge->cmdQ[i];

		/* Skip a contended queue; the timer will try again later */
		if (!spin_trylock(&q->lock))
			continue;

		reclaim_completed_tx(sge, q);
		if (i == 0 && q->in_use) {    /* flush pending credits */
			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
		}
		spin_unlock(&q->lock);
	}
	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
}

/*
 * Propagate changes of the SGE coalescing parameters to the HW.
 */
int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
{
	sge->fixed_intrtimer = p->rx_coalesce_usecs *
		core_ticks_per_usec(sge->adapter);
	writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
	return 0;
}

/*
 * Allocates both RX and TX resources and configures the SGE. However,
 * the hardware is not enabled yet.
 */
int t1_sge_configure(struct sge *sge, struct sge_params *p)
{
	if (alloc_rx_resources(sge, p))
		return -ENOMEM;
	if (alloc_tx_resources(sge, p)) {
		free_rx_resources(sge);
		return -ENOMEM;
	}
	configure_sge(sge, p);

	/*
	 * Now that we have sized the free lists calculate the payload
	 * capacity of the large buffers.  Other parts of the driver use
	 * this to set the max offload coalescing size so that RX packets
	 * do not overflow our large buffers.
	 */
	p->large_buf_capacity = jumbo_payload_capacity(sge);
	return 0;
}

/*
 * Disables the DMA engine.
 */
void t1_sge_stop(struct sge *sge)
{
	int i;

	writel(0, sge->adapter->regs + A_SG_CONTROL);
	readl(sge->adapter->regs + A_SG_CONTROL); /* flush */

	if (is_T2(sge->adapter))
		del_timer_sync(&sge->espibug_timer);

	del_timer_sync(&sge->tx_reclaim_timer);
	if (sge->tx_sched)
		tx_sched_stop(sge);

	for (i = 0; i < MAX_NPORTS; i++)
		kfree_skb(sge->espibug_skb[i]);
}

/*
 * Enables the DMA engine.
 */
void t1_sge_start(struct sge *sge)
{
	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);

	writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
	doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
	readl(sge->adapter->regs + A_SG_CONTROL); /* flush */

	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);

	if (is_T2(sge->adapter))
		mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}

/*
 * Callback for the T204 ESPI 'stuck packet feature' workaround.
 * Retransmits the cached (gratuitous ARP) skb on ports whose ESPI state
 * indicates a stall, after rewriting its MAC addresses once.
 */
static void espibug_workaround_t204(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;
	unsigned int nports = adapter->params.nports;
	u32 seop[MAX_NPORTS];

	if (adapter->open_device_map & PORT_MASK) {
		int i;

		if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
			return;

		for (i = 0; i < nports; i++) {
			struct sk_buff *skb = sge->espibug_skb[i];

			if (!netif_running(adapter->port[i].dev) ||
			    netif_queue_stopped(adapter->port[i].dev) ||
			    !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
				continue;

			if (!skb->cb[0]) {
				skb_copy_to_linear_data_offset(skb,
						    sizeof(struct cpl_tx_pkt),
							       ch_mac_addr,
							       ETH_ALEN);
				skb_copy_to_linear_data_offset(skb,
							       skb->len - 10,
							       ch_mac_addr,
							       ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of
			 * the skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}

/* T2 (single-port) variant of the ESPI 'stuck packet' workaround. */
static void espibug_workaround(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;

	if (netif_running(adapter->port[0].dev)) {
	        struct sk_buff *skb = sge->espibug_skb[0];
	        u32 seop = t1_espi_get_mon(adapter, 0x930, 0);

	        if ((seop & 0xfff0fff) == 0xfff && skb) {
	                if (!skb->cb[0]) {
	                        skb_copy_to_linear_data_offset(skb,
						     sizeof(struct cpl_tx_pkt),
							       ch_mac_addr,
							       ETH_ALEN);
	                        skb_copy_to_linear_data_offset(skb,
							       skb->len - 10,
							       ch_mac_addr,
							       ETH_ALEN);
	                        skb->cb[0] = 0xff;
	                }

	                /* bump the reference count to avoid freeing of the
	                 * skb once the DMA has completed.
	                 */
	                skb = skb_get(skb);
	                t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
	        }
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}

/*
 * Creates a t1_sge structure and returns suggested resource parameters.
 */
struct sge * __devinit t1_sge_create(struct adapter *adapter,
				     struct sge_params *p)
{
	struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
	int i;

	if (!sge)
		return NULL;

	sge->adapter = adapter;
	sge->netdev = adapter->port[0].dev;
	sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
	sge->jumbo_fl = t1_is_T1B(adapter) ?
1 : 0; for_each_port(adapter, i) { sge->port_stats[i] = alloc_percpu(struct sge_port_stats); if (!sge->port_stats[i]) goto nomem_port; } init_timer(&sge->tx_reclaim_timer); sge->tx_reclaim_timer.data = (unsigned long)sge; sge->tx_reclaim_timer.function = sge_tx_reclaim_cb; if (is_T2(sge->adapter)) { init_timer(&sge->espibug_timer); if (adapter->params.nports > 1) { tx_sched_init(sge); sge->espibug_timer.function = espibug_workaround_t204; } else sge->espibug_timer.function = espibug_workaround; sge->espibug_timer.data = (unsigned long)sge->adapter; sge->espibug_timeout = 1; /* for T204, every 10ms */ if (adapter->params.nports > 1) sge->espibug_timeout = HZ/100; } p->cmdQ_size[0] = SGE_CMDQ0_E_N; p->cmdQ_size[1] = SGE_CMDQ1_E_N; p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE; p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE; if (sge->tx_sched) { if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) p->rx_coalesce_usecs = 15; else p->rx_coalesce_usecs = 50; } else p->rx_coalesce_usecs = 50; p->coalesce_enable = 0; p->sample_interval_usecs = 0; return sge; nomem_port: while (i >= 0) { free_percpu(sge->port_stats[i]); --i; } kfree(sge); return NULL; }
gpl-2.0
oppo-source/R7plus-5.1-kernel-source
drivers/platform/x86/eeepc-wmi.c
2377
7516
/* * Eee PC WMI hotkey driver * * Copyright(C) 2010 Intel Corporation. * Copyright(C) 2010-2011 Corentin Chary <corentin.chary@gmail.com> * * Portions based on wistron_btns.c: * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz> * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org> * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/input.h> #include <linux/input/sparse-keymap.h> #include <linux/dmi.h> #include <linux/fb.h> #include <acpi/acpi_bus.h> #include "asus-wmi.h" #define EEEPC_WMI_FILE "eeepc-wmi" MODULE_AUTHOR("Corentin Chary <corentin.chary@gmail.com>"); MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver"); MODULE_LICENSE("GPL"); #define EEEPC_ACPI_HID "ASUS010" /* old _HID used in eeepc-laptop */ #define EEEPC_WMI_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000" MODULE_ALIAS("wmi:"EEEPC_WMI_EVENT_GUID); static bool hotplug_wireless; module_param(hotplug_wireless, bool, 0444); MODULE_PARM_DESC(hotplug_wireless, "Enable hotplug for wireless device. 
" "If your laptop needs that, please report to " "acpi4asus-user@lists.sourceforge.net."); /* Values for T101MT "Home" key */ #define HOME_PRESS 0xe4 #define HOME_HOLD 0xea #define HOME_RELEASE 0xe5 static const struct key_entry eeepc_wmi_keymap[] = { { KE_KEY, ASUS_WMI_BRN_DOWN, { KEY_BRIGHTNESSDOWN } }, { KE_KEY, ASUS_WMI_BRN_UP, { KEY_BRIGHTNESSUP } }, /* Sleep already handled via generic ACPI code */ { KE_KEY, 0x30, { KEY_VOLUMEUP } }, { KE_KEY, 0x31, { KEY_VOLUMEDOWN } }, { KE_KEY, 0x32, { KEY_MUTE } }, { KE_KEY, 0x5c, { KEY_F15 } }, /* Power Gear key */ { KE_KEY, 0x5d, { KEY_WLAN } }, { KE_KEY, 0x6b, { KEY_TOUCHPAD_TOGGLE } }, /* Toggle Touchpad */ { KE_KEY, 0x82, { KEY_CAMERA } }, { KE_KEY, 0x83, { KEY_CAMERA_ZOOMIN } }, { KE_KEY, 0x88, { KEY_WLAN } }, { KE_KEY, 0xbd, { KEY_CAMERA } }, { KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } }, { KE_KEY, 0xe0, { KEY_PROG1 } }, /* Task Manager */ { KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */ { KE_KEY, HOME_PRESS, { KEY_CONFIG } }, /* Home/Express gate key */ { KE_KEY, 0xe8, { KEY_SCREENLOCK } }, { KE_KEY, 0xe9, { KEY_DISPLAYTOGGLE } }, { KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } }, { KE_KEY, 0xec, { KEY_CAMERA_UP } }, { KE_KEY, 0xed, { KEY_CAMERA_DOWN } }, { KE_KEY, 0xee, { KEY_CAMERA_LEFT } }, { KE_KEY, 0xef, { KEY_CAMERA_RIGHT } }, { KE_KEY, 0xf3, { KEY_MENU } }, { KE_KEY, 0xf5, { KEY_HOMEPAGE } }, { KE_KEY, 0xf6, { KEY_ESC } }, { KE_END, 0}, }; static struct quirk_entry quirk_asus_unknown = { }; static struct quirk_entry quirk_asus_1000h = { .hotplug_wireless = true, }; static struct quirk_entry quirk_asus_et2012_type1 = { .store_backlight_power = true, }; static struct quirk_entry quirk_asus_et2012_type3 = { .scalar_panel_brightness = true, .store_backlight_power = true, }; static struct quirk_entry quirk_asus_x101ch = { /* We need this when ACPI function doesn't do this well */ .wmi_backlight_power = true, }; static struct quirk_entry *quirks; static void et2012_quirks(void) { const struct dmi_device *dev = NULL; 
char oemstring[30]; while ((dev = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, NULL, dev))) { if (sscanf(dev->name, "AEMS%24c", oemstring) == 1) { if (oemstring[18] == '1') quirks = &quirk_asus_et2012_type1; else if (oemstring[18] == '3') quirks = &quirk_asus_et2012_type3; break; } } } static int dmi_matched(const struct dmi_system_id *dmi) { char *model; quirks = dmi->driver_data; model = (char *)dmi->matches[1].substr; if (unlikely(strncmp(model, "ET2012", 6) == 0)) et2012_quirks(); return 1; } static struct dmi_system_id asus_quirks[] = { { .callback = dmi_matched, .ident = "ASUSTeK Computer INC. 1000H", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_PRODUCT_NAME, "1000H"), }, .driver_data = &quirk_asus_1000h, }, { .callback = dmi_matched, .ident = "ASUSTeK Computer INC. ET2012E/I", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_PRODUCT_NAME, "ET2012"), }, .driver_data = &quirk_asus_unknown, }, { .callback = dmi_matched, .ident = "ASUSTeK Computer INC. X101CH", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_PRODUCT_NAME, "X101CH"), }, .driver_data = &quirk_asus_x101ch, }, { .callback = dmi_matched, .ident = "ASUSTeK Computer INC. 
1015CX", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_PRODUCT_NAME, "1015CX"), }, .driver_data = &quirk_asus_x101ch, }, {}, }; static void eeepc_wmi_key_filter(struct asus_wmi_driver *asus_wmi, int *code, unsigned int *value, bool *autorelease) { switch (*code) { case HOME_PRESS: *value = 1; *autorelease = 0; break; case HOME_HOLD: *code = ASUS_WMI_KEY_IGNORE; break; case HOME_RELEASE: *code = HOME_PRESS; *value = 0; *autorelease = 0; break; } } static acpi_status eeepc_wmi_parse_device(acpi_handle handle, u32 level, void *context, void **retval) { pr_warn("Found legacy ATKD device (%s)\n", EEEPC_ACPI_HID); *(bool *)context = true; return AE_CTRL_TERMINATE; } static int eeepc_wmi_check_atkd(void) { acpi_status status; bool found = false; status = acpi_get_devices(EEEPC_ACPI_HID, eeepc_wmi_parse_device, &found, NULL); if (ACPI_FAILURE(status) || !found) return 0; return -1; } static int eeepc_wmi_probe(struct platform_device *pdev) { if (eeepc_wmi_check_atkd()) { pr_warn("WMI device present, but legacy ATKD device is also " "present and enabled\n"); pr_warn("You probably booted with acpi_osi=\"Linux\" or " "acpi_osi=\"!Windows 2009\"\n"); pr_warn("Can't load eeepc-wmi, use default acpi_osi " "(preferred) or eeepc-laptop\n"); return -EBUSY; } return 0; } static void eeepc_wmi_quirks(struct asus_wmi_driver *driver) { quirks = &quirk_asus_unknown; quirks->hotplug_wireless = hotplug_wireless; dmi_check_system(asus_quirks); driver->quirks = quirks; driver->quirks->wapf = -1; driver->panel_power = FB_BLANK_UNBLANK; } static struct asus_wmi_driver asus_wmi_driver = { .name = EEEPC_WMI_FILE, .owner = THIS_MODULE, .event_guid = EEEPC_WMI_EVENT_GUID, .keymap = eeepc_wmi_keymap, .input_name = "Eee PC WMI hotkeys", .input_phys = EEEPC_WMI_FILE "/input0", .key_filter = eeepc_wmi_key_filter, .probe = eeepc_wmi_probe, .detect_quirks = eeepc_wmi_quirks, }; static int __init eeepc_wmi_init(void) { return asus_wmi_register_driver(&asus_wmi_driver); } 
static void __exit eeepc_wmi_exit(void) { asus_wmi_unregister_driver(&asus_wmi_driver); } module_init(eeepc_wmi_init); module_exit(eeepc_wmi_exit);
gpl-2.0
ryrzy/yoda-kernel-i9300-JB
arch/tile/kernel/smpboot.c
3145
7573
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/kernel_stat.h> #include <linux/bootmem.h> #include <linux/notifier.h> #include <linux/cpu.h> #include <linux/percpu.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/irq.h> #include <asm/mmu_context.h> #include <asm/tlbflush.h> #include <asm/sections.h> /* State of each CPU. */ static DEFINE_PER_CPU(int, cpu_state) = { 0 }; /* The messaging code jumps to this pointer during boot-up */ unsigned long start_cpu_function_addr; /* Called very early during startup to mark boot cpu as online */ void __init smp_prepare_boot_cpu(void) { int cpu = smp_processor_id(); set_cpu_online(cpu, 1); set_cpu_present(cpu, 1); __get_cpu_var(cpu_state) = CPU_ONLINE; init_messaging(); } static void start_secondary(void); /* * Called at the top of init() to launch all the other CPUs. * They run free to complete their initialization and then wait * until they get an IPI from the boot cpu to come online. */ void __init smp_prepare_cpus(unsigned int max_cpus) { long rc; int cpu, cpu_count; int boot_cpu = smp_processor_id(); current_thread_info()->cpu = boot_cpu; /* * Pin this task to the boot CPU while we bring up the others, * just to make sure we don't uselessly migrate as they come up. 
*/ rc = sched_setaffinity(current->pid, cpumask_of(boot_cpu)); if (rc != 0) pr_err("Couldn't set init affinity to boot cpu (%ld)\n", rc); /* Print information about disabled and dataplane cpus. */ print_disabled_cpus(); /* * Tell the messaging subsystem how to respond to the * startup message. We use a level of indirection to avoid * confusing the linker with the fact that the messaging * subsystem is calling __init code. */ start_cpu_function_addr = (unsigned long) &online_secondary; /* Set up thread context for all new processors. */ cpu_count = 1; for (cpu = 0; cpu < NR_CPUS; ++cpu) { struct task_struct *idle; if (cpu == boot_cpu) continue; if (!cpu_possible(cpu)) { /* * Make this processor do nothing on boot. * Note that we don't give the boot_pc function * a stack, so it has to be assembly code. */ per_cpu(boot_sp, cpu) = 0; per_cpu(boot_pc, cpu) = (unsigned long) smp_nap; continue; } /* Create a new idle thread to run start_secondary() */ idle = fork_idle(cpu); if (IS_ERR(idle)) panic("failed fork for CPU %d", cpu); idle->thread.pc = (unsigned long) start_secondary; /* Make this thread the boot thread for this processor */ per_cpu(boot_sp, cpu) = task_ksp0(idle); per_cpu(boot_pc, cpu) = idle->thread.pc; ++cpu_count; } BUG_ON(cpu_count > (max_cpus ? max_cpus : 1)); /* Fire up the other tiles, if any */ init_cpu_present(cpu_possible_mask); if (cpumask_weight(cpu_present_mask) > 1) { mb(); /* make sure all data is visible to new processors */ hv_start_all_tiles(); } } static __initdata struct cpumask init_affinity; static __init int reset_init_affinity(void) { long rc = sched_setaffinity(current->pid, &init_affinity); if (rc != 0) pr_warning("couldn't reset init affinity (%ld)\n", rc); return 0; } late_initcall(reset_init_affinity); static struct cpumask cpu_started __cpuinitdata; /* * Activate a secondary processor. Very minimal; don't add anything * to this path without knowing what you're doing, since SMP booting * is pretty fragile. 
*/ static void __cpuinit start_secondary(void) { int cpuid = smp_processor_id(); /* Set our thread pointer appropriately. */ set_my_cpu_offset(__per_cpu_offset[cpuid]); preempt_disable(); /* * In large machines even this will slow us down, since we * will be contending for for the printk spinlock. */ /* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */ /* Initialize the current asid for our first page table. */ __get_cpu_var(current_asid) = min_asid; /* Set up this thread as another owner of the init_mm */ atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; if (current->mm) BUG(); enter_lazy_tlb(&init_mm, current); /* Allow hypervisor messages to be received */ init_messaging(); local_irq_enable(); /* Indicate that we're ready to come up. */ /* Must not do this before we're ready to receive messages */ if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) { pr_warning("CPU#%d already started!\n", cpuid); for (;;) local_irq_enable(); } smp_nap(); } /* * Bring a secondary processor online. */ void __cpuinit online_secondary(void) { /* * low-memory mappings have been cleared, flush them from * the local TLBs too. */ local_flush_tlb(); BUG_ON(in_interrupt()); /* This must be done before setting cpu_online_mask */ wmb(); /* * We need to hold call_lock, so there is no inconsistency * between the time smp_call_function() determines number of * IPI recipients, and the time when the determination is made * for which cpus receive the IPI. Holding this * lock helps us to not include this cpu in a currently in progress * smp_call_function(). */ ipi_call_lock(); set_cpu_online(smp_processor_id(), 1); ipi_call_unlock(); __get_cpu_var(cpu_state) = CPU_ONLINE; /* Set up tile-specific state for this cpu. 
*/ setup_cpu(0); /* Set up tile-timer clock-event device on this cpu */ setup_tile_timer(); preempt_enable(); cpu_idle(); } int __cpuinit __cpu_up(unsigned int cpu) { /* Wait 5s total for all CPUs for them to come online */ static int timeout; for (; !cpumask_test_cpu(cpu, &cpu_started); timeout++) { if (timeout >= 50000) { pr_info("skipping unresponsive cpu%d\n", cpu); local_irq_enable(); return -EIO; } udelay(100); } local_irq_enable(); per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; /* Unleash the CPU! */ send_IPI_single(cpu, MSG_TAG_START_CPU); while (!cpumask_test_cpu(cpu, cpu_online_mask)) cpu_relax(); return 0; } static void panic_start_cpu(void) { panic("Received a MSG_START_CPU IPI after boot finished."); } void __init smp_cpus_done(unsigned int max_cpus) { int cpu, next, rc; /* Reset the response to a (now illegal) MSG_START_CPU IPI. */ start_cpu_function_addr = (unsigned long) &panic_start_cpu; cpumask_copy(&init_affinity, cpu_online_mask); /* * Pin ourselves to a single cpu in the initial affinity set * so that kernel mappings for the rootfs are not in the dataplane, * if set, and to avoid unnecessary migrating during bringup. * Use the last cpu just in case the whole chip has been * isolated from the scheduler, to keep init away from likely * more useful user code. This also ensures that work scheduled * via schedule_delayed_work() in the init routines will land * on this cpu. */ for (cpu = cpumask_first(&init_affinity); (next = cpumask_next(cpu, &init_affinity)) < nr_cpu_ids; cpu = next) ; rc = sched_setaffinity(current->pid, cpumask_of(cpu)); if (rc != 0) pr_err("Couldn't set init affinity to cpu %d (%d)\n", cpu, rc); }
gpl-2.0
smihir/wireless-testing
lib/ucs2_string.c
3657
1272
#include <linux/ucs2_string.h>
#include <linux/module.h>

/*
 * Count the UCS-2 characters in @s, stopping at the first NUL character
 * or after @maxlength characters, whichever comes first.  The terminator
 * is not included in the count.
 */
unsigned long
ucs2_strnlen(const ucs2_char_t *s, size_t maxlength)
{
	unsigned long n = 0;

	while (n < maxlength && s[n] != 0)
		n++;
	return n;
}
EXPORT_SYMBOL(ucs2_strnlen);

/* Unbounded variant of ucs2_strnlen(). */
unsigned long
ucs2_strlen(const ucs2_char_t *s)
{
	return ucs2_strnlen(s, ~0UL);
}
EXPORT_SYMBOL(ucs2_strlen);

/*
 * Return the size of the string in BYTES, capped at @maxlength bytes.
 * Note: this is NOT the same as the number of unicode characters.
 */
unsigned long
ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength)
{
	unsigned long chars;

	chars = ucs2_strnlen(data, maxlength / sizeof(ucs2_char_t));
	return chars * sizeof(ucs2_char_t);
}
EXPORT_SYMBOL(ucs2_strsize);

/*
 * Compare up to @len characters of @a and @b; returns -1, 0 or 1 in the
 * manner of strncmp().  Comparison stops at the first difference, at a
 * NUL in both strings, or after @len characters.
 */
int
ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len)
{
	for (; len != 0; len--, a++, b++) {
		if (*a < *b)
			return -1;
		if (*a > *b)
			return 1;
		if (*a == 0)	/* implies *b == 0: both strings ended */
			return 0;
	}
	return 0;
}
EXPORT_SYMBOL(ucs2_strncmp);
gpl-2.0
estiko/lenovo_a706_xoplax
drivers/media/common/tuners/mc44s803.c
4681
9105
/* * Driver for Freescale MC44S803 Low Power CMOS Broadband Tuner * * Copyright (c) 2009 Jochen Friedrich <jochen@scram.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.= */ #include <linux/module.h> #include <linux/delay.h> #include <linux/dvb/frontend.h> #include <linux/i2c.h> #include <linux/slab.h> #include "dvb_frontend.h" #include "mc44s803.h" #include "mc44s803_priv.h" #define mc_printk(level, format, arg...) 
\ printk(level "mc44s803: " format , ## arg) /* Writes a single register */ static int mc44s803_writereg(struct mc44s803_priv *priv, u32 val) { u8 buf[3]; struct i2c_msg msg = { .addr = priv->cfg->i2c_address, .flags = 0, .buf = buf, .len = 3 }; buf[0] = (val & 0xff0000) >> 16; buf[1] = (val & 0xff00) >> 8; buf[2] = (val & 0xff); if (i2c_transfer(priv->i2c, &msg, 1) != 1) { mc_printk(KERN_WARNING, "I2C write failed\n"); return -EREMOTEIO; } return 0; } /* Reads a single register */ static int mc44s803_readreg(struct mc44s803_priv *priv, u8 reg, u32 *val) { u32 wval; u8 buf[3]; int ret; struct i2c_msg msg[] = { { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .buf = buf, .len = 3 }, }; wval = MC44S803_REG_SM(MC44S803_REG_DATAREG, MC44S803_ADDR) | MC44S803_REG_SM(reg, MC44S803_D); ret = mc44s803_writereg(priv, wval); if (ret) return ret; if (i2c_transfer(priv->i2c, msg, 1) != 1) { mc_printk(KERN_WARNING, "I2C read failed\n"); return -EREMOTEIO; } *val = (buf[0] << 16) | (buf[1] << 8) | buf[2]; return 0; } static int mc44s803_release(struct dvb_frontend *fe) { struct mc44s803_priv *priv = fe->tuner_priv; fe->tuner_priv = NULL; kfree(priv); return 0; } static int mc44s803_init(struct dvb_frontend *fe) { struct mc44s803_priv *priv = fe->tuner_priv; u32 val; int err; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* Reset chip */ val = MC44S803_REG_SM(MC44S803_REG_RESET, MC44S803_ADDR) | MC44S803_REG_SM(1, MC44S803_RS); err = mc44s803_writereg(priv, val); if (err) goto exit; val = MC44S803_REG_SM(MC44S803_REG_RESET, MC44S803_ADDR); err = mc44s803_writereg(priv, val); if (err) goto exit; /* Power Up and Start Osc */ val = MC44S803_REG_SM(MC44S803_REG_REFOSC, MC44S803_ADDR) | MC44S803_REG_SM(0xC0, MC44S803_REFOSC) | MC44S803_REG_SM(1, MC44S803_OSCSEL); err = mc44s803_writereg(priv, val); if (err) goto exit; val = MC44S803_REG_SM(MC44S803_REG_POWER, MC44S803_ADDR) | MC44S803_REG_SM(0x200, MC44S803_POWER); err = mc44s803_writereg(priv, val); if (err) goto exit; 
msleep(10); val = MC44S803_REG_SM(MC44S803_REG_REFOSC, MC44S803_ADDR) | MC44S803_REG_SM(0x40, MC44S803_REFOSC) | MC44S803_REG_SM(1, MC44S803_OSCSEL); err = mc44s803_writereg(priv, val); if (err) goto exit; msleep(20); /* Setup Mixer */ val = MC44S803_REG_SM(MC44S803_REG_MIXER, MC44S803_ADDR) | MC44S803_REG_SM(1, MC44S803_TRI_STATE) | MC44S803_REG_SM(0x7F, MC44S803_MIXER_RES); err = mc44s803_writereg(priv, val); if (err) goto exit; /* Setup Cirquit Adjust */ val = MC44S803_REG_SM(MC44S803_REG_CIRCADJ, MC44S803_ADDR) | MC44S803_REG_SM(1, MC44S803_G1) | MC44S803_REG_SM(1, MC44S803_G3) | MC44S803_REG_SM(0x3, MC44S803_CIRCADJ_RES) | MC44S803_REG_SM(1, MC44S803_G6) | MC44S803_REG_SM(priv->cfg->dig_out, MC44S803_S1) | MC44S803_REG_SM(0x3, MC44S803_LP) | MC44S803_REG_SM(1, MC44S803_CLRF) | MC44S803_REG_SM(1, MC44S803_CLIF); err = mc44s803_writereg(priv, val); if (err) goto exit; val = MC44S803_REG_SM(MC44S803_REG_CIRCADJ, MC44S803_ADDR) | MC44S803_REG_SM(1, MC44S803_G1) | MC44S803_REG_SM(1, MC44S803_G3) | MC44S803_REG_SM(0x3, MC44S803_CIRCADJ_RES) | MC44S803_REG_SM(1, MC44S803_G6) | MC44S803_REG_SM(priv->cfg->dig_out, MC44S803_S1) | MC44S803_REG_SM(0x3, MC44S803_LP); err = mc44s803_writereg(priv, val); if (err) goto exit; /* Setup Digtune */ val = MC44S803_REG_SM(MC44S803_REG_DIGTUNE, MC44S803_ADDR) | MC44S803_REG_SM(3, MC44S803_XOD); err = mc44s803_writereg(priv, val); if (err) goto exit; /* Setup AGC */ val = MC44S803_REG_SM(MC44S803_REG_LNAAGC, MC44S803_ADDR) | MC44S803_REG_SM(1, MC44S803_AT1) | MC44S803_REG_SM(1, MC44S803_AT2) | MC44S803_REG_SM(1, MC44S803_AGC_AN_DIG) | MC44S803_REG_SM(1, MC44S803_AGC_READ_EN) | MC44S803_REG_SM(1, MC44S803_LNA0); err = mc44s803_writereg(priv, val); if (err) goto exit; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); return 0; exit: if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); mc_printk(KERN_WARNING, "I/O Error\n"); return err; } static int mc44s803_set_params(struct dvb_frontend *fe) { struct mc44s803_priv *priv = 
fe->tuner_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; u32 r1, r2, n1, n2, lo1, lo2, freq, val; int err; priv->frequency = c->frequency; r1 = MC44S803_OSC / 1000000; r2 = MC44S803_OSC / 100000; n1 = (c->frequency + MC44S803_IF1 + 500000) / 1000000; freq = MC44S803_OSC / r1 * n1; lo1 = ((60 * n1) + (r1 / 2)) / r1; freq = freq - c->frequency; n2 = (freq - MC44S803_IF2 + 50000) / 100000; lo2 = ((60 * n2) + (r2 / 2)) / r2; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); val = MC44S803_REG_SM(MC44S803_REG_REFDIV, MC44S803_ADDR) | MC44S803_REG_SM(r1-1, MC44S803_R1) | MC44S803_REG_SM(r2-1, MC44S803_R2) | MC44S803_REG_SM(1, MC44S803_REFBUF_EN); err = mc44s803_writereg(priv, val); if (err) goto exit; val = MC44S803_REG_SM(MC44S803_REG_LO1, MC44S803_ADDR) | MC44S803_REG_SM(n1-2, MC44S803_LO1); err = mc44s803_writereg(priv, val); if (err) goto exit; val = MC44S803_REG_SM(MC44S803_REG_LO2, MC44S803_ADDR) | MC44S803_REG_SM(n2-2, MC44S803_LO2); err = mc44s803_writereg(priv, val); if (err) goto exit; val = MC44S803_REG_SM(MC44S803_REG_DIGTUNE, MC44S803_ADDR) | MC44S803_REG_SM(1, MC44S803_DA) | MC44S803_REG_SM(lo1, MC44S803_LO_REF) | MC44S803_REG_SM(1, MC44S803_AT); err = mc44s803_writereg(priv, val); if (err) goto exit; val = MC44S803_REG_SM(MC44S803_REG_DIGTUNE, MC44S803_ADDR) | MC44S803_REG_SM(2, MC44S803_DA) | MC44S803_REG_SM(lo2, MC44S803_LO_REF) | MC44S803_REG_SM(1, MC44S803_AT); err = mc44s803_writereg(priv, val); if (err) goto exit; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); return 0; exit: if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); mc_printk(KERN_WARNING, "I/O Error\n"); return err; } static int mc44s803_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct mc44s803_priv *priv = fe->tuner_priv; *frequency = priv->frequency; return 0; } static const struct dvb_tuner_ops mc44s803_tuner_ops = { .info = { .name = "Freescale MC44S803", .frequency_min = 48000000, .frequency_max = 1000000000, .frequency_step = 
100000, }, .release = mc44s803_release, .init = mc44s803_init, .set_params = mc44s803_set_params, .get_frequency = mc44s803_get_frequency }; /* This functions tries to identify a MC44S803 tuner by reading the ID register. This is hasty. */ struct dvb_frontend *mc44s803_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mc44s803_config *cfg) { struct mc44s803_priv *priv; u32 reg; u8 id; int ret; reg = 0; priv = kzalloc(sizeof(struct mc44s803_priv), GFP_KERNEL); if (priv == NULL) return NULL; priv->cfg = cfg; priv->i2c = i2c; priv->fe = fe; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* open i2c_gate */ ret = mc44s803_readreg(priv, MC44S803_REG_ID, &reg); if (ret) goto error; id = MC44S803_REG_MS(reg, MC44S803_ID); if (id != 0x14) { mc_printk(KERN_ERR, "unsupported ID " "(%x should be 0x14)\n", id); goto error; } mc_printk(KERN_INFO, "successfully identified (ID = %x)\n", id); memcpy(&fe->ops.tuner_ops, &mc44s803_tuner_ops, sizeof(struct dvb_tuner_ops)); fe->tuner_priv = priv; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c_gate */ return fe; error: if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c_gate */ kfree(priv); return NULL; } EXPORT_SYMBOL(mc44s803_attach); MODULE_AUTHOR("Jochen Friedrich"); MODULE_DESCRIPTION("Freescale MC44S803 silicon tuner driver"); MODULE_LICENSE("GPL");
gpl-2.0
casinobrawl27/android_133-108_dt2w
drivers/scsi/vmw_pvscsi.c
4937
39449
/* * Linux driver for VMware's para-virtualized SCSI HBA. * * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; version 2 of the License and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Maintained by: Arvind Kumar <arvindkumar@vmware.com> * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/pci.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include "vmw_pvscsi.h" #define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver" MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC); MODULE_AUTHOR("VMware, Inc."); MODULE_LICENSE("GPL"); MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING); #define PVSCSI_DEFAULT_NUM_PAGES_PER_RING 8 #define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING 1 #define PVSCSI_DEFAULT_QUEUE_DEPTH 64 #define SGL_SIZE PAGE_SIZE struct pvscsi_sg_list { struct PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT]; }; struct pvscsi_ctx { /* * The index of the context in cmd_map serves as the context ID for a * 1-to-1 mapping completions back to requests. 
*/ struct scsi_cmnd *cmd; struct pvscsi_sg_list *sgl; struct list_head list; dma_addr_t dataPA; dma_addr_t sensePA; dma_addr_t sglPA; }; struct pvscsi_adapter { char *mmioBase; unsigned int irq; u8 rev; bool use_msi; bool use_msix; bool use_msg; spinlock_t hw_lock; struct workqueue_struct *workqueue; struct work_struct work; struct PVSCSIRingReqDesc *req_ring; unsigned req_pages; unsigned req_depth; dma_addr_t reqRingPA; struct PVSCSIRingCmpDesc *cmp_ring; unsigned cmp_pages; dma_addr_t cmpRingPA; struct PVSCSIRingMsgDesc *msg_ring; unsigned msg_pages; dma_addr_t msgRingPA; struct PVSCSIRingsState *rings_state; dma_addr_t ringStatePA; struct pci_dev *dev; struct Scsi_Host *host; struct list_head cmd_pool; struct pvscsi_ctx *cmd_map; }; /* Command line parameters */ static int pvscsi_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_PER_RING; static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING; static int pvscsi_cmd_per_lun = PVSCSI_DEFAULT_QUEUE_DEPTH; static bool pvscsi_disable_msi; static bool pvscsi_disable_msix; static bool pvscsi_use_msg = true; #define PVSCSI_RW (S_IRUSR | S_IWUSR) module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW); MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default=" __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")"); module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW); MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default=" __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")"); module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW); MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default=" __stringify(PVSCSI_MAX_REQ_QUEUE_DEPTH) ")"); module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW); MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)"); module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW); MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)"); 
module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW);
MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");

static const struct pci_device_id pvscsi_pci_tbl[] = {
	{ PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl);

/* Return the generic struct device of the PCI function behind @adapter. */
static struct device *pvscsi_dev(const struct pvscsi_adapter *adapter)
{
	return &(adapter->dev->dev);
}

/*
 * Linear scan of cmd_map for the context currently bound to @cmd;
 * returns NULL if the command is not outstanding.
 */
static struct pvscsi_ctx *
pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
{
	struct pvscsi_ctx *ctx, *end;

	end = &adapter->cmd_map[adapter->req_depth];
	for (ctx = adapter->cmd_map; ctx < end; ctx++)
		if (ctx->cmd == cmd)
			return ctx;

	return NULL;
}

/* Take a free context off cmd_pool and bind it to @cmd; NULL if pool empty. */
static struct pvscsi_ctx *
pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
{
	struct pvscsi_ctx *ctx;

	if (list_empty(&adapter->cmd_pool))
		return NULL;

	ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list);
	ctx->cmd = cmd;
	list_del(&ctx->list);

	return ctx;
}

/* Unbind @ctx from its command and return it to the free pool. */
static void pvscsi_release_context(struct pvscsi_adapter *adapter,
				   struct pvscsi_ctx *ctx)
{
	ctx->cmd = NULL;
	list_add(&ctx->list, &adapter->cmd_pool);
}

/*
 * Map a pvscsi_ctx struct to a context ID field value; we map to a simple
 * non-zero integer. ctx always points to an entry in cmd_map array, hence
 * the return value is always >=1.
 */
static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter,
			      const struct pvscsi_ctx *ctx)
{
	return ctx - adapter->cmd_map + 1;
}

/* Inverse of pvscsi_map_context(): context ID -> cmd_map entry. */
static struct pvscsi_ctx *
pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context)
{
	return &adapter->cmd_map[context - 1];
}

/* MMIO register accessors. */
static void pvscsi_reg_write(const struct pvscsi_adapter *adapter,
			     u32 offset, u32 val)
{
	writel(val, adapter->mmioBase + offset);
}

static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset)
{
	return readl(adapter->mmioBase + offset);
}

static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter)
{
	return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS);
}

/* Ack interrupts by writing the status bits back. */
static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter,
				     u32 val)
{
	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val);
}

/* Enable completion (and, when msg ring is used, message) interrupts. */
static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter)
{
	u32 intr_bits;

	intr_bits = PVSCSI_INTR_CMPL_MASK;
	if (adapter->use_msg)
		intr_bits |= PVSCSI_INTR_MSG_MASK;

	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits);
}

/* Disable all device interrupts. */
static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter)
{
	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0);
}

/*
 * Issue a command to the device: write the opcode, then stream the
 * descriptor into the COMMAND_DATA register one 32-bit word at a time.
 */
static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter,
				  u32 cmd, const void *desc, size_t len)
{
	const u32 *ptr = desc;
	size_t i;

	len /= sizeof(*ptr);
	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd);
	for (i = 0; i < len; i++)
		pvscsi_reg_write(adapter,
				 PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]);
}

/* Ask the device to abort the outstanding request identified by @ctx. */
static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter,
			     const struct pvscsi_ctx *ctx)
{
	struct PVSCSICmdDescAbortCmd cmd = { 0 };

	cmd.target = ctx->cmd->device->id;
	cmd.context = pvscsi_map_context(adapter, ctx);

	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd));
}

/* Doorbell: tell the device new R/W work is on the request ring. */
static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter)
{
	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
}

static void
pvscsi_process_request_ring(const struct pvscsi_adapter *adapter) { pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0); } static int scsi_is_rw(unsigned char op) { return op == READ_6 || op == WRITE_6 || op == READ_10 || op == WRITE_10 || op == READ_12 || op == WRITE_12 || op == READ_16 || op == WRITE_16; } static void pvscsi_kick_io(const struct pvscsi_adapter *adapter, unsigned char op) { if (scsi_is_rw(op)) pvscsi_kick_rw_io(adapter); else pvscsi_process_request_ring(adapter); } static void ll_adapter_reset(const struct pvscsi_adapter *adapter) { dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter); pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0); } static void ll_bus_reset(const struct pvscsi_adapter *adapter) { dev_dbg(pvscsi_dev(adapter), "Reseting bus on %p\n", adapter); pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0); } static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target) { struct PVSCSICmdDescResetDevice cmd = { 0 }; dev_dbg(pvscsi_dev(adapter), "Reseting device: target=%u\n", target); cmd.target = target; pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE, &cmd, sizeof(cmd)); } static void pvscsi_create_sg(struct pvscsi_ctx *ctx, struct scatterlist *sg, unsigned count) { unsigned i; struct PVSCSISGElement *sge; BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT); sge = &ctx->sgl->sge[0]; for (i = 0; i < count; i++, sg++) { sge[i].addr = sg_dma_address(sg); sge[i].length = sg_dma_len(sg); sge[i].flags = 0; } } /* * Map all data buffers for a command into PCI space and * setup the scatter/gather list if needed. 
 */
static void pvscsi_map_buffers(struct pvscsi_adapter *adapter,
			       struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd,
			       struct PVSCSIRingReqDesc *e)
{
	unsigned count;
	unsigned bufflen = scsi_bufflen(cmd);
	struct scatterlist *sg;

	e->dataLen = bufflen;
	e->dataAddr = 0;
	if (bufflen == 0)
		return;

	sg = scsi_sglist(cmd);
	count = scsi_sg_count(cmd);
	if (count != 0) {
		/*
		 * NOTE(review): a negative scsi_dma_map() return (mapping
		 * failure) is not handled here and would take the
		 * single-segment branch below -- confirm against upstream.
		 */
		int segs = scsi_dma_map(cmd);
		if (segs > 1) {
			pvscsi_create_sg(ctx, sg, segs);

			e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
			ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
						    SGL_SIZE, PCI_DMA_TODEVICE);
			e->dataAddr = ctx->sglPA;
		} else
			e->dataAddr = sg_dma_address(sg);
	} else {
		/*
		 * In case there is no S/G list, scsi_sglist points
		 * directly to the buffer.
		 */
		ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
					     cmd->sc_data_direction);
		e->dataAddr = ctx->dataPA;
	}
}

/* Undo every DMA mapping established for @ctx's command (data + sense). */
static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
				 struct pvscsi_ctx *ctx)
{
	struct scsi_cmnd *cmd;
	unsigned bufflen;

	cmd = ctx->cmd;
	bufflen = scsi_bufflen(cmd);

	if (bufflen != 0) {
		unsigned count = scsi_sg_count(cmd);

		if (count != 0) {
			scsi_dma_unmap(cmd);
			if (ctx->sglPA) {
				pci_unmap_single(adapter->dev, ctx->sglPA,
						 SGL_SIZE, PCI_DMA_TODEVICE);
				ctx->sglPA = 0;
			}
		} else
			pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
					 cmd->sc_data_direction);
	}
	if (cmd->sense_buffer)
		pci_unmap_single(adapter->dev, ctx->sensePA,
				 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
}

/*
 * Allocate the shared rings-state page and the req/cmp (and optional msg)
 * rings.  Returns 0 or -ENOMEM; on partial failure the caller tears down
 * via pvscsi_release_resources().
 */
static int __devinit pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
{
	adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
						    &adapter->ringStatePA);
	if (!adapter->rings_state)
		return -ENOMEM;

	adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING,
				 pvscsi_ring_pages);
	/* One context per request-ring entry (see cmd_map allocation). */
	adapter->req_depth = adapter->req_pages
					* PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
	adapter->req_ring = pci_alloc_consistent(adapter->dev,
						 adapter->req_pages * PAGE_SIZE,
						 &adapter->reqRingPA);
	if (!adapter->req_ring)
		return -ENOMEM;

	adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
				 pvscsi_ring_pages);
	adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
						 adapter->cmp_pages * PAGE_SIZE,
						 &adapter->cmpRingPA);
	if (!adapter->cmp_ring)
		return -ENOMEM;

	/* Device takes page frame numbers; base addresses must be aligned. */
	BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE));
	BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE));
	BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE));

	if (!adapter->use_msg)
		return 0;

	adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
				 pvscsi_msg_ring_pages);
	adapter->msg_ring = pci_alloc_consistent(adapter->dev,
						 adapter->msg_pages * PAGE_SIZE,
						 &adapter->msgRingPA);
	if (!adapter->msg_ring)
		return -ENOMEM;
	BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));

	return 0;
}

/* Tell the device where the rings live (as PPNs) and zero them. */
static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
{
	struct PVSCSICmdDescSetupRings cmd = { 0 };
	dma_addr_t base;
	unsigned i;

	cmd.ringsStatePPN   = adapter->ringStatePA >> PAGE_SHIFT;
	cmd.reqRingNumPages = adapter->req_pages;
	cmd.cmpRingNumPages = adapter->cmp_pages;

	base = adapter->reqRingPA;
	for (i = 0; i < adapter->req_pages; i++) {
		cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;
		base += PAGE_SIZE;
	}

	base = adapter->cmpRingPA;
	for (i = 0; i < adapter->cmp_pages; i++) {
		cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT;
		base += PAGE_SIZE;
	}

	memset(adapter->rings_state, 0, PAGE_SIZE);
	memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE);
	memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE);

	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS,
			      &cmd, sizeof(cmd));

	if (adapter->use_msg) {
		struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 };

		cmd_msg.numPages = adapter->msg_pages;

		base = adapter->msgRingPA;
		for (i = 0; i < adapter->msg_pages; i++) {
			cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT;
			base += PAGE_SIZE;
		}
		memset(adapter->msg_ring, 0,
		       adapter->msg_pages * PAGE_SIZE);

		pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING,
				      &cmd_msg, sizeof(cmd_msg));
	}
}

/*
 * Pull a completion descriptor off and pass the completion back
 * to the SCSI mid layer.
 */
static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
				    const struct PVSCSIRingCmpDesc *e)
{
	struct pvscsi_ctx *ctx;
	struct scsi_cmnd *cmd;
	u32 btstat = e->hostStatus;
	u32 sdstat = e->scsiStatus;

	ctx = pvscsi_get_context(adapter, e->context);
	cmd = ctx->cmd;
	/* Release DMA mappings and the context before building the result. */
	pvscsi_unmap_buffers(adapter, ctx);
	pvscsi_release_context(adapter, ctx);
	cmd->result = 0;

	if (sdstat != SAM_STAT_GOOD &&
	    (btstat == BTSTAT_SUCCESS ||
	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
		/* Transport OK, but SCSI status bad: hand status up. */
		cmd->result = (DID_OK << 16) | sdstat;
		if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
			cmd->result |= (DRIVER_SENSE << 24);
	} else
		switch (btstat) {
		case BTSTAT_SUCCESS:
		case BTSTAT_LINKED_COMMAND_COMPLETED:
		case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
			/* If everything went fine, let's move on.. */
			cmd->result = (DID_OK << 16);
			break;

		case BTSTAT_DATARUN:
		case BTSTAT_DATA_UNDERRUN:
			/* Report residual data in underruns */
			scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
			cmd->result = (DID_ERROR << 16);
			break;

		case BTSTAT_SELTIMEO:
			/* Our emulation returns this for non-connected devs */
			cmd->result = (DID_BAD_TARGET << 16);
			break;

		case BTSTAT_LUNMISMATCH:
		case BTSTAT_TAGREJECT:
		case BTSTAT_BADMSG:
			cmd->result = (DRIVER_INVALID << 24);
			/* fall through */

		case BTSTAT_HAHARDWARE:
		case BTSTAT_INVPHASE:
		case BTSTAT_HATIMEOUT:
		case BTSTAT_NORESPONSE:
		case BTSTAT_DISCONNECT:
		case BTSTAT_HASOFTWARE:
		case BTSTAT_BUSFREE:
		case BTSTAT_SENSFAILED:
			cmd->result |= (DID_ERROR << 16);
			break;

		case BTSTAT_SENTRST:
		case BTSTAT_RECVRST:
		case BTSTAT_BUSRESET:
			cmd->result = (DID_RESET << 16);
			break;

		case BTSTAT_ABORTQUEUE:
			cmd->result = (DID_ABORT << 16);
			break;

		case BTSTAT_SCSIPARITY:
			cmd->result = (DID_PARITY << 16);
			break;

		default:
			cmd->result = (DID_ERROR << 16);
			scmd_printk(KERN_DEBUG, cmd,
				    "Unknown completion status: 0x%x\n",
				    btstat);
		}

	dev_dbg(&cmd->device->sdev_gendev,
		"cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n",
		cmd, cmd->cmnd[0], ctx,
		cmd->result, btstat, sdstat);
	cmd->scsi_done(cmd);
}

/*
 * barrier usage : Since the PVSCSI device is emulated, there could be cases
 * where we may want to serialize some accesses between the driver and the
 * emulation layer. We use compiler barriers instead of the more expensive
 * memory barriers because PVSCSI is only supported on X86 which has strong
 * memory access ordering.
 */
static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter)
{
	struct PVSCSIRingsState *s = adapter->rings_state;
	struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring;
	u32 cmp_entries = s->cmpNumEntriesLog2;

	while (s->cmpConsIdx != s->cmpProdIdx) {
		struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx &
						      MASK(cmp_entries));
		/*
		 * This barrier() ensures that *e is not dereferenced while
		 * the device emulation still writes data into the slot.
		 * Since the device emulation advances s->cmpProdIdx only after
		 * updating the slot we want to check it first.
		 */
		barrier();
		pvscsi_complete_request(adapter, e);
		/*
		 * This barrier() ensures that compiler doesn't reorder write
		 * to s->cmpConsIdx before the read of (*e) inside
		 * pvscsi_complete_request. Otherwise, device emulation may
		 * overwrite *e before we had a chance to read it.
		 */
		barrier();
		s->cmpConsIdx++;
	}
}

/*
 * Translate a Linux SCSI request into a request ring entry.
 */
static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
			     struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd)
{
	struct PVSCSIRingsState *s;
	struct PVSCSIRingReqDesc *e;
	struct scsi_device *sdev;
	u32 req_entries;

	s = adapter->rings_state;
	sdev = cmd->device;
	req_entries = s->reqNumEntriesLog2;

	/*
	 * If this condition holds, we might have room on the request ring, but
	 * we might not have room on the completion ring for the response.
	 * However, we have already ruled out this possibility - we would not
	 * have successfully allocated a context if it were true, since we only
	 * have one context per request entry.  Check for it anyway, since it
	 * would be a serious bug.
*/ if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) { scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: " "ring full: reqProdIdx=%d cmpConsIdx=%d\n", s->reqProdIdx, s->cmpConsIdx); return -1; } e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries)); e->bus = sdev->channel; e->target = sdev->id; memset(e->lun, 0, sizeof(e->lun)); e->lun[1] = sdev->lun; if (cmd->sense_buffer) { ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE); e->senseAddr = ctx->sensePA; e->senseLen = SCSI_SENSE_BUFFERSIZE; } else { e->senseLen = 0; e->senseAddr = 0; } e->cdbLen = cmd->cmd_len; e->vcpuHint = smp_processor_id(); memcpy(e->cdb, cmd->cmnd, e->cdbLen); e->tag = SIMPLE_QUEUE_TAG; if (sdev->tagged_supported && (cmd->tag == HEAD_OF_QUEUE_TAG || cmd->tag == ORDERED_QUEUE_TAG)) e->tag = cmd->tag; if (cmd->sc_data_direction == DMA_FROM_DEVICE) e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST; else if (cmd->sc_data_direction == DMA_TO_DEVICE) e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE; else if (cmd->sc_data_direction == DMA_NONE) e->flags = PVSCSI_FLAG_CMD_DIR_NONE; else e->flags = 0; pvscsi_map_buffers(adapter, ctx, cmd, e); e->context = pvscsi_map_context(adapter, ctx); barrier(); s->reqProdIdx++; return 0; } static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { struct Scsi_Host *host = cmd->device->host; struct pvscsi_adapter *adapter = shost_priv(host); struct pvscsi_ctx *ctx; unsigned long flags; spin_lock_irqsave(&adapter->hw_lock, flags); ctx = pvscsi_acquire_context(adapter, cmd); if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) { if (ctx) pvscsi_release_context(adapter, ctx); spin_unlock_irqrestore(&adapter->hw_lock, flags); return SCSI_MLQUEUE_HOST_BUSY; } cmd->scsi_done = done; dev_dbg(&cmd->device->sdev_gendev, "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]); spin_unlock_irqrestore(&adapter->hw_lock, flags); pvscsi_kick_io(adapter, cmd->cmnd[0]); return 0; } static 
DEF_SCSI_QCMD(pvscsi_queue) static int pvscsi_abort(struct scsi_cmnd *cmd) { struct pvscsi_adapter *adapter = shost_priv(cmd->device->host); struct pvscsi_ctx *ctx; unsigned long flags; scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n", adapter->host->host_no, cmd); spin_lock_irqsave(&adapter->hw_lock, flags); /* * Poll the completion ring first - we might be trying to abort * a command that is waiting to be dispatched in the completion ring. */ pvscsi_process_completion_ring(adapter); /* * If there is no context for the command, it either already succeeded * or else was never properly issued. Not our problem. */ ctx = pvscsi_find_context(adapter, cmd); if (!ctx) { scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd); goto out; } pvscsi_abort_cmd(adapter, ctx); pvscsi_process_completion_ring(adapter); out: spin_unlock_irqrestore(&adapter->hw_lock, flags); return SUCCESS; } /* * Abort all outstanding requests. This is only safe to use if the completion * ring will never be walked again or the device has been reset, because it * destroys the 1-1 mapping between context field passed to emulation and our * request structure. 
 */
static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
{
	unsigned i;

	for (i = 0; i < adapter->req_depth; i++) {
		struct pvscsi_ctx *ctx = &adapter->cmd_map[i];
		struct scsi_cmnd *cmd = ctx->cmd;
		if (cmd) {
			scmd_printk(KERN_ERR, cmd,
				    "Forced reset on cmd %p\n", cmd);
			pvscsi_unmap_buffers(adapter, ctx);
			pvscsi_release_context(adapter, ctx);
			cmd->result = (DID_RESET << 16);
			cmd->scsi_done(cmd);
		}
	}
}

/* eh_host_reset_handler: full adapter reset plus ring re-setup. */
static int pvscsi_host_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct pvscsi_adapter *adapter = shost_priv(host);
	unsigned long flags;
	bool use_msg;

	scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n");

	spin_lock_irqsave(&adapter->hw_lock, flags);

	use_msg = adapter->use_msg;

	if (use_msg) {
		/* Disable msg processing, then drop the lock to flush. */
		adapter->use_msg = 0;
		spin_unlock_irqrestore(&adapter->hw_lock, flags);

		/*
		 * Now that we know that the ISR won't add more work on the
		 * workqueue we can safely flush any outstanding work.
		 */
		flush_workqueue(adapter->workqueue);
		spin_lock_irqsave(&adapter->hw_lock, flags);
	}

	/*
	 * We're going to tear down the entire ring structure and set it back
	 * up, so stalling new requests until all completions are flushed and
	 * the rings are back in place.
	 */

	pvscsi_process_request_ring(adapter);

	ll_adapter_reset(adapter);

	/*
	 * Now process any completions.  Note we do this AFTER adapter reset,
	 * which is strange, but stops races where completions get posted
	 * between processing the ring and issuing the reset.  The backend will
	 * not touch the ring memory after reset, so the immediately pre-reset
	 * completion ring state is still valid.
	 */
	pvscsi_process_completion_ring(adapter);

	pvscsi_reset_all(adapter);
	adapter->use_msg = use_msg;
	pvscsi_setup_all_rings(adapter);
	pvscsi_unmask_intr(adapter);

	spin_unlock_irqrestore(&adapter->hw_lock, flags);

	return SUCCESS;
}

/* eh_bus_reset_handler. */
static int pvscsi_bus_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct pvscsi_adapter *adapter = shost_priv(host);
	unsigned long flags;

	scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n");

	/*
	 * We don't want to queue new requests for this bus after
	 * flushing all pending requests to emulation, since new
	 * requests could then sneak in during this bus reset phase,
	 * so take the lock now.
	 */
	spin_lock_irqsave(&adapter->hw_lock, flags);

	pvscsi_process_request_ring(adapter);
	ll_bus_reset(adapter);
	pvscsi_process_completion_ring(adapter);

	spin_unlock_irqrestore(&adapter->hw_lock, flags);

	return SUCCESS;
}

/* eh_device_reset_handler. */
static int pvscsi_device_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct pvscsi_adapter *adapter = shost_priv(host);
	unsigned long flags;

	scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n",
		    host->host_no, cmd->device->id);

	/*
	 * We don't want to queue new requests for this device after flushing
	 * all pending requests to emulation, since new requests could then
	 * sneak in during this device reset phase, so take the lock now.
	 */
	spin_lock_irqsave(&adapter->hw_lock, flags);

	pvscsi_process_request_ring(adapter);
	ll_device_reset(adapter, cmd->device->id);
	pvscsi_process_completion_ring(adapter);

	spin_unlock_irqrestore(&adapter->hw_lock, flags);

	return SUCCESS;
}

static struct scsi_host_template pvscsi_template;

/*
 * .info handler.
 * NOTE(review): formats into a function-local static buffer, so concurrent
 * callers would race; matches the code as written, but not reentrant.
 */
static const char *pvscsi_info(struct Scsi_Host *host)
{
	struct pvscsi_adapter *adapter = shost_priv(host);
	static char buf[256];

	sprintf(buf, "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: "
		"%u/%u/%u pages, cmd_per_lun=%u", adapter->rev,
		adapter->req_pages, adapter->cmp_pages, adapter->msg_pages,
		pvscsi_template.cmd_per_lun);

	return buf;
}

static struct scsi_host_template pvscsi_template = {
	.module				= THIS_MODULE,
	.name				= "VMware PVSCSI Host Adapter",
	.proc_name			= "vmw_pvscsi",
	.info				= pvscsi_info,
	.queuecommand			= pvscsi_queue,
	.this_id			= -1,
	.sg_tablesize			= PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT,
	.dma_boundary			= UINT_MAX,
	.max_sectors			= 0xffff,
	.use_clustering			= ENABLE_CLUSTERING,
	.eh_abort_handler		= pvscsi_abort,
	.eh_device_reset_handler	= pvscsi_device_reset,
	.eh_bus_reset_handler		= pvscsi_bus_reset,
	.eh_host_reset_handler		= pvscsi_host_reset,
};

/* Handle one device hot-add/hot-remove message from the msg ring. */
static void pvscsi_process_msg(const struct pvscsi_adapter *adapter,
			       const struct PVSCSIRingMsgDesc *e)
{
	struct PVSCSIRingsState *s = adapter->rings_state;
	struct Scsi_Host *host = adapter->host;
	struct scsi_device *sdev;

	printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u) \n",
	       e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2);

	BUILD_BUG_ON(PVSCSI_MSG_LAST != 2);

	if (e->type == PVSCSI_MSG_DEV_ADDED) {
		struct PVSCSIMsgDescDevStatusChanged *desc;
		desc = (struct PVSCSIMsgDescDevStatusChanged *)e;

		printk(KERN_INFO
		       "vmw_pvscsi: msg: device added at scsi%u:%u:%u\n",
		       desc->bus, desc->target, desc->lun[1]);

		/* Pin the host across the lookup/add. */
		if (!scsi_host_get(host))
			return;

		sdev = scsi_device_lookup(host, desc->bus, desc->target,
					  desc->lun[1]);
		if (sdev) {
			printk(KERN_INFO "vmw_pvscsi: device already exists\n");
			scsi_device_put(sdev);
		} else
			scsi_add_device(adapter->host, desc->bus,
					desc->target, desc->lun[1]);

		scsi_host_put(host);
	} else if (e->type == PVSCSI_MSG_DEV_REMOVED) {
		struct PVSCSIMsgDescDevStatusChanged *desc;
		desc = (struct PVSCSIMsgDescDevStatusChanged *)e;

		printk(KERN_INFO
		       "vmw_pvscsi: msg: device removed at scsi%u:%u:%u\n",
		       desc->bus, desc->target, desc->lun[1]);

		if (!scsi_host_get(host))
			return;

		sdev = scsi_device_lookup(host, desc->bus, desc->target,
					  desc->lun[1]);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else
			printk(KERN_INFO
			       "vmw_pvscsi: failed to lookup scsi%u:%u:%u\n",
			       desc->bus, desc->target, desc->lun[1]);

		scsi_host_put(host);
	}
}

/* True if the device has posted messages we have not consumed yet. */
static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter)
{
	struct PVSCSIRingsState *s = adapter->rings_state;

	return s->msgProdIdx != s->msgConsIdx;
}

/* Drain the message ring; same barrier discipline as the cmp ring. */
static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter)
{
	struct PVSCSIRingsState *s = adapter->rings_state;
	struct PVSCSIRingMsgDesc *ring = adapter->msg_ring;
	u32 msg_entries = s->msgNumEntriesLog2;

	while (pvscsi_msg_pending(adapter)) {
		struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx &
						      MASK(msg_entries));

		barrier();
		pvscsi_process_msg(adapter, e);
		barrier();
		s->msgConsIdx++;
	}
}

/* Workqueue handler: process device messages outside interrupt context. */
static void pvscsi_msg_workqueue_handler(struct work_struct *data)
{
	struct pvscsi_adapter *adapter;

	adapter = container_of(data, struct pvscsi_adapter, work);

	pvscsi_process_msg_ring(adapter);
}

/*
 * Probe for msg-ring support and, if available, create the singlethread
 * workqueue used to process messages.  Returns 1 when msg support is
 * enabled, 0 otherwise.
 */
static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
{
	char name[32];

	if (!pvscsi_use_msg)
		return 0;

	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
			 PVSCSI_CMD_SETUP_MSG_RING);

	/* Status of -1 means the device does not support the msg ring. */
	if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1)
		return 0;

	snprintf(name, sizeof(name),
		 "vmw_pvscsi_wq_%u", adapter->host->host_no);

	adapter->workqueue = create_singlethread_workqueue(name);
	if (!adapter->workqueue) {
		printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n");
		return 0;
	}
	INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);

	return 1;
}
/*
 * Interrupt handler.  For legacy INTx the status register must be read
 * and acked; MSI/MSI-X interrupts are always ours.
 */
static irqreturn_t pvscsi_isr(int irq, void *devp)
{
	struct pvscsi_adapter *adapter = devp;
	int handled;

	if (adapter->use_msi || adapter->use_msix)
		handled = true;
	else {
		u32 val = pvscsi_read_intr_status(adapter);
		handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0;
		if (handled)
			pvscsi_write_intr_status(devp, val);
	}

	if (handled) {
		unsigned long flags;

		spin_lock_irqsave(&adapter->hw_lock, flags);

		pvscsi_process_completion_ring(adapter);
		/* Messages are deferred to the workqueue (process context). */
		if (adapter->use_msg && pvscsi_msg_pending(adapter))
			queue_work(adapter->workqueue, &adapter->work);

		spin_unlock_irqrestore(&adapter->hw_lock, flags);
	}

	return IRQ_RETVAL(handled);
}

/* Free the per-context s/g pages allocated by pvscsi_allocate_sg(). */
static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
{
	struct pvscsi_ctx *ctx = adapter->cmd_map;
	unsigned i;

	for (i = 0; i < adapter->req_depth; ++i, ++ctx)
		free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE));
}

/* Enable one MSI-X vector; on success *irq receives the vector number. */
static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter,
			     unsigned int *irq)
{
	struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
	int ret;

	ret = pci_enable_msix(adapter->dev, &entry, 1);
	if (ret)
		return ret;

	*irq = entry.vector;

	return 0;
}

/* Release the IRQ and disable MSI/MSI-X.  Safe on partial setup. */
static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
{
	if (adapter->irq) {
		free_irq(adapter->irq, adapter);
		adapter->irq = 0;
	}
	if (adapter->use_msi) {
		pci_disable_msi(adapter->dev);
		adapter->use_msi = 0;
	} else if (adapter->use_msix) {
		pci_disable_msix(adapter->dev);
		adapter->use_msix = 0;
	}
}

/* Tear down everything allocated by probe; tolerates partial setup. */
static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
{
	pvscsi_shutdown_intr(adapter);

	if (adapter->workqueue)
		destroy_workqueue(adapter->workqueue);

	if (adapter->mmioBase)
		pci_iounmap(adapter->dev, adapter->mmioBase);

	pci_release_regions(adapter->dev);

	if (adapter->cmd_map) {
		pvscsi_free_sgls(adapter);
		kfree(adapter->cmd_map);
	}

	if (adapter->rings_state)
		pci_free_consistent(adapter->dev, PAGE_SIZE,
				    adapter->rings_state, adapter->ringStatePA);

	if (adapter->req_ring)
		pci_free_consistent(adapter->dev,
				    adapter->req_pages * PAGE_SIZE,
				    adapter->req_ring,
				    adapter->reqRingPA);

	if (adapter->cmp_ring)
		pci_free_consistent(adapter->dev,
				    adapter->cmp_pages * PAGE_SIZE,
				    adapter->cmp_ring, adapter->cmpRingPA);

	if (adapter->msg_ring)
		pci_free_consistent(adapter->dev,
				    adapter->msg_pages * PAGE_SIZE,
				    adapter->msg_ring, adapter->msgRingPA);
}

/*
 * Allocate scatter gather lists.
 *
 * These are statically allocated.  Trying to be clever was not worth it.
 *
 * Dynamic allocation can fail, and we can't go deep into the memory
 * allocator, since we're a SCSI driver, and trying too hard to allocate
 * memory might generate disk I/O.  We also don't want to fail disk I/O
 * in that case because we can't get an allocation - the I/O could be
 * trying to swap out data to free memory.  Since that is pathological,
 * just use a statically allocated scatter list.
 *
 */
static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
{
	struct pvscsi_ctx *ctx;
	int i;

	ctx = adapter->cmd_map;
	BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > SGL_SIZE);

	for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
		ctx->sgl = (void *)__get_free_pages(GFP_KERNEL,
						    get_order(SGL_SIZE));
		ctx->sglPA = 0;
		BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE));
		if (!ctx->sgl) {
			/* Unwind everything allocated so far. */
			for (; i >= 0; --i, --ctx) {
				free_pages((unsigned long)ctx->sgl,
					   get_order(SGL_SIZE));
				ctx->sgl = NULL;
			}
			return -ENOMEM;
		}
	}

	return 0;
}

/*
 * Query the device, fetch the config info and return the
 * maximum number of targets on the adapter. In case of
 * failure due to any reason return default i.e. 16.
 */
static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
{
	struct PVSCSICmdDescConfigCmd cmd;
	struct PVSCSIConfigPageHeader *header;
	struct device *dev;
	dma_addr_t configPagePA;
	void *config_page;
	u32 numPhys = 16;	/* default when the query fails */

	dev = pvscsi_dev(adapter);
	config_page = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
					   &configPagePA);
	if (!config_page) {
		dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n");
		goto exit;
	}
	BUG_ON(configPagePA & ~PAGE_MASK);

	/* Fetch config info from the device. */
	cmd.configPageAddress = ((u64)PVSCSI_CONFIG_CONTROLLER_ADDRESS) << 32;
	cmd.configPageNum = PVSCSI_CONFIG_PAGE_CONTROLLER;
	cmd.cmpAddr = configPagePA;
	cmd._pad = 0;

	/*
	 * Mark the completion page header with error values. If the device
	 * completes the command successfully, it sets the status values to
	 * indicate success.
	 */
	header = config_page;
	memset(header, 0, sizeof *header);
	header->hostStatus = BTSTAT_INVPARAM;
	header->scsiStatus = SDSTAT_CHECK;

	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_CONFIG, &cmd, sizeof cmd);

	if (header->hostStatus == BTSTAT_SUCCESS &&
	    header->scsiStatus == SDSTAT_GOOD) {
		struct PVSCSIConfigPageController *config;

		config = config_page;
		numPhys = config->numPhys;
	} else
		dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n",
			 header->hostStatus, header->scsiStatus);

	pci_free_consistent(adapter->dev, PAGE_SIZE, config_page, configPagePA);

exit:
	return numPhys;
}

/* PCI probe: bring up one PVSCSI HBA and register the SCSI host. */
static int __devinit pvscsi_probe(struct pci_dev *pdev,
				  const struct pci_device_id *id)
{
	struct pvscsi_adapter *adapter;
	struct Scsi_Host *host;
	struct device *dev;
	unsigned int i;
	unsigned long flags = 0;
	int error;

	error = -ENODEV;

	if (pci_enable_device(pdev))
		return error;

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
		   pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
	} else {
		printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
		goto out_disable_device;
	}

	/* Size the queue from module parameters before host allocation. */
	pvscsi_template.can_queue =
		min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
		PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
	pvscsi_template.cmd_per_lun =
		min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
	host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
	if (!host) {
		printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
		goto out_disable_device;
	}

	adapter = shost_priv(host);
	memset(adapter, 0, sizeof(*adapter));
	adapter->dev  = pdev;
	adapter->host = host;

	spin_lock_init(&adapter->hw_lock);

	host->max_channel = 0;
	host->max_id      = 16;
	host->max_lun     = 1;
	host->max_cmd_len = 16;

	adapter->rev = pdev->revision;

	if (pci_request_regions(pdev, "vmw_pvscsi")) {
		printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
		goto out_free_host;
	}

	/* Find the first non-I/O BAR large enough for the register window. */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
			continue;

		if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE)
			continue;

		break;
	}

	if (i == DEVICE_COUNT_RESOURCE) {
		printk(KERN_ERR
		       "vmw_pvscsi: adapter has no suitable MMIO region\n");
		goto out_release_resources;
	}

	adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);

	if (!adapter->mmioBase) {
		printk(KERN_ERR
		       "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
		       i, PVSCSI_MEM_SPACE_SIZE);
		goto out_release_resources;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, host);

	ll_adapter_reset(adapter);

	adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);

	error = pvscsi_allocate_rings(adapter);
	if (error) {
		printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n");
		goto out_release_resources;
	}

	/*
	 * Ask the device for max number of targets.
	 */
	host->max_id = pvscsi_get_max_targets(adapter);
	dev = pvscsi_dev(adapter);
	dev_info(dev, "vmw_pvscsi: host->max_id: %u\n", host->max_id);

	/*
	 * From this point on we should reset the adapter if anything goes
	 * wrong.
	 */
	pvscsi_setup_all_rings(adapter);

	adapter->cmd_map = kcalloc(adapter->req_depth,
				   sizeof(struct pvscsi_ctx), GFP_KERNEL);
	if (!adapter->cmd_map) {
		printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n");
		error = -ENOMEM;
		goto out_reset_adapter;
	}

	INIT_LIST_HEAD(&adapter->cmd_pool);
	for (i = 0; i < adapter->req_depth; i++) {
		struct pvscsi_ctx *ctx = adapter->cmd_map + i;
		list_add(&ctx->list, &adapter->cmd_pool);
	}

	error = pvscsi_allocate_sg(adapter);
	if (error) {
		printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n");
		goto out_reset_adapter;
	}

	/* Interrupt setup: prefer MSI-X, then MSI, then shared INTx. */
	if (!pvscsi_disable_msix &&
	    pvscsi_setup_msix(adapter, &adapter->irq) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using MSI-X\n");
		adapter->use_msix = 1;
	} else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using MSI\n");
		adapter->use_msi = 1;
		adapter->irq = pdev->irq;
	} else {
		printk(KERN_INFO "vmw_pvscsi: using INTx\n");
		adapter->irq = pdev->irq;
		flags = IRQF_SHARED;
	}

	error = request_irq(adapter->irq, pvscsi_isr, flags,
			    "vmw_pvscsi", adapter);
	if (error) {
		printk(KERN_ERR
		       "vmw_pvscsi: unable to request IRQ: %d\n", error);
		adapter->irq = 0;
		goto out_reset_adapter;
	}

	error = scsi_add_host(host, &pdev->dev);
	if (error) {
		printk(KERN_ERR
		       "vmw_pvscsi: scsi_add_host failed: %d\n", error);
		goto out_reset_adapter;
	}

	dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n",
		 adapter->rev, host->host_no);

	pvscsi_unmask_intr(adapter);

	scsi_scan_host(host);

	return 0;

out_reset_adapter:
	ll_adapter_reset(adapter);
out_release_resources:
	pvscsi_release_resources(adapter);
out_free_host:
	scsi_host_put(host);
out_disable_device:
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);

	return error;
}

/* Common quiesce path shared by .shutdown and .remove. */
static void __pvscsi_shutdown(struct pvscsi_adapter *adapter)
{
	pvscsi_mask_intr(adapter);

	if (adapter->workqueue)
		flush_workqueue(adapter->workqueue);

	pvscsi_shutdown_intr(adapter);

	pvscsi_process_request_ring(adapter);
	pvscsi_process_completion_ring(adapter);
	ll_adapter_reset(adapter);
}

static void pvscsi_shutdown(struct pci_dev *dev)
{
	struct Scsi_Host *host = pci_get_drvdata(dev);
	struct pvscsi_adapter *adapter = shost_priv(host);

	__pvscsi_shutdown(adapter);
}

static void pvscsi_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct pvscsi_adapter *adapter = shost_priv(host);

	scsi_remove_host(host);

	__pvscsi_shutdown(adapter);
	pvscsi_release_resources(adapter);

	scsi_host_put(host);

	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
}

static struct pci_driver pvscsi_pci_driver = {
	.name		= "vmw_pvscsi",
	.id_table	= pvscsi_pci_tbl,
	.probe		= pvscsi_probe,
	.remove		= __devexit_p(pvscsi_remove),
	.shutdown	= pvscsi_shutdown,
};

static int __init pvscsi_init(void)
{
	pr_info("%s - version %s\n",
		PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING);
	return pci_register_driver(&pvscsi_pci_driver);
}

static void __exit pvscsi_exit(void)
{
	pci_unregister_driver(&pvscsi_pci_driver);
}

module_init(pvscsi_init);
module_exit(pvscsi_exit);
gpl-2.0
Validus-Lollipop/android_kernel_motorola_msm8960dt-common
net/sunrpc/xprtrdma/svc_rdma_transport.c
4937
38881
/* * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the BSD-type * license below: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Network Appliance, Inc. nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/export.h>
#include "xprt_rdma.h"

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

/* Forward declarations for the svc_xprt_ops table below. */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void dto_tasklet_func(unsigned long data);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static void rq_cq_reap(struct svcxprt_rdma *xprt);
static void sq_cq_reap(struct svcxprt_rdma *xprt);

/*
 * Global tasklet plus an irqsave-lock-protected list of transports
 * with pending completions; the CQ handlers enqueue transports here
 * and dto_tasklet_func drains the list.
 */
static DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
static DEFINE_SPINLOCK(dto_lock);
static LIST_HEAD(dto_xprt_q);

static struct svc_xprt_ops svc_rdma_ops = {
	.xpo_create = svc_rdma_create,
	.xpo_recvfrom = svc_rdma_recvfrom,
	.xpo_sendto = svc_rdma_sendto,
	.xpo_release_rqst = svc_rdma_release_rqst,
	.xpo_detach = svc_rdma_detach,
	.xpo_free = svc_rdma_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_has_wspace = svc_rdma_has_wspace,
	.xpo_accept = svc_rdma_accept,
};

struct svc_xprt_class svc_rdma_class = {
	.xcl_name = "rdma",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
};

/*
 * Allocate a per-work-request context. Retries the slab allocation
 * every 500ms until it succeeds, so this never returns NULL but may
 * sleep for a long time under memory pressure.
 */
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt;

	while (1) {
		ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep, GFP_KERNEL);
		if (ctxt)
			break;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
	}
	ctxt->xprt = xprt;
	INIT_LIST_HEAD(&ctxt->dto_q);
	ctxt->count = 0;
	ctxt->frmr = NULL;
	atomic_inc(&xprt->sc_ctxt_used);
	return ctxt;
}

/* Unmap every DMA-mapped SGE held by a context. */
void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	int i;
	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
		/*
		 * Unmap the DMA addr in the SGE if the lkey matches
		 * the sc_dma_lkey, otherwise, ignore it since it is
		 * an FRMR lkey and will be unmapped later when the
		 * last WR that uses it completes.
		 */
		if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
			atomic_dec(&xprt->sc_dma_used);
			ib_dma_unmap_page(xprt->sc_cm_id->device,
					  ctxt->sge[i].addr,
					  ctxt->sge[i].length,
					  ctxt->direction);
		}
	}
}

/*
 * Return a context to the slab cache; when free_pages is set the
 * pages attached to the context are released as well.
 */
void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
	struct svcxprt_rdma *xprt;
	int i;

	BUG_ON(!ctxt);
	xprt = ctxt->xprt;
	if (free_pages)
		for (i = 0; i < ctxt->count; i++)
			put_page(ctxt->pages[i]);

	kmem_cache_free(svc_rdma_ctxt_cachep, ctxt);
	atomic_dec(&xprt->sc_ctxt_used);
}

/*
 * Temporary NFS req mappings are shared across all transport
 * instances. These are short lived and should be bounded by the number
 * of concurrent server threads * depth of the SQ.
 */
/* Allocate a request map; like svc_rdma_get_context, retries until
 * the slab allocation succeeds (never fails, may sleep). */
struct svc_rdma_req_map *svc_rdma_get_req_map(void)
{
	struct svc_rdma_req_map *map;

	while (1) {
		map = kmem_cache_alloc(svc_rdma_map_cachep, GFP_KERNEL);
		if (map)
			break;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
	}
	map->count = 0;
	map->frmr = NULL;
	return map;
}

void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
{
	kmem_cache_free(svc_rdma_map_cachep, map);
}

/* ib_cq event handler: any CQ async event marks the transport closed. */
static void cq_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;
	dprintk("svcrdma: received CQ event id=%d, context=%p\n",
		event->event, context);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
}

/* QP event handler: log benign events, close the transport on fatal ones. */
static void qp_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	switch (event->event) {
	/* These are considered benign events */
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_COMM_EST:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		dprintk("svcrdma: QP event %d received for QP=%p\n",
			event->event, event->element.qp);
		break;
	/* These are considered fatal events */
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_DEVICE_FATAL:
	default:
		dprintk("svcrdma: QP ERROR event %d received for QP=%p, "
			"closing transport\n",
			event->event, event->element.qp);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		break;
	}
}

/*
 * Data Transfer Operation Tasklet
 *
 * Walks a list of transports with I/O pending, removing entries as
 * they are added to the server's I/O pending list. Two bits indicate
 * if SQ, RQ, or both have I/O pending. The dto_lock is an irqsave
 * spinlock that serializes access to the transport list with the RQ
 * and SQ interrupt handlers.
 */
static void dto_tasklet_func(unsigned long data)
{
	struct svcxprt_rdma *xprt;
	unsigned long flags;

	spin_lock_irqsave(&dto_lock, flags);
	while (!list_empty(&dto_xprt_q)) {
		xprt = list_entry(dto_xprt_q.next,
				  struct svcxprt_rdma, sc_dto_q);
		list_del_init(&xprt->sc_dto_q);
		/* Drop the irqsave lock while reaping this transport;
		 * re-acquire it before re-checking the list. */
		spin_unlock_irqrestore(&dto_lock, flags);

		rq_cq_reap(xprt);
		sq_cq_reap(xprt);

		/* Drops the reference taken by the comp handler when
		 * the transport was queued. */
		svc_xprt_put(&xprt->sc_xprt);
		spin_lock_irqsave(&dto_lock, flags);
	}
	spin_unlock_irqrestore(&dto_lock, flags);
}

/*
 * Receive Queue Completion Handler
 *
 * Since an RQ completion handler is called on interrupt context, we
 * need to defer the handling of the I/O to a tasklet
 */
static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an SQ
	 * completion.
	 */
	set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		/* Reference is dropped by dto_tasklet_func after reaping. */
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}

/*
 * rq_cq_reap - Process the RQ CQ.
 *
 * Take all completing WC off the CQE and enqueue the associated DTO
 * context on the dto_q for the transport.
 *
 * Note that caller must hold a transport reference.
 */
static void rq_cq_reap(struct svcxprt_rdma *xprt)
{
	int ret;
	struct ib_wc wc;
	struct svc_rdma_op_ctxt *ctxt = NULL;

	if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
		return;

	/* Re-arm the CQ before polling so no completion is missed. */
	ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_rq_poll);

	while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
		/* The posted recv stored the ctxt pointer as wr_id. */
		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		ctxt->wc_status = wc.status;
		ctxt->byte_len = wc.byte_len;
		svc_rdma_unmap_dma(ctxt);
		if (wc.status != IB_WC_SUCCESS) {
			/* Close the transport */
			dprintk("svcrdma: transport closing putting ctxt %p\n", ctxt);
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			svc_rdma_put_context(ctxt, 1);
			svc_xprt_put(&xprt->sc_xprt);
			continue;
		}
		spin_lock_bh(&xprt->sc_rq_dto_lock);
		list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
		spin_unlock_bh(&xprt->sc_rq_dto_lock);
		/* Drops the per-recv reference taken in svc_rdma_post_recv. */
		svc_xprt_put(&xprt->sc_xprt);
	}

	if (ctxt)
		atomic_inc(&rdma_stat_rq_prod);

	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
	/*
	 * If data arrived before established event,
	 * don't enqueue. This defers RPC I/O until the
	 * RDMA connection is complete.
	 */
	if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
		svc_xprt_enqueue(&xprt->sc_xprt);
}

/*
 * Process a completion context
 */
static void process_context(struct svcxprt_rdma *xprt,
			    struct svc_rdma_op_ctxt *ctxt)
{
	svc_rdma_unmap_dma(ctxt);

	switch (ctxt->wr_op) {
	case IB_WR_SEND:
		if (test_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags))
			svc_rdma_put_frmr(xprt, ctxt->frmr);
		svc_rdma_put_context(ctxt, 1);
		break;

	case IB_WR_RDMA_WRITE:
		svc_rdma_put_context(ctxt, 0);
		break;

	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_READ_WITH_INV:
		if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
			/* Last chunk of an RDMA_READ: hand the saved
			 * request header to the read-complete queue. */
			struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
			BUG_ON(!read_hdr);
			if (test_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags))
				svc_rdma_put_frmr(xprt, ctxt->frmr);
			spin_lock_bh(&xprt->sc_rq_dto_lock);
			set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
			list_add_tail(&read_hdr->dto_q,
				      &xprt->sc_read_complete_q);
			spin_unlock_bh(&xprt->sc_rq_dto_lock);
			svc_xprt_enqueue(&xprt->sc_xprt);
		}
		svc_rdma_put_context(ctxt, 0);
		break;

	default:
		printk(KERN_ERR "svcrdma: unexpected completion type, "
		       "opcode=%d\n",
		       ctxt->wr_op);
		break;
	}
}

/*
 * Send Queue Completion Handler - potentially called on interrupt context.
 *
 * Note that caller must hold a transport reference.
 */
static void sq_cq_reap(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct ib_wc wc;
	struct ib_cq *cq = xprt->sc_sq_cq;
	int ret;

	if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
		return;

	/* Re-arm the CQ before polling so no completion is missed. */
	ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_sq_poll);
	while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
		if (wc.status != IB_WC_SUCCESS)
			/* Close the transport */
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);

		/* Decrement used SQ WR count */
		atomic_dec(&xprt->sc_sq_count);
		wake_up(&xprt->sc_send_wait);

		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		if (ctxt)
			process_context(xprt, ctxt);

		svc_xprt_put(&xprt->sc_xprt);
	}

	if (ctxt)
		atomic_inc(&rdma_stat_sq_prod);
}

/* SQ completion handler: runs in interrupt context, defers work to
 * the DTO tasklet (mirror of rq_comp_handler). */
static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an RQ
	 * completion.
	 */
	set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks.
	 */
	tasklet_schedule(&dto_tasklet);
}

/*
 * Allocate and initialize a transport instance. When listener is set
 * the XPT_LISTENER bit is set so the generic svc_xprt code treats it
 * as a listening endpoint.
 */
static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
					     int listener)
{
	struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

	if (!cma_xprt)
		return NULL;
	svc_xprt_init(&init_net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
	INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
	INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
	INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
	init_waitqueue_head(&cma_xprt->sc_send_wait);

	spin_lock_init(&cma_xprt->sc_lock);
	spin_lock_init(&cma_xprt->sc_rq_dto_lock);
	spin_lock_init(&cma_xprt->sc_frmr_q_lock);

	/* Seed limits from the module-level svcrdma tunables. */
	cma_xprt->sc_ord = svcrdma_ord;

	cma_xprt->sc_max_req_size = svcrdma_max_req_size;
	cma_xprt->sc_max_requests = svcrdma_max_requests;
	cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
	atomic_set(&cma_xprt->sc_sq_count, 0);
	atomic_set(&cma_xprt->sc_ctxt_used, 0);

	if (listener)
		set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);

	return cma_xprt;
}

/* Allocate a page, retrying every second until it succeeds. */
struct page *svc_rdma_get_page(void)
{
	struct page *page;

	while ((page = alloc_page(GFP_KERNEL)) == NULL) {
		/* If we can't get memory, wait a bit and try again */
		printk(KERN_INFO "svcrdma: out of memory...retrying in 1000 "
		       "jiffies.\n");
		schedule_timeout_uninterruptible(msecs_to_jiffies(1000));
	}
	return page;
}

/*
 * Post a single receive WR covering sc_max_req_size bytes of
 * freshly-allocated, DMA-mapped pages. A transport reference is held
 * for the posted recv and dropped when it completes or posting fails.
 */
int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct svc_rdma_op_ctxt *ctxt;
	struct page *page;
	dma_addr_t pa;
	int sge_no;
	int buflen;
	int ret;

	ctxt = svc_rdma_get_context(xprt);
	buflen = 0;
	ctxt->direction = DMA_FROM_DEVICE;
	for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
		BUG_ON(sge_no >= xprt->sc_max_sge);
		page = svc_rdma_get_page();
		ctxt->pages[sge_no] = page;
		pa = ib_dma_map_page(xprt->sc_cm_id->device,
				     page, 0, PAGE_SIZE,
				     DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
			goto err_put_ctxt;
		atomic_inc(&xprt->sc_dma_used);
		ctxt->sge[sge_no].addr = pa;
		ctxt->sge[sge_no].length = PAGE_SIZE;
		ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
		ctxt->count = sge_no + 1;
		buflen += PAGE_SIZE;
	}
	recv_wr.next = NULL;
	recv_wr.sg_list = &ctxt->sge[0];
	recv_wr.num_sge = ctxt->count;
	recv_wr.wr_id = (u64)(unsigned long)ctxt;

	/* Hold a transport ref for the posted recv; dropped by the
	 * reaper on completion, or here if the post fails. */
	svc_xprt_get(&xprt->sc_xprt);
	ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
	if (ret) {
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
		svc_xprt_put(&xprt->sc_xprt);
	}
	return ret;

 err_put_ctxt:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	return -ENOMEM;
}

/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listent xprt. When the listen thread is kicked, it
 * will call the recvfrom method on the listen xprt which will accept the new
 * connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird)
{
	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
	struct svcxprt_rdma *newxprt;
	struct sockaddr *sa;

	/* Create a new transport */
	newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
	if (!newxprt) {
		dprintk("svcrdma: failed to create new transport\n");
		return;
	}
	newxprt->sc_cm_id = new_cma_id;
	new_cma_id->context = newxprt;
	dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
		newxprt, newxprt->sc_cm_id, listen_xprt);

	/* Save client advertised inbound read limit for use later in accept.
	 */
	newxprt->sc_ord = client_ird;

	/* Set the local and remote addresses in the transport */
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

	/*
	 * Enqueue the new transport on the accept queue of the listening
	 * transport
	 */
	spin_lock_bh(&listen_xprt->sc_lock);
	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
	spin_unlock_bh(&listen_xprt->sc_lock);

	/*
	 * Can't use svc_xprt_received here because we are not on a
	 * rqstp thread
	 */
	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/*
 * Handles events generated on the listening endpoint. These events will be
 * either be incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
			       struct rdma_cm_event *event)
{
	struct svcxprt_rdma *xprt = cma_id->context;
	int ret = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
			"event=%d\n", cma_id, cma_id->context, event->event);
		handle_connect_req(cma_id,
				   event->param.conn.initiator_depth);
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		break;

	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt)
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		break;

	default:
		dprintk("svcrdma: Unexpected event on listening endpoint %p, "
			"event=%d\n", cma_id, event->event);
		break;
	}

	return ret;
}

/* CM event handler installed on connected (non-listening) endpoints. */
static int rdma_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct svc_xprt *xprt = cma_id->context;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		/*
		Accept complete */
		svc_xprt_get(xprt);
		dprintk("svcrdma: Connection completed on DTO xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
		svc_xprt_enqueue(xprt);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
			/* Drops the reference taken at ESTABLISHED. */
			svc_xprt_put(xprt);
		}
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
			"event=%d\n", cma_id, xprt, event->event);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
		}
		break;
	default:
		dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
			"event=%d\n", cma_id, event->event);
		break;
	}
	return 0;
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags)
{
	struct rdma_cm_id *listen_id;
	struct svcxprt_rdma *cma_xprt;
	struct svc_xprt *xprt;
	int ret;

	dprintk("svcrdma: Creating RDMA socket\n");
	if (sa->sa_family != AF_INET) {
		dprintk("svcrdma: Address family %d is not supported.\n",
			sa->sa_family);
		return ERR_PTR(-EAFNOSUPPORT);
	}
	cma_xprt = rdma_create_xprt(serv, 1);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);
	xprt = &cma_xprt->sc_xprt;

	listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP,
				   IB_QPT_RC);
	if (IS_ERR(listen_id)) {
		ret = PTR_ERR(listen_id);
		dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
		goto err0;
	}

	ret = rdma_bind_addr(listen_id, sa);
	if (ret) {
		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
		goto err1;
	}
	cma_xprt->sc_cm_id = listen_id;

	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
	if (ret) {
		dprintk("svcrdma: rdma_listen failed = %d\n", ret);
		goto err1;
	}

	/*
	 * We need to use the address from the cm_id in case the
	 * caller specified 0 for the port number.
	 */
	sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

	return &cma_xprt->sc_xprt;

 err1:
	rdma_destroy_id(listen_id);
 err0:
	kfree(cma_xprt);
	return ERR_PTR(ret);
}

/* Allocate a fast-registration MR with its page list; returns
 * ERR_PTR(-ENOMEM) on any allocation failure. */
static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
{
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *pl;
	struct svc_rdma_fastreg_mr *frmr;

	frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
	if (!frmr)
		goto err;

	mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES);
	if (IS_ERR(mr))
		goto err_free_frmr;

	pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device,
					 RPCSVC_MAXPAGES);
	if (IS_ERR(pl))
		goto err_free_mr;

	frmr->mr = mr;
	frmr->page_list = pl;
	INIT_LIST_HEAD(&frmr->frmr_list);
	return frmr;

 err_free_mr:
	ib_dereg_mr(mr);
 err_free_frmr:
	kfree(frmr);
 err:
	return ERR_PTR(-ENOMEM);
}

/* Free every FRMR still queued on the transport's free list. */
static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_fastreg_mr *frmr;

	while (!list_empty(&xprt->sc_frmr_q)) {
		frmr = list_entry(xprt->sc_frmr_q.next,
				  struct svc_rdma_fastreg_mr, frmr_list);
		list_del_init(&frmr->frmr_list);
		ib_dereg_mr(frmr->mr);
		ib_free_fast_reg_page_list(frmr->page_list);
		kfree(frmr);
	}
}

/* Take an FRMR from the free list, or allocate a fresh one. */
struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_fastreg_mr *frmr = NULL;

	spin_lock_bh(&rdma->sc_frmr_q_lock);
	if (!list_empty(&rdma->sc_frmr_q)) {
		frmr = list_entry(rdma->sc_frmr_q.next,
				  struct svc_rdma_fastreg_mr, frmr_list);
		list_del_init(&frmr->frmr_list);
		frmr->map_len = 0;
		frmr->page_list_len = 0;
	}
	spin_unlock_bh(&rdma->sc_frmr_q_lock);
	if (frmr)
		return frmr;

	return rdma_alloc_frmr(rdma);
}

/* Unmap all pages DMA-mapped into an FRMR's page list. */
static void frmr_unmap_dma(struct svcxprt_rdma *xprt,
			   struct svc_rdma_fastreg_mr *frmr)
{
	int page_no;
	for (page_no = 0; page_no < frmr->page_list_len; page_no++) {
		dma_addr_t addr = frmr->page_list->page_list[page_no];

		if (ib_dma_mapping_error(frmr->mr->device, addr))
			continue;

		atomic_dec(&xprt->sc_dma_used);
		ib_dma_unmap_page(frmr->mr->device, addr, PAGE_SIZE,
				  frmr->direction);
	}
}
/* Unmap an FRMR's pages and return it to the transport's free list. */
void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
		       struct svc_rdma_fastreg_mr *frmr)
{
	if (frmr) {
		frmr_unmap_dma(rdma, frmr);
		spin_lock_bh(&rdma->sc_frmr_q_lock);
		BUG_ON(!list_empty(&frmr->frmr_list));
		list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
		spin_unlock_bh(&rdma->sc_frmr_q_lock);
	}
}

/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *listen_rdma;
	struct svcxprt_rdma *newxprt = NULL;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;
	struct ib_device_attr devattr;
	int uninitialized_var(dma_mr_acc);
	int need_dma_mr;
	int ret;
	int i;

	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
	clear_bit(XPT_CONN, &xprt->xpt_flags);
	/* Get the next entry off the accept list */
	spin_lock_bh(&listen_rdma->sc_lock);
	if (!list_empty(&listen_rdma->sc_accept_q)) {
		newxprt = list_entry(listen_rdma->sc_accept_q.next,
				     struct svcxprt_rdma, sc_accept_q);
		list_del_init(&newxprt->sc_accept_q);
	}
	/* If more connections are pending, re-mark the listener. */
	if (!list_empty(&listen_rdma->sc_accept_q))
		set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
	spin_unlock_bh(&listen_rdma->sc_lock);
	if (!newxprt)
		return NULL;

	dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
		newxprt, newxprt->sc_cm_id);

	ret = ib_query_device(newxprt->sc_cm_id->device, &devattr);
	if (ret) {
		dprintk("svcrdma: could not query device attributes on "
			"device %p, rc=%d\n", newxprt->sc_cm_id->device, ret);
		goto errout;
	}

	/* Qualify the transport resource defaults with the
	 * capabilities of this particular device */
	newxprt->sc_max_sge = min((size_t)devattr.max_sge,
				  (size_t)RPCSVC_MAXPAGES);
	newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
				       (size_t)svcrdma_max_requests);
	newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;

	/*
	 * Limit ORD based on client limit, local device limit, and
	 * configured svcrdma limit.
	 */
	newxprt->sc_ord = min_t(size_t, devattr.max_qp_rd_atom, newxprt->sc_ord);
	newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);

	newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device);
	if (IS_ERR(newxprt->sc_pd)) {
		dprintk("svcrdma: error creating PD for connect request\n");
		goto errout;
	}
	newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
					 sq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 newxprt->sc_sq_depth,
					 0);
	if (IS_ERR(newxprt->sc_sq_cq)) {
		dprintk("svcrdma: error creating SQ CQ for connect request\n");
		goto errout;
	}
	newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
					 rq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 newxprt->sc_max_requests,
					 0);
	if (IS_ERR(newxprt->sc_rq_cq)) {
		dprintk("svcrdma: error creating RQ CQ for connect request\n");
		goto errout;
	}

	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = &newxprt->sc_xprt;
	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
	qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
	qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
	qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = newxprt->sc_sq_cq;
	qp_attr.recv_cq = newxprt->sc_rq_cq;
	dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
		"    cm_id->device=%p, sc_pd->device=%p\n"
		"    cap.max_send_wr = %d\n"
		"    cap.max_recv_wr = %d\n"
		"    cap.max_send_sge = %d\n"
		"    cap.max_recv_sge = %d\n",
		newxprt->sc_cm_id, newxprt->sc_pd,
		newxprt->sc_cm_id->device, newxprt->sc_pd->device,
		qp_attr.cap.max_send_wr,
		qp_attr.cap.max_recv_wr,
		qp_attr.cap.max_send_sge,
		qp_attr.cap.max_recv_sge);

	ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
	if (ret)
	{
		/*
		 * XXX: This is a hack. We need a xx_request_qp interface
		 * that will adjust the qp_attr's with a best-effort
		 * number
		 */
		qp_attr.cap.max_send_sge -= 2;
		qp_attr.cap.max_recv_sge -= 2;
		ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd,
				     &qp_attr);
		if (ret) {
			dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
			goto errout;
		}
		/* NOTE(review): sc_max_sge is assigned twice here; the
		 * second store (max_recv_sge) silently overwrites the
		 * first (max_send_sge). Presumably a min() of the two
		 * (or a separate recv field) was intended — confirm
		 * against upstream before relying on sc_max_sge. */
		newxprt->sc_max_sge = qp_attr.cap.max_send_sge;
		newxprt->sc_max_sge = qp_attr.cap.max_recv_sge;
		newxprt->sc_sq_depth = qp_attr.cap.max_send_wr;
		newxprt->sc_max_requests = qp_attr.cap.max_recv_wr;
	}
	newxprt->sc_qp = newxprt->sc_cm_id->qp;

	/*
	 * Use the most secure set of MR resources based on the
	 * transport type and available memory management features in
	 * the device. Here's the table implemented below:
	 *
	 *		Fast	Global	DMA	Remote WR
	 *		Reg	LKEY	MR	Access
	 *		Sup'd	Sup'd	Needed	Needed
	 *
	 * IWARP	N	N	Y	Y
	 *		N	Y	Y	Y
	 *		Y	N	Y	N
	 *		Y	Y	N	-
	 *
	 * IB		N	N	Y	N
	 *		N	Y	N	-
	 *		Y	N	Y	N
	 *		Y	Y	N	-
	 *
	 * NB:	iWARP requires remote write access for the data sink
	 *	of an RDMA_READ. IB does not.
	 */
	if (devattr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		newxprt->sc_frmr_pg_list_len =
			devattr.max_fast_reg_page_list_len;
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
	}

	/*
	 * Determine if a DMA MR is required and if so, what privs are required
	 */
	switch (rdma_node_get_transport(newxprt->sc_cm_id->device->node_type)) {
	case RDMA_TRANSPORT_IWARP:
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
		if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) {
			need_dma_mr = 1;
			dma_mr_acc =
				(IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_WRITE);
		} else if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
			need_dma_mr = 1;
			dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
		} else
			need_dma_mr = 0;
		break;
	case RDMA_TRANSPORT_IB:
		if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
			need_dma_mr = 1;
			dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
		} else
			need_dma_mr = 0;
		break;
	default:
		goto errout;
	}

	/* Create the DMA MR if needed, otherwise, use the DMA LKEY */
	if (need_dma_mr) {
		/* Register all of physical memory */
		newxprt->sc_phys_mr =
			ib_get_dma_mr(newxprt->sc_pd, dma_mr_acc);
		if (IS_ERR(newxprt->sc_phys_mr)) {
			dprintk("svcrdma: Failed to create DMA MR ret=%d\n",
				ret);
			goto errout;
		}
		newxprt->sc_dma_lkey = newxprt->sc_phys_mr->lkey;
	} else
		newxprt->sc_dma_lkey =
			newxprt->sc_cm_id->device->local_dma_lkey;

	/* Post receive buffers */
	for (i = 0; i < newxprt->sc_max_requests; i++) {
		ret = svc_rdma_post_recv(newxprt);
		if (ret) {
			dprintk("svcrdma: failure posting receive buffers\n");
			goto errout;
		}
	}

	/* Swap out the handler */
	newxprt->sc_cm_id->event_handler = rdma_cma_handler;

	/*
	 * Arm the CQs for the SQ and RQ before accepting so we can't
	 * miss the first message
	 */
	ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);

	/* Accept Connection */
	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 0;
	conn_param.initiator_depth = newxprt->sc_ord;
	ret =
	      rdma_accept(newxprt->sc_cm_id, &conn_param);
	if (ret) {
		dprintk("svcrdma: failed to accept new connection, ret=%d\n",
			ret);
		goto errout;
	}

	dprintk("svcrdma: new connection %p accepted with the following "
		"attributes:\n"
		"    local_ip        : %pI4\n"
		"    local_port	     : %d\n"
		"    remote_ip       : %pI4\n"
		"    remote_port     : %d\n"
		"    max_sge         : %d\n"
		"    sq_depth        : %d\n"
		"    max_requests    : %d\n"
		"    ord             : %d\n",
		newxprt,
		&((struct sockaddr_in *)&newxprt->sc_cm_id->
			 route.addr.src_addr)->sin_addr.s_addr,
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.src_addr)->sin_port),
		&((struct sockaddr_in *)&newxprt->sc_cm_id->
			 route.addr.dst_addr)->sin_addr.s_addr,
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.dst_addr)->sin_port),
		newxprt->sc_max_sge,
		newxprt->sc_sq_depth,
		newxprt->sc_max_requests,
		newxprt->sc_ord);

	return &newxprt->sc_xprt;

 errout:
	dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
	/* Take a reference in case the DTO handler runs */
	svc_xprt_get(&newxprt->sc_xprt);
	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
		ib_destroy_qp(newxprt->sc_qp);
	rdma_destroy_id(newxprt->sc_cm_id);
	/* This call to put will destroy the transport */
	svc_xprt_put(&newxprt->sc_xprt);
	return NULL;
}

/* No per-request state to release for RDMA transports. */
static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum one references should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	dprintk("svc: svc_rdma_detach(%p)\n", xprt);

	/* Disconnect and flush posted WQE */
	rdma_disconnect(rdma->sc_cm_id);
}

/*
 * Workqueue-deferred destructor: tears down everything the transport
 * owns. Runs from process context because several of the teardown
 * calls below can sleep.
 */
static void __svc_rdma_free(struct work_struct *work)
{
	struct svcxprt_rdma *rdma =
		container_of(work, struct svcxprt_rdma, sc_work);
	dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);

	/* We should only be called from kref_put */
	BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);

	/*
	 * Destroy queued, but not processed read completions. Note
	 * that this cleanup has to be done before destroying the
	 * cm_id because the device ptr is needed to unmap the dma in
	 * svc_rdma_put_context.
	 */
	while (!list_empty(&rdma->sc_read_complete_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_entry(rdma->sc_read_complete_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Destroy queued, but not processed recv completions */
	while (!list_empty(&rdma->sc_rq_dto_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_entry(rdma->sc_rq_dto_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Warn if we leaked a resource or under-referenced */
	WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
	WARN_ON(atomic_read(&rdma->sc_dma_used) != 0);

	/* De-allocate fastreg mr */
	rdma_dealloc_frmr_q(rdma);

	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_destroy_qp(rdma->sc_qp);

	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
		ib_destroy_cq(rdma->sc_sq_cq);

	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
		ib_destroy_cq(rdma->sc_rq_cq);

	if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
		ib_dereg_mr(rdma->sc_phys_mr);

	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
		ib_dealloc_pd(rdma->sc_pd);

	/* Destroy the CM ID */
	rdma_destroy_id(rdma->sc_cm_id);

	kfree(rdma);
}

/* xpo_free: defer the (sleeping) teardown to the svc_rdma workqueue. */
static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct
svcxprt_rdma *rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt); INIT_WORK(&rdma->sc_work, __svc_rdma_free); queue_work(svc_rdma_wq, &rdma->sc_work); } static int svc_rdma_has_wspace(struct svc_xprt *xprt) { struct svcxprt_rdma *rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt); /* * If there are fewer SQ WR available than required to send a * simple response, return false. */ if ((rdma->sc_sq_depth - atomic_read(&rdma->sc_sq_count) < 3)) return 0; /* * ...or there are already waiters on the SQ, * return false. */ if (waitqueue_active(&rdma->sc_send_wait)) return 0; /* Otherwise return true. */ return 1; } /* * Attempt to register the kvec representing the RPC memory with the * device. * * Returns: * NULL : The device does not support fastreg or there were no more * fastreg mr. * frmr : The kvec register request was successfully posted. * <0 : An error was encountered attempting to register the kvec. */ int svc_rdma_fastreg(struct svcxprt_rdma *xprt, struct svc_rdma_fastreg_mr *frmr) { struct ib_send_wr fastreg_wr; u8 key; /* Bump the key */ key = (u8)(frmr->mr->lkey & 0x000000FF); ib_update_fast_reg_key(frmr->mr, ++key); /* Prepare FASTREG WR */ memset(&fastreg_wr, 0, sizeof fastreg_wr); fastreg_wr.opcode = IB_WR_FAST_REG_MR; fastreg_wr.send_flags = IB_SEND_SIGNALED; fastreg_wr.wr.fast_reg.iova_start = (unsigned long)frmr->kva; fastreg_wr.wr.fast_reg.page_list = frmr->page_list; fastreg_wr.wr.fast_reg.page_list_len = frmr->page_list_len; fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT; fastreg_wr.wr.fast_reg.length = frmr->map_len; fastreg_wr.wr.fast_reg.access_flags = frmr->access_flags; fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey; return svc_rdma_send(xprt, &fastreg_wr); } int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr) { struct ib_send_wr *bad_wr, *n_wr; int wr_count; int i; int ret; if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags)) return -ENOTCONN; BUG_ON(wr->send_flags != IB_SEND_SIGNALED); wr_count = 1; for (n_wr = wr->next; 
n_wr; n_wr = n_wr->next) wr_count++; /* If the SQ is full, wait until an SQ entry is available */ while (1) { spin_lock_bh(&xprt->sc_lock); if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) { spin_unlock_bh(&xprt->sc_lock); atomic_inc(&rdma_stat_sq_starve); /* See if we can opportunistically reap SQ WR to make room */ sq_cq_reap(xprt); /* Wait until SQ WR available if SQ still full */ wait_event(xprt->sc_send_wait, atomic_read(&xprt->sc_sq_count) < xprt->sc_sq_depth); if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags)) return -ENOTCONN; continue; } /* Take a transport ref for each WR posted */ for (i = 0; i < wr_count; i++) svc_xprt_get(&xprt->sc_xprt); /* Bump used SQ WR count and post */ atomic_add(wr_count, &xprt->sc_sq_count); ret = ib_post_send(xprt->sc_qp, wr, &bad_wr); if (ret) { set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); atomic_sub(wr_count, &xprt->sc_sq_count); for (i = 0; i < wr_count; i ++) svc_xprt_put(&xprt->sc_xprt); dprintk("svcrdma: failed to post SQ WR rc=%d, " "sc_sq_count=%d, sc_sq_depth=%d\n", ret, atomic_read(&xprt->sc_sq_count), xprt->sc_sq_depth); } spin_unlock_bh(&xprt->sc_lock); if (ret) wake_up(&xprt->sc_send_wait); break; } return ret; } void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp, enum rpcrdma_errcode err) { struct ib_send_wr err_wr; struct page *p; struct svc_rdma_op_ctxt *ctxt; u32 *va; int length; int ret; p = svc_rdma_get_page(); va = page_address(p); /* XDR encode error */ length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va); ctxt = svc_rdma_get_context(xprt); ctxt->direction = DMA_FROM_DEVICE; ctxt->count = 1; ctxt->pages[0] = p; /* Prepare SGE for local address */ ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device, p, 0, length, DMA_FROM_DEVICE); if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) { put_page(p); svc_rdma_put_context(ctxt, 1); return; } atomic_inc(&xprt->sc_dma_used); ctxt->sge[0].lkey = xprt->sc_dma_lkey; ctxt->sge[0].length = 
length; /* Prepare SEND WR */ memset(&err_wr, 0, sizeof err_wr); ctxt->wr_op = IB_WR_SEND; err_wr.wr_id = (unsigned long)ctxt; err_wr.sg_list = ctxt->sge; err_wr.num_sge = 1; err_wr.opcode = IB_WR_SEND; err_wr.send_flags = IB_SEND_SIGNALED; /* Post It */ ret = svc_rdma_send(xprt, &err_wr); if (ret) { dprintk("svcrdma: Error %d posting send for protocol error\n", ret); svc_rdma_unmap_dma(ctxt); svc_rdma_put_context(ctxt, 1); } }
gpl-2.0