repo_name
string
path
string
copies
string
size
string
content
string
license
string
CyanogenMod/lge-kernel-msm7x30
arch/mips/nxp/pnx8550/stb810/prom_init.c
9522
1100
/*
 * STB810 specific prom routines
 *
 * Author: MontaVista Software, Inc.
 * source@mvista.com
 *
 * Copyright 2005 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <linux/string.h>
#include <linux/kernel.h>

/* Bootloader-passed argument state, latched from fw_arg* in prom_init(). */
int prom_argc;
char **prom_argv, **prom_envp;

extern void __init prom_init_cmdline(void);
extern char *prom_getenv(char *envname);

/* Board name reported by the MIPS core code (e.g. via /proc/cpuinfo). */
const char *get_system_type(void)
{
	return "NXP PNX8950/STB810";
}

/*
 * Early boot entry for this board: capture the firmware-provided
 * argc/argv/envp, build the kernel command line from them, and register
 * the RAM the kernel may use.
 */
void __init prom_init(void)
{
	unsigned long memsize;

	prom_argc = (int) fw_arg0;
	prom_argv = (char **) fw_arg1;
	prom_envp = (char **) fw_arg2;

	prom_init_cmdline();

	/* Only the low 128MB is handed to Linux. */
	memsize = 0x08000000; /* Trimedia uses memory above */
	add_memory_region(0, memsize, BOOT_MEM_RAM);
}
gpl-2.0
nimengyu2/ti-arm9-linux-03.21.00.04
fs/afs/super.c
51
12440
/* AFS superblock handling
 *
 * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved.
 *
 * This software may be freely redistributed under the terms of the
 * GNU General Public License.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Authors: David Howells <dhowells@redhat.com>
 *          David Woodhouse <dwmw2@infradead.org>
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/parser.h>
#include <linux/statfs.h>
#include <linux/sched.h>
#include "internal.h"

#define AFS_FS_MAGIC 0x6B414653 /* 'kAFS' */

static void afs_i_init_once(void *foo);
static struct dentry *afs_mount(struct file_system_type *fs_type,
		      int flags, const char *dev_name, void *data);
static struct inode *afs_alloc_inode(struct super_block *sb);
static void afs_put_super(struct super_block *sb);
static void afs_destroy_inode(struct inode *inode);
static int afs_statfs(struct dentry *dentry, struct kstatfs *buf);

struct file_system_type afs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "afs",
	.mount		= afs_mount,
	.kill_sb	= kill_anon_super,
	.fs_flags	= 0,
};

static const struct super_operations afs_super_ops = {
	.statfs		= afs_statfs,
	.alloc_inode	= afs_alloc_inode,
	.drop_inode	= afs_drop_inode,
	.destroy_inode	= afs_destroy_inode,
	.evict_inode	= afs_evict_inode,
	.put_super	= afs_put_super,
	.show_options	= generic_show_options,
};

/* Slab cache for struct afs_vnode; afs_count_active_inodes tracks live
 * objects so module unload can BUG() on a leak. */
static struct kmem_cache *afs_inode_cachep;
static atomic_t afs_count_active_inodes;

enum {
	afs_no_opt,
	afs_opt_cell,
	afs_opt_rwpath,
	afs_opt_vol,
	afs_opt_autocell,
};

static const match_table_t afs_options_list = {
	{ afs_opt_cell,		"cell=%s"	},
	{ afs_opt_rwpath,	"rwpath"	},
	{ afs_opt_vol,		"vol=%s"	},
	{ afs_opt_autocell,	"autocell"	},
	{ afs_no_opt,		NULL		},
};

/*
 * initialise the filesystem
 */
int __init afs_fs_init(void)
{
	int ret;

	_enter("");

	/* create ourselves an inode cache */
	atomic_set(&afs_count_active_inodes, 0);

	ret = -ENOMEM;
	afs_inode_cachep = kmem_cache_create("afs_inode_cache",
					     sizeof(struct afs_vnode),
					     0,
					     SLAB_HWCACHE_ALIGN,
					     afs_i_init_once);
	if (!afs_inode_cachep) {
		printk(KERN_NOTICE "kAFS: Failed to allocate inode cache\n");
		return ret;
	}

	/* now export our filesystem to lesser mortals */
	ret = register_filesystem(&afs_fs_type);
	if (ret < 0) {
		kmem_cache_destroy(afs_inode_cachep);
		_leave(" = %d", ret);
		return ret;
	}

	_leave(" = 0");
	return 0;
}

/*
 * clean up the filesystem
 */
void __exit afs_fs_exit(void)
{
	_enter("");

	afs_mntpt_kill_timer();
	unregister_filesystem(&afs_fs_type);

	/* any surviving vnode means a refcount leak somewhere */
	if (atomic_read(&afs_count_active_inodes) != 0) {
		printk("kAFS: %d active inode objects still present\n",
		       atomic_read(&afs_count_active_inodes));
		BUG();
	}

	kmem_cache_destroy(afs_inode_cachep);
	_leave("");
}

/*
 * parse the mount options
 * - this function has been shamelessly adapted from the ext3 fs which
 *   shamelessly adapted it from the msdos fs
 */
static int afs_parse_options(struct afs_mount_params *params,
			     char *options, const char **devname)
{
	struct afs_cell *cell;
	substring_t args[MAX_OPT_ARGS];
	char *p;
	int token;

	_enter("%s", options);

	options[PAGE_SIZE - 1] = 0;

	while ((p = strsep(&options, ","))) {
		if (!*p)
			continue;

		token = match_token(p, afs_options_list, args);
		switch (token) {
		case afs_opt_cell:
			/* a "cell=" option replaces any cell already held in
			 * params (dropping its ref) */
			cell = afs_cell_lookup(args[0].from,
					       args[0].to - args[0].from,
					       false);
			if (IS_ERR(cell))
				return PTR_ERR(cell);
			afs_put_cell(params->cell);
			params->cell = cell;
			break;

		case afs_opt_rwpath:
			params->rwpath = 1;
			break;

		case afs_opt_vol:
			*devname = args[0].from;
			break;

		case afs_opt_autocell:
			params->autocell = 1;
			break;

		default:
			printk(KERN_ERR "kAFS:"
			       " Unknown or invalid mount option: '%s'\n", p);
			return -EINVAL;
		}
	}

	_leave(" = 0");
	return 0;
}

/*
 * parse a device name to get cell name, volume name, volume type and R/W
 * selector
 * - this can be one of the following:
 *	"%[cell:]volume[.]"		R/W volume
 *	"#[cell:]volume[.]"		R/O or R/W volume (rwpath=0),
 *					 or R/W (rwpath=1) volume
 *	"%[cell:]volume.readonly"	R/O volume
 *	"#[cell:]volume.readonly"	R/O volume
 *	"%[cell:]volume.backup"		Backup volume
 *	"#[cell:]volume.backup"		Backup volume
 */
static int afs_parse_device_name(struct afs_mount_params *params,
				 const char *name)
{
	struct afs_cell *cell;
	const char *cellname, *suffix;
	int cellnamesz;

	_enter(",%s", name);

	if (!name) {
		printk(KERN_ERR "kAFS: no volume name specified\n");
		return -EINVAL;
	}

	if ((name[0] != '%' && name[0] != '#') || !name[1]) {
		printk(KERN_ERR "kAFS: unparsable volume name\n");
		return -EINVAL;
	}

	/* determine the type of volume we're looking for */
	params->type = AFSVL_ROVOL;
	params->force = false;
	if (params->rwpath || name[0] == '%') {
		params->type = AFSVL_RWVOL;
		params->force = true;
	}
	name++;

	/* split the cell name out if there is one */
	params->volname = strchr(name, ':');
	if (params->volname) {
		cellname = name;
		cellnamesz = params->volname - name;
		params->volname++;
	} else {
		params->volname = name;
		cellname = NULL;
		cellnamesz = 0;
	}

	/* the volume type is further affected by a possible suffix */
	suffix = strrchr(params->volname, '.');
	if (suffix) {
		if (strcmp(suffix, ".readonly") == 0) {
			params->type = AFSVL_ROVOL;
			params->force = true;
		} else if (strcmp(suffix, ".backup") == 0) {
			params->type = AFSVL_BACKVOL;
			params->force = true;
		} else if (suffix[1] == 0) {
			/* bare trailing '.' - type already chosen above */
		} else {
			/* unknown suffix: treat it as part of the volume name */
			suffix = NULL;
		}
	}

	params->volnamesz = suffix ?
		suffix - params->volname : strlen(params->volname);

	_debug("cell %*.*s [%p]",
	       cellnamesz, cellnamesz, cellname ?: "", params->cell);

	/* lookup the cell record */
	if (cellname || !params->cell) {
		cell = afs_cell_lookup(cellname, cellnamesz, true);
		if (IS_ERR(cell)) {
			printk(KERN_ERR "kAFS: unable to lookup cell '%*.*s'\n",
			       cellnamesz, cellnamesz, cellname ?: "");
			return PTR_ERR(cell);
		}
		afs_put_cell(params->cell);
		params->cell = cell;
	}

	_debug("CELL:%s [%p] VOLUME:%*.*s SUFFIX:%s TYPE:%d%s",
	       params->cell->name, params->cell,
	       params->volnamesz, params->volnamesz, params->volname,
	       suffix ?: "-", params->type, params->force ? " FORCE" : "");

	return 0;
}

/*
 * check a superblock to see if it's the one we're looking for
 */
static int afs_test_super(struct super_block *sb, void *data)
{
	struct afs_mount_params *params = data;
	struct afs_super_info *as = sb->s_fs_info;

	return as->volume == params->volume;
}

/*
 * fill in the superblock
 */
static int afs_fill_super(struct super_block *sb, void *data)
{
	struct afs_mount_params *params = data;
	struct afs_super_info *as = NULL;
	struct afs_fid fid;
	struct dentry *root = NULL;
	struct inode *inode = NULL;
	int ret;

	_enter("");

	/* allocate a superblock info record */
	as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL);
	if (!as) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	afs_get_volume(params->volume);
	as->volume = params->volume;

	/* fill in the superblock */
	sb->s_blocksize		= PAGE_CACHE_SIZE;
	sb->s_blocksize_bits	= PAGE_CACHE_SHIFT;
	sb->s_magic		= AFS_FS_MAGIC;
	sb->s_op		= &afs_super_ops;
	sb->s_fs_info		= as;
	sb->s_bdi		= &as->volume->bdi;

	/* allocate the root inode and dentry; the root of a volume is always
	 * vnode 1, unique 1 */
	fid.vid		= as->volume->vid;
	fid.vnode	= 1;
	fid.unique	= 1;
	inode = afs_iget(sb, params->key, &fid, NULL, NULL);
	if (IS_ERR(inode))
		goto error_inode;

	if (params->autocell)
		set_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(inode)->flags);

	ret = -ENOMEM;
	root = d_alloc_root(inode);
	if (!root)
		goto error;

	sb->s_root = root;

	_leave(" = 0");
	return 0;

error_inode:
	ret = PTR_ERR(inode);
	inode = NULL;
error:
	/* iput() tolerates a NULL inode on the error_inode path */
	iput(inode);
	afs_put_volume(as->volume);
	kfree(as);
	sb->s_fs_info = NULL;

	_leave(" = %d", ret);
	return ret;
}

/*
 * get an AFS superblock
 */
static struct dentry *afs_mount(struct file_system_type *fs_type,
		      int flags, const char *dev_name, void *options)
{
	struct afs_mount_params params;
	struct super_block *sb;
	struct afs_volume *vol;
	struct key *key;
	char *new_opts = kstrdup(options, GFP_KERNEL);
	int ret;

	_enter(",,%s,%p", dev_name, options);

	memset(&params, 0, sizeof(params));

	/* parse the options and device name */
	if (options) {
		ret = afs_parse_options(&params, options, &dev_name);
		if (ret < 0)
			goto error;
	}

	ret = afs_parse_device_name(&params, dev_name);
	if (ret < 0)
		goto error;

	/* try and do the mount securely */
	key = afs_request_key(params.cell);
	if (IS_ERR(key)) {
		_leave(" = %ld [key]", PTR_ERR(key));
		ret = PTR_ERR(key);
		goto error;
	}
	params.key = key;

	/* parse the device name */
	vol = afs_volume_lookup(&params);
	if (IS_ERR(vol)) {
		ret = PTR_ERR(vol);
		goto error;
	}
	params.volume = vol;

	/* allocate a deviceless superblock; afs_test_super matches an
	 * existing sb for the same volume so remounts share it */
	sb = sget(fs_type, afs_test_super, set_anon_super, &params);
	if (IS_ERR(sb)) {
		ret = PTR_ERR(sb);
		goto error;
	}

	if (!sb->s_root) {
		/* initial superblock/root creation */
		_debug("create");
		sb->s_flags = flags;
		ret = afs_fill_super(sb, &params);
		if (ret < 0) {
			deactivate_locked_super(sb);
			goto error;
		}
		save_mount_options(sb, new_opts);
		sb->s_flags |= MS_ACTIVE;
	} else {
		_debug("reuse");
		ASSERTCMP(sb->s_flags, &, MS_ACTIVE);
	}

	afs_put_volume(params.volume);
	afs_put_cell(params.cell);
	kfree(new_opts);
	_leave(" = 0 [%p]", sb);
	return dget(sb->s_root);

error:
	afs_put_volume(params.volume);
	afs_put_cell(params.cell);
	key_put(params.key);
	kfree(new_opts);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * finish the unmounting process on the superblock
 */
static void afs_put_super(struct super_block *sb)
{
	struct afs_super_info *as = sb->s_fs_info;

	_enter("");

	afs_put_volume(as->volume);

	_leave("");
}

/*
 * initialise an inode cache slab element prior to any use
 */
static void afs_i_init_once(void *_vnode)
{
	struct afs_vnode *vnode = _vnode;

	memset(vnode, 0, sizeof(*vnode));
	inode_init_once(&vnode->vfs_inode);
	init_waitqueue_head(&vnode->update_waitq);
	mutex_init(&vnode->permits_lock);
	mutex_init(&vnode->validate_lock);
	spin_lock_init(&vnode->writeback_lock);
	spin_lock_init(&vnode->lock);
	INIT_LIST_HEAD(&vnode->writebacks);
	INIT_LIST_HEAD(&vnode->pending_locks);
	INIT_LIST_HEAD(&vnode->granted_locks);
	INIT_DELAYED_WORK(&vnode->lock_work, afs_lock_work);
	INIT_WORK(&vnode->cb_broken_work, afs_broken_callback_work);
}

/*
 * allocate an AFS inode struct from our slab cache
 */
static struct inode *afs_alloc_inode(struct super_block *sb)
{
	struct afs_vnode *vnode;

	vnode = kmem_cache_alloc(afs_inode_cachep, GFP_KERNEL);
	if (!vnode)
		return NULL;

	atomic_inc(&afs_count_active_inodes);

	/* reset only the per-use fields; the locks/lists set up in
	 * afs_i_init_once() persist across slab reuse */
	memset(&vnode->fid, 0, sizeof(vnode->fid));
	memset(&vnode->status, 0, sizeof(vnode->status));

	vnode->volume		= NULL;
	vnode->update_cnt	= 0;
	vnode->flags		= 1 << AFS_VNODE_UNSET;
	vnode->cb_promised	= false;

	_leave(" = %p", &vnode->vfs_inode);
	return &vnode->vfs_inode;
}

/*
 * destroy an AFS inode struct
 */
static void afs_destroy_inode(struct inode *inode)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("%p{%x:%u}", inode, vnode->fid.vid, vnode->fid.vnode);

	_debug("DESTROY INODE %p", inode);

	ASSERTCMP(vnode->server, ==, NULL);

	kmem_cache_free(afs_inode_cachep, vnode);
	atomic_dec(&afs_count_active_inodes);
}

/*
 * return information about an AFS volume
 */
static int afs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct afs_volume_status vs;
	struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode);
	struct key *key;
	int ret;

	key = afs_request_key(vnode->volume->cell);
	if (IS_ERR(key))
		return PTR_ERR(key);

	ret = afs_vnode_get_volume_status(vnode, key, &vs);
	key_put(key);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	buf->f_type	= dentry->d_sb->s_magic;
	buf->f_bsize	= AFS_BLOCK_SIZE;
	buf->f_namelen	= AFSNAMEMAX - 1;

	if (vs.max_quota == 0)
		buf->f_blocks = vs.part_max_blocks;
	else
		buf->f_blocks = vs.max_quota;
	buf->f_bavail = buf->f_bfree = buf->f_blocks - vs.blocks_in_use;
	return 0;
}
gpl-2.0
coredumb/linux-grsecurity
drivers/regulator/helpers.c
307
11646
/* * helpers.c -- Voltage/Current Regulator framework helper functions. * * Copyright 2007, 2008 Wolfson Microelectronics PLC. * Copyright 2008 SlimLogic Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/regulator/driver.h> #include <linux/module.h> /** * regulator_is_enabled_regmap - standard is_enabled() for regmap users * * @rdev: regulator to operate on * * Regulators that use regmap for their register I/O can set the * enable_reg and enable_mask fields in their descriptor and then use * this as their is_enabled operation, saving some code. */ int regulator_is_enabled_regmap(struct regulator_dev *rdev) { unsigned int val; int ret; ret = regmap_read(rdev->regmap, rdev->desc->enable_reg, &val); if (ret != 0) return ret; if (rdev->desc->enable_is_inverted) return (val & rdev->desc->enable_mask) == 0; else return (val & rdev->desc->enable_mask) != 0; } EXPORT_SYMBOL_GPL(regulator_is_enabled_regmap); /** * regulator_enable_regmap - standard enable() for regmap users * * @rdev: regulator to operate on * * Regulators that use regmap for their register I/O can set the * enable_reg and enable_mask fields in their descriptor and then use * this as their enable() operation, saving some code. 
*/ int regulator_enable_regmap(struct regulator_dev *rdev) { unsigned int val; if (rdev->desc->enable_is_inverted) val = 0; else val = rdev->desc->enable_mask; return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, rdev->desc->enable_mask, val); } EXPORT_SYMBOL_GPL(regulator_enable_regmap); /** * regulator_disable_regmap - standard disable() for regmap users * * @rdev: regulator to operate on * * Regulators that use regmap for their register I/O can set the * enable_reg and enable_mask fields in their descriptor and then use * this as their disable() operation, saving some code. */ int regulator_disable_regmap(struct regulator_dev *rdev) { unsigned int val; if (rdev->desc->enable_is_inverted) val = rdev->desc->enable_mask; else val = 0; return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, rdev->desc->enable_mask, val); } EXPORT_SYMBOL_GPL(regulator_disable_regmap); /** * regulator_get_voltage_sel_regmap - standard get_voltage_sel for regmap users * * @rdev: regulator to operate on * * Regulators that use regmap for their register I/O can set the * vsel_reg and vsel_mask fields in their descriptor and then use this * as their get_voltage_vsel operation, saving some code. */ int regulator_get_voltage_sel_regmap(struct regulator_dev *rdev) { unsigned int val; int ret; ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &val); if (ret != 0) return ret; val &= rdev->desc->vsel_mask; val >>= ffs(rdev->desc->vsel_mask) - 1; return val; } EXPORT_SYMBOL_GPL(regulator_get_voltage_sel_regmap); /** * regulator_set_voltage_sel_regmap - standard set_voltage_sel for regmap users * * @rdev: regulator to operate on * @sel: Selector to set * * Regulators that use regmap for their register I/O can set the * vsel_reg and vsel_mask fields in their descriptor and then use this * as their set_voltage_vsel operation, saving some code. 
*/ int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel) { int ret; sel <<= ffs(rdev->desc->vsel_mask) - 1; ret = regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg, rdev->desc->vsel_mask, sel); if (ret) return ret; if (rdev->desc->apply_bit) ret = regmap_update_bits(rdev->regmap, rdev->desc->apply_reg, rdev->desc->apply_bit, rdev->desc->apply_bit); return ret; } EXPORT_SYMBOL_GPL(regulator_set_voltage_sel_regmap); /** * regulator_map_voltage_iterate - map_voltage() based on list_voltage() * * @rdev: Regulator to operate on * @min_uV: Lower bound for voltage * @max_uV: Upper bound for voltage * * Drivers implementing set_voltage_sel() and list_voltage() can use * this as their map_voltage() operation. It will find a suitable * voltage by calling list_voltage() until it gets something in bounds * for the requested voltages. */ int regulator_map_voltage_iterate(struct regulator_dev *rdev, int min_uV, int max_uV) { int best_val = INT_MAX; int selector = 0; int i, ret; /* Find the smallest voltage that falls within the specified * range. */ for (i = 0; i < rdev->desc->n_voltages; i++) { ret = rdev->desc->ops->list_voltage(rdev, i); if (ret < 0) continue; if (ret < best_val && ret >= min_uV && ret <= max_uV) { best_val = ret; selector = i; } } if (best_val != INT_MAX) return selector; else return -EINVAL; } EXPORT_SYMBOL_GPL(regulator_map_voltage_iterate); /** * regulator_map_voltage_ascend - map_voltage() for ascendant voltage list * * @rdev: Regulator to operate on * @min_uV: Lower bound for voltage * @max_uV: Upper bound for voltage * * Drivers that have ascendant voltage list can use this as their * map_voltage() operation. 
*/ int regulator_map_voltage_ascend(struct regulator_dev *rdev, int min_uV, int max_uV) { int i, ret; for (i = 0; i < rdev->desc->n_voltages; i++) { ret = rdev->desc->ops->list_voltage(rdev, i); if (ret < 0) continue; if (ret > max_uV) break; if (ret >= min_uV && ret <= max_uV) return i; } return -EINVAL; } EXPORT_SYMBOL_GPL(regulator_map_voltage_ascend); /** * regulator_map_voltage_linear - map_voltage() for simple linear mappings * * @rdev: Regulator to operate on * @min_uV: Lower bound for voltage * @max_uV: Upper bound for voltage * * Drivers providing min_uV and uV_step in their regulator_desc can * use this as their map_voltage() operation. */ int regulator_map_voltage_linear(struct regulator_dev *rdev, int min_uV, int max_uV) { int ret, voltage; /* Allow uV_step to be 0 for fixed voltage */ if (rdev->desc->n_voltages == 1 && rdev->desc->uV_step == 0) { if (min_uV <= rdev->desc->min_uV && rdev->desc->min_uV <= max_uV) return 0; else return -EINVAL; } if (!rdev->desc->uV_step) { BUG_ON(!rdev->desc->uV_step); return -EINVAL; } if (min_uV < rdev->desc->min_uV) min_uV = rdev->desc->min_uV; ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step); if (ret < 0) return ret; ret += rdev->desc->linear_min_sel; /* Map back into a voltage to verify we're still in bounds */ voltage = rdev->desc->ops->list_voltage(rdev, ret); if (voltage < min_uV || voltage > max_uV) return -EINVAL; return ret; } EXPORT_SYMBOL_GPL(regulator_map_voltage_linear); /** * regulator_map_voltage_linear - map_voltage() for multiple linear ranges * * @rdev: Regulator to operate on * @min_uV: Lower bound for voltage * @max_uV: Upper bound for voltage * * Drivers providing linear_ranges in their descriptor can use this as * their map_voltage() callback. 
*/ int regulator_map_voltage_linear_range(struct regulator_dev *rdev, int min_uV, int max_uV) { const struct regulator_linear_range *range; int ret = -EINVAL; int voltage, i; if (!rdev->desc->n_linear_ranges) { BUG_ON(!rdev->desc->n_linear_ranges); return -EINVAL; } for (i = 0; i < rdev->desc->n_linear_ranges; i++) { int linear_max_uV; range = &rdev->desc->linear_ranges[i]; linear_max_uV = range->min_uV + (range->max_sel - range->min_sel) * range->uV_step; if (!(min_uV <= linear_max_uV && max_uV >= range->min_uV)) continue; if (min_uV <= range->min_uV) min_uV = range->min_uV; /* range->uV_step == 0 means fixed voltage range */ if (range->uV_step == 0) { ret = 0; } else { ret = DIV_ROUND_UP(min_uV - range->min_uV, range->uV_step); if (ret < 0) return ret; } ret += range->min_sel; break; } if (i == rdev->desc->n_linear_ranges) return -EINVAL; /* Map back into a voltage to verify we're still in bounds */ voltage = rdev->desc->ops->list_voltage(rdev, ret); if (voltage < min_uV || voltage > max_uV) return -EINVAL; return ret; } EXPORT_SYMBOL_GPL(regulator_map_voltage_linear_range); /** * regulator_list_voltage_linear - List voltages with simple calculation * * @rdev: Regulator device * @selector: Selector to convert into a voltage * * Regulators with a simple linear mapping between voltages and * selectors can set min_uV and uV_step in the regulator descriptor * and then use this function as their list_voltage() operation, */ int regulator_list_voltage_linear(struct regulator_dev *rdev, unsigned int selector) { if (selector >= rdev->desc->n_voltages) return -EINVAL; if (selector < rdev->desc->linear_min_sel) return 0; selector -= rdev->desc->linear_min_sel; return rdev->desc->min_uV + (rdev->desc->uV_step * selector); } EXPORT_SYMBOL_GPL(regulator_list_voltage_linear); /** * regulator_list_voltage_linear_range - List voltages for linear ranges * * @rdev: Regulator device * @selector: Selector to convert into a voltage * * Regulators with a series of simple linear 
mappings between voltages * and selectors can set linear_ranges in the regulator descriptor and * then use this function as their list_voltage() operation, */ int regulator_list_voltage_linear_range(struct regulator_dev *rdev, unsigned int selector) { const struct regulator_linear_range *range; int i; if (!rdev->desc->n_linear_ranges) { BUG_ON(!rdev->desc->n_linear_ranges); return -EINVAL; } for (i = 0; i < rdev->desc->n_linear_ranges; i++) { range = &rdev->desc->linear_ranges[i]; if (!(selector >= range->min_sel && selector <= range->max_sel)) continue; selector -= range->min_sel; return range->min_uV + (range->uV_step * selector); } return -EINVAL; } EXPORT_SYMBOL_GPL(regulator_list_voltage_linear_range); /** * regulator_list_voltage_table - List voltages with table based mapping * * @rdev: Regulator device * @selector: Selector to convert into a voltage * * Regulators with table based mapping between voltages and * selectors can set volt_table in the regulator descriptor * and then use this function as their list_voltage() operation. */ int regulator_list_voltage_table(struct regulator_dev *rdev, unsigned int selector) { if (!rdev->desc->volt_table) { BUG_ON(!rdev->desc->volt_table); return -EINVAL; } if (selector >= rdev->desc->n_voltages) return -EINVAL; return rdev->desc->volt_table[selector]; } EXPORT_SYMBOL_GPL(regulator_list_voltage_table); /** * regulator_set_bypass_regmap - Default set_bypass() using regmap * * @rdev: device to operate on. * @enable: state to set. */ int regulator_set_bypass_regmap(struct regulator_dev *rdev, bool enable) { unsigned int val; if (enable) val = rdev->desc->bypass_mask; else val = 0; return regmap_update_bits(rdev->regmap, rdev->desc->bypass_reg, rdev->desc->bypass_mask, val); } EXPORT_SYMBOL_GPL(regulator_set_bypass_regmap); /** * regulator_get_bypass_regmap - Default get_bypass() using regmap * * @rdev: device to operate on. * @enable: current state. 
*/ int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable) { unsigned int val; int ret; ret = regmap_read(rdev->regmap, rdev->desc->bypass_reg, &val); if (ret != 0) return ret; *enable = val & rdev->desc->bypass_mask; return 0; } EXPORT_SYMBOL_GPL(regulator_get_bypass_regmap);
gpl-2.0
TeamEOS/kernel_htc_msm8960
drivers/block/cciss_scsi.c
1843
49223
/* * Disk Array driver for HP Smart Array controllers, SCSI Tape module. * (C) Copyright 2001, 2007 Hewlett-Packard Development Company, L.P. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 300, Boston, MA * 02111-1307, USA. * * Questions/Comments/Bugfixes to iss_storagedev@hp.com * * Author: Stephen M. Cameron */ #ifdef CONFIG_CISS_SCSI_TAPE /* Here we have code to present the driver as a scsi driver as it is simultaneously presented as a block driver. The reason for doing this is to allow access to SCSI tape drives through the array controller. Note in particular, neither physical nor logical disks are presented through the scsi layer. 
*/ #include <linux/timer.h> #include <linux/completion.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/atomic.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include "cciss_scsi.h" #define CCISS_ABORT_MSG 0x00 #define CCISS_RESET_MSG 0x01 static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr, int cmd_type); static CommandList_struct *cmd_alloc(ctlr_info_t *h); static CommandList_struct *cmd_special_alloc(ctlr_info_t *h); static void cmd_free(ctlr_info_t *h, CommandList_struct *c); static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c); static int cciss_scsi_proc_info( struct Scsi_Host *sh, char *buffer, /* data buffer */ char **start, /* where data in buffer starts */ off_t offset, /* offset from start of imaginary file */ int length, /* length of data in buffer */ int func); /* 0 == read, 1 == write */ static int cciss_scsi_queue_command (struct Scsi_Host *h, struct scsi_cmnd *cmd); static int cciss_eh_device_reset_handler(struct scsi_cmnd *); static int cciss_eh_abort_handler(struct scsi_cmnd *); static struct cciss_scsi_hba_t ccissscsi[MAX_CTLR] = { { .name = "cciss0", .ndevices = 0 }, { .name = "cciss1", .ndevices = 0 }, { .name = "cciss2", .ndevices = 0 }, { .name = "cciss3", .ndevices = 0 }, { .name = "cciss4", .ndevices = 0 }, { .name = "cciss5", .ndevices = 0 }, { .name = "cciss6", .ndevices = 0 }, { .name = "cciss7", .ndevices = 0 }, }; static struct scsi_host_template cciss_driver_template = { .module = THIS_MODULE, .name = "cciss", .proc_name = "cciss", .proc_info = cciss_scsi_proc_info, .queuecommand = cciss_scsi_queue_command, .this_id = 7, .cmd_per_lun = 1, .use_clustering = DISABLE_CLUSTERING, /* Can't have eh_bus_reset_handler or eh_host_reset_handler for cciss */ .eh_device_reset_handler= cciss_eh_device_reset_handler, .eh_abort_handler = cciss_eh_abort_handler, }; #pragma pack(1) #define 
SCSI_PAD_32 8 #define SCSI_PAD_64 8 struct cciss_scsi_cmd_stack_elem_t { CommandList_struct cmd; ErrorInfo_struct Err; __u32 busaddr; int cmdindex; u8 pad[IS_32_BIT * SCSI_PAD_32 + IS_64_BIT * SCSI_PAD_64]; }; #pragma pack() #pragma pack(1) struct cciss_scsi_cmd_stack_t { struct cciss_scsi_cmd_stack_elem_t *pool; struct cciss_scsi_cmd_stack_elem_t **elem; dma_addr_t cmd_pool_handle; int top; int nelems; }; #pragma pack() struct cciss_scsi_adapter_data_t { struct Scsi_Host *scsi_host; struct cciss_scsi_cmd_stack_t cmd_stack; SGDescriptor_struct **cmd_sg_list; int registered; spinlock_t lock; // to protect ccissscsi[ctlr]; }; #define CPQ_TAPE_LOCK(h, flags) spin_lock_irqsave( \ &h->scsi_ctlr->lock, flags); #define CPQ_TAPE_UNLOCK(h, flags) spin_unlock_irqrestore( \ &h->scsi_ctlr->lock, flags); static CommandList_struct * scsi_cmd_alloc(ctlr_info_t *h) { /* assume only one process in here at a time, locking done by caller. */ /* use h->lock */ /* might be better to rewrite how we allocate scsi commands in a way that */ /* needs no locking at all. */ /* take the top memory chunk off the stack and return it, if any. 
*/ struct cciss_scsi_cmd_stack_elem_t *c; struct cciss_scsi_adapter_data_t *sa; struct cciss_scsi_cmd_stack_t *stk; u64bit temp64; sa = h->scsi_ctlr; stk = &sa->cmd_stack; if (stk->top < 0) return NULL; c = stk->elem[stk->top]; /* memset(c, 0, sizeof(*c)); */ memset(&c->cmd, 0, sizeof(c->cmd)); memset(&c->Err, 0, sizeof(c->Err)); /* set physical addr of cmd and addr of scsi parameters */ c->cmd.busaddr = c->busaddr; c->cmd.cmdindex = c->cmdindex; /* (__u32) (stk->cmd_pool_handle + (sizeof(struct cciss_scsi_cmd_stack_elem_t)*stk->top)); */ temp64.val = (__u64) (c->busaddr + sizeof(CommandList_struct)); /* (__u64) (stk->cmd_pool_handle + (sizeof(struct cciss_scsi_cmd_stack_elem_t)*stk->top) + sizeof(CommandList_struct)); */ stk->top--; c->cmd.ErrDesc.Addr.lower = temp64.val32.lower; c->cmd.ErrDesc.Addr.upper = temp64.val32.upper; c->cmd.ErrDesc.Len = sizeof(ErrorInfo_struct); c->cmd.ctlr = h->ctlr; c->cmd.err_info = &c->Err; return (CommandList_struct *) c; } static void scsi_cmd_free(ctlr_info_t *h, CommandList_struct *c) { /* assume only one process in here at a time, locking done by caller. */ /* use h->lock */ /* drop the free memory chunk on top of the stack. */ struct cciss_scsi_adapter_data_t *sa; struct cciss_scsi_cmd_stack_t *stk; sa = h->scsi_ctlr; stk = &sa->cmd_stack; stk->top++; if (stk->top >= stk->nelems) { dev_err(&h->pdev->dev, "scsi_cmd_free called too many times.\n"); BUG(); } stk->elem[stk->top] = (struct cciss_scsi_cmd_stack_elem_t *) c; } static int scsi_cmd_stack_setup(ctlr_info_t *h, struct cciss_scsi_adapter_data_t *sa) { int i; struct cciss_scsi_cmd_stack_t *stk; size_t size; stk = &sa->cmd_stack; stk->nelems = cciss_tape_cmds + 2; sa->cmd_sg_list = cciss_allocate_sg_chain_blocks(h, h->chainsize, stk->nelems); if (!sa->cmd_sg_list && h->chainsize > 0) return -ENOMEM; size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * stk->nelems; /* Check alignment, see cciss_cmd.h near CommandList_struct def. 
*/ BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0); /* pci_alloc_consistent guarantees 32-bit DMA address will be used */ stk->pool = (struct cciss_scsi_cmd_stack_elem_t *) pci_alloc_consistent(h->pdev, size, &stk->cmd_pool_handle); if (stk->pool == NULL) { cciss_free_sg_chain_blocks(sa->cmd_sg_list, stk->nelems); sa->cmd_sg_list = NULL; return -ENOMEM; } stk->elem = kmalloc(sizeof(stk->elem[0]) * stk->nelems, GFP_KERNEL); if (!stk->elem) { pci_free_consistent(h->pdev, size, stk->pool, stk->cmd_pool_handle); return -1; } for (i = 0; i < stk->nelems; i++) { stk->elem[i] = &stk->pool[i]; stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle + (sizeof(struct cciss_scsi_cmd_stack_elem_t) * i)); stk->elem[i]->cmdindex = i; } stk->top = stk->nelems-1; return 0; } static void scsi_cmd_stack_free(ctlr_info_t *h) { struct cciss_scsi_adapter_data_t *sa; struct cciss_scsi_cmd_stack_t *stk; size_t size; sa = h->scsi_ctlr; stk = &sa->cmd_stack; if (stk->top != stk->nelems-1) { dev_warn(&h->pdev->dev, "bug: %d scsi commands are still outstanding.\n", stk->nelems - stk->top); } size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * stk->nelems; pci_free_consistent(h->pdev, size, stk->pool, stk->cmd_pool_handle); stk->pool = NULL; cciss_free_sg_chain_blocks(sa->cmd_sg_list, stk->nelems); kfree(stk->elem); stk->elem = NULL; } #if 0 static int xmargin=8; static int amargin=60; static void print_bytes (unsigned char *c, int len, int hex, int ascii) { int i; unsigned char *x; if (hex) { x = c; for (i=0;i<len;i++) { if ((i % xmargin) == 0 && i>0) printk("\n"); if ((i % xmargin) == 0) printk("0x%04x:", i); printk(" %02x", *x); x++; } printk("\n"); } if (ascii) { x = c; for (i=0;i<len;i++) { if ((i % amargin) == 0 && i>0) printk("\n"); if ((i % amargin) == 0) printk("0x%04x:", i); if (*x > 26 && *x < 128) printk("%c", *x); else printk("."); x++; } printk("\n"); } } static void print_cmd(CommandList_struct *cp) { printk("queue:%d\n", cp->Header.ReplyQueue); 
	printk("sglist:%d\n", cp->Header.SGList);
	printk("sgtot:%d\n", cp->Header.SGTotal);
	printk("Tag:0x%08x/0x%08x\n", cp->Header.Tag.upper,
			cp->Header.Tag.lower);
	printk("LUN:0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		cp->Header.LUN.LunAddrBytes[0],
		cp->Header.LUN.LunAddrBytes[1],
		cp->Header.LUN.LunAddrBytes[2],
		cp->Header.LUN.LunAddrBytes[3],
		cp->Header.LUN.LunAddrBytes[4],
		cp->Header.LUN.LunAddrBytes[5],
		cp->Header.LUN.LunAddrBytes[6],
		cp->Header.LUN.LunAddrBytes[7]);
	printk("CDBLen:%d\n", cp->Request.CDBLen);
	printk("Type:%d\n",cp->Request.Type.Type);
	printk("Attr:%d\n",cp->Request.Type.Attribute);
	printk(" Dir:%d\n",cp->Request.Type.Direction);
	printk("Timeout:%d\n",cp->Request.Timeout);
	printk("CDB: %02x %02x %02x %02x %02x %02x %02x %02x"
		" %02x %02x %02x %02x %02x %02x %02x %02x\n",
		cp->Request.CDB[0], cp->Request.CDB[1],
		cp->Request.CDB[2], cp->Request.CDB[3],
		cp->Request.CDB[4], cp->Request.CDB[5],
		cp->Request.CDB[6], cp->Request.CDB[7],
		cp->Request.CDB[8], cp->Request.CDB[9],
		cp->Request.CDB[10], cp->Request.CDB[11],
		cp->Request.CDB[12], cp->Request.CDB[13],
		/* NOTE(review): stray comma operator below joins the two
		   printk calls into one expression; harmless here, and this
		   whole block is compiled out (#if 0), but it should be a
		   semicolon if ever re-enabled. */
		cp->Request.CDB[14], cp->Request.CDB[15]),
	printk("edesc.Addr: 0x%08x/0%08x, Len = %d\n",
		cp->ErrDesc.Addr.upper, cp->ErrDesc.Addr.lower,
			cp->ErrDesc.Len);
	printk("sgs..........Errorinfo:\n");
	printk("scsistatus:%d\n", cp->err_info->ScsiStatus);
	printk("senselen:%d\n", cp->err_info->SenseLen);
	printk("cmd status:%d\n", cp->err_info->CommandStatus);
	printk("resid cnt:%d\n", cp->err_info->ResidualCnt);
	printk("offense size:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_size);
	printk("offense byte:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_num);
	printk("offense value:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
}
#endif

/*
 * find_bus_target_lun - find an unused (bus, target, lun) triple for a
 * new device.  Bus and lun are always 0; only the target is searched.
 * Assumes h->scsi_ctlr->lock is held.  Returns 0 on success, 1 if every
 * target id is already taken.
 */
static int
find_bus_target_lun(ctlr_info_t *h, int *bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new device */
	/* assumes h->scsi_ctlr->lock is held */
	int i, found=0;
	unsigned char target_taken[CCISS_MAX_SCSI_DEVS_PER_HBA];

	memset(&target_taken[0], 0, CCISS_MAX_SCSI_DEVS_PER_HBA);
	/* the controller itself occupies one target id */
	target_taken[SELF_SCSI_ID] = 1;
	for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++)
		target_taken[ccissscsi[h->ctlr].dev[i].target] = 1;

	for (i = 0; i < CCISS_MAX_SCSI_DEVS_PER_HBA; i++) {
		if (!target_taken[i]) {
			*bus = 0; *target=i; *lun = 0; found=1;
			break;
		}
	}
	return (!found);
}

/* maps a firmware scsi3addr to the bus/target/lun we presented upward */
struct scsi2map {
	char scsi3addr[8];
	int bus, target, lun;
};

/*
 * cciss_scsi_add_entry - add one device to ccissscsi[h->ctlr] and record
 * the (bus, target, lun) it was given in added[] so the caller can tell
 * the scsi mid layer about it.  Assumes h->scsi_ctlr->lock is held.
 * Returns 0 on success, -1 if the table is full or no target id is free.
 */
static int
cciss_scsi_add_entry(ctlr_info_t *h, int hostno,
		struct cciss_scsi_dev_t *device,
		struct scsi2map *added, int *nadded)
{
	/* assumes h->scsi_ctlr->lock is held */
	int n = ccissscsi[h->ctlr].ndevices;
	struct cciss_scsi_dev_t *sd;
	int i, bus, target, lun;
	unsigned char addr1[8], addr2[8];

	if (n >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
		dev_warn(&h->pdev->dev, "Too many devices, "
			"some will be inaccessible.\n");
		return -1;
	}

	bus = target = -1;
	lun = 0;
	/* Is this device a non-zero lun of a multi-lun device */
	/* byte 4 of the 8-byte LUN addr will contain the logical unit no. */
	if (device->scsi3addr[4] != 0) {
		/* Search through our list and find the device which */
		/* has the same 8 byte LUN address, excepting byte 4. */
		/* Assign the same bus and target for this new LUN. */
		/* Use the logical unit number from the firmware. */
		memcpy(addr1, device->scsi3addr, 8);
		addr1[4] = 0;
		for (i = 0; i < n; i++) {
			sd = &ccissscsi[h->ctlr].dev[i];
			memcpy(addr2, sd->scsi3addr, 8);
			addr2[4] = 0;
			/* differ only in byte 4?
 */
			if (memcmp(addr1, addr2, 8) == 0) {
				bus = sd->bus;
				target = sd->target;
				lun = device->scsi3addr[4];
				break;
			}
		}
	}

	sd = &ccissscsi[h->ctlr].dev[n];
	if (lun == 0) {
		/* lun 0 of a (possibly) new target: pick a fresh triple */
		if (find_bus_target_lun(h,
			&sd->bus, &sd->target, &sd->lun) != 0)
			return -1;
	} else {
		/* additional lun of an existing target found above */
		sd->bus = bus;
		sd->target = target;
		sd->lun = lun;
	}
	added[*nadded].bus = sd->bus;
	added[*nadded].target = sd->target;
	added[*nadded].lun = sd->lun;
	(*nadded)++;

	memcpy(sd->scsi3addr, device->scsi3addr, 8);
	memcpy(sd->vendor, device->vendor, sizeof(sd->vendor));
	memcpy(sd->revision, device->revision, sizeof(sd->revision));
	memcpy(sd->device_id, device->device_id, sizeof(sd->device_id));
	sd->devtype = device->devtype;

	ccissscsi[h->ctlr].ndevices++;

	/* initially, (before registering with scsi layer) we don't
	   know our hostno and we don't want to print anything first
	   time anyway (the scsi layer's inquiries will show that info) */
	if (hostno != -1)
		dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
			scsi_device_type(sd->devtype), hostno,
			sd->bus, sd->target, sd->lun);
	return 0;
}

/*
 * cciss_scsi_remove_entry - delete entry 'entry' from ccissscsi[h->ctlr],
 * compacting the array, and record the freed (bus, target, lun) in
 * removed[] so the caller can notify the scsi mid layer.
 * Assumes h->scsi_ctlr->lock is held.
 */
static void
cciss_scsi_remove_entry(ctlr_info_t *h, int hostno, int entry,
	struct scsi2map *removed, int *nremoved)
{
	/* assumes h->scsi_ctlr->lock is held */
	int i;
	struct cciss_scsi_dev_t sd;

	if (entry < 0 || entry >= CCISS_MAX_SCSI_DEVS_PER_HBA) return;
	sd = ccissscsi[h->ctlr].dev[entry];
	removed[*nremoved].bus    = sd.bus;
	removed[*nremoved].target = sd.target;
	removed[*nremoved].lun    = sd.lun;
	(*nremoved)++;
	for (i = entry; i < ccissscsi[h->ctlr].ndevices-1; i++)
		ccissscsi[h->ctlr].dev[i] = ccissscsi[h->ctlr].dev[i+1];
	ccissscsi[h->ctlr].ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd.devtype), hostno,
			sd.bus, sd.target, sd.lun);
}

/* byte-wise equality of two 8-byte firmware scsi3 addresses */
#define SCSI3ADDR_EQ(a,b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

/*
 * fixup_botched_add - undo a cciss_scsi_add_entry() after
 * scsi_add_device() fails, so ccissscsi[] matches the mid layer's view.
 */
static void fixup_botched_add(ctlr_info_t *h, char
*scsi3addr)
{
	/* called when scsi_add_device fails in order to re-adjust */
	/* ccissscsi[] to match the mid layer's view. */
	unsigned long flags;
	int i, j;
	CPQ_TAPE_LOCK(h, flags);
	for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
		if (memcmp(scsi3addr,
				ccissscsi[h->ctlr].dev[i].scsi3addr, 8) == 0) {
			for (j = i; j < ccissscsi[h->ctlr].ndevices-1; j++)
				ccissscsi[h->ctlr].dev[j] =
					ccissscsi[h->ctlr].dev[j+1];
			ccissscsi[h->ctlr].ndevices--;
			break;
		}
	}
	CPQ_TAPE_UNLOCK(h, flags);
}

/*
 * device_is_the_same - nonzero if two device records agree on type,
 * address, device id and inquiry strings.
 */
static int device_is_the_same(struct cciss_scsi_dev_t *dev1,
	struct cciss_scsi_dev_t *dev2)
{
	return dev1->devtype == dev2->devtype &&
		memcmp(dev1->scsi3addr, dev2->scsi3addr,
			sizeof(dev1->scsi3addr)) == 0 &&
		memcmp(dev1->device_id, dev2->device_id,
			sizeof(dev1->device_id)) == 0 &&
		memcmp(dev1->vendor, dev2->vendor,
			sizeof(dev1->vendor)) == 0 &&
		memcmp(dev1->model, dev2->model,
			sizeof(dev1->model)) == 0 &&
		memcmp(dev1->revision, dev2->revision,
			sizeof(dev1->revision)) == 0;
}

/*
 * adjust_cciss_scsi_table - reconcile ccissscsi[h->ctlr] with a freshly
 * scanned device list sd[] (addresses/types only; bus/target/lun are
 * assigned here), then notify the scsi mid layer of removals/additions.
 */
static int
adjust_cciss_scsi_table(ctlr_info_t *h, int hostno,
	struct cciss_scsi_dev_t sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, but
	   bus target and lun are not filled in.  This function
	   takes what's in sd to be the current and adjusts
	   ccissscsi[] to be in line with what's in sd. */

	int i,j, found, changes=0;
	struct cciss_scsi_dev_t *csd;
	unsigned long flags;
	struct scsi2map *added, *removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * CCISS_MAX_SCSI_DEVS_PER_HBA,
			GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * CCISS_MAX_SCSI_DEVS_PER_HBA,
			GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev,
			"Out of memory in adjust_cciss_scsi_table\n");
		goto free_and_out;
	}

	CPQ_TAPE_LOCK(h, flags);

	if (hostno != -1)  /* if it's not the first time...
 */
		sh = h->scsi_ctlr->scsi_host;

	/* find any devices in ccissscsi[] that are not in
	   sd[] and remove them from ccissscsi[] */

	i = 0;
	nremoved = 0;
	nadded = 0;

	while (i < ccissscsi[h->ctlr].ndevices) {
		csd = &ccissscsi[h->ctlr].dev[i];
		found=0;
		for (j=0;j<nsds;j++) {
			if (SCSI3ADDR_EQ(sd[j].scsi3addr,
				csd->scsi3addr)) {
				/* 2 = same device, 1 = same address but
				   different identity */
				if (device_is_the_same(&sd[j], csd))
					found=2;
				else
					found=1;
				break;
			}
		}

		if (found == 0) { /* device no longer present. */
			changes++;
			cciss_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			/* remove ^^^, hence i not incremented */
		} else if (found == 1) { /* device is different in some way */
			changes++;
			dev_info(&h->pdev->dev,
				"device c%db%dt%dl%d has changed.\n",
				hostno, csd->bus, csd->target, csd->lun);
			cciss_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			/* remove ^^^, hence i not incremented */
			if (cciss_scsi_add_entry(h, hostno, &sd[j],
				added, &nadded) != 0)
				/* we just removed one, so add can't fail. */
				BUG();
			csd->devtype = sd[j].devtype;
			memcpy(csd->device_id, sd[j].device_id,
				sizeof(csd->device_id));
			memcpy(csd->vendor, sd[j].vendor,
				sizeof(csd->vendor));
			memcpy(csd->model, sd[j].model,
				sizeof(csd->model));
			memcpy(csd->revision, sd[j].revision,
				sizeof(csd->revision));
		} else 		/* device is same as it ever was, */
			i++;	/* so just move along. */
	}

	/* Now, make sure every device listed in sd[] is also
	   listed in ccissscsi[], adding them if they aren't found */

	for (i=0;i<nsds;i++) {
		found=0;
		for (j = 0; j < ccissscsi[h->ctlr].ndevices; j++) {
			csd = &ccissscsi[h->ctlr].dev[j];
			if (SCSI3ADDR_EQ(sd[i].scsi3addr,
				csd->scsi3addr)) {
				if (device_is_the_same(&sd[i], csd))
					found=2;	/* found device */
				else
					found=1; 	/* found a bug. */
				break;
			}
		}
		if (!found) {
			changes++;
			if (cciss_scsi_add_entry(h, hostno, &sd[i],
				added, &nadded) != 0)
				break;
		} else if (found == 1) {
			/* should never happen...
 */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	CPQ_TAPE_UNLOCK(h, flags);

	/* Don't notify scsi mid layer of any changes the first time through */
	/* (or if there are no changes) scsi_scan_host will do it later the */
	/* first time through. */
	if (hostno == -1 || !changes)
		goto free_and_out;

	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i].bus,
				removed[i].target, removed[i].lun);
		if (sdev != NULL) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/* We don't expect to get here. */
			/* future cmds to this device will get selection */
			/* timeout as if the device was gone. */
			/* NOTE(review): the "\n" in this format string sits
			   mid-sentence, before " for removal." — looks like a
			   misplaced newline, but it is a runtime string and
			   left untouched here. */
			dev_warn(&h->pdev->dev, "didn't find "
				"c%db%dt%dl%d\n for removal.",
				hostno, removed[i].bus,
				removed[i].target, removed[i].lun);
		}
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		int rc;
		rc = scsi_add_device(sh, added[i].bus,
			added[i].target, added[i].lun);
		if (rc == 0)
			continue;
		dev_warn(&h->pdev->dev, "scsi_add_device "
			"c%db%dt%dl%d failed, device not added.\n",
			hostno, added[i].bus, added[i].target, added[i].lun);
		/* now we have to remove it from ccissscsi, */
		/* since it didn't get added to scsi mid layer */
		fixup_botched_add(h, added[i].scsi3addr);
	}

free_and_out:
	kfree(added);
	kfree(removed);
	return 0;
}

/*
 * lookup_scsi3addr - translate a (bus, target, lun) nexus back to the
 * 8-byte firmware scsi3 address.  Returns 0 and fills scsi3addr on
 * success, -1 if the nexus is unknown.
 */
static int
lookup_scsi3addr(ctlr_info_t *h, int bus, int target, int lun, char *scsi3addr)
{
	int i;
	struct cciss_scsi_dev_t *sd;
	unsigned long flags;

	CPQ_TAPE_LOCK(h, flags);
	for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
		sd = &ccissscsi[h->ctlr].dev[i];
		if (sd->bus == bus &&
			sd->target == target &&
			sd->lun == lun) {
			memcpy(scsi3addr, &sd->scsi3addr[0], 8);
			CPQ_TAPE_UNLOCK(h, flags);
			return 0;
		}
	}
	CPQ_TAPE_UNLOCK(h, flags);
	return -1;
}

/*
 * cciss_scsi_setup - allocate and initialise the per-controller scsi
 * adapter data (h->scsi_ctlr), including the command stack.  On any
 * allocation failure h->scsi_ctlr is left NULL; failure is silent here
 * and detected by later users.
 */
static void
cciss_scsi_setup(ctlr_info_t *h)
{
	struct cciss_scsi_adapter_data_t * shba;

	ccissscsi[h->ctlr].ndevices = 0;
	shba =
(struct cciss_scsi_adapter_data_t *)
		kmalloc(sizeof(*shba), GFP_KERNEL);
	if (shba == NULL)
		return;
	shba->scsi_host = NULL;
	spin_lock_init(&shba->lock);
	shba->registered = 0;
	if (scsi_cmd_stack_setup(h, shba) != 0) {
		kfree(shba);
		shba = NULL;
	}
	h->scsi_ctlr = shba;
	return;
}

/*
 * complete_scsi_command - completion handler for commands submitted via
 * the scsi path.  Translates controller error info into a scsi mid-layer
 * result, copies sense data, and returns the command to the free stack.
 * NOTE(review): the 'timeout' and 'tag' parameters are not referenced in
 * this body — presumably kept for the completion-callback signature;
 * confirm against the caller.
 */
static void complete_scsi_command(CommandList_struct *c, int timeout,
	__u32 tag)
{
	struct scsi_cmnd *cmd;
	ctlr_info_t *h;
	ErrorInfo_struct *ei;

	ei = c->err_info;

	/* First, see if it was a message rather than a command */
	if (c->Request.Type.Type == TYPE_MSG)  {
		c->cmd_type = CMD_MSG_DONE;
		return;
	}

	cmd = (struct scsi_cmnd *) c->scsi_cmd;
	h = hba[c->ctlr];

	scsi_dma_unmap(cmd);
	if (c->Header.SGTotal > h->max_cmd_sgentries)
		cciss_unmap_sg_chain_block(h, c);

	cmd->result = (DID_OK << 16); 		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
	/* cmd->result |= (GOOD < 1); */		/* status byte */

	cmd->result |= (ei->ScsiStatus);
	/* printk("Scsistatus is 0x%02x\n", ei->ScsiStatus);  */

	/* copy the sense data whether we need to or not. */

	memcpy(cmd->sense_buffer, ei->SenseInfo,
		ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
			SCSI_SENSE_BUFFERSIZE :
			ei->SenseLen);
	scsi_set_resid(cmd, ei->ResidualCnt);

	if(ei->CommandStatus != 0)
	{ /* an error has occurred */
		switch(ei->CommandStatus)
		{
			case CMD_TARGET_STATUS:
				/* Pass it up to the upper layers... */
				if (!ei->ScsiStatus) {

	/* Ordinarily, this case should never happen, but there is a bug
	   in some released firmware revisions that allows it to happen
	   if, for example, a 4100 backplane loses power and the tape
	   drive is in it.  We assume that it's a fatal error of some
	   kind because we can't show that it wasn't. We will make it
	   look like selection timeout since that is the most common
	   reason for this to occur, and it's severe enough. */

					cmd->result = DID_NO_CONNECT << 16;
				}
			break;
			case CMD_DATA_UNDERRUN: /* let mid layer handle it.
 */
			break;
			case CMD_DATA_OVERRUN:
				dev_warn(&h->pdev->dev, "%p has"
					" completed with data overrun "
					"reported\n", c);
			break;
			case CMD_INVALID: {
				/* print_bytes(c, sizeof(*c), 1, 0);
				print_cmd(c); */
     /* We get CMD_INVALID if you address a non-existent tape drive instead
	of a selection timeout (no response).  You will see this if you yank
	out a tape drive, then try to access it. This is kind of a shame
	because it means that any other CMD_INVALID (e.g. driver bug) will
	get interpreted as a missing target. */
				cmd->result = DID_NO_CONNECT << 16;
				}
			break;
			case CMD_PROTOCOL_ERR:
				cmd->result = DID_ERROR << 16;
				dev_warn(&h->pdev->dev,
					"%p has protocol error\n", c);
			break;
			case CMD_HARDWARE_ERR:
				cmd->result = DID_ERROR << 16;
				dev_warn(&h->pdev->dev,
					"%p had hardware error\n", c);
			break;
			case CMD_CONNECTION_LOST:
				cmd->result = DID_ERROR << 16;
				dev_warn(&h->pdev->dev,
					"%p had connection lost\n", c);
			break;
			case CMD_ABORTED:
				cmd->result = DID_ABORT << 16;
				dev_warn(&h->pdev->dev, "%p was aborted\n", c);
			break;
			case CMD_ABORT_FAILED:
				cmd->result = DID_ERROR << 16;
				dev_warn(&h->pdev->dev,
					"%p reports abort failed\n", c);
			break;
			case CMD_UNSOLICITED_ABORT:
				cmd->result = DID_ABORT << 16;
				dev_warn(&h->pdev->dev, "%p aborted due to an "
					"unsolicited abort\n", c);
			break;
			case CMD_TIMEOUT:
				cmd->result = DID_TIME_OUT << 16;
				dev_warn(&h->pdev->dev, "%p timedout\n", c);
			break;
			case CMD_UNABORTABLE:
				cmd->result = DID_ERROR << 16;
				dev_warn(&h->pdev->dev, "c %p command "
					"unabortable\n", c);
			break;
			default:
				cmd->result = DID_ERROR << 16;
				dev_warn(&h->pdev->dev,
					"%p returned unknown status %x\n", c,
						ei->CommandStatus);
		}
	}
	/* hand the command back to the mid layer, then recycle it */
	cmd->scsi_done(cmd);
	scsi_cmd_free(h, c);
}

/*
 * cciss_scsi_detect - allocate, configure and register the Scsi_Host for
 * this controller and kick off the initial bus scan.  Returns 1 on
 * success, 0 on failure.
 */
static int
cciss_scsi_detect(ctlr_info_t *h)
{
	struct Scsi_Host *sh;
	int error;

	sh = scsi_host_alloc(&cciss_driver_template, sizeof(struct ctlr_info *));
	if (sh == NULL)
		goto fail;
	sh->io_port = 0;	// good enough? FIXME,
	sh->n_io_port = 0;	// I don't think we use these two...
	sh->this_id = SELF_SCSI_ID;
	sh->can_queue = cciss_tape_cmds;
	sh->sg_tablesize = h->maxsgentries;
	sh->max_cmd_len = MAX_COMMAND_SIZE;
	sh->max_sectors = h->cciss_max_sectors;

	((struct cciss_scsi_adapter_data_t *)
		h->scsi_ctlr)->scsi_host = sh;
	sh->hostdata[0] = (unsigned long) h;
	sh->irq = h->intr[SIMPLE_MODE_INT];
	sh->unique_id = sh->irq;
	error = scsi_add_host(sh, &h->pdev->dev);
	if (error)
		goto fail_host_put;
	scsi_scan_host(sh);
	return 1;

fail_host_put:
	scsi_host_put(sh);
fail:
	return 0;
}

/*
 * cciss_unmap_one - undo a single-entry DMA mapping recorded in SG[0]
 * of the command by cciss_map_one() below.
 */
static void
cciss_unmap_one(struct pci_dev *pdev,
		CommandList_struct *c,
		size_t buflen,
		int data_direction)
{
	u64bit addr64;

	addr64.val32.lower = c->SG[0].Addr.lower;
	addr64.val32.upper = c->SG[0].Addr.upper;
	pci_unmap_single(pdev, (dma_addr_t) addr64.val, buflen, data_direction);
}

/*
 * cciss_map_one - DMA-map a single contiguous buffer and fill in the
 * command's one-entry scatter/gather list (SGList/SGTotal = 1).
 */
static void
cciss_map_one(struct pci_dev *pdev,
		CommandList_struct *c,
		unsigned char *buf,
		size_t buflen,
		int data_direction)
{
	__u64 addr64;

	addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction);
	/* split the 64-bit bus address into the two 32-bit halves the
	   controller expects */
	c->SG[0].Addr.lower =
	  (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
	c->SG[0].Addr.upper =
	  (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
	c->SG[0].Len = buflen;
	c->Header.SGList = (__u8) 1;	/* no. SGs contig in this cmd */
	c->Header.SGTotal = (__u16) 1;	/* total sgs in this cmd list */
}

/*
 * cciss_scsi_do_simple_cmd - synchronously send one CDB with a single
 * data buffer to the given scsi3 address and wait for completion.
 * Always returns 0; the caller inspects c->err_info for the outcome.
 */
static int
cciss_scsi_do_simple_cmd(ctlr_info_t *h,
			CommandList_struct *c,
			unsigned char *scsi3addr,
			unsigned char *cdb,
			unsigned char cdblen,
			unsigned char *buf, int bufsize,
			int direction)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	c->cmd_type = CMD_IOCTL_PEND; /* treat this like an ioctl */
	c->scsi_cmd = NULL;
	c->Header.ReplyQueue = 0;  /* unused in simple mode */
	memcpy(&c->Header.LUN, scsi3addr, sizeof(c->Header.LUN));
	c->Header.Tag.lower = c->busaddr;  /* Use k. address of cmd as tag */
	// Fill in the request block...
	/* printk("Using scsi3addr 0x%02x%0x2%0x2%0x2%0x2%0x2%0x2%0x2\n",
		scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
		scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); */

	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
	memcpy(c->Request.CDB, cdb, cdblen);
	c->Request.Timeout = 0;
	c->Request.CDBLen = cdblen;
	c->Request.Type.Type = TYPE_CMD;
	c->Request.Type.Attribute = ATTR_SIMPLE;
	c->Request.Type.Direction = direction;

	/* Fill in the SG list and do dma mapping */
	/* NOTE(review): the buffer is always mapped DMA_FROM_DEVICE even
	   though 'direction' is set in the request block above; all current
	   callers in this view pass XFER_READ, so this matches, but it would
	   be wrong for a write — confirm before adding write callers. */
	cciss_map_one(h->pdev, c, (unsigned char *) buf,
			bufsize, DMA_FROM_DEVICE);

	c->waiting = &wait;
	enqueue_cmd_and_start_io(h, c);
	wait_for_completion(&wait);

	/* undo the dma mapping */
	cciss_unmap_one(h->pdev, c, bufsize, DMA_FROM_DEVICE);
	return(0);
}

/*
 * cciss_scsi_interpret_error - log a human-readable explanation of the
 * controller's error info for a failed internally-issued command.
 */
static void
cciss_scsi_interpret_error(ctlr_info_t *h, CommandList_struct *c)
{
	ErrorInfo_struct *ei;

	ei = c->err_info;
	switch(ei->CommandStatus)
	{
		case CMD_TARGET_STATUS:
			dev_warn(&h->pdev->dev,
				"cmd %p has completed with errors\n", c);
			dev_warn(&h->pdev->dev,
				"cmd %p has SCSI Status = %x\n",
				c, ei->ScsiStatus);
			if (ei->ScsiStatus == 0)
				dev_warn(&h->pdev->dev,
				"SCSI status is abnormally zero.  "
				"(probably indicates selection timeout "
				"reported incorrectly due to a known "
				"firmware bug, circa July, 2001.)\n");
		break;
		case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
			dev_info(&h->pdev->dev, "UNDERRUN\n");
		break;
		case CMD_DATA_OVERRUN:
			dev_warn(&h->pdev->dev, "%p has"
				" completed with data overrun "
				"reported\n", c);
		break;
		case CMD_INVALID: {
			/* controller unfortunately reports SCSI passthru's */
			/* to non-existent targets as invalid commands.
 */
			dev_warn(&h->pdev->dev,
				"%p is reported invalid (probably means "
				"target device no longer present)\n", c);
			/* print_bytes((unsigned char *) c, sizeof(*c), 1, 0);
			print_cmd(c);  */
			}
		break;
		case CMD_PROTOCOL_ERR:
			dev_warn(&h->pdev->dev, "%p has protocol error\n", c);
		break;
		case CMD_HARDWARE_ERR:
			/* cmd->result = DID_ERROR << 16; */
			dev_warn(&h->pdev->dev, "%p had hardware error\n", c);
		break;
		case CMD_CONNECTION_LOST:
			dev_warn(&h->pdev->dev, "%p had connection lost\n", c);
		break;
		case CMD_ABORTED:
			dev_warn(&h->pdev->dev, "%p was aborted\n", c);
		break;
		case CMD_ABORT_FAILED:
			dev_warn(&h->pdev->dev,
				"%p reports abort failed\n", c);
		break;
		case CMD_UNSOLICITED_ABORT:
			dev_warn(&h->pdev->dev,
				"%p aborted due to an unsolicited abort\n", c);
		break;
		case CMD_TIMEOUT:
			dev_warn(&h->pdev->dev, "%p timedout\n", c);
		break;
		case CMD_UNABORTABLE:
			dev_warn(&h->pdev->dev, "%p unabortable\n", c);
		break;
		default:
			dev_warn(&h->pdev->dev,
				"%p returned unknown status %x\n",
				c, ei->CommandStatus);
	}
}

/*
 * cciss_scsi_do_inquiry - issue a CISS INQUIRY (optionally for VPD page
 * 'page') to the device at scsi3addr, reading up to bufsize bytes into
 * buf.  Returns 0 on success, nonzero on failure.
 */
static int
cciss_scsi_do_inquiry(ctlr_info_t *h, unsigned char *scsi3addr,
	unsigned char page, unsigned char *buf,
	unsigned char bufsize)
{
	int rc;
	CommandList_struct *c;
	char cdb[6];
	ErrorInfo_struct *ei;
	unsigned long flags;

	/* command stack is protected by h->lock */
	spin_lock_irqsave(&h->lock, flags);
	c = scsi_cmd_alloc(h);
	spin_unlock_irqrestore(&h->lock, flags);
	if (c == NULL) {			/* trouble...
*/ printk("cmd_alloc returned NULL!\n"); return -1; } ei = c->err_info; cdb[0] = CISS_INQUIRY; cdb[1] = (page != 0); cdb[2] = page; cdb[3] = 0; cdb[4] = bufsize; cdb[5] = 0; rc = cciss_scsi_do_simple_cmd(h, c, scsi3addr, cdb, 6, buf, bufsize, XFER_READ); if (rc != 0) return rc; /* something went wrong */ if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { cciss_scsi_interpret_error(h, c); rc = -1; } spin_lock_irqsave(&h->lock, flags); scsi_cmd_free(h, c); spin_unlock_irqrestore(&h->lock, flags); return rc; } /* Get the device id from inquiry page 0x83 */ static int cciss_scsi_get_device_id(ctlr_info_t *h, unsigned char *scsi3addr, unsigned char *device_id, int buflen) { int rc; unsigned char *buf; if (buflen > 16) buflen = 16; buf = kzalloc(64, GFP_KERNEL); if (!buf) return -1; rc = cciss_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64); if (rc == 0) memcpy(device_id, &buf[8], buflen); kfree(buf); return rc != 0; } static int cciss_scsi_do_report_phys_luns(ctlr_info_t *h, ReportLunData_struct *buf, int bufsize) { int rc; CommandList_struct *c; unsigned char cdb[12]; unsigned char scsi3addr[8]; ErrorInfo_struct *ei; unsigned long flags; spin_lock_irqsave(&h->lock, flags); c = scsi_cmd_alloc(h); spin_unlock_irqrestore(&h->lock, flags); if (c == NULL) { /* trouble... 
*/ printk("cmd_alloc returned NULL!\n"); return -1; } memset(&scsi3addr[0], 0, 8); /* address the controller */ cdb[0] = CISS_REPORT_PHYS; cdb[1] = 0; cdb[2] = 0; cdb[3] = 0; cdb[4] = 0; cdb[5] = 0; cdb[6] = (bufsize >> 24) & 0xFF; //MSB cdb[7] = (bufsize >> 16) & 0xFF; cdb[8] = (bufsize >> 8) & 0xFF; cdb[9] = bufsize & 0xFF; cdb[10] = 0; cdb[11] = 0; rc = cciss_scsi_do_simple_cmd(h, c, scsi3addr, cdb, 12, (unsigned char *) buf, bufsize, XFER_READ); if (rc != 0) return rc; /* something went wrong */ ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { cciss_scsi_interpret_error(h, c); rc = -1; } spin_lock_irqsave(&h->lock, flags); scsi_cmd_free(h, c); spin_unlock_irqrestore(&h->lock, flags); return rc; } static void cciss_update_non_disk_devices(ctlr_info_t *h, int hostno) { /* the idea here is we could get notified from /proc that some devices have changed, so we do a report physical luns cmd, and adjust our list of devices accordingly. (We can't rely on the scsi-mid layer just doing inquiries, because the "busses" that the scsi mid-layer probes are totally fabricated by this driver, so new devices wouldn't show up. the scsi3addr's of devices won't change so long as the adapter is not reset. That means we can rescan and tell which devices we already know about, vs. new devices, vs. disappearing devices. Also, if you yank out a tape drive, then put in a disk in it's place, (say, a configured volume from another array controller for instance) _don't_ poke this driver (so it thinks it's still a tape, but _do_ poke the scsi mid layer, so it does an inquiry... the scsi mid layer will see the physical disk. This would be bad. Need to think about how to prevent that. One idea would be to snoop all scsi responses and if an inquiry repsonse comes back that reports a disk, chuck it an return selection timeout instead and adjust our table... Not sure i like that though. 
 */
#define OBDR_TAPE_INQ_SIZE 49
#define OBDR_TAPE_SIG "$DR-10"
	ReportLunData_struct *ld_buff;
	unsigned char *inq_buff;
	unsigned char scsi3addr[8];
	__u32 num_luns=0;
	unsigned char *ch;
	struct cciss_scsi_dev_t *currentsd, *this_device;
	int ncurrent=0;
	int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8;
	int i;

	ld_buff = kzalloc(reportlunsize, GFP_KERNEL);
	inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	/* one extra slot at the end is used as scratch for this_device */
	currentsd = kzalloc(sizeof(*currentsd) *
			(CCISS_MAX_SCSI_DEVS_PER_HBA+1), GFP_KERNEL);
	if (ld_buff == NULL || inq_buff == NULL || currentsd == NULL) {
		printk(KERN_ERR "cciss: out of memory\n");
		goto out;
	}
	this_device = &currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA];
	if (cciss_scsi_do_report_phys_luns(h, ld_buff, reportlunsize) == 0) {
		/* LUN list length is big-endian in the response header */
		ch = &ld_buff->LUNListLength[0];
		num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8;
		if (num_luns > CISS_MAX_PHYS_LUN) {
			printk(KERN_WARNING
				"cciss: Maximum physical LUNs (%d) exceeded.  "
				"%d LUNs ignored.\n", CISS_MAX_PHYS_LUN,
				num_luns - CISS_MAX_PHYS_LUN);
			num_luns = CISS_MAX_PHYS_LUN;
		}
	} else {
		printk(KERN_ERR  "cciss: Report physical LUNs failed.\n");
		goto out;
	}


	/* adjust our table of devices */
	for (i = 0; i < num_luns; i++) {
		/* for each physical lun, do an inquiry */
		if (ld_buff->LUN[i][3] & 0xC0) continue;
		memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
		memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8);

		if (cciss_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
			(unsigned char) OBDR_TAPE_INQ_SIZE) != 0)
			/* Inquiry failed (msg printed already) */
			continue; /* so we will skip this device.
 */
		/* parse standard inquiry data into the scratch record;
		   bus/target/lun are assigned later by
		   adjust_cciss_scsi_table() */
		this_device->devtype = (inq_buff[0] & 0x1f);
		this_device->bus = -1;
		this_device->target = -1;
		this_device->lun = -1;
		memcpy(this_device->scsi3addr, scsi3addr, 8);
		memcpy(this_device->vendor, &inq_buff[8],
			sizeof(this_device->vendor));
		memcpy(this_device->model, &inq_buff[16],
			sizeof(this_device->model));
		memcpy(this_device->revision, &inq_buff[32],
			sizeof(this_device->revision));
		memset(this_device->device_id, 0,
			sizeof(this_device->device_id));
		cciss_scsi_get_device_id(h, scsi3addr,
			this_device->device_id, sizeof(this_device->device_id));

		switch (this_device->devtype) {
		case 0x05: /* CD-ROM */ {

			/* We don't *really* support actual CD-ROM devices,
			 * just this "One Button Disaster Recovery" tape drive
			 * which temporarily pretends to be a CD-ROM drive.
			 * So we check that the device is really an OBDR tape
			 * device by checking for "$DR-10" in bytes 43-48 of
			 * the inquiry data.
			 */
			char obdr_sig[7];

			strncpy(obdr_sig, &inq_buff[43], 6);
			obdr_sig[6] = '\0';
			if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
				/* Not OBDR device, ignore it. */
				break;
		}
		/* fall through . . .
 */
		case 0x01: /* sequential access, (tape) */
		case 0x08: /* medium changer */
			if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
				printk(KERN_INFO "cciss%d: %s ignored, "
					"too many devices.\n", h->ctlr,
					scsi_device_type(this_device->devtype));
				break;
			}
			currentsd[ncurrent] = *this_device;
			ncurrent++;
			break;
		default:
			/* disks and anything else are handled by the block
			   side of the driver, not here */
			break;
		}
	}

	adjust_cciss_scsi_table(h, hostno, currentsd, ncurrent);
out:
	kfree(inq_buff);
	kfree(ld_buff);
	kfree(currentsd);
	return;
}

/*
 * is_keyword - if the buffer starts with 'verb', return its length,
 * else 0.  Used to parse /proc writes.
 */
static int
is_keyword(char *ptr, int len, char *verb)  // Thanks to ncr53c8xx.c
{
	int verb_len = strlen(verb);
	if (len >= verb_len && !memcmp(verb,ptr,verb_len))
		return verb_len;
	else
		return 0;
}

/*
 * cciss_scsi_user_command - handle a command written to our /proc file.
 * Only "rescan" is recognized; returns length consumed or -EINVAL.
 */
static int
cciss_scsi_user_command(ctlr_info_t *h, int hostno, char *buffer, int length)
{
	int arg_len;

	if ((arg_len = is_keyword(buffer, length, "rescan")) != 0)
		cciss_update_non_disk_devices(h, hostno);
	else
		return -EINVAL;
	return length;
}

/*
 * cciss_scsi_proc_info - /proc/scsi read/write entry point.  Reads dump
 * the host number and device table; writes are parsed as user commands.
 */
static int
cciss_scsi_proc_info(struct Scsi_Host *sh,
		char *buffer, /* data buffer */
		char **start, 	   /* where data in buffer starts */
		off_t offset,	   /* offset from start of imaginary file */
		int length, 	   /* length of data in buffer */
		int func)	   /* 0 == read, 1 == write */
{
	int buflen, datalen;
	ctlr_info_t *h;
	int i;

	h = (ctlr_info_t *) sh->hostdata[0];
	if (h == NULL)  /* This really shouldn't ever happen. */
		return -EINVAL;

	if (func == 0) {	/* User is reading from /proc/scsi/ciss*?/?*  */
		buflen = sprintf(buffer, "cciss%d: SCSI host: %d\n",
				h->ctlr, sh->host_no);

		/* this information is needed by apps to know which cciss
		   device corresponds to which scsi host number without
		   having to open a scsi target device node.  The device
		   information is not a duplicate of /proc/scsi/scsi because
		   the two may be out of sync due to scsi hotplug, rather
		   this info is for an app to be able to use to know how to
		   get them back in sync.
 */

		for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
			struct cciss_scsi_dev_t *sd =
				&ccissscsi[h->ctlr].dev[i];
			buflen += sprintf(&buffer[buflen], "c%db%dt%dl%d %02d "
				"0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
				sh->host_no, sd->bus, sd->target, sd->lun,
				sd->devtype,
				sd->scsi3addr[0], sd->scsi3addr[1],
				sd->scsi3addr[2], sd->scsi3addr[3],
				sd->scsi3addr[4], sd->scsi3addr[5],
				sd->scsi3addr[6], sd->scsi3addr[7]);
		}
		datalen = buflen - offset;
		if (datalen < 0) { 	/* they're reading past EOF. */
			datalen = 0;
			*start = buffer+buflen;
		} else
			*start = buffer + offset;
		return(datalen);
	} else 	/* User is writing to /proc/scsi/cciss*?/?*  ... */
		return cciss_scsi_user_command(h, sh->host_no,
			buffer, length);
}

/* cciss_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
   dma mapping  and fills in the scatter gather entries of the
   cciss command, c. */

static void
cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *c,
	struct scsi_cmnd *cmd)
{
	unsigned int len;
	struct scatterlist *sg;
	__u64 addr64;
	int request_nsgs, i, chained, sg_index;
	struct cciss_scsi_adapter_data_t *sa = h->scsi_ctlr;
	SGDescriptor_struct *curr_sg;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	chained = 0;
	sg_index = 0;
	curr_sg = c->SG;
	request_nsgs = scsi_dma_map(cmd);
	if (request_nsgs) {
		scsi_for_each_sg(cmd, sg, request_nsgs, i) {
			/* if the embedded SG list is about to run out and
			   more entries remain, switch to this command's
			   chained SG block */
			if (sg_index + 1 == h->max_cmd_sgentries &&
				!chained && request_nsgs - i > 1) {
				chained = 1;
				sg_index = 0;
				curr_sg = sa->cmd_sg_list[c->cmdindex];
			}
			addr64 = (__u64) sg_dma_address(sg);
			len  = sg_dma_len(sg);
			curr_sg[sg_index].Addr.lower =
				(__u32) (addr64 & 0x0FFFFFFFFULL);
			curr_sg[sg_index].Addr.upper =
				(__u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
			curr_sg[sg_index].Len = len;
			curr_sg[sg_index].Ext = 0;
			++sg_index;
		}
		if (chained)
			cciss_map_sg_chain_block(h, c,
				sa->cmd_sg_list[c->cmdindex],
				(request_nsgs - (h->max_cmd_sgentries - 1)) *
					sizeof(SGDescriptor_struct));
	}
	/* track how many SG entries we are using */
	if (request_nsgs > h->maxSG)
		h->maxSG = request_nsgs;
	/* chained adds one entry for the chain descriptor itself */
	c->Header.SGTotal = (u16) request_nsgs + chained;
	if (request_nsgs > h->max_cmd_sgentries)
		c->Header.SGList = h->max_cmd_sgentries;
	else
		c->Header.SGList = c->Header.SGTotal;
	return;
}

/*
 * cciss_scsi_queue_command_lck - scsi mid layer queuecommand entry
 * (locked variant, wrapped by DEF_SCSI_QCMD below).  Resolves the nexus
 * to a firmware address, builds and fires the command; completion comes
 * back via complete_scsi_command() from the interrupt handler.
 */
static int
cciss_scsi_queue_command_lck(struct scsi_cmnd *cmd,
				void (*done)(struct scsi_cmnd *))
{
	ctlr_info_t *h;
	int rc;
	unsigned char scsi3addr[8];
	CommandList_struct *c;
	unsigned long flags;

	// Get the ptr to our adapter structure (hba[i]) out of cmd->host.
	// We violate cmd->host privacy here.  (Is there another way?)
	h = (ctlr_info_t *) cmd->device->host->hostdata[0];

	rc = lookup_scsi3addr(h, cmd->device->channel, cmd->device->id,
			cmd->device->lun, scsi3addr);
	if (rc != 0) {
		/* the scsi nexus does not match any that we presented... */
		/* pretend to mid layer that we got selection timeout */
		cmd->result = DID_NO_CONNECT << 16;
		done(cmd);
		/* we might want to think about registering controller itself
		   as a processor device on the bus so sg binds to it. */
		return 0;
	}

	/* Ok, we have a reasonable scsi nexus, so send the cmd down, and
	   see what the device thinks of it. */

	spin_lock_irqsave(&h->lock, flags);
	c = scsi_cmd_alloc(h);
	spin_unlock_irqrestore(&h->lock, flags);
	if (c == NULL) {		/* trouble... */
		dev_warn(&h->pdev->dev, "scsi_cmd_alloc returned NULL!\n");
		/* FIXME: next 3 lines are -> BAD! <- */
		cmd->result = DID_NO_CONNECT << 16;
		done(cmd);
		return 0;
	}

	// Fill in the command list header

	cmd->scsi_done = done;    // save this for use by completion code

	/* save c in case we have to abort it */
	cmd->host_scribble = (unsigned char *) c;

	c->cmd_type = CMD_SCSI;
	c->scsi_cmd = cmd;
	c->Header.ReplyQueue = 0;  /* unused in simple mode */
	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
	c->Header.Tag.lower = c->busaddr;  /* Use k. address of cmd as tag */

	// Fill in the request block...
	c->Request.Timeout = 0;
	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
	c->Request.CDBLen = cmd->cmd_len;
	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
	c->Request.Type.Type = TYPE_CMD;
	c->Request.Type.Attribute = ATTR_SIMPLE;
	/* map the mid layer's DMA direction onto the controller's */
	switch(cmd->sc_data_direction)
	{
	  case DMA_TO_DEVICE:
		c->Request.Type.Direction = XFER_WRITE;
		break;
	  case DMA_FROM_DEVICE:
		c->Request.Type.Direction = XFER_READ;
		break;
	  case DMA_NONE:
		c->Request.Type.Direction = XFER_NONE;
		break;
	  case DMA_BIDIRECTIONAL:
		// This can happen if a buggy application does a scsi passthru
		// and sets both inlen and outlen to non-zero. ( see
		// ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )

		c->Request.Type.Direction = XFER_RSVD;
		// This is technically wrong, and cciss controllers should
		// reject it with CMD_INVALID, which is the most correct
		// response, but non-fibre backends appear to let it
		// slide by, and give the same results as if this field
		// were set correctly.  Either way is acceptable for
		// our purposes here.

		break;

	  default:
		dev_warn(&h->pdev->dev, "unknown data direction: %d\n",
			cmd->sc_data_direction);
		BUG();
		break;
	}
	cciss_scatter_gather(h, c, cmd);
	enqueue_cmd_and_start_io(h, c);
	/* the cmd'll come back via intr handler in complete_scsi_command()  */
	return 0;
}

static DEF_SCSI_QCMD(cciss_scsi_queue_command)

/*
 * cciss_unregister_scsi - detach from the scsi mid layer (if registered)
 * and free the command stack and adapter data.  Called on module unload.
 */
static void cciss_unregister_scsi(ctlr_info_t *h)
{
	struct cciss_scsi_adapter_data_t *sa;
	struct cciss_scsi_cmd_stack_t *stk;
	unsigned long flags;

	/* we are being forcibly unloaded, and may not refuse.
*/ spin_lock_irqsave(&h->lock, flags); sa = h->scsi_ctlr; stk = &sa->cmd_stack; /* if we weren't ever actually registered, don't unregister */ if (sa->registered) { spin_unlock_irqrestore(&h->lock, flags); scsi_remove_host(sa->scsi_host); scsi_host_put(sa->scsi_host); spin_lock_irqsave(&h->lock, flags); } /* set scsi_host to NULL so our detect routine will find us on register */ sa->scsi_host = NULL; spin_unlock_irqrestore(&h->lock, flags); scsi_cmd_stack_free(h); kfree(sa); } static int cciss_engage_scsi(ctlr_info_t *h) { struct cciss_scsi_adapter_data_t *sa; struct cciss_scsi_cmd_stack_t *stk; unsigned long flags; spin_lock_irqsave(&h->lock, flags); sa = h->scsi_ctlr; stk = &sa->cmd_stack; if (sa->registered) { dev_info(&h->pdev->dev, "SCSI subsystem already engaged.\n"); spin_unlock_irqrestore(&h->lock, flags); return -ENXIO; } sa->registered = 1; spin_unlock_irqrestore(&h->lock, flags); cciss_update_non_disk_devices(h, -1); cciss_scsi_detect(h); return 0; } static void cciss_seq_tape_report(struct seq_file *seq, ctlr_info_t *h) { unsigned long flags; CPQ_TAPE_LOCK(h, flags); seq_printf(seq, "Sequential access devices: %d\n\n", ccissscsi[h->ctlr].ndevices); CPQ_TAPE_UNLOCK(h, flags); } static int wait_for_device_to_become_ready(ctlr_info_t *h, unsigned char lunaddr[]) { int rc; int count = 0; int waittime = HZ; CommandList_struct *c; c = cmd_alloc(h); if (!c) { dev_warn(&h->pdev->dev, "out of memory in " "wait_for_device_to_become_ready.\n"); return IO_ERROR; } /* Send test unit ready until device ready, or give up. */ while (count < 20) { /* Wait for a bit. do this first, because if we send * the TUR right away, the reset will just abort it. */ schedule_timeout_uninterruptible(waittime); count++; /* Increase wait time with each try, up to a point. 
*/ if (waittime < (HZ * 30)) waittime = waittime * 2; /* Send the Test Unit Ready */ rc = fill_cmd(h, c, TEST_UNIT_READY, NULL, 0, 0, lunaddr, TYPE_CMD); if (rc == 0) rc = sendcmd_withirq_core(h, c, 0); (void) process_sendcmd_error(h, c); if (rc != 0) goto retry_tur; if (c->err_info->CommandStatus == CMD_SUCCESS) break; if (c->err_info->CommandStatus == CMD_TARGET_STATUS && c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION) { if (c->err_info->SenseInfo[2] == NO_SENSE) break; if (c->err_info->SenseInfo[2] == UNIT_ATTENTION) { unsigned char asc; asc = c->err_info->SenseInfo[12]; check_for_unit_attention(h, c); if (asc == POWER_OR_RESET) break; } } retry_tur: dev_warn(&h->pdev->dev, "Waiting %d secs " "for device to become ready.\n", waittime / HZ); rc = 1; /* device not ready. */ } if (rc) dev_warn(&h->pdev->dev, "giving up on device.\n"); else dev_warn(&h->pdev->dev, "device is ready.\n"); cmd_free(h, c); return rc; } /* Need at least one of these error handlers to keep ../scsi/hosts.c from * complaining. Doing a host- or bus-reset can't do anything good here. * Despite what it might say in scsi_error.c, there may well be commands * on the controller, as the cciss driver registers twice, once as a block * device for the logical drives, and once as a scsi device, for any tape * drives. So we know there are no commands out on the tape drives, but we * don't know there are no commands on the controller, and it is likely * that there probably are, as the cciss block device is most commonly used * as a boot device (embedded controller on HP/Compaq systems.) 
*/ static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd) { int rc; CommandList_struct *cmd_in_trouble; unsigned char lunaddr[8]; ctlr_info_t *h; /* find the controller to which the command to be aborted was sent */ h = (ctlr_info_t *) scsicmd->device->host->hostdata[0]; if (h == NULL) /* paranoia */ return FAILED; dev_warn(&h->pdev->dev, "resetting tape drive or medium changer.\n"); /* find the command that's giving us trouble */ cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble; if (cmd_in_trouble == NULL) /* paranoia */ return FAILED; memcpy(lunaddr, &cmd_in_trouble->Header.LUN.LunAddrBytes[0], 8); /* send a reset to the SCSI LUN which the command was sent to */ rc = sendcmd_withirq(h, CCISS_RESET_MSG, NULL, 0, 0, lunaddr, TYPE_MSG); if (rc == 0 && wait_for_device_to_become_ready(h, lunaddr) == 0) return SUCCESS; dev_warn(&h->pdev->dev, "resetting device failed.\n"); return FAILED; } static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd) { int rc; CommandList_struct *cmd_to_abort; unsigned char lunaddr[8]; ctlr_info_t *h; /* find the controller to which the command to be aborted was sent */ h = (ctlr_info_t *) scsicmd->device->host->hostdata[0]; if (h == NULL) /* paranoia */ return FAILED; dev_warn(&h->pdev->dev, "aborting tardy SCSI cmd\n"); /* find the command to be aborted */ cmd_to_abort = (CommandList_struct *) scsicmd->host_scribble; if (cmd_to_abort == NULL) /* paranoia */ return FAILED; memcpy(lunaddr, &cmd_to_abort->Header.LUN.LunAddrBytes[0], 8); rc = sendcmd_withirq(h, CCISS_ABORT_MSG, &cmd_to_abort->Header.Tag, 0, 0, lunaddr, TYPE_MSG); if (rc == 0) return SUCCESS; return FAILED; } #else /* no CONFIG_CISS_SCSI_TAPE */ /* If no tape support, then these become defined out of existence */ #define cciss_scsi_setup(cntl_num) #define cciss_engage_scsi(h) #endif /* CONFIG_CISS_SCSI_TAPE */
gpl-2.0
Migaverse/Samsung-G920S-MMKernel
mm/early_ioremap.c
2099
5544
/* * Provide common bits of early_ioremap() support for architectures needing * temporary mappings during boot before ioremap() is available. * * This is mostly a direct copy of the x86 early_ioremap implementation. * * (C) Copyright 1995 1996, 2014 Linus Torvalds * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <asm/fixmap.h> #ifdef CONFIG_MMU static int early_ioremap_debug __initdata; static int __init early_ioremap_debug_setup(char *str) { early_ioremap_debug = 1; return 0; } early_param("early_ioremap_debug", early_ioremap_debug_setup); static int after_paging_init __initdata; void __init __weak early_ioremap_shutdown(void) { } void __init early_ioremap_reset(void) { early_ioremap_shutdown(); after_paging_init = 1; } /* * Generally, ioremap() is available after paging_init() has been called. * Architectures wanting to allow early_ioremap after paging_init() can * define __late_set_fixmap and __late_clear_fixmap to do the right thing. 
*/ #ifndef __late_set_fixmap static inline void __init __late_set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) { BUG(); } #endif #ifndef __late_clear_fixmap static inline void __init __late_clear_fixmap(enum fixed_addresses idx) { BUG(); } #endif static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata; static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata; static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata; void __init early_ioremap_setup(void) { int i; for (i = 0; i < FIX_BTMAPS_SLOTS; i++) if (WARN_ON(prev_map[i])) break; for (i = 0; i < FIX_BTMAPS_SLOTS; i++) slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i); } static int __init check_early_ioremap_leak(void) { int count = 0; int i; for (i = 0; i < FIX_BTMAPS_SLOTS; i++) if (prev_map[i]) count++; if (WARN(count, KERN_WARNING "Debug warning: early ioremap leak of %d areas detected.\n" "please boot with early_ioremap_debug and report the dmesg.\n", count)) return 1; return 0; } late_initcall(check_early_ioremap_leak); static void __init __iomem * __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot) { unsigned long offset; resource_size_t last_addr; unsigned int nrpages; enum fixed_addresses idx; int i, slot; WARN_ON(system_state != SYSTEM_BOOTING); slot = -1; for (i = 0; i < FIX_BTMAPS_SLOTS; i++) { if (!prev_map[i]) { slot = i; break; } } if (WARN(slot < 0, "%s(%08llx, %08lx) not found slot\n", __func__, (u64)phys_addr, size)) return NULL; /* Don't allow wraparound or zero size */ last_addr = phys_addr + size - 1; if (WARN_ON(!size || last_addr < phys_addr)) return NULL; prev_size[slot] = size; /* * Mappings have to be page-aligned */ offset = phys_addr & ~PAGE_MASK; phys_addr &= PAGE_MASK; size = PAGE_ALIGN(last_addr + 1) - phys_addr; /* * Mappings have to fit in the FIX_BTMAP area. */ nrpages = size >> PAGE_SHIFT; if (WARN_ON(nrpages > NR_FIX_BTMAPS)) return NULL; /* * Ok, go for it.. 
*/ idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot; while (nrpages > 0) { if (after_paging_init) __late_set_fixmap(idx, phys_addr, prot); else __early_set_fixmap(idx, phys_addr, prot); phys_addr += PAGE_SIZE; --idx; --nrpages; } WARN(early_ioremap_debug, "%s(%08llx, %08lx) [%d] => %08lx + %08lx\n", __func__, (u64)phys_addr, size, slot, offset, slot_virt[slot]); prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]); return prev_map[slot]; } void __init early_iounmap(void __iomem *addr, unsigned long size) { unsigned long virt_addr; unsigned long offset; unsigned int nrpages; enum fixed_addresses idx; int i, slot; slot = -1; for (i = 0; i < FIX_BTMAPS_SLOTS; i++) { if (prev_map[i] == addr) { slot = i; break; } } if (WARN(slot < 0, "early_iounmap(%p, %08lx) not found slot\n", addr, size)) return; if (WARN(prev_size[slot] != size, "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n", addr, size, slot, prev_size[slot])) return; WARN(early_ioremap_debug, "early_iounmap(%p, %08lx) [%d]\n", addr, size, slot); virt_addr = (unsigned long)addr; if (WARN_ON(virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))) return; offset = virt_addr & ~PAGE_MASK; nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT; idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot; while (nrpages > 0) { if (after_paging_init) __late_clear_fixmap(idx); else __early_set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR); --idx; --nrpages; } prev_map[slot] = NULL; } /* Remap an IO device */ void __init __iomem * early_ioremap(resource_size_t phys_addr, unsigned long size) { return __early_ioremap(phys_addr, size, FIXMAP_PAGE_IO); } /* Remap memory */ void __init * early_memremap(resource_size_t phys_addr, unsigned long size) { return (__force void *)__early_ioremap(phys_addr, size, FIXMAP_PAGE_NORMAL); } #else /* CONFIG_MMU */ void __init __iomem * early_ioremap(resource_size_t phys_addr, unsigned long size) { return (__force void __iomem *)phys_addr; } /* Remap memory */ void __init * early_memremap(resource_size_t phys_addr, unsigned 
long size) { return (void *)phys_addr; } void __init early_iounmap(void __iomem *addr, unsigned long size) { } #endif /* CONFIG_MMU */ void __init early_memunmap(void *addr, unsigned long size) { early_iounmap((__force void __iomem *)addr, size); }
gpl-2.0
EloYGomeZ/test_kernel_g620s
arch/x86/kernel/early_printk.c
2099
6114
#include <linux/console.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/string.h> #include <linux/screen_info.h> #include <linux/usb/ch9.h> #include <linux/pci_regs.h> #include <linux/pci_ids.h> #include <linux/errno.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/fcntl.h> #include <asm/setup.h> #include <xen/hvc-console.h> #include <asm/pci-direct.h> #include <asm/fixmap.h> #include <asm/mrst.h> #include <asm/pgtable.h> #include <linux/usb/ehci_def.h> /* Simple VGA output */ #define VGABASE (__ISA_IO_base + 0xb8000) static int max_ypos = 25, max_xpos = 80; static int current_ypos = 25, current_xpos; static void early_vga_write(struct console *con, const char *str, unsigned n) { char c; int i, k, j; while ((c = *str++) != '\0' && n-- > 0) { if (current_ypos >= max_ypos) { /* scroll 1 line up */ for (k = 1, j = 0; k < max_ypos; k++, j++) { for (i = 0; i < max_xpos; i++) { writew(readw(VGABASE+2*(max_xpos*k+i)), VGABASE + 2*(max_xpos*j + i)); } } for (i = 0; i < max_xpos; i++) writew(0x720, VGABASE + 2*(max_xpos*j + i)); current_ypos = max_ypos-1; } #ifdef CONFIG_KGDB_KDB if (c == '\b') { if (current_xpos > 0) current_xpos--; } else if (c == '\r') { current_xpos = 0; } else #endif if (c == '\n') { current_xpos = 0; current_ypos++; } else if (c != '\r') { writew(((0x7 << 8) | (unsigned short) c), VGABASE + 2*(max_xpos*current_ypos + current_xpos++)); if (current_xpos >= max_xpos) { current_xpos = 0; current_ypos++; } } } } static struct console early_vga_console = { .name = "earlyvga", .write = early_vga_write, .flags = CON_PRINTBUFFER, .index = -1, }; /* Serial functions loosely based on a similar package from Klaus P. 
Gerlicher */ static int early_serial_base = 0x3f8; /* ttyS0 */ #define XMTRDY 0x20 #define DLAB 0x80 #define TXR 0 /* Transmit register (WRITE) */ #define RXR 0 /* Receive register (READ) */ #define IER 1 /* Interrupt Enable */ #define IIR 2 /* Interrupt ID */ #define FCR 2 /* FIFO control */ #define LCR 3 /* Line control */ #define MCR 4 /* Modem control */ #define LSR 5 /* Line Status */ #define MSR 6 /* Modem Status */ #define DLL 0 /* Divisor Latch Low */ #define DLH 1 /* Divisor latch High */ static int early_serial_putc(unsigned char ch) { unsigned timeout = 0xffff; while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout) cpu_relax(); outb(ch, early_serial_base + TXR); return timeout ? 0 : -1; } static void early_serial_write(struct console *con, const char *s, unsigned n) { while (*s && n-- > 0) { if (*s == '\n') early_serial_putc('\r'); early_serial_putc(*s); s++; } } #define DEFAULT_BAUD 9600 static __init void early_serial_init(char *s) { unsigned char c; unsigned divisor; unsigned baud = DEFAULT_BAUD; char *e; if (*s == ',') ++s; if (*s) { unsigned port; if (!strncmp(s, "0x", 2)) { early_serial_base = simple_strtoul(s, &e, 16); } else { static const int __initconst bases[] = { 0x3f8, 0x2f8 }; if (!strncmp(s, "ttyS", 4)) s += 4; port = simple_strtoul(s, &e, 10); if (port > 1 || s == e) port = 0; early_serial_base = bases[port]; } s += strcspn(s, ","); if (*s == ',') s++; } outb(0x3, early_serial_base + LCR); /* 8n1 */ outb(0, early_serial_base + IER); /* no interrupt */ outb(0, early_serial_base + FCR); /* no fifo */ outb(0x3, early_serial_base + MCR); /* DTR + RTS */ if (*s) { baud = simple_strtoul(s, &e, 0); if (baud == 0 || s == e) baud = DEFAULT_BAUD; } divisor = 115200 / baud; c = inb(early_serial_base + LCR); outb(c | DLAB, early_serial_base + LCR); outb(divisor & 0xff, early_serial_base + DLL); outb((divisor >> 8) & 0xff, early_serial_base + DLH); outb(c & ~DLAB, early_serial_base + LCR); } static struct console early_serial_console = { 
.name = "earlyser", .write = early_serial_write, .flags = CON_PRINTBUFFER, .index = -1, }; static inline void early_console_register(struct console *con, int keep_early) { if (con->index != -1) { printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n", con->name); return; } early_console = con; if (keep_early) early_console->flags &= ~CON_BOOT; else early_console->flags |= CON_BOOT; register_console(early_console); } static int __init setup_early_printk(char *buf) { int keep; if (!buf) return 0; if (early_console) return 0; keep = (strstr(buf, "keep") != NULL); while (*buf != '\0') { if (!strncmp(buf, "serial", 6)) { buf += 6; early_serial_init(buf); early_console_register(&early_serial_console, keep); if (!strncmp(buf, ",ttyS", 5)) buf += 5; } if (!strncmp(buf, "ttyS", 4)) { early_serial_init(buf + 4); early_console_register(&early_serial_console, keep); } if (!strncmp(buf, "vga", 3) && boot_params.screen_info.orig_video_isVGA == 1) { max_xpos = boot_params.screen_info.orig_video_cols; max_ypos = boot_params.screen_info.orig_video_lines; current_ypos = boot_params.screen_info.orig_y; early_console_register(&early_vga_console, keep); } #ifdef CONFIG_EARLY_PRINTK_DBGP if (!strncmp(buf, "dbgp", 4) && !early_dbgp_init(buf + 4)) early_console_register(&early_dbgp_console, keep); #endif #ifdef CONFIG_HVC_XEN if (!strncmp(buf, "xen", 3)) early_console_register(&xenboot_console, keep); #endif #ifdef CONFIG_EARLY_PRINTK_INTEL_MID if (!strncmp(buf, "mrst", 4)) { mrst_early_console_init(); early_console_register(&early_mrst_console, keep); } if (!strncmp(buf, "hsu", 3)) { hsu_early_console_init(buf + 3); early_console_register(&early_hsu_console, keep); } #endif buf++; } return 0; } early_param("earlyprintk", setup_early_printk);
gpl-2.0
hvaibhav/beagle-dev
crypto/cast6_generic.c
2355
9732
/* Kernel cryptographic api. * cast6.c - Cast6 cipher algorithm [rfc2612]. * * CAST-256 (*cast6*) is a DES like Substitution-Permutation Network (SPN) * cryptosystem built upon the CAST-128 (*cast5*) [rfc2144] encryption * algorithm. * * Copyright (C) 2003 Kartikey Mahendra Bhatt <kartik_me@hotmail.com>. * * This program is free software; you can redistribute it and/or modify it * under the terms of GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA */ #include <asm/byteorder.h> #include <linux/init.h> #include <linux/crypto.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <crypto/cast6.h> #define s1 cast_s1 #define s2 cast_s2 #define s3 cast_s3 #define s4 cast_s4 #define F1(D, r, m) ((I = ((m) + (D))), (I = rol32(I, (r))), \ (((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff])) #define F2(D, r, m) ((I = ((m) ^ (D))), (I = rol32(I, (r))), \ (((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff])) #define F3(D, r, m) ((I = ((m) - (D))), (I = rol32(I, (r))), \ (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff])) static const u32 Tm[24][8] = { { 0x5a827999, 0xc95c653a, 0x383650db, 0xa7103c7c, 0x15ea281d, 0x84c413be, 0xf39dff5f, 0x6277eb00 } , { 0xd151d6a1, 0x402bc242, 0xaf05ade3, 0x1ddf9984, 0x8cb98525, 0xfb9370c6, 0x6a6d5c67, 0xd9474808 } , { 0x482133a9, 0xb6fb1f4a, 0x25d50aeb, 0x94aef68c, 0x0388e22d, 0x7262cdce, 0xe13cb96f, 0x5016a510 } , { 0xbef090b1, 0x2dca7c52, 0x9ca467f3, 0x0b7e5394, 0x7a583f35, 0xe9322ad6, 0x580c1677, 0xc6e60218 } , { 0x35bfedb9, 0xa499d95a, 0x1373c4fb, 0x824db09c, 0xf1279c3d, 0x600187de, 0xcedb737f, 0x3db55f20 } , { 0xac8f4ac1, 
0x1b693662, 0x8a432203, 0xf91d0da4, 0x67f6f945, 0xd6d0e4e6, 0x45aad087, 0xb484bc28 } , { 0x235ea7c9, 0x9238936a, 0x01127f0b, 0x6fec6aac, 0xdec6564d, 0x4da041ee, 0xbc7a2d8f, 0x2b541930 } , { 0x9a2e04d1, 0x0907f072, 0x77e1dc13, 0xe6bbc7b4, 0x5595b355, 0xc46f9ef6, 0x33498a97, 0xa2237638 } , { 0x10fd61d9, 0x7fd74d7a, 0xeeb1391b, 0x5d8b24bc, 0xcc65105d, 0x3b3efbfe, 0xaa18e79f, 0x18f2d340 } , { 0x87ccbee1, 0xf6a6aa82, 0x65809623, 0xd45a81c4, 0x43346d65, 0xb20e5906, 0x20e844a7, 0x8fc23048 } , { 0xfe9c1be9, 0x6d76078a, 0xdc4ff32b, 0x4b29decc, 0xba03ca6d, 0x28ddb60e, 0x97b7a1af, 0x06918d50 } , { 0x756b78f1, 0xe4456492, 0x531f5033, 0xc1f93bd4, 0x30d32775, 0x9fad1316, 0x0e86feb7, 0x7d60ea58 } , { 0xec3ad5f9, 0x5b14c19a, 0xc9eead3b, 0x38c898dc, 0xa7a2847d, 0x167c701e, 0x85565bbf, 0xf4304760 } , { 0x630a3301, 0xd1e41ea2, 0x40be0a43, 0xaf97f5e4, 0x1e71e185, 0x8d4bcd26, 0xfc25b8c7, 0x6affa468 } , { 0xd9d99009, 0x48b37baa, 0xb78d674b, 0x266752ec, 0x95413e8d, 0x041b2a2e, 0x72f515cf, 0xe1cf0170 } , { 0x50a8ed11, 0xbf82d8b2, 0x2e5cc453, 0x9d36aff4, 0x0c109b95, 0x7aea8736, 0xe9c472d7, 0x589e5e78 } , { 0xc7784a19, 0x365235ba, 0xa52c215b, 0x14060cfc, 0x82dff89d, 0xf1b9e43e, 0x6093cfdf, 0xcf6dbb80 } , { 0x3e47a721, 0xad2192c2, 0x1bfb7e63, 0x8ad56a04, 0xf9af55a5, 0x68894146, 0xd7632ce7, 0x463d1888 } , { 0xb5170429, 0x23f0efca, 0x92cadb6b, 0x01a4c70c, 0x707eb2ad, 0xdf589e4e, 0x4e3289ef, 0xbd0c7590 } , { 0x2be66131, 0x9ac04cd2, 0x099a3873, 0x78742414, 0xe74e0fb5, 0x5627fb56, 0xc501e6f7, 0x33dbd298 } , { 0xa2b5be39, 0x118fa9da, 0x8069957b, 0xef43811c, 0x5e1d6cbd, 0xccf7585e, 0x3bd143ff, 0xaaab2fa0 } , { 0x19851b41, 0x885f06e2, 0xf738f283, 0x6612de24, 0xd4ecc9c5, 0x43c6b566, 0xb2a0a107, 0x217a8ca8 } , { 0x90547849, 0xff2e63ea, 0x6e084f8b, 0xdce23b2c, 0x4bbc26cd, 0xba96126e, 0x296ffe0f, 0x9849e9b0 } , { 0x0723d551, 0x75fdc0f2, 0xe4d7ac93, 0x53b19834, 0xc28b83d5, 0x31656f76, 0xa03f5b17, 0x0f1946b8 } }; static const u8 Tr[4][8] = { { 0x13, 0x04, 0x15, 0x06, 0x17, 0x08, 0x19, 0x0a } , { 0x1b, 
0x0c, 0x1d, 0x0e, 0x1f, 0x10, 0x01, 0x12 } , { 0x03, 0x14, 0x05, 0x16, 0x07, 0x18, 0x09, 0x1a } , { 0x0b, 0x1c, 0x0d, 0x1e, 0x0f, 0x00, 0x11, 0x02 } }; /* forward octave */ static inline void W(u32 *key, unsigned int i) { u32 I; key[6] ^= F1(key[7], Tr[i % 4][0], Tm[i][0]); key[5] ^= F2(key[6], Tr[i % 4][1], Tm[i][1]); key[4] ^= F3(key[5], Tr[i % 4][2], Tm[i][2]); key[3] ^= F1(key[4], Tr[i % 4][3], Tm[i][3]); key[2] ^= F2(key[3], Tr[i % 4][4], Tm[i][4]); key[1] ^= F3(key[2], Tr[i % 4][5], Tm[i][5]); key[0] ^= F1(key[1], Tr[i % 4][6], Tm[i][6]); key[7] ^= F2(key[0], Tr[i % 4][7], Tm[i][7]); } int __cast6_setkey(struct cast6_ctx *c, const u8 *in_key, unsigned key_len, u32 *flags) { int i; u32 key[8]; __be32 p_key[8]; /* padded key */ if (key_len % 4 != 0) { *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } memset(p_key, 0, 32); memcpy(p_key, in_key, key_len); key[0] = be32_to_cpu(p_key[0]); /* A */ key[1] = be32_to_cpu(p_key[1]); /* B */ key[2] = be32_to_cpu(p_key[2]); /* C */ key[3] = be32_to_cpu(p_key[3]); /* D */ key[4] = be32_to_cpu(p_key[4]); /* E */ key[5] = be32_to_cpu(p_key[5]); /* F */ key[6] = be32_to_cpu(p_key[6]); /* G */ key[7] = be32_to_cpu(p_key[7]); /* H */ for (i = 0; i < 12; i++) { W(key, 2 * i); W(key, 2 * i + 1); c->Kr[i][0] = key[0] & 0x1f; c->Kr[i][1] = key[2] & 0x1f; c->Kr[i][2] = key[4] & 0x1f; c->Kr[i][3] = key[6] & 0x1f; c->Km[i][0] = key[7]; c->Km[i][1] = key[5]; c->Km[i][2] = key[3]; c->Km[i][3] = key[1]; } return 0; } EXPORT_SYMBOL_GPL(__cast6_setkey); int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { return __cast6_setkey(crypto_tfm_ctx(tfm), key, keylen, &tfm->crt_flags); } EXPORT_SYMBOL_GPL(cast6_setkey); /*forward quad round*/ static inline void Q(u32 *block, u8 *Kr, u32 *Km) { u32 I; block[2] ^= F1(block[3], Kr[0], Km[0]); block[1] ^= F2(block[2], Kr[1], Km[1]); block[0] ^= F3(block[1], Kr[2], Km[2]); block[3] ^= F1(block[0], Kr[3], Km[3]); } /*reverse quad round*/ static inline void QBAR(u32 
*block, u8 *Kr, u32 *Km) { u32 I; block[3] ^= F1(block[0], Kr[3], Km[3]); block[0] ^= F3(block[1], Kr[2], Km[2]); block[1] ^= F2(block[2], Kr[1], Km[1]); block[2] ^= F1(block[3], Kr[0], Km[0]); } void __cast6_encrypt(struct cast6_ctx *c, u8 *outbuf, const u8 *inbuf) { const __be32 *src = (const __be32 *)inbuf; __be32 *dst = (__be32 *)outbuf; u32 block[4]; u32 *Km; u8 *Kr; block[0] = be32_to_cpu(src[0]); block[1] = be32_to_cpu(src[1]); block[2] = be32_to_cpu(src[2]); block[3] = be32_to_cpu(src[3]); Km = c->Km[0]; Kr = c->Kr[0]; Q(block, Kr, Km); Km = c->Km[1]; Kr = c->Kr[1]; Q(block, Kr, Km); Km = c->Km[2]; Kr = c->Kr[2]; Q(block, Kr, Km); Km = c->Km[3]; Kr = c->Kr[3]; Q(block, Kr, Km); Km = c->Km[4]; Kr = c->Kr[4]; Q(block, Kr, Km); Km = c->Km[5]; Kr = c->Kr[5]; Q(block, Kr, Km); Km = c->Km[6]; Kr = c->Kr[6]; QBAR(block, Kr, Km); Km = c->Km[7]; Kr = c->Kr[7]; QBAR(block, Kr, Km); Km = c->Km[8]; Kr = c->Kr[8]; QBAR(block, Kr, Km); Km = c->Km[9]; Kr = c->Kr[9]; QBAR(block, Kr, Km); Km = c->Km[10]; Kr = c->Kr[10]; QBAR(block, Kr, Km); Km = c->Km[11]; Kr = c->Kr[11]; QBAR(block, Kr, Km); dst[0] = cpu_to_be32(block[0]); dst[1] = cpu_to_be32(block[1]); dst[2] = cpu_to_be32(block[2]); dst[3] = cpu_to_be32(block[3]); } EXPORT_SYMBOL_GPL(__cast6_encrypt); static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) { __cast6_encrypt(crypto_tfm_ctx(tfm), outbuf, inbuf); } void __cast6_decrypt(struct cast6_ctx *c, u8 *outbuf, const u8 *inbuf) { const __be32 *src = (const __be32 *)inbuf; __be32 *dst = (__be32 *)outbuf; u32 block[4]; u32 *Km; u8 *Kr; block[0] = be32_to_cpu(src[0]); block[1] = be32_to_cpu(src[1]); block[2] = be32_to_cpu(src[2]); block[3] = be32_to_cpu(src[3]); Km = c->Km[11]; Kr = c->Kr[11]; Q(block, Kr, Km); Km = c->Km[10]; Kr = c->Kr[10]; Q(block, Kr, Km); Km = c->Km[9]; Kr = c->Kr[9]; Q(block, Kr, Km); Km = c->Km[8]; Kr = c->Kr[8]; Q(block, Kr, Km); Km = c->Km[7]; Kr = c->Kr[7]; Q(block, Kr, Km); Km = c->Km[6]; Kr = c->Kr[6]; Q(block, Kr, 
Km); Km = c->Km[5]; Kr = c->Kr[5]; QBAR(block, Kr, Km); Km = c->Km[4]; Kr = c->Kr[4]; QBAR(block, Kr, Km); Km = c->Km[3]; Kr = c->Kr[3]; QBAR(block, Kr, Km); Km = c->Km[2]; Kr = c->Kr[2]; QBAR(block, Kr, Km); Km = c->Km[1]; Kr = c->Kr[1]; QBAR(block, Kr, Km); Km = c->Km[0]; Kr = c->Kr[0]; QBAR(block, Kr, Km); dst[0] = cpu_to_be32(block[0]); dst[1] = cpu_to_be32(block[1]); dst[2] = cpu_to_be32(block[2]); dst[3] = cpu_to_be32(block[3]); } EXPORT_SYMBOL_GPL(__cast6_decrypt); static void cast6_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) { __cast6_decrypt(crypto_tfm_ctx(tfm), outbuf, inbuf); } static struct crypto_alg alg = { .cra_name = "cast6", .cra_driver_name = "cast6-generic", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = CAST6_BLOCK_SIZE, .cra_ctxsize = sizeof(struct cast6_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = CAST6_MIN_KEY_SIZE, .cia_max_keysize = CAST6_MAX_KEY_SIZE, .cia_setkey = cast6_setkey, .cia_encrypt = cast6_encrypt, .cia_decrypt = cast6_decrypt} } }; static int __init cast6_mod_init(void) { return crypto_register_alg(&alg); } static void __exit cast6_mod_fini(void) { crypto_unregister_alg(&alg); } module_init(cast6_mod_init); module_exit(cast6_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Cast6 Cipher Algorithm"); MODULE_ALIAS_CRYPTO("cast6"); MODULE_ALIAS_CRYPTO("cast6-generic");
gpl-2.0
javilonas/Lonas_KL-GT-I9300-Sammy
arch/arm/mach-s3c2410/dma.c
3891
5125
/* linux/arch/arm/mach-s3c2410/dma.c * * Copyright (c) 2006 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * S3C2410 DMA selection * * http://armlinux.simtec.co.uk/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/sysdev.h> #include <linux/serial_core.h> #include <mach/map.h> #include <mach/dma.h> #include <plat/cpu.h> #include <plat/dma-s3c24xx.h> #include <plat/regs-serial.h> #include <mach/regs-gpio.h> #include <plat/regs-ac97.h> #include <plat/regs-dma.h> #include <mach/regs-mem.h> #include <mach/regs-lcd.h> #include <mach/regs-sdi.h> #include <plat/regs-iis.h> #include <plat/regs-spi.h> static struct s3c24xx_dma_map __initdata s3c2410_dma_mappings[] = { [DMACH_XD0] = { .name = "xdreq0", .channels[0] = S3C2410_DCON_CH0_XDREQ0 | DMA_CH_VALID, }, [DMACH_XD1] = { .name = "xdreq1", .channels[1] = S3C2410_DCON_CH1_XDREQ1 | DMA_CH_VALID, }, [DMACH_SDI] = { .name = "sdi", .channels[0] = S3C2410_DCON_CH0_SDI | DMA_CH_VALID, .channels[2] = S3C2410_DCON_CH2_SDI | DMA_CH_VALID, .channels[3] = S3C2410_DCON_CH3_SDI | DMA_CH_VALID, .hw_addr.to = S3C2410_PA_IIS + S3C2410_IISFIFO, .hw_addr.from = S3C2410_PA_IIS + S3C2410_IISFIFO, }, [DMACH_SPI0] = { .name = "spi0", .channels[1] = S3C2410_DCON_CH1_SPI | DMA_CH_VALID, .hw_addr.to = S3C2410_PA_SPI + S3C2410_SPTDAT, .hw_addr.from = S3C2410_PA_SPI + S3C2410_SPRDAT, }, [DMACH_SPI1] = { .name = "spi1", .channels[3] = S3C2410_DCON_CH3_SPI | DMA_CH_VALID, .hw_addr.to = S3C2410_PA_SPI + 0x20 + S3C2410_SPTDAT, .hw_addr.from = S3C2410_PA_SPI + 0x20 + S3C2410_SPRDAT, }, [DMACH_UART0] = { .name = "uart0", .channels[0] = S3C2410_DCON_CH0_UART0 | DMA_CH_VALID, .hw_addr.to = S3C2410_PA_UART0 + S3C2410_UTXH, .hw_addr.from = S3C2410_PA_UART0 + S3C2410_URXH, }, [DMACH_UART1] = { .name = "uart1", .channels[1] = S3C2410_DCON_CH1_UART1 | 
DMA_CH_VALID, .hw_addr.to = S3C2410_PA_UART1 + S3C2410_UTXH, .hw_addr.from = S3C2410_PA_UART1 + S3C2410_URXH, }, [DMACH_UART2] = { .name = "uart2", .channels[3] = S3C2410_DCON_CH3_UART2 | DMA_CH_VALID, .hw_addr.to = S3C2410_PA_UART2 + S3C2410_UTXH, .hw_addr.from = S3C2410_PA_UART2 + S3C2410_URXH, }, [DMACH_TIMER] = { .name = "timer", .channels[0] = S3C2410_DCON_CH0_TIMER | DMA_CH_VALID, .channels[2] = S3C2410_DCON_CH2_TIMER | DMA_CH_VALID, .channels[3] = S3C2410_DCON_CH3_TIMER | DMA_CH_VALID, }, [DMACH_I2S_IN] = { .name = "i2s-sdi", .channels[1] = S3C2410_DCON_CH1_I2SSDI | DMA_CH_VALID, .channels[2] = S3C2410_DCON_CH2_I2SSDI | DMA_CH_VALID, .hw_addr.from = S3C2410_PA_IIS + S3C2410_IISFIFO, }, [DMACH_I2S_OUT] = { .name = "i2s-sdo", .channels[2] = S3C2410_DCON_CH2_I2SSDO | DMA_CH_VALID, .hw_addr.to = S3C2410_PA_IIS + S3C2410_IISFIFO, }, [DMACH_USB_EP1] = { .name = "usb-ep1", .channels[0] = S3C2410_DCON_CH0_USBEP1 | DMA_CH_VALID, }, [DMACH_USB_EP2] = { .name = "usb-ep2", .channels[1] = S3C2410_DCON_CH1_USBEP2 | DMA_CH_VALID, }, [DMACH_USB_EP3] = { .name = "usb-ep3", .channels[2] = S3C2410_DCON_CH2_USBEP3 | DMA_CH_VALID, }, [DMACH_USB_EP4] = { .name = "usb-ep4", .channels[3] =S3C2410_DCON_CH3_USBEP4 | DMA_CH_VALID, }, }; static void s3c2410_dma_select(struct s3c2410_dma_chan *chan, struct s3c24xx_dma_map *map) { chan->dcon = map->channels[chan->number] & ~DMA_CH_VALID; } static struct s3c24xx_dma_selection __initdata s3c2410_dma_sel = { .select = s3c2410_dma_select, .dcon_mask = 7 << 24, .map = s3c2410_dma_mappings, .map_size = ARRAY_SIZE(s3c2410_dma_mappings), }; static struct s3c24xx_dma_order __initdata s3c2410_dma_order = { .channels = { [DMACH_SDI] = { .list = { [0] = 3 | DMA_CH_VALID, [1] = 2 | DMA_CH_VALID, [2] = 0 | DMA_CH_VALID, }, }, [DMACH_I2S_IN] = { .list = { [0] = 1 | DMA_CH_VALID, [1] = 2 | DMA_CH_VALID, }, }, }, }; static int __init s3c2410_dma_add(struct sys_device *sysdev) { s3c2410_dma_init(); s3c24xx_dma_order_set(&s3c2410_dma_order); return 
s3c24xx_dma_init_map(&s3c2410_dma_sel); } #if defined(CONFIG_CPU_S3C2410) static struct sysdev_driver s3c2410_dma_driver = { .add = s3c2410_dma_add, }; static int __init s3c2410_dma_drvinit(void) { return sysdev_driver_register(&s3c2410_sysclass, &s3c2410_dma_driver); } arch_initcall(s3c2410_dma_drvinit); static struct sysdev_driver s3c2410a_dma_driver = { .add = s3c2410_dma_add, }; static int __init s3c2410a_dma_drvinit(void) { return sysdev_driver_register(&s3c2410a_sysclass, &s3c2410a_dma_driver); } arch_initcall(s3c2410a_dma_drvinit); #endif #if defined(CONFIG_CPU_S3C2442) /* S3C2442 DMA contains the same selection table as the S3C2410 */ static struct sysdev_driver s3c2442_dma_driver = { .add = s3c2410_dma_add, }; static int __init s3c2442_dma_drvinit(void) { return sysdev_driver_register(&s3c2442_sysclass, &s3c2442_dma_driver); } arch_initcall(s3c2442_dma_drvinit); #endif
gpl-2.0
MAKO-MM/android_kernel_lge_mako
arch/arm/mach-ux500/board-mop500.c
4659
21898
/* * Copyright (C) 2008-2009 ST-Ericsson * * Author: Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/gpio.h> #include <linux/amba/bus.h> #include <linux/amba/pl022.h> #include <linux/amba/serial.h> #include <linux/spi/spi.h> #include <linux/mfd/abx500/ab8500.h> #include <linux/regulator/ab8500.h> #include <linux/mfd/tc3589x.h> #include <linux/mfd/tps6105x.h> #include <linux/mfd/abx500/ab8500-gpio.h> #include <linux/leds-lp5521.h> #include <linux/input.h> #include <linux/smsc911x.h> #include <linux/gpio_keys.h> #include <linux/delay.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/leds.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/hardware/gic.h> #include <plat/i2c.h> #include <plat/ste_dma40.h> #include <plat/pincfg.h> #include <plat/gpio-nomadik.h> #include <mach/hardware.h> #include <mach/setup.h> #include <mach/devices.h> #include <mach/irqs.h> #include "pins-db8500.h" #include "ste-dma40-db8500.h" #include "devices-db8500.h" #include "board-mop500.h" #include "board-mop500-regulators.h" static struct gpio_led snowball_led_array[] = { { .name = "user_led", .default_trigger = "none", .gpio = 142, }, }; static struct gpio_led_platform_data snowball_led_data = { .leds = snowball_led_array, .num_leds = ARRAY_SIZE(snowball_led_array), }; static struct platform_device snowball_led_dev = { .name = "leds-gpio", .dev = { .platform_data = &snowball_led_data, }, }; static struct ab8500_gpio_platform_data ab8500_gpio_pdata = { .gpio_base = MOP500_AB8500_PIN_GPIO(1), .irq_base = MOP500_AB8500_VIR_GPIO_IRQ_BASE, /* config_reg is the initial configuration of ab8500 
pins. * The pins can be configured as GPIO or alt functions based * on value present in GpioSel1 to GpioSel6 and AlternatFunction * register. This is the array of 7 configuration settings. * One has to compile time decide these settings. Below is the * explanation of these setting * GpioSel1 = 0x00 => Pins GPIO1 to GPIO8 are not used as GPIO * GpioSel2 = 0x1E => Pins GPIO10 to GPIO13 are configured as GPIO * GpioSel3 = 0x80 => Pin GPIO24 is configured as GPIO * GpioSel4 = 0x01 => Pin GPIo25 is configured as GPIO * GpioSel5 = 0x7A => Pins GPIO34, GPIO36 to GPIO39 are conf as GPIO * GpioSel6 = 0x00 => Pins GPIO41 & GPIo42 are not configured as GPIO * AlternaFunction = 0x00 => If Pins GPIO10 to 13 are not configured * as GPIO then this register selectes the alternate fucntions */ .config_reg = {0x00, 0x1E, 0x80, 0x01, 0x7A, 0x00, 0x00}, }; static struct gpio_keys_button snowball_key_array[] = { { .gpio = 32, .type = EV_KEY, .code = KEY_1, .desc = "userpb", .active_low = 1, .debounce_interval = 50, .wakeup = 1, }, { .gpio = 151, .type = EV_KEY, .code = KEY_2, .desc = "extkb1", .active_low = 1, .debounce_interval = 50, .wakeup = 1, }, { .gpio = 152, .type = EV_KEY, .code = KEY_3, .desc = "extkb2", .active_low = 1, .debounce_interval = 50, .wakeup = 1, }, { .gpio = 161, .type = EV_KEY, .code = KEY_4, .desc = "extkb3", .active_low = 1, .debounce_interval = 50, .wakeup = 1, }, { .gpio = 162, .type = EV_KEY, .code = KEY_5, .desc = "extkb4", .active_low = 1, .debounce_interval = 50, .wakeup = 1, }, }; static struct gpio_keys_platform_data snowball_key_data = { .buttons = snowball_key_array, .nbuttons = ARRAY_SIZE(snowball_key_array), }; static struct platform_device snowball_key_dev = { .name = "gpio-keys", .id = -1, .dev = { .platform_data = &snowball_key_data, } }; static struct smsc911x_platform_config snowball_sbnet_cfg = { .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH, .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL, .flags = SMSC911X_USE_16BIT | 
SMSC911X_FORCE_INTERNAL_PHY, .shift = 1, }; static struct resource sbnet_res[] = { { .name = "smsc911x-memory", .start = (0x5000 << 16), .end = (0x5000 << 16) + 0xffff, .flags = IORESOURCE_MEM, }, { .start = NOMADIK_GPIO_TO_IRQ(140), .end = NOMADIK_GPIO_TO_IRQ(140), .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, }, }; static struct platform_device snowball_sbnet_dev = { .name = "smsc911x", .num_resources = ARRAY_SIZE(sbnet_res), .resource = sbnet_res, .dev = { .platform_data = &snowball_sbnet_cfg, }, }; static struct ab8500_platform_data ab8500_platdata = { .irq_base = MOP500_AB8500_IRQ_BASE, .regulator_reg_init = ab8500_regulator_reg_init, .num_regulator_reg_init = ARRAY_SIZE(ab8500_regulator_reg_init), .regulator = ab8500_regulators, .num_regulator = ARRAY_SIZE(ab8500_regulators), .gpio = &ab8500_gpio_pdata, }; static struct resource ab8500_resources[] = { [0] = { .start = IRQ_DB8500_AB8500, .end = IRQ_DB8500_AB8500, .flags = IORESOURCE_IRQ } }; struct platform_device ab8500_device = { .name = "ab8500-i2c", .id = 0, .dev = { .platform_data = &ab8500_platdata, }, .num_resources = 1, .resource = ab8500_resources, }; /* * TPS61052 */ static struct tps6105x_platform_data mop500_tps61052_data = { .mode = TPS6105X_MODE_VOLTAGE, .regulator_data = &tps61052_regulator, }; /* * TC35892 */ static void mop500_tc35892_init(struct tc3589x *tc3589x, unsigned int base) { struct device *parent = NULL; #if 0 /* FIXME: Is the sdi actually part of tc3589x? 
*/ parent = tc3589x->dev; #endif mop500_sdi_tc35892_init(parent); } static struct tc3589x_gpio_platform_data mop500_tc35892_gpio_data = { .gpio_base = MOP500_EGPIO(0), .setup = mop500_tc35892_init, }; static struct tc3589x_platform_data mop500_tc35892_data = { .block = TC3589x_BLOCK_GPIO, .gpio = &mop500_tc35892_gpio_data, .irq_base = MOP500_EGPIO_IRQ_BASE, }; static struct lp5521_led_config lp5521_pri_led[] = { [0] = { .chan_nr = 0, .led_current = 0x2f, .max_current = 0x5f, }, [1] = { .chan_nr = 1, .led_current = 0x2f, .max_current = 0x5f, }, [2] = { .chan_nr = 2, .led_current = 0x2f, .max_current = 0x5f, }, }; static struct lp5521_platform_data __initdata lp5521_pri_data = { .label = "lp5521_pri", .led_config = &lp5521_pri_led[0], .num_channels = 3, .clock_mode = LP5521_CLOCK_EXT, }; static struct lp5521_led_config lp5521_sec_led[] = { [0] = { .chan_nr = 0, .led_current = 0x2f, .max_current = 0x5f, }, [1] = { .chan_nr = 1, .led_current = 0x2f, .max_current = 0x5f, }, [2] = { .chan_nr = 2, .led_current = 0x2f, .max_current = 0x5f, }, }; static struct lp5521_platform_data __initdata lp5521_sec_data = { .label = "lp5521_sec", .led_config = &lp5521_sec_led[0], .num_channels = 3, .clock_mode = LP5521_CLOCK_EXT, }; static struct i2c_board_info __initdata mop500_i2c0_devices[] = { { I2C_BOARD_INFO("tc3589x", 0x42), .irq = NOMADIK_GPIO_TO_IRQ(217), .platform_data = &mop500_tc35892_data, }, /* I2C0 devices only available prior to HREFv60 */ { I2C_BOARD_INFO("tps61052", 0x33), .platform_data = &mop500_tps61052_data, }, }; #define NUM_PRE_V60_I2C0_DEVICES 1 static struct i2c_board_info __initdata mop500_i2c2_devices[] = { { /* lp5521 LED driver, 1st device */ I2C_BOARD_INFO("lp5521", 0x33), .platform_data = &lp5521_pri_data, }, { /* lp5521 LED driver, 2st device */ I2C_BOARD_INFO("lp5521", 0x34), .platform_data = &lp5521_sec_data, }, { /* Light sensor Rohm BH1780GLI */ I2C_BOARD_INFO("bh1780", 0x29), }, }; #define U8500_I2C_CONTROLLER(id, _slsu, _tft, _rft, clk, t_out, _sm) 
\ static struct nmk_i2c_controller u8500_i2c##id##_data = { \ /* \ * slave data setup time, which is \ * 250 ns,100ns,10ns which is 14,6,2 \ * respectively for a 48 Mhz \ * i2c clock \ */ \ .slsu = _slsu, \ /* Tx FIFO threshold */ \ .tft = _tft, \ /* Rx FIFO threshold */ \ .rft = _rft, \ /* std. mode operation */ \ .clk_freq = clk, \ /* Slave response timeout(ms) */\ .timeout = t_out, \ .sm = _sm, \ } /* * The board uses 4 i2c controllers, initialize all of * them with slave data setup time of 250 ns, * Tx & Rx FIFO threshold values as 8 and standard * mode of operation */ U8500_I2C_CONTROLLER(0, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST); U8500_I2C_CONTROLLER(1, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST); U8500_I2C_CONTROLLER(2, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST); U8500_I2C_CONTROLLER(3, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST); static void __init mop500_i2c_init(struct device *parent) { db8500_add_i2c0(parent, &u8500_i2c0_data); db8500_add_i2c1(parent, &u8500_i2c1_data); db8500_add_i2c2(parent, &u8500_i2c2_data); db8500_add_i2c3(parent, &u8500_i2c3_data); } static struct gpio_keys_button mop500_gpio_keys[] = { { .desc = "SFH7741 Proximity Sensor", .type = EV_SW, .code = SW_FRONT_PROXIMITY, .active_low = 0, .can_disable = 1, } }; static struct regulator *prox_regulator; static int mop500_prox_activate(struct device *dev); static void mop500_prox_deactivate(struct device *dev); static struct gpio_keys_platform_data mop500_gpio_keys_data = { .buttons = mop500_gpio_keys, .nbuttons = ARRAY_SIZE(mop500_gpio_keys), .enable = mop500_prox_activate, .disable = mop500_prox_deactivate, }; static struct platform_device mop500_gpio_keys_device = { .name = "gpio-keys", .id = 0, .dev = { .platform_data = &mop500_gpio_keys_data, }, }; static int mop500_prox_activate(struct device *dev) { prox_regulator = regulator_get(&mop500_gpio_keys_device.dev, "vcc"); if (IS_ERR(prox_regulator)) { dev_err(&mop500_gpio_keys_device.dev, "no regulator\n"); return 
PTR_ERR(prox_regulator); } regulator_enable(prox_regulator); return 0; } static void mop500_prox_deactivate(struct device *dev) { regulator_disable(prox_regulator); regulator_put(prox_regulator); } /* add any platform devices here - TODO */ static struct platform_device *mop500_platform_devs[] __initdata = { &mop500_gpio_keys_device, &ab8500_device, }; #ifdef CONFIG_STE_DMA40 static struct stedma40_chan_cfg ssp0_dma_cfg_rx = { .mode = STEDMA40_MODE_LOGICAL, .dir = STEDMA40_PERIPH_TO_MEM, .src_dev_type = DB8500_DMA_DEV8_SSP0_RX, .dst_dev_type = STEDMA40_DEV_DST_MEMORY, .src_info.data_width = STEDMA40_BYTE_WIDTH, .dst_info.data_width = STEDMA40_BYTE_WIDTH, }; static struct stedma40_chan_cfg ssp0_dma_cfg_tx = { .mode = STEDMA40_MODE_LOGICAL, .dir = STEDMA40_MEM_TO_PERIPH, .src_dev_type = STEDMA40_DEV_SRC_MEMORY, .dst_dev_type = DB8500_DMA_DEV8_SSP0_TX, .src_info.data_width = STEDMA40_BYTE_WIDTH, .dst_info.data_width = STEDMA40_BYTE_WIDTH, }; #endif static struct pl022_ssp_controller ssp0_plat = { .bus_id = 0, #ifdef CONFIG_STE_DMA40 .enable_dma = 1, .dma_filter = stedma40_filter, .dma_rx_param = &ssp0_dma_cfg_rx, .dma_tx_param = &ssp0_dma_cfg_tx, #else .enable_dma = 0, #endif /* on this platform, gpio 31,142,144,214 & * 224 are connected as chip selects */ .num_chipselect = 5, }; static void __init mop500_spi_init(struct device *parent) { db8500_add_ssp0(parent, &ssp0_plat); } #ifdef CONFIG_STE_DMA40 static struct stedma40_chan_cfg uart0_dma_cfg_rx = { .mode = STEDMA40_MODE_LOGICAL, .dir = STEDMA40_PERIPH_TO_MEM, .src_dev_type = DB8500_DMA_DEV13_UART0_RX, .dst_dev_type = STEDMA40_DEV_DST_MEMORY, .src_info.data_width = STEDMA40_BYTE_WIDTH, .dst_info.data_width = STEDMA40_BYTE_WIDTH, }; static struct stedma40_chan_cfg uart0_dma_cfg_tx = { .mode = STEDMA40_MODE_LOGICAL, .dir = STEDMA40_MEM_TO_PERIPH, .src_dev_type = STEDMA40_DEV_SRC_MEMORY, .dst_dev_type = DB8500_DMA_DEV13_UART0_TX, .src_info.data_width = STEDMA40_BYTE_WIDTH, .dst_info.data_width = STEDMA40_BYTE_WIDTH, 
}; static struct stedma40_chan_cfg uart1_dma_cfg_rx = { .mode = STEDMA40_MODE_LOGICAL, .dir = STEDMA40_PERIPH_TO_MEM, .src_dev_type = DB8500_DMA_DEV12_UART1_RX, .dst_dev_type = STEDMA40_DEV_DST_MEMORY, .src_info.data_width = STEDMA40_BYTE_WIDTH, .dst_info.data_width = STEDMA40_BYTE_WIDTH, }; static struct stedma40_chan_cfg uart1_dma_cfg_tx = { .mode = STEDMA40_MODE_LOGICAL, .dir = STEDMA40_MEM_TO_PERIPH, .src_dev_type = STEDMA40_DEV_SRC_MEMORY, .dst_dev_type = DB8500_DMA_DEV12_UART1_TX, .src_info.data_width = STEDMA40_BYTE_WIDTH, .dst_info.data_width = STEDMA40_BYTE_WIDTH, }; static struct stedma40_chan_cfg uart2_dma_cfg_rx = { .mode = STEDMA40_MODE_LOGICAL, .dir = STEDMA40_PERIPH_TO_MEM, .src_dev_type = DB8500_DMA_DEV11_UART2_RX, .dst_dev_type = STEDMA40_DEV_DST_MEMORY, .src_info.data_width = STEDMA40_BYTE_WIDTH, .dst_info.data_width = STEDMA40_BYTE_WIDTH, }; static struct stedma40_chan_cfg uart2_dma_cfg_tx = { .mode = STEDMA40_MODE_LOGICAL, .dir = STEDMA40_MEM_TO_PERIPH, .src_dev_type = STEDMA40_DEV_SRC_MEMORY, .dst_dev_type = DB8500_DMA_DEV11_UART2_TX, .src_info.data_width = STEDMA40_BYTE_WIDTH, .dst_info.data_width = STEDMA40_BYTE_WIDTH, }; #endif static pin_cfg_t mop500_pins_uart0[] = { GPIO0_U0_CTSn | PIN_INPUT_PULLUP, GPIO1_U0_RTSn | PIN_OUTPUT_HIGH, GPIO2_U0_RXD | PIN_INPUT_PULLUP, GPIO3_U0_TXD | PIN_OUTPUT_HIGH, }; #define PRCC_K_SOFTRST_SET 0x18 #define PRCC_K_SOFTRST_CLEAR 0x1C static void ux500_uart0_reset(void) { void __iomem *prcc_rst_set, *prcc_rst_clr; prcc_rst_set = (void __iomem *)IO_ADDRESS(U8500_CLKRST1_BASE + PRCC_K_SOFTRST_SET); prcc_rst_clr = (void __iomem *)IO_ADDRESS(U8500_CLKRST1_BASE + PRCC_K_SOFTRST_CLEAR); /* Activate soft reset PRCC_K_SOFTRST_CLEAR */ writel((readl(prcc_rst_clr) | 0x1), prcc_rst_clr); udelay(1); /* Release soft reset PRCC_K_SOFTRST_SET */ writel((readl(prcc_rst_set) | 0x1), prcc_rst_set); udelay(1); } static void ux500_uart0_init(void) { int ret; ret = nmk_config_pins(mop500_pins_uart0, ARRAY_SIZE(mop500_pins_uart0)); 
if (ret < 0) pr_err("pl011: uart pins_enable failed\n"); } static void ux500_uart0_exit(void) { int ret; ret = nmk_config_pins_sleep(mop500_pins_uart0, ARRAY_SIZE(mop500_pins_uart0)); if (ret < 0) pr_err("pl011: uart pins_disable failed\n"); } static struct amba_pl011_data uart0_plat = { #ifdef CONFIG_STE_DMA40 .dma_filter = stedma40_filter, .dma_rx_param = &uart0_dma_cfg_rx, .dma_tx_param = &uart0_dma_cfg_tx, #endif .init = ux500_uart0_init, .exit = ux500_uart0_exit, .reset = ux500_uart0_reset, }; static struct amba_pl011_data uart1_plat = { #ifdef CONFIG_STE_DMA40 .dma_filter = stedma40_filter, .dma_rx_param = &uart1_dma_cfg_rx, .dma_tx_param = &uart1_dma_cfg_tx, #endif }; static struct amba_pl011_data uart2_plat = { #ifdef CONFIG_STE_DMA40 .dma_filter = stedma40_filter, .dma_rx_param = &uart2_dma_cfg_rx, .dma_tx_param = &uart2_dma_cfg_tx, #endif }; static void __init mop500_uart_init(struct device *parent) { db8500_add_uart0(parent, &uart0_plat); db8500_add_uart1(parent, &uart1_plat); db8500_add_uart2(parent, &uart2_plat); } static struct platform_device *snowball_platform_devs[] __initdata = { &snowball_led_dev, &snowball_key_dev, &snowball_sbnet_dev, &ab8500_device, }; static void __init mop500_init_machine(void) { struct device *parent = NULL; int i2c0_devs; int i; mop500_gpio_keys[0].gpio = GPIO_PROX_SENSOR; parent = u8500_init_devices(); mop500_pins_init(); /* FIXME: parent of ab8500 should be prcmu */ for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++) mop500_platform_devs[i]->dev.parent = parent; platform_add_devices(mop500_platform_devs, ARRAY_SIZE(mop500_platform_devs)); mop500_i2c_init(parent); mop500_sdi_init(parent); mop500_spi_init(parent); mop500_uart_init(parent); i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices); i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs); i2c_register_board_info(2, mop500_i2c2_devices, ARRAY_SIZE(mop500_i2c2_devices)); /* This board has full regulator constraints */ regulator_has_full_constraints(); } static void 
__init snowball_init_machine(void) { struct device *parent = NULL; int i2c0_devs; int i; parent = u8500_init_devices(); snowball_pins_init(); for (i = 0; i < ARRAY_SIZE(snowball_platform_devs); i++) snowball_platform_devs[i]->dev.parent = parent; platform_add_devices(snowball_platform_devs, ARRAY_SIZE(snowball_platform_devs)); mop500_i2c_init(parent); snowball_sdi_init(parent); mop500_spi_init(parent); mop500_uart_init(parent); i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices); i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs); i2c_register_board_info(2, mop500_i2c2_devices, ARRAY_SIZE(mop500_i2c2_devices)); /* This board has full regulator constraints */ regulator_has_full_constraints(); } static void __init hrefv60_init_machine(void) { struct device *parent = NULL; int i2c0_devs; int i; /* * The HREFv60 board removed a GPIO expander and routed * all these GPIO pins to the internal GPIO controller * instead. */ mop500_gpio_keys[0].gpio = HREFV60_PROX_SENSE_GPIO; parent = u8500_init_devices(); hrefv60_pins_init(); for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++) mop500_platform_devs[i]->dev.parent = parent; platform_add_devices(mop500_platform_devs, ARRAY_SIZE(mop500_platform_devs)); mop500_i2c_init(parent); hrefv60_sdi_init(parent); mop500_spi_init(parent); mop500_uart_init(parent); i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices); i2c0_devs -= NUM_PRE_V60_I2C0_DEVICES; i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs); i2c_register_board_info(2, mop500_i2c2_devices, ARRAY_SIZE(mop500_i2c2_devices)); /* This board has full regulator constraints */ regulator_has_full_constraints(); } MACHINE_START(U8500, "ST-Ericsson MOP500 platform") /* Maintainer: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com> */ .atag_offset = 0x100, .map_io = u8500_map_io, .init_irq = ux500_init_irq, /* we re-use nomadik timer here */ .timer = &ux500_timer, .handle_irq = gic_handle_irq, .init_machine = mop500_init_machine, MACHINE_END MACHINE_START(HREFV60, "ST-Ericsson U8500 
Platform HREFv60+") .atag_offset = 0x100, .map_io = u8500_map_io, .init_irq = ux500_init_irq, .timer = &ux500_timer, .handle_irq = gic_handle_irq, .init_machine = hrefv60_init_machine, MACHINE_END MACHINE_START(SNOWBALL, "Calao Systems Snowball platform") .atag_offset = 0x100, .map_io = u8500_map_io, .init_irq = ux500_init_irq, /* we re-use nomadik timer here */ .timer = &ux500_timer, .handle_irq = gic_handle_irq, .init_machine = snowball_init_machine, MACHINE_END #ifdef CONFIG_MACH_UX500_DT struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = { OF_DEV_AUXDATA("arm,pl011", 0x80120000, "uart0", &uart0_plat), OF_DEV_AUXDATA("arm,pl011", 0x80121000, "uart1", &uart1_plat), OF_DEV_AUXDATA("arm,pl011", 0x80007000, "uart2", &uart2_plat), OF_DEV_AUXDATA("arm,pl022", 0x80002000, "ssp0", &ssp0_plat), {}, }; static const struct of_device_id u8500_soc_node[] = { /* only create devices below soc node */ { .compatible = "stericsson,db8500", }, { }, }; static void __init u8500_init_machine(void) { struct device *parent = NULL; int i2c0_devs; int i; parent = u8500_init_devices(); i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices); for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++) mop500_platform_devs[i]->dev.parent = parent; for (i = 0; i < ARRAY_SIZE(snowball_platform_devs); i++) snowball_platform_devs[i]->dev.parent = parent; /* automatically probe child nodes of db8500 device */ of_platform_populate(NULL, u8500_soc_node, u8500_auxdata_lookup, parent); if (of_machine_is_compatible("st-ericsson,mop500")) { mop500_gpio_keys[0].gpio = GPIO_PROX_SENSOR; mop500_pins_init(); platform_add_devices(mop500_platform_devs, ARRAY_SIZE(mop500_platform_devs)); mop500_sdi_init(parent); } else if (of_machine_is_compatible("calaosystems,snowball-a9500")) { snowball_pins_init(); platform_add_devices(snowball_platform_devs, ARRAY_SIZE(snowball_platform_devs)); snowball_sdi_init(parent); } else if (of_machine_is_compatible("st-ericsson,hrefv60+")) { /* * The HREFv60 board removed a GPIO expander 
and routed * all these GPIO pins to the internal GPIO controller * instead. */ mop500_gpio_keys[0].gpio = HREFV60_PROX_SENSE_GPIO; i2c0_devs -= NUM_PRE_V60_I2C0_DEVICES; hrefv60_pins_init(); platform_add_devices(mop500_platform_devs, ARRAY_SIZE(mop500_platform_devs)); hrefv60_sdi_init(parent); } mop500_i2c_init(parent); i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs); i2c_register_board_info(2, mop500_i2c2_devices, ARRAY_SIZE(mop500_i2c2_devices)); /* This board has full regulator constraints */ regulator_has_full_constraints(); } static const char * u8500_dt_board_compat[] = { "calaosystems,snowball-a9500", "st-ericsson,hrefv60+", "st-ericsson,u8500", "st-ericsson,mop500", NULL, }; DT_MACHINE_START(U8500_DT, "ST-Ericsson U8500 platform (Device Tree Support)") .map_io = u8500_map_io, .init_irq = ux500_init_irq, /* we re-use nomadik timer here */ .timer = &ux500_timer, .handle_irq = gic_handle_irq, .init_machine = u8500_init_machine, .dt_compat = u8500_dt_board_compat, MACHINE_END #endif
gpl-2.0
akw28888/caf2
arch/arm/mach-msm/board-trout-wifi.c
5171
2072
/* arch/arm/mach-msm/board-trout-wifi.c
 *
 * Copyright (C) 2008 Google, Inc.
 * Author: Dmitry Shmidt <dimitrysh@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#ifdef CONFIG_WIFI_CONTROL_FUNC
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/wifi_tiwlan.h>

/* Board-level WiFi controls, implemented elsewhere in the trout board code. */
extern int trout_wifi_set_carddetect(int val);
extern int trout_wifi_power(int on);
extern int trout_wifi_reset(int on);

#ifdef CONFIG_WIFI_MEM_PREALLOC
/* One preallocated buffer per WMPA section, sized at compile time. */
typedef struct wifi_mem_prealloc_struct {
	void *mem_ptr;		/* vmalloc'ed buffer, NULL until trout_init_wifi_mem() */
	unsigned long size;	/* usable payload plus section header */
} wifi_mem_prealloc_t;

static wifi_mem_prealloc_t wifi_mem_array[WMPA_NUMBER_OF_SECTIONS] = {
	{ NULL, (WMPA_SECTION_SIZE_0 + WMPA_SECTION_HEADER) },
	{ NULL, (WMPA_SECTION_SIZE_1 + WMPA_SECTION_HEADER) },
	{ NULL, (WMPA_SECTION_SIZE_2 + WMPA_SECTION_HEADER) }
};

/*
 * Hand out the preallocated buffer for @section, or NULL if the section
 * index is out of range or the request is larger than the buffer.
 * Ownership stays with this file; callers must not free the pointer.
 */
static void *trout_wifi_mem_prealloc(int section, unsigned long size)
{
	if ((section < 0) || (section >= WMPA_NUMBER_OF_SECTIONS))
		return NULL;
	if (wifi_mem_array[section].size < size)
		return NULL;
	return wifi_mem_array[section].mem_ptr;
}

/*
 * Allocate every WMPA section buffer up front.
 *
 * Returns 0 on success or -ENOMEM on failure.  On failure all buffers
 * allocated so far are released and their pointers cleared, so a failed
 * init leaves no leaked memory and no stale pointers behind.  (The
 * original code returned -ENOMEM without unwinding, leaking the earlier
 * sections.)
 */
int __init trout_init_wifi_mem(void)
{
	int i;

	for (i = 0; i < WMPA_NUMBER_OF_SECTIONS; i++) {
		wifi_mem_array[i].mem_ptr = vmalloc(wifi_mem_array[i].size);
		if (wifi_mem_array[i].mem_ptr == NULL) {
			/* Unwind the sections already allocated. */
			while (--i >= 0) {
				vfree(wifi_mem_array[i].mem_ptr);
				wifi_mem_array[i].mem_ptr = NULL;
			}
			return -ENOMEM;
		}
	}
	return 0;
}
#endif

struct wifi_platform_data trout_wifi_control = {
	.set_power	= trout_wifi_power,
	.set_reset	= trout_wifi_reset,
	.set_carddetect	= trout_wifi_set_carddetect,
#ifdef CONFIG_WIFI_MEM_PREALLOC
	.mem_prealloc	= trout_wifi_mem_prealloc,
#else
	.mem_prealloc	= NULL,
#endif
};
#endif
gpl-2.0
jassycliq/lg_g2d801
arch/arm/mach-omap1/pm_bus.c
5171
1729
/*
 * Runtime PM support code for OMAP1
 *
 * Author: Kevin Hilman, Deep Root Systems, LLC
 *
 * Copyright (C) 2010 Texas Instruments, Inc.
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pm_clock.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/err.h>

#include <plat/omap_device.h>
#include <plat/omap-pm.h>

#ifdef CONFIG_PM_RUNTIME
/*
 * Runtime-suspend a device: run the generic suspend callback first,
 * then gate its clocks.  If the clock gating fails, the device is
 * resumed again so it is not left half-suspended.
 */
static int omap1_pm_runtime_suspend(struct device *dev)
{
	int err;

	dev_dbg(dev, "%s\n", __func__);

	err = pm_generic_runtime_suspend(dev);
	if (err)
		return err;

	err = pm_clk_suspend(dev);
	if (!err)
		return 0;

	/* Clock gating failed: undo the generic suspend. */
	pm_generic_runtime_resume(dev);
	return err;
}

/* Runtime-resume a device: ungate its clocks, then run the generic resume. */
static int omap1_pm_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "%s\n", __func__);

	pm_clk_resume(dev);
	return pm_generic_runtime_resume(dev);
}

/* PM domain applied to every platform device via the clock notifier. */
static struct dev_pm_domain default_pm_domain = {
	.ops = {
		.runtime_suspend = omap1_pm_runtime_suspend,
		.runtime_resume = omap1_pm_runtime_resume,
		USE_PLATFORM_PM_SLEEP_OPS
	},
};
#define OMAP1_PM_DOMAIN (&default_pm_domain)
#else
#define OMAP1_PM_DOMAIN NULL
#endif /* CONFIG_PM_RUNTIME */

/* Hook devices' "ick"/"fck" clocks into the PM clock framework. */
static struct pm_clk_notifier_block platform_bus_notifier = {
	.pm_domain = OMAP1_PM_DOMAIN,
	.con_ids = { "ick", "fck", NULL, },
};

static int __init omap1_pm_runtime_init(void)
{
	/* Only applicable on OMAP1-class SoCs. */
	if (!cpu_class_is_omap1())
		return -ENODEV;

	pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);

	return 0;
}
core_initcall(omap1_pm_runtime_init);
gpl-2.0
LibiSC/Smt520Test
drivers/rapidio/rio.c
7987
33020
/* * RapidIO interconnect services * (RapidIO Interconnect Specification, http://www.rapidio.org) * * Copyright 2005 MontaVista Software, Inc. * Matt Porter <mporter@kernel.crashing.org> * * Copyright 2009 Integrated Device Technology, Inc. * Alex Bounine <alexandre.bounine@idt.com> * - Added Port-Write/Error Management initialization and handling * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/rio.h> #include <linux/rio_drv.h> #include <linux/rio_ids.h> #include <linux/rio_regs.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/interrupt.h> #include "rio.h" static LIST_HEAD(rio_mports); static unsigned char next_portid; /** * rio_local_get_device_id - Get the base/extended device id for a port * @port: RIO master port from which to get the deviceid * * Reads the base/extended device id from the local device * implementing the master port. Returns the 8/16-bit device * id. */ u16 rio_local_get_device_id(struct rio_mport *port) { u32 result; rio_local_read_config_32(port, RIO_DID_CSR, &result); return (RIO_GET_DID(port->sys_size, result)); } /** * rio_request_inb_mbox - request inbound mailbox service * @mport: RIO master port from which to allocate the mailbox resource * @dev_id: Device specific pointer to pass on event * @mbox: Mailbox number to claim * @entries: Number of entries in inbound mailbox queue * @minb: Callback to execute when inbound message is received * * Requests ownership of an inbound mailbox resource and binds * a callback function to the resource. Returns %0 on success. 
*/ int rio_request_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries, void (*minb) (struct rio_mport * mport, void *dev_id, int mbox, int slot)) { int rc = -ENOSYS; struct resource *res; if (mport->ops->open_inb_mbox == NULL) goto out; res = kmalloc(sizeof(struct resource), GFP_KERNEL); if (res) { rio_init_mbox_res(res, mbox, mbox); /* Make sure this mailbox isn't in use */ if ((rc = request_resource(&mport->riores[RIO_INB_MBOX_RESOURCE], res)) < 0) { kfree(res); goto out; } mport->inb_msg[mbox].res = res; /* Hook the inbound message callback */ mport->inb_msg[mbox].mcback = minb; rc = mport->ops->open_inb_mbox(mport, dev_id, mbox, entries); } else rc = -ENOMEM; out: return rc; } /** * rio_release_inb_mbox - release inbound mailbox message service * @mport: RIO master port from which to release the mailbox resource * @mbox: Mailbox number to release * * Releases ownership of an inbound mailbox resource. Returns 0 * if the request has been satisfied. */ int rio_release_inb_mbox(struct rio_mport *mport, int mbox) { if (mport->ops->close_inb_mbox) { mport->ops->close_inb_mbox(mport, mbox); /* Release the mailbox resource */ return release_resource(mport->inb_msg[mbox].res); } else return -ENOSYS; } /** * rio_request_outb_mbox - request outbound mailbox service * @mport: RIO master port from which to allocate the mailbox resource * @dev_id: Device specific pointer to pass on event * @mbox: Mailbox number to claim * @entries: Number of entries in outbound mailbox queue * @moutb: Callback to execute when outbound message is sent * * Requests ownership of an outbound mailbox resource and binds * a callback function to the resource. Returns 0 on success. 
*/ int rio_request_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries, void (*moutb) (struct rio_mport * mport, void *dev_id, int mbox, int slot)) { int rc = -ENOSYS; struct resource *res; if (mport->ops->open_outb_mbox == NULL) goto out; res = kmalloc(sizeof(struct resource), GFP_KERNEL); if (res) { rio_init_mbox_res(res, mbox, mbox); /* Make sure this outbound mailbox isn't in use */ if ((rc = request_resource(&mport->riores[RIO_OUTB_MBOX_RESOURCE], res)) < 0) { kfree(res); goto out; } mport->outb_msg[mbox].res = res; /* Hook the inbound message callback */ mport->outb_msg[mbox].mcback = moutb; rc = mport->ops->open_outb_mbox(mport, dev_id, mbox, entries); } else rc = -ENOMEM; out: return rc; } /** * rio_release_outb_mbox - release outbound mailbox message service * @mport: RIO master port from which to release the mailbox resource * @mbox: Mailbox number to release * * Releases ownership of an inbound mailbox resource. Returns 0 * if the request has been satisfied. */ int rio_release_outb_mbox(struct rio_mport *mport, int mbox) { if (mport->ops->close_outb_mbox) { mport->ops->close_outb_mbox(mport, mbox); /* Release the mailbox resource */ return release_resource(mport->outb_msg[mbox].res); } else return -ENOSYS; } /** * rio_setup_inb_dbell - bind inbound doorbell callback * @mport: RIO master port to bind the doorbell callback * @dev_id: Device specific pointer to pass on event * @res: Doorbell message resource * @dinb: Callback to execute when doorbell is received * * Adds a doorbell resource/callback pair into a port's * doorbell event list. Returns 0 if the request has been * satisfied. 
*/ static int rio_setup_inb_dbell(struct rio_mport *mport, void *dev_id, struct resource *res, void (*dinb) (struct rio_mport * mport, void *dev_id, u16 src, u16 dst, u16 info)) { int rc = 0; struct rio_dbell *dbell; if (!(dbell = kmalloc(sizeof(struct rio_dbell), GFP_KERNEL))) { rc = -ENOMEM; goto out; } dbell->res = res; dbell->dinb = dinb; dbell->dev_id = dev_id; list_add_tail(&dbell->node, &mport->dbells); out: return rc; } /** * rio_request_inb_dbell - request inbound doorbell message service * @mport: RIO master port from which to allocate the doorbell resource * @dev_id: Device specific pointer to pass on event * @start: Doorbell info range start * @end: Doorbell info range end * @dinb: Callback to execute when doorbell is received * * Requests ownership of an inbound doorbell resource and binds * a callback function to the resource. Returns 0 if the request * has been satisfied. */ int rio_request_inb_dbell(struct rio_mport *mport, void *dev_id, u16 start, u16 end, void (*dinb) (struct rio_mport * mport, void *dev_id, u16 src, u16 dst, u16 info)) { int rc = 0; struct resource *res = kmalloc(sizeof(struct resource), GFP_KERNEL); if (res) { rio_init_dbell_res(res, start, end); /* Make sure these doorbells aren't in use */ if ((rc = request_resource(&mport->riores[RIO_DOORBELL_RESOURCE], res)) < 0) { kfree(res); goto out; } /* Hook the doorbell callback */ rc = rio_setup_inb_dbell(mport, dev_id, res, dinb); } else rc = -ENOMEM; out: return rc; } /** * rio_release_inb_dbell - release inbound doorbell message service * @mport: RIO master port from which to release the doorbell resource * @start: Doorbell info range start * @end: Doorbell info range end * * Releases ownership of an inbound doorbell resource and removes * callback from the doorbell event list. Returns 0 if the request * has been satisfied. 
*/ int rio_release_inb_dbell(struct rio_mport *mport, u16 start, u16 end) { int rc = 0, found = 0; struct rio_dbell *dbell; list_for_each_entry(dbell, &mport->dbells, node) { if ((dbell->res->start == start) && (dbell->res->end == end)) { found = 1; break; } } /* If we can't find an exact match, fail */ if (!found) { rc = -EINVAL; goto out; } /* Delete from list */ list_del(&dbell->node); /* Release the doorbell resource */ rc = release_resource(dbell->res); /* Free the doorbell event */ kfree(dbell); out: return rc; } /** * rio_request_outb_dbell - request outbound doorbell message range * @rdev: RIO device from which to allocate the doorbell resource * @start: Doorbell message range start * @end: Doorbell message range end * * Requests ownership of a doorbell message range. Returns a resource * if the request has been satisfied or %NULL on failure. */ struct resource *rio_request_outb_dbell(struct rio_dev *rdev, u16 start, u16 end) { struct resource *res = kmalloc(sizeof(struct resource), GFP_KERNEL); if (res) { rio_init_dbell_res(res, start, end); /* Make sure these doorbells aren't in use */ if (request_resource(&rdev->riores[RIO_DOORBELL_RESOURCE], res) < 0) { kfree(res); res = NULL; } } return res; } /** * rio_release_outb_dbell - release outbound doorbell message range * @rdev: RIO device from which to release the doorbell resource * @res: Doorbell resource to be freed * * Releases ownership of a doorbell message range. Returns 0 if the * request has been satisfied. */ int rio_release_outb_dbell(struct rio_dev *rdev, struct resource *res) { int rc = release_resource(res); kfree(res); return rc; } /** * rio_request_inb_pwrite - request inbound port-write message service * @rdev: RIO device to which register inbound port-write callback routine * @pwcback: Callback routine to execute when port-write is received * * Binds a port-write callback function to the RapidIO device. * Returns 0 if the request has been satisfied. 
/**
 * rio_request_inb_pwrite - request inbound port-write message service
 * @rdev: RIO device to which to attach the port-write callback
 * @pwcback: callback to invoke when a port-write message arrives
 *
 * Registers @pwcback as the inbound port-write handler for @rdev under
 * rio_global_list_lock.  Returns 0 on success, or -ENOMEM if a callback
 * is already registered.
 * NOTE(review): -ENOMEM is an odd code for "slot already in use"; -EBUSY
 * would be conventional — confirm no caller depends on -ENOMEM before
 * changing it.
 */
int rio_request_inb_pwrite(struct rio_dev *rdev,
	int (*pwcback)(struct rio_dev *rdev, union rio_pw_msg *msg, int step))
{
	int rc = 0;

	spin_lock(&rio_global_list_lock);
	if (rdev->pwcback != NULL)
		rc = -ENOMEM;
	else
		rdev->pwcback = pwcback;
	spin_unlock(&rio_global_list_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(rio_request_inb_pwrite);

/**
 * rio_release_inb_pwrite - release inbound port-write message service
 * @rdev: RIO device which registered for inbound port-write callback
 *
 * Removes callback from the rio_dev structure. Returns 0 if the request
 * has been satisfied, -ENOMEM if no callback was registered.
 */
int rio_release_inb_pwrite(struct rio_dev *rdev)
{
	int rc = -ENOMEM;

	spin_lock(&rio_global_list_lock);
	if (rdev->pwcback) {
		rdev->pwcback = NULL;
		rc = 0;
	}
	spin_unlock(&rio_global_list_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(rio_release_inb_pwrite);

/**
 * rio_mport_get_physefb - Helper function that returns register offset
 *                         for Physical Layer Extended Features Block.
 * @port: Master port to issue transaction
 * @local: Indicate a local master port or remote device access
 * @destid: Destination ID of the device
 * @hopcount: Number of switch hops to the device
 *
 * Walks the device's extended-features chain and returns the offset of
 * the first block whose ID matches one of the known serial endpoint
 * physical-layer IDs, or 0 when the chain ends without a match.
 */
u32
rio_mport_get_physefb(struct rio_mport *port, int local,
		      u16 destid, u8 hopcount)
{
	u32 ext_ftr_ptr;
	u32 ftr_header;

	ext_ftr_ptr = rio_mport_get_efb(port, local, destid, hopcount, 0);

	while (ext_ftr_ptr)  {
		if (local)
			rio_local_read_config_32(port, ext_ftr_ptr,
						 &ftr_header);
		else
			rio_mport_read_config_32(port, destid, hopcount,
						 ext_ftr_ptr, &ftr_header);

		ftr_header = RIO_GET_BLOCK_ID(ftr_header);
		switch (ftr_header) {

		case RIO_EFB_SER_EP_ID_V13P:
		case RIO_EFB_SER_EP_REC_ID_V13P:
		case RIO_EFB_SER_EP_FREE_ID_V13P:
		case RIO_EFB_SER_EP_ID:
		case RIO_EFB_SER_EP_REC_ID:
		case RIO_EFB_SER_EP_FREE_ID:
		case RIO_EFB_SER_EP_FREC_ID:

			return ext_ftr_ptr;

		default:
			break;
		}

		ext_ftr_ptr = rio_mport_get_efb(port, local, destid,
						hopcount, ext_ftr_ptr);
	}

	return ext_ftr_ptr;
}

/**
 * rio_get_comptag - Begin or continue searching for a RIO device by
 *                   component tag
 * @comp_tag: RIO component tag to match
 * @from: Previous RIO device found in search, or %NULL for new search
 *
 * Iterates through the list of known RIO devices. If a RIO device is
 * found with a matching @comp_tag, a pointer to its device
 * structure is returned. Otherwise, %NULL is returned. A new search
 * is initiated by passing %NULL to the @from argument. Otherwise, if
 * @from is not %NULL, searches continue from next device on the global
 * list.
 * NOTE(review): unlike rio_get_asm()/rio_get_device(), no reference is
 * taken on the returned device — callers must know the device cannot
 * disappear; confirm this is intentional.
 */
struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from)
{
	struct list_head *n;
	struct rio_dev *rdev;

	spin_lock(&rio_global_list_lock);
	n = from ? from->global_list.next : rio_devices.next;

	while (n && (n != &rio_devices)) {
		rdev = rio_dev_g(n);
		if (rdev->comp_tag == comp_tag)
			goto exit;
		n = n->next;
	}
	rdev = NULL;
exit:
	spin_unlock(&rio_global_list_lock);
	return rdev;
}

/**
 * rio_set_port_lockout - Sets/clears LOCKOUT bit (RIO EM 1.3) for a switch port.
 * @rdev: Pointer to RIO device control structure
 * @pnum: Switch port number to set LOCKOUT bit
 * @lock: Operation : set (=1) or clear (=0)
 *
 * Read-modify-write of the Port n Control CSR; always returns 0.
 */
int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
{
	u32 regval;

	rio_read_config_32(rdev,
			   rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
			   &regval);
	if (lock)
		regval |= RIO_PORT_N_CTL_LOCKOUT;
	else
		regval &= ~RIO_PORT_N_CTL_LOCKOUT;

	rio_write_config_32(rdev,
			    rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
			    regval);
	return 0;
}

/**
 * rio_chk_dev_route - Validate route to the specified device.
 * @rdev:  RIO device failed to respond
 * @nrdev: Last active device on the route to rdev
 * @npnum: nrdev's port number on the route to rdev
 *
 * Follows a route to the specified RIO device to determine the last
 * available device (and corresponding RIO port) on the route.
 * Returns 0 and fills @nrdev/@npnum on success, -EIO when the route
 * cannot be traced.
 */
static int
rio_chk_dev_route(struct rio_dev *rdev, struct rio_dev **nrdev, int *npnum)
{
	u32 result;
	int p_port, rc = -EIO;
	struct rio_dev *prev = NULL;

	/* Find switch with failed RIO link */
	while (rdev->prev && (rdev->prev->pef & RIO_PEF_SWITCH)) {
		if (!rio_read_config_32(rdev->prev, RIO_DEV_ID_CAR, &result)) {
			prev = rdev->prev;
			break;
		}
		rdev = rdev->prev;
	}

	if (prev == NULL)
		goto err_out;

	p_port = prev->rswitch->route_table[rdev->destid];

	if (p_port != RIO_INVALID_ROUTE) {
		pr_debug("RIO: link failed on [%s]-P%d\n",
			 rio_name(prev), p_port);
		*nrdev = prev;
		*npnum = p_port;
		rc = 0;
	} else
		pr_debug("RIO: failed to trace route to %s\n", rio_name(rdev));
err_out:
	return rc;
}

/**
 * rio_mport_chk_dev_access - Validate access to the specified device.
 * @mport: Master port to send transactions
 * @destid: Device destination ID in network
 * @hopcount: Number of hops into the network
 *
 * Polls the Device ID CAR up to RIO_MAX_CHK_RETRY times (1 ms apart).
 * Returns 0 once a read succeeds, -EIO after the retries are exhausted.
 */
int
rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, u8 hopcount)
{
	int i = 0;
	u32 tmp;

	while (rio_mport_read_config_32(mport, destid, hopcount,
					RIO_DEV_ID_CAR, &tmp)) {
		i++;
		if (i == RIO_MAX_CHK_RETRY)
			return -EIO;
		mdelay(1);
	}

	return 0;
}

/**
 * rio_chk_dev_access - Validate access to the specified device.
 * @rdev: Pointer to RIO device control structure
 */
static int rio_chk_dev_access(struct rio_dev *rdev)
{
	return rio_mport_chk_dev_access(rdev->net->hport,
					rdev->destid, rdev->hopcount);
}

/**
 * rio_get_input_status - Sends a Link-Request/Input-Status control symbol and
 *                        returns link-response (if requested).
 * @rdev: RIO devive to issue Input-status command
 * @pnum: Device port number to issue the command
 * @lnkresp: Response from a link partner
 *
 * When @lnkresp is non-NULL the valid response is polled for (3 tries,
 * 50 us apart); returns -EIO when no valid response arrives.
 */
static int
rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
{
	u32 regval;
	int checkcount;

	if (lnkresp) {
		/* Read from link maintenance response register
		 * to clear valid bit */
		rio_read_config_32(rdev,
			rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
			&regval);
		udelay(50);
	}

	/* Issue Input-status command */
	rio_write_config_32(rdev,
		rdev->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(pnum),
		RIO_MNT_REQ_CMD_IS);

	/* Exit if the response is not expected */
	if (lnkresp == NULL)
		return 0;

	checkcount = 3;
	while (checkcount--) {
		udelay(50);
		rio_read_config_32(rdev,
			rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
			&regval);
		if (regval & RIO_PORT_N_MNT_RSP_RVAL) {
			*lnkresp = regval;
			return 0;
		}
	}

	return -EIO;
}

/**
 * rio_clr_err_stopped - Clears port Error-stopped states.
 * @rdev: Pointer to RIO device control structure
 * @pnum: Switch port number to clear errors
 * @err_status: port error status (if 0 reads register from device)
 *
 * Attempts recovery of the Output and Input Error-stopped states by
 * exchanging ackIDs with the link partner (RIO EM 1.3 procedure).
 * Returns 1 if either error-stopped state is still reported after the
 * attempt, 0 when the port is clean.
 */
static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
{
	struct rio_dev *nextdev = rdev->rswitch->nextdev[pnum];
	u32 regval;
	u32 far_ackid, far_linkstat, near_ackid;

	if (err_status == 0)
		rio_read_config_32(rdev,
			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
			&err_status);

	if (err_status & RIO_PORT_N_ERR_STS_PW_OUT_ES) {
		pr_debug("RIO_EM: servicing Output Error-Stopped state\n");
		/*
		 * Send a Link-Request/Input-Status control symbol
		 */
		if (rio_get_input_status(rdev, pnum, &regval)) {
			pr_debug("RIO_EM: Input-status response timeout\n");
			goto rd_err;
		}

		pr_debug("RIO_EM: SP%d Input-status response=0x%08x\n",
			 pnum, regval);
		far_ackid = (regval & RIO_PORT_N_MNT_RSP_ASTAT) >> 5;
		far_linkstat = regval & RIO_PORT_N_MNT_RSP_LSTAT;
		rio_read_config_32(rdev,
			rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
			&regval);
		pr_debug("RIO_EM: SP%d_ACK_STS_CSR=0x%08x\n", pnum, regval);
		near_ackid = (regval & RIO_PORT_N_ACK_INBOUND) >> 24;
		pr_debug("RIO_EM: SP%d far_ackID=0x%02x far_linkstat=0x%02x" \
			 " near_ackID=0x%02x\n",
			pnum, far_ackid, far_linkstat, near_ackid);

		/*
		 * If required, synchronize ackIDs of near and
		 * far sides.
		 */
		if ((far_ackid != ((regval & RIO_PORT_N_ACK_OUTSTAND) >> 8)) ||
		    (far_ackid != (regval & RIO_PORT_N_ACK_OUTBOUND))) {
			/* Align near outstanding/outbound ackIDs with
			 * far inbound.
			 */
			rio_write_config_32(rdev,
				rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
				(near_ackid << 24) |
					(far_ackid << 8) | far_ackid);
			/* Align far outstanding/outbound ackIDs with
			 * near inbound.
			 */
			far_ackid++;
			if (nextdev)
				rio_write_config_32(nextdev,
					nextdev->phys_efptr +
					RIO_PORT_N_ACK_STS_CSR(RIO_GET_PORT_NUM(nextdev->swpinfo)),
					(far_ackid << 24) |
					(near_ackid << 8) | near_ackid);
			else
				pr_debug("RIO_EM: Invalid nextdev pointer (NULL)\n");
		}
rd_err:
		rio_read_config_32(rdev,
			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
			&err_status);
		pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
	}

	if ((err_status & RIO_PORT_N_ERR_STS_PW_INP_ES) && nextdev) {
		pr_debug("RIO_EM: servicing Input Error-Stopped state\n");
		rio_get_input_status(nextdev,
				     RIO_GET_PORT_NUM(nextdev->swpinfo), NULL);
		udelay(50);

		rio_read_config_32(rdev,
			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
			&err_status);
		pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
	}

	return (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
			      RIO_PORT_N_ERR_STS_PW_INP_ES)) ? 1 : 0;
}

/**
 * rio_inb_pwrite_handler - process inbound port-write message
 * @pw_msg: pointer to inbound port-write message
 *
 * Processes an inbound port-write message. Returns 0 if the request
 * has been satisfied.
 */
int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
{
	struct rio_dev *rdev;
	u32 err_status, em_perrdet, em_ltlerrdet;
	int rc, portnum;

	rdev = rio_get_comptag((pw_msg->em.comptag & RIO_CTAG_UDEVID), NULL);
	if (rdev == NULL) {
		/* Device removed or enumeration error */
		pr_debug("RIO: %s No matching device for CTag 0x%08x\n",
			__func__, pw_msg->em.comptag);
		return -EIO;
	}

	pr_debug("RIO: Port-Write message from %s\n", rio_name(rdev));

#ifdef DEBUG_PW
	{
	u32 i;
	for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32);) {
			pr_debug("0x%02x: %08x %08x %08x %08x\n",
				 i*4, pw_msg->raw[i], pw_msg->raw[i + 1],
				 pw_msg->raw[i + 2], pw_msg->raw[i + 3]);
			i += 4;
	}
	}
#endif

	/* Call an external service function (if such is registered
	 * for this device). This may be the service for endpoints that send
	 * device-specific port-write messages. End-point messages expected
	 * to be handled completely by EP specific device driver.
	 * For switches rc==0 signals that no standard processing required.
	 */
	if (rdev->pwcback != NULL) {
		rc = rdev->pwcback(rdev, pw_msg, 0);
		if (rc == 0)
			return 0;
	}

	portnum = pw_msg->em.is_port & 0xFF;

	/* Check if device and route to it are functional:
	 * Sometimes devices may send PW message(s) just before being
	 * powered down (or link being lost).
	 */
	if (rio_chk_dev_access(rdev)) {
		pr_debug("RIO: device access failed - get link partner\n");
		/* Scan route to the device and identify failed link.
		 * This will replace device and port reported in PW message.
		 * PW message should not be used after this point.
		 */
		if (rio_chk_dev_route(rdev, &rdev, &portnum)) {
			pr_err("RIO: Route trace for %s failed\n",
				rio_name(rdev));
			return -EIO;
		}
		pw_msg = NULL;
	}

	/* For End-point devices processing stops here */
	if (!(rdev->pef & RIO_PEF_SWITCH))
		return 0;

	if (rdev->phys_efptr == 0) {
		pr_err("RIO_PW: Bad switch initialization for %s\n",
			rio_name(rdev));
		return 0;
	}

	/*
	 * Process the port-write notification from switch
	 */
	if (rdev->rswitch->em_handle)
		rdev->rswitch->em_handle(rdev, portnum);

	rio_read_config_32(rdev,
			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
			&err_status);
	pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status);

	if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) {

		if (!(rdev->rswitch->port_ok & (1 << portnum))) {
			rdev->rswitch->port_ok |= (1 << portnum);
			rio_set_port_lockout(rdev, portnum, 0);
			/* Schedule Insertion Service */
			pr_debug("RIO_PW: Device Insertion on [%s]-P%d\n",
			       rio_name(rdev), portnum);
		}

		/* Clear error-stopped states (if reported).
		 * Depending on the link partner state, two attempts
		 * may be needed for successful recovery.
		 */
		if (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
				  RIO_PORT_N_ERR_STS_PW_INP_ES)) {
			if (rio_clr_err_stopped(rdev, portnum, err_status))
				rio_clr_err_stopped(rdev, portnum, 0);
		}
	}  else { /* if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) */

		if (rdev->rswitch->port_ok & (1 << portnum)) {
			rdev->rswitch->port_ok &= ~(1 << portnum);
			rio_set_port_lockout(rdev, portnum, 1);

			rio_write_config_32(rdev,
				rdev->phys_efptr +
					RIO_PORT_N_ACK_STS_CSR(portnum),
				RIO_PORT_N_ACK_CLEAR);

			/* Schedule Extraction Service */
			pr_debug("RIO_PW: Device Extraction on [%s]-P%d\n",
			       rio_name(rdev), portnum);
		}
	}

	rio_read_config_32(rdev,
		rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet);
	if (em_perrdet) {
		pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n",
			 portnum, em_perrdet);
		/* Clear EM Port N Error Detect CSR */
		rio_write_config_32(rdev,
			rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0);
	}

	rio_read_config_32(rdev,
		rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet);
	if (em_ltlerrdet) {
		pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n",
			 em_ltlerrdet);
		/* Clear EM L/T Layer Error Detect CSR */
		rio_write_config_32(rdev,
			rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0);
	}

	/* Clear remaining error bits and Port-Write Pending bit */
	rio_write_config_32(rdev,
			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
			err_status);

	return 0;
}
EXPORT_SYMBOL_GPL(rio_inb_pwrite_handler);

/**
 * rio_mport_get_efb - get pointer to next extended features block
 * @port: Master port to issue transaction
 * @local: Indicate a local master port or remote device access
 * @destid: Destination ID of the device
 * @hopcount: Number of switch hops to the device
 * @from: Offset of  current Extended Feature block header (if 0 starts
 * from	ExtFeaturePtr)
 *
 * NOTE(review): for @from != 0 this returns RIO_GET_BLOCK_ID() of the
 * header, yet callers (rio_mport_get_physefb) use the result as the
 * offset of the next block — verify against the RIO_GET_BLOCK_ID/
 * RIO_GET_BLOCK_PTR definitions in rio_regs.h.
 */
u32
rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
		      u8 hopcount, u32 from)
{
	u32 reg_val;

	if (from == 0) {
		if (local)
			rio_local_read_config_32(port, RIO_ASM_INFO_CAR,
						 &reg_val);
		else
			rio_mport_read_config_32(port, destid, hopcount,
						 RIO_ASM_INFO_CAR, &reg_val);
		return reg_val & RIO_EXT_FTR_PTR_MASK;
	} else {
		if (local)
			rio_local_read_config_32(port, from, &reg_val);
		else
			rio_mport_read_config_32(port, destid, hopcount,
						 from, &reg_val);
		return RIO_GET_BLOCK_ID(reg_val);
	}
}

/**
 * rio_mport_get_feature - query for devices' extended features
 * @port: Master port to issue transaction
 * @local: Indicate a local master port or remote device access
 * @destid: Destination ID of the device
 * @hopcount: Number of switch hops to the device
 * @ftr: Extended feature code
 *
 * Tell if a device supports a given RapidIO capability.
 * Returns the offset of the requested extended feature
 * block within the device's RIO configuration space or
 * 0 in case the device does not support it.  Possible
 * values for @ftr:
 *
 * %RIO_EFB_PAR_EP_ID		LP/LVDS EP Devices
 *
 * %RIO_EFB_PAR_EP_REC_ID	LP/LVDS EP Recovery Devices
 *
 * %RIO_EFB_PAR_EP_FREE_ID	LP/LVDS EP Free Devices
 *
 * %RIO_EFB_SER_EP_ID		LP/Serial EP Devices
 *
 * %RIO_EFB_SER_EP_REC_ID	LP/Serial EP Recovery Devices
 *
 * %RIO_EFB_SER_EP_FREE_ID	LP/Serial EP Free Devices
 */
u32
rio_mport_get_feature(struct rio_mport * port, int local, u16 destid,
		      u8 hopcount, int ftr)
{
	u32 asm_info, ext_ftr_ptr, ftr_header;

	if (local)
		rio_local_read_config_32(port, RIO_ASM_INFO_CAR, &asm_info);
	else
		rio_mport_read_config_32(port, destid, hopcount,
					 RIO_ASM_INFO_CAR, &asm_info);

	ext_ftr_ptr = asm_info & RIO_EXT_FTR_PTR_MASK;

	while (ext_ftr_ptr) {
		if (local)
			rio_local_read_config_32(port, ext_ftr_ptr,
						 &ftr_header);
		else
			rio_mport_read_config_32(port, destid, hopcount,
						 ext_ftr_ptr, &ftr_header);
		if (RIO_GET_BLOCK_ID(ftr_header) == ftr)
			return ext_ftr_ptr;
		if (!(ext_ftr_ptr = RIO_GET_BLOCK_PTR(ftr_header)))
			break;
	}

	return 0;
}

/**
 * rio_get_asm - Begin or continue searching for a RIO device by vid/did/asm_vid/asm_did
 * @vid: RIO vid to match or %RIO_ANY_ID to match all vids
 * @did: RIO did to match or %RIO_ANY_ID to match all dids
 * @asm_vid: RIO asm_vid to match or %RIO_ANY_ID to match all asm_vids
 * @asm_did: RIO asm_did to match or %RIO_ANY_ID to match all asm_dids
 * @from: Previous RIO device found in search, or %NULL for new search
 *
 * Iterates through the list of known RIO devices. If a RIO device is
 * found with a matching @vid, @did, @asm_vid, @asm_did, the reference
 * count to the device is incrememted and a pointer to its device
 * structure is returned. Otherwise, %NULL is returned. A new search
 * is initiated by passing %NULL to the @from argument. Otherwise, if
 * @from is not %NULL, searches continue from next device on the global
 * list. The reference count for @from is always decremented if it is
 * not %NULL.
 */
struct rio_dev *rio_get_asm(u16 vid, u16 did,
			    u16 asm_vid, u16 asm_did, struct rio_dev *from)
{
	struct list_head *n;
	struct rio_dev *rdev;

	WARN_ON(in_interrupt());
	spin_lock(&rio_global_list_lock);
	n = from ? from->global_list.next : rio_devices.next;

	while (n && (n != &rio_devices)) {
		rdev = rio_dev_g(n);
		if ((vid == RIO_ANY_ID || rdev->vid == vid) &&
		    (did == RIO_ANY_ID || rdev->did == did) &&
		    (asm_vid == RIO_ANY_ID || rdev->asm_vid == asm_vid) &&
		    (asm_did == RIO_ANY_ID || rdev->asm_did == asm_did))
			goto exit;
		n = n->next;
	}
	rdev = NULL;
exit:
	rio_dev_put(from);
	rdev = rio_dev_get(rdev);
	spin_unlock(&rio_global_list_lock);
	return rdev;
}

/**
 * rio_get_device - Begin or continue searching for a RIO device by vid/did
 * @vid: RIO vid to match or %RIO_ANY_ID to match all vids
 * @did: RIO did to match or %RIO_ANY_ID to match all dids
 * @from: Previous RIO device found in search, or %NULL for new search
 *
 * Iterates through the list of known RIO devices. If a RIO device is
 * found with a matching @vid and @did, the reference count to the
 * device is incrememted and a pointer to its device structure is returned.
 * Otherwise, %NULL is returned. A new search is initiated by passing %NULL
 * to the @from argument. Otherwise, if @from is not %NULL, searches
 * continue from next device on the global list. The reference count for
 * @from is always decremented if it is not %NULL.
 */
struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from)
{
	return rio_get_asm(vid, did, RIO_ANY_ID, RIO_ANY_ID, from);
}

/**
 * rio_std_route_add_entry - Add switch route table entry using standard
 *   registers defined in RIO specification rev.1.3
 * @mport: Master port to issue transaction
 * @destid: Destination ID of the device
 * @hopcount: Number of switch hops to the device
 * @table: routing table ID (global or port-specific)
 * @route_destid: destID entry in the RT
 * @route_port: destination port for specified destID
 *
 * NOTE(review): only %RIO_GLOBAL_TABLE is handled; other @table values
 * silently do nothing (still returning 0).
 */
int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
		       u16 table, u16 route_destid, u8 route_port)
{
	if (table == RIO_GLOBAL_TABLE) {
		rio_mport_write_config_32(mport, destid, hopcount,
				RIO_STD_RTE_CONF_DESTID_SEL_CSR,
				(u32)route_destid);
		rio_mport_write_config_32(mport, destid, hopcount,
				RIO_STD_RTE_CONF_PORT_SEL_CSR,
				(u32)route_port);
	}

	udelay(10);
	return 0;
}

/**
 * rio_std_route_get_entry - Read switch route table entry (port number)
 *   associated with specified destID using standard registers defined in RIO
 *   specification rev.1.3
 * @mport: Master port to issue transaction
 * @destid: Destination ID of the device
 * @hopcount: Number of switch hops to the device
 * @table: routing table ID (global or port-specific)
 * @route_destid: destID entry in the RT
 * @route_port: returned destination port for specified destID
 */
int rio_std_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
		       u16 table, u16 route_destid, u8 *route_port)
{
	u32 result;

	if (table == RIO_GLOBAL_TABLE) {
		rio_mport_write_config_32(mport, destid, hopcount,
				RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
		rio_mport_read_config_32(mport, destid, hopcount,
				RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);

		*route_port = (u8)result;
	}

	return 0;
}

/**
 * rio_std_route_clr_table - Clear switch route table using standard registers
 *   defined in RIO specification rev.1.3.
 * @mport: Master port to issue transaction
 * @destid: Destination ID of the device
 * @hopcount: Number of switch hops to the device
 * @table: routing table ID (global or port-specific)
 *
 * Writes %RIO_INVALID_ROUTE for every destID up to the switch's route
 * table limit; uses the packed 4-entries-per-write form when the switch
 * advertises the extended route table configuration (RIO_PEF_EXT_RT).
 */
int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
		       u16 table)
{
	u32 max_destid = 0xff;
	u32 i, pef, id_inc = 1, ext_cfg = 0;
	u32 port_sel = RIO_INVALID_ROUTE;

	if (table == RIO_GLOBAL_TABLE) {
		rio_mport_read_config_32(mport, destid, hopcount,
					 RIO_PEF_CAR, &pef);

		if (mport->sys_size) {
			rio_mport_read_config_32(mport, destid, hopcount,
						 RIO_SWITCH_RT_LIMIT,
						 &max_destid);
			max_destid &= RIO_RT_MAX_DESTID;
		}

		if (pef & RIO_PEF_EXT_RT) {
			ext_cfg = 0x80000000;
			id_inc = 4;
			port_sel = (RIO_INVALID_ROUTE << 24) |
				   (RIO_INVALID_ROUTE << 16) |
				   (RIO_INVALID_ROUTE << 8) |
				   RIO_INVALID_ROUTE;
		}

		for (i = 0; i <= max_destid;) {
			rio_mport_write_config_32(mport, destid, hopcount,
					RIO_STD_RTE_CONF_DESTID_SEL_CSR,
					ext_cfg | i);
			rio_mport_write_config_32(mport, destid, hopcount,
					RIO_STD_RTE_CONF_PORT_SEL_CSR,
					port_sel);
			i += id_inc;
		}
	}

	udelay(10);
	return 0;
}

/* Placeholder for per-device quirk fixups applied after enumeration. */
static void rio_fixup_device(struct rio_dev *dev)
{
}

/* Walk every enumerated device and apply fixups. */
static int __devinit rio_init(void)
{
	struct rio_dev *dev = NULL;

	while ((dev = rio_get_device(RIO_ANY_ID, RIO_ANY_ID, dev)) != NULL) {
		rio_fixup_device(dev);
	}
	return 0;
}

/* Enumerate (host) or discover (agent) every registered master port,
 * then run device fixups.  Host vs. agent is decided by whether the
 * port was assigned a non-negative host device ID. */
int __devinit rio_init_mports(void)
{
	struct rio_mport *port;

	list_for_each_entry(port, &rio_mports, node) {
		if (port->host_deviceid >= 0)
			rio_enum_mport(port);
		else
			rio_disc_mport(port);
	}

	rio_init();
	return 0;
}

device_initcall_sync(rio_init_mports);

/* hdids[0] holds the count parsed from "riohdid="; hdids[1..] hold the
 * per-mport host device IDs. */
static int hdids[RIO_MAX_MPORTS + 1];

/* Return the host device ID configured for mport @index, or -1 when
 * none was supplied on the command line. */
static int rio_get_hdid(int index)
{
	if (!hdids[0] || hdids[0] <= index || index >= RIO_MAX_MPORTS)
		return -1;

	return hdids[index + 1];
}

/* Parse the "riohdid=" kernel command line option into hdids[]. */
static int rio_hdid_setup(char *str)
{
	(void)get_options(str, ARRAY_SIZE(hdids), hdids);
	return 1;
}

__setup("riohdid=", rio_hdid_setup);

/* Assign an ID and host device ID to @port and queue it for
 * enumeration/discovery.  Returns 0 on success, 1 when the mport limit
 * has been reached.
 * NOTE(review): returns a positive 1 on failure rather than a -E* code;
 * confirm callers only test for non-zero. */
int rio_register_mport(struct rio_mport *port)
{
	if (next_portid >= RIO_MAX_MPORTS) {
		pr_err("RIO: reached specified max number of mports\n");
		return 1;
	}

	port->id = next_portid++;
	port->host_deviceid = rio_get_hdid(port->id);
	list_add_tail(&port->node, &rio_mports);
	return 0;
}

EXPORT_SYMBOL_GPL(rio_local_get_device_id);
EXPORT_SYMBOL_GPL(rio_get_device);
EXPORT_SYMBOL_GPL(rio_get_asm);
EXPORT_SYMBOL_GPL(rio_request_inb_dbell);
EXPORT_SYMBOL_GPL(rio_release_inb_dbell);
EXPORT_SYMBOL_GPL(rio_request_outb_dbell);
EXPORT_SYMBOL_GPL(rio_release_outb_dbell);
EXPORT_SYMBOL_GPL(rio_request_inb_mbox);
EXPORT_SYMBOL_GPL(rio_release_inb_mbox);
EXPORT_SYMBOL_GPL(rio_request_outb_mbox);
EXPORT_SYMBOL_GPL(rio_release_outb_mbox);
gpl-2.0
houst0nn/android_kernel_lge_g3
fs/hfs/bfind.c
8499
4663
/*
 * linux/fs/hfs/bfind.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Search routines for btrees
 */

#include <linux/slab.h>
#include "btree.h"

/* Prepare a find-data context for searching @tree: allocates a single
 * buffer holding both the search key and the key read back from the
 * tree, then takes the tree lock (released by hfs_find_exit()).
 * Returns 0 or -ENOMEM. */
int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
{
	void *ptr;

	fd->tree = tree;
	fd->bnode = NULL;
	/* One allocation carries both keys; each key slot is
	 * max_key_len + 2 bytes (length prefix included). */
	ptr = kmalloc(tree->max_key_len * 2 + 4, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;
	fd->search_key = ptr;
	fd->key = ptr + tree->max_key_len + 2;
	dprint(DBG_BNODE_REFS, "find_init: %d (%p)\n",
		tree->cnid, __builtin_return_address(0));
	mutex_lock(&tree->tree_lock);
	return 0;
}

/* Tear down a find-data context: drop the node reference, free the key
 * buffer and release the tree lock taken by hfs_find_init(). */
void hfs_find_exit(struct hfs_find_data *fd)
{
	hfs_bnode_put(fd->bnode);
	kfree(fd->search_key);
	dprint(DBG_BNODE_REFS, "find_exit: %d (%p)\n",
		fd->tree->cnid, __builtin_return_address(0));
	mutex_unlock(&fd->tree->tree_lock);
	fd->tree = NULL;
}

/* Find the record in bnode that best matches key (not greater than...) */
/* Binary search within one node.  On exact match res = 0; otherwise
 * res = -ENOENT and fd->record is the last record <= the search key
 * (may be -1 when every record is greater).
 * NOTE(review): when the loop exits without a match and e < 0, the
 * fields written at done: come from the last probed record, and with
 * num_recs == 0 the first iteration reads record 0 of an empty node —
 * callers appear to rely on checking fd->record/res first; verify. */
int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
{
	int cmpval;
	u16 off, len, keylen;
	int rec;
	int b, e;
	int res;

	b = 0;
	e = bnode->num_recs - 1;
	res = -ENOENT;
	do {
		rec = (e + b) / 2;
		len = hfs_brec_lenoff(bnode, rec, &off);
		keylen = hfs_brec_keylen(bnode, rec);
		if (keylen == 0) {
			res = -EINVAL;
			goto fail;
		}
		hfs_bnode_read(bnode, fd->key, off, keylen);
		cmpval = bnode->tree->keycmp(fd->key, fd->search_key);
		if (!cmpval) {
			e = rec;
			res = 0;
			goto done;
		}
		if (cmpval < 0)
			b = rec + 1;
		else
			e = rec - 1;
	} while (b <= e);
	/* Re-read record e so fd->key reflects the best (<=) match. */
	if (rec != e && e >= 0) {
		len = hfs_brec_lenoff(bnode, e, &off);
		keylen = hfs_brec_keylen(bnode, e);
		if (keylen == 0) {
			res = -EINVAL;
			goto fail;
		}
		hfs_bnode_read(bnode, fd->key, off, keylen);
	}
done:
	fd->record = e;
	fd->keyoffset = off;
	fd->keylength = keylen;
	fd->entryoffset = off + keylen;
	fd->entrylength = len - keylen;
fail:
	return res;
}

/* Traverse a B*Tree from the root to a leaf finding best fit to key */
/* Return allocated copy of node found, set recnum to best record */
int hfs_brec_find(struct hfs_find_data *fd)
{
	struct hfs_btree *tree;
	struct hfs_bnode *bnode;
	u32 nidx, parent;
	__be32 data;
	int height, res;

	tree = fd->tree;
	if (fd->bnode)
		hfs_bnode_put(fd->bnode);
	fd->bnode = NULL;
	nidx = tree->root;
	if (!nidx)
		return -ENOENT;
	height = tree->depth;
	res = 0;
	parent = 0;
	for (;;) {
		bnode = hfs_bnode_find(tree, nidx);
		if (IS_ERR(bnode)) {
			res = PTR_ERR(bnode);
			bnode = NULL;
			break;
		}
		/* Sanity-check node height/type against the expected
		 * level of the descent; mismatch means on-disk damage. */
		if (bnode->height != height)
			goto invalid;
		if (bnode->type != (--height ? HFS_NODE_INDEX : HFS_NODE_LEAF))
			goto invalid;
		bnode->parent = parent;

		res = __hfs_brec_find(bnode, fd);
		if (!height)
			break;
		if (fd->record < 0)
			goto release;

		/* Index record payload is the big-endian child node id. */
		parent = nidx;
		hfs_bnode_read(bnode, &data, fd->entryoffset, 4);
		nidx = be32_to_cpu(data);
		hfs_bnode_put(bnode);
	}
	fd->bnode = bnode;
	return res;

invalid:
	printk(KERN_ERR "hfs: inconsistency in B*Tree (%d,%d,%d,%u,%u)\n",
		height, bnode->height, bnode->type, nidx, parent);
	res = -EIO;
release:
	hfs_bnode_put(bnode);
	return res;
}

/* Look up the record matching fd->search_key and copy its payload into
 * @rec.  Returns -EINVAL when the payload is larger than @rec_len. */
int hfs_brec_read(struct hfs_find_data *fd, void *rec, int rec_len)
{
	int res;

	res = hfs_brec_find(fd);
	if (res)
		return res;
	if (fd->entrylength > rec_len)
		return -EINVAL;
	hfs_bnode_read(fd->bnode, rec, fd->entryoffset, fd->entrylength);
	return 0;
}

/* Move @cnt records forward (cnt > 0) or backward (cnt < 0) from the
 * current position, following leaf-node sibling links as needed, and
 * refresh the cached key/entry fields for the new record.
 * Returns -ENOENT when walking off either end of the record chain. */
int hfs_brec_goto(struct hfs_find_data *fd, int cnt)
{
	struct hfs_btree *tree;
	struct hfs_bnode *bnode;
	int idx, res = 0;
	u16 off, len, keylen;

	bnode = fd->bnode;
	tree = bnode->tree;

	if (cnt < 0) {
		cnt = -cnt;
		while (cnt > fd->record) {
			cnt -= fd->record + 1;
			fd->record = bnode->num_recs - 1;
			idx = bnode->prev;
			if (!idx) {
				res = -ENOENT;
				goto out;
			}
			hfs_bnode_put(bnode);
			bnode = hfs_bnode_find(tree, idx);
			if (IS_ERR(bnode)) {
				res = PTR_ERR(bnode);
				bnode = NULL;
				goto out;
			}
		}
		fd->record -= cnt;
	} else {
		while (cnt >= bnode->num_recs - fd->record) {
			cnt -= bnode->num_recs - fd->record;
			fd->record = 0;
			idx = bnode->next;
			if (!idx) {
				res = -ENOENT;
				goto out;
			}
			hfs_bnode_put(bnode);
			bnode = hfs_bnode_find(tree, idx);
			if (IS_ERR(bnode)) {
				res = PTR_ERR(bnode);
				bnode = NULL;
				goto out;
			}
		}
		fd->record += cnt;
	}

	len = hfs_brec_lenoff(bnode, fd->record, &off);
	keylen = hfs_brec_keylen(bnode, fd->record);
	if (keylen == 0) {
		res = -EINVAL;
		goto out;
	}
	fd->keyoffset = off;
	fd->keylength = keylen;
	fd->entryoffset = off + keylen;
	fd->entrylength = len - keylen;
	hfs_bnode_read(bnode, fd->key, off, keylen);
out:
	fd->bnode = bnode;
	return res;
}
gpl-2.0
giveme13s/android_kernel_oneplus_msm8974
drivers/input/keyboard/locomokbd.c
9267
9711
/*
 * LoCoMo keyboard driver for Linux-based ARM PDAs:
 * 	- SHARP Zaurus Collie (SL-5500)
 * 	- SHARP Zaurus Poodle (SL-5600)
 *
 * Copyright (c) 2005 John Lenz
 * Based on from xtkbd.c
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>

#include <asm/hardware/locomo.h>
#include <asm/irq.h>

MODULE_AUTHOR("John Lenz <lenz@cs.wisc.edu>");
MODULE_DESCRIPTION("LoCoMo keyboard driver");
MODULE_LICENSE("GPL");

#define LOCOMOKBD_NUMKEYS	128

/* Aliases for keys that have no direct Linux keycode equivalent. */
#define KEY_ACTIVITY		KEY_F16
#define KEY_CONTACT		KEY_F18
#define KEY_CENTER		KEY_F15

/* Scancode (SCANCODE(col,row)) to Linux keycode translation table. */
static const unsigned char
locomokbd_keycode[LOCOMOKBD_NUMKEYS] __devinitconst = {
	0, KEY_ESC, KEY_ACTIVITY, 0, 0, 0, 0, 0, 0, 0,				/* 0 - 9 */
	0, 0, 0, 0, 0, 0, 0, KEY_MENU, KEY_HOME, KEY_CONTACT,			/* 10 - 19 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,						/* 20 - 29 */
	0, 0, 0, KEY_CENTER, 0, KEY_MAIL, 0, 0, 0, 0,				/* 30 - 39 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, KEY_RIGHT,					/* 40 - 49 */
	KEY_UP, KEY_LEFT, 0, 0, KEY_P, 0, KEY_O, KEY_I, KEY_Y, KEY_T,		/* 50 - 59 */
	KEY_E, KEY_W, 0, 0, 0, 0, KEY_DOWN, KEY_ENTER, 0, 0,			/* 60 - 69 */
	KEY_BACKSPACE, 0, KEY_L, KEY_U, KEY_H, KEY_R, KEY_D, KEY_Q, 0, 0,	/* 70 - 79 */
	0, 0, 0, 0, 0, 0, KEY_ENTER, KEY_RIGHTSHIFT, KEY_K, KEY_J,		/* 80 - 89 */
	KEY_G, KEY_F, KEY_X, KEY_S, 0, 0, 0, 0, 0, 0,				/* 90 - 99 */
	0, 0, KEY_DOT, 0, KEY_COMMA, KEY_N, KEY_B, KEY_C, KEY_Z, KEY_A,		/* 100 - 109 */
	KEY_LEFTSHIFT, KEY_TAB, KEY_LEFTCTRL, 0, 0, 0, 0, 0, 0, 0,		/* 110 - 119 */
	KEY_M, KEY_SPACE, KEY_V, KEY_APOSTROPHE, KEY_SLASH, 0, 0, 0		/* 120 - 128 */
};

#define KB_ROWS			16
#define KB_COLS			8
#define KB_ROWMASK(r)		(1 << (r))
#define SCANCODE(c, r)		(((c) << 4) + (r) + 1)

#define KB_DELAY		8	/* us to settle after driving a column */
#define SCAN_INTERVAL		(HZ/10)	/* release-detection poll period */

struct locomokbd {
	unsigned char keycode[LOCOMOKBD_NUMKEYS];
	struct input_dev *input;
	char phys[32];

	unsigned long base;		/* mapped LoCoMo register base */
	spinlock_t lock;		/* serializes matrix scans (irq vs timer) */

	struct timer_list timer;	/* polls for key release */
	unsigned long suspend_jiffies;	/* last suspend request timestamp */
	unsigned int count_cancel;	/* consecutive scans with ESC held */
};

/* helper functions for reading the keyboard matrix */

/* Drive all columns high to precharge the sense lines. */
static inline void locomokbd_charge_all(unsigned long membase)
{
	locomo_writel(0x00FF, membase + LOCOMO_KSC);
}

/* Re-enable the keyboard interrupt and release all columns. */
static inline void locomokbd_activate_all(unsigned long membase)
{
	unsigned long r;

	locomo_writel(0, membase + LOCOMO_KSC);
	r = locomo_readl(membase + LOCOMO_KIC);
	r &= 0xFEFF;
	locomo_writel(r, membase + LOCOMO_KIC);
}

/* Drive a single column low for sensing. */
static inline void locomokbd_activate_col(unsigned long membase, int col)
{
	unsigned short nset;
	unsigned short nbset;

	nset = 0xFF & ~(1 << col);
	nbset = (nset << 8) + nset;
	locomo_writel(nbset, membase + LOCOMO_KSC);
}

/* Return the column to the charged (inactive) state. */
static inline void locomokbd_reset_col(unsigned long membase, int col)
{
	unsigned short nbset;

	nbset = ((0xFF & ~(1 << col)) << 8) + 0xFF;
	locomo_writel(nbset, membase + LOCOMO_KSC);
}

/*
 * The LoCoMo keyboard only generates interrupts when a key is pressed.
 * So when a key is pressed, we enable a timer.  This timer scans the
 * keyboard, and this is how we detect when the key is released.
 */

/* Scan the hardware keyboard and push any changes up through the input layer */
static void locomokbd_scankeyboard(struct locomokbd *locomokbd)
{
	unsigned int row, col, rowd;
	unsigned long flags;
	unsigned int num_pressed;
	unsigned long membase = locomokbd->base;

	spin_lock_irqsave(&locomokbd->lock, flags);

	locomokbd_charge_all(membase);

	num_pressed = 0;
	for (col = 0; col < KB_COLS; col++) {

		locomokbd_activate_col(membase, col);
		udelay(KB_DELAY);

		rowd = ~locomo_readl(membase + LOCOMO_KIB);
		for (row = 0; row < KB_ROWS; row++) {
			unsigned int scancode, pressed, key;

			scancode = SCANCODE(col, row);
			pressed = rowd & KB_ROWMASK(row);
			key = locomokbd->keycode[scancode];

			input_report_key(locomokbd->input, key, pressed);
			if (likely(!pressed))
				continue;

			num_pressed++;

			/* The "Cancel/ESC" key is labeled "On/Off" on
			 * Collie and Poodle and should suspend the device
			 * if it was pressed for more than a second. */
			if (unlikely(key == KEY_ESC)) {
				if (!time_after(jiffies,
					locomokbd->suspend_jiffies + HZ))
					continue;
				if (locomokbd->count_cancel++
					!= (HZ/SCAN_INTERVAL + 1))
					continue;
				input_event(locomokbd->input, EV_PWR,
					KEY_SUSPEND, 1);
				locomokbd->suspend_jiffies = jiffies;
			} else
				locomokbd->count_cancel = 0;
		}
		locomokbd_reset_col(membase, col);
	}
	locomokbd_activate_all(membase);

	input_sync(locomokbd->input);

	/* if any keys are pressed, enable the timer */
	if (num_pressed)
		mod_timer(&locomokbd->timer, jiffies + SCAN_INTERVAL);
	else
		locomokbd->count_cancel = 0;

	spin_unlock_irqrestore(&locomokbd->lock, flags);
}

/*
 * LoCoMo keyboard interrupt handler.
 * NOTE(review): the pending test uses bit 0x0001 while the ack clears
 * bit 0x0100 of KIC — presumably status vs. mask bits of the same
 * source; confirm against the LoCoMo register documentation.
 */
static irqreturn_t locomokbd_interrupt(int irq, void *dev_id)
{
	struct locomokbd *locomokbd = dev_id;
	u16 r;

	r = locomo_readl(locomokbd->base + LOCOMO_KIC);
	if ((r & 0x0001) == 0)
		return IRQ_HANDLED;

	locomo_writel(r & ~0x0100, locomokbd->base + LOCOMO_KIC); /* Ack */

	/** wait chattering delay **/
	udelay(100);

	locomokbd_scankeyboard(locomokbd);
	return IRQ_HANDLED;
}

/*
 * LoCoMo timer checking for released keys
 */
static void locomokbd_timer_callback(unsigned long data)
{
	struct locomokbd *locomokbd = (struct locomokbd *) data;

	locomokbd_scankeyboard(locomokbd);
}

/* input_dev open: unmask the keyboard interrupt (KIC bit 4). */
static int locomokbd_open(struct input_dev *dev)
{
	struct locomokbd *locomokbd = input_get_drvdata(dev);
	u16 r;
	
	r = locomo_readl(locomokbd->base + LOCOMO_KIC) | 0x0010;
	locomo_writel(r, locomokbd->base + LOCOMO_KIC);
	return 0;
}

/* input_dev close: mask the keyboard interrupt again. */
static void locomokbd_close(struct input_dev *dev)
{
	struct locomokbd *locomokbd = input_get_drvdata(dev);
	u16 r;
	
	r = locomo_readl(locomokbd->base + LOCOMO_KIC) & ~0x0010;
	locomo_writel(r, locomokbd->base + LOCOMO_KIC);
}

/* Bind to the LoCoMo keyboard function: claim the register window,
 * set up the scan timer and keymap, and register the input device.
 * All failure paths unwind via the err_* labels. */
static int __devinit locomokbd_probe(struct locomo_dev *dev)
{
	struct locomokbd *locomokbd;
	struct input_dev *input_dev;
	int i, err;

	locomokbd = kzalloc(sizeof(struct locomokbd), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!locomokbd || !input_dev) {
		err = -ENOMEM;
		goto err_free_mem;
	}

	/* try and claim memory region */
	if (!request_mem_region((unsigned long) dev->mapbase,
				dev->length,
				LOCOMO_DRIVER_NAME(dev))) {
		err = -EBUSY;
		printk(KERN_ERR "locomokbd: Can't acquire access to io memory for keyboard\n");
		goto err_free_mem;
	}

	locomo_set_drvdata(dev, locomokbd);

	locomokbd->base = (unsigned long) dev->mapbase;

	spin_lock_init(&locomokbd->lock);

	init_timer(&locomokbd->timer);
	locomokbd->timer.function = locomokbd_timer_callback;
	locomokbd->timer.data = (unsigned long) locomokbd;

	locomokbd->suspend_jiffies = jiffies;

	locomokbd->input = input_dev;
	strcpy(locomokbd->phys, "locomokbd/input0");

	input_dev->name = "LoCoMo keyboard";
	input_dev->phys = locomokbd->phys;
	input_dev->id.bustype = BUS_HOST;
	input_dev->id.vendor = 0x0001;
	input_dev->id.product = 0x0001;
	input_dev->id.version = 0x0100;
	input_dev->open = locomokbd_open;
	input_dev->close = locomokbd_close;
	input_dev->dev.parent = &dev->dev;

	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) |
				BIT_MASK(EV_PWR);
	input_dev->keycode = locomokbd->keycode;
	input_dev->keycodesize = sizeof(locomokbd_keycode[0]);
	input_dev->keycodemax = ARRAY_SIZE(locomokbd_keycode);
	input_set_drvdata(input_dev, locomokbd);

	memcpy(locomokbd->keycode, locomokbd_keycode, sizeof(locomokbd->keycode));
	for (i = 0; i < LOCOMOKBD_NUMKEYS; i++)
		set_bit(locomokbd->keycode[i], input_dev->keybit);
	clear_bit(0, input_dev->keybit);

	/* attempt to get the interrupt */
	err = request_irq(dev->irq[0], locomokbd_interrupt, 0, "locomokbd", locomokbd);
	if (err) {
		printk(KERN_ERR "locomokbd: Can't get irq for keyboard\n");
		goto err_release_region;
	}

	err = input_register_device(locomokbd->input);
	if (err)
		goto err_free_irq;

	return 0;

 err_free_irq:
	free_irq(dev->irq[0], locomokbd);
 err_release_region:
	release_mem_region((unsigned long) dev->mapbase, dev->length);
	locomo_set_drvdata(dev, NULL);
 err_free_mem:
	input_free_device(input_dev);
	kfree(locomokbd);
	return err;
}

/* Unbind: release the IRQ, stop the scan timer, and undo everything
 * done in probe (reverse order). */
static int __devexit locomokbd_remove(struct locomo_dev *dev)
{
	struct locomokbd *locomokbd = locomo_get_drvdata(dev);

	free_irq(dev->irq[0], locomokbd);

	del_timer_sync(&locomokbd->timer);

	input_unregister_device(locomokbd->input);
	locomo_set_drvdata(dev, NULL);

	release_mem_region((unsigned long) dev->mapbase, dev->length);

	kfree(locomokbd);

	return 0;
}

static struct locomo_driver keyboard_driver = {
	.drv = {
		.name = "locomokbd"
	},
	.devid	= LOCOMO_DEVID_KEYBOARD,
	.probe	= locomokbd_probe,
	.remove	= __devexit_p(locomokbd_remove),
};

static int __init locomokbd_init(void)
{
	return locomo_driver_register(&keyboard_driver);
}

static void __exit locomokbd_exit(void)
{
	locomo_driver_unregister(&keyboard_driver);
}

module_init(locomokbd_init);
module_exit(locomokbd_exit);
gpl-2.0
MoKee/android_kernel_motorola_omap4-common
arch/mips/pnx8550/jbs/init.c
9523
1938
/* * * Copyright 2005 Embedded Alley Solutions, Inc * source@embeddedalley.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/bootmem.h> #include <asm/addrspace.h> #include <asm/bootinfo.h> #include <linux/string.h> #include <linux/kernel.h> int prom_argc; char **prom_argv, **prom_envp; extern void __init prom_init_cmdline(void); extern char *prom_getenv(char *envname); const char *get_system_type(void) { return "NXP PNX8550/JBS"; } void __init prom_init(void) { unsigned long memsize; //memsize = 0x02800000; /* Trimedia uses memory above */ memsize = 0x08000000; /* Trimedia uses memory above */ add_memory_region(0, memsize, BOOT_MEM_RAM); }
gpl-2.0
drewx2/android_kernel_htc_dlx
arch/sh/mm/hugetlbpage.c
9523
1541
/* * arch/sh/mm/hugetlbpage.c * * SuperH HugeTLB page support. * * Cloned from sparc64 by Paul Mundt. * * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com) */ #include <linux/init.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/pagemap.h> #include <linux/sysctl.h> #include <asm/mman.h> #include <asm/pgalloc.h> #include <asm/tlb.h> #include <asm/tlbflush.h> #include <asm/cacheflush.h> pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte = NULL; pgd = pgd_offset(mm, addr); if (pgd) { pud = pud_alloc(mm, pgd, addr); if (pud) { pmd = pmd_alloc(mm, pud, addr); if (pmd) pte = pte_alloc_map(mm, NULL, pmd, addr); } } return pte; } pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte = NULL; pgd = pgd_offset(mm, addr); if (pgd) { pud = pud_offset(pgd, addr); if (pud) { pmd = pmd_offset(pud, addr); if (pmd) pte = pte_offset_map(pmd, addr); } } return pte; } int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) { return 0; } struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) { return ERR_PTR(-EINVAL); } int pmd_huge(pmd_t pmd) { return 0; } int pud_huge(pud_t pud) { return 0; } struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) { return NULL; }
gpl-2.0
TeamHackDroid/samsung-kernel-msm7x30
drivers/ide/ide-dma.c
9779
13556
/* * IDE DMA support (including IDE PCI BM-DMA). * * Copyright (C) 1995-1998 Mark Lord * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> * Copyright (C) 2004, 2007 Bartlomiej Zolnierkiewicz * * May be copied or modified under the terms of the GNU General Public License * * DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies). */ /* * Special Thanks to Mark for his Six years of work. */ /* * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for * fixing the problem with the BIOS on some Acer motherboards. * * Thanks to "Benoit Poulot-Cazajous" <poulot@chorus.fr> for testing * "TX" chipset compatibility and for providing patches for the "TX" chipset. * * Thanks to Christian Brunner <chb@muc.de> for taking a good first crack * at generic DMA -- his patches were referred to when preparing this code. * * Most importantly, thanks to Robert Bringman <rob@mars.trion.com> * for supplying a Promise UDMA board & WD UDMA drive for this work! */ #include <linux/types.h> #include <linux/gfp.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/ide.h> #include <linux/scatterlist.h> #include <linux/dma-mapping.h> static const struct drive_list_entry drive_whitelist[] = { { "Micropolis 2112A" , NULL }, { "CONNER CTMA 4000" , NULL }, { "CONNER CTT8000-A" , NULL }, { "ST34342A" , NULL }, { NULL , NULL } }; static const struct drive_list_entry drive_blacklist[] = { { "WDC AC11000H" , NULL }, { "WDC AC22100H" , NULL }, { "WDC AC32500H" , NULL }, { "WDC AC33100H" , NULL }, { "WDC AC31600H" , NULL }, { "WDC AC32100H" , "24.09P07" }, { "WDC AC23200L" , "21.10N21" }, { "Compaq CRD-8241B" , NULL }, { "CRD-8400B" , NULL }, { "CRD-8480B", NULL }, { "CRD-8482B", NULL }, { "CRD-84" , NULL }, { "SanDisk SDP3B" , NULL }, { "SanDisk SDP3B-64" , NULL }, { "SANYO CD-ROM CRD" , NULL }, { "HITACHI CDR-8" , NULL }, { "HITACHI CDR-8335" , NULL }, { "HITACHI CDR-8435" , NULL }, { "Toshiba CD-ROM XM-6202B" , NULL }, { "TOSHIBA CD-ROM XM-1702BC", 
NULL }, { "CD-532E-A" , NULL }, { "E-IDE CD-ROM CR-840", NULL }, { "CD-ROM Drive/F5A", NULL }, { "WPI CDD-820", NULL }, { "SAMSUNG CD-ROM SC-148C", NULL }, { "SAMSUNG CD-ROM SC", NULL }, { "ATAPI CD-ROM DRIVE 40X MAXIMUM", NULL }, { "_NEC DV5800A", NULL }, { "SAMSUNG CD-ROM SN-124", "N001" }, { "Seagate STT20000A", NULL }, { "CD-ROM CDR_U200", "1.09" }, { NULL , NULL } }; /** * ide_dma_intr - IDE DMA interrupt handler * @drive: the drive the interrupt is for * * Handle an interrupt completing a read/write DMA transfer on an * IDE device */ ide_startstop_t ide_dma_intr(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; struct ide_cmd *cmd = &hwif->cmd; u8 stat = 0, dma_stat = 0; drive->waiting_for_dma = 0; dma_stat = hwif->dma_ops->dma_end(drive); ide_dma_unmap_sg(drive, cmd); stat = hwif->tp_ops->read_status(hwif); if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | ATA_DRQ)) { if (!dma_stat) { if ((cmd->tf_flags & IDE_TFLAG_FS) == 0) ide_finish_cmd(drive, cmd, stat); else ide_complete_rq(drive, 0, blk_rq_sectors(cmd->rq) << 9); return ide_stopped; } printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n", drive->name, __func__, dma_stat); } return ide_error(drive, "dma_intr", stat); } int ide_dma_good_drive(ide_drive_t *drive) { return ide_in_drive_list(drive->id, drive_whitelist); } /** * ide_dma_map_sg - map IDE scatter gather for DMA I/O * @drive: the drive to map the DMA table for * @cmd: command * * Perform the DMA mapping magic necessary to access the source or * target buffers of a request via DMA. The lower layers of the * kernel provide the necessary cache management so that we can * operate in a portable fashion. 
*/ static int ide_dma_map_sg(ide_drive_t *drive, struct ide_cmd *cmd) { ide_hwif_t *hwif = drive->hwif; struct scatterlist *sg = hwif->sg_table; int i; if (cmd->tf_flags & IDE_TFLAG_WRITE) cmd->sg_dma_direction = DMA_TO_DEVICE; else cmd->sg_dma_direction = DMA_FROM_DEVICE; i = dma_map_sg(hwif->dev, sg, cmd->sg_nents, cmd->sg_dma_direction); if (i) { cmd->orig_sg_nents = cmd->sg_nents; cmd->sg_nents = i; } return i; } /** * ide_dma_unmap_sg - clean up DMA mapping * @drive: The drive to unmap * * Teardown mappings after DMA has completed. This must be called * after the completion of each use of ide_build_dmatable and before * the next use of ide_build_dmatable. Failure to do so will cause * an oops as only one mapping can be live for each target at a given * time. */ void ide_dma_unmap_sg(ide_drive_t *drive, struct ide_cmd *cmd) { ide_hwif_t *hwif = drive->hwif; dma_unmap_sg(hwif->dev, hwif->sg_table, cmd->orig_sg_nents, cmd->sg_dma_direction); } EXPORT_SYMBOL_GPL(ide_dma_unmap_sg); /** * ide_dma_off_quietly - Generic DMA kill * @drive: drive to control * * Turn off the current DMA on this IDE controller. */ void ide_dma_off_quietly(ide_drive_t *drive) { drive->dev_flags &= ~IDE_DFLAG_USING_DMA; ide_toggle_bounce(drive, 0); drive->hwif->dma_ops->dma_host_set(drive, 0); } EXPORT_SYMBOL(ide_dma_off_quietly); /** * ide_dma_off - disable DMA on a device * @drive: drive to disable DMA on * * Disable IDE DMA for a device on this IDE controller. * Inform the user that DMA has been disabled. */ void ide_dma_off(ide_drive_t *drive) { printk(KERN_INFO "%s: DMA disabled\n", drive->name); ide_dma_off_quietly(drive); } EXPORT_SYMBOL(ide_dma_off); /** * ide_dma_on - Enable DMA on a device * @drive: drive to enable DMA on * * Enable IDE DMA for a device on this IDE controller. 
*/ void ide_dma_on(ide_drive_t *drive) { drive->dev_flags |= IDE_DFLAG_USING_DMA; ide_toggle_bounce(drive, 1); drive->hwif->dma_ops->dma_host_set(drive, 1); } int __ide_dma_bad_drive(ide_drive_t *drive) { u16 *id = drive->id; int blacklist = ide_in_drive_list(id, drive_blacklist); if (blacklist) { printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n", drive->name, (char *)&id[ATA_ID_PROD]); return blacklist; } return 0; } EXPORT_SYMBOL(__ide_dma_bad_drive); static const u8 xfer_mode_bases[] = { XFER_UDMA_0, XFER_MW_DMA_0, XFER_SW_DMA_0, }; static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode) { u16 *id = drive->id; ide_hwif_t *hwif = drive->hwif; const struct ide_port_ops *port_ops = hwif->port_ops; unsigned int mask = 0; switch (base) { case XFER_UDMA_0: if ((id[ATA_ID_FIELD_VALID] & 4) == 0) break; mask = id[ATA_ID_UDMA_MODES]; if (port_ops && port_ops->udma_filter) mask &= port_ops->udma_filter(drive); else mask &= hwif->ultra_mask; /* * avoid false cable warning from eighty_ninty_three() */ if (req_mode > XFER_UDMA_2) { if ((mask & 0x78) && (eighty_ninty_three(drive) == 0)) mask &= 0x07; } break; case XFER_MW_DMA_0: mask = id[ATA_ID_MWDMA_MODES]; /* Also look for the CF specific MWDMA modes... 
*/ if (ata_id_is_cfa(id) && (id[ATA_ID_CFA_MODES] & 0x38)) { u8 mode = ((id[ATA_ID_CFA_MODES] & 0x38) >> 3) - 1; mask |= ((2 << mode) - 1) << 3; } if (port_ops && port_ops->mdma_filter) mask &= port_ops->mdma_filter(drive); else mask &= hwif->mwdma_mask; break; case XFER_SW_DMA_0: mask = id[ATA_ID_SWDMA_MODES]; if (!(mask & ATA_SWDMA2) && (id[ATA_ID_OLD_DMA_MODES] >> 8)) { u8 mode = id[ATA_ID_OLD_DMA_MODES] >> 8; /* * if the mode is valid convert it to the mask * (the maximum allowed mode is XFER_SW_DMA_2) */ if (mode <= 2) mask = (2 << mode) - 1; } mask &= hwif->swdma_mask; break; default: BUG(); break; } return mask; } /** * ide_find_dma_mode - compute DMA speed * @drive: IDE device * @req_mode: requested mode * * Checks the drive/host capabilities and finds the speed to use for * the DMA transfer. The speed is then limited by the requested mode. * * Returns 0 if the drive/host combination is incapable of DMA transfers * or if the requested mode is not a DMA mode. */ u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode) { ide_hwif_t *hwif = drive->hwif; unsigned int mask; int x, i; u8 mode = 0; if (drive->media != ide_disk) { if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA) return 0; } for (i = 0; i < ARRAY_SIZE(xfer_mode_bases); i++) { if (req_mode < xfer_mode_bases[i]) continue; mask = ide_get_mode_mask(drive, xfer_mode_bases[i], req_mode); x = fls(mask) - 1; if (x >= 0) { mode = xfer_mode_bases[i] + x; break; } } if (hwif->chipset == ide_acorn && mode == 0) { /* * is this correct? */ if (ide_dma_good_drive(drive) && drive->id[ATA_ID_EIDE_DMA_TIME] < 150) mode = XFER_MW_DMA_1; } mode = min(mode, req_mode); printk(KERN_INFO "%s: %s mode selected\n", drive->name, mode ? 
ide_xfer_verbose(mode) : "no DMA"); return mode; } static int ide_tune_dma(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; u8 speed; if (ata_id_has_dma(drive->id) == 0 || (drive->dev_flags & IDE_DFLAG_NODMA)) return 0; /* consult the list of known "bad" drives */ if (__ide_dma_bad_drive(drive)) return 0; if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA) return config_drive_for_dma(drive); speed = ide_max_dma_mode(drive); if (!speed) return 0; if (ide_set_dma_mode(drive, speed)) return 0; return 1; } static int ide_dma_check(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; if (ide_tune_dma(drive)) return 0; /* TODO: always do PIO fallback */ if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA) return -1; ide_set_max_pio(drive); return -1; } int ide_set_dma(ide_drive_t *drive) { int rc; /* * Force DMAing for the beginning of the check. * Some chipsets appear to do interesting * things, if not checked and cleared. * PARANOIA!!! */ ide_dma_off_quietly(drive); rc = ide_dma_check(drive); if (rc) return rc; ide_dma_on(drive); return 0; } void ide_check_dma_crc(ide_drive_t *drive) { u8 mode; ide_dma_off_quietly(drive); drive->crc_count = 0; mode = drive->current_speed; /* * Don't try non Ultra-DMA modes without iCRC's. Force the * device to PIO and make the user enable SWDMA/MWDMA modes. */ if (mode > XFER_UDMA_0 && mode <= XFER_UDMA_7) mode--; else mode = XFER_PIO_4; ide_set_xfer_rate(drive, mode); if (drive->current_speed >= XFER_SW_DMA_0) ide_dma_on(drive); } void ide_dma_lost_irq(ide_drive_t *drive) { printk(KERN_ERR "%s: DMA interrupt recovery\n", drive->name); } EXPORT_SYMBOL_GPL(ide_dma_lost_irq); /* * un-busy the port etc, and clear any pending DMA status. 
we want to * retry the current request in pio mode instead of risking tossing it * all away */ ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { ide_hwif_t *hwif = drive->hwif; const struct ide_dma_ops *dma_ops = hwif->dma_ops; struct ide_cmd *cmd = &hwif->cmd; ide_startstop_t ret = ide_stopped; /* * end current dma transaction */ if (error < 0) { printk(KERN_WARNING "%s: DMA timeout error\n", drive->name); drive->waiting_for_dma = 0; (void)dma_ops->dma_end(drive); ide_dma_unmap_sg(drive, cmd); ret = ide_error(drive, "dma timeout error", hwif->tp_ops->read_status(hwif)); } else { printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name); if (dma_ops->dma_clear) dma_ops->dma_clear(drive); printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name); if (dma_ops->dma_test_irq(drive) == 0) { ide_dump_status(drive, "DMA timeout", hwif->tp_ops->read_status(hwif)); drive->waiting_for_dma = 0; (void)dma_ops->dma_end(drive); ide_dma_unmap_sg(drive, cmd); } } /* * disable dma for now, but remember that we did so because of * a timeout -- we'll reenable after we finish this next request * (or rather the first chunk of it) in pio. 
*/ drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY; drive->retry_pio++; ide_dma_off_quietly(drive); /* * make sure request is sane */ if (hwif->rq) hwif->rq->errors = 0; return ret; } void ide_release_dma_engine(ide_hwif_t *hwif) { if (hwif->dmatable_cpu) { int prd_size = hwif->prd_max_nents * hwif->prd_ent_size; dma_free_coherent(hwif->dev, prd_size, hwif->dmatable_cpu, hwif->dmatable_dma); hwif->dmatable_cpu = NULL; } } EXPORT_SYMBOL_GPL(ide_release_dma_engine); int ide_allocate_dma_engine(ide_hwif_t *hwif) { int prd_size; if (hwif->prd_max_nents == 0) hwif->prd_max_nents = PRD_ENTRIES; if (hwif->prd_ent_size == 0) hwif->prd_ent_size = PRD_BYTES; prd_size = hwif->prd_max_nents * hwif->prd_ent_size; hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev, prd_size, &hwif->dmatable_dma, GFP_ATOMIC); if (hwif->dmatable_cpu == NULL) { printk(KERN_ERR "%s: unable to allocate PRD table\n", hwif->name); return -ENOMEM; } return 0; } EXPORT_SYMBOL_GPL(ide_allocate_dma_engine); int ide_dma_prepare(ide_drive_t *drive, struct ide_cmd *cmd) { const struct ide_dma_ops *dma_ops = drive->hwif->dma_ops; if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0 || (dma_ops->dma_check && dma_ops->dma_check(drive, cmd))) goto out; ide_map_sg(drive, cmd); if (ide_dma_map_sg(drive, cmd) == 0) goto out_map; if (dma_ops->dma_setup(drive, cmd)) goto out_dma_unmap; drive->waiting_for_dma = 1; return 0; out_dma_unmap: ide_dma_unmap_sg(drive, cmd); out_map: ide_map_sg(drive, cmd); out: return 1; }
gpl-2.0
tommytarts/QuantumKernelM8-GPe-5.0.1
drivers/video/via/via_aux_edid.c
9779
2399
/* * Copyright 2011 Florian Tobias Schandinat <FlorianSchandinat@gmx.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; * either version 2, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even * the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE.See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* * generic EDID driver */ #include <linux/slab.h> #include <linux/fb.h> #include "via_aux.h" #include "../edid.h" static const char *name = "EDID"; static void query_edid(struct via_aux_drv *drv) { struct fb_monspecs *spec = drv->data; unsigned char edid[EDID_LENGTH]; bool valid = false; if (spec) { fb_destroy_modedb(spec->modedb); } else { spec = kmalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return; } spec->version = spec->revision = 0; if (via_aux_read(drv, 0x00, edid, EDID_LENGTH)) { fb_edid_to_monspecs(edid, spec); valid = spec->version || spec->revision; } if (!valid) { kfree(spec); spec = NULL; } else printk(KERN_DEBUG "EDID: %s %s\n", spec->manufacturer, spec->monitor); drv->data = spec; } static const struct fb_videomode *get_preferred_mode(struct via_aux_drv *drv) { struct fb_monspecs *spec = drv->data; int i; if (!spec || !spec->modedb || !(spec->misc & FB_MISC_1ST_DETAIL)) return NULL; for (i = 0; i < spec->modedb_len; i++) { if (spec->modedb[i].flag & FB_MODE_IS_FIRST && spec->modedb[i].flag & FB_MODE_IS_DETAILED) return &spec->modedb[i]; } return NULL; } static void cleanup(struct via_aux_drv *drv) { struct fb_monspecs *spec = drv->data; if (spec) 
fb_destroy_modedb(spec->modedb); } void via_aux_edid_probe(struct via_aux_bus *bus) { struct via_aux_drv drv = { .bus = bus, .addr = 0x50, .name = name, .cleanup = cleanup, .get_preferred_mode = get_preferred_mode}; query_edid(&drv); /* as EDID devices can be connected/disconnected just add the driver */ via_aux_add(&drv); }
gpl-2.0
SOKP/kernel_samsung_trlte
fs/ocfs2/cluster/nodemanager.c
10803
25367
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * Copyright (C) 2004, 2005 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/configfs.h> #include "tcp.h" #include "nodemanager.h" #include "heartbeat.h" #include "masklog.h" #include "sys.h" #include "ver.h" /* for now we operate under the assertion that there can be only one * cluster active at a time. 
Changing this will require trickling * cluster references throughout where nodes are looked up */ struct o2nm_cluster *o2nm_single_cluster = NULL; char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = { "reset", /* O2NM_FENCE_RESET */ "panic", /* O2NM_FENCE_PANIC */ }; struct o2nm_node *o2nm_get_node_by_num(u8 node_num) { struct o2nm_node *node = NULL; if (node_num >= O2NM_MAX_NODES || o2nm_single_cluster == NULL) goto out; read_lock(&o2nm_single_cluster->cl_nodes_lock); node = o2nm_single_cluster->cl_nodes[node_num]; if (node) config_item_get(&node->nd_item); read_unlock(&o2nm_single_cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(o2nm_get_node_by_num); int o2nm_configured_node_map(unsigned long *map, unsigned bytes) { struct o2nm_cluster *cluster = o2nm_single_cluster; BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap))); if (cluster == NULL) return -EINVAL; read_lock(&cluster->cl_nodes_lock); memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap)); read_unlock(&cluster->cl_nodes_lock); return 0; } EXPORT_SYMBOL_GPL(o2nm_configured_node_map); static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster, __be32 ip_needle, struct rb_node ***ret_p, struct rb_node **ret_parent) { struct rb_node **p = &cluster->cl_node_ip_tree.rb_node; struct rb_node *parent = NULL; struct o2nm_node *node, *ret = NULL; while (*p) { int cmp; parent = *p; node = rb_entry(parent, struct o2nm_node, nd_ip_node); cmp = memcmp(&ip_needle, &node->nd_ipv4_address, sizeof(ip_needle)); if (cmp < 0) p = &(*p)->rb_left; else if (cmp > 0) p = &(*p)->rb_right; else { ret = node; break; } } if (ret_p != NULL) *ret_p = p; if (ret_parent != NULL) *ret_parent = parent; return ret; } struct o2nm_node *o2nm_get_node_by_ip(__be32 addr) { struct o2nm_node *node = NULL; struct o2nm_cluster *cluster = o2nm_single_cluster; if (cluster == NULL) goto out; read_lock(&cluster->cl_nodes_lock); node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL); if (node) 
config_item_get(&node->nd_item); read_unlock(&cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(o2nm_get_node_by_ip); void o2nm_node_put(struct o2nm_node *node) { config_item_put(&node->nd_item); } EXPORT_SYMBOL_GPL(o2nm_node_put); void o2nm_node_get(struct o2nm_node *node) { config_item_get(&node->nd_item); } EXPORT_SYMBOL_GPL(o2nm_node_get); u8 o2nm_this_node(void) { u8 node_num = O2NM_MAX_NODES; if (o2nm_single_cluster && o2nm_single_cluster->cl_has_local) node_num = o2nm_single_cluster->cl_local_node; return node_num; } EXPORT_SYMBOL_GPL(o2nm_this_node); /* node configfs bits */ static struct o2nm_cluster *to_o2nm_cluster(struct config_item *item) { return item ? container_of(to_config_group(item), struct o2nm_cluster, cl_group) : NULL; } static struct o2nm_node *to_o2nm_node(struct config_item *item) { return item ? container_of(item, struct o2nm_node, nd_item) : NULL; } static void o2nm_node_release(struct config_item *item) { struct o2nm_node *node = to_o2nm_node(item); kfree(node); } static ssize_t o2nm_node_num_read(struct o2nm_node *node, char *page) { return sprintf(page, "%d\n", node->nd_num); } static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node) { /* through the first node_set .parent * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */ return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent); } enum { O2NM_NODE_ATTR_NUM = 0, O2NM_NODE_ATTR_PORT, O2NM_NODE_ATTR_ADDRESS, O2NM_NODE_ATTR_LOCAL, }; static ssize_t o2nm_node_num_write(struct o2nm_node *node, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); unsigned long tmp; char *p = (char *)page; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp >= O2NM_MAX_NODES) return -ERANGE; /* once we're in the cl_nodes tree networking can look us up by * node number and try to use our address and port attributes * to connect to this node.. 
make sure that they've been set * before writing the node attribute? */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ write_lock(&cluster->cl_nodes_lock); if (cluster->cl_nodes[tmp]) p = NULL; else { cluster->cl_nodes[tmp] = node; node->nd_num = tmp; set_bit(tmp, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); if (p == NULL) return -EEXIST; return count; } static ssize_t o2nm_node_ipv4_port_read(struct o2nm_node *node, char *page) { return sprintf(page, "%u\n", ntohs(node->nd_ipv4_port)); } static ssize_t o2nm_node_ipv4_port_write(struct o2nm_node *node, const char *page, size_t count) { unsigned long tmp; char *p = (char *)page; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp == 0) return -EINVAL; if (tmp >= (u16)-1) return -ERANGE; node->nd_ipv4_port = htons(tmp); return count; } static ssize_t o2nm_node_ipv4_address_read(struct o2nm_node *node, char *page) { return sprintf(page, "%pI4\n", &node->nd_ipv4_address); } static ssize_t o2nm_node_ipv4_address_write(struct o2nm_node *node, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); int ret, i; struct rb_node **p, *parent; unsigned int octets[4]; __be32 ipv4_addr = 0; ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2], &octets[1], &octets[0]); if (ret != 4) return -EINVAL; for (i = 0; i < ARRAY_SIZE(octets); i++) { if (octets[i] > 255) return -ERANGE; be32_add_cpu(&ipv4_addr, octets[i] << (i * 8)); } ret = 0; write_lock(&cluster->cl_nodes_lock); if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent)) ret = -EEXIST; else { rb_link_node(&node->nd_ip_node, parent, p); rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree); } write_unlock(&cluster->cl_nodes_lock); if (ret) return ret; memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr)); return count; } static ssize_t 
o2nm_node_local_read(struct o2nm_node *node, char *page) { return sprintf(page, "%d\n", node->nd_local); } static ssize_t o2nm_node_local_write(struct o2nm_node *node, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); unsigned long tmp; char *p = (char *)page; ssize_t ret; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; tmp = !!tmp; /* boolean of whether this node wants to be local */ /* setting local turns on networking rx for now so we require having * set everything else first */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ /* the only failure case is trying to set a new local node * when a different one is already set */ if (tmp && tmp == cluster->cl_has_local && cluster->cl_local_node != node->nd_num) return -EBUSY; /* bring up the rx thread if we're setting the new local node. 
*/ if (tmp && !cluster->cl_has_local) { ret = o2net_start_listening(node); if (ret) return ret; } if (!tmp && cluster->cl_has_local && cluster->cl_local_node == node->nd_num) { o2net_stop_listening(node); cluster->cl_local_node = O2NM_INVALID_NODE_NUM; } node->nd_local = tmp; if (node->nd_local) { cluster->cl_has_local = tmp; cluster->cl_local_node = node->nd_num; } return count; } struct o2nm_node_attribute { struct configfs_attribute attr; ssize_t (*show)(struct o2nm_node *, char *); ssize_t (*store)(struct o2nm_node *, const char *, size_t); }; static struct o2nm_node_attribute o2nm_node_attr_num = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "num", .ca_mode = S_IRUGO | S_IWUSR }, .show = o2nm_node_num_read, .store = o2nm_node_num_write, }; static struct o2nm_node_attribute o2nm_node_attr_ipv4_port = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "ipv4_port", .ca_mode = S_IRUGO | S_IWUSR }, .show = o2nm_node_ipv4_port_read, .store = o2nm_node_ipv4_port_write, }; static struct o2nm_node_attribute o2nm_node_attr_ipv4_address = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "ipv4_address", .ca_mode = S_IRUGO | S_IWUSR }, .show = o2nm_node_ipv4_address_read, .store = o2nm_node_ipv4_address_write, }; static struct o2nm_node_attribute o2nm_node_attr_local = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "local", .ca_mode = S_IRUGO | S_IWUSR }, .show = o2nm_node_local_read, .store = o2nm_node_local_write, }; static struct configfs_attribute *o2nm_node_attrs[] = { [O2NM_NODE_ATTR_NUM] = &o2nm_node_attr_num.attr, [O2NM_NODE_ATTR_PORT] = &o2nm_node_attr_ipv4_port.attr, [O2NM_NODE_ATTR_ADDRESS] = &o2nm_node_attr_ipv4_address.attr, [O2NM_NODE_ATTR_LOCAL] = &o2nm_node_attr_local.attr, NULL, }; static int o2nm_attr_index(struct configfs_attribute *attr) { int i; for (i = 0; i < ARRAY_SIZE(o2nm_node_attrs); i++) { if (attr == o2nm_node_attrs[i]) return i; } BUG(); return 0; } static ssize_t o2nm_node_show(struct config_item *item, struct configfs_attribute *attr, char 
*page) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_node_attribute *o2nm_node_attr = container_of(attr, struct o2nm_node_attribute, attr); ssize_t ret = 0; if (o2nm_node_attr->show) ret = o2nm_node_attr->show(node, page); return ret; } static ssize_t o2nm_node_store(struct config_item *item, struct configfs_attribute *attr, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_node_attribute *o2nm_node_attr = container_of(attr, struct o2nm_node_attribute, attr); ssize_t ret; int attr_index = o2nm_attr_index(attr); if (o2nm_node_attr->store == NULL) { ret = -EINVAL; goto out; } if (test_bit(attr_index, &node->nd_set_attributes)) return -EBUSY; ret = o2nm_node_attr->store(node, page, count); if (ret < count) goto out; set_bit(attr_index, &node->nd_set_attributes); out: return ret; } static struct configfs_item_operations o2nm_node_item_ops = { .release = o2nm_node_release, .show_attribute = o2nm_node_show, .store_attribute = o2nm_node_store, }; static struct config_item_type o2nm_node_type = { .ct_item_ops = &o2nm_node_item_ops, .ct_attrs = o2nm_node_attrs, .ct_owner = THIS_MODULE, }; /* node set */ struct o2nm_node_group { struct config_group ns_group; /* some stuff? */ }; #if 0 static struct o2nm_node_group *to_o2nm_node_group(struct config_group *group) { return group ? 
container_of(group, struct o2nm_node_group, ns_group) : NULL; } #endif struct o2nm_cluster_attribute { struct configfs_attribute attr; ssize_t (*show)(struct o2nm_cluster *, char *); ssize_t (*store)(struct o2nm_cluster *, const char *, size_t); }; static ssize_t o2nm_cluster_attr_write(const char *page, ssize_t count, unsigned int *val) { unsigned long tmp; char *p = (char *)page; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp == 0) return -EINVAL; if (tmp >= (u32)-1) return -ERANGE; *val = tmp; return count; } static ssize_t o2nm_cluster_attr_idle_timeout_ms_read( struct o2nm_cluster *cluster, char *page) { return sprintf(page, "%u\n", cluster->cl_idle_timeout_ms); } static ssize_t o2nm_cluster_attr_idle_timeout_ms_write( struct o2nm_cluster *cluster, const char *page, size_t count) { ssize_t ret; unsigned int val; ret = o2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_idle_timeout_ms != val && o2net_num_connected_peers()) { mlog(ML_NOTICE, "o2net: cannot change idle timeout after " "the first peer has agreed to it." " %d connected peers\n", o2net_num_connected_peers()); ret = -EINVAL; } else if (val <= cluster->cl_keepalive_delay_ms) { mlog(ML_NOTICE, "o2net: idle timeout must be larger " "than keepalive delay\n"); ret = -EINVAL; } else { cluster->cl_idle_timeout_ms = val; } } return ret; } static ssize_t o2nm_cluster_attr_keepalive_delay_ms_read( struct o2nm_cluster *cluster, char *page) { return sprintf(page, "%u\n", cluster->cl_keepalive_delay_ms); } static ssize_t o2nm_cluster_attr_keepalive_delay_ms_write( struct o2nm_cluster *cluster, const char *page, size_t count) { ssize_t ret; unsigned int val; ret = o2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_keepalive_delay_ms != val && o2net_num_connected_peers()) { mlog(ML_NOTICE, "o2net: cannot change keepalive delay after" " the first peer has agreed to it." 
" %d connected peers\n", o2net_num_connected_peers()); ret = -EINVAL; } else if (val >= cluster->cl_idle_timeout_ms) { mlog(ML_NOTICE, "o2net: keepalive delay must be " "smaller than idle timeout\n"); ret = -EINVAL; } else { cluster->cl_keepalive_delay_ms = val; } } return ret; } static ssize_t o2nm_cluster_attr_reconnect_delay_ms_read( struct o2nm_cluster *cluster, char *page) { return sprintf(page, "%u\n", cluster->cl_reconnect_delay_ms); } static ssize_t o2nm_cluster_attr_reconnect_delay_ms_write( struct o2nm_cluster *cluster, const char *page, size_t count) { return o2nm_cluster_attr_write(page, count, &cluster->cl_reconnect_delay_ms); } static ssize_t o2nm_cluster_attr_fence_method_read( struct o2nm_cluster *cluster, char *page) { ssize_t ret = 0; if (cluster) ret = sprintf(page, "%s\n", o2nm_fence_method_desc[cluster->cl_fence_method]); return ret; } static ssize_t o2nm_cluster_attr_fence_method_write( struct o2nm_cluster *cluster, const char *page, size_t count) { unsigned int i; if (page[count - 1] != '\n') goto bail; for (i = 0; i < O2NM_FENCE_METHODS; ++i) { if (count != strlen(o2nm_fence_method_desc[i]) + 1) continue; if (strncasecmp(page, o2nm_fence_method_desc[i], count - 1)) continue; if (cluster->cl_fence_method != i) { printk(KERN_INFO "ocfs2: Changing fence method to %s\n", o2nm_fence_method_desc[i]); cluster->cl_fence_method = i; } return count; } bail: return -EINVAL; } static struct o2nm_cluster_attribute o2nm_cluster_attr_idle_timeout_ms = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "idle_timeout_ms", .ca_mode = S_IRUGO | S_IWUSR }, .show = o2nm_cluster_attr_idle_timeout_ms_read, .store = o2nm_cluster_attr_idle_timeout_ms_write, }; static struct o2nm_cluster_attribute o2nm_cluster_attr_keepalive_delay_ms = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "keepalive_delay_ms", .ca_mode = S_IRUGO | S_IWUSR }, .show = o2nm_cluster_attr_keepalive_delay_ms_read, .store = o2nm_cluster_attr_keepalive_delay_ms_write, }; static struct 
o2nm_cluster_attribute o2nm_cluster_attr_reconnect_delay_ms = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "reconnect_delay_ms", .ca_mode = S_IRUGO | S_IWUSR }, .show = o2nm_cluster_attr_reconnect_delay_ms_read, .store = o2nm_cluster_attr_reconnect_delay_ms_write, }; static struct o2nm_cluster_attribute o2nm_cluster_attr_fence_method = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "fence_method", .ca_mode = S_IRUGO | S_IWUSR }, .show = o2nm_cluster_attr_fence_method_read, .store = o2nm_cluster_attr_fence_method_write, }; static struct configfs_attribute *o2nm_cluster_attrs[] = { &o2nm_cluster_attr_idle_timeout_ms.attr, &o2nm_cluster_attr_keepalive_delay_ms.attr, &o2nm_cluster_attr_reconnect_delay_ms.attr, &o2nm_cluster_attr_fence_method.attr, NULL, }; static ssize_t o2nm_cluster_show(struct config_item *item, struct configfs_attribute *attr, char *page) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); struct o2nm_cluster_attribute *o2nm_cluster_attr = container_of(attr, struct o2nm_cluster_attribute, attr); ssize_t ret = 0; if (o2nm_cluster_attr->show) ret = o2nm_cluster_attr->show(cluster, page); return ret; } static ssize_t o2nm_cluster_store(struct config_item *item, struct configfs_attribute *attr, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); struct o2nm_cluster_attribute *o2nm_cluster_attr = container_of(attr, struct o2nm_cluster_attribute, attr); ssize_t ret; if (o2nm_cluster_attr->store == NULL) { ret = -EINVAL; goto out; } ret = o2nm_cluster_attr->store(cluster, page, count); if (ret < count) goto out; out: return ret; } static struct config_item *o2nm_node_group_make_item(struct config_group *group, const char *name) { struct o2nm_node *node = NULL; if (strlen(name) > O2NM_MAX_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL); if (node == NULL) return ERR_PTR(-ENOMEM); strcpy(node->nd_name, name); /* use item.ci_namebuf instead? 
*/ config_item_init_type_name(&node->nd_item, name, &o2nm_node_type); spin_lock_init(&node->nd_lock); mlog(ML_CLUSTER, "o2nm: Registering node %s\n", name); return &node->nd_item; } static void o2nm_node_group_drop_item(struct config_group *group, struct config_item *item) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent); o2net_disconnect_node(node); if (cluster->cl_has_local && (cluster->cl_local_node == node->nd_num)) { cluster->cl_has_local = 0; cluster->cl_local_node = O2NM_INVALID_NODE_NUM; o2net_stop_listening(node); } /* XXX call into net to stop this node from trading messages */ write_lock(&cluster->cl_nodes_lock); /* XXX sloppy */ if (node->nd_ipv4_address) rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree); /* nd_num might be 0 if the node number hasn't been set.. */ if (cluster->cl_nodes[node->nd_num] == node) { cluster->cl_nodes[node->nd_num] = NULL; clear_bit(node->nd_num, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); mlog(ML_CLUSTER, "o2nm: Unregistered node %s\n", config_item_name(&node->nd_item)); config_item_put(item); } static struct configfs_group_operations o2nm_node_group_group_ops = { .make_item = o2nm_node_group_make_item, .drop_item = o2nm_node_group_drop_item, }; static struct config_item_type o2nm_node_group_type = { .ct_group_ops = &o2nm_node_group_group_ops, .ct_owner = THIS_MODULE, }; /* cluster */ static void o2nm_cluster_release(struct config_item *item) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); kfree(cluster->cl_group.default_groups); kfree(cluster); } static struct configfs_item_operations o2nm_cluster_item_ops = { .release = o2nm_cluster_release, .show_attribute = o2nm_cluster_show, .store_attribute = o2nm_cluster_store, }; static struct config_item_type o2nm_cluster_type = { .ct_item_ops = &o2nm_cluster_item_ops, .ct_attrs = o2nm_cluster_attrs, .ct_owner = THIS_MODULE, }; /* cluster set */ struct o2nm_cluster_group { 
struct configfs_subsystem cs_subsys; /* some stuff? */ }; #if 0 static struct o2nm_cluster_group *to_o2nm_cluster_group(struct config_group *group) { return group ? container_of(to_configfs_subsystem(group), struct o2nm_cluster_group, cs_subsys) : NULL; } #endif static struct config_group *o2nm_cluster_group_make_group(struct config_group *group, const char *name) { struct o2nm_cluster *cluster = NULL; struct o2nm_node_group *ns = NULL; struct config_group *o2hb_group = NULL, *ret = NULL; void *defs = NULL; /* this runs under the parent dir's i_mutex; there can be only * one caller in here at a time */ if (o2nm_single_cluster) return ERR_PTR(-ENOSPC); cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL); ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL); defs = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL); o2hb_group = o2hb_alloc_hb_set(); if (cluster == NULL || ns == NULL || o2hb_group == NULL || defs == NULL) goto out; config_group_init_type_name(&cluster->cl_group, name, &o2nm_cluster_type); config_group_init_type_name(&ns->ns_group, "node", &o2nm_node_group_type); cluster->cl_group.default_groups = defs; cluster->cl_group.default_groups[0] = &ns->ns_group; cluster->cl_group.default_groups[1] = o2hb_group; cluster->cl_group.default_groups[2] = NULL; rwlock_init(&cluster->cl_nodes_lock); cluster->cl_node_ip_tree = RB_ROOT; cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT; cluster->cl_idle_timeout_ms = O2NET_IDLE_TIMEOUT_MS_DEFAULT; cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT; cluster->cl_fence_method = O2NM_FENCE_RESET; ret = &cluster->cl_group; o2nm_single_cluster = cluster; out: if (ret == NULL) { kfree(cluster); kfree(ns); o2hb_free_hb_set(o2hb_group); kfree(defs); ret = ERR_PTR(-ENOMEM); } return ret; } static void o2nm_cluster_group_drop_item(struct config_group *group, struct config_item *item) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); int i; struct config_item *killme; 
BUG_ON(o2nm_single_cluster != cluster); o2nm_single_cluster = NULL; for (i = 0; cluster->cl_group.default_groups[i]; i++) { killme = &cluster->cl_group.default_groups[i]->cg_item; cluster->cl_group.default_groups[i] = NULL; config_item_put(killme); } config_item_put(item); } static struct configfs_group_operations o2nm_cluster_group_group_ops = { .make_group = o2nm_cluster_group_make_group, .drop_item = o2nm_cluster_group_drop_item, }; static struct config_item_type o2nm_cluster_group_type = { .ct_group_ops = &o2nm_cluster_group_group_ops, .ct_owner = THIS_MODULE, }; static struct o2nm_cluster_group o2nm_cluster_group = { .cs_subsys = { .su_group = { .cg_item = { .ci_namebuf = "cluster", .ci_type = &o2nm_cluster_group_type, }, }, }, }; int o2nm_depend_item(struct config_item *item) { return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item); } void o2nm_undepend_item(struct config_item *item) { configfs_undepend_item(&o2nm_cluster_group.cs_subsys, item); } int o2nm_depend_this_node(void) { int ret = 0; struct o2nm_node *local_node; local_node = o2nm_get_node_by_num(o2nm_this_node()); if (!local_node) { ret = -EINVAL; goto out; } ret = o2nm_depend_item(&local_node->nd_item); o2nm_node_put(local_node); out: return ret; } void o2nm_undepend_this_node(void) { struct o2nm_node *local_node; local_node = o2nm_get_node_by_num(o2nm_this_node()); BUG_ON(!local_node); o2nm_undepend_item(&local_node->nd_item); o2nm_node_put(local_node); } static void __exit exit_o2nm(void) { /* XXX sync with hb callbacks and shut down hb? 
*/ o2net_unregister_hb_callbacks(); configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); o2cb_sys_shutdown(); o2net_exit(); o2hb_exit(); } static int __init init_o2nm(void) { int ret = -1; cluster_print_version(); ret = o2hb_init(); if (ret) goto out; ret = o2net_init(); if (ret) goto out_o2hb; ret = o2net_register_hb_callbacks(); if (ret) goto out_o2net; config_group_init(&o2nm_cluster_group.cs_subsys.su_group); mutex_init(&o2nm_cluster_group.cs_subsys.su_mutex); ret = configfs_register_subsystem(&o2nm_cluster_group.cs_subsys); if (ret) { printk(KERN_ERR "nodemanager: Registration returned %d\n", ret); goto out_callbacks; } ret = o2cb_sys_init(); if (!ret) goto out; configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); out_callbacks: o2net_unregister_hb_callbacks(); out_o2net: o2net_exit(); out_o2hb: o2hb_exit(); out: return ret; } MODULE_AUTHOR("Oracle"); MODULE_LICENSE("GPL"); module_init(init_o2nm) module_exit(exit_o2nm)
gpl-2.0
TeamRegular/android_kernel_lge_iproj
drivers/media/video/sn9c102/sn9c102_tas5110c1b.c
12851
4411
/*************************************************************************** * Plug-in for TAS5110C1B image sensor connected to the SN9C1xx PC Camera * * Controllers * * * * Copyright (C) 2004-2007 by Luca Risolia <luca.risolia@studio.unibo.it> * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the Free Software * * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ***************************************************************************/ #include "sn9c102_sensor.h" #include "sn9c102_devtable.h" static int tas5110c1b_init(struct sn9c102_device* cam) { int err = 0; err = sn9c102_write_const_regs(cam, {0x01, 0x01}, {0x44, 0x01}, {0x00, 0x10}, {0x00, 0x11}, {0x0a, 0x14}, {0x60, 0x17}, {0x06, 0x18}, {0xfb, 0x19}); err += sn9c102_i2c_write(cam, 0xc0, 0x80); return err; } static int tas5110c1b_set_ctrl(struct sn9c102_device* cam, const struct v4l2_control* ctrl) { int err = 0; switch (ctrl->id) { case V4L2_CID_GAIN: err += sn9c102_i2c_write(cam, 0x20, 0xf6 - ctrl->value); break; default: return -EINVAL; } return err ? 
-EIO : 0; } static int tas5110c1b_set_crop(struct sn9c102_device* cam, const struct v4l2_rect* rect) { struct sn9c102_sensor* s = sn9c102_get_sensor(cam); int err = 0; u8 h_start = (u8)(rect->left - s->cropcap.bounds.left) + 69, v_start = (u8)(rect->top - s->cropcap.bounds.top) + 9; err += sn9c102_write_reg(cam, h_start, 0x12); err += sn9c102_write_reg(cam, v_start, 0x13); /* Don't change ! */ err += sn9c102_write_reg(cam, 0x14, 0x1a); err += sn9c102_write_reg(cam, 0x0a, 0x1b); err += sn9c102_write_reg(cam, sn9c102_pread_reg(cam, 0x19), 0x19); return err; } static int tas5110c1b_set_pix_format(struct sn9c102_device* cam, const struct v4l2_pix_format* pix) { int err = 0; if (pix->pixelformat == V4L2_PIX_FMT_SN9C10X) err += sn9c102_write_reg(cam, 0x2b, 0x19); else err += sn9c102_write_reg(cam, 0xfb, 0x19); return err; } static const struct sn9c102_sensor tas5110c1b = { .name = "TAS5110C1B", .maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>", .supported_bridge = BRIDGE_SN9C101 | BRIDGE_SN9C102, .sysfs_ops = SN9C102_I2C_WRITE, .frequency = SN9C102_I2C_100KHZ, .interface = SN9C102_I2C_3WIRES, .init = &tas5110c1b_init, .qctrl = { { .id = V4L2_CID_GAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "global gain", .minimum = 0x00, .maximum = 0xf6, .step = 0x01, .default_value = 0x40, .flags = 0, }, }, .set_ctrl = &tas5110c1b_set_ctrl, .cropcap = { .bounds = { .left = 0, .top = 0, .width = 352, .height = 288, }, .defrect = { .left = 0, .top = 0, .width = 352, .height = 288, }, }, .set_crop = &tas5110c1b_set_crop, .pix_format = { .width = 352, .height = 288, .pixelformat = V4L2_PIX_FMT_SBGGR8, .priv = 8, }, .set_pix_format = &tas5110c1b_set_pix_format }; int sn9c102_probe_tas5110c1b(struct sn9c102_device* cam) { const struct usb_device_id tas5110c1b_id_table[] = { { USB_DEVICE(0x0c45, 0x6001), }, { USB_DEVICE(0x0c45, 0x6005), }, { USB_DEVICE(0x0c45, 0x60ab), }, { } }; /* Sensor detection is based on USB pid/vid */ if (!sn9c102_match_id(cam, tas5110c1b_id_table)) return 
-ENODEV; sn9c102_attach_sensor(cam, &tas5110c1b); return 0; }
gpl-2.0
sikarash/linux-pm
arch/m32r/platforms/mappi3/io.c
13875
11272
/* * linux/arch/m32r/platforms/mappi3/io.c * * Typical I/O routines for Mappi3 board. * * Copyright (c) 2001-2005 Hiroyuki Kondo, Hirokazu Takata, * Hitoshi Yamamoto, Mamoru Sakugawa */ #include <asm/m32r.h> #include <asm/page.h> #include <asm/io.h> #include <asm/byteorder.h> #if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC) #include <linux/types.h> #define M32R_PCC_IOMAP_SIZE 0x1000 #define M32R_PCC_IOSTART0 0x1000 #define M32R_PCC_IOEND0 (M32R_PCC_IOSTART0 + M32R_PCC_IOMAP_SIZE - 1) extern void pcc_ioread_byte(int, unsigned long, void *, size_t, size_t, int); extern void pcc_ioread_word(int, unsigned long, void *, size_t, size_t, int); extern void pcc_iowrite_byte(int, unsigned long, void *, size_t, size_t, int); extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int); #endif /* CONFIG_PCMCIA && CONFIG_M32R_CFC */ #define PORT2ADDR(port) _port2addr(port) #define PORT2ADDR_NE(port) _port2addr_ne(port) #define PORT2ADDR_USB(port) _port2addr_usb(port) static inline void *_port2addr(unsigned long port) { return (void *)(port | NONCACHE_OFFSET); } #if defined(CONFIG_IDE) static inline void *__port2addr_ata(unsigned long port) { static int dummy_reg; switch (port) { /* IDE0 CF */ case 0x1f0: return (void *)(0x14002000 | NONCACHE_OFFSET); case 0x1f1: return (void *)(0x14012800 | NONCACHE_OFFSET); case 0x1f2: return (void *)(0x14012002 | NONCACHE_OFFSET); case 0x1f3: return (void *)(0x14012802 | NONCACHE_OFFSET); case 0x1f4: return (void *)(0x14012004 | NONCACHE_OFFSET); case 0x1f5: return (void *)(0x14012804 | NONCACHE_OFFSET); case 0x1f6: return (void *)(0x14012006 | NONCACHE_OFFSET); case 0x1f7: return (void *)(0x14012806 | NONCACHE_OFFSET); case 0x3f6: return (void *)(0x1401200e | NONCACHE_OFFSET); /* IDE1 IDE */ case 0x170: /* Data 16bit */ return (void *)(0x14810000 | NONCACHE_OFFSET); case 0x171: /* Features / Error */ return (void *)(0x14810002 | NONCACHE_OFFSET); case 0x172: /* Sector count */ return (void *)(0x14810004 | 
NONCACHE_OFFSET); case 0x173: /* Sector number */ return (void *)(0x14810006 | NONCACHE_OFFSET); case 0x174: /* Cylinder low */ return (void *)(0x14810008 | NONCACHE_OFFSET); case 0x175: /* Cylinder high */ return (void *)(0x1481000a | NONCACHE_OFFSET); case 0x176: /* Device head */ return (void *)(0x1481000c | NONCACHE_OFFSET); case 0x177: /* Command */ return (void *)(0x1481000e | NONCACHE_OFFSET); case 0x376: /* Device control / Alt status */ return (void *)(0x1480800c | NONCACHE_OFFSET); default: return (void *)&dummy_reg; } } #endif #define LAN_IOSTART (0x300 | NONCACHE_OFFSET) #define LAN_IOEND (0x320 | NONCACHE_OFFSET) static inline void *_port2addr_ne(unsigned long port) { return (void *)(port + 0x10000000); } static inline void *_port2addr_usb(unsigned long port) { return (void *)(port + NONCACHE_OFFSET + 0x12000000); } static inline void delay(void) { __asm__ __volatile__ ("push r0; \n\t pop r0;" : : :"memory"); } /* * NIC I/O function */ static inline unsigned char _ne_inb(void *portp) { return (unsigned char) *(volatile unsigned char *)portp; } static inline unsigned short _ne_inw(void *portp) { return (unsigned short)le16_to_cpu(*(volatile unsigned short *)portp); } static inline void _ne_insb(void *portp, void * addr, unsigned long count) { unsigned char *buf = addr; while (count--) *buf++ = *(volatile unsigned char *)portp; } static inline void _ne_outb(unsigned char b, void *portp) { *(volatile unsigned char *)portp = (unsigned char)b; } static inline void _ne_outw(unsigned short w, void *portp) { *(volatile unsigned short *)portp = cpu_to_le16(w); } unsigned char _inb(unsigned long port) { if (port >= LAN_IOSTART && port < LAN_IOEND) return _ne_inb(PORT2ADDR_NE(port)); #if defined(CONFIG_IDE) else if ( ((port >= 0x170 && port <=0x177) || port == 0x376) || ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) ){ return *(volatile unsigned char *)__port2addr_ata(port); } #endif #if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC) else if (port >= 
M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) { unsigned char b; pcc_ioread_byte(0, port, &b, sizeof(b), 1, 0); return b; } else #endif return *(volatile unsigned char *)PORT2ADDR(port); } unsigned short _inw(unsigned long port) { if (port >= LAN_IOSTART && port < LAN_IOEND) return _ne_inw(PORT2ADDR_NE(port)); #if defined(CONFIG_IDE) else if ( ((port >= 0x170 && port <=0x177) || port == 0x376) || ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) ){ return *(volatile unsigned short *)__port2addr_ata(port); } #endif #if defined(CONFIG_USB) else if (port >= 0x340 && port < 0x3a0) return *(volatile unsigned short *)PORT2ADDR_USB(port); #endif #if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC) else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) { unsigned short w; pcc_ioread_word(0, port, &w, sizeof(w), 1, 0); return w; } else #endif return *(volatile unsigned short *)PORT2ADDR(port); } unsigned long _inl(unsigned long port) { #if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC) if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) { unsigned long l; pcc_ioread_word(0, port, &l, sizeof(l), 1, 0); return l; } else #endif return *(volatile unsigned long *)PORT2ADDR(port); } unsigned char _inb_p(unsigned long port) { unsigned char v = _inb(port); delay(); return (v); } unsigned short _inw_p(unsigned long port) { unsigned short v = _inw(port); delay(); return (v); } unsigned long _inl_p(unsigned long port) { unsigned long v = _inl(port); delay(); return (v); } void _outb(unsigned char b, unsigned long port) { if (port >= LAN_IOSTART && port < LAN_IOEND) _ne_outb(b, PORT2ADDR_NE(port)); else #if defined(CONFIG_IDE) if ( ((port >= 0x170 && port <=0x177) || port == 0x376) || ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) ){ *(volatile unsigned char *)__port2addr_ata(port) = b; } else #endif #if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC) if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) { pcc_iowrite_byte(0, port, &b, sizeof(b), 1, 0); 
} else #endif *(volatile unsigned char *)PORT2ADDR(port) = b; } void _outw(unsigned short w, unsigned long port) { if (port >= LAN_IOSTART && port < LAN_IOEND) _ne_outw(w, PORT2ADDR_NE(port)); else #if defined(CONFIG_IDE) if ( ((port >= 0x170 && port <=0x177) || port == 0x376) || ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) ){ *(volatile unsigned short *)__port2addr_ata(port) = w; } else #endif #if defined(CONFIG_USB) if (port >= 0x340 && port < 0x3a0) *(volatile unsigned short *)PORT2ADDR_USB(port) = w; else #endif #if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC) if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) { pcc_iowrite_word(0, port, &w, sizeof(w), 1, 0); } else #endif *(volatile unsigned short *)PORT2ADDR(port) = w; } void _outl(unsigned long l, unsigned long port) { #if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC) if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) { pcc_iowrite_word(0, port, &l, sizeof(l), 1, 0); } else #endif *(volatile unsigned long *)PORT2ADDR(port) = l; } void _outb_p(unsigned char b, unsigned long port) { _outb(b, port); delay(); } void _outw_p(unsigned short w, unsigned long port) { _outw(w, port); delay(); } void _outl_p(unsigned long l, unsigned long port) { _outl(l, port); delay(); } void _insb(unsigned int port, void * addr, unsigned long count) { if (port >= LAN_IOSTART && port < LAN_IOEND) _ne_insb(PORT2ADDR_NE(port), addr, count); #if defined(CONFIG_IDE) else if ( ((port >= 0x170 && port <=0x177) || port == 0x376) || ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) ){ unsigned char *buf = addr; unsigned char *portp = __port2addr_ata(port); while (count--) *buf++ = *(volatile unsigned char *)portp; } #endif #if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC) else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) { pcc_ioread_byte(0, port, (void *)addr, sizeof(unsigned char), count, 1); } #endif else { unsigned char *buf = addr; unsigned char *portp = PORT2ADDR(port); while (count--) 
*buf++ = *(volatile unsigned char *)portp; } } void _insw(unsigned int port, void * addr, unsigned long count) { unsigned short *buf = addr; unsigned short *portp; if (port >= LAN_IOSTART && port < LAN_IOEND) { portp = PORT2ADDR_NE(port); while (count--) *buf++ = *(volatile unsigned short *)portp; #if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC) } else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) { pcc_ioread_word(9, port, (void *)addr, sizeof(unsigned short), count, 1); #endif #if defined(CONFIG_IDE) } else if ( ((port >= 0x170 && port <=0x177) || port == 0x376) || ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) ){ portp = __port2addr_ata(port); while (count--) *buf++ = *(volatile unsigned short *)portp; #endif } else { portp = PORT2ADDR(port); while (count--) *buf++ = *(volatile unsigned short *)portp; } } void _insl(unsigned int port, void * addr, unsigned long count) { unsigned long *buf = addr; unsigned long *portp; portp = PORT2ADDR(port); while (count--) *buf++ = *(volatile unsigned long *)portp; } void _outsb(unsigned int port, const void * addr, unsigned long count) { const unsigned char *buf = addr; unsigned char *portp; if (port >= LAN_IOSTART && port < LAN_IOEND) { portp = PORT2ADDR_NE(port); while (count--) _ne_outb(*buf++, portp); #if defined(CONFIG_IDE) } else if ( ((port >= 0x170 && port <=0x177) || port == 0x376) || ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) ){ portp = __port2addr_ata(port); while (count--) *(volatile unsigned char *)portp = *buf++; #endif #if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC) } else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) { pcc_iowrite_byte(0, port, (void *)addr, sizeof(unsigned char), count, 1); #endif } else { portp = PORT2ADDR(port); while (count--) *(volatile unsigned char *)portp = *buf++; } } void _outsw(unsigned int port, const void * addr, unsigned long count) { const unsigned short *buf = addr; unsigned short *portp; if (port >= LAN_IOSTART && port < 
LAN_IOEND) { portp = PORT2ADDR_NE(port); while (count--) *(volatile unsigned short *)portp = *buf++; #if defined(CONFIG_IDE) } else if ( ((port >= 0x170 && port <=0x177) || port == 0x376) || ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) ){ portp = __port2addr_ata(port); while (count--) *(volatile unsigned short *)portp = *buf++; #endif #if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC) } else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) { pcc_iowrite_word(9, port, (void *)addr, sizeof(unsigned short), count, 1); #endif } else { portp = PORT2ADDR(port); while (count--) *(volatile unsigned short *)portp = *buf++; } } void _outsl(unsigned int port, const void * addr, unsigned long count) { const unsigned long *buf = addr; unsigned char *portp; portp = PORT2ADDR(port); while (count--) *(volatile unsigned long *)portp = *buf++; }
gpl-2.0
androidarmv6/android_kernel_lge_msm7x27
arch/arm/mach-msm/spm.c
52
8575
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/io.h> #include <mach/msm_iomap.h> #include "spm.h" enum { MSM_SPM_DEBUG_SHADOW = 1U << 0, MSM_SPM_DEBUG_VCTL = 1U << 1, }; static int msm_spm_debug_mask; module_param_named( debug_mask, msm_spm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP ); #define MSM_SPM_PMIC_STATE_IDLE 0 static uint32_t msm_spm_reg_offsets[MSM_SPM_REG_NR] = { [MSM_SPM_REG_SAW_AVS_CTL] = 0x04, [MSM_SPM_REG_SAW_VCTL] = 0x08, [MSM_SPM_REG_SAW_STS] = 0x0C, [MSM_SPM_REG_SAW_CFG] = 0x10, [MSM_SPM_REG_SAW_SPM_CTL] = 0x14, [MSM_SPM_REG_SAW_SPM_SLP_TMR_DLY] = 0x18, [MSM_SPM_REG_SAW_SPM_WAKE_TMR_DLY] = 0x1C, [MSM_SPM_REG_SAW_SPM_PMIC_CTL] = 0x20, [MSM_SPM_REG_SAW_SLP_CLK_EN] = 0x24, [MSM_SPM_REG_SAW_SLP_HSFS_PRECLMP_EN] = 0x28, [MSM_SPM_REG_SAW_SLP_HSFS_POSTCLMP_EN] = 0x2C, [MSM_SPM_REG_SAW_SLP_CLMP_EN] = 0x30, [MSM_SPM_REG_SAW_SLP_RST_EN] = 0x34, [MSM_SPM_REG_SAW_SPM_MPM_CFG] = 0x38, }; struct msm_spm_device { void __iomem *reg_base_addr; uint32_t reg_shadow[MSM_SPM_REG_NR]; uint8_t awake_vlevel; uint8_t retention_vlevel; uint8_t collapse_vlevel; uint8_t retention_mid_vlevel; uint8_t collapse_mid_vlevel; uint32_t vctl_timeout_us; unsigned int low_power_mode; bool 
notify_rpm; bool dirty; }; static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_spm_device, msm_spm_devices); static atomic_t msm_spm_set_vdd_x_cpu_allowed = ATOMIC_INIT(1); /****************************************************************************** * Internal helper functions *****************************************************************************/ static inline void msm_spm_set_vctl( struct msm_spm_device *dev, uint32_t vlevel) { dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] &= ~0xFF; dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] |= vlevel; } static inline void msm_spm_set_spm_ctl(struct msm_spm_device *dev, uint32_t rpm_bypass, uint32_t mode_encoding) { dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] &= ~0x0F; dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] |= rpm_bypass << 3; dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] |= mode_encoding; } static inline void msm_spm_set_pmic_ctl(struct msm_spm_device *dev, uint32_t awake_vlevel, uint32_t mid_vlevel, uint32_t sleep_vlevel) { dev->reg_shadow[MSM_SPM_REG_SAW_SPM_PMIC_CTL] = (mid_vlevel << 16) | (awake_vlevel << 8) | (sleep_vlevel); } static inline void msm_spm_set_slp_rst_en( struct msm_spm_device *dev, uint32_t slp_rst_en) { dev->reg_shadow[MSM_SPM_REG_SAW_SLP_RST_EN] = slp_rst_en; } static inline void msm_spm_flush_shadow( struct msm_spm_device *dev, unsigned int reg_index) { writel(dev->reg_shadow[reg_index], dev->reg_base_addr + msm_spm_reg_offsets[reg_index]); } static inline void msm_spm_load_shadow( struct msm_spm_device *dev, unsigned int reg_index) { dev->reg_shadow[reg_index] = readl(dev->reg_base_addr + msm_spm_reg_offsets[reg_index]); } static inline uint32_t msm_spm_get_sts_pmic_state(struct msm_spm_device *dev) { return (dev->reg_shadow[MSM_SPM_REG_SAW_STS] >> 20) & 0x03; } static inline uint32_t msm_spm_get_sts_curr_pmic_data( struct msm_spm_device *dev) { return (dev->reg_shadow[MSM_SPM_REG_SAW_STS] >> 10) & 0xFF; } /****************************************************************************** * Public functions 
*****************************************************************************/ int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm) { struct msm_spm_device *dev = &__get_cpu_var(msm_spm_devices); uint32_t rpm_bypass = notify_rpm ? 0x00 : 0x01; if (mode == dev->low_power_mode && notify_rpm == dev->notify_rpm && !dev->dirty) return 0; switch (mode) { case MSM_SPM_MODE_CLOCK_GATING: msm_spm_set_spm_ctl(dev, rpm_bypass, 0x00); msm_spm_set_slp_rst_en(dev, 0x00); break; case MSM_SPM_MODE_POWER_RETENTION: msm_spm_set_spm_ctl(dev, rpm_bypass, 0x02); msm_spm_set_pmic_ctl(dev, dev->awake_vlevel, dev->retention_mid_vlevel, dev->retention_vlevel); msm_spm_set_slp_rst_en(dev, 0x00); break; case MSM_SPM_MODE_POWER_COLLAPSE: msm_spm_set_spm_ctl(dev, rpm_bypass, 0x02); msm_spm_set_pmic_ctl(dev, dev->awake_vlevel, dev->collapse_mid_vlevel, dev->collapse_vlevel); msm_spm_set_slp_rst_en(dev, 0x01); break; default: BUG(); } msm_spm_flush_shadow(dev, MSM_SPM_REG_SAW_SPM_CTL); msm_spm_flush_shadow(dev, MSM_SPM_REG_SAW_SPM_PMIC_CTL); msm_spm_flush_shadow(dev, MSM_SPM_REG_SAW_SLP_RST_EN); /* Ensure that the registers are written before returning */ dsb(); dev->low_power_mode = mode; dev->notify_rpm = notify_rpm; dev->dirty = false; if (msm_spm_debug_mask & MSM_SPM_DEBUG_SHADOW) { int i; for (i = 0; i < MSM_SPM_REG_NR; i++) pr_info("%s: reg %02x = 0x%08x\n", __func__, msm_spm_reg_offsets[i], dev->reg_shadow[i]); } return 0; } int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel) { unsigned long flags; struct msm_spm_device *dev; uint32_t timeout_us; local_irq_save(flags); if (!atomic_read(&msm_spm_set_vdd_x_cpu_allowed) && unlikely(smp_processor_id() != cpu)) { if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL) pr_info("%s: attempting to set vdd of cpu %u from " "cpu %u\n", __func__, cpu, smp_processor_id()); goto set_vdd_x_cpu_bail; } dev = &per_cpu(msm_spm_devices, cpu); if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL) pr_info("%s: requesting cpu %u vlevel 0x%x\n", __func__, 
cpu, vlevel); msm_spm_set_vctl(dev, vlevel); msm_spm_flush_shadow(dev, MSM_SPM_REG_SAW_VCTL); /* Wait for PMIC state to return to idle or until timeout */ timeout_us = dev->vctl_timeout_us; msm_spm_load_shadow(dev, MSM_SPM_REG_SAW_STS); while (msm_spm_get_sts_pmic_state(dev) != MSM_SPM_PMIC_STATE_IDLE) { if (!timeout_us) goto set_vdd_bail; if (timeout_us > 10) { udelay(10); timeout_us -= 10; } else { udelay(timeout_us); timeout_us = 0; } msm_spm_load_shadow(dev, MSM_SPM_REG_SAW_STS); } if (msm_spm_get_sts_curr_pmic_data(dev) != vlevel) goto set_vdd_bail; dev->awake_vlevel = vlevel; dev->dirty = true; if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL) pr_info("%s: cpu %u done, remaining timeout %uus\n", __func__, cpu, timeout_us); local_irq_restore(flags); return 0; set_vdd_bail: pr_err("%s: cpu %u failed, remaining timeout %uus, vlevel 0x%x\n", __func__, cpu, timeout_us, msm_spm_get_sts_curr_pmic_data(dev)); set_vdd_x_cpu_bail: local_irq_restore(flags); return -EIO; } void msm_spm_reinit(void) { struct msm_spm_device *dev = &__get_cpu_var(msm_spm_devices); int i; for (i = 0; i < MSM_SPM_REG_NR_INITIALIZE; i++) msm_spm_flush_shadow(dev, i); /* Ensure that the registers are written before returning */ dsb(); } void msm_spm_allow_x_cpu_set_vdd(bool allowed) { atomic_set(&msm_spm_set_vdd_x_cpu_allowed, allowed ? 
1 : 0); } int __init msm_spm_init(struct msm_spm_platform_data *data, int nr_devs) { unsigned int cpu; BUG_ON(nr_devs < num_possible_cpus()); for_each_possible_cpu(cpu) { struct msm_spm_device *dev = &per_cpu(msm_spm_devices, cpu); int i; dev->reg_base_addr = data[cpu].reg_base_addr; memcpy(dev->reg_shadow, data[cpu].reg_init_values, sizeof(data[cpu].reg_init_values)); dev->awake_vlevel = data[cpu].awake_vlevel; dev->retention_vlevel = data[cpu].retention_vlevel; dev->collapse_vlevel = data[cpu].collapse_vlevel; dev->retention_mid_vlevel = data[cpu].retention_mid_vlevel; dev->collapse_mid_vlevel = data[cpu].collapse_mid_vlevel; dev->vctl_timeout_us = data[cpu].vctl_timeout_us; for (i = 0; i < MSM_SPM_REG_NR_INITIALIZE; i++) msm_spm_flush_shadow(dev, i); /* Ensure that the registers are written before returning */ dsb(); dev->low_power_mode = MSM_SPM_MODE_CLOCK_GATING; dev->notify_rpm = false; dev->dirty = true; } return 0; }
gpl-2.0
GustavoRD78/78Kernel-ZL-233
drivers/input/misc/pmic8xxx-pwrkey.c
52
8086
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. * Copyright (c) 2013, Sony Mobile Communications. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/log2.h> #include <linux/workqueue.h> #include <linux/mfd/pm8xxx/core.h> #include <linux/input/pmic8xxx-pwrkey.h> #define PON_CNTL_1 0x1C #define PON_CNTL_PULL_UP BIT(7) #define PON_CNTL_TRIG_DELAY_MASK (0x7) #define CHECK_DELAY msecs_to_jiffies(100) /** * struct pmic8xxx_pwrkey - pmic8xxx pwrkey information * @key_press_irq: key press irq number * @pdata: platform data */ struct pmic8xxx_pwrkey { struct input_dev *pwr; int key_press_irq; int key_release_irq; atomic_t press; const struct pm8xxx_pwrkey_platform_data *pdata; struct delayed_work confirm_work; struct platform_device *pdev; }; static void confirm_key_status(struct work_struct *work) { struct pmic8xxx_pwrkey *pwrkey = container_of(to_delayed_work(work), struct pmic8xxx_pwrkey, confirm_work); int pressed = pm8xxx_read_irq_stat(pwrkey->pdev->dev.parent, pwrkey->key_press_irq); int released = pm8xxx_read_irq_stat(pwrkey->pdev->dev.parent, pwrkey->key_release_irq); if (pressed < 0 || released < 0) { dev_err(&pwrkey->pdev->dev, "reading irq status failed\n"); } else if (pressed != !released) { dev_warn(&pwrkey->pdev->dev, "key status unstable\n"); } else if (atomic_cmpxchg(&pwrkey->press, !pressed, 
pressed) != !pressed) { dev_warn(&pwrkey->pdev->dev, "key status changed to %d\n", pressed); input_report_key(pwrkey->pwr, KEY_POWER, pressed); input_sync(pwrkey->pwr); return; } schedule_delayed_work(to_delayed_work(work), CHECK_DELAY); } static irqreturn_t pwrkey_press_irq(int irq, void *_pwrkey) { struct pmic8xxx_pwrkey *pwrkey = _pwrkey; if (atomic_cmpxchg(&pwrkey->press, false, true) != false) { dev_warn(pwrkey->pwr->dev.parent, "unexpected key press\n"); __cancel_delayed_work(&pwrkey->confirm_work); schedule_delayed_work(&pwrkey->confirm_work, CHECK_DELAY); } #ifdef CONFIG_PMIC8XXX_FORCECRASH pmic8xxx_forcecrash_timer_setup(1); #endif input_report_key(pwrkey->pwr, KEY_POWER, 1); input_sync(pwrkey->pwr); return IRQ_HANDLED; } static irqreturn_t pwrkey_release_irq(int irq, void *_pwrkey) { struct pmic8xxx_pwrkey *pwrkey = _pwrkey; if (atomic_cmpxchg(&pwrkey->press, true, false) != true) { dev_warn(pwrkey->pwr->dev.parent, "unexpected key release\n"); __cancel_delayed_work(&pwrkey->confirm_work); schedule_delayed_work(&pwrkey->confirm_work, CHECK_DELAY); } #ifdef CONFIG_PMIC8XXX_FORCECRASH pmic8xxx_forcecrash_timer_setup(0); #endif input_report_key(pwrkey->pwr, KEY_POWER, 0); input_sync(pwrkey->pwr); return IRQ_HANDLED; } #ifdef CONFIG_PM_SLEEP static int pmic8xxx_pwrkey_suspend(struct device *dev) { struct pmic8xxx_pwrkey *pwrkey = dev_get_drvdata(dev); flush_delayed_work_sync(&pwrkey->confirm_work); if (device_may_wakeup(dev)) { enable_irq_wake(pwrkey->key_press_irq); enable_irq_wake(pwrkey->key_release_irq); } return 0; } static int pmic8xxx_pwrkey_resume(struct device *dev) { struct pmic8xxx_pwrkey *pwrkey = dev_get_drvdata(dev); if (device_may_wakeup(dev)) { disable_irq_wake(pwrkey->key_press_irq); disable_irq_wake(pwrkey->key_release_irq); } return 0; } #endif static SIMPLE_DEV_PM_OPS(pm8xxx_pwr_key_pm_ops, pmic8xxx_pwrkey_suspend, pmic8xxx_pwrkey_resume); static int __devinit pmic8xxx_pwrkey_probe(struct platform_device *pdev) { struct input_dev *pwr; int 
key_release_irq = platform_get_irq(pdev, 0); int key_press_irq = platform_get_irq(pdev, 1); int err; unsigned int delay; u8 pon_cntl; struct pmic8xxx_pwrkey *pwrkey; const struct pm8xxx_pwrkey_platform_data *pdata = dev_get_platdata(&pdev->dev); if (!pdata) { dev_err(&pdev->dev, "power key platform data not supplied\n"); return -EINVAL; } /* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */ if (pdata->kpd_trigger_delay_us > USEC_PER_SEC * 2 || pdata->kpd_trigger_delay_us < USEC_PER_SEC / 64) { dev_err(&pdev->dev, "invalid power key trigger delay\n"); return -EINVAL; } pwrkey = kzalloc(sizeof(*pwrkey), GFP_KERNEL); if (!pwrkey) return -ENOMEM; pwrkey->pdata = pdata; pwr = input_allocate_device(); if (!pwr) { dev_dbg(&pdev->dev, "Can't allocate power button\n"); err = -ENOMEM; goto free_pwrkey; } input_set_capability(pwr, EV_KEY, KEY_POWER); pwr->name = "pmic8xxx_pwrkey"; pwr->phys = "pmic8xxx_pwrkey/input0"; pwr->dev.parent = &pdev->dev; delay = (pdata->kpd_trigger_delay_us << 6) / USEC_PER_SEC; delay = ilog2(delay); err = pm8xxx_readb(pdev->dev.parent, PON_CNTL_1, &pon_cntl); if (err < 0) { dev_err(&pdev->dev, "failed reading PON_CNTL_1 err=%d\n", err); goto free_input_dev; } pon_cntl &= ~PON_CNTL_TRIG_DELAY_MASK; pon_cntl |= (delay & PON_CNTL_TRIG_DELAY_MASK); if (pdata->pull_up) pon_cntl |= PON_CNTL_PULL_UP; else pon_cntl &= ~PON_CNTL_PULL_UP; err = pm8xxx_writeb(pdev->dev.parent, PON_CNTL_1, pon_cntl); if (err < 0) { dev_err(&pdev->dev, "failed writing PON_CNTL_1 err=%d\n", err); goto free_input_dev; } err = input_register_device(pwr); if (err) { dev_dbg(&pdev->dev, "Can't register power key: %d\n", err); goto free_input_dev; } pwrkey->key_press_irq = key_press_irq; pwrkey->key_release_irq = key_release_irq; pwrkey->pwr = pwr; pwrkey->pdev = pdev; INIT_DELAYED_WORK(&pwrkey->confirm_work, confirm_key_status); platform_set_drvdata(pdev, pwrkey); err = request_any_context_irq(key_press_irq, pwrkey_press_irq, IRQF_TRIGGER_RISING, 
"pmic8xxx_pwrkey_press", pwrkey); if (err < 0) { dev_dbg(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n", key_press_irq, err); goto unreg_input_dev; } err = request_any_context_irq(key_release_irq, pwrkey_release_irq, IRQF_TRIGGER_RISING, "pmic8xxx_pwrkey_release", pwrkey); if (err < 0) { dev_dbg(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n", key_release_irq, err); goto free_press_irq; } device_init_wakeup(&pdev->dev, pdata->wakeup); #ifdef CONFIG_PMIC8XXX_FORCECRASH pmic8xxx_forcecrash_init(pdev); #endif return 0; free_press_irq: free_irq(key_press_irq, NULL); unreg_input_dev: platform_set_drvdata(pdev, NULL); input_unregister_device(pwr); pwr = NULL; free_input_dev: input_free_device(pwr); free_pwrkey: kfree(pwrkey); return err; } static int __devexit pmic8xxx_pwrkey_remove(struct platform_device *pdev) { struct pmic8xxx_pwrkey *pwrkey = platform_get_drvdata(pdev); int key_release_irq = platform_get_irq(pdev, 0); int key_press_irq = platform_get_irq(pdev, 1); #ifdef CONFIG_PMIC8XXX_FORCECRASH pmic8xxx_forcecrash_exit(pdev); #endif device_init_wakeup(&pdev->dev, 0); free_irq(key_press_irq, pwrkey); free_irq(key_release_irq, pwrkey); input_unregister_device(pwrkey->pwr); platform_set_drvdata(pdev, NULL); kfree(pwrkey); return 0; } static struct platform_driver pmic8xxx_pwrkey_driver = { .probe = pmic8xxx_pwrkey_probe, .remove = __devexit_p(pmic8xxx_pwrkey_remove), .driver = { .name = PM8XXX_PWRKEY_DEV_NAME, .owner = THIS_MODULE, .pm = &pm8xxx_pwr_key_pm_ops, }, }; static int __devinit pmic8xxx_pwrkey_init(void) { return platform_driver_register(&pmic8xxx_pwrkey_driver); } subsys_initcall(pmic8xxx_pwrkey_init); MODULE_ALIAS("platform:pmic8xxx_pwrkey"); MODULE_DESCRIPTION("PMIC8XXX Power Key driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Trilok Soni <tsoni@codeaurora.org>");
gpl-2.0
TeamFreedom/FreedomKernel
arch/arm/mach-at91/board-cam60.c
308
4592
/* * KwikByte CAM60 (KB9260) * * based on board-sam9260ek.c * Copyright (C) 2005 SAN People * Copyright (C) 2006 Atmel * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/types.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> #include <mach/hardware.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/board.h> #include <mach/gpio.h> #include <mach/at91sam9_smc.h> #include "sam9_smc.h" #include "generic.h" static void __init cam60_map_io(void) { /* Initialize processor: 10 MHz crystal */ at91sam9260_initialize(10000000); /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* set serial console to ttyS0 (ie, DBGU) */ at91_set_serial_console(0); } static void __init cam60_init_irq(void) { at91sam9260_init_interrupts(NULL); } /* * USB Host */ static struct at91_usbh_data __initdata cam60_usbh_data = { .ports = 1, }; /* * SPI devices. 
*/ #if defined(CONFIG_MTD_DATAFLASH) static struct mtd_partition cam60_spi_partitions[] = { { .name = "BOOT1", .offset = 0, .size = 4 * 1056, }, { .name = "BOOT2", .offset = MTDPART_OFS_NXTBLK, .size = 256 * 1056, }, { .name = "kernel", .offset = MTDPART_OFS_NXTBLK, .size = 2222 * 1056, }, { .name = "file system", .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL, }, }; static struct flash_platform_data cam60_spi_flash_platform_data = { .name = "spi_flash", .parts = cam60_spi_partitions, .nr_parts = ARRAY_SIZE(cam60_spi_partitions) }; #endif static struct spi_board_info cam60_spi_devices[] __initdata = { #if defined(CONFIG_MTD_DATAFLASH) { /* DataFlash chip */ .modalias = "mtd_dataflash", .chip_select = 0, .max_speed_hz = 15 * 1000 * 1000, .bus_num = 0, .platform_data = &cam60_spi_flash_platform_data }, #endif }; /* * MACB Ethernet device */ static struct __initdata at91_eth_data cam60_macb_data = { .phy_irq_pin = AT91_PIN_PB5, .is_rmii = 0, }; /* * NAND Flash */ static struct mtd_partition __initdata cam60_nand_partition[] = { { .name = "nand_fs", .offset = 0, .size = MTDPART_SIZ_FULL, }, }; static struct mtd_partition * __init nand_partitions(int size, int *num_partitions) { *num_partitions = ARRAY_SIZE(cam60_nand_partition); return cam60_nand_partition; } static struct atmel_nand_data __initdata cam60_nand_data = { .ale = 21, .cle = 22, // .det_pin = ... 
not there .rdy_pin = AT91_PIN_PA9, .enable_pin = AT91_PIN_PA7, .partition_info = nand_partitions, }; static struct sam9_smc_config __initdata cam60_nand_smc_config = { .ncs_read_setup = 0, .nrd_setup = 1, .ncs_write_setup = 0, .nwe_setup = 1, .ncs_read_pulse = 3, .nrd_pulse = 3, .ncs_write_pulse = 3, .nwe_pulse = 3, .read_cycle = 5, .write_cycle = 5, .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_DBW_8, .tdf_cycles = 2, }; static void __init cam60_add_device_nand(void) { /* configure chip-select 3 (NAND) */ sam9_smc_configure(3, &cam60_nand_smc_config); at91_add_device_nand(&cam60_nand_data); } static void __init cam60_board_init(void) { /* Serial */ at91_add_device_serial(); /* SPI */ at91_add_device_spi(cam60_spi_devices, ARRAY_SIZE(cam60_spi_devices)); /* Ethernet */ at91_add_device_eth(&cam60_macb_data); /* USB Host */ /* enable USB power supply circuit */ at91_set_gpio_output(AT91_PIN_PB18, 1); at91_add_device_usbh(&cam60_usbh_data); /* NAND */ cam60_add_device_nand(); } MACHINE_START(CAM60, "KwikByte CAM60") /* Maintainer: KwikByte */ .boot_params = AT91_SDRAM_BASE + 0x100, .timer = &at91sam926x_timer, .map_io = cam60_map_io, .init_irq = cam60_init_irq, .init_machine = cam60_board_init, MACHINE_END
gpl-2.0
major91/Zeta-Chromium-N5
block/fiops-iosched.c
308
17832
/* * IOPS based IO scheduler. Based on CFQ. * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk> * Shaohua Li <shli@kernel.org> */ #include <linux/module.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/elevator.h> #include <linux/jiffies.h> #include <linux/rbtree.h> #include <linux/ioprio.h> #include <linux/blktrace_api.h> #include "blk.h" #define VIOS_SCALE_SHIFT 10 #define VIOS_SCALE (1 << VIOS_SCALE_SHIFT) #define VIOS_READ_SCALE (1) #define VIOS_WRITE_SCALE (1) #define VIOS_SYNC_SCALE (2) #define VIOS_ASYNC_SCALE (5) #define VIOS_PRIO_SCALE (5) struct fiops_rb_root { struct rb_root rb; struct rb_node *left; unsigned count; u64 min_vios; }; #define FIOPS_RB_ROOT (struct fiops_rb_root) { .rb = RB_ROOT} enum wl_prio_t { IDLE_WORKLOAD = 0, BE_WORKLOAD = 1, RT_WORKLOAD = 2, FIOPS_PRIO_NR, }; struct fiops_data { struct request_queue *queue; struct fiops_rb_root service_tree[FIOPS_PRIO_NR]; unsigned int busy_queues; unsigned int in_flight[2]; struct work_struct unplug_work; unsigned int read_scale; unsigned int write_scale; unsigned int sync_scale; unsigned int async_scale; }; struct fiops_ioc { struct io_cq icq; unsigned int flags; struct fiops_data *fiopsd; struct rb_node rb_node; u64 vios; /* key in service_tree */ struct fiops_rb_root *service_tree; unsigned int in_flight; struct rb_root sort_list; struct list_head fifo; pid_t pid; unsigned short ioprio; enum wl_prio_t wl_type; }; #define ioc_service_tree(ioc) (&((ioc)->fiopsd->service_tree[(ioc)->wl_type])) #define RQ_CIC(rq) icq_to_cic((rq)->elv.icq) enum ioc_state_flags { FIOPS_IOC_FLAG_on_rr = 0, /* on round-robin busy list */ FIOPS_IOC_FLAG_prio_changed, /* task priority has changed */ }; #define FIOPS_IOC_FNS(name) \ static inline void fiops_mark_ioc_##name(struct fiops_ioc *ioc) \ { \ ioc->flags |= (1 << FIOPS_IOC_FLAG_##name); \ } \ static inline void fiops_clear_ioc_##name(struct fiops_ioc *ioc) \ { \ ioc->flags &= ~(1 << FIOPS_IOC_FLAG_##name); \ } \ static inline int 
fiops_ioc_##name(const struct fiops_ioc *ioc) \ { \ return ((ioc)->flags & (1 << FIOPS_IOC_FLAG_##name)) != 0; \ } FIOPS_IOC_FNS(on_rr); FIOPS_IOC_FNS(prio_changed); #undef FIOPS_IOC_FNS #define fiops_log_ioc(fiopsd, ioc, fmt, args...) \ blk_add_trace_msg((fiopsd)->queue, "ioc%d " fmt, (ioc)->pid, ##args) #define fiops_log(fiopsd, fmt, args...) \ blk_add_trace_msg((fiopsd)->queue, "fiops " fmt, ##args) enum wl_prio_t fiops_wl_type(short prio_class) { if (prio_class == IOPRIO_CLASS_RT) return RT_WORKLOAD; if (prio_class == IOPRIO_CLASS_BE) return BE_WORKLOAD; return IDLE_WORKLOAD; } static inline struct fiops_ioc *icq_to_cic(struct io_cq *icq) { /* cic->icq is the first member, %NULL will convert to %NULL */ return container_of(icq, struct fiops_ioc, icq); } static inline struct fiops_ioc *fiops_cic_lookup(struct fiops_data *fiopsd, struct io_context *ioc) { if (ioc) return icq_to_cic(ioc_lookup_icq(ioc, fiopsd->queue)); return NULL; } /* * The below is leftmost cache rbtree addon */ static struct fiops_ioc *fiops_rb_first(struct fiops_rb_root *root) { /* Service tree is empty */ if (!root->count) return NULL; if (!root->left) root->left = rb_first(&root->rb); if (root->left) return rb_entry(root->left, struct fiops_ioc, rb_node); return NULL; } static void rb_erase_init(struct rb_node *n, struct rb_root *root) { rb_erase(n, root); RB_CLEAR_NODE(n); } static void fiops_rb_erase(struct rb_node *n, struct fiops_rb_root *root) { if (root->left == n) root->left = NULL; rb_erase_init(n, &root->rb); --root->count; } static inline u64 max_vios(u64 min_vios, u64 vios) { s64 delta = (s64)(vios - min_vios); if (delta > 0) min_vios = vios; return min_vios; } static void fiops_update_min_vios(struct fiops_rb_root *service_tree) { struct fiops_ioc *ioc; ioc = fiops_rb_first(service_tree); if (!ioc) return; service_tree->min_vios = max_vios(service_tree->min_vios, ioc->vios); } /* * The fiopsd->service_trees holds all pending fiops_ioc's that have * requests waiting to be 
processed. It is sorted in the order that * we will service the queues. */ static void fiops_service_tree_add(struct fiops_data *fiopsd, struct fiops_ioc *ioc) { struct rb_node **p, *parent; struct fiops_ioc *__ioc; struct fiops_rb_root *service_tree = ioc_service_tree(ioc); u64 vios; int left; /* New added IOC */ if (RB_EMPTY_NODE(&ioc->rb_node)) { if (ioc->in_flight > 0) vios = ioc->vios; else vios = max_vios(service_tree->min_vios, ioc->vios); } else { vios = ioc->vios; /* ioc->service_tree might not equal to service_tree */ fiops_rb_erase(&ioc->rb_node, ioc->service_tree); ioc->service_tree = NULL; } fiops_log_ioc(fiopsd, ioc, "service tree add, vios %lld", vios); left = 1; parent = NULL; ioc->service_tree = service_tree; p = &service_tree->rb.rb_node; while (*p) { struct rb_node **n; parent = *p; __ioc = rb_entry(parent, struct fiops_ioc, rb_node); /* * sort by key, that represents service time. */ if (vios < __ioc->vios) n = &(*p)->rb_left; else { n = &(*p)->rb_right; left = 0; } p = n; } if (left) service_tree->left = &ioc->rb_node; ioc->vios = vios; rb_link_node(&ioc->rb_node, parent, p); rb_insert_color(&ioc->rb_node, &service_tree->rb); service_tree->count++; fiops_update_min_vios(service_tree); } /* * Update ioc's position in the service tree. */ static void fiops_resort_rr_list(struct fiops_data *fiopsd, struct fiops_ioc *ioc) { /* * Resorting requires the ioc to be on the RR list already. */ if (fiops_ioc_on_rr(ioc)) fiops_service_tree_add(fiopsd, ioc); } /* * add to busy list of queues for service, trying to be fair in ordering * the pending list according to last request service */ static void fiops_add_ioc_rr(struct fiops_data *fiopsd, struct fiops_ioc *ioc) { BUG_ON(fiops_ioc_on_rr(ioc)); fiops_mark_ioc_on_rr(ioc); fiopsd->busy_queues++; fiops_resort_rr_list(fiopsd, ioc); } /* * Called when the ioc no longer has requests pending, remove it from * the service tree. 
*/ static void fiops_del_ioc_rr(struct fiops_data *fiopsd, struct fiops_ioc *ioc) { BUG_ON(!fiops_ioc_on_rr(ioc)); fiops_clear_ioc_on_rr(ioc); if (!RB_EMPTY_NODE(&ioc->rb_node)) { fiops_rb_erase(&ioc->rb_node, ioc->service_tree); ioc->service_tree = NULL; } BUG_ON(!fiopsd->busy_queues); fiopsd->busy_queues--; } /* * rb tree support functions */ static void fiops_del_rq_rb(struct request *rq) { struct fiops_ioc *ioc = RQ_CIC(rq); elv_rb_del(&ioc->sort_list, rq); } static void fiops_add_rq_rb(struct request *rq) { struct fiops_ioc *ioc = RQ_CIC(rq); struct fiops_data *fiopsd = ioc->fiopsd; elv_rb_add(&ioc->sort_list, rq); if (!fiops_ioc_on_rr(ioc)) fiops_add_ioc_rr(fiopsd, ioc); } static void fiops_reposition_rq_rb(struct fiops_ioc *ioc, struct request *rq) { elv_rb_del(&ioc->sort_list, rq); fiops_add_rq_rb(rq); } static void fiops_remove_request(struct request *rq) { list_del_init(&rq->queuelist); fiops_del_rq_rb(rq); } static u64 fiops_scaled_vios(struct fiops_data *fiopsd, struct fiops_ioc *ioc, struct request *rq) { int vios = VIOS_SCALE; if (rq_data_dir(rq) == WRITE) vios = vios * fiopsd->write_scale / fiopsd->read_scale; if (!rq_is_sync(rq)) vios = vios * fiopsd->async_scale / fiopsd->sync_scale; vios += vios * (ioc->ioprio - IOPRIO_NORM) / VIOS_PRIO_SCALE; return vios; } /* return vios dispatched */ static u64 fiops_dispatch_request(struct fiops_data *fiopsd, struct fiops_ioc *ioc) { struct request *rq; struct request_queue *q = fiopsd->queue; rq = rq_entry_fifo(ioc->fifo.next); fiops_remove_request(rq); elv_dispatch_add_tail(q, rq); fiopsd->in_flight[rq_is_sync(rq)]++; ioc->in_flight++; return fiops_scaled_vios(fiopsd, ioc, rq); } static int fiops_forced_dispatch(struct fiops_data *fiopsd) { struct fiops_ioc *ioc; int dispatched = 0; int i; for (i = RT_WORKLOAD; i >= IDLE_WORKLOAD; i--) { while (!RB_EMPTY_ROOT(&fiopsd->service_tree[i].rb)) { ioc = fiops_rb_first(&fiopsd->service_tree[i]); while (!list_empty(&ioc->fifo)) { fiops_dispatch_request(fiopsd, ioc); 
dispatched++; } if (fiops_ioc_on_rr(ioc)) fiops_del_ioc_rr(fiopsd, ioc); } } return dispatched; } static struct fiops_ioc *fiops_select_ioc(struct fiops_data *fiopsd) { struct fiops_ioc *ioc; struct fiops_rb_root *service_tree = NULL; int i; struct request *rq; for (i = RT_WORKLOAD; i >= IDLE_WORKLOAD; i--) { if (!RB_EMPTY_ROOT(&fiopsd->service_tree[i].rb)) { service_tree = &fiopsd->service_tree[i]; break; } } if (!service_tree) return NULL; ioc = fiops_rb_first(service_tree); rq = rq_entry_fifo(ioc->fifo.next); /* * we are the only async task and sync requests are in flight, delay a * moment. If there are other tasks coming, sync tasks have no chance * to be starved, don't delay */ if (!rq_is_sync(rq) && fiopsd->in_flight[1] != 0 && service_tree->count == 1) { fiops_log_ioc(fiopsd, ioc, "postpone async, in_flight async %d sync %d", fiopsd->in_flight[0], fiopsd->in_flight[1]); return NULL; } return ioc; } static void fiops_charge_vios(struct fiops_data *fiopsd, struct fiops_ioc *ioc, u64 vios) { struct fiops_rb_root *service_tree = ioc->service_tree; ioc->vios += vios; fiops_log_ioc(fiopsd, ioc, "charge vios %lld, new vios %lld", vios, ioc->vios); if (RB_EMPTY_ROOT(&ioc->sort_list)) fiops_del_ioc_rr(fiopsd, ioc); else fiops_resort_rr_list(fiopsd, ioc); fiops_update_min_vios(service_tree); } static int fiops_dispatch_requests(struct request_queue *q, int force) { struct fiops_data *fiopsd = q->elevator->elevator_data; struct fiops_ioc *ioc; u64 vios; if (unlikely(force)) return fiops_forced_dispatch(fiopsd); ioc = fiops_select_ioc(fiopsd); if (!ioc) return 0; vios = fiops_dispatch_request(fiopsd, ioc); fiops_charge_vios(fiopsd, ioc, vios); return 1; } static void fiops_init_prio_data(struct fiops_ioc *cic) { struct task_struct *tsk = current; struct io_context *ioc = cic->icq.ioc; int ioprio_class; if (!fiops_ioc_prio_changed(cic)) return; ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio); switch (ioprio_class) { default: printk(KERN_ERR "fiops: bad prio %x\n", 
ioprio_class); case IOPRIO_CLASS_NONE: /* * no prio set, inherit CPU scheduling settings */ cic->ioprio = task_nice_ioprio(tsk); cic->wl_type = fiops_wl_type(task_nice_ioclass(tsk)); break; case IOPRIO_CLASS_RT: cic->ioprio = task_ioprio(ioc); cic->wl_type = fiops_wl_type(IOPRIO_CLASS_RT); break; case IOPRIO_CLASS_BE: cic->ioprio = task_ioprio(ioc); cic->wl_type = fiops_wl_type(IOPRIO_CLASS_BE); break; case IOPRIO_CLASS_IDLE: cic->wl_type = fiops_wl_type(IOPRIO_CLASS_IDLE); cic->ioprio = 7; break; } fiops_clear_ioc_prio_changed(cic); } static void fiops_insert_request(struct request_queue *q, struct request *rq) { struct fiops_ioc *ioc = RQ_CIC(rq); fiops_init_prio_data(ioc); list_add_tail(&rq->queuelist, &ioc->fifo); fiops_add_rq_rb(rq); } /* * scheduler run of queue, if there are requests pending and no one in the * driver that will restart queueing */ static inline void fiops_schedule_dispatch(struct fiops_data *fiopsd) { if (fiopsd->busy_queues) kblockd_schedule_work(fiopsd->queue, &fiopsd->unplug_work); } static void fiops_completed_request(struct request_queue *q, struct request *rq) { struct fiops_data *fiopsd = q->elevator->elevator_data; struct fiops_ioc *ioc = RQ_CIC(rq); fiopsd->in_flight[rq_is_sync(rq)]--; ioc->in_flight--; fiops_log_ioc(fiopsd, ioc, "in_flight %d, busy queues %d", ioc->in_flight, fiopsd->busy_queues); if (fiopsd->in_flight[0] + fiopsd->in_flight[1] == 0) fiops_schedule_dispatch(fiopsd); } static struct request * fiops_find_rq_fmerge(struct fiops_data *fiopsd, struct bio *bio) { struct task_struct *tsk = current; struct fiops_ioc *cic; cic = fiops_cic_lookup(fiopsd, tsk->io_context); if (cic) { sector_t sector = bio->bi_sector + bio_sectors(bio); return elv_rb_find(&cic->sort_list, sector); } return NULL; } static int fiops_merge(struct request_queue *q, struct request **req, struct bio *bio) { struct fiops_data *fiopsd = q->elevator->elevator_data; struct request *__rq; __rq = fiops_find_rq_fmerge(fiopsd, bio); if (__rq && 
elv_rq_merge_ok(__rq, bio)) { *req = __rq; return ELEVATOR_FRONT_MERGE; } return ELEVATOR_NO_MERGE; } static void fiops_merged_request(struct request_queue *q, struct request *req, int type) { if (type == ELEVATOR_FRONT_MERGE) { struct fiops_ioc *ioc = RQ_CIC(req); fiops_reposition_rq_rb(ioc, req); } } static void fiops_merged_requests(struct request_queue *q, struct request *rq, struct request *next) { struct fiops_ioc *ioc = RQ_CIC(rq); struct fiops_data *fiopsd = q->elevator->elevator_data; fiops_remove_request(next); ioc = RQ_CIC(next); /* * all requests of this task are merged to other tasks, delete it * from the service tree. */ if (fiops_ioc_on_rr(ioc) && RB_EMPTY_ROOT(&ioc->sort_list)) fiops_del_ioc_rr(fiopsd, ioc); } static int fiops_allow_merge(struct request_queue *q, struct request *rq, struct bio *bio) { struct fiops_data *fiopsd = q->elevator->elevator_data; struct fiops_ioc *cic; /* * Lookup the ioc that this bio will be queued with. Allow * merge only if rq is queued there. 
*/ cic = fiops_cic_lookup(fiopsd, current->io_context); return cic == RQ_CIC(rq); } static void fiops_exit_queue(struct elevator_queue *e) { struct fiops_data *fiopsd = e->elevator_data; cancel_work_sync(&fiopsd->unplug_work); kfree(fiopsd); } static void fiops_kick_queue(struct work_struct *work) { struct fiops_data *fiopsd = container_of(work, struct fiops_data, unplug_work); struct request_queue *q = fiopsd->queue; spin_lock_irq(q->queue_lock); __blk_run_queue(q); spin_unlock_irq(q->queue_lock); } static void *fiops_init_queue(struct request_queue *q) { struct fiops_data *fiopsd; int i; fiopsd = kzalloc_node(sizeof(*fiopsd), GFP_KERNEL, q->node); if (!fiopsd) return NULL; fiopsd->queue = q; for (i = IDLE_WORKLOAD; i <= RT_WORKLOAD; i++) fiopsd->service_tree[i] = FIOPS_RB_ROOT; INIT_WORK(&fiopsd->unplug_work, fiops_kick_queue); fiopsd->read_scale = VIOS_READ_SCALE; fiopsd->write_scale = VIOS_WRITE_SCALE; fiopsd->sync_scale = VIOS_SYNC_SCALE; fiopsd->async_scale = VIOS_ASYNC_SCALE; return fiopsd; } static void fiops_init_icq(struct io_cq *icq) { struct fiops_data *fiopsd = icq->q->elevator->elevator_data; struct fiops_ioc *ioc = icq_to_cic(icq); RB_CLEAR_NODE(&ioc->rb_node); INIT_LIST_HEAD(&ioc->fifo); ioc->sort_list = RB_ROOT; ioc->fiopsd = fiopsd; ioc->pid = current->pid; fiops_mark_ioc_prio_changed(ioc); } /* * sysfs parts below --> */ static ssize_t fiops_var_show(unsigned int var, char *page) { return sprintf(page, "%d\n", var); } static ssize_t fiops_var_store(unsigned int *var, const char *page, size_t count) { char *p = (char *) page; *var = simple_strtoul(p, &p, 10); return count; } #define SHOW_FUNCTION(__FUNC, __VAR) \ static ssize_t __FUNC(struct elevator_queue *e, char *page) \ { \ struct fiops_data *fiopsd = e->elevator_data; \ return fiops_var_show(__VAR, (page)); \ } SHOW_FUNCTION(fiops_read_scale_show, fiopsd->read_scale); SHOW_FUNCTION(fiops_write_scale_show, fiopsd->write_scale); SHOW_FUNCTION(fiops_sync_scale_show, fiopsd->sync_scale); 
SHOW_FUNCTION(fiops_async_scale_show, fiopsd->async_scale); #undef SHOW_FUNCTION #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ { \ struct fiops_data *fiopsd = e->elevator_data; \ unsigned int __data; \ int ret = fiops_var_store(&__data, (page), count); \ if (__data < (MIN)) \ __data = (MIN); \ else if (__data > (MAX)) \ __data = (MAX); \ *(__PTR) = __data; \ return ret; \ } STORE_FUNCTION(fiops_read_scale_store, &fiopsd->read_scale, 1, 100); STORE_FUNCTION(fiops_write_scale_store, &fiopsd->write_scale, 1, 100); STORE_FUNCTION(fiops_sync_scale_store, &fiopsd->sync_scale, 1, 100); STORE_FUNCTION(fiops_async_scale_store, &fiopsd->async_scale, 1, 100); #undef STORE_FUNCTION #define FIOPS_ATTR(name) \ __ATTR(name, S_IRUGO|S_IWUSR, fiops_##name##_show, fiops_##name##_store) static struct elv_fs_entry fiops_attrs[] = { FIOPS_ATTR(read_scale), FIOPS_ATTR(write_scale), FIOPS_ATTR(sync_scale), FIOPS_ATTR(async_scale), __ATTR_NULL }; static struct elevator_type iosched_fiops = { .ops = { .elevator_merge_fn = fiops_merge, .elevator_merged_fn = fiops_merged_request, .elevator_merge_req_fn = fiops_merged_requests, .elevator_allow_merge_fn = fiops_allow_merge, .elevator_dispatch_fn = fiops_dispatch_requests, .elevator_add_req_fn = fiops_insert_request, .elevator_completed_req_fn = fiops_completed_request, .elevator_former_req_fn = elv_rb_former_request, .elevator_latter_req_fn = elv_rb_latter_request, .elevator_init_icq_fn = fiops_init_icq, .elevator_init_fn = fiops_init_queue, .elevator_exit_fn = fiops_exit_queue, }, .icq_size = sizeof(struct fiops_ioc), .icq_align = __alignof__(struct fiops_ioc), .elevator_attrs = fiops_attrs, .elevator_name = "fiops", .elevator_owner = THIS_MODULE, }; static int __init fiops_init(void) { return elv_register(&iosched_fiops); } static void __exit fiops_exit(void) { elv_unregister(&iosched_fiops); } module_init(fiops_init); module_exit(fiops_exit); 
MODULE_AUTHOR("Jens Axboe, Shaohua Li <shli@kernel.org>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("IOPS based IO scheduler");
gpl-2.0
houzhenggang/OpenWRT-1
package/kernel/lantiq/ltq-ptm/src/ifxmips_ptm_ar9.c
564
10820
/****************************************************************************** ** ** FILE NAME : ifxmips_ptm_ar9.c ** PROJECT : UEIP ** MODULES : PTM ** ** DATE : 7 Jul 2009 ** AUTHOR : Xu Liang ** DESCRIPTION : PTM driver common source file (core functions) ** COPYRIGHT : Copyright (c) 2006 ** Infineon Technologies AG ** Am Campeon 1-12, 85579 Neubiberg, Germany ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU General Public License as published by ** the Free Software Foundation; either version 2 of the License, or ** (at your option) any later version. ** ** HISTORY ** $Date $Author $Comment ** 07 JUL 2009 Xu Liang Init Version *******************************************************************************/ /* * #################################### * Head File * #################################### */ /* * Common Head File */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/version.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/proc_fs.h> #include <linux/init.h> #include <linux/ioctl.h> #include <asm/delay.h> /* * Chip Specific Head File */ #include "ifxmips_ptm_adsl.h" #include "ifxmips_ptm_fw_ar9.h" #include <lantiq_soc.h> /* * #################################### * Definition * #################################### */ /* * EMA Settings */ #define EMA_CMD_BUF_LEN 0x0040 #define EMA_CMD_BASE_ADDR (0x00001B80 << 2) #define EMA_DATA_BUF_LEN 0x0100 #define EMA_DATA_BASE_ADDR (0x00001C00 << 2) #define EMA_WRITE_BURST 0x2 #define EMA_READ_BURST 0x2 /* * #################################### * Declaration * #################################### */ /* * Hardware Init/Uninit Functions */ static inline void init_pmu(void); static inline void uninit_pmu(void); static inline void reset_ppe(void); static inline void init_ema(void); static inline void init_mailbox(void); static inline void init_atm_tc(void); static inline void clear_share_buffer(void); /* * 
#################################### * Local Variable * #################################### */ /* * #################################### * Local Function * #################################### */ #define IFX_PMU_MODULE_PPE_SLL01 BIT(19) #define IFX_PMU_MODULE_PPE_TC BIT(21) #define IFX_PMU_MODULE_PPE_EMA BIT(22) #define IFX_PMU_MODULE_PPE_QSB BIT(18) #define IFX_PMU_MODULE_TPE BIT(13) #define IFX_PMU_MODULE_DSL_DFE BIT(9) static inline void init_pmu(void) { ltq_pmu_enable(IFX_PMU_MODULE_PPE_SLL01 | IFX_PMU_MODULE_PPE_TC | IFX_PMU_MODULE_PPE_EMA | IFX_PMU_MODULE_TPE | IFX_PMU_MODULE_DSL_DFE); } static inline void uninit_pmu(void) { ltq_pmu_disable(IFX_PMU_MODULE_PPE_SLL01 | IFX_PMU_MODULE_PPE_TC | IFX_PMU_MODULE_PPE_EMA | IFX_PMU_MODULE_TPE | IFX_PMU_MODULE_DSL_DFE); } static inline void reset_ppe(void) { #ifdef MODULE // reset PPE // ifx_rcu_rst(IFX_RCU_DOMAIN_PPE, IFX_RCU_MODULE_PTM); #endif } static inline void init_ema(void) { // Configure share buffer master selection IFX_REG_W32(1, SB_MST_PRI0); IFX_REG_W32(1, SB_MST_PRI1); // EMA Settings IFX_REG_W32((EMA_CMD_BUF_LEN << 16) | (EMA_CMD_BASE_ADDR >> 2), EMA_CMDCFG); IFX_REG_W32((EMA_DATA_BUF_LEN << 16) | (EMA_DATA_BASE_ADDR >> 2), EMA_DATACFG); IFX_REG_W32(0x000000FF, EMA_IER); IFX_REG_W32(EMA_READ_BURST | (EMA_WRITE_BURST << 2), EMA_CFG); } static inline void init_mailbox(void) { IFX_REG_W32(0xFFFFFFFF, MBOX_IGU1_ISRC); IFX_REG_W32(0x00000000, MBOX_IGU1_IER); IFX_REG_W32(0xFFFFFFFF, MBOX_IGU3_ISRC); IFX_REG_W32(0x00000000, MBOX_IGU3_IER); } static inline void init_atm_tc(void) { IFX_REG_W32(0x0, RFBI_CFG); IFX_REG_W32(0x1800, SFSM_DBA0); IFX_REG_W32(0x1921, SFSM_DBA1); IFX_REG_W32(0x1A42, SFSM_CBA0); IFX_REG_W32(0x1A53, SFSM_CBA1); IFX_REG_W32(0x14011, SFSM_CFG0); IFX_REG_W32(0x14011, SFSM_CFG1); IFX_REG_W32(0x1000, FFSM_DBA0); IFX_REG_W32(0x1700, FFSM_DBA1); IFX_REG_W32(0x3000C, FFSM_CFG0); IFX_REG_W32(0x3000C, FFSM_CFG1); IFX_REG_W32(0xF0D10000, FFSM_IDLE_HEAD_BC0); IFX_REG_W32(0xF0D10000, 
FFSM_IDLE_HEAD_BC1); /* * 0. Backup port2 value to temp * 1. Disable CPU port2 in switch (link and learning) * 2. wait for a while * 3. Configure DM register and counter * 4. restore temp to CPU port2 in switch * This code will cause network to stop working if there are heavy * traffic during bootup. This part should be moved to switch and use * the same code as ATM */ { int i; u32 temp; temp = IFX_REG_R32(SW_P2_CTL); IFX_REG_W32(0x40020000, SW_P2_CTL); for (i = 0; i < 200; i++) udelay(2000); IFX_REG_W32(0x00007028, DM_RXCFG); IFX_REG_W32(0x00007028, DS_RXCFG); IFX_REG_W32(0x00001100, DM_RXDB); IFX_REG_W32(0x00001100, DS_RXDB); IFX_REG_W32(0x00001600, DM_RXCB); IFX_REG_W32(0x00001600, DS_RXCB); /* * For dynamic, must reset these counters, * For once initialization, don't need to reset these counters */ IFX_REG_W32(0x0, DM_RXPGCNT); IFX_REG_W32(0x0, DS_RXPGCNT); IFX_REG_W32(0x0, DM_RXPKTCNT); IFX_REG_W32_MASK(0, 0x80000000, DM_RXCFG); IFX_REG_W32_MASK(0, 0x8000, DS_RXCFG); udelay(2000); IFX_REG_W32(temp, SW_P2_CTL); udelay(2000); } } static inline void clear_share_buffer(void) { volatile u32 *p = SB_RAM0_ADDR(0); unsigned int i; for ( i = 0; i < SB_RAM0_DWLEN + SB_RAM1_DWLEN + SB_RAM2_DWLEN + SB_RAM3_DWLEN + SB_RAM4_DWLEN; i++ ) IFX_REG_W32(0, p++); } /* * Description: * Download PPE firmware binary code. 
* Input: * src --- u32 *, binary code buffer * dword_len --- unsigned int, binary code length in DWORD (32-bit) * Output: * int --- 0: Success * else: Error Code */ static inline int pp32_download_code(u32 *code_src, unsigned int code_dword_len, u32 *data_src, unsigned int data_dword_len) { volatile u32 *dest; if ( code_src == 0 || ((unsigned long)code_src & 0x03) != 0 || data_src == 0 || ((unsigned long)data_src & 0x03) != 0 ) return -1; if ( code_dword_len <= CDM_CODE_MEMORYn_DWLEN(0) ) IFX_REG_W32(0x00, CDM_CFG); else IFX_REG_W32(0x04, CDM_CFG); /* copy code */ dest = CDM_CODE_MEMORY(0, 0); while ( code_dword_len-- > 0 ) IFX_REG_W32(*code_src++, dest++); /* copy data */ dest = CDM_DATA_MEMORY(0, 0); while ( data_dword_len-- > 0 ) IFX_REG_W32(*data_src++, dest++); return 0; } /* * #################################### * Global Function * #################################### */ void ifx_ptm_get_fw_ver(unsigned int *major, unsigned int *minor) { ASSERT(major != NULL, "pointer is NULL"); ASSERT(minor != NULL, "pointer is NULL"); *major = FW_VER_ID->major; *minor = FW_VER_ID->minor; } void ifx_ptm_init_chip(void) { init_pmu(); reset_ppe(); init_ema(); init_mailbox(); init_atm_tc(); clear_share_buffer(); } void ifx_ptm_uninit_chip(void) { uninit_pmu(); } /* * Description: * Initialize and start up PP32. * Input: * none * Output: * int --- 0: Success * else: Error Code */ int ifx_pp32_start(int pp32) { int ret; /* download firmware */ ret = pp32_download_code(firmware_binary_code, sizeof(firmware_binary_code) / sizeof(*firmware_binary_code), firmware_binary_data, sizeof(firmware_binary_data) / sizeof(*firmware_binary_data)); if ( ret != 0 ) return ret; /* run PP32 */ IFX_REG_W32(DBG_CTRL_RESTART, PP32_DBG_CTRL(0)); /* idle for a while to let PP32 init itself */ udelay(10); return 0; } /* * Description: * Halt PP32. 
* Input: * none * Output: * none */ void ifx_pp32_stop(int pp32) { /* halt PP32 */ IFX_REG_W32(DBG_CTRL_STOP, PP32_DBG_CTRL(0)); } int ifx_ptm_proc_read_regs(char *page, char **start, off_t off, int count, int *eof, void *data) { int len = 0; len += sprintf(page + off + len, "EMA:\n"); len += sprintf(page + off + len, " SB_MST_PRI0 - 0x%08X, SB_MST_PRI1 - 0x%08X\n", IFX_REG_R32(SB_MST_PRI0), IFX_REG_R32(SB_MST_PRI1)); len += sprintf(page + off + len, " EMA_CMDCFG - 0x%08X, EMA_DATACFG - 0x%08X\n", IFX_REG_R32(EMA_CMDCFG), IFX_REG_R32(EMA_DATACFG)); len += sprintf(page + off + len, " EMA_IER - 0x%08X, EMA_CFG - 0x%08X\n", IFX_REG_R32(EMA_IER), IFX_REG_R32(EMA_CFG)); len += sprintf(page + off + len, "Mailbox:\n"); len += sprintf(page + off + len, " MBOX_IGU1_IER - 0x%08X, MBOX_IGU1_ISR - 0x%08X\n", IFX_REG_R32(MBOX_IGU1_IER), IFX_REG_R32(MBOX_IGU1_ISR)); len += sprintf(page + off + len, " MBOX_IGU3_IER - 0x%08X, MBOX_IGU3_ISR - 0x%08X\n", IFX_REG_R32(MBOX_IGU3_IER), IFX_REG_R32(MBOX_IGU3_ISR)); len += sprintf(page + off + len, "TC:\n"); len += sprintf(page + off + len, " RFBI_CFG - 0x%08X\n", IFX_REG_R32(RFBI_CFG)); len += sprintf(page + off + len, " SFSM_DBA0 - 0x%08X, SFSM_CBA0 - 0x%08X, SFSM_CFG0 - 0x%08X\n", IFX_REG_R32(SFSM_DBA0), IFX_REG_R32(SFSM_CBA0), IFX_REG_R32(SFSM_CFG0)); len += sprintf(page + off + len, " SFSM_DBA1 - 0x%08X, SFSM_CBA1 - 0x%08X, SFSM_CFG1 - 0x%08X\n", IFX_REG_R32(SFSM_DBA1), IFX_REG_R32(SFSM_CBA1), IFX_REG_R32(SFSM_CFG1)); len += sprintf(page + off + len, " FFSM_DBA0 - 0x%08X, FFSM_CFG0 - 0x%08X, IDLE_HEAD - 0x%08X\n", IFX_REG_R32(FFSM_DBA0), IFX_REG_R32(FFSM_CFG0), IFX_REG_R32(FFSM_IDLE_HEAD_BC0)); len += sprintf(page + off + len, " FFSM_DBA1 - 0x%08X, FFSM_CFG1 - 0x%08X, IDLE_HEAD - 0x%08X\n", IFX_REG_R32(FFSM_DBA1), IFX_REG_R32(FFSM_CFG1), IFX_REG_R32(FFSM_IDLE_HEAD_BC1)); len += sprintf(page + off + len, "DPlus:\n"); len += sprintf(page + off + len, " DM_RXDB - 0x%08X, DM_RXCB - 0x%08X, DM_RXCFG - 0x%08X\n", IFX_REG_R32(DM_RXDB), 
IFX_REG_R32(DM_RXCB), IFX_REG_R32(DM_RXCFG)); len += sprintf(page + off + len, " DM_RXPGCNT - 0x%08X, DM_RXPKTCNT - 0x%08X\n", IFX_REG_R32(DM_RXPGCNT), IFX_REG_R32(DM_RXPKTCNT)); len += sprintf(page + off + len, " DS_RXDB - 0x%08X, DS_RXCB - 0x%08X, DS_RXCFG - 0x%08X\n", IFX_REG_R32(DS_RXDB), IFX_REG_R32(DS_RXCB), IFX_REG_R32(DS_RXCFG)); len += sprintf(page + off + len, " DS_RXPGCNT - 0x%08X\n", IFX_REG_R32(DS_RXPGCNT)); *eof = 1; return len; }
gpl-2.0
mekinik232/ambipi
lib/freetype/src/type42/type42.c
820
1385
/***************************************************************************/ /* */ /* type42.c */ /* */ /* FreeType Type 42 driver component. */ /* */ /* Copyright 2002 by */ /* David Turner, Robert Wilhelm, and Werner Lemberg. */ /* */ /* This file is part of the FreeType project, and may only be used, */ /* modified, and distributed under the terms of the FreeType project */ /* license, LICENSE.TXT. By continuing to use, modify, or distribute */ /* this file you indicate that you have read the license and */ /* understand and accept it fully. */ /* */ /***************************************************************************/ #define FT_MAKE_OPTION_SINGLE_OBJECT #include <ft2build.h> #include "t42objs.c" #include "t42parse.c" #include "t42drivr.c" /* END */
gpl-2.0
fenggangwu/sffs
arch/powerpc/kernel/mce.c
820
9986
/* * Machine check exception handling. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright 2013 IBM Corporation * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com> */ #undef DEBUG #define pr_fmt(fmt) "mce: " fmt #include <linux/types.h> #include <linux/ptrace.h> #include <linux/percpu.h> #include <linux/export.h> #include <linux/irq_work.h> #include <asm/mce.h> static DEFINE_PER_CPU(int, mce_nest_count); static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event); /* Queue for delayed MCE events. 
*/ static DEFINE_PER_CPU(int, mce_queue_count); static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue); static void machine_check_process_queued_event(struct irq_work *work); struct irq_work mce_event_process_work = { .func = machine_check_process_queued_event, }; static void mce_set_error_info(struct machine_check_event *mce, struct mce_error_info *mce_err) { mce->error_type = mce_err->error_type; switch (mce_err->error_type) { case MCE_ERROR_TYPE_UE: mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type; break; case MCE_ERROR_TYPE_SLB: mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type; break; case MCE_ERROR_TYPE_ERAT: mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type; break; case MCE_ERROR_TYPE_TLB: mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type; break; case MCE_ERROR_TYPE_UNKNOWN: default: break; } } /* * Decode and save high level MCE information into per cpu buffer which * is an array of machine_check_event structure. */ void save_mce_event(struct pt_regs *regs, long handled, struct mce_error_info *mce_err, uint64_t nip, uint64_t addr) { uint64_t srr1; int index = __this_cpu_inc_return(mce_nest_count) - 1; struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]); /* * Return if we don't have enough space to log mce event. * mce_nest_count may go beyond MAX_MC_EVT but that's ok, * the check below will stop buffer overrun. */ if (index >= MAX_MC_EVT) return; /* Populate generic machine check info */ mce->version = MCE_V1; mce->srr0 = nip; mce->srr1 = regs->msr; mce->gpr3 = regs->gpr[3]; mce->in_use = 1; mce->initiator = MCE_INITIATOR_CPU; if (handled) mce->disposition = MCE_DISPOSITION_RECOVERED; else mce->disposition = MCE_DISPOSITION_NOT_RECOVERED; mce->severity = MCE_SEV_ERROR_SYNC; srr1 = regs->msr; /* * Populate the mce error_type and type-specific error_type. 
*/ mce_set_error_info(mce, mce_err); if (!addr) return; if (mce->error_type == MCE_ERROR_TYPE_TLB) { mce->u.tlb_error.effective_address_provided = true; mce->u.tlb_error.effective_address = addr; } else if (mce->error_type == MCE_ERROR_TYPE_SLB) { mce->u.slb_error.effective_address_provided = true; mce->u.slb_error.effective_address = addr; } else if (mce->error_type == MCE_ERROR_TYPE_ERAT) { mce->u.erat_error.effective_address_provided = true; mce->u.erat_error.effective_address = addr; } else if (mce->error_type == MCE_ERROR_TYPE_UE) { mce->u.ue_error.effective_address_provided = true; mce->u.ue_error.effective_address = addr; } return; } /* * get_mce_event: * mce Pointer to machine_check_event structure to be filled. * release Flag to indicate whether to free the event slot or not. * 0 <= do not release the mce event. Caller will invoke * release_mce_event() once event has been consumed. * 1 <= release the slot. * * return 1 = success * 0 = failure * * get_mce_event() will be called by platform specific machine check * handle routine and in KVM. * When we call get_mce_event(), we are still in interrupt context and * preemption will not be scheduled until ret_from_expect() routine * is called. */ int get_mce_event(struct machine_check_event *mce, bool release) { int index = __this_cpu_read(mce_nest_count) - 1; struct machine_check_event *mc_evt; int ret = 0; /* Sanity check */ if (index < 0) return ret; /* Check if we have MCE info to process. */ if (index < MAX_MC_EVT) { mc_evt = this_cpu_ptr(&mce_event[index]); /* Copy the event structure and release the original */ if (mce) *mce = *mc_evt; if (release) mc_evt->in_use = 0; ret = 1; } /* Decrement the count to free the slot. */ if (release) __this_cpu_dec(mce_nest_count); return ret; } void release_mce_event(void) { get_mce_event(NULL, true); } /* * Queue up the MCE event which then can be handled later. 
*/ void machine_check_queue_event(void) { int index; struct machine_check_event evt; if (!get_mce_event(&evt, MCE_EVENT_RELEASE)) return; index = __this_cpu_inc_return(mce_queue_count) - 1; /* If queue is full, just return for now. */ if (index >= MAX_MC_EVT) { __this_cpu_dec(mce_queue_count); return; } memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt)); /* Queue irq work to process this event later. */ irq_work_queue(&mce_event_process_work); } /* * process pending MCE event from the mce event queue. This function will be * called during syscall exit. */ static void machine_check_process_queued_event(struct irq_work *work) { int index; /* * For now just print it to console. * TODO: log this error event to FSP or nvram. */ while (__this_cpu_read(mce_queue_count) > 0) { index = __this_cpu_read(mce_queue_count) - 1; machine_check_print_event_info( this_cpu_ptr(&mce_event_queue[index])); __this_cpu_dec(mce_queue_count); } } void machine_check_print_event_info(struct machine_check_event *evt) { const char *level, *sevstr, *subtype; static const char *mc_ue_types[] = { "Indeterminate", "Instruction fetch", "Page table walk ifetch", "Load/Store", "Page table walk Load/Store", }; static const char *mc_slb_types[] = { "Indeterminate", "Parity", "Multihit", }; static const char *mc_erat_types[] = { "Indeterminate", "Parity", "Multihit", }; static const char *mc_tlb_types[] = { "Indeterminate", "Parity", "Multihit", }; /* Print things out */ if (evt->version != MCE_V1) { pr_err("Machine Check Exception, Unknown event version %d !\n", evt->version); return; } switch (evt->severity) { case MCE_SEV_NO_ERROR: level = KERN_INFO; sevstr = "Harmless"; break; case MCE_SEV_WARNING: level = KERN_WARNING; sevstr = ""; break; case MCE_SEV_ERROR_SYNC: level = KERN_ERR; sevstr = "Severe"; break; case MCE_SEV_FATAL: default: level = KERN_ERR; sevstr = "Fatal"; break; } printk("%s%s Machine check interrupt [%s]\n", level, sevstr, evt->disposition == MCE_DISPOSITION_RECOVERED ? 
"Recovered" : "[Not recovered"); printk("%s Initiator: %s\n", level, evt->initiator == MCE_INITIATOR_CPU ? "CPU" : "Unknown"); switch (evt->error_type) { case MCE_ERROR_TYPE_UE: subtype = evt->u.ue_error.ue_error_type < ARRAY_SIZE(mc_ue_types) ? mc_ue_types[evt->u.ue_error.ue_error_type] : "Unknown"; printk("%s Error type: UE [%s]\n", level, subtype); if (evt->u.ue_error.effective_address_provided) printk("%s Effective address: %016llx\n", level, evt->u.ue_error.effective_address); if (evt->u.ue_error.physical_address_provided) printk("%s Physial address: %016llx\n", level, evt->u.ue_error.physical_address); break; case MCE_ERROR_TYPE_SLB: subtype = evt->u.slb_error.slb_error_type < ARRAY_SIZE(mc_slb_types) ? mc_slb_types[evt->u.slb_error.slb_error_type] : "Unknown"; printk("%s Error type: SLB [%s]\n", level, subtype); if (evt->u.slb_error.effective_address_provided) printk("%s Effective address: %016llx\n", level, evt->u.slb_error.effective_address); break; case MCE_ERROR_TYPE_ERAT: subtype = evt->u.erat_error.erat_error_type < ARRAY_SIZE(mc_erat_types) ? mc_erat_types[evt->u.erat_error.erat_error_type] : "Unknown"; printk("%s Error type: ERAT [%s]\n", level, subtype); if (evt->u.erat_error.effective_address_provided) printk("%s Effective address: %016llx\n", level, evt->u.erat_error.effective_address); break; case MCE_ERROR_TYPE_TLB: subtype = evt->u.tlb_error.tlb_error_type < ARRAY_SIZE(mc_tlb_types) ? 
mc_tlb_types[evt->u.tlb_error.tlb_error_type] : "Unknown"; printk("%s Error type: TLB [%s]\n", level, subtype); if (evt->u.tlb_error.effective_address_provided) printk("%s Effective address: %016llx\n", level, evt->u.tlb_error.effective_address); break; default: case MCE_ERROR_TYPE_UNKNOWN: printk("%s Error type: Unknown\n", level); break; } } uint64_t get_mce_fault_addr(struct machine_check_event *evt) { switch (evt->error_type) { case MCE_ERROR_TYPE_UE: if (evt->u.ue_error.effective_address_provided) return evt->u.ue_error.effective_address; break; case MCE_ERROR_TYPE_SLB: if (evt->u.slb_error.effective_address_provided) return evt->u.slb_error.effective_address; break; case MCE_ERROR_TYPE_ERAT: if (evt->u.erat_error.effective_address_provided) return evt->u.erat_error.effective_address; break; case MCE_ERROR_TYPE_TLB: if (evt->u.tlb_error.effective_address_provided) return evt->u.tlb_error.effective_address; break; default: case MCE_ERROR_TYPE_UNKNOWN: break; } return 0; } EXPORT_SYMBOL(get_mce_fault_addr);
gpl-2.0
SlimRoms/kernel_asus_flo
drivers/gpu/drm/radeon/atombios_crtc.c
2868
55308
/* * Copyright 2007-8 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Dave Airlie * Alex Deucher */ #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> #include <drm/radeon_drm.h> #include <drm/drm_fixed.h> #include "radeon.h" #include "atom.h" #include "atom-bits.h" static void atombios_overscan_setup(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); SET_CRTC_OVERSCAN_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan); int a1, a2; memset(&args, 0, sizeof(args)); args.ucCRTC = radeon_crtc->crtc_id; switch (radeon_crtc->rmx_type) { case RMX_CENTER: args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2); args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2); args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2); args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2); break; case RMX_ASPECT: a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; if (a1 > a2) { args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2); args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2); } else if (a2 > a1) { args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); } break; case RMX_FULL: default: args.usOverscanRight = cpu_to_le16(radeon_crtc->h_border); args.usOverscanLeft = cpu_to_le16(radeon_crtc->h_border); args.usOverscanBottom = cpu_to_le16(radeon_crtc->v_border); args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border); break; } atom_execute_table(rdev->mode_info.atom_context, index, 
(uint32_t *)&args); } static void atombios_scaler_setup(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ENABLE_SCALER_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, EnableScaler); /* fixme - fill in enc_priv for atom dac */ enum radeon_tv_std tv_std = TV_STD_NTSC; bool is_tv = false, is_cv = false; struct drm_encoder *encoder; if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id) return; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { /* find tv std */ if (encoder->crtc == crtc) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) { struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv; tv_std = tv_dac->tv_std; is_tv = true; } } } memset(&args, 0, sizeof(args)); args.ucScaler = radeon_crtc->crtc_id; if (is_tv) { switch (tv_std) { case TV_STD_NTSC: default: args.ucTVStandard = ATOM_TV_NTSC; break; case TV_STD_PAL: args.ucTVStandard = ATOM_TV_PAL; break; case TV_STD_PAL_M: args.ucTVStandard = ATOM_TV_PALM; break; case TV_STD_PAL_60: args.ucTVStandard = ATOM_TV_PAL60; break; case TV_STD_NTSC_J: args.ucTVStandard = ATOM_TV_NTSCJ; break; case TV_STD_SCART_PAL: args.ucTVStandard = ATOM_TV_PAL; /* ??? 
*/ break; case TV_STD_SECAM: args.ucTVStandard = ATOM_TV_SECAM; break; case TV_STD_PAL_CN: args.ucTVStandard = ATOM_TV_PALCN; break; } args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; } else if (is_cv) { args.ucTVStandard = ATOM_TV_CV; args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; } else { switch (radeon_crtc->rmx_type) { case RMX_FULL: args.ucEnable = ATOM_SCALER_EXPANSION; break; case RMX_CENTER: args.ucEnable = ATOM_SCALER_CENTER; break; case RMX_ASPECT: args.ucEnable = ATOM_SCALER_EXPANSION; break; default: if (ASIC_IS_AVIVO(rdev)) args.ucEnable = ATOM_SCALER_DISABLE; else args.ucEnable = ATOM_SCALER_CENTER; break; } } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); if ((is_tv || is_cv) && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_R580) { atom_rv515_force_tv_scaler(rdev, radeon_crtc); } } static void atombios_lock_crtc(struct drm_crtc *crtc, int lock) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; int index = GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters); ENABLE_CRTC_PS_ALLOCATION args; memset(&args, 0, sizeof(args)); args.ucCRTC = radeon_crtc->crtc_id; args.ucEnable = lock; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } static void atombios_enable_crtc(struct drm_crtc *crtc, int state) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC); ENABLE_CRTC_PS_ALLOCATION args; memset(&args, 0, sizeof(args)); args.ucCRTC = radeon_crtc->crtc_id; args.ucEnable = state; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = 
dev->dev_private; int index = GetIndexIntoMasterTable(COMMAND, EnableCRTCMemReq); ENABLE_CRTC_PS_ALLOCATION args; memset(&args, 0, sizeof(args)); args.ucCRTC = radeon_crtc->crtc_id; args.ucEnable = state; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } static void atombios_blank_crtc(struct drm_crtc *crtc, int state) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC); BLANK_CRTC_PS_ALLOCATION args; memset(&args, 0, sizeof(args)); args.ucCRTC = radeon_crtc->crtc_id; args.ucBlanking = state; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } static void atombios_powergate_crtc(struct drm_crtc *crtc, int state) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; memset(&args, 0, sizeof(args)); args.ucDispPipeId = radeon_crtc->crtc_id; args.ucEnable = state; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) { struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); switch (mode) { case DRM_MODE_DPMS_ON: radeon_crtc->enabled = true; /* adjust pm to dpms changes BEFORE enabling crtcs */ radeon_pm_compute_clocks(rdev); /* disable crtc pair power gating before programming */ if (ASIC_IS_DCE6(rdev)) atombios_powergate_crtc(crtc, ATOM_DISABLE); atombios_enable_crtc(crtc, ATOM_ENABLE); if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); atombios_blank_crtc(crtc, ATOM_DISABLE); drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); radeon_crtc_load_lut(crtc); break; case 
DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); if (radeon_crtc->enabled) atombios_blank_crtc(crtc, ATOM_ENABLE); if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); atombios_enable_crtc(crtc, ATOM_DISABLE); radeon_crtc->enabled = false; /* power gating is per-pair */ if (ASIC_IS_DCE6(rdev)) { struct drm_crtc *other_crtc; struct radeon_crtc *other_radeon_crtc; list_for_each_entry(other_crtc, &rdev->ddev->mode_config.crtc_list, head) { other_radeon_crtc = to_radeon_crtc(other_crtc); if (((radeon_crtc->crtc_id == 0) && (other_radeon_crtc->crtc_id == 1)) || ((radeon_crtc->crtc_id == 1) && (other_radeon_crtc->crtc_id == 0)) || ((radeon_crtc->crtc_id == 2) && (other_radeon_crtc->crtc_id == 3)) || ((radeon_crtc->crtc_id == 3) && (other_radeon_crtc->crtc_id == 2)) || ((radeon_crtc->crtc_id == 4) && (other_radeon_crtc->crtc_id == 5)) || ((radeon_crtc->crtc_id == 5) && (other_radeon_crtc->crtc_id == 4))) { /* if both crtcs in the pair are off, enable power gating */ if (other_radeon_crtc->enabled == false) atombios_powergate_crtc(crtc, ATOM_ENABLE); break; } } } /* adjust pm to dpms changes AFTER disabling crtcs */ radeon_pm_compute_clocks(rdev); break; } } static void atombios_set_crtc_dtd_timing(struct drm_crtc *crtc, struct drm_display_mode *mode) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; SET_CRTC_USING_DTD_TIMING_PARAMETERS args; int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming); u16 misc = 0; memset(&args, 0, sizeof(args)); args.usH_Size = cpu_to_le16(mode->crtc_hdisplay - (radeon_crtc->h_border * 2)); args.usH_Blanking_Time = cpu_to_le16(mode->crtc_hblank_end - mode->crtc_hdisplay + (radeon_crtc->h_border * 2)); args.usV_Size = cpu_to_le16(mode->crtc_vdisplay - (radeon_crtc->v_border * 2)); args.usV_Blanking_Time = 
cpu_to_le16(mode->crtc_vblank_end - mode->crtc_vdisplay + (radeon_crtc->v_border * 2)); args.usH_SyncOffset = cpu_to_le16(mode->crtc_hsync_start - mode->crtc_hdisplay + radeon_crtc->h_border); args.usH_SyncWidth = cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start); args.usV_SyncOffset = cpu_to_le16(mode->crtc_vsync_start - mode->crtc_vdisplay + radeon_crtc->v_border); args.usV_SyncWidth = cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start); args.ucH_Border = radeon_crtc->h_border; args.ucV_Border = radeon_crtc->v_border; if (mode->flags & DRM_MODE_FLAG_NVSYNC) misc |= ATOM_VSYNC_POLARITY; if (mode->flags & DRM_MODE_FLAG_NHSYNC) misc |= ATOM_HSYNC_POLARITY; if (mode->flags & DRM_MODE_FLAG_CSYNC) misc |= ATOM_COMPOSITESYNC; if (mode->flags & DRM_MODE_FLAG_INTERLACE) misc |= ATOM_INTERLACE; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) misc |= ATOM_DOUBLE_CLOCK_MODE; args.susModeMiscInfo.usAccess = cpu_to_le16(misc); args.ucCRTC = radeon_crtc->crtc_id; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } static void atombios_crtc_set_timing(struct drm_crtc *crtc, struct drm_display_mode *mode) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_Timing); u16 misc = 0; memset(&args, 0, sizeof(args)); args.usH_Total = cpu_to_le16(mode->crtc_htotal); args.usH_Disp = cpu_to_le16(mode->crtc_hdisplay); args.usH_SyncStart = cpu_to_le16(mode->crtc_hsync_start); args.usH_SyncWidth = cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start); args.usV_Total = cpu_to_le16(mode->crtc_vtotal); args.usV_Disp = cpu_to_le16(mode->crtc_vdisplay); args.usV_SyncStart = cpu_to_le16(mode->crtc_vsync_start); args.usV_SyncWidth = cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start); args.ucOverscanRight = radeon_crtc->h_border; args.ucOverscanLeft = 
radeon_crtc->h_border; args.ucOverscanBottom = radeon_crtc->v_border; args.ucOverscanTop = radeon_crtc->v_border; if (mode->flags & DRM_MODE_FLAG_NVSYNC) misc |= ATOM_VSYNC_POLARITY; if (mode->flags & DRM_MODE_FLAG_NHSYNC) misc |= ATOM_HSYNC_POLARITY; if (mode->flags & DRM_MODE_FLAG_CSYNC) misc |= ATOM_COMPOSITESYNC; if (mode->flags & DRM_MODE_FLAG_INTERLACE) misc |= ATOM_INTERLACE; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) misc |= ATOM_DOUBLE_CLOCK_MODE; args.susModeMiscInfo.usAccess = cpu_to_le16(misc); args.ucCRTC = radeon_crtc->crtc_id; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } static void atombios_disable_ss(struct radeon_device *rdev, int pll_id) { u32 ss_cntl; if (ASIC_IS_DCE4(rdev)) { switch (pll_id) { case ATOM_PPLL1: ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL); ss_cntl &= ~EVERGREEN_PxPLL_SS_EN; WREG32(EVERGREEN_P1PLL_SS_CNTL, ss_cntl); break; case ATOM_PPLL2: ss_cntl = RREG32(EVERGREEN_P2PLL_SS_CNTL); ss_cntl &= ~EVERGREEN_PxPLL_SS_EN; WREG32(EVERGREEN_P2PLL_SS_CNTL, ss_cntl); break; case ATOM_DCPLL: case ATOM_PPLL_INVALID: return; } } else if (ASIC_IS_AVIVO(rdev)) { switch (pll_id) { case ATOM_PPLL1: ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL); ss_cntl &= ~1; WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl); break; case ATOM_PPLL2: ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL); ss_cntl &= ~1; WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl); break; case ATOM_DCPLL: case ATOM_PPLL_INVALID: return; } } } union atom_enable_ss { ENABLE_LVDS_SS_PARAMETERS lvds_ss; ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2; ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1; ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2; ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3; }; static void atombios_crtc_program_ss(struct radeon_device *rdev, int enable, int pll_id, struct radeon_atom_ss *ss) { int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL); union atom_enable_ss args; memset(&args, 0, sizeof(args)); if (ASIC_IS_DCE5(rdev)) { args.v3.usSpreadSpectrumAmountFrac = 
cpu_to_le16(0); args.v3.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; switch (pll_id) { case ATOM_PPLL1: args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL; args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step); break; case ATOM_PPLL2: args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL; args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step); break; case ATOM_DCPLL: args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL; args.v3.usSpreadSpectrumAmount = cpu_to_le16(0); args.v3.usSpreadSpectrumStep = cpu_to_le16(0); break; case ATOM_PPLL_INVALID: return; } args.v3.ucEnable = enable; if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE61(rdev)) args.v3.ucEnable = ATOM_DISABLE; } else if (ASIC_IS_DCE4(rdev)) { args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); args.v2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; switch (pll_id) { case ATOM_PPLL1: args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL; args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step); break; case ATOM_PPLL2: args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL; args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step); break; case ATOM_DCPLL: args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL; args.v2.usSpreadSpectrumAmount = cpu_to_le16(0); args.v2.usSpreadSpectrumStep = cpu_to_le16(0); break; case ATOM_PPLL_INVALID: return; } args.v2.ucEnable = enable; if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev)) args.v2.ucEnable = ATOM_DISABLE; } else if (ASIC_IS_DCE3(rdev)) { args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); args.v1.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; 
args.v1.ucSpreadSpectrumStep = ss->step; args.v1.ucSpreadSpectrumDelay = ss->delay; args.v1.ucSpreadSpectrumRange = ss->range; args.v1.ucPpll = pll_id; args.v1.ucEnable = enable; } else if (ASIC_IS_AVIVO(rdev)) { if ((enable == ATOM_DISABLE) || (ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK)) { atombios_disable_ss(rdev, pll_id); return; } args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); args.lvds_ss_2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; args.lvds_ss_2.ucSpreadSpectrumStep = ss->step; args.lvds_ss_2.ucSpreadSpectrumDelay = ss->delay; args.lvds_ss_2.ucSpreadSpectrumRange = ss->range; args.lvds_ss_2.ucEnable = enable; } else { if ((enable == ATOM_DISABLE) || (ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK)) { atombios_disable_ss(rdev, pll_id); return; } args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); args.lvds_ss.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; args.lvds_ss.ucSpreadSpectrumStepSize_Delay = (ss->step & 3) << 2; args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4; args.lvds_ss.ucEnable = enable; } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } union adjust_pixel_clock { ADJUST_DISPLAY_PLL_PS_ALLOCATION v1; ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 v3; }; static u32 atombios_adjust_pll(struct drm_crtc *crtc, struct drm_display_mode *mode, struct radeon_pll *pll, bool ss_enabled, struct radeon_atom_ss *ss) { struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; struct drm_encoder *encoder = NULL; struct radeon_encoder *radeon_encoder = NULL; struct drm_connector *connector = NULL; u32 adjusted_clock = mode->clock; int encoder_mode = 0; u32 dp_clock = mode->clock; int bpc = 8; bool is_duallink = false; /* reset the pll flags */ pll->flags = 0; if (ASIC_IS_AVIVO(rdev)) { if ((rdev->family == CHIP_RS600) || (rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) 
pll->flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/ RADEON_PLL_PREFER_CLOSEST_LOWER); if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; else pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; if (rdev->family < CHIP_RV770) pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; /* use frac fb div on APUs */ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; } else { pll->flags |= RADEON_PLL_LEGACY; if (mode->clock > 200000) /* range limits??? */ pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; else pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; } list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc == crtc) { radeon_encoder = to_radeon_encoder(encoder); connector = radeon_get_connector_for_encoder(encoder); /* if (connector && connector->display_info.bpc) bpc = connector->display_info.bpc; */ encoder_mode = atombios_get_encoder_mode(encoder); is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock); if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) { if (connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; dp_clock = dig_connector->dp_clock; } } /* use recommended ref_div for ss */ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { if (ss_enabled) { if (ss->refdiv) { pll->flags |= RADEON_PLL_USE_REF_DIV; pll->reference_div = ss->refdiv; if (ASIC_IS_AVIVO(rdev)) pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; } } } if (ASIC_IS_AVIVO(rdev)) { /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) adjusted_clock = mode->clock * 2; if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) pll->flags |= 
RADEON_PLL_PREFER_CLOSEST_LOWER; if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) pll->flags |= RADEON_PLL_IS_LCD; } else { if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) pll->flags |= RADEON_PLL_USE_REF_DIV; } break; } } /* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock * accordingly based on the encoder/transmitter to work around * special hw requirements. */ if (ASIC_IS_DCE3(rdev)) { union adjust_pixel_clock args; u8 frev, crev; int index; index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll); if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) return adjusted_clock; memset(&args, 0, sizeof(args)); switch (frev) { case 1: switch (crev) { case 1: case 2: args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); args.v1.ucTransmitterID = radeon_encoder->encoder_id; args.v1.ucEncodeMode = encoder_mode; if (ss_enabled && ss->percentage) args.v1.ucConfig |= ADJUST_DISPLAY_CONFIG_SS_ENABLE; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10; break; case 3: args.v3.sInput.usPixelClock = cpu_to_le16(mode->clock / 10); args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id; args.v3.sInput.ucEncodeMode = encoder_mode; args.v3.sInput.ucDispPllConfig = 0; if (ss_enabled && ss->percentage) args.v3.sInput.ucDispPllConfig |= DISPPLL_CONFIG_SS_ENABLE; if (ENCODER_MODE_IS_DP(encoder_mode)) { args.v3.sInput.ucDispPllConfig |= DISPPLL_CONFIG_COHERENT_MODE; /* 16200 or 27000 */ args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; if (encoder_mode == ATOM_ENCODER_MODE_HDMI) /* deep color support */ args.v3.sInput.usPixelClock = cpu_to_le16((mode->clock * bpc / 8) / 10); if (dig->coherent_mode) 
args.v3.sInput.ucDispPllConfig |= DISPPLL_CONFIG_COHERENT_MODE; if (is_duallink) args.v3.sInput.ucDispPllConfig |= DISPPLL_CONFIG_DUAL_LINK; } if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) args.v3.sInput.ucExtTransmitterID = radeon_encoder_get_dp_bridge_encoder_id(encoder); else args.v3.sInput.ucExtTransmitterID = 0; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; if (args.v3.sOutput.ucRefDiv) { pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; pll->flags |= RADEON_PLL_USE_REF_DIV; pll->reference_div = args.v3.sOutput.ucRefDiv; } if (args.v3.sOutput.ucPostDiv) { pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; pll->flags |= RADEON_PLL_USE_POST_DIV; pll->post_div = args.v3.sOutput.ucPostDiv; } break; default: DRM_ERROR("Unknown table version %d %d\n", frev, crev); return adjusted_clock; } break; default: DRM_ERROR("Unknown table version %d %d\n", frev, crev); return adjusted_clock; } } return adjusted_clock; } union set_pixel_clock { SET_PIXEL_CLOCK_PS_ALLOCATION base; PIXEL_CLOCK_PARAMETERS v1; PIXEL_CLOCK_PARAMETERS_V2 v2; PIXEL_CLOCK_PARAMETERS_V3 v3; PIXEL_CLOCK_PARAMETERS_V5 v5; PIXEL_CLOCK_PARAMETERS_V6 v6; }; /* on DCE5, make sure the voltage is high enough to support the * required disp clk. 
 */
static void atombios_crtc_set_disp_eng_pll(struct radeon_device *rdev,
					   u32 dispclk)
{
	u8 frev, crev;
	int index;
	union set_pixel_clock args;

	memset(&args, 0, sizeof(args));

	index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
	/* Bail out if the BIOS doesn't expose a SetPixelClock command table. */
	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
				   &crev))
		return;

	switch (frev) {
	case 1:
		switch (crev) {
		case 5:
			/* if the default dcpll clock is specified,
			 * SetPixelClock provides the dividers
			 */
			args.v5.ucCRTC = ATOM_CRTC_INVALID;
			args.v5.usPixelClock = cpu_to_le16(dispclk);
			args.v5.ucPpll = ATOM_DCPLL;
			break;
		case 6:
			/* if the default dcpll clock is specified,
			 * SetPixelClock provides the dividers
			 */
			args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
			/* PLL choice depends on the display-engine generation. */
			if (ASIC_IS_DCE61(rdev))
				args.v6.ucPpll = ATOM_EXT_PLL1;
			else if (ASIC_IS_DCE6(rdev))
				args.v6.ucPpll = ATOM_PPLL0;
			else
				args.v6.ucPpll = ATOM_DCPLL;
			break;
		default:
			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
			return;
		}
		break;
	default:
		DRM_ERROR("Unknown table version %d %d\n", frev, crev);
		return;
	}
	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

/*
 * atombios_crtc_program_pll - program a display PLL through AtomBIOS
 * @crtc: drm crtc
 * @crtc_id: hardware crtc index
 * @pll_id: PLL to program
 * @encoder_mode / @encoder_id: routing info for newer table revisions
 * @clock: target pixel clock in kHz (ATOM_DISABLE turns the PLL off on v1)
 * @ref_div/@fb_div/@frac_fb_div/@post_div: precomputed divider values
 * @bpc: bits per color component (HDMI deep color on v5/v6)
 * @ss_enabled/@ss: spread-spectrum state passed through to the table
 *
 * Fills the SetPixelClock parameter structure matching the table revision
 * reported by the BIOS and executes it.
 */
static void atombios_crtc_program_pll(struct drm_crtc *crtc,
				      u32 crtc_id,
				      int pll_id,
				      u32 encoder_mode,
				      u32 encoder_id,
				      u32 clock,
				      u32 ref_div,
				      u32 fb_div,
				      u32 frac_fb_div,
				      u32 post_div,
				      int bpc,
				      bool ss_enabled,
				      struct radeon_atom_ss *ss)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	u8 frev, crev;
	int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
	union set_pixel_clock args;

	memset(&args, 0, sizeof(args));

	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
				   &crev))
		return;

	switch (frev) {
	case 1:
		switch (crev) {
		case 1:
			if (clock == ATOM_DISABLE)
				return;
			args.v1.usPixelClock = cpu_to_le16(clock / 10);
			args.v1.usRefDiv = cpu_to_le16(ref_div);
			args.v1.usFbDiv = cpu_to_le16(fb_div);
			args.v1.ucFracFbDiv = frac_fb_div;
			args.v1.ucPostDiv = post_div;
			args.v1.ucPpll = pll_id;
			args.v1.ucCRTC = crtc_id;
			args.v1.ucRefDivSrc =
1; break; case 2: args.v2.usPixelClock = cpu_to_le16(clock / 10); args.v2.usRefDiv = cpu_to_le16(ref_div); args.v2.usFbDiv = cpu_to_le16(fb_div); args.v2.ucFracFbDiv = frac_fb_div; args.v2.ucPostDiv = post_div; args.v2.ucPpll = pll_id; args.v2.ucCRTC = crtc_id; args.v2.ucRefDivSrc = 1; break; case 3: args.v3.usPixelClock = cpu_to_le16(clock / 10); args.v3.usRefDiv = cpu_to_le16(ref_div); args.v3.usFbDiv = cpu_to_le16(fb_div); args.v3.ucFracFbDiv = frac_fb_div; args.v3.ucPostDiv = post_div; args.v3.ucPpll = pll_id; args.v3.ucMiscInfo = (pll_id << 2); if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC; args.v3.ucTransmitterId = encoder_id; args.v3.ucEncoderMode = encoder_mode; break; case 5: args.v5.ucCRTC = crtc_id; args.v5.usPixelClock = cpu_to_le16(clock / 10); args.v5.ucRefDiv = ref_div; args.v5.usFbDiv = cpu_to_le16(fb_div); args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000); args.v5.ucPostDiv = post_div; args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */ if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC; switch (bpc) { case 8: default: args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP; break; case 10: args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP; break; } args.v5.ucTransmitterID = encoder_id; args.v5.ucEncoderMode = encoder_mode; args.v5.ucPpll = pll_id; break; case 6: args.v6.ulDispEngClkFreq = cpu_to_le32(crtc_id << 24 | clock / 10); args.v6.ucRefDiv = ref_div; args.v6.usFbDiv = cpu_to_le16(fb_div); args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000); args.v6.ucPostDiv = post_div; args.v6.ucMiscInfo = 0; /* HDMI depth, etc. 
*/ if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC; switch (bpc) { case 8: default: args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP; break; case 10: args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP; break; case 12: args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP; break; case 16: args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP; break; } args.v6.ucTransmitterID = encoder_id; args.v6.ucEncoderMode = encoder_mode; args.v6.ucPpll = pll_id; break; default: DRM_ERROR("Unknown table version %d %d\n", frev, crev); return; } break; default: DRM_ERROR("Unknown table version %d %d\n", frev, crev); return; } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; struct drm_encoder *encoder = NULL; struct radeon_encoder *radeon_encoder = NULL; u32 pll_clock = mode->clock; u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0; struct radeon_pll *pll; u32 adjusted_clock; int encoder_mode = 0; struct radeon_atom_ss ss; bool ss_enabled = false; int bpc = 8; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc == crtc) { radeon_encoder = to_radeon_encoder(encoder); encoder_mode = atombios_get_encoder_mode(encoder); break; } } if (!radeon_encoder) return; switch (radeon_crtc->pll_id) { case ATOM_PPLL1: pll = &rdev->clock.p1pll; break; case ATOM_PPLL2: pll = &rdev->clock.p2pll; break; case ATOM_DCPLL: case ATOM_PPLL_INVALID: default: pll = &rdev->clock.dcpll; break; } if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) { struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; struct drm_connector 
*connector = radeon_get_connector_for_encoder(encoder); struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; int dp_clock; /* if (connector->display_info.bpc) bpc = connector->display_info.bpc; */ switch (encoder_mode) { case ATOM_ENCODER_MODE_DP_MST: case ATOM_ENCODER_MODE_DP: /* DP/eDP */ dp_clock = dig_connector->dp_clock / 10; if (ASIC_IS_DCE4(rdev)) ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss, ASIC_INTERNAL_SS_ON_DP, dp_clock); else { if (dp_clock == 16200) { ss_enabled = radeon_atombios_get_ppll_ss_info(rdev, &ss, ATOM_DP_SS_ID2); if (!ss_enabled) ss_enabled = radeon_atombios_get_ppll_ss_info(rdev, &ss, ATOM_DP_SS_ID1); } else ss_enabled = radeon_atombios_get_ppll_ss_info(rdev, &ss, ATOM_DP_SS_ID1); } break; case ATOM_ENCODER_MODE_LVDS: if (ASIC_IS_DCE4(rdev)) ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss, dig->lcd_ss_id, mode->clock / 10); else ss_enabled = radeon_atombios_get_ppll_ss_info(rdev, &ss, dig->lcd_ss_id); break; case ATOM_ENCODER_MODE_DVI: if (ASIC_IS_DCE4(rdev)) ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss, ASIC_INTERNAL_SS_ON_TMDS, mode->clock / 10); break; case ATOM_ENCODER_MODE_HDMI: if (ASIC_IS_DCE4(rdev)) ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss, ASIC_INTERNAL_SS_ON_HDMI, mode->clock / 10); break; default: break; } } /* adjust pixel clock as needed */ adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) /* TV seems to prefer the legacy algo on some boards */ radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, &ref_div, &post_div); else if (ASIC_IS_AVIVO(rdev)) radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, &ref_div, &post_div); else radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, &ref_div, &post_div); 
atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, &ss); atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, encoder_mode, radeon_encoder->encoder_id, mode->clock, ref_div, fb_div, frac_fb_div, post_div, bpc, ss_enabled, &ss); if (ss_enabled) { /* calculate ss amount and step size */ if (ASIC_IS_DCE4(rdev)) { u32 step_size; u32 amount = (((fb_div * 10) + frac_fb_div) * ss.percentage) / 10000; ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK; ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) & ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK; if (ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD) step_size = (4 * amount * ref_div * (ss.rate * 2048)) / (125 * 25 * pll->reference_freq / 100); else step_size = (2 * amount * ref_div * (ss.rate * 2048)) / (125 * 25 * pll->reference_freq / 100); ss.step = step_size; } atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, &ss); } } static int dce4_crtc_do_set_base(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y, int atomic) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_framebuffer *radeon_fb; struct drm_framebuffer *target_fb; struct drm_gem_object *obj; struct radeon_bo *rbo; uint64_t fb_location; uint32_t fb_format, fb_pitch_pixels, tiling_flags; unsigned bankw, bankh, mtaspect, tile_split; u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE); u32 tmp, viewport_w, viewport_h; int r; /* no fb bound */ if (!atomic && !crtc->fb) { DRM_DEBUG_KMS("No FB bound\n"); return 0; } if (atomic) { radeon_fb = to_radeon_framebuffer(fb); target_fb = fb; } else { radeon_fb = to_radeon_framebuffer(crtc->fb); target_fb = crtc->fb; } /* If atomic, assume fb object is pinned & idle & fenced and * just update base pointers */ obj = radeon_fb->obj; rbo = gem_to_radeon_bo(obj); r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) 
return r; if (atomic) fb_location = radeon_bo_gpu_offset(rbo); else { r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location); if (unlikely(r != 0)) { radeon_bo_unreserve(rbo); return -EINVAL; } } radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); radeon_bo_unreserve(rbo); switch (target_fb->bits_per_pixel) { case 8: fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED)); break; case 15: fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555)); break; case 16: fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565)); #ifdef __BIG_ENDIAN fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16); #endif break; case 24: case 32: fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888)); #ifdef __BIG_ENDIAN fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32); #endif break; default: DRM_ERROR("Unsupported screen depth %d\n", target_fb->bits_per_pixel); return -EINVAL; } if (tiling_flags & RADEON_TILING_MACRO) { if (rdev->family >= CHIP_CAYMAN) tmp = rdev->config.cayman.tile_config; else tmp = rdev->config.evergreen.tile_config; switch ((tmp & 0xf0) >> 4) { case 0: /* 4 banks */ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK); break; case 1: /* 8 banks */ default: fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK); break; case 2: /* 16 banks */ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK); break; } fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1); evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split); fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw); fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh); fb_format |= 
EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect); } else if (tiling_flags & RADEON_TILING_MICRO) fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); switch (radeon_crtc->crtc_id) { case 0: WREG32(AVIVO_D1VGA_CONTROL, 0); break; case 1: WREG32(AVIVO_D2VGA_CONTROL, 0); break; case 2: WREG32(EVERGREEN_D3VGA_CONTROL, 0); break; case 3: WREG32(EVERGREEN_D4VGA_CONTROL, 0); break; case 4: WREG32(EVERGREEN_D5VGA_CONTROL, 0); break; case 5: WREG32(EVERGREEN_D6VGA_CONTROL, 0); break; default: break; } WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, upper_32_bits(fb_location)); WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, upper_32_bits(fb_location)); WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK); WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK); WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap); WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width); WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height); fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8); WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels); WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1); WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset, target_fb->height); x &= ~3; y &= ~1; WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset, (x << 16) | y); viewport_w = crtc->mode.hdisplay; viewport_h = 
(crtc->mode.vdisplay + 1) & ~1; WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, (viewport_w << 16) | viewport_h); /* pageflip setup */ /* make sure flip is at vb rather than hb */ tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset); tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN; WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); /* set pageflip to happen anywhere in vblank interval */ WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); if (!atomic && fb && fb != crtc->fb) { radeon_fb = to_radeon_framebuffer(fb); rbo = gem_to_radeon_bo(radeon_fb->obj); r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) return r; radeon_bo_unpin(rbo); radeon_bo_unreserve(rbo); } /* Bytes per pixel may have changed */ radeon_bandwidth_update(rdev); return 0; } static int avivo_crtc_do_set_base(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y, int atomic) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_framebuffer *radeon_fb; struct drm_gem_object *obj; struct radeon_bo *rbo; struct drm_framebuffer *target_fb; uint64_t fb_location; uint32_t fb_format, fb_pitch_pixels, tiling_flags; u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE; u32 tmp, viewport_w, viewport_h; int r; /* no fb bound */ if (!atomic && !crtc->fb) { DRM_DEBUG_KMS("No FB bound\n"); return 0; } if (atomic) { radeon_fb = to_radeon_framebuffer(fb); target_fb = fb; } else { radeon_fb = to_radeon_framebuffer(crtc->fb); target_fb = crtc->fb; } obj = radeon_fb->obj; rbo = gem_to_radeon_bo(obj); r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) return r; /* If atomic, assume fb object is pinned & idle & fenced and * just update base pointers */ if (atomic) fb_location = radeon_bo_gpu_offset(rbo); else { r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location); if (unlikely(r != 0)) { radeon_bo_unreserve(rbo); return -EINVAL; } } 
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); radeon_bo_unreserve(rbo); switch (target_fb->bits_per_pixel) { case 8: fb_format = AVIVO_D1GRPH_CONTROL_DEPTH_8BPP | AVIVO_D1GRPH_CONTROL_8BPP_INDEXED; break; case 15: fb_format = AVIVO_D1GRPH_CONTROL_DEPTH_16BPP | AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555; break; case 16: fb_format = AVIVO_D1GRPH_CONTROL_DEPTH_16BPP | AVIVO_D1GRPH_CONTROL_16BPP_RGB565; #ifdef __BIG_ENDIAN fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT; #endif break; case 24: case 32: fb_format = AVIVO_D1GRPH_CONTROL_DEPTH_32BPP | AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888; #ifdef __BIG_ENDIAN fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT; #endif break; default: DRM_ERROR("Unsupported screen depth %d\n", target_fb->bits_per_pixel); return -EINVAL; } if (rdev->family >= CHIP_R600) { if (tiling_flags & RADEON_TILING_MACRO) fb_format |= R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1; else if (tiling_flags & RADEON_TILING_MICRO) fb_format |= R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1; } else { if (tiling_flags & RADEON_TILING_MACRO) fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE; if (tiling_flags & RADEON_TILING_MICRO) fb_format |= AVIVO_D1GRPH_TILED; } if (radeon_crtc->crtc_id == 0) WREG32(AVIVO_D1VGA_CONTROL, 0); else WREG32(AVIVO_D2VGA_CONTROL, 0); if (rdev->family >= CHIP_RV770) { if (radeon_crtc->crtc_id) { WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); } else { WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); } } WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, (u32) fb_location); WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, (u32) fb_location); WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); if (rdev->family >= CHIP_R600) WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap); 
WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0); WREG32(AVIVO_D1GRPH_Y_START + radeon_crtc->crtc_offset, 0); WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width); WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height); fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8); WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels); WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1); WREG32(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset, target_fb->height); x &= ~3; y &= ~1; WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset, (x << 16) | y); viewport_w = crtc->mode.hdisplay; viewport_h = (crtc->mode.vdisplay + 1) & ~1; WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset, (viewport_w << 16) | viewport_h); /* pageflip setup */ /* make sure flip is at vb rather than hb */ tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset); tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN; WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); /* set pageflip to happen anywhere in vblank interval */ WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); if (!atomic && fb && fb != crtc->fb) { radeon_fb = to_radeon_framebuffer(fb); rbo = gem_to_radeon_bo(radeon_fb->obj); r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) return r; radeon_bo_unpin(rbo); radeon_bo_unreserve(rbo); } /* Bytes per pixel may have changed */ radeon_bandwidth_update(rdev); return 0; } int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; if (ASIC_IS_DCE4(rdev)) return dce4_crtc_do_set_base(crtc, old_fb, x, y, 0); else if (ASIC_IS_AVIVO(rdev)) return avivo_crtc_do_set_base(crtc, old_fb, x, y, 
/* Continuation: last argument (atomic == 0) of the avivo_crtc_do_set_base()
 * call begun on the previous line, plus the pre-AVIVO legacy fallback. */
						  0);
	else
		return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0);
}

/*
 * atombios_crtc_set_base_atomic - update the scanout base in atomic context
 *
 * Same dispatch as atombios_crtc_set_base() but with atomic == 1, i.e. the
 * per-ASIC helpers assume the fb object is already pinned, idle and fenced
 * and only update base pointers.
 */
int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
				  struct drm_framebuffer *fb,
				  int x, int y, enum mode_set_atomic state)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;

	if (ASIC_IS_DCE4(rdev))
		return dce4_crtc_do_set_base(crtc, fb, x, y, 1);
	else if (ASIC_IS_AVIVO(rdev))
		return avivo_crtc_do_set_base(crtc, fb, x, y, 1);
	else
		return radeon_crtc_do_set_base(crtc, fb, x, y, 1);
}

/* properly set additional regs when using atombios */
static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	u32 disp_merge_cntl;

	switch (radeon_crtc->crtc_id) {
	case 0:
		disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
		disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
		WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
		break;
	case 1:
		disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
		disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
		WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
		/* mirror CRTC2 sync start/width into the flat-panel regs */
		WREG32(RADEON_FP_H2_SYNC_STRT_WID,
		       RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
		WREG32(RADEON_FP_V2_SYNC_STRT_WID,
		       RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
		break;
	}
}

/*
 * radeon_atom_pick_pll - choose which PLL should drive this CRTC
 *
 * On DCE6.1 parts UNIPHY A is hard-wired to PPLL2; other encoders pick
 * between PPLL0/PPLL1 based on what sibling CRTCs already use.
 */
static int radeon_atom_pick_pll(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_encoder *test_encoder;
	struct drm_crtc *test_crtc;
	uint32_t pll_in_use = 0;

	if (ASIC_IS_DCE61(rdev)) {
		list_for_each_entry(test_encoder,
				    &dev->mode_config.encoder_list, head) {
			if (test_encoder->crtc && (test_encoder->crtc == crtc)) {
				struct radeon_encoder *test_radeon_encoder =
					to_radeon_encoder(test_encoder);
				struct radeon_encoder_atom_dig *dig =
					test_radeon_encoder->enc_priv;

				if ((test_radeon_encoder->encoder_id ==
				     ENCODER_OBJECT_ID_INTERNAL_UNIPHY) &&
				    (dig->linkb == false))
					/* UNIPHY A uses PPLL2 */
					return
ATOM_PPLL2; } } /* UNIPHY B/C/D/E/F */ list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) { struct radeon_crtc *radeon_test_crtc; if (crtc == test_crtc) continue; radeon_test_crtc = to_radeon_crtc(test_crtc); if ((radeon_test_crtc->pll_id == ATOM_PPLL0) || (radeon_test_crtc->pll_id == ATOM_PPLL1)) pll_in_use |= (1 << radeon_test_crtc->pll_id); } if (!(pll_in_use & 4)) return ATOM_PPLL0; return ATOM_PPLL1; } else if (ASIC_IS_DCE4(rdev)) { list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) { if (test_encoder->crtc && (test_encoder->crtc == crtc)) { /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock, * depending on the asic: * DCE4: PPLL or ext clock * DCE5: DCPLL or ext clock * * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip * PPLL/DCPLL programming and only program the DP DTO for the * crtc virtual pixel clock. */ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) { if (ASIC_IS_DCE5(rdev) || rdev->clock.dp_extclk) return ATOM_PPLL_INVALID; } } } /* otherwise, pick one of the plls */ list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) { struct radeon_crtc *radeon_test_crtc; if (crtc == test_crtc) continue; radeon_test_crtc = to_radeon_crtc(test_crtc); if ((radeon_test_crtc->pll_id >= ATOM_PPLL1) && (radeon_test_crtc->pll_id <= ATOM_PPLL2)) pll_in_use |= (1 << radeon_test_crtc->pll_id); } if (!(pll_in_use & 1)) return ATOM_PPLL1; return ATOM_PPLL2; } else return radeon_crtc->crtc_id; } void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev) { /* always set DCPLL */ if (ASIC_IS_DCE6(rdev)) atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk); else if (ASIC_IS_DCE4(rdev)) { struct radeon_atom_ss ss; bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss, ASIC_INTERNAL_SS_ON_DCPLL, rdev->clock.default_dispclk); if (ss_enabled) atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, &ss); /* XXX: DCE5, make sure voltage, dispclk is high enough */ 
atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk); if (ss_enabled) atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, &ss); } } int atombios_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, int x, int y, struct drm_framebuffer *old_fb) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; struct drm_encoder *encoder; bool is_tvcv = false; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { /* find tv std */ if (encoder->crtc == crtc) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) is_tvcv = true; } } atombios_crtc_set_pll(crtc, adjusted_mode); if (ASIC_IS_DCE4(rdev)) atombios_set_crtc_dtd_timing(crtc, adjusted_mode); else if (ASIC_IS_AVIVO(rdev)) { if (is_tvcv) atombios_crtc_set_timing(crtc, adjusted_mode); else atombios_set_crtc_dtd_timing(crtc, adjusted_mode); } else { atombios_crtc_set_timing(crtc, adjusted_mode); if (radeon_crtc->crtc_id == 0) atombios_set_crtc_dtd_timing(crtc, adjusted_mode); radeon_legacy_atom_fixup(crtc); } atombios_crtc_set_base(crtc, x, y, old_fb); atombios_overscan_setup(crtc, mode, adjusted_mode); atombios_scaler_setup(crtc); return 0; } static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) return false; return true; } static void atombios_crtc_prepare(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); /* pick pll */ radeon_crtc->pll_id = radeon_atom_pick_pll(crtc); atombios_lock_crtc(crtc, ATOM_ENABLE); atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); } static void atombios_crtc_commit(struct drm_crtc *crtc) { atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON); atombios_lock_crtc(crtc, 
ATOM_DISABLE); } static void atombios_crtc_disable(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_atom_ss ss; atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); switch (radeon_crtc->pll_id) { case ATOM_PPLL1: case ATOM_PPLL2: /* disable the ppll */ atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); break; case ATOM_PPLL0: /* disable the ppll */ if (ASIC_IS_DCE61(rdev)) atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); break; default: break; } radeon_crtc->pll_id = -1; } static const struct drm_crtc_helper_funcs atombios_helper_funcs = { .dpms = atombios_crtc_dpms, .mode_fixup = atombios_crtc_mode_fixup, .mode_set = atombios_crtc_mode_set, .mode_set_base = atombios_crtc_set_base, .mode_set_base_atomic = atombios_crtc_set_base_atomic, .prepare = atombios_crtc_prepare, .commit = atombios_crtc_commit, .load_lut = radeon_crtc_load_lut, .disable = atombios_crtc_disable, }; void radeon_atombios_init_crtc(struct drm_device *dev, struct radeon_crtc *radeon_crtc) { struct radeon_device *rdev = dev->dev_private; if (ASIC_IS_DCE4(rdev)) { switch (radeon_crtc->crtc_id) { case 0: default: radeon_crtc->crtc_offset = EVERGREEN_CRTC0_REGISTER_OFFSET; break; case 1: radeon_crtc->crtc_offset = EVERGREEN_CRTC1_REGISTER_OFFSET; break; case 2: radeon_crtc->crtc_offset = EVERGREEN_CRTC2_REGISTER_OFFSET; break; case 3: radeon_crtc->crtc_offset = EVERGREEN_CRTC3_REGISTER_OFFSET; break; case 4: radeon_crtc->crtc_offset = EVERGREEN_CRTC4_REGISTER_OFFSET; break; case 5: radeon_crtc->crtc_offset = EVERGREEN_CRTC5_REGISTER_OFFSET; break; } } else { if (radeon_crtc->crtc_id == 1) radeon_crtc->crtc_offset = AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL; else radeon_crtc->crtc_offset = 0; } radeon_crtc->pll_id = -1; 
drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs); }
gpl-2.0
soderstrom-rikard/linux
scripts/kconfig/lxdialog/textbox.c
3380
9206
/* * textbox.c -- implements the text box * * ORIGINAL AUTHOR: Savio Lam (lam836@cs.cuhk.hk) * MODIFIED FOR LINUX KERNEL CONFIG BY: William Roadcap (roadcap@cfw.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "dialog.h" static void back_lines(int n); static void print_page(WINDOW *win, int height, int width, update_text_fn update_text, void *data); static void print_line(WINDOW *win, int row, int width); static char *get_line(void); static void print_position(WINDOW * win); static int hscroll; static int begin_reached, end_reached, page_length; static char *buf; static char *page; /* * refresh window content */ static void refresh_text_box(WINDOW *dialog, WINDOW *box, int boxh, int boxw, int cur_y, int cur_x, update_text_fn update_text, void *data) { print_page(box, boxh, boxw, update_text, data); print_position(dialog); wmove(dialog, cur_y, cur_x); /* Restore cursor position */ wrefresh(dialog); } /* * Display text from a file in a dialog box. 
* * keys is a null-terminated array * update_text() may not add or remove any '\n' or '\0' in tbuf */ int dialog_textbox(const char *title, char *tbuf, int initial_height, int initial_width, int *keys, int *_vscroll, int *_hscroll, update_text_fn update_text, void *data) { int i, x, y, cur_x, cur_y, key = 0; int height, width, boxh, boxw; WINDOW *dialog, *box; bool done = false; begin_reached = 1; end_reached = 0; page_length = 0; hscroll = 0; buf = tbuf; page = buf; /* page is pointer to start of page to be displayed */ if (_vscroll && *_vscroll) { begin_reached = 0; for (i = 0; i < *_vscroll; i++) get_line(); } if (_hscroll) hscroll = *_hscroll; do_resize: getmaxyx(stdscr, height, width); if (height < TEXTBOX_HEIGTH_MIN || width < TEXTBOX_WIDTH_MIN) return -ERRDISPLAYTOOSMALL; if (initial_height != 0) height = initial_height; else if (height > 4) height -= 4; else height = 0; if (initial_width != 0) width = initial_width; else if (width > 5) width -= 5; else width = 0; /* center dialog box on screen */ x = (getmaxx(stdscr) - width) / 2; y = (getmaxy(stdscr) - height) / 2; draw_shadow(stdscr, y, x, height, width); dialog = newwin(height, width, y, x); keypad(dialog, TRUE); /* Create window for box region, used for scrolling text */ boxh = height - 4; boxw = width - 2; box = subwin(dialog, boxh, boxw, y + 1, x + 1); wattrset(box, dlg.dialog.atr); wbkgdset(box, dlg.dialog.atr & A_COLOR); keypad(box, TRUE); /* register the new window, along with its borders */ draw_box(dialog, 0, 0, height, width, dlg.dialog.atr, dlg.border.atr); wattrset(dialog, dlg.border.atr); mvwaddch(dialog, height - 3, 0, ACS_LTEE); for (i = 0; i < width - 2; i++) waddch(dialog, ACS_HLINE); wattrset(dialog, dlg.dialog.atr); wbkgdset(dialog, dlg.dialog.atr & A_COLOR); waddch(dialog, ACS_RTEE); print_title(dialog, title, width); print_button(dialog, gettext(" Exit "), height - 2, width / 2 - 4, TRUE); wnoutrefresh(dialog); getyx(dialog, cur_y, cur_x); /* Save cursor position */ /* Print first 
page of text */ attr_clear(box, boxh, boxw, dlg.dialog.atr); refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, data); while (!done) { key = wgetch(dialog); switch (key) { case 'E': /* Exit */ case 'e': case 'X': case 'x': case 'q': case '\n': done = true; break; case 'g': /* First page */ case KEY_HOME: if (!begin_reached) { begin_reached = 1; page = buf; refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, data); } break; case 'G': /* Last page */ case KEY_END: end_reached = 1; /* point to last char in buf */ page = buf + strlen(buf); back_lines(boxh); refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, data); break; case 'K': /* Previous line */ case 'k': case KEY_UP: if (begin_reached) break; back_lines(page_length + 1); refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, data); break; case 'B': /* Previous page */ case 'b': case 'u': case KEY_PPAGE: if (begin_reached) break; back_lines(page_length + boxh); refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, data); break; case 'J': /* Next line */ case 'j': case KEY_DOWN: if (end_reached) break; back_lines(page_length - 1); refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, data); break; case KEY_NPAGE: /* Next page */ case ' ': case 'd': if (end_reached) break; begin_reached = 0; refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, data); break; case '0': /* Beginning of line */ case 'H': /* Scroll left */ case 'h': case KEY_LEFT: if (hscroll <= 0) break; if (key == '0') hscroll = 0; else hscroll--; /* Reprint current page to scroll horizontally */ back_lines(page_length); refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, data); break; case 'L': /* Scroll right */ case 'l': case KEY_RIGHT: if (hscroll >= MAX_LEN) break; hscroll++; /* Reprint current page to scroll horizontally */ back_lines(page_length); refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text, 
data); break; case KEY_ESC: if (on_key_esc(dialog) == KEY_ESC) done = true; break; case KEY_RESIZE: back_lines(height); delwin(box); delwin(dialog); on_key_resize(); goto do_resize; default: for (i = 0; keys[i]; i++) { if (key == keys[i]) { done = true; break; } } } } delwin(box); delwin(dialog); if (_vscroll) { const char *s; s = buf; *_vscroll = 0; back_lines(page_length); while (s < page && (s = strchr(s, '\n'))) { (*_vscroll)++; s++; } } if (_hscroll) *_hscroll = hscroll; return key; } /* * Go back 'n' lines in text. Called by dialog_textbox(). * 'page' will be updated to point to the desired line in 'buf'. */ static void back_lines(int n) { int i; begin_reached = 0; /* Go back 'n' lines */ for (i = 0; i < n; i++) { if (*page == '\0') { if (end_reached) { end_reached = 0; continue; } } if (page == buf) { begin_reached = 1; return; } page--; do { if (page == buf) { begin_reached = 1; return; } page--; } while (*page != '\n'); page++; } } /* * Print a new page of text. */ static void print_page(WINDOW *win, int height, int width, update_text_fn update_text, void *data) { int i, passed_end = 0; if (update_text) { char *end; for (i = 0; i < height; i++) get_line(); end = page; back_lines(height); update_text(buf, page - buf, end - buf, data); } page_length = 0; for (i = 0; i < height; i++) { print_line(win, i, width); if (!passed_end) page_length++; if (end_reached && !passed_end) passed_end = 1; } wnoutrefresh(win); } /* * Print a new line of text. */ static void print_line(WINDOW * win, int row, int width) { char *line; line = get_line(); line += MIN(strlen(line), hscroll); /* Scroll horizontally */ wmove(win, row, 0); /* move cursor to correct line */ waddch(win, ' '); waddnstr(win, line, MIN(strlen(line), width - 2)); /* Clear 'residue' of previous line */ #if OLD_NCURSES { int x = getcurx(win); int i; for (i = 0; i < width - x; i++) waddch(win, ' '); } #else wclrtoeol(win); #endif } /* * Return current line of text. Called by dialog_textbox() and print_line(). 
* 'page' should point to start of current line before calling, and will be * updated to point to start of next line. */ static char *get_line(void) { int i = 0; static char line[MAX_LEN + 1]; end_reached = 0; while (*page != '\n') { if (*page == '\0') { end_reached = 1; break; } else if (i < MAX_LEN) line[i++] = *(page++); else { /* Truncate lines longer than MAX_LEN characters */ if (i == MAX_LEN) line[i++] = '\0'; page++; } } if (i <= MAX_LEN) line[i] = '\0'; if (!end_reached) page++; /* move past '\n' */ return line; } /* * Print current position */ static void print_position(WINDOW * win) { int percent; wattrset(win, dlg.position_indicator.atr); wbkgdset(win, dlg.position_indicator.atr & A_COLOR); percent = (page - buf) * 100 / strlen(buf); wmove(win, getmaxy(win) - 3, getmaxx(win) - 9); wprintw(win, "(%3d%%)", percent); }
gpl-2.0
threader/Huawei_S7_kernel_2.6.35
drivers/input/mouse/pxa930_trkball.c
3636
6676
/* * PXA930 track ball mouse driver * * Copyright (C) 2007 Marvell International Ltd. * 2008-02-28: Yong Yao <yaoyong@marvell.com> * initial version * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/input.h> #include <linux/version.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/slab.h> #include <mach/hardware.h> #include <mach/pxa930_trkball.h> /* Trackball Controller Register Definitions */ #define TBCR (0x000C) #define TBCNTR (0x0010) #define TBSBC (0x0014) #define TBCR_TBRST (1 << 1) #define TBCR_TBSB (1 << 10) #define TBCR_Y_FLT(n) (((n) & 0xf) << 6) #define TBCR_X_FLT(n) (((n) & 0xf) << 2) #define TBCNTR_YM(n) (((n) >> 24) & 0xff) #define TBCNTR_YP(n) (((n) >> 16) & 0xff) #define TBCNTR_XM(n) (((n) >> 8) & 0xff) #define TBCNTR_XP(n) ((n) & 0xff) #define TBSBC_TBSBC (0x1) struct pxa930_trkball { struct pxa930_trkball_platform_data *pdata; /* Memory Mapped Register */ struct resource *mem; void __iomem *mmio_base; struct input_dev *input; }; static irqreturn_t pxa930_trkball_interrupt(int irq, void *dev_id) { struct pxa930_trkball *trkball = dev_id; struct input_dev *input = trkball->input; int tbcntr, x, y; /* According to the spec software must read TBCNTR twice: * if the read value is the same, the reading is valid */ tbcntr = __raw_readl(trkball->mmio_base + TBCNTR); if (tbcntr == __raw_readl(trkball->mmio_base + TBCNTR)) { x = (TBCNTR_XP(tbcntr) - TBCNTR_XM(tbcntr)) / 2; y = (TBCNTR_YP(tbcntr) - TBCNTR_YM(tbcntr)) / 2; input_report_rel(input, REL_X, x); input_report_rel(input, REL_Y, y); input_sync(input); } __raw_writel(TBSBC_TBSBC, trkball->mmio_base + TBSBC); __raw_writel(0, trkball->mmio_base + TBSBC); return IRQ_HANDLED; } /* For TBCR, we need to wait for a while to 
make sure it has been modified. */ static int write_tbcr(struct pxa930_trkball *trkball, int v) { int i = 100; __raw_writel(v, trkball->mmio_base + TBCR); while (--i) { if (__raw_readl(trkball->mmio_base + TBCR) == v) break; msleep(1); } if (i == 0) { pr_err("%s: timed out writing TBCR(%x)!\n", __func__, v); return -ETIMEDOUT; } return 0; } static void pxa930_trkball_config(struct pxa930_trkball *trkball) { uint32_t tbcr; /* According to spec, need to write the filters of x,y to 0xf first! */ tbcr = __raw_readl(trkball->mmio_base + TBCR); write_tbcr(trkball, tbcr | TBCR_X_FLT(0xf) | TBCR_Y_FLT(0xf)); write_tbcr(trkball, TBCR_X_FLT(trkball->pdata->x_filter) | TBCR_Y_FLT(trkball->pdata->y_filter)); /* According to spec, set TBCR_TBRST first, before clearing it! */ tbcr = __raw_readl(trkball->mmio_base + TBCR); write_tbcr(trkball, tbcr | TBCR_TBRST); write_tbcr(trkball, tbcr & ~TBCR_TBRST); __raw_writel(TBSBC_TBSBC, trkball->mmio_base + TBSBC); __raw_writel(0, trkball->mmio_base + TBSBC); pr_debug("%s: final TBCR=%x!\n", __func__, __raw_readl(trkball->mmio_base + TBCR)); } static int pxa930_trkball_open(struct input_dev *dev) { struct pxa930_trkball *trkball = input_get_drvdata(dev); pxa930_trkball_config(trkball); return 0; } static void pxa930_trkball_disable(struct pxa930_trkball *trkball) { uint32_t tbcr = __raw_readl(trkball->mmio_base + TBCR); /* Held in reset, gate the 32-KHz input clock off */ write_tbcr(trkball, tbcr | TBCR_TBRST); } static void pxa930_trkball_close(struct input_dev *dev) { struct pxa930_trkball *trkball = input_get_drvdata(dev); pxa930_trkball_disable(trkball); } static int __devinit pxa930_trkball_probe(struct platform_device *pdev) { struct pxa930_trkball *trkball; struct input_dev *input; struct resource *res; int irq, error; irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "failed to get trkball irq\n"); return -ENXIO; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "failed to 
get register memory\n"); return -ENXIO; } trkball = kzalloc(sizeof(struct pxa930_trkball), GFP_KERNEL); if (!trkball) return -ENOMEM; trkball->pdata = pdev->dev.platform_data; if (!trkball->pdata) { dev_err(&pdev->dev, "no platform data defined\n"); error = -EINVAL; goto failed; } trkball->mmio_base = ioremap_nocache(res->start, resource_size(res)); if (!trkball->mmio_base) { dev_err(&pdev->dev, "failed to ioremap registers\n"); error = -ENXIO; goto failed; } /* held the module in reset, will be enabled in open() */ pxa930_trkball_disable(trkball); error = request_irq(irq, pxa930_trkball_interrupt, IRQF_DISABLED, pdev->name, trkball); if (error) { dev_err(&pdev->dev, "failed to request irq: %d\n", error); goto failed_free_io; } platform_set_drvdata(pdev, trkball); input = input_allocate_device(); if (!input) { dev_err(&pdev->dev, "failed to allocate input device\n"); error = -ENOMEM; goto failed_free_irq; } input->name = pdev->name; input->id.bustype = BUS_HOST; input->open = pxa930_trkball_open; input->close = pxa930_trkball_close; input->dev.parent = &pdev->dev; input_set_drvdata(input, trkball); trkball->input = input; input_set_capability(input, EV_REL, REL_X); input_set_capability(input, EV_REL, REL_Y); error = input_register_device(input); if (error) { dev_err(&pdev->dev, "unable to register input device\n"); goto failed_free_input; } return 0; failed_free_input: input_free_device(input); failed_free_irq: free_irq(irq, trkball); failed_free_io: iounmap(trkball->mmio_base); failed: kfree(trkball); return error; } static int __devexit pxa930_trkball_remove(struct platform_device *pdev) { struct pxa930_trkball *trkball = platform_get_drvdata(pdev); int irq = platform_get_irq(pdev, 0); input_unregister_device(trkball->input); free_irq(irq, trkball); iounmap(trkball->mmio_base); kfree(trkball); return 0; } static struct platform_driver pxa930_trkball_driver = { .driver = { .name = "pxa930-trkball", }, .probe = pxa930_trkball_probe, .remove = 
__devexit_p(pxa930_trkball_remove), }; static int __init pxa930_trkball_init(void) { return platform_driver_register(&pxa930_trkball_driver); } static void __exit pxa930_trkball_exit(void) { platform_driver_unregister(&pxa930_trkball_driver); } module_init(pxa930_trkball_init); module_exit(pxa930_trkball_exit); MODULE_AUTHOR("Yong Yao <yaoyong@marvell.com>"); MODULE_DESCRIPTION("PXA930 Trackball Mouse Driver"); MODULE_LICENSE("GPL");
gpl-2.0
GKMADE/p5
drivers/net/ne3210.c
4148
10334
/* ne3210.c Linux driver for Novell NE3210 EISA Network Adapter Copyright (C) 1998, Paul Gortmaker. This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. Information and Code Sources: 1) Based upon my other EISA 8390 drivers (lne390, es3210, smc-ultra32) 2) The existing myriad of other Linux 8390 drivers by Donald Becker. 3) Info for getting IRQ and sh-mem gleaned from the EISA cfg file The NE3210 is an EISA shared memory NS8390 implementation. Shared memory address > 1MB should work with this driver. Note that the .cfg file (3/11/93, v1.0) has AUI and BNC switched around (or perhaps there are some defective/backwards cards ???) This driver WILL NOT WORK FOR THE NE3200 - it is completely different and does not use an 8390 at all. Updated to EISA probing API 5/2003 by Marc Zyngier. */ #include <linux/module.h> #include <linux/eisa.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/mm.h> #include <asm/io.h> #include <asm/system.h> #include "8390.h" #define DRV_NAME "ne3210" static void ne3210_reset_8390(struct net_device *dev); static void ne3210_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); static void ne3210_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset); static void ne3210_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page); #define NE3210_START_PG 0x00 /* First page of TX buffer */ #define NE3210_STOP_PG 0x80 /* Last page +1 of RX ring */ #define NE3210_IO_EXTENT 0x20 #define NE3210_SA_PROM 0x16 /* Start of e'net addr. 
*/ #define NE3210_RESET_PORT 0xc84 #define NE3210_NIC_OFFSET 0x00 /* Hello, the 8390 is *here* */ #define NE3210_ADDR0 0x00 /* 3 byte vendor prefix */ #define NE3210_ADDR1 0x00 #define NE3210_ADDR2 0x1b #define NE3210_CFG1 0xc84 /* NB: 0xc84 is also "reset" port. */ #define NE3210_CFG2 0xc90 #define NE3210_CFG_EXTENT (NE3210_CFG2 - NE3210_CFG1 + 1) /* * You can OR any of the following bits together and assign it * to NE3210_DEBUG to get verbose driver info during operation. * Currently only the probe one is implemented. */ #define NE3210_D_PROBE 0x01 #define NE3210_D_RX_PKT 0x02 #define NE3210_D_TX_PKT 0x04 #define NE3210_D_IRQ 0x08 #define NE3210_DEBUG 0x0 static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3}; static unsigned int shmem_map[] __initdata = {0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0}; static const char *ifmap[] __initdata = {"UTP", "?", "BNC", "AUI"}; static int ifmap_val[] __initdata = { IF_PORT_10BASET, IF_PORT_UNKNOWN, IF_PORT_10BASE2, IF_PORT_AUI, }; static int __init ne3210_eisa_probe (struct device *device) { unsigned long ioaddr, phys_mem; int i, retval, port_index; struct eisa_device *edev = to_eisa_device (device); struct net_device *dev; /* Allocate dev->priv and fill in 8390 specific dev fields. 
*/ if (!(dev = alloc_ei_netdev ())) { printk ("ne3210.c: unable to allocate memory for dev!\n"); return -ENOMEM; } SET_NETDEV_DEV(dev, device); dev_set_drvdata(device, dev); ioaddr = edev->base_addr; if (!request_region(ioaddr, NE3210_IO_EXTENT, DRV_NAME)) { retval = -EBUSY; goto out; } if (!request_region(ioaddr + NE3210_CFG1, NE3210_CFG_EXTENT, DRV_NAME)) { retval = -EBUSY; goto out1; } #if NE3210_DEBUG & NE3210_D_PROBE printk("ne3210-debug: probe at %#x, ID %s\n", ioaddr, edev->id.sig); printk("ne3210-debug: config regs: %#x %#x\n", inb(ioaddr + NE3210_CFG1), inb(ioaddr + NE3210_CFG2)); #endif port_index = inb(ioaddr + NE3210_CFG2) >> 6; for(i = 0; i < ETHER_ADDR_LEN; i++) dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i); printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr: %pM.\n", edev->slot, ifmap[port_index], dev->dev_addr); /* Snarf the interrupt now. CFG file has them all listed as `edge' with share=NO */ dev->irq = irq_map[(inb(ioaddr + NE3210_CFG2) >> 3) & 0x07]; printk("ne3210.c: using IRQ %d, ", dev->irq); retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev); if (retval) { printk (" unable to get IRQ %d.\n", dev->irq); goto out2; } phys_mem = shmem_map[inb(ioaddr + NE3210_CFG2) & 0x07] * 0x1000; /* BEWARE!! Some dain-bramaged EISA SCUs will allow you to put the card mem within the region covered by `normal' RAM !!! 
*/ if (phys_mem > 1024*1024) { /* phys addr > 1MB */ if (phys_mem < virt_to_phys(high_memory)) { printk(KERN_CRIT "ne3210.c: Card RAM overlaps with normal memory!!!\n"); printk(KERN_CRIT "ne3210.c: Use EISA SCU to set card memory below 1MB,\n"); printk(KERN_CRIT "ne3210.c: or to an address above 0x%llx.\n", (u64)virt_to_phys(high_memory)); printk(KERN_CRIT "ne3210.c: Driver NOT installed.\n"); retval = -EINVAL; goto out3; } } if (!request_mem_region (phys_mem, NE3210_STOP_PG*0x100, DRV_NAME)) { printk ("ne3210.c: Unable to request shared memory at physical address %#lx\n", phys_mem); goto out3; } printk("%dkB memory at physical address %#lx\n", NE3210_STOP_PG/4, phys_mem); ei_status.mem = ioremap(phys_mem, NE3210_STOP_PG*0x100); if (!ei_status.mem) { printk(KERN_ERR "ne3210.c: Unable to remap card memory !!\n"); printk(KERN_ERR "ne3210.c: Driver NOT installed.\n"); retval = -EAGAIN; goto out4; } printk("ne3210.c: remapped %dkB card memory to virtual address %p\n", NE3210_STOP_PG/4, ei_status.mem); dev->mem_start = (unsigned long)ei_status.mem; dev->mem_end = dev->mem_start + (NE3210_STOP_PG - NE3210_START_PG)*256; /* The 8390 offset is zero for the NE3210 */ dev->base_addr = ioaddr; ei_status.name = "NE3210"; ei_status.tx_start_page = NE3210_START_PG; ei_status.rx_start_page = NE3210_START_PG + TX_PAGES; ei_status.stop_page = NE3210_STOP_PG; ei_status.word16 = 1; ei_status.priv = phys_mem; if (ei_debug > 0) printk("ne3210 loaded.\n"); ei_status.reset_8390 = &ne3210_reset_8390; ei_status.block_input = &ne3210_block_input; ei_status.block_output = &ne3210_block_output; ei_status.get_8390_hdr = &ne3210_get_8390_hdr; dev->netdev_ops = &ei_netdev_ops; dev->if_port = ifmap_val[port_index]; if ((retval = register_netdev (dev))) goto out5; NS8390_init(dev, 0); return 0; out5: iounmap(ei_status.mem); out4: release_mem_region (phys_mem, NE3210_STOP_PG*0x100); out3: free_irq (dev->irq, dev); out2: release_region (ioaddr + NE3210_CFG1, NE3210_CFG_EXTENT); out1: release_region 
(ioaddr, NE3210_IO_EXTENT); out: free_netdev (dev); return retval; } static int __devexit ne3210_eisa_remove (struct device *device) { struct net_device *dev = dev_get_drvdata(device); unsigned long ioaddr = to_eisa_device (device)->base_addr; unregister_netdev (dev); iounmap(ei_status.mem); release_mem_region (ei_status.priv, NE3210_STOP_PG*0x100); free_irq (dev->irq, dev); release_region (ioaddr + NE3210_CFG1, NE3210_CFG_EXTENT); release_region (ioaddr, NE3210_IO_EXTENT); free_netdev (dev); return 0; } /* * Reset by toggling the "Board Enable" bits (bit 2 and 0). */ static void ne3210_reset_8390(struct net_device *dev) { unsigned short ioaddr = dev->base_addr; outb(0x04, ioaddr + NE3210_RESET_PORT); if (ei_debug > 1) printk("%s: resetting the NE3210...", dev->name); mdelay(2); ei_status.txing = 0; outb(0x01, ioaddr + NE3210_RESET_PORT); if (ei_debug > 1) printk("reset done\n"); } /* * Note: In the following three functions is the implicit assumption * that the associated memcpy will only use "rep; movsl" as long as * we keep the counts as some multiple of doublewords. This is a * requirement of the hardware, and also prevents us from using * eth_io_copy_and_sum() since we can't guarantee it will limit * itself to doubleword access. */ /* * Grab the 8390 specific header. Similar to the block_input routine, but * we don't need to be concerned with ring wrap as the header will be at * the start of a page, so we optimize accordingly. (A single doubleword.) */ static void ne3210_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) { void __iomem *hdr_start = ei_status.mem + ((ring_page - NE3210_START_PG)<<8); memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */ } /* * Block input and output are easy on shared memory ethercards, the only * complication is when the ring buffer wraps. 
The count will already * be rounded up to a doubleword value via ne3210_get_8390_hdr() above. */ static void ne3210_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) { void __iomem *start = ei_status.mem + ring_offset - NE3210_START_PG*256; if (ring_offset + count > NE3210_STOP_PG*256) { /* Packet wraps over end of ring buffer. */ int semi_count = NE3210_STOP_PG*256 - ring_offset; memcpy_fromio(skb->data, start, semi_count); count -= semi_count; memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES*256, count); } else { /* Packet is in one chunk. */ memcpy_fromio(skb->data, start, count); } } static void ne3210_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page) { void __iomem *shmem = ei_status.mem + ((start_page - NE3210_START_PG)<<8); count = (count + 3) & ~3; /* Round up to doubleword */ memcpy_toio(shmem, buf, count); } static struct eisa_device_id ne3210_ids[] = { { "EGL0101" }, { "NVL1801" }, { "" }, }; MODULE_DEVICE_TABLE(eisa, ne3210_ids); static struct eisa_driver ne3210_eisa_driver = { .id_table = ne3210_ids, .driver = { .name = "ne3210", .probe = ne3210_eisa_probe, .remove = __devexit_p (ne3210_eisa_remove), }, }; MODULE_DESCRIPTION("NE3210 EISA Ethernet driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(eisa, ne3210_ids); static int ne3210_init(void) { return eisa_driver_register (&ne3210_eisa_driver); } static void ne3210_cleanup(void) { eisa_driver_unregister (&ne3210_eisa_driver); } module_init (ne3210_init); module_exit (ne3210_cleanup);
gpl-2.0
avisconti/prova
drivers/base/power/domain.c
4148
45518
/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

/*
 * Invoke a device PM callback for @dev: the domain-wide callback in
 * genpd->dev_ops takes precedence; otherwise fall back to the per-device
 * callback in the device's generic_pm_domain_data.  Evaluates to (type)0
 * if neither callback is set.
 */
#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d); 			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback; 			\
	if (__routine) {					\
		__ret = __routine(dev); 			\
	} else {						\
		__routine = dev_gpd_data(dev)->ops.callback;	\
		if (__routine)					\
			__ret = __routine(dev);			\
	}							\
	__ret;							\
})

/*
 * Like GENPD_DEV_CALLBACK(), but also time the callback and record the
 * elapsed nanoseconds in the device's timing data member @field whenever
 * it exceeds the worst case observed so far, warning about the new value.
 */
#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)	\
({										\
	ktime_t __start = ktime_get();						\
	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);		\
	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));		\
	struct generic_pm_domain_data *__gpd_data = dev_gpd_data(dev);		\
	if (__elapsed > __gpd_data->td.field) {					\
		__gpd_data->td.field = __elapsed;				\
		dev_warn(dev, name " latency exceeded, new value %lld ns\n",	\
			__elapsed);						\
	}									\
	__retval;								\
})

/* All registered generic PM domains, protected by gpd_list_lock. */
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

#ifdef CONFIG_PM

/*
 * Return the generic PM domain @dev belongs to, or ERR_PTR(-EINVAL) if the
 * device has no PM domain (or an error-valued one) attached.
 */
struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

/* Timed wrappers around the per-device stop/start/save/restore callbacks. */
static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
					stop_latency_ns, "stop");
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
					start_latency_ns, "start");
}

static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
					save_state_latency_ns, "state save");
}

static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
					restore_state_latency_ns, "state restore");
}

/*
 * Decrement @genpd's subdomain counter.  Returns true if the count dropped
 * to zero; warns (and leaves the count alone) if it was already zero.
 */
static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	/* Make the increment visible before subsequent status checks. */
	smp_mb__after_atomic_inc();
}

/*
 * Acquire genpd->lock, first waiting for the domain to settle into either
 * the active or the power-off state (i.e. to leave any transient
 * busy/wait-master state), dropping the lock while sleeping.
 */
static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
	DEFINE_WAIT(wait);

	mutex_lock(&genpd->lock);

	/*
	 * Wait for the domain to transition into either the active,
	 * or the power off state.
	 */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status == GPD_STATE_ACTIVE
		    || genpd->status == GPD_STATE_POWER_OFF)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
}

static void genpd_release_lock(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->lock);
}

/* Mark @genpd active, unless one of its devices is being resumed. */
static void genpd_set_active(struct generic_pm_domain *genpd)
{
	if (genpd->resume_count == 0)
		genpd->status = GPD_STATE_ACTIVE;
}

/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 *
 * Must be called with genpd->lock held; the lock is dropped and re-acquired
 * internally while waiting and while powering up masters.
 */
int __pm_genpd_poweron(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct gpd_link *link;
	DEFINE_WAIT(wait);
	int ret = 0;

	/* If the domain's master is being waited for, we have to wait too. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status != GPD_STATE_WAIT_MASTER)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);

	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	if (genpd->status != GPD_STATE_POWER_OFF) {
		genpd_set_active(genpd);
		return 0;
	}

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);
		genpd->status = GPD_STATE_WAIT_MASTER;

		mutex_unlock(&genpd->lock);

		ret = pm_genpd_poweron(link->master);

		mutex_lock(&genpd->lock);

		/*
		 * The "wait for parent" status is guaranteed not to change
		 * while the master is powering on.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		wake_up_all(&genpd->status_wait_queue);
		if (ret) {
			genpd_sd_counter_dec(link->master);
			goto err;
		}
	}

	if (genpd->power_on) {
		ktime_t time_start = ktime_get();
		s64 elapsed_ns;

		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		/* Track the worst-case power-on latency for this domain. */
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_on_latency_ns) {
			genpd->power_on_latency_ns = elapsed_ns;
			if (genpd->name)
				pr_warning("%s: Power-on latency exceeded, "
					"new value %lld ns\n", genpd->name,
					elapsed_ns);
		}
	}

	genpd_set_active(genpd);

	return 0;

 err:
	/* Undo the subdomain-count increments done for already-visited masters. */
	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
		genpd_sd_counter_dec(link->master);

	return ret;
}

/**
 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);
	return ret;
}

#endif /* CONFIG_PM */

#ifdef CONFIG_PM_RUNTIME

/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 *
 * Called with genpd->lock held; the lock is dropped around the device
 * callbacks, which may sleep.  On success, marks the device as needing a
 * state restore before next use.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
				  struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	int ret = 0;

	/* Nothing to do if the state has been saved already. */
	if (gpd_data->need_restore)
		return 0;

	mutex_unlock(&genpd->lock);

	/* Start the device so that its state can be saved, then stop it again. */
	genpd_start_dev(genpd, dev);
	ret = genpd_save_dev(genpd, dev);
	genpd_stop_dev(genpd, dev);

	mutex_lock(&genpd->lock);

	if (!ret)
		gpd_data->need_restore = true;

	return ret;
}

/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 *
 * Called with genpd->lock held; the lock is dropped around the device
 * callbacks, which may sleep.  No-op unless a prior state save is pending.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
				      struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;

	if (!gpd_data->need_restore)
		return;

	mutex_unlock(&genpd->lock);

	/* Start the device so that its state can be restored, then stop it. */
	genpd_start_dev(genpd, dev);
	genpd_restore_dev(genpd, dev);
	genpd_stop_dev(genpd, dev);

	mutex_lock(&genpd->lock);

	gpd_data->need_restore = false;
}

/**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 * @genpd: PM domain to check.
 *
 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 * a "power off" operation, which means that a "power on" has occurred in the
 * meantime, or if its resume_count field is different from zero, which means
 * that one of its devices has been resumed in the meantime.
 */
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
	return genpd->status == GPD_STATE_WAIT_MASTER
		|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 * before.
 */
void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	if (!work_pending(&genpd->power_off_work))
		queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 *
 * Called with genpd->lock held; the lock is dropped and re-acquired inside
 * __pm_genpd_save_device().  Reentrant invocations are coalesced through the
 * poweroff_task / GPD_STATE_REPEAT handshake below.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended;
	int ret = 0;

 start:
	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) The domain is waiting for its master to power up.
	 * (3) One of the domain's devices is being resumed right now.
	 * (4) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->status == GPD_STATE_WAIT_MASTER
	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
		return 0;

	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	/*
	 * Count devices that are not runtime-suspended (or cannot be stopped),
	 * allowing for the suspends currently in flight (genpd->in_progress).
	 */
	not_suspended = 0;
	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
		    || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on))
			not_suspended++;

	if (not_suspended > genpd->in_progress)
		return -EBUSY;

	if (genpd->poweroff_task) {
		/*
		 * Another instance of pm_genpd_poweroff() is executing
		 * callbacks, so tell it to start over and return.
		 */
		genpd->status = GPD_STATE_REPEAT;
		return 0;
	}

	/* Let the governor veto the power off, if one is attached. */
	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	genpd->status = GPD_STATE_BUSY;
	genpd->poweroff_task = current;

	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
		ret = atomic_read(&genpd->sd_count) == 0 ?
			__pm_genpd_save_device(pdd, genpd) : -EBUSY;

		if (genpd_abort_poweroff(genpd))
			goto out;

		if (ret) {
			genpd_set_active(genpd);
			goto out;
		}

		if (genpd->status == GPD_STATE_REPEAT) {
			/* A concurrent caller asked us to start over. */
			genpd->poweroff_task = NULL;
			goto start;
		}
	}

	if (genpd->power_off) {
		ktime_t time_start;
		s64 elapsed_ns;

		if (atomic_read(&genpd->sd_count) > 0) {
			ret = -EBUSY;
			goto out;
		}

		time_start = ktime_get();

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call pm_genpd_poweron() for the master yet after
		 * incrementing it.  In that case pm_genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the pm_genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd->power_off(genpd);
		if (ret == -EBUSY) {
			genpd_set_active(genpd);
			goto out;
		}

		/* Track the worst-case power-off latency for this domain. */
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_off_latency_ns) {
			genpd->power_off_latency_ns = elapsed_ns;
			if (genpd->name)
				pr_warning("%s: Power-off latency exceeded, "
					"new value %lld ns\n", genpd->name,
					elapsed_ns);
		}
	}

	genpd->status = GPD_STATE_POWER_OFF;
	genpd->power_off_time = ktime_get();

	/* Update PM QoS information for devices in the domain. */
	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
		struct gpd_timing_data *td = &to_gpd_data(pdd)->td;

		pm_runtime_update_max_time_suspended(pdd->dev,
					td->start_latency_ns +
					td->restore_state_latency_ns +
					genpd->power_on_latency_ns);
	}

	/* Propagate: each master may now be able to power off too. */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

 out:
	genpd->poweroff_task = NULL;
	wake_up_all(&genpd->status_wait_queue);
	return ret;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_acquire_lock(genpd);
	pm_genpd_poweroff(genpd);
	genpd_release_lock(genpd);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*stop_ok)(struct device *__dev);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	might_sleep_if(!genpd->dev_irq_safe);

	/* "Always on" devices must not be stopped. */
	if (dev_gpd_data(dev)->always_on)
		return -EBUSY;

	/* Let the governor veto stopping the device, if one is attached. */
	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
	if (stop_ok && !stop_ok(dev))
		return -EBUSY;

	ret = genpd_stop_dev(genpd, dev);
	if (ret)
		return ret;

	pm_runtime_update_max_time_suspended(dev,
				dev_gpd_data(dev)->td.start_latency_ns);

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);
	genpd->in_progress++;
	pm_genpd_poweroff(genpd);
	genpd->in_progress--;
	mutex_unlock(&genpd->lock);

	return 0;
}

/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	DEFINE_WAIT(wait);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	might_sleep_if(!genpd->dev_irq_safe);

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe)
		goto out;

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	if (ret) {
		mutex_unlock(&genpd->lock);
		return ret;
	}
	genpd->status = GPD_STATE_BUSY;
	genpd->resume_count++;
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		/*
		 * If current is the powering off task, we have been called
		 * reentrantly from one of the device callbacks, so we should
		 * not wait.
		 */
		if (!genpd->poweroff_task || genpd->poweroff_task == current)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
	genpd->resume_count--;
	genpd_set_active(genpd);
	wake_up_all(&genpd->status_wait_queue);
	mutex_unlock(&genpd->lock);

 out:
	genpd_start_dev(genpd, dev);

	return 0;
}

/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
void pm_genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	mutex_lock(&gpd_list_lock);

	/* Queue a power-off attempt for every registered domain. */
	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);
}

#else

static inline void genpd_power_off_work_fn(struct work_struct *work) {}

#define pm_genpd_runtime_suspend	NULL
#define pm_genpd_runtime_resume		NULL

#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP

/*
 * Thin wrappers dispatching the system-sleep device callbacks through
 * GENPD_DEV_CALLBACK() (domain-wide callback first, per-device fallback).
 */

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
}

static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
}

static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
}

static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
}

static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
}

static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
}

static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
}

static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" stages of system power transitions,
 * so it need not acquire locks (all of the "noirq" callbacks are executed
 * sequentially, so it is guaranteed that it will never run twice in parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	/* Bail out unless every device is suspended and no subdomain is on. */
	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	if (genpd->power_off)
		genpd->power_off(genpd);

	genpd->status = GPD_STATE_POWER_OFF;

	/* Recurse into the masters, which may now be able to power off too. */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master);
	}
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		return -EBUSY;
	}

	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_acquire_lock(genpd);

	/*
	 * The first device prepared in the domain records whether the whole
	 * domain was already powered off at the start of the transition.
	 */
	if (genpd->prepared_count++ == 0) {
		genpd->suspended_count = 0;
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
	}

	genpd_release_lock(genpd);

	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so pm_genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		/* Roll back the prepared count on failure. */
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	pm_runtime_put_sync(dev);
	return ret;
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* No-op if the domain was powered off for the whole transition. */
	return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
}

/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* Keep the device running if it is "always on" or a wakeup path. */
	if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	genpd_stop_dev(genpd, dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd);

	return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* Mirror the conditions under which suspend_noirq stopped the device. */
	if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_poweron(genpd);
	genpd->suspended_count--;

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
}

/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
}

/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* No-op if the domain was powered off for the whole transition. */
	return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
}

/**
 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* "Always on" devices are never stopped. */
	return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
		0 : genpd_stop_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* "Always on" devices were never stopped, so don't restart them. */
	return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
		0 : genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 *
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to pm_genpd_poweron(), so
		 * that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		if (genpd->suspend_power_off) {
			/*
			 * If the domain was off before the hibernation, make
			 * sure it will be off going forward.
			 */
			if (genpd->power_off)
				genpd->power_off(genpd);

			return 0;
		}
	}

	if (genpd->suspend_power_off)
		return 0;

	pm_genpd_poweron(genpd);

	return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool run_complete;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	mutex_lock(&genpd->lock);

	/* Only run the complete callbacks if the domain was not powered off. */
	run_complete = !genpd->suspend_power_off;
	if (--genpd->prepared_count == 0)
		genpd->suspend_power_off = false;

	mutex_unlock(&genpd->lock);

	if (run_complete) {
		pm_generic_complete(dev);
		/* Re-enable runtime PM, undoing pm_genpd_prepare(). */
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_runtime_idle(dev);
	}
}

#else

#define pm_genpd_prepare		NULL
#define pm_genpd_suspend		NULL
#define pm_genpd_suspend_late		NULL
#define pm_genpd_suspend_noirq		NULL
#define pm_genpd_resume_early		NULL
#define pm_genpd_resume_noirq		NULL
#define pm_genpd_resume			NULL
#define pm_genpd_freeze			NULL
#define pm_genpd_freeze_late		NULL
#define pm_genpd_freeze_noirq		NULL
#define pm_genpd_thaw_early		NULL
#define pm_genpd_thaw_noirq		NULL
#define pm_genpd_thaw			NULL
#define pm_genpd_restore_noirq		NULL
#define pm_genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	genpd_acquire_lock(genpd);

	/* Devices cannot be added to a powered-off domain. */
	if (genpd->status == GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	/* Nor during a system-wide power transition. */
	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	/* Reject duplicates. */
	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev == dev) {
			ret = -EINVAL;
			goto out;
		}

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto out;
	}

	genpd->device_count++;

	dev->pm_domain = &genpd->domain;
	dev_pm_get_subsys_data(dev);
	dev->power.subsys_data->domain_data = &gpd_data->base;
	gpd_data->base.dev = dev;
	gpd_data->need_restore = false;
	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
	if (td)
		gpd_data->td = *td;

 out:
	genpd_release_lock(genpd);

	return ret;
}

/**
 * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
 * @genpd_node: Device tree node pointer representing a PM domain to which
 *	the device is added.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
			     struct gpd_timing_data *td)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	/* Look up the domain registered for the given device tree node. */
	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (gpd->of_node == genpd_node) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	if (!genpd)
		return -EINVAL;

	return __pm_genpd_add_device(genpd, dev, td);
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct pm_domain_data *pdd;
	int ret = -EINVAL;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	genpd_acquire_lock(genpd);

	/* Devices cannot be removed during a system-wide power transition. */
	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		if (pdd->dev != dev)
			continue;

		list_del_init(&pdd->list_node);
		pdd->dev = NULL;
		dev_pm_put_subsys_data(dev);
		dev->pm_domain = NULL;
		kfree(to_gpd_data(pdd));

		genpd->device_count--;

		ret = 0;
		break;
	}

 out:
	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device.
 * @dev: Device to set/unset the flag for.
 * @val: The new value of the device's "always on" flag.
 */
void pm_genpd_dev_always_on(struct device *dev, bool val)
{
	struct pm_subsys_data *psd;
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	psd = dev_to_psd(dev);
	if (psd && psd->domain_data)
		to_gpd_data(psd->domain_data)->always_on = val;

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);
	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

	/* Retry until the subdomain settles out of any transient state. */
	if (subdomain->status != GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_ACTIVE) {
		mutex_unlock(&subdomain->lock);
		genpd_release_lock(genpd);
		goto start;
	}

	/* An active subdomain cannot hang off a powered-off master. */
	if (genpd->status == GPD_STATE_POWER_OFF
	    &&  subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	/* Reject duplicate links. */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		if (link->slave == subdomain && link->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		ret = -ENOMEM;
		goto out;
	}
	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	mutex_unlock(&subdomain->lock);
	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		/* Retry until the subdomain settles out of a transient state. */
		if (subdomain->status != GPD_STATE_POWER_OFF
		    && subdomain->status != GPD_STATE_ACTIVE) {
			mutex_unlock(&subdomain->lock);
			genpd_release_lock(genpd);
			goto start;
		}

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
 * @dev: Device to add the callbacks to.
 * @ops: Set of callbacks to add.
 * @td: Timing data to add to the device along with the callbacks (optional).
 */
int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
			   struct gpd_timing_data *td)
{
	struct pm_domain_data *pdd;
	int ret = 0;

	if (!(dev && dev->power.subsys_data && ops))
		return -EINVAL;

	/* Quiesce runtime PM while the callback pointers are being swapped. */
	pm_runtime_disable(dev);
	device_pm_lock();

	pdd = dev->power.subsys_data->domain_data;
	if (pdd) {
		struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);

		gpd_data->ops = *ops;
		if (td)
			gpd_data->td = *td;
	} else {
		ret = -EINVAL;
	}

	device_pm_unlock();
	pm_runtime_enable(dev);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);

/**
 * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
 * @dev: Device to remove the callbacks from.
 * @clear_td: If set, clear the device's timing data too.
*/ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) { struct pm_domain_data *pdd; int ret = 0; if (!(dev && dev->power.subsys_data)) return -EINVAL; pm_runtime_disable(dev); device_pm_lock(); pdd = dev->power.subsys_data->domain_data; if (pdd) { struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); gpd_data->ops = (struct gpd_dev_ops){ 0 }; if (clear_td) gpd_data->td = (struct gpd_timing_data){ 0 }; } else { ret = -EINVAL; } device_pm_unlock(); pm_runtime_enable(dev); return ret; } EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks); /* Default device callbacks for generic PM domains. */ /** * pm_genpd_default_save_state - Default "save device state" for PM domians. * @dev: Device to handle. */ static int pm_genpd_default_save_state(struct device *dev) { int (*cb)(struct device *__dev); struct device_driver *drv = dev->driver; cb = dev_gpd_data(dev)->ops.save_state; if (cb) return cb(dev); if (drv && drv->pm && drv->pm->runtime_suspend) return drv->pm->runtime_suspend(dev); return 0; } /** * pm_genpd_default_restore_state - Default PM domians "restore device state". * @dev: Device to handle. */ static int pm_genpd_default_restore_state(struct device *dev) { int (*cb)(struct device *__dev); struct device_driver *drv = dev->driver; cb = dev_gpd_data(dev)->ops.restore_state; if (cb) return cb(dev); if (drv && drv->pm && drv->pm->runtime_resume) return drv->pm->runtime_resume(dev); return 0; } #ifdef CONFIG_PM_SLEEP /** * pm_genpd_default_suspend - Default "device suspend" for PM domians. * @dev: Device to handle. */ static int pm_genpd_default_suspend(struct device *dev) { int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend; return cb ? cb(dev) : pm_generic_suspend(dev); } /** * pm_genpd_default_suspend_late - Default "late device suspend" for PM domians. * @dev: Device to handle. */ static int pm_genpd_default_suspend_late(struct device *dev) { int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late; return cb ? 
cb(dev) : pm_generic_suspend_late(dev); } /** * pm_genpd_default_resume_early - Default "early device resume" for PM domians. * @dev: Device to handle. */ static int pm_genpd_default_resume_early(struct device *dev) { int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early; return cb ? cb(dev) : pm_generic_resume_early(dev); } /** * pm_genpd_default_resume - Default "device resume" for PM domians. * @dev: Device to handle. */ static int pm_genpd_default_resume(struct device *dev) { int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume; return cb ? cb(dev) : pm_generic_resume(dev); } /** * pm_genpd_default_freeze - Default "device freeze" for PM domians. * @dev: Device to handle. */ static int pm_genpd_default_freeze(struct device *dev) { int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze; return cb ? cb(dev) : pm_generic_freeze(dev); } /** * pm_genpd_default_freeze_late - Default "late device freeze" for PM domians. * @dev: Device to handle. */ static int pm_genpd_default_freeze_late(struct device *dev) { int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late; return cb ? cb(dev) : pm_generic_freeze_late(dev); } /** * pm_genpd_default_thaw_early - Default "early device thaw" for PM domians. * @dev: Device to handle. */ static int pm_genpd_default_thaw_early(struct device *dev) { int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early; return cb ? cb(dev) : pm_generic_thaw_early(dev); } /** * pm_genpd_default_thaw - Default "device thaw" for PM domians. * @dev: Device to handle. */ static int pm_genpd_default_thaw(struct device *dev) { int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw; return cb ? 
cb(dev) : pm_generic_thaw(dev); } #else /* !CONFIG_PM_SLEEP */ #define pm_genpd_default_suspend NULL #define pm_genpd_default_suspend_late NULL #define pm_genpd_default_resume_early NULL #define pm_genpd_default_resume NULL #define pm_genpd_default_freeze NULL #define pm_genpd_default_freeze_late NULL #define pm_genpd_default_thaw_early NULL #define pm_genpd_default_thaw NULL #endif /* !CONFIG_PM_SLEEP */ /** * pm_genpd_init - Initialize a generic I/O PM domain object. * @genpd: PM domain object to initialize. * @gov: PM domain governor to associate with the domain (may be NULL). * @is_off: Initial value of the domain's power_is_off field. */ void pm_genpd_init(struct generic_pm_domain *genpd, struct dev_power_governor *gov, bool is_off) { if (IS_ERR_OR_NULL(genpd)) return; INIT_LIST_HEAD(&genpd->master_links); INIT_LIST_HEAD(&genpd->slave_links); INIT_LIST_HEAD(&genpd->dev_list); mutex_init(&genpd->lock); genpd->gov = gov; INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); genpd->in_progress = 0; atomic_set(&genpd->sd_count, 0); genpd->status = is_off ? 
GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; init_waitqueue_head(&genpd->status_wait_queue); genpd->poweroff_task = NULL; genpd->resume_count = 0; genpd->device_count = 0; genpd->max_off_time_ns = -1; genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume; genpd->domain.ops.runtime_idle = pm_generic_runtime_idle; genpd->domain.ops.prepare = pm_genpd_prepare; genpd->domain.ops.suspend = pm_genpd_suspend; genpd->domain.ops.suspend_late = pm_genpd_suspend_late; genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq; genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq; genpd->domain.ops.resume_early = pm_genpd_resume_early; genpd->domain.ops.resume = pm_genpd_resume; genpd->domain.ops.freeze = pm_genpd_freeze; genpd->domain.ops.freeze_late = pm_genpd_freeze_late; genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq; genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq; genpd->domain.ops.thaw_early = pm_genpd_thaw_early; genpd->domain.ops.thaw = pm_genpd_thaw; genpd->domain.ops.poweroff = pm_genpd_suspend; genpd->domain.ops.poweroff_late = pm_genpd_suspend_late; genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq; genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq; genpd->domain.ops.restore_early = pm_genpd_resume_early; genpd->domain.ops.restore = pm_genpd_resume; genpd->domain.ops.complete = pm_genpd_complete; genpd->dev_ops.save_state = pm_genpd_default_save_state; genpd->dev_ops.restore_state = pm_genpd_default_restore_state; genpd->dev_ops.suspend = pm_genpd_default_suspend; genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late; genpd->dev_ops.resume_early = pm_genpd_default_resume_early; genpd->dev_ops.resume = pm_genpd_default_resume; genpd->dev_ops.freeze = pm_genpd_default_freeze; genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late; genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early; genpd->dev_ops.thaw = pm_genpd_default_thaw; mutex_lock(&gpd_list_lock); 
list_add(&genpd->gpd_list_node, &gpd_list); mutex_unlock(&gpd_list_lock); }
gpl-2.0
adbaby/android_kernel_msm8974
arch/tile/kernel/module.c
4404
5787
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. * * Based on i386 version, copyright (C) 2001 Rusty Russell. */ #include <linux/moduleloader.h> #include <linux/elf.h> #include <linux/vmalloc.h> #include <linux/fs.h> #include <linux/string.h> #include <linux/kernel.h> #include <asm/pgtable.h> #include <asm/homecache.h> #include <arch/opcode.h> #ifdef __tilegx__ # define Elf_Rela Elf64_Rela # define ELF_R_SYM ELF64_R_SYM # define ELF_R_TYPE ELF64_R_TYPE #else # define Elf_Rela Elf32_Rela # define ELF_R_SYM ELF32_R_SYM # define ELF_R_TYPE ELF32_R_TYPE #endif #ifdef MODULE_DEBUG #define DEBUGP printk #else #define DEBUGP(fmt...) #endif /* * Allocate some address space in the range MEM_MODULE_START to * MEM_MODULE_END and populate it with memory. 
*/ void *module_alloc(unsigned long size) { struct page **pages; pgprot_t prot_rwx = __pgprot(_PAGE_KERNEL | _PAGE_KERNEL_EXEC); struct vm_struct *area; int i = 0; int npages; if (size == 0) return NULL; npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; pages = kmalloc(npages * sizeof(struct page *), GFP_KERNEL); if (pages == NULL) return NULL; for (; i < npages; ++i) { pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); if (!pages[i]) goto error; } area = __get_vm_area(size, VM_ALLOC, MEM_MODULE_START, MEM_MODULE_END); if (!area) goto error; area->nr_pages = npages; area->pages = pages; if (map_vm_area(area, prot_rwx, &pages)) { vunmap(area->addr); goto error; } return area->addr; error: while (--i >= 0) __free_page(pages[i]); kfree(pages); return NULL; } /* Free memory returned from module_alloc */ void module_free(struct module *mod, void *module_region) { vfree(module_region); /* Globally flush the L1 icache. */ flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask, 0, 0, 0, NULL, NULL, 0); /* * FIXME: If module_region == mod->module_init, trim exception * table entries. */ } #ifdef __tilegx__ /* * Validate that the high 16 bits of "value" is just the sign-extension of * the low 48 bits. */ static int validate_hw2_last(long value, struct module *me) { if (((value << 16) >> 16) != value) { pr_warning("module %s: Out of range HW2_LAST value %#lx\n", me->name, value); return 0; } return 1; } /* * Validate that "value" isn't too big to hold in a JumpOff relocation. */ static int validate_jumpoff(long value) { /* Determine size of jump offset. */ int shift = __builtin_clzl(get_JumpOff_X1(create_JumpOff_X1(-1))); /* Check to see if it fits into the relocation slot. 
*/ long f = get_JumpOff_X1(create_JumpOff_X1(value)); f = (f << shift) >> shift; return f == value; } #endif int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned int relsec, struct module *me) { unsigned int i; Elf_Rela *rel = (void *)sechdrs[relsec].sh_addr; Elf_Sym *sym; u64 *location; unsigned long value; DEBUGP("Applying relocate section %u to %u\n", relsec, sechdrs[relsec].sh_info); for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { /* This is where to make the change */ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset; /* * This is the symbol it is referring to. * Note that all undefined symbols have been resolved. */ sym = (Elf_Sym *)sechdrs[symindex].sh_addr + ELF_R_SYM(rel[i].r_info); value = sym->st_value + rel[i].r_addend; switch (ELF_R_TYPE(rel[i].r_info)) { #define MUNGE(func) (*location = ((*location & ~func(-1)) | func(value))) #ifndef __tilegx__ case R_TILE_32: *(uint32_t *)location = value; break; case R_TILE_IMM16_X0_HA: value = (value + 0x8000) >> 16; /*FALLTHROUGH*/ case R_TILE_IMM16_X0_LO: MUNGE(create_Imm16_X0); break; case R_TILE_IMM16_X1_HA: value = (value + 0x8000) >> 16; /*FALLTHROUGH*/ case R_TILE_IMM16_X1_LO: MUNGE(create_Imm16_X1); break; case R_TILE_JOFFLONG_X1: value -= (unsigned long) location; /* pc-relative */ value = (long) value >> 3; /* count by instrs */ MUNGE(create_JOffLong_X1); break; #else case R_TILEGX_64: *location = value; break; case R_TILEGX_IMM16_X0_HW2_LAST: if (!validate_hw2_last(value, me)) return -ENOEXEC; value >>= 16; /*FALLTHROUGH*/ case R_TILEGX_IMM16_X0_HW1: value >>= 16; /*FALLTHROUGH*/ case R_TILEGX_IMM16_X0_HW0: MUNGE(create_Imm16_X0); break; case R_TILEGX_IMM16_X1_HW2_LAST: if (!validate_hw2_last(value, me)) return -ENOEXEC; value >>= 16; /*FALLTHROUGH*/ case R_TILEGX_IMM16_X1_HW1: value >>= 16; /*FALLTHROUGH*/ case R_TILEGX_IMM16_X1_HW0: MUNGE(create_Imm16_X1); break; case R_TILEGX_JUMPOFF_X1: value -= (unsigned long) 
location; /* pc-relative */ value = (long) value >> 3; /* count by instrs */ if (!validate_jumpoff(value)) { pr_warning("module %s: Out of range jump to" " %#llx at %#llx (%p)\n", me->name, sym->st_value + rel[i].r_addend, rel[i].r_offset, location); return -ENOEXEC; } MUNGE(create_JumpOff_X1); break; #endif #undef MUNGE default: pr_err("module %s: Unknown relocation: %d\n", me->name, (int) ELF_R_TYPE(rel[i].r_info)); return -ENOEXEC; } } return 0; }
gpl-2.0
ngvincent/android-kernel-oppo-find5
arch/arm/mach-mmp/time.c
4660
4881
/* * linux/arch/arm/mach-mmp/time.c * * Support for clocksource and clockevents * * Copyright (C) 2008 Marvell International Ltd. * All rights reserved. * * 2008-04-11: Jason Chagas <Jason.chagas@marvell.com> * 2008-10-08: Bin Yang <bin.yang@marvell.com> * * The timers module actually includes three timers, each timer with up to * three match comparators. Timer #0 is used here in free-running mode as * the clock source, and match comparator #1 used as clock event device. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/clockchips.h> #include <linux/io.h> #include <linux/irq.h> #include <asm/sched_clock.h> #include <mach/addr-map.h> #include <mach/regs-timers.h> #include <mach/regs-apbc.h> #include <mach/irqs.h> #include <mach/cputype.h> #include <asm/mach/time.h> #include "clock.h" #define TIMERS_VIRT_BASE TIMERS1_VIRT_BASE #define MAX_DELTA (0xfffffffe) #define MIN_DELTA (16) /* * FIXME: the timer needs some delay to stablize the counter capture */ static inline uint32_t timer_read(void) { int delay = 100; __raw_writel(1, TIMERS_VIRT_BASE + TMR_CVWR(1)); while (delay--) cpu_relax(); return __raw_readl(TIMERS_VIRT_BASE + TMR_CVWR(1)); } static u32 notrace mmp_read_sched_clock(void) { return timer_read(); } static irqreturn_t timer_interrupt(int irq, void *dev_id) { struct clock_event_device *c = dev_id; /* * Clear pending interrupt status. */ __raw_writel(0x01, TIMERS_VIRT_BASE + TMR_ICR(0)); /* * Disable timer 0. */ __raw_writel(0x02, TIMERS_VIRT_BASE + TMR_CER); c->event_handler(c); return IRQ_HANDLED; } static int timer_set_next_event(unsigned long delta, struct clock_event_device *dev) { unsigned long flags; local_irq_save(flags); /* * Disable timer 0. 
*/ __raw_writel(0x02, TIMERS_VIRT_BASE + TMR_CER); /* * Clear and enable timer match 0 interrupt. */ __raw_writel(0x01, TIMERS_VIRT_BASE + TMR_ICR(0)); __raw_writel(0x01, TIMERS_VIRT_BASE + TMR_IER(0)); /* * Setup new clockevent timer value. */ __raw_writel(delta - 1, TIMERS_VIRT_BASE + TMR_TN_MM(0, 0)); /* * Enable timer 0. */ __raw_writel(0x03, TIMERS_VIRT_BASE + TMR_CER); local_irq_restore(flags); return 0; } static void timer_set_mode(enum clock_event_mode mode, struct clock_event_device *dev) { unsigned long flags; local_irq_save(flags); switch (mode) { case CLOCK_EVT_MODE_ONESHOT: case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_SHUTDOWN: /* disable the matching interrupt */ __raw_writel(0x00, TIMERS_VIRT_BASE + TMR_IER(0)); break; case CLOCK_EVT_MODE_RESUME: case CLOCK_EVT_MODE_PERIODIC: break; } local_irq_restore(flags); } static struct clock_event_device ckevt = { .name = "clockevent", .features = CLOCK_EVT_FEAT_ONESHOT, .shift = 32, .rating = 200, .set_next_event = timer_set_next_event, .set_mode = timer_set_mode, }; static cycle_t clksrc_read(struct clocksource *cs) { return timer_read(); } static struct clocksource cksrc = { .name = "clocksource", .rating = 200, .read = clksrc_read, .mask = CLOCKSOURCE_MASK(32), .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; static void __init timer_config(void) { uint32_t ccr = __raw_readl(TIMERS_VIRT_BASE + TMR_CCR); __raw_writel(0x0, TIMERS_VIRT_BASE + TMR_CER); /* disable */ ccr &= (cpu_is_mmp2()) ? 
(TMR_CCR_CS_0(0) | TMR_CCR_CS_1(0)) : (TMR_CCR_CS_0(3) | TMR_CCR_CS_1(3)); __raw_writel(ccr, TIMERS_VIRT_BASE + TMR_CCR); /* set timer 0 to periodic mode, and timer 1 to free-running mode */ __raw_writel(0x2, TIMERS_VIRT_BASE + TMR_CMR); __raw_writel(0x1, TIMERS_VIRT_BASE + TMR_PLCR(0)); /* periodic */ __raw_writel(0x7, TIMERS_VIRT_BASE + TMR_ICR(0)); /* clear status */ __raw_writel(0x0, TIMERS_VIRT_BASE + TMR_IER(0)); __raw_writel(0x0, TIMERS_VIRT_BASE + TMR_PLCR(1)); /* free-running */ __raw_writel(0x7, TIMERS_VIRT_BASE + TMR_ICR(1)); /* clear status */ __raw_writel(0x0, TIMERS_VIRT_BASE + TMR_IER(1)); /* enable timer 1 counter */ __raw_writel(0x2, TIMERS_VIRT_BASE + TMR_CER); } static struct irqaction timer_irq = { .name = "timer", .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .handler = timer_interrupt, .dev_id = &ckevt, }; void __init timer_init(int irq) { timer_config(); setup_sched_clock(mmp_read_sched_clock, 32, CLOCK_TICK_RATE); ckevt.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, ckevt.shift); ckevt.max_delta_ns = clockevent_delta2ns(MAX_DELTA, &ckevt); ckevt.min_delta_ns = clockevent_delta2ns(MIN_DELTA, &ckevt); ckevt.cpumask = cpumask_of(0); setup_irq(irq, &timer_irq); clocksource_register_hz(&cksrc, CLOCK_TICK_RATE); clockevents_register_device(&ckevt); }
gpl-2.0
noobnl/msm-jf-kernel
drivers/misc/fsa9480.c
4916
13017
/* * fsa9480.c - FSA9480 micro USB switch device driver * * Copyright (C) 2010 Samsung Electronics * Minkyu Kang <mk7.kang@samsung.com> * Wonguk Jeong <wonguk.jeong@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/platform_data/fsa9480.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/workqueue.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/pm_runtime.h> /* FSA9480 I2C registers */ #define FSA9480_REG_DEVID 0x01 #define FSA9480_REG_CTRL 0x02 #define FSA9480_REG_INT1 0x03 #define FSA9480_REG_INT2 0x04 #define FSA9480_REG_INT1_MASK 0x05 #define FSA9480_REG_INT2_MASK 0x06 #define FSA9480_REG_ADC 0x07 #define FSA9480_REG_TIMING1 0x08 #define FSA9480_REG_TIMING2 0x09 #define FSA9480_REG_DEV_T1 0x0a #define FSA9480_REG_DEV_T2 0x0b #define FSA9480_REG_BTN1 0x0c #define FSA9480_REG_BTN2 0x0d #define FSA9480_REG_CK 0x0e #define FSA9480_REG_CK_INT1 0x0f #define FSA9480_REG_CK_INT2 0x10 #define FSA9480_REG_CK_INTMASK1 0x11 #define FSA9480_REG_CK_INTMASK2 0x12 #define FSA9480_REG_MANSW1 0x13 #define FSA9480_REG_MANSW2 0x14 /* Control */ #define CON_SWITCH_OPEN (1 << 4) #define CON_RAW_DATA (1 << 3) #define CON_MANUAL_SW (1 << 2) #define CON_WAIT (1 << 1) #define CON_INT_MASK (1 << 0) #define CON_MASK (CON_SWITCH_OPEN | CON_RAW_DATA | \ CON_MANUAL_SW | CON_WAIT) /* Device Type 1 */ #define DEV_USB_OTG (1 << 7) #define DEV_DEDICATED_CHG (1 << 6) #define DEV_USB_CHG (1 << 5) #define DEV_CAR_KIT (1 << 4) #define DEV_UART (1 << 3) #define DEV_USB (1 << 2) #define DEV_AUDIO_2 (1 << 1) #define DEV_AUDIO_1 (1 << 0) #define DEV_T1_USB_MASK (DEV_USB_OTG | DEV_USB) #define DEV_T1_UART_MASK (DEV_UART) #define DEV_T1_CHARGER_MASK (DEV_DEDICATED_CHG | DEV_USB_CHG) /* Device 
Type 2 */ #define DEV_AV (1 << 6) #define DEV_TTY (1 << 5) #define DEV_PPD (1 << 4) #define DEV_JIG_UART_OFF (1 << 3) #define DEV_JIG_UART_ON (1 << 2) #define DEV_JIG_USB_OFF (1 << 1) #define DEV_JIG_USB_ON (1 << 0) #define DEV_T2_USB_MASK (DEV_JIG_USB_OFF | DEV_JIG_USB_ON) #define DEV_T2_UART_MASK (DEV_JIG_UART_OFF | DEV_JIG_UART_ON) #define DEV_T2_JIG_MASK (DEV_JIG_USB_OFF | DEV_JIG_USB_ON | \ DEV_JIG_UART_OFF | DEV_JIG_UART_ON) /* * Manual Switch * D- [7:5] / D+ [4:2] * 000: Open all / 001: USB / 010: AUDIO / 011: UART / 100: V_AUDIO */ #define SW_VAUDIO ((4 << 5) | (4 << 2)) #define SW_UART ((3 << 5) | (3 << 2)) #define SW_AUDIO ((2 << 5) | (2 << 2)) #define SW_DHOST ((1 << 5) | (1 << 2)) #define SW_AUTO ((0 << 5) | (0 << 2)) /* Interrupt 1 */ #define INT_DETACH (1 << 1) #define INT_ATTACH (1 << 0) struct fsa9480_usbsw { struct i2c_client *client; struct fsa9480_platform_data *pdata; int dev1; int dev2; int mansw; }; static struct fsa9480_usbsw *chip; static int fsa9480_write_reg(struct i2c_client *client, int reg, int value) { int ret; ret = i2c_smbus_write_byte_data(client, reg, value); if (ret < 0) dev_err(&client->dev, "%s: err %d\n", __func__, ret); return ret; } static int fsa9480_read_reg(struct i2c_client *client, int reg) { int ret; ret = i2c_smbus_read_byte_data(client, reg); if (ret < 0) dev_err(&client->dev, "%s: err %d\n", __func__, ret); return ret; } static int fsa9480_read_irq(struct i2c_client *client, int *value) { int ret; ret = i2c_smbus_read_i2c_block_data(client, FSA9480_REG_INT1, 2, (u8 *)value); *value &= 0xffff; if (ret < 0) dev_err(&client->dev, "%s: err %d\n", __func__, ret); return ret; } static void fsa9480_set_switch(const char *buf) { struct fsa9480_usbsw *usbsw = chip; struct i2c_client *client = usbsw->client; unsigned int value; unsigned int path = 0; value = fsa9480_read_reg(client, FSA9480_REG_CTRL); if (!strncmp(buf, "VAUDIO", 6)) { path = SW_VAUDIO; value &= ~CON_MANUAL_SW; } else if (!strncmp(buf, "UART", 4)) { path = 
SW_UART; value &= ~CON_MANUAL_SW; } else if (!strncmp(buf, "AUDIO", 5)) { path = SW_AUDIO; value &= ~CON_MANUAL_SW; } else if (!strncmp(buf, "DHOST", 5)) { path = SW_DHOST; value &= ~CON_MANUAL_SW; } else if (!strncmp(buf, "AUTO", 4)) { path = SW_AUTO; value |= CON_MANUAL_SW; } else { printk(KERN_ERR "Wrong command\n"); return; } usbsw->mansw = path; fsa9480_write_reg(client, FSA9480_REG_MANSW1, path); fsa9480_write_reg(client, FSA9480_REG_CTRL, value); } static ssize_t fsa9480_get_switch(char *buf) { struct fsa9480_usbsw *usbsw = chip; struct i2c_client *client = usbsw->client; unsigned int value; value = fsa9480_read_reg(client, FSA9480_REG_MANSW1); if (value == SW_VAUDIO) return sprintf(buf, "VAUDIO\n"); else if (value == SW_UART) return sprintf(buf, "UART\n"); else if (value == SW_AUDIO) return sprintf(buf, "AUDIO\n"); else if (value == SW_DHOST) return sprintf(buf, "DHOST\n"); else if (value == SW_AUTO) return sprintf(buf, "AUTO\n"); else return sprintf(buf, "%x", value); } static ssize_t fsa9480_show_device(struct device *dev, struct device_attribute *attr, char *buf) { struct fsa9480_usbsw *usbsw = dev_get_drvdata(dev); struct i2c_client *client = usbsw->client; int dev1, dev2; dev1 = fsa9480_read_reg(client, FSA9480_REG_DEV_T1); dev2 = fsa9480_read_reg(client, FSA9480_REG_DEV_T2); if (!dev1 && !dev2) return sprintf(buf, "NONE\n"); /* USB */ if (dev1 & DEV_T1_USB_MASK || dev2 & DEV_T2_USB_MASK) return sprintf(buf, "USB\n"); /* UART */ if (dev1 & DEV_T1_UART_MASK || dev2 & DEV_T2_UART_MASK) return sprintf(buf, "UART\n"); /* CHARGER */ if (dev1 & DEV_T1_CHARGER_MASK) return sprintf(buf, "CHARGER\n"); /* JIG */ if (dev2 & DEV_T2_JIG_MASK) return sprintf(buf, "JIG\n"); return sprintf(buf, "UNKNOWN\n"); } static ssize_t fsa9480_show_manualsw(struct device *dev, struct device_attribute *attr, char *buf) { return fsa9480_get_switch(buf); } static ssize_t fsa9480_set_manualsw(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { 
fsa9480_set_switch(buf); return count; } static DEVICE_ATTR(device, S_IRUGO, fsa9480_show_device, NULL); static DEVICE_ATTR(switch, S_IRUGO | S_IWUSR, fsa9480_show_manualsw, fsa9480_set_manualsw); static struct attribute *fsa9480_attributes[] = { &dev_attr_device.attr, &dev_attr_switch.attr, NULL }; static const struct attribute_group fsa9480_group = { .attrs = fsa9480_attributes, }; static void fsa9480_detect_dev(struct fsa9480_usbsw *usbsw, int intr) { int val1, val2, ctrl; struct fsa9480_platform_data *pdata = usbsw->pdata; struct i2c_client *client = usbsw->client; val1 = fsa9480_read_reg(client, FSA9480_REG_DEV_T1); val2 = fsa9480_read_reg(client, FSA9480_REG_DEV_T2); ctrl = fsa9480_read_reg(client, FSA9480_REG_CTRL); dev_info(&client->dev, "intr: 0x%x, dev1: 0x%x, dev2: 0x%x\n", intr, val1, val2); if (!intr) goto out; if (intr & INT_ATTACH) { /* Attached */ /* USB */ if (val1 & DEV_T1_USB_MASK || val2 & DEV_T2_USB_MASK) { if (pdata->usb_cb) pdata->usb_cb(FSA9480_ATTACHED); if (usbsw->mansw) { fsa9480_write_reg(client, FSA9480_REG_MANSW1, usbsw->mansw); } } /* UART */ if (val1 & DEV_T1_UART_MASK || val2 & DEV_T2_UART_MASK) { if (pdata->uart_cb) pdata->uart_cb(FSA9480_ATTACHED); if (!(ctrl & CON_MANUAL_SW)) { fsa9480_write_reg(client, FSA9480_REG_MANSW1, SW_UART); } } /* CHARGER */ if (val1 & DEV_T1_CHARGER_MASK) { if (pdata->charger_cb) pdata->charger_cb(FSA9480_ATTACHED); } /* JIG */ if (val2 & DEV_T2_JIG_MASK) { if (pdata->jig_cb) pdata->jig_cb(FSA9480_ATTACHED); } } else if (intr & INT_DETACH) { /* Detached */ /* USB */ if (usbsw->dev1 & DEV_T1_USB_MASK || usbsw->dev2 & DEV_T2_USB_MASK) { if (pdata->usb_cb) pdata->usb_cb(FSA9480_DETACHED); } /* UART */ if (usbsw->dev1 & DEV_T1_UART_MASK || usbsw->dev2 & DEV_T2_UART_MASK) { if (pdata->uart_cb) pdata->uart_cb(FSA9480_DETACHED); } /* CHARGER */ if (usbsw->dev1 & DEV_T1_CHARGER_MASK) { if (pdata->charger_cb) pdata->charger_cb(FSA9480_DETACHED); } /* JIG */ if (usbsw->dev2 & DEV_T2_JIG_MASK) { if (pdata->jig_cb) 
pdata->jig_cb(FSA9480_DETACHED); } } usbsw->dev1 = val1; usbsw->dev2 = val2; out: ctrl &= ~CON_INT_MASK; fsa9480_write_reg(client, FSA9480_REG_CTRL, ctrl); } static irqreturn_t fsa9480_irq_handler(int irq, void *data) { struct fsa9480_usbsw *usbsw = data; struct i2c_client *client = usbsw->client; int intr; /* clear interrupt */ fsa9480_read_irq(client, &intr); /* device detection */ fsa9480_detect_dev(usbsw, intr); return IRQ_HANDLED; } static int fsa9480_irq_init(struct fsa9480_usbsw *usbsw) { struct fsa9480_platform_data *pdata = usbsw->pdata; struct i2c_client *client = usbsw->client; int ret; int intr; unsigned int ctrl = CON_MASK; /* clear interrupt */ fsa9480_read_irq(client, &intr); /* unmask interrupt (attach/detach only) */ fsa9480_write_reg(client, FSA9480_REG_INT1_MASK, 0xfc); fsa9480_write_reg(client, FSA9480_REG_INT2_MASK, 0x1f); usbsw->mansw = fsa9480_read_reg(client, FSA9480_REG_MANSW1); if (usbsw->mansw) ctrl &= ~CON_MANUAL_SW; /* Manual Switching Mode */ fsa9480_write_reg(client, FSA9480_REG_CTRL, ctrl); if (pdata && pdata->cfg_gpio) pdata->cfg_gpio(); if (client->irq) { ret = request_threaded_irq(client->irq, NULL, fsa9480_irq_handler, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "fsa9480 micro USB", usbsw); if (ret) { dev_err(&client->dev, "failed to reqeust IRQ\n"); return ret; } if (pdata) device_init_wakeup(&client->dev, pdata->wakeup); } return 0; } static int __devinit fsa9480_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); struct fsa9480_usbsw *usbsw; int ret = 0; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; usbsw = kzalloc(sizeof(struct fsa9480_usbsw), GFP_KERNEL); if (!usbsw) { dev_err(&client->dev, "failed to allocate driver data\n"); return -ENOMEM; } usbsw->client = client; usbsw->pdata = client->dev.platform_data; chip = usbsw; i2c_set_clientdata(client, usbsw); ret = fsa9480_irq_init(usbsw); if (ret) goto fail1; ret = 
sysfs_create_group(&client->dev.kobj, &fsa9480_group); if (ret) { dev_err(&client->dev, "failed to create fsa9480 attribute group\n"); goto fail2; } /* ADC Detect Time: 500ms */ fsa9480_write_reg(client, FSA9480_REG_TIMING1, 0x6); if (chip->pdata->reset_cb) chip->pdata->reset_cb(); /* device detection */ fsa9480_detect_dev(usbsw, INT_ATTACH); pm_runtime_set_active(&client->dev); return 0; fail2: if (client->irq) free_irq(client->irq, usbsw); fail1: kfree(usbsw); return ret; } static int __devexit fsa9480_remove(struct i2c_client *client) { struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client); if (client->irq) free_irq(client->irq, usbsw); sysfs_remove_group(&client->dev.kobj, &fsa9480_group); device_init_wakeup(&client->dev, 0); kfree(usbsw); return 0; } #ifdef CONFIG_PM static int fsa9480_suspend(struct i2c_client *client, pm_message_t state) { struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client); struct fsa9480_platform_data *pdata = usbsw->pdata; if (device_may_wakeup(&client->dev) && client->irq) enable_irq_wake(client->irq); if (pdata->usb_power) pdata->usb_power(0); return 0; } static int fsa9480_resume(struct i2c_client *client) { struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client); int dev1, dev2; if (device_may_wakeup(&client->dev) && client->irq) disable_irq_wake(client->irq); /* * Clear Pending interrupt. Note that detect_dev does what * the interrupt handler does. So, we don't miss pending and * we reenable interrupt if there is one. */ fsa9480_read_reg(client, FSA9480_REG_INT1); fsa9480_read_reg(client, FSA9480_REG_INT2); dev1 = fsa9480_read_reg(client, FSA9480_REG_DEV_T1); dev2 = fsa9480_read_reg(client, FSA9480_REG_DEV_T2); /* device detection */ fsa9480_detect_dev(usbsw, (dev1 || dev2) ? 
INT_ATTACH : INT_DETACH); return 0; } #else #define fsa9480_suspend NULL #define fsa9480_resume NULL #endif /* CONFIG_PM */ static const struct i2c_device_id fsa9480_id[] = { {"fsa9480", 0}, {} }; MODULE_DEVICE_TABLE(i2c, fsa9480_id); static struct i2c_driver fsa9480_i2c_driver = { .driver = { .name = "fsa9480", }, .probe = fsa9480_probe, .remove = __devexit_p(fsa9480_remove), .resume = fsa9480_resume, .suspend = fsa9480_suspend, .id_table = fsa9480_id, }; module_i2c_driver(fsa9480_i2c_driver); MODULE_AUTHOR("Minkyu Kang <mk7.kang@samsung.com>"); MODULE_DESCRIPTION("FSA9480 USB Switch driver"); MODULE_LICENSE("GPL");
gpl-2.0
Hashcode/kernel_omap
drivers/net/wan/sealevel.c
7476
8108
/* * Sealevel Systems 4021 driver. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * (c) Copyright 1999, 2001 Alan Cox * (c) Copyright 2001 Red Hat Inc. * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl> * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/net.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/delay.h> #include <linux/hdlc.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/slab.h> #include <net/arp.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/byteorder.h> #include "z85230.h" struct slvl_device { struct z8530_channel *chan; int channel; }; struct slvl_board { struct slvl_device dev[2]; struct z8530_dev board; int iobase; }; /* * Network driver support routines */ static inline struct slvl_device* dev_to_chan(struct net_device *dev) { return (struct slvl_device *)dev_to_hdlc(dev)->priv; } /* * Frame receive. Simple for our card as we do HDLC and there * is no funny garbage involved */ static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb) { /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ skb_trim(skb, skb->len - 2); skb->protocol = hdlc_type_trans(skb, c->netdevice); skb_reset_mac_header(skb); skb->dev = c->netdevice; netif_rx(skb); } /* * We've been placed in the UP state */ static int sealevel_open(struct net_device *d) { struct slvl_device *slvl = dev_to_chan(d); int err = -1; int unit = slvl->channel; /* * Link layer up. 
*/ switch (unit) { case 0: err = z8530_sync_dma_open(d, slvl->chan); break; case 1: err = z8530_sync_open(d, slvl->chan); break; } if (err) return err; err = hdlc_open(d); if (err) { switch (unit) { case 0: z8530_sync_dma_close(d, slvl->chan); break; case 1: z8530_sync_close(d, slvl->chan); break; } return err; } slvl->chan->rx_function = sealevel_input; /* * Go go go */ netif_start_queue(d); return 0; } static int sealevel_close(struct net_device *d) { struct slvl_device *slvl = dev_to_chan(d); int unit = slvl->channel; /* * Discard new frames */ slvl->chan->rx_function = z8530_null_rx; hdlc_close(d); netif_stop_queue(d); switch (unit) { case 0: z8530_sync_dma_close(d, slvl->chan); break; case 1: z8530_sync_close(d, slvl->chan); break; } return 0; } static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) { /* struct slvl_device *slvl=dev_to_chan(d); z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) */ return hdlc_ioctl(d, ifr, cmd); } /* * Passed network frames, fire them downwind. 
*/ static netdev_tx_t sealevel_queue_xmit(struct sk_buff *skb, struct net_device *d) { return z8530_queue_xmit(dev_to_chan(d)->chan, skb); } static int sealevel_attach(struct net_device *dev, unsigned short encoding, unsigned short parity) { if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT) return 0; return -EINVAL; } static const struct net_device_ops sealevel_ops = { .ndo_open = sealevel_open, .ndo_stop = sealevel_close, .ndo_change_mtu = hdlc_change_mtu, .ndo_start_xmit = hdlc_start_xmit, .ndo_do_ioctl = sealevel_ioctl, }; static int slvl_setup(struct slvl_device *sv, int iobase, int irq) { struct net_device *dev = alloc_hdlcdev(sv); if (!dev) return -1; dev_to_hdlc(dev)->attach = sealevel_attach; dev_to_hdlc(dev)->xmit = sealevel_queue_xmit; dev->netdev_ops = &sealevel_ops; dev->base_addr = iobase; dev->irq = irq; if (register_hdlc_device(dev)) { pr_err("unable to register HDLC device\n"); free_netdev(dev); return -1; } sv->chan->netdevice = dev; return 0; } /* * Allocate and setup Sealevel board. */ static __init struct slvl_board *slvl_init(int iobase, int irq, int txdma, int rxdma, int slow) { struct z8530_dev *dev; struct slvl_board *b; /* * Get the needed I/O space */ if (!request_region(iobase, 8, "Sealevel 4021")) { pr_warn("I/O 0x%X already in use\n", iobase); return NULL; } b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL); if (!b) goto err_kzalloc; b->dev[0].chan = &b->board.chanA; b->dev[0].channel = 0; b->dev[1].chan = &b->board.chanB; b->dev[1].channel = 1; dev = &b->board; /* * Stuff in the I/O addressing */ dev->active = 0; b->iobase = iobase; /* * Select 8530 delays for the old board */ if (slow) iobase |= Z8530_PORT_SLEEP; dev->chanA.ctrlio = iobase + 1; dev->chanA.dataio = iobase; dev->chanB.ctrlio = iobase + 3; dev->chanB.dataio = iobase + 2; dev->chanA.irqs = &z8530_nop; dev->chanB.irqs = &z8530_nop; /* * Assert DTR enable DMA */ outb(3 | (1 << 7), b->iobase + 4); /* We want a fast IRQ for this device. 
Actually we'd like an even faster IRQ ;) - This is one driver RtLinux is made for */ if (request_irq(irq, z8530_interrupt, IRQF_DISABLED, "SeaLevel", dev) < 0) { pr_warn("IRQ %d already in use\n", irq); goto err_request_irq; } dev->irq = irq; dev->chanA.private = &b->dev[0]; dev->chanB.private = &b->dev[1]; dev->chanA.dev = dev; dev->chanB.dev = dev; dev->chanA.txdma = 3; dev->chanA.rxdma = 1; if (request_dma(dev->chanA.txdma, "SeaLevel (TX)")) goto err_dma_tx; if (request_dma(dev->chanA.rxdma, "SeaLevel (RX)")) goto err_dma_rx; disable_irq(irq); /* * Begin normal initialise */ if (z8530_init(dev) != 0) { pr_err("Z8530 series device not found\n"); enable_irq(irq); goto free_hw; } if (dev->type == Z85C30) { z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream); z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream); } else { z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230); z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230); } /* * Now we can take the IRQ */ enable_irq(irq); if (slvl_setup(&b->dev[0], iobase, irq)) goto free_hw; if (slvl_setup(&b->dev[1], iobase, irq)) goto free_netdev0; z8530_describe(dev, "I/O", iobase); dev->active = 1; return b; free_netdev0: unregister_hdlc_device(b->dev[0].chan->netdevice); free_netdev(b->dev[0].chan->netdevice); free_hw: free_dma(dev->chanA.rxdma); err_dma_rx: free_dma(dev->chanA.txdma); err_dma_tx: free_irq(irq, dev); err_request_irq: kfree(b); err_kzalloc: release_region(iobase, 8); return NULL; } static void __exit slvl_shutdown(struct slvl_board *b) { int u; z8530_shutdown(&b->board); for (u = 0; u < 2; u++) { struct net_device *d = b->dev[u].chan->netdevice; unregister_hdlc_device(d); free_netdev(d); } free_irq(b->board.irq, &b->board); free_dma(b->board.chanA.rxdma); free_dma(b->board.chanA.txdma); /* DMA off on the card, drop DTR */ outb(0, b->iobase); release_region(b->iobase, 8); kfree(b); } static int io=0x238; static int txdma=1; static int rxdma=3; static int irq=5; static bool slow=false; 
module_param(io, int, 0); MODULE_PARM_DESC(io, "The I/O base of the Sealevel card"); module_param(txdma, int, 0); MODULE_PARM_DESC(txdma, "Transmit DMA channel"); module_param(rxdma, int, 0); MODULE_PARM_DESC(rxdma, "Receive DMA channel"); module_param(irq, int, 0); MODULE_PARM_DESC(irq, "The interrupt line setting for the SeaLevel card"); module_param(slow, bool, 0); MODULE_PARM_DESC(slow, "Set this for an older Sealevel card such as the 4012"); MODULE_AUTHOR("Alan Cox"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Modular driver for the SeaLevel 4021"); static struct slvl_board *slvl_unit; static int __init slvl_init_module(void) { slvl_unit = slvl_init(io, irq, txdma, rxdma, slow); return slvl_unit ? 0 : -ENODEV; } static void __exit slvl_cleanup_module(void) { if (slvl_unit) slvl_shutdown(slvl_unit); } module_init(slvl_init_module); module_exit(slvl_cleanup_module);
gpl-2.0
caoyuhua/Linux-3.10.28
drivers/mtd/chips/map_absent.c
9780
2871
/* * Common code to handle absent "placeholder" devices * Copyright 2001 Resilience Corporation <ebrower@resilience.com> * * This map driver is used to allocate "placeholder" MTD * devices on systems that have socketed/removable media. * Use of this driver as a fallback preserves the expected * registration of MTD device nodes regardless of probe outcome. * A usage example is as follows: * * my_dev[i] = do_map_probe("cfi", &my_map[i]); * if(NULL == my_dev[i]) { * my_dev[i] = do_map_probe("map_absent", &my_map[i]); * } * * Any device 'probed' with this driver will return -ENODEV * upon open. */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> static int map_absent_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); static int map_absent_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *); static int map_absent_erase (struct mtd_info *, struct erase_info *); static void map_absent_sync (struct mtd_info *); static struct mtd_info *map_absent_probe(struct map_info *map); static void map_absent_destroy (struct mtd_info *); static struct mtd_chip_driver map_absent_chipdrv = { .probe = map_absent_probe, .destroy = map_absent_destroy, .name = "map_absent", .module = THIS_MODULE }; static struct mtd_info *map_absent_probe(struct map_info *map) { struct mtd_info *mtd; mtd = kzalloc(sizeof(*mtd), GFP_KERNEL); if (!mtd) { return NULL; } map->fldrv = &map_absent_chipdrv; mtd->priv = map; mtd->name = map->name; mtd->type = MTD_ABSENT; mtd->size = map->size; mtd->_erase = map_absent_erase; mtd->_read = map_absent_read; mtd->_write = map_absent_write; mtd->_sync = map_absent_sync; mtd->flags = 0; mtd->erasesize = PAGE_SIZE; mtd->writesize = 1; __module_get(THIS_MODULE); return mtd; } static int map_absent_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { return -ENODEV; 
} static int map_absent_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { return -ENODEV; } static int map_absent_erase(struct mtd_info *mtd, struct erase_info *instr) { return -ENODEV; } static void map_absent_sync(struct mtd_info *mtd) { /* nop */ } static void map_absent_destroy(struct mtd_info *mtd) { /* nop */ } static int __init map_absent_init(void) { register_mtd_chip_driver(&map_absent_chipdrv); return 0; } static void __exit map_absent_exit(void) { unregister_mtd_chip_driver(&map_absent_chipdrv); } module_init(map_absent_init); module_exit(map_absent_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Resilience Corporation - Eric Brower <ebrower@resilience.com>"); MODULE_DESCRIPTION("Placeholder MTD chip driver for 'absent' chips");
gpl-2.0
kozmikkick/KozmiKKerneL-KitKat
arch/arm/mach-msm/mdm_common.c
53
33511
/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/mutex.h> #include <linux/miscdevice.h> #include <linux/fs.h> #include <linux/gpio.h> #include <linux/kernel.h> #include <linux/irq.h> #include <linux/ioctl.h> #include <linux/delay.h> #include <linux/reboot.h> #include <linux/debugfs.h> #include <linux/completion.h> #include <linux/workqueue.h> #include <linux/clk.h> #include <linux/mfd/pmic8058.h> #include <asm/mach-types.h> #include <asm/uaccess.h> #include <mach/mdm2.h> #include <mach/restart.h> #include <mach/subsystem_notif.h> #include <mach/subsystem_restart.h> #include <linux/msm_charm.h> #include <mach/msm_watchdog.h> #include "mdm_private.h" #include "sysmon.h" #include <mach/msm_rtb.h> #include <linux/proc_fs.h> #include <mach/board_htc.h> #ifdef CONFIG_HTC_POWEROFF_MODEM_IN_OFFMODE_CHARGING enum { BOARD_MFG_MODE_NORMAL = 0, BOARD_MFG_MODE_FACTORY2, BOARD_MFG_MODE_RECOVERY, BOARD_MFG_MODE_CHARGE, BOARD_MFG_MODE_POWERTEST, BOARD_MFG_MODE_OFFMODE_CHARGING, BOARD_MFG_MODE_MFGKERNEL, BOARD_MFG_MODE_MODEM_CALIBRATION, }; #endif #if defined(pr_warn) #undef pr_warn #endif #define pr_warn(x...) do { \ printk(KERN_WARN "[MDM][COMM] "x); \ } while (0) #if defined(pr_debug) #undef pr_debug #endif #define pr_debug(x...) do { \ printk(KERN_DEBUG "[MDM][COMM] "x); \ } while (0) #if defined(pr_info) #undef pr_info #endif #define pr_info(x...) 
do { \ printk(KERN_INFO "[MDM][COMM] "x); \ } while (0) #if defined(pr_err) #undef pr_err #endif #define pr_err(x...) do { \ printk(KERN_ERR "[MDM][COMM] "x); \ } while (0) #define HTC_MDM_ERROR_CONFIRM_TIME_MS 10 #define MDM_MODEM_TIMEOUT 6000 #define MDM_MODEM_DELTA 100 #define MDM_BOOT_TIMEOUT 60000L #define MDM_RDUMP_TIMEOUT 180000L static int mdm_debug_on; static struct workqueue_struct *mdm_queue; static struct workqueue_struct *mdm_sfr_queue; static void mdm_status_fn(struct work_struct *work); static void dump_mdm_related_gpio(void); static DECLARE_WORK(mdm_status_work, mdm_status_fn); static struct workqueue_struct *mdm_gpio_monitor_queue; static bool mdm_status_change_notified; #define EXTERNAL_MODEM "external_modem" static struct mdm_modem_drv *mdm_drv; DECLARE_COMPLETION(mdm_needs_reload); DECLARE_COMPLETION(mdm_boot); DECLARE_COMPLETION(mdm_ram_dumps); static int first_boot = 1; #define RD_BUF_SIZE 100 #define SFR_MAX_RETRIES 10 #define SFR_RETRY_INTERVAL 1000 #ifdef CONFIG_HTC_STORE_MODEM_RESET_INFO #define MODEM_ERRMSG_LIST_LEN 10 struct mdm_msr_info { int valid; struct timespec msr_time; char modem_errmsg[RD_BUF_SIZE]; }; int mdm_msr_index = 0; static spinlock_t msr_info_lock; static struct mdm_msr_info msr_info_list[MODEM_ERRMSG_LIST_LEN]; #endif static void dump_gpio(char *name, unsigned int gpio); static int set_mdm_errmsg(void __user *msg); static int notify_mdm_nv_write_done(void); static int mdm_loaded_status_proc(char *page, char **start, off_t off, int count, int *eof, void *data) { int ret; char *p = page; if (off > 0) { ret = 0; } else { p += sprintf(p, "%d\n", mdm_drv->mdm_ready); ret = p - page; } return ret; } static void mdm_loaded_info(void) { struct proc_dir_entry *entry = NULL; entry = create_proc_read_entry("mdm9k_status", 0, NULL, mdm_loaded_status_proc, NULL); } #ifdef CONFIG_HTC_STORE_MODEM_RESET_INFO static ssize_t modem_silent_reset_info_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { 
int len = 0;
	unsigned long flags;

	spin_lock_irqsave(&msr_info_lock, flags);
	msr_info_list[mdm_msr_index].valid = 1;
	msr_info_list[mdm_msr_index].msr_time = current_kernel_time();
	snprintf(msr_info_list[mdm_msr_index].modem_errmsg, RD_BUF_SIZE, "%s", buf);
	/* strip a trailing newline from the stored message */
	len = strlen(msr_info_list[mdm_msr_index].modem_errmsg);
	if(msr_info_list[mdm_msr_index].modem_errmsg[len-1] == '\n') {
		msr_info_list[mdm_msr_index].modem_errmsg[len-1] = '\0';
	}
	/* advance the ring-buffer cursor, wrapping at the list length */
	if(++mdm_msr_index >= MODEM_ERRMSG_LIST_LEN) {
		mdm_msr_index = 0;
	}
	spin_unlock_irqrestore(&msr_info_lock, flags);
	return count;
}

/*
 * sysfs show: emit every valid recorded reset message as
 * "<seconds>-<message>|", then clear the whole list.
 */
static ssize_t modem_silent_reset_info_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int i = 0;
	char tmp[RD_BUF_SIZE+30];
	unsigned long flags;

	spin_lock_irqsave(&msr_info_lock, flags);
	for( i=0; i<MODEM_ERRMSG_LIST_LEN; i++ ) {
		if( msr_info_list[i].valid != 0 ) {
			snprintf(tmp, RD_BUF_SIZE+30, "%ld-%s|\n\r",
				msr_info_list[i].msr_time.tv_sec,
				msr_info_list[i].modem_errmsg);
			strcat(buf, tmp);
			memset(tmp, 0, RD_BUF_SIZE+30);
		}
		/* reading consumes the entries */
		msr_info_list[i].valid = 0;
		memset(msr_info_list[i].modem_errmsg, 0, RD_BUF_SIZE);
	}
	strcat(buf, "\n\r\0");
	spin_unlock_irqrestore(&msr_info_lock, flags);
	return strlen(buf);
}
static DEVICE_ATTR(msr_info, S_IRUSR | S_IROTH | S_IRGRP | S_IWUSR,
	modem_silent_reset_info_show, modem_silent_reset_info_store);

static struct kobject *subsystem_restart_reason_obj;

/* Aliases of the msr_info store/show pair under a second sysfs name */
static ssize_t mdm_subsystem_restart_reason_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	return modem_silent_reset_info_store(dev, attr, buf, count);
}

static ssize_t mdm_subsystem_restart_reason_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return modem_silent_reset_info_show(dev, attr, buf);
}
static DEVICE_ATTR(subsystem_restart_reason_nonblock,
	S_IRUSR | S_IROTH | S_IRGRP | S_IWUSR,
	mdm_subsystem_restart_reason_show, mdm_subsystem_restart_reason_store);

/* Create /sys/subsystem_restart_properties and attach the reason file */
static int mdm_subsystem_restart_properties_init(void)
{
	int ret = 0;

	subsystem_restart_reason_obj =
		kobject_create_and_add("subsystem_restart_properties", NULL);
	if (subsystem_restart_reason_obj == NULL) {
		pr_info("kobject_create_and_add: subsystem_restart_properties failed\n");
		return -EFAULT;
	}

	ret = sysfs_create_file(subsystem_restart_reason_obj,
		&dev_attr_subsystem_restart_reason_nonblock.attr);
	if (ret) {
		pr_info("sysfs_create_file: subsystem_restart_reason_nonblock failed\n");
		return -EFAULT;
	}

	return 0;
}

/* Tear down the sysfs objects created above */
static int mdm_subsystem_restart_properties_release(void)
{
	if(subsystem_restart_reason_obj != NULL) {
		sysfs_remove_file(subsystem_restart_reason_obj,
			&dev_attr_subsystem_restart_reason_nonblock.attr);
		kobject_put(subsystem_restart_reason_obj);
	}
	return 0;
}

/* Reset the msr ring buffer and create the per-device msr_info attribute */
static int modem_silent_reset_info_sysfs_attrs(struct platform_device *pdev)
{
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&msr_info_lock, flags);
	mdm_msr_index = 0;
	for( i=0; i<MODEM_ERRMSG_LIST_LEN; i++ ) {
		msr_info_list[i].valid = 0;
		memset(msr_info_list[i].modem_errmsg, 0, RD_BUF_SIZE);
	}
	spin_unlock_irqrestore(&msr_info_lock, flags);

	return device_create_file(&pdev->dev, &dev_attr_msr_info);
}
#endif

/*
 * Work item: poll sysmon for the modem restart reason, retrying up to
 * SFR_MAX_RETRIES times with SFR_RETRY_INTERVAL ms between attempts.
 */
static void mdm_restart_reason_fn(struct work_struct *work)
{
	int ret, ntries = 0;
	char sfr_buf[RD_BUF_SIZE];
#ifdef CONFIG_HTC_STORE_MODEM_RESET_INFO
	unsigned long flags;
#endif

	do {
		msleep(SFR_RETRY_INTERVAL);
#if defined(CONFIG_BUILD_EDIAG)
		pr_info("SYSMON is supposed to be used as char dev with specific purpose.\n");
		ret = -EINVAL;
#else
		ret = sysmon_get_reason(SYSMON_SS_EXT_MODEM,
					sfr_buf, sizeof(sfr_buf));
#endif
		if (ret) {
			pr_err("%s: Error retrieving mdm restart reason, ret = %d, "
					"%d/%d tries\n", __func__, ret,
					ntries + 1, SFR_MAX_RETRIES);
		} else {
			pr_err("mdm restart reason: %s\n", sfr_buf);
#ifdef CONFIG_HTC_STORE_MODEM_RESET_INFO
			/* also record the reason in the msr ring buffer */
			spin_lock_irqsave(&msr_info_lock, flags);
			msr_info_list[mdm_msr_index].valid = 1;
			msr_info_list[mdm_msr_index].msr_time = current_kernel_time();
			snprintf(msr_info_list[mdm_msr_index].modem_errmsg,
				RD_BUF_SIZE, "%s", sfr_buf);
			if(++mdm_msr_index >= MODEM_ERRMSG_LIST_LEN) {
				mdm_msr_index = 0;
			}
			spin_unlock_irqrestore(&msr_info_lock, flags);
#endif
			break;
		}
	} while (++ntries < SFR_MAX_RETRIES);
}

static DECLARE_WORK(sfr_reason_work, mdm_restart_reason_fn);

/*
 * Work item: 3 s after boot completion, if no status-change interrupt
 * was seen, force one status evaluation when the status GPIO reads high.
 */
static void mdm_status_check_fn(struct work_struct *work)
{
	int value = 0;

	msleep(3000);
	pr_info("%s mdm_status_change notified? %c\n", __func__,
		mdm_status_change_notified ? 'Y': 'N');
	if (!mdm_status_change_notified) {
		dump_mdm_related_gpio();
		value = gpio_get_value(mdm_drv->mdm2ap_status_gpio);
		if (value == 1)
			queue_work_on(0, mdm_queue, &mdm_status_work);
	}
}

static DECLARE_WORK(mdm_status_check_work, mdm_status_check_fn);

#define MDM2AP_STATUS_CHANGE_MAX_WAIT_TIME 5000
#define MDM2AP_STATUS_CHANGE_TOTAL_CHECK_TIME 20

/*
 * Poll (up to ~5 s) until the MDM2AP status GPIO is high AND the HSIC
 * link has reconnected. Returns 1 on success, 0 on timeout.
 */
static int mdm_hsic_reconnectd_check_fn(void)
{
	int ret = 0;
	int i = 0, value = 0;

	for ( i = 0; i <= MDM2AP_STATUS_CHANGE_TOTAL_CHECK_TIME; i++ ) {
		value = gpio_get_value( mdm_drv->mdm2ap_status_gpio );
		if ( value == 1 && mdm_drv->mdm_hsic_reconnectd == 1 ) {
			ret = 1;
			break;
		}
		if ( i < MDM2AP_STATUS_CHANGE_TOTAL_CHECK_TIME ) {
			msleep( (MDM2AP_STATUS_CHANGE_MAX_WAIT_TIME / MDM2AP_STATUS_CHANGE_TOTAL_CHECK_TIME) );
		}
	}

	if ( ret == 0 ) {
		pr_info("%s: return 0, mdm_hsic_reconnectd=[%d]\n",
			__func__, mdm_drv->mdm_hsic_reconnectd);
	} else {
		pr_info("%s: return 1\n", __func__);
	}

	return ret;
}

/*
 * ioctl entry point for /dev/mdm: modem power control, boot/ramdump
 * handshaking with the userspace loader, and HTC-specific queries.
 */
long mdm_modem_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	int status, ret = 0;

	if (_IOC_TYPE(cmd) != CHARM_CODE) {
		pr_err("%s: invalid ioctl code\n", __func__);
		return -EINVAL;
	}

	pr_debug("%s: Entering ioctl cmd = %d\n", __func__, _IOC_NR(cmd));
	switch (cmd) {
	case WAKE_CHARM:
		/* power the modem on; readiness is reported later */
		pr_info("%s: Powering on mdm\n", __func__);
		mdm_drv->mdm_ready = 0;
		mdm_drv->mdm_hsic_reconnectd = 0;
		mdm_drv->ops->power_on_mdm_cb(mdm_drv);
		break;
	case CHECK_FOR_BOOT:
		/* tell userspace whether the modem still needs booting */
		if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
			put_user(1, (unsigned long __user *) arg);
		else
			put_user(0, (unsigned long __user *) arg);
		break;
	case NORMAL_BOOT_DONE:
		{
			int ret_mdm_hsic_reconnectd = 0;

			pr_debug("%s: check if mdm is booted up\n", __func__);
			get_user(status, (unsigned long __user *) arg);
			if (status) {
				pr_debug("%s: normal boot failed\n", __func__);
				mdm_drv->mdm_boot_status = -EIO;
			} else {
				pr_info("%s: normal boot done\n", __func__);
				mdm_drv->mdm_boot_status = 0;
			}
			/* arm the delayed GPIO monitor for this boot */
			mdm_status_change_notified = false;
			queue_work_on(0, mdm_gpio_monitor_queue, &mdm_status_check_work);
			mdm_drv->mdm_ready = 1;

			if (mdm_drv->ops->normal_boot_done_cb != NULL)
				mdm_drv->ops->normal_boot_done_cb(mdm_drv);

			ret_mdm_hsic_reconnectd = mdm_hsic_reconnectd_check_fn();
			if ( ret_mdm_hsic_reconnectd == 1 ) {
				pr_info("%s: ret_mdm_hsic_reconnectd == 1\n", __func__);
			} else {
				pr_info("%s: ret_mdm_hsic_reconnectd == 0\n", __func__);
			}

			/* the very first boot has no waiter on mdm_boot */
			if (!first_boot)
				complete(&mdm_boot);
			else
				first_boot = 0;
		}
		break;
	case RAM_DUMP_DONE:
		pr_debug("%s: mdm done collecting RAM dumps\n", __func__);
		get_user(status, (unsigned long __user *) arg);
		if (status)
			mdm_drv->mdm_ram_dump_status = -EIO;
		else {
			pr_info("%s: ramdump collection completed\n", __func__);
			mdm_drv->mdm_ram_dump_status = 0;
		}
		complete(&mdm_ram_dumps);
		break;
	case WAIT_FOR_RESTART:
		/* block until a restart requests an image reload */
		pr_debug("%s: wait for mdm to need images reloaded\n",
				__func__);
		if (mdm_drv) {
			dump_gpio("MDM2AP_STATUS", mdm_drv->mdm2ap_status_gpio);
			dump_gpio("MDM2AP_ERRFATAL", mdm_drv->mdm2ap_errfatal_gpio);
		}
		ret = wait_for_completion_interruptible(&mdm_needs_reload);
		if (!ret && mdm_drv) {
			put_user(mdm_drv->boot_type,
					 (unsigned long __user *) arg);
			pr_err("%s: mdm_drv->boot_type:%d\n", __func__, mdm_drv->boot_type);
		}
		INIT_COMPLETION(mdm_needs_reload);
		break;
	case GET_MFG_MODE:
		pr_info("%s: board_mfg_mode()=%d\n", __func__, board_mfg_mode());
		put_user(board_mfg_mode(), (unsigned long __user *) arg);
		break;
	case SET_MODEM_ERRMSG:
		pr_info("%s: Set modem fatal errmsg\n", __func__);
		ret = set_mdm_errmsg((void __user *) arg);
		break;
	case GET_RADIO_FLAG:
		pr_info("%s:get_radio_flag()=%x\n", __func__, get_radio_flag());
		if ((get_radio_flag() & RADIO_FLAG_USB_UPLOAD) && mdm_drv != NULL) {
			pr_info("AP2MDM_STATUS GPIO:%d\n", mdm_drv->ap2mdm_status_gpio);
			pr_info("AP2MDM_ERRFATAL GPIO:%d\n", mdm_drv->ap2mdm_errfatal_gpio);
			pr_info("AP2MDM_PMIC_RESET_N GPIO:%d\n", mdm_drv->ap2mdm_pmic_reset_n_gpio);
			pr_info("MDM2AP_STATUS GPIO:%d\n", mdm_drv->mdm2ap_status_gpio);
			pr_info("MDM2AP_ERRFATAL GPIO:%d\n", mdm_drv->mdm2ap_errfatal_gpio);
			pr_info("MDM2AP_HSIC_READY GPIO:%d\n", mdm_drv->mdm2ap_hsic_ready_gpio);
			pr_info("AP2MDM_IPC1 GPIO:%d\n", mdm_drv->ap2mdm_ipc1_gpio);
		}
		put_user(get_radio_flag(), (unsigned long __user *) arg);
		break;
	case EFS_SYNC_DONE:
		pr_info("%s: efs sync is done\n", __func__);
		break;
	case NV_WRITE_DONE:
		pr_info("%s: NV write done!\n", __func__);
		notify_mdm_nv_write_done();
		break;
	case HTC_POWER_OFF_CHARM:
		pr_info("%s: (HTC_POWER_OFF_CHARM)Powering off mdm\n", __func__);
		if ( mdm_drv->ops->htc_power_down_mdm_cb ) {
			mdm_drv->mdm_ready = 0;
			mdm_drv->mdm_hsic_reconnectd = 0;
			mdm_drv->ops->htc_power_down_mdm_cb(mdm_drv);
		}
		break;
	case HTC_UPDATE_CRC_RESTART_LEVEL:
		pr_info("%s: (HTC_UPDATE_CRC_RESTART_LEVEL)\n", __func__);
		subsystem_update_restart_level_for_crc();
		break;
	default:
		pr_err("%s: invalid ioctl cmd = %d\n", __func__, _IOC_NR(cmd));
		ret = -EINVAL;
		break;
	}

	return ret;
}

/* Log the current value of a single GPIO (0 means "not configured") */
static void dump_gpio(char *name, unsigned int gpio)
{
	if (gpio == 0) {
		pr_err("%s: Cannot dump %s, due to invalid gpio number %d\n",
			__func__, name, gpio);
		return;
	}
	pr_info("%s: %s\t= %d\n", __func__, name, gpio_get_value(gpio));
	return;
}

/* Log all AP<->MDM handshake GPIOs for debugging */
static void dump_mdm_related_gpio(void)
{
	dump_gpio("AP2MDM_STATUS", mdm_drv->ap2mdm_status_gpio);
	dump_gpio("AP2MDM_ERRFATAL", mdm_drv->ap2mdm_errfatal_gpio);
	dump_gpio("AP2MDM_PMIC_RESET_N", mdm_drv->ap2mdm_pmic_reset_n_gpio);
	dump_gpio("MDM2AP_STATUS", mdm_drv->mdm2ap_status_gpio);
	dump_gpio("MDM2AP_ERRFATAL", mdm_drv->mdm2ap_errfatal_gpio);
	return;
}

/* Last fatal error message set by userspace via SET_MODEM_ERRMSG */
static char modem_errmsg[MODEM_ERRMSG_LEN];

/* Copy a userspace-supplied modem error message into modem_errmsg */
static int set_mdm_errmsg(void __user *msg)
{
	memset(modem_errmsg, 0,
sizeof(modem_errmsg));
	if (unlikely(copy_from_user(modem_errmsg, msg, MODEM_ERRMSG_LEN))) {
		pr_err("%s: copy modem_errmsg failed\n", __func__);
		return -EFAULT;
	}
	/* force NUL termination regardless of what userspace sent */
	modem_errmsg[MODEM_ERRMSG_LEN-1] = '\0';
	pr_info("%s: set modem errmsg: %s\n", __func__, modem_errmsg);
	return 0;
}

/* Return the stored modem error message, or NULL if none was set */
char *get_mdm_errmsg(void)
{
	if (strlen(modem_errmsg) <= 0) {
		pr_err("%s: can not get mdm errmsg.\n", __func__);
		return NULL;
	}
	return modem_errmsg;
}
EXPORT_SYMBOL(get_mdm_errmsg);

/* Pulse AP2MDM_IPC1 high for ~1 ms to signal NV-write completion */
static int notify_mdm_nv_write_done(void)
{
	gpio_direction_output(mdm_drv->ap2mdm_ipc1_gpio, 1);
	msleep(1);
	gpio_direction_output(mdm_drv->ap2mdm_ipc1_gpio, 0);
	return 0;
}

extern void set_mdm2ap_errfatal_restart_flag(unsigned);

/*
 * Dump GPIO state and the kernel stacks of the modem helper threads
 * (qcks/efsks/ks) plus all blocked tasks; on RESET_SOC restart level,
 * disable RTB and flag the errfatal restart.
 */
static void mdm_crash_dump_dbg_info(void)
{
	dump_mdm_related_gpio();

	printk(KERN_INFO "=== Show qcks stack ===\n");
	show_thread_group_state_filter("qcks", 0);
	printk(KERN_INFO "\n");
	printk(KERN_INFO "=== Show efsks stack ===\n");
	show_thread_group_state_filter("efsks", 0);
	printk(KERN_INFO "\n");
	printk(KERN_INFO "=== Show ks stack ===\n");
	show_thread_group_state_filter("ks", 0);
	printk(KERN_INFO "\n");

	pr_info("### Show Blocked State in ###\n");
	show_state_filter(TASK_UNINTERRUPTIBLE);
	if (get_restart_level() == RESET_SOC)
		msm_rtb_disable();

	if (get_restart_level() == RESET_SOC)
		set_mdm2ap_errfatal_restart_flag(1);
}

/*
 * Work item: debounce the errfatal GPIO for HTC_MDM_ERROR_CONFIRM_TIME_MS
 * and, if it stays high, dump debug info and restart the external modem.
 */
static void mdm_fatal_fn(struct work_struct *work)
{
	int i;
	int value = gpio_get_value(mdm_drv->mdm2ap_errfatal_gpio);

	if (value == 1) {
		for (i = HTC_MDM_ERROR_CONFIRM_TIME_MS; i > 0; i--) {
			msleep(1);
			if (gpio_get_value(mdm_drv->mdm2ap_errfatal_gpio) == 0) {
				pr_info("%s: mdm fatal high %d(ms) confirm failed... Abort!\n",
					__func__, HTC_MDM_ERROR_CONFIRM_TIME_MS);
				return;
			}
		}
	} else if (value == 0) {
		pr_info("%s: mdm fatal high is a false alarm!\n", __func__);
		return;
	}

	mdm_crash_dump_dbg_info();

	pr_info("%s: Reseting the mdm due to an errfatal\n", __func__);
	subsystem_restart(EXTERNAL_MODEM);
}

static DECLARE_WORK(mdm_fatal_work, mdm_fatal_fn);

/*
 * Work item: handle a status GPIO change. A debounced low status after
 * the modem was ready means an unexpected reset -> subsystem restart.
 */
static void mdm_status_fn(struct work_struct *work)
{
	int i;
	int value = gpio_get_value(mdm_drv->mdm2ap_status_gpio);

	if (!mdm_drv->mdm_ready)
		return;

	if (value == 0) {
		/* debounce: require status to stay low the whole window */
		for (i = HTC_MDM_ERROR_CONFIRM_TIME_MS; i > 0; i--) {
			msleep(1);
			if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 1) {
				pr_info("%s: mdm status low %d(ms) confirm failed... Abort!\n",
					__func__, HTC_MDM_ERROR_CONFIRM_TIME_MS);
				return;
			}
		}
	}

	if ( ( get_radio_flag() & RADIO_FLAG_USB_UPLOAD ) ) {
		if ( value == 0 ) {
			int val_gpio = 0;
			msleep(40);
			val_gpio = gpio_get_value(mdm_drv->mdm2ap_hsic_ready_gpio);
			pr_info("%s:mdm2ap_hsic_ready_gpio=[%d]\n", __func__, val_gpio);
		}
	}

	mdm_status_change_notified = true;
	mdm_drv->ops->status_cb(mdm_drv, value);

	pr_debug("%s: status:%d\n", __func__, value);

	if (value == 0) {
		pr_info("%s: unexpected reset external modem\n", __func__);
		mdm_crash_dump_dbg_info();
		subsystem_restart(EXTERNAL_MODEM);
	} else if (value == 1) {
		pr_info("%s: status = 1: mdm is now ready\n", __func__);
	}
}

#ifdef CONFIG_QSC_MODEM
void qsc_boot_after_mdm_bootloader(int state);

/* Work item: one-shot — release the bootloader IRQ and kick QSC boot */
static void qsc_boot_up_fn(struct work_struct *work)
{
	free_irq(mdm_drv->mdm2ap_bootloader_irq, NULL);
	qsc_boot_after_mdm_bootloader(MDM_BOOTLOAER_GPIO_IRQ_RECEIVED);
}

static DECLARE_WORK(qsc_boot_up_work, qsc_boot_up_fn);
#endif

/* Mask both MDM interrupts, e.g. before a panic-path shutdown */
static void mdm_disable_irqs(void)
{
	disable_irq_nosync(mdm_drv->mdm_errfatal_irq);
	disable_irq_nosync(mdm_drv->mdm_status_irq);
}

/* IRQ: errfatal line rose — queue debounce/restart work if modem was up */
static irqreturn_t mdm_errfatal(int irq, void *dev_id)
{
	pr_err("%s: detect mdm errfatal pin rising\n", __func__);
	if (mdm_drv->mdm_ready &&
		(gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 1)) {
		pr_err("%s: mdm got errfatal interrupt\n", __func__);
		pr_debug("%s: scheduling work now\n", __func__);
		queue_work_on(0, mdm_queue, &mdm_fatal_work);
	}
	return IRQ_HANDLED;
}

static int mdm_modem_open(struct inode *inode, struct file *file)
{
	return 0;
}

static const struct file_operations mdm_modem_fops = {
	.owner		= THIS_MODULE,
	.open		= mdm_modem_open,
	.unlocked_ioctl	= mdm_modem_ioctl,
};

static struct miscdevice mdm_modem_misc = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "mdm",
	.fops	= &mdm_modem_fops
};

/*
 * Panic notifier: raise AP2MDM_ERRFATAL and wait (petting the watchdog)
 * up to MDM_MODEM_TIMEOUT ms for the modem to acknowledge by dropping
 * its status line.
 */
static int mdm_panic_prep(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	int i;

	pr_debug("%s: setting AP2MDM_ERRFATAL high for a non graceful reset\n",
			 __func__);
	mdm_disable_irqs();
	gpio_set_value(mdm_drv->ap2mdm_errfatal_gpio, 1);

	for (i = MDM_MODEM_TIMEOUT; i > 0; i -= MDM_MODEM_DELTA) {
		pet_watchdog();
		mdelay(MDM_MODEM_DELTA);
		if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
			break;
	}
	if (i <= 0)
		pr_err("%s: MDM2AP_STATUS never went low\n", __func__);
	return NOTIFY_DONE;
}

static struct notifier_block mdm_panic_blk = {
	.notifier_call  = mdm_panic_prep,
};

/* IRQ: status line changed — defer evaluation to the workqueue */
static irqreturn_t mdm_status_change(int irq, void *dev_id)
{
	pr_debug("%s: mdm sent status change interrupt\n", __func__);
	queue_work_on(0, mdm_queue, &mdm_status_work);
	return IRQ_HANDLED;
}

#ifdef CONFIG_QSC_MODEM
/* IRQ: modem entered its bootloader — hand off to the QSC boot work */
static irqreturn_t mdm_in_bootloader(int irq, void *dev_id)
{
	pr_info("%s: got mdm2ap_bootloader interrupt\n", __func__);
	queue_work(mdm_queue, &qsc_boot_up_work);
	return IRQ_HANDLED;
}
#endif

/* SSR hook: signal errfatal, optionally delay for ramdump, power down */
static int mdm_subsys_shutdown(const struct subsys_data *crashed_subsys)
{
	mdm_drv->mdm_ready = 0;
	gpio_direction_output(mdm_drv->ap2mdm_errfatal_gpio, 1);
	if (mdm_drv->pdata->ramdump_delay_ms > 0) {
		msleep(mdm_drv->pdata->ramdump_delay_ms);
	}
	mdm_drv->ops->power_down_mdm_cb(mdm_drv);
	return 0;
}

/*
 * SSR hook: request a normal reboot from the userspace loader (via
 * mdm_needs_reload) and wait up to MDM_BOOT_TIMEOUT for completion.
 */
static int mdm_subsys_powerup(const struct subsys_data *crashed_subsys)
{
	gpio_direction_output(mdm_drv->ap2mdm_errfatal_gpio, 0);
	gpio_direction_output(mdm_drv->ap2mdm_status_gpio, 1);
	mdm_drv->boot_type = CHARM_NORMAL_BOOT;
	pr_info("%s: mdm_needs_reload\n", __func__);
	complete(&mdm_needs_reload);
	if (!wait_for_completion_timeout(&mdm_boot,
			msecs_to_jiffies(MDM_BOOT_TIMEOUT))) {
		mdm_drv->mdm_boot_status = -ETIMEDOUT;
		pr_info("%s: mdm modem restart timed out.\n", __func__);
	} else {
		pr_info("%s: mdm modem has been restarted\n", __func__);
		/* fetch the restart reason in the background */
		queue_work_on(0, mdm_sfr_queue, &sfr_reason_work);
	}
	INIT_COMPLETION(mdm_boot);
	return mdm_drv->mdm_boot_status;
}

/*
 * SSR hook: if requested, ask the loader for RAM dumps and wait up to
 * MDM_RDUMP_TIMEOUT, then power the modem down again.
 */
static int mdm_subsys_ramdumps(int want_dumps,
				const struct subsys_data *crashed_subsys)
{
	mdm_drv->mdm_ram_dump_status = 0;
	if (want_dumps) {
		mdm_drv->boot_type = CHARM_RAM_DUMPS;
		complete(&mdm_needs_reload);
		if (!wait_for_completion_timeout(&mdm_ram_dumps,
				msecs_to_jiffies(MDM_RDUMP_TIMEOUT))) {
			mdm_drv->mdm_ram_dump_status = -ETIMEDOUT;
			pr_info("%s: mdm modem ramdumps timed out.\n", __func__);
		} else
			pr_info("%s: mdm modem ramdumps completed.\n", __func__);
		INIT_COMPLETION(mdm_ram_dumps);
		gpio_direction_output(mdm_drv->ap2mdm_errfatal_gpio, 1);
		mdm_drv->ops->power_down_mdm_cb(mdm_drv);
	}
	return mdm_drv->mdm_ram_dump_status;
}

static struct subsys_data mdm_subsystem = {
	.shutdown = mdm_subsys_shutdown,
	.ramdump = mdm_subsys_ramdumps,
	.powerup = mdm_subsys_powerup,
	.name = EXTERNAL_MODEM,
};

/* debugfs "debug_on" setter: forwards the flag to the platform callback */
static int mdm_debug_on_set(void *data, u64 val)
{
	mdm_debug_on = val;
	if (mdm_drv->ops->debug_state_changed_cb)
		mdm_drv->ops->debug_state_changed_cb(mdm_debug_on);
	return 0;
}

static int mdm_debug_on_get(void *data, u64 *val)
{
	*val = mdm_debug_on;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(mdm_debug_on_fops,
			mdm_debug_on_get,
			mdm_debug_on_set, "%llu\n");

/* Create /sys/kernel/debug/mdm_dbg/debug_on */
static int mdm_debugfs_init(void)
{
	struct dentry *dent;

	dent = debugfs_create_dir("mdm_dbg", 0);
	if (IS_ERR(dent))
		return PTR_ERR(dent);

	debugfs_create_file("debug_on", 0644, dent, NULL,
			&mdm_debug_on_fops);
	return 0;
}

/*
 * Pull all AP<->MDM GPIO numbers out of the platform device resources
 * and record the ops/pdata pointers on the driver state.
 */
static void mdm_modem_initialize_data(struct platform_device *pdev,
				struct mdm_ops *mdm_ops)
{
	struct resource *pres;

	pres = platform_get_resource_byname(pdev, IORESOURCE_IO,
"MDM2AP_ERRFATAL"); if (pres) mdm_drv->mdm2ap_errfatal_gpio = pres->start; pres = platform_get_resource_byname(pdev, IORESOURCE_IO, "AP2MDM_ERRFATAL"); if (pres) mdm_drv->ap2mdm_errfatal_gpio = pres->start; pres = platform_get_resource_byname(pdev, IORESOURCE_IO, "MDM2AP_STATUS"); if (pres) mdm_drv->mdm2ap_status_gpio = pres->start; pres = platform_get_resource_byname(pdev, IORESOURCE_IO, "AP2MDM_STATUS"); if (pres) mdm_drv->ap2mdm_status_gpio = pres->start; pres = platform_get_resource_byname(pdev, IORESOURCE_IO, "MDM2AP_WAKEUP"); if (pres) mdm_drv->mdm2ap_wakeup_gpio = pres->start; pres = platform_get_resource_byname(pdev, IORESOURCE_IO, "AP2MDM_WAKEUP"); if (pres) mdm_drv->ap2mdm_wakeup_gpio = pres->start; pres = platform_get_resource_byname(pdev, IORESOURCE_IO, "AP2MDM_PMIC_RESET_N"); if (pres) mdm_drv->ap2mdm_pmic_reset_n_gpio = pres->start; pres = platform_get_resource_byname(pdev, IORESOURCE_IO, "AP2MDM_KPDPWR_N"); if (pres) mdm_drv->ap2mdm_kpdpwr_n_gpio = pres->start; pres = platform_get_resource_byname(pdev, IORESOURCE_IO, "MDM2AP_HSIC_READY"); if (pres) mdm_drv->mdm2ap_hsic_ready_gpio = pres->start; pres = platform_get_resource_byname(pdev, IORESOURCE_IO, "AP2MDM_IPC1"); if (pres) mdm_drv->ap2mdm_ipc1_gpio = pres->start; pres = platform_get_resource_byname(pdev, IORESOURCE_IO, "MDM2AP_BOOTLOADER"); if (pres) mdm_drv->mdm2ap_bootloader_gpio = pres->start; #ifdef CONFIG_HTC_STORE_MODEM_RESET_INFO modem_silent_reset_info_sysfs_attrs(pdev); mdm_subsystem_restart_properties_init(); #endif mdm_drv->boot_type = CHARM_NORMAL_BOOT; mdm_drv->ops = mdm_ops; mdm_drv->pdata = pdev->dev.platform_data; } extern void register_ap2mdm_pmic_reset_n_gpio(unsigned); int mdm_common_create(struct platform_device *pdev, struct mdm_ops *p_mdm_cb) { int ret = -1, irq; #ifdef CONFIG_HTC_POWEROFF_MODEM_IN_OFFMODE_CHARGING int mfg_mode = BOARD_MFG_MODE_NORMAL; mfg_mode = board_mfg_mode(); if ( mfg_mode == BOARD_MFG_MODE_OFFMODE_CHARGING ) { unsigned ap2mdm_pmic_reset_n_gpio = 0; 
struct resource *pres; pr_info("%s: BOARD_MFG_MODE_OFFMODE_CHARGING\n", __func__); pres = platform_get_resource_byname(pdev, IORESOURCE_IO, "AP2MDM_PMIC_RESET_N"); if (pres) { ap2mdm_pmic_reset_n_gpio = pres->start; if ( ap2mdm_pmic_reset_n_gpio > 0 ) { gpio_request(ap2mdm_pmic_reset_n_gpio, "AP2MDM_PMIC_RESET_N"); gpio_direction_output(ap2mdm_pmic_reset_n_gpio, 0); pr_info("%s: Pull AP2MDM_PMIC_RESET_N(%d) to low\n", __func__, ap2mdm_pmic_reset_n_gpio); } } else { pr_info("%s: pres=NULL\n", __func__); } return 0; } else { pr_info("%s: mfg_mode=[%d]\n", __func__, mfg_mode); } #else pr_info("%s: CONFIG_HTC_POWEROFF_MODEM_IN_OFFMODE_CHARGING not set\n", __func__); #endif mdm_drv = kzalloc(sizeof(struct mdm_modem_drv), GFP_KERNEL); if (mdm_drv == NULL) { pr_err("%s: kzalloc fail.\n", __func__); goto alloc_err; } mdm_modem_initialize_data(pdev, p_mdm_cb); if (mdm_drv->ops->debug_state_changed_cb) mdm_drv->ops->debug_state_changed_cb(mdm_debug_on); register_ap2mdm_pmic_reset_n_gpio(mdm_drv->ap2mdm_pmic_reset_n_gpio); gpio_request(mdm_drv->ap2mdm_status_gpio, "AP2MDM_STATUS"); gpio_request(mdm_drv->ap2mdm_errfatal_gpio, "AP2MDM_ERRFATAL"); gpio_request(mdm_drv->ap2mdm_pmic_reset_n_gpio, "AP2MDM_PMIC_RESET_N"); gpio_request(mdm_drv->mdm2ap_status_gpio, "MDM2AP_STATUS"); gpio_request(mdm_drv->mdm2ap_errfatal_gpio, "MDM2AP_ERRFATAL"); gpio_request(mdm_drv->mdm2ap_hsic_ready_gpio, "MDM2AP_HSIC_READY"); gpio_request(mdm_drv->ap2mdm_ipc1_gpio, "AP2MDM_IPC1"); #ifdef CONFIG_QSC_MODEM gpio_request(mdm_drv->mdm2ap_bootloader_gpio, "MDM2AP_BOOTLOADER"); #endif if (mdm_drv->ap2mdm_wakeup_gpio > 0) gpio_request(mdm_drv->ap2mdm_wakeup_gpio, "AP2MDM_WAKEUP"); #if 0 gpio_direction_output(mdm_drv->ap2mdm_status_gpio, 1); #endif gpio_direction_output(mdm_drv->ap2mdm_errfatal_gpio, 0); gpio_direction_output(mdm_drv->ap2mdm_ipc1_gpio, 0); #ifdef CONFIG_QSC_MODEM gpio_tlmm_config(GPIO_CFG((mdm_drv->mdm2ap_bootloader_gpio), 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), 
GPIO_CFG_ENABLE); gpio_direction_input(mdm_drv->mdm2ap_bootloader_gpio); #endif if (mdm_drv->ap2mdm_wakeup_gpio > 0) gpio_direction_output(mdm_drv->ap2mdm_wakeup_gpio, 0); gpio_direction_input(mdm_drv->mdm2ap_status_gpio); gpio_direction_input(mdm_drv->mdm2ap_errfatal_gpio); mdm_queue = create_singlethread_workqueue("mdm_queue"); if (!mdm_queue) { pr_err("%s: could not create workqueue. All mdm " "functionality will be disabled\n", __func__); ret = -ENOMEM; goto fatal_err; } mdm_sfr_queue = alloc_workqueue("mdm_sfr_queue", 0, 0); if (!mdm_sfr_queue) { pr_err("%s: could not create workqueue mdm_sfr_queue." " All mdm functionality will be disabled\n", __func__); ret = -ENOMEM; destroy_workqueue(mdm_queue); goto fatal_err; } mdm_gpio_monitor_queue = create_singlethread_workqueue("mdm_gpio_monitor_queue"); if (!mdm_gpio_monitor_queue) { pr_err("%s: could not create workqueue for monitoring GPIO status \n", __func__); destroy_workqueue(mdm_gpio_monitor_queue); } atomic_notifier_chain_register(&panic_notifier_list, &mdm_panic_blk); mdm_debugfs_init(); mdm_loaded_info(); ssr_register_subsystem(&mdm_subsystem); irq = MSM_GPIO_TO_INT(mdm_drv->mdm2ap_errfatal_gpio); if (irq < 0) { pr_err("%s: could not get MDM2AP_ERRFATAL IRQ resource. " "error=%d No IRQ will be generated on errfatal.", __func__, irq); goto errfatal_err; } ret = request_irq(irq, mdm_errfatal, IRQF_TRIGGER_RISING , "mdm errfatal", NULL); if (ret < 0) { pr_err("%s: MDM2AP_ERRFATAL IRQ#%d request failed with error=%d" ". No IRQ will be generated on errfatal.", __func__, irq, ret); goto errfatal_err; } mdm_drv->mdm_errfatal_irq = irq; errfatal_err: irq = MSM_GPIO_TO_INT(mdm_drv->mdm2ap_status_gpio); if (irq < 0) { pr_err("%s: could not get MDM2AP_STATUS IRQ resource. 
" "error=%d No IRQ will be generated on status change.", __func__, irq); goto status_err; } ret = request_threaded_irq(irq, NULL, mdm_status_change, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_SHARED, "mdm status", mdm_drv); if (ret < 0) { pr_err("%s: MDM2AP_STATUS IRQ#%d request failed with error=%d" ". No IRQ will be generated on status change.", __func__, irq, ret); goto status_err; } mdm_drv->mdm_status_irq = irq; status_err: mdm_drv->ops->power_on_mdm_cb(mdm_drv); #ifdef CONFIG_QSC_MODEM irq = MSM_GPIO_TO_INT(mdm_drv->mdm2ap_bootloader_gpio); if (irq < 0) { pr_err("%s: could not get mdm2ap_bootloader irq resource, error=%d. Skip waiting for MDM_BOOTLOADER interrupt.", __func__, irq); } else { ret = request_threaded_irq(irq, NULL, mdm_in_bootloader, IRQF_TRIGGER_RISING, "mdm in bootloader", NULL); if (ret < 0) { pr_err("%s: mdm2ap_bootloader irq request failed with error=%d. Skip waiting for MDM_BOOTLOADER interrupt.", __func__, ret); } else { qsc_boot_after_mdm_bootloader(MDM_BOOTLOAER_GPIO_IRQ_REGISTERED); mdm_drv->mdm2ap_bootloader_irq = irq; pr_info("%s: Registered mdm2ap_bootloader irq, gpio<%d> irq<%d> ret<%d>\n" , __func__, mdm_drv->mdm2ap_bootloader_gpio, irq, ret); } } #endif pr_info("%s: Registering mdm modem\n", __func__); return misc_register(&mdm_modem_misc); fatal_err: gpio_free(mdm_drv->ap2mdm_status_gpio); gpio_free(mdm_drv->ap2mdm_errfatal_gpio); gpio_free(mdm_drv->ap2mdm_kpdpwr_n_gpio); gpio_free(mdm_drv->ap2mdm_pmic_reset_n_gpio); gpio_free(mdm_drv->mdm2ap_status_gpio); gpio_free(mdm_drv->mdm2ap_errfatal_gpio); gpio_free(mdm_drv->mdm2ap_hsic_ready_gpio); gpio_free(mdm_drv->ap2mdm_ipc1_gpio); #ifdef CONFIG_QSC_MODEM gpio_free(mdm_drv->mdm2ap_bootloader_gpio); #endif if (mdm_drv->ap2mdm_wakeup_gpio > 0) gpio_free(mdm_drv->ap2mdm_wakeup_gpio); kfree(mdm_drv); ret = -ENODEV; alloc_err: return ret; } int mdm_common_modem_remove(struct platform_device *pdev) { int ret; #ifdef CONFIG_HTC_POWEROFF_MODEM_IN_OFFMODE_CHARGING int mfg_mode = 
BOARD_MFG_MODE_NORMAL; mfg_mode = board_mfg_mode(); if ( mfg_mode == BOARD_MFG_MODE_OFFMODE_CHARGING ) { unsigned ap2mdm_pmic_reset_n_gpio = 0; struct resource *pres; pr_info("%s: BOARD_MFG_MODE_OFFMODE_CHARGING\n", __func__); pres = platform_get_resource_byname(pdev, IORESOURCE_IO, "AP2MDM_PMIC_RESET_N"); if (pres) { ap2mdm_pmic_reset_n_gpio = pres->start; if ( ap2mdm_pmic_reset_n_gpio > 0 ) { gpio_free(ap2mdm_pmic_reset_n_gpio); pr_info("%s: gpio_free AP2MDM_PMIC_RESET_N(%d)\n", __func__, ap2mdm_pmic_reset_n_gpio); } } return 0; } #endif gpio_free(mdm_drv->ap2mdm_status_gpio); gpio_free(mdm_drv->ap2mdm_errfatal_gpio); gpio_free(mdm_drv->ap2mdm_kpdpwr_n_gpio); gpio_free(mdm_drv->ap2mdm_pmic_reset_n_gpio); gpio_free(mdm_drv->mdm2ap_status_gpio); gpio_free(mdm_drv->mdm2ap_errfatal_gpio); gpio_free(mdm_drv->mdm2ap_hsic_ready_gpio); gpio_free(mdm_drv->ap2mdm_ipc1_gpio); #ifdef CONFIG_QSC_MODEM gpio_free(mdm_drv->mdm2ap_bootloader_gpio); #endif if (mdm_drv->ap2mdm_wakeup_gpio > 0) gpio_free(mdm_drv->ap2mdm_wakeup_gpio); kfree(mdm_drv); ret = misc_deregister(&mdm_modem_misc); #ifdef CONFIG_HTC_STORE_MODEM_RESET_INFO mdm_subsystem_restart_properties_release(); #endif return ret; } void mdm_common_modem_shutdown(struct platform_device *pdev) { #ifdef CONFIG_HTC_POWEROFF_MODEM_IN_OFFMODE_CHARGING int mfg_mode = BOARD_MFG_MODE_NORMAL; mfg_mode = board_mfg_mode(); if ( mfg_mode == BOARD_MFG_MODE_OFFMODE_CHARGING ) { pr_info("%s: BOARD_MFG_MODE_OFFMODE_CHARGING\n", __func__); return; } #endif pr_info("%s: setting AP2MDM_STATUS low for a graceful restart\n", __func__); mdm_disable_irqs(); gpio_set_value(mdm_drv->ap2mdm_status_gpio, 0); mdm_drv->ops->power_down_mdm_cb(mdm_drv); } int mdm_common_htc_get_mdm2ap_errfatal_level(void) { int value = 0; if (mdm_drv != NULL && mdm_drv->mdm2ap_errfatal_gpio != 0) value = gpio_get_value(mdm_drv->mdm2ap_errfatal_gpio); pr_info("%s: %d\n", __func__, value); return value; } EXPORT_SYMBOL_GPL(mdm_common_htc_get_mdm2ap_errfatal_level); int 
mdm_common_htc_get_mdm2ap_status_level(void) { int value = 0; if (mdm_drv != NULL && mdm_drv->mdm2ap_status_gpio != 0) value = gpio_get_value(mdm_drv->mdm2ap_status_gpio); pr_info("%s: %d\n", __func__, value); return value; } EXPORT_SYMBOL_GPL(mdm_common_htc_get_mdm2ap_status_level);
gpl-2.0
d3trax/asuswrt-merlin
release/src-rt-6.x.4708/cfe/cfe/arch/arm/board/bcm947xx/src/tinymt32.c
53
3909
/**
 * @file tinymt32.c
 *
 * @brief Tiny Mersenne Twister only 127 bit internal state
 *
 * @author Mutsuo Saito (Hiroshima University)
 * @author Makoto Matsumoto (The University of Tokyo)
 *
 * Copyright (C) 2011 Mutsuo Saito, Makoto Matsumoto,
 * Hiroshima University and The University of Tokyo.
 * All rights reserved.
 *
 * The 3-clause BSD License is applied to this software, see
 * LICENSE.txt
 *
 * $Id:: tinymt32.c 1306 2012-06-21 14:10:10Z jeung $:
 * $Rev::file = 1306 : Global SVN Revision = 1306 $:
 *
 */
/* FILE-CSTYLED */
#include <tinymt32.h>

/* Minimum number of mixing rounds applied to the seed during init. */
#define MIN_LOOP 8
/* Number of warm-up state transitions discarded after seeding. */
#define PRE_LOOP 8

/**
 * This function represents a function used in the initialization
 * by init_by_array
 * @param x 32-bit integer
 * @return 32-bit integer
 */
static uint32_t ini_func1(uint32_t x) {
    return (x ^ (x >> 27)) * (uint32_t)1664525UL;
}

/**
 * This function represents a function used in the initialization
 * by init_by_array
 * @param x 32-bit integer
 * @return 32-bit integer
 */
static uint32_t ini_func2(uint32_t x) {
    return (x ^ (x >> 27)) * (uint32_t)1566083941UL;
}

/**
 * This function certificate the period of 2^127-1.
 * If the masked state is all zero (a fixed point the generator could
 * never leave), it is replaced with the bytes 'T','I','N','Y' to
 * guarantee a non-zero state and thus the full period.
 * @param random tinymt state vector.
 */
static void period_certification(tinymt32_t * random) {
    if ((random->status[0] & TINYMT32_MASK) == 0 &&
	random->status[1] == 0 &&
	random->status[2] == 0 &&
	random->status[3] == 0) {
	random->status[0] = 'T';
	random->status[1] = 'I';
	random->status[2] = 'N';
	random->status[3] = 'Y';
    }
}

/**
 * This function initializes the internal state array with a 32-bit
 * unsigned integer seed.
 * @param random tinymt state vector.
 * @param seed a 32-bit unsigned integer used as a seed.
 */
void tinymt32_init(tinymt32_t * random, uint32_t seed) {
    int i;
    random->status[0] = seed;
    random->status[1] = random->mat1;
    random->status[2] = random->mat2;
    random->status[3] = random->tmat;
    /* LCG-style avalanche of the seed across all four state words. */
    for ( i = 1; i < MIN_LOOP; i++) {
	random->status[i & 3] ^= i + UINT32_C(1812433253)
	    * (random->status[(i - 1) & 3]
	       ^ (random->status[(i - 1) & 3] >> 30));
    }
    period_certification(random);
    /* Discard the first PRE_LOOP outputs to decorrelate from the seed. */
    for ( i = 0; i < PRE_LOOP; i++) {
	tinymt32_next_state(random);
    }
}

/**
 * This function initializes the internal state array,
 * with an array of 32-bit unsigned integers used as seeds
 * @param random tinymt state vector.
 * @param init_key the array of 32-bit integers, used as a seed.
 * @param key_length the length of init_key.
 */
void tinymt32_init_by_array(tinymt32_t * random, uint32_t init_key[],
			    int key_length) {
    const int lag = 1;
    const int mid = 1;
    const int size = 4;
    int i, j;
    int count;
    uint32_t r;
    uint32_t * st = &random->status[0];

    st[0] = 0;
    st[1] = random->mat1;
    st[2] = random->mat2;
    st[3] = random->tmat;
    if (key_length + 1 > MIN_LOOP) {
	count = key_length + 1;
    } else {
	count = MIN_LOOP;
    }
    r = ini_func1(st[0] ^ st[mid % size]
		  ^ st[(size - 1) % size]);
    st[mid % size] += r;
    r += key_length;
    st[(mid + lag) % size] += r;
    st[0] = r;
    count--;
    /* First pass: fold every key word into the rotating state. */
    for (i = 1, j = 0; (j < count) && (j < key_length); j++) {
	r = ini_func1(st[i] ^ st[(i + mid) % size]
		      ^ st[(i + size - 1) % size]);
	st[(i + mid) % size] += r;
	r += init_key[j] + i;
	st[(i + mid + lag) % size] += r;
	st[i] = r;
	i = (i + 1) % size;
    }
    /* Continue mixing (without key input) until count rounds are done. */
    for (; j < count; j++) {
	r = ini_func1(st[i] ^ st[(i + mid) % size]
		      ^ st[(i + size - 1) % size]);
	st[(i + mid) % size] += r;
	r += i;
	st[(i + mid + lag) % size] += r;
	st[i] = r;
	i = (i + 1) % size;
    }
    /* Final scrambling pass over each of the four state words. */
    for (j = 0; j < size; j++) {
	r = ini_func2(st[i] + st[(i + mid) % size]
		      + st[(i + size - 1) % size]);
	st[(i + mid) % size] ^= r;
	r -= i;
	st[(i + mid + lag) % size] ^= r;
	st[i] = r;
	i = (i + 1) % size;
    }
    period_certification(random);
    for (i = 0; i < PRE_LOOP; i++) {
	tinymt32_next_state(random);
    }
}
gpl-2.0
linuxium/ubuntu-xenial
drivers/net/wireless/mwifiex/cmdevt.c
53
51335
/* * Marvell Wireless LAN device driver: commands and events * * Copyright (C) 2011-2014, Marvell International Ltd. * * This software file (the "File") is distributed by Marvell International * Ltd. under the terms of the GNU General Public License Version 2, June 1991 * (the "License"). You may use, redistribute and/or modify this File in * accordance with the terms and conditions of the License, a copy of which * is available by writing to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. * * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE * ARE EXPRESSLY DISCLAIMED. The License provides additional details about * this warranty disclaimer. */ #include "decl.h" #include "ioctl.h" #include "util.h" #include "fw.h" #include "main.h" #include "wmm.h" #include "11n.h" #include "11ac.h" /* * This function initializes a command node. * * The actual allocation of the node is not done by this function. It only * initiates a node by filling it with default parameters. Similarly, * allocation of the different buffers used (IOCTL buffer, data buffer) are * not done by this function either. */ static void mwifiex_init_cmd_node(struct mwifiex_private *priv, struct cmd_ctrl_node *cmd_node, u32 cmd_oid, void *data_buf, bool sync) { cmd_node->priv = priv; cmd_node->cmd_oid = cmd_oid; if (sync) { cmd_node->wait_q_enabled = true; cmd_node->cmd_wait_q_woken = false; cmd_node->condition = &cmd_node->cmd_wait_q_woken; } cmd_node->data_buf = data_buf; cmd_node->cmd_skb = cmd_node->skb; } /* * This function returns a command node from the free queue depending upon * availability. 
*/ static struct cmd_ctrl_node * mwifiex_get_cmd_node(struct mwifiex_adapter *adapter) { struct cmd_ctrl_node *cmd_node; unsigned long flags; spin_lock_irqsave(&adapter->cmd_free_q_lock, flags); if (list_empty(&adapter->cmd_free_q)) { mwifiex_dbg(adapter, ERROR, "GET_CMD_NODE: cmd node not available\n"); spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags); return NULL; } cmd_node = list_first_entry(&adapter->cmd_free_q, struct cmd_ctrl_node, list); list_del(&cmd_node->list); spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags); return cmd_node; } /* * This function cleans up a command node. * * The function resets the fields including the buffer pointers. * This function does not try to free the buffers. They must be * freed before calling this function. * * This function will however call the receive completion callback * in case a response buffer is still available before resetting * the pointer. */ static void mwifiex_clean_cmd_node(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_node) { cmd_node->cmd_oid = 0; cmd_node->cmd_flag = 0; cmd_node->data_buf = NULL; cmd_node->wait_q_enabled = false; if (cmd_node->cmd_skb) skb_trim(cmd_node->cmd_skb, 0); if (cmd_node->resp_skb) { adapter->if_ops.cmdrsp_complete(adapter, cmd_node->resp_skb); cmd_node->resp_skb = NULL; } } /* * This function sends a host command to the firmware. * * The function copies the host command into the driver command * buffer, which will be transferred to the firmware later by the * main thread. */ static int mwifiex_cmd_host_cmd(struct mwifiex_private *priv, struct host_cmd_ds_command *cmd, struct mwifiex_ds_misc_cmd *pcmd_ptr) { /* Copy the HOST command to command buffer */ memcpy(cmd, pcmd_ptr->cmd, pcmd_ptr->len); mwifiex_dbg(priv->adapter, CMD, "cmd: host cmd size = %d\n", pcmd_ptr->len); return 0; } /* * This function downloads a command to the firmware. 
* * The function performs sanity tests, sets the command sequence * number and size, converts the header fields to CPU format before * sending. Afterwards, it logs the command ID and action for debugging * and sets up the command timeout timer. */ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, struct cmd_ctrl_node *cmd_node) { struct mwifiex_adapter *adapter = priv->adapter; int ret; struct host_cmd_ds_command *host_cmd; uint16_t cmd_code; uint16_t cmd_size; unsigned long flags; __le32 tmp; if (!adapter || !cmd_node) return -1; host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data); /* Sanity test */ if (host_cmd == NULL || host_cmd->size == 0) { mwifiex_dbg(adapter, ERROR, "DNLD_CMD: host_cmd is null\t" "or cmd size is 0, not sending\n"); if (cmd_node->wait_q_enabled) adapter->cmd_wait_q.status = -1; mwifiex_recycle_cmd_node(adapter, cmd_node); return -1; } cmd_code = le16_to_cpu(host_cmd->command); cmd_size = le16_to_cpu(host_cmd->size); if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET && cmd_code != HostCmd_CMD_FUNC_SHUTDOWN && cmd_code != HostCmd_CMD_FUNC_INIT) { mwifiex_dbg(adapter, ERROR, "DNLD_CMD: FW in reset state, ignore cmd %#x\n", cmd_code); mwifiex_recycle_cmd_node(adapter, cmd_node); queue_work(adapter->workqueue, &adapter->main_work); return -1; } /* Set command sequence number */ adapter->seq_num++; host_cmd->seq_num = cpu_to_le16(HostCmd_SET_SEQ_NO_BSS_INFO (adapter->seq_num, cmd_node->priv->bss_num, cmd_node->priv->bss_type)); spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); adapter->curr_cmd = cmd_node; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); /* Adjust skb length */ if (cmd_node->cmd_skb->len > cmd_size) /* * cmd_size is less than sizeof(struct host_cmd_ds_command). * Trim off the unused portion. */ skb_trim(cmd_node->cmd_skb, cmd_size); else if (cmd_node->cmd_skb->len < cmd_size) /* * cmd_size is larger than sizeof(struct host_cmd_ds_command) * because we have appended custom IE TLV. 
Increase skb length * accordingly. */ skb_put(cmd_node->cmd_skb, cmd_size - cmd_node->cmd_skb->len); mwifiex_dbg(adapter, CMD, "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n", cmd_code, le16_to_cpu(*(__le16 *)((u8 *)host_cmd + S_DS_GEN)), cmd_size, le16_to_cpu(host_cmd->seq_num)); mwifiex_dbg_dump(adapter, CMD_D, "cmd buffer:", host_cmd, cmd_size); if (adapter->iface_type == MWIFIEX_USB) { tmp = cpu_to_le32(MWIFIEX_USB_TYPE_CMD); skb_push(cmd_node->cmd_skb, MWIFIEX_TYPE_LEN); memcpy(cmd_node->cmd_skb->data, &tmp, MWIFIEX_TYPE_LEN); adapter->cmd_sent = true; ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_CMD_EVENT, cmd_node->cmd_skb, NULL); skb_pull(cmd_node->cmd_skb, MWIFIEX_TYPE_LEN); if (ret == -EBUSY) cmd_node->cmd_skb = NULL; } else { skb_push(cmd_node->cmd_skb, INTF_HEADER_LEN); ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD, cmd_node->cmd_skb, NULL); skb_pull(cmd_node->cmd_skb, INTF_HEADER_LEN); } if (ret == -1) { mwifiex_dbg(adapter, ERROR, "DNLD_CMD: host to card failed\n"); if (adapter->iface_type == MWIFIEX_USB) adapter->cmd_sent = false; if (cmd_node->wait_q_enabled) adapter->cmd_wait_q.status = -1; mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd); spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); adapter->curr_cmd = NULL; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); adapter->dbg.num_cmd_host_to_card_failure++; return -1; } /* Save the last command id and action to debug log */ adapter->dbg.last_cmd_index = (adapter->dbg.last_cmd_index + 1) % DBG_CMD_NUM; adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index] = cmd_code; adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index] = le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN)); /* Clear BSS_NO_BITS from HostCmd */ cmd_code &= HostCmd_CMD_ID_MASK; /* Setup the timer after transmit command */ mod_timer(&adapter->cmd_timer, jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S)); return 0; } /* * This function downloads a sleep confirm command to the firmware. 
* * The function performs sanity tests, sets the command sequence * number and size, converts the header fields to CPU format before * sending. * * No responses are needed for sleep confirm command. */ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter) { int ret; struct mwifiex_private *priv; struct mwifiex_opt_sleep_confirm *sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *) adapter->sleep_cfm->data; struct sk_buff *sleep_cfm_tmp; __le32 tmp; priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); adapter->seq_num++; sleep_cfm_buf->seq_num = cpu_to_le16((HostCmd_SET_SEQ_NO_BSS_INFO (adapter->seq_num, priv->bss_num, priv->bss_type))); mwifiex_dbg(adapter, CMD, "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n", le16_to_cpu(sleep_cfm_buf->command), le16_to_cpu(sleep_cfm_buf->action), le16_to_cpu(sleep_cfm_buf->size), le16_to_cpu(sleep_cfm_buf->seq_num)); mwifiex_dbg_dump(adapter, CMD_D, "SLEEP_CFM buffer: ", sleep_cfm_buf, le16_to_cpu(sleep_cfm_buf->size)); if (adapter->iface_type == MWIFIEX_USB) { sleep_cfm_tmp = dev_alloc_skb(sizeof(struct mwifiex_opt_sleep_confirm) + MWIFIEX_TYPE_LEN); skb_put(sleep_cfm_tmp, sizeof(struct mwifiex_opt_sleep_confirm) + MWIFIEX_TYPE_LEN); tmp = cpu_to_le32(MWIFIEX_USB_TYPE_CMD); memcpy(sleep_cfm_tmp->data, &tmp, MWIFIEX_TYPE_LEN); memcpy(sleep_cfm_tmp->data + MWIFIEX_TYPE_LEN, adapter->sleep_cfm->data, sizeof(struct mwifiex_opt_sleep_confirm)); ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_CMD_EVENT, sleep_cfm_tmp, NULL); if (ret != -EBUSY) dev_kfree_skb_any(sleep_cfm_tmp); } else { skb_push(adapter->sleep_cfm, INTF_HEADER_LEN); ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD, adapter->sleep_cfm, NULL); skb_pull(adapter->sleep_cfm, INTF_HEADER_LEN); } if (ret == -1) { mwifiex_dbg(adapter, ERROR, "SLEEP_CFM: failed\n"); adapter->dbg.num_cmd_sleep_cfm_host_to_card_failure++; return -1; } if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl)) /* Response is not needed for sleep confirm command 
*/ adapter->ps_state = PS_STATE_SLEEP; else adapter->ps_state = PS_STATE_SLEEP_CFM; if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl) && (adapter->is_hs_configured && !adapter->sleep_period.period)) { adapter->pm_wakeup_card_req = true; mwifiex_hs_activated_event(mwifiex_get_priv (adapter, MWIFIEX_BSS_ROLE_ANY), true); } return ret; } /* * This function allocates the command buffers and links them to * the command free queue. * * The driver uses a pre allocated number of command buffers, which * are created at driver initializations and freed at driver cleanup. * Every command needs to obtain a command buffer from this pool before * it can be issued. The command free queue lists the command buffers * currently free to use, while the command pending queue lists the * command buffers already in use and awaiting handling. Command buffers * are returned to the free queue after use. */ int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter) { struct cmd_ctrl_node *cmd_array; u32 i; /* Allocate and initialize struct cmd_ctrl_node */ cmd_array = kcalloc(MWIFIEX_NUM_OF_CMD_BUFFER, sizeof(struct cmd_ctrl_node), GFP_KERNEL); if (!cmd_array) return -ENOMEM; adapter->cmd_pool = cmd_array; /* Allocate and initialize command buffers */ for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) { cmd_array[i].skb = dev_alloc_skb(MWIFIEX_SIZE_OF_CMD_BUFFER); if (!cmd_array[i].skb) { mwifiex_dbg(adapter, ERROR, "unable to allocate command buffer\n"); return -ENOMEM; } } for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) mwifiex_insert_cmd_to_free_q(adapter, &cmd_array[i]); return 0; } /* * This function frees the command buffers. * * The function calls the completion callback for all the command * buffers that still have response buffers associated with them. 
*/ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter) { struct cmd_ctrl_node *cmd_array; u32 i; /* Need to check if cmd pool is allocated or not */ if (!adapter->cmd_pool) { mwifiex_dbg(adapter, FATAL, "info: FREE_CMD_BUF: cmd_pool is null\n"); return 0; } cmd_array = adapter->cmd_pool; /* Release shared memory buffers */ for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) { if (cmd_array[i].skb) { mwifiex_dbg(adapter, CMD, "cmd: free cmd buffer %d\n", i); dev_kfree_skb_any(cmd_array[i].skb); } if (!cmd_array[i].resp_skb) continue; if (adapter->iface_type == MWIFIEX_USB) adapter->if_ops.cmdrsp_complete(adapter, cmd_array[i].resp_skb); else dev_kfree_skb_any(cmd_array[i].resp_skb); } /* Release struct cmd_ctrl_node */ if (adapter->cmd_pool) { mwifiex_dbg(adapter, CMD, "cmd: free cmd pool\n"); kfree(adapter->cmd_pool); adapter->cmd_pool = NULL; } return 0; } /* * This function handles events generated by firmware. * * Event body of events received from firmware are not used (though they are * saved), only the event ID is used. Some events are re-invoked by * the driver, with a new event body. * * After processing, the function calls the completion callback * for cleanup. 
*/ int mwifiex_process_event(struct mwifiex_adapter *adapter) { int ret; struct mwifiex_private *priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); struct sk_buff *skb = adapter->event_skb; u32 eventcause = adapter->event_cause; struct mwifiex_rxinfo *rx_info; /* Save the last event to debug log */ adapter->dbg.last_event_index = (adapter->dbg.last_event_index + 1) % DBG_CMD_NUM; adapter->dbg.last_event[adapter->dbg.last_event_index] = (u16) eventcause; /* Get BSS number and corresponding priv */ priv = mwifiex_get_priv_by_id(adapter, EVENT_GET_BSS_NUM(eventcause), EVENT_GET_BSS_TYPE(eventcause)); if (!priv) priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); /* Clear BSS_NO_BITS from event */ eventcause &= EVENT_ID_MASK; adapter->event_cause = eventcause; if (skb) { rx_info = MWIFIEX_SKB_RXCB(skb); memset(rx_info, 0, sizeof(*rx_info)); rx_info->bss_num = priv->bss_num; rx_info->bss_type = priv->bss_type; mwifiex_dbg_dump(adapter, EVT_D, "Event Buf:", skb->data, skb->len); } mwifiex_dbg(adapter, EVENT, "EVENT: cause: %#x\n", eventcause); if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) ret = mwifiex_process_uap_event(priv); else ret = mwifiex_process_sta_event(priv); adapter->event_cause = 0; adapter->event_skb = NULL; adapter->if_ops.event_complete(adapter, skb); return ret; } /* * This function prepares a command and send it to the firmware. 
* * Preparation includes - * - Sanity tests to make sure the card is still present or the FW * is not reset * - Getting a new command node from the command free queue * - Initializing the command node for default parameters * - Fill up the non-default parameters and buffer pointers * - Add the command to pending queue */ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no, u16 cmd_action, u32 cmd_oid, void *data_buf, bool sync) { int ret; struct mwifiex_adapter *adapter = priv->adapter; struct cmd_ctrl_node *cmd_node; struct host_cmd_ds_command *cmd_ptr; if (!adapter) { pr_err("PREP_CMD: adapter is NULL\n"); return -1; } if (adapter->is_suspended) { mwifiex_dbg(adapter, ERROR, "PREP_CMD: device in suspended state\n"); return -1; } if (adapter->hs_enabling && cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) { mwifiex_dbg(adapter, ERROR, "PREP_CMD: host entering sleep state\n"); return -1; } if (adapter->surprise_removed) { mwifiex_dbg(adapter, ERROR, "PREP_CMD: card is removed\n"); return -1; } if (adapter->is_cmd_timedout) { mwifiex_dbg(adapter, ERROR, "PREP_CMD: FW is in bad state\n"); return -1; } if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET) { if (cmd_no != HostCmd_CMD_FUNC_INIT) { mwifiex_dbg(adapter, ERROR, "PREP_CMD: FW in reset state\n"); return -1; } } /* Get a new command node */ cmd_node = mwifiex_get_cmd_node(adapter); if (!cmd_node) { mwifiex_dbg(adapter, ERROR, "PREP_CMD: no free cmd node\n"); return -1; } /* Initialize the command node */ mwifiex_init_cmd_node(priv, cmd_node, cmd_oid, data_buf, sync); if (!cmd_node->cmd_skb) { mwifiex_dbg(adapter, ERROR, "PREP_CMD: no free cmd buf\n"); return -1; } memset(skb_put(cmd_node->cmd_skb, sizeof(struct host_cmd_ds_command)), 0, sizeof(struct host_cmd_ds_command)); cmd_ptr = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data); cmd_ptr->command = cpu_to_le16(cmd_no); cmd_ptr->result = 0; /* Prepare command */ if (cmd_no) { switch (cmd_no) { case HostCmd_CMD_UAP_SYS_CONFIG: case 
HostCmd_CMD_UAP_BSS_START: case HostCmd_CMD_UAP_BSS_STOP: case HostCmd_CMD_UAP_STA_DEAUTH: case HOST_CMD_APCMD_SYS_RESET: case HOST_CMD_APCMD_STA_LIST: ret = mwifiex_uap_prepare_cmd(priv, cmd_no, cmd_action, cmd_oid, data_buf, cmd_ptr); break; default: ret = mwifiex_sta_prepare_cmd(priv, cmd_no, cmd_action, cmd_oid, data_buf, cmd_ptr); break; } } else { ret = mwifiex_cmd_host_cmd(priv, cmd_ptr, data_buf); cmd_node->cmd_flag |= CMD_F_HOSTCMD; } /* Return error, since the command preparation failed */ if (ret) { mwifiex_dbg(adapter, ERROR, "PREP_CMD: cmd %#x preparation failed\n", cmd_no); mwifiex_insert_cmd_to_free_q(adapter, cmd_node); return -1; } /* Send command */ if (cmd_no == HostCmd_CMD_802_11_SCAN || cmd_no == HostCmd_CMD_802_11_SCAN_EXT) { mwifiex_queue_scan_cmd(priv, cmd_node); } else { mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true); queue_work(adapter->workqueue, &adapter->main_work); if (cmd_node->wait_q_enabled) ret = mwifiex_wait_queue_complete(adapter, cmd_node); } return ret; } /* * This function returns a command to the command free queue. * * The function also calls the completion callback if required, before * cleaning the command node and re-inserting it into the free queue. */ void mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_node) { unsigned long flags; if (!cmd_node) return; if (cmd_node->wait_q_enabled) mwifiex_complete_cmd(adapter, cmd_node); /* Clean the node */ mwifiex_clean_cmd_node(adapter, cmd_node); /* Insert node into cmd_free_q */ spin_lock_irqsave(&adapter->cmd_free_q_lock, flags); list_add_tail(&cmd_node->list, &adapter->cmd_free_q); spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags); } /* This function reuses a command node. 
*/ void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_node) { struct host_cmd_ds_command *host_cmd = (void *)cmd_node->cmd_skb->data; mwifiex_insert_cmd_to_free_q(adapter, cmd_node); atomic_dec(&adapter->cmd_pending); mwifiex_dbg(adapter, CMD, "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n", le16_to_cpu(host_cmd->command), atomic_read(&adapter->cmd_pending)); } /* * This function queues a command to the command pending queue. * * This in effect adds the command to the command list to be executed. * Exit PS command is handled specially, by placing it always to the * front of the command queue. */ void mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_node, u32 add_tail) { struct host_cmd_ds_command *host_cmd = NULL; u16 command; unsigned long flags; host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data); if (!host_cmd) { mwifiex_dbg(adapter, ERROR, "QUEUE_CMD: host_cmd is NULL\n"); return; } command = le16_to_cpu(host_cmd->command); /* Exit_PS command needs to be queued in the header always. */ if (command == HostCmd_CMD_802_11_PS_MODE_ENH) { struct host_cmd_ds_802_11_ps_mode_enh *pm = &host_cmd->params.psmode_enh; if ((le16_to_cpu(pm->action) == DIS_PS) || (le16_to_cpu(pm->action) == DIS_AUTO_PS)) { if (adapter->ps_state != PS_STATE_AWAKE) add_tail = false; } } spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags); if (add_tail) list_add_tail(&cmd_node->list, &adapter->cmd_pending_q); else list_add(&cmd_node->list, &adapter->cmd_pending_q); spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags); atomic_inc(&adapter->cmd_pending); mwifiex_dbg(adapter, CMD, "cmd: QUEUE_CMD: cmd=%#x, cmd_pending=%d\n", command, atomic_read(&adapter->cmd_pending)); } /* * This function executes the next command in command pending queue. 
*
 * This function will fail if a command is already in processing stage,
 * otherwise it will dequeue the first command from the command pending
 * queue and send to the firmware.
 *
 * If the device is currently in host sleep mode, any commands, except the
 * host sleep configuration command will de-activate the host sleep. For PS
 * mode, the function will put the firmware back to sleep if applicable.
 */
int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
{
	struct mwifiex_private *priv;
	struct cmd_ctrl_node *cmd_node;
	int ret = 0;
	struct host_cmd_ds_command *host_cmd;
	unsigned long cmd_flags;
	unsigned long cmd_pending_q_flags;

	/* Check if already in processing */
	if (adapter->curr_cmd) {
		mwifiex_dbg(adapter, FATAL,
			    "EXEC_NEXT_CMD: cmd in processing\n");
		return -1;
	}

	/* Lock order: mwifiex_cmd_lock is taken before cmd_pending_q_lock */
	spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
	/* Check if any command is pending */
	spin_lock_irqsave(&adapter->cmd_pending_q_lock, cmd_pending_q_flags);
	if (list_empty(&adapter->cmd_pending_q)) {
		spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
				       cmd_pending_q_flags);
		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
		return 0;
	}
	cmd_node = list_first_entry(&adapter->cmd_pending_q,
				    struct cmd_ctrl_node, list);
	spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
			       cmd_pending_q_flags);

	host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data);
	priv = cmd_node->priv;

	/* Sending while asleep would be lost; the caller is expected to wake
	 * the firmware first.
	 */
	if (adapter->ps_state != PS_STATE_AWAKE) {
		mwifiex_dbg(adapter, ERROR,
			    "%s: cannot send cmd in sleep state,\t"
			    "this should not happen\n", __func__);
		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
		return ret;
	}

	/* Unlink the node before downloading it to the firmware */
	spin_lock_irqsave(&adapter->cmd_pending_q_lock, cmd_pending_q_flags);
	list_del(&cmd_node->list);
	spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
			       cmd_pending_q_flags);

	spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
	ret = mwifiex_dnld_cmd_to_fw(priv, cmd_node);
	priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
	/* Any command sent to the firmware when host is in sleep
	 * mode should de-configure host sleep. We should skip the
	 * host sleep configuration command itself though
	 */
	if (priv && (host_cmd->command !=
	     cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH))) {
		if (adapter->hs_activated) {
			adapter->is_hs_configured = false;
			mwifiex_hs_activated_event(priv, false);
		}
	}

	return ret;
}

/*
 * This function handles the command response.
 *
 * After processing, the function cleans the command node and puts
 * it back to the command free queue.
 */
int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
{
	struct host_cmd_ds_command *resp;
	struct mwifiex_private *priv =
		mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
	int ret = 0;
	uint16_t orig_cmdresp_no;
	uint16_t cmdresp_no;
	uint16_t cmdresp_result;
	unsigned long flags;

	/* Now we got response from FW, cancel the command timer */
	del_timer_sync(&adapter->cmd_timer);

	if (!adapter->curr_cmd || !adapter->curr_cmd->resp_skb) {
		resp = (struct host_cmd_ds_command *) adapter->upld_buf;
		mwifiex_dbg(adapter, ERROR,
			    "CMD_RESP: NULL curr_cmd, %#x\n",
			    le16_to_cpu(resp->command));
		return -1;
	}

	adapter->is_cmd_timedout = 0;

	resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
	if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
		/* Copy original response back to response buffer */
		struct mwifiex_ds_misc_cmd *hostcmd;
		uint16_t size = le16_to_cpu(resp->size);
		mwifiex_dbg(adapter, INFO,
			    "info: host cmd resp size = %d\n", size);
		size = min_t(u16, size, MWIFIEX_SIZE_OF_CMD_BUFFER);
		if (adapter->curr_cmd->data_buf) {
			hostcmd = adapter->curr_cmd->data_buf;
			hostcmd->len = size;
			memcpy(hostcmd->cmd, resp, size);
		}
	}
	orig_cmdresp_no = le16_to_cpu(resp->command);

	/* Get BSS number and corresponding priv */
	priv = mwifiex_get_priv_by_id(adapter,
				      HostCmd_GET_BSS_NO(le16_to_cpu(resp->seq_num)),
				      HostCmd_GET_BSS_TYPE(le16_to_cpu(resp->seq_num)));
	if (!priv)
		priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
	/* Clear RET_BIT from HostCmd */
	resp->command = cpu_to_le16(orig_cmdresp_no & HostCmd_CMD_ID_MASK);

	cmdresp_no = le16_to_cpu(resp->command);
	cmdresp_result = le16_to_cpu(resp->result);

	/* Save the last command response to debug log (ring of DBG_CMD_NUM) */
	adapter->dbg.last_cmd_resp_index =
		(adapter->dbg.last_cmd_resp_index + 1) % DBG_CMD_NUM;
	adapter->dbg.last_cmd_resp_id[adapter->dbg.last_cmd_resp_index] =
		orig_cmdresp_no;

	mwifiex_dbg(adapter, CMD,
		    "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n",
		    orig_cmdresp_no, cmdresp_result,
		    le16_to_cpu(resp->size), le16_to_cpu(resp->seq_num));
	mwifiex_dbg_dump(adapter, CMD_D, "CMD_RESP buffer:", resp,
			 le16_to_cpu(resp->size));

	/* A response without RET_BIT set is not a command response at all */
	if (!(orig_cmdresp_no & HostCmd_RET_BIT)) {
		mwifiex_dbg(adapter, ERROR, "CMD_RESP: invalid cmd resp\n");
		if (adapter->curr_cmd->wait_q_enabled)
			adapter->cmd_wait_q.status = -1;

		mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
		spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
		adapter->curr_cmd = NULL;
		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
		return -1;
	}

	if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
		adapter->curr_cmd->cmd_flag &= ~CMD_F_HOSTCMD;
		if ((cmdresp_result == HostCmd_RESULT_OK) &&
		    (cmdresp_no == HostCmd_CMD_802_11_HS_CFG_ENH))
			ret = mwifiex_ret_802_11_hs_cfg(priv, resp);
	} else {
		/* handle response */
		ret = mwifiex_process_sta_cmdresp(priv, cmdresp_no, resp);
	}

	/* Check init command response */
	if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) {
		if (ret) {
			mwifiex_dbg(adapter, ERROR,
				    "%s: cmd %#x failed during\t"
				    "initialization\n", __func__, cmdresp_no);
			mwifiex_init_fw_complete(adapter);
			return -1;
		} else if (adapter->last_init_cmd == cmdresp_no)
			adapter->hw_status = MWIFIEX_HW_STATUS_INIT_DONE;
	}

	/* curr_cmd may have been cleared by a concurrent cancel path;
	 * re-check before recycling.
	 */
	if (adapter->curr_cmd) {
		if (adapter->curr_cmd->wait_q_enabled)
			adapter->cmd_wait_q.status = ret;

		mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
		spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
		adapter->curr_cmd = NULL;
		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
	}

	return ret;
}

/*
 * This function handles the timeout of
command sending.
 *
 * NOTE(review): despite older wording, this handler does not re-send the
 * command. It records the timed-out command id/action, dumps driver debug
 * counters, fails any waiting ioctl with -ETIMEDOUT, and finally asks the
 * interface driver for a device dump and a card reset.
 */
void
mwifiex_cmd_timeout_func(unsigned long function_context)
{
	struct mwifiex_adapter *adapter =
		(struct mwifiex_adapter *) function_context;
	struct cmd_ctrl_node *cmd_node;

	adapter->is_cmd_timedout = 1;
	if (!adapter->curr_cmd) {
		mwifiex_dbg(adapter, ERROR,
			    "cmd: empty curr_cmd\n");
		return;
	}
	cmd_node = adapter->curr_cmd;
	if (cmd_node) {
		adapter->dbg.timeout_cmd_id =
			adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index];
		adapter->dbg.timeout_cmd_act =
			adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index];
		mwifiex_dbg(adapter, MSG,
			    "%s: Timeout cmd id = %#x, act = %#x\n", __func__,
			    adapter->dbg.timeout_cmd_id,
			    adapter->dbg.timeout_cmd_act);

		mwifiex_dbg(adapter, MSG, "num_data_h2c_failure = %d\n",
			    adapter->dbg.num_tx_host_to_card_failure);
		mwifiex_dbg(adapter, MSG, "num_cmd_h2c_failure = %d\n",
			    adapter->dbg.num_cmd_host_to_card_failure);

		mwifiex_dbg(adapter, MSG, "is_cmd_timedout = %d\n",
			    adapter->is_cmd_timedout);
		mwifiex_dbg(adapter, MSG, "num_tx_timeout = %d\n",
			    adapter->dbg.num_tx_timeout);

		mwifiex_dbg(adapter, MSG, "last_cmd_index = %d\n",
			    adapter->dbg.last_cmd_index);
		mwifiex_dbg(adapter, MSG, "last_cmd_id: %*ph\n",
			    (int)sizeof(adapter->dbg.last_cmd_id),
			    adapter->dbg.last_cmd_id);
		mwifiex_dbg(adapter, MSG, "last_cmd_act: %*ph\n",
			    (int)sizeof(adapter->dbg.last_cmd_act),
			    adapter->dbg.last_cmd_act);

		mwifiex_dbg(adapter, MSG, "last_cmd_resp_index = %d\n",
			    adapter->dbg.last_cmd_resp_index);
		mwifiex_dbg(adapter, MSG, "last_cmd_resp_id: %*ph\n",
			    (int)sizeof(adapter->dbg.last_cmd_resp_id),
			    adapter->dbg.last_cmd_resp_id);

		mwifiex_dbg(adapter, MSG, "last_event_index = %d\n",
			    adapter->dbg.last_event_index);
		mwifiex_dbg(adapter, MSG, "last_event: %*ph\n",
			    (int)sizeof(adapter->dbg.last_event),
			    adapter->dbg.last_event);

		mwifiex_dbg(adapter, MSG, "data_sent=%d cmd_sent=%d\n",
			    adapter->data_sent, adapter->cmd_sent);

		mwifiex_dbg(adapter, MSG, "ps_mode=%d ps_state=%d\n",
			    adapter->ps_mode, adapter->ps_state);

		/* Fail the waiting ioctl so userspace does not hang */
		if (cmd_node->wait_q_enabled) {
			adapter->cmd_wait_q.status = -ETIMEDOUT;
			mwifiex_cancel_pending_ioctl(adapter);
		}
	}
	if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) {
		mwifiex_init_fw_complete(adapter);
		return;
	}

	if (adapter->if_ops.device_dump)
		adapter->if_ops.device_dump(adapter);

	if (adapter->if_ops.card_reset)
		adapter->if_ops.card_reset(adapter);
}

/*
 * This function cancels all the pending commands.
 *
 * The current command, all commands in command pending queue and all scan
 * commands in scan pending queue are cancelled. All the completion callbacks
 * are called with failure status to ensure cleanup.
 */
void
mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
{
	struct cmd_ctrl_node *cmd_node = NULL, *tmp_node;
	unsigned long flags, cmd_flags;
	struct mwifiex_private *priv;
	int i;

	spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
	/* Cancel current cmd */
	if ((adapter->curr_cmd) && (adapter->curr_cmd->wait_q_enabled)) {
		adapter->curr_cmd->wait_q_enabled = false;
		adapter->cmd_wait_q.status = -1;
		mwifiex_complete_cmd(adapter, adapter->curr_cmd);
		/* no recycle probably wait for response */
	}
	/* Cancel all pending command */
	spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
	list_for_each_entry_safe(cmd_node, tmp_node,
				 &adapter->cmd_pending_q, list) {
		list_del(&cmd_node->list);
		/* Drop the queue lock around recycle: recycle takes other
		 * locks of its own. Safe because the node is already
		 * unlinked and _safe iteration keeps tmp_node.
		 */
		spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);

		if (cmd_node->wait_q_enabled)
			adapter->cmd_wait_q.status = -1;
		mwifiex_recycle_cmd_node(adapter, cmd_node);
		spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
	}
	spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
	spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);

	/* Cancel all pending scan command */
	spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
	list_for_each_entry_safe(cmd_node, tmp_node,
				 &adapter->scan_pending_q, list) {
		list_del(&cmd_node->list);

		cmd_node->wait_q_enabled = false;
		mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
	}
	spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);

	if (adapter->scan_processing) {
		spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
		adapter->scan_processing = false;
		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
		for (i = 0; i < adapter->priv_num; i++) {
			priv = adapter->priv[i];
			if (!priv)
				continue;
			if (priv->scan_request) {
				mwifiex_dbg(adapter, WARN,
					    "info: aborting scan\n");
				cfg80211_scan_done(priv->scan_request, 1);
				priv->scan_request = NULL;
			}
		}
	}
}

/*
 * This function cancels all pending commands that matches with
 * the given IOCTL request.
 *
 * Both the current command buffer and the pending command queue are
 * searched for matching IOCTL request. The completion callback of
 * the matched command is called with failure status to ensure cleanup.
 * In case of scan commands, all pending commands in scan pending queue
 * are cancelled.
 */
void
mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
{
	struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
	unsigned long cmd_flags;
	unsigned long scan_pending_q_flags;
	struct mwifiex_private *priv;
	int i;

	if ((adapter->curr_cmd) &&
	    (adapter->curr_cmd->wait_q_enabled)) {
		spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
		cmd_node = adapter->curr_cmd;
		/* setting curr_cmd to NULL is quite dangerous, because
		 * mwifiex_process_cmdresp checks curr_cmd to be != NULL
		 * at the beginning then relies on it and dereferences
		 * it at will
		 * this probably works since mwifiex_cmd_timeout_func
		 * is the only caller of this function and responses
		 * at that point
		 */
		adapter->curr_cmd = NULL;
		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);

		mwifiex_recycle_cmd_node(adapter, cmd_node);
	}

	/* Cancel all pending scan command */
	spin_lock_irqsave(&adapter->scan_pending_q_lock,
			  scan_pending_q_flags);
	list_for_each_entry_safe(cmd_node, tmp_node,
				 &adapter->scan_pending_q, list) {
		list_del(&cmd_node->list);

		cmd_node->wait_q_enabled = false;
		mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
	}
	spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
			       scan_pending_q_flags);

	if (adapter->scan_processing) {
		spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
		adapter->scan_processing = false;
		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
		for (i = 0; i < adapter->priv_num; i++) {
			priv = adapter->priv[i];
			if (!priv)
				continue;
			if (priv->scan_request) {
				mwifiex_dbg(adapter, WARN,
					    "info: aborting scan\n");
				cfg80211_scan_done(priv->scan_request, 1);
				priv->scan_request = NULL;
			}
		}
	}
}

/*
 * This function sends the sleep confirm command to firmware, if
 * possible.
 *
 * The sleep confirm command cannot be issued if command response,
 * data response or event response is awaiting handling, or if we
 * are in the middle of sending a command, or expecting a command
 * response.
 */
void
mwifiex_check_ps_cond(struct mwifiex_adapter *adapter)
{
	if (!adapter->cmd_sent &&
	    !adapter->curr_cmd && !IS_CARD_RX_RCVD(adapter))
		mwifiex_dnld_sleep_confirm_cmd(adapter);
	else
		mwifiex_dbg(adapter, CMD,
			    "cmd: Delay Sleep Confirm (%s%s%s)\n",
			    (adapter->cmd_sent) ? "D" : "",
			    (adapter->curr_cmd) ? "C" : "",
			    (IS_CARD_RX_RCVD(adapter)) ? "R" : "");
}

/*
 * This function sends a Host Sleep activated event to applications.
 *
 * This event is generated by the driver, with a blank event body.
 */
void
mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated)
{
	if (activated) {
		if (priv->adapter->is_hs_configured) {
			priv->adapter->hs_activated = true;
			mwifiex_update_rxreor_flags(priv->adapter,
						    RXREOR_FORCE_NO_DROP);
			mwifiex_dbg(priv->adapter, EVENT,
				    "event: hs_activated\n");
			priv->adapter->hs_activate_wait_q_woken = true;
			wake_up_interruptible(
				&priv->adapter->hs_activate_wait_q);
		} else {
			mwifiex_dbg(priv->adapter, EVENT,
				    "event: HS not configured\n");
		}
	} else {
		mwifiex_dbg(priv->adapter, EVENT,
			    "event: hs_deactivated\n");
		priv->adapter->hs_activated = false;
	}
}

/*
 * This function handles the command response of a Host Sleep configuration
 * command.
*
 * Handling includes changing the header fields into CPU format
 * and setting the current host sleep activation status in driver.
 *
 * In case host sleep status change, the function generates an event to
 * notify the applications.
 */
int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
			      struct host_cmd_ds_command *resp)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct host_cmd_ds_802_11_hs_cfg_enh *phs_cfg =
		&resp->params.opt_hs_cfg;
	uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions);

	/* On non-USB interfaces an ACTIVATE reply means HS is live already */
	if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) &&
	    adapter->iface_type != MWIFIEX_USB) {
		mwifiex_hs_activated_event(priv, true);
		return 0;
	} else {
		mwifiex_dbg(adapter, CMD,
			    "cmd: CMD_RESP: HS_CFG cmd reply\t"
			    " result=%#x, conditions=0x%x gpio=0x%x gap=0x%x\n",
			    resp->result, conditions,
			    phs_cfg->params.hs_config.gpio,
			    phs_cfg->params.hs_config.gap);
	}
	if (conditions != HS_CFG_CANCEL) {
		adapter->is_hs_configured = true;
		/* USB activates host sleep as soon as it is configured */
		if (adapter->iface_type == MWIFIEX_USB)
			mwifiex_hs_activated_event(priv, true);
	} else {
		adapter->is_hs_configured = false;
		if (adapter->hs_activated)
			mwifiex_hs_activated_event(priv, false);
	}

	return 0;
}

/*
 * This function wakes up the adapter and generates a Host Sleep
 * cancel event on receiving the power up interrupt.
 */
void
mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
{
	mwifiex_dbg(adapter, INFO,
		    "info: %s: auto cancelling host sleep\t"
		    "since there is interrupt from the firmware\n", __func__);

	adapter->if_ops.wakeup(adapter);
	adapter->hs_activated = false;
	adapter->is_hs_configured = false;
	adapter->is_suspended = false;
	mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
						    MWIFIEX_BSS_ROLE_ANY),
				   false);
}
EXPORT_SYMBOL_GPL(mwifiex_process_hs_config);

/*
 * This function handles the command response of a sleep confirm command.
 *
 * The function sets the card state to SLEEP if the response indicates success.
 */
void
mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter,
				   u8 *pbuf, u32 upld_len)
{
	struct host_cmd_ds_command *cmd = (struct host_cmd_ds_command *) pbuf;
	struct mwifiex_private *priv =
		mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
	uint16_t result = le16_to_cpu(cmd->result);
	uint16_t command = le16_to_cpu(cmd->command);
	uint16_t seq_num = le16_to_cpu(cmd->seq_num);

	if (!upld_len) {
		mwifiex_dbg(adapter, ERROR,
			    "%s: cmd size is 0\n", __func__);
		return;
	}

	mwifiex_dbg(adapter, CMD,
		    "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n",
		    command, result, le16_to_cpu(cmd->size), seq_num);

	/* Get BSS number and corresponding priv */
	priv = mwifiex_get_priv_by_id(adapter, HostCmd_GET_BSS_NO(seq_num),
				      HostCmd_GET_BSS_TYPE(seq_num));
	if (!priv)
		priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
	/* Update sequence number */
	seq_num = HostCmd_GET_SEQ_NO(seq_num);
	/* Clear RET_BIT from HostCmd */
	command &= HostCmd_CMD_ID_MASK;

	if (command != HostCmd_CMD_802_11_PS_MODE_ENH) {
		mwifiex_dbg(adapter, ERROR,
			    "%s: rcvd unexpected resp for cmd %#x, result = %x\n",
			    __func__, command, result);
		return;
	}

	if (result) {
		mwifiex_dbg(adapter, ERROR,
			    "%s: sleep confirm cmd failed\n",
			    __func__);
		adapter->pm_wakeup_card_req = false;
		adapter->ps_state = PS_STATE_AWAKE;
		return;
	}
	adapter->pm_wakeup_card_req = true;
	if (adapter->is_hs_configured)
		mwifiex_hs_activated_event(mwifiex_get_priv
						(adapter, MWIFIEX_BSS_ROLE_ANY),
					   true);
	adapter->ps_state = PS_STATE_SLEEP;
	/* Write back the masked command id and bare sequence number */
	cmd->command = cpu_to_le16(command);
	cmd->seq_num = cpu_to_le16(seq_num);
}
EXPORT_SYMBOL_GPL(mwifiex_process_sleep_confirm_resp);

/*
 * This function prepares an enhanced power mode command.
 *
 * This function can be used to disable power save or to configure
 * power save with auto PS or STA PS or auto deep sleep.
*
 * Preparation includes -
 *      - Setting command ID, action and proper size
 *      - Setting Power Save bitmap, PS parameters TLV, PS mode TLV,
 *        auto deep sleep TLV (as required)
 *      - Ensuring correct endian-ness
 */
int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv,
			       struct host_cmd_ds_command *cmd,
			       u16 cmd_action, uint16_t ps_bitmap,
			       struct mwifiex_ds_auto_ds *auto_ds)
{
	struct host_cmd_ds_802_11_ps_mode_enh *psmode_enh =
		&cmd->params.psmode_enh;
	u8 *tlv;
	u16 cmd_size = 0;

	cmd->command = cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH);
	if (cmd_action == DIS_AUTO_PS) {
		psmode_enh->action = cpu_to_le16(DIS_AUTO_PS);
		psmode_enh->params.ps_bitmap = cpu_to_le16(ps_bitmap);
		cmd->size = cpu_to_le16(S_DS_GEN + sizeof(psmode_enh->action) +
					sizeof(psmode_enh->params.ps_bitmap));
	} else if (cmd_action == GET_PS) {
		psmode_enh->action = cpu_to_le16(GET_PS);
		psmode_enh->params.ps_bitmap = cpu_to_le16(ps_bitmap);
		cmd->size = cpu_to_le16(S_DS_GEN + sizeof(psmode_enh->action) +
					sizeof(psmode_enh->params.ps_bitmap));
	} else if (cmd_action == EN_AUTO_PS) {
		psmode_enh->action = cpu_to_le16(EN_AUTO_PS);
		psmode_enh->params.ps_bitmap = cpu_to_le16(ps_bitmap);
		cmd_size = S_DS_GEN + sizeof(psmode_enh->action) +
			   sizeof(psmode_enh->params.ps_bitmap);
		/* TLVs are appended after the fixed part; tlv walks the
		 * write position and cmd_size tracks the running total.
		 */
		tlv = (u8 *) cmd + cmd_size;
		if (ps_bitmap & BITMAP_STA_PS) {
			struct mwifiex_adapter *adapter = priv->adapter;
			struct mwifiex_ie_types_ps_param *ps_tlv =
				(struct mwifiex_ie_types_ps_param *) tlv;
			struct mwifiex_ps_param *ps_mode = &ps_tlv->param;

			ps_tlv->header.type = cpu_to_le16(TLV_TYPE_PS_PARAM);
			ps_tlv->header.len = cpu_to_le16(sizeof(*ps_tlv) -
					sizeof(struct mwifiex_ie_types_header));
			cmd_size += sizeof(*ps_tlv);
			tlv += sizeof(*ps_tlv);
			mwifiex_dbg(priv->adapter, CMD,
				    "cmd: PS Command: Enter PS\n");
			ps_mode->null_pkt_interval =
				cpu_to_le16(adapter->null_pkt_interval);
			ps_mode->multiple_dtims =
				cpu_to_le16(adapter->multiple_dtim);
			ps_mode->bcn_miss_timeout =
				cpu_to_le16(adapter->bcn_miss_time_out);
			ps_mode->local_listen_interval =
				cpu_to_le16(adapter->local_listen_interval);
			ps_mode->adhoc_wake_period =
				cpu_to_le16(adapter->adhoc_awake_period);
			ps_mode->delay_to_ps =
				cpu_to_le16(adapter->delay_to_ps);
			ps_mode->mode = cpu_to_le16(adapter->enhanced_ps_mode);
		}
		if (ps_bitmap & BITMAP_AUTO_DS) {
			struct mwifiex_ie_types_auto_ds_param *auto_ds_tlv =
				(struct mwifiex_ie_types_auto_ds_param *) tlv;
			u16 idletime = 0;

			auto_ds_tlv->header.type =
				cpu_to_le16(TLV_TYPE_AUTO_DS_PARAM);
			auto_ds_tlv->header.len =
				cpu_to_le16(sizeof(*auto_ds_tlv) -
					sizeof(struct mwifiex_ie_types_header));
			cmd_size += sizeof(*auto_ds_tlv);
			tlv += sizeof(*auto_ds_tlv);
			if (auto_ds)
				idletime = auto_ds->idle_time;
			mwifiex_dbg(priv->adapter, CMD,
				    "cmd: PS Command: Enter Auto Deep Sleep\n");
			auto_ds_tlv->deep_sleep_timeout = cpu_to_le16(idletime);
		}
		cmd->size = cpu_to_le16(cmd_size);
	}
	return 0;
}

/*
 * This function handles the command response of an enhanced power mode
 * command.
 *
 * Handling includes changing the header fields into CPU format
 * and setting the current enhanced power mode in driver.
 */
int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv,
			       struct host_cmd_ds_command *resp,
			       struct mwifiex_ds_pm_cfg *pm_cfg)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct host_cmd_ds_802_11_ps_mode_enh *ps_mode =
		&resp->params.psmode_enh;
	uint16_t action = le16_to_cpu(ps_mode->action);
	uint16_t ps_bitmap = le16_to_cpu(ps_mode->params.ps_bitmap);
	uint16_t auto_ps_bitmap =
		le16_to_cpu(ps_mode->params.ps_bitmap);

	mwifiex_dbg(adapter, INFO,
		    "info: %s: PS_MODE cmd reply result=%#x action=%#X\n",
		    __func__, resp->result, action);
	if (action == EN_AUTO_PS) {
		if (auto_ps_bitmap & BITMAP_AUTO_DS) {
			mwifiex_dbg(adapter, CMD,
				    "cmd: Enabled auto deep sleep\n");
			priv->adapter->is_deep_sleep = true;
		}
		if (auto_ps_bitmap & BITMAP_STA_PS) {
			mwifiex_dbg(adapter, CMD,
				    "cmd: Enabled STA power save\n");
			if (adapter->sleep_period.period)
				mwifiex_dbg(adapter, CMD,
					    "cmd: set to uapsd/pps mode\n");
		}
	} else if (action == DIS_AUTO_PS) {
		if (ps_bitmap & BITMAP_AUTO_DS) {
			priv->adapter->is_deep_sleep = false;
			mwifiex_dbg(adapter, CMD,
				    "cmd: Disabled auto deep sleep\n");
		}
		if (ps_bitmap & BITMAP_STA_PS) {
			mwifiex_dbg(adapter, CMD,
				    "cmd: Disabled STA power save\n");
			if (adapter->sleep_period.period) {
				adapter->delay_null_pkt = false;
				adapter->tx_lock_flag = false;
				adapter->pps_uapsd_mode = false;
			}
		}
	} else if (action == GET_PS) {
		if (ps_bitmap & BITMAP_STA_PS)
			adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP;
		else
			adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM;

		mwifiex_dbg(adapter, CMD,
			    "cmd: ps_bitmap=%#x\n", ps_bitmap);

		if (pm_cfg) {
			/* This section is for get power save mode */
			if (ps_bitmap & BITMAP_STA_PS)
				pm_cfg->param.ps_mode = 1;
			else
				pm_cfg->param.ps_mode = 0;
		}
	}
	return 0;
}

/*
 * This function prepares command to get hardware specifications.
*
 * Preparation includes -
 *      - Setting command ID, action and proper size
 *      - Setting permanent address parameter
 *      - Ensuring correct endian-ness
 */
int mwifiex_cmd_get_hw_spec(struct mwifiex_private *priv,
			    struct host_cmd_ds_command *cmd)
{
	struct host_cmd_ds_get_hw_spec *hw_spec = &cmd->params.hw_spec;

	cmd->command = cpu_to_le16(HostCmd_CMD_GET_HW_SPEC);
	cmd->size =
		cpu_to_le16(sizeof(struct host_cmd_ds_get_hw_spec) + S_DS_GEN);
	memcpy(hw_spec->permanent_addr, priv->curr_addr, ETH_ALEN);

	return 0;
}

/*
 * This function handles the command response of get hardware
 * specifications.
 *
 * Handling includes changing the header fields into CPU format
 * and saving/updating the following parameters in driver -
 *      - Firmware capability information
 *      - Firmware band settings
 *      - Ad-hoc start band and channel
 *      - Ad-hoc 11n activation status
 *      - Firmware release number
 *      - Number of antennas
 *      - Hardware address
 *      - Hardware interface version
 *      - Firmware version
 *      - Region code
 *      - 11n capabilities
 *      - MCS support fields
 *      - MP end port
 */
int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
			    struct host_cmd_ds_command *resp)
{
	struct host_cmd_ds_get_hw_spec *hw_spec = &resp->params.hw_spec;
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_ie_types_header *tlv;
	struct hw_spec_api_rev *api_rev;
	u16 resp_size, api_id;
	int i, left_len, parsed_len = 0;

	adapter->fw_cap_info = le32_to_cpu(hw_spec->fw_cap_info);

	if (IS_SUPPORT_MULTI_BANDS(adapter))
		adapter->fw_bands = (u8) GET_FW_DEFAULT_BANDS(adapter);
	else
		adapter->fw_bands = BAND_B;

	adapter->config_bands = adapter->fw_bands;

	/* Derive ad-hoc start band/channel and 11n status from the
	 * firmware-reported band capabilities.
	 */
	if (adapter->fw_bands & BAND_A) {
		if (adapter->fw_bands & BAND_GN) {
			adapter->config_bands |= BAND_AN;
			adapter->fw_bands |= BAND_AN;
		}
		if (adapter->fw_bands & BAND_AN) {
			adapter->adhoc_start_band = BAND_A | BAND_AN;
			adapter->adhoc_11n_enabled = true;
		} else {
			adapter->adhoc_start_band = BAND_A;
		}
		priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL_A;
	} else if (adapter->fw_bands & BAND_GN) {
		adapter->adhoc_start_band = BAND_G | BAND_B | BAND_GN;
		priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL;
		adapter->adhoc_11n_enabled = true;
	} else if (adapter->fw_bands & BAND_G) {
		adapter->adhoc_start_band = BAND_G | BAND_B;
		priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL;
	} else if (adapter->fw_bands & BAND_B) {
		adapter->adhoc_start_band = BAND_B;
		priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL;
	}

	adapter->fw_release_number = le32_to_cpu(hw_spec->fw_release_number);
	adapter->fw_api_ver = (adapter->fw_release_number >> 16) & 0xff;
	adapter->number_of_antenna = le16_to_cpu(hw_spec->number_of_antenna);

	/* Non-zero 11ac dev cap means the hardware is 11ac capable */
	if (le32_to_cpu(hw_spec->dot_11ac_dev_cap)) {
		adapter->is_hw_11ac_capable = true;

		/* Copy 11AC cap */
		adapter->hw_dot_11ac_dev_cap =
					le32_to_cpu(hw_spec->dot_11ac_dev_cap);
		adapter->usr_dot_11ac_dev_cap_bg = adapter->hw_dot_11ac_dev_cap
					& ~MWIFIEX_DEF_11AC_CAP_BF_RESET_MASK;
		adapter->usr_dot_11ac_dev_cap_a = adapter->hw_dot_11ac_dev_cap
					& ~MWIFIEX_DEF_11AC_CAP_BF_RESET_MASK;

		/* Copy 11AC mcs */
		adapter->hw_dot_11ac_mcs_support =
				le32_to_cpu(hw_spec->dot_11ac_mcs_support);
		adapter->usr_dot_11ac_mcs_support =
					adapter->hw_dot_11ac_mcs_support;
	} else {
		adapter->is_hw_11ac_capable = false;
	}

	resp_size = le16_to_cpu(resp->size) - S_DS_GEN;
	if (resp_size > sizeof(struct host_cmd_ds_get_hw_spec)) {
		/* we have variable HW SPEC information */
		left_len = resp_size - sizeof(struct host_cmd_ds_get_hw_spec);
		while (left_len > sizeof(struct mwifiex_ie_types_header)) {
			tlv = (void *)&hw_spec->tlvs + parsed_len;
			switch (le16_to_cpu(tlv->type)) {
			case TLV_TYPE_API_REV:
				api_rev = (struct hw_spec_api_rev *)tlv;
				api_id = le16_to_cpu(api_rev->api_id);
				switch (api_id) {
				case KEY_API_VER_ID:
					adapter->key_api_major_ver =
							api_rev->major_ver;
					adapter->key_api_minor_ver =
							api_rev->minor_ver;
					mwifiex_dbg(adapter, INFO,
						    "key_api v%d.%d\n",
						    adapter->key_api_major_ver,
						    adapter->key_api_minor_ver);
					break;
				case FW_API_VER_ID:
					adapter->fw_api_ver =
							api_rev->major_ver;
					mwifiex_dbg(adapter, INFO,
						    "Firmware api version %d\n",
						    adapter->fw_api_ver);
					break;
				default:
					mwifiex_dbg(adapter, FATAL,
						    "Unknown api_id: %d\n",
						    api_id);
					break;
				}
				break;
			default:
				mwifiex_dbg(adapter, FATAL,
					    "Unknown GET_HW_SPEC TLV type: %#x\n",
					    le16_to_cpu(tlv->type));
				break;
			}
			parsed_len += le16_to_cpu(tlv->len) +
				      sizeof(struct mwifiex_ie_types_header);
			left_len -= le16_to_cpu(tlv->len) +
				    sizeof(struct mwifiex_ie_types_header);
		}
	}

	mwifiex_dbg(adapter, INFO,
		    "info: GET_HW_SPEC: fw_release_number- %#x\n",
		    adapter->fw_release_number);
	mwifiex_dbg(adapter, INFO,
		    "info: GET_HW_SPEC: permanent addr: %pM\n",
		    hw_spec->permanent_addr);
	mwifiex_dbg(adapter, INFO,
		    "info: GET_HW_SPEC: hw_if_version=%#x version=%#x\n",
		    le16_to_cpu(hw_spec->hw_if_version),
		    le16_to_cpu(hw_spec->version));

	ether_addr_copy(priv->adapter->perm_addr, hw_spec->permanent_addr);
	adapter->region_code = le16_to_cpu(hw_spec->region_code);

	for (i = 0; i < MWIFIEX_MAX_REGION_CODE; i++)
		/* Use the region code to search for the index */
		if (adapter->region_code == region_code_index[i])
			break;

	/* If it's unidentified region code, use the default (world) */
	if (i >= MWIFIEX_MAX_REGION_CODE) {
		adapter->region_code = 0x00;
		mwifiex_dbg(adapter, WARN,
			    "cmd: unknown region code, use default (USA)\n");
	}

	adapter->hw_dot_11n_dev_cap = le32_to_cpu(hw_spec->dot_11n_dev_cap);
	adapter->hw_dev_mcs_support = hw_spec->dev_mcs_support;
	adapter->user_dev_mcs_support = adapter->hw_dev_mcs_support;

	if (adapter->if_ops.update_mp_end_port)
		adapter->if_ops.update_mp_end_port(adapter,
					le16_to_cpu(hw_spec->mp_end_port));

	if (adapter->fw_api_ver == MWIFIEX_FW_V15)
		adapter->scan_chan_gap_enabled = true;

	return 0;
}
gpl-2.0
adrienverge/linux
drivers/infiniband/ulp/ipoib/ipoib_ib.c
53
32373
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <linux/ip.h>
#include <linux/tcp.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
#endif

/*
 * Allocate an ipoib_ah wrapper around a newly created IB address handle.
 * On ib_create_ah() failure the wrapper is freed and the ERR_PTR from
 * ib_create_ah() is propagated to the caller (cast through ipoib_ah *).
 */
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
				 struct ib_pd *pd, struct ib_ah_attr *attr)
{
	struct ipoib_ah *ah;
	struct ib_ah *vah;

	ah = kmalloc(sizeof *ah, GFP_KERNEL);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	ah->dev       = dev;
	ah->last_send = 0;
	kref_init(&ah->ref);

	vah = ib_create_ah(pd, attr);
	if (IS_ERR(vah)) {
		kfree(ah);
		/* propagate the ERR_PTR value, not a valid object */
		ah = (struct ipoib_ah *)vah;
	} else {
		ah->ah = vah;
		ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);
	}

	return ah;
}

/*
 * kref release callback: queue the AH on the device's dead_ahs list under
 * priv->lock; actual destruction happens elsewhere (reaper, not visible
 * here).
 */
void ipoib_free_ah(struct kref *kref)
{
	struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
	struct ipoib_dev_priv *priv = netdev_priv(ah->dev);

	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	list_add_tail(&ah->list, &priv->dead_ahs);
	spin_unlock_irqrestore(&priv->lock, flags);
}

/* Unmap the single DMA mapping of one UD receive buffer. */
static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
				  u64 mapping[IPOIB_UD_RX_SG])
{
	ib_dma_unmap_single(priv->ca, mapping[0],
			    IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
			    DMA_FROM_DEVICE);
}

/*
 * Post receive ring entry 'id' to the QP. On failure the buffer is
 * unmapped and freed, and the ring slot is cleared.
 */
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int ret;

	priv->rx_wr.wr_id   = id | IPOIB_OP_RECV;
	priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
	priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];

	ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
		dev_kfree_skb_any(priv->rx_ring[id].skb);
		priv->rx_ring[id].skb = NULL;
	}

	return ret;
}

/*
 * Allocate and DMA-map a receive skb for ring slot 'id'.
 * Returns the skb on success, NULL on allocation/mapping failure.
 */
static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	int buf_size;
	u64 *mapping;

	buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);

	skb = dev_alloc_skb(buf_size + IPOIB_ENCAP_LEN);
	if (unlikely(!skb))
		return NULL;

	/*
	 * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
	 * header.  So we need 4 more bytes to get to 48 and align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 4);

	mapping = priv->rx_ring[id].mapping;
	mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
		goto error;

	priv->rx_ring[id].skb = skb;
	return skb;
error:
	dev_kfree_skb_any(skb);
	return NULL;
}

/* Fill and post the entire receive ring; any failure aborts setup. */
static int ipoib_ib_post_receives(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_alloc_rx_skb(dev, i)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
		}
		if (ipoib_ib_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
			return -EIO;
		}
	}

	return 0;
}

/*
 * Receive completion handler: validates the completion, replenishes the
 * ring slot, classifies the packet (host/broadcast/multicast), drops our
 * own reflected multicast, and hands the skb to GRO.
 */
static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
	struct sk_buff *skb;
	u64 mapping[IPOIB_UD_RX_SG];
	union ib_gid *dgid;
	union ib_gid *sgid;

	ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_recvq_size);
		return;
	}

	skb  = priv->rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			ipoib_warn(priv, "failed recv event "
				   "(status=%d, wrid=%d vend_err %x)\n",
				   wc->status, wr_id, wc->vendor_err);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
		dev_kfree_skb_any(skb);
		priv->rx_ring[wr_id].skb = NULL;
		return;
	}

	memcpy(mapping, priv->rx_ring[wr_id].mapping,
	       IPOIB_UD_RX_SG * sizeof *mapping);

	/*
	 * If we can't allocate a new RX buffer, dump
	 * this packet and reuse the old buffer.
	 */
	if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	ipoib_ud_dma_unmap_rx(priv, mapping);

	skb_put(skb, wc->byte_len);

	/* First byte of dgid signals multicast when 0xff */
	dgid = &((struct ib_grh *)skb->data)->dgid;

	if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
		skb->pkt_type = PACKET_HOST;
	else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
		skb->pkt_type = PACKET_BROADCAST;
	else
		skb->pkt_type = PACKET_MULTICAST;

	sgid = &((struct ib_grh *)skb->data)->sgid;

	/*
	 * Drop packets that this interface sent, ie multicast packets
	 * that the HCA has replicated.
	 */
	if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) {
		int need_repost = 1;

		if ((wc->wc_flags & IB_WC_GRH) &&
		    sgid->global.interface_id != priv->local_gid.global.interface_id)
			need_repost = 0;

		if (need_repost) {
			dev_kfree_skb_any(skb);
			goto repost;
		}
	}

	skb_pull(skb, IB_GRH_BYTES);

	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_reset_mac_header(skb);
	skb_pull(skb, IPOIB_ENCAP_LEN);

	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;

	skb->dev = dev;
	if ((dev->features & NETIF_F_RXCSUM) &&
			likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	napi_gro_receive(&priv->napi, skb);

repost:
	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
		ipoib_warn(priv, "ipoib_ib_post_receive failed "
			   "for buf %d\n", wr_id);
}

/*
 * DMA-map a TX skb: head (if any) plus each page fragment.
 * On partial failure, everything mapped so far is unwound and -EIO
 * is returned.
 */
int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
					       DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
			return -EIO;

		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		mapping[i + off] = ib_dma_map_page(ca,
						 skb_frag_page(frag),
						 frag->page_offset, skb_frag_size(frag),
						 DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
			goto partial_error;
	}
	return 0;

partial_error:
	for (; i > 0; --i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE);
	}

	if (off)
		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);

	return -EIO;
}

/* Undo ipoib_dma_map_tx(): unmap head and all fragments. */
void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
			struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
				    DMA_TO_DEVICE);
		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		ib_dma_unmap_page(priv->ca, mapping[i + off],
				  skb_frag_size(frag), DMA_TO_DEVICE);
	}
}

/*
 * As the result of a completion error the QP Can be transferred to SQE states.
 * The function checks if the (send)QP is in SQE state and
 * moves it back to RTS state, that in order to have it functional again.
 */
static void ipoib_qp_state_validate_work(struct work_struct *work)
{
	struct ipoib_qp_state_validate *qp_work =
		container_of(work, struct ipoib_qp_state_validate, work);

	struct ipoib_dev_priv *priv = qp_work->priv;
	struct ib_qp_attr qp_attr;
	struct ib_qp_init_attr query_init_attr;
	int ret;

	ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr);
	if (ret) {
		ipoib_warn(priv, "%s: Failed to query QP ret: %d\n",
			   __func__, ret);
		goto free_res;
	}
	pr_info("%s: QP: 0x%x is in state: %d\n",
		__func__, priv->qp->qp_num, qp_attr.qp_state);

	/* currently support only in SQE->RTS transition*/
	if (qp_attr.qp_state == IB_QPS_SQE) {
		qp_attr.qp_state = IB_QPS_RTS;

		ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE);
		if (ret) {
			pr_warn("failed(%d) modify QP:0x%x SQE->RTS\n",
				ret, priv->qp->qp_num);
			goto free_res;
		}
		pr_info("%s: QP: 0x%x moved from IB_QPS_SQE to IB_QPS_RTS\n",
			__func__, priv->qp->qp_num);
	} else {
		pr_warn("QP (%d) will stay in state: %d\n",
			priv->qp->qp_num, qp_attr.qp_state);
	}

free_res:
	kfree(qp_work);
}

/*
 * Send completion handler: unmaps and frees the TX buffer, updates stats,
 * wakes the queue when enough slots drain, and (continues past this chunk
 * boundary) schedules QP state validation on non-flush errors.
 */
static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;

	ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &priv->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv, tx_req);

	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	++priv->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_qp_state_validate *qp_work;
		ipoib_warn(priv, "failed send event "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id,
wc->vendor_err); qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC); if (!qp_work) { ipoib_warn(priv, "%s Failed alloc ipoib_qp_state_validate for qp: 0x%x\n", __func__, priv->qp->qp_num); return; } INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work); qp_work->priv = priv; queue_work(priv->wq, &qp_work->work); } } static int poll_tx(struct ipoib_dev_priv *priv) { int n, i; n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc); for (i = 0; i < n; ++i) ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i); return n == MAX_SEND_CQE; } int ipoib_poll(struct napi_struct *napi, int budget) { struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi); struct net_device *dev = priv->dev; int done; int t; int n, i; done = 0; poll_more: while (done < budget) { int max = (budget - done); t = min(IPOIB_NUM_WC, max); n = ib_poll_cq(priv->recv_cq, t, priv->ibwc); for (i = 0; i < n; i++) { struct ib_wc *wc = priv->ibwc + i; if (wc->wr_id & IPOIB_OP_RECV) { ++done; if (wc->wr_id & IPOIB_OP_CM) ipoib_cm_handle_rx_wc(dev, wc); else ipoib_ib_handle_rx_wc(dev, wc); } else ipoib_cm_handle_tx_wc(priv->dev, wc); } if (n != t) break; } if (done < budget) { napi_complete(napi); if (unlikely(ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)) && napi_reschedule(napi)) goto poll_more; } return done; } void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr) { struct net_device *dev = dev_ptr; struct ipoib_dev_priv *priv = netdev_priv(dev); napi_schedule(&priv->napi); } static void drain_tx_cq(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); netif_tx_lock(dev); while (poll_tx(priv)) ; /* nothing */ if (netif_queue_stopped(dev)) mod_timer(&priv->poll_timer, jiffies + 1); netif_tx_unlock(dev); } void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr) { struct ipoib_dev_priv *priv = netdev_priv(dev_ptr); mod_timer(&priv->poll_timer, jiffies); } static inline int post_send(struct ipoib_dev_priv *priv, unsigned int 
wr_id, struct ib_ah *address, u32 qpn, struct ipoib_tx_buf *tx_req, void *head, int hlen) { struct ib_send_wr *bad_wr; struct sk_buff *skb = tx_req->skb; ipoib_build_sge(priv, tx_req); priv->tx_wr.wr.wr_id = wr_id; priv->tx_wr.remote_qpn = qpn; priv->tx_wr.ah = address; if (head) { priv->tx_wr.mss = skb_shinfo(skb)->gso_size; priv->tx_wr.header = head; priv->tx_wr.hlen = hlen; priv->tx_wr.wr.opcode = IB_WR_LSO; } else priv->tx_wr.wr.opcode = IB_WR_SEND; return ib_post_send(priv->qp, &priv->tx_wr.wr, &bad_wr); } void ipoib_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_ah *address, u32 qpn) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_tx_buf *tx_req; int hlen, rc; void *phead; unsigned usable_sge = priv->max_send_sge - !!skb_headlen(skb); if (skb_is_gso(skb)) { hlen = skb_transport_offset(skb) + tcp_hdrlen(skb); phead = skb->data; if (unlikely(!skb_pull(skb, hlen))) { ipoib_warn(priv, "linear data too small\n"); ++dev->stats.tx_dropped; ++dev->stats.tx_errors; dev_kfree_skb_any(skb); return; } } else { if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) { ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n", skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN); ++dev->stats.tx_dropped; ++dev->stats.tx_errors; ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu); return; } phead = NULL; hlen = 0; } if (skb_shinfo(skb)->nr_frags > usable_sge) { if (skb_linearize(skb) < 0) { ipoib_warn(priv, "skb could not be linearized\n"); ++dev->stats.tx_dropped; ++dev->stats.tx_errors; dev_kfree_skb_any(skb); return; } /* Does skb_linearize return ok without reducing nr_frags? 
*/ if (skb_shinfo(skb)->nr_frags > usable_sge) { ipoib_warn(priv, "too many frags after skb linearize\n"); ++dev->stats.tx_dropped; ++dev->stats.tx_errors; dev_kfree_skb_any(skb); return; } } ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n", skb->len, address, qpn); /* * We put the skb into the tx_ring _before_ we call post_send() * because it's entirely possible that the completion handler will * run before we execute anything after the post_send(). That * means we have to make sure everything is properly recorded and * our state is consistent before we call post_send(). */ tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)]; tx_req->skb = skb; if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) { ++dev->stats.tx_errors; dev_kfree_skb_any(skb); return; } if (skb->ip_summed == CHECKSUM_PARTIAL) priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM; else priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM; if (++priv->tx_outstanding == ipoib_sendq_size) { ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n"); if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP)) ipoib_warn(priv, "request notify on send CQ failed\n"); netif_stop_queue(dev); } skb_orphan(skb); skb_dst_drop(skb); rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1), address->ah, qpn, tx_req, phead, hlen); if (unlikely(rc)) { ipoib_warn(priv, "post_send failed, error %d\n", rc); ++dev->stats.tx_errors; --priv->tx_outstanding; ipoib_dma_unmap_tx(priv, tx_req); dev_kfree_skb_any(skb); if (netif_queue_stopped(dev)) netif_wake_queue(dev); } else { netif_trans_update(dev); address->last_send = priv->tx_head; ++priv->tx_head; } if (unlikely(priv->tx_outstanding > MAX_SEND_CQE)) while (poll_tx(priv)) ; /* nothing */ } static void __ipoib_reap_ah(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_ah *ah, *tah; LIST_HEAD(remove_list); unsigned long flags; netif_tx_lock_bh(dev); spin_lock_irqsave(&priv->lock, flags); list_for_each_entry_safe(ah, 
tah, &priv->dead_ahs, list) if ((int) priv->tx_tail - (int) ah->last_send >= 0) { list_del(&ah->list); ib_destroy_ah(ah->ah); kfree(ah); } spin_unlock_irqrestore(&priv->lock, flags); netif_tx_unlock_bh(dev); } void ipoib_reap_ah(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, ah_reap_task.work); struct net_device *dev = priv->dev; __ipoib_reap_ah(dev); if (!test_bit(IPOIB_STOP_REAPER, &priv->flags)) queue_delayed_work(priv->wq, &priv->ah_reap_task, round_jiffies_relative(HZ)); } static void ipoib_flush_ah(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); cancel_delayed_work(&priv->ah_reap_task); flush_workqueue(priv->wq); ipoib_reap_ah(&priv->ah_reap_task.work); } static void ipoib_stop_ah(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); set_bit(IPOIB_STOP_REAPER, &priv->flags); ipoib_flush_ah(dev); } static void ipoib_ib_tx_timer_func(unsigned long ctx) { drain_tx_cq((struct net_device *)ctx); } int ipoib_ib_dev_open(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); int ret; ipoib_pkey_dev_check_presence(dev); if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) { ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey, (!(priv->pkey & 0x7fff) ? 
"Invalid" : "not found")); return -1; } ret = ipoib_init_qp(dev); if (ret) { ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret); return -1; } ret = ipoib_ib_post_receives(dev); if (ret) { ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret); goto dev_stop; } ret = ipoib_cm_dev_open(dev); if (ret) { ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret); goto dev_stop; } clear_bit(IPOIB_STOP_REAPER, &priv->flags); queue_delayed_work(priv->wq, &priv->ah_reap_task, round_jiffies_relative(HZ)); if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) napi_enable(&priv->napi); return 0; dev_stop: if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) napi_enable(&priv->napi); ipoib_ib_dev_stop(dev); return -1; } void ipoib_pkey_dev_check_presence(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); if (!(priv->pkey & 0x7fff) || ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); else set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); } int ipoib_ib_dev_up(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); ipoib_pkey_dev_check_presence(dev); if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) { ipoib_dbg(priv, "PKEY is not assigned.\n"); return 0; } set_bit(IPOIB_FLAG_OPER_UP, &priv->flags); return ipoib_mcast_start_thread(dev); } int ipoib_ib_dev_down(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); ipoib_dbg(priv, "downing ib_dev\n"); clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags); netif_carrier_off(dev); ipoib_mcast_stop_thread(dev); ipoib_mcast_dev_flush(dev); ipoib_flush_paths(dev); return 0; } static int recvs_pending(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); int pending = 0; int i; for (i = 0; i < ipoib_recvq_size; ++i) if (priv->rx_ring[i].skb) ++pending; return pending; } void ipoib_drain_cq(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); int i, n; /* * We call 
completion handling routines that expect to be * called from the BH-disabled NAPI poll context, so disable * BHs here too. */ local_bh_disable(); do { n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc); for (i = 0; i < n; ++i) { /* * Convert any successful completions to flush * errors to avoid passing packets up the * stack after bringing the device down. */ if (priv->ibwc[i].status == IB_WC_SUCCESS) priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR; if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) { if (priv->ibwc[i].wr_id & IPOIB_OP_CM) ipoib_cm_handle_rx_wc(dev, priv->ibwc + i); else ipoib_ib_handle_rx_wc(dev, priv->ibwc + i); } else ipoib_cm_handle_tx_wc(dev, priv->ibwc + i); } } while (n == IPOIB_NUM_WC); while (poll_tx(priv)) ; /* nothing */ local_bh_enable(); } int ipoib_ib_dev_stop(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ib_qp_attr qp_attr; unsigned long begin; struct ipoib_tx_buf *tx_req; int i; if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) napi_disable(&priv->napi); ipoib_cm_dev_stop(dev); /* * Move our QP to the error state and then reinitialize in * when all work requests have completed or have been flushed. */ qp_attr.qp_state = IB_QPS_ERR; if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) ipoib_warn(priv, "Failed to modify QP to ERROR state\n"); /* Wait for all sends and receives to complete */ begin = jiffies; while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) { if (time_after(jiffies, begin + 5 * HZ)) { ipoib_warn(priv, "timing out; %d sends %d receives not completed\n", priv->tx_head - priv->tx_tail, recvs_pending(dev)); /* * assume the HW is wedged and just free up * all our pending work requests. 
*/ while ((int) priv->tx_tail - (int) priv->tx_head < 0) { tx_req = &priv->tx_ring[priv->tx_tail & (ipoib_sendq_size - 1)]; ipoib_dma_unmap_tx(priv, tx_req); dev_kfree_skb_any(tx_req->skb); ++priv->tx_tail; --priv->tx_outstanding; } for (i = 0; i < ipoib_recvq_size; ++i) { struct ipoib_rx_buf *rx_req; rx_req = &priv->rx_ring[i]; if (!rx_req->skb) continue; ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[i].mapping); dev_kfree_skb_any(rx_req->skb); rx_req->skb = NULL; } goto timeout; } ipoib_drain_cq(dev); msleep(1); } ipoib_dbg(priv, "All sends and receives done.\n"); timeout: del_timer_sync(&priv->poll_timer); qp_attr.qp_state = IB_QPS_RESET; if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) ipoib_warn(priv, "Failed to modify QP to RESET state\n"); ipoib_flush_ah(dev); ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP); return 0; } int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port) { struct ipoib_dev_priv *priv = netdev_priv(dev); priv->ca = ca; priv->port = port; priv->qp = NULL; if (ipoib_transport_dev_init(dev, ca)) { printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name); return -ENODEV; } setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func, (unsigned long) dev); if (dev->flags & IFF_UP) { if (ipoib_ib_dev_open(dev)) { ipoib_transport_dev_cleanup(dev); return -ENODEV; } } return 0; } /* * Takes whatever value which is in pkey index 0 and updates priv->pkey * returns 0 if the pkey value was changed. 
*/ static inline int update_parent_pkey(struct ipoib_dev_priv *priv) { int result; u16 prev_pkey; prev_pkey = priv->pkey; result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey); if (result) { ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n", priv->port, result); return result; } priv->pkey |= 0x8000; if (prev_pkey != priv->pkey) { ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n", prev_pkey, priv->pkey); /* * Update the pkey in the broadcast address, while making sure to set * the full membership bit, so that we join the right broadcast group. */ priv->dev->broadcast[8] = priv->pkey >> 8; priv->dev->broadcast[9] = priv->pkey & 0xff; return 0; } return 1; } /* * returns 0 if pkey value was found in a different slot. */ static inline int update_child_pkey(struct ipoib_dev_priv *priv) { u16 old_index = priv->pkey_index; priv->pkey_index = 0; ipoib_pkey_dev_check_presence(priv->dev); if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) && (old_index == priv->pkey_index)) return 1; return 0; } /* * returns true if the device address of the ipoib interface has changed and the * new address is a valid one (i.e in the gid table), return false otherwise. 
*/ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv) { union ib_gid search_gid; union ib_gid gid0; union ib_gid *netdev_gid; int err; u16 index; u8 port; bool ret = false; netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4); if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL)) return false; netif_addr_lock_bh(priv->dev); /* The subnet prefix may have changed, update it now so we won't have * to do it later */ priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix; netdev_gid->global.subnet_prefix = gid0.global.subnet_prefix; search_gid.global.subnet_prefix = gid0.global.subnet_prefix; search_gid.global.interface_id = priv->local_gid.global.interface_id; netif_addr_unlock_bh(priv->dev); err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB, priv->dev, &port, &index); netif_addr_lock_bh(priv->dev); if (search_gid.global.interface_id != priv->local_gid.global.interface_id) /* There was a change while we were looking up the gid, bail * here and let the next work sort this out */ goto out; /* The next section of code needs some background: * Per IB spec the port GUID can't change if the HCA is powered on. * port GUID is the basis for GID at index 0 which is the basis for * the default device address of a ipoib interface. * * so it seems the flow should be: * if user_changed_dev_addr && gid in gid tbl * set bit dev_addr_set * return true * else * return false * * The issue is that there are devices that don't follow the spec, * they change the port GUID when the HCA is powered, so in order * not to break userspace applications, We need to check if the * user wanted to control the device address and we assume that * if he sets the device address back to be based on GID index 0, * he no longer wishs to control it. 
* * If the user doesn't control the the device address, * IPOIB_FLAG_DEV_ADDR_SET is set and ib_find_gid failed it means * the port GUID has changed and GID at index 0 has changed * so we need to change priv->local_gid and priv->dev->dev_addr * to reflect the new GID. */ if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) { if (!err && port == priv->port) { set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); if (index == 0) clear_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags); else set_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags); ret = true; } else { ret = false; } } else { if (!err && port == priv->port) { ret = true; } else { if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) { memcpy(&priv->local_gid, &gid0, sizeof(priv->local_gid)); memcpy(priv->dev->dev_addr + 4, &gid0, sizeof(priv->local_gid)); ret = true; } } } out: netif_addr_unlock_bh(priv->dev); return ret; } static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, enum ipoib_flush_level level, int nesting) { struct ipoib_dev_priv *cpriv; struct net_device *dev = priv->dev; int result; down_read_nested(&priv->vlan_rwsem, nesting); /* * Flush any child interfaces too -- they might be up even if * the parent is down. */ list_for_each_entry(cpriv, &priv->child_intfs, list) __ipoib_ib_dev_flush(cpriv, level, nesting + 1); up_read(&priv->vlan_rwsem); if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) && level != IPOIB_FLUSH_HEAVY) { /* Make sure the dev_addr is set even if not flushing */ if (level == IPOIB_FLUSH_LIGHT) ipoib_dev_addr_changed_valid(priv); ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); return; } if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { /* interface is down. update pkey and leave. 
*/ if (level == IPOIB_FLUSH_HEAVY) { if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) update_parent_pkey(priv); else update_child_pkey(priv); } else if (level == IPOIB_FLUSH_LIGHT) ipoib_dev_addr_changed_valid(priv); ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n"); return; } if (level == IPOIB_FLUSH_HEAVY) { /* child devices chase their origin pkey value, while non-child * (parent) devices should always takes what present in pkey index 0 */ if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { result = update_child_pkey(priv); if (result) { /* restart QP only if P_Key index is changed */ ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n"); return; } } else { result = update_parent_pkey(priv); /* restart QP only if P_Key value changed */ if (result) { ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n"); return; } } } if (level == IPOIB_FLUSH_LIGHT) { ipoib_mark_paths_invalid(dev); ipoib_mcast_dev_flush(dev); ipoib_flush_ah(dev); } if (level >= IPOIB_FLUSH_NORMAL) ipoib_ib_dev_down(dev); if (level == IPOIB_FLUSH_HEAVY) { if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) ipoib_ib_dev_stop(dev); if (ipoib_ib_dev_open(dev) != 0) return; if (netif_queue_stopped(dev)) netif_start_queue(dev); } /* * The device could have been brought down between the start and when * we get here, don't bring it back up if it's not configured up */ if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { if (level >= IPOIB_FLUSH_NORMAL) ipoib_ib_dev_up(dev); if (ipoib_dev_addr_changed_valid(priv)) ipoib_mcast_restart_task(&priv->restart_task); } } void ipoib_ib_dev_flush_light(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, flush_light); __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0); } void ipoib_ib_dev_flush_normal(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, flush_normal); __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0); } void 
ipoib_ib_dev_flush_heavy(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, flush_heavy); __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0); } void ipoib_ib_dev_cleanup(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); ipoib_dbg(priv, "cleaning up ib_dev\n"); /* * We must make sure there are no more (path) completions * that may wish to touch priv fields that are no longer valid */ ipoib_flush_paths(dev); ipoib_mcast_stop_thread(dev); ipoib_mcast_dev_flush(dev); /* * All of our ah references aren't free until after * ipoib_mcast_dev_flush(), ipoib_flush_paths, and * the neighbor garbage collection is stopped and reaped. * That should all be done now, so make a final ah flush. */ ipoib_stop_ah(dev); ipoib_transport_dev_cleanup(dev); }
gpl-2.0
SimpleAOSP-Kernel/kernel_flounder
arch/arm/mach-tegra/board-flounder-pinmux.c
53
1989
/* * arch/arm/mach-tegra/board-flounder-pinmux.c * * Copyright (c) 2013, NVIDIA Corporation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/gpio.h> #include <mach/pinmux.h> #include <mach/gpio-tegra.h> #include "board.h" #include "board-flounder.h" #include "devices.h" #include "gpio-names.h" #include <mach/pinmux-t12.h> #include "board-flounder-pinmux-t12x.h" static __initdata struct tegra_drive_pingroup_config flounder_drive_pinmux[] = { /* SDMMC1 */ SET_DRIVE(SDIO1, ENABLE, DISABLE, DIV_1, 46, 42, FASTEST, FASTEST), /* SDMMC3 */ SET_DRIVE(SDIO3, ENABLE, DISABLE, DIV_1, 20, 36, FASTEST, FASTEST), /* SDMMC4 */ SET_DRIVE_WITH_TYPE(GMA, ENABLE, DISABLE, DIV_1, 10, 20, FASTEST, FASTEST, 1), }; static void __init flounder_gpio_init_configure(void) { int len; int i; struct gpio_init_pin_info *pins_info; len = ARRAY_SIZE(init_gpio_mode_flounder_common); pins_info = init_gpio_mode_flounder_common; for (i = 0; i < len; ++i) { tegra_gpio_init_configure(pins_info->gpio_nr, pins_info->is_input, pins_info->value); pins_info++; } } int __init flounder_pinmux_init(void) { if (!of_machine_is_compatible("nvidia,tn8")) flounder_gpio_init_configure(); tegra_pinmux_config_table(flounder_pinmux_common, ARRAY_SIZE(flounder_pinmux_common)); tegra_drive_pinmux_config_table(flounder_drive_pinmux, ARRAY_SIZE(flounder_drive_pinmux)); tegra_pinmux_config_table(unused_pins_lowpower, ARRAY_SIZE(unused_pins_lowpower)); return 0; }
gpl-2.0
wangxiaofei6485/linux-2.6.32-fl2440
drivers/serial/serial_ks8695.c
565
16037
/* * drivers/serial/serial_ks8695.c * * Driver for KS8695 serial ports * * Based on drivers/serial/serial_amba.c, by Kam Lee. * * Copyright 2002-2005 Micrel Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/module.h> #include <linux/tty.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/serial.h> #include <linux/console.h> #include <linux/sysrq.h> #include <linux/device.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/mach/irq.h> #include <mach/regs-uart.h> #include <mach/regs-irq.h> #if defined(CONFIG_SERIAL_KS8695_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/serial_core.h> #define SERIAL_KS8695_MAJOR 204 #define SERIAL_KS8695_MINOR 16 #define SERIAL_KS8695_DEVNAME "ttyAM" #define SERIAL_KS8695_NR 1 /* * Access macros for the KS8695 UART */ #define UART_GET_CHAR(p) (__raw_readl((p)->membase + KS8695_URRB) & 0xFF) #define UART_PUT_CHAR(p, c) __raw_writel((c), (p)->membase + KS8695_URTH) #define UART_GET_FCR(p) __raw_readl((p)->membase + KS8695_URFC) #define UART_PUT_FCR(p, c) __raw_writel((c), (p)->membase + KS8695_URFC) #define UART_GET_MSR(p) __raw_readl((p)->membase + KS8695_URMS) #define UART_GET_LSR(p) __raw_readl((p)->membase + KS8695_URLS) #define UART_GET_LCR(p) __raw_readl((p)->membase + KS8695_URLC) #define UART_PUT_LCR(p, c) __raw_writel((c), (p)->membase + KS8695_URLC) #define UART_GET_MCR(p) __raw_readl((p)->membase + KS8695_URMC) #define UART_PUT_MCR(p, c) __raw_writel((c), (p)->membase + KS8695_URMC) #define UART_GET_BRDR(p) __raw_readl((p)->membase + KS8695_URBD) #define UART_PUT_BRDR(p, c) __raw_writel((c), (p)->membase + KS8695_URBD) #define KS8695_CLR_TX_INT() __raw_writel(1 << KS8695_IRQ_UART_TX, KS8695_IRQ_VA + KS8695_INTST) #define UART_DUMMY_LSR_RX 0x100 
#define UART_PORT_SIZE (KS8695_USR - KS8695_URRB + 4) static inline int tx_enabled(struct uart_port *port) { return port->unused[0] & 1; } static inline int rx_enabled(struct uart_port *port) { return port->unused[0] & 2; } static inline int ms_enabled(struct uart_port *port) { return port->unused[0] & 4; } static inline void ms_enable(struct uart_port *port, int enabled) { if(enabled) port->unused[0] |= 4; else port->unused[0] &= ~4; } static inline void rx_enable(struct uart_port *port, int enabled) { if(enabled) port->unused[0] |= 2; else port->unused[0] &= ~2; } static inline void tx_enable(struct uart_port *port, int enabled) { if(enabled) port->unused[0] |= 1; else port->unused[0] &= ~1; } #ifdef SUPPORT_SYSRQ static struct console ks8695_console; #endif static void ks8695uart_stop_tx(struct uart_port *port) { if (tx_enabled(port)) { /* use disable_irq_nosync() and not disable_irq() to avoid self * imposed deadlock by not waiting for irq handler to end, * since this ks8695uart_stop_tx() is called from interrupt context. 
*/ disable_irq_nosync(KS8695_IRQ_UART_TX); tx_enable(port, 0); } } static void ks8695uart_start_tx(struct uart_port *port) { if (!tx_enabled(port)) { enable_irq(KS8695_IRQ_UART_TX); tx_enable(port, 1); } } static void ks8695uart_stop_rx(struct uart_port *port) { if (rx_enabled(port)) { disable_irq(KS8695_IRQ_UART_RX); rx_enable(port, 0); } } static void ks8695uart_enable_ms(struct uart_port *port) { if (!ms_enabled(port)) { enable_irq(KS8695_IRQ_UART_MODEM_STATUS); ms_enable(port,1); } } static void ks8695uart_disable_ms(struct uart_port *port) { if (ms_enabled(port)) { disable_irq(KS8695_IRQ_UART_MODEM_STATUS); ms_enable(port,0); } } static irqreturn_t ks8695uart_rx_chars(int irq, void *dev_id) { struct uart_port *port = dev_id; struct tty_struct *tty = port->state->port.tty; unsigned int status, ch, lsr, flg, max_count = 256; status = UART_GET_LSR(port); /* clears pending LSR interrupts */ while ((status & URLS_URDR) && max_count--) { ch = UART_GET_CHAR(port); flg = TTY_NORMAL; port->icount.rx++; /* * Note that the error handling code is * out of the main execution path */ lsr = UART_GET_LSR(port) | UART_DUMMY_LSR_RX; if (unlikely(lsr & (URLS_URBI | URLS_URPE | URLS_URFE | URLS_URROE))) { if (lsr & URLS_URBI) { lsr &= ~(URLS_URFE | URLS_URPE); port->icount.brk++; if (uart_handle_break(port)) goto ignore_char; } if (lsr & URLS_URPE) port->icount.parity++; if (lsr & URLS_URFE) port->icount.frame++; if (lsr & URLS_URROE) port->icount.overrun++; lsr &= port->read_status_mask; if (lsr & URLS_URBI) flg = TTY_BREAK; else if (lsr & URLS_URPE) flg = TTY_PARITY; else if (lsr & URLS_URFE) flg = TTY_FRAME; } if (uart_handle_sysrq_char(port, ch)) goto ignore_char; uart_insert_char(port, lsr, URLS_URROE, ch, flg); ignore_char: status = UART_GET_LSR(port); } tty_flip_buffer_push(tty); return IRQ_HANDLED; } static irqreturn_t ks8695uart_tx_chars(int irq, void *dev_id) { struct uart_port *port = dev_id; struct circ_buf *xmit = &port->state->xmit; unsigned int count; if 
(port->x_char) { KS8695_CLR_TX_INT(); UART_PUT_CHAR(port, port->x_char); port->icount.tx++; port->x_char = 0; return IRQ_HANDLED; } if (uart_tx_stopped(port) || uart_circ_empty(xmit)) { ks8695uart_stop_tx(port); return IRQ_HANDLED; } count = 16; /* fifo size */ while (!uart_circ_empty(xmit) && (count-- > 0)) { KS8695_CLR_TX_INT(); UART_PUT_CHAR(port, xmit->buf[xmit->tail]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); port->icount.tx++; } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); if (uart_circ_empty(xmit)) ks8695uart_stop_tx(port); return IRQ_HANDLED; } static irqreturn_t ks8695uart_modem_status(int irq, void *dev_id) { struct uart_port *port = dev_id; unsigned int status; /* * clear modem interrupt by reading MSR */ status = UART_GET_MSR(port); if (status & URMS_URDDCD) uart_handle_dcd_change(port, status & URMS_URDDCD); if (status & URMS_URDDST) port->icount.dsr++; if (status & URMS_URDCTS) uart_handle_cts_change(port, status & URMS_URDCTS); if (status & URMS_URTERI) port->icount.rng++; wake_up_interruptible(&port->state->port.delta_msr_wait); return IRQ_HANDLED; } static unsigned int ks8695uart_tx_empty(struct uart_port *port) { return (UART_GET_LSR(port) & URLS_URTE) ? 
TIOCSER_TEMT : 0; } static unsigned int ks8695uart_get_mctrl(struct uart_port *port) { unsigned int result = 0; unsigned int status; status = UART_GET_MSR(port); if (status & URMS_URDCD) result |= TIOCM_CAR; if (status & URMS_URDSR) result |= TIOCM_DSR; if (status & URMS_URCTS) result |= TIOCM_CTS; if (status & URMS_URRI) result |= TIOCM_RI; return result; } static void ks8695uart_set_mctrl(struct uart_port *port, u_int mctrl) { unsigned int mcr; mcr = UART_GET_MCR(port); if (mctrl & TIOCM_RTS) mcr |= URMC_URRTS; else mcr &= ~URMC_URRTS; if (mctrl & TIOCM_DTR) mcr |= URMC_URDTR; else mcr &= ~URMC_URDTR; UART_PUT_MCR(port, mcr); } static void ks8695uart_break_ctl(struct uart_port *port, int break_state) { unsigned int lcr; lcr = UART_GET_LCR(port); if (break_state == -1) lcr |= URLC_URSBC; else lcr &= ~URLC_URSBC; UART_PUT_LCR(port, lcr); } static int ks8695uart_startup(struct uart_port *port) { int retval; set_irq_flags(KS8695_IRQ_UART_TX, IRQF_VALID | IRQF_NOAUTOEN); tx_enable(port, 0); rx_enable(port, 1); ms_enable(port, 1); /* * Allocate the IRQ */ retval = request_irq(KS8695_IRQ_UART_TX, ks8695uart_tx_chars, IRQF_DISABLED, "UART TX", port); if (retval) goto err_tx; retval = request_irq(KS8695_IRQ_UART_RX, ks8695uart_rx_chars, IRQF_DISABLED, "UART RX", port); if (retval) goto err_rx; retval = request_irq(KS8695_IRQ_UART_LINE_STATUS, ks8695uart_rx_chars, IRQF_DISABLED, "UART LineStatus", port); if (retval) goto err_ls; retval = request_irq(KS8695_IRQ_UART_MODEM_STATUS, ks8695uart_modem_status, IRQF_DISABLED, "UART ModemStatus", port); if (retval) goto err_ms; return 0; err_ms: free_irq(KS8695_IRQ_UART_LINE_STATUS, port); err_ls: free_irq(KS8695_IRQ_UART_RX, port); err_rx: free_irq(KS8695_IRQ_UART_TX, port); err_tx: return retval; } static void ks8695uart_shutdown(struct uart_port *port) { /* * Free the interrupt */ free_irq(KS8695_IRQ_UART_RX, port); free_irq(KS8695_IRQ_UART_TX, port); free_irq(KS8695_IRQ_UART_MODEM_STATUS, port); 
free_irq(KS8695_IRQ_UART_LINE_STATUS, port); /* disable break condition and fifos */ UART_PUT_LCR(port, UART_GET_LCR(port) & ~URLC_URSBC); UART_PUT_FCR(port, UART_GET_FCR(port) & ~URFC_URFE); } static void ks8695uart_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { unsigned int lcr, fcr = 0; unsigned long flags; unsigned int baud, quot; /* * Ask the core to calculate the divisor for us. */ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); quot = uart_get_divisor(port, baud); switch (termios->c_cflag & CSIZE) { case CS5: lcr = URCL_5; break; case CS6: lcr = URCL_6; break; case CS7: lcr = URCL_7; break; default: lcr = URCL_8; break; } /* stop bits */ if (termios->c_cflag & CSTOPB) lcr |= URLC_URSB; /* parity */ if (termios->c_cflag & PARENB) { if (termios->c_cflag & CMSPAR) { /* Mark or Space parity */ if (termios->c_cflag & PARODD) lcr |= URPE_MARK; else lcr |= URPE_SPACE; } else if (termios->c_cflag & PARODD) lcr |= URPE_ODD; else lcr |= URPE_EVEN; } if (port->fifosize > 1) fcr = URFC_URFRT_8 | URFC_URTFR | URFC_URRFR | URFC_URFE; spin_lock_irqsave(&port->lock, flags); /* * Update the per-port timeout. */ uart_update_timeout(port, termios->c_cflag, baud); port->read_status_mask = URLS_URROE; if (termios->c_iflag & INPCK) port->read_status_mask |= (URLS_URFE | URLS_URPE); if (termios->c_iflag & (BRKINT | PARMRK)) port->read_status_mask |= URLS_URBI; /* * Characters to ignore */ port->ignore_status_mask = 0; if (termios->c_iflag & IGNPAR) port->ignore_status_mask |= (URLS_URFE | URLS_URPE); if (termios->c_iflag & IGNBRK) { port->ignore_status_mask |= URLS_URBI; /* * If we're ignoring parity and break indicators, * ignore overruns too (for real raw support). */ if (termios->c_iflag & IGNPAR) port->ignore_status_mask |= URLS_URROE; } /* * Ignore all characters if CREAD is not set. 
*/ if ((termios->c_cflag & CREAD) == 0) port->ignore_status_mask |= UART_DUMMY_LSR_RX; /* first, disable everything */ if (UART_ENABLE_MS(port, termios->c_cflag)) ks8695uart_enable_ms(port); else ks8695uart_disable_ms(port); /* Set baud rate */ UART_PUT_BRDR(port, quot); UART_PUT_LCR(port, lcr); UART_PUT_FCR(port, fcr); spin_unlock_irqrestore(&port->lock, flags); } static const char *ks8695uart_type(struct uart_port *port) { return port->type == PORT_KS8695 ? "KS8695" : NULL; } /* * Release the memory region(s) being used by 'port' */ static void ks8695uart_release_port(struct uart_port *port) { release_mem_region(port->mapbase, UART_PORT_SIZE); } /* * Request the memory region(s) being used by 'port' */ static int ks8695uart_request_port(struct uart_port *port) { return request_mem_region(port->mapbase, UART_PORT_SIZE, "serial_ks8695") != NULL ? 0 : -EBUSY; } /* * Configure/autoconfigure the port. */ static void ks8695uart_config_port(struct uart_port *port, int flags) { if (flags & UART_CONFIG_TYPE) { port->type = PORT_KS8695; ks8695uart_request_port(port); } } /* * verify the new serial_struct (for TIOCSSERIAL). 
*/ static int ks8695uart_verify_port(struct uart_port *port, struct serial_struct *ser) { int ret = 0; if (ser->type != PORT_UNKNOWN && ser->type != PORT_KS8695) ret = -EINVAL; if (ser->irq != port->irq) ret = -EINVAL; if (ser->baud_base < 9600) ret = -EINVAL; return ret; } static struct uart_ops ks8695uart_pops = { .tx_empty = ks8695uart_tx_empty, .set_mctrl = ks8695uart_set_mctrl, .get_mctrl = ks8695uart_get_mctrl, .stop_tx = ks8695uart_stop_tx, .start_tx = ks8695uart_start_tx, .stop_rx = ks8695uart_stop_rx, .enable_ms = ks8695uart_enable_ms, .break_ctl = ks8695uart_break_ctl, .startup = ks8695uart_startup, .shutdown = ks8695uart_shutdown, .set_termios = ks8695uart_set_termios, .type = ks8695uart_type, .release_port = ks8695uart_release_port, .request_port = ks8695uart_request_port, .config_port = ks8695uart_config_port, .verify_port = ks8695uart_verify_port, }; static struct uart_port ks8695uart_ports[SERIAL_KS8695_NR] = { { .membase = (void *) KS8695_UART_VA, .mapbase = KS8695_UART_VA, .iotype = SERIAL_IO_MEM, .irq = KS8695_IRQ_UART_TX, .uartclk = KS8695_CLOCK_RATE * 16, .fifosize = 16, .ops = &ks8695uart_pops, .flags = ASYNC_BOOT_AUTOCONF, .line = 0, } }; #ifdef CONFIG_SERIAL_KS8695_CONSOLE static void ks8695_console_putchar(struct uart_port *port, int ch) { while (!(UART_GET_LSR(port) & URLS_URTHRE)) barrier(); UART_PUT_CHAR(port, ch); } static void ks8695_console_write(struct console *co, const char *s, u_int count) { struct uart_port *port = ks8695uart_ports + co->index; uart_console_write(port, s, count, ks8695_console_putchar); } static void __init ks8695_console_get_options(struct uart_port *port, int *baud, int *parity, int *bits) { unsigned int lcr; lcr = UART_GET_LCR(port); switch (lcr & URLC_PARITY) { case URPE_ODD: *parity = 'o'; break; case URPE_EVEN: *parity = 'e'; break; default: *parity = 'n'; } switch (lcr & URLC_URCL) { case URCL_5: *bits = 5; break; case URCL_6: *bits = 6; break; case URCL_7: *bits = 7; break; default: *bits = 8; } *baud = 
port->uartclk / (UART_GET_BRDR(port) & 0x0FFF); *baud /= 16; *baud &= 0xFFFFFFF0; } static int __init ks8695_console_setup(struct console *co, char *options) { struct uart_port *port; int baud = 115200; int bits = 8; int parity = 'n'; int flow = 'n'; /* * Check whether an invalid uart number has been specified, and * if so, search for the first available port that does have * console support. */ port = uart_get_console(ks8695uart_ports, SERIAL_KS8695_NR, co); if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); else ks8695_console_get_options(port, &baud, &parity, &bits); return uart_set_options(port, co, baud, parity, bits, flow); } static struct uart_driver ks8695_reg; static struct console ks8695_console = { .name = SERIAL_KS8695_DEVNAME, .write = ks8695_console_write, .device = uart_console_device, .setup = ks8695_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &ks8695_reg, }; static int __init ks8695_console_init(void) { register_console(&ks8695_console); return 0; } console_initcall(ks8695_console_init); #define KS8695_CONSOLE &ks8695_console #else #define KS8695_CONSOLE NULL #endif static struct uart_driver ks8695_reg = { .owner = THIS_MODULE, .driver_name = "serial_ks8695", .dev_name = SERIAL_KS8695_DEVNAME, .major = SERIAL_KS8695_MAJOR, .minor = SERIAL_KS8695_MINOR, .nr = SERIAL_KS8695_NR, .cons = KS8695_CONSOLE, }; static int __init ks8695uart_init(void) { int i, ret; printk(KERN_INFO "Serial: Micrel KS8695 UART driver\n"); ret = uart_register_driver(&ks8695_reg); if (ret) return ret; for (i = 0; i < SERIAL_KS8695_NR; i++) uart_add_one_port(&ks8695_reg, &ks8695uart_ports[0]); return 0; } static void __exit ks8695uart_exit(void) { int i; for (i = 0; i < SERIAL_KS8695_NR; i++) uart_remove_one_port(&ks8695_reg, &ks8695uart_ports[0]); uart_unregister_driver(&ks8695_reg); } module_init(ks8695uart_init); module_exit(ks8695uart_exit); MODULE_DESCRIPTION("KS8695 serial port driver"); MODULE_AUTHOR("Micrel Inc."); 
MODULE_LICENSE("GPL");
gpl-2.0
WarrickJiang/linux-stable
sound/soc/codecs/hdmi.c
821
3097
/*
 * ALSA SoC codec driver for HDMI audio codecs.
 * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
 * Author: Ricardo Neri <ricardo.neri@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */
#include <linux/module.h>
#include <sound/soc.h>
#include <linux/of.h>
#include <linux/of_device.h>

#define DRV_NAME "hdmi-audio-codec"

/*
 * Generic/dummy codec: the HDMI encoder does the real work, so the DAPM
 * graph is just a pass-through input and output pin.
 */
static const struct snd_soc_dapm_widget hdmi_widgets[] = {
	SND_SOC_DAPM_INPUT("RX"),
	SND_SOC_DAPM_OUTPUT("TX"),
};

static const struct snd_soc_dapm_route hdmi_routes[] = {
	{ "Capture", NULL, "RX" },
	{ "TX", NULL, "Playback" },
};

/*
 * DAI capabilities: up to 8-channel playback and stereo capture at the
 * standard HDMI audio rates (32 kHz .. 192 kHz).
 */
static struct snd_soc_dai_driver hdmi_codec_dai = {
	.name = "hdmi-hifi",
	.playback = {
		.stream_name = "Playback",
		.channels_min = 2,
		.channels_max = 8,
		.rates = SNDRV_PCM_RATE_32000 |
			SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |
			SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 |
			SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000,
		.formats = SNDRV_PCM_FMTBIT_S16_LE |
			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE,
		.sig_bits = 24,		/* only 24 of up to 32 bits are significant */
	},
	.capture = {
		.stream_name = "Capture",
		.channels_min = 2,
		.channels_max = 2,
		.rates = SNDRV_PCM_RATE_32000 |
			SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |
			SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 |
			SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000,
		.formats = SNDRV_PCM_FMTBIT_S16_LE |
			SNDRV_PCM_FMTBIT_S24_LE,
	},
};

#ifdef CONFIG_OF
/* Device-tree binding for instantiating this codec from DT platforms */
static const struct of_device_id hdmi_audio_codec_ids[] = {
	{ .compatible = "linux,hdmi-audio", },
	{ }
};
MODULE_DEVICE_TABLE(of, hdmi_audio_codec_ids);
#endif

static struct snd_soc_codec_driver hdmi_codec = {
	.dapm_widgets = hdmi_widgets,
	.num_dapm_widgets = ARRAY_SIZE(hdmi_widgets),
	.dapm_routes = hdmi_routes,
	.num_dapm_routes = ARRAY_SIZE(hdmi_routes),
	/* no registers to write, so no point delaying power-down */
	.ignore_pmdown_time = true,
};

/* Register the dummy codec and its single DAI with the ASoC core. */
static int hdmi_codec_probe(struct platform_device *pdev)
{
	return snd_soc_register_codec(&pdev->dev, &hdmi_codec,
			&hdmi_codec_dai, 1);
}

static int hdmi_codec_remove(struct platform_device *pdev)
{
	snd_soc_unregister_codec(&pdev->dev);
	return 0;
}

static struct platform_driver hdmi_codec_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = of_match_ptr(hdmi_audio_codec_ids),
	},
	.probe = hdmi_codec_probe,
	.remove = hdmi_codec_remove,
};

module_platform_driver(hdmi_codec_driver);

MODULE_AUTHOR("Ricardo Neri <ricardo.neri@ti.com>");
MODULE_DESCRIPTION("ASoC generic HDMI codec driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
gpl-2.0
dmansfield/linux
arch/powerpc/boot/libfdt-wrapper.c
1333
5193
/*
 * This file does the necessary interface mapping between the bootwrapper
 * device tree operations and the interface provided by shared source
 * files flatdevicetree.[ch].
 *
 * Copyright 2007 David Gibson, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <stddef.h>
#include <stdio.h>
#include <page.h>
#include <libfdt.h>
#include "ops.h"

#define DEBUG	0

/* "Bad" = a real failure; -FDT_ERR_NOTFOUND/-FDT_ERR_EXISTS are tolerated */
#define BAD_ERROR(err)	(((err) < 0) \
			 && ((err) != -FDT_ERR_NOTFOUND) \
			 && ((err) != -FDT_ERR_EXISTS))

/*
 * Report a libfdt error (and abort the boot on a "bad" one); evaluates to
 * -1 on any libfdt error, 0 on success.  Statement-expression macro, so it
 * can be used inline in return expressions.
 */
#define check_err(err) \
	({ \
		if (BAD_ERROR(err) || ((err < 0) && DEBUG)) \
			printf("%s():%d  %s\n\r", __func__, __LINE__, \
			       fdt_strerror(err)); \
		if (BAD_ERROR(err)) \
			exit(); \
		(err < 0) ? -1 : 0; \
	})

/*
 * Device handles (void *) are encoded as libfdt node offsets + 1, so that
 * offset 0 (the root node) does not collide with the NULL "no device"
 * handle.  offset_devp() also validates the offset via check_err().
 */
#define offset_devp(off)	\
	({ \
		unsigned long _offset = (off); \
		check_err(_offset) ? NULL : (void *)(_offset+1); \
	})

/* Decode a handle back to an offset; the _find variant maps NULL to -1,
 * which libfdt's iteration functions take to mean "start from the root". */
#define devp_offset_find(devp)	(((unsigned long)(devp))-1)
#define devp_offset(devp)	(devp ? ((unsigned long)(devp))-1 : 0)

static void *fdt;		/* the live (writable) device tree blob */
static void *buf; /* = NULL */	/* heap copy of the blob, grown on demand */

#define EXPAND_GRANULARITY	1024

/* Grow the blob's buffer by at least minexpand bytes (rounded up to the
 * granularity) and re-open the tree in the new buffer; fatal on failure. */
static void expand_buf(int minexpand)
{
	int size = fdt_totalsize(fdt);
	int rc;

	size = _ALIGN(size + minexpand, EXPAND_GRANULARITY);
	buf = platform_ops.realloc(buf, size);
	if (!buf)
		fatal("Couldn't find %d bytes to expand device tree\n\r", size);
	rc = fdt_open_into(fdt, buf, size);
	if (rc != 0)
		fatal("Couldn't expand fdt into new buffer: %s\n\r",
		      fdt_strerror(rc));

	fdt = buf;
}

static void *fdt_wrapper_finddevice(const char *path)
{
	return offset_devp(fdt_path_offset(fdt, path));
}

/* Copy a property's value into buf (truncated to buflen); returns the
 * property's full length, or -1 via check_err() if not found.
 * NOTE: the 'buf' parameter intentionally shadows the file-scope 'buf'. */
static int fdt_wrapper_getprop(const void *devp, const char *name,
			       void *buf, const int buflen)
{
	const void *p;
	int len;

	p = fdt_getprop(fdt, devp_offset(devp), name, &len);
	if (!p)
		return check_err(len);
	memcpy(buf, p, min(len, buflen));
	return len;
}

/* Set a property, transparently growing the blob if it is out of space. */
static int fdt_wrapper_setprop(const void *devp, const char *name,
			       const void *buf, const int len)
{
	int rc;

	rc = fdt_setprop(fdt, devp_offset(devp), name, buf, len);
	if (rc == -FDT_ERR_NOSPACE) {
		expand_buf(len + 16);
		rc = fdt_setprop(fdt, devp_offset(devp), name, buf, len);
	}

	return check_err(rc);
}

static int fdt_wrapper_del_node(const void *devp)
{
	return fdt_del_node(fdt, devp_offset(devp));
}

static void *fdt_wrapper_get_parent(const void *devp)
{
	return offset_devp(fdt_parent_offset(fdt, devp_offset(devp)));
}

/* Create a child node, growing the blob on -FDT_ERR_NOSPACE like setprop. */
static void *fdt_wrapper_create_node(const void *devp, const char *name)
{
	int offset;

	offset = fdt_add_subnode(fdt, devp_offset(devp), name);
	if (offset == -FDT_ERR_NOSPACE) {
		expand_buf(strlen(name) + 16);
		offset = fdt_add_subnode(fdt, devp_offset(devp), name);
	}

	return offset_devp(offset);
}

/* Iterate nodes by property value; prev == NULL starts from the root. */
static void *fdt_wrapper_find_node_by_prop_value(const void *prev,
						 const char *name,
						 const char *val,
						 int len)
{
	int offset = fdt_node_offset_by_prop_value(fdt, devp_offset_find(prev),
						   name, val, len);
	return offset_devp(offset);
}

/* Iterate nodes by "compatible" string; prev == NULL starts from the root. */
static void *fdt_wrapper_find_node_by_compatible(const void *prev,
						 const char *val)
{
	int offset = fdt_node_offset_by_compatible(fdt, devp_offset_find(prev),
						   val);
	return offset_devp(offset);
}

static char *fdt_wrapper_get_path(const void *devp, char *buf, int len)
{
	int rc;

	rc = fdt_get_path(fdt, devp_offset(devp), buf, len);
	if (check_err(rc))
		return NULL;
	return buf;
}

/* Pack the tree back down to minimum size and hand its address to the
 * kernel entry code. */
static unsigned long fdt_wrapper_finalize(void)
{
	int rc;

	rc = fdt_pack(fdt);
	if (rc != 0)
		fatal("Couldn't pack flat tree: %s\n\r",
		      fdt_strerror(rc));
	return (unsigned long)fdt;
}

/*
 * Install the fdt-backed implementations of the bootwrapper's dt_ops and
 * relocate the incoming blob into a malloc'd buffer with headroom for edits.
 */
void fdt_init(void *blob)
{
	int err;
	int bufsize;

	dt_ops.finddevice = fdt_wrapper_finddevice;
	dt_ops.getprop = fdt_wrapper_getprop;
	dt_ops.setprop = fdt_wrapper_setprop;
	dt_ops.get_parent = fdt_wrapper_get_parent;
	dt_ops.create_node = fdt_wrapper_create_node;
	dt_ops.find_node_by_prop_value = fdt_wrapper_find_node_by_prop_value;
	dt_ops.find_node_by_compatible = fdt_wrapper_find_node_by_compatible;
	dt_ops.del_node = fdt_wrapper_del_node;
	dt_ops.get_path = fdt_wrapper_get_path;
	dt_ops.finalize = fdt_wrapper_finalize;

	/* Make sure the dt blob is the right version and so forth */
	fdt = blob;
	bufsize = fdt_totalsize(fdt) + EXPAND_GRANULARITY;
	buf = malloc(bufsize);
	if (!buf)
		fatal("malloc failed. can't relocate the device tree\n\r");

	err = fdt_open_into(fdt, buf, bufsize);

	if (err != 0)
		fatal("fdt_init(): %s\n\r", fdt_strerror(err));

	fdt = buf;
}
gpl-2.0
araca/Zen-Kernel-Huawei-P7
drivers/vhost/vhost.c
1333
38629
/* Copyright (C) 2009 Red Hat, Inc. * Copyright (C) 2006 Rusty Russell IBM Corporation * * Author: Michael S. Tsirkin <mst@redhat.com> * * Inspiration, some code, and most witty comments come from * Documentation/virtual/lguest/lguest.c, by Rusty Russell * * This work is licensed under the terms of the GNU GPL, version 2. * * Generic code for virtio server in host kernel. */ #include <linux/eventfd.h> #include <linux/vhost.h> #include <linux/virtio_net.h> #include <linux/mm.h> #include <linux/mmu_context.h> #include <linux/miscdevice.h> #include <linux/mutex.h> #include <linux/rcupdate.h> #include <linux/poll.h> #include <linux/file.h> #include <linux/highmem.h> #include <linux/slab.h> #include <linux/kthread.h> #include <linux/cgroup.h> #include <linux/net.h> #include <linux/if_packet.h> #include <linux/if_arp.h> #include "vhost.h" enum { VHOST_MEMORY_MAX_NREGIONS = 64, VHOST_MEMORY_F_LOG = 0x1, }; #define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num]) #define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num]) static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt) { struct vhost_poll *poll; poll = container_of(pt, struct vhost_poll, table); poll->wqh = wqh; add_wait_queue(wqh, &poll->wait); } static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key) { struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait); if (!((unsigned long)key & poll->mask)) return 0; vhost_poll_queue(poll); return 0; } static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn) { INIT_LIST_HEAD(&work->node); work->fn = fn; init_waitqueue_head(&work->done); work->flushing = 0; work->queue_seq = work->done_seq = 0; } /* Init poll structure */ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, unsigned long mask, struct vhost_dev *dev) { init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup); init_poll_funcptr(&poll->table, vhost_poll_func); poll->mask = mask; 
poll->dev = dev; vhost_work_init(&poll->work, fn); } /* Start polling a file. We add ourselves to file's wait queue. The caller must * keep a reference to a file until after vhost_poll_stop is called. */ void vhost_poll_start(struct vhost_poll *poll, struct file *file) { unsigned long mask; mask = file->f_op->poll(file, &poll->table); if (mask) vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask); } /* Stop polling a file. After this function returns, it becomes safe to drop the * file reference. You must also flush afterwards. */ void vhost_poll_stop(struct vhost_poll *poll) { remove_wait_queue(poll->wqh, &poll->wait); } static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work, unsigned seq) { int left; spin_lock_irq(&dev->work_lock); left = seq - work->done_seq; spin_unlock_irq(&dev->work_lock); return left <= 0; } static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work) { unsigned seq; int flushing; spin_lock_irq(&dev->work_lock); seq = work->queue_seq; work->flushing++; spin_unlock_irq(&dev->work_lock); wait_event(work->done, vhost_work_seq_done(dev, work, seq)); spin_lock_irq(&dev->work_lock); flushing = --work->flushing; spin_unlock_irq(&dev->work_lock); BUG_ON(flushing < 0); } /* Flush any work that has been scheduled. When calling this, don't hold any * locks that are also used by the callback. 
*/ void vhost_poll_flush(struct vhost_poll *poll) { vhost_work_flush(poll->dev, &poll->work); } static inline void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work) { unsigned long flags; spin_lock_irqsave(&dev->work_lock, flags); if (list_empty(&work->node)) { list_add_tail(&work->node, &dev->work_list); work->queue_seq++; wake_up_process(dev->worker); } spin_unlock_irqrestore(&dev->work_lock, flags); } void vhost_poll_queue(struct vhost_poll *poll) { vhost_work_queue(poll->dev, &poll->work); } static void vhost_vq_reset(struct vhost_dev *dev, struct vhost_virtqueue *vq) { vq->num = 1; vq->desc = NULL; vq->avail = NULL; vq->used = NULL; vq->last_avail_idx = 0; vq->avail_idx = 0; vq->last_used_idx = 0; vq->signalled_used = 0; vq->signalled_used_valid = false; vq->used_flags = 0; vq->log_used = false; vq->log_addr = -1ull; vq->vhost_hlen = 0; vq->sock_hlen = 0; vq->private_data = NULL; vq->log_base = NULL; vq->error_ctx = NULL; vq->error = NULL; vq->kick = NULL; vq->call_ctx = NULL; vq->call = NULL; vq->log_ctx = NULL; } static int vhost_worker(void *data) { struct vhost_dev *dev = data; struct vhost_work *work = NULL; unsigned uninitialized_var(seq); use_mm(dev->mm); for (;;) { /* mb paired w/ kthread_stop */ set_current_state(TASK_INTERRUPTIBLE); spin_lock_irq(&dev->work_lock); if (work) { work->done_seq = seq; if (work->flushing) wake_up_all(&work->done); } if (kthread_should_stop()) { spin_unlock_irq(&dev->work_lock); __set_current_state(TASK_RUNNING); break; } if (!list_empty(&dev->work_list)) { work = list_first_entry(&dev->work_list, struct vhost_work, node); list_del_init(&work->node); seq = work->queue_seq; } else work = NULL; spin_unlock_irq(&dev->work_lock); if (work) { __set_current_state(TASK_RUNNING); work->fn(work); } else schedule(); } unuse_mm(dev->mm); return 0; } /* Helper to allocate iovec buffers for all vqs. 
*/ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev) { int i; for (i = 0; i < dev->nvqs; ++i) { dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect * UIO_MAXIOV, GFP_KERNEL); dev->vqs[i].log = kmalloc(sizeof *dev->vqs[i].log * UIO_MAXIOV, GFP_KERNEL); dev->vqs[i].heads = kmalloc(sizeof *dev->vqs[i].heads * UIO_MAXIOV, GFP_KERNEL); if (!dev->vqs[i].indirect || !dev->vqs[i].log || !dev->vqs[i].heads) goto err_nomem; } return 0; err_nomem: for (; i >= 0; --i) { kfree(dev->vqs[i].indirect); kfree(dev->vqs[i].log); kfree(dev->vqs[i].heads); } return -ENOMEM; } static void vhost_dev_free_iovecs(struct vhost_dev *dev) { int i; for (i = 0; i < dev->nvqs; ++i) { kfree(dev->vqs[i].indirect); dev->vqs[i].indirect = NULL; kfree(dev->vqs[i].log); dev->vqs[i].log = NULL; kfree(dev->vqs[i].heads); dev->vqs[i].heads = NULL; } } long vhost_dev_init(struct vhost_dev *dev, struct vhost_virtqueue *vqs, int nvqs) { int i; dev->vqs = vqs; dev->nvqs = nvqs; mutex_init(&dev->mutex); dev->log_ctx = NULL; dev->log_file = NULL; dev->memory = NULL; dev->mm = NULL; spin_lock_init(&dev->work_lock); INIT_LIST_HEAD(&dev->work_list); dev->worker = NULL; for (i = 0; i < dev->nvqs; ++i) { dev->vqs[i].log = NULL; dev->vqs[i].indirect = NULL; dev->vqs[i].heads = NULL; dev->vqs[i].dev = dev; mutex_init(&dev->vqs[i].mutex); vhost_vq_reset(dev, dev->vqs + i); if (dev->vqs[i].handle_kick) vhost_poll_init(&dev->vqs[i].poll, dev->vqs[i].handle_kick, POLLIN, dev); } return 0; } /* Caller should have device mutex */ long vhost_dev_check_owner(struct vhost_dev *dev) { /* Are you the owner? If not, I don't think you mean to do that */ return dev->mm == current->mm ? 
0 : -EPERM; } struct vhost_attach_cgroups_struct { struct vhost_work work; struct task_struct *owner; int ret; }; static void vhost_attach_cgroups_work(struct vhost_work *work) { struct vhost_attach_cgroups_struct *s; s = container_of(work, struct vhost_attach_cgroups_struct, work); s->ret = cgroup_attach_task_all(s->owner, current); } static int vhost_attach_cgroups(struct vhost_dev *dev) { struct vhost_attach_cgroups_struct attach; attach.owner = current; vhost_work_init(&attach.work, vhost_attach_cgroups_work); vhost_work_queue(dev, &attach.work); vhost_work_flush(dev, &attach.work); return attach.ret; } /* Caller should have device mutex */ static long vhost_dev_set_owner(struct vhost_dev *dev) { struct task_struct *worker; int err; /* Is there an owner already? */ if (dev->mm) { err = -EBUSY; goto err_mm; } /* No owner, become one */ dev->mm = get_task_mm(current); worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid); if (IS_ERR(worker)) { err = PTR_ERR(worker); goto err_worker; } dev->worker = worker; wake_up_process(worker); /* avoid contributing to loadavg */ err = vhost_attach_cgroups(dev); if (err) goto err_cgroup; err = vhost_dev_alloc_iovecs(dev); if (err) goto err_cgroup; return 0; err_cgroup: kthread_stop(worker); dev->worker = NULL; err_worker: if (dev->mm) mmput(dev->mm); dev->mm = NULL; err_mm: return err; } /* Caller should have device mutex */ long vhost_dev_reset_owner(struct vhost_dev *dev) { struct vhost_memory *memory; /* Restore memory to default empty mapping. 
*/ memory = kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL); if (!memory) return -ENOMEM; vhost_dev_cleanup(dev); memory->nregions = 0; RCU_INIT_POINTER(dev->memory, memory); return 0; } /* Caller should have device mutex */ void vhost_dev_cleanup(struct vhost_dev *dev) { int i; for (i = 0; i < dev->nvqs; ++i) { if (dev->vqs[i].kick && dev->vqs[i].handle_kick) { vhost_poll_stop(&dev->vqs[i].poll); vhost_poll_flush(&dev->vqs[i].poll); } if (dev->vqs[i].error_ctx) eventfd_ctx_put(dev->vqs[i].error_ctx); if (dev->vqs[i].error) fput(dev->vqs[i].error); if (dev->vqs[i].kick) fput(dev->vqs[i].kick); if (dev->vqs[i].call_ctx) eventfd_ctx_put(dev->vqs[i].call_ctx); if (dev->vqs[i].call) fput(dev->vqs[i].call); vhost_vq_reset(dev, dev->vqs + i); } vhost_dev_free_iovecs(dev); if (dev->log_ctx) eventfd_ctx_put(dev->log_ctx); dev->log_ctx = NULL; if (dev->log_file) fput(dev->log_file); dev->log_file = NULL; /* No one will access memory at this point */ kfree(rcu_dereference_protected(dev->memory, lockdep_is_held(&dev->mutex))); RCU_INIT_POINTER(dev->memory, NULL); WARN_ON(!list_empty(&dev->work_list)); if (dev->worker) { kthread_stop(dev->worker); dev->worker = NULL; } if (dev->mm) mmput(dev->mm); dev->mm = NULL; } static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz) { u64 a = addr / VHOST_PAGE_SIZE / 8; /* Make sure 64 bit math will not overflow. */ if (a > ULONG_MAX - (unsigned long)log_base || a + (unsigned long)log_base > ULONG_MAX) return 0; return access_ok(VERIFY_WRITE, log_base + a, (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8); } /* Caller should have vq mutex and device mutex. 
*/ static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem, int log_all) { int i; if (!mem) return 0; for (i = 0; i < mem->nregions; ++i) { struct vhost_memory_region *m = mem->regions + i; unsigned long a = m->userspace_addr; if (m->memory_size > ULONG_MAX) return 0; else if (!access_ok(VERIFY_WRITE, (void __user *)a, m->memory_size)) return 0; else if (log_all && !log_access_ok(log_base, m->guest_phys_addr, m->memory_size)) return 0; } return 1; } /* Can we switch to this memory table? */ /* Caller should have device mutex but not vq mutex */ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem, int log_all) { int i; for (i = 0; i < d->nvqs; ++i) { int ok; mutex_lock(&d->vqs[i].mutex); /* If ring is inactive, will check when it's enabled. */ if (d->vqs[i].private_data) ok = vq_memory_access_ok(d->vqs[i].log_base, mem, log_all); else ok = 1; mutex_unlock(&d->vqs[i].mutex); if (!ok) return 0; } return 1; } static int vq_access_ok(struct vhost_dev *d, unsigned int num, struct vring_desc __user *desc, struct vring_avail __user *avail, struct vring_used __user *used) { size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; return access_ok(VERIFY_READ, desc, num * sizeof *desc) && access_ok(VERIFY_READ, avail, sizeof *avail + num * sizeof *avail->ring + s) && access_ok(VERIFY_WRITE, used, sizeof *used + num * sizeof *used->ring + s); } /* Can we log writes? */ /* Caller should have device mutex but not vq mutex */ int vhost_log_access_ok(struct vhost_dev *dev) { struct vhost_memory *mp; mp = rcu_dereference_protected(dev->memory, lockdep_is_held(&dev->mutex)); return memory_access_ok(dev, mp, 1); } /* Verify access for write logging. */ /* Caller should have vq mutex and device mutex */ static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq, void __user *log_base) { struct vhost_memory *mp; size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 
2 : 0; mp = rcu_dereference_protected(vq->dev->memory, lockdep_is_held(&vq->mutex)); return vq_memory_access_ok(log_base, mp, vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) && (!vq->log_used || log_access_ok(log_base, vq->log_addr, sizeof *vq->used + vq->num * sizeof *vq->used->ring + s)); } /* Can we start vq? */ /* Caller should have vq mutex and device mutex */ int vhost_vq_access_ok(struct vhost_virtqueue *vq) { return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) && vq_log_access_ok(vq->dev, vq, vq->log_base); } static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) { struct vhost_memory mem, *newmem, *oldmem; unsigned long size = offsetof(struct vhost_memory, regions); if (copy_from_user(&mem, m, size)) return -EFAULT; if (mem.padding) return -EOPNOTSUPP; if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS) return -E2BIG; newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL); if (!newmem) return -ENOMEM; memcpy(newmem, &mem, size); if (copy_from_user(newmem->regions, m->regions, mem.nregions * sizeof *m->regions)) { kfree(newmem); return -EFAULT; } if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) { kfree(newmem); return -EFAULT; } oldmem = rcu_dereference_protected(d->memory, lockdep_is_held(&d->mutex)); rcu_assign_pointer(d->memory, newmem); synchronize_rcu(); kfree(oldmem); return 0; } static int init_used(struct vhost_virtqueue *vq, struct vring_used __user *used) { int r = put_user(vq->used_flags, &used->flags); if (r) return r; vq->signalled_used_valid = false; return get_user(vq->last_used_idx, &used->idx); } static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) { struct file *eventfp, *filep = NULL, *pollstart = NULL, *pollstop = NULL; struct eventfd_ctx *ctx = NULL; u32 __user *idxp = argp; struct vhost_virtqueue *vq; struct vhost_vring_state s; struct vhost_vring_file f; struct vhost_vring_addr a; u32 idx; long r; r = get_user(idx, idxp); if (r < 0) 
return r; if (idx >= d->nvqs) return -ENOBUFS; vq = d->vqs + idx; mutex_lock(&vq->mutex); switch (ioctl) { case VHOST_SET_VRING_NUM: /* Resizing ring with an active backend? * You don't want to do that. */ if (vq->private_data) { r = -EBUSY; break; } if (copy_from_user(&s, argp, sizeof s)) { r = -EFAULT; break; } if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) { r = -EINVAL; break; } vq->num = s.num; break; case VHOST_SET_VRING_BASE: /* Moving base with an active backend? * You don't want to do that. */ if (vq->private_data) { r = -EBUSY; break; } if (copy_from_user(&s, argp, sizeof s)) { r = -EFAULT; break; } if (s.num > 0xffff) { r = -EINVAL; break; } vq->last_avail_idx = s.num; /* Forget the cached index value. */ vq->avail_idx = vq->last_avail_idx; break; case VHOST_GET_VRING_BASE: s.index = idx; s.num = vq->last_avail_idx; if (copy_to_user(argp, &s, sizeof s)) r = -EFAULT; break; case VHOST_SET_VRING_ADDR: if (copy_from_user(&a, argp, sizeof a)) { r = -EFAULT; break; } if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) { r = -EOPNOTSUPP; break; } /* For 32bit, verify that the top 32bits of the user data are set to zero. */ if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr || (u64)(unsigned long)a.used_user_addr != a.used_user_addr || (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) { r = -EFAULT; break; } if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) || (a.used_user_addr & (sizeof *vq->used->ring - 1)) || (a.log_guest_addr & (sizeof *vq->used->ring - 1))) { r = -EINVAL; break; } /* We only verify access here if backend is configured. * If it is not, we don't as size might not have been setup. * We will verify when backend is configured. */ if (vq->private_data) { if (!vq_access_ok(d, vq->num, (void __user *)(unsigned long)a.desc_user_addr, (void __user *)(unsigned long)a.avail_user_addr, (void __user *)(unsigned long)a.used_user_addr)) { r = -EINVAL; break; } /* Also validate log access for used ring if enabled. 
*/ if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) && !log_access_ok(vq->log_base, a.log_guest_addr, sizeof *vq->used + vq->num * sizeof *vq->used->ring)) { r = -EINVAL; break; } } r = init_used(vq, (struct vring_used __user *)(unsigned long) a.used_user_addr); if (r) break; vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG)); vq->desc = (void __user *)(unsigned long)a.desc_user_addr; vq->avail = (void __user *)(unsigned long)a.avail_user_addr; vq->log_addr = a.log_guest_addr; vq->used = (void __user *)(unsigned long)a.used_user_addr; break; case VHOST_SET_VRING_KICK: if (copy_from_user(&f, argp, sizeof f)) { r = -EFAULT; break; } eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); if (IS_ERR(eventfp)) { r = PTR_ERR(eventfp); break; } if (eventfp != vq->kick) { pollstop = filep = vq->kick; pollstart = vq->kick = eventfp; } else filep = eventfp; break; case VHOST_SET_VRING_CALL: if (copy_from_user(&f, argp, sizeof f)) { r = -EFAULT; break; } eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); if (IS_ERR(eventfp)) { r = PTR_ERR(eventfp); break; } if (eventfp != vq->call) { filep = vq->call; ctx = vq->call_ctx; vq->call = eventfp; vq->call_ctx = eventfp ? eventfd_ctx_fileget(eventfp) : NULL; } else filep = eventfp; break; case VHOST_SET_VRING_ERR: if (copy_from_user(&f, argp, sizeof f)) { r = -EFAULT; break; } eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); if (IS_ERR(eventfp)) { r = PTR_ERR(eventfp); break; } if (eventfp != vq->error) { filep = vq->error; vq->error = eventfp; ctx = vq->error_ctx; vq->error_ctx = eventfp ? 
eventfd_ctx_fileget(eventfp) : NULL; } else filep = eventfp; break; default: r = -ENOIOCTLCMD; } if (pollstop && vq->handle_kick) vhost_poll_stop(&vq->poll); if (ctx) eventfd_ctx_put(ctx); if (filep) fput(filep); if (pollstart && vq->handle_kick) vhost_poll_start(&vq->poll, vq->kick); mutex_unlock(&vq->mutex); if (pollstop && vq->handle_kick) vhost_poll_flush(&vq->poll); return r; } /* Caller must have device mutex */ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg) { void __user *argp = (void __user *)arg; struct file *eventfp, *filep = NULL; struct eventfd_ctx *ctx = NULL; u64 p; long r; int i, fd; /* If you are not the owner, you can become one */ if (ioctl == VHOST_SET_OWNER) { r = vhost_dev_set_owner(d); goto done; } /* You must be the owner to do anything else */ r = vhost_dev_check_owner(d); if (r) goto done; switch (ioctl) { case VHOST_SET_MEM_TABLE: r = vhost_set_memory(d, argp); break; case VHOST_SET_LOG_BASE: if (copy_from_user(&p, argp, sizeof p)) { r = -EFAULT; break; } if ((u64)(unsigned long)p != p) { r = -EFAULT; break; } for (i = 0; i < d->nvqs; ++i) { struct vhost_virtqueue *vq; void __user *base = (void __user *)(unsigned long)p; vq = d->vqs + i; mutex_lock(&vq->mutex); /* If ring is inactive, will check when it's enabled. */ if (vq->private_data && !vq_log_access_ok(d, vq, base)) r = -EFAULT; else vq->log_base = base; mutex_unlock(&vq->mutex); } break; case VHOST_SET_LOG_FD: r = get_user(fd, (int __user *)argp); if (r < 0) break; eventfp = fd == -1 ? NULL : eventfd_fget(fd); if (IS_ERR(eventfp)) { r = PTR_ERR(eventfp); break; } if (eventfp != d->log_file) { filep = d->log_file; ctx = d->log_ctx; d->log_ctx = eventfp ? 
eventfd_ctx_fileget(eventfp) : NULL; } else filep = eventfp; for (i = 0; i < d->nvqs; ++i) { mutex_lock(&d->vqs[i].mutex); d->vqs[i].log_ctx = d->log_ctx; mutex_unlock(&d->vqs[i].mutex); } if (ctx) eventfd_ctx_put(ctx); if (filep) fput(filep); break; default: r = vhost_set_vring(d, ioctl, argp); break; } done: return r; } static const struct vhost_memory_region *find_region(struct vhost_memory *mem, __u64 addr, __u32 len) { struct vhost_memory_region *reg; int i; /* linear search is not brilliant, but we really have on the order of 6 * regions in practice */ for (i = 0; i < mem->nregions; ++i) { reg = mem->regions + i; if (reg->guest_phys_addr <= addr && reg->guest_phys_addr + reg->memory_size - 1 >= addr) return reg; } return NULL; } /* TODO: This is really inefficient. We need something like get_user() * (instruction directly accesses the data, with an exception table entry * returning -EFAULT). See Documentation/x86/exception-tables.txt. */ static int set_bit_to_user(int nr, void __user *addr) { unsigned long log = (unsigned long)addr; struct page *page; void *base; int bit = nr + (log % PAGE_SIZE) * 8; int r; r = get_user_pages_fast(log, 1, 1, &page); if (r < 0) return r; BUG_ON(r != 1); base = kmap_atomic(page, KM_USER0); set_bit(bit, base); kunmap_atomic(base, KM_USER0); set_page_dirty_lock(page); put_page(page); return 0; } static int log_write(void __user *log_base, u64 write_address, u64 write_length) { u64 write_page = write_address / VHOST_PAGE_SIZE; int r; if (!write_length) return 0; write_length += write_address % VHOST_PAGE_SIZE; for (;;) { u64 base = (u64)(unsigned long)log_base; u64 log = base + write_page / 8; int bit = write_page % 8; if ((u64)(unsigned long)log != log) return -EFAULT; r = set_bit_to_user(bit, (void __user *)(unsigned long)log); if (r < 0) return r; if (write_length <= VHOST_PAGE_SIZE) break; write_length -= VHOST_PAGE_SIZE; write_page += 1; } return r; } int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, 
unsigned int log_num, u64 len) { int i, r; /* Make sure data written is seen before log. */ smp_wmb(); for (i = 0; i < log_num; ++i) { u64 l = min(log[i].len, len); r = log_write(vq->log_base, log[i].addr, l); if (r < 0) return r; len -= l; if (!len) { if (vq->log_ctx) eventfd_signal(vq->log_ctx, 1); return 0; } } /* Length written exceeds what we have stored. This is a bug. */ BUG(); return 0; } static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len, struct iovec iov[], int iov_size) { const struct vhost_memory_region *reg; struct vhost_memory *mem; struct iovec *_iov; u64 s = 0; int ret = 0; rcu_read_lock(); mem = rcu_dereference(dev->memory); while ((u64)len > s) { u64 size; if (unlikely(ret >= iov_size)) { ret = -ENOBUFS; break; } reg = find_region(mem, addr, len); if (unlikely(!reg)) { ret = -EFAULT; break; } _iov = iov + ret; size = reg->memory_size - addr + reg->guest_phys_addr; _iov->iov_len = min((u64)len, size); _iov->iov_base = (void __user *)(unsigned long) (reg->userspace_addr + addr - reg->guest_phys_addr); s += size; addr += size; ++ret; } rcu_read_unlock(); return ret; } /* Each buffer in the virtqueues is actually a chain of descriptors. This * function returns the next descriptor in the chain, * or -1U if we're at the end. */ static unsigned next_desc(struct vring_desc *desc) { unsigned int next; /* If this descriptor says it doesn't chain, we're done. */ if (!(desc->flags & VRING_DESC_F_NEXT)) return -1U; /* Check they're not leading us off end of descriptors. */ next = desc->next; /* Make sure compiler knows to grab that: we don't want it changing! */ /* We will use the result as an index in an array, so most * architectures only need a compiler barrier here. 
*/ read_barrier_depends(); return next; } static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq, struct iovec iov[], unsigned int iov_size, unsigned int *out_num, unsigned int *in_num, struct vhost_log *log, unsigned int *log_num, struct vring_desc *indirect) { struct vring_desc desc; unsigned int i = 0, count, found = 0; int ret; /* Sanity check */ if (unlikely(indirect->len % sizeof desc)) { vq_err(vq, "Invalid length in indirect descriptor: " "len 0x%llx not multiple of 0x%zx\n", (unsigned long long)indirect->len, sizeof desc); return -EINVAL; } ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect, UIO_MAXIOV); if (unlikely(ret < 0)) { vq_err(vq, "Translation failure %d in indirect.\n", ret); return ret; } /* We will use the result as an address to read from, so most * architectures only need a compiler barrier here. */ read_barrier_depends(); count = indirect->len / sizeof desc; /* Buffers are chained via a 16 bit next field, so * we can have at most 2^16 of these. */ if (unlikely(count > USHRT_MAX + 1)) { vq_err(vq, "Indirect buffer length too big: %d\n", indirect->len); return -E2BIG; } do { unsigned iov_count = *in_num + *out_num; if (unlikely(++found > count)) { vq_err(vq, "Loop detected: last one at %u " "indirect size %u\n", i, count); return -EINVAL; } if (unlikely(memcpy_fromiovec((unsigned char *)&desc, vq->indirect, sizeof desc))) { vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n", i, (size_t)indirect->addr + i * sizeof desc); return -EINVAL; } if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) { vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n", i, (size_t)indirect->addr + i * sizeof desc); return -EINVAL; } ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count, iov_size - iov_count); if (unlikely(ret < 0)) { vq_err(vq, "Translation failure %d indirect idx %d\n", ret, i); return ret; } /* If this is an input descriptor, increment that count. 
*/ if (desc.flags & VRING_DESC_F_WRITE) { *in_num += ret; if (unlikely(log)) { log[*log_num].addr = desc.addr; log[*log_num].len = desc.len; ++*log_num; } } else { /* If it's an output descriptor, they're all supposed * to come before any input descriptors. */ if (unlikely(*in_num)) { vq_err(vq, "Indirect descriptor " "has out after in: idx %d\n", i); return -EINVAL; } *out_num += ret; } } while ((i = next_desc(&desc)) != -1); return 0; } /* This looks in the virtqueue and for the first available buffer, and converts * it to an iovec for convenient access. Since descriptors consist of some * number of output then some number of input descriptors, it's actually two * iovecs, but we pack them into one and note how many of each there were. * * This function returns the descriptor number found, or vq->num (which is * never a valid descriptor number) if none was found. A negative code is * returned on error. */ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq, struct iovec iov[], unsigned int iov_size, unsigned int *out_num, unsigned int *in_num, struct vhost_log *log, unsigned int *log_num) { struct vring_desc desc; unsigned int i, head, found = 0; u16 last_avail_idx; int ret; /* Check it isn't doing very strange things with descriptor numbers. */ last_avail_idx = vq->last_avail_idx; if (unlikely(__get_user(vq->avail_idx, &vq->avail->idx))) { vq_err(vq, "Failed to access avail idx at %p\n", &vq->avail->idx); return -EFAULT; } if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) { vq_err(vq, "Guest moved used index from %u to %u", last_avail_idx, vq->avail_idx); return -EFAULT; } /* If there's nothing new since last we looked, return invalid. */ if (vq->avail_idx == last_avail_idx) return vq->num; /* Only get avail ring entries after they have been exposed by guest. */ smp_rmb(); /* Grab the next descriptor number they're advertising, and increment * the index we've seen. 
*/ if (unlikely(__get_user(head, &vq->avail->ring[last_avail_idx % vq->num]))) { vq_err(vq, "Failed to read head: idx %d address %p\n", last_avail_idx, &vq->avail->ring[last_avail_idx % vq->num]); return -EFAULT; } /* If their number is silly, that's an error. */ if (unlikely(head >= vq->num)) { vq_err(vq, "Guest says index %u > %u is available", head, vq->num); return -EINVAL; } /* When we start there are none of either input nor output. */ *out_num = *in_num = 0; if (unlikely(log)) *log_num = 0; i = head; do { unsigned iov_count = *in_num + *out_num; if (unlikely(i >= vq->num)) { vq_err(vq, "Desc index is %u > %u, head = %u", i, vq->num, head); return -EINVAL; } if (unlikely(++found > vq->num)) { vq_err(vq, "Loop detected: last one at %u " "vq size %u head %u\n", i, vq->num, head); return -EINVAL; } ret = __copy_from_user(&desc, vq->desc + i, sizeof desc); if (unlikely(ret)) { vq_err(vq, "Failed to get descriptor: idx %d addr %p\n", i, vq->desc + i); return -EFAULT; } if (desc.flags & VRING_DESC_F_INDIRECT) { ret = get_indirect(dev, vq, iov, iov_size, out_num, in_num, log, log_num, &desc); if (unlikely(ret < 0)) { vq_err(vq, "Failure detected " "in indirect descriptor at idx %d\n", i); return ret; } continue; } ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count, iov_size - iov_count); if (unlikely(ret < 0)) { vq_err(vq, "Translation failure %d descriptor idx %d\n", ret, i); return ret; } if (desc.flags & VRING_DESC_F_WRITE) { /* If this is an input descriptor, * increment that count. */ *in_num += ret; if (unlikely(log)) { log[*log_num].addr = desc.addr; log[*log_num].len = desc.len; ++*log_num; } } else { /* If it's an output descriptor, they're all supposed * to come before any input descriptors. */ if (unlikely(*in_num)) { vq_err(vq, "Descriptor has out after in: " "idx %d\n", i); return -EINVAL; } *out_num += ret; } } while ((i = next_desc(&desc)) != -1); /* On success, increment avail index. 
*/ vq->last_avail_idx++; /* Assume notifications from guest are disabled at this point, * if they aren't we would need to update avail_event index. */ BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY)); return head; } /* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */ void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n) { vq->last_avail_idx -= n; } /* After we've used one of their buffers, we tell them about it. We'll then * want to notify the guest, using eventfd. */ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len) { struct vring_used_elem __user *used; /* The virtqueue contains a ring of used buffers. Get a pointer to the * next entry in that used ring. */ used = &vq->used->ring[vq->last_used_idx % vq->num]; if (__put_user(head, &used->id)) { vq_err(vq, "Failed to write used id"); return -EFAULT; } if (__put_user(len, &used->len)) { vq_err(vq, "Failed to write used len"); return -EFAULT; } /* Make sure buffer is written before we update index. */ smp_wmb(); if (__put_user(vq->last_used_idx + 1, &vq->used->idx)) { vq_err(vq, "Failed to increment used idx"); return -EFAULT; } if (unlikely(vq->log_used)) { /* Make sure data is seen before log. */ smp_wmb(); /* Log used ring entry write. */ log_write(vq->log_base, vq->log_addr + ((void __user *)used - (void __user *)vq->used), sizeof *used); /* Log used index update. */ log_write(vq->log_base, vq->log_addr + offsetof(struct vring_used, idx), sizeof vq->used->idx); if (vq->log_ctx) eventfd_signal(vq->log_ctx, 1); } vq->last_used_idx++; /* If the driver never bothers to signal in a very long while, * used index might wrap around. If that happens, invalidate * signalled_used index we stored. TODO: make sure driver * signals at least once in 2^16 and remove this. 
*/ if (unlikely(vq->last_used_idx == vq->signalled_used)) vq->signalled_used_valid = false; return 0; } static int __vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, unsigned count) { struct vring_used_elem __user *used; u16 old, new; int start; start = vq->last_used_idx % vq->num; used = vq->used->ring + start; if (__copy_to_user(used, heads, count * sizeof *used)) { vq_err(vq, "Failed to write used"); return -EFAULT; } if (unlikely(vq->log_used)) { /* Make sure data is seen before log. */ smp_wmb(); /* Log used ring entry write. */ log_write(vq->log_base, vq->log_addr + ((void __user *)used - (void __user *)vq->used), count * sizeof *used); } old = vq->last_used_idx; new = (vq->last_used_idx += count); /* If the driver never bothers to signal in a very long while, * used index might wrap around. If that happens, invalidate * signalled_used index we stored. TODO: make sure driver * signals at least once in 2^16 and remove this. */ if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old))) vq->signalled_used_valid = false; return 0; } /* After we've used one of their buffers, we tell them about it. We'll then * want to notify the guest, using eventfd. */ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, unsigned count) { int start, n, r; start = vq->last_used_idx % vq->num; n = vq->num - start; if (n < count) { r = __vhost_add_used_n(vq, heads, n); if (r < 0) return r; heads += n; count -= n; } r = __vhost_add_used_n(vq, heads, count); /* Make sure buffer is written before we update index. */ smp_wmb(); if (put_user(vq->last_used_idx, &vq->used->idx)) { vq_err(vq, "Failed to increment used idx"); return -EFAULT; } if (unlikely(vq->log_used)) { /* Log used index update. 
*/ log_write(vq->log_base, vq->log_addr + offsetof(struct vring_used, idx), sizeof vq->used->idx); if (vq->log_ctx) eventfd_signal(vq->log_ctx, 1); } return r; } static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) { __u16 old, new, event; bool v; /* Flush out used index updates. This is paired * with the barrier that the Guest executes when enabling * interrupts. */ smp_mb(); if (vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) && unlikely(vq->avail_idx == vq->last_avail_idx)) return true; if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) { __u16 flags; if (__get_user(flags, &vq->avail->flags)) { vq_err(vq, "Failed to get flags"); return true; } return !(flags & VRING_AVAIL_F_NO_INTERRUPT); } old = vq->signalled_used; v = vq->signalled_used_valid; new = vq->signalled_used = vq->last_used_idx; vq->signalled_used_valid = true; if (unlikely(!v)) return true; if (get_user(event, vhost_used_event(vq))) { vq_err(vq, "Failed to get used event idx"); return true; } return vring_need_event(event, new, old); } /* This actually signals the guest, using eventfd. */ void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq) { /* Signal the Guest tell them we used something up. */ if (vq->call_ctx && vhost_notify(dev, vq)) eventfd_signal(vq->call_ctx, 1); } /* And here's the combo meal deal. Supersize me! */ void vhost_add_used_and_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq, unsigned int head, int len) { vhost_add_used(vq, head, len); vhost_signal(dev, vq); } /* multi-buffer version of vhost_add_used_and_signal */ void vhost_add_used_and_signal_n(struct vhost_dev *dev, struct vhost_virtqueue *vq, struct vring_used_elem *heads, unsigned count) { vhost_add_used_n(vq, heads, count); vhost_signal(dev, vq); } /* OK, now we need to know about added descriptors. 
*/ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) { u16 avail_idx; int r; if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY)) return false; vq->used_flags &= ~VRING_USED_F_NO_NOTIFY; if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) { r = put_user(vq->used_flags, &vq->used->flags); if (r) { vq_err(vq, "Failed to enable notification at %p: %d\n", &vq->used->flags, r); return false; } } else { r = put_user(vq->avail_idx, vhost_avail_event(vq)); if (r) { vq_err(vq, "Failed to update avail event index at %p: %d\n", vhost_avail_event(vq), r); return false; } } if (unlikely(vq->log_used)) { void __user *used; /* Make sure data is seen before log. */ smp_wmb(); used = vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX) ? &vq->used->flags : vhost_avail_event(vq); /* Log used flags or event index entry write. Both are 16 bit * fields. */ log_write(vq->log_base, vq->log_addr + (used - (void __user *)vq->used), sizeof(u16)); if (vq->log_ctx) eventfd_signal(vq->log_ctx, 1); } /* They could have slipped one in as we were doing that: make * sure it's written, then check again. */ smp_mb(); r = __get_user(avail_idx, &vq->avail->idx); if (r) { vq_err(vq, "Failed to check avail idx at %p: %d\n", &vq->avail->idx, r); return false; } return avail_idx != vq->avail_idx; } /* We don't need to be notified again. */ void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) { int r; if (vq->used_flags & VRING_USED_F_NO_NOTIFY) return; vq->used_flags |= VRING_USED_F_NO_NOTIFY; if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) { r = put_user(vq->used_flags, &vq->used->flags); if (r) vq_err(vq, "Failed to enable notification at %p: %d\n", &vq->used->flags, r); } }
gpl-2.0
fransklaver/linux
arch/sh/kernel/cpu/init.c
1845
8533
/* * arch/sh/kernel/cpu/init.c * * CPU init code * * Copyright (C) 2002 - 2009 Paul Mundt * Copyright (C) 2003 Richard Curnow * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/log2.h> #include <asm/mmu_context.h> #include <asm/processor.h> #include <asm/uaccess.h> #include <asm/page.h> #include <asm/cacheflush.h> #include <asm/cache.h> #include <asm/elf.h> #include <asm/io.h> #include <asm/smp.h> #include <asm/sh_bios.h> #include <asm/setup.h> #ifdef CONFIG_SH_FPU #define cpu_has_fpu 1 #else #define cpu_has_fpu 0 #endif #ifdef CONFIG_SH_DSP #define cpu_has_dsp 1 #else #define cpu_has_dsp 0 #endif /* * Generic wrapper for command line arguments to disable on-chip * peripherals (nofpu, nodsp, and so forth). */ #define onchip_setup(x) \ static int x##_disabled = !cpu_has_##x; \ \ static int x##_setup(char *opts) \ { \ x##_disabled = 1; \ return 1; \ } \ __setup("no" __stringify(x), x##_setup); onchip_setup(fpu); onchip_setup(dsp); #ifdef CONFIG_SPECULATIVE_EXECUTION #define CPUOPM 0xff2f0000 #define CPUOPM_RABD (1 << 5) static void speculative_execution_init(void) { /* Clear RABD */ __raw_writel(__raw_readl(CPUOPM) & ~CPUOPM_RABD, CPUOPM); /* Flush the update */ (void)__raw_readl(CPUOPM); ctrl_barrier(); } #else #define speculative_execution_init() do { } while (0) #endif #ifdef CONFIG_CPU_SH4A #define EXPMASK 0xff2f0004 #define EXPMASK_RTEDS (1 << 0) #define EXPMASK_BRDSSLP (1 << 1) #define EXPMASK_MMCAW (1 << 4) static void expmask_init(void) { unsigned long expmask = __raw_readl(EXPMASK); /* * Future proofing. * * Disable support for slottable sleep instruction, non-nop * instructions in the rte delay slot, and associative writes to * the memory-mapped cache array. 
*/ expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP | EXPMASK_MMCAW); __raw_writel(expmask, EXPMASK); ctrl_barrier(); } #else #define expmask_init() do { } while (0) #endif /* 2nd-level cache init */ void __attribute__ ((weak)) l2_cache_init(void) { } /* * Generic first-level cache init */ #ifdef CONFIG_SUPERH32 static void cache_init(void) { unsigned long ccr, flags; jump_to_uncached(); ccr = __raw_readl(SH_CCR); /* * At this point we don't know whether the cache is enabled or not - a * bootloader may have enabled it. There are at least 2 things that * could be dirty in the cache at this point: * 1. kernel command line set up by boot loader * 2. spilled registers from the prolog of this function * => before re-initialising the cache, we must do a purge of the whole * cache out to memory for safety. As long as nothing is spilled * during the loop to lines that have already been done, this is safe. * - RPC */ if (ccr & CCR_CACHE_ENABLE) { unsigned long ways, waysize, addrstart; waysize = current_cpu_data.dcache.sets; #ifdef CCR_CACHE_ORA /* * If the OC is already in RAM mode, we only have * half of the entries to flush.. */ if (ccr & CCR_CACHE_ORA) waysize >>= 1; #endif waysize <<= current_cpu_data.dcache.entry_shift; #ifdef CCR_CACHE_EMODE /* If EMODE is not set, we only have 1 way to flush. */ if (!(ccr & CCR_CACHE_EMODE)) ways = 1; else #endif ways = current_cpu_data.dcache.ways; addrstart = CACHE_OC_ADDRESS_ARRAY; do { unsigned long addr; for (addr = addrstart; addr < addrstart + waysize; addr += current_cpu_data.dcache.linesz) __raw_writel(0, addr); addrstart += current_cpu_data.dcache.way_incr; } while (--ways); } /* * Default CCR values .. enable the caches * and invalidate them immediately.. 
*/ flags = CCR_CACHE_ENABLE | CCR_CACHE_INVALIDATE; #ifdef CCR_CACHE_EMODE /* Force EMODE if possible */ if (current_cpu_data.dcache.ways > 1) flags |= CCR_CACHE_EMODE; else flags &= ~CCR_CACHE_EMODE; #endif #if defined(CONFIG_CACHE_WRITETHROUGH) /* Write-through */ flags |= CCR_CACHE_WT; #elif defined(CONFIG_CACHE_WRITEBACK) /* Write-back */ flags |= CCR_CACHE_CB; #else /* Off */ flags &= ~CCR_CACHE_ENABLE; #endif l2_cache_init(); __raw_writel(flags, SH_CCR); back_to_cached(); } #else #define cache_init() do { } while (0) #endif #define CSHAPE(totalsize, linesize, assoc) \ ((totalsize & ~0xff) | (linesize << 4) | assoc) #define CACHE_DESC_SHAPE(desc) \ CSHAPE((desc).way_size * (desc).ways, ilog2((desc).linesz), (desc).ways) static void detect_cache_shape(void) { l1d_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.dcache); if (current_cpu_data.dcache.flags & SH_CACHE_COMBINED) l1i_cache_shape = l1d_cache_shape; else l1i_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.icache); if (current_cpu_data.flags & CPU_HAS_L2_CACHE) l2_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.scache); else l2_cache_shape = -1; /* No S-cache */ } static void fpu_init(void) { /* Disable the FPU */ if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) { printk("FPU Disabled\n"); current_cpu_data.flags &= ~CPU_HAS_FPU; } disable_fpu(); clear_used_math(); } #ifdef CONFIG_SH_DSP static void release_dsp(void) { unsigned long sr; /* Clear SR.DSP bit */ __asm__ __volatile__ ( "stc\tsr, %0\n\t" "and\t%1, %0\n\t" "ldc\t%0, sr\n\t" : "=&r" (sr) : "r" (~SR_DSP) ); } static void dsp_init(void) { unsigned long sr; /* * Set the SR.DSP bit, wait for one instruction, and then read * back the SR value. 
*/ __asm__ __volatile__ ( "stc\tsr, %0\n\t" "or\t%1, %0\n\t" "ldc\t%0, sr\n\t" "nop\n\t" "stc\tsr, %0\n\t" : "=&r" (sr) : "r" (SR_DSP) ); /* If the DSP bit is still set, this CPU has a DSP */ if (sr & SR_DSP) current_cpu_data.flags |= CPU_HAS_DSP; /* Disable the DSP */ if (dsp_disabled && (current_cpu_data.flags & CPU_HAS_DSP)) { printk("DSP Disabled\n"); current_cpu_data.flags &= ~CPU_HAS_DSP; } /* Now that we've determined the DSP status, clear the DSP bit. */ release_dsp(); } #else static inline void dsp_init(void) { } #endif /* CONFIG_SH_DSP */ /** * cpu_init * * This is our initial entry point for each CPU, and is invoked on the * boot CPU prior to calling start_kernel(). For SMP, a combination of * this and start_secondary() will bring up each processor to a ready * state prior to hand forking the idle loop. * * We do all of the basic processor init here, including setting up * the caches, FPU, DSP, etc. By the time start_kernel() is hit (and * subsequently platform_setup()) things like determining the CPU * subtype and initial configuration will all be done. * * Each processor family is still responsible for doing its own probing * and cache configuration in cpu_probe(). 
*/ asmlinkage void cpu_init(void) { current_thread_info()->cpu = hard_smp_processor_id(); /* First, probe the CPU */ cpu_probe(); if (current_cpu_data.type == CPU_SH_NONE) panic("Unknown CPU"); /* First setup the rest of the I-cache info */ current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr - current_cpu_data.icache.linesz; current_cpu_data.icache.way_size = current_cpu_data.icache.sets * current_cpu_data.icache.linesz; /* And the D-cache too */ current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr - current_cpu_data.dcache.linesz; current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets * current_cpu_data.dcache.linesz; /* Init the cache */ cache_init(); if (raw_smp_processor_id() == 0) { shm_align_mask = max_t(unsigned long, current_cpu_data.dcache.way_size - 1, PAGE_SIZE - 1); /* Boot CPU sets the cache shape */ detect_cache_shape(); } fpu_init(); dsp_init(); /* * Initialize the per-CPU ASID cache very early, since the * TLB flushing routines depend on this being setup. */ current_cpu_data.asid_cache = NO_CONTEXT; current_cpu_data.phys_bits = __in_29bit_mode() ? 29 : 32; speculative_execution_init(); expmask_init(); /* Do the rest of the boot processor setup */ if (raw_smp_processor_id() == 0) { /* Save off the BIOS VBR, if there is one */ sh_bios_vbr_init(); /* * Setup VBR for boot CPU. Secondary CPUs do this through * start_secondary(). */ per_cpu_trap_init(); /* * Boot processor to setup the FP and extended state * context info. */ init_thread_xstate(); } }
gpl-2.0
embeddedarm/linux-2.6.35-ts4800
drivers/leds/leds-h1940.c
1845
3771
/* * drivers/leds/leds-h1940.c * Copyright (c) Arnaud Patard <arnaud.patard@rtp-net.org> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * * H1940 leds driver * */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/leds.h> #include <linux/gpio.h> #include <mach/regs-gpio.h> #include <mach/hardware.h> #include <mach/h1940-latch.h> /* * Green led. */ static void h1940_greenled_set(struct led_classdev *led_dev, enum led_brightness value) { switch (value) { case LED_HALF: h1940_latch_control(0, H1940_LATCH_LED_FLASH); s3c2410_gpio_setpin(S3C2410_GPA7, 1); break; case LED_FULL: h1940_latch_control(0, H1940_LATCH_LED_GREEN); s3c2410_gpio_setpin(S3C2410_GPA7, 1); break; default: case LED_OFF: h1940_latch_control(H1940_LATCH_LED_FLASH, 0); h1940_latch_control(H1940_LATCH_LED_GREEN, 0); s3c2410_gpio_setpin(S3C2410_GPA7, 0); break; } } static struct led_classdev h1940_greenled = { .name = "h1940:green", .brightness_set = h1940_greenled_set, .default_trigger = "h1940-charger", }; /* * Red led. */ static void h1940_redled_set(struct led_classdev *led_dev, enum led_brightness value) { switch (value) { case LED_HALF: h1940_latch_control(0, H1940_LATCH_LED_FLASH); s3c2410_gpio_setpin(S3C2410_GPA1, 1); break; case LED_FULL: h1940_latch_control(0, H1940_LATCH_LED_RED); s3c2410_gpio_setpin(S3C2410_GPA1, 1); break; default: case LED_OFF: h1940_latch_control(H1940_LATCH_LED_FLASH, 0); h1940_latch_control(H1940_LATCH_LED_RED, 0); s3c2410_gpio_setpin(S3C2410_GPA1, 0); break; } } static struct led_classdev h1940_redled = { .name = "h1940:red", .brightness_set = h1940_redled_set, .default_trigger = "h1940-charger", }; /* * Blue led. 
* (it can only be blue flashing led) */ static void h1940_blueled_set(struct led_classdev *led_dev, enum led_brightness value) { if (value) { /* flashing Blue */ h1940_latch_control(0, H1940_LATCH_LED_FLASH); s3c2410_gpio_setpin(S3C2410_GPA3, 1); } else { h1940_latch_control(H1940_LATCH_LED_FLASH, 0); s3c2410_gpio_setpin(S3C2410_GPA3, 0); } } static struct led_classdev h1940_blueled = { .name = "h1940:blue", .brightness_set = h1940_blueled_set, .default_trigger = "h1940-bluetooth", }; static int __devinit h1940leds_probe(struct platform_device *pdev) { int ret; ret = led_classdev_register(&pdev->dev, &h1940_greenled); if (ret) goto err_green; ret = led_classdev_register(&pdev->dev, &h1940_redled); if (ret) goto err_red; ret = led_classdev_register(&pdev->dev, &h1940_blueled); if (ret) goto err_blue; return 0; err_blue: led_classdev_unregister(&h1940_redled); err_red: led_classdev_unregister(&h1940_greenled); err_green: return ret; } static int h1940leds_remove(struct platform_device *pdev) { led_classdev_unregister(&h1940_greenled); led_classdev_unregister(&h1940_redled); led_classdev_unregister(&h1940_blueled); return 0; } static struct platform_driver h1940leds_driver = { .driver = { .name = "h1940-leds", .owner = THIS_MODULE, }, .probe = h1940leds_probe, .remove = h1940leds_remove, }; static int __init h1940leds_init(void) { return platform_driver_register(&h1940leds_driver); } static void __exit h1940leds_exit(void) { platform_driver_unregister(&h1940leds_driver); } module_init(h1940leds_init); module_exit(h1940leds_exit); MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>"); MODULE_DESCRIPTION("LED driver for the iPAQ H1940"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:h1940-leds");
gpl-2.0
titusece/linux_imx
drivers/irqchip/irq-s3c24xx.c
2613
42978
/* * S3C24XX IRQ handling * * Copyright (c) 2003-2004 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * Copyright (c) 2012 Heiko Stuebner <heiko@sntech.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/io.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/device.h> #include <linux/irqdomain.h> #include <linux/irqchip/chained_irq.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_address.h> #include <asm/exception.h> #include <asm/mach/irq.h> #include <mach/regs-irq.h> #include <mach/regs-gpio.h> #include <plat/cpu.h> #include <plat/regs-irqtype.h> #include <plat/pm.h> #include "irqchip.h" #define S3C_IRQTYPE_NONE 0 #define S3C_IRQTYPE_EINT 1 #define S3C_IRQTYPE_EDGE 2 #define S3C_IRQTYPE_LEVEL 3 struct s3c_irq_data { unsigned int type; unsigned long offset; unsigned long parent_irq; /* data gets filled during init */ struct s3c_irq_intc *intc; unsigned long sub_bits; struct s3c_irq_intc *sub_intc; }; /* * Sructure holding the controller data * @reg_pending register holding pending irqs * @reg_intpnd special register intpnd in main intc * @reg_mask mask register * @domain irq_domain of the controller * @parent parent controller for ext and sub irqs * @irqs irq-data, always s3c_irq_data[32] */ struct s3c_irq_intc { void __iomem *reg_pending; void __iomem *reg_intpnd; void __iomem *reg_mask; struct irq_domain *domain; struct s3c_irq_intc *parent; struct s3c_irq_data *irqs; 
}; /* * Array holding pointers to the global controller structs * [0] ... main_intc * [1] ... sub_intc * [2] ... main_intc2 on s3c2416 */ static struct s3c_irq_intc *s3c_intc[3]; static void s3c_irq_mask(struct irq_data *data) { struct s3c_irq_data *irq_data = irq_data_get_irq_chip_data(data); struct s3c_irq_intc *intc = irq_data->intc; struct s3c_irq_intc *parent_intc = intc->parent; struct s3c_irq_data *parent_data; unsigned long mask; unsigned int irqno; mask = __raw_readl(intc->reg_mask); mask |= (1UL << irq_data->offset); __raw_writel(mask, intc->reg_mask); if (parent_intc) { parent_data = &parent_intc->irqs[irq_data->parent_irq]; /* check to see if we need to mask the parent IRQ * The parent_irq is always in main_intc, so the hwirq * for find_mapping does not need an offset in any case. */ if ((mask & parent_data->sub_bits) == parent_data->sub_bits) { irqno = irq_find_mapping(parent_intc->domain, irq_data->parent_irq); s3c_irq_mask(irq_get_irq_data(irqno)); } } } static void s3c_irq_unmask(struct irq_data *data) { struct s3c_irq_data *irq_data = irq_data_get_irq_chip_data(data); struct s3c_irq_intc *intc = irq_data->intc; struct s3c_irq_intc *parent_intc = intc->parent; unsigned long mask; unsigned int irqno; mask = __raw_readl(intc->reg_mask); mask &= ~(1UL << irq_data->offset); __raw_writel(mask, intc->reg_mask); if (parent_intc) { irqno = irq_find_mapping(parent_intc->domain, irq_data->parent_irq); s3c_irq_unmask(irq_get_irq_data(irqno)); } } static inline void s3c_irq_ack(struct irq_data *data) { struct s3c_irq_data *irq_data = irq_data_get_irq_chip_data(data); struct s3c_irq_intc *intc = irq_data->intc; unsigned long bitval = 1UL << irq_data->offset; __raw_writel(bitval, intc->reg_pending); if (intc->reg_intpnd) __raw_writel(bitval, intc->reg_intpnd); } static int s3c_irq_type(struct irq_data *data, unsigned int type) { switch (type) { case IRQ_TYPE_NONE: break; case IRQ_TYPE_EDGE_RISING: case IRQ_TYPE_EDGE_FALLING: case IRQ_TYPE_EDGE_BOTH: 
irq_set_handler(data->irq, handle_edge_irq); break; case IRQ_TYPE_LEVEL_LOW: case IRQ_TYPE_LEVEL_HIGH: irq_set_handler(data->irq, handle_level_irq); break; default: pr_err("No such irq type %d", type); return -EINVAL; } return 0; } static int s3c_irqext_type_set(void __iomem *gpcon_reg, void __iomem *extint_reg, unsigned long gpcon_offset, unsigned long extint_offset, unsigned int type) { unsigned long newvalue = 0, value; /* Set the GPIO to external interrupt mode */ value = __raw_readl(gpcon_reg); value = (value & ~(3 << gpcon_offset)) | (0x02 << gpcon_offset); __raw_writel(value, gpcon_reg); /* Set the external interrupt to pointed trigger type */ switch (type) { case IRQ_TYPE_NONE: pr_warn("No edge setting!\n"); break; case IRQ_TYPE_EDGE_RISING: newvalue = S3C2410_EXTINT_RISEEDGE; break; case IRQ_TYPE_EDGE_FALLING: newvalue = S3C2410_EXTINT_FALLEDGE; break; case IRQ_TYPE_EDGE_BOTH: newvalue = S3C2410_EXTINT_BOTHEDGE; break; case IRQ_TYPE_LEVEL_LOW: newvalue = S3C2410_EXTINT_LOWLEV; break; case IRQ_TYPE_LEVEL_HIGH: newvalue = S3C2410_EXTINT_HILEV; break; default: pr_err("No such irq type %d", type); return -EINVAL; } value = __raw_readl(extint_reg); value = (value & ~(7 << extint_offset)) | (newvalue << extint_offset); __raw_writel(value, extint_reg); return 0; } static int s3c_irqext_type(struct irq_data *data, unsigned int type) { void __iomem *extint_reg; void __iomem *gpcon_reg; unsigned long gpcon_offset, extint_offset; if ((data->hwirq >= 4) && (data->hwirq <= 7)) { gpcon_reg = S3C2410_GPFCON; extint_reg = S3C24XX_EXTINT0; gpcon_offset = (data->hwirq) * 2; extint_offset = (data->hwirq) * 4; } else if ((data->hwirq >= 8) && (data->hwirq <= 15)) { gpcon_reg = S3C2410_GPGCON; extint_reg = S3C24XX_EXTINT1; gpcon_offset = (data->hwirq - 8) * 2; extint_offset = (data->hwirq - 8) * 4; } else if ((data->hwirq >= 16) && (data->hwirq <= 23)) { gpcon_reg = S3C2410_GPGCON; extint_reg = S3C24XX_EXTINT2; gpcon_offset = (data->hwirq - 8) * 2; extint_offset = (data->hwirq 
- 16) * 4; } else { return -EINVAL; } return s3c_irqext_type_set(gpcon_reg, extint_reg, gpcon_offset, extint_offset, type); } static int s3c_irqext0_type(struct irq_data *data, unsigned int type) { void __iomem *extint_reg; void __iomem *gpcon_reg; unsigned long gpcon_offset, extint_offset; if ((data->hwirq >= 0) && (data->hwirq <= 3)) { gpcon_reg = S3C2410_GPFCON; extint_reg = S3C24XX_EXTINT0; gpcon_offset = (data->hwirq) * 2; extint_offset = (data->hwirq) * 4; } else { return -EINVAL; } return s3c_irqext_type_set(gpcon_reg, extint_reg, gpcon_offset, extint_offset, type); } static struct irq_chip s3c_irq_chip = { .name = "s3c", .irq_ack = s3c_irq_ack, .irq_mask = s3c_irq_mask, .irq_unmask = s3c_irq_unmask, .irq_set_type = s3c_irq_type, .irq_set_wake = s3c_irq_wake }; static struct irq_chip s3c_irq_level_chip = { .name = "s3c-level", .irq_mask = s3c_irq_mask, .irq_unmask = s3c_irq_unmask, .irq_ack = s3c_irq_ack, .irq_set_type = s3c_irq_type, }; static struct irq_chip s3c_irqext_chip = { .name = "s3c-ext", .irq_mask = s3c_irq_mask, .irq_unmask = s3c_irq_unmask, .irq_ack = s3c_irq_ack, .irq_set_type = s3c_irqext_type, .irq_set_wake = s3c_irqext_wake }; static struct irq_chip s3c_irq_eint0t4 = { .name = "s3c-ext0", .irq_ack = s3c_irq_ack, .irq_mask = s3c_irq_mask, .irq_unmask = s3c_irq_unmask, .irq_set_wake = s3c_irq_wake, .irq_set_type = s3c_irqext0_type, }; static void s3c_irq_demux(unsigned int irq, struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct s3c_irq_data *irq_data = irq_desc_get_chip_data(desc); struct s3c_irq_intc *intc = irq_data->intc; struct s3c_irq_intc *sub_intc = irq_data->sub_intc; unsigned long src; unsigned long msk; unsigned int n; unsigned int offset; /* we're using individual domains for the non-dt case * and one big domain for the dt case where the subintc * starts at hwirq number 32. */ offset = (intc->domain->of_node) ? 
32 : 0; chained_irq_enter(chip, desc); src = __raw_readl(sub_intc->reg_pending); msk = __raw_readl(sub_intc->reg_mask); src &= ~msk; src &= irq_data->sub_bits; while (src) { n = __ffs(src); src &= ~(1 << n); irq = irq_find_mapping(sub_intc->domain, offset + n); generic_handle_irq(irq); } chained_irq_exit(chip, desc); } static inline int s3c24xx_handle_intc(struct s3c_irq_intc *intc, struct pt_regs *regs, int intc_offset) { int pnd; int offset; int irq; pnd = __raw_readl(intc->reg_intpnd); if (!pnd) return false; /* non-dt machines use individual domains */ if (!intc->domain->of_node) intc_offset = 0; /* We have a problem that the INTOFFSET register does not always * show one interrupt. Occasionally we get two interrupts through * the prioritiser, and this causes the INTOFFSET register to show * what looks like the logical-or of the two interrupt numbers. * * Thanks to Klaus, Shannon, et al for helping to debug this problem */ offset = __raw_readl(intc->reg_intpnd + 4); /* Find the bit manually, when the offset is wrong. * The pending register only ever contains the one bit of the next * interrupt to handle. */ if (!(pnd & (1 << offset))) offset = __ffs(pnd); irq = irq_find_mapping(intc->domain, intc_offset + offset); handle_IRQ(irq, regs); return true; } asmlinkage void __exception_irq_entry s3c24xx_handle_irq(struct pt_regs *regs) { do { if (likely(s3c_intc[0])) if (s3c24xx_handle_intc(s3c_intc[0], regs, 0)) continue; if (s3c_intc[2]) if (s3c24xx_handle_intc(s3c_intc[2], regs, 64)) continue; break; } while (1); } #ifdef CONFIG_FIQ /** * s3c24xx_set_fiq - set the FIQ routing * @irq: IRQ number to route to FIQ on processor. * @on: Whether to route @irq to the FIQ, or to remove the FIQ routing. * * Change the state of the IRQ to FIQ routing depending on @irq and @on. If * @on is true, the @irq is checked to see if it can be routed and the * interrupt controller updated to route the IRQ. 
If @on is false, the FIQ * routing is cleared, regardless of which @irq is specified. */ int s3c24xx_set_fiq(unsigned int irq, bool on) { u32 intmod; unsigned offs; if (on) { offs = irq - FIQ_START; if (offs > 31) return -EINVAL; intmod = 1 << offs; } else { intmod = 0; } __raw_writel(intmod, S3C2410_INTMOD); return 0; } EXPORT_SYMBOL_GPL(s3c24xx_set_fiq); #endif static int s3c24xx_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { struct s3c_irq_intc *intc = h->host_data; struct s3c_irq_data *irq_data = &intc->irqs[hw]; struct s3c_irq_intc *parent_intc; struct s3c_irq_data *parent_irq_data; unsigned int irqno; /* attach controller pointer to irq_data */ irq_data->intc = intc; irq_data->offset = hw; parent_intc = intc->parent; /* set handler and flags */ switch (irq_data->type) { case S3C_IRQTYPE_NONE: return 0; case S3C_IRQTYPE_EINT: /* On the S3C2412, the EINT0to3 have a parent irq * but need the s3c_irq_eint0t4 chip */ if (parent_intc && (!soc_is_s3c2412() || hw >= 4)) irq_set_chip_and_handler(virq, &s3c_irqext_chip, handle_edge_irq); else irq_set_chip_and_handler(virq, &s3c_irq_eint0t4, handle_edge_irq); break; case S3C_IRQTYPE_EDGE: if (parent_intc || intc->reg_pending == S3C2416_SRCPND2) irq_set_chip_and_handler(virq, &s3c_irq_level_chip, handle_edge_irq); else irq_set_chip_and_handler(virq, &s3c_irq_chip, handle_edge_irq); break; case S3C_IRQTYPE_LEVEL: if (parent_intc) irq_set_chip_and_handler(virq, &s3c_irq_level_chip, handle_level_irq); else irq_set_chip_and_handler(virq, &s3c_irq_chip, handle_level_irq); break; default: pr_err("irq-s3c24xx: unsupported irqtype %d\n", irq_data->type); return -EINVAL; } irq_set_chip_data(virq, irq_data); set_irq_flags(virq, IRQF_VALID); if (parent_intc && irq_data->type != S3C_IRQTYPE_NONE) { if (irq_data->parent_irq > 31) { pr_err("irq-s3c24xx: parent irq %lu is out of range\n", irq_data->parent_irq); goto err; } parent_irq_data = &parent_intc->irqs[irq_data->parent_irq]; parent_irq_data->sub_intc = 
intc; parent_irq_data->sub_bits |= (1UL << hw); /* attach the demuxer to the parent irq */ irqno = irq_find_mapping(parent_intc->domain, irq_data->parent_irq); if (!irqno) { pr_err("irq-s3c24xx: could not find mapping for parent irq %lu\n", irq_data->parent_irq); goto err; } irq_set_chained_handler(irqno, s3c_irq_demux); } return 0; err: set_irq_flags(virq, 0); /* the only error can result from bad mapping data*/ return -EINVAL; } static struct irq_domain_ops s3c24xx_irq_ops = { .map = s3c24xx_irq_map, .xlate = irq_domain_xlate_twocell, }; static void s3c24xx_clear_intc(struct s3c_irq_intc *intc) { void __iomem *reg_source; unsigned long pend; unsigned long last; int i; /* if intpnd is set, read the next pending irq from there */ reg_source = intc->reg_intpnd ? intc->reg_intpnd : intc->reg_pending; last = 0; for (i = 0; i < 4; i++) { pend = __raw_readl(reg_source); if (pend == 0 || pend == last) break; __raw_writel(pend, intc->reg_pending); if (intc->reg_intpnd) __raw_writel(pend, intc->reg_intpnd); pr_info("irq: clearing pending status %08x\n", (int)pend); last = pend; } } static struct s3c_irq_intc * __init s3c24xx_init_intc(struct device_node *np, struct s3c_irq_data *irq_data, struct s3c_irq_intc *parent, unsigned long address) { struct s3c_irq_intc *intc; void __iomem *base = (void *)0xf6000000; /* static mapping */ int irq_num; int irq_start; int ret; intc = kzalloc(sizeof(struct s3c_irq_intc), GFP_KERNEL); if (!intc) return ERR_PTR(-ENOMEM); intc->irqs = irq_data; if (parent) intc->parent = parent; /* select the correct data for the controller. 
* Need to hard code the irq num start and offset * to preserve the static mapping for now */ switch (address) { case 0x4a000000: pr_debug("irq: found main intc\n"); intc->reg_pending = base; intc->reg_mask = base + 0x08; intc->reg_intpnd = base + 0x10; irq_num = 32; irq_start = S3C2410_IRQ(0); break; case 0x4a000018: pr_debug("irq: found subintc\n"); intc->reg_pending = base + 0x18; intc->reg_mask = base + 0x1c; irq_num = 29; irq_start = S3C2410_IRQSUB(0); break; case 0x4a000040: pr_debug("irq: found intc2\n"); intc->reg_pending = base + 0x40; intc->reg_mask = base + 0x48; intc->reg_intpnd = base + 0x50; irq_num = 8; irq_start = S3C2416_IRQ(0); break; case 0x560000a4: pr_debug("irq: found eintc\n"); base = (void *)0xfd000000; intc->reg_mask = base + 0xa4; intc->reg_pending = base + 0xa8; irq_num = 24; irq_start = S3C2410_IRQ(32); break; default: pr_err("irq: unsupported controller address\n"); ret = -EINVAL; goto err; } /* now that all the data is complete, init the irq-domain */ s3c24xx_clear_intc(intc); intc->domain = irq_domain_add_legacy(np, irq_num, irq_start, 0, &s3c24xx_irq_ops, intc); if (!intc->domain) { pr_err("irq: could not create irq-domain\n"); ret = -EINVAL; goto err; } set_handle_irq(s3c24xx_handle_irq); return intc; err: kfree(intc); return ERR_PTR(ret); } static struct s3c_irq_data init_eint[32] = { { .type = S3C_IRQTYPE_NONE, }, /* reserved */ { .type = S3C_IRQTYPE_NONE, }, /* reserved */ { .type = S3C_IRQTYPE_NONE, }, /* reserved */ { .type = S3C_IRQTYPE_NONE, }, /* reserved */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT4 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT5 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT6 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT7 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT8 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT9 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT10 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT11 */ { 
.type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT12 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT13 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT14 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT15 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT16 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT17 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT18 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT19 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT20 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT21 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT22 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT23 */ }; #ifdef CONFIG_CPU_S3C2410 static struct s3c_irq_data init_s3c2410base[32] = { { .type = S3C_IRQTYPE_EINT, }, /* EINT0 */ { .type = S3C_IRQTYPE_EINT, }, /* EINT1 */ { .type = S3C_IRQTYPE_EINT, }, /* EINT2 */ { .type = S3C_IRQTYPE_EINT, }, /* EINT3 */ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */ { .type = S3C_IRQTYPE_NONE, }, /* reserved */ { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */ { .type = S3C_IRQTYPE_EDGE, }, /* TICK */ { .type = S3C_IRQTYPE_EDGE, }, /* WDT */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */ { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */ { .type = S3C_IRQTYPE_EDGE, }, /* LCD */ { .type = S3C_IRQTYPE_EDGE, }, /* DMA0 */ { .type = S3C_IRQTYPE_EDGE, }, /* DMA1 */ { .type = S3C_IRQTYPE_EDGE, }, /* DMA2 */ { .type = S3C_IRQTYPE_EDGE, }, /* DMA3 */ { .type = S3C_IRQTYPE_EDGE, }, /* SDI */ { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */ { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */ { .type = S3C_IRQTYPE_NONE, }, /* reserved */ { .type = S3C_IRQTYPE_EDGE, }, /* USBD */ { .type = S3C_IRQTYPE_EDGE, }, /* USBH */ { .type = S3C_IRQTYPE_EDGE, 
}, /* IIC */ { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */ { .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */ { .type = S3C_IRQTYPE_EDGE, }, /* RTC */ { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */ }; static struct s3c_irq_data init_s3c2410subint[32] = { { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */ { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */ { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */ }; void __init s3c2410_init_irq(void) { #ifdef CONFIG_FIQ init_FIQ(FIQ_START); #endif s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2410base[0], NULL, 0x4a000000); if (IS_ERR(s3c_intc[0])) { pr_err("irq: could not create main interrupt controller\n"); return; } s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2410subint[0], s3c_intc[0], 0x4a000018); s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4); } #endif #ifdef CONFIG_CPU_S3C2412 static struct s3c_irq_data init_s3c2412base[32] = { { .type = S3C_IRQTYPE_LEVEL, }, /* EINT0 */ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT1 */ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT2 */ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT3 */ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */ { .type = S3C_IRQTYPE_NONE, }, /* reserved */ { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */ { .type = S3C_IRQTYPE_EDGE, }, /* TICK */ { .type = S3C_IRQTYPE_EDGE, }, /* WDT */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */ { .type = 
S3C_IRQTYPE_EDGE, }, /* TIMER2 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */ { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */ { .type = S3C_IRQTYPE_EDGE, }, /* LCD */ { .type = S3C_IRQTYPE_EDGE, }, /* DMA0 */ { .type = S3C_IRQTYPE_EDGE, }, /* DMA1 */ { .type = S3C_IRQTYPE_EDGE, }, /* DMA2 */ { .type = S3C_IRQTYPE_EDGE, }, /* DMA3 */ { .type = S3C_IRQTYPE_LEVEL, }, /* SDI/CF */ { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */ { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */ { .type = S3C_IRQTYPE_NONE, }, /* reserved */ { .type = S3C_IRQTYPE_EDGE, }, /* USBD */ { .type = S3C_IRQTYPE_EDGE, }, /* USBH */ { .type = S3C_IRQTYPE_EDGE, }, /* IIC */ { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */ { .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */ { .type = S3C_IRQTYPE_EDGE, }, /* RTC */ { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */ }; static struct s3c_irq_data init_s3c2412eint[32] = { { .type = S3C_IRQTYPE_EINT, .parent_irq = 0 }, /* EINT0 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 1 }, /* EINT1 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 2 }, /* EINT2 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 3 }, /* EINT3 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT4 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT5 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT6 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT7 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT8 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT9 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT10 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT11 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT12 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT13 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT14 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT15 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT16 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT17 */ { .type = 
S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT18 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT19 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT20 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT21 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT22 */ { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT23 */ }; static struct s3c_irq_data init_s3c2412subint[32] = { { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */ { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */ { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */ { .type = S3C_IRQTYPE_NONE, }, { .type = S3C_IRQTYPE_NONE, }, { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 21 }, /* SDI */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 21 }, /* CF */ }; void __init s3c2412_init_irq(void) { pr_info("S3C2412: IRQ Support\n"); #ifdef CONFIG_FIQ init_FIQ(FIQ_START); #endif s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2412base[0], NULL, 0x4a000000); if (IS_ERR(s3c_intc[0])) { pr_err("irq: could not create main interrupt controller\n"); return; } s3c24xx_init_intc(NULL, &init_s3c2412eint[0], s3c_intc[0], 0x560000a4); s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2412subint[0], s3c_intc[0], 0x4a000018); } #endif #ifdef CONFIG_CPU_S3C2416 static struct s3c_irq_data init_s3c2416base[32] = { { .type = S3C_IRQTYPE_EINT, }, /* EINT0 */ { .type = S3C_IRQTYPE_EINT, }, /* EINT1 */ { .type = S3C_IRQTYPE_EINT, }, /* EINT2 */ { .type = S3C_IRQTYPE_EINT, }, /* EINT3 */ { 
.type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */ { .type = S3C_IRQTYPE_NONE, }, /* reserved */ { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */ { .type = S3C_IRQTYPE_EDGE, }, /* TICK */ { .type = S3C_IRQTYPE_LEVEL, }, /* WDT/AC97 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */ { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */ { .type = S3C_IRQTYPE_LEVEL, }, /* LCD */ { .type = S3C_IRQTYPE_LEVEL, }, /* DMA */ { .type = S3C_IRQTYPE_LEVEL, }, /* UART3 */ { .type = S3C_IRQTYPE_NONE, }, /* reserved */ { .type = S3C_IRQTYPE_EDGE, }, /* SDI1 */ { .type = S3C_IRQTYPE_EDGE, }, /* SDI0 */ { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */ { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */ { .type = S3C_IRQTYPE_EDGE, }, /* NAND */ { .type = S3C_IRQTYPE_EDGE, }, /* USBD */ { .type = S3C_IRQTYPE_EDGE, }, /* USBH */ { .type = S3C_IRQTYPE_EDGE, }, /* IIC */ { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */ { .type = S3C_IRQTYPE_NONE, }, { .type = S3C_IRQTYPE_EDGE, }, /* RTC */ { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */ }; static struct s3c_irq_data init_s3c2416subint[32] = { { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */ { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */ { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */ { .type = S3C_IRQTYPE_NONE }, /* 
reserved */ { .type = S3C_IRQTYPE_NONE }, /* reserved */ { .type = S3C_IRQTYPE_NONE }, /* reserved */ { .type = S3C_IRQTYPE_NONE }, /* reserved */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD2 */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD3 */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD4 */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA0 */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA1 */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA2 */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA3 */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA4 */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA5 */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-RX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-TX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-ERR */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* WDT */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* AC97 */ }; static struct s3c_irq_data init_s3c2416_second[32] = { { .type = S3C_IRQTYPE_EDGE }, /* 2D */ { .type = S3C_IRQTYPE_NONE }, /* reserved */ { .type = S3C_IRQTYPE_NONE }, /* reserved */ { .type = S3C_IRQTYPE_NONE }, /* reserved */ { .type = S3C_IRQTYPE_EDGE }, /* PCM0 */ { .type = S3C_IRQTYPE_NONE }, /* reserved */ { .type = S3C_IRQTYPE_EDGE }, /* I2S0 */ }; void __init s3c2416_init_irq(void) { pr_info("S3C2416: IRQ Support\n"); #ifdef CONFIG_FIQ init_FIQ(FIQ_START); #endif s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2416base[0], NULL, 0x4a000000); if (IS_ERR(s3c_intc[0])) { pr_err("irq: could not create main interrupt controller\n"); return; } s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4); s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2416subint[0], s3c_intc[0], 0x4a000018); s3c_intc[2] = s3c24xx_init_intc(NULL, &init_s3c2416_second[0], NULL, 0x4a000040); } #endif #ifdef CONFIG_CPU_S3C2440 static struct s3c_irq_data init_s3c2440base[32] = { { .type = 
S3C_IRQTYPE_EINT, }, /* EINT0 */ { .type = S3C_IRQTYPE_EINT, }, /* EINT1 */ { .type = S3C_IRQTYPE_EINT, }, /* EINT2 */ { .type = S3C_IRQTYPE_EINT, }, /* EINT3 */ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */ { .type = S3C_IRQTYPE_LEVEL, }, /* CAM */ { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */ { .type = S3C_IRQTYPE_EDGE, }, /* TICK */ { .type = S3C_IRQTYPE_LEVEL, }, /* WDT/AC97 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */ { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */ { .type = S3C_IRQTYPE_EDGE, }, /* LCD */ { .type = S3C_IRQTYPE_EDGE, }, /* DMA0 */ { .type = S3C_IRQTYPE_EDGE, }, /* DMA1 */ { .type = S3C_IRQTYPE_EDGE, }, /* DMA2 */ { .type = S3C_IRQTYPE_EDGE, }, /* DMA3 */ { .type = S3C_IRQTYPE_EDGE, }, /* SDI */ { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */ { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */ { .type = S3C_IRQTYPE_LEVEL, }, /* NFCON */ { .type = S3C_IRQTYPE_EDGE, }, /* USBD */ { .type = S3C_IRQTYPE_EDGE, }, /* USBH */ { .type = S3C_IRQTYPE_EDGE, }, /* IIC */ { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */ { .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */ { .type = S3C_IRQTYPE_EDGE, }, /* RTC */ { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */ }; static struct s3c_irq_data init_s3c2440subint[32] = { { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, 
/* UART2-ERR */ { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */ { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_C */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_P */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* WDT */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* AC97 */ }; void __init s3c2440_init_irq(void) { pr_info("S3C2440: IRQ Support\n"); #ifdef CONFIG_FIQ init_FIQ(FIQ_START); #endif s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2440base[0], NULL, 0x4a000000); if (IS_ERR(s3c_intc[0])) { pr_err("irq: could not create main interrupt controller\n"); return; } s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4); s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2440subint[0], s3c_intc[0], 0x4a000018); } #endif #ifdef CONFIG_CPU_S3C2442 static struct s3c_irq_data init_s3c2442base[32] = { { .type = S3C_IRQTYPE_EINT, }, /* EINT0 */ { .type = S3C_IRQTYPE_EINT, }, /* EINT1 */ { .type = S3C_IRQTYPE_EINT, }, /* EINT2 */ { .type = S3C_IRQTYPE_EINT, }, /* EINT3 */ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */ { .type = S3C_IRQTYPE_LEVEL, }, /* CAM */ { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */ { .type = S3C_IRQTYPE_EDGE, }, /* TICK */ { .type = S3C_IRQTYPE_EDGE, }, /* WDT */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */ { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */ { .type = S3C_IRQTYPE_EDGE, }, /* LCD */ { .type = S3C_IRQTYPE_EDGE, }, /* DMA0 */ { .type = S3C_IRQTYPE_EDGE, }, /* DMA1 */ { .type = S3C_IRQTYPE_EDGE, }, /* DMA2 */ { .type = S3C_IRQTYPE_EDGE, }, /* DMA3 */ { .type = S3C_IRQTYPE_EDGE, }, /* SDI */ { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */ { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */ { .type = S3C_IRQTYPE_LEVEL, }, /* NFCON */ { .type = 
S3C_IRQTYPE_EDGE, }, /* USBD */ { .type = S3C_IRQTYPE_EDGE, }, /* USBH */ { .type = S3C_IRQTYPE_EDGE, }, /* IIC */ { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */ { .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */ { .type = S3C_IRQTYPE_EDGE, }, /* RTC */ { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */ }; static struct s3c_irq_data init_s3c2442subint[32] = { { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */ { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */ { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_C */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_P */ }; void __init s3c2442_init_irq(void) { pr_info("S3C2442: IRQ Support\n"); #ifdef CONFIG_FIQ init_FIQ(FIQ_START); #endif s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2442base[0], NULL, 0x4a000000); if (IS_ERR(s3c_intc[0])) { pr_err("irq: could not create main interrupt controller\n"); return; } s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4); s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2442subint[0], s3c_intc[0], 0x4a000018); } #endif #ifdef CONFIG_CPU_S3C2443 static struct s3c_irq_data init_s3c2443base[32] = { { .type = S3C_IRQTYPE_EINT, }, /* EINT0 */ { .type = S3C_IRQTYPE_EINT, }, /* EINT1 */ { .type = S3C_IRQTYPE_EINT, }, /* EINT2 */ { .type = S3C_IRQTYPE_EINT, }, /* EINT3 */ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */ { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */ { .type = S3C_IRQTYPE_LEVEL, }, 
/* CAM */ { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */ { .type = S3C_IRQTYPE_EDGE, }, /* TICK */ { .type = S3C_IRQTYPE_LEVEL, }, /* WDT/AC97 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */ { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */ { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */ { .type = S3C_IRQTYPE_LEVEL, }, /* LCD */ { .type = S3C_IRQTYPE_LEVEL, }, /* DMA */ { .type = S3C_IRQTYPE_LEVEL, }, /* UART3 */ { .type = S3C_IRQTYPE_EDGE, }, /* CFON */ { .type = S3C_IRQTYPE_EDGE, }, /* SDI1 */ { .type = S3C_IRQTYPE_EDGE, }, /* SDI0 */ { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */ { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */ { .type = S3C_IRQTYPE_EDGE, }, /* NAND */ { .type = S3C_IRQTYPE_EDGE, }, /* USBD */ { .type = S3C_IRQTYPE_EDGE, }, /* USBH */ { .type = S3C_IRQTYPE_EDGE, }, /* IIC */ { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */ { .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */ { .type = S3C_IRQTYPE_EDGE, }, /* RTC */ { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */ }; static struct s3c_irq_data init_s3c2443subint[32] = { { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */ { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */ { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_C */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_P */ { .type = S3C_IRQTYPE_NONE }, /* 
reserved */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD1 */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD2 */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD3 */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD4 */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA0 */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA1 */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA2 */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA3 */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA4 */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA5 */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-RX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-TX */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-ERR */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* WDT */ { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* AC97 */ }; void __init s3c2443_init_irq(void) { pr_info("S3C2443: IRQ Support\n"); #ifdef CONFIG_FIQ init_FIQ(FIQ_START); #endif s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2443base[0], NULL, 0x4a000000); if (IS_ERR(s3c_intc[0])) { pr_err("irq: could not create main interrupt controller\n"); return; } s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4); s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2443subint[0], s3c_intc[0], 0x4a000018); } #endif #ifdef CONFIG_OF static int s3c24xx_irq_map_of(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { unsigned int ctrl_num = hw / 32; unsigned int intc_hw = hw % 32; struct s3c_irq_intc *intc = s3c_intc[ctrl_num]; struct s3c_irq_intc *parent_intc = intc->parent; struct s3c_irq_data *irq_data = &intc->irqs[intc_hw]; /* attach controller pointer to irq_data */ irq_data->intc = intc; irq_data->offset = intc_hw; if (!parent_intc) irq_set_chip_and_handler(virq, &s3c_irq_chip, handle_edge_irq); else irq_set_chip_and_handler(virq, &s3c_irq_level_chip, handle_edge_irq); irq_set_chip_data(virq, irq_data); 
set_irq_flags(virq, IRQF_VALID); return 0; } /* Translate our of irq notation * format: <ctrl_num ctrl_irq parent_irq type> */ static int s3c24xx_irq_xlate_of(struct irq_domain *d, struct device_node *n, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_type) { struct s3c_irq_intc *intc; struct s3c_irq_intc *parent_intc; struct s3c_irq_data *irq_data; struct s3c_irq_data *parent_irq_data; int irqno; if (WARN_ON(intsize < 4)) return -EINVAL; if (intspec[0] > 2 || !s3c_intc[intspec[0]]) { pr_err("controller number %d invalid\n", intspec[0]); return -EINVAL; } intc = s3c_intc[intspec[0]]; *out_hwirq = intspec[0] * 32 + intspec[2]; *out_type = intspec[3] & IRQ_TYPE_SENSE_MASK; parent_intc = intc->parent; if (parent_intc) { irq_data = &intc->irqs[intspec[2]]; irq_data->parent_irq = intspec[1]; parent_irq_data = &parent_intc->irqs[irq_data->parent_irq]; parent_irq_data->sub_intc = intc; parent_irq_data->sub_bits |= (1UL << intspec[2]); /* parent_intc is always s3c_intc[0], so no offset */ irqno = irq_create_mapping(parent_intc->domain, intspec[1]); if (irqno < 0) { pr_err("irq: could not map parent interrupt\n"); return irqno; } irq_set_chained_handler(irqno, s3c_irq_demux); } return 0; } static struct irq_domain_ops s3c24xx_irq_ops_of = { .map = s3c24xx_irq_map_of, .xlate = s3c24xx_irq_xlate_of, }; struct s3c24xx_irq_of_ctrl { char *name; unsigned long offset; struct s3c_irq_intc **handle; struct s3c_irq_intc **parent; struct irq_domain_ops *ops; }; static int __init s3c_init_intc_of(struct device_node *np, struct device_node *interrupt_parent, struct s3c24xx_irq_of_ctrl *s3c_ctrl, int num_ctrl) { struct s3c_irq_intc *intc; struct s3c24xx_irq_of_ctrl *ctrl; struct irq_domain *domain; void __iomem *reg_base; int i; reg_base = of_iomap(np, 0); if (!reg_base) { pr_err("irq-s3c24xx: could not map irq registers\n"); return -EINVAL; } domain = irq_domain_add_linear(np, num_ctrl * 32, &s3c24xx_irq_ops_of, NULL); if (!domain) { pr_err("irq: 
could not create irq-domain\n"); return -EINVAL; } for (i = 0; i < num_ctrl; i++) { ctrl = &s3c_ctrl[i]; pr_debug("irq: found controller %s\n", ctrl->name); intc = kzalloc(sizeof(struct s3c_irq_intc), GFP_KERNEL); if (!intc) return -ENOMEM; intc->domain = domain; intc->irqs = kzalloc(sizeof(struct s3c_irq_data) * 32, GFP_KERNEL); if (!intc->irqs) { kfree(intc); return -ENOMEM; } if (ctrl->parent) { intc->reg_pending = reg_base + ctrl->offset; intc->reg_mask = reg_base + ctrl->offset + 0x4; if (*(ctrl->parent)) { intc->parent = *(ctrl->parent); } else { pr_warn("irq: parent of %s missing\n", ctrl->name); kfree(intc->irqs); kfree(intc); continue; } } else { intc->reg_pending = reg_base + ctrl->offset; intc->reg_mask = reg_base + ctrl->offset + 0x08; intc->reg_intpnd = reg_base + ctrl->offset + 0x10; } s3c24xx_clear_intc(intc); s3c_intc[i] = intc; } set_handle_irq(s3c24xx_handle_irq); return 0; } static struct s3c24xx_irq_of_ctrl s3c2410_ctrl[] = { { .name = "intc", .offset = 0, }, { .name = "subintc", .offset = 0x18, .parent = &s3c_intc[0], } }; int __init s3c2410_init_intc_of(struct device_node *np, struct device_node *interrupt_parent, struct s3c24xx_irq_of_ctrl *ctrl, int num_ctrl) { return s3c_init_intc_of(np, interrupt_parent, s3c2410_ctrl, ARRAY_SIZE(s3c2410_ctrl)); } IRQCHIP_DECLARE(s3c2410_irq, "samsung,s3c2410-irq", s3c2410_init_intc_of); static struct s3c24xx_irq_of_ctrl s3c2416_ctrl[] = { { .name = "intc", .offset = 0, }, { .name = "subintc", .offset = 0x18, .parent = &s3c_intc[0], }, { .name = "intc2", .offset = 0x40, } }; int __init s3c2416_init_intc_of(struct device_node *np, struct device_node *interrupt_parent, struct s3c24xx_irq_of_ctrl *ctrl, int num_ctrl) { return s3c_init_intc_of(np, interrupt_parent, s3c2416_ctrl, ARRAY_SIZE(s3c2416_ctrl)); } IRQCHIP_DECLARE(s3c2416_irq, "samsung,s3c2416-irq", s3c2416_init_intc_of); #endif
gpl-2.0
mint2g/stock-kernel
net/irda/irda_device.c
3637
8199
/********************************************************************* * * Filename: irda_device.c * Version: 0.9 * Description: Utility functions used by the device drivers * Status: Experimental. * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Sat Oct 9 09:22:27 1999 * Modified at: Sun Jan 23 17:41:24 2000 * Modified by: Dag Brattli <dagb@cs.uit.no> * * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved. * Copyright (c) 2000-2001 Jean Tourrilhes <jt@hpl.hp.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA * ********************************************************************/ #include <linux/string.h> #include <linux/proc_fs.h> #include <linux/skbuff.h> #include <linux/capability.h> #include <linux/if.h> #include <linux/if_ether.h> #include <linux/if_arp.h> #include <linux/netdevice.h> #include <linux/init.h> #include <linux/tty.h> #include <linux/kmod.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <asm/ioctls.h> #include <asm/uaccess.h> #include <asm/dma.h> #include <asm/io.h> #include <net/irda/irda_device.h> #include <net/irda/irlap.h> #include <net/irda/timer.h> #include <net/irda/wrapper.h> static void __irda_task_delete(struct irda_task *task); static hashbin_t *dongles = NULL; static hashbin_t *tasks = NULL; static void irda_task_timer_expired(void *data); int __init irda_device_init( 
void) { dongles = hashbin_new(HB_NOLOCK); if (dongles == NULL) { IRDA_WARNING("IrDA: Can't allocate dongles hashbin!\n"); return -ENOMEM; } spin_lock_init(&dongles->hb_spinlock); tasks = hashbin_new(HB_LOCK); if (tasks == NULL) { IRDA_WARNING("IrDA: Can't allocate tasks hashbin!\n"); hashbin_delete(dongles, NULL); return -ENOMEM; } /* We no longer initialise the driver ourselves here, we let * the system do it for us... - Jean II */ return 0; } static void leftover_dongle(void *arg) { struct dongle_reg *reg = arg; IRDA_WARNING("IrDA: Dongle type %x not unregistered\n", reg->type); } void irda_device_cleanup(void) { IRDA_DEBUG(4, "%s()\n", __func__); hashbin_delete(tasks, (FREE_FUNC) __irda_task_delete); hashbin_delete(dongles, leftover_dongle); } /* * Function irda_device_set_media_busy (self, status) * * Called when we have detected that another station is transmitting * in contention mode. */ void irda_device_set_media_busy(struct net_device *dev, int status) { struct irlap_cb *self; IRDA_DEBUG(4, "%s(%s)\n", __func__, status ? "TRUE" : "FALSE"); self = (struct irlap_cb *) dev->atalk_ptr; /* Some drivers may enable the receive interrupt before calling * irlap_open(), or they may disable the receive interrupt * after calling irlap_close(). * The IrDA stack is protected from this in irlap_driver_rcv(). * However, the driver calls directly the wrapper, that calls * us directly. Make sure we protect ourselves. 
* Jean II */ if (!self || self->magic != LAP_MAGIC) return; if (status) { self->media_busy = TRUE; if (status == SMALL) irlap_start_mbusy_timer(self, SMALLBUSY_TIMEOUT); else irlap_start_mbusy_timer(self, MEDIABUSY_TIMEOUT); IRDA_DEBUG( 4, "Media busy!\n"); } else { self->media_busy = FALSE; irlap_stop_mbusy_timer(self); } } EXPORT_SYMBOL(irda_device_set_media_busy); /* * Function irda_device_is_receiving (dev) * * Check if the device driver is currently receiving data * */ int irda_device_is_receiving(struct net_device *dev) { struct if_irda_req req; int ret; IRDA_DEBUG(2, "%s()\n", __func__); if (!dev->netdev_ops->ndo_do_ioctl) { IRDA_ERROR("%s: do_ioctl not impl. by device driver\n", __func__); return -1; } ret = (dev->netdev_ops->ndo_do_ioctl)(dev, (struct ifreq *) &req, SIOCGRECEIVING); if (ret < 0) return ret; return req.ifr_receiving; } static void __irda_task_delete(struct irda_task *task) { del_timer(&task->timer); kfree(task); } static void irda_task_delete(struct irda_task *task) { /* Unregister task */ hashbin_remove(tasks, (long) task, NULL); __irda_task_delete(task); } /* * Function irda_task_kick (task) * * Tries to execute a task possible multiple times until the task is either * finished, or askes for a timeout. When a task is finished, we do post * processing, and notify the parent task, that is waiting for this task * to complete. 
*/ static int irda_task_kick(struct irda_task *task) { int finished = TRUE; int count = 0; int timeout; IRDA_DEBUG(2, "%s()\n", __func__); IRDA_ASSERT(task != NULL, return -1;); IRDA_ASSERT(task->magic == IRDA_TASK_MAGIC, return -1;); /* Execute task until it's finished, or askes for a timeout */ do { timeout = task->function(task); if (count++ > 100) { IRDA_ERROR("%s: error in task handler!\n", __func__); irda_task_delete(task); return TRUE; } } while ((timeout == 0) && (task->state != IRDA_TASK_DONE)); if (timeout < 0) { IRDA_ERROR("%s: Error executing task!\n", __func__); irda_task_delete(task); return TRUE; } /* Check if we are finished */ if (task->state == IRDA_TASK_DONE) { del_timer(&task->timer); /* Do post processing */ if (task->finished) task->finished(task); /* Notify parent */ if (task->parent) { /* Check if parent is waiting for us to complete */ if (task->parent->state == IRDA_TASK_CHILD_WAIT) { task->parent->state = IRDA_TASK_CHILD_DONE; /* Stop timer now that we are here */ del_timer(&task->parent->timer); /* Kick parent task */ irda_task_kick(task->parent); } } irda_task_delete(task); } else if (timeout > 0) { irda_start_timer(&task->timer, timeout, (void *) task, irda_task_timer_expired); finished = FALSE; } else { IRDA_DEBUG(0, "%s(), not finished, and no timeout!\n", __func__); finished = FALSE; } return finished; } /* * Function irda_task_timer_expired (data) * * Task time has expired. 
We now try to execute task (again), and restart * the timer if the task has not finished yet */ static void irda_task_timer_expired(void *data) { struct irda_task *task; IRDA_DEBUG(2, "%s()\n", __func__); task = (struct irda_task *) data; irda_task_kick(task); } /* * Function irda_device_setup (dev) * * This function should be used by low level device drivers in a similar way * as ether_setup() is used by normal network device drivers */ static void irda_device_setup(struct net_device *dev) { dev->hard_header_len = 0; dev->addr_len = LAP_ALEN; dev->type = ARPHRD_IRDA; dev->tx_queue_len = 8; /* Window size + 1 s-frame */ memset(dev->broadcast, 0xff, LAP_ALEN); dev->mtu = 2048; dev->flags = IFF_NOARP; } /* * Funciton alloc_irdadev * Allocates and sets up an IRDA device in a manner similar to * alloc_etherdev. */ struct net_device *alloc_irdadev(int sizeof_priv) { return alloc_netdev(sizeof_priv, "irda%d", irda_device_setup); } EXPORT_SYMBOL(alloc_irdadev); #ifdef CONFIG_ISA_DMA_API /* * Function setup_dma (idev, buffer, count, mode) * * Setup the DMA channel. Commonly used by LPC FIR drivers * */ void irda_setup_dma(int channel, dma_addr_t buffer, int count, int mode) { unsigned long flags; flags = claim_dma_lock(); disable_dma(channel); clear_dma_ff(channel); set_dma_mode(channel, mode); set_dma_addr(channel, buffer); set_dma_count(channel, count); enable_dma(channel); release_dma_lock(flags); } EXPORT_SYMBOL(irda_setup_dma); #endif
gpl-2.0
Vitronic-GmbH/HwdZynqLinux
fs/hpfs/anode.c
4661
15737
/* * linux/fs/hpfs/anode.c * * Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999 * * handling HPFS anode tree that contains file allocation info */ #include "hpfs_fn.h" /* Find a sector in allocation tree */ secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode, struct bplus_header *btree, unsigned sec, struct buffer_head *bh) { anode_secno a = -1; struct anode *anode; int i; int c1, c2 = 0; go_down: if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1; if (bp_internal(btree)) { for (i = 0; i < btree->n_used_nodes; i++) if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) { a = le32_to_cpu(btree->u.internal[i].down); brelse(bh); if (!(anode = hpfs_map_anode(s, a, &bh))) return -1; btree = &anode->btree; goto go_down; } hpfs_error(s, "sector %08x not found in internal anode %08x", sec, a); brelse(bh); return -1; } for (i = 0; i < btree->n_used_nodes; i++) if (le32_to_cpu(btree->u.external[i].file_secno) <= sec && le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > sec) { a = le32_to_cpu(btree->u.external[i].disk_secno) + sec - le32_to_cpu(btree->u.external[i].file_secno); if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, a, 1, "data")) { brelse(bh); return -1; } if (inode) { struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); hpfs_inode->i_file_sec = le32_to_cpu(btree->u.external[i].file_secno); hpfs_inode->i_disk_sec = le32_to_cpu(btree->u.external[i].disk_secno); hpfs_inode->i_n_secs = le32_to_cpu(btree->u.external[i].length); } brelse(bh); return a; } hpfs_error(s, "sector %08x not found in external anode %08x", sec, a); brelse(bh); return -1; } /* Add a sector to tree */ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsigned fsecno) { struct bplus_header *btree; struct anode *anode = NULL, *ranode = NULL; struct fnode *fnode; anode_secno a, na = -1, ra, up = -1; secno se; struct buffer_head *bh, *bh1, *bh2; int n; unsigned fs; int 
c1, c2 = 0; if (fnod) { if (!(fnode = hpfs_map_fnode(s, node, &bh))) return -1; btree = &fnode->btree; } else { if (!(anode = hpfs_map_anode(s, node, &bh))) return -1; btree = &anode->btree; } a = node; go_down: if ((n = btree->n_used_nodes - 1) < -!!fnod) { hpfs_error(s, "anode %08x has no entries", a); brelse(bh); return -1; } if (bp_internal(btree)) { a = le32_to_cpu(btree->u.internal[n].down); btree->u.internal[n].file_secno = cpu_to_le32(-1); mark_buffer_dirty(bh); brelse(bh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_add_sector_to_btree #1")) return -1; if (!(anode = hpfs_map_anode(s, a, &bh))) return -1; btree = &anode->btree; goto go_down; } if (n >= 0) { if (le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length) != fsecno) { hpfs_error(s, "allocated size %08x, trying to add sector %08x, %cnode %08x", le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length), fsecno, fnod?'f':'a', node); brelse(bh); return -1; } if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) { le32_add_cpu(&btree->u.external[n].length, 1); mark_buffer_dirty(bh); brelse(bh); return se; } } else { if (fsecno) { hpfs_error(s, "empty file %08x, trying to add sector %08x", node, fsecno); brelse(bh); return -1; } se = !fnod ? node : (node + 16384) & ~16383; } if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M))) { brelse(bh); return -1; } fs = n < 0 ? 0 : le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length); if (!btree->n_free_nodes) { up = a != node ? 
le32_to_cpu(anode->up) : -1; if (!(anode = hpfs_alloc_anode(s, a, &na, &bh1))) { brelse(bh); hpfs_free_sectors(s, se, 1); return -1; } if (a == node && fnod) { anode->up = cpu_to_le32(node); anode->btree.flags |= BP_fnode_parent; anode->btree.n_used_nodes = btree->n_used_nodes; anode->btree.first_free = btree->first_free; anode->btree.n_free_nodes = 40 - anode->btree.n_used_nodes; memcpy(&anode->u, &btree->u, btree->n_used_nodes * 12); btree->flags |= BP_internal; btree->n_free_nodes = 11; btree->n_used_nodes = 1; btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree); btree->u.internal[0].file_secno = cpu_to_le32(-1); btree->u.internal[0].down = cpu_to_le32(na); mark_buffer_dirty(bh); } else if (!(ranode = hpfs_alloc_anode(s, /*a*/0, &ra, &bh2))) { brelse(bh); brelse(bh1); hpfs_free_sectors(s, se, 1); hpfs_free_sectors(s, na, 1); return -1; } brelse(bh); bh = bh1; btree = &anode->btree; } btree->n_free_nodes--; n = btree->n_used_nodes++; le16_add_cpu(&btree->first_free, 12); btree->u.external[n].disk_secno = cpu_to_le32(se); btree->u.external[n].file_secno = cpu_to_le32(fs); btree->u.external[n].length = cpu_to_le32(1); mark_buffer_dirty(bh); brelse(bh); if ((a == node && fnod) || na == -1) return se; c2 = 0; while (up != (anode_secno)-1) { struct anode *new_anode; if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, up, &c1, &c2, "hpfs_add_sector_to_btree #2")) return -1; if (up != node || !fnod) { if (!(anode = hpfs_map_anode(s, up, &bh))) return -1; btree = &anode->btree; } else { if (!(fnode = hpfs_map_fnode(s, up, &bh))) return -1; btree = &fnode->btree; } if (btree->n_free_nodes) { btree->n_free_nodes--; n = btree->n_used_nodes++; le16_add_cpu(&btree->first_free, 8); btree->u.internal[n].file_secno = cpu_to_le32(-1); btree->u.internal[n].down = cpu_to_le32(na); btree->u.internal[n-1].file_secno = cpu_to_le32(fs); mark_buffer_dirty(bh); brelse(bh); brelse(bh2); hpfs_free_sectors(s, ra, 1); if ((anode = hpfs_map_anode(s, na, &bh))) { 
anode->up = cpu_to_le32(up); if (up == node && fnod) anode->btree.flags |= BP_fnode_parent; else anode->btree.flags &= ~BP_fnode_parent; mark_buffer_dirty(bh); brelse(bh); } return se; } up = up != node ? le32_to_cpu(anode->up) : -1; btree->u.internal[btree->n_used_nodes - 1].file_secno = cpu_to_le32(/*fs*/-1); mark_buffer_dirty(bh); brelse(bh); a = na; if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) { anode = new_anode; /*anode->up = cpu_to_le32(up != -1 ? up : ra);*/ anode->btree.flags |= BP_internal; anode->btree.n_used_nodes = 1; anode->btree.n_free_nodes = 59; anode->btree.first_free = cpu_to_le16(16); anode->btree.u.internal[0].down = cpu_to_le32(a); anode->btree.u.internal[0].file_secno = cpu_to_le32(-1); mark_buffer_dirty(bh); brelse(bh); if ((anode = hpfs_map_anode(s, a, &bh))) { anode->up = cpu_to_le32(na); mark_buffer_dirty(bh); brelse(bh); } } else na = a; } if ((anode = hpfs_map_anode(s, na, &bh))) { anode->up = cpu_to_le32(node); if (fnod) anode->btree.flags |= BP_fnode_parent; mark_buffer_dirty(bh); brelse(bh); } if (!fnod) { if (!(anode = hpfs_map_anode(s, node, &bh))) { brelse(bh2); return -1; } btree = &anode->btree; } else { if (!(fnode = hpfs_map_fnode(s, node, &bh))) { brelse(bh2); return -1; } btree = &fnode->btree; } ranode->up = cpu_to_le32(node); memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free)); if (fnod) ranode->btree.flags |= BP_fnode_parent; ranode->btree.n_free_nodes = (bp_internal(&ranode->btree) ? 60 : 40) - ranode->btree.n_used_nodes; if (bp_internal(&ranode->btree)) for (n = 0; n < ranode->btree.n_used_nodes; n++) { struct anode *unode; if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) { unode->up = cpu_to_le32(ra); unode->btree.flags &= ~BP_fnode_parent; mark_buffer_dirty(bh1); brelse(bh1); } } btree->flags |= BP_internal; btree->n_free_nodes = fnod ? 
10 : 58; btree->n_used_nodes = 2; btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree); btree->u.internal[0].file_secno = cpu_to_le32(fs); btree->u.internal[0].down = cpu_to_le32(ra); btree->u.internal[1].file_secno = cpu_to_le32(-1); btree->u.internal[1].down = cpu_to_le32(na); mark_buffer_dirty(bh); brelse(bh); mark_buffer_dirty(bh2); brelse(bh2); return se; } /* * Remove allocation tree. Recursion would look much nicer but * I want to avoid it because it can cause stack overflow. */ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree) { struct bplus_header *btree1 = btree; struct anode *anode = NULL; anode_secno ano = 0, oano; struct buffer_head *bh; int level = 0; int pos = 0; int i; int c1, c2 = 0; int d1, d2; go_down: d2 = 0; while (bp_internal(btree1)) { ano = le32_to_cpu(btree1->u.internal[pos].down); if (level) brelse(bh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1")) return; if (!(anode = hpfs_map_anode(s, ano, &bh))) return; btree1 = &anode->btree; level++; pos = 0; } for (i = 0; i < btree1->n_used_nodes; i++) hpfs_free_sectors(s, le32_to_cpu(btree1->u.external[i].disk_secno), le32_to_cpu(btree1->u.external[i].length)); go_up: if (!level) return; brelse(bh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, ano, &c1, &c2, "hpfs_remove_btree #2")) return; hpfs_free_sectors(s, ano, 1); oano = ano; ano = le32_to_cpu(anode->up); if (--level) { if (!(anode = hpfs_map_anode(s, ano, &bh))) return; btree1 = &anode->btree; } else btree1 = btree; for (i = 0; i < btree1->n_used_nodes; i++) { if (le32_to_cpu(btree1->u.internal[i].down) == oano) { if ((pos = i + 1) < btree1->n_used_nodes) goto go_down; else goto go_up; } } hpfs_error(s, "reference to anode %08x not found in anode %08x " "(probably bad up pointer)", oano, level ? ano : -1); if (level) brelse(bh); } /* Just a wrapper around hpfs_bplus_lookup .. 
used for reading eas */ static secno anode_lookup(struct super_block *s, anode_secno a, unsigned sec) { struct anode *anode; struct buffer_head *bh; if (!(anode = hpfs_map_anode(s, a, &bh))) return -1; return hpfs_bplus_lookup(s, NULL, &anode->btree, sec, bh); } int hpfs_ea_read(struct super_block *s, secno a, int ano, unsigned pos, unsigned len, char *buf) { struct buffer_head *bh; char *data; secno sec; unsigned l; while (len) { if (ano) { if ((sec = anode_lookup(s, a, pos >> 9)) == -1) return -1; } else sec = a + (pos >> 9); if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, sec, 1, "ea #1")) return -1; if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9))) return -1; l = 0x200 - (pos & 0x1ff); if (l > len) l = len; memcpy(buf, data + (pos & 0x1ff), l); brelse(bh); buf += l; pos += l; len -= l; } return 0; } int hpfs_ea_write(struct super_block *s, secno a, int ano, unsigned pos, unsigned len, const char *buf) { struct buffer_head *bh; char *data; secno sec; unsigned l; while (len) { if (ano) { if ((sec = anode_lookup(s, a, pos >> 9)) == -1) return -1; } else sec = a + (pos >> 9); if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, sec, 1, "ea #2")) return -1; if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9))) return -1; l = 0x200 - (pos & 0x1ff); if (l > len) l = len; memcpy(data + (pos & 0x1ff), buf, l); mark_buffer_dirty(bh); brelse(bh); buf += l; pos += l; len -= l; } return 0; } void hpfs_ea_remove(struct super_block *s, secno a, int ano, unsigned len) { struct anode *anode; struct buffer_head *bh; if (ano) { if (!(anode = hpfs_map_anode(s, a, &bh))) return; hpfs_remove_btree(s, &anode->btree); brelse(bh); hpfs_free_sectors(s, a, 1); } else hpfs_free_sectors(s, a, (len + 511) >> 9); } /* Truncate allocation tree. 
   Doesn't join anodes - I hope it doesn't matter */

/*
 * Truncate the allocation btree of fnode/anode 'f' so that it describes
 * only the first 'secs' logical sectors.  'fno' selects whether 'f' is a
 * fnode (non-zero) or an anode.  secs == 0 empties the tree entirely.
 * Sectors beyond the new end are returned to the free-space bitmap.
 */
void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
{
	struct fnode *fnode;
	struct anode *anode;
	struct buffer_head *bh;
	struct bplus_header *btree;
	anode_secno node = f;
	int i, j, nodes;
	int c1, c2 = 0;	/* cycle-detection state for hpfs_stop_cycles() */

	if (fno) {
		if (!(fnode = hpfs_map_fnode(s, f, &bh))) return;
		btree = &fnode->btree;
	} else {
		if (!(anode = hpfs_map_anode(s, f, &bh))) return;
		btree = &anode->btree;
	}
	if (!secs) {
		/* Truncate to zero: drop the whole tree ... */
		hpfs_remove_btree(s, btree);
		if (fno) {
			/* ... and reset the fnode's embedded root to empty leaf */
			btree->n_free_nodes = 8;
			btree->n_used_nodes = 0;
			btree->first_free = cpu_to_le16(8);
			btree->flags &= ~BP_internal;
			mark_buffer_dirty(bh);
		} else hpfs_free_sectors(s, f, 1);
		brelse(bh);
		return;
	}
	/* Walk down internal nodes, pruning subtrees entirely past 'secs' */
	while (bp_internal(btree)) {
		nodes = btree->n_used_nodes + btree->n_free_nodes;
		for (i = 0; i < btree->n_used_nodes; i++)
			if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f;
		brelse(bh);
		/* last internal entry should cover everything (file_secno == -1) */
		hpfs_error(s, "internal btree %08x doesn't end with -1", node);
		return;
	f:
		/* entries after index i are wholly beyond the new end: free them */
		for (j = i + 1; j < btree->n_used_nodes; j++)
			hpfs_ea_remove(s, le32_to_cpu(btree->u.internal[j].down), 1, 0);
		btree->n_used_nodes = i + 1;
		btree->n_free_nodes = nodes - btree->n_used_nodes;
		btree->first_free = cpu_to_le16(8 + 8 * btree->n_used_nodes);
		mark_buffer_dirty(bh);
		if (btree->u.internal[i].file_secno == cpu_to_le32(secs)) {
			/* boundary falls exactly on this entry: done */
			brelse(bh);
			return;
		}
		/* descend into the child that straddles the boundary */
		node = le32_to_cpu(btree->u.internal[i].down);
		brelse(bh);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree"))
				return;
		if (!(anode = hpfs_map_anode(s, node, &bh))) return;
		btree = &anode->btree;
	}
	/* Leaf level: find the external run containing/straddling 'secs' */
	nodes = btree->n_used_nodes + btree->n_free_nodes;
	for (i = 0; i < btree->n_used_nodes; i++)
		if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) >= secs) goto ff;
	brelse(bh);
	return;
	ff:
	if (secs <= le32_to_cpu(btree->u.external[i].file_secno)) {
		hpfs_error(s, "there is an allocation error in file %08x, sector %08x", f, secs);
		if (i) i--;
	}
	else if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > secs) {
		/* run straddles the boundary: free its tail and shorten it */
		hpfs_free_sectors(s, le32_to_cpu(btree->u.external[i].disk_secno) + secs -
			le32_to_cpu(btree->u.external[i].file_secno), le32_to_cpu(btree->u.external[i].length)
			- secs + le32_to_cpu(btree->u.external[i].file_secno)); /* I hope gcc optimizes this :-) */
		btree->u.external[i].length = cpu_to_le32(secs - le32_to_cpu(btree->u.external[i].file_secno));
	}
	/* free every run entirely past the boundary */
	for (j = i + 1; j < btree->n_used_nodes; j++)
		hpfs_free_sectors(s, le32_to_cpu(btree->u.external[j].disk_secno), le32_to_cpu(btree->u.external[j].length));
	btree->n_used_nodes = i + 1;
	btree->n_free_nodes = nodes - btree->n_used_nodes;
	btree->first_free = cpu_to_le16(8 + 12 * btree->n_used_nodes);
	mark_buffer_dirty(bh);
	brelse(bh);
}

/* Remove file or directory and its eas - note that directory must
   be empty when this is called. */

void hpfs_remove_fnode(struct super_block *s, fnode_secno fno)
{
	struct buffer_head *bh;
	struct fnode *fnode;
	struct extended_attribute *ea;
	struct extended_attribute *ea_end;

	if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return;
	/* drop the data btree (file) or the directory tree (dir) */
	if (!fnode_is_dir(fnode)) hpfs_remove_btree(s, &fnode->btree);
	else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno));
	/* free storage behind every indirect EA stored in the fnode itself */
	ea_end = fnode_end_ea(fnode);
	for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
		if (ea_indirect(ea))
			hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea));
	/* then the external EA area, and finally the fnode sector */
	hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l));
	brelse(bh);
	hpfs_free_sectors(s, fno, 1);
}
gpl-2.0
engine95/navelA-990
drivers/acpi/acpica/utmutex.c
4917
10054
/******************************************************************************* * * Module Name: utmutex - local mutex support * ******************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utmutex") /* Local prototypes */ static acpi_status acpi_ut_create_mutex(acpi_mutex_handle mutex_id); static void acpi_ut_delete_mutex(acpi_mutex_handle mutex_id); /******************************************************************************* * * FUNCTION: acpi_ut_mutex_initialize * * PARAMETERS: None. * * RETURN: Status * * DESCRIPTION: Create the system mutex objects. This includes mutexes, * spin locks, and reader/writer locks. 
 *
 ******************************************************************************/

acpi_status acpi_ut_mutex_initialize(void)
{
	u32 i;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ut_mutex_initialize);

	/* Create each of the predefined mutex objects */

	for (i = 0; i < ACPI_NUM_MUTEX; i++) {
		status = acpi_ut_create_mutex(i);
		if (ACPI_FAILURE(status)) {
			/* NOTE(review): earlier mutexes are not torn down here;
			 * presumably acpi_ut_mutex_terminate handles cleanup on
			 * failed init — confirm against the caller */
			return_ACPI_STATUS(status);
		}
	}

	/* Create the spinlocks for use at interrupt level */

	status = acpi_os_create_lock (&acpi_gbl_gpe_lock);
	if (ACPI_FAILURE (status)) {
		return_ACPI_STATUS (status);
	}

	status = acpi_os_create_lock (&acpi_gbl_hardware_lock);
	if (ACPI_FAILURE (status)) {
		return_ACPI_STATUS (status);
	}

	/* Mutex for _OSI support */

	status = acpi_os_create_mutex(&acpi_gbl_osi_mutex);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Create the reader/writer lock for namespace access */

	status = acpi_ut_create_rw_lock(&acpi_gbl_namespace_rw_lock);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_mutex_terminate
 *
 * PARAMETERS:  None.
 *
 * RETURN:      None.
 *
 * DESCRIPTION: Delete all of the system mutex objects. This includes mutexes,
 *              spin locks, and reader/writer locks.
 *
 ******************************************************************************/

void acpi_ut_mutex_terminate(void)
{
	u32 i;

	ACPI_FUNCTION_TRACE(ut_mutex_terminate);

	/* Delete each predefined mutex object */

	for (i = 0; i < ACPI_NUM_MUTEX; i++) {
		acpi_ut_delete_mutex(i);
	}

	acpi_os_delete_mutex(acpi_gbl_osi_mutex);

	/* Delete the spinlocks */

	acpi_os_delete_lock(acpi_gbl_gpe_lock);
	acpi_os_delete_lock(acpi_gbl_hardware_lock);

	/* Delete the reader/writer lock */

	acpi_ut_delete_rw_lock(&acpi_gbl_namespace_rw_lock);
	return_VOID;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_create_mutex
 *
 * PARAMETERS:  mutex_id        - ID of the mutex to be created
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create a mutex object.
 *
 ******************************************************************************/

static acpi_status acpi_ut_create_mutex(acpi_mutex_handle mutex_id)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_U32(ut_create_mutex, mutex_id);

	/* Idempotent: only create if this slot has no OS mutex yet */

	if (!acpi_gbl_mutex_info[mutex_id].mutex) {
		status =
		    acpi_os_create_mutex(&acpi_gbl_mutex_info[mutex_id].mutex);
		acpi_gbl_mutex_info[mutex_id].thread_id =
		    ACPI_MUTEX_NOT_ACQUIRED;
		acpi_gbl_mutex_info[mutex_id].use_count = 0;
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_delete_mutex
 *
 * PARAMETERS:  mutex_id        - ID of the mutex to be deleted
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Delete a mutex object.
 *
 ******************************************************************************/

static void acpi_ut_delete_mutex(acpi_mutex_handle mutex_id)
{

	ACPI_FUNCTION_TRACE_U32(ut_delete_mutex, mutex_id);

	acpi_os_delete_mutex(acpi_gbl_mutex_info[mutex_id].mutex);

	/* Reset bookkeeping so the slot reads as "never created/acquired" */

	acpi_gbl_mutex_info[mutex_id].mutex = NULL;
	acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_acquire_mutex
 *
 * PARAMETERS:  mutex_id        - ID of the mutex to be acquired
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Acquire a mutex object. Blocks forever (ACPI_WAIT_FOREVER)
 *              until the mutex is available.
 *
 ******************************************************************************/

acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
{
	acpi_status status;
	acpi_thread_id this_thread_id;

	ACPI_FUNCTION_NAME(ut_acquire_mutex);

	if (mutex_id > ACPI_MAX_MUTEX) {
		return (AE_BAD_PARAMETER);
	}

	this_thread_id = acpi_os_get_thread_id();

#ifdef ACPI_MUTEX_DEBUG
	{
		u32 i;
		/*
		 * Mutex debug code, for internal debugging only.
		 *
		 * Deadlock prevention. Check if this thread owns any mutexes of value
		 * greater than or equal to this one. If so, the thread has violated
		 * the mutex ordering rule. This indicates a coding error somewhere in
		 * the ACPI subsystem code.
		 */
		for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) {
			if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
				if (i == mutex_id) {
					/* re-acquisition of the same mutex */
					ACPI_ERROR((AE_INFO,
						    "Mutex [%s] already acquired by this thread [%u]",
						    acpi_ut_get_mutex_name
						    (mutex_id),
						    (u32)this_thread_id));

					return (AE_ALREADY_ACQUIRED);
				}

				ACPI_ERROR((AE_INFO,
					    "Invalid acquire order: Thread %u owns [%s], wants [%s]",
					    (u32)this_thread_id,
					    acpi_ut_get_mutex_name(i),
					    acpi_ut_get_mutex_name(mutex_id)));

				return (AE_ACQUIRE_DEADLOCK);
			}
		}
	}
#endif

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			  "Thread %u attempting to acquire Mutex [%s]\n",
			  (u32)this_thread_id,
			  acpi_ut_get_mutex_name(mutex_id)));

	status = acpi_os_acquire_mutex(acpi_gbl_mutex_info[mutex_id].mutex,
				       ACPI_WAIT_FOREVER);
	if (ACPI_SUCCESS(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Thread %u acquired Mutex [%s]\n",
				  (u32)this_thread_id,
				  acpi_ut_get_mutex_name(mutex_id)));

		/* record ownership for the debug/ordering checks above */
		acpi_gbl_mutex_info[mutex_id].use_count++;
		acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id;
	} else {
		ACPI_EXCEPTION((AE_INFO, status,
				"Thread %u could not acquire Mutex [0x%X]",
				(u32)this_thread_id, mutex_id));
	}

	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_release_mutex
 *
 * PARAMETERS:  mutex_id        - ID of the mutex to be released
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Release a mutex object.
 *
 ******************************************************************************/

acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
{
	ACPI_FUNCTION_NAME(ut_release_mutex);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Thread %u releasing Mutex [%s]\n",
			  (u32)acpi_os_get_thread_id(),
			  acpi_ut_get_mutex_name(mutex_id)));

	if (mutex_id > ACPI_MAX_MUTEX) {
		return (AE_BAD_PARAMETER);
	}

	/*
	 * Mutex must be acquired in order to release it!
	 */
	if (acpi_gbl_mutex_info[mutex_id].thread_id == ACPI_MUTEX_NOT_ACQUIRED) {
		ACPI_ERROR((AE_INFO,
			    "Mutex [0x%X] is not acquired, cannot release",
			    mutex_id));

		return (AE_NOT_ACQUIRED);
	}
#ifdef ACPI_MUTEX_DEBUG
	{
		u32 i;
		/*
		 * Mutex debug code, for internal debugging only.
		 *
		 * Deadlock prevention. Check if this thread owns any mutexes of value
		 * greater than this one. If so, the thread has violated the mutex
		 * ordering rule. This indicates a coding error somewhere in
		 * the ACPI subsystem code.
		 */
		for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) {
			if (acpi_gbl_mutex_info[i].thread_id ==
			    acpi_os_get_thread_id()) {
				if (i == mutex_id) {
					continue;
				}

				ACPI_ERROR((AE_INFO,
					    "Invalid release order: owns [%s], releasing [%s]",
					    acpi_ut_get_mutex_name(i),
					    acpi_ut_get_mutex_name(mutex_id)));

				return (AE_RELEASE_DEADLOCK);
			}
		}
	}
#endif

	/* Mark unlocked FIRST */

	acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED;

	acpi_os_release_mutex(acpi_gbl_mutex_info[mutex_id].mutex);
	return (AE_OK);
}
gpl-2.0
mathkid95/linux_lg_lollipop
drivers/spi/spi-bfin-sport.c
4917
23566
/* * SPI bus via the Blackfin SPORT peripheral * * Enter bugs at http://blackfin.uclinux.org/ * * Copyright 2009-2011 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/init.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/gpio.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/irq.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/workqueue.h> #include <asm/portmux.h> #include <asm/bfin5xx_spi.h> #include <asm/blackfin.h> #include <asm/bfin_sport.h> #include <asm/cacheflush.h> #define DRV_NAME "bfin-sport-spi" #define DRV_DESC "SPI bus via the Blackfin SPORT" MODULE_AUTHOR("Cliff Cai"); MODULE_DESCRIPTION(DRV_DESC); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:bfin-sport-spi"); enum bfin_sport_spi_state { START_STATE, RUNNING_STATE, DONE_STATE, ERROR_STATE, }; struct bfin_sport_spi_master_data; struct bfin_sport_transfer_ops { void (*write) (struct bfin_sport_spi_master_data *); void (*read) (struct bfin_sport_spi_master_data *); void (*duplex) (struct bfin_sport_spi_master_data *); }; struct bfin_sport_spi_master_data { /* Driver model hookup */ struct device *dev; /* SPI framework hookup */ struct spi_master *master; /* Regs base of SPI controller */ struct sport_register __iomem *regs; int err_irq; /* Pin request list */ u16 *pin_req; /* Driver message queue */ struct workqueue_struct *workqueue; struct work_struct pump_messages; spinlock_t lock; struct list_head queue; int busy; bool run; /* Message Transfer pump */ struct tasklet_struct pump_transfers; /* Current message transfer state info */ enum bfin_sport_spi_state state; struct spi_message *cur_msg; struct spi_transfer *cur_transfer; struct bfin_sport_spi_slave_data *cur_chip; union { void *tx; u8 *tx8; u16 *tx16; }; void *tx_end; union { void *rx; u8 *rx8; u16 *rx16; }; void *rx_end; int cs_change; struct bfin_sport_transfer_ops 
	*ops;	/* transfer ops for the current word width (u8 or u16) */
};

/* Per-slave (per chip-select) configuration, stored via spi_set_ctldata() */
struct bfin_sport_spi_slave_data {
	u16 ctl_reg;
	u16 baud;
	u16 cs_chg_udelay;	/* Some devices require > 255usec delay */
	u32 cs_gpio;
	u16 idle_tx_val;	/* value shifted out during rx-only transfers */
	struct bfin_sport_transfer_ops *ops;
};

/* Enable the SPORT transmitter and receiver */
static void
bfin_sport_spi_enable(struct bfin_sport_spi_master_data *drv_data)
{
	bfin_write_or(&drv_data->regs->tcr1, TSPEN);
	bfin_write_or(&drv_data->regs->rcr1, TSPEN);
	SSYNC();
}

/* Disable the SPORT transmitter and receiver */
static void
bfin_sport_spi_disable(struct bfin_sport_spi_master_data *drv_data)
{
	bfin_write_and(&drv_data->regs->tcr1, ~TSPEN);
	bfin_write_and(&drv_data->regs->rcr1, ~TSPEN);
	SSYNC();
}

/* Caculate the SPI_BAUD register value based on input HZ */
static u16
bfin_sport_hz_to_spi_baud(u32 speed_hz)
{
	u_long clk, sclk = get_sclk();
	int div = (sclk / (2 * speed_hz)) - 1;

	if (div < 0)
		div = 0;

	/* round up so the resulting clock never exceeds the requested rate */
	clk = sclk / (2 * (div + 1));

	if (clk > speed_hz)
		div++;

	return div;
}

/* Chip select operation functions for cs_change flag */
static void
bfin_sport_spi_cs_active(struct bfin_sport_spi_slave_data *chip)
{
	gpio_direction_output(chip->cs_gpio, 0);	/* active low */
}

static void
bfin_sport_spi_cs_deactive(struct bfin_sport_spi_slave_data *chip)
{
	gpio_direction_output(chip->cs_gpio, 1);

	/* Move delay here for consistency */
	if (chip->cs_chg_udelay)
		udelay(chip->cs_chg_udelay);
}

/* Busy-wait (bounded to ~1s) until the receive FIFO has data */
static void
bfin_sport_spi_stat_poll_complete(struct bfin_sport_spi_master_data *drv_data)
{
	unsigned long timeout = jiffies + HZ;
	while (!(bfin_read(&drv_data->regs->stat) & RXNE)) {
		if (!time_before(jiffies, timeout))
			break;
	}
}

/* tx-only, 8-bit words: write each byte, discard the echoed rx word */
static void
bfin_sport_spi_u8_writer(struct bfin_sport_spi_master_data *drv_data)
{
	u16 dummy;

	while (drv_data->tx < drv_data->tx_end) {
		bfin_write(&drv_data->regs->tx16, *drv_data->tx8++);
		bfin_sport_spi_stat_poll_complete(drv_data);
		dummy = bfin_read(&drv_data->regs->rx16);
	}
}

/* rx-only, 8-bit words: clock out the idle value, keep the received byte */
static void
bfin_sport_spi_u8_reader(struct bfin_sport_spi_master_data *drv_data)
{
	u16 tx_val = drv_data->cur_chip->idle_tx_val;

	while (drv_data->rx < drv_data->rx_end) {
		bfin_write(&drv_data->regs->tx16, tx_val);
		bfin_sport_spi_stat_poll_complete(drv_data);
		*drv_data->rx8++ = bfin_read(&drv_data->regs->rx16);
	}
}

/* full-duplex, 8-bit words */
static void
bfin_sport_spi_u8_duplex(struct bfin_sport_spi_master_data *drv_data)
{
	while (drv_data->rx < drv_data->rx_end) {
		bfin_write(&drv_data->regs->tx16, *drv_data->tx8++);
		bfin_sport_spi_stat_poll_complete(drv_data);
		*drv_data->rx8++ = bfin_read(&drv_data->regs->rx16);
	}
}

static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u8 = {
	.write  = bfin_sport_spi_u8_writer,
	.read   = bfin_sport_spi_u8_reader,
	.duplex = bfin_sport_spi_u8_duplex,
};

/* 16-bit counterparts of the u8 routines above */
static void
bfin_sport_spi_u16_writer(struct bfin_sport_spi_master_data *drv_data)
{
	u16 dummy;

	while (drv_data->tx < drv_data->tx_end) {
		bfin_write(&drv_data->regs->tx16, *drv_data->tx16++);
		bfin_sport_spi_stat_poll_complete(drv_data);
		dummy = bfin_read(&drv_data->regs->rx16);
	}
}

static void
bfin_sport_spi_u16_reader(struct bfin_sport_spi_master_data *drv_data)
{
	u16 tx_val = drv_data->cur_chip->idle_tx_val;

	while (drv_data->rx < drv_data->rx_end) {
		bfin_write(&drv_data->regs->tx16, tx_val);
		bfin_sport_spi_stat_poll_complete(drv_data);
		*drv_data->rx16++ = bfin_read(&drv_data->regs->rx16);
	}
}

static void
bfin_sport_spi_u16_duplex(struct bfin_sport_spi_master_data *drv_data)
{
	while (drv_data->rx < drv_data->rx_end) {
		bfin_write(&drv_data->regs->tx16, *drv_data->tx16++);
		bfin_sport_spi_stat_poll_complete(drv_data);
		*drv_data->rx16++ = bfin_read(&drv_data->regs->rx16);
	}
}

static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u16 = {
	.write  = bfin_sport_spi_u16_writer,
	.read   = bfin_sport_spi_u16_reader,
	.duplex = bfin_sport_spi_u16_duplex,
};

/* stop controller and re-config current chip */
static void
bfin_sport_spi_restore_state(struct bfin_sport_spi_master_data *drv_data)
{
	struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;

	bfin_sport_spi_disable(drv_data);
	dev_dbg(drv_data->dev, "restoring spi ctl state\n");

	bfin_write(&drv_data->regs->tcr1, chip->ctl_reg);
	bfin_write(&drv_data->regs->tclkdiv, chip->baud);
	SSYNC();

	/* rx side: same config minus internal-clock/frame-sync bits */
	bfin_write(&drv_data->regs->rcr1, chip->ctl_reg & ~(ITCLK | ITFS));
	SSYNC();

	bfin_sport_spi_cs_active(chip);
}

/* test if there is more transfer to be done */
static enum bfin_sport_spi_state
bfin_sport_spi_next_transfer(struct bfin_sport_spi_master_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct spi_transfer *trans = drv_data->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		drv_data->cur_transfer =
		    list_entry(trans->transfer_list.next,
			       struct spi_transfer, transfer_list);
		return RUNNING_STATE;
	}

	return DONE_STATE;
}

/*
 * caller already set message->status;
 * dma and pio irqs are blocked give finished message back
 */
static void
bfin_sport_spi_giveback(struct bfin_sport_spi_master_data *drv_data)
{
	struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;
	unsigned long flags;
	struct spi_message *msg;

	spin_lock_irqsave(&drv_data->lock, flags);
	msg = drv_data->cur_msg;
	drv_data->state = START_STATE;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;
	drv_data->cur_chip = NULL;
	/* kick the pump to pick up the next queued message */
	queue_work(drv_data->workqueue, &drv_data->pump_messages);
	spin_unlock_irqrestore(&drv_data->lock, flags);

	/* deassert CS unless the final transfer asked to keep it active */
	if (!drv_data->cs_change)
		bfin_sport_spi_cs_deactive(chip);

	if (msg->complete)
		msg->complete(msg->context);
}

static irqreturn_t
sport_err_handler(int irq, void *dev_id)
{
	struct bfin_sport_spi_master_data *drv_data = dev_id;
	u16 status;

	dev_dbg(drv_data->dev, "%s enter\n", __func__);
	status = bfin_read(&drv_data->regs->stat) & (TOVF | TUVF | ROVF | RUVF);

	if (status) {
		/* write back to acknowledge the error bits (W1C — presumably) */
		bfin_write(&drv_data->regs->stat, status);
		SSYNC();

		bfin_sport_spi_disable(drv_data);
		dev_err(drv_data->dev, "status error:%s%s%s%s\n",
			status & TOVF ? " TOVF" : "",
			status & TUVF ? " TUVF" : "",
			status & ROVF ? " ROVF" : "",
			status & RUVF ?
" RUVF" : ""); } return IRQ_HANDLED; } static void bfin_sport_spi_pump_transfers(unsigned long data) { struct bfin_sport_spi_master_data *drv_data = (void *)data; struct spi_message *message = NULL; struct spi_transfer *transfer = NULL; struct spi_transfer *previous = NULL; struct bfin_sport_spi_slave_data *chip = NULL; unsigned int bits_per_word; u32 tranf_success = 1; u32 transfer_speed; u8 full_duplex = 0; /* Get current state information */ message = drv_data->cur_msg; transfer = drv_data->cur_transfer; chip = drv_data->cur_chip; if (transfer->speed_hz) transfer_speed = bfin_sport_hz_to_spi_baud(transfer->speed_hz); else transfer_speed = chip->baud; bfin_write(&drv_data->regs->tclkdiv, transfer_speed); SSYNC(); /* * if msg is error or done, report it back using complete() callback */ /* Handle for abort */ if (drv_data->state == ERROR_STATE) { dev_dbg(drv_data->dev, "transfer: we've hit an error\n"); message->status = -EIO; bfin_sport_spi_giveback(drv_data); return; } /* Handle end of message */ if (drv_data->state == DONE_STATE) { dev_dbg(drv_data->dev, "transfer: all done!\n"); message->status = 0; bfin_sport_spi_giveback(drv_data); return; } /* Delay if requested at end of transfer */ if (drv_data->state == RUNNING_STATE) { dev_dbg(drv_data->dev, "transfer: still running ...\n"); previous = list_entry(transfer->transfer_list.prev, struct spi_transfer, transfer_list); if (previous->delay_usecs) udelay(previous->delay_usecs); } if (transfer->len == 0) { /* Move to next transfer of this msg */ drv_data->state = bfin_sport_spi_next_transfer(drv_data); /* Schedule next transfer tasklet */ tasklet_schedule(&drv_data->pump_transfers); } if (transfer->tx_buf != NULL) { drv_data->tx = (void *)transfer->tx_buf; drv_data->tx_end = drv_data->tx + transfer->len; dev_dbg(drv_data->dev, "tx_buf is %p, tx_end is %p\n", transfer->tx_buf, drv_data->tx_end); } else drv_data->tx = NULL; if (transfer->rx_buf != NULL) { full_duplex = transfer->tx_buf != NULL; drv_data->rx = 
transfer->rx_buf; drv_data->rx_end = drv_data->rx + transfer->len; dev_dbg(drv_data->dev, "rx_buf is %p, rx_end is %p\n", transfer->rx_buf, drv_data->rx_end); } else drv_data->rx = NULL; drv_data->cs_change = transfer->cs_change; /* Bits per word setup */ bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word ? : 8; if (bits_per_word % 16 == 0) drv_data->ops = &bfin_sport_transfer_ops_u16; else drv_data->ops = &bfin_sport_transfer_ops_u8; bfin_write(&drv_data->regs->tcr2, bits_per_word - 1); bfin_write(&drv_data->regs->tfsdiv, bits_per_word - 1); bfin_write(&drv_data->regs->rcr2, bits_per_word - 1); drv_data->state = RUNNING_STATE; if (drv_data->cs_change) bfin_sport_spi_cs_active(chip); dev_dbg(drv_data->dev, "now pumping a transfer: width is %d, len is %d\n", bits_per_word, transfer->len); /* PIO mode write then read */ dev_dbg(drv_data->dev, "doing IO transfer\n"); bfin_sport_spi_enable(drv_data); if (full_duplex) { /* full duplex mode */ BUG_ON((drv_data->tx_end - drv_data->tx) != (drv_data->rx_end - drv_data->rx)); drv_data->ops->duplex(drv_data); if (drv_data->tx != drv_data->tx_end) tranf_success = 0; } else if (drv_data->tx != NULL) { /* write only half duplex */ drv_data->ops->write(drv_data); if (drv_data->tx != drv_data->tx_end) tranf_success = 0; } else if (drv_data->rx != NULL) { /* read only half duplex */ drv_data->ops->read(drv_data); if (drv_data->rx != drv_data->rx_end) tranf_success = 0; } bfin_sport_spi_disable(drv_data); if (!tranf_success) { dev_dbg(drv_data->dev, "IO write error!\n"); drv_data->state = ERROR_STATE; } else { /* Update total byte transfered */ message->actual_length += transfer->len; /* Move to next transfer of this msg */ drv_data->state = bfin_sport_spi_next_transfer(drv_data); if (drv_data->cs_change) bfin_sport_spi_cs_deactive(chip); } /* Schedule next transfer tasklet */ tasklet_schedule(&drv_data->pump_transfers); } /* pop a msg from queue and kick off real transfer */ static void 
bfin_sport_spi_pump_messages(struct work_struct *work)
{
	struct bfin_sport_spi_master_data *drv_data;
	unsigned long flags;
	struct spi_message *next_msg;

	drv_data = container_of(work, struct bfin_sport_spi_master_data, pump_messages);

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&drv_data->lock, flags);
	if (list_empty(&drv_data->queue) || !drv_data->run) {
		/* pumper kicked off but no work to do */
		drv_data->busy = 0;
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return;
	}

	/* Make sure we are not already running a message */
	if (drv_data->cur_msg) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return;
	}

	/* Extract head of queue */
	next_msg = list_entry(drv_data->queue.next,
		struct spi_message, queue);

	drv_data->cur_msg = next_msg;

	/* Setup the SSP using the per chip configuration */
	drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);

	list_del_init(&drv_data->cur_msg->queue);

	/* Initialize message state */
	drv_data->cur_msg->state = START_STATE;
	drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
		struct spi_transfer, transfer_list);
	/* program controller registers for this chip and assert CS */
	bfin_sport_spi_restore_state(drv_data);
	dev_dbg(drv_data->dev, "got a message to pump, "
		"state is set to: baud %d, cs_gpio %i, ctl 0x%x\n",
		drv_data->cur_chip->baud, drv_data->cur_chip->cs_gpio,
		drv_data->cur_chip->ctl_reg);

	dev_dbg(drv_data->dev,
		"the first transfer len is %d\n",
		drv_data->cur_transfer->len);

	/* Mark as busy and launch transfers */
	tasklet_schedule(&drv_data->pump_transfers);

	drv_data->busy = 1;
	spin_unlock_irqrestore(&drv_data->lock, flags);
}

/*
 * got a msg to transfer, queue it in drv_data->queue.
 * And kick off message pumper
 */
static int
bfin_sport_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct bfin_sport_spi_master_data *drv_data = spi_master_get_devdata(spi->master);
	unsigned long flags;

	spin_lock_irqsave(&drv_data->lock, flags);

	/* refuse new work once the queue has been stopped */
	if (!drv_data->run) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return -ESHUTDOWN;
	}

	msg->actual_length = 0;
	msg->status = -EINPROGRESS;
	msg->state = START_STATE;

	dev_dbg(&spi->dev, "adding an msg in transfer()\n");
	list_add_tail(&msg->queue, &drv_data->queue);

	/* wake the pump only if it is not already processing */
	if (drv_data->run && !drv_data->busy)
		queue_work(drv_data->workqueue, &drv_data->pump_messages);

	spin_unlock_irqrestore(&drv_data->lock, flags);

	return 0;
}

/* Called every time common spi devices change state */
static int
bfin_sport_spi_setup(struct spi_device *spi)
{
	struct bfin_sport_spi_slave_data *chip, *first = NULL;
	int ret;

	/* Only alloc (or use chip_info) on first setup */
	chip = spi_get_ctldata(spi);
	if (chip == NULL) {
		struct bfin5xx_spi_chip *chip_info;

		chip = first = kzalloc(sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		/* platform chip_info isn't required */
		chip_info = spi->controller_data;
		if (chip_info) {
			/*
			 * DITFS and TDTYPE are only thing we don't set, but
			 * they probably shouldn't be changed by people.
			 */
			if (chip_info->ctl_reg || chip_info->enable_dma) {
				ret = -EINVAL;
				dev_err(&spi->dev, "don't set ctl_reg/enable_dma fields");
				goto error;
			}
			chip->cs_chg_udelay = chip_info->cs_chg_udelay;
			chip->idle_tx_val = chip_info->idle_tx_val;
		}
	}

	if (spi->bits_per_word % 8) {
		dev_err(&spi->dev, "%d bits_per_word is not supported\n",
				spi->bits_per_word);
		ret = -EINVAL;
		goto error;
	}

	/* translate common spi framework into our register
	 * following configure contents are same for tx and rx.
	 */

	if (spi->mode & SPI_CPHA)
		chip->ctl_reg &= ~TCKFE;
	else
		chip->ctl_reg |= TCKFE;

	if (spi->mode & SPI_LSB_FIRST)
		chip->ctl_reg |= TLSBIT;
	else
		chip->ctl_reg &= ~TLSBIT;

	/* Sport in master mode */
	chip->ctl_reg |= ITCLK | ITFS | TFSR | LATFS | LTFS;

	chip->baud = bfin_sport_hz_to_spi_baud(spi->max_speed_hz);

	chip->cs_gpio = spi->chip_select;
	/*
	 * NOTE(review): gpio_request() runs on every setup() call, not only
	 * the first — a repeated setup() on the same device looks like it
	 * would fail with -EBUSY; confirm against the SPI core's call pattern.
	 */
	ret = gpio_request(chip->cs_gpio, spi->modalias);
	if (ret)
		goto error;

	dev_dbg(&spi->dev, "setup spi chip %s, width is %d\n",
			spi->modalias, spi->bits_per_word);
	dev_dbg(&spi->dev, "ctl_reg is 0x%x, GPIO is %i\n",
			chip->ctl_reg, spi->chip_select);

	spi_set_ctldata(spi, chip);

	bfin_sport_spi_cs_deactive(chip);

	return ret;

 error:
	kfree(first);	/* only frees a chip allocated in THIS call (NULL otherwise) */

	return ret;
}

/*
 * callback for spi framework.
 * clean driver specific data
 */
static void
bfin_sport_spi_cleanup(struct spi_device *spi)
{
	struct bfin_sport_spi_slave_data *chip = spi_get_ctldata(spi);

	if (!chip)
		return;

	gpio_free(chip->cs_gpio);

	kfree(chip);
}

/* One-time init of the message queue, tasklet, and pump workqueue */
static int
bfin_sport_spi_init_queue(struct bfin_sport_spi_master_data *drv_data)
{
	INIT_LIST_HEAD(&drv_data->queue);
	spin_lock_init(&drv_data->lock);

	drv_data->run = false;
	drv_data->busy = 0;

	/* init transfer tasklet */
	tasklet_init(&drv_data->pump_transfers,
		     bfin_sport_spi_pump_transfers, (unsigned long)drv_data);

	/* init messages workqueue */
	INIT_WORK(&drv_data->pump_messages, bfin_sport_spi_pump_messages);
	drv_data->workqueue =
	    create_singlethread_workqueue(dev_name(drv_data->master->dev.parent));
	if (drv_data->workqueue == NULL)
		return -EBUSY;

	return 0;
}

/* Mark the queue runnable and kick the message pump */
static int
bfin_sport_spi_start_queue(struct bfin_sport_spi_master_data *drv_data)
{
	unsigned long flags;

	spin_lock_irqsave(&drv_data->lock, flags);

	if (drv_data->run || drv_data->busy) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return -EBUSY;
	}

	drv_data->run = true;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;
	drv_data->cur_chip = NULL;
	spin_unlock_irqrestore(&drv_data->lock, flags);

	queue_work(drv_data->workqueue, &drv_data->pump_messages);

	return 0;
}
static inline int bfin_sport_spi_stop_queue(struct bfin_sport_spi_master_data *drv_data) { unsigned long flags; unsigned limit = 500; int status = 0; spin_lock_irqsave(&drv_data->lock, flags); /* * This is a bit lame, but is optimized for the common execution path. * A wait_queue on the drv_data->busy could be used, but then the common * execution path (pump_messages) would be required to call wake_up or * friends on every SPI message. Do this instead */ drv_data->run = false; while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) { spin_unlock_irqrestore(&drv_data->lock, flags); msleep(10); spin_lock_irqsave(&drv_data->lock, flags); } if (!list_empty(&drv_data->queue) || drv_data->busy) status = -EBUSY; spin_unlock_irqrestore(&drv_data->lock, flags); return status; } static inline int bfin_sport_spi_destroy_queue(struct bfin_sport_spi_master_data *drv_data) { int status; status = bfin_sport_spi_stop_queue(drv_data); if (status) return status; destroy_workqueue(drv_data->workqueue); return 0; } static int __devinit bfin_sport_spi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct bfin5xx_spi_master *platform_info; struct spi_master *master; struct resource *res, *ires; struct bfin_sport_spi_master_data *drv_data; int status; platform_info = dev->platform_data; /* Allocate master with space for drv_data */ master = spi_alloc_master(dev, sizeof(*master) + 16); if (!master) { dev_err(dev, "cannot alloc spi_master\n"); return -ENOMEM; } drv_data = spi_master_get_devdata(master); drv_data->master = master; drv_data->dev = dev; drv_data->pin_req = platform_info->pin_req; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; master->bus_num = pdev->id; master->num_chipselect = platform_info->num_chipselect; master->cleanup = bfin_sport_spi_cleanup; master->setup = bfin_sport_spi_setup; master->transfer = bfin_sport_spi_transfer; /* Find and map our resources */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) 
{ dev_err(dev, "cannot get IORESOURCE_MEM\n"); status = -ENOENT; goto out_error_get_res; } drv_data->regs = ioremap(res->start, resource_size(res)); if (drv_data->regs == NULL) { dev_err(dev, "cannot map registers\n"); status = -ENXIO; goto out_error_ioremap; } ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!ires) { dev_err(dev, "cannot get IORESOURCE_IRQ\n"); status = -ENODEV; goto out_error_get_ires; } drv_data->err_irq = ires->start; /* Initial and start queue */ status = bfin_sport_spi_init_queue(drv_data); if (status) { dev_err(dev, "problem initializing queue\n"); goto out_error_queue_alloc; } status = bfin_sport_spi_start_queue(drv_data); if (status) { dev_err(dev, "problem starting queue\n"); goto out_error_queue_alloc; } status = request_irq(drv_data->err_irq, sport_err_handler, 0, "sport_spi_err", drv_data); if (status) { dev_err(dev, "unable to request sport err irq\n"); goto out_error_irq; } status = peripheral_request_list(drv_data->pin_req, DRV_NAME); if (status) { dev_err(dev, "requesting peripherals failed\n"); goto out_error_peripheral; } /* Register with the SPI framework */ platform_set_drvdata(pdev, drv_data); status = spi_register_master(master); if (status) { dev_err(dev, "problem registering spi master\n"); goto out_error_master; } dev_info(dev, "%s, regs_base@%p\n", DRV_DESC, drv_data->regs); return 0; out_error_master: peripheral_free_list(drv_data->pin_req); out_error_peripheral: free_irq(drv_data->err_irq, drv_data); out_error_irq: out_error_queue_alloc: bfin_sport_spi_destroy_queue(drv_data); out_error_get_ires: iounmap(drv_data->regs); out_error_ioremap: out_error_get_res: spi_master_put(master); return status; } /* stop hardware and remove the driver */ static int __devexit bfin_sport_spi_remove(struct platform_device *pdev) { struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev); int status = 0; if (!drv_data) return 0; /* Remove the queue */ status = bfin_sport_spi_destroy_queue(drv_data); if (status) 
return status; /* Disable the SSP at the peripheral and SOC level */ bfin_sport_spi_disable(drv_data); /* Disconnect from the SPI framework */ spi_unregister_master(drv_data->master); peripheral_free_list(drv_data->pin_req); /* Prevent double remove */ platform_set_drvdata(pdev, NULL); return 0; } #ifdef CONFIG_PM static int bfin_sport_spi_suspend(struct platform_device *pdev, pm_message_t state) { struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev); int status; status = bfin_sport_spi_stop_queue(drv_data); if (status) return status; /* stop hardware */ bfin_sport_spi_disable(drv_data); return status; } static int bfin_sport_spi_resume(struct platform_device *pdev) { struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev); int status; /* Enable the SPI interface */ bfin_sport_spi_enable(drv_data); /* Start the queue running */ status = bfin_sport_spi_start_queue(drv_data); if (status) dev_err(drv_data->dev, "problem resuming queue\n"); return status; } #else # define bfin_sport_spi_suspend NULL # define bfin_sport_spi_resume NULL #endif static struct platform_driver bfin_sport_spi_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, .probe = bfin_sport_spi_probe, .remove = __devexit_p(bfin_sport_spi_remove), .suspend = bfin_sport_spi_suspend, .resume = bfin_sport_spi_resume, }; module_platform_driver(bfin_sport_spi_driver);
gpl-2.0
Krizthian/BeliveKernel_otus
arch/arm/mach-omap2/clkt_iclk.c
7989
2049
/* * OMAP2/3 interface clock control * * Copyright (C) 2011 Nokia Corporation * Paul Walmsley * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #undef DEBUG #include <linux/kernel.h> #include <linux/clk.h> #include <linux/io.h> #include <plat/clock.h> #include <plat/prcm.h> #include "clock.h" #include "clock2xxx.h" #include "cm2xxx_3xxx.h" #include "cm-regbits-24xx.h" /* Private functions */ /* XXX */ void omap2_clkt_iclk_allow_idle(struct clk *clk) { u32 v, r; r = ((__force u32)clk->enable_reg ^ (CM_AUTOIDLE ^ CM_ICLKEN)); v = __raw_readl((__force void __iomem *)r); v |= (1 << clk->enable_bit); __raw_writel(v, (__force void __iomem *)r); } /* XXX */ void omap2_clkt_iclk_deny_idle(struct clk *clk) { u32 v, r; r = ((__force u32)clk->enable_reg ^ (CM_AUTOIDLE ^ CM_ICLKEN)); v = __raw_readl((__force void __iomem *)r); v &= ~(1 << clk->enable_bit); __raw_writel(v, (__force void __iomem *)r); } /* Public data */ const struct clkops clkops_omap2_iclk_dflt_wait = { .enable = omap2_dflt_clk_enable, .disable = omap2_dflt_clk_disable, .find_companion = omap2_clk_dflt_find_companion, .find_idlest = omap2_clk_dflt_find_idlest, .allow_idle = omap2_clkt_iclk_allow_idle, .deny_idle = omap2_clkt_iclk_deny_idle, }; const struct clkops clkops_omap2_iclk_dflt = { .enable = omap2_dflt_clk_enable, .disable = omap2_dflt_clk_disable, .allow_idle = omap2_clkt_iclk_allow_idle, .deny_idle = omap2_clkt_iclk_deny_idle, }; const struct clkops clkops_omap2_iclk_idle_only = { .allow_idle = omap2_clkt_iclk_allow_idle, .deny_idle = omap2_clkt_iclk_deny_idle, }; const struct clkops clkops_omap2_mdmclk_dflt_wait = { .enable = omap2_dflt_clk_enable, .disable = omap2_dflt_clk_disable, .find_companion = omap2_clk_dflt_find_companion, .find_idlest = omap2_clk_dflt_find_idlest, .allow_idle = omap2_clkt_iclk_allow_idle, .deny_idle = omap2_clkt_iclk_deny_idle, };
gpl-2.0
no-cannabis/linux-matrix1
fs/yaffs2/yaffs_attribs.c
7989
3014
/*
 * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
 *
 * Copyright (C) 2002-2010 Aleph One Ltd.
 *   for Toby Churchill Ltd and Brightstar Engineering
 *
 * Created by Charles Manning <charles@aleph1.co.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include "yaffs_guts.h"
#include "yaffs_attribs.h"

/* Copy ownership, timestamps and rdev from an on-flash object header
 * into the in-memory object. */
void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh)
{
	obj->yst_uid = oh->yst_uid;
	obj->yst_gid = oh->yst_gid;
	obj->yst_atime = oh->yst_atime;
	obj->yst_mtime = oh->yst_mtime;
	obj->yst_ctime = oh->yst_ctime;
	obj->yst_rdev = oh->yst_rdev;
}

/* Inverse of yaffs_load_attribs: serialise the in-memory attributes
 * into an object header about to be written out. */
void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj)
{
	oh->yst_uid = obj->yst_uid;
	oh->yst_gid = obj->yst_gid;
	oh->yst_atime = obj->yst_atime;
	oh->yst_mtime = obj->yst_mtime;
	oh->yst_ctime = obj->yst_ctime;
	oh->yst_rdev = obj->yst_rdev;
}

/* Stamp mtime with the current time; optionally mirror it into
 * atime (do_a) and/or ctime (do_c). */
void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c)
{
	obj->yst_mtime = Y_CURRENT_TIME;
	if (do_a)
		obj->yst_atime = obj->yst_mtime;
	if (do_c)
		obj->yst_ctime = obj->yst_mtime;
}

/* Initialise ownership/rdev and all three timestamps on a new object. */
void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev)
{
	yaffs_load_current_time(obj, 1, 1);
	obj->yst_rdev = rdev;
	obj->yst_uid = uid;
	obj->yst_gid = gid;
}

/*
 * Logical size of an object: file size for regular files, alias length
 * for symlinks, 0 otherwise.  Resolves hardlinks to the equivalent
 * object first.
 */
loff_t yaffs_get_file_size(struct yaffs_obj *obj)
{
	YCHAR *alias = NULL;

	obj = yaffs_get_equivalent_obj(obj);

	switch (obj->variant_type) {
	case YAFFS_OBJECT_TYPE_FILE:
		return obj->variant.file_variant.file_size;
	case YAFFS_OBJECT_TYPE_SYMLINK:
		alias = obj->variant.symlink_variant.alias;
		if (!alias)
			return 0;
		return strnlen(alias, YAFFS_MAX_ALIAS_LENGTH);
	default:
		return 0;
	}
}

/*
 * Apply a VFS setattr request (struct iattr) to the object, honouring
 * only the fields flagged in attr->ia_valid, then flush the updated
 * object header.  Always returns YAFFS_OK.
 */
int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr)
{
	unsigned int valid = attr->ia_valid;

	if (valid & ATTR_MODE)
		obj->yst_mode = attr->ia_mode;
	if (valid & ATTR_UID)
		obj->yst_uid = attr->ia_uid;
	if (valid & ATTR_GID)
		obj->yst_gid = attr->ia_gid;

	if (valid & ATTR_ATIME)
		obj->yst_atime = Y_TIME_CONVERT(attr->ia_atime);
	if (valid & ATTR_CTIME)
		obj->yst_ctime = Y_TIME_CONVERT(attr->ia_ctime);
	if (valid & ATTR_MTIME)
		obj->yst_mtime = Y_TIME_CONVERT(attr->ia_mtime);

	if (valid & ATTR_SIZE)
		yaffs_resize_file(obj, attr->ia_size);

	yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);

	return YAFFS_OK;
}

/*
 * Fill a struct iattr from the object for getattr; every field is
 * populated and flagged valid.  Note Y_TIME_CONVERT is used as an
 * lvalue here (it selects the convertible member of the kernel time
 * struct).  Always returns YAFFS_OK.
 */
int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr)
{
	unsigned int valid = 0;

	attr->ia_mode = obj->yst_mode;
	valid |= ATTR_MODE;
	attr->ia_uid = obj->yst_uid;
	valid |= ATTR_UID;
	attr->ia_gid = obj->yst_gid;
	valid |= ATTR_GID;

	Y_TIME_CONVERT(attr->ia_atime) = obj->yst_atime;
	valid |= ATTR_ATIME;
	Y_TIME_CONVERT(attr->ia_ctime) = obj->yst_ctime;
	valid |= ATTR_CTIME;
	Y_TIME_CONVERT(attr->ia_mtime) = obj->yst_mtime;
	valid |= ATTR_MTIME;

	attr->ia_size = yaffs_get_file_size(obj);
	valid |= ATTR_SIZE;

	attr->ia_valid = valid;

	return YAFFS_OK;
}
gpl-2.0
SlimRoms/kernel_xiaomi_armani
arch/sh/drivers/pci/ops-sh4.c
12341
2527
/* * Generic SH-4 / SH-4A PCIC operations (SH7751, SH7780). * * Copyright (C) 2002 - 2009 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License v2. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/pci.h> #include <linux/io.h> #include <linux/spinlock.h> #include <asm/addrspace.h> #include "pci-sh4.h" /* * Direct access to PCI hardware... */ #define CONFIG_CMD(bus, devfn, where) \ (0x80000000 | (bus->number << 16) | (devfn << 8) | (where & ~3)) /* * Functions for accessing PCI configuration space with type 1 accesses */ static int sh4_pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { struct pci_channel *chan = bus->sysdata; unsigned long flags; u32 data; /* * PCIPDR may only be accessed as 32 bit words, * so we must do byte alignment by hand */ raw_spin_lock_irqsave(&pci_config_lock, flags); pci_write_reg(chan, CONFIG_CMD(bus, devfn, where), SH4_PCIPAR); data = pci_read_reg(chan, SH4_PCIPDR); raw_spin_unlock_irqrestore(&pci_config_lock, flags); switch (size) { case 1: *val = (data >> ((where & 3) << 3)) & 0xff; break; case 2: *val = (data >> ((where & 2) << 3)) & 0xffff; break; case 4: *val = data; break; default: return PCIBIOS_FUNC_NOT_SUPPORTED; } return PCIBIOS_SUCCESSFUL; } /* * Since SH4 only does 32bit access we'll have to do a read, * mask,write operation. * We'll allow an odd byte offset, though it should be illegal. 
*/ static int sh4_pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { struct pci_channel *chan = bus->sysdata; unsigned long flags; int shift; u32 data; raw_spin_lock_irqsave(&pci_config_lock, flags); pci_write_reg(chan, CONFIG_CMD(bus, devfn, where), SH4_PCIPAR); data = pci_read_reg(chan, SH4_PCIPDR); raw_spin_unlock_irqrestore(&pci_config_lock, flags); switch (size) { case 1: shift = (where & 3) << 3; data &= ~(0xff << shift); data |= ((val & 0xff) << shift); break; case 2: shift = (where & 2) << 3; data &= ~(0xffff << shift); data |= ((val & 0xffff) << shift); break; case 4: data = val; break; default: return PCIBIOS_FUNC_NOT_SUPPORTED; } pci_write_reg(chan, data, SH4_PCIPDR); return PCIBIOS_SUCCESSFUL; } struct pci_ops sh4_pci_ops = { .read = sh4_pci_read, .write = sh4_pci_write, }; int __attribute__((weak)) pci_fixup_pcic(struct pci_channel *chan) { /* Nothing to do. */ return 0; }
gpl-2.0
NicholasPace/android_kernel_motorola_msm8916
drivers/tty/serial/8250/8250_exar_st16c554.c
12341
1151
/* * Written by Paul B Schroeder < pschroeder "at" uplogix "dot" com > * Based on 8250_boca. * * Copyright (C) 2005 Russell King. * Data taken from include/asm-i386/serial.h * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/serial_8250.h> #define PORT(_base,_irq) \ { \ .iobase = _base, \ .irq = _irq, \ .uartclk = 1843200, \ .iotype = UPIO_PORT, \ .flags = UPF_BOOT_AUTOCONF, \ } static struct plat_serial8250_port exar_data[] = { PORT(0x100, 5), PORT(0x108, 5), PORT(0x110, 5), PORT(0x118, 5), { }, }; static struct platform_device exar_device = { .name = "serial8250", .id = PLAT8250_DEV_EXAR_ST16C554, .dev = { .platform_data = exar_data, }, }; static int __init exar_init(void) { return platform_device_register(&exar_device); } module_init(exar_init); MODULE_AUTHOR("Paul B Schroeder"); MODULE_DESCRIPTION("8250 serial probe module for Exar cards"); MODULE_LICENSE("GPL");
gpl-2.0
profglavcho/tesr1
fs/fat/fatent.c
54
16871
/* * Copyright (C) 2004, OGAWA Hirofumi * Released under GPL v2. */ #include <linux/module.h> #include <linux/fs.h> #include <linux/msdos_fs.h> #include <linux/blkdev.h> #include "fat.h" struct fatent_operations { void (*ent_blocknr)(struct super_block *, int, int *, sector_t *); void (*ent_set_ptr)(struct fat_entry *, int); int (*ent_bread)(struct super_block *, struct fat_entry *, int, sector_t); int (*ent_get)(struct fat_entry *); void (*ent_put)(struct fat_entry *, int); int (*ent_next)(struct fat_entry *); }; static DEFINE_SPINLOCK(fat12_entry_lock); static void fat12_ent_blocknr(struct super_block *sb, int entry, int *offset, sector_t *blocknr) { struct msdos_sb_info *sbi = MSDOS_SB(sb); int bytes = entry + (entry >> 1); WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry); *offset = bytes & (sb->s_blocksize - 1); *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits); } static void fat_ent_blocknr(struct super_block *sb, int entry, int *offset, sector_t *blocknr) { struct msdos_sb_info *sbi = MSDOS_SB(sb); int bytes = (entry << sbi->fatent_shift); WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry); *offset = bytes & (sb->s_blocksize - 1); *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits); } static void fat12_ent_set_ptr(struct fat_entry *fatent, int offset) { struct buffer_head **bhs = fatent->bhs; if (fatent->nr_bhs == 1) { WARN_ON(offset >= (bhs[0]->b_size - 1)); fatent->u.ent12_p[0] = bhs[0]->b_data + offset; fatent->u.ent12_p[1] = bhs[0]->b_data + (offset + 1); } else { WARN_ON(offset != (bhs[0]->b_size - 1)); fatent->u.ent12_p[0] = bhs[0]->b_data + offset; fatent->u.ent12_p[1] = bhs[1]->b_data; } } static void fat16_ent_set_ptr(struct fat_entry *fatent, int offset) { WARN_ON(offset & (2 - 1)); fatent->u.ent16_p = (__le16 *)(fatent->bhs[0]->b_data + offset); } static void fat32_ent_set_ptr(struct fat_entry *fatent, int offset) { WARN_ON(offset & (4 - 1)); fatent->u.ent32_p = (__le32 *)(fatent->bhs[0]->b_data + offset); } 
/* Read the block(s) holding a FAT12 entry; two blocks when the entry
 * straddles a boundary. */
static int fat12_ent_bread(struct super_block *sb, struct fat_entry *fatent,
			   int offset, sector_t blocknr)
{
	struct buffer_head **bhs = fatent->bhs;

	WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
	fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
	bhs[0] = sb_bread(sb, blocknr);
	if (!bhs[0])
		goto err;

	if ((offset + 1) < sb->s_blocksize)
		fatent->nr_bhs = 1;
	else {
		/* This entry is block boundary, it needs the next block */
		blocknr++;
		bhs[1] = sb_bread(sb, blocknr);
		if (!bhs[1])
			goto err_brelse;
		fatent->nr_bhs = 2;
	}
	fat12_ent_set_ptr(fatent, offset);
	return 0;

err_brelse:
	brelse(bhs[0]);
err:
	fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)", (llu)blocknr);
	return -EIO;
}

/* FAT16/32: read the single block holding the entry and set the pointer. */
static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent,
			 int offset, sector_t blocknr)
{
	struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;

	WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
	fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
	fatent->bhs[0] = sb_bread(sb, blocknr);
	if (!fatent->bhs[0]) {
		fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
		       (llu)blocknr);
		return -EIO;
	}
	fatent->nr_bhs = 1;
	ops->ent_set_ptr(fatent, offset);
	return 0;
}

/* Decode a 12-bit entry (odd entries use the high nibbles) and map
 * bad-cluster marks to FAT_ENT_EOF. */
static int fat12_ent_get(struct fat_entry *fatent)
{
	u8 **ent12_p = fatent->u.ent12_p;
	int next;

	spin_lock(&fat12_entry_lock);
	if (fatent->entry & 1)
		next = (*ent12_p[0] >> 4) | (*ent12_p[1] << 4);
	else
		next = (*ent12_p[1] << 8) | *ent12_p[0];
	spin_unlock(&fat12_entry_lock);

	next &= 0x0fff;
	if (next >= BAD_FAT12)
		next = FAT_ENT_EOF;
	return next;
}

/* Decode a little-endian 16-bit entry. */
static int fat16_ent_get(struct fat_entry *fatent)
{
	int next = le16_to_cpu(*fatent->u.ent16_p);
	WARN_ON((unsigned long)fatent->u.ent16_p & (2 - 1));
	if (next >= BAD_FAT16)
		next = FAT_ENT_EOF;
	return next;
}

/* Decode a 32-bit entry; only the low 28 bits are the cluster number. */
static int fat32_ent_get(struct fat_entry *fatent)
{
	int next = le32_to_cpu(*fatent->u.ent32_p) & 0x0fffffff;
	WARN_ON((unsigned long)fatent->u.ent32_p & (4 - 1));
	if (next >= BAD_FAT32)
		next = FAT_ENT_EOF;
	return next;
}

/* Encode a 12-bit entry under fat12_entry_lock and dirty its block(s). */
static void fat12_ent_put(struct fat_entry *fatent, int new)
{
	u8 **ent12_p = fatent->u.ent12_p;

	if (new == FAT_ENT_EOF)
		new = EOF_FAT12;

	spin_lock(&fat12_entry_lock);
	if (fatent->entry & 1) {
		*ent12_p[0] = (new << 4) | (*ent12_p[0] & 0x0f);
		*ent12_p[1] = new >> 4;
	} else {
		*ent12_p[0] = new & 0xff;
		*ent12_p[1] = (*ent12_p[1] & 0xf0) | (new >> 8);
	}
	spin_unlock(&fat12_entry_lock);

	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
	if (fatent->nr_bhs == 2)
		mark_buffer_dirty_inode(fatent->bhs[1], fatent->fat_inode);
}

/* Encode a 16-bit entry and dirty its block. */
static void fat16_ent_put(struct fat_entry *fatent, int new)
{
	if (new == FAT_ENT_EOF)
		new = EOF_FAT16;
	*fatent->u.ent16_p = cpu_to_le16(new);
	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}

/* Encode a 32-bit entry, preserving the reserved top nibble on disk. */
static void fat32_ent_put(struct fat_entry *fatent, int new)
{
	if (new == FAT_ENT_EOF)
		new = EOF_FAT32;

	WARN_ON(new & 0xf0000000);
	new |= le32_to_cpu(*fatent->u.ent32_p) & ~0x0fffffff;
	*fatent->u.ent32_p = cpu_to_le32(new);
	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}

/* Advance a FAT12 iterator one entry, handling the block-straddle case
 * (drops bhs[0] and shifts bhs[1] down when crossing into the next
 * block).  Returns 1 while more entries remain in the mapped block(s). */
static int fat12_ent_next(struct fat_entry *fatent)
{
	u8 **ent12_p = fatent->u.ent12_p;
	struct buffer_head **bhs = fatent->bhs;
	u8 *nextp = ent12_p[1] + 1 + (fatent->entry & 1);

	fatent->entry++;
	if (fatent->nr_bhs == 1) {
		WARN_ON(ent12_p[0] > (u8 *)(bhs[0]->b_data +
					(bhs[0]->b_size - 2)));
		WARN_ON(ent12_p[1] > (u8 *)(bhs[0]->b_data +
					(bhs[0]->b_size - 1)));
		if (nextp < (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1))) {
			ent12_p[0] = nextp - 1;
			ent12_p[1] = nextp;
			return 1;
		}
	} else {
		WARN_ON(ent12_p[0] != (u8 *)(bhs[0]->b_data +
					(bhs[0]->b_size - 1)));
		WARN_ON(ent12_p[1] != (u8 *)bhs[1]->b_data);
		ent12_p[0] = nextp - 1;
		ent12_p[1] = nextp;
		brelse(bhs[0]);
		bhs[0] = bhs[1];
		fatent->nr_bhs = 1;
		return 1;
	}
	ent12_p[0] = NULL;
	ent12_p[1] = NULL;
	return 0;
}

/* Advance a FAT16 iterator; 0 when the mapped block is exhausted. */
static int fat16_ent_next(struct fat_entry *fatent)
{
	const struct buffer_head *bh = fatent->bhs[0];
	fatent->entry++;
	if (fatent->u.ent16_p < (__le16 *)(bh->b_data + (bh->b_size - 2))) {
		fatent->u.ent16_p++;
		return 1;
	}
	fatent->u.ent16_p = NULL;
	return 0;
}

/* Advance a FAT32 iterator; 0 when the mapped block is exhausted. */
static int fat32_ent_next(struct fat_entry *fatent)
{
	const struct buffer_head *bh = fatent->bhs[0];
	fatent->entry++;
	if (fatent->u.ent32_p < (__le32 *)(bh->b_data + (bh->b_size - 4))) {
		fatent->u.ent32_p++;
		return 1;
	}
	fatent->u.ent32_p = NULL;
	return 0;
}

static struct fatent_operations fat12_ops = {
	.ent_blocknr	= fat12_ent_blocknr,
	.ent_set_ptr	= fat12_ent_set_ptr,
	.ent_bread	= fat12_ent_bread,
	.ent_get	= fat12_ent_get,
	.ent_put	= fat12_ent_put,
	.ent_next	= fat12_ent_next,
};

static struct fatent_operations fat16_ops = {
	.ent_blocknr	= fat_ent_blocknr,
	.ent_set_ptr	= fat16_ent_set_ptr,
	.ent_bread	= fat_ent_bread,
	.ent_get	= fat16_ent_get,
	.ent_put	= fat16_ent_put,
	.ent_next	= fat16_ent_next,
};

static struct fatent_operations fat32_ops = {
	.ent_blocknr	= fat_ent_blocknr,
	.ent_set_ptr	= fat32_ent_set_ptr,
	.ent_bread	= fat_ent_bread,
	.ent_get	= fat32_ent_get,
	.ent_put	= fat32_ent_put,
	.ent_next	= fat32_ent_next,
};

/* fat_lock guards all FAT allocation/free/count operations. */
static inline void lock_fat(struct msdos_sb_info *sbi)
{
	mutex_lock(&sbi->fat_lock);
}

static inline void unlock_fat(struct msdos_sb_info *sbi)
{
	mutex_unlock(&sbi->fat_lock);
}

/* Select the entry ops/shift for this volume's FAT width at mount time. */
void fat_ent_access_init(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);

	mutex_init(&sbi->fat_lock);

	switch (sbi->fat_bits) {
	case 32:
		sbi->fatent_shift = 2;
		sbi->fatent_ops = &fat32_ops;
		break;
	case 16:
		sbi->fatent_shift = 1;
		sbi->fatent_ops = &fat16_ops;
		break;
	case 12:
		sbi->fatent_shift = -1;	/* entries are 1.5 bytes; no shift */
		sbi->fatent_ops = &fat12_ops;
		break;
	}
}

/* If the cached buffer(s) already cover (offset, blocknr), just repoint
 * the entry pointer and return 1; otherwise return 0 so the caller
 * re-reads.  Handles the FAT12 straddle sub-cases explicitly. */
static inline int fat_ent_update_ptr(struct super_block *sb,
				     struct fat_entry *fatent,
				     int offset, sector_t blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct fatent_operations *ops = sbi->fatent_ops;
	struct buffer_head **bhs = fatent->bhs;

	/* Is this fatent's blocks including this entry? */
	if (!fatent->nr_bhs || bhs[0]->b_blocknr != blocknr)
		return 0;
	if (sbi->fat_bits == 12) {
		if ((offset + 1) < sb->s_blocksize) {
			/* This entry is on bhs[0]. */
			if (fatent->nr_bhs == 2) {
				brelse(bhs[1]);
				fatent->nr_bhs = 1;
			}
		} else {
			/* This entry needs the next block. */
			if (fatent->nr_bhs != 2)
				return 0;
			if (bhs[1]->b_blocknr != (blocknr + 1))
				return 0;
		}
	}
	ops->ent_set_ptr(fatent, offset);
	return 1;
}

/* Read FAT entry @entry into @fatent and return its value (next cluster
 * / FAT_ENT_FREE / FAT_ENT_EOF), or -EIO on an out-of-range entry. */
int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
	struct fatent_operations *ops = sbi->fatent_ops;
	int err, offset;
	sector_t blocknr;

	if (entry < FAT_START_ENT || sbi->max_cluster <= entry) {
		fatent_brelse(fatent);
		/*
		 * The original is too much log, modify it to the ratelimit version.
		 */
		//fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
		fat_fs_error_ratelimit(sb, "invalid access to FAT (entry 0x%08x)",
			entry);
		return -EIO;
	}

	fatent_set_entry(fatent, entry);
	ops->ent_blocknr(sb, entry, &offset, &blocknr);

	if (!fat_ent_update_ptr(sb, fatent, offset, blocknr)) {
		fatent_brelse(fatent);
		err = ops->ent_bread(sb, fatent, offset, blocknr);
		if (err)
			return err;
	}
	return ops->ent_get(fatent);
}

/* FIXME: We can write the blocks as more big chunk. */
/* Copy dirty FAT blocks to every backup FAT; syncs each copy when the
 * filesystem is mounted synchronous. */
static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
			  int nr_bhs)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct buffer_head *c_bh;
	int err, n, copy;

	err = 0;
	for (copy = 1; copy < sbi->fats; copy++) {
		sector_t backup_fat = sbi->fat_length * copy;

		for (n = 0; n < nr_bhs; n++) {
			c_bh = sb_getblk(sb, backup_fat + bhs[n]->b_blocknr);
			if (!c_bh) {
				err = -ENOMEM;
				goto error;
			}
			memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
			set_buffer_uptodate(c_bh);
			mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
			if (sb->s_flags & MS_SYNCHRONOUS)
				err = sync_dirty_buffer(c_bh);
			brelse(c_bh);
			if (err)
				goto error;
		}
	}
error:
	return err;
}

/* Store @new into the entry, optionally sync it, then mirror to the
 * backup FAT(s). */
int fat_ent_write(struct inode *inode, struct fat_entry *fatent,
		  int new, int wait)
{
	struct super_block *sb = inode->i_sb;
	struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	int err;

	ops->ent_put(fatent, new);
	if (wait) {
		err = fat_sync_bhs(fatent->bhs, fatent->nr_bhs);
		if (err)
			return err;
	}
	return fat_mirror_bhs(sb, fatent->bhs, fatent->nr_bhs);
}

/* Advance the iterator, bounded by max_cluster. */
static inline int fat_ent_next(struct msdos_sb_info *sbi,
			       struct fat_entry *fatent)
{
	if (sbi->fatent_ops->ent_next(fatent)) {
		if (fatent->entry < sbi->max_cluster)
			return 1;
	}
	return 0;
}

/* Drop any cached buffers and (re)read the block for fatent->entry. */
static inline int fat_ent_read_block(struct super_block *sb,
				     struct fat_entry *fatent)
{
	struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	sector_t blocknr;
	int offset;

	fatent_brelse(fatent);
	ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
	return ops->ent_bread(sb, fatent, offset, blocknr);
}

/* Accumulate the entry's buffer heads into @bhs (deduplicated), taking
 * an extra reference on each newly added bh. */
static void fat_collect_bhs(struct buffer_head **bhs, int *nr_bhs,
			    struct fat_entry *fatent)
{
	int n, i;

	for (n = 0; n < fatent->nr_bhs; n++) {
		for (i = 0; i < *nr_bhs; i++) {
			if (fatent->bhs[n] == bhs[i])
				break;
		}
		if (i == *nr_bhs) {
			get_bh(fatent->bhs[n]);
			bhs[i] = fatent->bhs[n];
			(*nr_bhs)++;
		}
	}
}

/*
 * Allocate @nr_cluster clusters into @cluster[], chaining them together
 * (last one gets EOF).  Scans from prev_free, wrapping at max_cluster;
 * on failure after partial allocation the chain is freed again.
 */
int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent, prev_ent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, count, err, nr_bhs, idx_clus;

	BUG_ON(nr_cluster > (MAX_BUF_PER_PAGE / 2));	/* fixed limit */

	lock_fat(sbi);
	if (sbi->free_clusters != -1 && sbi->free_clus_valid &&
	    sbi->free_clusters < nr_cluster) {
		unlock_fat(sbi);
		return -ENOSPC;
	}

	err = nr_bhs = idx_clus = 0;
	count = FAT_START_ENT;
	fatent_init(&prev_ent);
	fatent_init(&fatent);
	fatent_set_entry(&fatent, sbi->prev_free + 1);
	while (count < sbi->max_cluster) {
		if (fatent.entry >= sbi->max_cluster)
			fatent.entry = FAT_START_ENT;
		fatent_set_entry(&fatent, fatent.entry);
		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto out;

		/* Find the free entries in a block */
		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
				int entry = fatent.entry;

				/* make the cluster chain */
				ops->ent_put(&fatent, FAT_ENT_EOF);
				if (prev_ent.nr_bhs)
					ops->ent_put(&prev_ent, entry);

				fat_collect_bhs(bhs, &nr_bhs, &fatent);

				sbi->prev_free = entry;
				if (sbi->free_clusters != -1)
					sbi->free_clusters--;
				sb->s_dirt = 1;

				cluster[idx_clus] = entry;
				idx_clus++;
				if (idx_clus == nr_cluster)
					goto out;

				/*
				 * fat_collect_bhs() gets ref-count of bhs,
				 * so we can still use the prev_ent.
				 */
				prev_ent = fatent;
			}
			count++;
			if (count == sbi->max_cluster)
				break;
		} while (fat_ent_next(sbi, &fatent));
	}

	/* Couldn't allocate the free entries */
	sbi->free_clusters = 0;
	sbi->free_clus_valid = 1;
	sb->s_dirt = 1;
	err = -ENOSPC;

out:
	unlock_fat(sbi);
	fatent_brelse(&fatent);
	if (!err) {
		if (inode_needs_sync(inode))
			err = fat_sync_bhs(bhs, nr_bhs);
		if (!err)
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
	}
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);

	if (err && idx_clus)
		fat_free_clusters(inode, cluster[0]);

	return err;
}

/*
 * Walk the chain starting at @cluster, marking every entry free,
 * batching contiguous runs for discard (when enabled) and flushing
 * buffer heads in MAX_BUF_PER_PAGE batches.
 */
int fat_free_clusters(struct inode *inode, int cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, err, nr_bhs;
	int first_cl = cluster;

	nr_bhs = 0;
	fatent_init(&fatent);
	lock_fat(sbi);
	do {
		cluster = fat_ent_read(inode, &fatent, cluster);
		if (cluster < 0) {
			err = cluster;
			goto error;
		} else if (cluster == FAT_ENT_FREE) {
			fat_fs_error(sb, "%s: deleting FAT entry beyond EOF",
				     __func__);
			err = -EIO;
			goto error;
		}

		if (sbi->options.discard) {
			/*
			 * Issue discard for the sectors we no longer
			 * care about, batching contiguous clusters
			 * into one request
			 */
			if (cluster != fatent.entry + 1) {
				int nr_clus = fatent.entry - first_cl + 1;

				sb_issue_discard(sb,
					fat_clus_to_blknr(sbi, first_cl),
					nr_clus * sbi->sec_per_clus,
					GFP_NOFS, 0);

				first_cl = cluster;
			}
		}

		ops->ent_put(&fatent, FAT_ENT_FREE);
		if (sbi->free_clusters != -1) {
			sbi->free_clusters++;
			sb->s_dirt = 1;
		}

		if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
			if (sb->s_flags & MS_SYNCHRONOUS) {
				err = fat_sync_bhs(bhs, nr_bhs);
				if (err)
					goto error;
			}
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
			if (err)
				goto error;
			for (i = 0; i < nr_bhs; i++)
				brelse(bhs[i]);
			nr_bhs = 0;
		}
		fat_collect_bhs(bhs, &nr_bhs, &fatent);
	} while (cluster != FAT_ENT_EOF);

	if (sb->s_flags & MS_SYNCHRONOUS) {
		err = fat_sync_bhs(bhs, nr_bhs);
		if (err)
			goto error;
	}
	err = fat_mirror_bhs(sb, bhs, nr_bhs);
error:
	fatent_brelse(&fatent);
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);
	unlock_fat(sbi);

	return err;
}
EXPORT_SYMBOL_GPL(fat_free_clusters);

/* 128kb is the whole sectors for FAT12 and FAT16 */
#define FAT_READA_SIZE		(128 * 1024)

/* Kick off readahead for @reada_blocks FAT blocks starting at the
 * entry's block. */
static void fat_ent_reada(struct super_block *sb, struct fat_entry *fatent,
			  unsigned long reada_blocks)
{
	struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	sector_t blocknr;
	int i, offset;

	ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);

	for (i = 0; i < reada_blocks; i++)
		sb_breadahead(sb, blocknr + i);
}

/* Scan the whole FAT (with readahead) to (re)compute the free-cluster
 * count; cached result is reused when still valid. */
int fat_count_free_clusters(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	unsigned long reada_blocks, reada_mask, cur_block;
	int err = 0, free;

	lock_fat(sbi);
	if (sbi->free_clusters != -1 && sbi->free_clus_valid)
		goto out;

	reada_blocks = FAT_READA_SIZE >> sb->s_blocksize_bits;
	reada_mask = reada_blocks - 1;
	cur_block = 0;

	free = 0;
	fatent_init(&fatent);
	fatent_set_entry(&fatent, FAT_START_ENT);
	while (fatent.entry < sbi->max_cluster) {
		/* readahead of fat blocks */
		if ((cur_block & reada_mask) == 0) {
			unsigned long rest = sbi->fat_length - cur_block;
			fat_ent_reada(sb, &fatent, min(reada_blocks, rest));
		}
		cur_block++;

		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto out;

		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE)
				free++;
		} while (fat_ent_next(sbi, &fatent));
	}
	sbi->free_clusters = free;
	sbi->free_clus_valid = 1;
	sb->s_dirt = 1;
	fatent_brelse(&fatent);
out:
	unlock_fat(sbi);
	return err;
}
gpl-2.0
wenboqiu/TestOpenGL
TestOpenGL/cocos2d/cocos/ui/UIListView.cpp
54
13635
/**************************************************************************** Copyright (c) 2013-2014 Chukong Technologies Inc. http://www.cocos2d-x.org Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
****************************************************************************/ #include "ui/UIListView.h" #include "ui/UIHelper.h" NS_CC_BEGIN namespace ui { IMPLEMENT_CLASS_GUI_INFO(ListView) ListView::ListView(): _model(nullptr), _gravity(Gravity::CENTER_VERTICAL), _itemsMargin(0.0f), _curSelectedIndex(0), _refreshViewDirty(true), _listViewEventListener(nullptr), _listViewEventSelector(nullptr), _eventCallback(nullptr) { this->setTouchEnabled(true); } ListView::~ListView() { _listViewEventListener = nullptr; _listViewEventSelector = nullptr; _items.clear(); CC_SAFE_RELEASE(_model); } ListView* ListView::create() { ListView* widget = new (std::nothrow) ListView(); if (widget && widget->init()) { widget->autorelease(); return widget; } CC_SAFE_DELETE(widget); return nullptr; } bool ListView::init() { if (ScrollView::init()) { setLayoutType(Type::VERTICAL); return true; } return false; } void ListView::setItemModel(Widget *model) { if (nullptr == model) { CCLOG("Can't set a nullptr to item model!"); return; } CC_SAFE_RELEASE_NULL(_model); _model = model; CC_SAFE_RETAIN(_model); } void ListView::updateInnerContainerSize() { switch (_direction) { case Direction::VERTICAL: { size_t length = _items.size(); float totalHeight = (length - 1) * _itemsMargin; for (auto& item : _items) { totalHeight += item->getContentSize().height; } float finalWidth = _contentSize.width; float finalHeight = totalHeight; setInnerContainerSize(Size(finalWidth, finalHeight)); break; } case Direction::HORIZONTAL: { size_t length = _items.size(); float totalWidth = (length - 1) * _itemsMargin; for (auto& item : _items) { totalWidth += item->getContentSize().width; } float finalWidth = totalWidth; float finalHeight = _contentSize.height; setInnerContainerSize(Size(finalWidth, finalHeight)); break; } default: break; } } void ListView::remedyVerticalLayoutParameter(LinearLayoutParameter* layoutParameter, ssize_t itemIndex) { CCASSERT(nullptr != layoutParameter, "Layout parameter can't be nullptr!"); 
switch (_gravity) { case Gravity::LEFT: layoutParameter->setGravity(LinearLayoutParameter::LinearGravity::LEFT); break; case Gravity::RIGHT: layoutParameter->setGravity(LinearLayoutParameter::LinearGravity::RIGHT); break; case Gravity::CENTER_HORIZONTAL: layoutParameter->setGravity(LinearLayoutParameter::LinearGravity::CENTER_HORIZONTAL); break; default: break; } if (0 == itemIndex) { layoutParameter->setMargin(Margin::ZERO); } else { layoutParameter->setMargin(Margin(0.0f, _itemsMargin, 0.0f, 0.0f)); } } void ListView::remedyHorizontalLayoutParameter(LinearLayoutParameter* layoutParameter, ssize_t itemIndex) { CCASSERT(nullptr != layoutParameter, "Layout parameter can't be nullptr!"); switch (_gravity) { case Gravity::TOP: layoutParameter->setGravity(LinearLayoutParameter::LinearGravity::TOP); break; case Gravity::BOTTOM: layoutParameter->setGravity(LinearLayoutParameter::LinearGravity::BOTTOM); break; case Gravity::CENTER_VERTICAL: layoutParameter->setGravity(LinearLayoutParameter::LinearGravity::CENTER_VERTICAL); break; default: break; } if (0 == itemIndex) { layoutParameter->setMargin(Margin::ZERO); } else { layoutParameter->setMargin(Margin(_itemsMargin, 0.0f, 0.0f, 0.0f)); } } void ListView::remedyLayoutParameter(Widget *item) { CCASSERT(nullptr != item, "ListView Item can't be nullptr!"); LinearLayoutParameter* linearLayoutParameter = (LinearLayoutParameter*)(item->getLayoutParameter()); bool isLayoutParameterExists = true; if (!linearLayoutParameter) { linearLayoutParameter = LinearLayoutParameter::create(); isLayoutParameterExists = false; } ssize_t itemIndex = getIndex(item); switch (_direction) { case Direction::VERTICAL: { this->remedyVerticalLayoutParameter(linearLayoutParameter, itemIndex); break; } case Direction::HORIZONTAL: { this->remedyHorizontalLayoutParameter(linearLayoutParameter, itemIndex); break; } default: break; } if (!isLayoutParameterExists) { item->setLayoutParameter(linearLayoutParameter); } } void ListView::pushBackDefaultItem() { if 
(nullptr == _model) { return; } Widget* newItem = _model->clone(); remedyLayoutParameter(newItem); addChild(newItem); _refreshViewDirty = true; } void ListView::insertDefaultItem(ssize_t index) { if (nullptr == _model) { return; } Widget* newItem = _model->clone(); _items.insert(index, newItem); ScrollView::addChild(newItem); remedyLayoutParameter(newItem); _refreshViewDirty = true; } void ListView::pushBackCustomItem(Widget* item) { remedyLayoutParameter(item); addChild(item); _refreshViewDirty = true; } void ListView::addChild(cocos2d::Node *child, int zOrder, int tag) { ScrollView::addChild(child, zOrder, tag); Widget* widget = dynamic_cast<Widget*>(child); if (nullptr != widget) { _items.pushBack(widget); } } void ListView::addChild(cocos2d::Node *child) { ListView::addChild(child, child->getLocalZOrder(), child->getName()); } void ListView::addChild(cocos2d::Node *child, int zOrder) { ListView::addChild(child, zOrder, child->getName()); } void ListView::addChild(Node* child, int zOrder, const std::string &name) { ScrollView::addChild(child, zOrder, name); Widget* widget = dynamic_cast<Widget*>(child); if (nullptr != widget) { _items.pushBack(widget); } } void ListView::removeChild(cocos2d::Node *child, bool cleaup) { Widget* widget = dynamic_cast<Widget*>(child); if (nullptr != widget) { _items.eraseObject(widget); } ScrollView::removeChild(child, cleaup); } void ListView::removeAllChildren() { this->removeAllChildrenWithCleanup(true); } void ListView::removeAllChildrenWithCleanup(bool cleanup) { ScrollView::removeAllChildrenWithCleanup(cleanup); _items.clear(); } void ListView::insertCustomItem(Widget* item, ssize_t index) { _items.insert(index, item); ScrollView::addChild(item); remedyLayoutParameter(item); _refreshViewDirty = true; } void ListView::removeItem(ssize_t index) { Widget* item = getItem(index); if (nullptr == item) { return; } removeChild(item, true); _refreshViewDirty = true; } void ListView::removeLastItem() { removeItem(_items.size() -1); } 
void ListView::removeAllItems() { removeAllChildren(); } Widget* ListView::getItem(ssize_t index)const { if (index < 0 || index >= _items.size()) { return nullptr; } return _items.at(index); } Vector<Widget*>& ListView::getItems() { return _items; } ssize_t ListView::getIndex(Widget *item) const { if (nullptr == item) { return -1; } return _items.getIndex(item); } void ListView::setGravity(Gravity gravity) { if (_gravity == gravity) { return; } _gravity = gravity; _refreshViewDirty = true; } void ListView::setItemsMargin(float margin) { if (_itemsMargin == margin) { return; } _itemsMargin = margin; _refreshViewDirty = true; } float ListView::getItemsMargin()const { return _itemsMargin; } void ListView::setDirection(Direction dir) { switch (dir) { case Direction::NONE: case Direction::BOTH: break; case Direction::VERTICAL: setLayoutType(Type::VERTICAL); break; case Direction::HORIZONTAL: setLayoutType(Type::HORIZONTAL); break; default: return; break; } ScrollView::setDirection(dir); } void ListView::requestRefreshView() { _refreshViewDirty = true; } void ListView::refreshView() { ssize_t length = _items.size(); for (int i=0; i<length; i++) { Widget* item = _items.at(i); item->setLocalZOrder(i); remedyLayoutParameter(item); } updateInnerContainerSize(); } void ListView::forceDoLayout() { if (_refreshViewDirty) { refreshView(); _refreshViewDirty = false; } this->_innerContainer->forceDoLayout(); } void ListView::doLayout() { Layout::doLayout(); if (_refreshViewDirty) { refreshView(); _refreshViewDirty = false; } } void ListView::addEventListenerListView(Ref *target, SEL_ListViewEvent selector) { _listViewEventListener = target; _listViewEventSelector = selector; } void ListView::addEventListener(const ccListViewCallback& callback) { _eventCallback = callback; } void ListView::selectedItemEvent(TouchEventType event) { this->retain(); switch (event) { case TouchEventType::BEGAN: { if (_listViewEventListener && _listViewEventSelector) { 
(_listViewEventListener->*_listViewEventSelector)(this, LISTVIEW_ONSELECTEDITEM_START); } if (_eventCallback) { _eventCallback(this,EventType::ON_SELECTED_ITEM_START); } if (_ccEventCallback) { _ccEventCallback(this, static_cast<int>(EventType::ON_SELECTED_ITEM_START)); } } break; default: { if (_listViewEventListener && _listViewEventSelector) { (_listViewEventListener->*_listViewEventSelector)(this, LISTVIEW_ONSELECTEDITEM_END); } if (_eventCallback) { _eventCallback(this, EventType::ON_SELECTED_ITEM_END); } if (_ccEventCallback) { _ccEventCallback(this, static_cast<int>(EventType::ON_SELECTED_ITEM_END)); } } break; } this->release(); } void ListView::interceptTouchEvent(TouchEventType event, Widget *sender, Touch* touch) { ScrollView::interceptTouchEvent(event, sender, touch); if (event != TouchEventType::MOVED) { Widget* parent = sender; while (parent) { if (parent && parent->getParent() == _innerContainer) { _curSelectedIndex = getIndex(parent); break; } parent = dynamic_cast<Widget*>(parent->getParent()); } if (sender->isHighlighted()) { selectedItemEvent(event); } } } ssize_t ListView::getCurSelectedIndex() const { return _curSelectedIndex; } void ListView::onSizeChanged() { ScrollView::onSizeChanged(); _refreshViewDirty = true; } std::string ListView::getDescription() const { return "ListView"; } Widget* ListView::createCloneInstance() { return ListView::create(); } void ListView::copyClonedWidgetChildren(Widget* model) { auto& arrayItems = static_cast<ListView*>(model)->getItems(); for (auto& item : arrayItems) { pushBackCustomItem(item->clone()); } } void ListView::copySpecialProperties(Widget *widget) { ListView* listViewEx = dynamic_cast<ListView*>(widget); if (listViewEx) { ScrollView::copySpecialProperties(widget); setItemModel(listViewEx->_model); setItemsMargin(listViewEx->_itemsMargin); setGravity(listViewEx->_gravity); _listViewEventListener = listViewEx->_listViewEventListener; _listViewEventSelector = listViewEx->_listViewEventSelector; 
_eventCallback = listViewEx->_eventCallback; } } } NS_CC_END
gpl-2.0
wejoncy/linux-digilent
drivers/gpu/drm/via/via_drv.c
54
3597
/* * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved. * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sub license, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #include <linux/module.h> #include "drmP.h" #include "via_drm.h" #include "via_drv.h" #include "drm_pciids.h" static int via_driver_open(struct drm_device *dev, struct drm_file *file) { struct via_file_private *file_priv; DRM_DEBUG_DRIVER("\n"); file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL); if (!file_priv) return -ENOMEM; file->driver_priv = file_priv; INIT_LIST_HEAD(&file_priv->obj_list); return 0; } void via_driver_postclose(struct drm_device *dev, struct drm_file *file) { struct via_file_private *file_priv = file->driver_priv; kfree(file_priv); } static struct pci_device_id pciidlist[] = { viadrv_PCI_IDS }; static const struct file_operations via_driver_fops = { .owner = THIS_MODULE, .open = drm_open, .release = drm_release, .unlocked_ioctl = drm_ioctl, .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, #ifdef CONFIG_COMPAT .compat_ioctl = drm_compat_ioctl, #endif .llseek = noop_llseek, }; static struct drm_driver driver = { .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, .load = via_driver_load, .unload = via_driver_unload, .open = via_driver_open, .preclose = via_reclaim_buffers_locked, .postclose = via_driver_postclose, .context_dtor = via_final_context, .get_vblank_counter = via_get_vblank_counter, .enable_vblank = via_enable_vblank, .disable_vblank = via_disable_vblank, .irq_preinstall = via_driver_irq_preinstall, .irq_postinstall = via_driver_irq_postinstall, .irq_uninstall = via_driver_irq_uninstall, .irq_handler = via_driver_irq_handler, .dma_quiescent = via_driver_dma_quiescent, .lastclose = via_lastclose, .ioctls = via_ioctls, .fops = &via_driver_fops, .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, .major = DRIVER_MAJOR, .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, }; static struct pci_driver via_pci_driver = { .name = DRIVER_NAME, .id_table = pciidlist, }; static int __init via_init(void) { driver.num_ioctls = via_max_ioctl; via_init_command_verifier(); return 
drm_pci_init(&driver, &via_pci_driver); } static void __exit via_exit(void) { drm_pci_exit(&driver, &via_pci_driver); } module_init(via_init); module_exit(via_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL and additional rights");
gpl-2.0
GalaxyTab4/bliss_kernel_samsung_matisse
drivers/net/wireless/ipsecdrvtl/bx.c
310
25505
/* 'src_pm_pgpNetPMSA.c' Obfuscated by COBF (Version 1.06 2006-01-07 by BB) at Wed Jan 15 12:08:55 2014 */ #include"cobf.h" #ifdef _WIN32 #if defined( UNDER_CE) && defined( bb355) || ! defined( bb356) #define bb354 1 #define bb330 1 #else #define bb352 bb334 #define bb351 1 #define bb340 1 #endif #define bb347 1 #include"uncobf.h" #include<ndis.h> #include"cobf.h" #ifdef UNDER_CE #include"uncobf.h" #include<ndiswan.h> #include"cobf.h" #endif #include"uncobf.h" #include<stdio.h> #include<basetsd.h> #include"cobf.h" bba bbs bbl bbf, *bb1;bba bbs bbe bbq, *bb94;bba bb135 bb123, *bb332; bba bbs bbl bb39, *bb72;bba bbs bb135 bbk, *bb59;bba bbe bbu, *bb134; bba bbh bbf*bb89; #ifdef bb311 bba bbd bb60, *bb122; #endif #else #include"uncobf.h" #include<linux/module.h> #include<linux/ctype.h> #include<linux/time.h> #include<linux/slab.h> #include"cobf.h" #ifndef bb120 #define bb120 #ifdef _WIN32 #include"uncobf.h" #include<wtypes.h> #include"cobf.h" #else #ifdef bb119 #include"uncobf.h" #include<linux/types.h> #include"cobf.h" #else #include"uncobf.h" #include<stddef.h> #include<sys/types.h> #include"cobf.h" #endif #endif #ifdef _WIN32 bba bb113 bb249; #else bba bbe bbu, *bb134, *bb236; #define bb201 1 #define bb202 0 bba bb270 bb211, *bb268, *bb234;bba bbe bb207, *bb217, *bb212;bba bbs bbq, *bb94, *bb223;bba bb6 bb274, *bb265;bba bbs bb6 bb280, *bb254; bba bb6 bb116, *bb285;bba bbs bb6 bb63, *bb264;bba bb63 bb242, *bb229 ;bba bb63 bb267, *bb235;bba bb116 bb113, *bb240;bba bb227 bb210;bba bb255 bb123;bba bb245 bb83;bba bb115 bb118;bba bb115 bb272; #ifdef bb226 bba bb288 bb39, *bb72;bba bb241 bbk, *bb59;bba bb253 bbd, *bb29;bba bb208 bb56, *bb112; #else bba bb224 bb39, *bb72;bba bb291 bbk, *bb59;bba bb238 bbd, *bb29;bba bb262 bb56, *bb112; #endif bba bb39 bbf, *bb1, *bb271;bba bbk bb228, *bb251, *bb277;bba bbk bb231 , *bb260, *bb225;bba bbd bb60, *bb122, *bb248;bba bb83 bb36, *bb279, * bb292;bba bbd bb232, *bb213, *bb243;bba bb118 bb281, *bb269, *bb275; bba bb56 bb222, 
*bb289, *bb247; #define bb140 bbb bba bbb*bb205, *bb77;bba bbh bbb*bb290;bba bbl bb252;bba bbl*bb259; bba bbh bbl*bb82; #if defined( bb119) bba bbe bb114; #endif bba bb114 bb20;bba bb20*bb215;bba bbh bb20*bb187; #if defined( bb282) || defined( bb209) bba bb20 bb37;bba bb20 bb111; #else bba bbl bb37;bba bbs bbl bb111; #endif bba bbh bb37*bb278;bba bb37*bb246;bba bb60 bb257, *bb261;bba bbb* bb106;bba bb106*bb258; #define bb263( bb35) bbi bb35##__ { bbe bb230; }; bba bbi bb35##__ * \ bb35 bba bbi{bb36 bb191,bb216,bb284,bb276;}bb266, *bb220, *bb273;bba bbi{ bb36 bb8,bb193;}bb244, *bb250, *bb237;bba bbi{bb36 bb206,bb221;}bb286 , *bb233, *bb214; #endif bba bbh bbf*bb89; #endif bba bbf bb103; #define IN #define OUT #ifdef _DEBUG #define bb145( bbc) bb32( bbc) #else #define bb145( bbc) ( bbb)( bbc) #endif bba bbe bb161, *bb173; #define bb287 0 #define bb315 1 #define bb299 2 #define bb319 3 #define bb357 4 bba bbe bb349;bba bbb*bb121; #endif #ifdef _WIN32 #ifndef UNDER_CE #define bb31 bb341 #define bb43 bb338 bba bbs bb6 bb31;bba bb6 bb43; #endif #else #endif #ifdef _WIN32 bbb*bb128(bb31 bb47);bbb bb109(bbb* );bbb*bb137(bb31 bb159,bb31 bb47); #else #define bb128( bbc) bb146(1, bbc, bb139) #define bb109( bbc) bb348( bbc) #define bb137( bbc, bbn) bb146( bbc, bbn, bb139) #endif #ifdef _WIN32 #define bb32( bbc) bb339( bbc) #else #ifdef _DEBUG bbe bb144(bbh bbl*bb95,bbh bbl*bb25,bbs bb239); #define bb32( bbc) ( bbb)(( bbc) || ( bb144(# bbc, __FILE__, __LINE__ \ ))) #else #define bb32( bbc) (( bbb)0) #endif #endif bb43 bb302(bb43*bb320); #ifndef _WIN32 bbe bb328(bbh bbl*bbg);bbe bb323(bbh bbl*bb19,...); #endif #ifdef _WIN32 bba bb343 bb97; #define bb141( bbc) bb345( bbc) #define bb143( bbc) bb358( bbc) #define bb133( bbc) bb353( bbc) #define bb132( bbc) bb333( bbc) #else bba bb350 bb97; #define bb141( bbc) ( bbb)( * bbc = bb344( bbc)) #define bb143( bbc) (( bbb)0) #define bb133( bbc) bb337( bbc) #define bb132( bbc) bb342( bbc) #endif #define bb956 bb53(0x0800) #define bb1140 
bb53(0x0806) #define bb937 bb53(0x01f4) #define bb939 bb53(0x1194) #define bb1134 bb53(0x4000) #define bb1117 bb53(0x2000) #define bb1108 bb53(0x1FFF) #define bb1067( bb8) (( bb8) & bb53(0x2000 | 0x1FFF)) #define bb993( bb8) ((( bb195( bb8)) & 0x1FFF) << 3) #define bb979( bb8) ((( bb8) & bb53(0x1FFF)) == 0) #define bb496( bb8) (( bb8) & bb53(0x2000)) #define bb988( bb8) (!( bb496( bb8))) #pragma pack(push, 1) bba bbi{bbf bb369[6 ];bbf bb1008[6 ];bbk bb375;}bb365, *bb376;bba bbi{ bbf bb444[6 ];bbk bb375;}bb1084, *bb1093;bba bbi{bbf bb938:4 ;bbf bb1089 :4 ;bbf bb1055;bbk bb367;bbk bb892;bbk bb566;bbf bb1002;bbf bb283;bbk bb614;bbd bb309;bbd bb203;}bb326, *bb318;bba bbi{bbk bb1035;bbk bb1083 ;bbf bb1038;bbf bb1046;bbk bb1063;bbf bb1082[6 ];bbd bb1039;bbf bb1092 [6 ];bbd bb1090;}bb1075, *bb1064; #pragma pack(pop) bba bbi{bbk bb218;bbk bb424;bbk bb1005;bbk bb322;}bb415, *bb346;bba bbi{bbk bb218;bbk bb591;bbd bb543;bbd bb923;bbf bb92;bbf bb172;bbk bb158;bbk bb322;bbk bb1018;}bb483, *bb313;bba bbi{bbf bb1078;bbf bb1070;bbf bb1088;bbf bb1040;bbd bb1065;bbk bb1050;bbk bb373;bbd bb1033;bbd bb1079;bbd bb1066;bbd bb1062;bbf bb1085[16 ];bbf bb1052[64 ] ;bbf bb25[128 ];bbf bb1034[64 ];}bb1045, *bb1074;bba bbi{bbd bb309;bbd bb203;bbf bb918;bbf bb283;bbk bb914;}bb610, *bb560; #if defined( _WIN32) #define bb53( bbc) (((( bbc) & 0XFF00) >> 8) | ((( bbc) & 0X00FF) << \ 8)) #define bb195( bbc) ( bb53( bbc)) #define bb445( bbc) (((( bbc) & 0XFF000000) >> 24) | ((( bbc) & \ 0X00FF0000) >> 8) | ((( bbc) & 0X0000FF00) << 8) | ((( bbc) & \ 0X000000FF) << 24)) #define bb493( bbc) ( bb445( bbc)) #endif bbk bb928(bbh bbb*bb297);bbk bb859(bbh bbb*bb510,bbe bb22);bba bb83 bb4;bb10{bb98=0 ,bb364=-12000 ,bb336=-11999 ,bb379=-11998 ,bb658=-11997 , bb811=-11996 ,bb717=-11995 ,bb871=-11994 ,bb780=-11992 ,bb713=-11991 , bb834=-11990 ,bb730=-11989 ,bb837=-11988 ,bb645=-11987 ,bb900=-11986 , bb752=-11985 ,bb687=-11984 ,bb813=-11983 ,bb648=-11982 ,bb844=-11981 , bb904=-11980 ,bb667=-11979 ,bb801=-11978 
,bb851=-11977 ,bb578=-11976 , bb782=-11975 ,bb762=-11960 ,bb803=-11959 ,bb677=-11500 ,bb725=-11499 , bb849=-11498 ,bb791=-11497 ,bb878=-11496 ,bb772=-11495 ,bb898=-11494 , bb769=-11493 ,bb843=-11492 ,bb887=-11491 ,bb865=-11490 ,bb727=-11489 , bb660=-11488 ,bb866=-11487 ,bb873=-11486 ,bb655=-11485 ,bb638=-11484 , bb905=-11483 ,bb830=-11482 ,bb617=-11481 ,bb691=-11480 ,bb757=-11479 , bb637=-11478 ,bb653=-11477 ,bb759=-11476 ,bb864=-11475 ,bb862=-11474 , bb706=-11473 ,bb695=-11472 ,bb635=-11460 ,bb840=-11450 ,bb729=-11449 , bb698=-11448 ,bb726=-11447 ,bb847=-11446 ,bb624=-11445 ,bb894=-11444 , bb701=-11443 ,bb694=-11440 ,bb781=-11439 ,bb774=-11438 ,bb793=-11437 , bb659=-11436 ,bb746=-11435 ,bb845=-11420 ,bb522=-11419 ,bb567=-11418 , bb673=-11417 ,bb627=-11416 ,bb775=-11415 ,bb788=-11414 ,bb724=-11413 , bb854=-11412 ,bb715=-11411 ,bb665=-11410 ,bb623=-11409 ,bb700=-11408 , bb897=-11407 ,bb907=-11406 ,bb688=-11405 ,bb666=-11404 ,bb639=-11403 , bb754=-11402 ,bb764=-11401 ,bb664=-11400 ,bb880=-11399 ,bb883=-11398 , bb756=-11397 ,bb670=-11396 ,bb875=-11395 ,bb671=-11394 ,bb827=-11393 , bb800=-11392 ,bb678=-11391 ,bb735=-11390 ,bb720=-11389 ,bb767=-11388 , bb903=-11387 ,bb901=-11386 ,bb733=-11385 ,bb690=-11384 ,bb829=-11383 , bb642=-11382 ,bb768=-11381 ,bb744=-11380 ,bb777=-11379 ,bb620=-11378 , bb745=-11377 ,bb795=-11376 ,bb749=-11375 ,bb650=-11374 ,bb841=-11373 , bb672=-11372 ,bb846=-11371 ,bb797=-11370 ,bb771=-11369 ,bb835=-11368 , bb750=-11367 ,bb852=-11366 ,bb751=-11365 ,bb842=-11364 ,bb807=-11363 , bb394=-11350 ,bb714=bb394,bb702=-11349 ,bb902=-11348 ,bb802=-11347 ,bb693 =-11346 ,bb640=-11345 ,bb884=-11344 ,bb805=-11343 ,bb708=-11342 ,bb848=- 11341 ,bb753=-11340 ,bb689=-11339 ,bb395=-11338 ,bb679=-11337 ,bb748=bb395 ,bb705=-11330 ,bb810=-11329 ,bb681=-11328 ,bb785=-11327 ,bb761=-11326 , bb643=-11325 ,bb814=-11324 ,bb634=-11320 ,bb822=-11319 ,bb858=-11318 , bb737=-11317 ,bb618=-11316 ,bb839=-11315 ,bb821=-11314 ,bb716=-11313 , bb633=-11312 ,bb893=-11300 ,bb778=-11299 
,bb790=-11298 ,bb692=-11297 , bb668=-11296 ,bb863=-11295 ,bb819=-11294 ,bb649=-11293 ,bb685=-11292 , bb886=-11291 ,bb809=-11290 ,bb787=-11289 ,bb879=-11288 ,bb630=-11287 , bb789=-11286 ,bb647=-11285 ,bb816=-11284 ,bb806=-11283 ,bb704=-11282 , bb663=-11281 ,bb703=-11280 ,bb675=-11279 ,bb731=-11250 ,bb784=-11249 , bb652=-11248 ,bb741=-11247 ,bb654=-11246 ,bb891=-11245 ,bb696=-11244 , bb747=-11243 ,bb619=-11242 ,bb860=-11240 ,bb626=-11239 ,bb738=-11238 , bb783=-11237 ,bb684=-11150 ,bb686=-11100 ,bb831=-11099 ,bb836=-11098 , bb697=-11097 ,bb622=-11096 ,bb786=-11095 ,bb632=-11094 ,bb877=-11093 , bb828=-11092 ,bb896=-11091 ,bb798=-11090 ,bb874=-11089 ,bb832=-11088 , bb908=-11087 ,bb850=-11086 ,bb763=-11085 ,bb776=-11050 ,bb656=-11049 , bb876=-10999 ,bb758=-10998 ,bb680=-10997 ,bb743=-10996 ,bb888=-10995 , bb669=-10994 ,bb870=-10993 ,bb899=-10992 ,bb766=-10991 ,bb683=-10990 , bb616=-10989 ,bb794=-10988 ,bb710=-10979 ,bb856=-10978 ,bb629=-10977 , bb885=-10976 ,bb722=-10975 ,bb799=-10974 ,};bba bbi bb452{bb1 bb74;bbd bb125;bbd bb181;bbi bb452*bb93;}bbx;bb4 bb472(bbx*bb824,bbd bb910,bbx *bb857,bbd bb895,bbd bb539);bb4 bb524(bbx*bbj,bbd bb92,bbh bbb*bb95, bbd bb47);bb4 bb576(bbx*bbj,bbd bb92,bbb*bb130,bbd bb47);bbu bb823( bbx*bbj,bbd bb92,bbh bbb*bb95,bbd bb47);bb4 bb596(bbx*bb87,bbf bb102, bbx*bb58);bb4 bb674(bbx*bb87,bbu bb177,bbf*bb408);bb4 bb940(bbx*bb58, bbf*bb397);bb4 bb958(bbh bbf*bb397,bbx*bb58);bb4 bb541(bbx*bb51,bbf bb102,bbd*bb944);bb4 bb929(bbx*bb87,bbf bb102,bbf bb408,bbx*bb58);bbd bb517(bbx*bb51);bbk bb536(bbx*bb51);bbb bb526(bbk bb151,bbx*bb51);bbb bb545(bbx*bb51);bbb bb978(bbx*bb51,bbd*bb26);bbb bb1001(bbx*bb51,bbd* bb26);bbb bb991(bbx*bb51,bbd bb26);bbb bb925(bbx*bb51,bbd bb26);bbb bb984(bbx*bb51);bbu bb1016(bbf*bb51);bba bbi bb1010*bb1026;bba bbi bb1029*bb1006;bba bbi bb994*bb1027;bba bbi bb1028*bb1015;bba bbi bb999 *bb1022;bba bbi bb1019*bb987;bba bb10{bb552=0 ,bb594=1 ,bb584=2 ,bb755= 3 ,bb581=4 ,bb561=5 ,bb575=6 ,bb563=7 ,bb586=9 ,}bb418;bba bb10{bb601=0 
, bb992,bb603,bb1012,bb909,bb921,bb920,bb919,bb927,bb926,bb913,}bb515; #pragma pack(push, 8) #ifdef _MSC_VER #pragma warning (disable:4200) #endif bba bbf bb178[4 ];bba bb10{bb1655=0 ,bb1471=1 ,}bb1395;bba bb10{bb1531=0 ,bb1721=1 ,bb1562=2 ,bb1443=3 ,bb1663=4 ,bb1496=5 ,bb1639=6 ,bb1519=7 , bb1608=8 ,bb1522=9 ,bb1683=10 ,bb1512=11 ,bb1698=12 ,bb1713=13 ,bb1720=14 , bb1427=15 ,bb1458=16 ,bb1397=17 ,bb1601=18 ,bb1694=19 ,bb1646=20 ,bb1587=21 ,bb1510=22 ,bb1478=23 ,bb1604=24 ,bb1607=25 ,bb1454=26 ,bb1583=27 ,bb1378= 28 ,bb1707=29 ,bb1692=30 ,bb1575=16300 ,bb1617=16301 ,bb1729=16384 ,bb1541= 24576 ,bb1469=24577 ,bb1453=24578 ,bb1482=34793 ,bb1384=40500 ,}bb625;bba bb10{bb1466=0 ,bb1527=1 ,bb1461=2 ,bb1430=3 ,bb1702=4 ,bb1391=5 ,bb1674=6 , bb1479=7 ,bb1533=8 ,bb1402=9 ,bb1447=21 ,bb1493=22 ,bb1502=23 ,bb1451=24 , bb1544=25 ,bb1513=26 ,bb1467=27 ,bb1387=28 ,bb1483=29 ,bb1494=80 ,}bb765; bba bb10{bb1641=0 ,bb1701=1 ,bb1697=2 ,bb1489=3 ,bb1524=4 ,}bb1628;bba bb10 {bb1609=0 ,bb1347=1 ,bb1164=2 ,bb1220=3 ,bb1290=4 ,bb1054=61440 ,bb1359= 61441 ,bb1114=61443 ,bb1299=61444 ,}bb482;bba bb10{bb1703=0 ,bb1499=1 , bb1564=2 ,}bb1686;bba bb10{bb1618=0 ,bb1726,bb1444,bb1459,bb1568,bb1501 ,bb1640,bb1470,bb1528,bb1497,bb1399,bb1592,}bb728;bba bb10{bb1682=0 , bb1365=2 ,bb1328=3 ,bb1379=4 ,bb1322=9 ,bb1296=12 ,bb1362=13 ,bb1310=14 , bb1348=249 ,}bb621;bba bb10{bb1327=0 ,bb1298=1 ,bb1286=2 ,bb1433=3 ,bb1643 =4 ,bb1358=5 ,bb1330=12 ,bb1320=13 ,bb1370=14 ,bb1287=61440 ,}bb484;bba bb10 {bb1293=1 ,bb1307=2 ,bb1308=3 ,bb1543=4 ,bb1605=5 ,bb1455=6 ,bb1434=7 , bb1475=8 ,bb1460=9 ,bb1542=10 ,bb1303=11 ,bb400=12 ,bb1289=13 ,bb391=240 , bb1364=(128 <<16 )|bb391,bb1342=(192 <<16 )|bb391,bb1333=(256 <<16 )|bb391, bb1302=(128 <<16 )|bb400,bb1294=(192 <<16 )|bb400,bb1367=(256 <<16 )|bb400, }bb711;bba bb10{bb1352=0 ,bb1507=1 ,bb1369=2 ,bb1329=3 ,bb1463=4 ,}bb889; bba bb10{bb1441=0 ,bb1576=1 ,bb1195=2 ,bb604=3 ,bb1236=4 ,}bb773;bba bb10{ bb1579=0 ,bb1530=1 ,bb1409=2 ,bb1676=5 ,bb1709=7 ,}bb486;bba 
bb10{bb1431=0 ,bb1518=1 ,bb1602=2 ,bb1712=3 ,bb1635=4 ,bb1688=5 ,bb1652=6 ,bb398=7 ,bb1548 =65001 ,bb388=240 ,bb1490=(128 <<16 )|bb388,bb1508=(192 <<16 )|bb388,bb1516 =(256 <<16 )|bb388,bb1547=(128 <<16 )|bb398,bb1560=(192 <<16 )|bb398,bb1619 =(256 <<16 )|bb398,}bb796;bba bb10{bb1719=0 ,bb1464=1 ,bb1666=2 ,bb1574=3 , bb1477=4 ,bb1532=5 ,bb1569=6 ,bb1649=65001 ,}bb872;bba bb10{bb1684=0 , bb1529=1 ,bb1664=2 ,bb1555=3 ,bb1660=4 ,bb1615=5 ,bb1557=64221 ,bb1630= 64222 ,bb1662=64223 ,bb1437=64224 ,bb1710=65001 ,bb1687=65002 ,bb1553= 65003 ,bb1445=65004 ,bb1723=65005 ,bb1492=65006 ,bb1515=65007 ,bb1481= 65008 ,bb1565=65009 ,bb1480=65010 ,}bb661;bba bb10{bb1700=0 ,bb1418=1 , bb1438=2 ,}bb676;bba bb10{bb1413=0 ,bb1373=1 ,bb1485=2 ,bb1690=3 ,}bb881; bba bb10{bb1677=0 ,bb1423=1 ,bb1440=2 ,bb1653=3 ,bb1600=4 ,bb1645=5 ,bb1506 =21 ,bb1572=6 ,bb1620=7 ,bb1539=8 ,bb1383=1000 ,}bb476;bba bb10{bb1414=0 , bb1671=1 ,bb1672=2 ,}bb723;bba bb10{bb1670=0 ,bb1400=1 ,bb1718=2 ,bb1442=3 ,bb1476=4 ,}bb662;bba bb10{bb1535=0 ,bb1679=1 ,bb1396=1001 ,bb1714=1002 ,} bb826;bba bb10{bb1563=0 ,bb1135=1 ,bb1047=2 ,bb1057=3 ,bb1129=4 ,bb1130=5 , bb1095=6 ,bb1699=100 ,bb1584=101 ,}bb478;bba bbi bb386{bb711 bb152;bb484 bb585;bb482 bb57;}bb386;bba bbi bb387{bb621 bb1349;bb484 bb585;bb482 bb57;}bb387;bba bbi bb396{bb889 bb1003;}bb396;bba bbi bb485{bb661 bb1633;bb872 bb413;bb796 bb152;bbu bb1491;bb486 bb882;}bb485;bba bbi bb474{bbu bb611;bb386 bb308;bbu bb631;bb387 bb559;bbu bb734;bb396 bb605;bb486 bb882;}bb474;bba bbi bb455{bb178 bb952;bb178 bb1212;bb773 bb102;bb557{bbi{bb387 bb45;bbf bb554[64 ];bbf bb556[64 ];}bb559;bbi{ bb386 bb45;bbf bb1211[32 ];bbf bb1230[32 ];bbf bb554[64 ];bbf bb556[64 ]; bbf bb1213[16 ];}bb308;bbi{bb396 bb45;}bb605;}bb316;}bb455;bba bbi{bbd bb812,bb592;bbf bb1143:1 ;bbf bb1158:1 ;bbf bb102;bbk bb435;}bb186;bba bbi bb506{bbd bb11;bb186 bbc[64 *2 ];}bb506; #ifdef UNDER_CE bba bb43 bb380; #else bba bb83 bb380; #endif bba bbi bb199{bbi bb199*bb1473, *bb1392;bbd bb26;bbd bb1115;bb186 
bb917[64 ];bb478 bb511;bbd bb1353;bbk bb1068;bbd bb553;bbd bb709;bbd bb808;bbf bb488;bbf bb1351;bbf bb1104;bbd bb1031;bbd bb1385;bb380 bb572;bbk bb1280;bb455 bb407[3 ];bb380 bb1570;bbf bb1509[40 ];bbd bb589 ;bbd bb1581;}bb199;bba bbi bb389{bbi bb389*bb1727;bb186 bb487;}bb389; bba bbi bb740{bbu bb479;bbu bb488;bbd bb26;bbd bb589;bbf bb1517;bbk bb1598;bbf*bb1551;bbd bb1426;bbf*bb1500;bbd bb1717;bbf*bb1381;bbd bb1416;bbu bb1651;bbu bb1573;bb389*bb130;bbu bb1484;bb662 bb1523;bbd bb1597;bb676 bb1708;bb478 bb511;bbk bb1374;bbd bb1537;bb826 bb1404; bbd bb1424;bbd bb1659;bb728 bb1421;bbf*bb1410;bbd bb1417;bb476 bb657; bbd bb1625;bbd bb1658;bbd bb1412;bbd bb1706;bbd bb1498;bb485*bb1540; bbd bb1614;bb474*bb1511;bbd bb1401;bbd bb1534;bbd bb1656;}bb740;bba bbi bb718{bbu bb479;bbd bb26;bb186 bb487;}bb718;bba bbi bb867{bb199* bb317;bbu bb1571;bbf*bb1704;bbd bb1673;}bb867;bba bbi bb869{bbd bb26; bb186 bb487;bbf bb1439;bbf bb1452;}bb869;bba bbi bb855{bbu bb479;bbu bb1120;bbd bb26;bbf*bb1631;bbd bb1545;}bb855;bba bbi bb804{bbd bb26; bbk bb1730;bbk bb1731;bbd bb151;bbf*bb49;}bb804;bba bbi bb646{bbu bb1590;bbd bb26;bbd bb553;bbd bb709;bbd bb808;}bb646;bba bbi bb906{ bb625 bb1495;bbd bb26;bb765 bb1324;bbu bb1559;}bb906;bba bbi bb820{ bbf bb1681;bbf bb1394;bbf bb1596;bbf bb1589;bbf bb1578;bbf bb1606;bbf bb1450;bbf bb1465;bbf bb1376;bbf bb1525;bbf bb1415;bbf bb1665;bbf bb1728;bbf bb1411;bbf bb1377;bbf bb1446;bbf bb1624;bbf bb1388;bbf bb1456;bbf bb508;bbf bb1552;bbf bb1667;bbf bb1536;bbf bb1693;bbf bb1420;bbf bb1436;bbf bb1419;}bb820;bba bbi bb732{bbu bb1650;bbd bb490 ;bbd bb1711;bb881 bb1432;bbk bb1638;bbu bb1521;bbu bb1566;bbu bb1657; bbu bb1457;bbu bb1637;bbu bb1599;bbu bb1403;bbl bb1626[128 ];bbl bb1675 [128 ];bbl bb1632[128 ];bbl bb1425[256 ];bbl bb1642[128 ];bbl bb1449[128 ] ;bbd bb1594;bbf bb1567[8 ];bbf bb1408[8 ];}bb732;bba bbi bb853{bbd bb26 ;bbd bb1390;}bb853;bba bbi bb815{bbd bb26;bbu bb488;}bb815;bba bbi bb636{bbu bb1716;bbd bb510;bbd bb1171;}bb636;bba bbi bb651{bbd bb26; 
bb476 bb657;bb723 bb1603;bbf*bb1582;bbd bb1591;}bb651;bba bb10{bb1407 =0 ,bb1556,bb1678,bb1389,bb1616,bb1538,bb1636,bb1680,bb1526,bb1585, bb1586,bb1696,bb1621,bb1386,bb1405,bb1588,bb1468,bb1406,bb1627,bb1644 ,}bb641;bba bbi bb1654 bb739;bba bb4( *bb1554)(bb739*bb1380,bbb* bb1593,bb641 bb324,bbb*bb74); #pragma pack(pop) #ifdef _WIN32 #ifdef UNDER_CE #define bb466 bb1705 bb598("1:") #else #define bb466 bb598("\\\\.\\IPSecTL") #endif #else #define bb602 "ipsecdrvtl" #define bb466 "/dev/" bb602 #ifndef bb120 #define bb120 #ifdef _WIN32 #include"uncobf.h" #include<wtypes.h> #include"cobf.h" #else #ifdef bb119 #include"uncobf.h" #include<linux/types.h> #include"cobf.h" #else #include"uncobf.h" #include<stddef.h> #include<sys/types.h> #include"cobf.h" #endif #endif #ifdef _WIN32 bba bb113 bb249; #else bba bbe bbu, *bb134, *bb236; #define bb201 1 #define bb202 0 bba bb270 bb211, *bb268, *bb234;bba bbe bb207, *bb217, *bb212;bba bbs bbq, *bb94, *bb223;bba bb6 bb274, *bb265;bba bbs bb6 bb280, *bb254; bba bb6 bb116, *bb285;bba bbs bb6 bb63, *bb264;bba bb63 bb242, *bb229 ;bba bb63 bb267, *bb235;bba bb116 bb113, *bb240;bba bb227 bb210;bba bb255 bb123;bba bb245 bb83;bba bb115 bb118;bba bb115 bb272; #ifdef bb226 bba bb288 bb39, *bb72;bba bb241 bbk, *bb59;bba bb253 bbd, *bb29;bba bb208 bb56, *bb112; #else bba bb224 bb39, *bb72;bba bb291 bbk, *bb59;bba bb238 bbd, *bb29;bba bb262 bb56, *bb112; #endif bba bb39 bbf, *bb1, *bb271;bba bbk bb228, *bb251, *bb277;bba bbk bb231 , *bb260, *bb225;bba bbd bb60, *bb122, *bb248;bba bb83 bb36, *bb279, * bb292;bba bbd bb232, *bb213, *bb243;bba bb118 bb281, *bb269, *bb275; bba bb56 bb222, *bb289, *bb247; #define bb140 bbb bba bbb*bb205, *bb77;bba bbh bbb*bb290;bba bbl bb252;bba bbl*bb259; bba bbh bbl*bb82; #if defined( bb119) bba bbe bb114; #endif bba bb114 bb20;bba bb20*bb215;bba bbh bb20*bb187; #if defined( bb282) || defined( bb209) bba bb20 bb37;bba bb20 bb111; #else bba bbl bb37;bba bbs bbl bb111; #endif bba bbh bb37*bb278;bba bb37*bb246;bba 
bb60 bb257, *bb261;bba bbb* bb106;bba bb106*bb258; #define bb263( bb35) bbi bb35##__ { bbe bb230; }; bba bbi bb35##__ * \ bb35 bba bbi{bb36 bb191,bb216,bb284,bb276;}bb266, *bb220, *bb273;bba bbi{ bb36 bb8,bb193;}bb244, *bb250, *bb237;bba bbi{bb36 bb206,bb221;}bb286 , *bb233, *bb214; #endif bba bbh bbf*bb89; #endif #include"uncobf.h" #include<linux/ioctl.h> #include"cobf.h" bba bbi{bb1 bb1345;bbd bb1339;bb1 bb1241;bbd bb1146;bbd bb451;}bb1203 ; #define bb1357 1 #endif #pragma pack(push, 8) bb10{bb1360=3 ,bb1354,bb1355,bb1428,};bba bbi{bbf bb104[4 ];}bb1237;bba bbi{bbf bb104[4 ];}bb1235;bba bbi{bbd bb947;bbd bb26;}bb1260;bba bbi{ bbd bb131;bbf bb1227[8 ];}bb409;bba bb10{bb1223=0 ,bb1232,bb1252,bb1247 ,bb1372}bb1233;bba bbi{bbf bb1121;bbd bb1076;bbf bb1363;}bb489; #pragma pack(pop) #pragma pack(push, 8) bb10{bb1137=-5000 ,bb1105=-4000 ,bb998=-4999 ,bb990=-4998 ,bb989=-4997 , bb981=-4996 ,bb1094=-4995 ,bb1087=-4994 ,bb1098=-4993 ,bb1014=-4992 , bb1080=-4991 };bb4 bb1133(bb4 bb1127,bbd bb1116,bbl*bb1100);bba bbi{ bb199 bb180;bbd bb1201;bbd bb1073;bbd bb1371;bbd bb1081;bbd bb1240; bbd bb1279;bbd bb1277;bbd bb1239;bbd bb1249;bbd bb1281;bbd bb1251;bbu bb1225;bb43 bb572,bb1159,bb1170;bbf bb369[6 ];}bb160;bba bbi bb481{bbi bb481*bb93;bbf bb102;bbk bb1276;bbk bb1253;bbk bb1272;bbk bb1274;} bb426;bba bbi bb779{bbi bb779*bb93;bbi bb481*bb1096;bbd bb26;bbf bb369 [6 ];}bb411;bba bb10{bb1147=0 ,bb1577,bb1041,bb1023,bb1009}bb204;bba bbi {bbd bb385;bbd bb451;bbd bb509;bb409*bb915;bb97 bb966;}bb305;bba bbi{ bb489*bb458;bb411*bb1124;bbd bb582;bb426*bb548;bb97 bb615;bbq bb1119; bbq bb547;bb160*bb503;bbu bb1270;bbk bb1160;bbk bb1107;bb305 bb1086;} bb33, *bb1611; #pragma pack(pop) bba bbi bb982 bb1319, *bb78;bba bbi bb818{bbi bb818*bb321;bb1 bb465; bbq bb569;bbd bb26;bbk bb435;bbq bb92;bb1 bb314;bbq bb456;bb1 bb534; bbq bb546;bb1 bb1505;bb103 bb1366;bbf bb1306[6 ];bb103 bb964;bb103 bb1142;bb103 bb516;bb103 bb533;}bb174, *bb86;bba bbi bb868{bbi bb868* bb93;bb174*bb321;bbd bb26;bbk 
bb537;bbk bb1474;bbq bb1448;bbq bb1488; bbk bb1435;}bb1462, *bb457;bbu bb1268(bb33* *bb1214);bbb bb1285(bb33* bbj);bb204 bb1269(bb33*bb108,bb376 bb450,bb318 bb138,bb346 bb412, bb313 bb200);bb204 bb1248(bb33*bb108,bb376 bb450,bb318 bb138,bb346 bb412,bb313 bb200);bb204 bb1258(bb33*bb108,bb174*bb49,bb78 bb75); bb204 bb1234(bb33*bb108,bb174*bb49,bb78 bb75);bb4 bb1245(bb33*bb108, bb174*bb49,bbd*bb104);bb4 bb1156(bb78 bb75,bb33*bb108,bb174*bb49, bb160*bb317,bbu bb595,bbu bb946);bba bb10{bb403,bb1504,}bb295;bbk bb1215(bb295 bb712,bbh bbf*bb446);bbd bb544(bb295 bb712,bbh bbf*bb446 );bbb bb1173(bbk bb159,bb295 bb550,bbf bb441[2 ]);bbb bb975(bbd bb159, bb295 bb550,bbf bb441[4 ]);bbb bb1910(bb33*bbj,bbd bb294,bbh bbf bb1187 [6 ]);bbu bb1922(bb33*bbj,bbd bb294,bb411*bb423);bbb bb2036(bb33*bbj); bbb bb2004(bb33*bbj,bbd bb294,bbh bbf bb1187[6 ],bbf bb102,bbk bb410, bbk bb406);bbu bb2012(bb33*bbj,bbd bb294,bbf bb102,bbk bb410,bbk bb406 );bbu bb1853(bb33*bbj,bbf bb102,bbk bb410,bbk bb406);bbb bb1987(bb33* bbj,bb426*bb548,bbq bb582);bbu bb1868(bbd bb294);bb160*bb1789(bb33* bbj,bbd bb294,bbu bb595);bb160*bb1844(bb33*bbj,bbd bb294,bbd bb104); bb160*bb1918(bb33*bbj,bb178 bb104);bbb bb1938(bb506*bb40);bb160* bb1935(bb33*bbj,bb199*bb180);bbb bb1881(bb33*bbj,bb178 bb104);bbb bb1859(bb33*bbj,bb178 bb104);bbb bb1980(bb33*bbj);bbb bb1796(bb33*bbj );bb41 bb506 bb593;bbu bb1868(bbd bb2570){bbd bb294=bb493(bb2570);bbe bbz;bb91(bbz=0 ;bbz<bb593.bb11;bbz++){bb186*bb564=&bb593.bbc[bbz];bbu bbn;bbm(!bb564->bb1143){bbn=(bb564->bb812&bb564->bb592)==(bb294&bb564 ->bb592);}bb54{bbn=bb564->bb812<=bb294&&bb294<=bb564->bb592;}bbm(bbn)bb2 1 ;}bb2 0 ;}bb160*bb1789(bb33*bbj,bbd bb294,bbu bb595){bb160*bb1278=bbj ->bb503;bbe bbz;bb91(bbz=bbj->bb547-1 ;bbz>=0 ;bbz--){bb160*bb166=& bb1278[bbz];bb199*bb1218=&bb166->bb180;bbm(bb595){bbm(bb1218->bb26== bb294)bb2 bb166;}bb54{bbq bb76;bbu bb2244=0 ;bbm(!bb1218->bb1104)bb1691 ;bbm(bb1218->bb26==bb294){bbm(bb1218->bb407[0 ].bb316.bb308.bb45.bb57 ==bb1164)bb2 
bb166;bb54 bb2 bb90;}bb91(bb76=0 ;bb76<bb1218->bb1115; bb76++){bbu bbn;bb186*bb564=&bb1218->bb917[bb76];bbm(!bb564->bb1143){ bbn=(bb564->bb812&bb564->bb592)==(bb294&bb564->bb592);}bb54{bbd bbc= bb493(bb294);bbn=bb493(bb564->bb812)<=bbc&&bbc<=bb493(bb564->bb592);} bbm(bbn){bbm(bb564->bb1158){bb2244=0 ;bb21;}bb54 bb2244=1 ;}}bbm(bb2244 )bb2 bb166;}}bb2 bb90;}bb160*bb1844(bb33*bbj,bbd bb294,bbd bb104){ bb160*bb1278=bbj->bb503;bbq bbz,bb11=bbj->bb547;bb91(bbz=0 ;bbz<bb11; bbz++){bb160*bb166=&bb1278[bbz];bb199*bb1218=&bb166->bb180;bbm(bb1218 ->bb26==bb294){bbq bb76,bb11=bb1218->bb1280;bb91(bb76=0 ;bb76<bb11; bb76++){bbd bb2567=bb544(bb403,bb1218->bb407[bb76].bb952);bbm(bb2567 ==bb104)bb2 bb166;}}}bb2 bb90;}bb160*bb1918(bb33*bbj,bb178 bb104){ bb160*bb1278=bbj->bb503;bbq bbz,bb11=bbj->bb547;bb91(bbz=0 ;bbz<bb11; bbz++){bb160*bb166=&bb1278[bbz];bbm(bb1898(bb166->bb180.bb407[0 ]. bb952,bb104,bb12(bb178))==0 )bb2 bb166;}bb2 bb90;}bbb bb1938(bb506* bb40){bbq bbz;bb593.bb11=0 ;bb91(bbz=0 ;bbz<bb40->bb11;bbz++){bb593.bbc [bb593.bb11]=bb40->bbc[bbz];bb593.bbc[bb593.bb11].bb812=bb493(bb40-> bbc[bbz].bb812);bb593.bbc[bb593.bb11].bb592=bb493(bb40->bbc[bbz]. 
bb592);bb593.bb11++;}}bb160*bb1935(bb33*bbj,bb199*bb180){bb160*bb1110 =bb90;bbm(bbj->bb547>=bbj->bb1119){bbd bb2392=bbj->bb1119+8 ;bb160* bb2225=bb137(1 ,bb2392*bb12( *bb1110));bbm(!bb2225)bb2 bb90;bbm(bbj-> bb503){bb81(bb2225,bbj->bb503,bbj->bb1119*bb12( *bb1110));bb109(bbj-> bb503);}bbj->bb503=bb2225;bbj->bb1119=bb2392;}{bb160*bb1278=bbj-> bb503;bbq bbz;bb91(bbz=0 ;bbz<bbj->bb547;bbz++){bb160*bb166=&bb1278[ bbz];bb199*bb1218=&bb166->bb180;bbq bb76;bbu bb2304=0 ;bb91(bb76=0 ; bb76<bb1218->bb1115;bb76++){bb186*bb564=&bb1218->bb917[bb76];bbq bb3; bb91(bb3=0 ;bb3<bb180->bb1115;bb3++){bb186*bb2223=&bb180->bb917[bb3]; bbm(bb564->bb1143&&bb2223->bb1143){bbu bbn=bb493(bb2223->bb812)<bb493 (bb564->bb812)&&bb493(bb564->bb592)<=bb493(bb2223->bb592);bbm(bbn&& bb564->bb1158)bbn=!bbn;bbm(bbn){bb2368(bb166+1 ,bb166,bb12( *bb166) * ( bbj->bb547-bbz));bb997(bb166,0 ,bb12( *bb166));bb1110=bb166;bbj->bb547 ++;bb2304=1 ;bb21;}}}bbm(bb2304)bb21;}bbm(bb2304)bb21;}}bbm(!bb1110){ bb1110=&bbj->bb503[bbj->bb547++];bb997(bb1110,0 ,bb12( *bb1110));}{ bb411 bb2322;bbm(bb1922(bbj,bb180->bb26,&bb2322))bb81(bb1110->bb369, bb2322.bb369,6 );bb81(&bb1110->bb180,bb180,bb12( *bb180));bb1110-> bb1225=0 ;bb1110->bb572=bb302(bb90);bb1110->bb1159=bb302(bb90);bb1110 ->bb1170=bb302(bb90);bb1110->bb1201=bb180->bb589;}bb593.bb11=0 ;bb2 bb1110;}bbb bb1881(bb33*bbj,bb178 bb104){bb160*bb1278=bbj->bb503;bbq bbz,bb11=bbj->bb547;bbm(bb593.bb11==0 ){bb91(bbz=0 ;bbz<bb11;bbz++){ bb160*bb166=&bb1278[bbz];bbe bb76;bb91(bb76=0 ;bb76<bb166->bb180. bb1115;bb76++){bb186*bb564=&bb166->bb180.bb917[bb76];bbm(bb564-> bb1158)bb1691;bbm(!(bb593.bb11<(bb12(bb593.bbc)/bb12((bb593.bbc)[0 ]))))bb21 ;bb593.bbc[bb593.bb11]= *bb564;bb593.bbc[bb593.bb11].bb812=bb493( bb564->bb812);bb593.bbc[bb593.bb11].bb592=bb493(bb564->bb592);bb593. 
bb11++;}}}bb91(bbz=0 ;bbz<bb11;bbz++){bb160*bb166=&bb1278[bbz];bbm( bb1898(bb166->bb180.bb407[0 ].bb952,bb104,bb12(bb178))==0 ){bb2368( bb166,bb166+1 ,(bb11-bbz-1 ) *bb12( *bb166));bbj->bb547--;bb2;}}}bbb bb1859(bb33*bbj,bb178 bb104){bb160*bb1278=bbj->bb503;bbq bbz,bb11=bbj ->bb547;bb91(bbz=0 ;bbz<bb11;bbz++){bb160*bb166=&bb1278[bbz];bbm( bb1898(bb166->bb180.bb407[0 ].bb952,bb104,bb12(bb178))==0 ){bb166-> bb180.bb1104=0 ;bb2;}}}bbb bb1980(bb33*bbj){bbj->bb547=0 ;}bbb bb1796( bb33*bbj){bbm(bbj->bb503){bb109(bbj->bb503);bbj->bb503=bb90;}bbj-> bb1119=0 ;bbj->bb547=0 ;bb593.bb11=0 ;}
gpl-2.0
sub77/android_kernel_samsung_matissewifi
drivers/net/wireless/ipsecdrvtl/bj.c
310
9781
/* 'src_ipsec_pgpIPsecBuffer.c' Obfuscated by COBF (Version 1.06 2006-01-07 by BB) at Wed Jan 15 12:08:55 2014 */ #include"cobf.h" #ifdef _WIN32 #if defined( UNDER_CE) && defined( bb355) || ! defined( bb356) #define bb354 1 #define bb330 1 #else #define bb352 bb334 #define bb351 1 #define bb340 1 #endif #define bb347 1 #include"uncobf.h" #include<ndis.h> #include"cobf.h" #ifdef UNDER_CE #include"uncobf.h" #include<ndiswan.h> #include"cobf.h" #endif #include"uncobf.h" #include<stdio.h> #include<basetsd.h> #include"cobf.h" bba bbs bbl bbf, *bb1;bba bbs bbe bbq, *bb94;bba bb135 bb123, *bb332; bba bbs bbl bb39, *bb72;bba bbs bb135 bbk, *bb59;bba bbe bbu, *bb134; bba bbh bbf*bb89; #ifdef bb311 bba bbd bb60, *bb122; #endif #else #include"uncobf.h" #include<linux/module.h> #include<linux/ctype.h> #include<linux/time.h> #include<linux/slab.h> #include"cobf.h" #ifndef bb120 #define bb120 #ifdef _WIN32 #include"uncobf.h" #include<wtypes.h> #include"cobf.h" #else #ifdef bb119 #include"uncobf.h" #include<linux/types.h> #include"cobf.h" #else #include"uncobf.h" #include<stddef.h> #include<sys/types.h> #include"cobf.h" #endif #endif #ifdef _WIN32 bba bb113 bb249; #else bba bbe bbu, *bb134, *bb236; #define bb201 1 #define bb202 0 bba bb270 bb211, *bb268, *bb234;bba bbe bb207, *bb217, *bb212;bba bbs bbq, *bb94, *bb223;bba bb6 bb274, *bb265;bba bbs bb6 bb280, *bb254; bba bb6 bb116, *bb285;bba bbs bb6 bb63, *bb264;bba bb63 bb242, *bb229 ;bba bb63 bb267, *bb235;bba bb116 bb113, *bb240;bba bb227 bb210;bba bb255 bb123;bba bb245 bb83;bba bb115 bb118;bba bb115 bb272; #ifdef bb226 bba bb288 bb39, *bb72;bba bb241 bbk, *bb59;bba bb253 bbd, *bb29;bba bb208 bb56, *bb112; #else bba bb224 bb39, *bb72;bba bb291 bbk, *bb59;bba bb238 bbd, *bb29;bba bb262 bb56, *bb112; #endif bba bb39 bbf, *bb1, *bb271;bba bbk bb228, *bb251, *bb277;bba bbk bb231 , *bb260, *bb225;bba bbd bb60, *bb122, *bb248;bba bb83 bb36, *bb279, * bb292;bba bbd bb232, *bb213, *bb243;bba bb118 bb281, *bb269, *bb275; bba bb56 bb222, 
*bb289, *bb247; #define bb140 bbb bba bbb*bb205, *bb77;bba bbh bbb*bb290;bba bbl bb252;bba bbl*bb259; bba bbh bbl*bb82; #if defined( bb119) bba bbe bb114; #endif bba bb114 bb20;bba bb20*bb215;bba bbh bb20*bb187; #if defined( bb282) || defined( bb209) bba bb20 bb37;bba bb20 bb111; #else bba bbl bb37;bba bbs bbl bb111; #endif bba bbh bb37*bb278;bba bb37*bb246;bba bb60 bb257, *bb261;bba bbb* bb106;bba bb106*bb258; #define bb263( bb35) bbi bb35##__ { bbe bb230; }; bba bbi bb35##__ * \ bb35 bba bbi{bb36 bb191,bb216,bb284,bb276;}bb266, *bb220, *bb273;bba bbi{ bb36 bb8,bb193;}bb244, *bb250, *bb237;bba bbi{bb36 bb206,bb221;}bb286 , *bb233, *bb214; #endif bba bbh bbf*bb89; #endif bba bbf bb103; #define IN #define OUT #ifdef _DEBUG #define bb145( bbc) bb32( bbc) #else #define bb145( bbc) ( bbb)( bbc) #endif bba bbe bb161, *bb173; #define bb287 0 #define bb315 1 #define bb299 2 #define bb319 3 #define bb357 4 bba bbe bb349;bba bbb*bb121; #endif #ifdef _WIN32 #ifndef UNDER_CE #define bb31 bb341 #define bb43 bb338 bba bbs bb6 bb31;bba bb6 bb43; #endif #else #endif #ifdef _WIN32 bbb*bb128(bb31 bb47);bbb bb109(bbb* );bbb*bb137(bb31 bb159,bb31 bb47); #else #define bb128( bbc) bb146(1, bbc, bb139) #define bb109( bbc) bb348( bbc) #define bb137( bbc, bbn) bb146( bbc, bbn, bb139) #endif #ifdef _WIN32 #define bb32( bbc) bb339( bbc) #else #ifdef _DEBUG bbe bb144(bbh bbl*bb95,bbh bbl*bb25,bbs bb239); #define bb32( bbc) ( bbb)(( bbc) || ( bb144(# bbc, __FILE__, __LINE__ \ ))) #else #define bb32( bbc) (( bbb)0) #endif #endif bb43 bb302(bb43*bb320); #ifndef _WIN32 bbe bb328(bbh bbl*bbg);bbe bb323(bbh bbl*bb19,...); #endif #ifdef _WIN32 bba bb343 bb97; #define bb141( bbc) bb345( bbc) #define bb143( bbc) bb358( bbc) #define bb133( bbc) bb353( bbc) #define bb132( bbc) bb333( bbc) #else bba bb350 bb97; #define bb141( bbc) ( bbb)( * bbc = bb344( bbc)) #define bb143( bbc) (( bbb)0) #define bb133( bbc) bb337( bbc) #define bb132( bbc) bb342( bbc) #endif bba bb83 bb4;bb10{bb98=0 ,bb364=-12000 
,bb336=-11999 ,bb379=-11998 ,bb658 =-11997 ,bb811=-11996 ,bb717=-11995 ,bb871=-11994 ,bb780=-11992 ,bb713=- 11991 ,bb834=-11990 ,bb730=-11989 ,bb837=-11988 ,bb645=-11987 ,bb900=- 11986 ,bb752=-11985 ,bb687=-11984 ,bb813=-11983 ,bb648=-11982 ,bb844=- 11981 ,bb904=-11980 ,bb667=-11979 ,bb801=-11978 ,bb851=-11977 ,bb578=- 11976 ,bb782=-11975 ,bb762=-11960 ,bb803=-11959 ,bb677=-11500 ,bb725=- 11499 ,bb849=-11498 ,bb791=-11497 ,bb878=-11496 ,bb772=-11495 ,bb898=- 11494 ,bb769=-11493 ,bb843=-11492 ,bb887=-11491 ,bb865=-11490 ,bb727=- 11489 ,bb660=-11488 ,bb866=-11487 ,bb873=-11486 ,bb655=-11485 ,bb638=- 11484 ,bb905=-11483 ,bb830=-11482 ,bb617=-11481 ,bb691=-11480 ,bb757=- 11479 ,bb637=-11478 ,bb653=-11477 ,bb759=-11476 ,bb864=-11475 ,bb862=- 11474 ,bb706=-11473 ,bb695=-11472 ,bb635=-11460 ,bb840=-11450 ,bb729=- 11449 ,bb698=-11448 ,bb726=-11447 ,bb847=-11446 ,bb624=-11445 ,bb894=- 11444 ,bb701=-11443 ,bb694=-11440 ,bb781=-11439 ,bb774=-11438 ,bb793=- 11437 ,bb659=-11436 ,bb746=-11435 ,bb845=-11420 ,bb522=-11419 ,bb567=- 11418 ,bb673=-11417 ,bb627=-11416 ,bb775=-11415 ,bb788=-11414 ,bb724=- 11413 ,bb854=-11412 ,bb715=-11411 ,bb665=-11410 ,bb623=-11409 ,bb700=- 11408 ,bb897=-11407 ,bb907=-11406 ,bb688=-11405 ,bb666=-11404 ,bb639=- 11403 ,bb754=-11402 ,bb764=-11401 ,bb664=-11400 ,bb880=-11399 ,bb883=- 11398 ,bb756=-11397 ,bb670=-11396 ,bb875=-11395 ,bb671=-11394 ,bb827=- 11393 ,bb800=-11392 ,bb678=-11391 ,bb735=-11390 ,bb720=-11389 ,bb767=- 11388 ,bb903=-11387 ,bb901=-11386 ,bb733=-11385 ,bb690=-11384 ,bb829=- 11383 ,bb642=-11382 ,bb768=-11381 ,bb744=-11380 ,bb777=-11379 ,bb620=- 11378 ,bb745=-11377 ,bb795=-11376 ,bb749=-11375 ,bb650=-11374 ,bb841=- 11373 ,bb672=-11372 ,bb846=-11371 ,bb797=-11370 ,bb771=-11369 ,bb835=- 11368 ,bb750=-11367 ,bb852=-11366 ,bb751=-11365 ,bb842=-11364 ,bb807=- 11363 ,bb394=-11350 ,bb714=bb394,bb702=-11349 ,bb902=-11348 ,bb802=-11347 ,bb693=-11346 ,bb640=-11345 ,bb884=-11344 ,bb805=-11343 ,bb708=-11342 , bb848=-11341 ,bb753=-11340 ,bb689=-11339 
,bb395=-11338 ,bb679=-11337 , bb748=bb395,bb705=-11330 ,bb810=-11329 ,bb681=-11328 ,bb785=-11327 ,bb761 =-11326 ,bb643=-11325 ,bb814=-11324 ,bb634=-11320 ,bb822=-11319 ,bb858=- 11318 ,bb737=-11317 ,bb618=-11316 ,bb839=-11315 ,bb821=-11314 ,bb716=- 11313 ,bb633=-11312 ,bb893=-11300 ,bb778=-11299 ,bb790=-11298 ,bb692=- 11297 ,bb668=-11296 ,bb863=-11295 ,bb819=-11294 ,bb649=-11293 ,bb685=- 11292 ,bb886=-11291 ,bb809=-11290 ,bb787=-11289 ,bb879=-11288 ,bb630=- 11287 ,bb789=-11286 ,bb647=-11285 ,bb816=-11284 ,bb806=-11283 ,bb704=- 11282 ,bb663=-11281 ,bb703=-11280 ,bb675=-11279 ,bb731=-11250 ,bb784=- 11249 ,bb652=-11248 ,bb741=-11247 ,bb654=-11246 ,bb891=-11245 ,bb696=- 11244 ,bb747=-11243 ,bb619=-11242 ,bb860=-11240 ,bb626=-11239 ,bb738=- 11238 ,bb783=-11237 ,bb684=-11150 ,bb686=-11100 ,bb831=-11099 ,bb836=- 11098 ,bb697=-11097 ,bb622=-11096 ,bb786=-11095 ,bb632=-11094 ,bb877=- 11093 ,bb828=-11092 ,bb896=-11091 ,bb798=-11090 ,bb874=-11089 ,bb832=- 11088 ,bb908=-11087 ,bb850=-11086 ,bb763=-11085 ,bb776=-11050 ,bb656=- 11049 ,bb876=-10999 ,bb758=-10998 ,bb680=-10997 ,bb743=-10996 ,bb888=- 10995 ,bb669=-10994 ,bb870=-10993 ,bb899=-10992 ,bb766=-10991 ,bb683=- 10990 ,bb616=-10989 ,bb794=-10988 ,bb710=-10979 ,bb856=-10978 ,bb629=- 10977 ,bb885=-10976 ,bb722=-10975 ,bb799=-10974 ,};bba bbi bb452{bb1 bb74 ;bbd bb125;bbd bb181;bbi bb452*bb93;}bbx;bb4 bb472(bbx*bb824,bbd bb910 ,bbx*bb857,bbd bb895,bbd bb539);bb4 bb524(bbx*bbj,bbd bb92,bbh bbb* bb95,bbd bb47);bb4 bb576(bbx*bbj,bbd bb92,bbb*bb130,bbd bb47);bbu bb823(bbx*bbj,bbd bb92,bbh bbb*bb95,bbd bb47);bb4 bb472(bbx*bb824,bbd bb910,bbx*bb857,bbd bb895,bbd bb539){bbd bb169=0 ;bbd bb147=0 ;bbd bb156 =0 ;bbd bb570=0 ;bbd bb1855;bbd bb1923;bbx*bb73=bb90;bbx*bb46=bb90;bb4 bb18=bb98;bbm(!bb824||!bb857)bb2 bb364;bbm(!bb824->bb74||!bb857->bb74 )bb2 bb364;bb73=bb824;bb46=bb857;bb147=bb895;bb156=bb73->bb125;bb107( bb156<bb910&&bb73){bb73=bb73->bb93;bb570=bb156;bb156+=bb73->bb125;} bb169=bb910-bb570;bb570=0 
;bb156=bb46->bb181;bb107(bb156<bb895&&((bb18 )==bb98)){bbm(!bb46->bb93)bb18=bb379;bb54{bb46=bb46->bb93;bb570=bb156 ;bb156+=bb46->bb181;}}bb147=bb895-bb570;bbm(&(bb73->bb74[bb169])==&( bb46->bb74[bb147]))bb2 bb18;bb107(bb539>0 &&bb73&&((bb18)==bb98)){ bb1855=bb73->bb125-bb169;bb1923=bb46->bb181-bb147;bbm(bb1855>bb539)bb1855 =bb539;bbm(bb1923>bb539)bb1923=bb539;bbm(bb1855<=bb1923){bb81(&bb46-> bb74[bb147],&bb73->bb74[bb169],bb1855);bb73=bb73->bb93;bb169=0 ;bb147 +=bb1855;bb539-=bb1855;bbm(bb46->bb125<bb147)bb46->bb125=bb147;}bb54{ bbm(!bb46->bb93)bb18=bb379;bb54{bb81(&bb46->bb74[bb147],&bb73->bb74[ bb169],bb1923);bb46->bb125=bb46->bb181;bb46=bb46->bb93;bb147=0 ;bb169 +=bb1923;bb539-=bb1923;}}}bbm(((bb18)==bb98)&&(bb539>0 ))bb18=bb379; bb2 bb18;}bb4 bb524(bbx*bbj,bbd bb92,bbh bbb*bb2511,bbd bb47){bbx bb95 ;bb95.bb74=(bb1)bb2511;bb95.bb125=bb47;bb95.bb181=bb47;bb95.bb93=bb90 ;bb2 bb472(&bb95,0 ,bbj,bb92,bb47);}bb4 bb576(bbx*bbj,bbd bb92,bbb* bb2450,bbd bb47){bbx bb130;bb130.bb74=bb2450;bb130.bb125=0 ;bb130. bb181=bb47;bb130.bb93=bb90;bb2 bb472(bbj,bb92,&bb130,0 ,bb47);}bbu bb823(bbx*bbj,bbd bb92,bbh bbb*bb95,bbd bb47){bbd bb169=0 ;bbd bb147=0 ;bbd bb156=0 ;bbd bb570=0 ;bbd bb1888;bbd bb1889;bbd bb2550=bb47;bbx* bb73=bb90;bbm(!bbj)bb2 0 ;bbm(!bbj->bb74||!bb95)bb2 0 ;bb73=bbj;bb156= bb73->bb125;bb107(bb156<bb92&&bb73){bb73=bb73->bb93;bb570=bb156;bb156 +=bb73->bb125;}bb169=bb92-bb570;bbm(&bb73->bb74[bb169]==bb95)bb2 1 ; bb107(bb47&&bb73){bb1888=bb73->bb125-bb169;bb1889=bb2550-bb147;bbm( bb1888>bb47)bb1888=bb47;bbm(bb1889>bb47)bb1889=bb47;bbm(bb1888<= bb1889){bbm(bb1898(&bb73->bb74[bb169],(bb1)bb95+bb147,bb1888)!=0 )bb2 0 ;bb73=bb73->bb93;bb169=0 ;bb147+=bb1888;bb47-=bb1888;}bb54{bbm(bb1898( &bb73->bb74[bb169],(bb1)bb95+bb147,bb1889)!=0 )bb2 0 ;bb169+=bb1889; bb47-=bb1889;bb21;}}bb2!bb47;}
gpl-2.0
harunjo/galaxsih-kernel-JB-S3
arch/arm/mach-exynos/ppc.c
566
1669
/* linux/arch/arm/mach-exynos/ppc.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * EXYNOS4 - PPMU support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/err.h> #include <linux/io.h> #include <mach/map.h> #include <mach/regs-clock.h> #include <mach/ppmu.h> void exynos4_ppc_reset(struct exynos4_ppmu_hw *ppmu) { void __iomem *ppmu_base = ppmu->hw_base; int i; __raw_writel(0x8000000f, ppmu_base + 0xf010); __raw_writel(0x8000000f, ppmu_base + 0xf050); __raw_writel(0x6, ppmu_base + 0xf000); __raw_writel(0x0, ppmu_base + 0xf100); ppmu->ccnt = 0; for (i = 0; i < NUMBER_OF_COUNTER; i++) ppmu->count[i] = 0; } void exynos4_ppc_setevent(struct exynos4_ppmu_hw *ppmu, unsigned int evt) { void __iomem *ppmu_base = ppmu->hw_base; ppmu->event[0] = evt; __raw_writel(((evt << 12) | 0x1), ppmu_base + 0xfc); } void exynos4_ppc_start(struct exynos4_ppmu_hw *ppmu) { void __iomem *ppmu_base = ppmu->hw_base; __raw_writel(0x1, ppmu_base + 0xf000); } void exynos4_ppc_stop(struct exynos4_ppmu_hw *ppmu) { void __iomem *ppmu_base = ppmu->hw_base; __raw_writel(0x0, ppmu_base + 0xf000); } unsigned long long exynos4_ppc_update(struct exynos4_ppmu_hw *ppmu) { void __iomem *ppmu_base = ppmu->hw_base; unsigned int i; ppmu->ccnt = __raw_readl(ppmu_base + 0xf100); for (i = 0; i < NUMBER_OF_COUNTER; i++) ppmu->count[i] = __raw_readl(ppmu_base + (0xf110 + (0x10 * i))); return 0; }
gpl-2.0
DarkforestGroup/sony-kernel-msm7x30-ics
arch/mips/alchemy/devboards/pb1500/board_setup.c
566
5225
/* * Copyright 2000, 2008 MontaVista Software Inc. * Author: MontaVista Software, Inc. <source@mvista.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/delay.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/interrupt.h> #include <asm/mach-au1x00/au1000.h> #include <asm/mach-pb1x00/pb1500.h> #include <prom.h> char irq_tab_alchemy[][5] __initdata = { [12] = { -1, INTA, INTX, INTX, INTX }, /* IDSEL 12 - HPT370 */ [13] = { -1, INTA, INTB, INTC, INTD }, /* IDSEL 13 - PCI slot */ }; struct au1xxx_irqmap __initdata au1xxx_irq_map[] = { { AU1500_GPIO_204, IRQF_TRIGGER_HIGH, 0 }, { AU1500_GPIO_201, IRQF_TRIGGER_LOW, 0 }, { AU1500_GPIO_202, IRQF_TRIGGER_LOW, 0 }, { AU1500_GPIO_203, IRQF_TRIGGER_LOW, 0 }, { AU1500_GPIO_205, IRQF_TRIGGER_LOW, 0 }, }; const char *get_system_type(void) { return "Alchemy Pb1500"; } void board_reset(void) { /* Hit BCSR.RST_VDDI[SOFT_RESET] */ au_writel(0x00000000, PB1500_RST_VDDI); } void __init board_init_irq(void) { au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map)); } void __init board_setup(void) { u32 pin_func; u32 sys_freqctrl, sys_clksrc; char *argptr; argptr = prom_getcmdline(); #ifdef CONFIG_SERIAL_8250_CONSOLE argptr = strstr(argptr, "console="); if (argptr == NULL) { argptr = prom_getcmdline(); strcat(argptr, " console=ttyS0,115200"); } #endif #if defined(CONFIG_SOUND_AU1X00) && !defined(CONFIG_SOC_AU1000) /* au1000 does not support vra, au1500 and au1100 do */ strcat(argptr, " au1000_audio=vra"); argptr = prom_getcmdline(); #endif sys_clksrc = sys_freqctrl = pin_func = 0; /* Set AUX clock to 12 MHz * 8 = 96 MHz */ au_writel(8, SYS_AUXPLL); au_writel(0, SYS_PINSTATERD); udelay(100); /* GPIO201 is input for PCMCIA card detect */ /* GPIO203 is input for PCMCIA interrupt request */ alchemy_gpio_direction_input(201); alchemy_gpio_direction_input(203); #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) /* Zero and disable FREQ2 */ sys_freqctrl = au_readl(SYS_FREQCTRL0); sys_freqctrl &= ~0xFFF00000; au_writel(sys_freqctrl, SYS_FREQCTRL0); /* zero and disable USBH/USBD clocks */ sys_clksrc = au_readl(SYS_CLKSRC); 
sys_clksrc &= ~(SYS_CS_CUD | SYS_CS_DUD | SYS_CS_MUD_MASK | SYS_CS_CUH | SYS_CS_DUH | SYS_CS_MUH_MASK); au_writel(sys_clksrc, SYS_CLKSRC); sys_freqctrl = au_readl(SYS_FREQCTRL0); sys_freqctrl &= ~0xFFF00000; sys_clksrc = au_readl(SYS_CLKSRC); sys_clksrc &= ~(SYS_CS_CUD | SYS_CS_DUD | SYS_CS_MUD_MASK | SYS_CS_CUH | SYS_CS_DUH | SYS_CS_MUH_MASK); /* FREQ2 = aux/2 = 48 MHz */ sys_freqctrl |= (0 << SYS_FC_FRDIV2_BIT) | SYS_FC_FE2 | SYS_FC_FS2; au_writel(sys_freqctrl, SYS_FREQCTRL0); /* * Route 48MHz FREQ2 into USB Host and/or Device */ sys_clksrc |= SYS_CS_MUX_FQ2 << SYS_CS_MUH_BIT; au_writel(sys_clksrc, SYS_CLKSRC); pin_func = au_readl(SYS_PINFUNC) & ~SYS_PF_USB; /* 2nd USB port is USB host */ pin_func |= SYS_PF_USB; au_writel(pin_func, SYS_PINFUNC); #endif /* defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) */ #ifdef CONFIG_PCI /* Setup PCI bus controller */ au_writel(0, Au1500_PCI_CMEM); au_writel(0x00003fff, Au1500_CFG_BASE); #if defined(__MIPSEB__) au_writel(0xf | (2 << 6) | (1 << 4), Au1500_PCI_CFG); #else au_writel(0xf, Au1500_PCI_CFG); #endif au_writel(0xf0000000, Au1500_PCI_MWMASK_DEV); au_writel(0, Au1500_PCI_MWBASE_REV_CCL); au_writel(0x02a00356, Au1500_PCI_STATCMD); au_writel(0x00003c04, Au1500_PCI_HDRTYPE); au_writel(0x00000008, Au1500_PCI_MBAR); au_sync(); #endif /* Enable sys bus clock divider when IDLE state or no bus activity. */ au_writel(au_readl(SYS_POWERCTRL) | (0x3 << 5), SYS_POWERCTRL); /* Enable the RTC if not already enabled */ if (!(au_readl(0xac000028) & 0x20)) { printk(KERN_INFO "enabling clock ...\n"); au_writel((au_readl(0xac000028) | 0x20), 0xac000028); } /* Put the clock in BCD mode */ if (au_readl(0xac00002c) & 0x4) { /* reg B */ au_writel(au_readl(0xac00002c) & ~0x4, 0xac00002c); au_sync(); } }
gpl-2.0
Victor-android/kernel_u8800
arch/sh/kernel/cpu/sh4a/clock-sh7785.c
566
3792
/* * arch/sh/kernel/cpu/sh4a/clock-sh7785.c * * SH7785 support for the clock framework * * Copyright (C) 2007 - 2009 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/cpufreq.h> #include <asm/clock.h> #include <asm/freq.h> #include <cpu/sh7785.h> /* * Default rate for the root input clock, reset this with clk_set_rate() * from the platform code. */ static struct clk extal_clk = { .name = "extal", .id = -1, .rate = 33333333, }; static unsigned long pll_recalc(struct clk *clk) { int multiplier; multiplier = test_mode_pin(MODE_PIN4) ? 36 : 72; return clk->parent->rate * multiplier; } static struct clk_ops pll_clk_ops = { .recalc = pll_recalc, }; static struct clk pll_clk = { .name = "pll_clk", .id = -1, .ops = &pll_clk_ops, .parent = &extal_clk, .flags = CLK_ENABLE_ON_INIT, }; static struct clk *clks[] = { &extal_clk, &pll_clk, }; static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18, 24, 32, 36, 48 }; static struct clk_div_mult_table div4_table = { .divisors = div2, .nr_divisors = ARRAY_SIZE(div2), }; enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_GA, DIV4_DU, DIV4_P, DIV4_NR }; #define DIV4(_str, _bit, _mask, _flags) \ SH_CLK_DIV4(_str, &pll_clk, FRQMR1, _bit, _mask, _flags) struct clk div4_clks[DIV4_NR] = { [DIV4_P] = DIV4("peripheral_clk", 0, 0x0f80, 0), [DIV4_DU] = DIV4("du_clk", 4, 0x0ff0, 0), [DIV4_GA] = DIV4("ga_clk", 8, 0x0030, 0), [DIV4_DDR] = DIV4("ddr_clk", 12, 0x000c, CLK_ENABLE_ON_INIT), [DIV4_B] = DIV4("bus_clk", 16, 0x0fe0, CLK_ENABLE_ON_INIT), [DIV4_SH] = DIV4("shyway_clk", 20, 0x000c, CLK_ENABLE_ON_INIT), [DIV4_U] = DIV4("umem_clk", 24, 0x000c, CLK_ENABLE_ON_INIT), [DIV4_I] = DIV4("cpu_clk", 28, 0x000e, CLK_ENABLE_ON_INIT), }; #define MSTPCR0 0xffc80030 #define MSTPCR1 0xffc80034 static struct clk 
mstp_clks[] = { /* MSTPCR0 */ SH_CLK_MSTP32("scif_fck", 5, &div4_clks[DIV4_P], MSTPCR0, 29, 0), SH_CLK_MSTP32("scif_fck", 4, &div4_clks[DIV4_P], MSTPCR0, 28, 0), SH_CLK_MSTP32("scif_fck", 3, &div4_clks[DIV4_P], MSTPCR0, 27, 0), SH_CLK_MSTP32("scif_fck", 2, &div4_clks[DIV4_P], MSTPCR0, 26, 0), SH_CLK_MSTP32("scif_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 25, 0), SH_CLK_MSTP32("scif_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 24, 0), SH_CLK_MSTP32("ssi_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 21, 0), SH_CLK_MSTP32("ssi_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 20, 0), SH_CLK_MSTP32("hac_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 17, 0), SH_CLK_MSTP32("hac_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 16, 0), SH_CLK_MSTP32("mmcif_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 13, 0), SH_CLK_MSTP32("flctl_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 12, 0), SH_CLK_MSTP32("tmu345_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 9, 0), SH_CLK_MSTP32("tmu012_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 8, 0), SH_CLK_MSTP32("siof_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 3, 0), SH_CLK_MSTP32("hspi_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 2, 0), /* MSTPCR1 */ SH_CLK_MSTP32("hudi_fck", -1, NULL, MSTPCR1, 19, 0), SH_CLK_MSTP32("ubc_fck", -1, NULL, MSTPCR1, 17, 0), SH_CLK_MSTP32("dmac_11_6_fck", -1, NULL, MSTPCR1, 5, 0), SH_CLK_MSTP32("dmac_5_0_fck", -1, NULL, MSTPCR1, 4, 0), SH_CLK_MSTP32("gdta_fck", -1, NULL, MSTPCR1, 0, 0), }; int __init arch_clk_init(void) { int i, ret = 0; for (i = 0; i < ARRAY_SIZE(clks); i++) ret |= clk_register(clks[i]); if (!ret) ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks), &div4_table); if (!ret) ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks)); return ret; }
gpl-2.0
atilag/android_kernel_samsung_smdk4412
drivers/rtc/rtc-mxc.c
1078
12503
/* * Copyright 2004-2008 Freescale Semiconductor, Inc. All Rights Reserved. * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License * Version 2 or later at the following locations: * * http://www.opensource.org/licenses/gpl-license.html * http://www.gnu.org/copyleft/gpl.html */ #include <linux/io.h> #include <linux/rtc.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <mach/hardware.h> #define RTC_INPUT_CLK_32768HZ (0x00 << 5) #define RTC_INPUT_CLK_32000HZ (0x01 << 5) #define RTC_INPUT_CLK_38400HZ (0x02 << 5) #define RTC_SW_BIT (1 << 0) #define RTC_ALM_BIT (1 << 2) #define RTC_1HZ_BIT (1 << 4) #define RTC_2HZ_BIT (1 << 7) #define RTC_SAM0_BIT (1 << 8) #define RTC_SAM1_BIT (1 << 9) #define RTC_SAM2_BIT (1 << 10) #define RTC_SAM3_BIT (1 << 11) #define RTC_SAM4_BIT (1 << 12) #define RTC_SAM5_BIT (1 << 13) #define RTC_SAM6_BIT (1 << 14) #define RTC_SAM7_BIT (1 << 15) #define PIT_ALL_ON (RTC_2HZ_BIT | RTC_SAM0_BIT | RTC_SAM1_BIT | \ RTC_SAM2_BIT | RTC_SAM3_BIT | RTC_SAM4_BIT | \ RTC_SAM5_BIT | RTC_SAM6_BIT | RTC_SAM7_BIT) #define RTC_ENABLE_BIT (1 << 7) #define MAX_PIE_NUM 9 #define MAX_PIE_FREQ 512 static const u32 PIE_BIT_DEF[MAX_PIE_NUM][2] = { { 2, RTC_2HZ_BIT }, { 4, RTC_SAM0_BIT }, { 8, RTC_SAM1_BIT }, { 16, RTC_SAM2_BIT }, { 32, RTC_SAM3_BIT }, { 64, RTC_SAM4_BIT }, { 128, RTC_SAM5_BIT }, { 256, RTC_SAM6_BIT }, { MAX_PIE_FREQ, RTC_SAM7_BIT }, }; #define MXC_RTC_TIME 0 #define MXC_RTC_ALARM 1 #define RTC_HOURMIN 0x00 /* 32bit rtc hour/min counter reg */ #define RTC_SECOND 0x04 /* 32bit rtc seconds counter reg */ #define RTC_ALRM_HM 0x08 /* 32bit rtc alarm hour/min reg */ #define RTC_ALRM_SEC 0x0C /* 32bit rtc alarm seconds reg */ #define RTC_RTCCTL 0x10 /* 32bit rtc control reg */ #define RTC_RTCISR 0x14 /* 32bit rtc interrupt status reg */ #define RTC_RTCIENR 0x18 /* 32bit rtc 
interrupt enable reg */ #define RTC_STPWCH 0x1C /* 32bit rtc stopwatch min reg */ #define RTC_DAYR 0x20 /* 32bit rtc days counter reg */ #define RTC_DAYALARM 0x24 /* 32bit rtc day alarm reg */ #define RTC_TEST1 0x28 /* 32bit rtc test reg 1 */ #define RTC_TEST2 0x2C /* 32bit rtc test reg 2 */ #define RTC_TEST3 0x30 /* 32bit rtc test reg 3 */ struct rtc_plat_data { struct rtc_device *rtc; void __iomem *ioaddr; int irq; struct clk *clk; struct rtc_time g_rtc_alarm; }; /* * This function is used to obtain the RTC time or the alarm value in * second. */ static u32 get_alarm_or_time(struct device *dev, int time_alarm) { struct platform_device *pdev = to_platform_device(dev); struct rtc_plat_data *pdata = platform_get_drvdata(pdev); void __iomem *ioaddr = pdata->ioaddr; u32 day = 0, hr = 0, min = 0, sec = 0, hr_min = 0; switch (time_alarm) { case MXC_RTC_TIME: day = readw(ioaddr + RTC_DAYR); hr_min = readw(ioaddr + RTC_HOURMIN); sec = readw(ioaddr + RTC_SECOND); break; case MXC_RTC_ALARM: day = readw(ioaddr + RTC_DAYALARM); hr_min = readw(ioaddr + RTC_ALRM_HM) & 0xffff; sec = readw(ioaddr + RTC_ALRM_SEC); break; } hr = hr_min >> 8; min = hr_min & 0xff; return (((day * 24 + hr) * 60) + min) * 60 + sec; } /* * This function sets the RTC alarm value or the time value. 
*/ static void set_alarm_or_time(struct device *dev, int time_alarm, u32 time) { u32 day, hr, min, sec, temp; struct platform_device *pdev = to_platform_device(dev); struct rtc_plat_data *pdata = platform_get_drvdata(pdev); void __iomem *ioaddr = pdata->ioaddr; day = time / 86400; time -= day * 86400; /* time is within a day now */ hr = time / 3600; time -= hr * 3600; /* time is within an hour now */ min = time / 60; sec = time - min * 60; temp = (hr << 8) + min; switch (time_alarm) { case MXC_RTC_TIME: writew(day, ioaddr + RTC_DAYR); writew(sec, ioaddr + RTC_SECOND); writew(temp, ioaddr + RTC_HOURMIN); break; case MXC_RTC_ALARM: writew(day, ioaddr + RTC_DAYALARM); writew(sec, ioaddr + RTC_ALRM_SEC); writew(temp, ioaddr + RTC_ALRM_HM); break; } } /* * This function updates the RTC alarm registers and then clears all the * interrupt status bits. */ static int rtc_update_alarm(struct device *dev, struct rtc_time *alrm) { struct rtc_time alarm_tm, now_tm; unsigned long now, time; int ret; struct platform_device *pdev = to_platform_device(dev); struct rtc_plat_data *pdata = platform_get_drvdata(pdev); void __iomem *ioaddr = pdata->ioaddr; now = get_alarm_or_time(dev, MXC_RTC_TIME); rtc_time_to_tm(now, &now_tm); alarm_tm.tm_year = now_tm.tm_year; alarm_tm.tm_mon = now_tm.tm_mon; alarm_tm.tm_mday = now_tm.tm_mday; alarm_tm.tm_hour = alrm->tm_hour; alarm_tm.tm_min = alrm->tm_min; alarm_tm.tm_sec = alrm->tm_sec; rtc_tm_to_time(&now_tm, &now); rtc_tm_to_time(&alarm_tm, &time); if (time < now) { time += 60 * 60 * 24; rtc_time_to_tm(time, &alarm_tm); } ret = rtc_tm_to_time(&alarm_tm, &time); /* clear all the interrupt status bits */ writew(readw(ioaddr + RTC_RTCISR), ioaddr + RTC_RTCISR); set_alarm_or_time(dev, MXC_RTC_ALARM, time); return ret; } /* This function is the RTC interrupt service routine. 
*/ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id) { struct platform_device *pdev = dev_id; struct rtc_plat_data *pdata = platform_get_drvdata(pdev); void __iomem *ioaddr = pdata->ioaddr; unsigned long flags; u32 status; u32 events = 0; spin_lock_irqsave(&pdata->rtc->irq_lock, flags); status = readw(ioaddr + RTC_RTCISR) & readw(ioaddr + RTC_RTCIENR); /* clear interrupt sources */ writew(status, ioaddr + RTC_RTCISR); /* clear alarm interrupt if it has occurred */ if (status & RTC_ALM_BIT) status &= ~RTC_ALM_BIT; /* update irq data & counter */ if (status & RTC_ALM_BIT) events |= (RTC_AF | RTC_IRQF); if (status & RTC_1HZ_BIT) events |= (RTC_UF | RTC_IRQF); if (status & PIT_ALL_ON) events |= (RTC_PF | RTC_IRQF); if ((status & RTC_ALM_BIT) && rtc_valid_tm(&pdata->g_rtc_alarm)) rtc_update_alarm(&pdev->dev, &pdata->g_rtc_alarm); rtc_update_irq(pdata->rtc, 1, events); spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags); return IRQ_HANDLED; } /* * Clear all interrupts and release the IRQ */ static void mxc_rtc_release(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct rtc_plat_data *pdata = platform_get_drvdata(pdev); void __iomem *ioaddr = pdata->ioaddr; spin_lock_irq(&pdata->rtc->irq_lock); /* Disable all rtc interrupts */ writew(0, ioaddr + RTC_RTCIENR); /* Clear all interrupt status */ writew(0xffffffff, ioaddr + RTC_RTCISR); spin_unlock_irq(&pdata->rtc->irq_lock); } static void mxc_rtc_irq_enable(struct device *dev, unsigned int bit, unsigned int enabled) { struct platform_device *pdev = to_platform_device(dev); struct rtc_plat_data *pdata = platform_get_drvdata(pdev); void __iomem *ioaddr = pdata->ioaddr; u32 reg; spin_lock_irq(&pdata->rtc->irq_lock); reg = readw(ioaddr + RTC_RTCIENR); if (enabled) reg |= bit; else reg &= ~bit; writew(reg, ioaddr + RTC_RTCIENR); spin_unlock_irq(&pdata->rtc->irq_lock); } static int mxc_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) { mxc_rtc_irq_enable(dev, RTC_ALM_BIT, 
enabled); return 0; } /* * This function reads the current RTC time into tm in Gregorian date. */ static int mxc_rtc_read_time(struct device *dev, struct rtc_time *tm) { u32 val; /* Avoid roll-over from reading the different registers */ do { val = get_alarm_or_time(dev, MXC_RTC_TIME); } while (val != get_alarm_or_time(dev, MXC_RTC_TIME)); rtc_time_to_tm(val, tm); return 0; } /* * This function sets the internal RTC time based on tm in Gregorian date. */ static int mxc_rtc_set_mmss(struct device *dev, unsigned long time) { /* Avoid roll-over from reading the different registers */ do { set_alarm_or_time(dev, MXC_RTC_TIME, time); } while (time != get_alarm_or_time(dev, MXC_RTC_TIME)); return 0; } /* * This function reads the current alarm value into the passed in 'alrm' * argument. It updates the alrm's pending field value based on the whether * an alarm interrupt occurs or not. */ static int mxc_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct platform_device *pdev = to_platform_device(dev); struct rtc_plat_data *pdata = platform_get_drvdata(pdev); void __iomem *ioaddr = pdata->ioaddr; rtc_time_to_tm(get_alarm_or_time(dev, MXC_RTC_ALARM), &alrm->time); alrm->pending = ((readw(ioaddr + RTC_RTCISR) & RTC_ALM_BIT)) ? 1 : 0; return 0; } /* * This function sets the RTC alarm based on passed in alrm. 
*/ static int mxc_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct platform_device *pdev = to_platform_device(dev); struct rtc_plat_data *pdata = platform_get_drvdata(pdev); int ret; if (rtc_valid_tm(&alrm->time)) { if (alrm->time.tm_sec > 59 || alrm->time.tm_hour > 23 || alrm->time.tm_min > 59) return -EINVAL; ret = rtc_update_alarm(dev, &alrm->time); } else { ret = rtc_valid_tm(&alrm->time); if (ret) return ret; ret = rtc_update_alarm(dev, &alrm->time); } if (ret) return ret; memcpy(&pdata->g_rtc_alarm, &alrm->time, sizeof(struct rtc_time)); mxc_rtc_irq_enable(dev, RTC_ALM_BIT, alrm->enabled); return 0; } /* RTC layer */ static struct rtc_class_ops mxc_rtc_ops = { .release = mxc_rtc_release, .read_time = mxc_rtc_read_time, .set_mmss = mxc_rtc_set_mmss, .read_alarm = mxc_rtc_read_alarm, .set_alarm = mxc_rtc_set_alarm, .alarm_irq_enable = mxc_rtc_alarm_irq_enable, }; static int __init mxc_rtc_probe(struct platform_device *pdev) { struct resource *res; struct rtc_device *rtc; struct rtc_plat_data *pdata = NULL; u32 reg; unsigned long rate; int ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), pdev->name)) return -EBUSY; pdata->ioaddr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); pdata->clk = clk_get(&pdev->dev, "rtc"); if (IS_ERR(pdata->clk)) { dev_err(&pdev->dev, "unable to get clock!\n"); ret = PTR_ERR(pdata->clk); goto exit_free_pdata; } clk_enable(pdata->clk); rate = clk_get_rate(pdata->clk); if (rate == 32768) reg = RTC_INPUT_CLK_32768HZ; else if (rate == 32000) reg = RTC_INPUT_CLK_32000HZ; else if (rate == 38400) reg = RTC_INPUT_CLK_38400HZ; else { dev_err(&pdev->dev, "rtc clock is not valid (%lu)\n", rate); ret = -EINVAL; goto exit_put_clk; } reg |= RTC_ENABLE_BIT; writew(reg, (pdata->ioaddr + RTC_RTCCTL)); if (((readw(pdata->ioaddr 
+ RTC_RTCCTL)) & RTC_ENABLE_BIT) == 0) { dev_err(&pdev->dev, "hardware module can't be enabled!\n"); ret = -EIO; goto exit_put_clk; } platform_set_drvdata(pdev, pdata); /* Configure and enable the RTC */ pdata->irq = platform_get_irq(pdev, 0); if (pdata->irq >= 0 && devm_request_irq(&pdev->dev, pdata->irq, mxc_rtc_interrupt, IRQF_SHARED, pdev->name, pdev) < 0) { dev_warn(&pdev->dev, "interrupt not available.\n"); pdata->irq = -1; } rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) { ret = PTR_ERR(rtc); goto exit_clr_drvdata; } pdata->rtc = rtc; return 0; exit_clr_drvdata: platform_set_drvdata(pdev, NULL); exit_put_clk: clk_disable(pdata->clk); clk_put(pdata->clk); exit_free_pdata: return ret; } static int __exit mxc_rtc_remove(struct platform_device *pdev) { struct rtc_plat_data *pdata = platform_get_drvdata(pdev); rtc_device_unregister(pdata->rtc); clk_disable(pdata->clk); clk_put(pdata->clk); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver mxc_rtc_driver = { .driver = { .name = "mxc_rtc", .owner = THIS_MODULE, }, .remove = __exit_p(mxc_rtc_remove), }; static int __init mxc_rtc_init(void) { return platform_driver_probe(&mxc_rtc_driver, mxc_rtc_probe); } static void __exit mxc_rtc_exit(void) { platform_driver_unregister(&mxc_rtc_driver); } module_init(mxc_rtc_init); module_exit(mxc_rtc_exit); MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); MODULE_DESCRIPTION("RTC driver for Freescale MXC"); MODULE_LICENSE("GPL");
gpl-2.0
engine95/navel-990
drivers/coresight/coresight-rpm-etm.c
1846
3340
/* Copyright (c) 2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/err.h> #include <linux/sysfs.h> #include <linux/of_coresight.h> #include <linux/coresight.h> struct rpm_etm_drvdata { struct device *dev; struct coresight_device *csdev; }; static int rpm_etm_enable(struct coresight_device *csdev) { struct rpm_etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); dev_info(drvdata->dev, "RPM ETM tracing enabled\n"); return 0; } static void rpm_etm_disable(struct coresight_device *csdev) { struct rpm_etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); dev_info(drvdata->dev, "RPM ETM tracing disabled\n"); } static const struct coresight_ops_source rpm_etm_source_ops = { .enable = rpm_etm_enable, .disable = rpm_etm_disable, }; static const struct coresight_ops rpm_cs_ops = { .source_ops = &rpm_etm_source_ops, }; static int rpm_etm_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct coresight_platform_data *pdata; struct rpm_etm_drvdata *drvdata; struct coresight_desc *desc; if (pdev->dev.of_node) { pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node); if (IS_ERR(pdata)) return PTR_ERR(pdata); pdev->dev.platform_data = pdata; } drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); if (!drvdata) return -ENOMEM; drvdata->dev = &pdev->dev; platform_set_drvdata(pdev, drvdata); desc = 
devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); if (!desc) return -ENOMEM; desc->type = CORESIGHT_DEV_TYPE_SOURCE; desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC; desc->ops = &rpm_cs_ops; desc->pdata = pdev->dev.platform_data; desc->dev = &pdev->dev; desc->owner = THIS_MODULE; drvdata->csdev = coresight_register(desc); if (IS_ERR(drvdata->csdev)) return PTR_ERR(drvdata->csdev); dev_info(dev, "RPM ETM initialized\n"); return 0; } static int rpm_etm_remove(struct platform_device *pdev) { struct rpm_etm_drvdata *drvdata = platform_get_drvdata(pdev); coresight_unregister(drvdata->csdev); return 0; } static struct of_device_id rpm_etm_match[] = { {.compatible = "qcom,coresight-rpm-etm"}, {} }; static struct platform_driver rpm_etm_driver = { .probe = rpm_etm_probe, .remove = rpm_etm_remove, .driver = { .name = "coresight-rpm-etm", .owner = THIS_MODULE, .of_match_table = rpm_etm_match, }, }; int __init rpm_etm_init(void) { return platform_driver_register(&rpm_etm_driver); } module_init(rpm_etm_init); void __exit rpm_etm_exit(void) { platform_driver_unregister(&rpm_etm_driver); } module_exit(rpm_etm_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("CoreSight RPM ETM driver");
gpl-2.0
ArtisteHsu/jetson-tk1-r21.3-kernel
arch/xtensa/mm/tlb.c
2102
3147
/*
 * arch/xtensa/mm/tlb.c
 *
 * Logic that manipulates the Xtensa MMU.  Derived from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2003 Tensilica Inc.
 *
 * Joe Taylor
 * Chris Zankel	<chris@zankel.net>
 * Marc Gauthier
 */

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/* Invalidate every auto-refill way/entry of the instruction TLB. */
static inline void __flush_itlb_all (void)
{
	int w, i;

	for (w = 0; w < ITLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_itlb_entry_no_isync(e);
		}
	}
	/* one isync after the whole sweep instead of per entry */
	asm volatile ("isync\n");
}

/* Invalidate every auto-refill way/entry of the data TLB. */
static inline void __flush_dtlb_all (void)
{
	int w, i;

	for (w = 0; w < DTLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_dtlb_entry_no_isync(e);
		}
	}
	/* one isync after the whole sweep instead of per entry */
	asm volatile ("isync\n");
}


void flush_tlb_all (void)
{
	__flush_itlb_all();
	__flush_dtlb_all();
}

/* If mm is current, we simply assign the current task a new ASID, thus,
 * invalidating all previous tlb entries.  If mm is someone else's user
 * mapping, we invalidate the context, thus, when that user mapping is
 * swapped in, a new context will be assigned to it.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm) {
		unsigned long flags;
		local_save_flags(flags);
		__get_new_mmu_context(mm);
		__load_mmu_context(mm);
		local_irq_restore(flags);
	}
	else
		mm->context = 0;
}

#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#if _ITLB_ENTRIES > _DTLB_ENTRIES
# define _TLB_ENTRIES _ITLB_ENTRIES
#else
# define _TLB_ENTRIES _DTLB_ENTRIES
#endif

/*
 * Flush a user address range.  For small ranges, invalidate the pages
 * individually under the mm's ASID; for ranges larger than the TLB
 * itself, fall back to flushing the whole mm.
 */
void flush_tlb_range (struct vm_area_struct *vma,
    		      unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	if (mm->context == NO_CONTEXT)
		return;

	local_save_flags(flags);

	if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
		int oldpid = get_rasid_register();
		set_rasid_register (ASID_INSERT(mm->context));
		start &= PAGE_MASK;
		if (vma->vm_flags & VM_EXEC)
			while(start < end) {
				invalidate_itlb_mapping(start);
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		else
			while(start < end) {
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		set_rasid_register(oldpid);
	} else {
		flush_tlb_mm(mm);
	}
	local_irq_restore(flags);
}

/*
 * Flush a single page.  ITLB is only touched for executable mappings;
 * the DTLB entry is always invalidated.
 */
void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct* mm = vma->vm_mm;
	unsigned long flags;
	int oldpid;

	if(mm->context == NO_CONTEXT)
		return;

	local_save_flags(flags);

	oldpid = get_rasid_register();

	if (vma->vm_flags & VM_EXEC)
		invalidate_itlb_mapping(page);
	invalidate_dtlb_mapping(page);

	set_rasid_register(oldpid);

	local_irq_restore(flags);
}
gpl-2.0
Nautical-Rom/android_kernel_moto_shamu
arch/arm/mach-mmp/pxa910.c
2102
4663
/*
 * linux/arch/arm/mach-mmp/pxa910.c
 *
 * Code specific to PXA910
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/platform_device.h>

#include <asm/hardware/cache-tauros2.h>
#include <asm/mach/time.h>
#include <mach/addr-map.h>
#include <mach/regs-apbc.h>
#include <mach/cputype.h>
#include <mach/irqs.h>
#include <mach/dma.h>
#include <mach/mfp.h>
#include <mach/devices.h>

#include "common.h"

/* Base of the MFP (multi-function pin) register block in APB space. */
#define MFPR_VIRT_BASE	(APB_VIRT_BASE + 0x1e000)

/* GPIO/function-pin name -> MFPR register offset map for PXA910. */
static struct mfp_addr_map pxa910_mfp_addr_map[] __initdata =
{
	MFP_ADDR_X(GPIO0, GPIO54, 0xdc),
	MFP_ADDR_X(GPIO67, GPIO98, 0x1b8),
	MFP_ADDR_X(GPIO100, GPIO109, 0x238),
	MFP_ADDR(GPIO123, 0xcc),
	MFP_ADDR(GPIO124, 0xd0),

	MFP_ADDR(DF_IO0, 0x40),
	MFP_ADDR(DF_IO1, 0x3c),
	MFP_ADDR(DF_IO2, 0x38),
	MFP_ADDR(DF_IO3, 0x34),
	MFP_ADDR(DF_IO4, 0x30),
	MFP_ADDR(DF_IO5, 0x2c),
	MFP_ADDR(DF_IO6, 0x28),
	MFP_ADDR(DF_IO7, 0x24),
	MFP_ADDR(DF_IO8, 0x20),
	MFP_ADDR(DF_IO9, 0x1c),
	MFP_ADDR(DF_IO10, 0x18),
	MFP_ADDR(DF_IO11, 0x14),
	MFP_ADDR(DF_IO12, 0x10),
	MFP_ADDR(DF_IO13, 0xc),
	MFP_ADDR(DF_IO14, 0x8),
	MFP_ADDR(DF_IO15, 0x4),

	MFP_ADDR(DF_nCS0_SM_nCS2, 0x44),
	MFP_ADDR(DF_nCS1_SM_nCS3, 0x48),
	MFP_ADDR(SM_nCS0, 0x4c),
	MFP_ADDR(SM_nCS1, 0x50),
	MFP_ADDR(DF_WEn, 0x54),
	MFP_ADDR(DF_REn, 0x58),
	MFP_ADDR(DF_CLE_SM_OEn, 0x5c),
	MFP_ADDR(DF_ALE_SM_WEn, 0x60),
	MFP_ADDR(SM_SCLK, 0x64),
	MFP_ADDR(DF_RDY0, 0x68),
	MFP_ADDR(SM_BE0, 0x6c),
	MFP_ADDR(SM_BE1, 0x70),
	MFP_ADDR(SM_ADV, 0x74),
	MFP_ADDR(DF_RDY1, 0x78),
	MFP_ADDR(SM_ADVMUX, 0x7c),
	MFP_ADDR(SM_RDY, 0x80),

	MFP_ADDR_X(MMC1_DAT7, MMC1_WP, 0x84),

	MFP_ADDR_END,
};

/* Entry point used by board files: bring up the interrupt controller. */
void __init pxa910_init_irq(void)
{
	icu_init_irq();
}

/*
 * SoC-level init: L2 cache (if configured), MFP tables, DMA and clocks.
 * Runs for every kernel built with this file; the cpu_is_pxa910() guard
 * makes it a no-op on other MMP variants.
 */
static int __init pxa910_init(void)
{
	if (cpu_is_pxa910()) {
#ifdef CONFIG_CACHE_TAUROS2
		tauros2_init(0);
#endif
		mfp_init_base(MFPR_VIRT_BASE);
		mfp_init_addr(pxa910_mfp_addr_map);
		pxa_init_dma(IRQ_PXA910_DMA_INT0, 32);
		pxa910_clk_init();
	}

	return 0;
}
postcore_initcall(pxa910_init);

/* system timer - clock enabled, 3.25MHz */
#define TIMER_CLK_RST	(APBC_APBCLK | APBC_FNCLK | APBC_FNCLKSEL(3))
#define APBC_TIMERS	APBC_REG(0x34)

void __init pxa910_timer_init(void)
{
	/* reset and configure */
	__raw_writel(APBC_APBCLK | APBC_RST, APBC_TIMERS);
	__raw_writel(TIMER_CLK_RST, APBC_TIMERS);

	timer_init(IRQ_PXA910_AP1_TIMER1);
}

/* on-chip devices */

/* NOTE: there are totally 3 UARTs on PXA910:
 *
 *   UART1   - Slow UART (can be used both by AP and CP)
 *   UART2/3 - Fast UART
 *
 * To be backward compatible with the legacy FFUART/BTUART/STUART sequence,
 * they are re-ordered as:
 *
 *   pxa910_device_uart1 - UART2 as FFUART
 *   pxa910_device_uart2 - UART3 as BTUART
 *
 * UART1 is not used by AP for the moment.
 */
PXA910_DEVICE(uart1, "pxa2xx-uart", 0, UART2, 0xd4017000, 0x30, 21, 22);
PXA910_DEVICE(uart2, "pxa2xx-uart", 1, UART3, 0xd4018000, 0x30, 23, 24);
PXA910_DEVICE(twsi0, "pxa2xx-i2c", 0, TWSI0, 0xd4011000, 0x28);
PXA910_DEVICE(twsi1, "pxa2xx-i2c", 1, TWSI1, 0xd4025000, 0x28);
PXA910_DEVICE(pwm1, "pxa910-pwm", 0, NONE, 0xd401a000, 0x10);
PXA910_DEVICE(pwm2, "pxa910-pwm", 1, NONE, 0xd401a400, 0x10);
PXA910_DEVICE(pwm3, "pxa910-pwm", 2, NONE, 0xd401a800, 0x10);
PXA910_DEVICE(pwm4, "pxa910-pwm", 3, NONE, 0xd401ac00, 0x10);
PXA910_DEVICE(nand, "pxa3xx-nand", -1, NAND, 0xd4283000, 0x80, 97, 99);
PXA910_DEVICE(disp, "mmp-disp", 0, LCD, 0xd420b000, 0x1ec);
PXA910_DEVICE(fb, "mmp-fb", -1, NONE, 0, 0);
PXA910_DEVICE(panel, "tpo-hvga", -1, NONE, 0, 0);

/* GPIO controller: register window plus the muxed GPIO interrupt.
 * Not static: referenced by board files.
 */
struct resource pxa910_resource_gpio[] = {
	{
		.start	= 0xd4019000,
		.end	= 0xd4019fff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= IRQ_PXA910_AP_GPIO,
		.end	= IRQ_PXA910_AP_GPIO,
		.name	= "gpio_mux",
		.flags	= IORESOURCE_IRQ,
	},
};

struct platform_device pxa910_device_gpio = {
	.name		= "mmp-gpio",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(pxa910_resource_gpio),
	.resource	= pxa910_resource_gpio,
};

/* RTC: register window plus the 1Hz tick and alarm interrupt lines. */
static struct resource pxa910_resource_rtc[] = {
	{
		.start	= 0xd4010000,
		.end	= 0xd401003f,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= IRQ_PXA910_RTC_INT,
		.end	= IRQ_PXA910_RTC_INT,
		.name	= "rtc 1Hz",
		.flags	= IORESOURCE_IRQ,
	}, {
		.start	= IRQ_PXA910_RTC_ALARM,
		.end	= IRQ_PXA910_RTC_ALARM,
		.name	= "rtc alarm",
		.flags	= IORESOURCE_IRQ,
	},
};

struct platform_device pxa910_device_rtc = {
	.name		= "sa1100-rtc",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(pxa910_resource_rtc),
	.resource	= pxa910_resource_rtc,
};
gpl-2.0
varunchitre15/thunderzap_m9
arch/arm/mach-mmp/mmp-dt.c
2102
2623
/*
 * linux/arch/arm/mach-mmp/mmp-dt.c
 *
 * Copyright (C) 2012 Marvell Technology Group Ltd.
 * Author: Haojian Zhuang <haojian.zhuang@marvell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include <mach/irqs.h>

#include "common.h"

extern void __init mmp_dt_irq_init(void);
extern void __init mmp_dt_init_timer(void);

/* DT compatible -> legacy platform-device-name bindings for PXA168. */
static const struct of_dev_auxdata pxa168_auxdata_lookup[] __initconst = {
	OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4017000, "pxa2xx-uart.0", NULL),
	OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4018000, "pxa2xx-uart.1", NULL),
	OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4026000, "pxa2xx-uart.2", NULL),
	OF_DEV_AUXDATA("mrvl,mmp-twsi", 0xd4011000, "pxa2xx-i2c.0", NULL),
	OF_DEV_AUXDATA("mrvl,mmp-twsi", 0xd4025000, "pxa2xx-i2c.1", NULL),
	OF_DEV_AUXDATA("marvell,mmp-gpio", 0xd4019000, "mmp-gpio", NULL),
	OF_DEV_AUXDATA("mrvl,mmp-rtc", 0xd4010000, "sa1100-rtc", NULL),
	{}
};

/* Same bindings for PXA910; note UART2/TWSI1 live at different addresses. */
static const struct of_dev_auxdata pxa910_auxdata_lookup[] __initconst = {
	OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4017000, "pxa2xx-uart.0", NULL),
	OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4018000, "pxa2xx-uart.1", NULL),
	OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4036000, "pxa2xx-uart.2", NULL),
	OF_DEV_AUXDATA("mrvl,mmp-twsi", 0xd4011000, "pxa2xx-i2c.0", NULL),
	OF_DEV_AUXDATA("mrvl,mmp-twsi", 0xd4037000, "pxa2xx-i2c.1", NULL),
	OF_DEV_AUXDATA("marvell,mmp-gpio", 0xd4019000, "mmp-gpio", NULL),
	OF_DEV_AUXDATA("mrvl,mmp-rtc", 0xd4010000, "sa1100-rtc", NULL),
	{}
};

/* Populate platform devices from the device tree (PXA168 boards). */
static void __init pxa168_dt_init(void)
{
	of_platform_populate(NULL, of_default_bus_match_table,
			     pxa168_auxdata_lookup, NULL);
}

/* Populate platform devices from the device tree (PXA910 boards). */
static void __init pxa910_dt_init(void)
{
	of_platform_populate(NULL, of_default_bus_match_table,
			     pxa910_auxdata_lookup, NULL);
}

/* Shared by both machine descriptors below. */
static const char *mmp_dt_board_compat[] __initdata = {
	"mrvl,pxa168-aspenite",
	"mrvl,pxa910-dkb",
	NULL,
};

DT_MACHINE_START(PXA168_DT, "Marvell PXA168 (Device Tree Support)")
	.map_io		= mmp_map_io,
	.init_irq	= mmp_dt_irq_init,
	.init_time	= mmp_dt_init_timer,
	.init_machine	= pxa168_dt_init,
	.dt_compat	= mmp_dt_board_compat,
MACHINE_END

DT_MACHINE_START(PXA910_DT, "Marvell PXA910 (Device Tree Support)")
	.map_io		= mmp_map_io,
	.init_irq	= mmp_dt_irq_init,
	.init_time	= mmp_dt_init_timer,
	.init_machine	= pxa910_dt_init,
	.dt_compat	= mmp_dt_board_compat,
MACHINE_END
gpl-2.0
JoinTheRealms/TF700-dualboot-stockbased
drivers/usb/otg/fsl_otg.c
2870
28145
/* * Copyright (C) 2007,2008 Freescale semiconductor, Inc. * * Author: Li Yang <LeoLi@freescale.com> * Jerry Huang <Chang-Ming.Huang@freescale.com> * * Initialization based on code from Shlomi Gridish. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/proc_fs.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/timer.h> #include <linux/usb.h> #include <linux/device.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include <linux/workqueue.h> #include <linux/time.h> #include <linux/fsl_devices.h> #include <linux/platform_device.h> #include <linux/uaccess.h> #include <asm/unaligned.h> #include "fsl_otg.h" #define DRIVER_VERSION "Rev. 
1.55" #define DRIVER_AUTHOR "Jerry Huang/Li Yang" #define DRIVER_DESC "Freescale USB OTG Transceiver Driver" #define DRIVER_INFO DRIVER_DESC " " DRIVER_VERSION static const char driver_name[] = "fsl-usb2-otg"; const pm_message_t otg_suspend_state = { .event = 1, }; #define HA_DATA_PULSE static struct usb_dr_mmap *usb_dr_regs; static struct fsl_otg *fsl_otg_dev; static int srp_wait_done; /* FSM timers */ struct fsl_otg_timer *a_wait_vrise_tmr, *a_wait_bcon_tmr, *a_aidl_bdis_tmr, *b_ase0_brst_tmr, *b_se0_srp_tmr; /* Driver specific timers */ struct fsl_otg_timer *b_data_pulse_tmr, *b_vbus_pulse_tmr, *b_srp_fail_tmr, *b_srp_wait_tmr, *a_wait_enum_tmr; static struct list_head active_timers; static struct fsl_otg_config fsl_otg_initdata = { .otg_port = 1, }; #ifdef CONFIG_PPC32 static u32 _fsl_readl_be(const unsigned __iomem *p) { return in_be32(p); } static u32 _fsl_readl_le(const unsigned __iomem *p) { return in_le32(p); } static void _fsl_writel_be(u32 v, unsigned __iomem *p) { out_be32(p, v); } static void _fsl_writel_le(u32 v, unsigned __iomem *p) { out_le32(p, v); } static u32 (*_fsl_readl)(const unsigned __iomem *p); static void (*_fsl_writel)(u32 v, unsigned __iomem *p); #define fsl_readl(p) (*_fsl_readl)((p)) #define fsl_writel(v, p) (*_fsl_writel)((v), (p)) #else #define fsl_readl(addr) readl(addr) #define fsl_writel(val, addr) writel(val, addr) #endif /* CONFIG_PPC32 */ /* Routines to access transceiver ULPI registers */ u8 view_ulpi(u8 addr) { u32 temp; temp = 0x40000000 | (addr << 16); fsl_writel(temp, &usb_dr_regs->ulpiview); udelay(1000); while (temp & 0x40) temp = fsl_readl(&usb_dr_regs->ulpiview); return (le32_to_cpu(temp) & 0x0000ff00) >> 8; } int write_ulpi(u8 addr, u8 data) { u32 temp; temp = 0x60000000 | (addr << 16) | data; fsl_writel(temp, &usb_dr_regs->ulpiview); return 0; } /* -------------------------------------------------------------*/ /* Operations that will be called from OTG Finite State Machine */ /* Charge vbus for vbus pulsing in SRP 
*/ void fsl_otg_chrg_vbus(int on) { u32 tmp; tmp = fsl_readl(&usb_dr_regs->otgsc) & ~OTGSC_INTSTS_MASK; if (on) /* stop discharging, start charging */ tmp = (tmp & ~OTGSC_CTRL_VBUS_DISCHARGE) | OTGSC_CTRL_VBUS_CHARGE; else /* stop charging */ tmp &= ~OTGSC_CTRL_VBUS_CHARGE; fsl_writel(tmp, &usb_dr_regs->otgsc); } /* Discharge vbus through a resistor to ground */ void fsl_otg_dischrg_vbus(int on) { u32 tmp; tmp = fsl_readl(&usb_dr_regs->otgsc) & ~OTGSC_INTSTS_MASK; if (on) /* stop charging, start discharging */ tmp = (tmp & ~OTGSC_CTRL_VBUS_CHARGE) | OTGSC_CTRL_VBUS_DISCHARGE; else /* stop discharging */ tmp &= ~OTGSC_CTRL_VBUS_DISCHARGE; fsl_writel(tmp, &usb_dr_regs->otgsc); } /* A-device driver vbus, controlled through PP bit in PORTSC */ void fsl_otg_drv_vbus(int on) { u32 tmp; if (on) { tmp = fsl_readl(&usb_dr_regs->portsc) & ~PORTSC_W1C_BITS; fsl_writel(tmp | PORTSC_PORT_POWER, &usb_dr_regs->portsc); } else { tmp = fsl_readl(&usb_dr_regs->portsc) & ~PORTSC_W1C_BITS & ~PORTSC_PORT_POWER; fsl_writel(tmp, &usb_dr_regs->portsc); } } /* * Pull-up D+, signalling connect by periperal. Also used in * data-line pulsing in SRP */ void fsl_otg_loc_conn(int on) { u32 tmp; tmp = fsl_readl(&usb_dr_regs->otgsc) & ~OTGSC_INTSTS_MASK; if (on) tmp |= OTGSC_CTRL_DATA_PULSING; else tmp &= ~OTGSC_CTRL_DATA_PULSING; fsl_writel(tmp, &usb_dr_regs->otgsc); } /* * Generate SOF by host. This is controlled through suspend/resume the * port. In host mode, controller will automatically send SOF. * Suspend will block the data on the port. */ void fsl_otg_loc_sof(int on) { u32 tmp; tmp = fsl_readl(&fsl_otg_dev->dr_mem_map->portsc) & ~PORTSC_W1C_BITS; if (on) tmp |= PORTSC_PORT_FORCE_RESUME; else tmp |= PORTSC_PORT_SUSPEND; fsl_writel(tmp, &fsl_otg_dev->dr_mem_map->portsc); } /* Start SRP pulsing by data-line pulsing, followed with v-bus pulsing. 
*/ void fsl_otg_start_pulse(void) { u32 tmp; srp_wait_done = 0; #ifdef HA_DATA_PULSE tmp = fsl_readl(&usb_dr_regs->otgsc) & ~OTGSC_INTSTS_MASK; tmp |= OTGSC_HA_DATA_PULSE; fsl_writel(tmp, &usb_dr_regs->otgsc); #else fsl_otg_loc_conn(1); #endif fsl_otg_add_timer(b_data_pulse_tmr); } void b_data_pulse_end(unsigned long foo) { #ifdef HA_DATA_PULSE #else fsl_otg_loc_conn(0); #endif /* Do VBUS pulse after data pulse */ fsl_otg_pulse_vbus(); } void fsl_otg_pulse_vbus(void) { srp_wait_done = 0; fsl_otg_chrg_vbus(1); /* start the timer to end vbus charge */ fsl_otg_add_timer(b_vbus_pulse_tmr); } void b_vbus_pulse_end(unsigned long foo) { fsl_otg_chrg_vbus(0); /* * As USB3300 using the same a_sess_vld and b_sess_vld voltage * we need to discharge the bus for a while to distinguish * residual voltage of vbus pulsing and A device pull up */ fsl_otg_dischrg_vbus(1); fsl_otg_add_timer(b_srp_wait_tmr); } void b_srp_end(unsigned long foo) { fsl_otg_dischrg_vbus(0); srp_wait_done = 1; if ((fsl_otg_dev->otg.state == OTG_STATE_B_SRP_INIT) && fsl_otg_dev->fsm.b_sess_vld) fsl_otg_dev->fsm.b_srp_done = 1; } /* * Workaround for a_host suspending too fast. When a_bus_req=0, * a_host will start by SRP. 
It needs to set b_hnp_enable before * actually suspending to start HNP */ void a_wait_enum(unsigned long foo) { VDBG("a_wait_enum timeout\n"); if (!fsl_otg_dev->otg.host->b_hnp_enable) fsl_otg_add_timer(a_wait_enum_tmr); else otg_statemachine(&fsl_otg_dev->fsm); } /* The timeout callback function to set time out bit */ void set_tmout(unsigned long indicator) { *(int *)indicator = 1; } /* Initialize timers */ int fsl_otg_init_timers(struct otg_fsm *fsm) { /* FSM used timers */ a_wait_vrise_tmr = otg_timer_initializer(&set_tmout, TA_WAIT_VRISE, (unsigned long)&fsm->a_wait_vrise_tmout); if (!a_wait_vrise_tmr) return -ENOMEM; a_wait_bcon_tmr = otg_timer_initializer(&set_tmout, TA_WAIT_BCON, (unsigned long)&fsm->a_wait_bcon_tmout); if (!a_wait_bcon_tmr) return -ENOMEM; a_aidl_bdis_tmr = otg_timer_initializer(&set_tmout, TA_AIDL_BDIS, (unsigned long)&fsm->a_aidl_bdis_tmout); if (!a_aidl_bdis_tmr) return -ENOMEM; b_ase0_brst_tmr = otg_timer_initializer(&set_tmout, TB_ASE0_BRST, (unsigned long)&fsm->b_ase0_brst_tmout); if (!b_ase0_brst_tmr) return -ENOMEM; b_se0_srp_tmr = otg_timer_initializer(&set_tmout, TB_SE0_SRP, (unsigned long)&fsm->b_se0_srp); if (!b_se0_srp_tmr) return -ENOMEM; b_srp_fail_tmr = otg_timer_initializer(&set_tmout, TB_SRP_FAIL, (unsigned long)&fsm->b_srp_done); if (!b_srp_fail_tmr) return -ENOMEM; a_wait_enum_tmr = otg_timer_initializer(&a_wait_enum, 10, (unsigned long)&fsm); if (!a_wait_enum_tmr) return -ENOMEM; /* device driver used timers */ b_srp_wait_tmr = otg_timer_initializer(&b_srp_end, TB_SRP_WAIT, 0); if (!b_srp_wait_tmr) return -ENOMEM; b_data_pulse_tmr = otg_timer_initializer(&b_data_pulse_end, TB_DATA_PLS, 0); if (!b_data_pulse_tmr) return -ENOMEM; b_vbus_pulse_tmr = otg_timer_initializer(&b_vbus_pulse_end, TB_VBUS_PLS, 0); if (!b_vbus_pulse_tmr) return -ENOMEM; return 0; } /* Uninitialize timers */ void fsl_otg_uninit_timers(void) { /* FSM used timers */ if (a_wait_vrise_tmr != NULL) kfree(a_wait_vrise_tmr); if (a_wait_bcon_tmr != NULL) 
kfree(a_wait_bcon_tmr); if (a_aidl_bdis_tmr != NULL) kfree(a_aidl_bdis_tmr); if (b_ase0_brst_tmr != NULL) kfree(b_ase0_brst_tmr); if (b_se0_srp_tmr != NULL) kfree(b_se0_srp_tmr); if (b_srp_fail_tmr != NULL) kfree(b_srp_fail_tmr); if (a_wait_enum_tmr != NULL) kfree(a_wait_enum_tmr); /* device driver used timers */ if (b_srp_wait_tmr != NULL) kfree(b_srp_wait_tmr); if (b_data_pulse_tmr != NULL) kfree(b_data_pulse_tmr); if (b_vbus_pulse_tmr != NULL) kfree(b_vbus_pulse_tmr); } /* Add timer to timer list */ void fsl_otg_add_timer(void *gtimer) { struct fsl_otg_timer *timer = gtimer; struct fsl_otg_timer *tmp_timer; /* * Check if the timer is already in the active list, * if so update timer count */ list_for_each_entry(tmp_timer, &active_timers, list) if (tmp_timer == timer) { timer->count = timer->expires; return; } timer->count = timer->expires; list_add_tail(&timer->list, &active_timers); } /* Remove timer from the timer list; clear timeout status */ void fsl_otg_del_timer(void *gtimer) { struct fsl_otg_timer *timer = gtimer; struct fsl_otg_timer *tmp_timer, *del_tmp; list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list) if (tmp_timer == timer) list_del(&timer->list); } /* * Reduce timer count by 1, and find timeout conditions. 
* Called by fsl_otg 1ms timer interrupt */ int fsl_otg_tick_timer(void) { struct fsl_otg_timer *tmp_timer, *del_tmp; int expired = 0; list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list) { tmp_timer->count--; /* check if timer expires */ if (!tmp_timer->count) { list_del(&tmp_timer->list); tmp_timer->function(tmp_timer->data); expired = 1; } } return expired; } /* Reset controller, not reset the bus */ void otg_reset_controller(void) { u32 command; command = fsl_readl(&usb_dr_regs->usbcmd); command |= (1 << 1); fsl_writel(command, &usb_dr_regs->usbcmd); while (fsl_readl(&usb_dr_regs->usbcmd) & (1 << 1)) ; } /* Call suspend/resume routines in host driver */ int fsl_otg_start_host(struct otg_fsm *fsm, int on) { struct otg_transceiver *xceiv = fsm->transceiver; struct device *dev; struct fsl_otg *otg_dev = container_of(xceiv, struct fsl_otg, otg); u32 retval = 0; if (!xceiv->host) return -ENODEV; dev = xceiv->host->controller; /* * Update a_vbus_vld state as a_vbus_vld int is disabled * in device mode */ fsm->a_vbus_vld = !!(fsl_readl(&usb_dr_regs->otgsc) & OTGSC_STS_A_VBUS_VALID); if (on) { /* start fsl usb host controller */ if (otg_dev->host_working) goto end; else { otg_reset_controller(); VDBG("host on......\n"); if (dev->driver->pm && dev->driver->pm->resume) { retval = dev->driver->pm->resume(dev); if (fsm->id) { /* default-b */ fsl_otg_drv_vbus(1); /* * Workaround: b_host can't driver * vbus, but PP in PORTSC needs to * be 1 for host to work. * So we set drv_vbus bit in * transceiver to 0 thru ULPI. 
*/ write_ulpi(0x0c, 0x20); } } otg_dev->host_working = 1; } } else { /* stop fsl usb host controller */ if (!otg_dev->host_working) goto end; else { VDBG("host off......\n"); if (dev && dev->driver) { if (dev->driver->pm && dev->driver->pm->suspend) retval = dev->driver->pm->suspend(dev); if (fsm->id) /* default-b */ fsl_otg_drv_vbus(0); } otg_dev->host_working = 0; } } end: return retval; } /* * Call suspend and resume function in udc driver * to stop and start udc driver. */ int fsl_otg_start_gadget(struct otg_fsm *fsm, int on) { struct otg_transceiver *xceiv = fsm->transceiver; struct device *dev; if (!xceiv->gadget || !xceiv->gadget->dev.parent) return -ENODEV; VDBG("gadget %s\n", on ? "on" : "off"); dev = xceiv->gadget->dev.parent; if (on) { if (dev->driver->resume) dev->driver->resume(dev); } else { if (dev->driver->suspend) dev->driver->suspend(dev, otg_suspend_state); } return 0; } /* * Called by initialization code of host driver. Register host controller * to the OTG. Suspend host for OTG role detection. */ static int fsl_otg_set_host(struct otg_transceiver *otg_p, struct usb_bus *host) { struct fsl_otg *otg_dev = container_of(otg_p, struct fsl_otg, otg); if (!otg_p || otg_dev != fsl_otg_dev) return -ENODEV; otg_p->host = host; otg_dev->fsm.a_bus_drop = 0; otg_dev->fsm.a_bus_req = 1; if (host) { VDBG("host off......\n"); otg_p->host->otg_port = fsl_otg_initdata.otg_port; otg_p->host->is_b_host = otg_dev->fsm.id; /* * must leave time for khubd to finish its thing * before yanking the host driver out from under it, * so suspend the host after a short delay. 
*/ otg_dev->host_working = 1; schedule_delayed_work(&otg_dev->otg_event, 100); return 0; } else { /* host driver going away */ if (!(fsl_readl(&otg_dev->dr_mem_map->otgsc) & OTGSC_STS_USB_ID)) { /* Mini-A cable connected */ struct otg_fsm *fsm = &otg_dev->fsm; otg_p->state = OTG_STATE_UNDEFINED; fsm->protocol = PROTO_UNDEF; } } otg_dev->host_working = 0; otg_statemachine(&otg_dev->fsm); return 0; } /* Called by initialization code of udc. Register udc to OTG. */ static int fsl_otg_set_peripheral(struct otg_transceiver *otg_p, struct usb_gadget *gadget) { struct fsl_otg *otg_dev = container_of(otg_p, struct fsl_otg, otg); VDBG("otg_dev 0x%x\n", (int)otg_dev); VDBG("fsl_otg_dev 0x%x\n", (int)fsl_otg_dev); if (!otg_p || otg_dev != fsl_otg_dev) return -ENODEV; if (!gadget) { if (!otg_dev->otg.default_a) otg_p->gadget->ops->vbus_draw(otg_p->gadget, 0); usb_gadget_vbus_disconnect(otg_dev->otg.gadget); otg_dev->otg.gadget = 0; otg_dev->fsm.b_bus_req = 0; otg_statemachine(&otg_dev->fsm); return 0; } otg_p->gadget = gadget; otg_p->gadget->is_a_peripheral = !otg_dev->fsm.id; otg_dev->fsm.b_bus_req = 1; /* start the gadget right away if the ID pin says Mini-B */ DBG("ID pin=%d\n", otg_dev->fsm.id); if (otg_dev->fsm.id == 1) { fsl_otg_start_host(&otg_dev->fsm, 0); otg_drv_vbus(&otg_dev->fsm, 0); fsl_otg_start_gadget(&otg_dev->fsm, 1); } return 0; } /* Set OTG port power, only for B-device */ static int fsl_otg_set_power(struct otg_transceiver *otg_p, unsigned mA) { if (!fsl_otg_dev) return -ENODEV; if (otg_p->state == OTG_STATE_B_PERIPHERAL) pr_info("FSL OTG: Draw %d mA\n", mA); return 0; } /* * Delayed pin detect interrupt processing. * * When the Mini-A cable is disconnected from the board, * the pin-detect interrupt happens before the disconnnect * interrupts for the connected device(s). In order to * process the disconnect interrupt(s) prior to switching * roles, the pin-detect interrupts are delayed, and handled * by this routine. 
*/ static void fsl_otg_event(struct work_struct *work) { struct fsl_otg *og = container_of(work, struct fsl_otg, otg_event.work); struct otg_fsm *fsm = &og->fsm; if (fsm->id) { /* switch to gadget */ fsl_otg_start_host(fsm, 0); otg_drv_vbus(fsm, 0); fsl_otg_start_gadget(fsm, 1); } } /* B-device start SRP */ static int fsl_otg_start_srp(struct otg_transceiver *otg_p) { struct fsl_otg *otg_dev = container_of(otg_p, struct fsl_otg, otg); if (!otg_p || otg_dev != fsl_otg_dev || otg_p->state != OTG_STATE_B_IDLE) return -ENODEV; otg_dev->fsm.b_bus_req = 1; otg_statemachine(&otg_dev->fsm); return 0; } /* A_host suspend will call this function to start hnp */ static int fsl_otg_start_hnp(struct otg_transceiver *otg_p) { struct fsl_otg *otg_dev = container_of(otg_p, struct fsl_otg, otg); if (!otg_p || otg_dev != fsl_otg_dev) return -ENODEV; DBG("start_hnp...n"); /* clear a_bus_req to enter a_suspend state */ otg_dev->fsm.a_bus_req = 0; otg_statemachine(&otg_dev->fsm); return 0; } /* * Interrupt handler. OTG/host/peripheral share the same int line. * OTG driver clears OTGSC interrupts and leaves USB interrupts * intact. It needs to have knowledge of some USB interrupts * such as port change. */ irqreturn_t fsl_otg_isr(int irq, void *dev_id) { struct otg_fsm *fsm = &((struct fsl_otg *)dev_id)->fsm; struct otg_transceiver *otg = &((struct fsl_otg *)dev_id)->otg; u32 otg_int_src, otg_sc; otg_sc = fsl_readl(&usb_dr_regs->otgsc); otg_int_src = otg_sc & OTGSC_INTSTS_MASK & (otg_sc >> 8); /* Only clear otg interrupts */ fsl_writel(otg_sc, &usb_dr_regs->otgsc); /*FIXME: ID change not generate when init to 0 */ fsm->id = (otg_sc & OTGSC_STS_USB_ID) ? 1 : 0; otg->default_a = (fsm->id == 0); /* process OTG interrupts */ if (otg_int_src) { if (otg_int_src & OTGSC_INTSTS_USB_ID) { fsm->id = (otg_sc & OTGSC_STS_USB_ID) ? 
1 : 0; otg->default_a = (fsm->id == 0); /* clear conn information */ if (fsm->id) fsm->b_conn = 0; else fsm->a_conn = 0; if (otg->host) otg->host->is_b_host = fsm->id; if (otg->gadget) otg->gadget->is_a_peripheral = !fsm->id; VDBG("ID int (ID is %d)\n", fsm->id); if (fsm->id) { /* switch to gadget */ schedule_delayed_work( &((struct fsl_otg *)dev_id)->otg_event, 100); } else { /* switch to host */ cancel_delayed_work(& ((struct fsl_otg *)dev_id)-> otg_event); fsl_otg_start_gadget(fsm, 0); otg_drv_vbus(fsm, 1); fsl_otg_start_host(fsm, 1); } return IRQ_HANDLED; } } return IRQ_NONE; } static struct otg_fsm_ops fsl_otg_ops = { .chrg_vbus = fsl_otg_chrg_vbus, .drv_vbus = fsl_otg_drv_vbus, .loc_conn = fsl_otg_loc_conn, .loc_sof = fsl_otg_loc_sof, .start_pulse = fsl_otg_start_pulse, .add_timer = fsl_otg_add_timer, .del_timer = fsl_otg_del_timer, .start_host = fsl_otg_start_host, .start_gadget = fsl_otg_start_gadget, }; /* Initialize the global variable fsl_otg_dev and request IRQ for OTG */ static int fsl_otg_conf(struct platform_device *pdev) { struct fsl_otg *fsl_otg_tc; int status; if (fsl_otg_dev) return 0; /* allocate space to fsl otg device */ fsl_otg_tc = kzalloc(sizeof(struct fsl_otg), GFP_KERNEL); if (!fsl_otg_tc) return -ENOMEM; INIT_DELAYED_WORK(&fsl_otg_tc->otg_event, fsl_otg_event); INIT_LIST_HEAD(&active_timers); status = fsl_otg_init_timers(&fsl_otg_tc->fsm); if (status) { pr_info("Couldn't init OTG timers\n"); goto err; } spin_lock_init(&fsl_otg_tc->fsm.lock); /* Set OTG state machine operations */ fsl_otg_tc->fsm.ops = &fsl_otg_ops; /* initialize the otg structure */ fsl_otg_tc->otg.label = DRIVER_DESC; fsl_otg_tc->otg.set_host = fsl_otg_set_host; fsl_otg_tc->otg.set_peripheral = fsl_otg_set_peripheral; fsl_otg_tc->otg.set_power = fsl_otg_set_power; fsl_otg_tc->otg.start_hnp = fsl_otg_start_hnp; fsl_otg_tc->otg.start_srp = fsl_otg_start_srp; fsl_otg_dev = fsl_otg_tc; /* Store the otg transceiver */ status = otg_set_transceiver(&fsl_otg_tc->otg); if 
(status) { pr_warn(FSL_OTG_NAME ": unable to register OTG transceiver.\n"); goto err; } return 0; err: fsl_otg_uninit_timers(); kfree(fsl_otg_tc); return status; } /* OTG Initialization */ int usb_otg_start(struct platform_device *pdev) { struct fsl_otg *p_otg; struct otg_transceiver *otg_trans = otg_get_transceiver(); struct otg_fsm *fsm; int status; struct resource *res; u32 temp; struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data; p_otg = container_of(otg_trans, struct fsl_otg, otg); fsm = &p_otg->fsm; /* Initialize the state machine structure with default values */ SET_OTG_STATE(otg_trans, OTG_STATE_UNDEFINED); fsm->transceiver = &p_otg->otg; /* We don't require predefined MEM/IRQ resource index */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENXIO; /* We don't request_mem_region here to enable resource sharing * with host/device */ usb_dr_regs = ioremap(res->start, sizeof(struct usb_dr_mmap)); p_otg->dr_mem_map = (struct usb_dr_mmap *)usb_dr_regs; pdata->regs = (void *)usb_dr_regs; if (pdata->init && pdata->init(pdev) != 0) return -EINVAL; if (pdata->big_endian_mmio) { _fsl_readl = _fsl_readl_be; _fsl_writel = _fsl_writel_be; } else { _fsl_readl = _fsl_readl_le; _fsl_writel = _fsl_writel_le; } /* request irq */ p_otg->irq = platform_get_irq(pdev, 0); status = request_irq(p_otg->irq, fsl_otg_isr, IRQF_SHARED, driver_name, p_otg); if (status) { dev_dbg(p_otg->otg.dev, "can't get IRQ %d, error %d\n", p_otg->irq, status); iounmap(p_otg->dr_mem_map); kfree(p_otg); return status; } /* stop the controller */ temp = fsl_readl(&p_otg->dr_mem_map->usbcmd); temp &= ~USB_CMD_RUN_STOP; fsl_writel(temp, &p_otg->dr_mem_map->usbcmd); /* reset the controller */ temp = fsl_readl(&p_otg->dr_mem_map->usbcmd); temp |= USB_CMD_CTRL_RESET; fsl_writel(temp, &p_otg->dr_mem_map->usbcmd); /* wait reset completed */ while (fsl_readl(&p_otg->dr_mem_map->usbcmd) & USB_CMD_CTRL_RESET) ; /* configure the VBUSHS as IDLE(both host and device) */ temp = 
USB_MODE_STREAM_DISABLE | (pdata->es ? USB_MODE_ES : 0); fsl_writel(temp, &p_otg->dr_mem_map->usbmode); /* configure PHY interface */ temp = fsl_readl(&p_otg->dr_mem_map->portsc); temp &= ~(PORTSC_PHY_TYPE_SEL | PORTSC_PTW); switch (pdata->phy_mode) { case FSL_USB2_PHY_ULPI: temp |= PORTSC_PTS_ULPI; break; case FSL_USB2_PHY_UTMI_WIDE: temp |= PORTSC_PTW_16BIT; /* fall through */ case FSL_USB2_PHY_UTMI: temp |= PORTSC_PTS_UTMI; /* fall through */ default: break; } fsl_writel(temp, &p_otg->dr_mem_map->portsc); if (pdata->have_sysif_regs) { /* configure control enable IO output, big endian register */ temp = __raw_readl(&p_otg->dr_mem_map->control); temp |= USB_CTRL_IOENB; __raw_writel(temp, &p_otg->dr_mem_map->control); } /* disable all interrupt and clear all OTGSC status */ temp = fsl_readl(&p_otg->dr_mem_map->otgsc); temp &= ~OTGSC_INTERRUPT_ENABLE_BITS_MASK; temp |= OTGSC_INTERRUPT_STATUS_BITS_MASK | OTGSC_CTRL_VBUS_DISCHARGE; fsl_writel(temp, &p_otg->dr_mem_map->otgsc); /* * The identification (id) input is FALSE when a Mini-A plug is inserted * in the devices Mini-AB receptacle. Otherwise, this input is TRUE. 
* Also: record initial state of ID pin */ if (fsl_readl(&p_otg->dr_mem_map->otgsc) & OTGSC_STS_USB_ID) { p_otg->otg.state = OTG_STATE_UNDEFINED; p_otg->fsm.id = 1; } else { p_otg->otg.state = OTG_STATE_A_IDLE; p_otg->fsm.id = 0; } DBG("initial ID pin=%d\n", p_otg->fsm.id); /* enable OTG ID pin interrupt */ temp = fsl_readl(&p_otg->dr_mem_map->otgsc); temp |= OTGSC_INTR_USB_ID_EN; temp &= ~(OTGSC_CTRL_VBUS_DISCHARGE | OTGSC_INTR_1MS_TIMER_EN); fsl_writel(temp, &p_otg->dr_mem_map->otgsc); return 0; } /* * state file in sysfs */ static int show_fsl_usb2_otg_state(struct device *dev, struct device_attribute *attr, char *buf) { struct otg_fsm *fsm = &fsl_otg_dev->fsm; char *next = buf; unsigned size = PAGE_SIZE; unsigned long flags; int t; spin_lock_irqsave(&fsm->lock, flags); /* basic driver infomation */ t = scnprintf(next, size, DRIVER_DESC "\n" "fsl_usb2_otg version: %s\n\n", DRIVER_VERSION); size -= t; next += t; /* Registers */ t = scnprintf(next, size, "OTGSC: 0x%08x\n" "PORTSC: 0x%08x\n" "USBMODE: 0x%08x\n" "USBCMD: 0x%08x\n" "USBSTS: 0x%08x\n" "USBINTR: 0x%08x\n", fsl_readl(&usb_dr_regs->otgsc), fsl_readl(&usb_dr_regs->portsc), fsl_readl(&usb_dr_regs->usbmode), fsl_readl(&usb_dr_regs->usbcmd), fsl_readl(&usb_dr_regs->usbsts), fsl_readl(&usb_dr_regs->usbintr)); size -= t; next += t; /* State */ t = scnprintf(next, size, "OTG state: %s\n\n", otg_state_string(fsl_otg_dev->otg.state)); size -= t; next += t; /* State Machine Variables */ t = scnprintf(next, size, "a_bus_req: %d\n" "b_bus_req: %d\n" "a_bus_resume: %d\n" "a_bus_suspend: %d\n" "a_conn: %d\n" "a_sess_vld: %d\n" "a_srp_det: %d\n" "a_vbus_vld: %d\n" "b_bus_resume: %d\n" "b_bus_suspend: %d\n" "b_conn: %d\n" "b_se0_srp: %d\n" "b_sess_end: %d\n" "b_sess_vld: %d\n" "id: %d\n", fsm->a_bus_req, fsm->b_bus_req, fsm->a_bus_resume, fsm->a_bus_suspend, fsm->a_conn, fsm->a_sess_vld, fsm->a_srp_det, fsm->a_vbus_vld, fsm->b_bus_resume, fsm->b_bus_suspend, fsm->b_conn, fsm->b_se0_srp, fsm->b_sess_end, fsm->b_sess_vld, 
fsm->id); size -= t; next += t; spin_unlock_irqrestore(&fsm->lock, flags); return PAGE_SIZE - size; } static DEVICE_ATTR(fsl_usb2_otg_state, S_IRUGO, show_fsl_usb2_otg_state, NULL); /* Char driver interface to control some OTG input */ /* * Handle some ioctl command, such as get otg * status and set host suspend */ static long fsl_otg_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { u32 retval = 0; switch (cmd) { case GET_OTG_STATUS: retval = fsl_otg_dev->host_working; break; case SET_A_SUSPEND_REQ: fsl_otg_dev->fsm.a_suspend_req = arg; break; case SET_A_BUS_DROP: fsl_otg_dev->fsm.a_bus_drop = arg; break; case SET_A_BUS_REQ: fsl_otg_dev->fsm.a_bus_req = arg; break; case SET_B_BUS_REQ: fsl_otg_dev->fsm.b_bus_req = arg; break; default: break; } otg_statemachine(&fsl_otg_dev->fsm); return retval; } static int fsl_otg_open(struct inode *inode, struct file *file) { return 0; } static int fsl_otg_release(struct inode *inode, struct file *file) { return 0; } static const struct file_operations otg_fops = { .owner = THIS_MODULE, .llseek = NULL, .read = NULL, .write = NULL, .unlocked_ioctl = fsl_otg_ioctl, .open = fsl_otg_open, .release = fsl_otg_release, }; static int __devinit fsl_otg_probe(struct platform_device *pdev) { int ret; if (!pdev->dev.platform_data) return -ENODEV; /* configure the OTG */ ret = fsl_otg_conf(pdev); if (ret) { dev_err(&pdev->dev, "Couldn't configure OTG module\n"); return ret; } /* start OTG */ ret = usb_otg_start(pdev); if (ret) { dev_err(&pdev->dev, "Can't init FSL OTG device\n"); return ret; } ret = register_chrdev(FSL_OTG_MAJOR, FSL_OTG_NAME, &otg_fops); if (ret) { dev_err(&pdev->dev, "unable to register FSL OTG device\n"); return ret; } ret = device_create_file(&pdev->dev, &dev_attr_fsl_usb2_otg_state); if (ret) dev_warn(&pdev->dev, "Can't register sysfs attribute\n"); return ret; } static int __devexit fsl_otg_remove(struct platform_device *pdev) { struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data; 
otg_set_transceiver(NULL); free_irq(fsl_otg_dev->irq, fsl_otg_dev); iounmap((void *)usb_dr_regs); fsl_otg_uninit_timers(); kfree(fsl_otg_dev); device_remove_file(&pdev->dev, &dev_attr_fsl_usb2_otg_state); unregister_chrdev(FSL_OTG_MAJOR, FSL_OTG_NAME); if (pdata->exit) pdata->exit(pdev); return 0; } struct platform_driver fsl_otg_driver = { .probe = fsl_otg_probe, .remove = __devexit_p(fsl_otg_remove), .driver = { .name = driver_name, .owner = THIS_MODULE, }, }; static int __init fsl_usb_otg_init(void) { pr_info(DRIVER_INFO "\n"); return platform_driver_register(&fsl_otg_driver); } module_init(fsl_usb_otg_init); static void __exit fsl_usb_otg_exit(void) { platform_driver_unregister(&fsl_otg_driver); } module_exit(fsl_usb_otg_exit); MODULE_DESCRIPTION(DRIVER_INFO); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_LICENSE("GPL");
gpl-2.0
playfulgod/kernel_lge_fx3
net/ipv4/cipso_ipv4.c
2870
64767
/* * CIPSO - Commercial IP Security Option * * This is an implementation of the CIPSO 2.2 protocol as specified in * draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in * FIPS-188. While CIPSO never became a full IETF RFC standard many vendors * have chosen to adopt the protocol and over the years it has become a * de-facto standard for labeled networking. * * The CIPSO draft specification can be found in the kernel's Documentation * directory as well as the following URL: * http://tools.ietf.org/id/draft-ietf-cipso-ipsecurity-01.txt * The FIPS-188 specification can be found at the following URL: * http://www.itl.nist.gov/fipspubs/fip188.htm * * Author: Paul Moore <paul.moore@hp.com> * */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/types.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/jhash.h> #include <linux/audit.h> #include <linux/slab.h> #include <net/ip.h> #include <net/icmp.h> #include <net/tcp.h> #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <linux/atomic.h> #include <asm/bug.h> #include <asm/unaligned.h> /* List of available DOI definitions */ /* XXX - This currently assumes a minimal number of different DOIs in use, * if in practice there are a lot of different DOIs this list should * probably be turned into a hash table or something similar so we * can do quick lookups. */ static DEFINE_SPINLOCK(cipso_v4_doi_list_lock); static LIST_HEAD(cipso_v4_doi_list); /* Label mapping cache */ int cipso_v4_cache_enabled = 1; int cipso_v4_cache_bucketsize = 10; #define CIPSO_V4_CACHE_BUCKETBITS 7 #define CIPSO_V4_CACHE_BUCKETS (1 << CIPSO_V4_CACHE_BUCKETBITS) #define CIPSO_V4_CACHE_REORDERLIMIT 10 struct cipso_v4_map_cache_bkt { spinlock_t lock; u32 size; struct list_head list; }; struct cipso_v4_map_cache_entry { u32 hash; unsigned char *key; size_t key_len; struct netlbl_lsm_cache *lsm_data; u32 activity; struct list_head list; }; static struct cipso_v4_map_cache_bkt *cipso_v4_cache = NULL; /* Restricted bitmap (tag #1) flags */ int cipso_v4_rbm_optfmt = 0; int cipso_v4_rbm_strictvalid = 1; /* * Protocol Constants */ /* Maximum size of the CIPSO IP option, derived from the fact that the maximum * IPv4 header size is 60 bytes and the base IPv4 header is 20 bytes long. */ #define CIPSO_V4_OPT_LEN_MAX 40 /* Length of the base CIPSO option, this includes the option type (1 byte), the * option length (1 byte), and the DOI (4 bytes). 
*/ #define CIPSO_V4_HDR_LEN 6 /* Base length of the restrictive category bitmap tag (tag #1). */ #define CIPSO_V4_TAG_RBM_BLEN 4 /* Base length of the enumerated category tag (tag #2). */ #define CIPSO_V4_TAG_ENUM_BLEN 4 /* Base length of the ranged categories bitmap tag (tag #5). */ #define CIPSO_V4_TAG_RNG_BLEN 4 /* The maximum number of category ranges permitted in the ranged category tag * (tag #5). You may note that the IETF draft states that the maximum number * of category ranges is 7, but if the low end of the last category range is * zero then it is possible to fit 8 category ranges because the zero should * be omitted. */ #define CIPSO_V4_TAG_RNG_CAT_MAX 8 /* Base length of the local tag (non-standard tag). * Tag definition (may change between kernel versions) * * 0 8 16 24 32 * +----------+----------+----------+----------+ * | 10000000 | 00000110 | 32-bit secid value | * +----------+----------+----------+----------+ * | in (host byte order)| * +----------+----------+ * */ #define CIPSO_V4_TAG_LOC_BLEN 6 /* * Helper Functions */ /** * cipso_v4_bitmap_walk - Walk a bitmap looking for a bit * @bitmap: the bitmap * @bitmap_len: length in bits * @offset: starting offset * @state: if non-zero, look for a set (1) bit else look for a cleared (0) bit * * Description: * Starting at @offset, walk the bitmap from left to right until either the * desired bit is found or we reach the end. Return the bit offset, -1 if * not found, or -2 if error. 
*/ static int cipso_v4_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len, u32 offset, u8 state) { u32 bit_spot; u32 byte_offset; unsigned char bitmask; unsigned char byte; /* gcc always rounds to zero when doing integer division */ byte_offset = offset / 8; byte = bitmap[byte_offset]; bit_spot = offset; bitmask = 0x80 >> (offset % 8); while (bit_spot < bitmap_len) { if ((state && (byte & bitmask) == bitmask) || (state == 0 && (byte & bitmask) == 0)) return bit_spot; bit_spot++; bitmask >>= 1; if (bitmask == 0) { byte = bitmap[++byte_offset]; bitmask = 0x80; } } return -1; } /** * cipso_v4_bitmap_setbit - Sets a single bit in a bitmap * @bitmap: the bitmap * @bit: the bit * @state: if non-zero, set the bit (1) else clear the bit (0) * * Description: * Set a single bit in the bitmask. Returns zero on success, negative values * on error. */ static void cipso_v4_bitmap_setbit(unsigned char *bitmap, u32 bit, u8 state) { u32 byte_spot; u8 bitmask; /* gcc always rounds to zero when doing integer division */ byte_spot = bit / 8; bitmask = 0x80 >> (bit % 8); if (state) bitmap[byte_spot] |= bitmask; else bitmap[byte_spot] &= ~bitmask; } /** * cipso_v4_cache_entry_free - Frees a cache entry * @entry: the entry to free * * Description: * This function frees the memory associated with a cache entry including the * LSM cache data if there are no longer any users, i.e. reference count == 0. * */ static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry) { if (entry->lsm_data) netlbl_secattr_cache_free(entry->lsm_data); kfree(entry->key); kfree(entry); } /** * cipso_v4_map_cache_hash - Hashing function for the CIPSO cache * @key: the hash key * @key_len: the length of the key in bytes * * Description: * The CIPSO tag hashing function. Returns a 32-bit hash value. 
* */ static u32 cipso_v4_map_cache_hash(const unsigned char *key, u32 key_len) { return jhash(key, key_len, 0); } /* * Label Mapping Cache Functions */ /** * cipso_v4_cache_init - Initialize the CIPSO cache * * Description: * Initializes the CIPSO label mapping cache, this function should be called * before any of the other functions defined in this file. Returns zero on * success, negative values on error. * */ static int cipso_v4_cache_init(void) { u32 iter; cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS, sizeof(struct cipso_v4_map_cache_bkt), GFP_KERNEL); if (cipso_v4_cache == NULL) return -ENOMEM; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_init(&cipso_v4_cache[iter].lock); cipso_v4_cache[iter].size = 0; INIT_LIST_HEAD(&cipso_v4_cache[iter].list); } return 0; } /** * cipso_v4_cache_invalidate - Invalidates the current CIPSO cache * * Description: * Invalidates and frees any entries in the CIPSO cache. Returns zero on * success and negative values on failure. * */ void cipso_v4_cache_invalidate(void) { struct cipso_v4_map_cache_entry *entry, *tmp_entry; u32 iter; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_bh(&cipso_v4_cache[iter].lock); list_for_each_entry_safe(entry, tmp_entry, &cipso_v4_cache[iter].list, list) { list_del(&entry->list); cipso_v4_cache_entry_free(entry); } cipso_v4_cache[iter].size = 0; spin_unlock_bh(&cipso_v4_cache[iter].lock); } } /** * cipso_v4_cache_check - Check the CIPSO cache for a label mapping * @key: the buffer to check * @key_len: buffer length in bytes * @secattr: the security attribute struct to use * * Description: * This function checks the cache to see if a label mapping already exists for * the given key. If there is a match then the cache is adjusted and the * @secattr struct is populated with the correct LSM security attributes. The * cache is adjusted in the following manner if the entry is not already the * first in the cache bucket: * * 1. 
The cache entry's activity counter is incremented * 2. The previous (higher ranking) entry's activity counter is decremented * 3. If the difference between the two activity counters is geater than * CIPSO_V4_CACHE_REORDERLIMIT the two entries are swapped * * Returns zero on success, -ENOENT for a cache miss, and other negative values * on error. * */ static int cipso_v4_cache_check(const unsigned char *key, u32 key_len, struct netlbl_lsm_secattr *secattr) { u32 bkt; struct cipso_v4_map_cache_entry *entry; struct cipso_v4_map_cache_entry *prev_entry = NULL; u32 hash; if (!cipso_v4_cache_enabled) return -ENOENT; hash = cipso_v4_map_cache_hash(key, key_len); bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) { if (entry->hash == hash && entry->key_len == key_len && memcmp(entry->key, key, key_len) == 0) { entry->activity += 1; atomic_inc(&entry->lsm_data->refcount); secattr->cache = entry->lsm_data; secattr->flags |= NETLBL_SECATTR_CACHE; secattr->type = NETLBL_NLTYPE_CIPSOV4; if (prev_entry == NULL) { spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } if (prev_entry->activity > 0) prev_entry->activity -= 1; if (entry->activity > prev_entry->activity && entry->activity - prev_entry->activity > CIPSO_V4_CACHE_REORDERLIMIT) { __list_del(entry->list.prev, entry->list.next); __list_add(&entry->list, prev_entry->list.prev, &prev_entry->list); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } prev_entry = entry; } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return -ENOENT; } /** * cipso_v4_cache_add - Add an entry to the CIPSO cache * @skb: the packet * @secattr: the packet's security attributes * * Description: * Add a new entry into the CIPSO label mapping cache. Add the new entry to * head of the cache bucket's list, if the cache bucket is out of room remove * the last entry in the list first. 
It is important to note that there is * currently no checking for duplicate keys. Returns zero on success, * negative values on failure. * */ int cipso_v4_cache_add(const struct sk_buff *skb, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; u32 bkt; struct cipso_v4_map_cache_entry *entry = NULL; struct cipso_v4_map_cache_entry *old_entry = NULL; unsigned char *cipso_ptr; u32 cipso_ptr_len; if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0) return 0; cipso_ptr = CIPSO_V4_OPTPTR(skb); cipso_ptr_len = cipso_ptr[1]; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) return -ENOMEM; entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC); if (entry->key == NULL) { ret_val = -ENOMEM; goto cache_add_failure; } entry->key_len = cipso_ptr_len; entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len); atomic_inc(&secattr->cache->refcount); entry->lsm_data = secattr->cache; bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) { list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache[bkt].size += 1; } else { old_entry = list_entry(cipso_v4_cache[bkt].list.prev, struct cipso_v4_map_cache_entry, list); list_del(&old_entry->list); list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache_entry_free(old_entry); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; cache_add_failure: if (entry) cipso_v4_cache_entry_free(entry); return ret_val; } /* * DOI List Functions */ /** * cipso_v4_doi_search - Searches for a DOI definition * @doi: the DOI to search for * * Description: * Search the DOI definition list for a DOI definition with a DOI value that * matches @doi. The caller is responsible for calling rcu_read_[un]lock(). * Returns a pointer to the DOI definition on success and NULL on failure. 
*/ static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi) { struct cipso_v4_doi *iter; list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list) if (iter->doi == doi && atomic_read(&iter->refcount)) return iter; return NULL; } /** * cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine * @doi_def: the DOI structure * @audit_info: NetLabel audit information * * Description: * The caller defines a new DOI for use by the CIPSO engine and calls this * function to add it to the list of acceptable domains. The caller must * ensure that the mapping table specified in @doi_def->map meets all of the * requirements of the mapping type (see cipso_ipv4.h for details). Returns * zero on success and non-zero on failure. * */ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def, struct netlbl_audit *audit_info) { int ret_val = -EINVAL; u32 iter; u32 doi; u32 doi_type; struct audit_buffer *audit_buf; doi = doi_def->doi; doi_type = doi_def->type; if (doi_def->doi == CIPSO_V4_DOI_UNKNOWN) goto doi_add_return; for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) { switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: break; case CIPSO_V4_TAG_RANGE: case CIPSO_V4_TAG_ENUM: if (doi_def->type != CIPSO_V4_MAP_PASS) goto doi_add_return; break; case CIPSO_V4_TAG_LOCAL: if (doi_def->type != CIPSO_V4_MAP_LOCAL) goto doi_add_return; break; case CIPSO_V4_TAG_INVALID: if (iter == 0) goto doi_add_return; break; default: goto doi_add_return; } } atomic_set(&doi_def->refcount, 1); spin_lock(&cipso_v4_doi_list_lock); if (cipso_v4_doi_search(doi_def->doi) != NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EEXIST; goto doi_add_return; } list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list); spin_unlock(&cipso_v4_doi_list_lock); ret_val = 0; doi_add_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info); if (audit_buf != NULL) { const char *type_str; switch (doi_type) { case CIPSO_V4_MAP_TRANS: type_str = "trans"; break; case CIPSO_V4_MAP_PASS: type_str = "pass"; 
break; case CIPSO_V4_MAP_LOCAL: type_str = "local"; break; default: type_str = "(unknown)"; } audit_log_format(audit_buf, " cipso_doi=%u cipso_type=%s res=%u", doi, type_str, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_free - Frees a DOI definition * @entry: the entry's RCU field * * Description: * This function frees all of the memory associated with a DOI definition. * */ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; switch (doi_def->type) { case CIPSO_V4_MAP_TRANS: kfree(doi_def->map.std->lvl.cipso); kfree(doi_def->map.std->lvl.local); kfree(doi_def->map.std->cat.cipso); kfree(doi_def->map.std->cat.local); break; } kfree(doi_def); } /** * cipso_v4_doi_free_rcu - Frees a DOI definition via the RCU pointer * @entry: the entry's RCU field * * Description: * This function is designed to be used as a callback to the call_rcu() * function so that the memory allocated to the DOI definition can be released * safely. * */ static void cipso_v4_doi_free_rcu(struct rcu_head *entry) { struct cipso_v4_doi *doi_def; doi_def = container_of(entry, struct cipso_v4_doi, rcu); cipso_v4_doi_free(doi_def); } /** * cipso_v4_doi_remove - Remove an existing DOI from the CIPSO protocol engine * @doi: the DOI value * @audit_secid: the LSM secid to use in the audit message * * Description: * Removes a DOI definition from the CIPSO engine. The NetLabel routines will * be called to release their own LSM domain mappings as well as our own * domain list. Returns zero on success and negative values on failure. 
* */ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info) { int ret_val; struct cipso_v4_doi *doi_def; struct audit_buffer *audit_buf; spin_lock(&cipso_v4_doi_list_lock); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -ENOENT; goto doi_remove_return; } if (!atomic_dec_and_test(&doi_def->refcount)) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EBUSY; goto doi_remove_return; } list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); ret_val = 0; doi_remove_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, " cipso_doi=%u res=%u", doi, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_getdef - Returns a reference to a valid DOI definition * @doi: the DOI value * * Description: * Searches for a valid DOI definition and if one is found it is returned to * the caller. Otherwise NULL is returned. The caller must ensure that * rcu_read_lock() is held while accessing the returned definition and the DOI * definition reference count is decremented when the caller is done. * */ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi) { struct cipso_v4_doi *doi_def; rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto doi_getdef_return; if (!atomic_inc_not_zero(&doi_def->refcount)) doi_def = NULL; doi_getdef_return: rcu_read_unlock(); return doi_def; } /** * cipso_v4_doi_putdef - Releases a reference for the given DOI definition * @doi_def: the DOI definition * * Description: * Releases a DOI definition reference obtained from cipso_v4_doi_getdef(). 
* */ void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; if (!atomic_dec_and_test(&doi_def->refcount)) return; spin_lock(&cipso_v4_doi_list_lock); list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); } /** * cipso_v4_doi_walk - Iterate through the DOI definitions * @skip_cnt: skip past this number of DOI definitions, updated * @callback: callback for each DOI definition * @cb_arg: argument for the callback function * * Description: * Iterate over the DOI definition list, skipping the first @skip_cnt entries. * For each entry call @callback, if @callback returns a negative value stop * 'walking' through the list and return. Updates the value in @skip_cnt upon * return. Returns zero on success, negative values on failure. * */ int cipso_v4_doi_walk(u32 *skip_cnt, int (*callback) (struct cipso_v4_doi *doi_def, void *arg), void *cb_arg) { int ret_val = -ENOENT; u32 doi_cnt = 0; struct cipso_v4_doi *iter_doi; rcu_read_lock(); list_for_each_entry_rcu(iter_doi, &cipso_v4_doi_list, list) if (atomic_read(&iter_doi->refcount) > 0) { if (doi_cnt++ < *skip_cnt) continue; ret_val = callback(iter_doi, cb_arg); if (ret_val < 0) { doi_cnt--; goto doi_walk_return; } } doi_walk_return: rcu_read_unlock(); *skip_cnt = doi_cnt; return ret_val; } /* * Label Mapping Functions */ /** * cipso_v4_map_lvl_valid - Checks to see if the given level is understood * @doi_def: the DOI definition * @level: the level to check * * Description: * Checks the given level against the given DOI definition and returns a * negative value if the level does not have a valid mapping and a zero value * if the level is defined by the DOI. 
* */ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL) return 0; break; } return -EFAULT; } /** * cipso_v4_map_lvl_hton - Perform a level mapping from the host to the network * @doi_def: the DOI definition * @host_lvl: the host MLS level * @net_lvl: the network/CIPSO MLS level * * Description: * Perform a label mapping to translate a local MLS level to the correct * CIPSO level using the given DOI definition. Returns zero on success, * negative values otherwise. * */ static int cipso_v4_map_lvl_hton(const struct cipso_v4_doi *doi_def, u32 host_lvl, u32 *net_lvl) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *net_lvl = host_lvl; return 0; case CIPSO_V4_MAP_TRANS: if (host_lvl < doi_def->map.std->lvl.local_size && doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) { *net_lvl = doi_def->map.std->lvl.local[host_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_lvl_ntoh - Perform a level mapping from the network to the host * @doi_def: the DOI definition * @net_lvl: the network/CIPSO MLS level * @host_lvl: the host MLS level * * Description: * Perform a label mapping to translate a CIPSO level to the correct local MLS * level using the given DOI definition. Returns zero on success, negative * values otherwise. 
 *
 */
static int cipso_v4_map_lvl_ntoh(const struct cipso_v4_doi *doi_def,
				 u32 net_lvl,
				 u32 *host_lvl)
{
	struct cipso_v4_std_map_tbl *map_tbl;

	switch (doi_def->type) {
	case CIPSO_V4_MAP_PASS:
		/* identity mapping */
		*host_lvl = net_lvl;
		return 0;
	case CIPSO_V4_MAP_TRANS:
		map_tbl = doi_def->map.std;
		if (net_lvl < map_tbl->lvl.cipso_size &&
		    map_tbl->lvl.cipso[net_lvl] < CIPSO_V4_INV_LVL) {
			*host_lvl = doi_def->map.std->lvl.cipso[net_lvl];
			return 0;
		}
		return -EPERM;
	}

	return -EINVAL;
}

/**
 * cipso_v4_map_cat_rbm_valid - Checks to see if the category bitmap is valid
 * @doi_def: the DOI definition
 * @bitmap: category bitmap
 * @bitmap_len: bitmap length in bytes
 *
 * Description:
 * Checks the given category bitmap against the given DOI definition and
 * returns a negative value if any of the categories in the bitmap do not have
 * a valid mapping and a zero value if all of the categories are valid.
 *
 */
static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def,
				      const unsigned char *bitmap,
				      u32 bitmap_len)
{
	int cat = -1;
	u32 bitmap_len_bits = bitmap_len * 8;
	u32 cipso_cat_size;
	u32 *cipso_array;

	switch (doi_def->type) {
	case CIPSO_V4_MAP_PASS:
		return 0;
	case CIPSO_V4_MAP_TRANS:
		cipso_cat_size = doi_def->map.std->cat.cipso_size;
		cipso_array = doi_def->map.std->cat.cipso;
		/* walk every set bit and make sure each maps to a real
		 * local category */
		for (;;) {
			cat = cipso_v4_bitmap_walk(bitmap,
						   bitmap_len_bits,
						   cat + 1,
						   1);
			if (cat < 0)
				break;
			if (cat >= cipso_cat_size ||
			    cipso_array[cat] >= CIPSO_V4_INV_CAT)
				return -EFAULT;
		}

		/* cat == -1 means the walk ran off the end cleanly */
		if (cat == -1)
			return 0;
		break;
	}

	return -EFAULT;
}

/**
 * cipso_v4_map_cat_rbm_hton - Perform a category mapping from host to network
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @net_cat: the zero'd out category bitmap in network/CIPSO format
 * @net_cat_len: the length of the CIPSO bitmap in bytes
 *
 * Description:
 * Perform a label mapping to translate a local MLS category bitmap to the
 * correct CIPSO bitmap using the given DOI definition.
Returns the minimum
 * size in bytes of the network bitmap on success, negative values otherwise.
 *
 */
static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def,
				     const struct netlbl_lsm_secattr *secattr,
				     unsigned char *net_cat,
				     u32 net_cat_len)
{
	int host_spot = -1;
	u32 net_spot = CIPSO_V4_INV_CAT;
	u32 net_spot_max = 0;
	u32 net_clen_bits = net_cat_len * 8;
	u32 host_cat_size = 0;
	u32 *host_cat_array = NULL;

	/* translation tables are only present for TRANS DOIs */
	if (doi_def->type == CIPSO_V4_MAP_TRANS) {
		host_cat_size = doi_def->map.std->cat.local_size;
		host_cat_array = doi_def->map.std->cat.local;
	}

	for (;;) {
		host_spot = netlbl_secattr_catmap_walk(secattr->attr.mls.cat,
						       host_spot + 1);
		if (host_spot < 0)
			break;

		switch (doi_def->type) {
		case CIPSO_V4_MAP_PASS:
			net_spot = host_spot;
			break;
		case CIPSO_V4_MAP_TRANS:
			if (host_spot >= host_cat_size)
				return -EPERM;
			net_spot = host_cat_array[host_spot];
			if (net_spot >= CIPSO_V4_INV_CAT)
				return -EPERM;
			break;
		}
		if (net_spot >= net_clen_bits)
			return -ENOSPC;
		cipso_v4_bitmap_setbit(net_cat, net_spot, 1);

		/* track the highest bit set so we can report the minimum
		 * number of bytes actually needed */
		if (net_spot > net_spot_max)
			net_spot_max = net_spot;
	}

	/* round the highest used bit index up to whole bytes */
	if (++net_spot_max % 8)
		return net_spot_max / 8 + 1;
	return net_spot_max / 8;
}

/**
 * cipso_v4_map_cat_rbm_ntoh - Perform a category mapping from network to host
 * @doi_def: the DOI definition
 * @net_cat: the category bitmap in network/CIPSO format
 * @net_cat_len: the length of the CIPSO bitmap in bytes
 * @secattr: the security attributes
 *
 * Description:
 * Perform a label mapping to translate a CIPSO bitmap to the correct local
 * MLS category bitmap using the given DOI definition.  Returns zero on
 * success, negative values on failure.
 *
 */
static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def,
				     const unsigned char *net_cat,
				     u32 net_cat_len,
				     struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	int net_spot = -1;
	u32 host_spot = CIPSO_V4_INV_CAT;
	u32 net_clen_bits = net_cat_len * 8;
	u32 net_cat_size = 0;
	u32 *net_cat_array = NULL;

	/* translation tables are only present for TRANS DOIs */
	if (doi_def->type == CIPSO_V4_MAP_TRANS) {
		net_cat_size = doi_def->map.std->cat.cipso_size;
		net_cat_array = doi_def->map.std->cat.cipso;
	}

	for (;;) {
		net_spot = cipso_v4_bitmap_walk(net_cat,
						net_clen_bits,
						net_spot + 1,
						1);
		if (net_spot < 0) {
			/* -2 signals a malformed bitmap, anything else is
			 * simply the end of the walk */
			if (net_spot == -2)
				return -EFAULT;
			return 0;
		}

		switch (doi_def->type) {
		case CIPSO_V4_MAP_PASS:
			host_spot = net_spot;
			break;
		case CIPSO_V4_MAP_TRANS:
			if (net_spot >= net_cat_size)
				return -EPERM;
			host_spot = net_cat_array[net_spot];
			if (host_spot >= CIPSO_V4_INV_CAT)
				return -EPERM;
			break;
		}
		ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat,
						       host_spot,
						       GFP_ATOMIC);
		if (ret_val != 0)
			return ret_val;
	}

	return -EINVAL;
}

/**
 * cipso_v4_map_cat_enum_valid - Checks to see if the categories are valid
 * @doi_def: the DOI definition
 * @enumcat: category list
 * @enumcat_len: length of the category list in bytes
 *
 * Description:
 * Checks the given categories against the given DOI definition and returns a
 * negative value if any of the categories do not have a valid mapping and a
 * zero value if all of the categories are valid.
 *
 */
static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def,
				       const unsigned char *enumcat,
				       u32 enumcat_len)
{
	u16 cat;
	int cat_prev = -1;
	u32 iter;

	/* enumerated tags are only defined for pass-through DOIs and the
	 * category list must be a whole number of 16-bit entries */
	if (doi_def->type != CIPSO_V4_MAP_PASS || enumcat_len & 0x01)
		return -EFAULT;

	for (iter = 0; iter < enumcat_len; iter += 2) {
		cat = get_unaligned_be16(&enumcat[iter]);
		/* the list must be strictly increasing */
		if (cat <= cat_prev)
			return -EFAULT;
		cat_prev = cat;
	}

	return 0;
}

/**
 * cipso_v4_map_cat_enum_hton - Perform a category mapping from host to network
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @net_cat: the zero'd out category list in network/CIPSO format
 * @net_cat_len: the length of the CIPSO category list in bytes
 *
 * Description:
 * Perform a label mapping to translate a local MLS category bitmap to the
 * correct CIPSO category list using the given DOI definition.   Returns the
 * size in bytes of the network category bitmap on success, negative values
 * otherwise.
 *
 */
static int cipso_v4_map_cat_enum_hton(const struct cipso_v4_doi *doi_def,
				      const struct netlbl_lsm_secattr *secattr,
				      unsigned char *net_cat,
				      u32 net_cat_len)
{
	int cat = -1;
	u32 cat_iter = 0;

	for (;;) {
		cat = netlbl_secattr_catmap_walk(secattr->attr.mls.cat,
						 cat + 1);
		if (cat < 0)
			break;
		/* each category occupies two bytes on the wire */
		if ((cat_iter + 2) > net_cat_len)
			return -ENOSPC;

		*((__be16 *)&net_cat[cat_iter]) = htons(cat);
		cat_iter += 2;
	}

	return cat_iter;
}

/**
 * cipso_v4_map_cat_enum_ntoh - Perform a category mapping from network to host
 * @doi_def: the DOI definition
 * @net_cat: the category list in network/CIPSO format
 * @net_cat_len: the length of the CIPSO bitmap in bytes
 * @secattr: the security attributes
 *
 * Description:
 * Perform a label mapping to translate a CIPSO category list to the correct
 * local MLS category bitmap using the given DOI definition.  Returns zero on
 * success, negative values on failure.
 *
 */
static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def,
				      const unsigned char *net_cat,
				      u32 net_cat_len,
				      struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u32 iter;

	/* enumerated tags are pass-through only, so each 16-bit network
	 * category is used as the local category directly */
	for (iter = 0; iter < net_cat_len; iter += 2) {
		ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat,
				get_unaligned_be16(&net_cat[iter]),
				GFP_ATOMIC);
		if (ret_val != 0)
			return ret_val;
	}

	return 0;
}

/**
 * cipso_v4_map_cat_rng_valid - Checks to see if the categories are valid
 * @doi_def: the DOI definition
 * @rngcat: category list
 * @rngcat_len: length of the category list in bytes
 *
 * Description:
 * Checks the given categories against the given DOI definition and returns a
 * negative value if any of the categories do not have a valid mapping and a
 * zero value if all of the categories are valid.
 *
 */
static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def,
				      const unsigned char *rngcat,
				      u32 rngcat_len)
{
	u16 cat_high;
	u16 cat_low;
	u32 cat_prev = CIPSO_V4_MAX_REM_CATS + 1;
	u32 iter;

	/* ranged tags are only defined for pass-through DOIs and must be a
	 * whole number of 16-bit values */
	if (doi_def->type != CIPSO_V4_MAP_PASS || rngcat_len & 0x01)
		return -EFAULT;

	for (iter = 0; iter < rngcat_len; iter += 4) {
		cat_high = get_unaligned_be16(&rngcat[iter]);
		/* the final pair may omit the low end, which defaults to 0 */
		if ((iter + 4) <= rngcat_len)
			cat_low = get_unaligned_be16(&rngcat[iter + 2]);
		else
			cat_low = 0;

		/* ranges must be in strictly descending, non-overlapping
		 * order */
		if (cat_high > cat_prev)
			return -EFAULT;

		cat_prev = cat_low;
	}

	return 0;
}

/**
 * cipso_v4_map_cat_rng_hton - Perform a category mapping from host to network
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @net_cat: the zero'd out category list in network/CIPSO format
 * @net_cat_len: the length of the CIPSO category list in bytes
 *
 * Description:
 * Perform a label mapping to translate a local MLS category bitmap to the
 * correct CIPSO category list using the given DOI definition.   Returns the
 * size in bytes of the network category bitmap on success, negative values
 * otherwise.
 *
 */
static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def,
				     const struct netlbl_lsm_secattr *secattr,
				     unsigned char *net_cat,
				     u32 net_cat_len)
{
	int iter = -1;
	u16 array[CIPSO_V4_TAG_RNG_CAT_MAX * 2];
	u32 array_cnt = 0;
	u32 cat_size = 0;

	/* make sure we don't overflow the 'array[]' variable */
	if (net_cat_len >
	    (CIPSO_V4_OPT_LEN_MAX - CIPSO_V4_HDR_LEN - CIPSO_V4_TAG_RNG_BLEN))
		return -ENOSPC;

	/* first pass: collect (end, start) pairs for each contiguous run of
	 * categories, in ascending order */
	for (;;) {
		iter = netlbl_secattr_catmap_walk(secattr->attr.mls.cat,
						  iter + 1);
		if (iter < 0)
			break;
		/* a range starting at 0 omits its low end on the wire */
		cat_size += (iter == 0 ? 0 : sizeof(u16));
		if (cat_size > net_cat_len)
			return -ENOSPC;
		array[array_cnt++] = iter;

		iter = netlbl_secattr_catmap_walk_rng(secattr->attr.mls.cat,
						      iter);
		if (iter < 0)
			return -EFAULT;
		cat_size += sizeof(u16);
		if (cat_size > net_cat_len)
			return -ENOSPC;
		array[array_cnt++] = iter;
	}

	/* second pass: emit the pairs in reverse (descending) order as the
	 * CIPSO ranged tag requires, dropping a low end of 0 */
	for (iter = 0; array_cnt > 0;) {
		*((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]);
		iter += 2;
		array_cnt--;
		if (array[array_cnt] != 0) {
			*((__be16 *)&net_cat[iter]) = htons(array[array_cnt]);
			iter += 2;
		}
	}

	return cat_size;
}

/**
 * cipso_v4_map_cat_rng_ntoh - Perform a category mapping from network to host
 * @doi_def: the DOI definition
 * @net_cat: the category list in network/CIPSO format
 * @net_cat_len: the length of the CIPSO bitmap in bytes
 * @secattr: the security attributes
 *
 * Description:
 * Perform a label mapping to translate a CIPSO category list to the correct
 * local MLS category bitmap using the given DOI definition.  Returns zero on
 * success, negative values on failure.
 *
 */
static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def,
				     const unsigned char *net_cat,
				     u32 net_cat_len,
				     struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u32 net_iter;
	u16 cat_low;
	u16 cat_high;

	for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) {
		cat_high = get_unaligned_be16(&net_cat[net_iter]);
		/* the final pair may omit the low end, which defaults to 0 */
		if ((net_iter + 4) <= net_cat_len)
			cat_low = get_unaligned_be16(&net_cat[net_iter + 2]);
		else
			cat_low = 0;

		ret_val = netlbl_secattr_catmap_setrng(secattr->attr.mls.cat,
						       cat_low,
						       cat_high,
						       GFP_ATOMIC);
		if (ret_val != 0)
			return ret_val;
	}

	return 0;
}

/*
 * Protocol Handling Functions
 */

/**
 * cipso_v4_gentag_hdr - Generate a CIPSO option header
 * @doi_def: the DOI definition
 * @len: the total tag length in bytes, not including this header
 * @buf: the CIPSO option buffer
 *
 * Description:
 * Write a CIPSO header into the beginning of @buffer.
 *
 */
static void cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def,
				unsigned char *buf,
				u32 len)
{
	buf[0] = IPOPT_CIPSO;
	buf[1] = CIPSO_V4_HDR_LEN + len;
	/* bytes 2-5 carry the DOI in network byte order */
	*(__be32 *)&buf[2] = htonl(doi_def->doi);
}

/**
 * cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of buffer in bytes
 *
 * Description:
 * Generate a CIPSO option using the restricted bitmap tag, tag type #1.  The
 * actual buffer length may be larger than the indicated size due to
 * translation between host and network category bitmaps.  Returns the size of
 * the tag on success, negative values on failure.
 *
 */
static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def,
			       const struct netlbl_lsm_secattr *secattr,
			       unsigned char *buffer,
			       u32 buffer_len)
{
	int ret_val;
	u32 tag_len;
	u32 level;

	/* a restricted bitmap tag always carries a level */
	if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0)
		return -EPERM;

	ret_val = cipso_v4_map_lvl_hton(doi_def,
					secattr->attr.mls.lvl,
					&level);
	if (ret_val != 0)
		return ret_val;

	if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
		ret_val = cipso_v4_map_cat_rbm_hton(doi_def,
						    secattr,
						    &buffer[4],
						    buffer_len - 4);
		if (ret_val < 0)
			return ret_val;

		/* This will send packets using the "optimized" format when
		 * possible as specified in section 3.4.2.6 of the
		 * CIPSO draft. */
		if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10)
			tag_len = 14;
		else
			tag_len = 4 + ret_val;
	} else
		tag_len = 4;

	buffer[0] = CIPSO_V4_TAG_RBITMAP;
	buffer[1] = tag_len;
	/* buffer[2] stays zero (reserved "alignment" octet), the level goes
	 * into buffer[3] */
	buffer[3] = level;

	return tag_len;
}

/**
 * cipso_v4_parsetag_rbm - Parse a CIPSO restricted bitmap tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO restricted bitmap tag (tag type #1) and return the security
 * attributes in @secattr.  Return zero on success, negatives values on
 * failure.
 *
 */
static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def,
				 const unsigned char *tag,
				 struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u8 tag_len = tag[1];
	u32 level;

	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
	if (ret_val != 0)
		return ret_val;
	secattr->attr.mls.lvl = level;
	secattr->flags |= NETLBL_SECATTR_MLS_LVL;

	/* anything beyond the 4 byte tag header is the category bitmap */
	if (tag_len > 4) {
		secattr->attr.mls.cat =
		                       netlbl_secattr_catmap_alloc(GFP_ATOMIC);
		if (secattr->attr.mls.cat == NULL)
			return -ENOMEM;

		ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def,
						    &tag[4],
						    tag_len - 4,
						    secattr);
		if (ret_val != 0) {
			/* don't leak a partially filled catmap on failure */
			netlbl_secattr_catmap_free(secattr->attr.mls.cat);
			return ret_val;
		}

		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
	}

	return 0;
}

/**
 * cipso_v4_gentag_enum - Generate a CIPSO enumerated tag (type #2)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of buffer in bytes
 *
 * Description:
 * Generate a CIPSO option using the enumerated tag, tag type #2.  Returns the
 * size of the tag on success, negative values on failure.
 *
 */
static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def,
				const struct netlbl_lsm_secattr *secattr,
				unsigned char *buffer,
				u32 buffer_len)
{
	int ret_val;
	u32 tag_len;
	u32 level;

	/* an enumerated tag always carries a level */
	if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL))
		return -EPERM;

	ret_val = cipso_v4_map_lvl_hton(doi_def,
					secattr->attr.mls.lvl,
					&level);
	if (ret_val != 0)
		return ret_val;

	if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
		ret_val = cipso_v4_map_cat_enum_hton(doi_def,
						     secattr,
						     &buffer[4],
						     buffer_len - 4);
		if (ret_val < 0)
			return ret_val;

		tag_len = 4 + ret_val;
	} else
		tag_len = 4;

	buffer[0] = CIPSO_V4_TAG_ENUM;
	buffer[1] = tag_len;
	buffer[3] = level;

	return tag_len;
}

/**
 * cipso_v4_parsetag_enum - Parse a CIPSO enumerated tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO enumerated tag (tag type #2) and return the security
 * attributes in @secattr.
Return zero on success, negative values on
 * failure.
 *
 */
static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def,
				  const unsigned char *tag,
				  struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u8 tag_len = tag[1];
	u32 level;

	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
	if (ret_val != 0)
		return ret_val;
	secattr->attr.mls.lvl = level;
	secattr->flags |= NETLBL_SECATTR_MLS_LVL;

	/* anything beyond the 4 byte tag header is the category list */
	if (tag_len > 4) {
		secattr->attr.mls.cat =
			               netlbl_secattr_catmap_alloc(GFP_ATOMIC);
		if (secattr->attr.mls.cat == NULL)
			return -ENOMEM;

		ret_val = cipso_v4_map_cat_enum_ntoh(doi_def,
						     &tag[4],
						     tag_len - 4,
						     secattr);
		if (ret_val != 0) {
			/* don't leak a partially filled catmap on failure */
			netlbl_secattr_catmap_free(secattr->attr.mls.cat);
			return ret_val;
		}

		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
	}

	return 0;
}

/**
 * cipso_v4_gentag_rng - Generate a CIPSO ranged tag (type #5)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of buffer in bytes
 *
 * Description:
 * Generate a CIPSO option using the ranged tag, tag type #5.  Returns the
 * size of the tag on success, negative values on failure.
 *
 */
static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def,
			       const struct netlbl_lsm_secattr *secattr,
			       unsigned char *buffer,
			       u32 buffer_len)
{
	int ret_val;
	u32 tag_len;
	u32 level;

	/* a ranged tag always carries a level */
	if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL))
		return -EPERM;

	ret_val = cipso_v4_map_lvl_hton(doi_def,
					secattr->attr.mls.lvl,
					&level);
	if (ret_val != 0)
		return ret_val;

	if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
		ret_val = cipso_v4_map_cat_rng_hton(doi_def,
						    secattr,
						    &buffer[4],
						    buffer_len - 4);
		if (ret_val < 0)
			return ret_val;

		tag_len = 4 + ret_val;
	} else
		tag_len = 4;

	buffer[0] = CIPSO_V4_TAG_RANGE;
	buffer[1] = tag_len;
	buffer[3] = level;

	return tag_len;
}

/**
 * cipso_v4_parsetag_rng - Parse a CIPSO ranged tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO ranged tag (tag type #5) and return the security attributes
 * in @secattr.  Return zero on success, negatives values on failure.
 *
 */
static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def,
				 const unsigned char *tag,
				 struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u8 tag_len = tag[1];
	u32 level;

	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
	if (ret_val != 0)
		return ret_val;
	secattr->attr.mls.lvl = level;
	secattr->flags |= NETLBL_SECATTR_MLS_LVL;

	/* anything beyond the 4 byte tag header is the category range list */
	if (tag_len > 4) {
		secattr->attr.mls.cat =
			               netlbl_secattr_catmap_alloc(GFP_ATOMIC);
		if (secattr->attr.mls.cat == NULL)
			return -ENOMEM;

		ret_val = cipso_v4_map_cat_rng_ntoh(doi_def,
						    &tag[4],
						    tag_len - 4,
						    secattr);
		if (ret_val != 0) {
			/* don't leak a partially filled catmap on failure */
			netlbl_secattr_catmap_free(secattr->attr.mls.cat);
			return ret_val;
		}

		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
	}

	return 0;
}

/**
 * cipso_v4_gentag_loc - Generate a CIPSO local tag (non-standard)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of buffer in bytes
 *
 * Description:
 * Generate a CIPSO option using the local tag.
Returns the size of the tag
 * on success, negative values on failure.
 *
 */
static int cipso_v4_gentag_loc(const struct cipso_v4_doi *doi_def,
			       const struct netlbl_lsm_secattr *secattr,
			       unsigned char *buffer,
			       u32 buffer_len)
{
	if (!(secattr->flags & NETLBL_SECATTR_SECID))
		return -EPERM;

	buffer[0] = CIPSO_V4_TAG_LOCAL;
	buffer[1] = CIPSO_V4_TAG_LOC_BLEN;
	/* the raw LSM secid is carried in host byte order; this tag never
	 * leaves the local machine (see cipso_v4_validate) so that is safe */
	*(u32 *)&buffer[2] = secattr->attr.secid;

	return CIPSO_V4_TAG_LOC_BLEN;
}

/**
 * cipso_v4_parsetag_loc - Parse a CIPSO local tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO local tag and return the security attributes in @secattr.
 * Return zero on success, negatives values on failure.
 *
 */
static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def,
				 const unsigned char *tag,
				 struct netlbl_lsm_secattr *secattr)
{
	secattr->attr.secid = *(u32 *)&tag[2];
	secattr->flags |= NETLBL_SECATTR_SECID;

	return 0;
}

/**
 * cipso_v4_validate - Validate a CIPSO option
 * @skb: the packet
 * @option: the start of the option, on error it is set to point to the error
 *
 * Description:
 * This routine is called to validate a CIPSO option, it checks all of the
 * fields to ensure that they are at least valid, see the draft snippet below
 * for details.  If the option is valid then a zero value is returned and
 * the value of @option is unchanged.  If the option is invalid then a
 * non-zero value is returned and @option is adjusted to point to the
 * offending portion of the option.  From the IETF draft ...
 *
 *  "If any field within the CIPSO options, such as the DOI identifier, is not
 *   recognized the IP datagram is discarded and an ICMP 'parameter problem'
 *   (type 12) is generated and returned.  The ICMP code field is set to 'bad
 *   parameter' (code 0) and the pointer is set to the start of the CIPSO field
 *   that is unrecognized."
 *
 */
int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
{
	unsigned char *opt = *option;
	unsigned char *tag;
	unsigned char opt_iter;
	unsigned char err_offset = 0;
	u8 opt_len;
	u8 tag_len;
	struct cipso_v4_doi *doi_def = NULL;
	u32 tag_iter;

	/* caller already checks for length values that are too large */
	opt_len = opt[1];
	if (opt_len < 8) {
		err_offset = 1;
		goto validate_return;
	}

	rcu_read_lock();
	doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2]));
	if (doi_def == NULL) {
		err_offset = 2;
		goto validate_return_locked;
	}

	opt_iter = CIPSO_V4_HDR_LEN;
	tag = opt + opt_iter;
	while (opt_iter < opt_len) {
		/* the tag type must be one of those configured for this DOI;
		 * the tags[] array is terminated by CIPSO_V4_TAG_INVALID */
		for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];)
			if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID ||
			    ++tag_iter == CIPSO_V4_TAG_MAXCNT) {
				err_offset = opt_iter;
				goto validate_return_locked;
			}

		tag_len = tag[1];
		if (tag_len > (opt_len - opt_iter)) {
			err_offset = opt_iter + 1;
			goto validate_return_locked;
		}

		switch (tag[0]) {
		case CIPSO_V4_TAG_RBITMAP:
			if (tag_len < CIPSO_V4_TAG_RBM_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}

			/* We are already going to do all the verification
			 * necessary at the socket layer so from our point of
			 * view it is safe to turn these checks off (and less
			 * work), however, the CIPSO draft says we should do
			 * all the CIPSO validations here but it doesn't
			 * really specify _exactly_ what we need to validate
			 * ... so, just make it a sysctl tunable. */
			if (cipso_v4_rbm_strictvalid) {
				if (cipso_v4_map_lvl_valid(doi_def,
							   tag[3]) < 0) {
					err_offset = opt_iter + 3;
					goto validate_return_locked;
				}
				if (tag_len > CIPSO_V4_TAG_RBM_BLEN &&
				    cipso_v4_map_cat_rbm_valid(doi_def,
							    &tag[4],
							    tag_len - 4) < 0) {
					err_offset = opt_iter + 4;
					goto validate_return_locked;
				}
			}
			break;
		case CIPSO_V4_TAG_ENUM:
			if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}

			if (cipso_v4_map_lvl_valid(doi_def,
						   tag[3]) < 0) {
				err_offset = opt_iter + 3;
				goto validate_return_locked;
			}
			if (tag_len > CIPSO_V4_TAG_ENUM_BLEN &&
			    cipso_v4_map_cat_enum_valid(doi_def,
							&tag[4],
							tag_len - 4) < 0) {
				err_offset = opt_iter + 4;
				goto validate_return_locked;
			}
			break;
		case CIPSO_V4_TAG_RANGE:
			if (tag_len < CIPSO_V4_TAG_RNG_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}

			if (cipso_v4_map_lvl_valid(doi_def,
						   tag[3]) < 0) {
				err_offset = opt_iter + 3;
				goto validate_return_locked;
			}
			if (tag_len > CIPSO_V4_TAG_RNG_BLEN &&
			    cipso_v4_map_cat_rng_valid(doi_def,
						       &tag[4],
						       tag_len - 4) < 0) {
				err_offset = opt_iter + 4;
				goto validate_return_locked;
			}
			break;
		case CIPSO_V4_TAG_LOCAL:
			/* This is a non-standard tag that we only allow for
			 * local connections, so if the incoming interface is
			 * not the loopback device drop the packet. */
			if (!(skb->dev->flags & IFF_LOOPBACK)) {
				err_offset = opt_iter;
				goto validate_return_locked;
			}
			if (tag_len != CIPSO_V4_TAG_LOC_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}
			break;
		default:
			err_offset = opt_iter;
			goto validate_return_locked;
		}

		tag += tag_len;
		opt_iter += tag_len;
	}

validate_return_locked:
	rcu_read_unlock();
validate_return:
	/* err_offset == 0 means success and leaves @option untouched */
	*option = opt + err_offset;
	return err_offset;
}

/**
 * cipso_v4_error - Send the correct response for a bad packet
 * @skb: the packet
 * @error: the error code
 * @gateway: CIPSO gateway flag
 *
 * Description:
 * Based on the error code given in @error, send an ICMP error message back to
 * the originating host.  From the IETF draft ...
 *
 *  "If the contents of the CIPSO [option] are valid but the security label is
 *   outside of the configured host or port label range, the datagram is
 *   discarded and an ICMP 'destination unreachable' (type 3) is generated and
 *   returned.  The code field of the ICMP is set to 'communication with
 *   destination network administratively prohibited' (code 9) or to
 *   'communication with destination host administratively prohibited'
 *   (code 10).  The value of the code is dependent on whether the originator
 *   of the ICMP message is acting as a CIPSO host or a CIPSO gateway.  The
 *   recipient of the ICMP message MUST be able to handle either value.  The
 *   same procedure is performed if a CIPSO [option] can not be added to an
 *   IP packet because it is too large to fit in the IP options area."
 *
 *  "If the error is triggered by receipt of an ICMP message, the message is
 *   discarded and no response is permitted (consistent with general ICMP
 *   processing rules)."
 *
 */
void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
{
	/* never answer an ICMP message with another ICMP message, and only
	 * respond to label-range rejections (-EACCES) */
	if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
		return;

	if (gateway)
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0);
	else
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0);
}

/**
 * cipso_v4_genopt - Generate a CIPSO option
 * @buf: the option buffer
 * @buf_len: the size of opt_buf
 * @doi_def: the CIPSO DOI to use
 * @secattr: the security attributes
 *
 * Description:
 * Generate a CIPSO option using the DOI definition and security attributes
 * passed to the function.  Returns the length of the option on success and
 * negative values on failure.
 *
 */
static int cipso_v4_genopt(unsigned char *buf, u32 buf_len,
			   const struct cipso_v4_doi *doi_def,
			   const struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u32 iter;

	if (buf_len <= CIPSO_V4_HDR_LEN)
		return -ENOSPC;

	/* XXX - This code assumes only one tag per CIPSO option which isn't
	 * really a good assumption to make but since we only support the MAC
	 * tags right now it is a safe assumption. */
	iter = 0;
	do {
		/* try each tag type configured for the DOI, in order, until
		 * one of the generators succeeds */
		memset(buf, 0, buf_len);
		switch (doi_def->tags[iter]) {
		case CIPSO_V4_TAG_RBITMAP:
			ret_val = cipso_v4_gentag_rbm(doi_def,
						   secattr,
						   &buf[CIPSO_V4_HDR_LEN],
						   buf_len - CIPSO_V4_HDR_LEN);
			break;
		case CIPSO_V4_TAG_ENUM:
			ret_val = cipso_v4_gentag_enum(doi_def,
						   secattr,
						   &buf[CIPSO_V4_HDR_LEN],
						   buf_len - CIPSO_V4_HDR_LEN);
			break;
		case CIPSO_V4_TAG_RANGE:
			ret_val = cipso_v4_gentag_rng(doi_def,
						   secattr,
						   &buf[CIPSO_V4_HDR_LEN],
						   buf_len - CIPSO_V4_HDR_LEN);
			break;
		case CIPSO_V4_TAG_LOCAL:
			ret_val = cipso_v4_gentag_loc(doi_def,
						   secattr,
						   &buf[CIPSO_V4_HDR_LEN],
						   buf_len - CIPSO_V4_HDR_LEN);
			break;
		default:
			return -EPERM;
		}

		iter++;
	} while (ret_val < 0 &&
		 iter < CIPSO_V4_TAG_MAXCNT &&
		 doi_def->tags[iter] != CIPSO_V4_TAG_INVALID);
	if (ret_val < 0)
		return ret_val;
	cipso_v4_gentag_hdr(doi_def, buf, ret_val);
	return CIPSO_V4_HDR_LEN + ret_val;
}

/**
 * cipso_v4_sock_setattr - Add a CIPSO option to a socket
 * @sk: the socket
 * @doi_def: the CIPSO DOI to use
 * @secattr: the specific security attributes of the socket
 *
 * Description:
 * Set the CIPSO option on the given socket using the DOI definition and
 * security attributes passed to the function.  This function requires
 * exclusive access to @sk, which means it either needs to be in the
 * process of being created or locked.  Returns zero on success and negative
 * values on failure.
 *
 */
int cipso_v4_sock_setattr(struct sock *sk,
			  const struct cipso_v4_doi *doi_def,
			  const struct netlbl_lsm_secattr *secattr)
{
	int ret_val = -EPERM;
	unsigned char *buf = NULL;
	u32 buf_len;
	u32 opt_len;
	struct ip_options_rcu *old, *opt = NULL;
	struct inet_sock *sk_inet;
	struct inet_connection_sock *sk_conn;

	/* In the case of sock_create_lite(), the sock->sk field is not
	 * defined yet but it is not a problem as the only users of these
	 * "lite" PF_INET sockets are functions which do an accept() call
	 * afterwards so we will label the socket as part of the accept(). */
	if (sk == NULL)
		return 0;

	/* We allocate the maximum CIPSO option size here so we are probably
	 * being a little wasteful, but it makes our life _much_ easier later
	 * on and after all we are only talking about 40 bytes. */
	buf_len = CIPSO_V4_OPT_LEN_MAX;
	buf = kmalloc(buf_len, GFP_ATOMIC);
	if (buf == NULL) {
		ret_val = -ENOMEM;
		goto socket_setattr_failure;
	}

	ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
	if (ret_val < 0)
		goto socket_setattr_failure;
	buf_len = ret_val;

	/* We can't use ip_options_get() directly because it makes a call to
	 * ip_options_get_alloc() which allocates memory with GFP_KERNEL and
	 * we won't always have CAP_NET_RAW even though we _always_ want to
	 * set the IPOPT_CIPSO option. */
	opt_len = (buf_len + 3) & ~3;	/* options are padded to 4 bytes */
	opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
	if (opt == NULL) {
		ret_val = -ENOMEM;
		goto socket_setattr_failure;
	}
	memcpy(opt->opt.__data, buf, buf_len);
	opt->opt.optlen = opt_len;
	opt->opt.cipso = sizeof(struct iphdr);
	kfree(buf);
	buf = NULL;

	sk_inet = inet_sk(sk);

	old = rcu_dereference_protected(sk_inet->inet_opt,
					sock_owned_by_user(sk));
	if (sk_inet->is_icsk) {
		/* connection-oriented sockets track the extra header length
		 * for MSS calculations - adjust for the option swap */
		sk_conn = inet_csk(sk);
		if (old)
			sk_conn->icsk_ext_hdr_len -= old->opt.optlen;
		sk_conn->icsk_ext_hdr_len += opt->opt.optlen;
		sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
	}
	rcu_assign_pointer(sk_inet->inet_opt, opt);
	if (old)
		kfree_rcu(old, rcu);

	return 0;

socket_setattr_failure:
	kfree(buf);
	kfree(opt);
	return ret_val;
}

/**
 * cipso_v4_req_setattr - Add a CIPSO option to a connection request socket
 * @req: the connection request socket
 * @doi_def: the CIPSO DOI to use
 * @secattr: the specific security attributes of the socket
 *
 * Description:
 * Set the CIPSO option on the given socket using the DOI definition and
 * security attributes passed to the function.  Returns zero on success and
 * negative values on failure.
 *
 */
int cipso_v4_req_setattr(struct request_sock *req,
			 const struct cipso_v4_doi *doi_def,
			 const struct netlbl_lsm_secattr *secattr)
{
	int ret_val = -EPERM;
	unsigned char *buf = NULL;
	u32 buf_len;
	u32 opt_len;
	struct ip_options_rcu *opt = NULL;
	struct inet_request_sock *req_inet;

	/* We allocate the maximum CIPSO option size here so we are probably
	 * being a little wasteful, but it makes our life _much_ easier later
	 * on and after all we are only talking about 40 bytes. */
	buf_len = CIPSO_V4_OPT_LEN_MAX;
	buf = kmalloc(buf_len, GFP_ATOMIC);
	if (buf == NULL) {
		ret_val = -ENOMEM;
		goto req_setattr_failure;
	}

	ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
	if (ret_val < 0)
		goto req_setattr_failure;
	buf_len = ret_val;

	/* We can't use ip_options_get() directly because it makes a call to
	 * ip_options_get_alloc() which allocates memory with GFP_KERNEL and
	 * we won't always have CAP_NET_RAW even though we _always_ want to
	 * set the IPOPT_CIPSO option. */
	opt_len = (buf_len + 3) & ~3;	/* options are padded to 4 bytes */
	opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
	if (opt == NULL) {
		ret_val = -ENOMEM;
		goto req_setattr_failure;
	}
	memcpy(opt->opt.__data, buf, buf_len);
	opt->opt.optlen = opt_len;
	opt->opt.cipso = sizeof(struct iphdr);
	kfree(buf);
	buf = NULL;

	req_inet = inet_rsk(req);
	/* swap in the new options, free any previous set after a grace
	 * period */
	opt = xchg(&req_inet->opt, opt);
	if (opt)
		kfree_rcu(opt, rcu);

	return 0;

req_setattr_failure:
	kfree(buf);
	kfree(opt);
	return ret_val;
}

/**
 * cipso_v4_delopt - Delete the CIPSO option from a set of IP options
 * @opt_ptr: IP option pointer
 *
 * Description:
 * Deletes the CIPSO IP option from a set of IP options and makes the necessary
 * adjustments to the IP option structure.  Returns zero on success, negative
 * values on failure.
* */ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr) { int hdr_delta = 0; struct ip_options_rcu *opt = *opt_ptr; if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) { u8 cipso_len; u8 cipso_off; unsigned char *cipso_ptr; int iter; int optlen_new; cipso_off = opt->opt.cipso - sizeof(struct iphdr); cipso_ptr = &opt->opt.__data[cipso_off]; cipso_len = cipso_ptr[1]; if (opt->opt.srr > opt->opt.cipso) opt->opt.srr -= cipso_len; if (opt->opt.rr > opt->opt.cipso) opt->opt.rr -= cipso_len; if (opt->opt.ts > opt->opt.cipso) opt->opt.ts -= cipso_len; if (opt->opt.router_alert > opt->opt.cipso) opt->opt.router_alert -= cipso_len; opt->opt.cipso = 0; memmove(cipso_ptr, cipso_ptr + cipso_len, opt->opt.optlen - cipso_off - cipso_len); /* determining the new total option length is tricky because of * the padding necessary, the only thing i can think to do at * this point is walk the options one-by-one, skipping the * padding at the end to determine the actual option size and * from there we can determine the new total option length */ iter = 0; optlen_new = 0; while (iter < opt->opt.optlen) if (opt->opt.__data[iter] != IPOPT_NOP) { iter += opt->opt.__data[iter + 1]; optlen_new = iter; } else iter++; hdr_delta = opt->opt.optlen; opt->opt.optlen = (optlen_new + 3) & ~3; hdr_delta -= opt->opt.optlen; } else { /* only the cipso option was present on the socket so we can * remove the entire option struct */ *opt_ptr = NULL; hdr_delta = opt->opt.optlen; kfree_rcu(opt, rcu); } return hdr_delta; } /** * cipso_v4_sock_delattr - Delete the CIPSO option from a socket * @sk: the socket * * Description: * Removes the CIPSO option from a socket, if present. 
 *
 */
void cipso_v4_sock_delattr(struct sock *sk)
{
	int hdr_delta;
	struct ip_options_rcu *opt;
	struct inet_sock *sk_inet;

	sk_inet = inet_sk(sk);
	/* The caller is expected to hold the socket, hence the "1" for the
	 * lockdep check in rcu_dereference_protected(). */
	opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
	if (opt == NULL || opt->opt.cipso == 0)
		return;

	hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
	if (sk_inet->is_icsk && hdr_delta > 0) {
		/* Connection-oriented socket: the extension header length
		 * shrank, so recompute the cached MSS. */
		struct inet_connection_sock *sk_conn = inet_csk(sk);
		sk_conn->icsk_ext_hdr_len -= hdr_delta;
		sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
	}
}

/**
 * cipso_v4_req_delattr - Delete the CIPSO option from a request socket
 * @req: the request socket
 *
 * Description:
 * Removes the CIPSO option from a request socket, if present.
 *
 */
void cipso_v4_req_delattr(struct request_sock *req)
{
	struct ip_options_rcu *opt;
	struct inet_request_sock *req_inet;

	req_inet = inet_rsk(req);
	opt = req_inet->opt;
	if (opt == NULL || opt->opt.cipso == 0)
		return;

	cipso_v4_delopt(&req_inet->opt);
}

/**
 * cipso_v4_getattr - Helper function for the cipso_v4_*_getattr functions
 * @cipso: the CIPSO v4 option
 * @secattr: the security attributes
 *
 * Description:
 * Inspect @cipso and return the security attributes in @secattr.  Returns zero
 * on success and negative values on failure.
 *
 */
static int cipso_v4_getattr(const unsigned char *cipso,
			    struct netlbl_lsm_secattr *secattr)
{
	int ret_val = -ENOMSG;
	u32 doi;
	struct cipso_v4_doi *doi_def;

	/* Fast path: the translation may already be in the mapping cache;
	 * cipso[1] is the option length byte. */
	if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0)
		return 0;

	/* The 32-bit DOI value starts at byte offset 2 of the option. */
	doi = get_unaligned_be32(&cipso[2]);
	rcu_read_lock();
	doi_def = cipso_v4_doi_search(doi);
	if (doi_def == NULL)
		goto getattr_return;
	/* XXX - This code assumes only one tag per CIPSO option which isn't
	 * really a good assumption to make but since we only support the MAC
	 * tags right now it is a safe assumption. */
	switch (cipso[6]) {
	case CIPSO_V4_TAG_RBITMAP:
		ret_val = cipso_v4_parsetag_rbm(doi_def, &cipso[6], secattr);
		break;
	case CIPSO_V4_TAG_ENUM:
		ret_val = cipso_v4_parsetag_enum(doi_def, &cipso[6], secattr);
		break;
	case CIPSO_V4_TAG_RANGE:
		ret_val = cipso_v4_parsetag_rng(doi_def, &cipso[6], secattr);
		break;
	case CIPSO_V4_TAG_LOCAL:
		ret_val = cipso_v4_parsetag_loc(doi_def, &cipso[6], secattr);
		break;
	}
	if (ret_val == 0)
		secattr->type = NETLBL_NLTYPE_CIPSOV4;

getattr_return:
	rcu_read_unlock();
	return ret_val;
}

/**
 * cipso_v4_sock_getattr - Get the security attributes from a sock
 * @sk: the sock
 * @secattr: the security attributes
 *
 * Description:
 * Query @sk to see if there is a CIPSO option attached to the sock and if
 * there is return the CIPSO security attributes in @secattr.  This function
 * requires that @sk be locked, or privately held, but it does not do any
 * locking itself.  Returns zero on success and negative values on failure.
 *
 */
int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr)
{
	struct ip_options_rcu *opt;
	int res = -ENOMSG;

	rcu_read_lock();
	opt = rcu_dereference(inet_sk(sk)->inet_opt);
	if (opt && opt->opt.cipso)
		/* opt->opt.cipso is the offset of the CIPSO option from the
		 * start of the IP header; __data starts after the header. */
		res = cipso_v4_getattr(opt->opt.__data +
						opt->opt.cipso -
						sizeof(struct iphdr),
				       secattr);
	rcu_read_unlock();
	return res;
}

/**
 * cipso_v4_skbuff_setattr - Set the CIPSO option on a packet
 * @skb: the packet
 * @secattr: the security attributes
 *
 * Description:
 * Set the CIPSO option on the given packet based on the security attributes.
 * Returns zero on success and negative values on failure.
 *
 */
int cipso_v4_skbuff_setattr(struct sk_buff *skb,
			    const struct cipso_v4_doi *doi_def,
			    const struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	struct iphdr *iph;
	struct ip_options *opt = &IPCB(skb)->opt;
	unsigned char buf[CIPSO_V4_OPT_LEN_MAX];
	u32 buf_len = CIPSO_V4_OPT_LEN_MAX;
	u32 opt_len;
	int len_delta;

	ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
	if (ret_val < 0)
		return ret_val;
	buf_len = ret_val;
	/* Round up to a 32-bit word boundary for the IP header length. */
	opt_len = (buf_len + 3) & ~3;

	/* we overwrite any existing options to ensure that we have enough
	 * room for the CIPSO option, the reason is that we _need_ to guarantee
	 * that the security label is applied to the packet - we do the same
	 * thing when using the socket options and it hasn't caused a problem,
	 * if we need to we can always revisit this choice later */

	len_delta = opt_len - opt->optlen;
	/* if we don't ensure enough headroom we could panic on the skb_push()
	 * call below so make sure we have enough, we are also "mangling" the
	 * packet so we should probably do a copy-on-write call anyway */
	ret_val = skb_cow(skb, skb_headroom(skb) + len_delta);
	if (ret_val < 0)
		return ret_val;

	if (len_delta > 0) {
		/* we assume that the header + opt->optlen have already been
		 * "pushed" in ip_options_build() or similar */
		iph = ip_hdr(skb);
		skb_push(skb, len_delta);
		memmove((char *)iph - len_delta, iph, iph->ihl << 2);
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);
	} else if (len_delta < 0) {
		/* New option block is shorter: blank the old option area with
		 * NOPs before writing the new option over the front of it. */
		iph = ip_hdr(skb);
		memset(iph + 1, IPOPT_NOP, opt->optlen);
	} else
		iph = ip_hdr(skb);

	if (opt->optlen > 0)
		memset(opt, 0, sizeof(*opt));
	opt->optlen = opt_len;
	opt->cipso = sizeof(struct iphdr);
	opt->is_changed = 1;

	/* we have to do the following because we are being called from a
	 * netfilter hook which means the packet already has had the header
	 * fields populated and the checksum calculated - yes this means we
	 * are doing more work than needed but we do it to keep the core
	 * stack clean and tidy */
	memcpy(iph + 1, buf, buf_len);
	if (opt_len > buf_len)
		memset((char *)(iph + 1) + buf_len, 0, opt_len - buf_len);
	if (len_delta != 0) {
		iph->ihl = 5 + (opt_len >> 2);
		iph->tot_len = htons(skb->len);
	}
	ip_send_check(iph);

	return 0;
}

/**
 * cipso_v4_skbuff_delattr - Delete any CIPSO options from a packet
 * @skb: the packet
 *
 * Description:
 * Removes any and all CIPSO options from the given packet.  Returns zero on
 * success, negative values on failure.
 *
 */
int cipso_v4_skbuff_delattr(struct sk_buff *skb)
{
	int ret_val;
	struct iphdr *iph;
	struct ip_options *opt = &IPCB(skb)->opt;
	unsigned char *cipso_ptr;

	if (opt->cipso == 0)
		return 0;

	/* since we are changing the packet we should make a copy */
	ret_val = skb_cow(skb, skb_headroom(skb));
	if (ret_val < 0)
		return ret_val;

	/* the easiest thing to do is just replace the cipso option with noop
	 * options since we don't change the size of the packet, although we
	 * still need to recalculate the checksum */

	iph = ip_hdr(skb);
	cipso_ptr = (unsigned char *)iph + opt->cipso;
	/* cipso_ptr[1] is the option length byte */
	memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]);
	opt->cipso = 0;
	opt->is_changed = 1;

	ip_send_check(iph);

	return 0;
}

/**
 * cipso_v4_skbuff_getattr - Get the security attributes from the CIPSO option
 * @skb: the packet
 * @secattr: the security attributes
 *
 * Description:
 * Parse the given packet's CIPSO option and return the security attributes.
 * Returns zero on success and negative values on failure.
 *
 */
int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
			    struct netlbl_lsm_secattr *secattr)
{
	return cipso_v4_getattr(CIPSO_V4_OPTPTR(skb), secattr);
}

/*
 * Setup Functions
 */

/**
 * cipso_v4_init - Initialize the CIPSO module
 *
 * Description:
 * Initialize the CIPSO module and prepare it for use.  Returns zero on success
 * and negative values on failure.
 *
 */
static int __init cipso_v4_init(void)
{
	int ret_val;

	/* A cache failure here is fatal: the kernel can not run NetLabel
	 * without it, hence the panic() rather than an error return. */
	ret_val = cipso_v4_cache_init();
	if (ret_val != 0)
		panic("Failed to initialize the CIPSO/IPv4 cache (%d)\n",
		      ret_val);

	return 0;
}

subsys_initcall(cipso_v4_init);
gpl-2.0
sleekmason/cyanogenmod12
drivers/net/ethernet/sfc/nic.c
3382
62443
/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "regs.h"
#include "io.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST		0x000101
#define _EFX_CHANNEL_MAGIC_FILL		0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN	0x000104

/* A magic event encodes an 8-bit code and per-queue data in one word. */
#define _EFX_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EFX_CHANNEL_MAGIC_TEST(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)

/**************************************************************************
 *
 * Solarstorm hardware access
 *
 **************************************************************************/

/* Write one buffer-table descriptor to the NIC's buffer table SRAM. */
static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.addr)) +
		(index & channel->eventq_mask);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		  EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}

/* Compare two owords under a mask; true if any masked bit differs. */
static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

/* Register self-test: for each register in @regs, walk every testable
 * bit and verify it can be set and cleared in isolation.  The original
 * register contents are restored afterwards.  Returns 0 on success or
 * -EIO on the first mismatch.
 */
int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	/* Falcon should be in loopback to isolate the XMAC from the PHY */
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, GFP_KERNEL);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
	/* Host-owned buffer IDs must never collide with the VF region. */
	BUG_ON(efx_sriov_enabled(efx) &&
	       efx->vf_buftbl_base < efx->next_buffer_table);
#endif

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}

/* Release a special buffer's coherent DMA memory, if allocated. */
static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, buffer->len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
			  buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}

/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status and MAC stats
 *
 **************************************************************************/

/* Allocate a zeroed DMA-coherent buffer of @len bytes. */
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len)
{
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

/* Free a buffer allocated by efx_nic_alloc_buffer(); safe on unallocated. */
void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		pci_free_consistent(efx->pci_dev, buffer->len,
				    buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
				    const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}

/* Decide whether the next descriptor may be pushed inline with the
 * doorbell write: only allowed when the queue was seen empty
 * (empty_read_count carries the EFX_EMPTY_COUNT_VALID flag).
 */
static inline bool
efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	tx_queue->empty_read_count = 0;
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{

	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT, buffer->continuation,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_notify_tx_desc(tx_queue);
	}
}

/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

/* Pin the TX ring in the buffer table and program the descriptor-queue
 * pointer table entry for this queue.
 */
void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}

/* Ask the hardware to flush the TX descriptor queue. */
static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

/* Tear down the hardware side of a TX queue; inverse of efx_nic_init_tx(). */
void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

/* Allocate hardware resources for an RX queue */
int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

/* Pin the RX ring in the buffer table and program the descriptor-queue
 * pointer table entry for this queue.
 */
void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      /* For >=B0 this is scatter so disable */
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

/* Ask the hardware to flush the RX descriptor queue. */
static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

/* Tear down the hardware side of an RX queue; inverse of efx_nic_init_rx(). */
void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

/* efx_nic_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool efx_flush_wake(struct efx_nic *efx)
{
	/* Ensure that all updates are visible to efx_nic_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->drain_pending) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}

/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel.
 */
int efx_nic_flush_queues(struct efx_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	efx->fc_disable++;
	efx->type->prepare_flush(efx);

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			efx_flush_tx_queue(tx_queue);
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->drain_pending) > 0) {
		/* If SRIOV is enabled, then offload receive queue flushing to
		 * the firmware (though we will still have to poll for
		 * completion). If that fails, fall back to the old scheme.
		 */
		if (efx_sriov_enabled(efx)) {
			rc = efx_mcdi_flush_rxqs(efx);
			if (!rc)
				goto wait;
		}

		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EFX_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					efx_flush_rx_queue(rx_queue);
				}
			}
		}

	wait:
		timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->drain_pending)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->drain_pending, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	efx->fc_disable--;

	return rc;
}

/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);
	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			 channel->channel);
}

/* Use HW to insert a SW defined event */
void efx_generate_event(struct efx_nic *efx, unsigned int evq,
			efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

/* Inject a driver-generated "magic" event onto a channel's event queue. */
static void efx_magic_event(struct efx_channel *channel, u32 magic)
{
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel->efx, channel->channel, &event);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	/* Ignore events while a reset is pending. */
	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		efx_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}

/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				const efx_qword_t *event)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EFX_RX_PKT_DISCARD : 0;
}

/* Handle receive events that are not in-order. */
static void
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok;
	u16 flags;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	/* Queues are being torn down for a reset; drop the event. */
	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	/* This driver never uses jumbo (multi-descriptor) mode, and events
	 * must arrive on the queue labelled for this channel. */
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
	/* An out-of-order completion implies lost events; recover. */
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IP or
		 * UDP/IP, then we can rely on the hardware checksum.
		 */
		flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
			 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ?
			EFX_RX_PKT_CSUMMED : 0;
	} else {
		flags = efx_handle_rx_not_ok(rx_queue, event);
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			flags |= EFX_RX_PKT_DISCARD;
		}
	}

	/* Bias the adaptive IRQ moderation score towards RX traffic */
	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);
}

/* If this flush done event corresponds to a &struct efx_tx_queue, then
 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_tx_queue *tx_queue;
	int qid;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	/* Queue IDs at or above this range belong to VFs, not the driver */
	if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
		tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
					    qid % EFX_TXQ_TYPES);

		efx_magic_event(tx_queue->channel,
				EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
	}
}

/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */
static void
efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	int qid;
	bool failed;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	/* Ignore flush completions for queues this function doesn't own */
	if (qid >= efx->n_channels)
		return;
	channel = efx_get_channel(efx, qid);
	if (!efx_channel_has_rx_queue(channel))
		return;
	rx_queue = efx_channel_get_rx_queue(channel);

	if (failed) {
		netif_info(efx, hw, efx->net_dev,
			   "RXQ %d flush retry\n", qid);
		/* Put the queue back on the list of queues to flush */
		rx_queue->flush_pending = true;
		atomic_inc(&efx->rxq_flush_pending);
	} else {
		efx_magic_event(efx_rx_queue_channel(rx_queue),
				EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
	}
	atomic_dec(&efx->rxq_flush_outstanding);
	if (efx_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

/* Account for one drained queue; wake the flush waiter once everything
 * outstanding has drained. */
static void efx_handle_drain_event(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	WARN_ON(atomic_read(&efx->drain_pending) == 0);
	atomic_dec(&efx->drain_pending);
	if (efx_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

/* Dispatch a driver-generated ("magic") event: IRQ/event self-test,
 * RX refill request, or RX/TX drain completion. */
static void
efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue =
		efx_channel_has_rx_queue(channel) ?
		efx_channel_get_rx_queue(channel) : NULL;
	unsigned magic, code;

	magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	code = _EFX_CHANNEL_MAGIC_CODE(magic);

	if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
		/* Record which CPU observed the self-test event */
		channel->event_test_cpu = raw_smp_processor_id();
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue.  Refill it here */
		efx_fast_push_rx_descriptors(rx_queue);
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
		rx_queue->enabled = false;
		efx_handle_drain_event(channel);
	} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
		efx_handle_drain_event(channel);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
	}
}

/* Decode and handle a hardware "driver" event: flush completions,
 * initialisation notifications, wakeups and error reports. */
static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_handle_tx_flush_done(efx, event);
		efx_sriov_tx_flush_done(efx, event);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_handle_rx_flush_done(efx, event);
		efx_sriov_rx_flush_done(efx, event);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			"Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		/* Queue IDs below EFX_VI_BASE belong to the PF; above it
		 * they belong to a VF. */
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, rx_err, efx->net_dev,
				  "RX DMA Q %d reports descriptor fetch error."
				  " RX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		} else
			efx_sriov_desc_fetch_err(efx, ev_sub_data);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX DMA Q %d reports descriptor fetch error."
				  " TX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		} else
			efx_sriov_desc_fetch_err(efx, ev_sub_data);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}

/* Process events from @channel's event queue, handling at most @budget
 * received packets.  Returns the number of RX packets processed
 * ("spent"), which the NAPI caller compares against @budget.
 */
int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += efx_handle_tx_event(channel, &event);
			/* Cap TX work per poll too; hitting the cap counts
			 * as having consumed the whole budget. */
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_handle_driver_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_USER_EV:
			efx_sriov_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}

/* Check whether an event is present in the eventq at the current
 * read pointer.  Only useful for self-test.
 */
bool efx_nic_event_present(struct efx_channel *channel)
{
	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}

/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}

/* Initialise @channel's event queue: program the timer table (Siena and
 * later), pin the buffer, and push the queue to the hardware. */
void efx_nic_init_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e.
empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	efx->type->push_irq_moderation(channel);
}

/* Tear down @channel's event queue on the hardware and unpin its buffer */
void efx_nic_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_nic_remove_eventq(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}

/* Start an event-queue self-test by posting a magic test event;
 * completion is recorded in channel->event_test_cpu. */
void efx_nic_event_test_start(struct efx_channel *channel)
{
	channel->event_test_cpu = -1;
	smp_wmb();
	efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
}

/* Request an RX refill via a magic event on the queue's channel */
void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
{
	efx_magic_event(efx_rx_queue_channel(rx_queue),
			EFX_CHANNEL_MAGIC_FILL(rx_queue));
}

/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void efx_nic_interrupts(struct efx_nic *efx,
				      bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void efx_nic_enable_interrupts(struct efx_nic *efx)
{
	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	efx_nic_interrupts(efx, true, false);
}

void efx_nic_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_nic_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_nic_irq_test_start(struct efx_nic *efx)
{
	efx->last_irq_cpu = -1;
	smp_wmb();
	efx_nic_interrupts(efx, true, true);
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		  EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
			  EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (efx_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
	efx_nic_disable_interrupts(efx);

	/* Count errors and reset or disable the NIC accordingly.
	 * Errors older than EFX_INT_ERROR_EXPIRE seconds are forgotten. */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		/* NOTE(review): message is missing a space between the two
		 * concatenated literals ("seen.NIC") */
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen."
			  "NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}

/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedule event queue processing.
 */
static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Could this be ours?  If interrupts are disabled then the
	 * channel state may not be valid.
	 */
	if (!efx->legacy_irq_enabled)
		return result;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Handle non-event-queue sources */
	if (queues & (1U << efx->irq_level)) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	if (queues != 0) {
		if (EFX_WORKAROUND_15783(efx))
			efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		efx_for_each_channel(channel, efx) {
			if (queues & 1)
				efx_schedule_channel_irq(channel);
			queues >>= 1;
		}
		result = IRQ_HANDLED;

	} else if (EFX_WORKAROUND_15783(efx)) {
		efx_qword_t *event;

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		efx_for_each_channel(channel, efx) {
			event = efx_event(channel, channel->eventq_read_ptr);
			if (efx_event_present(event))
				efx_schedule_channel_irq(channel);
			else
				efx_nic_eventq_read_ack(channel);
		}
	}

	if (result == IRQ_HANDLED)
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return result;
}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
	struct efx_channel *channel = *(struct efx_channel **)dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Handle non-event-queue sources */
	if (channel->channel == efx->irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	/* Schedule processing of the channel */
	efx_schedule_channel_irq(channel);

	return IRQ_HANDLED;
}

/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
	size_t i = 0;
	efx_dword_t dword;

	/* The indirection table only exists on Falcon B0 and later */
	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rx_indir_table[i]);
		efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
	}
}

/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
*/ int efx_nic_init_interrupt(struct efx_nic *efx) { struct efx_channel *channel; int rc; if (!EFX_INT_MODE_USE_MSI(efx)) { irq_handler_t handler; if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) handler = efx_legacy_interrupt; else handler = falcon_legacy_interrupt_a1; rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED, efx->name, efx); if (rc) { netif_err(efx, drv, efx->net_dev, "failed to hook legacy IRQ %d\n", efx->pci_dev->irq); goto fail1; } return 0; } /* Hook MSI or MSI-X interrupt */ efx_for_each_channel(channel, efx) { rc = request_irq(channel->irq, efx_msi_interrupt, IRQF_PROBE_SHARED, /* Not shared */ efx->channel_name[channel->channel], &efx->channel[channel->channel]); if (rc) { netif_err(efx, drv, efx->net_dev, "failed to hook IRQ %d\n", channel->irq); goto fail2; } } return 0; fail2: efx_for_each_channel(channel, efx) free_irq(channel->irq, &efx->channel[channel->channel]); fail1: return rc; } void efx_nic_fini_interrupt(struct efx_nic *efx) { struct efx_channel *channel; efx_oword_t reg; /* Disable MSI/MSI-X interrupts */ efx_for_each_channel(channel, efx) { if (channel->irq) free_irq(channel->irq, &efx->channel[channel->channel]); } /* ACK legacy interrupt */ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) efx_reado(efx, &reg, FR_BZ_INT_ISR0); else falcon_irq_ack_a1(efx); /* Disable legacy interrupt */ if (efx->legacy_irq) free_irq(efx->legacy_irq, efx); } /* Looks at available SRAM resources and works out how many queues we * can support, and where things like descriptor caches should live. * * SRAM is split up as follows: * 0 buftbl entries for channels * efx->vf_buftbl_base buftbl entries for SR-IOV * efx->rx_dc_base RX descriptor caches * efx->tx_dc_base TX descriptor caches */ void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) { unsigned vi_count, buftbl_min; /* Account for the buffer table entries backing the datapath channels * and the descriptor caches for those channels. 
*/ buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE + efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE + efx->n_channels * EFX_MAX_EVQ_SIZE) * sizeof(efx_qword_t) / EFX_BUF_SIZE); vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); #ifdef CONFIG_SFC_SRIOV if (efx_sriov_wanted(efx)) { unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit; efx->vf_buftbl_base = buftbl_min; vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES; vi_count = max(vi_count, EFX_VI_BASE); buftbl_free = (sram_lim_qw - buftbl_min - vi_count * vi_dc_entries); entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) * efx_vf_size(efx)); vf_limit = min(buftbl_free / entries_per_vf, (1024U - EFX_VI_BASE) >> efx->vi_scale); if (efx->vf_count > vf_limit) { netif_err(efx, probe, efx->net_dev, "Reducing VF count from from %d to %d\n", efx->vf_count, vf_limit); efx->vf_count = vf_limit; } vi_count += efx->vf_count * efx_vf_size(efx); } #endif efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES; efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES; } u32 efx_nic_fpga_ver(struct efx_nic *efx) { efx_oword_t altera_build; efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER); } void efx_nic_init_common(struct efx_nic *efx) { efx_oword_t temp; /* Set positions of descriptor caches in SRAM. */ EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base); efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base); efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); /* Set TX descriptor cache size. */ BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG); /* Set RX descriptor cache size. Set low watermark to size-8, as * this allows most efficient prefetching. 
*/ BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG); EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); /* Program INT_KER address */ EFX_POPULATE_OWORD_2(temp, FRF_AZ_NORM_INT_VEC_DIS_KER, EFX_INT_MODE_USE_MSI(efx), FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) /* Use an interrupt level unused by event queues */ efx->irq_level = 0x1f; else /* Use a valid MSI-X vector */ efx->irq_level = 0; /* Enable all the genuinely fatal interrupts. (They are still * masked by the overall interrupt mask, controlled by * falcon_interrupts()). * * Note: All other fatal interrupts are enabled */ EFX_POPULATE_OWORD_3(temp, FRF_AZ_ILL_ADR_INT_KER_EN, 1, FRF_AZ_RBUF_OWN_INT_KER_EN, 1, FRF_AZ_TBUF_OWN_INT_KER_EN, 1); if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1); EFX_INVERT_OWORD(temp); efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); efx_nic_push_rx_indir_table(efx); /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. 
*/ efx_reado(efx, &temp, FR_AZ_TX_RESERVED); EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1); EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); /* Enable SW_EV to inherit in char driver - assume harmless here */ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); /* Prefetch threshold 2 => fetch when descriptor cache half empty */ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); /* Disable hardware watchdog which can misfire */ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); /* Squash TX of packets of 16 bytes or less */ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { EFX_POPULATE_OWORD_4(temp, /* Default values */ FRF_BZ_TX_PACE_SB_NOT_AF, 0x15, FRF_BZ_TX_PACE_SB_AF, 0xb, FRF_BZ_TX_PACE_FB_BASE, 0, /* Allow large pace values in the * fast bin. 
*/ FRF_BZ_TX_PACE_BIN_TH, FFE_BZ_TX_PACE_RESERVED); efx_writeo(efx, &temp, FR_BZ_TX_PACE); } } /* Register dump */ #define REGISTER_REVISION_A 1 #define REGISTER_REVISION_B 2 #define REGISTER_REVISION_C 3 #define REGISTER_REVISION_Z 3 /* latest revision */ struct efx_nic_reg { u32 offset:24; u32 min_revision:2, max_revision:2; }; #define REGISTER(name, min_rev, max_rev) { \ FR_ ## min_rev ## max_rev ## _ ## name, \ REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \ } #define REGISTER_AA(name) REGISTER(name, A, A) #define REGISTER_AB(name) REGISTER(name, A, B) #define REGISTER_AZ(name) REGISTER(name, A, Z) #define REGISTER_BB(name) REGISTER(name, B, B) #define REGISTER_BZ(name) REGISTER(name, B, Z) #define REGISTER_CZ(name) REGISTER(name, C, Z) static const struct efx_nic_reg efx_nic_regs[] = { REGISTER_AZ(ADR_REGION), REGISTER_AZ(INT_EN_KER), REGISTER_BZ(INT_EN_CHAR), REGISTER_AZ(INT_ADR_KER), REGISTER_BZ(INT_ADR_CHAR), /* INT_ACK_KER is WO */ /* INT_ISR0 is RC */ REGISTER_AZ(HW_INIT), REGISTER_CZ(USR_EV_CFG), REGISTER_AB(EE_SPI_HCMD), REGISTER_AB(EE_SPI_HADR), REGISTER_AB(EE_SPI_HDATA), REGISTER_AB(EE_BASE_PAGE), REGISTER_AB(EE_VPD_CFG0), /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */ /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */ /* PCIE_CORE_INDIRECT is indirect */ REGISTER_AB(NIC_STAT), REGISTER_AB(GPIO_CTL), REGISTER_AB(GLB_CTL), /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */ REGISTER_BZ(DP_CTRL), REGISTER_AZ(MEM_STAT), REGISTER_AZ(CS_DEBUG), REGISTER_AZ(ALTERA_BUILD), REGISTER_AZ(CSR_SPARE), REGISTER_AB(PCIE_SD_CTL0123), REGISTER_AB(PCIE_SD_CTL45), REGISTER_AB(PCIE_PCS_CTL_STAT), /* DEBUG_DATA_OUT is not used */ /* DRV_EV is WO */ REGISTER_AZ(EVQ_CTL), REGISTER_AZ(EVQ_CNT1), REGISTER_AZ(EVQ_CNT2), REGISTER_AZ(BUF_TBL_CFG), REGISTER_AZ(SRM_RX_DC_CFG), REGISTER_AZ(SRM_TX_DC_CFG), REGISTER_AZ(SRM_CFG), /* BUF_TBL_UPD is WO */ REGISTER_AZ(SRM_UPD_EVQ), REGISTER_AZ(SRAM_PARITY), REGISTER_AZ(RX_CFG), REGISTER_BZ(RX_FILTER_CTL), /* 
RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_CS),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MD_ID),
	/* MD_STAT is RC */
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_FC),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
};

/* A register table: @rows entries of @step bytes each at @offset,
 * valid between the given hardware revisions. */
struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
	u32 step:6, rows:21;
};

#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\
	step, rows							\
}
#define REGISTER_TABLE(name, min_rev, max_rev)				\
	REGISTER_TABLE_DIMENSIONS(					\
		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
		min_rev, max_rev,					\
		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)

static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries.  Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version.
	 */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};

/* Size in bytes of a register dump for this NIC revision; table rows
 * wider than 16 bytes are truncated to 16 in the dump. */
size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;
	size_t len = 0;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++)
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision)
			len += sizeof(efx_oword_t);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++)
		if (efx->type->revision >= table->min_revision &&
		    efx->type->revision <= table->max_revision)
			len += table->rows * min_t(size_t, table->step, 16);

	return len;
}

/* Dump every readable register/table entry valid for this revision into
 * @buf, which must be at least efx_nic_get_regs_len() bytes. */
void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
			case 4: /* 32-bit register or SRAM */
				efx_readd_table(efx, buf, table->offset, i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset, 2 * i);
				break;
			default:
				WARN_ON(1);
				return;
			}
			buf += size;
		}
	}
}
gpl-2.0
Fred6681/android_kernel_samsung_golden
lib/div64.c
3382
3128
/* * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com> * * Based on former do_div() implementation from asm-parisc/div64.h: * Copyright (C) 1999 Hewlett-Packard Co * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com> * * * Generic C version of 64bit/32bit division and modulo, with * 64bit result and 32bit remainder. * * The fast case for (n>>32 == 0) is handled inline by do_div(). * * Code generated for this function might be very inefficient * for some CPUs. __div64_32() can be overridden by linking arch-specific * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S. */ #include <linux/module.h> #include <linux/math64.h> /* Not needed on 64bit architectures */ #if BITS_PER_LONG == 32 uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base) { uint64_t rem = *n; uint64_t b = base; uint64_t res, d = 1; uint32_t high = rem >> 32; /* Reduce the thing a bit first */ res = 0; if (high >= base) { high /= base; res = (uint64_t) high << 32; rem -= (uint64_t) (high*base) << 32; } while ((int64_t)b > 0 && b < rem) { b = b+b; d = d+d; } do { if (rem >= b) { rem -= b; res += d; } b >>= 1; d >>= 1; } while (d); *n = res; return rem; } EXPORT_SYMBOL(__div64_32); #ifndef div_s64_rem s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) { u64 quotient; if (dividend < 0) { quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder); *remainder = -*remainder; if (divisor > 0) quotient = -quotient; } else { quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder); if (divisor < 0) quotient = -quotient; } return quotient; } EXPORT_SYMBOL(div_s64_rem); #endif /** * div64_u64 - unsigned 64bit divide with 64bit divisor * @dividend: 64bit dividend * @divisor: 64bit divisor * * This implementation is a modified version of the algorithm proposed * by the book 'Hacker's Delight'. The original source and full proof * can be found here and is available for use without restriction. 
* * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c' */ #ifndef div64_u64 u64 div64_u64(u64 dividend, u64 divisor) { u32 high = divisor >> 32; u64 quot; if (high == 0) { quot = div_u64(dividend, divisor); } else { int n = 1 + fls(high); quot = div_u64(dividend >> n, divisor >> n); if (quot != 0) quot--; if ((dividend - quot * divisor) >= divisor) quot++; } return quot; } EXPORT_SYMBOL(div64_u64); #endif /** * div64_s64 - signed 64bit divide with 64bit divisor * @dividend: 64bit dividend * @divisor: 64bit divisor */ #ifndef div64_s64 s64 div64_s64(s64 dividend, s64 divisor) { s64 quot, t; quot = div64_u64(abs64(dividend), abs64(divisor)); t = (dividend ^ divisor) >> 63; return (quot ^ t) - t; } EXPORT_SYMBOL(div64_s64); #endif #endif /* BITS_PER_LONG == 32 */ /* * Iterative div/mod for use when dividend is not expected to be much * bigger than divisor. */ u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) { return __iter_div_u64_rem(dividend, divisor, remainder); } EXPORT_SYMBOL(iter_div_u64_rem);
gpl-2.0
LeJay/android_kernel_samsung_jactiveltexx_stock
tools/perf/util/map.c
3638
15967
#include "symbol.h" #include <errno.h> #include <inttypes.h> #include <limits.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <unistd.h> #include "map.h" const char *map_type__name[MAP__NR_TYPES] = { [MAP__FUNCTION] = "Functions", [MAP__VARIABLE] = "Variables", }; static inline int is_anon_memory(const char *filename) { return strcmp(filename, "//anon") == 0; } static inline int is_no_dso_memory(const char *filename) { return !strcmp(filename, "[stack]") || !strcmp(filename, "[vdso]") || !strcmp(filename, "[heap]"); } void map__init(struct map *self, enum map_type type, u64 start, u64 end, u64 pgoff, struct dso *dso) { self->type = type; self->start = start; self->end = end; self->pgoff = pgoff; self->dso = dso; self->map_ip = map__map_ip; self->unmap_ip = map__unmap_ip; RB_CLEAR_NODE(&self->rb_node); self->groups = NULL; self->referenced = false; self->erange_warned = false; } struct map *map__new(struct list_head *dsos__list, u64 start, u64 len, u64 pgoff, u32 pid, char *filename, enum map_type type) { struct map *self = malloc(sizeof(*self)); if (self != NULL) { char newfilename[PATH_MAX]; struct dso *dso; int anon, no_dso; anon = is_anon_memory(filename); no_dso = is_no_dso_memory(filename); if (anon) { snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid); filename = newfilename; } dso = __dsos__findnew(dsos__list, filename); if (dso == NULL) goto out_delete; map__init(self, type, start, start + len, pgoff, dso); if (anon || no_dso) { self->map_ip = self->unmap_ip = identity__map_ip; /* * Set memory without DSO as loaded. All map__find_* * functions still return NULL, and we avoid the * unnecessary map__load warning. 
*/ if (no_dso) dso__set_loaded(dso, self->type); } } return self; out_delete: free(self); return NULL; } void map__delete(struct map *self) { free(self); } void map__fixup_start(struct map *self) { struct rb_root *symbols = &self->dso->symbols[self->type]; struct rb_node *nd = rb_first(symbols); if (nd != NULL) { struct symbol *sym = rb_entry(nd, struct symbol, rb_node); self->start = sym->start; } } void map__fixup_end(struct map *self) { struct rb_root *symbols = &self->dso->symbols[self->type]; struct rb_node *nd = rb_last(symbols); if (nd != NULL) { struct symbol *sym = rb_entry(nd, struct symbol, rb_node); self->end = sym->end; } } #define DSO__DELETED "(deleted)" int map__load(struct map *self, symbol_filter_t filter) { const char *name = self->dso->long_name; int nr; if (dso__loaded(self->dso, self->type)) return 0; nr = dso__load(self->dso, self, filter); if (nr < 0) { if (self->dso->has_build_id) { char sbuild_id[BUILD_ID_SIZE * 2 + 1]; build_id__sprintf(self->dso->build_id, sizeof(self->dso->build_id), sbuild_id); pr_warning("%s with build id %s not found", name, sbuild_id); } else pr_warning("Failed to open %s", name); pr_warning(", continuing without symbols\n"); return -1; } else if (nr == 0) { const size_t len = strlen(name); const size_t real_len = len - sizeof(DSO__DELETED); if (len > sizeof(DSO__DELETED) && strcmp(name + real_len + 1, DSO__DELETED) == 0) { pr_warning("%.*s was updated (is prelink enabled?). " "Restart the long running apps that use it!\n", (int)real_len, name); } else { pr_warning("no symbols found in %s, maybe install " "a debug package?\n", name); } return -1; } /* * Only applies to the kernel, as its symtabs aren't relative like the * module ones. 
*/ if (self->dso->kernel) map__reloc_vmlinux(self); return 0; } struct symbol *map__find_symbol(struct map *self, u64 addr, symbol_filter_t filter) { if (map__load(self, filter) < 0) return NULL; return dso__find_symbol(self->dso, self->type, addr); } struct symbol *map__find_symbol_by_name(struct map *self, const char *name, symbol_filter_t filter) { if (map__load(self, filter) < 0) return NULL; if (!dso__sorted_by_name(self->dso, self->type)) dso__sort_by_name(self->dso, self->type); return dso__find_symbol_by_name(self->dso, self->type, name); } struct map *map__clone(struct map *self) { struct map *map = malloc(sizeof(*self)); if (!map) return NULL; memcpy(map, self, sizeof(*self)); return map; } int map__overlap(struct map *l, struct map *r) { if (l->start > r->start) { struct map *t = l; l = r; r = t; } if (l->end > r->start) return 1; return 0; } size_t map__fprintf(struct map *self, FILE *fp) { return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n", self->start, self->end, self->pgoff, self->dso->name); } size_t map__fprintf_dsoname(struct map *map, FILE *fp) { const char *dsoname; if (map && map->dso && (map->dso->name || map->dso->long_name)) { if (symbol_conf.show_kernel_path && map->dso->long_name) dsoname = map->dso->long_name; else if (map->dso->name) dsoname = map->dso->name; } else dsoname = "[unknown]"; return fprintf(fp, "%s", dsoname); } /* * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN. * map->dso->adjust_symbols==1 for ET_EXEC-like cases. */ u64 map__rip_2objdump(struct map *map, u64 rip) { u64 addr = map->dso->adjust_symbols ? map->unmap_ip(map, rip) : /* RIP -> IP */ rip; return addr; } u64 map__objdump_2ip(struct map *map, u64 addr) { u64 ip = map->dso->adjust_symbols ? 
addr : map->unmap_ip(map, addr); /* RIP -> IP */ return ip; } void map_groups__init(struct map_groups *mg) { int i; for (i = 0; i < MAP__NR_TYPES; ++i) { mg->maps[i] = RB_ROOT; INIT_LIST_HEAD(&mg->removed_maps[i]); } mg->machine = NULL; } static void maps__delete(struct rb_root *maps) { struct rb_node *next = rb_first(maps); while (next) { struct map *pos = rb_entry(next, struct map, rb_node); next = rb_next(&pos->rb_node); rb_erase(&pos->rb_node, maps); map__delete(pos); } } static void maps__delete_removed(struct list_head *maps) { struct map *pos, *n; list_for_each_entry_safe(pos, n, maps, node) { list_del(&pos->node); map__delete(pos); } } void map_groups__exit(struct map_groups *mg) { int i; for (i = 0; i < MAP__NR_TYPES; ++i) { maps__delete(&mg->maps[i]); maps__delete_removed(&mg->removed_maps[i]); } } void map_groups__flush(struct map_groups *mg) { int type; for (type = 0; type < MAP__NR_TYPES; type++) { struct rb_root *root = &mg->maps[type]; struct rb_node *next = rb_first(root); while (next) { struct map *pos = rb_entry(next, struct map, rb_node); next = rb_next(&pos->rb_node); rb_erase(&pos->rb_node, root); /* * We may have references to this map, for * instance in some hist_entry instances, so * just move them to a separate list. 
*/ list_add_tail(&pos->node, &mg->removed_maps[pos->type]); } } } struct symbol *map_groups__find_symbol(struct map_groups *mg, enum map_type type, u64 addr, struct map **mapp, symbol_filter_t filter) { struct map *map = map_groups__find(mg, type, addr); if (map != NULL) { if (mapp != NULL) *mapp = map; return map__find_symbol(map, map->map_ip(map, addr), filter); } return NULL; } struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, enum map_type type, const char *name, struct map **mapp, symbol_filter_t filter) { struct rb_node *nd; for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) { struct map *pos = rb_entry(nd, struct map, rb_node); struct symbol *sym = map__find_symbol_by_name(pos, name, filter); if (sym == NULL) continue; if (mapp != NULL) *mapp = pos; return sym; } return NULL; } size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type, int verbose, FILE *fp) { size_t printed = fprintf(fp, "%s:\n", map_type__name[type]); struct rb_node *nd; for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) { struct map *pos = rb_entry(nd, struct map, rb_node); printed += fprintf(fp, "Map:"); printed += map__fprintf(pos, fp); if (verbose > 2) { printed += dso__fprintf(pos->dso, type, fp); printed += fprintf(fp, "--\n"); } } return printed; } size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp) { size_t printed = 0, i; for (i = 0; i < MAP__NR_TYPES; ++i) printed += __map_groups__fprintf_maps(mg, i, verbose, fp); return printed; } static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg, enum map_type type, int verbose, FILE *fp) { struct map *pos; size_t printed = 0; list_for_each_entry(pos, &mg->removed_maps[type], node) { printed += fprintf(fp, "Map:"); printed += map__fprintf(pos, fp); if (verbose > 1) { printed += dso__fprintf(pos->dso, type, fp); printed += fprintf(fp, "--\n"); } } return printed; } static size_t map_groups__fprintf_removed_maps(struct map_groups *mg, int verbose, 
FILE *fp) { size_t printed = 0, i; for (i = 0; i < MAP__NR_TYPES; ++i) printed += __map_groups__fprintf_removed_maps(mg, i, verbose, fp); return printed; } size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp) { size_t printed = map_groups__fprintf_maps(mg, verbose, fp); printed += fprintf(fp, "Removed maps:\n"); return printed + map_groups__fprintf_removed_maps(mg, verbose, fp); } int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, int verbose, FILE *fp) { struct rb_root *root = &mg->maps[map->type]; struct rb_node *next = rb_first(root); int err = 0; while (next) { struct map *pos = rb_entry(next, struct map, rb_node); next = rb_next(&pos->rb_node); if (!map__overlap(pos, map)) continue; if (verbose >= 2) { fputs("overlapping maps:\n", fp); map__fprintf(map, fp); map__fprintf(pos, fp); } rb_erase(&pos->rb_node, root); /* * Now check if we need to create new maps for areas not * overlapped by the new map: */ if (map->start > pos->start) { struct map *before = map__clone(pos); if (before == NULL) { err = -ENOMEM; goto move_map; } before->end = map->start - 1; map_groups__insert(mg, before); if (verbose >= 2) map__fprintf(before, fp); } if (map->end < pos->end) { struct map *after = map__clone(pos); if (after == NULL) { err = -ENOMEM; goto move_map; } after->start = map->end + 1; map_groups__insert(mg, after); if (verbose >= 2) map__fprintf(after, fp); } move_map: /* * If we have references, just move them to a separate list. */ if (pos->referenced) list_add_tail(&pos->node, &mg->removed_maps[map->type]); else map__delete(pos); if (err) return err; } return 0; } /* * XXX This should not really _copy_ te maps, but refcount them. 
*/ int map_groups__clone(struct map_groups *mg, struct map_groups *parent, enum map_type type) { struct rb_node *nd; for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) { struct map *map = rb_entry(nd, struct map, rb_node); struct map *new = map__clone(map); if (new == NULL) return -ENOMEM; map_groups__insert(mg, new); } return 0; } static u64 map__reloc_map_ip(struct map *map, u64 ip) { return ip + (s64)map->pgoff; } static u64 map__reloc_unmap_ip(struct map *map, u64 ip) { return ip - (s64)map->pgoff; } void map__reloc_vmlinux(struct map *self) { struct kmap *kmap = map__kmap(self); s64 reloc; if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr) return; reloc = (kmap->ref_reloc_sym->unrelocated_addr - kmap->ref_reloc_sym->addr); if (!reloc) return; self->map_ip = map__reloc_map_ip; self->unmap_ip = map__reloc_unmap_ip; self->pgoff = reloc; } void maps__insert(struct rb_root *maps, struct map *map) { struct rb_node **p = &maps->rb_node; struct rb_node *parent = NULL; const u64 ip = map->start; struct map *m; while (*p != NULL) { parent = *p; m = rb_entry(parent, struct map, rb_node); if (ip < m->start) p = &(*p)->rb_left; else p = &(*p)->rb_right; } rb_link_node(&map->rb_node, parent, p); rb_insert_color(&map->rb_node, maps); } void maps__remove(struct rb_root *self, struct map *map) { rb_erase(&map->rb_node, self); } struct map *maps__find(struct rb_root *maps, u64 ip) { struct rb_node **p = &maps->rb_node; struct rb_node *parent = NULL; struct map *m; while (*p != NULL) { parent = *p; m = rb_entry(parent, struct map, rb_node); if (ip < m->start) p = &(*p)->rb_left; else if (ip > m->end) p = &(*p)->rb_right; else return m; } return NULL; } int machine__init(struct machine *self, const char *root_dir, pid_t pid) { map_groups__init(&self->kmaps); RB_CLEAR_NODE(&self->rb_node); INIT_LIST_HEAD(&self->user_dsos); INIT_LIST_HEAD(&self->kernel_dsos); self->threads = RB_ROOT; INIT_LIST_HEAD(&self->dead_threads); self->last_match = NULL; 
self->kmaps.machine = self; self->pid = pid; self->root_dir = strdup(root_dir); return self->root_dir == NULL ? -ENOMEM : 0; } static void dsos__delete(struct list_head *self) { struct dso *pos, *n; list_for_each_entry_safe(pos, n, self, node) { list_del(&pos->node); dso__delete(pos); } } void machine__exit(struct machine *self) { map_groups__exit(&self->kmaps); dsos__delete(&self->user_dsos); dsos__delete(&self->kernel_dsos); free(self->root_dir); self->root_dir = NULL; } void machine__delete(struct machine *self) { machine__exit(self); free(self); } struct machine *machines__add(struct rb_root *self, pid_t pid, const char *root_dir) { struct rb_node **p = &self->rb_node; struct rb_node *parent = NULL; struct machine *pos, *machine = malloc(sizeof(*machine)); if (!machine) return NULL; if (machine__init(machine, root_dir, pid) != 0) { free(machine); return NULL; } while (*p != NULL) { parent = *p; pos = rb_entry(parent, struct machine, rb_node); if (pid < pos->pid) p = &(*p)->rb_left; else p = &(*p)->rb_right; } rb_link_node(&machine->rb_node, parent, p); rb_insert_color(&machine->rb_node, self); return machine; } struct machine *machines__find(struct rb_root *self, pid_t pid) { struct rb_node **p = &self->rb_node; struct rb_node *parent = NULL; struct machine *machine; struct machine *default_machine = NULL; while (*p != NULL) { parent = *p; machine = rb_entry(parent, struct machine, rb_node); if (pid < machine->pid) p = &(*p)->rb_left; else if (pid > machine->pid) p = &(*p)->rb_right; else return machine; if (!machine->pid) default_machine = machine; } return default_machine; } struct machine *machines__findnew(struct rb_root *self, pid_t pid) { char path[PATH_MAX]; const char *root_dir; struct machine *machine = machines__find(self, pid); if (!machine || machine->pid != pid) { if (pid == HOST_KERNEL_ID || pid == DEFAULT_GUEST_KERNEL_ID) root_dir = ""; else { if (!symbol_conf.guestmount) goto out; sprintf(path, "%s/%d", symbol_conf.guestmount, pid); if 
(access(path, R_OK)) { pr_err("Can't access file %s\n", path); goto out; } root_dir = path; } machine = machines__add(self, pid, root_dir); } out: return machine; } void machines__process(struct rb_root *self, machine__process_t process, void *data) { struct rb_node *nd; for (nd = rb_first(self); nd; nd = rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); process(pos, data); } } char *machine__mmap_name(struct machine *self, char *bf, size_t size) { if (machine__is_host(self)) snprintf(bf, size, "[%s]", "kernel.kallsyms"); else if (machine__is_default_guest(self)) snprintf(bf, size, "[%s]", "guest.kernel.kallsyms"); else snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", self->pid); return bf; }
gpl-2.0
milodky/kernel_for_nexus7
arch/arm/mach-nuc93x/nuc932.c
3894
1255
/*
 * linux/arch/arm/mach-nuc93x/nuc932.c
 *
 * Copyright (c) 2009 Nuvoton corporation.
 *
 * Wan ZongShun <mcuos.com@gmail.com>
 *
 * NUC932 cpu support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation;version 2 of the License.
 *
 */

#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>

#include <asm/mach/map.h>
#include <mach/hardware.h>

#include "cpu.h"
#include "clock.h"

/* NUC932-specific platform devices (currently none; the shared NUC93x
 * devices are registered by nuc93x_board_init()). */
static struct platform_device *nuc932_dev[] __initdata = {
};

/* NUC932 EVB-specific static I/O mappings (currently none beyond the
 * common NUC93x map). */
static struct map_desc nuc932evb_iodesc[] __initdata = {
};

/* Set up the NUC932 EVB I/O mappings via the common NUC93x helper. */
void __init nuc932_map_io(void)
{
	nuc93x_map_io(nuc932evb_iodesc, ARRAY_SIZE(nuc932evb_iodesc));
}

/* Register the NUC932 clocks (delegates to the family-wide clock init). */
void __init nuc932_init_clocks(void)
{
	nuc93x_init_clocks();
}

/* Enable the NUC932 UART clock.  BUG_ON fires if the "uart" clock is
 * missing, since the console cannot work without it. */
void __init nuc932_init_uartclk(void)
{
	struct clk *ck_uart = clk_get(NULL, "uart");

	BUG_ON(IS_ERR(ck_uart));

	clk_enable(ck_uart);
}

/* Register the NUC932 board devices via the common NUC93x helper. */
void __init nuc932_board_init(void)
{
	nuc93x_board_init(nuc932_dev, ARRAY_SIZE(nuc932_dev));
}
gpl-2.0
NamelessRom/android_kernel_samsung_jf
arch/m68k/atari/ataints.c
4406
6330
/* * arch/m68k/atari/ataints.c -- Atari Linux interrupt handling code * * 5/2/94 Roman Hodek: * Added support for TT interrupts; setup for TT SCU (may someone has * twiddled there and we won't get the right interrupts :-() * * Major change: The device-independent code in m68k/ints.c didn't know * about non-autovec ints yet. It hardcoded the number of possible ints to * 7 (IRQ1...IRQ7). But the Atari has lots of non-autovec ints! I made the * number of possible ints a constant defined in interrupt.h, which is * 47 for the Atari. So we can call request_irq() for all Atari interrupts * just the normal way. Additionally, all vectors >= 48 are initialized to * call trap() instead of inthandler(). This must be changed here, too. * * 1995-07-16 Lars Brinkhoff <f93labr@dd.chalmers.se>: * Corrected a bug in atari_add_isr() which rejected all SCC * interrupt sources if there were no TT MFP! * * 12/13/95: New interface functions atari_level_triggered_int() and * atari_register_vme_int() as support for level triggered VME interrupts. * * 02/12/96: (Roman) * Total rewrite of Atari interrupt handling, for new scheme see comments * below. * * 1996-09-03 lars brinkhoff <f93labr@dd.chalmers.se>: * Added new function atari_unregister_vme_int(), and * modified atari_register_vme_int() as well as IS_VALID_INTNO() * to work with it. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. 
* */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/kernel_stat.h> #include <linux/init.h> #include <linux/seq_file.h> #include <linux/module.h> #include <asm/traps.h> #include <asm/atarihw.h> #include <asm/atariints.h> #include <asm/atari_stdma.h> #include <asm/irq.h> #include <asm/entry.h> /* * Atari interrupt handling scheme: * -------------------------------- * * All interrupt source have an internal number (defined in * <asm/atariints.h>): Autovector interrupts are 1..7, then follow ST-MFP, * TT-MFP, SCC, and finally VME interrupts. Vector numbers for the latter can * be allocated by atari_register_vme_int(). */ /* * Bitmap for free interrupt vector numbers * (new vectors starting from 0x70 can be allocated by * atari_register_vme_int()) */ static int free_vme_vec_bitmap; /* GK: * HBL IRQ handler for Falcon. Nobody needs it :-) * ++andreas: raise ipl to disable further HBLANK interrupts. */ asmlinkage void falcon_hblhandler(void); asm(".text\n" __ALIGN_STR "\n\t" "falcon_hblhandler:\n\t" "orw #0x200,%sp@\n\t" /* set saved ipl to 2 */ "rte"); extern void atari_microwire_cmd(int cmd); static unsigned int atari_irq_startup(struct irq_data *data) { unsigned int irq = data->irq; m68k_irq_startup(data); atari_turnon_irq(irq); atari_enable_irq(irq); return 0; } static void atari_irq_shutdown(struct irq_data *data) { unsigned int irq = data->irq; atari_disable_irq(irq); atari_turnoff_irq(irq); m68k_irq_shutdown(data); if (irq == IRQ_AUTO_4) vectors[VEC_INT4] = falcon_hblhandler; } static void atari_irq_enable(struct irq_data *data) { atari_enable_irq(data->irq); } static void atari_irq_disable(struct irq_data *data) { atari_disable_irq(data->irq); } static struct irq_chip atari_irq_chip = { .name = "atari", .irq_startup = atari_irq_startup, .irq_shutdown = atari_irq_shutdown, .irq_enable = atari_irq_enable, .irq_disable = atari_irq_disable, }; /* * void atari_init_IRQ (void) * * Parameters: None * * Returns: Nothing * * This function should be 
called during kernel startup to initialize * the atari IRQ handling routines. */ void __init atari_init_IRQ(void) { m68k_setup_user_interrupt(VEC_USER, NUM_ATARI_SOURCES - IRQ_USER); m68k_setup_irq_controller(&atari_irq_chip, handle_simple_irq, 1, NUM_ATARI_SOURCES - 1); /* Initialize the MFP(s) */ #ifdef ATARI_USE_SOFTWARE_EOI st_mfp.vec_adr = 0x48; /* Software EOI-Mode */ #else st_mfp.vec_adr = 0x40; /* Automatic EOI-Mode */ #endif st_mfp.int_en_a = 0x00; /* turn off MFP-Ints */ st_mfp.int_en_b = 0x00; st_mfp.int_mk_a = 0xff; /* no Masking */ st_mfp.int_mk_b = 0xff; if (ATARIHW_PRESENT(TT_MFP)) { #ifdef ATARI_USE_SOFTWARE_EOI tt_mfp.vec_adr = 0x58; /* Software EOI-Mode */ #else tt_mfp.vec_adr = 0x50; /* Automatic EOI-Mode */ #endif tt_mfp.int_en_a = 0x00; /* turn off MFP-Ints */ tt_mfp.int_en_b = 0x00; tt_mfp.int_mk_a = 0xff; /* no Masking */ tt_mfp.int_mk_b = 0xff; } if (ATARIHW_PRESENT(SCC) && !atari_SCC_reset_done) { atari_scc.cha_a_ctrl = 9; MFPDELAY(); atari_scc.cha_a_ctrl = (char) 0xc0; /* hardware reset */ } if (ATARIHW_PRESENT(SCU)) { /* init the SCU if present */ tt_scu.sys_mask = 0x10; /* enable VBL (for the cursor) and * disable HSYNC interrupts (who * needs them?) MFP and SCC are * enabled in VME mask */ tt_scu.vme_mask = 0x60; /* enable MFP and SCC ints */ } else { /* If no SCU and no Hades, the HSYNC interrupt needs to be * disabled this way. (Else _inthandler in kernel/sys_call.S * gets overruns) */ vectors[VEC_INT2] = falcon_hblhandler; vectors[VEC_INT4] = falcon_hblhandler; } if (ATARIHW_PRESENT(PCM_8BIT) && ATARIHW_PRESENT(MICROWIRE)) { /* Initialize the LM1992 Sound Controller to enable the PSG sound. This is misplaced here, it should be in an atasound_init(), that doesn't exist yet. 
*/ atari_microwire_cmd(MW_LM1992_PSG_HIGH); } stdma_init(); /* Initialize the PSG: all sounds off, both ports output */ sound_ym.rd_data_reg_sel = 7; sound_ym.wd_data = 0xff; } /* * atari_register_vme_int() returns the number of a free interrupt vector for * hardware with a programmable int vector (probably a VME board). */ unsigned long atari_register_vme_int(void) { int i; for (i = 0; i < 32; i++) if ((free_vme_vec_bitmap & (1 << i)) == 0) break; if (i == 16) return 0; free_vme_vec_bitmap |= 1 << i; return VME_SOURCE_BASE + i; } EXPORT_SYMBOL(atari_register_vme_int); void atari_unregister_vme_int(unsigned long irq) { if (irq >= VME_SOURCE_BASE && irq < VME_SOURCE_BASE + VME_MAX_SOURCES) { irq -= VME_SOURCE_BASE; free_vme_vec_bitmap &= ~(1 << irq); } } EXPORT_SYMBOL(atari_unregister_vme_int);
gpl-2.0
sssangram14/android_kernel_samsung_arubaslim
sound/pci/hda/patch_si3054.c
7990
9928
/* * Universal Interface for Intel High Definition Audio Codec * * HD audio interface patch for Silicon Labs 3054/5 modem codec * * Copyright (c) 2005 Sasha Khapyorsky <sashak@alsa-project.org> * Takashi Iwai <tiwai@suse.de> * * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/module.h> #include <sound/core.h> #include "hda_codec.h" #include "hda_local.h" /* si3054 verbs */ #define SI3054_VERB_READ_NODE 0x900 #define SI3054_VERB_WRITE_NODE 0x100 /* si3054 nodes (registers) */ #define SI3054_EXTENDED_MID 2 #define SI3054_LINE_RATE 3 #define SI3054_LINE_LEVEL 4 #define SI3054_GPIO_CFG 5 #define SI3054_GPIO_POLARITY 6 #define SI3054_GPIO_STICKY 7 #define SI3054_GPIO_WAKEUP 8 #define SI3054_GPIO_STATUS 9 #define SI3054_GPIO_CONTROL 10 #define SI3054_MISC_AFE 11 #define SI3054_CHIPID 12 #define SI3054_LINE_CFG1 13 #define SI3054_LINE_STATUS 14 #define SI3054_DC_TERMINATION 15 #define SI3054_LINE_CONFIG 16 #define SI3054_CALLPROG_ATT 17 #define SI3054_SQ_CONTROL 18 #define SI3054_MISC_CONTROL 19 #define SI3054_RING_CTRL1 20 #define SI3054_RING_CTRL2 21 /* extended MID */ #define SI3054_MEI_READY 0xf /* line level */ #define SI3054_ATAG_MASK 0x00f0 #define SI3054_DTAG_MASK 0xf000 /* GPIO bits */ #define SI3054_GPIO_OH 0x0001 #define SI3054_GPIO_CID 
0x0002 /* chipid and revisions */ #define SI3054_CHIPID_CODEC_REV_MASK 0x000f #define SI3054_CHIPID_DAA_REV_MASK 0x00f0 #define SI3054_CHIPID_INTERNATIONAL 0x0100 #define SI3054_CHIPID_DAA_ID 0x0f00 #define SI3054_CHIPID_CODEC_ID (1<<12) /* si3054 codec registers (nodes) access macros */ #define GET_REG(codec,reg) (snd_hda_codec_read(codec,reg,0,SI3054_VERB_READ_NODE,0)) #define SET_REG(codec,reg,val) (snd_hda_codec_write(codec,reg,0,SI3054_VERB_WRITE_NODE,val)) #define SET_REG_CACHE(codec,reg,val) \ snd_hda_codec_write_cache(codec,reg,0,SI3054_VERB_WRITE_NODE,val) struct si3054_spec { unsigned international; struct hda_pcm pcm; }; /* * Modem mixer */ #define PRIVATE_VALUE(reg,mask) ((reg<<16)|(mask&0xffff)) #define PRIVATE_REG(val) ((val>>16)&0xffff) #define PRIVATE_MASK(val) (val&0xffff) #define si3054_switch_info snd_ctl_boolean_mono_info static int si3054_switch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *uvalue) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); u16 reg = PRIVATE_REG(kcontrol->private_value); u16 mask = PRIVATE_MASK(kcontrol->private_value); uvalue->value.integer.value[0] = (GET_REG(codec, reg)) & mask ? 
1 : 0 ; return 0; } static int si3054_switch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *uvalue) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); u16 reg = PRIVATE_REG(kcontrol->private_value); u16 mask = PRIVATE_MASK(kcontrol->private_value); if (uvalue->value.integer.value[0]) SET_REG_CACHE(codec, reg, (GET_REG(codec, reg)) | mask); else SET_REG_CACHE(codec, reg, (GET_REG(codec, reg)) & ~mask); return 0; } #define SI3054_KCONTROL(kname,reg,mask) { \ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ .name = kname, \ .subdevice = HDA_SUBDEV_NID_FLAG | reg, \ .info = si3054_switch_info, \ .get = si3054_switch_get, \ .put = si3054_switch_put, \ .private_value = PRIVATE_VALUE(reg,mask), \ } static const struct snd_kcontrol_new si3054_modem_mixer[] = { SI3054_KCONTROL("Off-hook Switch", SI3054_GPIO_CONTROL, SI3054_GPIO_OH), SI3054_KCONTROL("Caller ID Switch", SI3054_GPIO_CONTROL, SI3054_GPIO_CID), {} }; static int si3054_build_controls(struct hda_codec *codec) { return snd_hda_add_new_ctls(codec, si3054_modem_mixer); } /* * PCM callbacks */ static int si3054_pcm_prepare(struct hda_pcm_stream *hinfo, struct hda_codec *codec, unsigned int stream_tag, unsigned int format, struct snd_pcm_substream *substream) { u16 val; SET_REG(codec, SI3054_LINE_RATE, substream->runtime->rate); val = GET_REG(codec, SI3054_LINE_LEVEL); val &= 0xff << (8 * (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)); val |= ((stream_tag & 0xf) << 4) << (8 * (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)); SET_REG(codec, SI3054_LINE_LEVEL, val); snd_hda_codec_setup_stream(codec, hinfo->nid, stream_tag, 0, format); return 0; } static int si3054_pcm_open(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { static unsigned int rates[] = { 8000, 9600, 16000 }; static struct snd_pcm_hw_constraint_list hw_constraints_rates = { .count = ARRAY_SIZE(rates), .list = rates, .mask = 0, }; substream->runtime->hw.period_bytes_min = 80; return 
snd_pcm_hw_constraint_list(substream->runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_rates); } static const struct hda_pcm_stream si3054_pcm = { .substreams = 1, .channels_min = 1, .channels_max = 1, .nid = 0x1, .rates = SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_KNOT, .formats = SNDRV_PCM_FMTBIT_S16_LE, .maxbps = 16, .ops = { .open = si3054_pcm_open, .prepare = si3054_pcm_prepare, }, }; static int si3054_build_pcms(struct hda_codec *codec) { struct si3054_spec *spec = codec->spec; struct hda_pcm *info = &spec->pcm; codec->num_pcms = 1; codec->pcm_info = info; info->name = "Si3054 Modem"; info->stream[SNDRV_PCM_STREAM_PLAYBACK] = si3054_pcm; info->stream[SNDRV_PCM_STREAM_CAPTURE] = si3054_pcm; info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = codec->mfg; info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = codec->mfg; info->pcm_type = HDA_PCM_TYPE_MODEM; return 0; } /* * Init part */ static int si3054_init(struct hda_codec *codec) { struct si3054_spec *spec = codec->spec; unsigned wait_count; u16 val; snd_hda_codec_write(codec, AC_NODE_ROOT, 0, AC_VERB_SET_CODEC_RESET, 0); snd_hda_codec_write(codec, codec->mfg, 0, AC_VERB_SET_STREAM_FORMAT, 0); SET_REG(codec, SI3054_LINE_RATE, 9600); SET_REG(codec, SI3054_LINE_LEVEL, SI3054_DTAG_MASK|SI3054_ATAG_MASK); SET_REG(codec, SI3054_EXTENDED_MID, 0); wait_count = 10; do { msleep(2); val = GET_REG(codec, SI3054_EXTENDED_MID); } while ((val & SI3054_MEI_READY) != SI3054_MEI_READY && wait_count--); if((val&SI3054_MEI_READY) != SI3054_MEI_READY) { snd_printk(KERN_ERR "si3054: cannot initialize. 
EXT MID = %04x\n", val); /* let's pray that this is no fatal error */ /* return -EACCES; */ } SET_REG(codec, SI3054_GPIO_POLARITY, 0xffff); SET_REG(codec, SI3054_GPIO_CFG, 0x0); SET_REG(codec, SI3054_MISC_AFE, 0); SET_REG(codec, SI3054_LINE_CFG1,0x200); if((GET_REG(codec,SI3054_LINE_STATUS) & (1<<6)) == 0) { snd_printd("Link Frame Detect(FDT) is not ready (line status: %04x)\n", GET_REG(codec,SI3054_LINE_STATUS)); } spec->international = GET_REG(codec, SI3054_CHIPID) & SI3054_CHIPID_INTERNATIONAL; return 0; } static void si3054_free(struct hda_codec *codec) { kfree(codec->spec); } /* */ static const struct hda_codec_ops si3054_patch_ops = { .build_controls = si3054_build_controls, .build_pcms = si3054_build_pcms, .init = si3054_init, .free = si3054_free, }; static int patch_si3054(struct hda_codec *codec) { struct si3054_spec *spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (spec == NULL) return -ENOMEM; codec->spec = spec; codec->patch_ops = si3054_patch_ops; return 0; } /* * patch entries */ static const struct hda_codec_preset snd_hda_preset_si3054[] = { { .id = 0x163c3055, .name = "Si3054", .patch = patch_si3054 }, { .id = 0x163c3155, .name = "Si3054", .patch = patch_si3054 }, { .id = 0x11c13026, .name = "Si3054", .patch = patch_si3054 }, { .id = 0x11c13055, .name = "Si3054", .patch = patch_si3054 }, { .id = 0x11c13155, .name = "Si3054", .patch = patch_si3054 }, { .id = 0x10573055, .name = "Si3054", .patch = patch_si3054 }, { .id = 0x10573057, .name = "Si3054", .patch = patch_si3054 }, { .id = 0x10573155, .name = "Si3054", .patch = patch_si3054 }, /* VIA HDA on Clevo m540 */ { .id = 0x11063288, .name = "Si3054", .patch = patch_si3054 }, /* Asus A8J Modem (SM56) */ { .id = 0x15433155, .name = "Si3054", .patch = patch_si3054 }, /* LG LW20 modem */ { .id = 0x18540018, .name = "Si3054", .patch = patch_si3054 }, {} }; MODULE_ALIAS("snd-hda-codec-id:163c3055"); MODULE_ALIAS("snd-hda-codec-id:163c3155"); MODULE_ALIAS("snd-hda-codec-id:11c13026"); 
MODULE_ALIAS("snd-hda-codec-id:11c13055"); MODULE_ALIAS("snd-hda-codec-id:11c13155"); MODULE_ALIAS("snd-hda-codec-id:10573055"); MODULE_ALIAS("snd-hda-codec-id:10573057"); MODULE_ALIAS("snd-hda-codec-id:10573155"); MODULE_ALIAS("snd-hda-codec-id:11063288"); MODULE_ALIAS("snd-hda-codec-id:15433155"); MODULE_ALIAS("snd-hda-codec-id:18540018"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Si3054 HD-audio modem codec"); static struct hda_codec_preset_list si3054_list = { .preset = snd_hda_preset_si3054, .owner = THIS_MODULE, }; static int __init patch_si3054_init(void) { return snd_hda_add_codec_preset(&si3054_list); } static void __exit patch_si3054_exit(void) { snd_hda_delete_codec_preset(&si3054_list); } module_init(patch_si3054_init) module_exit(patch_si3054_exit)
gpl-2.0
gherkaul/kernel_lge_fx3
arch/arm/kernel/dma.c
9014
5794
/* * linux/arch/arm/kernel/dma.c * * Copyright (C) 1995-2000 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Front-end to the DMA handling. This handles the allocation/freeing * of DMA channels, and provides a unified interface to the machines * DMA facilities. */ #include <linux/module.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/errno.h> #include <linux/scatterlist.h> #include <linux/seq_file.h> #include <linux/proc_fs.h> #include <asm/dma.h> #include <asm/mach/dma.h> DEFINE_RAW_SPINLOCK(dma_spin_lock); EXPORT_SYMBOL(dma_spin_lock); static dma_t *dma_chan[MAX_DMA_CHANNELS]; static inline dma_t *dma_channel(unsigned int chan) { if (chan >= MAX_DMA_CHANNELS) return NULL; return dma_chan[chan]; } int __init isa_dma_add(unsigned int chan, dma_t *dma) { if (!dma->d_ops) return -EINVAL; sg_init_table(&dma->buf, 1); if (dma_chan[chan]) return -EBUSY; dma_chan[chan] = dma; return 0; } /* * Request DMA channel * * On certain platforms, we have to allocate an interrupt as well... */ int request_dma(unsigned int chan, const char *device_id) { dma_t *dma = dma_channel(chan); int ret; if (!dma) goto bad_dma; if (xchg(&dma->lock, 1) != 0) goto busy; dma->device_id = device_id; dma->active = 0; dma->invalid = 1; ret = 0; if (dma->d_ops->request) ret = dma->d_ops->request(chan, dma); if (ret) xchg(&dma->lock, 0); return ret; bad_dma: printk(KERN_ERR "dma: trying to allocate DMA%d\n", chan); return -EINVAL; busy: return -EBUSY; } EXPORT_SYMBOL(request_dma); /* * Free DMA channel * * On certain platforms, we have to free interrupt as well... 
*/ void free_dma(unsigned int chan) { dma_t *dma = dma_channel(chan); if (!dma) goto bad_dma; if (dma->active) { printk(KERN_ERR "dma%d: freeing active DMA\n", chan); dma->d_ops->disable(chan, dma); dma->active = 0; } if (xchg(&dma->lock, 0) != 0) { if (dma->d_ops->free) dma->d_ops->free(chan, dma); return; } printk(KERN_ERR "dma%d: trying to free free DMA\n", chan); return; bad_dma: printk(KERN_ERR "dma: trying to free DMA%d\n", chan); } EXPORT_SYMBOL(free_dma); /* Set DMA Scatter-Gather list */ void set_dma_sg (unsigned int chan, struct scatterlist *sg, int nr_sg) { dma_t *dma = dma_channel(chan); if (dma->active) printk(KERN_ERR "dma%d: altering DMA SG while " "DMA active\n", chan); dma->sg = sg; dma->sgcount = nr_sg; dma->invalid = 1; } EXPORT_SYMBOL(set_dma_sg); /* Set DMA address * * Copy address to the structure, and set the invalid bit */ void __set_dma_addr (unsigned int chan, void *addr) { dma_t *dma = dma_channel(chan); if (dma->active) printk(KERN_ERR "dma%d: altering DMA address while " "DMA active\n", chan); dma->sg = NULL; dma->addr = addr; dma->invalid = 1; } EXPORT_SYMBOL(__set_dma_addr); /* Set DMA byte count * * Copy address to the structure, and set the invalid bit */ void set_dma_count (unsigned int chan, unsigned long count) { dma_t *dma = dma_channel(chan); if (dma->active) printk(KERN_ERR "dma%d: altering DMA count while " "DMA active\n", chan); dma->sg = NULL; dma->count = count; dma->invalid = 1; } EXPORT_SYMBOL(set_dma_count); /* Set DMA direction mode */ void set_dma_mode (unsigned int chan, unsigned int mode) { dma_t *dma = dma_channel(chan); if (dma->active) printk(KERN_ERR "dma%d: altering DMA mode while " "DMA active\n", chan); dma->dma_mode = mode; dma->invalid = 1; } EXPORT_SYMBOL(set_dma_mode); /* Enable DMA channel */ void enable_dma (unsigned int chan) { dma_t *dma = dma_channel(chan); if (!dma->lock) goto free_dma; if (dma->active == 0) { dma->active = 1; dma->d_ops->enable(chan, dma); } return; free_dma: printk(KERN_ERR 
"dma%d: trying to enable free DMA\n", chan); BUG(); } EXPORT_SYMBOL(enable_dma); /* Disable DMA channel */ void disable_dma (unsigned int chan) { dma_t *dma = dma_channel(chan); if (!dma->lock) goto free_dma; if (dma->active == 1) { dma->active = 0; dma->d_ops->disable(chan, dma); } return; free_dma: printk(KERN_ERR "dma%d: trying to disable free DMA\n", chan); BUG(); } EXPORT_SYMBOL(disable_dma); /* * Is the specified DMA channel active? */ int dma_channel_active(unsigned int chan) { dma_t *dma = dma_channel(chan); return dma->active; } EXPORT_SYMBOL(dma_channel_active); void set_dma_page(unsigned int chan, char pagenr) { printk(KERN_ERR "dma%d: trying to set_dma_page\n", chan); } EXPORT_SYMBOL(set_dma_page); void set_dma_speed(unsigned int chan, int cycle_ns) { dma_t *dma = dma_channel(chan); int ret = 0; if (dma->d_ops->setspeed) ret = dma->d_ops->setspeed(chan, dma, cycle_ns); dma->speed = ret; } EXPORT_SYMBOL(set_dma_speed); int get_dma_residue(unsigned int chan) { dma_t *dma = dma_channel(chan); int ret = 0; if (dma->d_ops->residue) ret = dma->d_ops->residue(chan, dma); return ret; } EXPORT_SYMBOL(get_dma_residue); #ifdef CONFIG_PROC_FS static int proc_dma_show(struct seq_file *m, void *v) { int i; for (i = 0 ; i < MAX_DMA_CHANNELS ; i++) { dma_t *dma = dma_channel(i); if (dma && dma->lock) seq_printf(m, "%2d: %s\n", i, dma->device_id); } return 0; } static int proc_dma_open(struct inode *inode, struct file *file) { return single_open(file, proc_dma_show, NULL); } static const struct file_operations proc_dma_operations = { .open = proc_dma_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init proc_dma_init(void) { proc_create("dma", 0, NULL, &proc_dma_operations); return 0; } __initcall(proc_dma_init); #endif
gpl-2.0
acuster/FirefoxOS-Flatfish-kernel
drivers/net/wireless/hostap/hostap_download.c
9270
18521
static int prism2_enable_aux_port(struct net_device *dev, int enable) { u16 val, reg; int i, tries; unsigned long flags; struct hostap_interface *iface; local_info_t *local; iface = netdev_priv(dev); local = iface->local; if (local->no_pri) { if (enable) { PDEBUG(DEBUG_EXTRA2, "%s: no PRI f/w - assuming Aux " "port is already enabled\n", dev->name); } return 0; } spin_lock_irqsave(&local->cmdlock, flags); /* wait until busy bit is clear */ tries = HFA384X_CMD_BUSY_TIMEOUT; while (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY && tries > 0) { tries--; udelay(1); } if (tries == 0) { reg = HFA384X_INW(HFA384X_CMD_OFF); spin_unlock_irqrestore(&local->cmdlock, flags); printk("%s: prism2_enable_aux_port - timeout - reg=0x%04x\n", dev->name, reg); return -ETIMEDOUT; } val = HFA384X_INW(HFA384X_CONTROL_OFF); if (enable) { HFA384X_OUTW(HFA384X_AUX_MAGIC0, HFA384X_PARAM0_OFF); HFA384X_OUTW(HFA384X_AUX_MAGIC1, HFA384X_PARAM1_OFF); HFA384X_OUTW(HFA384X_AUX_MAGIC2, HFA384X_PARAM2_OFF); if ((val & HFA384X_AUX_PORT_MASK) != HFA384X_AUX_PORT_DISABLED) printk("prism2_enable_aux_port: was not disabled!?\n"); val &= ~HFA384X_AUX_PORT_MASK; val |= HFA384X_AUX_PORT_ENABLE; } else { HFA384X_OUTW(0, HFA384X_PARAM0_OFF); HFA384X_OUTW(0, HFA384X_PARAM1_OFF); HFA384X_OUTW(0, HFA384X_PARAM2_OFF); if ((val & HFA384X_AUX_PORT_MASK) != HFA384X_AUX_PORT_ENABLED) printk("prism2_enable_aux_port: was not enabled!?\n"); val &= ~HFA384X_AUX_PORT_MASK; val |= HFA384X_AUX_PORT_DISABLE; } HFA384X_OUTW(val, HFA384X_CONTROL_OFF); udelay(5); i = 10000; while (i > 0) { val = HFA384X_INW(HFA384X_CONTROL_OFF); val &= HFA384X_AUX_PORT_MASK; if ((enable && val == HFA384X_AUX_PORT_ENABLED) || (!enable && val == HFA384X_AUX_PORT_DISABLED)) break; udelay(10); i--; } spin_unlock_irqrestore(&local->cmdlock, flags); if (i == 0) { printk("prism2_enable_aux_port(%d) timed out\n", enable); return -ETIMEDOUT; } return 0; } static int hfa384x_from_aux(struct net_device *dev, unsigned int addr, int len, void *buf) { u16 
page, offset; if (addr & 1 || len & 1) return -1; page = addr >> 7; offset = addr & 0x7f; HFA384X_OUTW(page, HFA384X_AUXPAGE_OFF); HFA384X_OUTW(offset, HFA384X_AUXOFFSET_OFF); udelay(5); #ifdef PRISM2_PCI { __le16 *pos = (__le16 *) buf; while (len > 0) { *pos++ = HFA384X_INW_DATA(HFA384X_AUXDATA_OFF); len -= 2; } } #else /* PRISM2_PCI */ HFA384X_INSW(HFA384X_AUXDATA_OFF, buf, len / 2); #endif /* PRISM2_PCI */ return 0; } static int hfa384x_to_aux(struct net_device *dev, unsigned int addr, int len, void *buf) { u16 page, offset; if (addr & 1 || len & 1) return -1; page = addr >> 7; offset = addr & 0x7f; HFA384X_OUTW(page, HFA384X_AUXPAGE_OFF); HFA384X_OUTW(offset, HFA384X_AUXOFFSET_OFF); udelay(5); #ifdef PRISM2_PCI { __le16 *pos = (__le16 *) buf; while (len > 0) { HFA384X_OUTW_DATA(*pos++, HFA384X_AUXDATA_OFF); len -= 2; } } #else /* PRISM2_PCI */ HFA384X_OUTSW(HFA384X_AUXDATA_OFF, buf, len / 2); #endif /* PRISM2_PCI */ return 0; } static int prism2_pda_ok(u8 *buf) { __le16 *pda = (__le16 *) buf; int pos; u16 len, pdr; if (buf[0] == 0xff && buf[1] == 0x00 && buf[2] == 0xff && buf[3] == 0x00) return 0; pos = 0; while (pos + 1 < PRISM2_PDA_SIZE / 2) { len = le16_to_cpu(pda[pos]); pdr = le16_to_cpu(pda[pos + 1]); if (len == 0 || pos + len > PRISM2_PDA_SIZE / 2) return 0; if (pdr == 0x0000 && len == 2) { /* PDA end found */ return 1; } pos += len + 1; } return 0; } static int prism2_download_aux_dump(struct net_device *dev, unsigned int addr, int len, u8 *buf) { int res; prism2_enable_aux_port(dev, 1); res = hfa384x_from_aux(dev, addr, len, buf); prism2_enable_aux_port(dev, 0); if (res) return -1; return 0; } static u8 * prism2_read_pda(struct net_device *dev) { u8 *buf; int res, i, found = 0; #define NUM_PDA_ADDRS 4 unsigned int pda_addr[NUM_PDA_ADDRS] = { 0x7f0000 /* others than HFA3841 */, 0x3f0000 /* HFA3841 */, 0x390000 /* apparently used in older cards */, 0x7f0002 /* Intel PRO/Wireless 2011B (PCI) */, }; buf = kmalloc(PRISM2_PDA_SIZE, GFP_KERNEL); if (buf == 
NULL) return NULL; /* Note: wlan card should be in initial state (just after init cmd) * and no other operations should be performed concurrently. */ prism2_enable_aux_port(dev, 1); for (i = 0; i < NUM_PDA_ADDRS; i++) { PDEBUG(DEBUG_EXTRA2, "%s: trying to read PDA from 0x%08x", dev->name, pda_addr[i]); res = hfa384x_from_aux(dev, pda_addr[i], PRISM2_PDA_SIZE, buf); if (res) continue; if (res == 0 && prism2_pda_ok(buf)) { PDEBUG2(DEBUG_EXTRA2, ": OK\n"); found = 1; break; } else { PDEBUG2(DEBUG_EXTRA2, ": failed\n"); } } prism2_enable_aux_port(dev, 0); if (!found) { printk(KERN_DEBUG "%s: valid PDA not found\n", dev->name); kfree(buf); buf = NULL; } return buf; } static int prism2_download_volatile(local_info_t *local, struct prism2_download_data *param) { struct net_device *dev = local->dev; int ret = 0, i; u16 param0, param1; if (local->hw_downloading) { printk(KERN_WARNING "%s: Already downloading - aborting new " "request\n", dev->name); return -1; } local->hw_downloading = 1; if (local->pri_only) { hfa384x_disable_interrupts(dev); } else { prism2_hw_shutdown(dev, 0); if (prism2_hw_init(dev, 0)) { printk(KERN_WARNING "%s: Could not initialize card for" " download\n", dev->name); ret = -1; goto out; } } if (prism2_enable_aux_port(dev, 1)) { printk(KERN_WARNING "%s: Could not enable AUX port\n", dev->name); ret = -1; goto out; } param0 = param->start_addr & 0xffff; param1 = param->start_addr >> 16; HFA384X_OUTW(0, HFA384X_PARAM2_OFF); HFA384X_OUTW(param1, HFA384X_PARAM1_OFF); if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_DOWNLOAD | (HFA384X_PROGMODE_ENABLE_VOLATILE << 8), param0)) { printk(KERN_WARNING "%s: Download command execution failed\n", dev->name); ret = -1; goto out; } for (i = 0; i < param->num_areas; i++) { PDEBUG(DEBUG_EXTRA2, "%s: Writing %d bytes at 0x%08x\n", dev->name, param->data[i].len, param->data[i].addr); if (hfa384x_to_aux(dev, param->data[i].addr, param->data[i].len, param->data[i].data)) { printk(KERN_WARNING "%s: RAM download at 0x%08x " 
"(len=%d) failed\n", dev->name, param->data[i].addr, param->data[i].len); ret = -1; goto out; } } HFA384X_OUTW(param1, HFA384X_PARAM1_OFF); HFA384X_OUTW(0, HFA384X_PARAM2_OFF); if (hfa384x_cmd_no_wait(dev, HFA384X_CMDCODE_DOWNLOAD | (HFA384X_PROGMODE_DISABLE << 8), param0)) { printk(KERN_WARNING "%s: Download command execution failed\n", dev->name); ret = -1; goto out; } /* ProgMode disable causes the hardware to restart itself from the * given starting address. Give hw some time and ACK command just in * case restart did not happen. */ mdelay(5); HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF); if (prism2_enable_aux_port(dev, 0)) { printk(KERN_DEBUG "%s: Disabling AUX port failed\n", dev->name); /* continue anyway.. restart should have taken care of this */ } mdelay(5); local->hw_downloading = 0; if (prism2_hw_config(dev, 2)) { printk(KERN_WARNING "%s: Card configuration after RAM " "download failed\n", dev->name); ret = -1; goto out; } out: local->hw_downloading = 0; return ret; } static int prism2_enable_genesis(local_info_t *local, int hcr) { struct net_device *dev = local->dev; u8 initseq[4] = { 0x00, 0xe1, 0xa1, 0xff }; u8 readbuf[4]; printk(KERN_DEBUG "%s: test Genesis mode with HCR 0x%02x\n", dev->name, hcr); local->func->cor_sreset(local); hfa384x_to_aux(dev, 0x7e0038, sizeof(initseq), initseq); local->func->genesis_reset(local, hcr); /* Readback test */ hfa384x_from_aux(dev, 0x7e0038, sizeof(readbuf), readbuf); hfa384x_to_aux(dev, 0x7e0038, sizeof(initseq), initseq); hfa384x_from_aux(dev, 0x7e0038, sizeof(readbuf), readbuf); if (memcmp(initseq, readbuf, sizeof(initseq)) == 0) { printk(KERN_DEBUG "Readback test succeeded, HCR 0x%02x\n", hcr); return 0; } else { printk(KERN_DEBUG "Readback test failed, HCR 0x%02x " "write %02x %02x %02x %02x read %02x %02x %02x %02x\n", hcr, initseq[0], initseq[1], initseq[2], initseq[3], readbuf[0], readbuf[1], readbuf[2], readbuf[3]); return 1; } } static int prism2_get_ram_size(local_info_t *local) { int ret; /* Try to 
enable genesis mode; 0x1F for x8 SRAM or 0x0F for x16 SRAM */ if (prism2_enable_genesis(local, 0x1f) == 0) ret = 8; else if (prism2_enable_genesis(local, 0x0f) == 0) ret = 16; else ret = -1; /* Disable genesis mode */ local->func->genesis_reset(local, ret == 16 ? 0x07 : 0x17); return ret; } static int prism2_download_genesis(local_info_t *local, struct prism2_download_data *param) { struct net_device *dev = local->dev; int ram16 = 0, i; int ret = 0; if (local->hw_downloading) { printk(KERN_WARNING "%s: Already downloading - aborting new " "request\n", dev->name); return -EBUSY; } if (!local->func->genesis_reset || !local->func->cor_sreset) { printk(KERN_INFO "%s: Genesis mode downloading not supported " "with this hwmodel\n", dev->name); return -EOPNOTSUPP; } local->hw_downloading = 1; if (prism2_enable_aux_port(dev, 1)) { printk(KERN_DEBUG "%s: failed to enable AUX port\n", dev->name); ret = -EIO; goto out; } if (local->sram_type == -1) { /* 0x1F for x8 SRAM or 0x0F for x16 SRAM */ if (prism2_enable_genesis(local, 0x1f) == 0) { ram16 = 0; PDEBUG(DEBUG_EXTRA2, "%s: Genesis mode OK using x8 " "SRAM\n", dev->name); } else if (prism2_enable_genesis(local, 0x0f) == 0) { ram16 = 1; PDEBUG(DEBUG_EXTRA2, "%s: Genesis mode OK using x16 " "SRAM\n", dev->name); } else { printk(KERN_DEBUG "%s: Could not initiate genesis " "mode\n", dev->name); ret = -EIO; goto out; } } else { if (prism2_enable_genesis(local, local->sram_type == 8 ? 
0x1f : 0x0f)) { printk(KERN_DEBUG "%s: Failed to set Genesis " "mode (sram_type=%d)\n", dev->name, local->sram_type); ret = -EIO; goto out; } ram16 = local->sram_type != 8; } for (i = 0; i < param->num_areas; i++) { PDEBUG(DEBUG_EXTRA2, "%s: Writing %d bytes at 0x%08x\n", dev->name, param->data[i].len, param->data[i].addr); if (hfa384x_to_aux(dev, param->data[i].addr, param->data[i].len, param->data[i].data)) { printk(KERN_WARNING "%s: RAM download at 0x%08x " "(len=%d) failed\n", dev->name, param->data[i].addr, param->data[i].len); ret = -EIO; goto out; } } PDEBUG(DEBUG_EXTRA2, "Disable genesis mode\n"); local->func->genesis_reset(local, ram16 ? 0x07 : 0x17); if (prism2_enable_aux_port(dev, 0)) { printk(KERN_DEBUG "%s: Failed to disable AUX port\n", dev->name); } mdelay(5); local->hw_downloading = 0; PDEBUG(DEBUG_EXTRA2, "Trying to initialize card\n"); /* * Make sure the INIT command does not generate a command completion * event by disabling interrupts. */ hfa384x_disable_interrupts(dev); if (prism2_hw_init(dev, 1)) { printk(KERN_DEBUG "%s: Initialization after genesis mode " "download failed\n", dev->name); ret = -EIO; goto out; } PDEBUG(DEBUG_EXTRA2, "Card initialized - running PRI only\n"); if (prism2_hw_init2(dev, 1)) { printk(KERN_DEBUG "%s: Initialization(2) after genesis mode " "download failed\n", dev->name); ret = -EIO; goto out; } out: local->hw_downloading = 0; return ret; } #ifdef PRISM2_NON_VOLATILE_DOWNLOAD /* Note! Non-volatile downloading functionality has not yet been tested * thoroughly and it may corrupt flash image and effectively kill the card that * is being updated. You have been warned. */ static inline int prism2_download_block(struct net_device *dev, u32 addr, u8 *data, u32 bufaddr, int rest_len) { u16 param0, param1; int block_len; block_len = rest_len < 4096 ? 
rest_len : 4096; param0 = addr & 0xffff; param1 = addr >> 16; HFA384X_OUTW(block_len, HFA384X_PARAM2_OFF); HFA384X_OUTW(param1, HFA384X_PARAM1_OFF); if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_DOWNLOAD | (HFA384X_PROGMODE_ENABLE_NON_VOLATILE << 8), param0)) { printk(KERN_WARNING "%s: Flash download command execution " "failed\n", dev->name); return -1; } if (hfa384x_to_aux(dev, bufaddr, block_len, data)) { printk(KERN_WARNING "%s: flash download at 0x%08x " "(len=%d) failed\n", dev->name, addr, block_len); return -1; } HFA384X_OUTW(0, HFA384X_PARAM2_OFF); HFA384X_OUTW(0, HFA384X_PARAM1_OFF); if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_DOWNLOAD | (HFA384X_PROGMODE_PROGRAM_NON_VOLATILE << 8), 0)) { printk(KERN_WARNING "%s: Flash write command execution " "failed\n", dev->name); return -1; } return block_len; } static int prism2_download_nonvolatile(local_info_t *local, struct prism2_download_data *dl) { struct net_device *dev = local->dev; int ret = 0, i; struct { __le16 page; __le16 offset; __le16 len; } dlbuffer; u32 bufaddr; if (local->hw_downloading) { printk(KERN_WARNING "%s: Already downloading - aborting new " "request\n", dev->name); return -1; } ret = local->func->get_rid(dev, HFA384X_RID_DOWNLOADBUFFER, &dlbuffer, 6, 0); if (ret < 0) { printk(KERN_WARNING "%s: Could not read download buffer " "parameters\n", dev->name); goto out; } printk(KERN_DEBUG "Download buffer: %d bytes at 0x%04x:0x%04x\n", le16_to_cpu(dlbuffer.len), le16_to_cpu(dlbuffer.page), le16_to_cpu(dlbuffer.offset)); bufaddr = (le16_to_cpu(dlbuffer.page) << 7) + le16_to_cpu(dlbuffer.offset); local->hw_downloading = 1; if (!local->pri_only) { prism2_hw_shutdown(dev, 0); if (prism2_hw_init(dev, 0)) { printk(KERN_WARNING "%s: Could not initialize card for" " download\n", dev->name); ret = -1; goto out; } } hfa384x_disable_interrupts(dev); if (prism2_enable_aux_port(dev, 1)) { printk(KERN_WARNING "%s: Could not enable AUX port\n", dev->name); ret = -1; goto out; } printk(KERN_DEBUG "%s: starting flash 
download\n", dev->name); for (i = 0; i < dl->num_areas; i++) { int rest_len = dl->data[i].len; int data_off = 0; while (rest_len > 0) { int block_len; block_len = prism2_download_block( dev, dl->data[i].addr + data_off, dl->data[i].data + data_off, bufaddr, rest_len); if (block_len < 0) { ret = -1; goto out; } rest_len -= block_len; data_off += block_len; } } HFA384X_OUTW(0, HFA384X_PARAM1_OFF); HFA384X_OUTW(0, HFA384X_PARAM2_OFF); if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_DOWNLOAD | (HFA384X_PROGMODE_DISABLE << 8), 0)) { printk(KERN_WARNING "%s: Download command execution failed\n", dev->name); ret = -1; goto out; } if (prism2_enable_aux_port(dev, 0)) { printk(KERN_DEBUG "%s: Disabling AUX port failed\n", dev->name); /* continue anyway.. restart should have taken care of this */ } mdelay(5); local->func->hw_reset(dev); local->hw_downloading = 0; if (prism2_hw_config(dev, 2)) { printk(KERN_WARNING "%s: Card configuration after flash " "download failed\n", dev->name); ret = -1; } else { printk(KERN_INFO "%s: Card initialized successfully after " "flash download\n", dev->name); } out: local->hw_downloading = 0; return ret; } #endif /* PRISM2_NON_VOLATILE_DOWNLOAD */ static void prism2_download_free_data(struct prism2_download_data *dl) { int i; if (dl == NULL) return; for (i = 0; i < dl->num_areas; i++) kfree(dl->data[i].data); kfree(dl); } static int prism2_download(local_info_t *local, struct prism2_download_param *param) { int ret = 0; int i; u32 total_len = 0; struct prism2_download_data *dl = NULL; printk(KERN_DEBUG "prism2_download: dl_cmd=%d start_addr=0x%08x " "num_areas=%d\n", param->dl_cmd, param->start_addr, param->num_areas); if (param->num_areas > 100) { ret = -EINVAL; goto out; } dl = kzalloc(sizeof(*dl) + param->num_areas * sizeof(struct prism2_download_data_area), GFP_KERNEL); if (dl == NULL) { ret = -ENOMEM; goto out; } dl->dl_cmd = param->dl_cmd; dl->start_addr = param->start_addr; dl->num_areas = param->num_areas; for (i = 0; i < param->num_areas; 
i++) { PDEBUG(DEBUG_EXTRA2, " area %d: addr=0x%08x len=%d ptr=0x%p\n", i, param->data[i].addr, param->data[i].len, param->data[i].ptr); dl->data[i].addr = param->data[i].addr; dl->data[i].len = param->data[i].len; total_len += param->data[i].len; if (param->data[i].len > PRISM2_MAX_DOWNLOAD_AREA_LEN || total_len > PRISM2_MAX_DOWNLOAD_LEN) { ret = -E2BIG; goto out; } dl->data[i].data = kmalloc(dl->data[i].len, GFP_KERNEL); if (dl->data[i].data == NULL) { ret = -ENOMEM; goto out; } if (copy_from_user(dl->data[i].data, param->data[i].ptr, param->data[i].len)) { ret = -EFAULT; goto out; } } switch (param->dl_cmd) { case PRISM2_DOWNLOAD_VOLATILE: case PRISM2_DOWNLOAD_VOLATILE_PERSISTENT: ret = prism2_download_volatile(local, dl); break; case PRISM2_DOWNLOAD_VOLATILE_GENESIS: case PRISM2_DOWNLOAD_VOLATILE_GENESIS_PERSISTENT: ret = prism2_download_genesis(local, dl); break; case PRISM2_DOWNLOAD_NON_VOLATILE: #ifdef PRISM2_NON_VOLATILE_DOWNLOAD ret = prism2_download_nonvolatile(local, dl); #else /* PRISM2_NON_VOLATILE_DOWNLOAD */ printk(KERN_INFO "%s: non-volatile downloading not enabled\n", local->dev->name); ret = -EOPNOTSUPP; #endif /* PRISM2_NON_VOLATILE_DOWNLOAD */ break; default: printk(KERN_DEBUG "%s: unsupported download command %d\n", local->dev->name, param->dl_cmd); ret = -EINVAL; break; } out: if (ret == 0 && dl && param->dl_cmd == PRISM2_DOWNLOAD_VOLATILE_GENESIS_PERSISTENT) { prism2_download_free_data(local->dl_pri); local->dl_pri = dl; } else if (ret == 0 && dl && param->dl_cmd == PRISM2_DOWNLOAD_VOLATILE_PERSISTENT) { prism2_download_free_data(local->dl_sec); local->dl_sec = dl; } else prism2_download_free_data(dl); return ret; }
gpl-2.0
lnfamous/Kernel_Stock_Pico
drivers/net/wireless/hostap/hostap_download.c
9270
18521
/*
 * Enable (enable != 0) or disable the HFA384x AUX port, which gives direct
 * access to card memory for firmware download and PDA reads.
 * Returns 0 on success, -ETIMEDOUT if the command register stays busy or the
 * port does not reach the requested state.
 */
static int prism2_enable_aux_port(struct net_device *dev, int enable)
{
	u16 val, reg;
	int i, tries;
	unsigned long flags;
	struct hostap_interface *iface;
	local_info_t *local;

	iface = netdev_priv(dev);
	local = iface->local;

	if (local->no_pri) {
		/* No primary firmware loaded: nothing to toggle, report the
		 * port as already in the requested (enabled) state. */
		if (enable) {
			PDEBUG(DEBUG_EXTRA2, "%s: no PRI f/w - assuming Aux "
			       "port is already enabled\n", dev->name);
		}
		return 0;
	}

	spin_lock_irqsave(&local->cmdlock, flags);

	/* wait until busy bit is clear */
	tries = HFA384X_CMD_BUSY_TIMEOUT;
	while (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY && tries > 0) {
		tries--;
		udelay(1);
	}
	if (tries == 0) {
		reg = HFA384X_INW(HFA384X_CMD_OFF);
		spin_unlock_irqrestore(&local->cmdlock, flags);
		printk("%s: prism2_enable_aux_port - timeout - reg=0x%04x\n",
		       dev->name, reg);
		return -ETIMEDOUT;
	}

	val = HFA384X_INW(HFA384X_CONTROL_OFF);

	if (enable) {
		/* Magic values in PARAM0..2 unlock the AUX port.
		 * NOTE(review): sequence presumably mandated by the HFA384x
		 * datasheet - do not reorder. */
		HFA384X_OUTW(HFA384X_AUX_MAGIC0, HFA384X_PARAM0_OFF);
		HFA384X_OUTW(HFA384X_AUX_MAGIC1, HFA384X_PARAM1_OFF);
		HFA384X_OUTW(HFA384X_AUX_MAGIC2, HFA384X_PARAM2_OFF);

		if ((val & HFA384X_AUX_PORT_MASK) != HFA384X_AUX_PORT_DISABLED)
			printk("prism2_enable_aux_port: was not disabled!?\n");
		val &= ~HFA384X_AUX_PORT_MASK;
		val |= HFA384X_AUX_PORT_ENABLE;
	} else {
		HFA384X_OUTW(0, HFA384X_PARAM0_OFF);
		HFA384X_OUTW(0, HFA384X_PARAM1_OFF);
		HFA384X_OUTW(0, HFA384X_PARAM2_OFF);

		if ((val & HFA384X_AUX_PORT_MASK) != HFA384X_AUX_PORT_ENABLED)
			printk("prism2_enable_aux_port: was not enabled!?\n");
		val &= ~HFA384X_AUX_PORT_MASK;
		val |= HFA384X_AUX_PORT_DISABLE;
	}
	HFA384X_OUTW(val, HFA384X_CONTROL_OFF);

	udelay(5);

	/* Poll until the control register reflects the requested state. */
	i = 10000;
	while (i > 0) {
		val = HFA384X_INW(HFA384X_CONTROL_OFF);
		val &= HFA384X_AUX_PORT_MASK;

		if ((enable && val == HFA384X_AUX_PORT_ENABLED) ||
		    (!enable && val == HFA384X_AUX_PORT_DISABLED))
			break;

		udelay(10);
		i--;
	}

	spin_unlock_irqrestore(&local->cmdlock, flags);

	if (i == 0) {
		printk("prism2_enable_aux_port(%d) timed out\n", enable);
		return -ETIMEDOUT;
	}

	return 0;
}


/*
 * Read len bytes of card memory at addr into buf through the AUX port.
 * addr and len must both be even (transfers are 16-bit); returns 0 on
 * success, -1 on misaligned arguments. Caller must have enabled the AUX
 * port beforehand.
 */
static int hfa384x_from_aux(struct net_device *dev, unsigned int addr,
			    int len, void *buf)
{
	/* AUX addressing: 128-byte pages (page = addr >> 7, offset = low 7
	 * bits). */
	u16 page, offset;
	if (addr & 1 || len & 1)
		return -1;

	page = addr >> 7;
	offset = addr & 0x7f;

	HFA384X_OUTW(page, HFA384X_AUXPAGE_OFF);
	HFA384X_OUTW(offset, HFA384X_AUXOFFSET_OFF);
	udelay(5);

#ifdef PRISM2_PCI
	{
		/* PCI variant: no string-I/O accessor, read word by word. */
		__le16 *pos = (__le16 *) buf;
		while (len > 0) {
			*pos++ = HFA384X_INW_DATA(HFA384X_AUXDATA_OFF);
			len -= 2;
		}
	}
#else /* PRISM2_PCI */
	HFA384X_INSW(HFA384X_AUXDATA_OFF, buf, len / 2);
#endif /* PRISM2_PCI */

	return 0;
}


/*
 * Write len bytes from buf into card memory at addr through the AUX port.
 * Mirror image of hfa384x_from_aux(); same alignment requirements and
 * return convention.
 */
static int hfa384x_to_aux(struct net_device *dev, unsigned int addr,
			  int len, void *buf)
{
	u16 page, offset;
	if (addr & 1 || len & 1)
		return -1;

	page = addr >> 7;
	offset = addr & 0x7f;

	HFA384X_OUTW(page, HFA384X_AUXPAGE_OFF);
	HFA384X_OUTW(offset, HFA384X_AUXOFFSET_OFF);
	udelay(5);

#ifdef PRISM2_PCI
	{
		__le16 *pos = (__le16 *) buf;
		while (len > 0) {
			HFA384X_OUTW_DATA(*pos++, HFA384X_AUXDATA_OFF);
			len -= 2;
		}
	}
#else /* PRISM2_PCI */
	HFA384X_OUTSW(HFA384X_AUXDATA_OFF, buf, len / 2);
#endif /* PRISM2_PCI */

	return 0;
}


/*
 * Sanity-check a candidate PDA (Production Data Area) image.
 * Walks the 16-bit little-endian length/code records and returns 1 only if
 * a proper end record (code 0x0000, len 2) is reached within the buffer;
 * returns 0 for an all-0xff00 (erased) buffer or any malformed record.
 */
static int prism2_pda_ok(u8 *buf)
{
	__le16 *pda = (__le16 *) buf;
	int pos;
	u16 len, pdr;

	/* 0xff 0x00 0xff 0x00 prefix: treated as "no PDA here". */
	if (buf[0] == 0xff && buf[1] == 0x00 && buf[2] == 0xff &&
	    buf[3] == 0x00)
		return 0;

	pos = 0;
	while (pos + 1 < PRISM2_PDA_SIZE / 2) {
		len = le16_to_cpu(pda[pos]);
		pdr = le16_to_cpu(pda[pos + 1]);
		/* Zero-length or out-of-bounds record: corrupt image. */
		if (len == 0 || pos + len > PRISM2_PDA_SIZE / 2)
			return 0;

		if (pdr == 0x0000 && len == 2) {
			/* PDA end found */
			return 1;
		}

		pos += len + 1;
	}

	return 0;
}


/*
 * Debug helper: dump len bytes of card memory at addr into buf, enabling
 * and disabling the AUX port around the read. Returns 0 on success, -1 on
 * read failure.
 */
static int prism2_download_aux_dump(struct net_device *dev,
				    unsigned int addr, int len, u8 *buf)
{
	int res;

	prism2_enable_aux_port(dev, 1);
	res = hfa384x_from_aux(dev, addr, len, buf);
	prism2_enable_aux_port(dev, 0);
	if (res)
		return -1;

	return 0;
}


/*
 * Locate and read the card's PDA, trying each known flash location in turn
 * until prism2_pda_ok() accepts one. Returns a kmalloc'd PRISM2_PDA_SIZE
 * buffer (caller frees) or NULL if no valid PDA was found.
 */
static u8 * prism2_read_pda(struct net_device *dev)
{
	u8 *buf;
	int res, i, found = 0;
#define NUM_PDA_ADDRS 4
	/* Candidate PDA locations differ per chip generation. */
	unsigned int pda_addr[NUM_PDA_ADDRS] = {
		0x7f0000 /* others than HFA3841 */,
		0x3f0000 /* HFA3841 */,
		0x390000 /* apparently used in older cards */,
		0x7f0002 /* Intel PRO/Wireless 2011B (PCI) */,
	};

	buf = kmalloc(PRISM2_PDA_SIZE, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	/* Note: wlan card should be in initial state (just after init cmd)
	 * and no other operations should be performed concurrently. */

	prism2_enable_aux_port(dev, 1);

	for (i = 0; i < NUM_PDA_ADDRS; i++) {
		PDEBUG(DEBUG_EXTRA2, "%s: trying to read PDA from 0x%08x",
		       dev->name, pda_addr[i]);
		res = hfa384x_from_aux(dev, pda_addr[i], PRISM2_PDA_SIZE, buf);
		if (res)
			continue;
		if (res == 0 && prism2_pda_ok(buf)) {
			PDEBUG2(DEBUG_EXTRA2, ": OK\n");
			found = 1;
			break;
		} else {
			PDEBUG2(DEBUG_EXTRA2, ": failed\n");
		}
	}

	prism2_enable_aux_port(dev, 0);

	if (!found) {
		printk(KERN_DEBUG "%s: valid PDA not found\n", dev->name);
		kfree(buf);
		buf = NULL;
	}

	return buf;
}


/*
 * Download firmware into volatile card RAM and restart the card from
 * param->start_addr. Returns 0 on success, -1 on any failure; in all cases
 * hw_downloading is cleared on exit.
 */
static int prism2_download_volatile(local_info_t *local,
				    struct prism2_download_data *param)
{
	struct net_device *dev = local->dev;
	int ret = 0, i;
	u16 param0, param1;

	if (local->hw_downloading) {
		printk(KERN_WARNING "%s: Already downloading - aborting new "
		       "request\n", dev->name);
		return -1;
	}

	local->hw_downloading = 1;
	if (local->pri_only) {
		hfa384x_disable_interrupts(dev);
	} else {
		/* Full shutdown/re-init to get the card into a known state
		 * before entering programming mode. */
		prism2_hw_shutdown(dev, 0);

		if (prism2_hw_init(dev, 0)) {
			printk(KERN_WARNING "%s: Could not initialize card for"
			       " download\n", dev->name);
			ret = -1;
			goto out;
		}
	}

	if (prism2_enable_aux_port(dev, 1)) {
		printk(KERN_WARNING "%s: Could not enable AUX port\n",
		       dev->name);
		ret = -1;
		goto out;
	}

	/* Start address is split across the command parameter registers:
	 * low 16 bits in param0, high 16 bits in PARAM1. */
	param0 = param->start_addr & 0xffff;
	param1 = param->start_addr >> 16;

	HFA384X_OUTW(0, HFA384X_PARAM2_OFF);
	HFA384X_OUTW(param1, HFA384X_PARAM1_OFF);
	if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_DOWNLOAD |
			     (HFA384X_PROGMODE_ENABLE_VOLATILE << 8),
			     param0)) {
		printk(KERN_WARNING "%s: Download command execution failed\n",
		       dev->name);
		ret = -1;
		goto out;
	}

	for (i = 0; i < param->num_areas; i++) {
		PDEBUG(DEBUG_EXTRA2, "%s: Writing %d bytes at 0x%08x\n",
		       dev->name, param->data[i].len, param->data[i].addr);
		if (hfa384x_to_aux(dev, param->data[i].addr,
				   param->data[i].len, param->data[i].data)) {
			printk(KERN_WARNING "%s: RAM download at 0x%08x "
			       "(len=%d) failed\n", dev->name,
			       param->data[i].addr, param->data[i].len);
			ret = -1;
			goto out;
		}
	}

	HFA384X_OUTW(param1, HFA384X_PARAM1_OFF);
	HFA384X_OUTW(0, HFA384X_PARAM2_OFF);
	/* no_wait variant: the card restarts itself on ProgMode disable, so
	 * a command-completion event may never arrive. */
	if (hfa384x_cmd_no_wait(dev, HFA384X_CMDCODE_DOWNLOAD |
				(HFA384X_PROGMODE_DISABLE << 8), param0)) {
		printk(KERN_WARNING "%s: Download command execution failed\n",
		       dev->name);
		ret = -1;
		goto out;
	}
	/* ProgMode disable causes the hardware to restart itself from the
	 * given starting address. Give hw some time and ACK command just in
	 * case restart did not happen. */
	mdelay(5);
	HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF);

	if (prism2_enable_aux_port(dev, 0)) {
		printk(KERN_DEBUG "%s: Disabling AUX port failed\n",
		       dev->name);
		/* continue anyway.. restart should have taken care of this */
	}

	mdelay(5);
	local->hw_downloading = 0;
	if (prism2_hw_config(dev, 2)) {
		printk(KERN_WARNING "%s: Card configuration after RAM "
		       "download failed\n", dev->name);
		ret = -1;
		goto out;
	}

 out:
	local->hw_downloading = 0;
	return ret;
}


/*
 * Try to put the card into Genesis (raw SRAM programming) mode with the
 * given HCR value, verified by writing a known 4-byte pattern and reading
 * it back. Returns 0 when the readback matches, 1 otherwise.
 */
static int prism2_enable_genesis(local_info_t *local, int hcr)
{
	struct net_device *dev = local->dev;
	u8 initseq[4] = { 0x00, 0xe1, 0xa1, 0xff };
	u8 readbuf[4];

	printk(KERN_DEBUG "%s: test Genesis mode with HCR 0x%02x\n",
	       dev->name, hcr);
	local->func->cor_sreset(local);
	hfa384x_to_aux(dev, 0x7e0038, sizeof(initseq), initseq);
	local->func->genesis_reset(local, hcr);

	/* Readback test */
	hfa384x_from_aux(dev, 0x7e0038, sizeof(readbuf), readbuf);
	hfa384x_to_aux(dev, 0x7e0038, sizeof(initseq), initseq);
	hfa384x_from_aux(dev, 0x7e0038, sizeof(readbuf), readbuf);

	if (memcmp(initseq, readbuf, sizeof(initseq)) == 0) {
		printk(KERN_DEBUG "Readback test succeeded, HCR 0x%02x\n",
		       hcr);
		return 0;
	} else {
		printk(KERN_DEBUG "Readback test failed, HCR 0x%02x "
		       "write %02x %02x %02x %02x read %02x %02x %02x %02x\n",
		       hcr, initseq[0], initseq[1], initseq[2], initseq[3],
		       readbuf[0], readbuf[1], readbuf[2], readbuf[3]);
		return 1;
	}
}


/*
 * Probe the SRAM bus width via Genesis mode: returns 8 for x8 SRAM, 16 for
 * x16 SRAM, or -1 if neither HCR value works. Genesis mode is always
 * disabled again before returning.
 */
static int prism2_get_ram_size(local_info_t *local)
{
	int ret;

	/* Try to enable genesis mode; 0x1F for x8 SRAM or 0x0F for x16 SRAM
	 */
	if (prism2_enable_genesis(local, 0x1f) == 0)
		ret = 8;
	else if (prism2_enable_genesis(local, 0x0f) == 0)
		ret = 16;
	else
		ret = -1;

	/* Disable genesis mode */
	local->func->genesis_reset(local, ret == 16 ? 0x07 : 0x17);

	return ret;
}


/*
 * Download firmware into RAM using Genesis mode (needed when the card has
 * no working PRI firmware). Auto-detects SRAM width unless local->sram_type
 * pins it. Returns 0 on success or a negative errno; hw_downloading is
 * cleared on exit.
 */
static int prism2_download_genesis(local_info_t *local,
				   struct prism2_download_data *param)
{
	struct net_device *dev = local->dev;
	int ram16 = 0, i;
	int ret = 0;

	if (local->hw_downloading) {
		printk(KERN_WARNING "%s: Already downloading - aborting new "
		       "request\n", dev->name);
		return -EBUSY;
	}

	if (!local->func->genesis_reset || !local->func->cor_sreset) {
		printk(KERN_INFO "%s: Genesis mode downloading not supported "
		       "with this hwmodel\n", dev->name);
		return -EOPNOTSUPP;
	}

	local->hw_downloading = 1;

	if (prism2_enable_aux_port(dev, 1)) {
		printk(KERN_DEBUG "%s: failed to enable AUX port\n",
		       dev->name);
		ret = -EIO;
		goto out;
	}

	if (local->sram_type == -1) {
		/* 0x1F for x8 SRAM or 0x0F for x16 SRAM */
		if (prism2_enable_genesis(local, 0x1f) == 0) {
			ram16 = 0;
			PDEBUG(DEBUG_EXTRA2, "%s: Genesis mode OK using x8 "
			       "SRAM\n", dev->name);
		} else if (prism2_enable_genesis(local, 0x0f) == 0) {
			ram16 = 1;
			PDEBUG(DEBUG_EXTRA2, "%s: Genesis mode OK using x16 "
			       "SRAM\n", dev->name);
		} else {
			printk(KERN_DEBUG "%s: Could not initiate genesis "
			       "mode\n", dev->name);
			ret = -EIO;
			goto out;
		}
	} else {
		if (prism2_enable_genesis(local, local->sram_type == 8 ?
					  0x1f : 0x0f)) {
			printk(KERN_DEBUG "%s: Failed to set Genesis "
			       "mode (sram_type=%d)\n", dev->name,
			       local->sram_type);
			ret = -EIO;
			goto out;
		}
		ram16 = local->sram_type != 8;
	}

	for (i = 0; i < param->num_areas; i++) {
		PDEBUG(DEBUG_EXTRA2, "%s: Writing %d bytes at 0x%08x\n",
		       dev->name, param->data[i].len, param->data[i].addr);
		if (hfa384x_to_aux(dev, param->data[i].addr,
				   param->data[i].len, param->data[i].data)) {
			printk(KERN_WARNING "%s: RAM download at 0x%08x "
			       "(len=%d) failed\n", dev->name,
			       param->data[i].addr, param->data[i].len);
			ret = -EIO;
			goto out;
		}
	}

	PDEBUG(DEBUG_EXTRA2, "Disable genesis mode\n");
	local->func->genesis_reset(local, ram16 ? 0x07 : 0x17);
	if (prism2_enable_aux_port(dev, 0)) {
		printk(KERN_DEBUG "%s: Failed to disable AUX port\n",
		       dev->name);
	}

	mdelay(5);
	local->hw_downloading = 0;

	PDEBUG(DEBUG_EXTRA2, "Trying to initialize card\n");
	/*
	 * Make sure the INIT command does not generate a command completion
	 * event by disabling interrupts.
	 */
	hfa384x_disable_interrupts(dev);
	if (prism2_hw_init(dev, 1)) {
		printk(KERN_DEBUG "%s: Initialization after genesis mode "
		       "download failed\n", dev->name);
		ret = -EIO;
		goto out;
	}

	PDEBUG(DEBUG_EXTRA2, "Card initialized - running PRI only\n");
	if (prism2_hw_init2(dev, 1)) {
		printk(KERN_DEBUG "%s: Initialization(2) after genesis mode "
		       "download failed\n", dev->name);
		ret = -EIO;
		goto out;
	}

 out:
	local->hw_downloading = 0;
	return ret;
}


#ifdef PRISM2_NON_VOLATILE_DOWNLOAD
/* Note! Non-volatile downloading functionality has not yet been tested
 * thoroughly and it may corrupt flash image and effectively kill the card
 * that is being updated. You have been warned. */

/*
 * Stage at most 4096 bytes of one flash area into the card's download
 * buffer at bufaddr and issue the non-volatile program command for it.
 * Returns the number of bytes consumed, or -1 on failure.
 */
static inline int prism2_download_block(struct net_device *dev,
					u32 addr, u8 *data,
					u32 bufaddr, int rest_len)
{
	u16 param0, param1;
	int block_len;

	/* NOTE(review): 4096 presumably matches the minimum download buffer
	 * size; the actual buffer size is read from the card by the caller -
	 * confirm against the HFA384x programming guide. */
	block_len = rest_len < 4096 ? rest_len : 4096;

	param0 = addr & 0xffff;
	param1 = addr >> 16;

	HFA384X_OUTW(block_len, HFA384X_PARAM2_OFF);
	HFA384X_OUTW(param1, HFA384X_PARAM1_OFF);

	if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_DOWNLOAD |
			     (HFA384X_PROGMODE_ENABLE_NON_VOLATILE << 8),
			     param0)) {
		printk(KERN_WARNING "%s: Flash download command execution "
		       "failed\n", dev->name);
		return -1;
	}

	if (hfa384x_to_aux(dev, bufaddr, block_len, data)) {
		printk(KERN_WARNING "%s: flash download at 0x%08x "
		       "(len=%d) failed\n", dev->name, addr, block_len);
		return -1;
	}

	HFA384X_OUTW(0, HFA384X_PARAM2_OFF);
	HFA384X_OUTW(0, HFA384X_PARAM1_OFF);
	if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_DOWNLOAD |
			     (HFA384X_PROGMODE_PROGRAM_NON_VOLATILE << 8),
			     0)) {
		printk(KERN_WARNING "%s: Flash write command execution "
		       "failed\n", dev->name);
		return -1;
	}

	return block_len;
}


/*
 * Write firmware to the card's non-volatile flash, block by block, then
 * hard-reset and reconfigure the card. Returns 0 on success, -1 on failure;
 * hw_downloading is cleared on exit.
 */
static int prism2_download_nonvolatile(local_info_t *local,
				       struct prism2_download_data *dl)
{
	struct net_device *dev = local->dev;
	int ret = 0, i;
	/* Layout of HFA384X_RID_DOWNLOADBUFFER: three little-endian words. */
	struct {
		__le16 page;
		__le16 offset;
		__le16 len;
	} dlbuffer;
	u32 bufaddr;

	if (local->hw_downloading) {
		printk(KERN_WARNING "%s: Already downloading - aborting new "
		       "request\n", dev->name);
		return -1;
	}

	ret = local->func->get_rid(dev, HFA384X_RID_DOWNLOADBUFFER,
				   &dlbuffer, 6, 0);

	if (ret < 0) {
		printk(KERN_WARNING "%s: Could not read download buffer "
		       "parameters\n", dev->name);
		goto out;
	}

	printk(KERN_DEBUG "Download buffer: %d bytes at 0x%04x:0x%04x\n",
	       le16_to_cpu(dlbuffer.len), le16_to_cpu(dlbuffer.page),
	       le16_to_cpu(dlbuffer.offset));

	/* Convert page:offset into a flat AUX address (128-byte pages). */
	bufaddr = (le16_to_cpu(dlbuffer.page) << 7) +
		le16_to_cpu(dlbuffer.offset);

	local->hw_downloading = 1;

	if (!local->pri_only) {
		prism2_hw_shutdown(dev, 0);

		if (prism2_hw_init(dev, 0)) {
			printk(KERN_WARNING "%s: Could not initialize card for"
			       " download\n", dev->name);
			ret = -1;
			goto out;
		}
	}

	hfa384x_disable_interrupts(dev);

	if (prism2_enable_aux_port(dev, 1)) {
		printk(KERN_WARNING "%s: Could not enable AUX port\n",
		       dev->name);
		ret = -1;
		goto out;
	}

	printk(KERN_DEBUG "%s: starting flash download\n", dev->name);
	for (i = 0; i < dl->num_areas; i++) {
		int rest_len = dl->data[i].len;
		int data_off = 0;

		while (rest_len > 0) {
			int block_len;

			block_len = prism2_download_block(
				dev, dl->data[i].addr + data_off,
				dl->data[i].data + data_off,
				bufaddr, rest_len);

			if (block_len < 0) {
				ret = -1;
				goto out;
			}

			rest_len -= block_len;
			data_off += block_len;
		}
	}

	HFA384X_OUTW(0, HFA384X_PARAM1_OFF);
	HFA384X_OUTW(0, HFA384X_PARAM2_OFF);
	if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_DOWNLOAD |
			     (HFA384X_PROGMODE_DISABLE << 8), 0)) {
		printk(KERN_WARNING "%s: Download command execution failed\n",
		       dev->name);
		ret = -1;
		goto out;
	}

	if (prism2_enable_aux_port(dev, 0)) {
		printk(KERN_DEBUG "%s: Disabling AUX port failed\n",
		       dev->name);
		/* continue anyway.. restart should have taken care of this */
	}

	mdelay(5);

	local->func->hw_reset(dev);
	local->hw_downloading = 0;
	if (prism2_hw_config(dev, 2)) {
		printk(KERN_WARNING "%s: Card configuration after flash "
		       "download failed\n", dev->name);
		ret = -1;
	} else {
		printk(KERN_INFO "%s: Card initialized successfully after "
		       "flash download\n", dev->name);
	}

 out:
	local->hw_downloading = 0;
	return ret;
}
#endif /* PRISM2_NON_VOLATILE_DOWNLOAD */


/* Free a prism2_download_data record and every per-area data buffer it
 * owns; NULL is a no-op. */
static void prism2_download_free_data(struct prism2_download_data *dl)
{
	int i;

	if (dl == NULL)
		return;

	for (i = 0; i < dl->num_areas; i++)
		kfree(dl->data[i].data);

	kfree(dl);
}


/*
 * Entry point for a userspace download request: validates the request,
 * copies all area data in from userspace, and dispatches to the volatile,
 * genesis, or non-volatile download routine. On success of a PERSISTENT
 * command the copied data is kept (in local->dl_pri / local->dl_sec) for
 * future re-downloads; otherwise it is freed. Returns 0 or negative errno.
 */
static int prism2_download(local_info_t *local,
			   struct prism2_download_param *param)
{
	int ret = 0;
	int i;
	u32 total_len = 0;
	struct prism2_download_data *dl = NULL;

	printk(KERN_DEBUG "prism2_download: dl_cmd=%d start_addr=0x%08x "
	       "num_areas=%d\n",
	       param->dl_cmd, param->start_addr, param->num_areas);

	/* Cap the area count to bound the allocation below. */
	if (param->num_areas > 100) {
		ret = -EINVAL;
		goto out;
	}

	dl = kzalloc(sizeof(*dl) + param->num_areas *
		     sizeof(struct prism2_download_data_area), GFP_KERNEL);
	if (dl == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	dl->dl_cmd = param->dl_cmd;
	dl->start_addr = param->start_addr;
	dl->num_areas = param->num_areas;
	for (i = 0; i < param->num_areas; i++) {
		PDEBUG(DEBUG_EXTRA2,
		       "  area %d: addr=0x%08x len=%d ptr=0x%p\n",
		       i, param->data[i].addr, param->data[i].len,
		       param->data[i].ptr);

		dl->data[i].addr = param->data[i].addr;
		dl->data[i].len = param->data[i].len;

		/* Reject oversized areas / total before allocating. */
		total_len += param->data[i].len;
		if (param->data[i].len > PRISM2_MAX_DOWNLOAD_AREA_LEN ||
		    total_len > PRISM2_MAX_DOWNLOAD_LEN) {
			ret = -E2BIG;
			goto out;
		}

		dl->data[i].data = kmalloc(dl->data[i].len, GFP_KERNEL);
		if (dl->data[i].data == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		if (copy_from_user(dl->data[i].data, param->data[i].ptr,
				   param->data[i].len)) {
			ret = -EFAULT;
			goto out;
		}
	}

	switch (param->dl_cmd) {
	case PRISM2_DOWNLOAD_VOLATILE:
	case PRISM2_DOWNLOAD_VOLATILE_PERSISTENT:
		ret = prism2_download_volatile(local, dl);
		break;
	case PRISM2_DOWNLOAD_VOLATILE_GENESIS:
	case PRISM2_DOWNLOAD_VOLATILE_GENESIS_PERSISTENT:
		ret = prism2_download_genesis(local, dl);
		break;
	case PRISM2_DOWNLOAD_NON_VOLATILE:
#ifdef PRISM2_NON_VOLATILE_DOWNLOAD
		ret = prism2_download_nonvolatile(local, dl);
#else /* PRISM2_NON_VOLATILE_DOWNLOAD */
		printk(KERN_INFO "%s: non-volatile downloading not enabled\n",
		       local->dev->name);
		ret = -EOPNOTSUPP;
#endif /* PRISM2_NON_VOLATILE_DOWNLOAD */
		break;
	default:
		printk(KERN_DEBUG "%s: unsupported download command %d\n",
		       local->dev->name, param->dl_cmd);
		ret = -EINVAL;
		break;
	}

 out:
	/* PERSISTENT variants keep ownership of dl for later re-download;
	 * the previously stored image (if any) is freed first. */
	if (ret == 0 && dl &&
	    param->dl_cmd == PRISM2_DOWNLOAD_VOLATILE_GENESIS_PERSISTENT) {
		prism2_download_free_data(local->dl_pri);
		local->dl_pri = dl;
	} else if (ret == 0 && dl &&
		   param->dl_cmd == PRISM2_DOWNLOAD_VOLATILE_PERSISTENT) {
		prism2_download_free_data(local->dl_sec);
		local->dl_sec = dl;
	} else
		prism2_download_free_data(dl);

	return ret;
}
gpl-2.0
again4you/linux
net/rose/rose_out.c
9782
2840
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/gfp.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <net/rose.h> /* * This procedure is passed a buffer descriptor for an iframe. It builds * the rest of the control part of the frame and then writes it out. */ static void rose_send_iframe(struct sock *sk, struct sk_buff *skb) { struct rose_sock *rose = rose_sk(sk); if (skb == NULL) return; skb->data[2] |= (rose->vr << 5) & 0xE0; skb->data[2] |= (rose->vs << 1) & 0x0E; rose_start_idletimer(sk); rose_transmit_link(skb, rose->neighbour); } void rose_kick(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); struct sk_buff *skb, *skbn; unsigned short start, end; if (rose->state != ROSE_STATE_3) return; if (rose->condition & ROSE_COND_PEER_RX_BUSY) return; if (!skb_peek(&sk->sk_write_queue)) return; start = (skb_peek(&rose->ack_queue) == NULL) ? rose->va : rose->vs; end = (rose->va + sysctl_rose_window_size) % ROSE_MODULUS; if (start == end) return; rose->vs = start; /* * Transmit data until either we're out of data to send or * the window is full. */ skb = skb_dequeue(&sk->sk_write_queue); do { if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) { skb_queue_head(&sk->sk_write_queue, skb); break; } skb_set_owner_w(skbn, sk); /* * Transmit the frame copy. 
*/ rose_send_iframe(sk, skbn); rose->vs = (rose->vs + 1) % ROSE_MODULUS; /* * Requeue the original data frame. */ skb_queue_tail(&rose->ack_queue, skb); } while (rose->vs != end && (skb = skb_dequeue(&sk->sk_write_queue)) != NULL); rose->vl = rose->vr; rose->condition &= ~ROSE_COND_ACK_PENDING; rose_stop_timer(sk); } /* * The following routines are taken from page 170 of the 7th ARRL Computer * Networking Conference paper, as is the whole state machine. */ void rose_enquiry_response(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); if (rose->condition & ROSE_COND_OWN_RX_BUSY) rose_write_internal(sk, ROSE_RNR); else rose_write_internal(sk, ROSE_RR); rose->vl = rose->vr; rose->condition &= ~ROSE_COND_ACK_PENDING; rose_stop_timer(sk); }
gpl-2.0
Compunctus/android_kernel_lge_g3
arch/sh/drivers/pci/ops-sh7786.c
12342
4765
/*
 * Generic SH7786 PCI-Express operations.
 *
 *  Copyright (C) 2009 - 2010  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include "pcie-sh7786.h"

/* Direction selector for sh7786_pcie_config_access(). */
enum {
	PCI_ACCESS_READ,
	PCI_ACCESS_WRITE,
};

/*
 * Perform a single 32-bit configuration-space access (read or write,
 * selected by access_type) for the given bus/devfn/register. *data is the
 * source for writes and the destination for reads. Returns a PCIBIOS_*
 * status code. Caller must hold pci_config_lock.
 */
static int sh7786_pcie_config_access(unsigned char access_type,
		struct pci_bus *bus, unsigned int devfn, int where, u32 *data)
{
	struct pci_channel *chan = bus->sysdata;
	int dev, func, type, reg;

	dev = PCI_SLOT(devfn);
	func = PCI_FUNC(devfn);
	/* type 0 config cycle on the root bus, type 1 behind a bridge. */
	type = !!bus->parent;
	reg = where & ~3;

	if (bus->number > 255 || dev > 31 || func > 7)
		return PCIBIOS_FUNC_NOT_SUPPORTED;

	/*
	 * While each channel has its own memory-mapped extended config
	 * space, it's generally only accessible when in endpoint mode.
	 * When in root complex mode, the controller is unable to target
	 * itself with either type 0 or type 1 accesses, and indeed, any
	 * controller initiated target transfer to its own config space
	 * result in a completer abort.
	 *
	 * Each channel effectively only supports a single device, but as
	 * the same channel <-> device access works for any PCI_SLOT()
	 * value, we cheat a bit here and bind the controller's config
	 * space to devfn 0 in order to enable self-enumeration. In this
	 * case the regular PAR/PDR path is sidelined and the mangled
	 * config access itself is initiated as a SuperHyway transaction.
	 */
	if (pci_is_root_bus(bus)) {
		if (dev == 0) {
			if (access_type == PCI_ACCESS_READ)
				*data = pci_read_reg(chan, PCI_REG(reg));
			else
				pci_write_reg(chan, *data, PCI_REG(reg));

			return PCIBIOS_SUCCESSFUL;
		} else if (dev > 1)
			return PCIBIOS_DEVICE_NOT_FOUND;
	}

	/* Clear errors */
	pci_write_reg(chan, pci_read_reg(chan, SH4A_PCIEERRFR), SH4A_PCIEERRFR);

	/* Set the PIO address */
	pci_write_reg(chan, (bus->number << 24) | (dev << 19) |
				(func << 16) | reg, SH4A_PCIEPAR);

	/* Enable the configuration access */
	pci_write_reg(chan, (1 << 31) | (type << 8), SH4A_PCIEPCTLR);

	/* Check for errors */
	if (pci_read_reg(chan, SH4A_PCIEERRFR) & 0x10)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Check for master and target aborts */
	if (pci_read_reg(chan, SH4A_PCIEPCICONF1) & ((1 << 29) | (1 << 28)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (access_type == PCI_ACCESS_READ)
		*data = pci_read_reg(chan, SH4A_PCIEPDR);
	else
		pci_write_reg(chan, *data, SH4A_PCIEPDR);

	/* Disable the configuration access */
	pci_write_reg(chan, 0, SH4A_PCIEPCTLR);

	return PCIBIOS_SUCCESSFUL;
}

/*
 * pci_ops .read: config-space read of 1, 2, or 4 bytes. Performs the
 * underlying 32-bit access and extracts the requested sub-word; returns
 * 0xffffffff in *val on failure, per PCI convention.
 */
static int sh7786_pcie_read(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 *val)
{
	unsigned long flags;
	int ret;
	u32 data;

	/* Sub-word accesses must be naturally aligned. */
        if ((size == 2) && (where & 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;
        else if ((size == 4) && (where & 3))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	raw_spin_lock_irqsave(&pci_config_lock, flags);
	ret = sh7786_pcie_config_access(PCI_ACCESS_READ, bus,
					devfn, where, &data);
	if (ret != PCIBIOS_SUCCESSFUL) {
		*val = 0xffffffff;
		goto out;
	}

	if (size == 1)
		*val = (data >> ((where & 3) << 3)) & 0xff;
	else if (size == 2)
		*val = (data >> ((where & 2) << 3)) & 0xffff;
	else
		*val = data;

	dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x "
		"where=0x%04x size=%d val=0x%08lx\n", bus->number,
		devfn, where, size, (unsigned long)*val);

out:
	raw_spin_unlock_irqrestore(&pci_config_lock, flags);
	return ret;
}

/*
 * pci_ops .write: config-space write of 1, 2, or 4 bytes. Sub-word writes
 * are implemented as a read-modify-write of the containing 32-bit word.
 */
static int sh7786_pcie_write(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 val)
{
	unsigned long flags;
	int shift, ret;
	u32 data;

        if ((size == 2) && (where & 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;
        else if ((size == 4) && (where & 3))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	raw_spin_lock_irqsave(&pci_config_lock, flags);
	/* Fetch the current word so unmodified bytes are preserved. */
	ret = sh7786_pcie_config_access(PCI_ACCESS_READ, bus,
					devfn, where, &data);
	if (ret != PCIBIOS_SUCCESSFUL)
		goto out;

	dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x "
		"where=0x%04x size=%d val=%08lx\n", bus->number,
		devfn, where, size, (unsigned long)val);

	if (size == 1) {
		shift = (where & 3) << 3;
		data &= ~(0xff << shift);
		data |= ((val & 0xff) << shift);
	} else if (size == 2) {
		shift = (where & 2) << 3;
		data &= ~(0xffff << shift);
		data |= ((val & 0xffff) << shift);
	} else
		data = val;

	ret = sh7786_pcie_config_access(PCI_ACCESS_WRITE, bus,
					devfn, where, &data);
out:
	raw_spin_unlock_irqrestore(&pci_config_lock, flags);
	return ret;
}

/* Accessors handed to the generic PCI layer for this controller. */
struct pci_ops sh7786_pci_ops = {
	.read	= sh7786_pcie_read,
	.write	= sh7786_pcie_write,
};
gpl-2.0
MoKee/android_kernel_samsung_d710
arch/sh/drivers/pci/pci-sh7751.c
12342
5481
/*
 * Low-Level PCI Support for the SH7751
 *
 *  Copyright (C) 2003 - 2009  Paul Mundt
 *  Copyright (C) 2001  Dustin McIntire
 *
 *  With cleanup by Paul van Gool <pvangool@mimotech.com>, 2003.
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License. See the file "COPYING" in the main directory of this archive
 *  for more details.
 */
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/io.h>
#include "pci-sh4.h"
#include <asm/addrspace.h>
#include <asm/sizes.h>

/*
 * Verify that chip-select area 'area' is wired as 32-bit SDRAM (checked
 * against the BSC's BCR1/BCR2 registers) and mirror those settings into the
 * PCI controller's shadow registers. Returns 1 if the area is usable as a
 * PCI memory window, 0 otherwise.
 */
static int __init __area_sdram_check(struct pci_channel *chan,
				     unsigned int area)
{
	unsigned long word;

	word = __raw_readl(SH7751_BCR1);
	/* check BCR for SDRAM in area */
	if (((word >> area) & 1) == 0) {
		printk("PCI: Area %d is not configured for SDRAM. BCR1=0x%lx\n",
		       area, word);
		return 0;
	}
	pci_write_reg(chan, word, SH4_PCIBCR1);

	word = __raw_readw(SH7751_BCR2);
	/* check BCR2 for 32bit SDRAM interface*/
	if (((word >> (area << 1)) & 0x3) != 0x3) {
		printk("PCI: Area %d is not 32 bit SDRAM. BCR2=0x%lx\n",
		       area, word);
		return 0;
	}
	pci_write_reg(chan, word, SH4_PCIBCR2);

	return 1;
}

/* I/O and memory apertures exposed by this host controller. */
static struct resource sh7751_pci_resources[] = {
	{
		.name	= "SH7751_IO",
		.start	= 0x1000,
		.end	= SZ_4M - 1,
		.flags	= IORESOURCE_IO
	}, {
		.name	= "SH7751_mem",
		.start	= SH7751_PCI_MEMORY_BASE,
		.end	= SH7751_PCI_MEMORY_BASE + SH7751_PCI_MEM_SIZE - 1,
		.flags	= IORESOURCE_MEM
	},
};

static struct pci_channel sh7751_pci_controller = {
	.pci_ops	= &sh4_pci_ops,
	.resources	= sh7751_pci_resources,
	.nr_resources	= ARRAY_SIZE(sh7751_pci_resources),
	.mem_offset	= 0x00000000,
	.io_offset	= 0x00000000,
	.io_map_base	= SH7751_PCI_IO_BASE,
};

/* Single inbound window: local SDRAM on CS3 mapped 1:1 onto PCI. */
static struct sh4_pci_address_map sh7751_pci_map = {
	.window0	= {
		.base	= SH7751_CS3_BASE_ADDR,
		.size	= 0x04000000,
	},
};

/*
 * Probe and bring up the SH7751(R) on-chip PCI host controller, then
 * register it with the PCI core. Returns 0 on success or a negative value
 * if the hardware is absent or misconfigured.
 */
static int __init sh7751_pci_init(void)
{
	struct pci_channel *chan = &sh7751_pci_controller;
	unsigned int id;
	u32 word, reg;

	printk(KERN_NOTICE "PCI: Starting initialization.\n");

	chan->reg_base = 0xfe200000;

	/* check for SH7751/SH7751R hardware */
	id = pci_read_reg(chan, SH7751_PCICONF0);
	if (id != ((SH7751_DEVICE_ID << 16) | SH7751_VENDOR_ID) &&
	    id != ((SH7751R_DEVICE_ID << 16) | SH7751_VENDOR_ID)) {
		pr_debug("PCI: This is not an SH7751(R) (%x)\n", id);
		return -ENODEV;
	}

	/* Set the BCR's to enable PCI access */
	reg = __raw_readl(SH7751_BCR1);
	reg |= 0x80000;
	__raw_writel(reg, SH7751_BCR1);

	/* Turn the clocks back on (not done in reset)*/
	pci_write_reg(chan, 0, SH4_PCICLKR);
	/* Clear Powerdown IRQ's (not done in reset) */
	word = SH4_PCIPINT_D3 | SH4_PCIPINT_D0;
	pci_write_reg(chan, word, SH4_PCIPINT);

	/* set the command/status bits to:
	 * Wait Cycle Control + Parity Enable + Bus Master +
	 * Mem space enable
	 */
	word = SH7751_PCICONF1_WCC | SH7751_PCICONF1_PER |
	       SH7751_PCICONF1_BUM | SH7751_PCICONF1_MES;
	pci_write_reg(chan, word, SH7751_PCICONF1);

	/* define this host as the host bridge */
	word = PCI_BASE_CLASS_BRIDGE << 24;
	pci_write_reg(chan, word, SH7751_PCICONF2);

	/* Set IO and Mem windows to local address
	 * Make PCI and local address the same for easy 1 to 1 mapping
	 */
	word = sh7751_pci_map.window0.size - 1;
	pci_write_reg(chan, word, SH4_PCILSR0);
	/* Set the values on window 0 PCI config registers */
	word = P2SEGADDR(sh7751_pci_map.window0.base);
	pci_write_reg(chan, word, SH4_PCILAR0);
	pci_write_reg(chan, word, SH7751_PCICONF5);

	/* Set the local 16MB PCI memory space window to
	 * the lowest PCI mapped address
	 */
	word = chan->resources[1].start & SH4_PCIMBR_MASK;
	pr_debug("PCI: Setting upper bits of Memory window to 0x%x\n", word);
	pci_write_reg(chan, word , SH4_PCIMBR);

	/* Make sure the MSB's of IO window are set to access PCI space
	 * correctly */
	word = chan->resources[0].start & SH4_PCIIOBR_MASK;
	pr_debug("PCI: Setting upper bits of IO window to 0x%x\n", word);
	pci_write_reg(chan, word, SH4_PCIIOBR);

	/* Set PCI WCRx, BCRx's, copy from BSC locations */

	/* check BCR for SDRAM in specified area */
	switch (sh7751_pci_map.window0.base) {
	case SH7751_CS0_BASE_ADDR: word = __area_sdram_check(chan, 0); break;
	case SH7751_CS1_BASE_ADDR: word = __area_sdram_check(chan, 1); break;
	case SH7751_CS2_BASE_ADDR: word = __area_sdram_check(chan, 2); break;
	case SH7751_CS3_BASE_ADDR: word = __area_sdram_check(chan, 3); break;
	case SH7751_CS4_BASE_ADDR: word = __area_sdram_check(chan, 4); break;
	case SH7751_CS5_BASE_ADDR: word = __area_sdram_check(chan, 5); break;
	case SH7751_CS6_BASE_ADDR: word = __area_sdram_check(chan, 6); break;
	}

	if (!word)
		return -1;

	/* configure the wait control registers */
	word = __raw_readl(SH7751_WCR1);
	pci_write_reg(chan, word, SH4_PCIWCR1);
	word = __raw_readl(SH7751_WCR2);
	pci_write_reg(chan, word, SH4_PCIWCR2);
	word = __raw_readl(SH7751_WCR3);
	pci_write_reg(chan, word, SH4_PCIWCR3);
	word = __raw_readl(SH7751_MCR);
	pci_write_reg(chan, word, SH4_PCIMCR);

	/* NOTE: I'm ignoring the PCI error IRQs for now..
	 * TODO: add support for the internal error interrupts and
	 * DMA interrupts...
	 */
	pci_fixup_pcic(chan);

	/* SH7751 init done, set central function init complete */
	/* use round robin mode to stop a device starving/overruning */
	word = SH4_PCICR_PREFIX | SH4_PCICR_CFIN | SH4_PCICR_ARBM;
	pci_write_reg(chan, word, SH4_PCICR);

	return register_pci_controller(chan);
}
arch_initcall(sh7751_pci_init);
gpl-2.0
dmitrysmagin/xz0032-linux
drivers/scsi/qla2xxx/qla_mbx.c
55
108566
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2010 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/delay.h>
#include <linux/gfp.h>

/*
 * qla2x00_mailbox_command
 *	Issue mailbox command and waits for completion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS = cmd performed success
 *	1 : QLA_FUNCTION_FAILED   (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 *
 * Context:
 *	Kernel context.  May sleep (completion waits / msleep), so must not
 *	be called from interrupt context.
 */
static int
qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
{
	int		rval;
	unsigned long    flags = 0;
	device_reg_t __iomem *reg;
	uint8_t		abort_active;
	uint8_t		io_lock_on;
	uint16_t	command = 0;
	uint16_t	*iptr;
	uint16_t __iomem *optr;
	uint32_t	cnt;
	uint32_t	mboxes;
	unsigned long	wait_time;
	struct qla_hw_data *ha = vha->hw;
	/* Mailbox access is per-HBA, so always operate on the base (physical)
	 * host even when called for an NPIV virtual port. */
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Bail out early if PCI error recovery has taken the device away. */
	if (ha->pdev->error_state > pci_channel_io_frozen)
		return QLA_FUNCTION_TIMEOUT;

	if (vha->device_flags & DFLG_DEV_FAILED) {
		DEBUG2_3_11(qla_printk(KERN_WARNING, ha,
		    "%s(%ld): Device in failed state, "
		    "timeout MBX Exiting.\n",
		    __func__, base_vha->host_no));
		return QLA_FUNCTION_TIMEOUT;
	}

	reg = ha->iobase;
	io_lock_on = base_vha->flags.init_done;

	rval = QLA_SUCCESS;
	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);

	DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no));

	if (ha->flags.pci_channel_io_perm_failure) {
		DEBUG(printk("%s(%ld): Perm failure on EEH, timeout MBX "
			     "Exiting.\n", __func__, vha->host_no));
		return QLA_FUNCTION_TIMEOUT;
	}

	/*
	 * Wait for active mailbox commands to finish by waiting at most tov
	 * seconds. This is to serialize actual issuing of mailbox cmds during
	 * non ISP abort time.
	 */
	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
		/* Timeout occurred. Return error. */
		DEBUG2_3_11(printk("%s(%ld): cmd access timeout. "
		    "Exiting.\n", __func__, base_vha->host_no));
		return QLA_FUNCTION_TIMEOUT;
	}

	if (IS_QLA82XX(ha) && ha->flags.fw_hung) {
		/* Setting Link-Down error */
		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
		rval = QLA_FUNCTION_FAILED;
		goto premature_exit;
	}

	ha->flags.mbox_busy = 1;
	/* Save mailbox command for debug */
	ha->mcp = mcp;

	DEBUG11(printk("scsi(%ld): prepare to issue mbox cmd=0x%x.\n",
	    base_vha->host_no, mcp->mb[0]));

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Load mailbox registers; the outgoing register base differs per
	 * ISP generation. */
	if (IS_QLA82XX(ha))
		optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
	else if (IS_FWI2_CAPABLE(ha) && !IS_QLA82XX(ha))
		optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
	else
		optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);

	iptr = mcp->mb;
	command = mcp->mb[0];
	mboxes = mcp->out_mb;

	/* out_mb is a bitmask: bit N set means mb[N] must be written out. */
	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
		/* ISP2200 mailbox registers 8+ live at a different offset. */
		if (IS_QLA2200(ha) && cnt == 8)
			optr =
			    (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
		if (mboxes & BIT_0)
			WRT_REG_WORD(optr, *iptr);

		mboxes >>= 1;
		optr++;
		iptr++;
	}

#if defined(QL_DEBUG_LEVEL_1)
	printk("%s(%ld): Loaded MBX registers (displayed in bytes) = \n",
	    __func__, base_vha->host_no);
	qla2x00_dump_buffer((uint8_t *)mcp->mb, 16);
	printk("\n");
	qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x10), 16);
	printk("\n");
	qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x20), 8);
	printk("\n");
	printk("%s(%ld): I/O address = %p.\n", __func__, base_vha->host_no,
	    optr);
	qla2x00_dump_regs(base_vha);
#endif

	/* Issue set host interrupt command to send cmd out. */
	ha->flags.mbox_int = 0;
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	/* Unlock mbx registers and wait for interrupt */
	DEBUG11(printk("%s(%ld): going to unlock irq & waiting for interrupt. "
	    "jiffies=%lx.\n", __func__, base_vha->host_no, jiffies));

	/* Wait for mbx cmd completion until timeout.  Interrupt-driven path
	 * when init is done and no ISP abort is active; polling otherwise. */
	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);

		if (IS_QLA82XX(ha)) {
			if (RD_REG_DWORD(&reg->isp82.hint) &
				HINT_MBX_INT_PENDING) {
				/* Previous mailbox still pending in fw --
				 * cannot issue another one. */
				spin_unlock_irqrestore(&ha->hardware_lock,
					flags);
				DEBUG2_3_11(printk(KERN_INFO
				    "%s(%ld): Pending Mailbox timeout. "
				    "Exiting.\n", __func__,
				    base_vha->host_no));
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}
			WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		} else if (IS_FWI2_CAPABLE(ha))
			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		/* ISR signals mbx_intr_comp when the mailbox completes. */
		wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);

		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);

	} else {
		DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__,
		    base_vha->host_no, command));

		if (IS_QLA82XX(ha)) {
			if (RD_REG_DWORD(&reg->isp82.hint) &
				HINT_MBX_INT_PENDING) {
				spin_unlock_irqrestore(&ha->hardware_lock,
					flags);
				DEBUG2_3_11(printk(KERN_INFO
				    "%s(%ld): Pending Mailbox timeout. "
				    "Exiting.\n", __func__,
				    base_vha->host_no));
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}
			WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		} else if (IS_FWI2_CAPABLE(ha))
			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
		while (!ha->flags.mbox_int) {
			if (time_after(jiffies, wait_time))
				break;

			/* Check for pending interrupts. */
			qla2x00_poll(ha->rsp_q_map[0]);

			/* Busy-poll (no sleep) only for the ISP2200 RISC RAM
			 * load command; otherwise back off 10ms per loop. */
			if (!ha->flags.mbox_int &&
			    !(IS_QLA2200(ha) &&
			    command == MBC_LOAD_RISC_RAM_EXTENDED))
				msleep(10);
		} /* while */
		DEBUG17(qla_printk(KERN_WARNING, ha,
			"Waited %d sec\n",
			(uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)));
	}

	/* Check whether we timed out */
	if (ha->flags.mbox_int) {
		uint16_t *iptr2;

		DEBUG3_11(printk("%s(%ld): cmd %x completed.\n", __func__,
		    base_vha->host_no, command));

		/* Got interrupt. Clear the flag. */
		ha->flags.mbox_int = 0;
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

		if (IS_QLA82XX(ha) && ha->flags.fw_hung) {
			ha->flags.mbox_busy = 0;
			/* Setting Link-Down error */
			mcp->mb[0] = MBS_LINK_DOWN_ERROR;
			ha->mcp = NULL;
			rval = QLA_FUNCTION_FAILED;
			goto premature_exit;
		}

		if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
			rval = QLA_FUNCTION_FAILED;

		/* Load return mailbox registers; in_mb is a bitmask of the
		 * registers the caller wants copied back into mcp->mb[]. */
		iptr2 = mcp->mb;
		iptr = (uint16_t *)&ha->mailbox_out[0];
		mboxes = mcp->in_mb;
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (mboxes & BIT_0)
				*iptr2 = *iptr;

			mboxes >>= 1;
			iptr2++;
			iptr++;
		}
	} else {

#if defined(QL_DEBUG_LEVEL_2) || defined(QL_DEBUG_LEVEL_3) || \
		defined(QL_DEBUG_LEVEL_11)
		uint16_t mb0;
		uint32_t ictrl;

		if (IS_FWI2_CAPABLE(ha)) {
			mb0 = RD_REG_WORD(&reg->isp24.mailbox0);
			ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
		} else {
			mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0);
			ictrl = RD_REG_WORD(&reg->isp.ictrl);
		}
		printk("%s(%ld): **** MB Command Timeout for cmd %x ****\n",
		    __func__, base_vha->host_no, command);
		printk("%s(%ld): icontrol=%x jiffies=%lx\n", __func__,
		    base_vha->host_no, ictrl, jiffies);
		printk("%s(%ld): *** mailbox[0] = 0x%x ***\n", __func__,
		    base_vha->host_no, mb0);
		qla2x00_dump_regs(base_vha);
#endif

		rval = QLA_FUNCTION_TIMEOUT;
	}

	ha->flags.mbox_busy = 0;

	/* Clean up */
	ha->mcp = NULL;

	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
		DEBUG11(printk("%s(%ld): checking for additional resp "
		    "interrupt.\n", __func__, base_vha->host_no));

		/* polling mode for non isp_abort commands. */
		qla2x00_poll(ha->rsp_q_map[0]);
	}

	/* On timeout, recover the adapter -- either schedule an ISP abort on
	 * the DPC thread, or perform it directly if we already run in DPC. */
	if (rval == QLA_FUNCTION_TIMEOUT &&
	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
		    ha->flags.eeh_busy) {
			/* not in dpc. schedule it for dpc to take over. */
			DEBUG(printk("%s(%ld): timeout schedule "
			"isp_abort_needed.\n", __func__,
			base_vha->host_no));
			DEBUG2_3_11(printk("%s(%ld): timeout schedule "
			"isp_abort_needed.\n", __func__,
			base_vha->host_no));

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {

				qla_printk(KERN_WARNING, ha,
				    "Mailbox command timeout occured. "
				    "Scheduling ISP " "abort. eeh_busy: 0x%x\n",
				    ha->flags.eeh_busy);
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else if (!abort_active) {
			/* call abort directly since we are in the DPC thread */
			DEBUG(printk("%s(%ld): timeout calling abort_isp\n",
			    __func__, base_vha->host_no));
			DEBUG2_3_11(printk("%s(%ld): timeout calling "
			    "abort_isp\n", __func__, base_vha->host_no));

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {

				qla_printk(KERN_WARNING, ha,
				    "Mailbox command timeout occured. "
				    "Issuing ISP abort.\n");

				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				if (ha->isp_ops->abort_isp(vha)) {
					/* Failed. retry later. */
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				DEBUG(printk("%s(%ld): finished abort_isp\n",
				    __func__, vha->host_no));
				DEBUG2_3_11(printk(
				    "%s(%ld): finished abort_isp\n",
				    __func__, vha->host_no));
			}
		}
	}

premature_exit:
	/* Allow next mbx cmd to come in. */
	complete(&ha->mbx_cmd_comp);

	if (rval) {
		DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, "
		    "mbx2=%x, cmd=%x ****\n", __func__, base_vha->host_no,
		    mcp->mb[0], mcp->mb[1], mcp->mb[2], command));
	} else {
		DEBUG11(printk("%s(%ld): done.\n", __func__,
		    base_vha->host_no));
	}

	return rval;
}

/*
 * qla2x00_load_ram
 *	Load a chunk of RISC firmware into adapter RAM via a previously
 *	DMA-mapped request buffer.
 *
 * Input:
 *	vha = adapter state pointer.
 *	req_dma = DMA address of the buffer holding the firmware chunk.
 *	risc_addr = RISC RAM destination address (word address).
 *	risc_code_size = transfer size (only used on FWI2-capable ISPs,
 *	    where it is passed in mb[4]/mb[5]).
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
int
qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
    uint32_t risc_code_size)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	/* Use the extended command whenever the address needs more than
	 * 16 bits or the ISP supports the FWI2 interface. */
	if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
		mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
		mcp->mb[8] = MSW(risc_addr);
		mcp->out_mb = MBX_8|MBX_0;
	} else {
		mcp->mb[0] = MBC_LOAD_RISC_RAM;
		mcp->out_mb = MBX_0;
	}
	mcp->mb[1] = LSW(risc_addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[4] = MSW(risc_code_size);
		mcp->mb[5] = LSW(risc_code_size);
		mcp->out_mb |= MBX_5|MBX_4;
	} else {
		mcp->mb[4] = LSW(risc_code_size);
		mcp->out_mb |= MBX_4;
	}

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
		    vha->host_no, rval, mcp->mb[0]));
	} else {
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
	}

	return rval;
}

#define	EXTENDED_BB_CREDITS	BIT_0
/*
 * qla2x00_execute_fw
 *	Start adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		/* FWI2 parts take a 32-bit start address in mb[1]/mb[2]. */
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->mb[3] = 0;
		if (IS_QLA81XX(ha)) {
			/* Pass through the extended BB-credits NVRAM bit. */
			struct nvram_81xx *nv = ha->nvram;
			mcp->mb[4] = (nv->enhanced_features &
			    EXTENDED_BB_CREDITS);
		} else
			mcp->mb[4] = 0;
		mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
		mcp->in_mb |= MBX_1;
	} else {
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
			mcp->mb[2] = 0;
			mcp->out_mb |= MBX_2;
		}
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
		    vha->host_no, rval, mcp->mb[0]));
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			/* mb[1] reports the negotiated exchange count. */
			DEBUG11(printk("%s(%ld): done exchanges=%x.\n",
			    __func__, vha->host_no, mcp->mb[1]));
		} else {
			DEBUG11(printk("%s(%ld): done.\n", __func__,
			    vha->host_no));
		}
	}

	return rval;
}

/*
 * qla2x00_get_fw_version
 *	Get firmware version.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	major:		pointer for major number.
 *	minor:		pointer for minor number.
 *	subminor:	pointer for subminor number.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
    uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi,
    uint32_t *mpi_caps, uint8_t *phy)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	/* ISP81xx additionally reports MPI and PHY versions in mb[8..13]. */
	if (IS_QLA81XX(vha->hw))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
	mcp->flags = 0;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto failed;

	/* Return mailbox data. */
	*major = mcp->mb[1];
	*minor = mcp->mb[2];
	*subminor = mcp->mb[3];
	*attributes = mcp->mb[6];
	if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
		*memory = 0x1FFFF;			/* Defaults to 128KB. */
	else
		*memory = (mcp->mb[5] << 16) | mcp->mb[4];
	if (IS_QLA81XX(vha->hw)) {
		mpi[0] = mcp->mb[10] & 0xff;
		mpi[1] = mcp->mb[11] >> 8;
		mpi[2] = mcp->mb[11] & 0xff;
		*mpi_caps = (mcp->mb[12] << 16) | mcp->mb[13];
		phy[0] = mcp->mb[8] & 0xff;
		phy[1] = mcp->mb[9] >> 8;
		phy[2] = mcp->mb[9] & 0xff;
	}
failed:
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
		    vha->host_no, rval));
	} else {
		/*EMPTY*/
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
	}
	return rval;
}

/*
 * qla2x00_get_fw_options
 *	Get current firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
		    vha->host_no, rval));
	} else {
		/* fwopts[0..3] mirror mailbox registers 0..3. */
		fwopts[0] = mcp->mb[0];
		fwopts[1] = mcp->mb[1];
		fwopts[2] = mcp->mb[2];
		fwopts[3] = mcp->mb[3];

		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
	}

	return rval;
}

/*
 * qla2x00_set_fw_options
 *	Set firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
	mcp->mb[1] = fwopts[1];
	mcp->mb[2] = fwopts[2];
	mcp->mb[3] = fwopts[3];
	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->in_mb |= MBX_1;
	} else {
		mcp->mb[10] = fwopts[10];
		mcp->mb[11] = fwopts[11];
		mcp->mb[12] = 0;	/* Undocumented, but used */
		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
	}
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/* Always pass back the command status word. */
	fwopts[0] = mcp->mb[0];

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		DEBUG2_3_11(printk("%s(%ld): failed=%x (%x/%x).\n", __func__,
		    vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
	} else {
		/*EMPTY*/
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
	}

	return rval;
}

/*
 * qla2x00_mbx_reg_test
 *	Mailbox register wrap test.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	DEBUG11(printk("qla2x00_mbx_reg_test(%ld): entered.\n", vha->host_no));

	/* Firmware echoes mb[1..7]; mismatches indicate a bad mailbox path. */
	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
	mcp->mb[1] = 0xAAAA;
	mcp->mb[2] = 0x5555;
	mcp->mb[3] = 0xAA55;
	mcp->mb[4] = 0x55AA;
	mcp->mb[5] = 0xA5A5;
	mcp->mb[6] = 0x5A5A;
	mcp->mb[7] = 0x2525;
	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval == QLA_SUCCESS) {
		if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
		    mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
		    mcp->mb[7] != 0x2525)
			rval = QLA_FUNCTION_FAILED;
	}

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		DEBUG2_3_11(printk("qla2x00_mbx_reg_test(%ld): failed=%x.\n",
		    vha->host_no, rval));
	} else {
		/*EMPTY*/
		DEBUG11(printk("qla2x00_mbx_reg_test(%ld): done.\n",
		    vha->host_no));
	}

	return rval;
}

/*
 * qla2x00_verify_checksum
 *	Verify firmware checksum.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	mcp->mb[0] = MBC_VERIFY_CHECKSUM;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		/* 32-bit firmware start address split over mb[1]/mb[2]. */
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->out_mb |= MBX_2|MBX_1;
		mcp->in_mb |= MBX_2|MBX_1;
	} else {
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		mcp->in_mb |= MBX_1;
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed=%x chk sum=%x.\n",
		    __func__, vha->host_no, rval, IS_FWI2_CAPABLE(vha->hw) ?
		    (mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1]));
	} else {
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
	}

	return rval;
}

/*
 * qla2x00_issue_iocb
 *	Issue IOCB using mailbox command
 *
 * Input:
 *	ha = adapter state pointer.
 *	buffer = buffer pointer.
 *	phys_addr = physical address of buffer.
 *	size = size of buffer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
    dma_addr_t phys_addr, size_t size, uint32_t tov)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	mcp->mb[0] = MBC_IOCB_COMMAND_A64;
	mcp->mb[1] = 0;
	mcp->mb[2] = MSW(phys_addr);
	mcp->mb[3] = LSW(phys_addr);
	mcp->mb[6] = MSW(MSD(phys_addr));
	mcp->mb[7] = LSW(MSD(phys_addr));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_0;
	mcp->tov = tov;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		DEBUG(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n",
		    vha->host_no, rval));
	} else {
		/* The IOCB completion (status entry) is DMA'd back into the
		 * caller's buffer; sanitize its entry_status field. */
		sts_entry_t *sts_entry = (sts_entry_t *) buffer;

		/* Mask reserved bits. */
		sts_entry->entry_status &=
		    IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
	}

	return rval;
}

/* Convenience wrapper: issue an IOCB with the default mailbox timeout. */
int
qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
    size_t size)
{
	return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
	    MBX_TOV_SECONDS);
}

/*
 * qla2x00_abort_command
 *	Abort command aborts a specified IOCB.
 *
 * Input:
 *	ha = adapter block pointer.
 *	sp = SB structure pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_abort_command(srb_t *sp)
{
	unsigned long   flags = 0;
	int		rval;
	uint32_t	handle = 0;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;
	fc_port_t	*fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = vha->req;

	DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n",
	    vha->host_no));

	/* Look up the firmware handle for this srb under the hardware lock;
	 * handle 0 is reserved, so the scan starts at 1. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (handle == MAX_OUTSTANDING_COMMANDS) {
		/* command not found */
		return QLA_FUNCTION_FAILED;
	}

	mcp->mb[0] = MBC_ABORT_COMMAND;
	if (HAS_EXTENDED_IDS(ha))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = (uint16_t)handle;
	mcp->mb[3] = (uint16_t)(handle >> 16);
	mcp->mb[6] = (uint16_t)sp->cmd->device->lun;
	mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n",
		    vha->host_no, rval));
	} else {
		DEBUG11(printk("qla2x00_abort_command(%ld): done.\n",
		    vha->host_no));
	}

	return rval;
}

/*
 * qla2x00_abort_target
 *	Issue an abort-target mailbox command for the given fcport, then a
 *	marker IOCB to resynchronize the firmware.  The lun argument 'l' is
 *	unused for a target-level reset; 'tag' is likewise unused here.
 */
int
qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
{
	int rval, rval2;
	mbx_cmd_t  mc;
	mbx_cmd_t  *mcp = &mc;
	scsi_qla_host_t *vha;
	struct req_que *req;
	struct rsp_que *rsp;

	DEBUG11(printk("%s(%ld): entered.\n", __func__,
	    fcport->vha->host_no));

	l = l;	/* self-assignment quiets the unused-parameter warning */
	vha = fcport->vha;
	req = vha->hw->req_q_map[0];
	rsp = req->rsp;
	mcp->mb[0] = MBC_ABORT_TARGET;
	mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = 0;
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[1] = fcport->loop_id << 8;
	}
	mcp->mb[2] = vha->hw->loop_reset_delay;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
		    vha->host_no, rval));
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
							MK_SYNC_ID);
	if (rval2 != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
		    "(%x).\n", __func__, vha->host_no, rval2));
	} else {
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
	}

	/* Note: the marker failure (rval2) is logged but does not affect
	 * the returned status of the abort itself. */
	return rval;
}

/*
 * qla2x00_lun_reset
 *	Issue a LUN reset mailbox command for lun 'l' on the given fcport,
 *	followed by a LUN-scoped marker IOCB.  'tag' is unused here.
 */
int
qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
{
	int rval, rval2;
	mbx_cmd_t  mc;
	mbx_cmd_t  *mcp = &mc;
	scsi_qla_host_t *vha;
	struct req_que *req;
	struct rsp_que *rsp;

	DEBUG11(printk("%s(%ld): entered.\n", __func__,
	    fcport->vha->host_no));

	vha = fcport->vha;
	req = vha->hw->req_q_map[0];
	rsp = req->rsp;
	mcp->mb[0] = MBC_LUN_RESET;
	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = l;
	mcp->mb[3] = 0;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
		    vha->host_no, rval));
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
								MK_SYNC_ID_LUN);
	if (rval2 != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
		    "(%x).\n", __func__, vha->host_no, rval2));
	} else {
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
	}

	return rval;
}

/*
 * qla2x00_get_adapter_id
 *	Get adapter ID and topology.
 *
 * Input:
 *	ha = adapter block pointer.
 *	id = pointer for loop ID.
 *	al_pa = pointer for AL_PA.
 *	area = pointer for area.
 *	domain = pointer for domain.
 *	top = pointer for topology.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
    uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	DEBUG11(printk("qla2x00_get_adapter_id(%ld): entered.\n",
	    vha->host_no));

	mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_0;
	mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	/* FCoE-capable parts report VLAN/FCF/VN-port MAC in mb[9..13]. */
	if (IS_QLA8XXX_TYPE(vha->hw))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	/* Map mailbox status values to more specific driver errors. */
	if (mcp->mb[0] == MBS_COMMAND_ERROR)
		rval = QLA_COMMAND_ERROR;
	else if (mcp->mb[0] == MBS_INVALID_COMMAND)
		rval = QLA_INVALID_COMMAND;

	/* Return data (unconditionally, even on failure -- callers check
	 * rval before trusting the outputs). */
	*id = mcp->mb[1];
	*al_pa = LSB(mcp->mb[2]);
	*area = MSB(mcp->mb[2]);
	*domain	= LSB(mcp->mb[3]);
	*top = mcp->mb[6];
	*sw_cap = mcp->mb[7];

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n",
		    vha->host_no, rval));
	} else {
		DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n",
		    vha->host_no));

		if (IS_QLA8XXX_TYPE(vha->hw)) {
			vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
			vha->fcoe_fcf_idx = mcp->mb[10];
			/* MAC bytes arrive most-significant-first across
			 * mb[11..13]; unpack into the 6-byte array. */
			vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
			vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
			vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
			vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
			vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
			vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
		}
	}

	return rval;
}

/*
 * qla2x00_get_retry_cnt
 *	Get current firmware login retry count and delay.
 *
 * Input:
 *	ha = adapter block pointer.
 *	retry_cnt = pointer to login retry count.
 *	tov = pointer to login timeout value.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
    uint16_t *r_a_tov)
{
	int rval;
	uint16_t ratov;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	DEBUG11(printk("qla2x00_get_retry_cnt(%ld): entered.\n",
	    vha->host_no));

	mcp->mb[0] = MBC_GET_RETRY_COUNT;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		DEBUG2_3_11(printk("qla2x00_get_retry_cnt(%ld): failed = %x.\n",
		    vha->host_no, mcp->mb[0]));
	} else {
		/* Convert returned data and check our values. */
		*r_a_tov = mcp->mb[3] / 2;
		ratov = (mcp->mb[3]/2) / 10;  /* mb[3] value is in 100ms */
		/* Only adopt the firmware values when they give a longer
		 * overall login window than the caller's current ones. */
		if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
			/* Update to the larger values */
			*retry_cnt = (uint8_t)mcp->mb[1];
			*tov = ratov;
		}

		DEBUG11(printk("qla2x00_get_retry_cnt(%ld): done. mb3=%d "
		    "ratov=%d.\n", vha->host_no, mcp->mb[3], ratov));
	}

	return rval;
}

/*
 * qla2x00_init_firmware
 *	Initialize adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = Initialization control block pointer.
 *	size = size of initialization control block.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n",
	    vha->host_no));

	/* ISP82xx with doorbell writes enabled: kick the doorbell first. */
	if (IS_QLA82XX(ha) && ql2xdbwr)
		qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
			(0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));

	if (ha->flags.npiv_supported)
		mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
	else
		mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;

	mcp->mb[1] = 0;
	mcp->mb[2] = MSW(ha->init_cb_dma);
	mcp->mb[3] = LSW(ha->init_cb_dma);
	mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
	mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	/* ISP81xx may carry an extended init control block alongside the
	 * base one; its DMA address/size go in mb[10..14]. */
	if (IS_QLA81XX(ha) && ha->ex_init_cb->ex_version) {
		mcp->mb[1] = BIT_0;
		mcp->mb[10] = MSW(ha->ex_init_cb_dma);
		mcp->mb[11] = LSW(ha->ex_init_cb_dma);
		mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[14] = sizeof(*ha->ex_init_cb);
		mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
	}
	mcp->in_mb = MBX_0;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		DEBUG2_3_11(printk("qla2x00_init_firmware(%ld): failed=%x "
		    "mb0=%x.\n",
		    vha->host_no, rval, mcp->mb[0]));
	} else {
		/*EMPTY*/
		DEBUG11(printk("qla2x00_init_firmware(%ld): done.\n",
		    vha->host_no));
	}

	return rval;
}

/*
 * qla2x00_get_port_database
 *	Issue normal/enhanced get port database mailbox command
 *	and copy device name as necessary.
 *
 * Input:
 *	ha = adapter state pointer.
 *	dev = structure pointer.
 *	opt = enhanced cmd option byte.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	port_database_t *pd;
	struct port_database_24xx *pd24;
	dma_addr_t pd_dma;
	struct qla_hw_data *ha = vha->hw;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	pd24 = NULL;
	/* DMA-coherent scratch buffer the firmware fills with the port DB. */
	pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd  == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Port Database "
		    "structure.\n", __func__, vha->host_no));
		return QLA_MEMORY_ALLOC_FAILED;
	}
	memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));

	mcp->mb[0] = MBC_GET_PORT_DATABASE;
	if (opt != 0 && !IS_FWI2_CAPABLE(ha))
		mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
	mcp->mb[2] = MSW(pd_dma);
	mcp->mb[3] = LSW(pd_dma);
	mcp->mb[6] = MSW(MSD(pd_dma));
	mcp->mb[7] = LSW(MSD(pd_dma));
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
		mcp->in_mb |= MBX_1;
	} else if (HAS_EXTENDED_IDS(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
	} else {
		/* Legacy parts pack loop_id and opt into one register. */
		mcp->mb[1] = fcport->loop_id << 8 | opt;
		mcp->out_mb |= MBX_1;
	}
	mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
	    PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto gpd_error_out;

	if (IS_FWI2_CAPABLE(ha)) {
		pd24 = (struct port_database_24xx *) pd;

		/* Check for logged in state. */
		if (pd24->current_login_state != PDS_PRLI_COMPLETE &&
		    pd24->last_login_state != PDS_PRLI_COMPLETE) {
			DEBUG2(printk("%s(%ld): Unable to verify "
			    "login-state (%x/%x) for loop_id %x\n",
			    __func__, vha->host_no,
			    pd24->current_login_state,
			    pd24->last_login_state, fcport->loop_id));
			rval = QLA_FUNCTION_FAILED;
			goto gpd_error_out;
		}

		/* Names are little-endian. */
		memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
		memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);

		/* Get port_id of device. */
		fcport->d_id.b.domain = pd24->port_id[0];
		fcport->d_id.b.area = pd24->port_id[1];
		fcport->d_id.b.al_pa = pd24->port_id[2];
		fcport->d_id.b.rsvd_1 = 0;

		/* If not target must be initiator or unknown type. */
		if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type = FCT_INITIATOR;
		else
			fcport->port_type = FCT_TARGET;
	} else {
		/* Check for logged in state. */
		if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
		    pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
			rval = QLA_FUNCTION_FAILED;
			goto gpd_error_out;
		}

		/* Names are little-endian. */
		memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
		memcpy(fcport->port_name, pd->port_name, WWN_SIZE);

		/* Get port_id of device.  NOTE(review): the legacy layout
		 * reads the area byte from port_id[3] (vs [1] on 24xx) --
		 * intentional per the legacy port_database_t layout. */
		fcport->d_id.b.domain = pd->port_id[0];
		fcport->d_id.b.area = pd->port_id[3];
		fcport->d_id.b.al_pa = pd->port_id[2];
		fcport->d_id.b.rsvd_1 = 0;

		/* If not target must be initiator or unknown type. */
		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type = FCT_INITIATOR;
		else
			fcport->port_type = FCT_TARGET;

		/* Passback COS information. */
		fcport->supported_classes = (pd->options & BIT_4) ?
		    FC_COS_CLASS2: FC_COS_CLASS3;
	}

gpd_error_out:
	dma_pool_free(ha->s_dma_pool, pd, pd_dma);

	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
		    __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
	} else {
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
	}

	return rval;
}

/*
 * qla2x00_get_firmware_state
 *	Get adapter firmware state.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = pointer for firmware state.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	DEBUG11(printk("qla2x00_get_firmware_state(%ld): entered.\n",
	    vha->host_no));

	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
	mcp->out_mb = MBX_0;
	/* FWI2-capable parts report five state words (mb[1..5]); older
	 * parts report only one (mb[1]). */
	if (IS_FWI2_CAPABLE(vha->hw))
		mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	else
		mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/* Return firmware states. */
	states[0] = mcp->mb[1];
	if (IS_FWI2_CAPABLE(vha->hw)) {
		states[1] = mcp->mb[2];
		states[2] = mcp->mb[3];
		states[3] = mcp->mb[4];
		states[4] = mcp->mb[5];
	}

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		DEBUG2_3_11(printk("qla2x00_get_firmware_state(%ld): "
		    "failed=%x.\n", vha->host_no, rval));
	} else {
		/*EMPTY*/
		DEBUG11(printk("qla2x00_get_firmware_state(%ld): done.\n",
		    vha->host_no));
	}

	return rval;
}

/*
 * qla2x00_get_port_name
 *	Issue get port name mailbox command.
 *	Returned name is in big endian format.
 *
 * Input:
 *	ha = adapter block pointer.
 *	loop_id = loop ID of device.
 *	name = pointer for name.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
    uint8_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	DEBUG11(printk("qla2x00_get_port_name(%ld): entered.\n",
	    vha->host_no));

	mcp->mb[0] = MBC_GET_PORT_NAME;
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw)) {
		mcp->mb[1] = loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10;
	} else {
		/* Legacy parts pack loop_id and opt into one register. */
		mcp->mb[1] = loop_id << 8 | opt;
	}

	mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		DEBUG2_3_11(printk("qla2x00_get_port_name(%ld): failed=%x.\n",
		    vha->host_no, rval));
	} else {
		/* 'name' may be NULL when the caller only probes presence. */
		if (name != NULL) {
			/* This function returns name in big endian. */
			name[0] = MSB(mcp->mb[2]);
			name[1] = LSB(mcp->mb[2]);
			name[2] = MSB(mcp->mb[3]);
			name[3] = LSB(mcp->mb[3]);
			name[4] = MSB(mcp->mb[6]);
			name[5] = LSB(mcp->mb[6]);
			name[6] = MSB(mcp->mb[7]);
			name[7] = LSB(mcp->mb[7]);
		}

		DEBUG11(printk("qla2x00_get_port_name(%ld): done.\n",
		    vha->host_no));
	}

	return rval;
}

/*
 * qla2x00_lip_reset
 *	Issue LIP reset mailbox command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_lip_reset(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	if (IS_QLA8XXX_TYPE(vha->hw)) {
		/* Logout across all FCFs. */
		mcp->mb[0] = MBC_LIP_FULL_LOGIN;
		mcp->mb[1] = BIT_1;
		mcp->mb[2] = 0;
		mcp->out_mb = MBX_2|MBX_1|MBX_0;
	} else if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->mb[0] = MBC_LIP_FULL_LOGIN;
		mcp->mb[1] = BIT_6;
		mcp->mb[2] = 0;
		mcp->mb[3] = vha->hw->loop_reset_delay;
		mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	} else {
		mcp->mb[0] = MBC_LIP_RESET;
		mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
		if (HAS_EXTENDED_IDS(vha->hw)) {
			mcp->mb[1] = 0x00ff;
			mcp->mb[10] = 0;
			mcp->out_mb |= MBX_10;
		} else {
			mcp->mb[1] = 0xff00;
		}
		mcp->mb[2] = vha->hw->loop_reset_delay;
		mcp->mb[3] = 0;
	}
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n",
		    __func__, vha->host_no, rval));
	} else {
		/*EMPTY*/
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
	}

	return rval;
}

/*
 * qla2x00_send_sns
 *	Send SNS command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	sns = pointer for command.
 *	cmd_size = command size.
 *	buf_size = response/command size.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
*/ int qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address, uint16_t cmd_size, size_t buf_size) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; DEBUG11(printk("qla2x00_send_sns(%ld): entered.\n", vha->host_no)); DEBUG11(printk("qla2x00_send_sns: retry cnt=%d ratov=%d total " "tov=%d.\n", vha->hw->retry_count, vha->hw->login_timeout, mcp->tov)); mcp->mb[0] = MBC_SEND_SNS_COMMAND; mcp->mb[1] = cmd_size; mcp->mb[2] = MSW(sns_phys_address); mcp->mb[3] = LSW(sns_phys_address); mcp->mb[6] = MSW(MSD(sns_phys_address)); mcp->mb[7] = LSW(MSD(sns_phys_address)); mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0|MBX_1; mcp->buf_size = buf_size; mcp->flags = MBX_DMA_OUT|MBX_DMA_IN; mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2); rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { /*EMPTY*/ DEBUG(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x " "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1])); DEBUG2_3_11(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x " "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1])); } else { /*EMPTY*/ DEBUG11(printk("qla2x00_send_sns(%ld): done.\n", vha->host_no)); } return rval; } int qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) { int rval; struct logio_entry_24xx *lg; dma_addr_t lg_dma; uint32_t iop[2]; struct qla_hw_data *ha = vha->hw; struct req_que *req; struct rsp_que *rsp; DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); if (ha->flags.cpu_affinity_enabled) req = ha->req_q_map[0]; else req = vha->req; rsp = req->rsp; lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); if (lg == NULL) { DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n", __func__, vha->host_no)); return QLA_MEMORY_ALLOC_FAILED; } memset(lg, 0, sizeof(struct logio_entry_24xx)); lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; lg->entry_count = 1; lg->handle = 
MAKE_HANDLE(req->id, lg->handle); lg->nport_handle = cpu_to_le16(loop_id); lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI); if (opt & BIT_0) lg->control_flags |= __constant_cpu_to_le16(LCF_COND_PLOGI); if (opt & BIT_1) lg->control_flags |= __constant_cpu_to_le16(LCF_SKIP_PRLI); lg->port_id[0] = al_pa; lg->port_id[1] = area; lg->port_id[2] = domain; lg->vp_index = vha->vp_idx; rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0); if (rval != QLA_SUCCESS) { DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB " "(%x).\n", __func__, vha->host_no, rval)); } else if (lg->entry_status != 0) { DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " "-- error status (%x).\n", __func__, vha->host_no, lg->entry_status)); rval = QLA_FUNCTION_FAILED; } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { iop[0] = le32_to_cpu(lg->io_parameter[0]); iop[1] = le32_to_cpu(lg->io_parameter[1]); DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " "-- completion status (%x) ioparam=%x/%x.\n", __func__, vha->host_no, le16_to_cpu(lg->comp_status), iop[0], iop[1])); switch (iop[0]) { case LSC_SCODE_PORTID_USED: mb[0] = MBS_PORT_ID_USED; mb[1] = LSW(iop[1]); break; case LSC_SCODE_NPORT_USED: mb[0] = MBS_LOOP_ID_USED; break; case LSC_SCODE_NOLINK: case LSC_SCODE_NOIOCB: case LSC_SCODE_NOXCB: case LSC_SCODE_CMD_FAILED: case LSC_SCODE_NOFABRIC: case LSC_SCODE_FW_NOT_READY: case LSC_SCODE_NOT_LOGGED_IN: case LSC_SCODE_NOPCB: case LSC_SCODE_ELS_REJECT: case LSC_SCODE_CMD_PARAM_ERR: case LSC_SCODE_NONPORT: case LSC_SCODE_LOGGED_IN: case LSC_SCODE_NOFLOGI_ACC: default: mb[0] = MBS_COMMAND_ERROR; break; } } else { DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); iop[0] = le32_to_cpu(lg->io_parameter[0]); mb[0] = MBS_COMMAND_COMPLETE; mb[1] = 0; if (iop[0] & BIT_4) { if (iop[0] & BIT_8) mb[1] |= BIT_1; } else mb[1] = BIT_0; /* Passback COS information. */ mb[10] = 0; if (lg->io_parameter[7] || lg->io_parameter[8]) mb[10] |= BIT_0; /* Class 2. 
*/ if (lg->io_parameter[9] || lg->io_parameter[10]) mb[10] |= BIT_1; /* Class 3. */ } dma_pool_free(ha->s_dma_pool, lg, lg_dma); return rval; } /* * qla2x00_login_fabric * Issue login fabric port mailbox command. * * Input: * ha = adapter block pointer. * loop_id = device loop ID. * domain = device domain. * area = device area. * al_pa = device AL_PA. * status = pointer for return status. * opt = command options. * TARGET_QUEUE_LOCK must be released. * ADAPTER_STATE_LOCK must be released. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. */ int qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; DEBUG11(printk("qla2x00_login_fabric(%ld): entered.\n", vha->host_no)); mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; if (HAS_EXTENDED_IDS(ha)) { mcp->mb[1] = loop_id; mcp->mb[10] = opt; mcp->out_mb |= MBX_10; } else { mcp->mb[1] = (loop_id << 8) | opt; } mcp->mb[2] = domain; mcp->mb[3] = area << 8 | al_pa; mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0; mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); /* Return mailbox statuses. */ if (mb != NULL) { mb[0] = mcp->mb[0]; mb[1] = mcp->mb[1]; mb[2] = mcp->mb[2]; mb[6] = mcp->mb[6]; mb[7] = mcp->mb[7]; /* COS retrieved from Get-Port-Database mailbox command. */ mb[10] = 0; } if (rval != QLA_SUCCESS) { /* RLU tmp code: need to change main mailbox_command function to * return ok even when the mailbox completion value is not * SUCCESS. The caller needs to be responsible to interpret * the return values of this mailbox command if we're not * to change too much of the existing code. 
*/ if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 || mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006) rval = QLA_SUCCESS; /*EMPTY*/ DEBUG2_3_11(printk("qla2x00_login_fabric(%ld): failed=%x " "mb[0]=%x mb[1]=%x mb[2]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1], mcp->mb[2])); } else { /*EMPTY*/ DEBUG11(printk("qla2x00_login_fabric(%ld): done.\n", vha->host_no)); } return rval; } /* * qla2x00_login_local_device * Issue login loop port mailbox command. * * Input: * ha = adapter block pointer. * loop_id = device loop ID. * opt = command options. * * Returns: * Return status code. * * Context: * Kernel context. * */ int qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t *mb_ret, uint8_t opt) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; if (IS_FWI2_CAPABLE(ha)) return qla24xx_login_fabric(vha, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, mb_ret, opt); DEBUG3(printk("%s(%ld): entered.\n", __func__, vha->host_no)); mcp->mb[0] = MBC_LOGIN_LOOP_PORT; if (HAS_EXTENDED_IDS(ha)) mcp->mb[1] = fcport->loop_id; else mcp->mb[1] = fcport->loop_id << 8; mcp->mb[2] = opt; mcp->out_mb = MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0; mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); /* Return mailbox statuses. */ if (mb_ret != NULL) { mb_ret[0] = mcp->mb[0]; mb_ret[1] = mcp->mb[1]; mb_ret[6] = mcp->mb[6]; mb_ret[7] = mcp->mb[7]; } if (rval != QLA_SUCCESS) { /* AV tmp code: need to change main mailbox_command function to * return ok even when the mailbox completion value is not * SUCCESS. The caller needs to be responsible to interpret * the return values of this mailbox command if we're not * to change too much of the existing code. 
*/ if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006) rval = QLA_SUCCESS; DEBUG(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x " "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7])); DEBUG2_3(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x " "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7])); } else { /*EMPTY*/ DEBUG3(printk("%s(%ld): done.\n", __func__, vha->host_no)); } return (rval); } int qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, uint8_t area, uint8_t al_pa) { int rval; struct logio_entry_24xx *lg; dma_addr_t lg_dma; struct qla_hw_data *ha = vha->hw; struct req_que *req; struct rsp_que *rsp; DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); if (lg == NULL) { DEBUG2_3(printk("%s(%ld): failed to allocate Logout IOCB.\n", __func__, vha->host_no)); return QLA_MEMORY_ALLOC_FAILED; } memset(lg, 0, sizeof(struct logio_entry_24xx)); if (ql2xmaxqueues > 1) req = ha->req_q_map[0]; else req = vha->req; rsp = req->rsp; lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; lg->entry_count = 1; lg->handle = MAKE_HANDLE(req->id, lg->handle); lg->nport_handle = cpu_to_le16(loop_id); lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); lg->port_id[0] = al_pa; lg->port_id[1] = area; lg->port_id[2] = domain; lg->vp_index = vha->vp_idx; rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0); if (rval != QLA_SUCCESS) { DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB " "(%x).\n", __func__, vha->host_no, rval)); } else if (lg->entry_status != 0) { DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " "-- error status (%x).\n", __func__, vha->host_no, lg->entry_status)); rval = QLA_FUNCTION_FAILED; } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { DEBUG2_3_11(printk("%s(%ld %d): failed to complete IOCB " "-- completion status (%x) ioparam=%x/%x.\n", 
__func__, vha->host_no, vha->vp_idx, le16_to_cpu(lg->comp_status), le32_to_cpu(lg->io_parameter[0]), le32_to_cpu(lg->io_parameter[1]))); } else { /*EMPTY*/ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); } dma_pool_free(ha->s_dma_pool, lg, lg_dma); return rval; } /* * qla2x00_fabric_logout * Issue logout fabric port mailbox command. * * Input: * ha = adapter block pointer. * loop_id = device loop ID. * TARGET_QUEUE_LOCK must be released. * ADAPTER_STATE_LOCK must be released. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. */ int qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, uint8_t area, uint8_t al_pa) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; DEBUG11(printk("qla2x00_fabric_logout(%ld): entered.\n", vha->host_no)); mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; mcp->out_mb = MBX_1|MBX_0; if (HAS_EXTENDED_IDS(vha->hw)) { mcp->mb[1] = loop_id; mcp->mb[10] = 0; mcp->out_mb |= MBX_10; } else { mcp->mb[1] = loop_id << 8; } mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { /*EMPTY*/ DEBUG2_3_11(printk("qla2x00_fabric_logout(%ld): failed=%x " "mbx1=%x.\n", vha->host_no, rval, mcp->mb[1])); } else { /*EMPTY*/ DEBUG11(printk("qla2x00_fabric_logout(%ld): done.\n", vha->host_no)); } return rval; } /* * qla2x00_full_login_lip * Issue full login LIP mailbox command. * * Input: * ha = adapter block pointer. * TARGET_QUEUE_LOCK must be released. * ADAPTER_STATE_LOCK must be released. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. */ int qla2x00_full_login_lip(scsi_qla_host_t *vha) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n", vha->host_no)); mcp->mb[0] = MBC_LIP_FULL_LOGIN; mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? 
BIT_3 : 0; mcp->mb[2] = 0; mcp->mb[3] = 0; mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { /*EMPTY*/ DEBUG2_3_11(printk("qla2x00_full_login_lip(%ld): failed=%x.\n", vha->host_no, rval)); } else { /*EMPTY*/ DEBUG11(printk("qla2x00_full_login_lip(%ld): done.\n", vha->host_no)); } return rval; } /* * qla2x00_get_id_list * * Input: * ha = adapter block pointer. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. */ int qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma, uint16_t *entries) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; DEBUG11(printk("qla2x00_get_id_list(%ld): entered.\n", vha->host_no)); if (id_list == NULL) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_GET_ID_LIST; mcp->out_mb = MBX_0; if (IS_FWI2_CAPABLE(vha->hw)) { mcp->mb[2] = MSW(id_list_dma); mcp->mb[3] = LSW(id_list_dma); mcp->mb[6] = MSW(MSD(id_list_dma)); mcp->mb[7] = LSW(MSD(id_list_dma)); mcp->mb[8] = 0; mcp->mb[9] = vha->vp_idx; mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2; } else { mcp->mb[1] = MSW(id_list_dma); mcp->mb[2] = LSW(id_list_dma); mcp->mb[3] = MSW(MSD(id_list_dma)); mcp->mb[6] = LSW(MSD(id_list_dma)); mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1; } mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { /*EMPTY*/ DEBUG2_3_11(printk("qla2x00_get_id_list(%ld): failed=%x.\n", vha->host_no, rval)); } else { *entries = mcp->mb[1]; DEBUG11(printk("qla2x00_get_id_list(%ld): done.\n", vha->host_no)); } return rval; } /* * qla2x00_get_resource_cnts * Get current firmware resource counts. * * Input: * ha = adapter block pointer. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. 
*/
int
qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
    uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt,
    uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports, uint16_t *max_fcfs)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	/* Only 81xx firmware reports an FCF count (mb12). */
	if (IS_QLA81XX(vha->hw))
		mcp->in_mb |= MBX_12;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		DEBUG2_3_11(printk("%s(%ld): failed = %x.\n", __func__,
		    vha->host_no, mcp->mb[0]));
	} else {
		DEBUG11(printk("%s(%ld): done. mb1=%x mb2=%x mb3=%x mb6=%x "
		    "mb7=%x mb10=%x mb11=%x mb12=%x.\n", __func__,
		    vha->host_no, mcp->mb[1], mcp->mb[2], mcp->mb[3],
		    mcp->mb[6], mcp->mb[7], mcp->mb[10], mcp->mb[11],
		    mcp->mb[12]));

		/* Each output pointer is optional; NULL means "don't care". */
		if (cur_xchg_cnt)
			*cur_xchg_cnt = mcp->mb[3];
		if (orig_xchg_cnt)
			*orig_xchg_cnt = mcp->mb[6];
		if (cur_iocb_cnt)
			*cur_iocb_cnt = mcp->mb[7];
		if (orig_iocb_cnt)
			*orig_iocb_cnt = mcp->mb[10];
		if (vha->hw->flags.npiv_supported && max_npiv_vports)
			*max_npiv_vports = mcp->mb[11];
		if (IS_QLA81XX(vha->hw) && max_fcfs)
			*max_fcfs = mcp->mb[12];
	}

	return (rval);
}

#if defined(QL_DEBUG_LEVEL_3)
/*
 * qla2x00_get_fcal_position_map
 *	Get FCAL (LILP) position map using mailbox command
 *
 * Input:
 *	ha = adapter state pointer.
 *	pos_map = buffer pointer (can be NULL).
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
*/
int
qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	char *pmap;
	dma_addr_t pmap_dma;
	struct qla_hw_data *ha = vha->hw;

	/* DMA-able bounce buffer for the firmware to fill. */
	pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
	if (pmap == NULL) {
		DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****",
		    __func__, vha->host_no));
		return QLA_MEMORY_ALLOC_FAILED;
	}
	memset(pmap, 0, FCAL_MAP_SIZE);

	mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
	mcp->mb[2] = MSW(pmap_dma);
	mcp->mb[3] = LSW(pmap_dma);
	mcp->mb[6] = MSW(MSD(pmap_dma));
	mcp->mb[7] = LSW(MSD(pmap_dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->buf_size = FCAL_MAP_SIZE;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval == QLA_SUCCESS) {
		/* pmap[0] holds the map length; dump uses length + 1 bytes. */
		DEBUG11(printk("%s(%ld): (mb0=%x/mb1=%x) FC/AL Position Map "
		    "size (%x)\n", __func__, vha->host_no, mcp->mb[0],
		    mcp->mb[1], (unsigned)pmap[0]));
		DEBUG11(qla2x00_dump_buffer(pmap, pmap[0] + 1));

		if (pos_map)
			memcpy(pos_map, pmap, FCAL_MAP_SIZE);
	}
	dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);

	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
		    vha->host_no, rval));
	} else {
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
	}

	return rval;
}
#endif

/*
 * qla2x00_get_link_status
 *
 * Input:
 *	ha = adapter block pointer.
 *	loop_id = device loop ID.
 *	ret_buf = pointer to link status return buffer.
 *
 * Returns:
 *	0 = success.
 *	BIT_0 = mem alloc error.
 *	BIT_1 = mailbox error.
*/
int
qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
    struct link_statistics *stats, dma_addr_t stats_dma)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	uint32_t *siter, *diter, dwords;
	struct qla_hw_data *ha = vha->hw;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	mcp->mb[0] = MBC_GET_LINK_STATUS;
	mcp->mb[2] = MSW(stats_dma);
	mcp->mb[3] = LSW(stats_dma);
	mcp->mb[6] = MSW(MSD(stats_dma));
	mcp->mb[7] = LSW(MSD(stats_dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_0;
	/* Loop-ID placement varies by ISP generation. */
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = loop_id;
		mcp->mb[4] = 0;
		mcp->mb[10] = 0;
		mcp->out_mb |= MBX_10|MBX_4|MBX_1;
		mcp->in_mb |= MBX_1;
	} else if (HAS_EXTENDED_IDS(ha)) {
		mcp->mb[1] = loop_id;
		mcp->mb[10] = 0;
		mcp->out_mb |= MBX_10|MBX_1;
	} else {
		mcp->mb[1] = loop_id << 8;
		mcp->out_mb |= MBX_1;
	}
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = IOCTL_CMD;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval == QLA_SUCCESS) {
		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
			DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n",
			    __func__, vha->host_no, mcp->mb[0]));
			rval = QLA_FUNCTION_FAILED;
		} else {
			/* Copy over data -- firmware data is LE. */
			dwords = offsetof(struct link_statistics, unused1) / 4;
			siter = diter = &stats->link_fail_cnt;
			while (dwords--)
				*diter++ = le32_to_cpu(*siter++);
		}
	} else {
		/* Failed.
*/
		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
		    vha->host_no, rval));
	}

	return rval;
}

int
qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
    dma_addr_t stats_dma)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	uint32_t *siter, *diter, dwords;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
	mcp->mb[2] = MSW(stats_dma);
	mcp->mb[3] = LSW(stats_dma);
	mcp->mb[6] = MSW(MSD(stats_dma));
	mcp->mb[7] = LSW(MSD(stats_dma));
	/* Buffer length is expressed in 32-bit words (mb8). */
	mcp->mb[8] = sizeof(struct link_statistics) / 4;
	mcp->mb[9] = vha->vp_idx;
	mcp->mb[10] = 0;
	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = IOCTL_CMD;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval == QLA_SUCCESS) {
		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
			DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n",
			    __func__, vha->host_no, mcp->mb[0]));
			rval = QLA_FUNCTION_FAILED;
		} else {
			/* Copy over data -- firmware data is LE. */
			dwords = sizeof(struct link_statistics) / 4;
			siter = diter = &stats->link_fail_cnt;
			while (dwords--)
				*diter++ = le32_to_cpu(*siter++);
		}
	} else {
		/* Failed. */
		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
		    vha->host_no, rval));
	}

	return rval;
}

int
qla24xx_abort_command(srb_t *sp)
{
	int rval;
	unsigned long flags = 0;

	struct abort_entry_24xx *abt;
	dma_addr_t abt_dma;
	uint32_t handle;
	fc_port_t *fcport = sp->fcport;
	struct scsi_qla_host *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = vha->req;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	/* Locate the handle of the command to abort under the hardware lock. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (handle == MAX_OUTSTANDING_COMMANDS) {
		/* Command not found.
*/ return QLA_FUNCTION_FAILED; } abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma); if (abt == NULL) { DEBUG2_3(printk("%s(%ld): failed to allocate Abort IOCB.\n", __func__, vha->host_no)); return QLA_MEMORY_ALLOC_FAILED; } memset(abt, 0, sizeof(struct abort_entry_24xx)); abt->entry_type = ABORT_IOCB_TYPE; abt->entry_count = 1; abt->handle = MAKE_HANDLE(req->id, abt->handle); abt->nport_handle = cpu_to_le16(fcport->loop_id); abt->handle_to_abort = handle; abt->port_id[0] = fcport->d_id.b.al_pa; abt->port_id[1] = fcport->d_id.b.area; abt->port_id[2] = fcport->d_id.b.domain; abt->vp_index = fcport->vp_idx; abt->req_que_no = cpu_to_le16(req->id); rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0); if (rval != QLA_SUCCESS) { DEBUG2_3_11(printk("%s(%ld): failed to issue IOCB (%x).\n", __func__, vha->host_no, rval)); } else if (abt->entry_status != 0) { DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " "-- error status (%x).\n", __func__, vha->host_no, abt->entry_status)); rval = QLA_FUNCTION_FAILED; } else if (abt->nport_handle != __constant_cpu_to_le16(0)) { DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " "-- completion status (%x).\n", __func__, vha->host_no, le16_to_cpu(abt->nport_handle))); rval = QLA_FUNCTION_FAILED; } else { DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); } dma_pool_free(ha->s_dma_pool, abt, abt_dma); return rval; } struct tsk_mgmt_cmd { union { struct tsk_mgmt_entry tsk; struct sts_entry_24xx sts; } p; }; static int __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, unsigned int l, int tag) { int rval, rval2; struct tsk_mgmt_cmd *tsk; struct sts_entry_24xx *sts; dma_addr_t tsk_dma; scsi_qla_host_t *vha; struct qla_hw_data *ha; struct req_que *req; struct rsp_que *rsp; DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no)); vha = fcport->vha; ha = vha->hw; req = vha->req; if (ha->flags.cpu_affinity_enabled) rsp = ha->rsp_q_map[tag + 1]; else rsp = req->rsp; tsk = 
dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); if (tsk == NULL) { DEBUG2_3(printk("%s(%ld): failed to allocate Task Management " "IOCB.\n", __func__, vha->host_no)); return QLA_MEMORY_ALLOC_FAILED; } memset(tsk, 0, sizeof(struct tsk_mgmt_cmd)); tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; tsk->p.tsk.entry_count = 1; tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle); tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); tsk->p.tsk.control_flags = cpu_to_le32(type); tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; tsk->p.tsk.port_id[1] = fcport->d_id.b.area; tsk->p.tsk.port_id[2] = fcport->d_id.b.domain; tsk->p.tsk.vp_index = fcport->vp_idx; if (type == TCF_LUN_RESET) { int_to_scsilun(l, &tsk->p.tsk.lun); host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun, sizeof(tsk->p.tsk.lun)); } sts = &tsk->p.sts; rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0); if (rval != QLA_SUCCESS) { DEBUG2_3_11(printk("%s(%ld): failed to issue %s Reset IOCB " "(%x).\n", __func__, vha->host_no, name, rval)); } else if (sts->entry_status != 0) { DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " "-- error status (%x).\n", __func__, vha->host_no, sts->entry_status)); rval = QLA_FUNCTION_FAILED; } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " "-- completion status (%x).\n", __func__, vha->host_no, le16_to_cpu(sts->comp_status))); rval = QLA_FUNCTION_FAILED; } else if (!(le16_to_cpu(sts->scsi_status) & SS_RESPONSE_INFO_LEN_VALID)) { DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " "-- no response info (%x).\n", __func__, vha->host_no, le16_to_cpu(sts->scsi_status))); rval = QLA_FUNCTION_FAILED; } else if (le32_to_cpu(sts->rsp_data_len) < 4) { DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " "-- not enough response info (%d).\n", __func__, vha->host_no, le32_to_cpu(sts->rsp_data_len))); rval = QLA_FUNCTION_FAILED; } else if (sts->data[3]) { 
DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " "-- response (%x).\n", __func__, vha->host_no, sts->data[3])); rval = QLA_FUNCTION_FAILED; } /* Issue marker IOCB. */ rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l, type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID); if (rval2 != QLA_SUCCESS) { DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " "(%x).\n", __func__, vha->host_no, rval2)); } else { DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); } dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); return rval; } int qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag) { struct qla_hw_data *ha = fcport->vha->hw; if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag); return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag); } int qla24xx_lun_reset(struct fc_port *fcport, unsigned int l, int tag) { struct qla_hw_data *ha = fcport->vha->hw; if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag); return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag); } int qla2x00_system_error(scsi_qla_host_t *vha) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) return QLA_FUNCTION_FAILED; DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; mcp->out_mb = MBX_0; mcp->in_mb = MBX_0; mcp->tov = 5; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, vha->host_no, rval)); } else { DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); } return rval; } /** * qla2x00_set_serdes_params() - * @ha: HA context * * Returns */ int qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g, uint16_t sw_em_2g, uint16_t sw_em_4g) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; DEBUG11(printk("%s(%ld): 
entered.\n", __func__, vha->host_no)); mcp->mb[0] = MBC_SERDES_PARAMS; mcp->mb[1] = BIT_0; mcp->mb[2] = sw_em_1g | BIT_15; mcp->mb[3] = sw_em_2g | BIT_15; mcp->mb[4] = sw_em_4g | BIT_15; mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { /*EMPTY*/ DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, vha->host_no, rval, mcp->mb[0])); } else { /*EMPTY*/ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); } return rval; } int qla2x00_stop_firmware(scsi_qla_host_t *vha) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); mcp->mb[0] = MBC_STOP_FIRMWARE; mcp->out_mb = MBX_0; mcp->in_mb = MBX_0; mcp->tov = 5; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, vha->host_no, rval)); if (mcp->mb[0] == MBS_INVALID_COMMAND) rval = QLA_INVALID_COMMAND; } else { DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); } return rval; } int qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma, uint16_t buffers) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; if (unlikely(pci_channel_offline(vha->hw->pdev))) return QLA_FUNCTION_FAILED; DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); mcp->mb[0] = MBC_TRACE_CONTROL; mcp->mb[1] = TC_EFT_ENABLE; mcp->mb[2] = LSW(eft_dma); mcp->mb[3] = MSW(eft_dma); mcp->mb[4] = LSW(MSD(eft_dma)); mcp->mb[5] = MSW(MSD(eft_dma)); mcp->mb[6] = buffers; mcp->mb[7] = TC_AEN_DISABLE; mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { DEBUG2_3_11(printk("%s(%ld): failed=%x 
mb[0]=%x mb[1]=%x.\n", __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); } else { DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); } return rval; } int qla2x00_disable_eft_trace(scsi_qla_host_t *vha) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; if (unlikely(pci_channel_offline(vha->hw->pdev))) return QLA_FUNCTION_FAILED; DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); mcp->mb[0] = MBC_TRACE_CONTROL; mcp->mb[1] = TC_EFT_DISABLE; mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); } else { DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); } return rval; } int qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, uint16_t buffers, uint16_t *mb, uint32_t *dwords) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw)) return QLA_FUNCTION_FAILED; if (unlikely(pci_channel_offline(vha->hw->pdev))) return QLA_FUNCTION_FAILED; DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); mcp->mb[0] = MBC_TRACE_CONTROL; mcp->mb[1] = TC_FCE_ENABLE; mcp->mb[2] = LSW(fce_dma); mcp->mb[3] = MSW(fce_dma); mcp->mb[4] = LSW(MSD(fce_dma)); mcp->mb[5] = MSW(MSD(fce_dma)); mcp->mb[6] = buffers; mcp->mb[7] = TC_AEN_DISABLE; mcp->mb[8] = 0; mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE; mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE; mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| MBX_1|MBX_0; mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); } else { 
/*
 * NOTE(review): the statements below are the tail of a function whose head
 * lies outside this chunk (its success path copies up to eight mailbox
 * registers to @mb and a buffer count to @dwords); left byte-identical.
 */
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));

		if (mb)
			memcpy(mb, mcp->mb, 8 * sizeof(*mb));
		if (dwords)
			*dwords = buffers;
	}

	return rval;
}

/*
 * qla2x00_disable_fce_trace() - stop Fibre Channel Event tracing via the
 * MBC_TRACE_CONTROL mailbox command.  On success the firmware's final FCE
 * write/read pointers (four 16-bit mailbox registers each) are packed into
 * the optional 64-bit outputs @wr and @rd.
 */
int
qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	/* FCE is only available on FWI2-capable ISPs. */
	if (!IS_FWI2_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	/* Don't touch the hardware while the PCI channel is offline. */
	if (unlikely(pci_channel_offline(vha->hw->pdev)))
		return QLA_FUNCTION_FAILED;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	mcp->mb[0] = MBC_TRACE_CONTROL;
	mcp->mb[1] = TC_FCE_DISABLE;
	mcp->mb[2] = TC_FCE_DISABLE_TRACE;
	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
	    MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
		    __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
	} else {
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));

		/* Assemble 64-bit pointers from mb[5..2] and mb[9..6]. */
		if (wr)
			*wr = (uint64_t) mcp->mb[5] << 48 |
			    (uint64_t) mcp->mb[4] << 32 |
			    (uint64_t) mcp->mb[3] << 16 |
			    (uint64_t) mcp->mb[2];
		if (rd)
			*rd = (uint64_t) mcp->mb[9] << 48 |
			    (uint64_t) mcp->mb[8] << 32 |
			    (uint64_t) mcp->mb[7] << 16 |
			    (uint64_t) mcp->mb[6];
	}

	return rval;
}

/*
 * qla2x00_read_sfp() - read SFP transceiver data into a DMA buffer via the
 * MBC_READ_SFP mailbox command.
 * @sfp_dma: DMA address of the destination buffer.
 * @addr/@off/@count: device address, offset and byte count as programmed
 * into mb[1]/mb[9]/mb[8].
 */
int
qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint16_t addr,
    uint16_t off, uint16_t count)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_FWI2_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	mcp->mb[0] = MBC_READ_SFP;
	mcp->mb[1] = addr;
	mcp->mb[2] = MSW(sfp_dma);
	mcp->mb[3] = LSW(sfp_dma);
	mcp->mb[6] = MSW(MSD(sfp_dma));
	mcp->mb[7] = LSW(MSD(sfp_dma));
	mcp->mb[8] = count;
	mcp->mb[9] = off;
	mcp->mb[10] = 0;
	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
		    vha->host_no, rval, mcp->mb[0]));
	} else {
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
	}

	return rval;
}

/*
 * qla2x00_get_idma_speed() - query the iIDMA port speed for @loop_id via
 * MBC_PORT_PARAMS.  On success the speed (mb[3]) is returned through
 * @port_speed; raw mailbox statuses are always copied to @mb when non-NULL.
 */
int
qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
    uint16_t *port_speed, uint16_t *mb)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_IIDMA_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	mcp->mb[0] = MBC_PORT_PARAMS;
	mcp->mb[1] = loop_id;
	mcp->mb[2] = mcp->mb[3] = 0;
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_3|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/* Return mailbox statuses to the caller regardless of outcome. */
	if (mb != NULL) {
		mb[0] = mcp->mb[0];
		mb[1] = mcp->mb[1];
		mb[3] = mcp->mb[3];
	}

	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
		    vha->host_no, rval));
	} else {
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
		if (port_speed)
			*port_speed = mcp->mb[3];
	}

	return rval;
}

/*
 * qla2x00_set_idma_speed() - program the iIDMA port speed for @loop_id via
 * MBC_PORT_PARAMS.  The speed mask is wider (bits 5:0) on 8xxx-type parts
 * than on earlier ISPs (bits 2:0).
 */
int
qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
    uint16_t port_speed, uint16_t *mb)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_IIDMA_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	mcp->mb[0] = MBC_PORT_PARAMS;
	mcp->mb[1] = loop_id;
	mcp->mb[2] = BIT_0;	/* BIT_0 selects the "set speed" sub-op. */
	if (IS_QLA8XXX_TYPE(vha->hw))
		mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
	else
		mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_3|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/* Return mailbox statuses. */
	if (mb != NULL) {
		mb[0] = mcp->mb[0];
		mb[1] = mcp->mb[1];
		mb[3] = mcp->mb[3];
	}

	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
		    vha->host_no, rval));
	} else {
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
	}

	return rval;
}

/*
 * qla24xx_report_id_acquisition() - handle a VP Report-ID IOCB from the
 * response queue.  Format 0 just logs counts; format 1 records the acquired
 * port ID on the matching vport and defers registration work to the DPC
 * thread (we cannot configure while still on the response queue).
 */
void
qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
	struct vp_rpt_id_entry_24xx *rptid_entry)
{
	uint8_t vp_idx;
	uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	unsigned long flags;

	if (rptid_entry->entry_status != 0)
		return;

	if (rptid_entry->format == 0) {
		DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d,"
			" number of VPs acquired %d\n", __func__, vha->host_no,
			MSB(le16_to_cpu(rptid_entry->vp_count)),
			LSB(le16_to_cpu(rptid_entry->vp_count))));
		DEBUG15(printk("%s primary port id %02x%02x%02x\n", __func__,
			rptid_entry->port_id[2], rptid_entry->port_id[1],
			rptid_entry->port_id[0]));
	} else if (rptid_entry->format == 1) {
		vp_idx = LSB(stat);
		DEBUG15(printk("%s:format 1: scsi(%ld): VP[%d] enabled "
		    "- status %d - "
		    "with port id %02x%02x%02x\n", __func__, vha->host_no,
		    vp_idx, MSB(stat),
		    rptid_entry->port_id[2], rptid_entry->port_id[1],
		    rptid_entry->port_id[0]));
		vp = vha;
		/* The physical port (index 0) only needs registration. */
		if (vp_idx == 0 && (MSB(stat) != 1))
			goto reg_needed;

		if (MSB(stat) == 1) {
			DEBUG2(printk("scsi(%ld): Could not acquire ID for "
			    "VP[%d].\n", vha->host_no, vp_idx));
			return;
		}

		/* Find the vport matching the reported index. */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list)
			if (vp_idx == vp->vp_idx)
				break;
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		if (!vp)
			return;

		vp->d_id.b.domain = rptid_entry->port_id[2];
		vp->d_id.b.area =  rptid_entry->port_id[1];
		vp->d_id.b.al_pa = rptid_entry->port_id[0];

		/*
		 * Cannot configure here as we are still sitting on the
		 * response queue. Handle it in dpc context.
		 */
		set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);

reg_needed:
		set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
		set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	}
}

/*
 * qla24xx_modify_vp_config
 *	Change VP configuration for vha
 *
 * Input:
 *	vha = adapter block pointer.
 *
 * Returns:
 *	qla2xxx local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla24xx_modify_vp_config(scsi_qla_host_t *vha)
{
	int		rval;
	struct vp_config_entry_24xx *vpmod;
	dma_addr_t	vpmod_dma;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	/* This can be called by the parent */

	vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
	if (!vpmod) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Modify VP "
		    "IOCB.\n", __func__, vha->host_no));
		return QLA_MEMORY_ALLOC_FAILED;
	}

	memset(vpmod, 0, sizeof(struct vp_config_entry_24xx));
	vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
	vpmod->entry_count = 1;
	vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
	vpmod->vp_count = 1;
	vpmod->vp_index1 = vha->vp_idx;
	vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
	memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
	memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
	vpmod->entry_count = 1;

	rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed to issue VP config IOCB"
			"(%x).\n", __func__, base_vha->host_no, rval));
	} else if (vpmod->comp_status != 0) {
		/*
		 * NOTE(review): this first branch compares comp_status against
		 * raw 0 while the next compares it against CS_COMPLETE; the
		 * double check looks redundant/suspect — confirm against the
		 * firmware interface before changing.
		 */
		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
			"-- error status (%x).\n", __func__, base_vha->host_no,
			vpmod->comp_status));
		rval = QLA_FUNCTION_FAILED;
	} else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
		    "-- completion status (%x).\n", __func__, base_vha->host_no,
		    le16_to_cpu(vpmod->comp_status)));
		rval = QLA_FUNCTION_FAILED;
	} else {
		/* EMPTY */
		DEBUG11(printk("%s(%ld): done.\n", __func__,
		    base_vha->host_no));
		fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
	}
	dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);

	return rval;
}

/*
 * qla24xx_control_vp
 *	Enable a virtual port for given host
 *
 * Input:
 *	ha = adapter block pointer.
 *	vhba = virtual adapter (unused)
 *	index = index number for enabled VP
 *
 * Returns:
 *	qla2xxx local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
{
	int		rval;
	int		map, pos;
	struct vp_ctrl_entry_24xx   *vce;
	dma_addr_t	vce_dma;
	struct qla_hw_data *ha = vha->hw;
	int	vp_index = vha->vp_idx;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	DEBUG11(printk("%s(%ld): entered. Enabling index %d\n", __func__,
	    vha->host_no, vp_index));

	if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
		return QLA_PARAMETER_ERROR;

	vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
	if (!vce) {
		DEBUG2_3(printk("%s(%ld): "
		    "failed to allocate VP Control IOCB.\n", __func__,
		    base_vha->host_no));
		return QLA_MEMORY_ALLOC_FAILED;
	}
	memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));

	vce->entry_type = VP_CTRL_IOCB_TYPE;
	vce->entry_count = 1;
	vce->command = cpu_to_le16(cmd);
	vce->vp_count = __constant_cpu_to_le16(1);

	/* index map in firmware starts with 1; decrement index
	 * this is ok as we never use index 0
	 */
	map = (vp_index - 1) / 8;
	pos = (vp_index - 1) & 7;
	mutex_lock(&ha->vport_lock);
	vce->vp_idx_map[map] |= 1 << pos;
	mutex_unlock(&ha->vport_lock);

	rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed to issue VP control IOCB"
		    "(%x).\n", __func__, base_vha->host_no, rval));
		printk("%s(%ld): failed to issue VP control IOCB"
		    "(%x).\n", __func__, base_vha->host_no, rval);
	} else if (vce->entry_status != 0) {
		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
		    "-- error status (%x).\n", __func__, base_vha->host_no,
		    vce->entry_status));
		printk("%s(%ld): failed to complete IOCB "
		    "-- error status (%x).\n", __func__, base_vha->host_no,
		    vce->entry_status);
		rval = QLA_FUNCTION_FAILED;
	} else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
		    "-- completion status (%x).\n", __func__, base_vha->host_no,
		    le16_to_cpu(vce->comp_status)));
		printk("%s(%ld): failed to complete IOCB "
		    "-- completion status (%x).\n", __func__, base_vha->host_no,
		    le16_to_cpu(vce->comp_status));
		rval = QLA_FUNCTION_FAILED;
	} else {
		DEBUG2(printk("%s(%ld): done.\n", __func__, base_vha->host_no));
	}

	dma_pool_free(ha->s_dma_pool, vce, vce_dma);

	return rval;
}

/*
 * qla2x00_send_change_request
 *	Receive or disable RSCN request from fabric controller
 *
 * Input:
 *	ha = adapter block pointer
 *	format = registration format:
 *		0 - Reserved
 *		1 - Fabric detected registration
 *		2 - N_port detected registration
 *		3 - Full registration
 *		FF - clear registration
 *	vp_idx = Virtual port index
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel Context
 */
int
qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
			    uint16_t vp_idx)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	/*
	 * This command is implicitly executed by firmware during login for the
	 * physical hosts
	 */
	if (vp_idx == 0)
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
	mcp->mb[1] = format;
	mcp->mb[9] = vp_idx;
	mcp->out_mb = MBX_9|MBX_1|MBX_0;
	mcp->in_mb = MBX_0|MBX_1;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/* NOTE(review): BIT_1 is used here as the caller-visible error code. */
	if (rval == QLA_SUCCESS) {
		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
			rval = BIT_1;
		}
	} else
		rval = BIT_1;

	return rval;
}

/*
 * qla2x00_dump_ram() - dump RISC RAM starting at @addr into the @req_dma
 * buffer.  FWI2-capable parts (or addresses above 64K) use the extended
 * MBC_DUMP_RISC_RAM_EXTENDED form with a 32-bit size.
 */
int
qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
    uint32_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
		mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
		mcp->mb[8] = MSW(addr);
		mcp->out_mb = MBX_8|MBX_0;
	} else {
		mcp->mb[0] = MBC_DUMP_RISC_RAM;
		mcp->out_mb = MBX_0;
	}
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->mb[4] = MSW(size);
		mcp->mb[5] = LSW(size);
		mcp->out_mb |= MBX_5|MBX_4;
	} else {
		mcp->mb[4] = LSW(size);
		mcp->out_mb |= MBX_4;
	}

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
		    vha->host_no, rval, mcp->mb[0]));
	} else {
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
	}

	return rval;
}

/* 84XX Support **************************************************************/

/* Request and response views of the same DMA buffer used for Verify Chip. */
struct cs84xx_mgmt_cmd {
	union {
		struct verify_chip_entry_84xx req;
		struct verify_chip_rsp_84xx rsp;
	} p;
};

/*
 * qla84xx_verify_chip() - issue the ISP84xx Verify Chip IOCB, optionally
 * updating the chip firmware.  On a firmware-update failure the command is
 * retried once with the update suppressed.  Completion/failure codes are
 * returned in status[0]/status[1].
 */
int
qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
{
	int rval, retry;
	struct cs84xx_mgmt_cmd *mn;
	dma_addr_t mn_dma;
	uint16_t options;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;

	DEBUG16(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (mn == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Verify ISP84XX "
		    "IOCB.\n", __func__, vha->host_no));
		return QLA_MEMORY_ALLOC_FAILED;
	}

	/* Force Update? */
	options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
	/* Diagnostic firmware? */
	/* options |= MENLO_DIAG_FW; */
	/* We update the firmware with only one data sequence */
	options |= VCO_END_OF_DATA;

	do {
		retry = 0;
		memset(mn, 0, sizeof(*mn));
		mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
		mn->p.req.entry_count = 1;
		mn->p.req.options = cpu_to_le16(options);

		DEBUG16(printk("%s(%ld): Dump of Verify Request.\n", __func__,
		    vha->host_no));
		DEBUG16(qla2x00_dump_buffer((uint8_t *)mn, sizeof(*mn)));

		rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
		if (rval != QLA_SUCCESS) {
			DEBUG2_16(printk("%s(%ld): failed to issue Verify "
			    "IOCB (%x).\n", __func__, vha->host_no, rval));
			goto verify_done;
		}

		DEBUG16(printk("%s(%ld): Dump of Verify Response.\n", __func__,
		    vha->host_no));
		DEBUG16(qla2x00_dump_buffer((uint8_t *)mn, sizeof(*mn)));

		status[0] = le16_to_cpu(mn->p.rsp.comp_status);
		status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
		    le16_to_cpu(mn->p.rsp.failure_code) : 0;
		DEBUG2_16(printk("%s(%ld): cs=%x fc=%x\n", __func__,
		    vha->host_no, status[0], status[1]));

		if (status[0] != CS_COMPLETE) {
			rval = QLA_FUNCTION_FAILED;
			if (!(options & VCO_DONT_UPDATE_FW)) {
				/* Retry once without touching the firmware. */
				DEBUG2_16(printk("%s(%ld): Firmware update "
				    "failed. Retrying without update "
				    "firmware.\n", __func__, vha->host_no));
				options |= VCO_DONT_UPDATE_FW;
				options &= ~VCO_FORCE_UPDATE;
				retry = 1;
			}
		} else {
			DEBUG2_16(printk("%s(%ld): firmware updated to %x.\n",
			    __func__, vha->host_no,
			    le32_to_cpu(mn->p.rsp.fw_ver)));

			/* NOTE: we only update OP firmware. */
			spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
			ha->cs84xx->op_fw_version =
			    le32_to_cpu(mn->p.rsp.fw_ver);
			spin_unlock_irqrestore(&ha->cs84xx->access_lock,
			    flags);
		}
	} while (retry);

verify_done:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (rval != QLA_SUCCESS) {
		DEBUG2_16(printk("%s(%ld): failed=%x.\n", __func__,
		    vha->host_no, rval));
	} else {
		DEBUG16(printk("%s(%ld): done.\n", __func__, vha->host_no));
	}

	return rval;
}

/*
 * qla25xx_init_req_que() - initialize a multiqueue request queue via
 * MBC_INITIALIZE_MULTIQ and wire up the queue's in/out register pointers.
 * The in/out doorbells are zeroed only for a brand-new queue (BIT_0 clear
 * in req->options).
 */
int
qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int rval;
	unsigned long flags;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct device_reg_25xxmq __iomem *reg;
	struct qla_hw_data *ha = vha->hw;

	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
	mcp->mb[1] = req->options;
	mcp->mb[2] = MSW(LSD(req->dma));
	mcp->mb[3] = LSW(LSD(req->dma));
	mcp->mb[6] = MSW(MSD(req->dma));
	mcp->mb[7] = LSW(MSD(req->dma));
	mcp->mb[5] = req->length;
	if (req->rsp)
		mcp->mb[10] = req->rsp->id;
	mcp->mb[12] = req->qos;
	mcp->mb[11] = req->vp_idx;
	mcp->mb[13] = req->rid;

	reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
		QLA_QUE_PAGE * req->id);

	mcp->mb[4] = req->id;
	/* que in ptr index */
	mcp->mb[8] = 0;
	/* que out ptr index */
	mcp->mb[9] = 0;
	mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
			MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = 60;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (!(req->options & BIT_0)) {
		WRT_REG_DWORD(&reg->req_q_in, 0);
		WRT_REG_DWORD(&reg->req_q_out, 0);
	}
	req->req_q_in = &reg->req_q_in;
	req->req_q_out = &reg->req_q_out;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x mb0=%x.\n",
			__func__, vha->host_no, rval, mcp->mb[0]));
	return rval;
}

/*
 * qla25xx_init_rsp_que() - initialize a multiqueue response queue via
 * MBC_INITIALIZE_MULTIQ; the queue's MSI-X vector goes in mb[14].
 */
int
qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int rval;
	unsigned long flags;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct device_reg_25xxmq __iomem *reg;
	struct qla_hw_data *ha = vha->hw;

	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
	mcp->mb[1] = rsp->options;
	mcp->mb[2] = MSW(LSD(rsp->dma));
	mcp->mb[3] = LSW(LSD(rsp->dma));
	mcp->mb[6] = MSW(MSD(rsp->dma));
	mcp->mb[7] = LSW(MSD(rsp->dma));
	mcp->mb[5] = rsp->length;
	mcp->mb[14] = rsp->msix->entry;
	mcp->mb[13] = rsp->rid;

	reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
		QLA_QUE_PAGE * rsp->id);

	mcp->mb[4] = rsp->id;
	/* que in ptr index */
	mcp->mb[8] = 0;
	/* que out ptr index */
	mcp->mb[9] = 0;
	mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
			|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = 60;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (!(rsp->options & BIT_0)) {
		WRT_REG_DWORD(&reg->rsp_q_out, 0);
		WRT_REG_DWORD(&reg->rsp_q_in, 0);
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x "
			"mb0=%x.\n", __func__,
			vha->host_no, rval, mcp->mb[0]));
	return rval;
}

/*
 * qla81xx_idc_ack() - acknowledge an Inter-Driver Communication message by
 * echoing the received IDC registers (@mb) back via MBC_IDC_ACK.
 */
int
qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	mcp->mb[0] = MBC_IDC_ACK;
	memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
		    vha->host_no, rval, mcp->mb[0]));
	} else {
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
	}

	return rval;
}

/*
 * qla81xx_fac_get_sector_size() - Flash Access Control: query the flash
 * sector size (returned in mb[1]) on ISP81xx parts.
 */
int
qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_QLA81XX(vha->hw))
		return QLA_FUNCTION_FAILED;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
	mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
		    __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
	} else {
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
		*sector_size = mcp->mb[1];
	}

	return rval;
}

/*
 * qla81xx_fac_do_write_enable() - Flash Access Control: enable (@enable
 * non-zero) or write-protect the flash on ISP81xx parts.
 */
int
qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_QLA81XX(vha->hw))
		return QLA_FUNCTION_FAILED;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
	mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
	    FAC_OPT_CMD_WRITE_PROTECT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
		    __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
	} else {
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
	}

	return rval;
}

/*
 * qla81xx_fac_erase_sector() - Flash Access Control: erase the flash sector
 * range [@start, @finish] on ISP81xx parts.
 */
int
qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_QLA81XX(vha->hw))
		return QLA_FUNCTION_FAILED;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
	mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
	mcp->mb[2] = LSW(start);
	mcp->mb[3] = MSW(start);
	mcp->mb[4] = LSW(finish);
	mcp->mb[5] = MSW(finish);
	mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
		    "mb[2]=%x.\n", __func__, vha->host_no, rval, mcp->mb[0],
		    mcp->mb[1], mcp->mb[2]));
	} else {
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
	}

	return rval;
}

/*
 * qla81xx_restart_mpi_firmware() - ask the firmware to restart the MPI
 * processor via MBC_RESTART_MPI_FW.
 */
int
qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
{
	int rval = 0;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	mcp->mb[0] = MBC_RESTART_MPI_FW;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0|MBX_1;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x mb[1]=0x%x.\n",
		    __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
	} else {
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
	}

	return rval;
}

/*
 * qla2x00_read_edc() - read EDC (SFP diagnostic) data via MBC_READ_SFP.
 * When BIT_0 of @opt selects register mode, the single result byte is
 * returned through @sfp instead of the DMA buffer.
 */
int
qla2x00_read_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr,
    dma_addr_t sfp_dma, uint8_t *sfp, uint16_t len, uint16_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	mcp->mb[0] = MBC_READ_SFP;
	mcp->mb[1] = dev;
	mcp->mb[2] = MSW(sfp_dma);
	mcp->mb[3] = LSW(sfp_dma);
	mcp->mb[6] = MSW(MSD(sfp_dma));
	mcp->mb[7] = LSW(MSD(sfp_dma));
	mcp->mb[8] = len;
	mcp->mb[9] = adr;
	mcp->mb[10] = opt;
	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (opt & BIT_0)
		if (sfp)
			*sfp = mcp->mb[8];

	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
		    vha->host_no, rval, mcp->mb[0]));
	} else {
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
	}

	return rval;
}

/*
 * qla2x00_write_edc() - write EDC (SFP diagnostic) data via MBC_WRITE_SFP.
 * When BIT_0 of @opt selects register mode, the byte to write is taken
 * from @sfp rather than the DMA buffer.
 */
int
qla2x00_write_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr,
    dma_addr_t sfp_dma, uint8_t *sfp, uint16_t len, uint16_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	if (opt & BIT_0)
		if (sfp)
			len = *sfp;

	mcp->mb[0] = MBC_WRITE_SFP;
	mcp->mb[1] = dev;
	mcp->mb[2] = MSW(sfp_dma);
	mcp->mb[3] = LSW(sfp_dma);
	mcp->mb[6] = MSW(MSD(sfp_dma));
	mcp->mb[7] = LSW(MSD(sfp_dma));
	mcp->mb[8] = len;
	mcp->mb[9] = adr;
	mcp->mb[10] = opt;
	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
		    vha->host_no, rval, mcp->mb[0]));
	} else {
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
	}

	return rval;
}

/*
 * qla2x00_get_xgmac_stats() - fetch 10GbE MAC statistics into @stats_dma.
 * Sizes cross the mailbox interface in 32-bit words (hence the >> 2 / << 2
 * conversions); the actual byte count returned is stored in @actual_size.
 */
int
qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
    uint16_t size_in_bytes, uint16_t *actual_size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_QLA8XXX_TYPE(vha->hw))
		return QLA_FUNCTION_FAILED;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	mcp->mb[0] = MBC_GET_XGMAC_STATS;
	mcp->mb[2] = MSW(stats_dma);
	mcp->mb[3] = LSW(stats_dma);
	mcp->mb[6] = MSW(MSD(stats_dma));
	mcp->mb[7] = LSW(MSD(stats_dma));
	mcp->mb[8] = size_in_bytes >> 2;
	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x "
		    "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval,
		    mcp->mb[0], mcp->mb[1], mcp->mb[2]));
	} else {
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));

		*actual_size = mcp->mb[2] << 2;
	}

	return rval;
}

/*
 * qla2x00_get_dcbx_params() - read the DCBX TLV parameter block into the
 * @tlv_dma buffer on 8xxx-type (FCoE-capable) adapters.
 */
int
qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
    uint16_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_QLA8XXX_TYPE(vha->hw))
		return QLA_FUNCTION_FAILED;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	mcp->mb[0] = MBC_GET_DCBX_PARAMS;
	mcp->mb[1] = 0;
	mcp->mb[2] = MSW(tlv_dma);
	mcp->mb[3] = LSW(tlv_dma);
	mcp->mb[6] = MSW(MSD(tlv_dma));
	mcp->mb[7] = LSW(MSD(tlv_dma));
	mcp->mb[8] = size;
	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x "
		    "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no,
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]));
	} else {
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
	}

	return rval;
}

/*
 * qla2x00_read_ram_word() - read one 32-bit word of RISC RAM at @risc_addr
 * via MBC_READ_RAM_EXTENDED; the word (mb[3]:mb[2]) is stored in @data.
 */
int
qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_FWI2_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	mcp->mb[0] = MBC_READ_RAM_EXTENDED;
	mcp->mb[1] = LSW(risc_addr);
	mcp->mb[8] = MSW(risc_addr);
	mcp->out_mb = MBX_8|MBX_1|MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_0;
	mcp->tov = 30;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
		    vha->host_no, rval, mcp->mb[0]));
	} else {
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
		*data = mcp->mb[3] << 16 | mcp->mb[2];
	}

	return rval;
}

/*
 * qla2x00_loopback_test() - run the MBC_DIAGNOSTIC_LOOP_BACK diagnostic
 * using the DMA buffers described by @mreq; all 64 mailbox result bytes
 * are copied back to @mresp for the caller to interpret.
 */
int
qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
    uint16_t *mresp)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	uint32_t iter_cnt = 0x1;

	DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no));

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
	mcp->mb[1] = mreq->options | BIT_6;	/* BIT_6 selects 64-bit addressing. */

	/* transfer count */
	mcp->mb[10] = LSW(mreq->transfer_size);
	mcp->mb[11] = MSW(mreq->transfer_size);

	/* send data address */
	mcp->mb[14] = LSW(mreq->send_dma);
	mcp->mb[15] = MSW(mreq->send_dma);
	mcp->mb[20] = LSW(MSD(mreq->send_dma));
	mcp->mb[21] = MSW(MSD(mreq->send_dma));

	/* receive data address */
	mcp->mb[16] = LSW(mreq->rcv_dma);
	mcp->mb[17] = MSW(mreq->rcv_dma);
	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));

	/* Iteration count */
	mcp->mb[18] = LSW(iter_cnt);
	mcp->mb[19] = MSW(iter_cnt);

	mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
	    MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
	if (IS_QLA8XXX_TYPE(vha->hw))
		mcp->out_mb |= MBX_2;
	mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;

	mcp->buf_size = mreq->transfer_size;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		DEBUG2(printk(KERN_WARNING
		    "(%ld): failed=%x mb[0]=0x%x "
		    "mb[1]=0x%x mb[2]=0x%x mb[3]=0x%x mb[18]=0x%x "
		    "mb[19]=0x%x.\n",
		    vha->host_no, rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
		    mcp->mb[3], mcp->mb[18], mcp->mb[19]));
	} else {
		DEBUG2(printk(KERN_WARNING
		    "scsi(%ld): done.\n", vha->host_no));
	}

	/* Copy mailbox information */
	memcpy( mresp, mcp->mb, 64);
	return rval;
}

/*
 * qla2x00_echo_test() - run the MBC_DIAGNOSTIC_ECHO diagnostic using the
 * DMA buffers described by @mreq; the 64 mailbox result bytes are copied
 * to @mresp.  On 8xxx-type parts the FCoE FCF index is passed in mb[2].
 */
int
qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
    uint16_t *mresp)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no));

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
	mcp->mb[1] = mreq->options | BIT_6;	/* BIT_6 specifies 64bit address */
	if (IS_QLA8XXX_TYPE(ha)) {
		mcp->mb[1] |= BIT_15;
		mcp->mb[2] = vha->fcoe_fcf_idx;
	}
	mcp->mb[16] = LSW(mreq->rcv_dma);
	mcp->mb[17] = MSW(mreq->rcv_dma);
	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));

	mcp->mb[10] = LSW(mreq->transfer_size);

	mcp->mb[14] = LSW(mreq->send_dma);
	mcp->mb[15] = MSW(mreq->send_dma);
	mcp->mb[20] = LSW(MSD(mreq->send_dma));
	mcp->mb[21] = MSW(MSD(mreq->send_dma));

	mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
	    MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
	if (IS_QLA8XXX_TYPE(ha))
		mcp->out_mb |= MBX_2;

	mcp->in_mb = MBX_0;
	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
		mcp->in_mb |= MBX_1;
	if (IS_QLA8XXX_TYPE(ha))
		mcp->in_mb |= MBX_3;

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
	mcp->buf_size = mreq->transfer_size;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		DEBUG2(printk(KERN_WARNING
		    "(%ld): failed=%x mb[0]=0x%x mb[1]=0x%x.\n",
		    vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
	} else {
		DEBUG2(printk(KERN_WARNING
		    "scsi(%ld): done.\n", vha->host_no));
	}

	/* Copy mailbox information */
	memcpy(mresp, mcp->mb, 64);
	return rval;
}

/*
 * qla84xx_reset_chip() - reset the ISP84xx chip via MBC_ISP84XX_RESET;
 * @enable_diagnostic is passed through in mb[1].
 */
int
qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	DEBUG16(printk("%s(%ld): enable_diag=%d entered.\n", __func__,
		ha->host_no, enable_diagnostic));

	mcp->mb[0] = MBC_ISP84XX_RESET;
	mcp->mb[1] = enable_diagnostic;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
	rval = qla2x00_mailbox_command(ha, mcp);

	if (rval != QLA_SUCCESS)
		DEBUG16(printk("%s(%ld): failed=%x.\n", __func__, ha->host_no,
			rval));
	else
		DEBUG16(printk("%s(%ld): done.\n", __func__, ha->host_no));

	return rval;
}

/*
 * qla2x00_write_ram_word() - write one 32-bit word @data to RISC RAM at
 * @risc_addr via MBC_WRITE_RAM_WORD_EXTENDED.
 */
int
qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_FWI2_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

	mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
	mcp->mb[1] = LSW(risc_addr);
	mcp->mb[2] = LSW(data);
	mcp->mb[3] = MSW(data);
	mcp->mb[8] = MSW(risc_addr);
	mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = 30;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
		    vha->host_no, rval, mcp->mb[0]));
	} else {
		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
	}

	return rval;
}

/*
 * qla81xx_write_mpi_register() - issue MBC_WRITE_MPI_REGISTER by banging
 * the mailbox registers directly and polling for the completion interrupt,
 * bypassing the normal qla2x00_mailbox_command() path.
 */
int
qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
{
	int rval;
	uint32_t stat, timer;
	uint16_t mb0 = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	rval = QLA_SUCCESS;

	DEBUG11(qla_printk(KERN_INFO, ha,
	    "%s(%ld): entered.\n", __func__, vha->host_no));

	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	/* Write the MBC data to the registers */
	WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
	WRT_REG_WORD(&reg->mailbox1, mb[0]);
	WRT_REG_WORD(&reg->mailbox2, mb[1]);
	WRT_REG_WORD(&reg->mailbox3, mb[2]);
	WRT_REG_WORD(&reg->mailbox4, mb[3]);

	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);

	/* Poll for MBC interrupt */
	for (timer = 6000000; timer; timer--) {
		/* Check for pending interrupts. */
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_INT) {
			stat &= 0xff;

			if (stat == 0x1 || stat == 0x2 ||
			    stat == 0x10 || stat == 0x11) {
				set_bit(MBX_INTERRUPT,
				    &ha->mbx_cmd_flags);
				mb0 = RD_REG_WORD(&reg->mailbox0);
				WRT_REG_DWORD(&reg->hccr,
				    HCCRX_CLR_RISC_INT);
				RD_REG_DWORD(&reg->hccr);
				break;
			}
		}
		udelay(5);
	}

	if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
		rval = mb0 & MBS_MASK;
	else
		rval = QLA_FUNCTION_FAILED;

	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n",
		    __func__, vha->host_no, rval, mb[0]));
	} else {
		DEBUG11(printk(KERN_INFO
		    "%s(%ld): done.\n", __func__, vha->host_no));
	}

	return rval;
}

/*
 * qla2x00_get_data_rate() - query the current link data rate via
 * MBC_DATA_RATE; a valid rate (mb[1] != 0x7) is cached in
 * ha->link_data_rate.
 */
int
qla2x00_get_data_rate(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_FWI2_CAPABLE(ha))
		return QLA_FUNCTION_FAILED;

	DEBUG11(qla_printk(KERN_INFO, ha,
	    "%s(%ld): entered.\n", __func__, vha->host_no));

	mcp->mb[0] = MBC_DATA_RATE;
	mcp->mb[1] = 0;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n",
		    __func__, vha->host_no, rval, mcp->mb[0]));
	} else {
		DEBUG11(printk(KERN_INFO
		    "%s(%ld): done.\n", __func__, vha->host_no));
		if (mcp->mb[1] != 0x7)
			ha->link_data_rate = mcp->mb[1];
	}

	return rval;
}

/*
 * qla81xx_get_port_config() - read the ISP81xx port configuration
 * registers (mb[1..4]) into the caller's @mb array.
 */
int
qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	DEBUG11(printk(KERN_INFO
	    "%s(%ld): entered.\n", __func__, vha->host_no));

	if (!IS_QLA81XX(ha))
		return QLA_FUNCTION_FAILED;
	mcp->mb[0] = MBC_GET_PORT_CONFIG;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk(KERN_WARNING
		    "%s(%ld): failed=%x (%x).\n", __func__,
		    vha->host_no, rval, mcp->mb[0]));
	} else {
		/* Copy all bits to preserve original value */
		memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);

		DEBUG11(printk(KERN_INFO
		    "%s(%ld): done.\n", __func__, vha->host_no));
	}
	return rval;
}

/*
 * qla81xx_set_port_config() - write the four port configuration registers
 * supplied in @mb via MBC_SET_PORT_CONFIG.
 */
int
qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	DEBUG11(printk(KERN_INFO
	    "%s(%ld): entered.\n", __func__, vha->host_no));

	mcp->mb[0] = MBC_SET_PORT_CONFIG;
	/* Copy all bits to preserve original setting */
	memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk(KERN_WARNING
		    "%s(%ld): failed=%x (%x).\n", __func__,
		    vha->host_no, rval, mcp->mb[0]));
	} else
		DEBUG11(printk(KERN_INFO
		    "%s(%ld): done.\n", __func__, vha->host_no));

	return rval;
}

/*
 * qla24xx_set_fcp_prio() - set (BIT_1) or clear (BIT_2) the FCP priority
 * for @loop_id via MBC_PORT_PARAMS, depending on ha->flags.fcp_prio_enabled.
 * Raw mailbox statuses are copied back through @mb when non-NULL.
 */
int
qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
		uint16_t *mb)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
		return QLA_FUNCTION_FAILED;

	DEBUG11(printk(KERN_INFO
	    "%s(%ld): entered.\n", __func__, ha->host_no));

	mcp->mb[0] = MBC_PORT_PARAMS;
	mcp->mb[1] = loop_id;
	if (ha->flags.fcp_prio_enabled)
		mcp->mb[2] = BIT_1;
	else
		mcp->mb[2] = BIT_2;
	mcp->mb[4] = priority & 0xf;
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
	mcp->tov = 30;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (mb != NULL) {
		mb[0] = mcp->mb[0];
		mb[1] = mcp->mb[1];
		mb[3] = mcp->mb[3];
		mb[4] = mcp->mb[4];
	}

	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(printk(KERN_WARNING
		    "%s(%ld): failed=%x.\n", __func__,
		    vha->host_no, rval));
	} else {
		DEBUG11(printk(KERN_INFO
		    "%s(%ld): done.\n", __func__, vha->host_no));
	}

	return rval;
}

/*
 * qla82xx_mbx_intr_enable() - enable firmware interrupt delivery via
 * MBC_TOGGLE_INTERRUPT (mb[1] = 1).
 */
int
qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_FWI2_CAPABLE(ha))
		return QLA_FUNCTION_FAILED;

	DEBUG11(qla_printk(KERN_INFO, ha,
		"%s(%ld): entered.\n", __func__, vha->host_no));

	memset(mcp, 0, sizeof(mbx_cmd_t));
	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
	mcp->mb[1] = 1;

	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = 30;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(qla_printk(KERN_WARNING, ha,
			"%s(%ld): failed=%x mb[0]=%x.\n", __func__,
			vha->host_no, rval, mcp->mb[0]));
	} else {
		DEBUG11(qla_printk(KERN_INFO, ha,
			"%s(%ld): done.\n", __func__, vha->host_no));
	}

	return rval;
}

/*
 * qla82xx_mbx_intr_disable() - disable firmware interrupt delivery via
 * MBC_TOGGLE_INTERRUPT (mb[1] = 0); ISP82xx only.
 */
int
qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_QLA82XX(ha))
		return QLA_FUNCTION_FAILED;

	DEBUG11(qla_printk(KERN_INFO, ha,
		"%s(%ld): entered.\n", __func__, vha->host_no));

	memset(mcp, 0, sizeof(mbx_cmd_t));
	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
	mcp->mb[1] = 0;

	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = 30;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		DEBUG2_3_11(qla_printk(KERN_WARNING, ha,
			"%s(%ld): failed=%x mb[0]=%x.\n", __func__,
			vha->host_no, rval, mcp->mb[0]));
	} else {
		DEBUG11(qla_printk(KERN_INFO, ha,
			"%s(%ld): done.\n", __func__, vha->host_no));
	}

	return rval;
}
gpl-2.0
nvllsvm/GZDoom-Android
doom/src/main/jni/SDL/src/audio/directsound/SDL_directsound.c
55
17135
/* Simple DirectMedia Layer Copyright (C) 1997-2012 Sam Lantinga <slouken@libsdl.org> This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #include "SDL_config.h" #if SDL_AUDIO_DRIVER_DSOUND /* Allow access to a raw mixing buffer */ #include "SDL_timer.h" #include "SDL_loadso.h" #include "SDL_audio.h" #include "../SDL_audio_c.h" #include "SDL_directsound.h" /* DirectX function pointers for audio */ static void* DSoundDLL = NULL; typedef HRESULT(WINAPI*fnDirectSoundCreate8)(LPGUID,LPDIRECTSOUND*,LPUNKNOWN); typedef HRESULT(WINAPI*fnDirectSoundEnumerateW)(LPDSENUMCALLBACKW, LPVOID); typedef HRESULT(WINAPI*fnDirectSoundCaptureEnumerateW)(LPDSENUMCALLBACKW,LPVOID); static fnDirectSoundCreate8 pDirectSoundCreate8 = NULL; static fnDirectSoundEnumerateW pDirectSoundEnumerateW = NULL; static fnDirectSoundCaptureEnumerateW pDirectSoundCaptureEnumerateW = NULL; static void DSOUND_Unload(void) { pDirectSoundCreate8 = NULL; pDirectSoundEnumerateW = NULL; pDirectSoundCaptureEnumerateW = NULL; if (DSoundDLL != NULL) { SDL_UnloadObject(DSoundDLL); DSoundDLL = NULL; } } static int DSOUND_Load(void) { int loaded = 0; DSOUND_Unload(); DSoundDLL = SDL_LoadObject("DSOUND.DLL"); if (DSoundDLL == NULL) { SDL_SetError("DirectSound: failed to load 
DSOUND.DLL"); } else { /* Now make sure we have DirectX 8 or better... */ #define DSOUNDLOAD(f) { \ p##f = (fn##f) SDL_LoadFunction(DSoundDLL, #f); \ if (!p##f) loaded = 0; \ } loaded = 1; /* will reset if necessary. */ DSOUNDLOAD(DirectSoundCreate8); DSOUNDLOAD(DirectSoundEnumerateW); DSOUNDLOAD(DirectSoundCaptureEnumerateW); #undef DSOUNDLOAD if (!loaded) { SDL_SetError("DirectSound: System doesn't appear to have DX8."); } } if (!loaded) { DSOUND_Unload(); } return loaded; } static __inline__ char * utf16_to_utf8(const WCHAR *S) { /* !!! FIXME: this should be UTF-16, not UCS-2! */ return SDL_iconv_string("UTF-8", "UCS-2", (char *)(S), (SDL_wcslen(S)+1)*sizeof(WCHAR)); } static void SetDSerror(const char *function, int code) { static const char *error; static char errbuf[1024]; errbuf[0] = 0; switch (code) { case E_NOINTERFACE: error = "Unsupported interface -- Is DirectX 8.0 or later installed?"; break; case DSERR_ALLOCATED: error = "Audio device in use"; break; case DSERR_BADFORMAT: error = "Unsupported audio format"; break; case DSERR_BUFFERLOST: error = "Mixing buffer was lost"; break; case DSERR_CONTROLUNAVAIL: error = "Control requested is not available"; break; case DSERR_INVALIDCALL: error = "Invalid call for the current state"; break; case DSERR_INVALIDPARAM: error = "Invalid parameter"; break; case DSERR_NODRIVER: error = "No audio device found"; break; case DSERR_OUTOFMEMORY: error = "Out of memory"; break; case DSERR_PRIOLEVELNEEDED: error = "Caller doesn't have priority"; break; case DSERR_UNSUPPORTED: error = "Function not supported"; break; default: SDL_snprintf(errbuf, SDL_arraysize(errbuf), "%s: Unknown DirectSound error: 0x%x", function, code); break; } if (!errbuf[0]) { SDL_snprintf(errbuf, SDL_arraysize(errbuf), "%s: %s", function, error); } SDL_SetError("%s", errbuf); return; } static BOOL CALLBACK FindAllDevs(LPGUID guid, LPCWSTR desc, LPCWSTR module, LPVOID data) { SDL_AddAudioDevice addfn = (SDL_AddAudioDevice) data; if (guid != NULL) { /* 
skip default device */ char *str = utf16_to_utf8(desc); if (str != NULL) { addfn(str); SDL_free(str); /* addfn() makes a copy of this string. */ } } return TRUE; /* keep enumerating. */ } static void DSOUND_DetectDevices(int iscapture, SDL_AddAudioDevice addfn) { if (iscapture) { pDirectSoundCaptureEnumerateW(FindAllDevs, addfn); } else { pDirectSoundEnumerateW(FindAllDevs, addfn); } } static void DSOUND_WaitDevice(_THIS) { DWORD status = 0; DWORD cursor = 0; DWORD junk = 0; HRESULT result = DS_OK; /* Semi-busy wait, since we have no way of getting play notification on a primary mixing buffer located in hardware (DirectX 5.0) */ result = IDirectSoundBuffer_GetCurrentPosition(this->hidden->mixbuf, &junk, &cursor); if (result != DS_OK) { if (result == DSERR_BUFFERLOST) { IDirectSoundBuffer_Restore(this->hidden->mixbuf); } #ifdef DEBUG_SOUND SetDSerror("DirectSound GetCurrentPosition", result); #endif return; } while ((cursor / this->hidden->mixlen) == this->hidden->lastchunk) { /* FIXME: find out how much time is left and sleep that long */ SDL_Delay(1); /* Try to restore a lost sound buffer */ IDirectSoundBuffer_GetStatus(this->hidden->mixbuf, &status); if ((status & DSBSTATUS_BUFFERLOST)) { IDirectSoundBuffer_Restore(this->hidden->mixbuf); IDirectSoundBuffer_GetStatus(this->hidden->mixbuf, &status); if ((status & DSBSTATUS_BUFFERLOST)) { break; } } if (!(status & DSBSTATUS_PLAYING)) { result = IDirectSoundBuffer_Play(this->hidden->mixbuf, 0, 0, DSBPLAY_LOOPING); if (result == DS_OK) { continue; } #ifdef DEBUG_SOUND SetDSerror("DirectSound Play", result); #endif return; } /* Find out where we are playing */ result = IDirectSoundBuffer_GetCurrentPosition(this->hidden->mixbuf, &junk, &cursor); if (result != DS_OK) { SetDSerror("DirectSound GetCurrentPosition", result); return; } } } static void DSOUND_PlayDevice(_THIS) { /* Unlock the buffer, allowing it to play */ if (this->hidden->locked_buf) { IDirectSoundBuffer_Unlock(this->hidden->mixbuf, 
this->hidden->locked_buf, this->hidden->mixlen, NULL, 0); } } static Uint8 * DSOUND_GetDeviceBuf(_THIS) { DWORD cursor = 0; DWORD junk = 0; HRESULT result = DS_OK; DWORD rawlen = 0; /* Figure out which blocks to fill next */ this->hidden->locked_buf = NULL; result = IDirectSoundBuffer_GetCurrentPosition(this->hidden->mixbuf, &junk, &cursor); if (result == DSERR_BUFFERLOST) { IDirectSoundBuffer_Restore(this->hidden->mixbuf); result = IDirectSoundBuffer_GetCurrentPosition(this->hidden->mixbuf, &junk, &cursor); } if (result != DS_OK) { SetDSerror("DirectSound GetCurrentPosition", result); return (NULL); } cursor /= this->hidden->mixlen; #ifdef DEBUG_SOUND /* Detect audio dropouts */ { DWORD spot = cursor; if (spot < this->hidden->lastchunk) { spot += this->hidden->num_buffers; } if (spot > this->hidden->lastchunk + 1) { fprintf(stderr, "Audio dropout, missed %d fragments\n", (spot - (this->hidden->lastchunk + 1))); } } #endif this->hidden->lastchunk = cursor; cursor = (cursor + 1) % this->hidden->num_buffers; cursor *= this->hidden->mixlen; /* Lock the audio buffer */ result = IDirectSoundBuffer_Lock(this->hidden->mixbuf, cursor, this->hidden->mixlen, (LPVOID *) & this->hidden->locked_buf, &rawlen, NULL, &junk, 0); if (result == DSERR_BUFFERLOST) { IDirectSoundBuffer_Restore(this->hidden->mixbuf); result = IDirectSoundBuffer_Lock(this->hidden->mixbuf, cursor, this->hidden->mixlen, (LPVOID *) & this-> hidden->locked_buf, &rawlen, NULL, &junk, 0); } if (result != DS_OK) { SetDSerror("DirectSound Lock", result); return (NULL); } return (this->hidden->locked_buf); } static void DSOUND_WaitDone(_THIS) { Uint8 *stream = DSOUND_GetDeviceBuf(this); /* Wait for the playing chunk to finish */ if (stream != NULL) { SDL_memset(stream, this->spec.silence, this->hidden->mixlen); DSOUND_PlayDevice(this); } DSOUND_WaitDevice(this); /* Stop the looping sound buffer */ IDirectSoundBuffer_Stop(this->hidden->mixbuf); } static void DSOUND_CloseDevice(_THIS) { if (this->hidden != NULL) { 
if (this->hidden->sound != NULL) { if (this->hidden->mixbuf != NULL) { /* Clean up the audio buffer */ IDirectSoundBuffer_Release(this->hidden->mixbuf); this->hidden->mixbuf = NULL; } IDirectSound_Release(this->hidden->sound); this->hidden->sound = NULL; } SDL_free(this->hidden); this->hidden = NULL; } } /* This function tries to create a secondary audio buffer, and returns the number of audio chunks available in the created buffer. */ static int CreateSecondary(_THIS, HWND focus, WAVEFORMATEX * wavefmt) { LPDIRECTSOUND sndObj = this->hidden->sound; LPDIRECTSOUNDBUFFER *sndbuf = &this->hidden->mixbuf; Uint32 chunksize = this->spec.size; const int numchunks = 8; HRESULT result = DS_OK; DSBUFFERDESC format; LPVOID pvAudioPtr1, pvAudioPtr2; DWORD dwAudioBytes1, dwAudioBytes2; /* Try to set primary mixing privileges */ if (focus) { result = IDirectSound_SetCooperativeLevel(sndObj, focus, DSSCL_PRIORITY); } else { result = IDirectSound_SetCooperativeLevel(sndObj, GetDesktopWindow(), DSSCL_NORMAL); } if (result != DS_OK) { SetDSerror("DirectSound SetCooperativeLevel", result); return (-1); } /* Try to create the secondary buffer */ SDL_memset(&format, 0, sizeof(format)); format.dwSize = sizeof(format); format.dwFlags = DSBCAPS_GETCURRENTPOSITION2; if (!focus) { format.dwFlags |= DSBCAPS_GLOBALFOCUS; } else { format.dwFlags |= DSBCAPS_STICKYFOCUS; } format.dwBufferBytes = numchunks * chunksize; if ((format.dwBufferBytes < DSBSIZE_MIN) || (format.dwBufferBytes > DSBSIZE_MAX)) { SDL_SetError("Sound buffer size must be between %d and %d", DSBSIZE_MIN / numchunks, DSBSIZE_MAX / numchunks); return (-1); } format.dwReserved = 0; format.lpwfxFormat = wavefmt; result = IDirectSound_CreateSoundBuffer(sndObj, &format, sndbuf, NULL); if (result != DS_OK) { SetDSerror("DirectSound CreateSoundBuffer", result); return (-1); } IDirectSoundBuffer_SetFormat(*sndbuf, wavefmt); /* Silence the initial audio buffer */ result = IDirectSoundBuffer_Lock(*sndbuf, 0, format.dwBufferBytes, (LPVOID 
*) & pvAudioPtr1, &dwAudioBytes1, (LPVOID *) & pvAudioPtr2, &dwAudioBytes2, DSBLOCK_ENTIREBUFFER); if (result == DS_OK) { SDL_memset(pvAudioPtr1, this->spec.silence, dwAudioBytes1); IDirectSoundBuffer_Unlock(*sndbuf, (LPVOID) pvAudioPtr1, dwAudioBytes1, (LPVOID) pvAudioPtr2, dwAudioBytes2); } /* We're ready to go */ return (numchunks); } typedef struct FindDevGUIDData { const char *devname; GUID guid; int found; } FindDevGUIDData; static BOOL CALLBACK FindDevGUID(LPGUID guid, LPCWSTR desc, LPCWSTR module, LPVOID _data) { if (guid != NULL) { /* skip the default device. */ FindDevGUIDData *data = (FindDevGUIDData *) _data; char *str = utf16_to_utf8(desc); const int match = (SDL_strcmp(str, data->devname) == 0); SDL_free(str); if (match) { data->found = 1; SDL_memcpy(&data->guid, guid, sizeof (data->guid)); return FALSE; /* found it! stop enumerating. */ } } return TRUE; /* keep enumerating. */ } static int DSOUND_OpenDevice(_THIS, const char *devname, int iscapture) { HRESULT result; WAVEFORMATEX waveformat; int valid_format = 0; SDL_AudioFormat test_format = SDL_FirstAudioFormat(this->spec.format); FindDevGUIDData devguid; LPGUID guid = NULL; if (devname != NULL) { devguid.found = 0; devguid.devname = devname; if (iscapture) pDirectSoundCaptureEnumerateW(FindDevGUID, &devguid); else pDirectSoundEnumerateW(FindDevGUID, &devguid); if (!devguid.found) { SDL_SetError("DirectSound: Requested device not found"); return 0; } guid = &devguid.guid; } /* Initialize all variables that we clean on shutdown */ this->hidden = (struct SDL_PrivateAudioData *) SDL_malloc((sizeof *this->hidden)); if (this->hidden == NULL) { SDL_OutOfMemory(); return 0; } SDL_memset(this->hidden, 0, (sizeof *this->hidden)); while ((!valid_format) && (test_format)) { switch (test_format) { case AUDIO_U8: case AUDIO_S16: case AUDIO_S32: this->spec.format = test_format; valid_format = 1; break; } test_format = SDL_NextAudioFormat(); } if (!valid_format) { DSOUND_CloseDevice(this); 
SDL_SetError("DirectSound: Unsupported audio format"); return 0; } SDL_memset(&waveformat, 0, sizeof(waveformat)); waveformat.wFormatTag = WAVE_FORMAT_PCM; waveformat.wBitsPerSample = SDL_AUDIO_BITSIZE(this->spec.format); waveformat.nChannels = this->spec.channels; waveformat.nSamplesPerSec = this->spec.freq; waveformat.nBlockAlign = waveformat.nChannels * (waveformat.wBitsPerSample / 8); waveformat.nAvgBytesPerSec = waveformat.nSamplesPerSec * waveformat.nBlockAlign; /* Update the fragment size as size in bytes */ SDL_CalculateAudioSpec(&this->spec); /* Open the audio device */ result = pDirectSoundCreate8(guid, &this->hidden->sound, NULL); if (result != DS_OK) { DSOUND_CloseDevice(this); SetDSerror("DirectSoundCreate", result); return 0; } /* Create the audio buffer to which we write */ this->hidden->num_buffers = CreateSecondary(this, NULL, &waveformat); if (this->hidden->num_buffers < 0) { DSOUND_CloseDevice(this); return 0; } /* The buffer will auto-start playing in DSOUND_WaitDevice() */ this->hidden->mixlen = this->spec.size; return 1; /* good to go. */ } static void DSOUND_Deinitialize(void) { DSOUND_Unload(); } static int DSOUND_Init(SDL_AudioDriverImpl * impl) { if (!DSOUND_Load()) { return 0; } /* Set the function pointers */ impl->DetectDevices = DSOUND_DetectDevices; impl->OpenDevice = DSOUND_OpenDevice; impl->PlayDevice = DSOUND_PlayDevice; impl->WaitDevice = DSOUND_WaitDevice; impl->WaitDone = DSOUND_WaitDone; impl->GetDeviceBuf = DSOUND_GetDeviceBuf; impl->CloseDevice = DSOUND_CloseDevice; impl->Deinitialize = DSOUND_Deinitialize; return 1; /* this audio target is available. */ } AudioBootStrap DSOUND_bootstrap = { "directsound", "DirectSound", DSOUND_Init, 0 }; #endif /* SDL_AUDIO_DRIVER_DSOUND */ /* vi: set ts=4 sw=4 expandtab: */
gpl-2.0
MSM8226-Samsung/kernel_samsung_msm8226
mm/vmalloc.c
55
71013
/* * linux/mm/vmalloc.c * * Copyright (C) 1993 Linus Torvalds * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002 * Numa awareness, Christoph Lameter, SGI, June 2005 */ #include <linux/vmalloc.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/highmem.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/debugobjects.h> #include <linux/kallsyms.h> #include <linux/list.h> #include <linux/rbtree.h> #include <linux/radix-tree.h> #include <linux/rcupdate.h> #include <linux/pfn.h> #include <linux/kmemleak.h> #include <linux/atomic.h> #include <asm/uaccess.h> #include <asm/tlbflush.h> #include <asm/shmparam.h> /*** Page table manipulation functions ***/ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) { pte_t *pte; #ifdef CONFIG_TIMA_RKP_LAZY_MMU unsigned long do_lazy_mmu = 0; #endif pte = pte_offset_kernel(pmd, addr); #ifdef CONFIG_TIMA_RKP_LAZY_MMU do_lazy_mmu = 1; if (do_lazy_mmu) { spin_lock(&init_mm.page_table_lock); tima_send_cmd2((unsigned int)pmd, TIMA_LAZY_MMU_START, TIMA_LAZY_MMU_CMDID); flush_tlb_l2_page(pmd); spin_unlock(&init_mm.page_table_lock); } #endif do { pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); WARN_ON(!pte_none(ptent) && !pte_present(ptent)); } while (pte++, addr += PAGE_SIZE, addr != end); #ifdef CONFIG_TIMA_RKP_LAZY_MMU if (do_lazy_mmu) { spin_lock(&init_mm.page_table_lock); tima_send_cmd2((unsigned int)pmd, TIMA_LAZY_MMU_STOP, TIMA_LAZY_MMU_CMDID); flush_tlb_l2_page(pmd); spin_unlock(&init_mm.page_table_lock); } #endif } static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end) { pmd_t *pmd; unsigned long next; pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, 
end); if (pmd_none_or_clear_bad(pmd)) continue; vunmap_pte_range(pmd, addr, next); } while (pmd++, addr = next, addr != end); } static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end) { pud_t *pud; unsigned long next; pud = pud_offset(pgd, addr); do { next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) continue; vunmap_pmd_range(pud, addr, next); } while (pud++, addr = next, addr != end); } static void vunmap_page_range(unsigned long addr, unsigned long end) { pgd_t *pgd; unsigned long next; BUG_ON(addr >= end); pgd = pgd_offset_k(addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; vunmap_pud_range(pgd, addr, next); } while (pgd++, addr = next, addr != end); } static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr) { pte_t *pte; #ifdef CONFIG_TIMA_RKP_LAZY_MMU unsigned long do_lazy_mmu = 0; #endif /* * nr is a running index into the array which helps higher level * callers keep track of where we're up to. 
*/ pte = pte_alloc_kernel(pmd, addr); if (!pte) return -ENOMEM; #ifdef CONFIG_TIMA_RKP_LAZY_MMU do_lazy_mmu = 1; if (do_lazy_mmu) { spin_lock(&init_mm.page_table_lock); tima_send_cmd2((unsigned int)pmd, TIMA_LAZY_MMU_START, TIMA_LAZY_MMU_CMDID); flush_tlb_l2_page(pmd); spin_unlock(&init_mm.page_table_lock); } #endif do { struct page *page = pages[*nr]; if (WARN_ON(!pte_none(*pte))) return -EBUSY; if (WARN_ON(!page)) return -ENOMEM; set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); (*nr)++; } while (pte++, addr += PAGE_SIZE, addr != end); #ifdef CONFIG_TIMA_RKP_LAZY_MMU if (do_lazy_mmu) { spin_lock(&init_mm.page_table_lock); tima_send_cmd2((unsigned int)pmd, TIMA_LAZY_MMU_STOP, TIMA_LAZY_MMU_CMDID); flush_tlb_l2_page(pmd); spin_unlock(&init_mm.page_table_lock); } #endif return 0; } static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr) { pmd_t *pmd; unsigned long next; pmd = pmd_alloc(&init_mm, pud, addr); if (!pmd) return -ENOMEM; do { next = pmd_addr_end(addr, end); if (vmap_pte_range(pmd, addr, next, prot, pages, nr)) return -ENOMEM; } while (pmd++, addr = next, addr != end); return 0; } static int vmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr) { pud_t *pud; unsigned long next; pud = pud_alloc(&init_mm, pgd, addr); if (!pud) return -ENOMEM; do { next = pud_addr_end(addr, end); if (vmap_pmd_range(pud, addr, next, prot, pages, nr)) return -ENOMEM; } while (pud++, addr = next, addr != end); return 0; } /* * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and * will have pfns corresponding to the "pages" array. * * Ie. 
pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N] */ static int vmap_page_range_noflush(unsigned long start, unsigned long end, pgprot_t prot, struct page **pages) { pgd_t *pgd; unsigned long next; unsigned long addr = start; int err = 0; int nr = 0; BUG_ON(addr >= end); pgd = pgd_offset_k(addr); do { next = pgd_addr_end(addr, end); err = vmap_pud_range(pgd, addr, next, prot, pages, &nr); if (err) return err; } while (pgd++, addr = next, addr != end); return nr; } static int vmap_page_range(unsigned long start, unsigned long end, pgprot_t prot, struct page **pages) { int ret; ret = vmap_page_range_noflush(start, end, prot, pages); flush_cache_vmap(start, end); return ret; } int is_vmalloc_or_module_addr(const void *x) { /* * ARM, x86-64 and sparc64 put modules in a special place, * and fall back on vmalloc() if that fails. Others * just put it in the vmalloc space. */ #if defined(CONFIG_MODULES) && defined(MODULES_VADDR) unsigned long addr = (unsigned long)x; if (addr >= MODULES_VADDR && addr < MODULES_END) return 1; #endif return is_vmalloc_addr(x); } /* * Walk a vmap address to the struct page it maps. */ struct page *vmalloc_to_page(const void *vmalloc_addr) { unsigned long addr = (unsigned long) vmalloc_addr; struct page *page = NULL; pgd_t *pgd = pgd_offset_k(addr); /* * XXX we might need to change this if we add VIRTUAL_BUG_ON for * architectures that do not vmalloc module space */ VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr)); if (!pgd_none(*pgd)) { pud_t *pud = pud_offset(pgd, addr); if (!pud_none(*pud)) { pmd_t *pmd = pmd_offset(pud, addr); if (!pmd_none(*pmd)) { pte_t *ptep, pte; ptep = pte_offset_map(pmd, addr); pte = *ptep; if (pte_present(pte)) page = pte_page(pte); pte_unmap(ptep); } } } return page; } EXPORT_SYMBOL(vmalloc_to_page); /* * Map a vmalloc()-space virtual address to the physical page frame number. 
*/ unsigned long vmalloc_to_pfn(const void *vmalloc_addr) { return page_to_pfn(vmalloc_to_page(vmalloc_addr)); } EXPORT_SYMBOL(vmalloc_to_pfn); /*** Global kva allocator ***/ #define VM_LAZY_FREE 0x01 #define VM_LAZY_FREEING 0x02 #define VM_VM_AREA 0x04 struct vmap_area { unsigned long va_start; unsigned long va_end; unsigned long flags; struct rb_node rb_node; /* address sorted rbtree */ struct list_head list; /* address sorted list */ struct list_head purge_list; /* "lazy purge" list */ struct vm_struct *vm; struct rcu_head rcu_head; }; static DEFINE_SPINLOCK(vmap_area_lock); static LIST_HEAD(vmap_area_list); static struct rb_root vmap_area_root = RB_ROOT; /* The vmap cache globals are protected by vmap_area_lock */ static struct rb_node *free_vmap_cache; static unsigned long cached_hole_size; static unsigned long cached_vstart; static unsigned long cached_align; static unsigned long vmap_area_pcpu_hole; #ifdef CONFIG_ENABLE_VMALLOC_SAVING #define POSSIBLE_VMALLOC_START PAGE_OFFSET #define VMALLOC_BITMAP_SIZE ((VMALLOC_END - PAGE_OFFSET) >> \ PAGE_SHIFT) #define VMALLOC_TO_BIT(addr) ((addr - PAGE_OFFSET) >> PAGE_SHIFT) #define BIT_TO_VMALLOC(i) (PAGE_OFFSET + i * PAGE_SIZE) DECLARE_BITMAP(possible_areas, VMALLOC_BITMAP_SIZE); void mark_vmalloc_reserved_area(void *x, unsigned long size) { unsigned long addr = (unsigned long)x; bitmap_set(possible_areas, VMALLOC_TO_BIT(addr), size >> PAGE_SHIFT); } int is_vmalloc_addr(const void *x) { unsigned long addr = (unsigned long)x; if (addr < POSSIBLE_VMALLOC_START || addr >= VMALLOC_END) return 0; if (test_bit(VMALLOC_TO_BIT(addr), possible_areas)) return 0; return 1; } #else int is_vmalloc_addr(const void *x) { unsigned long addr = (unsigned long)x; return addr >= VMALLOC_START && addr < VMALLOC_END; } #endif EXPORT_SYMBOL(is_vmalloc_addr); static struct vmap_area *__find_vmap_area(unsigned long addr) { struct rb_node *n = vmap_area_root.rb_node; while (n) { struct vmap_area *va; va = rb_entry(n, struct vmap_area, 
rb_node); if (addr < va->va_start) n = n->rb_left; else if (addr > va->va_start) n = n->rb_right; else return va; } return NULL; } static void __insert_vmap_area(struct vmap_area *va) { struct rb_node **p = &vmap_area_root.rb_node; struct rb_node *parent = NULL; struct rb_node *tmp; while (*p) { struct vmap_area *tmp_va; parent = *p; tmp_va = rb_entry(parent, struct vmap_area, rb_node); if (va->va_start < tmp_va->va_end) p = &(*p)->rb_left; else if (va->va_end > tmp_va->va_start) p = &(*p)->rb_right; else BUG(); } rb_link_node(&va->rb_node, parent, p); rb_insert_color(&va->rb_node, &vmap_area_root); /* address-sort this list so it is usable like the vmlist */ tmp = rb_prev(&va->rb_node); if (tmp) { struct vmap_area *prev; prev = rb_entry(tmp, struct vmap_area, rb_node); list_add_rcu(&va->list, &prev->list); } else list_add_rcu(&va->list, &vmap_area_list); } static void purge_vmap_area_lazy(void); /* * Allocate a region of KVA of the specified size and alignment, within the * vstart and vend. */ static struct vmap_area *alloc_vmap_area(unsigned long size, unsigned long align, unsigned long vstart, unsigned long vend, int node, gfp_t gfp_mask) { struct vmap_area *va; struct rb_node *n; unsigned long addr; int purged = 0; struct vmap_area *first; BUG_ON(!size); BUG_ON(size & ~PAGE_MASK); BUG_ON(!is_power_of_2(align)); va = kmalloc_node(sizeof(struct vmap_area), gfp_mask & GFP_RECLAIM_MASK, node); if (unlikely(!va)) return ERR_PTR(-ENOMEM); retry: spin_lock(&vmap_area_lock); /* * Invalidate cache if we have more permissive parameters. * cached_hole_size notes the largest hole noticed _below_ * the vmap_area cached in free_vmap_cache: if size fits * into that hole, we want to scan from vstart to reuse * the hole instead of allocating above free_vmap_cache. * Note that __free_vmap_area may update free_vmap_cache * without updating cached_hole_size or cached_align. 
*/ if (!free_vmap_cache || size < cached_hole_size || vstart < cached_vstart || align < cached_align) { nocache: cached_hole_size = 0; free_vmap_cache = NULL; } /* record if we encounter less permissive parameters */ cached_vstart = vstart; cached_align = align; /* find starting point for our search */ if (free_vmap_cache) { first = rb_entry(free_vmap_cache, struct vmap_area, rb_node); addr = ALIGN(first->va_end, align); if (addr < vstart) goto nocache; if (addr + size - 1 < addr) goto overflow; } else { addr = ALIGN(vstart, align); if (addr + size - 1 < addr) goto overflow; n = vmap_area_root.rb_node; first = NULL; while (n) { struct vmap_area *tmp; tmp = rb_entry(n, struct vmap_area, rb_node); if (tmp->va_end >= addr) { first = tmp; if (tmp->va_start <= addr) break; n = n->rb_left; } else n = n->rb_right; } if (!first) goto found; } /* from the starting point, walk areas until a suitable hole is found */ while (addr + size > first->va_start && addr + size <= vend) { if (addr + cached_hole_size < first->va_start) cached_hole_size = first->va_start - addr; addr = ALIGN(first->va_end, align); if (addr + size - 1 < addr) goto overflow; n = rb_next(&first->rb_node); if (n) first = rb_entry(n, struct vmap_area, rb_node); else goto found; } found: if (addr + size > vend) goto overflow; va->va_start = addr; va->va_end = addr + size; va->flags = 0; __insert_vmap_area(va); free_vmap_cache = &va->rb_node; spin_unlock(&vmap_area_lock); BUG_ON(va->va_start & (align-1)); BUG_ON(va->va_start < vstart); BUG_ON(va->va_end > vend); return va; overflow: spin_unlock(&vmap_area_lock); if (!purged) { purge_vmap_area_lazy(); purged = 1; goto retry; } if (printk_ratelimit()) printk(KERN_WARNING "vmap allocation for size %lu failed: " "use vmalloc=<size> to increase size.\n", size); kfree(va); return ERR_PTR(-EBUSY); } static void __free_vmap_area(struct vmap_area *va) { BUG_ON(RB_EMPTY_NODE(&va->rb_node)); if (free_vmap_cache) { if (va->va_end < cached_vstart) { free_vmap_cache = NULL; 
} else { struct vmap_area *cache; cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node); if (va->va_start <= cache->va_start) { free_vmap_cache = rb_prev(&va->rb_node); /* * We don't try to update cached_hole_size or * cached_align, but it won't go very wrong. */ } } } rb_erase(&va->rb_node, &vmap_area_root); RB_CLEAR_NODE(&va->rb_node); list_del_rcu(&va->list); /* * Track the highest possible candidate for pcpu area * allocation. Areas outside of vmalloc area can be returned * here too, consider only end addresses which fall inside * vmalloc area proper. */ if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END) vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end); kfree_rcu(va, rcu_head); } /* * Free a region of KVA allocated by alloc_vmap_area */ static void free_vmap_area(struct vmap_area *va) { spin_lock(&vmap_area_lock); __free_vmap_area(va); spin_unlock(&vmap_area_lock); } /* * Clear the pagetable entries of a given vmap_area */ static void unmap_vmap_area(struct vmap_area *va) { vunmap_page_range(va->va_start, va->va_end); } static void vmap_debug_free_range(unsigned long start, unsigned long end) { /* * Unmap page tables and force a TLB flush immediately if * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free * bugs similarly to those in linear kernel virtual address * space after a page has been freed. * * All the lazy freeing logic is still retained, in order to * minimise intrusiveness of this debugging feature. * * This is going to be *slow* (linear kernel virtual address * debugging doesn't do a broadcast TLB flush so it is a lot * faster). */ #ifdef CONFIG_DEBUG_PAGEALLOC vunmap_page_range(start, end); flush_tlb_kernel_range(start, end); #endif } /* * lazy_max_pages is the maximum amount of virtual address space we gather up * before attempting to purge with a TLB flush. 
* * There is a tradeoff here: a larger number will cover more kernel page tables * and take slightly longer to purge, but it will linearly reduce the number of * global TLB flushes that must be performed. It would seem natural to scale * this number up linearly with the number of CPUs (because vmapping activity * could also scale linearly with the number of CPUs), however it is likely * that in practice, workloads might be constrained in other ways that mean * vmap activity will not scale linearly with CPUs. Also, I want to be * conservative and not introduce a big latency on huge systems, so go with * a less aggressive log scale. It will still be an improvement over the old * code, and it will be simple to change the scale factor if we find that it * becomes a problem on bigger systems. */ static unsigned long lazy_max_pages(void) { unsigned int log; log = fls(num_online_cpus()); return log * (32UL * 1024 * 1024 / PAGE_SIZE); } static atomic_t vmap_lazy_nr = ATOMIC_INIT(0); /* for per-CPU blocks */ static void purge_fragmented_blocks_allcpus(void); /* * called before a call to iounmap() if the caller wants vm_area_struct's * immediately freed. */ void set_iounmap_nonlazy(void) { atomic_set(&vmap_lazy_nr, lazy_max_pages()+1); } /* * Purges all lazily-freed vmap areas. * * If sync is 0 then don't purge if there is already a purge in progress. * If force_flush is 1, then flush kernel TLBs between *start and *end even * if we found no lazy vmap areas to unmap (callers can use this to optimise * their own TLB flushing). * Returns with *start = min(*start, lowest purged address) * *end = max(*end, highest purged address) */ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end, int sync, int force_flush) { static DEFINE_SPINLOCK(purge_lock); LIST_HEAD(valist); struct vmap_area *va; struct vmap_area *n_va; int nr = 0; /* * If sync is 0 but force_flush is 1, we'll go sync anyway but callers * should not expect such behaviour. 
This just simplifies locking for * the case that isn't actually used at the moment anyway. */ if (!sync && !force_flush) { if (!spin_trylock(&purge_lock)) return; } else spin_lock(&purge_lock); if (sync) purge_fragmented_blocks_allcpus(); rcu_read_lock(); list_for_each_entry_rcu(va, &vmap_area_list, list) { if (va->flags & VM_LAZY_FREE) { if (va->va_start < *start) *start = va->va_start; if (va->va_end > *end) *end = va->va_end; nr += (va->va_end - va->va_start) >> PAGE_SHIFT; list_add_tail(&va->purge_list, &valist); va->flags |= VM_LAZY_FREEING; va->flags &= ~VM_LAZY_FREE; } } rcu_read_unlock(); if (nr) atomic_sub(nr, &vmap_lazy_nr); if (nr || force_flush) flush_tlb_kernel_range(*start, *end); if (nr) { spin_lock(&vmap_area_lock); list_for_each_entry_safe(va, n_va, &valist, purge_list) __free_vmap_area(va); spin_unlock(&vmap_area_lock); } spin_unlock(&purge_lock); } /* * Kick off a purge of the outstanding lazy areas. Don't bother if somebody * is already purging. */ static void try_purge_vmap_area_lazy(void) { unsigned long start = ULONG_MAX, end = 0; __purge_vmap_area_lazy(&start, &end, 0, 0); } /* * Kick off a purge of the outstanding lazy areas. */ static void purge_vmap_area_lazy(void) { unsigned long start = ULONG_MAX, end = 0; __purge_vmap_area_lazy(&start, &end, 1, 0); } /* * Free a vmap area, caller ensuring that the area has been unmapped * and flush_cache_vunmap had been called for the correct range * previously. */ static void free_vmap_area_noflush(struct vmap_area *va) { va->flags |= VM_LAZY_FREE; atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr); if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages())) try_purge_vmap_area_lazy(); } /* * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been * called for the correct range previously. 
 */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
	unmap_vmap_area(va);
	free_vmap_area_noflush(va);
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	free_unmap_vmap_area_noflush(va);
}

/* Look up the vmap_area containing @addr under vmap_area_lock. */
static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

/* Free and unmap the vmap_area starting at @addr; BUGs if none exists. */
static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}


/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ?
(x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)

/* Set once vmalloc_init() has run; gates the per-CPU block fast paths. */
static bool vmap_initialized __read_mostly = false;

/* Per-CPU queue of vmap blocks that still have free space. */
struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

/*
 * One VMAP_BLOCK_SIZE chunk of vmalloc space carved into page-sized slots.
 * alloc_map tracks allocated slots, dirty_map tracks freed-but-not-yet-
 * TLB-flushed slots; a block is reclaimable when free + dirty covers it all.
 */
struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	struct vmap_block_queue *vbq;
	unsigned long free, dirty;
	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

/* Map a vmalloc-space address to its index in the vmap block radix tree. */
static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

/*
 * Allocate and initialise a new vmap block, insert it into the radix tree,
 * and queue it on this CPU's free list. Returns the block or an ERR_PTR.
 */
static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(vb);
		return ERR_CAST(va);
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	spin_lock_init(&vb->lock);
	vb->va = va;
	vb->free = VMAP_BBMAP_BITS;
	vb->dirty = 0;
	bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);		/* cannot fail after radix_tree_preload() */
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	vb->vbq = vbq;
	spin_lock(&vbq->lock);
	list_add_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vb;
}

/* Remove @vb from the radix tree and lazily free its vmap area. */
static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_vmap_area_noflush(vb->va);
	kfree_rcu(vb, rcu_head);
}

/*
 * Reclaim blocks on @cpu's free list that are fully fragmented: all slots
 * either free or dirty, but at least one still free (i.e. unused capacity
 * that can no longer satisfy allocations efficiently).
 */
static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		/* Unlocked pre-check; re-checked under vb->lock below. */
		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			bitmap_fill(vb->alloc_map, VMAP_BBMAP_BITS);
			bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	/* Free the collected blocks outside the RCU read section. */
	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks_thiscpu(void)
{
	purge_fragmented_blocks(smp_processor_id());
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}

/*
 * Allocate @size bytes (page-multiple, at most VMAP_MAX_ALLOC pages) from
 * this CPU's vmap blocks, creating a new block if none has room.
 * Returns the virtual address or an ERR_PTR.
 */
static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;
	int purge = 0;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	order = get_order(size);

again:
	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;

		spin_lock(&vb->lock);
		if (vb->free < 1UL << order)
			goto next;

		i = bitmap_find_free_region(vb->alloc_map,
						VMAP_BBMAP_BITS, order);

		if (i < 0) {
			if (vb->free + vb->dirty == VMAP_BBMAP_BITS) {
				/* fragmented and no outstanding allocations */
				BUG_ON(vb->dirty != VMAP_BBMAP_BITS);
				purge = 1;
			}
			goto next;
		}
		addr = vb->va->va_start + (i << PAGE_SHIFT);
		BUG_ON(addr_to_vb_idx(addr) !=
				addr_to_vb_idx(vb->va->va_start));
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}
		spin_unlock(&vb->lock);
		break;
next:
		spin_unlock(&vb->lock);
	}

	if (purge)
		purge_fragmented_blocks_thiscpu();

	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	if (!addr) {
		/* No block had room: create one and retry the scan. */
		vb = new_vmap_block(gfp_mask);
		if (IS_ERR(vb))
			return vb;
		goto
again;
	}

	return (void *)addr;
}

/*
 * Free a vb_alloc()ed range: unmap it and mark its slots dirty; the TLB
 * flush is deferred until the whole block drains or vm_unmap_aliases().
 */
static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);

	spin_lock(&vb->lock);
	BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		/* Entirely dirty: every slot was freed, reclaim the block. */
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	/* Widen [start, end) over every dirty range in every CPU's blocks. */
	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			int i;

			spin_lock(&vb->lock);
			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
			while (i < VMAP_BBMAP_BITS) {
				unsigned long s, e;
				int j;
				j = find_next_zero_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);

				s = vb->va->va_start + (i << PAGE_SHIFT);
				e = vb->va->va_start + (j << PAGE_SHIFT);
				flush = 1;

				if (s < start)
					start = s;
				if (e > end)
					end = e;

				i = j;
				i = find_next_bit(vb->dirty_map,
							VMAP_BBMAP_BITS, i);
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	/* sync=1 also purges fragmented blocks; flush covers dirty ranges. */
	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);
	vmap_debug_free_range(addr, addr+size);

	/* Small mappings came from the per-CPU block allocator. */
	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use.
PAGE_KERNEL for regular RAM
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		/* Small request: fast per-CPU block allocator. */
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		/* Large request: fall back to a dedicated vmap area. */
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);

/**
 * vm_area_check_early - check if vmap area is already mapped
 * @vm: vm_struct to be checked
 *
 * This function is used to check if the vmap area has been
 * mapped already. @vm->addr, @vm->size and @vm->flags should
 * contain proper values.
 *
 * Returns 1 if @vm overlaps an entry already on vmlist, 0 otherwise.
 */
int __init vm_area_check_early(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	BUG_ON(vmap_initialized);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr) {
			if (tmp->addr < vm->addr + vm->size)
				return 1;
		} else {
			if (tmp->addr + tmp->size > vm->addr)
				return 1;
		}
	}
	return 0;
}

/**
 * vm_area_add_early - add vmap area early during boot
 * @vm: vm_struct to add
 *
 * This function is used to add fixed kernel vm area to vmlist before
 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
 * should contain proper values and the other fields should be zero.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_add_early(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	BUG_ON(vmap_initialized);
	/* Keep vmlist sorted by address; BUG on any overlap. */
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr) {
			BUG_ON(tmp->addr < vm->addr + vm->size);
			break;
		} else
			BUG_ON(tmp->addr + tmp->size > vm->addr);
	}
	vm->next = *p;
	*p = vm;
}

/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called. @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero. On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	/* Simple bump allocator from the start of vmalloc space. */
	static size_t vm_init_off __initdata;
	unsigned long addr;

	addr = ALIGN(VMALLOC_START + vm_init_off, align);
	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

	vm->addr = (void *)addr;

	vm_area_add_early(vm);
}

/* Set up per-CPU block queues and import early vmlist entries. */
void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
		va->flags = VM_VM_AREA;
		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		va->vm = tmp;
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = VMALLOC_END;

	vmap_initialized = true;
}

/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vmap() on to-be-mapped areas
 * before calling this function.
 *
 * RETURNS:
 * The number of pages mapped on success, -errno on failure.
 */
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
			     pgprot_t prot, struct page **pages)
{
	return vmap_page_range_noflush(addr, addr + size, prot, pages);
}

/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vunmap() on to-be-mapped areas
 * before calling this function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
	vunmap_page_range(addr, addr + size);
}
EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);

/**
 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Similar to unmap_kernel_range_noflush() but flushes vcache before
 * the unmapping and tlb after.
 */
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}

/*
 * Map *pages into @area's address range (excluding the guard page).
 * On success *pages is advanced past the consumed entries and 0 returned.
 */
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	err = vmap_page_range(addr, end, prot, *pages);
	if (err > 0) {
		*pages += err;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

/*** Old vmalloc interfaces ***/
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

/* Populate @vm from @va and link the two structures together. */
static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
			      unsigned long flags, const void *caller)
{
	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = va->va_end - va->va_start;
	vm->caller = caller;
	va->vm = vm;
	va->flags |= VM_VM_AREA;
}

/* Insert @vm into the address-sorted vmlist, clearing VM_UNLIST. */
static void insert_vmalloc_vmlist(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	vm->flags &= ~VM_UNLIST;
	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr)
			break;
	}
	vm->next = *p;
	*p = vm;
	write_unlock(&vmlist_lock);
}

static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
			      unsigned long flags, const void *caller)
{
	setup_vmalloc_vm(vm, va, flags, caller);
	insert_vmalloc_vmlist(vm);
}

/*
 * Allocate a vm_struct plus backing vmap area in [start, end).
 * With VM_UNLIST the area is set up but not yet put on vmlist.
 */
static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long align, unsigned long flags, unsigned long start,
		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		/* I/O mappings are naturally aligned to their (clamped) size. */
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
 */
	size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	/*
	 * When this function is called from __vmalloc_node_range,
	 * we do not add vm_struct to vmlist here to avoid
	 * accessing uninitialized members of vm_struct such as
	 * pages and nr_pages fields. They will be set later.
	 * To distinguish it from others, we use a VM_UNLIST flag.
	 */
	if (flags & VM_UNLIST)
		setup_vmalloc_vm(area, va, flags, caller);
	else
		insert_vmalloc_vm(area, va, flags, caller);

	return area;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
						__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

/* As __get_vm_area() but records @caller as the allocation site. */
struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       const void *caller)
{
	return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
				  caller);
}

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserved it for out purposes. Returns the area descriptor
 * on success or %NULL on failure.
*/ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) { #ifdef CONFIG_ENABLE_VMALLOC_SAVING return __get_vm_area_node(size, 1, flags, PAGE_OFFSET, VMALLOC_END, -1, GFP_KERNEL, __builtin_return_address(0)); #else return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, -1, GFP_KERNEL, __builtin_return_address(0)); #endif } struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, const void *caller) { #ifdef CONFIG_ENABLE_VMALLOC_SAVING return __get_vm_area_node(size, 1, flags, PAGE_OFFSET, VMALLOC_END, -1, GFP_KERNEL, caller); #else return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, -1, GFP_KERNEL, __builtin_return_address(0)); #endif } /** * find_vm_area - find a continuous kernel virtual area * @addr: base address * * Search for the kernel VM area starting at @addr, and return it. * It is up to the caller to do all required locking to keep the returned * pointer valid. */ struct vm_struct *find_vm_area(const void *addr) { struct vmap_area *va; va = find_vmap_area((unsigned long)addr); if (va && va->flags & VM_VM_AREA) return va->vm; return NULL; } /** * remove_vm_area - find and remove a continuous kernel virtual area * @addr: base address * * Search for the kernel VM area starting at @addr, and remove it. * This function returns the found VM area, but using it is NOT safe * on SMP machines, except for its size or flags. */ struct vm_struct *remove_vm_area(const void *addr) { struct vmap_area *va; va = find_vmap_area((unsigned long)addr); if (va && va->flags & VM_VM_AREA) { struct vm_struct *vm = va->vm; if (!(vm->flags & VM_UNLIST)) { struct vm_struct *tmp, **p; /* * remove from list and disallow access to * this vm_struct before unmap. (address range * confliction is maintained by vmap.) 
 */
			write_lock(&vmlist_lock);
			for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
				;
			*p = tmp->next;
			write_unlock(&vmlist_lock);
		}

		vmap_debug_free_range(va->va_start, va->va_end);
		free_unmap_vmap_area(va);
		/* Report the usable size, excluding the guard page. */
		vm->size -= PAGE_SIZE;

		return vm;
	}
	return NULL;
}

/*
 * Common teardown for vfree()/vunmap(): remove the area and, when
 * @deallocate_pages is set, also release the backing pages.
 */
static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		/* The pages array itself may have been vmalloc'ed. */
		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually continuous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
	BUG_ON(in_interrupt());

	kmemleak_free(addr);
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	might_sleep();
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	might_sleep();

	if (count > totalram_pages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);

static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller);

/*
 * Allocate and map the physical pages backing @area.
 * Returns the mapped address or NULL (the area is freed on failure).
 */
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node, const void *caller)
{
	const int order = 0;
	struct page **pages;
	unsigned int nr_pages, array_size, i;
	gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded.
 */
	/* A large pages[] array is itself vmalloc'ed (bounded recursion). */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
				PAGE_KERNEL, node, caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size, nested_gfp, node);
	}
	area->pages = pages;
	area->caller = caller;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;
		gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;

		if (node < 0)
			page = alloc_page(tmp_mask);
		else
			page = alloc_pages_node(node, tmp_mask, order);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	warn_alloc_failed(gfp_mask, order,
			  "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
			  (area->nr_pages*PAGE_SIZE), area->size);
	vfree(area->addr);
	return NULL;
}

/**
 * __vmalloc_node_range - allocate virtually contiguous memory
 * @size: allocation size
 * @align: desired alignment
 * @start: vm area range start
 * @end: vm area range end
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, int node, const void *caller)
{
	struct vm_struct *area;
	void *addr;
	unsigned long real_size = size;
#ifdef CONFIG_FIX_MOVABLE_ZONE
	unsigned long total_pages = total_unmovable_pages;
#else
	unsigned long total_pages = totalram_pages;
#endif

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > total_pages)
		goto fail;

	/* VM_UNLIST: keep the area off vmlist until fully initialised. */
	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
				  start, end, node, gfp_mask, caller);
	if (!area)
		goto fail;

	addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
	if (!addr)
		return NULL;

	/*
	 * In this function, newly allocated vm_struct is not added
	 * to vmlist at __get_vm_area_node(). so, it is added here.
	 */
	insert_vmalloc_vmlist(area);

	/*
	 * A ref_count = 3 is needed because the vm_struct and vmap_area
	 * structures allocated in the __get_vm_area_node() function contain
	 * references to the virtual address of the vmalloc'ed block.
	 */
	kmemleak_alloc(addr, real_size, 3, gfp_mask);

	return addr;

fail:
	warn_alloc_failed(gfp_mask, 0,
			  "vmalloc: allocation failure: %lu bytes\n",
			  real_size);
	return NULL;
}

/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @align: desired alignment
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller)
{
	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
				gfp_mask, prot, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, 1, gfp_mask, prot, -1,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

/* Convenience wrapper: default alignment/protection, caller-supplied flags. */
static inline void *__vmalloc_node_flags(unsigned long size,
					int node, gfp_t flags)
{
	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
					node, __builtin_return_address(0));
}

/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
}
EXPORT_SYMBOL(vmalloc);

/**
 * vzalloc - allocate virtually contiguous memory with zero fill
 * @size: allocation size
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, -1,
				GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	/* SHMLBA alignment keeps cache aliasing sane when user-mapped. */
	ret = __vmalloc_node(size, SHMLBA,
			     GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			     PAGE_KERNEL, -1, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc_node() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return __vmalloc_node_flags(size, node,
			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM,
			      PAGE_KERNEL_EXEC, -1, __builtin_return_address(0));
}

/* GFP mask that guarantees 32-bit-addressable pages on this configuration. */
#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
			      -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
			     -1, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

/*
 * small helper routine, copy contents to buf from addr.
 * If the page is not present, fill zero.
 */
static int aligned_vread(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = (unsigned long)addr & ~PAGE_MASK;
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need
		 * lock. But adding lock here means that we need to add
		 * overhead of vmalloc()/vfree() calles for this _debug_
		 * interface, rarely used. Instead of that, we'll use
		 * kmap() and get small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p);
			memcpy(buf, map + offset, length);
			kunmap_atomic(map);
		} else
			/* Hole (page not present): report as zeroes. */
			memset(buf, 0, length);

		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

/*
 * Counterpart of aligned_vread(): copy from @buf into the vmalloc area
 * at @addr, skipping pages that are not present.
 */
static int aligned_vwrite(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = (unsigned long)addr & ~PAGE_MASK;
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need
		 * lock. But adding lock here means that we need to add
		 * overhead of vmalloc()/vfree() calles for this _debug_
		 * interface, rarely used. Instead of that, we'll use
		 * kmap() and get small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p);
			memcpy(map + offset, buf, length);
			kunmap_atomic(map);
		}
		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

/**
 * vread() -  read vmalloc area in a safe way.
 * @buf:	buffer for reading data
 * @addr:	vm address.
 * @count:	number of bytes to be read.
 *
 * Returns # of bytes which addr and buf should be increased.
 * (same number to @count). Returns 0 if [addr...addr+count) doesn't
 * includes any intersect with alive vmalloc area.
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * copy data from that area to a given buffer. If the given memory range
 * of [addr...addr+count) includes some valid address, data is copied to
 * proper area of @buf. If there are memory holes, they'll be zero-filled.
 * IOREMAP area is treated as memory hole and no copy is done.
 *
 * If [addr...addr+count) doesn't includes any intersects with alive
 * vm_struct area, returns 0.
 * @buf should be kernel's buffer.
Because this function uses KM_USER0, * the caller should guarantee KM_USER0 is not used. * * Note: In usual ops, vread() is never necessary because the caller * should know vmalloc() area is valid and can use memcpy(). * This is for routines which have to access vmalloc area without * any informaion, as /dev/kmem. * */ long vread(char *buf, char *addr, unsigned long count) { struct vm_struct *tmp; char *vaddr, *buf_start = buf; unsigned long buflen = count; unsigned long n; /* Don't allow overflow */ if ((unsigned long) addr + count < count) count = -(unsigned long) addr; read_lock(&vmlist_lock); for (tmp = vmlist; count && tmp; tmp = tmp->next) { vaddr = (char *) tmp->addr; if (addr >= vaddr + tmp->size - PAGE_SIZE) continue; while (addr < vaddr) { if (count == 0) goto finished; *buf = '\0'; buf++; addr++; count--; } n = vaddr + tmp->size - PAGE_SIZE - addr; if (n > count) n = count; if (!(tmp->flags & VM_IOREMAP)) aligned_vread(buf, addr, n); else /* IOREMAP area is treated as memory hole */ memset(buf, 0, n); buf += n; addr += n; count -= n; } finished: read_unlock(&vmlist_lock); if (buf == buf_start) return 0; /* zero-fill memory holes */ if (buf != buf_start + buflen) memset(buf, 0, buflen - (buf - buf_start)); return buflen; } /** * vwrite() - write vmalloc area in a safe way. * @buf: buffer for source data * @addr: vm address. * @count: number of bytes to be read. * * Returns # of bytes which addr and buf should be incresed. * (same number to @count). * If [addr...addr+count) doesn't includes any intersect with valid * vmalloc area, returns 0. * * This function checks that addr is a valid vmalloc'ed area, and * copy data from a buffer to the given addr. If specified range of * [addr...addr+count) includes some valid address, data is copied from * proper area of @buf. If there are memory holes, no copy to hole. * IOREMAP area is treated as memory hole and no copy is done. 
* * If [addr...addr+count) doesn't includes any intersects with alive * vm_struct area, returns 0. * @buf should be kernel's buffer. Because this function uses KM_USER0, * the caller should guarantee KM_USER0 is not used. * * Note: In usual ops, vwrite() is never necessary because the caller * should know vmalloc() area is valid and can use memcpy(). * This is for routines which have to access vmalloc area without * any informaion, as /dev/kmem. */ long vwrite(char *buf, char *addr, unsigned long count) { struct vm_struct *tmp; char *vaddr; unsigned long n, buflen; int copied = 0; /* Don't allow overflow */ if ((unsigned long) addr + count < count) count = -(unsigned long) addr; buflen = count; read_lock(&vmlist_lock); for (tmp = vmlist; count && tmp; tmp = tmp->next) { vaddr = (char *) tmp->addr; if (addr >= vaddr + tmp->size - PAGE_SIZE) continue; while (addr < vaddr) { if (count == 0) goto finished; buf++; addr++; count--; } n = vaddr + tmp->size - PAGE_SIZE - addr; if (n > count) n = count; if (!(tmp->flags & VM_IOREMAP)) { aligned_vwrite(buf, addr, n); copied++; } buf += n; addr += n; count -= n; } finished: read_unlock(&vmlist_lock); if (!copied) return 0; return buflen; } /** * remap_vmalloc_range - map vmalloc pages to userspace * @vma: vma to cover (map full range of vma) * @addr: vmalloc memory * @pgoff: number of pages into addr before first page to map * * Returns: 0 for success, -Exxx on failure * * This function checks that addr is a valid vmalloc'ed area, and * that it is big enough to cover the vma. Will return failure if * that criteria isn't met. 
* * Similar to remap_pfn_range() (see mm/memory.c) */ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff) { struct vm_struct *area; unsigned long uaddr = vma->vm_start; unsigned long usize = vma->vm_end - vma->vm_start; if ((PAGE_SIZE-1) & (unsigned long)addr) return -EINVAL; area = find_vm_area(addr); if (!area) return -EINVAL; if (!(area->flags & VM_USERMAP)) return -EINVAL; if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE) return -EINVAL; addr += pgoff << PAGE_SHIFT; do { struct page *page = vmalloc_to_page(addr); int ret; ret = vm_insert_page(vma, uaddr, page); if (ret) return ret; uaddr += PAGE_SIZE; addr += PAGE_SIZE; usize -= PAGE_SIZE; } while (usize > 0); /* Prevent "things" like memory migration? VM_flags need a cleanup... */ vma->vm_flags |= VM_RESERVED; return 0; } EXPORT_SYMBOL(remap_vmalloc_range); /* * Implement a stub for vmalloc_sync_all() if the architecture chose not to * have one. */ void __attribute__((weak)) vmalloc_sync_all(void) { } static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data) { pte_t ***p = data; if (p) { *(*p) = pte; (*p)++; } return 0; } /** * alloc_vm_area - allocate a range of kernel address space * @size: size of the area * @ptes: returns the PTEs for the address space * * Returns: NULL on failure, vm_struct on success * * This function reserves a range of kernel address space, and * allocates pagetables to map that range. No actual mappings * are created. * * If @ptes is non-NULL, pointers to the PTEs (in init_mm) * allocated for the VM area are returned. */ struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes) { struct vm_struct *area; area = get_vm_area_caller(size, VM_IOREMAP, __builtin_return_address(0)); if (area == NULL) return NULL; /* * This ensures that page tables are constructed for this region * of kernel virtual address space and mapped into init_mm. */ if (apply_to_page_range(&init_mm, (unsigned long)area->addr, size, f, ptes ? 
&ptes : NULL)) { free_vm_area(area); return NULL; } /* * If the allocated address space is passed to a hypercall * before being used then we cannot rely on a page fault to * trigger an update of the page tables. So sync all the page * tables here. */ vmalloc_sync_all(); return area; } EXPORT_SYMBOL_GPL(alloc_vm_area); void free_vm_area(struct vm_struct *area) { struct vm_struct *ret; ret = remove_vm_area(area->addr); BUG_ON(ret != area); kfree(area); } EXPORT_SYMBOL_GPL(free_vm_area); #ifdef CONFIG_SMP static struct vmap_area *node_to_va(struct rb_node *n) { return n ? rb_entry(n, struct vmap_area, rb_node) : NULL; } /** * pvm_find_next_prev - find the next and prev vmap_area surrounding @end * @end: target address * @pnext: out arg for the next vmap_area * @pprev: out arg for the previous vmap_area * * Returns: %true if either or both of next and prev are found, * %false if no vmap_area exists * * Find vmap_areas end addresses of which enclose @end. ie. if not * NULL, *pnext->va_end > @end and *pprev->va_end <= @end. */ static bool pvm_find_next_prev(unsigned long end, struct vmap_area **pnext, struct vmap_area **pprev) { struct rb_node *n = vmap_area_root.rb_node; struct vmap_area *va = NULL; while (n) { va = rb_entry(n, struct vmap_area, rb_node); if (end < va->va_end) n = n->rb_left; else if (end > va->va_end) n = n->rb_right; else break; } if (!va) return false; if (va->va_end > end) { *pnext = va; *pprev = node_to_va(rb_prev(&(*pnext)->rb_node)); } else { *pprev = va; *pnext = node_to_va(rb_next(&(*pprev)->rb_node)); } return true; } /** * pvm_determine_end - find the highest aligned address between two vmap_areas * @pnext: in/out arg for the next vmap_area * @pprev: in/out arg for the previous vmap_area * @align: alignment * * Returns: determined end address * * Find the highest aligned address between *@pnext and *@pprev below * VMALLOC_END. 
*@pnext and *@pprev are adjusted so that the aligned * down address is between the end addresses of the two vmap_areas. * * Please note that the address returned by this function may fall * inside *@pnext vmap_area. The caller is responsible for checking * that. */ static unsigned long pvm_determine_end(struct vmap_area **pnext, struct vmap_area **pprev, unsigned long align) { const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); unsigned long addr; if (*pnext) addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end); else addr = vmalloc_end; while (*pprev && (*pprev)->va_end > addr) { *pnext = *pprev; *pprev = node_to_va(rb_prev(&(*pnext)->rb_node)); } return addr; } /** * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator * @offsets: array containing offset of each area * @sizes: array containing size of each area * @nr_vms: the number of areas to allocate * @align: alignment, all entries in @offsets and @sizes must be aligned to this * * Returns: kmalloc'd vm_struct pointer array pointing to allocated * vm_structs on success, %NULL on failure * * Percpu allocator wants to use congruent vm areas so that it can * maintain the offsets among percpu areas. This function allocates * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to * be scattered pretty far, distance between two areas easily going up * to gigabytes. To avoid interacting with regular vmallocs, these * areas are allocated from top. * * Despite its complicated look, this allocator is rather simple. It * does everything top-down and scans areas from the end looking for * matching slot. While scanning, if any of the areas overlaps with * existing vmap_area, the base address is pulled down to fit the * area. Scanning is repeated till all the areas fit and then all * necessary data structres are inserted and the result is returned. 
*/ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, const size_t *sizes, int nr_vms, size_t align) { const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); struct vmap_area **vas, *prev, *next; struct vm_struct **vms; int area, area2, last_area, term_area; unsigned long base, start, end, last_end; bool purged = false; /* verify parameters and allocate data structures */ BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align)); for (last_area = 0, area = 0; area < nr_vms; area++) { start = offsets[area]; end = start + sizes[area]; /* is everything aligned properly? */ BUG_ON(!IS_ALIGNED(offsets[area], align)); BUG_ON(!IS_ALIGNED(sizes[area], align)); /* detect the area with the highest address */ if (start > offsets[last_area]) last_area = area; for (area2 = 0; area2 < nr_vms; area2++) { unsigned long start2 = offsets[area2]; unsigned long end2 = start2 + sizes[area2]; if (area2 == area) continue; BUG_ON(start2 >= start && start2 < end); BUG_ON(end2 <= end && end2 > start); } } last_end = offsets[last_area] + sizes[last_area]; if (vmalloc_end - vmalloc_start < last_end) { WARN_ON(true); return NULL; } vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL); vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL); if (!vas || !vms) goto err_free2; for (area = 0; area < nr_vms; area++) { vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL); vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); if (!vas[area] || !vms[area]) goto err_free; } retry: spin_lock(&vmap_area_lock); /* start scanning - we scan from the top, begin with the last area */ area = term_area = last_area; start = offsets[area]; end = start + sizes[area]; if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) { base = vmalloc_end - last_end; goto found; } base = pvm_determine_end(&next, &prev, align) - end; while (true) { BUG_ON(next && next->va_end <= base + end); BUG_ON(prev && prev->va_end > base + end); /* * 
base might have underflowed, add last_end before * comparing. */ if (base + last_end < vmalloc_start + last_end) { spin_unlock(&vmap_area_lock); if (!purged) { purge_vmap_area_lazy(); purged = true; goto retry; } goto err_free; } /* * If next overlaps, move base downwards so that it's * right below next and then recheck. */ if (next && next->va_start < base + end) { base = pvm_determine_end(&next, &prev, align) - end; term_area = area; continue; } /* * If prev overlaps, shift down next and prev and move * base so that it's right below new next and then * recheck. */ if (prev && prev->va_end > base + start) { next = prev; prev = node_to_va(rb_prev(&next->rb_node)); base = pvm_determine_end(&next, &prev, align) - end; term_area = area; continue; } /* * This area fits, move on to the previous one. If * the previous one is the terminal one, we're done. */ area = (area + nr_vms - 1) % nr_vms; if (area == term_area) break; start = offsets[area]; end = start + sizes[area]; pvm_find_next_prev(base + end, &next, &prev); } found: /* we've found a fitting base, insert all va's */ for (area = 0; area < nr_vms; area++) { struct vmap_area *va = vas[area]; va->va_start = base + offsets[area]; va->va_end = va->va_start + sizes[area]; __insert_vmap_area(va); } vmap_area_pcpu_hole = base + offsets[last_area]; spin_unlock(&vmap_area_lock); /* insert all vm's */ for (area = 0; area < nr_vms; area++) insert_vmalloc_vm(vms[area], vas[area], VM_ALLOC, pcpu_get_vm_areas); kfree(vas); return vms; err_free: for (area = 0; area < nr_vms; area++) { kfree(vas[area]); kfree(vms[area]); } err_free2: kfree(vas); kfree(vms); return NULL; } /** * pcpu_free_vm_areas - free vmalloc areas for percpu allocator * @vms: vm_struct pointer array returned by pcpu_get_vm_areas() * @nr_vms: the number of allocated areas * * Free vm_structs and the array allocated by pcpu_get_vm_areas(). 
*/ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) { int i; for (i = 0; i < nr_vms; i++) free_vm_area(vms[i]); kfree(vms); } #endif /* CONFIG_SMP */ #ifdef CONFIG_PROC_FS static void *s_start(struct seq_file *m, loff_t *pos) __acquires(&vmlist_lock) { loff_t n = *pos; struct vm_struct *v; read_lock(&vmlist_lock); v = vmlist; while (n > 0 && v) { n--; v = v->next; } if (!n) return v; return NULL; } static void *s_next(struct seq_file *m, void *p, loff_t *pos) { struct vm_struct *v = p; ++*pos; return v->next; } static void s_stop(struct seq_file *m, void *p) __releases(&vmlist_lock) { read_unlock(&vmlist_lock); } static void show_numa_info(struct seq_file *m, struct vm_struct *v) { if (NUMA_BUILD) { unsigned int nr, *counters = m->private; if (!counters) return; memset(counters, 0, nr_node_ids * sizeof(unsigned int)); for (nr = 0; nr < v->nr_pages; nr++) counters[page_to_nid(v->pages[nr])]++; for_each_node_state(nr, N_HIGH_MEMORY) if (counters[nr]) seq_printf(m, " N%u=%u", nr, counters[nr]); } } static int s_show(struct seq_file *m, void *p) { struct vm_struct *v = p; seq_printf(m, "0x%p-0x%p %7ld", v->addr, v->addr + v->size, v->size); if (v->caller) seq_printf(m, " %pS", v->caller); if (v->nr_pages) seq_printf(m, " pages=%d", v->nr_pages); if (v->phys_addr) seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr); if (v->flags & VM_IOREMAP) seq_printf(m, " ioremap"); if (v->flags & VM_ALLOC) seq_printf(m, " vmalloc"); if (v->flags & VM_MAP) seq_printf(m, " vmap"); if (v->flags & VM_USERMAP) seq_printf(m, " user"); if (v->flags & VM_VPAGES) seq_printf(m, " vpages"); if (v->flags & VM_LOWMEM) seq_printf(m, " lowmem"); show_numa_info(m, v); seq_putc(m, '\n'); return 0; } static const struct seq_operations vmalloc_op = { .start = s_start, .next = s_next, .stop = s_stop, .show = s_show, }; static int vmalloc_open(struct inode *inode, struct file *file) { unsigned int *ptr = NULL; int ret; if (NUMA_BUILD) { ptr = kmalloc(nr_node_ids * sizeof(unsigned 
int), GFP_KERNEL); if (ptr == NULL) return -ENOMEM; } ret = seq_open(file, &vmalloc_op); if (!ret) { struct seq_file *m = file->private_data; m->private = ptr; } else kfree(ptr); return ret; } static const struct file_operations proc_vmalloc_operations = { .open = vmalloc_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; static int __init proc_vmalloc_init(void) { proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations); return 0; } module_init(proc_vmalloc_init); #endif
gpl-2.0
gic4107/HSA-linux
arch/arm/mach-shmobile/board-armadillo800eva.c
55
31781
/* * armadillo 800 eva board support * * Copyright (C) 2012 Renesas Solutions Corp. * Copyright (C) 2012 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/input.h> #include <linux/platform_data/st1232_pdata.h> #include <linux/irq.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/gpio_keys.h> #include <linux/regulator/driver.h> #include <linux/pinctrl/machine.h> #include <linux/platform_data/pwm-renesas-tpu.h> #include <linux/pwm_backlight.h> #include <linux/regulator/fixed.h> #include <linux/regulator/gpio-regulator.h> #include <linux/regulator/machine.h> #include <linux/sh_eth.h> #include <linux/videodev2.h> #include <linux/usb/renesas_usbhs.h> #include <linux/mfd/tmio.h> #include <linux/mmc/host.h> #include <linux/mmc/sh_mmcif.h> #include <linux/mmc/sh_mobile_sdhi.h> #include <linux/i2c-gpio.h> #include <linux/reboot.h> #include <mach/common.h> #include <mach/irqs.h> #include <mach/r8a7740.h> #include <media/mt9t112.h> #include <media/sh_mobile_ceu.h> #include <media/soc_camera.h> #include <asm/page.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/time.h> #include <asm/hardware/cache-l2x0.h> #include <video/sh_mobile_lcdc.h> 
#include <video/sh_mobile_hdmi.h> #include <sound/sh_fsi.h> #include <sound/simple_card.h> #include "sh-gpio.h" /* * CON1 Camera Module * CON2 Extension Bus * CON3 HDMI Output * CON4 Composite Video Output * CON5 H-UDI JTAG * CON6 ARM JTAG * CON7 SD1 * CON8 SD2 * CON9 RTC BackUp * CON10 Monaural Mic Input * CON11 Stereo Headphone Output * CON12 Audio Line Output(L) * CON13 Audio Line Output(R) * CON14 AWL13 Module * CON15 Extension * CON16 LCD1 * CON17 LCD2 * CON19 Power Input * CON20 USB1 * CON21 USB2 * CON22 Serial * CON23 LAN * CON24 USB3 * LED1 Camera LED(Yellow) * LED2 Power LED (Green) * ED3-LED6 User LED(Yellow) * LED7 LAN link LED(Green) * LED8 LAN activity LED(Yellow) */ /* * DipSwitch * * SW1 * * -12345678-+---------------+---------------------------- * 1 | boot | hermit * 0 | boot | OS auto boot * -12345678-+---------------+---------------------------- * 00 | boot device | eMMC * 10 | boot device | SDHI0 (CON7) * 01 | boot device | - * 11 | boot device | Extension Buss (CS0) * -12345678-+---------------+---------------------------- * 0 | Extension Bus | D8-D15 disable, eMMC enable * 1 | Extension Bus | D8-D15 enable, eMMC disable * -12345678-+---------------+---------------------------- * 0 | SDHI1 | COM8 disable, COM14 enable * 1 | SDHI1 | COM8 enable, COM14 disable * -12345678-+---------------+---------------------------- * 0 | USB0 | COM20 enable, COM24 disable * 1 | USB0 | COM20 disable, COM24 enable * -12345678-+---------------+---------------------------- * 00 | JTAG | SH-X2 * 10 | JTAG | ARM * 01 | JTAG | - * 11 | JTAG | Boundary Scan *-----------+---------------+---------------------------- */ /* * FSI-WM8978 * * this command is required when playback. * * # amixer set "Headphone" 50 * * this command is required when capture. 
* * # amixer set "Input PGA" 15 * # amixer set "Left Input Mixer MicP" on * # amixer set "Left Input Mixer MicN" on * # amixer set "Right Input Mixer MicN" on * # amixer set "Right Input Mixer MicP" on */ /* * USB function * * When you use USB Function, * set SW1.6 ON, and connect cable to CN24. * * USBF needs workaround on R8A7740 chip. * These are a little bit complex. * see * usbhsf_power_ctrl() */ #define IRQ7 irq_pin(7) #define USBCR1 IOMEM(0xe605810a) #define USBH 0xC6700000 #define USBH_USBCTR 0x10834 struct usbhsf_private { struct clk *phy; struct clk *usb24; struct clk *pci; struct clk *func; struct clk *host; void __iomem *usbh_base; struct renesas_usbhs_platform_info info; }; #define usbhsf_get_priv(pdev) \ container_of(renesas_usbhs_get_info(pdev), \ struct usbhsf_private, info) static int usbhsf_get_id(struct platform_device *pdev) { return USBHS_GADGET; } static int usbhsf_power_ctrl(struct platform_device *pdev, void __iomem *base, int enable) { struct usbhsf_private *priv = usbhsf_get_priv(pdev); /* * Work around for USB Function. 
* It needs USB host clock, and settings */ if (enable) { /* * enable all the related usb clocks * for usb workaround */ clk_enable(priv->usb24); clk_enable(priv->pci); clk_enable(priv->host); clk_enable(priv->func); clk_enable(priv->phy); /* * set USBCR1 * * Port1 is driven by USB function, * Port2 is driven by USB HOST * One HOST (Port1 or Port2 is HOST) * USB PLL input clock = 24MHz */ __raw_writew(0xd750, USBCR1); mdelay(1); /* * start USB Host */ __raw_writel(0x0000000c, priv->usbh_base + USBH_USBCTR); __raw_writel(0x00000008, priv->usbh_base + USBH_USBCTR); mdelay(10); /* * USB PHY Power ON */ __raw_writew(0xd770, USBCR1); __raw_writew(0x4000, base + 0x102); /* USBF :: SUSPMODE */ } else { __raw_writel(0x0000010f, priv->usbh_base + USBH_USBCTR); __raw_writew(0xd7c0, USBCR1); /* GPIO */ clk_disable(priv->phy); clk_disable(priv->func); /* usb work around */ clk_disable(priv->host); /* usb work around */ clk_disable(priv->pci); /* usb work around */ clk_disable(priv->usb24); /* usb work around */ } return 0; } static int usbhsf_get_vbus(struct platform_device *pdev) { return gpio_get_value(209); } static irqreturn_t usbhsf_interrupt(int irq, void *data) { struct platform_device *pdev = data; renesas_usbhs_call_notify_hotplug(pdev); return IRQ_HANDLED; } static int usbhsf_hardware_exit(struct platform_device *pdev) { struct usbhsf_private *priv = usbhsf_get_priv(pdev); if (!IS_ERR(priv->phy)) clk_put(priv->phy); if (!IS_ERR(priv->usb24)) clk_put(priv->usb24); if (!IS_ERR(priv->pci)) clk_put(priv->pci); if (!IS_ERR(priv->host)) clk_put(priv->host); if (!IS_ERR(priv->func)) clk_put(priv->func); if (priv->usbh_base) iounmap(priv->usbh_base); priv->phy = NULL; priv->usb24 = NULL; priv->pci = NULL; priv->host = NULL; priv->func = NULL; priv->usbh_base = NULL; free_irq(IRQ7, pdev); return 0; } static int usbhsf_hardware_init(struct platform_device *pdev) { struct usbhsf_private *priv = usbhsf_get_priv(pdev); int ret; priv->phy = clk_get(&pdev->dev, "phy"); priv->usb24 = 
clk_get(&pdev->dev, "usb24"); priv->pci = clk_get(&pdev->dev, "pci"); priv->func = clk_get(&pdev->dev, "func"); priv->host = clk_get(&pdev->dev, "host"); priv->usbh_base = ioremap_nocache(USBH, 0x20000); if (IS_ERR(priv->phy) || IS_ERR(priv->usb24) || IS_ERR(priv->pci) || IS_ERR(priv->host) || IS_ERR(priv->func) || !priv->usbh_base) { dev_err(&pdev->dev, "USB clock setting failed\n"); usbhsf_hardware_exit(pdev); return -EIO; } ret = request_irq(IRQ7, usbhsf_interrupt, IRQF_TRIGGER_NONE, dev_name(&pdev->dev), pdev); if (ret) { dev_err(&pdev->dev, "request_irq err\n"); return ret; } irq_set_irq_type(IRQ7, IRQ_TYPE_EDGE_BOTH); /* usb24 use 1/1 of parent clock (= usb24s = 24MHz) */ clk_set_rate(priv->usb24, clk_get_rate(clk_get_parent(priv->usb24))); return 0; } static struct usbhsf_private usbhsf_private = { .info = { .platform_callback = { .get_id = usbhsf_get_id, .get_vbus = usbhsf_get_vbus, .hardware_init = usbhsf_hardware_init, .hardware_exit = usbhsf_hardware_exit, .power_ctrl = usbhsf_power_ctrl, }, .driver_param = { .buswait_bwait = 5, .detection_delay = 5, .d0_rx_id = SHDMA_SLAVE_USBHS_RX, .d1_tx_id = SHDMA_SLAVE_USBHS_TX, }, } }; static struct resource usbhsf_resources[] = { { .name = "USBHS", .start = 0xe6890000, .end = 0xe6890104 - 1, .flags = IORESOURCE_MEM, }, { .start = gic_spi(51), .flags = IORESOURCE_IRQ, }, }; static struct platform_device usbhsf_device = { .name = "renesas_usbhs", .dev = { .platform_data = &usbhsf_private.info, }, .id = -1, .num_resources = ARRAY_SIZE(usbhsf_resources), .resource = usbhsf_resources, }; /* Ether */ static struct sh_eth_plat_data sh_eth_platdata = { .phy = 0x00, /* LAN8710A */ .edmac_endian = EDMAC_LITTLE_ENDIAN, .phy_interface = PHY_INTERFACE_MODE_MII, }; static struct resource sh_eth_resources[] = { { .start = 0xe9a00000, .end = 0xe9a00800 - 1, .flags = IORESOURCE_MEM, }, { .start = 0xe9a01800, .end = 0xe9a02000 - 1, .flags = IORESOURCE_MEM, }, { .start = gic_spi(110), .flags = IORESOURCE_IRQ, }, }; static struct 
platform_device sh_eth_device = { .name = "r8a7740-gether", .id = -1, .dev = { .platform_data = &sh_eth_platdata, }, .resource = sh_eth_resources, .num_resources = ARRAY_SIZE(sh_eth_resources), }; /* PWM */ static struct resource pwm_resources[] = { [0] = { .start = 0xe6600000, .end = 0xe66000ff, .flags = IORESOURCE_MEM, }, }; static struct tpu_pwm_platform_data pwm_device_data = { .channels[2] = { .polarity = PWM_POLARITY_INVERSED, } }; static struct platform_device pwm_device = { .name = "renesas-tpu-pwm", .id = -1, .dev = { .platform_data = &pwm_device_data, }, .num_resources = ARRAY_SIZE(pwm_resources), .resource = pwm_resources, }; static struct pwm_lookup pwm_lookup[] = { PWM_LOOKUP("renesas-tpu-pwm", 2, "pwm-backlight.0", NULL), }; /* LCDC and backlight */ static struct platform_pwm_backlight_data pwm_backlight_data = { .lth_brightness = 50, .max_brightness = 255, .dft_brightness = 255, .pwm_period_ns = 33333, /* 30kHz */ .enable_gpio = -1, }; static struct platform_device pwm_backlight_device = { .name = "pwm-backlight", .dev = { .platform_data = &pwm_backlight_data, }, }; static struct fb_videomode lcdc0_mode = { .name = "AMPIER/AM-800480", .xres = 800, .yres = 480, .left_margin = 88, .right_margin = 40, .hsync_len = 128, .upper_margin = 20, .lower_margin = 5, .vsync_len = 5, .sync = 0, }; static struct sh_mobile_lcdc_info lcdc0_info = { .clock_source = LCDC_CLK_BUS, .ch[0] = { .chan = LCDC_CHAN_MAINLCD, .fourcc = V4L2_PIX_FMT_RGB565, .interface_type = RGB24, .clock_divider = 5, .flags = 0, .lcd_modes = &lcdc0_mode, .num_modes = 1, .panel_cfg = { .width = 111, .height = 68, }, }, }; static struct resource lcdc0_resources[] = { [0] = { .name = "LCD0", .start = 0xfe940000, .end = 0xfe943fff, .flags = IORESOURCE_MEM, }, [1] = { .start = gic_spi(177), .flags = IORESOURCE_IRQ, }, }; static struct platform_device lcdc0_device = { .name = "sh_mobile_lcdc_fb", .num_resources = ARRAY_SIZE(lcdc0_resources), .resource = lcdc0_resources, .id = 0, .dev = { 
.platform_data = &lcdc0_info, .coherent_dma_mask = DMA_BIT_MASK(32), }, }; /* * LCDC1/HDMI */ static struct sh_mobile_hdmi_info hdmi_info = { .flags = HDMI_OUTPUT_PUSH_PULL | HDMI_OUTPUT_POLARITY_HI | HDMI_32BIT_REG | HDMI_HAS_HTOP1 | HDMI_SND_SRC_SPDIF, }; static struct resource hdmi_resources[] = { [0] = { .name = "HDMI", .start = 0xe6be0000, .end = 0xe6be03ff, .flags = IORESOURCE_MEM, }, [1] = { .start = gic_spi(131), .flags = IORESOURCE_IRQ, }, [2] = { .name = "HDMI emma3pf", .start = 0xe6be4000, .end = 0xe6be43ff, .flags = IORESOURCE_MEM, }, }; static struct platform_device hdmi_device = { .name = "sh-mobile-hdmi", .num_resources = ARRAY_SIZE(hdmi_resources), .resource = hdmi_resources, .id = -1, .dev = { .platform_data = &hdmi_info, }, }; static const struct fb_videomode lcdc1_mode = { .name = "HDMI 720p", .xres = 1280, .yres = 720, .pixclock = 13468, .left_margin = 220, .right_margin = 110, .hsync_len = 40, .upper_margin = 20, .lower_margin = 5, .vsync_len = 5, .refresh = 60, .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT, }; static struct sh_mobile_lcdc_info hdmi_lcdc_info = { .clock_source = LCDC_CLK_PERIPHERAL, /* HDMI clock */ .ch[0] = { .chan = LCDC_CHAN_MAINLCD, .fourcc = V4L2_PIX_FMT_RGB565, .interface_type = RGB24, .clock_divider = 1, .flags = LCDC_FLAGS_DWPOL, .lcd_modes = &lcdc1_mode, .num_modes = 1, .tx_dev = &hdmi_device, .panel_cfg = { .width = 1280, .height = 720, }, }, }; static struct resource hdmi_lcdc_resources[] = { [0] = { .name = "LCDC1", .start = 0xfe944000, .end = 0xfe948000 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = gic_spi(178), .flags = IORESOURCE_IRQ, }, }; static struct platform_device hdmi_lcdc_device = { .name = "sh_mobile_lcdc_fb", .num_resources = ARRAY_SIZE(hdmi_lcdc_resources), .resource = hdmi_lcdc_resources, .id = 1, .dev = { .platform_data = &hdmi_lcdc_info, .coherent_dma_mask = DMA_BIT_MASK(32), }, }; /* GPIO KEY */ #define GPIO_KEY(c, g, d, ...) 
\ { .code = c, .gpio = g, .desc = d, .active_low = 1, __VA_ARGS__ } static struct gpio_keys_button gpio_buttons[] = { GPIO_KEY(KEY_POWER, 99, "SW3", .wakeup = 1), GPIO_KEY(KEY_BACK, 100, "SW4"), GPIO_KEY(KEY_MENU, 97, "SW5"), GPIO_KEY(KEY_HOME, 98, "SW6"), }; static struct gpio_keys_platform_data gpio_key_info = { .buttons = gpio_buttons, .nbuttons = ARRAY_SIZE(gpio_buttons), }; static struct platform_device gpio_keys_device = { .name = "gpio-keys", .id = -1, .dev = { .platform_data = &gpio_key_info, }, }; /* Fixed 3.3V regulator to be used by SDHI1, MMCIF */ static struct regulator_consumer_supply fixed3v3_power_consumers[] = { REGULATOR_SUPPLY("vmmc", "sh_mmcif"), REGULATOR_SUPPLY("vqmmc", "sh_mmcif"), }; /* Fixed 3.3V regulator used by LCD backlight */ static struct regulator_consumer_supply fixed5v0_power_consumers[] = { REGULATOR_SUPPLY("power", "pwm-backlight.0"), }; /* Fixed 3.3V regulator to be used by SDHI0 */ static struct regulator_consumer_supply vcc_sdhi0_consumers[] = { REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), }; static struct regulator_init_data vcc_sdhi0_init_data = { .constraints = { .valid_ops_mask = REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(vcc_sdhi0_consumers), .consumer_supplies = vcc_sdhi0_consumers, }; static struct fixed_voltage_config vcc_sdhi0_info = { .supply_name = "SDHI0 Vcc", .microvolts = 3300000, .gpio = 75, .enable_high = 1, .init_data = &vcc_sdhi0_init_data, }; static struct platform_device vcc_sdhi0 = { .name = "reg-fixed-voltage", .id = 1, .dev = { .platform_data = &vcc_sdhi0_info, }, }; /* 1.8 / 3.3V SDHI0 VccQ regulator */ static struct regulator_consumer_supply vccq_sdhi0_consumers[] = { REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), }; static struct regulator_init_data vccq_sdhi0_init_data = { .constraints = { .input_uV = 3300000, .min_uV = 1800000, .max_uV = 3300000, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(vccq_sdhi0_consumers), 
.consumer_supplies = vccq_sdhi0_consumers, }; static struct gpio vccq_sdhi0_gpios[] = { {17, GPIOF_OUT_INIT_LOW, "vccq-sdhi0" }, }; static struct gpio_regulator_state vccq_sdhi0_states[] = { { .value = 3300000, .gpios = (0 << 0) }, { .value = 1800000, .gpios = (1 << 0) }, }; static struct gpio_regulator_config vccq_sdhi0_info = { .supply_name = "vqmmc", .enable_gpio = 74, .enable_high = 1, .enabled_at_boot = 0, .gpios = vccq_sdhi0_gpios, .nr_gpios = ARRAY_SIZE(vccq_sdhi0_gpios), .states = vccq_sdhi0_states, .nr_states = ARRAY_SIZE(vccq_sdhi0_states), .type = REGULATOR_VOLTAGE, .init_data = &vccq_sdhi0_init_data, }; static struct platform_device vccq_sdhi0 = { .name = "gpio-regulator", .id = -1, .dev = { .platform_data = &vccq_sdhi0_info, }, }; /* Fixed 3.3V regulator to be used by SDHI1 */ static struct regulator_consumer_supply vcc_sdhi1_consumers[] = { REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"), }; static struct regulator_init_data vcc_sdhi1_init_data = { .constraints = { .valid_ops_mask = REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(vcc_sdhi1_consumers), .consumer_supplies = vcc_sdhi1_consumers, }; static struct fixed_voltage_config vcc_sdhi1_info = { .supply_name = "SDHI1 Vcc", .microvolts = 3300000, .gpio = 16, .enable_high = 1, .init_data = &vcc_sdhi1_init_data, }; static struct platform_device vcc_sdhi1 = { .name = "reg-fixed-voltage", .id = 2, .dev = { .platform_data = &vcc_sdhi1_info, }, }; /* SDHI0 */ static struct sh_mobile_sdhi_info sdhi0_info = { .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD, .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_USE_GPIO_CD, .cd_gpio = 167, }; static struct resource sdhi0_resources[] = { { .name = "SDHI0", .start = 0xe6850000, .end = 0xe6850100 - 1, .flags = IORESOURCE_MEM, }, /* * no SH_MOBILE_SDHI_IRQ_CARD_DETECT here */ { .name = SH_MOBILE_SDHI_IRQ_SDCARD, .start = gic_spi(118), .flags = 
IORESOURCE_IRQ, }, { .name = SH_MOBILE_SDHI_IRQ_SDIO, .start = gic_spi(119), .flags = IORESOURCE_IRQ, }, }; static struct platform_device sdhi0_device = { .name = "sh_mobile_sdhi", .id = 0, .dev = { .platform_data = &sdhi0_info, }, .num_resources = ARRAY_SIZE(sdhi0_resources), .resource = sdhi0_resources, }; /* SDHI1 */ static struct sh_mobile_sdhi_info sdhi1_info = { .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX, .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD, .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_USE_GPIO_CD, /* Port72 cannot generate IRQs, will be used in polling mode. */ .cd_gpio = 72, }; static struct resource sdhi1_resources[] = { [0] = { .name = "SDHI1", .start = 0xe6860000, .end = 0xe6860100 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = gic_spi(121), .flags = IORESOURCE_IRQ, }, [2] = { .start = gic_spi(122), .flags = IORESOURCE_IRQ, }, [3] = { .start = gic_spi(123), .flags = IORESOURCE_IRQ, }, }; static struct platform_device sdhi1_device = { .name = "sh_mobile_sdhi", .id = 1, .dev = { .platform_data = &sdhi1_info, }, .num_resources = ARRAY_SIZE(sdhi1_resources), .resource = sdhi1_resources, }; static const struct pinctrl_map eva_sdhi1_pinctrl_map[] = { PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.1", "pfc-r8a7740", "sdhi1_data4", "sdhi1"), PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.1", "pfc-r8a7740", "sdhi1_ctrl", "sdhi1"), PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.1", "pfc-r8a7740", "sdhi1_cd", "sdhi1"), PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.1", "pfc-r8a7740", "sdhi1_wp", "sdhi1"), }; /* MMCIF */ static struct sh_mmcif_plat_data sh_mmcif_plat = { .sup_pclk = 0, .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE, .ccs_unsupported = true, .slave_id_tx = SHDMA_SLAVE_MMCIF_TX, .slave_id_rx = SHDMA_SLAVE_MMCIF_RX, }; static struct resource sh_mmcif_resources[] = { [0] = { .name = "MMCIF", .start = 0xe6bd0000, .end = 0xe6bd0100 - 1, .flags = IORESOURCE_MEM, }, [1] = { 
/* MMC ERR */ .start = gic_spi(56), .flags = IORESOURCE_IRQ, }, [2] = { /* MMC NOR */ .start = gic_spi(57), .flags = IORESOURCE_IRQ, }, }; static struct platform_device sh_mmcif_device = { .name = "sh_mmcif", .id = -1, .dev = { .platform_data = &sh_mmcif_plat, }, .num_resources = ARRAY_SIZE(sh_mmcif_resources), .resource = sh_mmcif_resources, }; /* Camera */ static int mt9t111_power(struct device *dev, int mode) { struct clk *mclk = clk_get(NULL, "video1"); if (IS_ERR(mclk)) { dev_err(dev, "can't get video1 clock\n"); return -EINVAL; } if (mode) { /* video1 (= CON1 camera) expect 24MHz */ clk_set_rate(mclk, clk_round_rate(mclk, 24000000)); clk_enable(mclk); gpio_set_value(158, 1); } else { gpio_set_value(158, 0); clk_disable(mclk); } clk_put(mclk); return 0; } static struct i2c_board_info i2c_camera_mt9t111 = { I2C_BOARD_INFO("mt9t112", 0x3d), }; static struct mt9t112_camera_info mt9t111_info = { .divider = { 16, 0, 0, 7, 0, 10, 14, 7, 7 }, }; static struct soc_camera_link mt9t111_link = { .i2c_adapter_id = 0, .bus_id = 0, .board_info = &i2c_camera_mt9t111, .power = mt9t111_power, .priv = &mt9t111_info, }; static struct platform_device camera_device = { .name = "soc-camera-pdrv", .id = 0, .dev = { .platform_data = &mt9t111_link, }, }; /* CEU0 */ static struct sh_mobile_ceu_info sh_mobile_ceu0_info = { .flags = SH_CEU_FLAG_LOWER_8BIT, }; static struct resource ceu0_resources[] = { [0] = { .name = "CEU", .start = 0xfe910000, .end = 0xfe91009f, .flags = IORESOURCE_MEM, }, [1] = { .start = gic_spi(160), .flags = IORESOURCE_IRQ, }, [2] = { /* place holder for contiguous memory */ }, }; static struct platform_device ceu0_device = { .name = "sh_mobile_ceu", .id = 0, .num_resources = ARRAY_SIZE(ceu0_resources), .resource = ceu0_resources, .dev = { .platform_data = &sh_mobile_ceu0_info, .coherent_dma_mask = 0xffffffff, }, }; /* FSI */ static struct sh_fsi_platform_info fsi_info = { /* FSI-WM8978 */ .port_a = { .tx_id = SHDMA_SLAVE_FSIA_TX, }, /* FSI-HDMI */ .port_b = { 
.flags = SH_FSI_FMT_SPDIF | SH_FSI_ENABLE_STREAM_MODE | SH_FSI_CLK_CPG, .tx_id = SHDMA_SLAVE_FSIB_TX, } }; static struct resource fsi_resources[] = { [0] = { .name = "FSI", .start = 0xfe1f0000, .end = 0xfe1f8400 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = gic_spi(9), .flags = IORESOURCE_IRQ, }, }; static struct platform_device fsi_device = { .name = "sh_fsi2", .id = -1, .num_resources = ARRAY_SIZE(fsi_resources), .resource = fsi_resources, .dev = { .platform_data = &fsi_info, }, }; /* FSI-WM8978 */ static struct asoc_simple_card_info fsi_wm8978_info = { .name = "wm8978", .card = "FSI2A-WM8978", .codec = "wm8978.0-001a", .platform = "sh_fsi2", .daifmt = SND_SOC_DAIFMT_I2S, .cpu_dai = { .name = "fsia-dai", .fmt = SND_SOC_DAIFMT_CBS_CFS | SND_SOC_DAIFMT_IB_NF, }, .codec_dai = { .name = "wm8978-hifi", .fmt = SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_NB_NF, .sysclk = 12288000, }, }; static struct platform_device fsi_wm8978_device = { .name = "asoc-simple-card", .id = 0, .dev = { .platform_data = &fsi_wm8978_info, }, }; /* FSI-HDMI */ static struct asoc_simple_card_info fsi2_hdmi_info = { .name = "HDMI", .card = "FSI2B-HDMI", .codec = "sh-mobile-hdmi", .platform = "sh_fsi2", .cpu_dai = { .name = "fsib-dai", .fmt = SND_SOC_DAIFMT_CBM_CFM, }, .codec_dai = { .name = "sh_mobile_hdmi-hifi", }, }; static struct platform_device fsi_hdmi_device = { .name = "asoc-simple-card", .id = 1, .dev = { .platform_data = &fsi2_hdmi_info, }, }; /* RTC: RTC connects i2c-gpio. 
*/ static struct i2c_gpio_platform_data i2c_gpio_data = { .sda_pin = 208, .scl_pin = 91, .udelay = 5, /* 100 kHz */ }; static struct platform_device i2c_gpio_device = { .name = "i2c-gpio", .id = 2, .dev = { .platform_data = &i2c_gpio_data, }, }; /* I2C */ static struct st1232_pdata st1232_i2c0_pdata = { .reset_gpio = 166, }; static struct i2c_board_info i2c0_devices[] = { { I2C_BOARD_INFO("st1232-ts", 0x55), .irq = irq_pin(10), .platform_data = &st1232_i2c0_pdata, }, { I2C_BOARD_INFO("wm8978", 0x1a), }, }; static struct i2c_board_info i2c2_devices[] = { { I2C_BOARD_INFO("s35390a", 0x30), .type = "s35390a", }, }; /* * board devices */ static struct platform_device *eva_devices[] __initdata = { &lcdc0_device, &pwm_device, &pwm_backlight_device, &gpio_keys_device, &sh_eth_device, &vcc_sdhi0, &vccq_sdhi0, &sdhi0_device, &sh_mmcif_device, &hdmi_device, &hdmi_lcdc_device, &camera_device, &ceu0_device, &fsi_device, &fsi_wm8978_device, &fsi_hdmi_device, &i2c_gpio_device, }; static const struct pinctrl_map eva_pinctrl_map[] = { /* CEU0 */ PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_ceu.0", "pfc-r8a7740", "ceu0_data_0_7", "ceu0"), PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_ceu.0", "pfc-r8a7740", "ceu0_clk_0", "ceu0"), PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_ceu.0", "pfc-r8a7740", "ceu0_sync", "ceu0"), PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_ceu.0", "pfc-r8a7740", "ceu0_field", "ceu0"), /* FSIA */ PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.0", "pfc-r8a7740", "fsia_sclk_in", "fsia"), PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.0", "pfc-r8a7740", "fsia_mclk_out", "fsia"), PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.0", "pfc-r8a7740", "fsia_data_in_1", "fsia"), PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.0", "pfc-r8a7740", "fsia_data_out_0", "fsia"), /* FSIB */ PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.1", "pfc-r8a7740", "fsib_mclk_in", "fsib"), /* GETHER */ PIN_MAP_MUX_GROUP_DEFAULT("r8a7740-gether", "pfc-r8a7740", "gether_mii", "gether"), PIN_MAP_MUX_GROUP_DEFAULT("r8a7740-gether", 
"pfc-r8a7740", "gether_int", "gether"), /* HDMI */ PIN_MAP_MUX_GROUP_DEFAULT("sh-mobile-hdmi", "pfc-r8a7740", "hdmi", "hdmi"), /* LCD0 */ PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_lcdc_fb.0", "pfc-r8a7740", "lcd0_data24_0", "lcd0"), PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_lcdc_fb.0", "pfc-r8a7740", "lcd0_lclk_1", "lcd0"), PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_lcdc_fb.0", "pfc-r8a7740", "lcd0_sync", "lcd0"), /* MMCIF */ PIN_MAP_MUX_GROUP_DEFAULT("sh_mmcif.0", "pfc-r8a7740", "mmc0_data8_1", "mmc0"), PIN_MAP_MUX_GROUP_DEFAULT("sh_mmcif.0", "pfc-r8a7740", "mmc0_ctrl_1", "mmc0"), /* SCIFA1 */ PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.1", "pfc-r8a7740", "scifa1_data", "scifa1"), /* SDHI0 */ PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7740", "sdhi0_data4", "sdhi0"), PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7740", "sdhi0_ctrl", "sdhi0"), PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7740", "sdhi0_wp", "sdhi0"), /* ST1232 */ PIN_MAP_MUX_GROUP_DEFAULT("0-0055", "pfc-r8a7740", "intc_irq10", "intc"), /* TPU0 */ PIN_MAP_MUX_GROUP_DEFAULT("renesas-tpu-pwm", "pfc-r8a7740", "tpu0_to2_1", "tpu0"), /* USBHS */ PIN_MAP_MUX_GROUP_DEFAULT("renesas_usbhs", "pfc-r8a7740", "intc_irq7_1", "intc"), }; static void __init eva_clock_init(void) { struct clk *system = clk_get(NULL, "system_clk"); struct clk *xtal1 = clk_get(NULL, "extal1"); struct clk *usb24s = clk_get(NULL, "usb24s"); struct clk *fsibck = clk_get(NULL, "fsibck"); if (IS_ERR(system) || IS_ERR(xtal1) || IS_ERR(usb24s) || IS_ERR(fsibck)) { pr_err("armadillo800eva board clock init failed\n"); goto clock_error; } /* armadillo 800 eva extal1 is 24MHz */ clk_set_rate(xtal1, 24000000); /* usb24s use extal1 (= system) clock (= 24MHz) */ clk_set_parent(usb24s, system); /* FSIBCK is 12.288MHz, and it is parent of FSI-B */ clk_set_rate(fsibck, 12288000); clock_error: if (!IS_ERR(system)) clk_put(system); if (!IS_ERR(xtal1)) clk_put(xtal1); if (!IS_ERR(usb24s)) clk_put(usb24s); if (!IS_ERR(fsibck)) clk_put(fsibck); } /* * 
board init */ #define GPIO_PORT7CR IOMEM(0xe6050007) #define GPIO_PORT8CR IOMEM(0xe6050008) static void __init eva_init(void) { struct platform_device *usb = NULL; regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers, ARRAY_SIZE(fixed3v3_power_consumers), 3300000); regulator_register_always_on(3, "fixed-5.0V", fixed5v0_power_consumers, ARRAY_SIZE(fixed5v0_power_consumers), 5000000); pinctrl_register_mappings(eva_pinctrl_map, ARRAY_SIZE(eva_pinctrl_map)); pwm_add_table(pwm_lookup, ARRAY_SIZE(pwm_lookup)); r8a7740_pinmux_init(); r8a7740_meram_workaround(); /* LCDC0 */ gpio_request_one(61, GPIOF_OUT_INIT_HIGH, NULL); /* LCDDON */ /* GETHER */ gpio_request_one(18, GPIOF_OUT_INIT_HIGH, NULL); /* PHY_RST */ /* USB */ gpio_request_one(159, GPIOF_IN, NULL); /* USB_DEVICE_MODE */ if (gpio_get_value(159)) { /* USB Host */ } else { /* USB Func */ /* * The USBHS interrupt handlers needs to read the IRQ pin value * (HI/LOW) to diffentiate USB connection and disconnection * events (usbhsf_get_vbus()). We thus need to select both the * intc_irq7_1 pin group and GPIO 209 here. */ gpio_request_one(209, GPIOF_IN, NULL); platform_device_register(&usbhsf_device); usb = &usbhsf_device; } /* CON1/CON15 Camera */ gpio_request_one(173, GPIOF_OUT_INIT_LOW, NULL); /* STANDBY */ gpio_request_one(172, GPIOF_OUT_INIT_HIGH, NULL); /* RST */ /* see mt9t111_power() */ gpio_request_one(158, GPIOF_OUT_INIT_LOW, NULL); /* CAM_PON */ /* FSI-WM8978 */ gpio_request(7, NULL); gpio_request(8, NULL); gpio_direction_none(GPIO_PORT7CR); /* FSIAOBT needs no direction */ gpio_direction_none(GPIO_PORT8CR); /* FSIAOLR needs no direction */ /* * CAUTION * * DBGMD/LCDC0/FSIA MUX * DBGMD_SELECT_B should be set after setting PFC Function. 
*/ gpio_request_one(176, GPIOF_OUT_INIT_HIGH, NULL); /* * We can switch CON8/CON14 by SW1.5, * but it needs after DBGMD_SELECT_B */ gpio_request_one(6, GPIOF_IN, NULL); if (gpio_get_value(6)) { /* CON14 enable */ } else { /* CON8 (SDHI1) enable */ pinctrl_register_mappings(eva_sdhi1_pinctrl_map, ARRAY_SIZE(eva_sdhi1_pinctrl_map)); platform_device_register(&vcc_sdhi1); platform_device_register(&sdhi1_device); } #ifdef CONFIG_CACHE_L2X0 /* Early BRESP enable, Shared attribute override enable, 32K*8way */ l2x0_init(IOMEM(0xf0002000), 0x40440000, 0x82000fff); #endif i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices)); i2c_register_board_info(2, i2c2_devices, ARRAY_SIZE(i2c2_devices)); r8a7740_add_standard_devices(); platform_add_devices(eva_devices, ARRAY_SIZE(eva_devices)); rmobile_add_device_to_domain("A4LC", &lcdc0_device); rmobile_add_device_to_domain("A4LC", &hdmi_lcdc_device); if (usb) rmobile_add_device_to_domain("A3SP", usb); r8a7740_pm_init(); } static void __init eva_earlytimer_init(void) { r8a7740_clock_init(MD_CK0 | MD_CK2); shmobile_earlytimer_init(); /* the rate of extal1 clock must be set before late_time_init */ eva_clock_init(); } static void __init eva_add_early_devices(void) { r8a7740_add_early_devices(); } #define RESCNT2 IOMEM(0xe6188020) static void eva_restart(enum reboot_mode mode, const char *cmd) { /* Do soft power on reset */ writel((1 << 31), RESCNT2); } static const char *eva_boards_compat_dt[] __initdata = { "renesas,armadillo800eva", NULL, }; DT_MACHINE_START(ARMADILLO800EVA_DT, "armadillo800eva") .map_io = r8a7740_map_io, .init_early = eva_add_early_devices, .init_irq = r8a7740_init_irq_of, .init_machine = eva_init, .init_late = shmobile_init_late, .init_time = eva_earlytimer_init, .dt_compat = eva_boards_compat_dt, .restart = eva_restart, MACHINE_END
gpl-2.0
adrset/kernel
net/bluetooth/rfcomm/sock.c
1847
22520
/* RFCOMM implementation for Linux Bluetooth stack (BlueZ). Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com> Copyright (C) 2002 Marcel Holtmann <marcel@holtmann.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* * RFCOMM sockets. 
*/ #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/socket.h> #include <linux/skbuff.h> #include <linux/list.h> #include <linux/device.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <net/sock.h> #include <asm/system.h> #include <linux/uaccess.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include <net/bluetooth/rfcomm.h> static const struct proto_ops rfcomm_sock_ops; static struct bt_sock_list rfcomm_sk_list = { .lock = __RW_LOCK_UNLOCKED(rfcomm_sk_list.lock) }; static void rfcomm_sock_close(struct sock *sk); static void rfcomm_sock_kill(struct sock *sk); /* ---- DLC callbacks ---- * * called under rfcomm_dlc_lock() */ static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb) { struct sock *sk = d->owner; if (!sk) return; atomic_add(skb->len, &sk->sk_rmem_alloc); skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk, skb->len); if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) rfcomm_dlc_throttle(d); } static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err) { struct sock *sk = d->owner, *parent; unsigned long flags; if (!sk) return; BT_DBG("dlc %p state %ld err %d", d, d->state, err); local_irq_save(flags); bh_lock_sock(sk); if (err) sk->sk_err = err; sk->sk_state = d->state; parent = bt_sk(sk)->parent; if (parent) { if (d->state == BT_CLOSED) { sock_set_flag(sk, SOCK_ZAPPED); bt_accept_unlink(sk); } parent->sk_data_ready(parent, 0); } else { if (d->state == BT_CONNECTED) rfcomm_session_getaddr(d->session, &bt_sk(sk)->src, NULL); sk->sk_state_change(sk); } bh_unlock_sock(sk); local_irq_restore(flags); if (parent && sock_flag(sk, SOCK_ZAPPED)) { /* We have to drop DLC lock here, otherwise * rfcomm_sock_destruct() will dead 
lock. */ rfcomm_dlc_unlock(d); rfcomm_sock_kill(sk); rfcomm_dlc_lock(d); } } /* ---- Socket functions ---- */ static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src) { struct sock *sk = NULL; struct hlist_node *node; sk_for_each(sk, node, &rfcomm_sk_list.head) { if (rfcomm_pi(sk)->channel == channel && !bacmp(&bt_sk(sk)->src, src)) break; } return node ? sk : NULL; } /* Find socket with channel and source bdaddr. * Returns closest match. */ static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src) { struct sock *sk = NULL, *sk1 = NULL; struct hlist_node *node; read_lock(&rfcomm_sk_list.lock); sk_for_each(sk, node, &rfcomm_sk_list.head) { if (state && sk->sk_state != state) continue; if (rfcomm_pi(sk)->channel == channel) { /* Exact match. */ if (!bacmp(&bt_sk(sk)->src, src)) break; /* Closest match */ if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) sk1 = sk; } } read_unlock(&rfcomm_sk_list.lock); return node ? sk : sk1; } static void rfcomm_sock_destruct(struct sock *sk) { struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; BT_DBG("sk %p dlc %p", sk, d); skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_write_queue); rfcomm_dlc_lock(d); rfcomm_pi(sk)->dlc = NULL; /* Detach DLC if it's owned by this socket */ if (d->owner == sk) d->owner = NULL; rfcomm_dlc_unlock(d); rfcomm_dlc_put(d); } static void rfcomm_sock_cleanup_listen(struct sock *parent) { struct sock *sk; BT_DBG("parent %p", parent); /* Close not yet accepted dlcs */ while ((sk = bt_accept_dequeue(parent, NULL))) { rfcomm_sock_close(sk); rfcomm_sock_kill(sk); } parent->sk_state = BT_CLOSED; sock_set_flag(parent, SOCK_ZAPPED); } /* Kill socket (only if zapped and orphan) * Must be called on unlocked socket. 
*/ static void rfcomm_sock_kill(struct sock *sk) { if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) return; BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, atomic_read(&sk->sk_refcnt)); /* Kill poor orphan */ bt_sock_unlink(&rfcomm_sk_list, sk); sock_set_flag(sk, SOCK_DEAD); sock_put(sk); } static void __rfcomm_sock_close(struct sock *sk) { struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); switch (sk->sk_state) { case BT_LISTEN: rfcomm_sock_cleanup_listen(sk); break; case BT_CONNECT: case BT_CONNECT2: case BT_CONFIG: case BT_CONNECTED: rfcomm_dlc_close(d, 0); default: sock_set_flag(sk, SOCK_ZAPPED); break; } } /* Close socket. * Must be called on unlocked socket. */ static void rfcomm_sock_close(struct sock *sk) { lock_sock(sk); __rfcomm_sock_close(sk); release_sock(sk); } static void rfcomm_sock_init(struct sock *sk, struct sock *parent) { struct rfcomm_pinfo *pi = rfcomm_pi(sk); BT_DBG("sk %p", sk); if (parent) { sk->sk_type = parent->sk_type; pi->dlc->defer_setup = bt_sk(parent)->defer_setup; pi->sec_level = rfcomm_pi(parent)->sec_level; pi->role_switch = rfcomm_pi(parent)->role_switch; } else { pi->dlc->defer_setup = 0; pi->sec_level = BT_SECURITY_LOW; pi->role_switch = 0; } pi->dlc->sec_level = pi->sec_level; pi->dlc->role_switch = pi->role_switch; } static struct proto rfcomm_proto = { .name = "RFCOMM", .owner = THIS_MODULE, .obj_size = sizeof(struct rfcomm_pinfo) }; static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio) { struct rfcomm_dlc *d; struct sock *sk; sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto); if (!sk) return NULL; sock_init_data(sock, sk); INIT_LIST_HEAD(&bt_sk(sk)->accept_q); d = rfcomm_dlc_alloc(prio); if (!d) { sk_free(sk); return NULL; } d->data_ready = rfcomm_sk_data_ready; d->state_change = rfcomm_sk_state_change; rfcomm_pi(sk)->dlc = d; d->owner = sk; sk->sk_destruct = rfcomm_sock_destruct; sk->sk_sndtimeo = 
RFCOMM_CONN_TIMEOUT; sk->sk_sndbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10; sk->sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10; sock_reset_flag(sk, SOCK_ZAPPED); sk->sk_protocol = proto; sk->sk_state = BT_OPEN; bt_sock_link(&rfcomm_sk_list, sk); BT_DBG("sk %p", sk); return sk; } static int rfcomm_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; BT_DBG("sock %p", sock); sock->state = SS_UNCONNECTED; if (sock->type != SOCK_STREAM && sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; sock->ops = &rfcomm_sock_ops; sk = rfcomm_sock_alloc(net, sock, protocol, GFP_ATOMIC); if (!sk) return -ENOMEM; rfcomm_sock_init(sk, NULL); return 0; } static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { struct sockaddr_rc *sa = (struct sockaddr_rc *) addr; struct sock *sk = sock->sk; int err = 0; BT_DBG("sk %p %s", sk, batostr(&sa->rc_bdaddr)); if (!addr || addr->sa_family != AF_BLUETOOTH) return -EINVAL; lock_sock(sk); if (sk->sk_state != BT_OPEN) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; goto done; } write_lock_bh(&rfcomm_sk_list.lock); if (sa->rc_channel && __rfcomm_get_sock_by_addr(sa->rc_channel, &sa->rc_bdaddr)) { err = -EADDRINUSE; } else { /* Save source address */ bacpy(&bt_sk(sk)->src, &sa->rc_bdaddr); rfcomm_pi(sk)->channel = sa->rc_channel; sk->sk_state = BT_BOUND; } write_unlock_bh(&rfcomm_sk_list.lock); done: release_sock(sk); return err; } static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { struct sockaddr_rc *sa = (struct sockaddr_rc *) addr; struct sock *sk = sock->sk; struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; int err = 0; BT_DBG("sk %p", sk); if (alen < sizeof(struct sockaddr_rc) || addr->sa_family != AF_BLUETOOTH) return -EINVAL; lock_sock(sk); if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; goto done; } 
sk->sk_state = BT_CONNECT; bacpy(&bt_sk(sk)->dst, &sa->rc_bdaddr); rfcomm_pi(sk)->channel = sa->rc_channel; d->sec_level = rfcomm_pi(sk)->sec_level; d->role_switch = rfcomm_pi(sk)->role_switch; err = rfcomm_dlc_open(d, &bt_sk(sk)->src, &sa->rc_bdaddr, sa->rc_channel); if (!err) err = bt_sock_wait_state(sk, BT_CONNECTED, sock_sndtimeo(sk, flags & O_NONBLOCK)); done: release_sock(sk); return err; } static int rfcomm_sock_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sk %p backlog %d", sk, backlog); lock_sock(sk); if (sk->sk_state != BT_BOUND) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; goto done; } if (!rfcomm_pi(sk)->channel) { bdaddr_t *src = &bt_sk(sk)->src; u8 channel; err = -EINVAL; write_lock_bh(&rfcomm_sk_list.lock); for (channel = 1; channel < 31; channel++) if (!__rfcomm_get_sock_by_addr(channel, src)) { rfcomm_pi(sk)->channel = channel; err = 0; break; } write_unlock_bh(&rfcomm_sk_list.lock); if (err < 0) goto done; } sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; sk->sk_state = BT_LISTEN; done: release_sock(sk); return err; } static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags) { DECLARE_WAITQUEUE(wait, current); struct sock *sk = sock->sk, *nsk; long timeo; int err = 0; lock_sock(sk); if (sk->sk_state != BT_LISTEN) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; goto done; } timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); BT_DBG("sk %p timeo %ld", sk, timeo); /* Wait for an incoming connection. (wake-one). 
*/ add_wait_queue_exclusive(sk_sleep(sk), &wait); while (!(nsk = bt_accept_dequeue(sk, newsock))) { set_current_state(TASK_INTERRUPTIBLE); if (!timeo) { err = -EAGAIN; break; } release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); if (sk->sk_state != BT_LISTEN) { err = -EBADFD; break; } if (signal_pending(current)) { err = sock_intr_errno(timeo); break; } } set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); if (err) goto done; newsock->state = SS_CONNECTED; BT_DBG("new socket %p", nsk); done: release_sock(sk); return err; } static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer) { struct sockaddr_rc *sa = (struct sockaddr_rc *) addr; struct sock *sk = sock->sk; BT_DBG("sock %p, sk %p", sock, sk); sa->rc_family = AF_BLUETOOTH; sa->rc_channel = rfcomm_pi(sk)->channel; if (peer) bacpy(&sa->rc_bdaddr, &bt_sk(sk)->dst); else bacpy(&sa->rc_bdaddr, &bt_sk(sk)->src); *len = sizeof(struct sockaddr_rc); return 0; } static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; struct sk_buff *skb; int sent = 0; if (test_bit(RFCOMM_DEFER_SETUP, &d->flags)) return -ENOTCONN; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; if (sk->sk_shutdown & SEND_SHUTDOWN) return -EPIPE; BT_DBG("sock %p, sk %p", sock, sk); lock_sock(sk); while (len) { size_t size = min_t(size_t, len, d->mtu); int err; skb = sock_alloc_send_skb(sk, size + RFCOMM_SKB_RESERVE, msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) { if (sent == 0) sent = err; break; } skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE); err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); if (err) { kfree_skb(skb); if (sent == 0) sent = err; break; } err = rfcomm_dlc_send(d, skb); if (err < 0) { kfree_skb(skb); if (sent == 0) sent = err; break; } sent += size; len -= size; } release_sock(sk); return sent; } static int rfcomm_sock_recvmsg(struct 
kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; int len; if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { rfcomm_dlc_accept(d); return 0; } len = bt_sock_stream_recvmsg(iocb, sock, msg, size, flags); lock_sock(sk); if (!(flags & MSG_PEEK) && len > 0) atomic_sub(len, &sk->sk_rmem_alloc); if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2)) rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc); release_sock(sk); return len; } static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; int err = 0; u32 opt; BT_DBG("sk %p", sk); lock_sock(sk); switch (optname) { case RFCOMM_LM: if (get_user(opt, (u32 __user *) optval)) { err = -EFAULT; break; } if (opt & RFCOMM_LM_AUTH) rfcomm_pi(sk)->sec_level = BT_SECURITY_LOW; if (opt & RFCOMM_LM_ENCRYPT) rfcomm_pi(sk)->sec_level = BT_SECURITY_MEDIUM; if (opt & RFCOMM_LM_SECURE) rfcomm_pi(sk)->sec_level = BT_SECURITY_HIGH; rfcomm_pi(sk)->role_switch = (opt & RFCOMM_LM_MASTER); break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct bt_security sec; int len, err = 0; u32 opt; BT_DBG("sk %p", sk); if (level == SOL_RFCOMM) return rfcomm_sock_setsockopt_old(sock, optname, optval, optlen); if (level != SOL_BLUETOOTH) return -ENOPROTOOPT; lock_sock(sk); switch (optname) { case BT_SECURITY: if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; break; } sec.level = BT_SECURITY_LOW; len = min_t(unsigned int, sizeof(sec), optlen); if (copy_from_user((char *) &sec, optval, len)) { err = -EFAULT; break; } if (sec.level > BT_SECURITY_VERY_HIGH) { err = -EINVAL; break; } rfcomm_pi(sk)->sec_level = sec.level; BT_DBG("set to %d", sec.level); break; case BT_DEFER_SETUP: if (sk->sk_state != 
BT_BOUND && sk->sk_state != BT_LISTEN) { err = -EINVAL; break; } if (get_user(opt, (u32 __user *) optval)) { err = -EFAULT; break; } bt_sk(sk)->defer_setup = opt; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct sock *l2cap_sk; struct rfcomm_conninfo cinfo; int len, err = 0; u32 opt; BT_DBG("sk %p", sk); if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case RFCOMM_LM: switch (rfcomm_pi(sk)->sec_level) { case BT_SECURITY_LOW: opt = RFCOMM_LM_AUTH; break; case BT_SECURITY_MEDIUM: opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT; break; case BT_SECURITY_HIGH: case BT_SECURITY_VERY_HIGH: opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT | RFCOMM_LM_SECURE; break; default: opt = 0; break; } if (rfcomm_pi(sk)->role_switch) opt |= RFCOMM_LM_MASTER; if (put_user(opt, (u32 __user *) optval)) err = -EFAULT; break; case RFCOMM_CONNINFO: if (sk->sk_state != BT_CONNECTED && !rfcomm_pi(sk)->dlc->defer_setup) { err = -ENOTCONN; break; } l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk; cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle; memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3); len = min_t(unsigned int, len, sizeof(cinfo)); if (copy_to_user(optval, (char *) &cinfo, len)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct bt_security sec; int len, err = 0; BT_DBG("sk %p", sk); if (level == SOL_RFCOMM) return rfcomm_sock_getsockopt_old(sock, optname, optval, optlen); if (level != SOL_BLUETOOTH) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case BT_SECURITY: if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; break; } 
sec.level = rfcomm_pi(sk)->sec_level; len = min_t(unsigned int, len, sizeof(sec)); if (copy_to_user(optval, (char *) &sec, len)) err = -EFAULT; break; case BT_DEFER_SETUP: if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { err = -EINVAL; break; } if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int rfcomm_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk __maybe_unused = sock->sk; int err; BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg); err = bt_sock_ioctl(sock, cmd, arg); if (err == -ENOIOCTLCMD) { #ifdef CONFIG_BT_RFCOMM_TTY lock_sock(sk); err = rfcomm_dev_ioctl(sk, cmd, (void __user *) arg); release_sock(sk); #else err = -EOPNOTSUPP; #endif } return err; } static int rfcomm_sock_shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sock %p, sk %p", sock, sk); if (!sk) return 0; lock_sock(sk); if (!sk->sk_shutdown) { sk->sk_shutdown = SHUTDOWN_MASK; __rfcomm_sock_close(sk); if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); } release_sock(sk); return err; } static int rfcomm_sock_release(struct socket *sock) { struct sock *sk = sock->sk; int err; BT_DBG("sock %p, sk %p", sock, sk); if (!sk) return 0; err = rfcomm_sock_shutdown(sock, 2); sock_orphan(sk); rfcomm_sock_kill(sk); return err; } /* ---- RFCOMM core layer callbacks ---- * * called under rfcomm_lock() */ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc **d) { struct sock *sk, *parent; bdaddr_t src, dst; int result = 0; BT_DBG("session %p channel %d", s, channel); rfcomm_session_getaddr(s, &src, &dst); /* Check if we have socket listening on channel */ parent = rfcomm_get_sock_by_channel(BT_LISTEN, channel, &src); if (!parent) return 0; bh_lock_sock(parent); /* Check for backlog size */ if (sk_acceptq_is_full(parent)) { 
BT_DBG("backlog full %d", parent->sk_ack_backlog); goto done; } sk = rfcomm_sock_alloc(sock_net(parent), NULL, BTPROTO_RFCOMM, GFP_ATOMIC); if (!sk) goto done; rfcomm_sock_init(sk, parent); bacpy(&bt_sk(sk)->src, &src); bacpy(&bt_sk(sk)->dst, &dst); rfcomm_pi(sk)->channel = channel; sk->sk_state = BT_CONFIG; bt_accept_enqueue(parent, sk); /* Accept connection and return socket DLC */ *d = rfcomm_pi(sk)->dlc; result = 1; done: bh_unlock_sock(parent); if (bt_sk(parent)->defer_setup) parent->sk_state_change(parent); return result; } static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p) { struct sock *sk; struct hlist_node *node; read_lock_bh(&rfcomm_sk_list.lock); sk_for_each(sk, node, &rfcomm_sk_list.head) { seq_printf(f, "%s %s %d %d\n", batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), sk->sk_state, rfcomm_pi(sk)->channel); } read_unlock_bh(&rfcomm_sk_list.lock); return 0; } static int rfcomm_sock_debugfs_open(struct inode *inode, struct file *file) { return single_open(file, rfcomm_sock_debugfs_show, inode->i_private); } static const struct file_operations rfcomm_sock_debugfs_fops = { .open = rfcomm_sock_debugfs_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static struct dentry *rfcomm_sock_debugfs; static const struct proto_ops rfcomm_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .release = rfcomm_sock_release, .bind = rfcomm_sock_bind, .connect = rfcomm_sock_connect, .listen = rfcomm_sock_listen, .accept = rfcomm_sock_accept, .getname = rfcomm_sock_getname, .sendmsg = rfcomm_sock_sendmsg, .recvmsg = rfcomm_sock_recvmsg, .shutdown = rfcomm_sock_shutdown, .setsockopt = rfcomm_sock_setsockopt, .getsockopt = rfcomm_sock_getsockopt, .ioctl = rfcomm_sock_ioctl, .poll = bt_sock_poll, .socketpair = sock_no_socketpair, .mmap = sock_no_mmap }; static const struct net_proto_family rfcomm_sock_family_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .create = rfcomm_sock_create }; int __init 
rfcomm_init_sockets(void) { int err; err = proto_register(&rfcomm_proto, 0); if (err < 0) return err; err = bt_sock_register(BTPROTO_RFCOMM, &rfcomm_sock_family_ops); if (err < 0) goto error; if (bt_debugfs) { rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444, bt_debugfs, NULL, &rfcomm_sock_debugfs_fops); if (!rfcomm_sock_debugfs) BT_ERR("Failed to create RFCOMM debug file"); } BT_INFO("RFCOMM socket layer initialized"); return 0; error: BT_ERR("RFCOMM socket layer registration failed"); proto_unregister(&rfcomm_proto); return err; } void __exit rfcomm_cleanup_sockets(void) { debugfs_remove(rfcomm_sock_debugfs); if (bt_sock_unregister(BTPROTO_RFCOMM) < 0) BT_ERR("RFCOMM socket layer unregistration failed"); proto_unregister(&rfcomm_proto); }
gpl-2.0
MasterChief87/android_kernel_zte_draconis
net/bluetooth/rfcomm/sock.c
1847
22520
/* RFCOMM implementation for Linux Bluetooth stack (BlueZ). Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com> Copyright (C) 2002 Marcel Holtmann <marcel@holtmann.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* * RFCOMM sockets. 
*/ #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/socket.h> #include <linux/skbuff.h> #include <linux/list.h> #include <linux/device.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <net/sock.h> #include <asm/system.h> #include <linux/uaccess.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include <net/bluetooth/rfcomm.h> static const struct proto_ops rfcomm_sock_ops; static struct bt_sock_list rfcomm_sk_list = { .lock = __RW_LOCK_UNLOCKED(rfcomm_sk_list.lock) }; static void rfcomm_sock_close(struct sock *sk); static void rfcomm_sock_kill(struct sock *sk); /* ---- DLC callbacks ---- * * called under rfcomm_dlc_lock() */ static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb) { struct sock *sk = d->owner; if (!sk) return; atomic_add(skb->len, &sk->sk_rmem_alloc); skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk, skb->len); if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) rfcomm_dlc_throttle(d); } static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err) { struct sock *sk = d->owner, *parent; unsigned long flags; if (!sk) return; BT_DBG("dlc %p state %ld err %d", d, d->state, err); local_irq_save(flags); bh_lock_sock(sk); if (err) sk->sk_err = err; sk->sk_state = d->state; parent = bt_sk(sk)->parent; if (parent) { if (d->state == BT_CLOSED) { sock_set_flag(sk, SOCK_ZAPPED); bt_accept_unlink(sk); } parent->sk_data_ready(parent, 0); } else { if (d->state == BT_CONNECTED) rfcomm_session_getaddr(d->session, &bt_sk(sk)->src, NULL); sk->sk_state_change(sk); } bh_unlock_sock(sk); local_irq_restore(flags); if (parent && sock_flag(sk, SOCK_ZAPPED)) { /* We have to drop DLC lock here, otherwise * rfcomm_sock_destruct() will dead 
lock. */ rfcomm_dlc_unlock(d); rfcomm_sock_kill(sk); rfcomm_dlc_lock(d); } } /* ---- Socket functions ---- */ static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src) { struct sock *sk = NULL; struct hlist_node *node; sk_for_each(sk, node, &rfcomm_sk_list.head) { if (rfcomm_pi(sk)->channel == channel && !bacmp(&bt_sk(sk)->src, src)) break; } return node ? sk : NULL; } /* Find socket with channel and source bdaddr. * Returns closest match. */ static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src) { struct sock *sk = NULL, *sk1 = NULL; struct hlist_node *node; read_lock(&rfcomm_sk_list.lock); sk_for_each(sk, node, &rfcomm_sk_list.head) { if (state && sk->sk_state != state) continue; if (rfcomm_pi(sk)->channel == channel) { /* Exact match. */ if (!bacmp(&bt_sk(sk)->src, src)) break; /* Closest match */ if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) sk1 = sk; } } read_unlock(&rfcomm_sk_list.lock); return node ? sk : sk1; } static void rfcomm_sock_destruct(struct sock *sk) { struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; BT_DBG("sk %p dlc %p", sk, d); skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_write_queue); rfcomm_dlc_lock(d); rfcomm_pi(sk)->dlc = NULL; /* Detach DLC if it's owned by this socket */ if (d->owner == sk) d->owner = NULL; rfcomm_dlc_unlock(d); rfcomm_dlc_put(d); } static void rfcomm_sock_cleanup_listen(struct sock *parent) { struct sock *sk; BT_DBG("parent %p", parent); /* Close not yet accepted dlcs */ while ((sk = bt_accept_dequeue(parent, NULL))) { rfcomm_sock_close(sk); rfcomm_sock_kill(sk); } parent->sk_state = BT_CLOSED; sock_set_flag(parent, SOCK_ZAPPED); } /* Kill socket (only if zapped and orphan) * Must be called on unlocked socket. 
*/ static void rfcomm_sock_kill(struct sock *sk) { if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) return; BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, atomic_read(&sk->sk_refcnt)); /* Kill poor orphan */ bt_sock_unlink(&rfcomm_sk_list, sk); sock_set_flag(sk, SOCK_DEAD); sock_put(sk); } static void __rfcomm_sock_close(struct sock *sk) { struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); switch (sk->sk_state) { case BT_LISTEN: rfcomm_sock_cleanup_listen(sk); break; case BT_CONNECT: case BT_CONNECT2: case BT_CONFIG: case BT_CONNECTED: rfcomm_dlc_close(d, 0); default: sock_set_flag(sk, SOCK_ZAPPED); break; } } /* Close socket. * Must be called on unlocked socket. */ static void rfcomm_sock_close(struct sock *sk) { lock_sock(sk); __rfcomm_sock_close(sk); release_sock(sk); } static void rfcomm_sock_init(struct sock *sk, struct sock *parent) { struct rfcomm_pinfo *pi = rfcomm_pi(sk); BT_DBG("sk %p", sk); if (parent) { sk->sk_type = parent->sk_type; pi->dlc->defer_setup = bt_sk(parent)->defer_setup; pi->sec_level = rfcomm_pi(parent)->sec_level; pi->role_switch = rfcomm_pi(parent)->role_switch; } else { pi->dlc->defer_setup = 0; pi->sec_level = BT_SECURITY_LOW; pi->role_switch = 0; } pi->dlc->sec_level = pi->sec_level; pi->dlc->role_switch = pi->role_switch; } static struct proto rfcomm_proto = { .name = "RFCOMM", .owner = THIS_MODULE, .obj_size = sizeof(struct rfcomm_pinfo) }; static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio) { struct rfcomm_dlc *d; struct sock *sk; sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto); if (!sk) return NULL; sock_init_data(sock, sk); INIT_LIST_HEAD(&bt_sk(sk)->accept_q); d = rfcomm_dlc_alloc(prio); if (!d) { sk_free(sk); return NULL; } d->data_ready = rfcomm_sk_data_ready; d->state_change = rfcomm_sk_state_change; rfcomm_pi(sk)->dlc = d; d->owner = sk; sk->sk_destruct = rfcomm_sock_destruct; sk->sk_sndtimeo = 
RFCOMM_CONN_TIMEOUT; sk->sk_sndbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10; sk->sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10; sock_reset_flag(sk, SOCK_ZAPPED); sk->sk_protocol = proto; sk->sk_state = BT_OPEN; bt_sock_link(&rfcomm_sk_list, sk); BT_DBG("sk %p", sk); return sk; } static int rfcomm_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; BT_DBG("sock %p", sock); sock->state = SS_UNCONNECTED; if (sock->type != SOCK_STREAM && sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; sock->ops = &rfcomm_sock_ops; sk = rfcomm_sock_alloc(net, sock, protocol, GFP_ATOMIC); if (!sk) return -ENOMEM; rfcomm_sock_init(sk, NULL); return 0; } static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { struct sockaddr_rc *sa = (struct sockaddr_rc *) addr; struct sock *sk = sock->sk; int err = 0; BT_DBG("sk %p %s", sk, batostr(&sa->rc_bdaddr)); if (!addr || addr->sa_family != AF_BLUETOOTH) return -EINVAL; lock_sock(sk); if (sk->sk_state != BT_OPEN) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; goto done; } write_lock_bh(&rfcomm_sk_list.lock); if (sa->rc_channel && __rfcomm_get_sock_by_addr(sa->rc_channel, &sa->rc_bdaddr)) { err = -EADDRINUSE; } else { /* Save source address */ bacpy(&bt_sk(sk)->src, &sa->rc_bdaddr); rfcomm_pi(sk)->channel = sa->rc_channel; sk->sk_state = BT_BOUND; } write_unlock_bh(&rfcomm_sk_list.lock); done: release_sock(sk); return err; } static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { struct sockaddr_rc *sa = (struct sockaddr_rc *) addr; struct sock *sk = sock->sk; struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; int err = 0; BT_DBG("sk %p", sk); if (alen < sizeof(struct sockaddr_rc) || addr->sa_family != AF_BLUETOOTH) return -EINVAL; lock_sock(sk); if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; goto done; } 
sk->sk_state = BT_CONNECT; bacpy(&bt_sk(sk)->dst, &sa->rc_bdaddr); rfcomm_pi(sk)->channel = sa->rc_channel; d->sec_level = rfcomm_pi(sk)->sec_level; d->role_switch = rfcomm_pi(sk)->role_switch; err = rfcomm_dlc_open(d, &bt_sk(sk)->src, &sa->rc_bdaddr, sa->rc_channel); if (!err) err = bt_sock_wait_state(sk, BT_CONNECTED, sock_sndtimeo(sk, flags & O_NONBLOCK)); done: release_sock(sk); return err; } static int rfcomm_sock_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sk %p backlog %d", sk, backlog); lock_sock(sk); if (sk->sk_state != BT_BOUND) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; goto done; } if (!rfcomm_pi(sk)->channel) { bdaddr_t *src = &bt_sk(sk)->src; u8 channel; err = -EINVAL; write_lock_bh(&rfcomm_sk_list.lock); for (channel = 1; channel < 31; channel++) if (!__rfcomm_get_sock_by_addr(channel, src)) { rfcomm_pi(sk)->channel = channel; err = 0; break; } write_unlock_bh(&rfcomm_sk_list.lock); if (err < 0) goto done; } sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; sk->sk_state = BT_LISTEN; done: release_sock(sk); return err; } static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags) { DECLARE_WAITQUEUE(wait, current); struct sock *sk = sock->sk, *nsk; long timeo; int err = 0; lock_sock(sk); if (sk->sk_state != BT_LISTEN) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; goto done; } timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); BT_DBG("sk %p timeo %ld", sk, timeo); /* Wait for an incoming connection. (wake-one). 
*/ add_wait_queue_exclusive(sk_sleep(sk), &wait); while (!(nsk = bt_accept_dequeue(sk, newsock))) { set_current_state(TASK_INTERRUPTIBLE); if (!timeo) { err = -EAGAIN; break; } release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); if (sk->sk_state != BT_LISTEN) { err = -EBADFD; break; } if (signal_pending(current)) { err = sock_intr_errno(timeo); break; } } set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); if (err) goto done; newsock->state = SS_CONNECTED; BT_DBG("new socket %p", nsk); done: release_sock(sk); return err; } static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer) { struct sockaddr_rc *sa = (struct sockaddr_rc *) addr; struct sock *sk = sock->sk; BT_DBG("sock %p, sk %p", sock, sk); sa->rc_family = AF_BLUETOOTH; sa->rc_channel = rfcomm_pi(sk)->channel; if (peer) bacpy(&sa->rc_bdaddr, &bt_sk(sk)->dst); else bacpy(&sa->rc_bdaddr, &bt_sk(sk)->src); *len = sizeof(struct sockaddr_rc); return 0; } static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; struct sk_buff *skb; int sent = 0; if (test_bit(RFCOMM_DEFER_SETUP, &d->flags)) return -ENOTCONN; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; if (sk->sk_shutdown & SEND_SHUTDOWN) return -EPIPE; BT_DBG("sock %p, sk %p", sock, sk); lock_sock(sk); while (len) { size_t size = min_t(size_t, len, d->mtu); int err; skb = sock_alloc_send_skb(sk, size + RFCOMM_SKB_RESERVE, msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) { if (sent == 0) sent = err; break; } skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE); err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); if (err) { kfree_skb(skb); if (sent == 0) sent = err; break; } err = rfcomm_dlc_send(d, skb); if (err < 0) { kfree_skb(skb); if (sent == 0) sent = err; break; } sent += size; len -= size; } release_sock(sk); return sent; } static int rfcomm_sock_recvmsg(struct 
kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; int len; if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { rfcomm_dlc_accept(d); return 0; } len = bt_sock_stream_recvmsg(iocb, sock, msg, size, flags); lock_sock(sk); if (!(flags & MSG_PEEK) && len > 0) atomic_sub(len, &sk->sk_rmem_alloc); if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2)) rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc); release_sock(sk); return len; } static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; int err = 0; u32 opt; BT_DBG("sk %p", sk); lock_sock(sk); switch (optname) { case RFCOMM_LM: if (get_user(opt, (u32 __user *) optval)) { err = -EFAULT; break; } if (opt & RFCOMM_LM_AUTH) rfcomm_pi(sk)->sec_level = BT_SECURITY_LOW; if (opt & RFCOMM_LM_ENCRYPT) rfcomm_pi(sk)->sec_level = BT_SECURITY_MEDIUM; if (opt & RFCOMM_LM_SECURE) rfcomm_pi(sk)->sec_level = BT_SECURITY_HIGH; rfcomm_pi(sk)->role_switch = (opt & RFCOMM_LM_MASTER); break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct bt_security sec; int len, err = 0; u32 opt; BT_DBG("sk %p", sk); if (level == SOL_RFCOMM) return rfcomm_sock_setsockopt_old(sock, optname, optval, optlen); if (level != SOL_BLUETOOTH) return -ENOPROTOOPT; lock_sock(sk); switch (optname) { case BT_SECURITY: if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; break; } sec.level = BT_SECURITY_LOW; len = min_t(unsigned int, sizeof(sec), optlen); if (copy_from_user((char *) &sec, optval, len)) { err = -EFAULT; break; } if (sec.level > BT_SECURITY_VERY_HIGH) { err = -EINVAL; break; } rfcomm_pi(sk)->sec_level = sec.level; BT_DBG("set to %d", sec.level); break; case BT_DEFER_SETUP: if (sk->sk_state != 
BT_BOUND && sk->sk_state != BT_LISTEN) { err = -EINVAL; break; } if (get_user(opt, (u32 __user *) optval)) { err = -EFAULT; break; } bt_sk(sk)->defer_setup = opt; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct sock *l2cap_sk; struct rfcomm_conninfo cinfo; int len, err = 0; u32 opt; BT_DBG("sk %p", sk); if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case RFCOMM_LM: switch (rfcomm_pi(sk)->sec_level) { case BT_SECURITY_LOW: opt = RFCOMM_LM_AUTH; break; case BT_SECURITY_MEDIUM: opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT; break; case BT_SECURITY_HIGH: case BT_SECURITY_VERY_HIGH: opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT | RFCOMM_LM_SECURE; break; default: opt = 0; break; } if (rfcomm_pi(sk)->role_switch) opt |= RFCOMM_LM_MASTER; if (put_user(opt, (u32 __user *) optval)) err = -EFAULT; break; case RFCOMM_CONNINFO: if (sk->sk_state != BT_CONNECTED && !rfcomm_pi(sk)->dlc->defer_setup) { err = -ENOTCONN; break; } l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk; cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle; memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3); len = min_t(unsigned int, len, sizeof(cinfo)); if (copy_to_user(optval, (char *) &cinfo, len)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct bt_security sec; int len, err = 0; BT_DBG("sk %p", sk); if (level == SOL_RFCOMM) return rfcomm_sock_getsockopt_old(sock, optname, optval, optlen); if (level != SOL_BLUETOOTH) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case BT_SECURITY: if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; break; } 
sec.level = rfcomm_pi(sk)->sec_level; len = min_t(unsigned int, len, sizeof(sec)); if (copy_to_user(optval, (char *) &sec, len)) err = -EFAULT; break; case BT_DEFER_SETUP: if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { err = -EINVAL; break; } if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int rfcomm_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk __maybe_unused = sock->sk; int err; BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg); err = bt_sock_ioctl(sock, cmd, arg); if (err == -ENOIOCTLCMD) { #ifdef CONFIG_BT_RFCOMM_TTY lock_sock(sk); err = rfcomm_dev_ioctl(sk, cmd, (void __user *) arg); release_sock(sk); #else err = -EOPNOTSUPP; #endif } return err; } static int rfcomm_sock_shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sock %p, sk %p", sock, sk); if (!sk) return 0; lock_sock(sk); if (!sk->sk_shutdown) { sk->sk_shutdown = SHUTDOWN_MASK; __rfcomm_sock_close(sk); if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); } release_sock(sk); return err; } static int rfcomm_sock_release(struct socket *sock) { struct sock *sk = sock->sk; int err; BT_DBG("sock %p, sk %p", sock, sk); if (!sk) return 0; err = rfcomm_sock_shutdown(sock, 2); sock_orphan(sk); rfcomm_sock_kill(sk); return err; } /* ---- RFCOMM core layer callbacks ---- * * called under rfcomm_lock() */ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc **d) { struct sock *sk, *parent; bdaddr_t src, dst; int result = 0; BT_DBG("session %p channel %d", s, channel); rfcomm_session_getaddr(s, &src, &dst); /* Check if we have socket listening on channel */ parent = rfcomm_get_sock_by_channel(BT_LISTEN, channel, &src); if (!parent) return 0; bh_lock_sock(parent); /* Check for backlog size */ if (sk_acceptq_is_full(parent)) { 
BT_DBG("backlog full %d", parent->sk_ack_backlog); goto done; } sk = rfcomm_sock_alloc(sock_net(parent), NULL, BTPROTO_RFCOMM, GFP_ATOMIC); if (!sk) goto done; rfcomm_sock_init(sk, parent); bacpy(&bt_sk(sk)->src, &src); bacpy(&bt_sk(sk)->dst, &dst); rfcomm_pi(sk)->channel = channel; sk->sk_state = BT_CONFIG; bt_accept_enqueue(parent, sk); /* Accept connection and return socket DLC */ *d = rfcomm_pi(sk)->dlc; result = 1; done: bh_unlock_sock(parent); if (bt_sk(parent)->defer_setup) parent->sk_state_change(parent); return result; } static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p) { struct sock *sk; struct hlist_node *node; read_lock_bh(&rfcomm_sk_list.lock); sk_for_each(sk, node, &rfcomm_sk_list.head) { seq_printf(f, "%s %s %d %d\n", batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), sk->sk_state, rfcomm_pi(sk)->channel); } read_unlock_bh(&rfcomm_sk_list.lock); return 0; } static int rfcomm_sock_debugfs_open(struct inode *inode, struct file *file) { return single_open(file, rfcomm_sock_debugfs_show, inode->i_private); } static const struct file_operations rfcomm_sock_debugfs_fops = { .open = rfcomm_sock_debugfs_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static struct dentry *rfcomm_sock_debugfs; static const struct proto_ops rfcomm_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .release = rfcomm_sock_release, .bind = rfcomm_sock_bind, .connect = rfcomm_sock_connect, .listen = rfcomm_sock_listen, .accept = rfcomm_sock_accept, .getname = rfcomm_sock_getname, .sendmsg = rfcomm_sock_sendmsg, .recvmsg = rfcomm_sock_recvmsg, .shutdown = rfcomm_sock_shutdown, .setsockopt = rfcomm_sock_setsockopt, .getsockopt = rfcomm_sock_getsockopt, .ioctl = rfcomm_sock_ioctl, .poll = bt_sock_poll, .socketpair = sock_no_socketpair, .mmap = sock_no_mmap }; static const struct net_proto_family rfcomm_sock_family_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .create = rfcomm_sock_create }; int __init 
rfcomm_init_sockets(void) { int err; err = proto_register(&rfcomm_proto, 0); if (err < 0) return err; err = bt_sock_register(BTPROTO_RFCOMM, &rfcomm_sock_family_ops); if (err < 0) goto error; if (bt_debugfs) { rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444, bt_debugfs, NULL, &rfcomm_sock_debugfs_fops); if (!rfcomm_sock_debugfs) BT_ERR("Failed to create RFCOMM debug file"); } BT_INFO("RFCOMM socket layer initialized"); return 0; error: BT_ERR("RFCOMM socket layer registration failed"); proto_unregister(&rfcomm_proto); return err; } void __exit rfcomm_cleanup_sockets(void) { debugfs_remove(rfcomm_sock_debugfs); if (bt_sock_unregister(BTPROTO_RFCOMM) < 0) BT_ERR("RFCOMM socket layer unregistration failed"); proto_unregister(&rfcomm_proto); }
gpl-2.0
yangjoo/kernel_samsung_smdk4412
drivers/video/via/viafbdev.c
1847
60405
/* * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved. * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved. * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; * either version 2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even * the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE.See the GNU General Public License * for more details. * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/module.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/via-core.h> #include <asm/olpc.h> #define _MASTER_FILE #include "global.h" static char *viafb_name = "Via"; static u32 pseudo_pal[17]; /* video mode */ static char *viafb_mode; static char *viafb_mode1; static int viafb_bpp = 32; static int viafb_bpp1 = 32; static unsigned int viafb_second_xres = 640; static unsigned int viafb_second_yres = 480; static unsigned int viafb_second_offset; static int viafb_second_size; static int viafb_accel = 1; /* Added for specifying active devices.*/ static char *viafb_active_dev; /*Added for specify lcd output port*/ static char *viafb_lcd_port = ""; static char *viafb_dvi_port = ""; static void retrieve_device_setting(struct viafb_ioctl_setting *setting_info); static int viafb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info); static struct fb_ops viafb_ops; /* supported output devices on each IGP * only CX700, VX800, VX855, VX900 were documented * VIA_CRT should be everywhere * VIA_6C can be onle pre-CX700 (probably only on CLE266) as 6C is used for PLL * source selection on 
CX700 and later * K400 seems to support VIA_96, VIA_DVP1, VIA_LVDS{1,2} as in viamode.c */ static const u32 supported_odev_map[] = { [UNICHROME_CLE266] = VIA_CRT | VIA_LDVP0 | VIA_LDVP1, [UNICHROME_K400] = VIA_CRT | VIA_DVP0 | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_K800] = VIA_CRT | VIA_DVP0 | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_PM800] = VIA_CRT | VIA_DVP0 | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_CN700] = VIA_CRT | VIA_DVP0 | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_CX700] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_CN750] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_K8M890] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_P4M890] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_P4M900] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_VX800] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_VX855] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_VX900] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, }; static void viafb_fill_var_color_info(struct fb_var_screeninfo *var, u8 depth) { var->grayscale = 0; var->red.msb_right = 0; var->green.msb_right = 0; var->blue.msb_right = 0; var->transp.offset = 0; var->transp.length = 0; var->transp.msb_right = 0; var->nonstd = 0; switch (depth) { case 8: var->bits_per_pixel = 8; var->red.offset = 0; var->green.offset = 0; var->blue.offset = 0; var->red.length = 8; var->green.length = 8; var->blue.length = 8; break; case 15: var->bits_per_pixel = 16; var->red.offset = 10; var->green.offset = 5; var->blue.offset = 0; var->red.length = 5; var->green.length = 5; var->blue.length = 5; break; case 16: var->bits_per_pixel = 16; var->red.offset = 11; var->green.offset = 5; var->blue.offset = 0; var->red.length = 5; var->green.length = 6; var->blue.length = 5; break; case 24: var->bits_per_pixel = 32; var->red.offset = 16; var->green.offset = 8; var->blue.offset = 0; var->red.length = 8; var->green.length = 8; var->blue.length = 8; break; case 30: 
var->bits_per_pixel = 32; var->red.offset = 20; var->green.offset = 10; var->blue.offset = 0; var->red.length = 10; var->green.length = 10; var->blue.length = 10; break; } } static void viafb_update_fix(struct fb_info *info) { u32 bpp = info->var.bits_per_pixel; info->fix.visual = bpp == 8 ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; info->fix.line_length = ALIGN(info->var.xres_virtual * bpp / 8, VIA_PITCH_SIZE); } static void viafb_setup_fixinfo(struct fb_fix_screeninfo *fix, struct viafb_par *viaparinfo) { memset(fix, 0, sizeof(struct fb_fix_screeninfo)); strcpy(fix->id, viafb_name); fix->smem_start = viaparinfo->fbmem; fix->smem_len = viaparinfo->fbmem_free; fix->type = FB_TYPE_PACKED_PIXELS; fix->type_aux = 0; fix->visual = FB_VISUAL_TRUECOLOR; fix->xpanstep = fix->ywrapstep = 0; fix->ypanstep = 1; /* Just tell the accel name */ viafbinfo->fix.accel = FB_ACCEL_VIA_UNICHROME; } static int viafb_open(struct fb_info *info, int user) { DEBUG_MSG(KERN_INFO "viafb_open!\n"); return 0; } static int viafb_release(struct fb_info *info, int user) { DEBUG_MSG(KERN_INFO "viafb_release!\n"); return 0; } static inline int get_var_refresh(struct fb_var_screeninfo *var) { u32 htotal, vtotal; htotal = var->left_margin + var->xres + var->right_margin + var->hsync_len; vtotal = var->upper_margin + var->yres + var->lower_margin + var->vsync_len; return PICOS2KHZ(var->pixclock) * 1000 / (htotal * vtotal); } static int viafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { int depth, refresh; struct VideoModeTable *vmode_entry; struct viafb_par *ppar = info->par; u32 line; DEBUG_MSG(KERN_INFO "viafb_check_var!\n"); /* Sanity check */ /* HW neither support interlacte nor double-scaned mode */ if (var->vmode & FB_VMODE_INTERLACED || var->vmode & FB_VMODE_DOUBLE) return -EINVAL; vmode_entry = viafb_get_mode(var->xres, var->yres); if (!vmode_entry) { DEBUG_MSG(KERN_INFO "viafb: Mode %dx%dx%d not supported!!\n", var->xres, var->yres, var->bits_per_pixel); return 
-EINVAL; } depth = fb_get_color_depth(var, &info->fix); if (!depth) depth = var->bits_per_pixel; if (depth < 0 || depth > 32) return -EINVAL; else if (!depth) depth = 24; else if (depth == 15 && viafb_dual_fb && ppar->iga_path == IGA1) depth = 15; else if (depth == 30) depth = 30; else if (depth <= 8) depth = 8; else if (depth <= 16) depth = 16; else depth = 24; viafb_fill_var_color_info(var, depth); if (var->xres_virtual < var->xres) var->xres_virtual = var->xres; line = ALIGN(var->xres_virtual * var->bits_per_pixel / 8, VIA_PITCH_SIZE); if (line > VIA_PITCH_MAX || line * var->yres_virtual > ppar->memsize) return -EINVAL; /* Based on var passed in to calculate the refresh, * because our driver use some modes special. */ refresh = viafb_get_refresh(var->xres, var->yres, get_var_refresh(var)); /* Adjust var according to our driver's own table */ viafb_fill_var_timing_info(var, refresh, vmode_entry); if (var->accel_flags & FB_ACCELF_TEXT && !ppar->shared->vdev->engine_mmio) var->accel_flags = 0; return 0; } static int viafb_set_par(struct fb_info *info) { struct viafb_par *viapar = info->par; struct VideoModeTable *vmode_entry, *vmode_entry1 = NULL; int refresh; DEBUG_MSG(KERN_INFO "viafb_set_par!\n"); viafb_update_fix(info); viapar->depth = fb_get_color_depth(&info->var, &info->fix); viafb_update_device_setting(viafbinfo->var.xres, viafbinfo->var.yres, viafbinfo->var.bits_per_pixel, 0); vmode_entry = viafb_get_mode(viafbinfo->var.xres, viafbinfo->var.yres); if (viafb_dual_fb) { vmode_entry1 = viafb_get_mode(viafbinfo1->var.xres, viafbinfo1->var.yres); viafb_update_device_setting(viafbinfo1->var.xres, viafbinfo1->var.yres, viafbinfo1->var.bits_per_pixel, 1); } else if (viafb_SAMM_ON == 1) { DEBUG_MSG(KERN_INFO "viafb_second_xres = %d, viafb_second_yres = %d, bpp = %d\n", viafb_second_xres, viafb_second_yres, viafb_bpp1); vmode_entry1 = viafb_get_mode(viafb_second_xres, viafb_second_yres); viafb_update_device_setting(viafb_second_xres, viafb_second_yres, viafb_bpp1, 
1); } refresh = viafb_get_refresh(info->var.xres, info->var.yres, get_var_refresh(&info->var)); if (vmode_entry) { if (viafb_dual_fb && viapar->iga_path == IGA2) { viafb_bpp1 = info->var.bits_per_pixel; viafb_refresh1 = refresh; } else { viafb_bpp = info->var.bits_per_pixel; viafb_refresh = refresh; } if (info->var.accel_flags & FB_ACCELF_TEXT) info->flags &= ~FBINFO_HWACCEL_DISABLED; else info->flags |= FBINFO_HWACCEL_DISABLED; viafb_setmode(vmode_entry, info->var.bits_per_pixel, vmode_entry1, viafb_bpp1); viafb_pan_display(&info->var, info); } return 0; } /* Set one color register */ static int viafb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { struct viafb_par *viapar = info->par; u32 r, g, b; if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR) { if (regno > 255) return -EINVAL; if (!viafb_dual_fb || viapar->iga_path == IGA1) viafb_set_primary_color_register(regno, red >> 8, green >> 8, blue >> 8); if (!viafb_dual_fb || viapar->iga_path == IGA2) viafb_set_secondary_color_register(regno, red >> 8, green >> 8, blue >> 8); } else { if (regno > 15) return -EINVAL; r = (red >> (16 - info->var.red.length)) << info->var.red.offset; b = (blue >> (16 - info->var.blue.length)) << info->var.blue.offset; g = (green >> (16 - info->var.green.length)) << info->var.green.offset; ((u32 *) info->pseudo_palette)[regno] = r | g | b; } return 0; } static int viafb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct viafb_par *viapar = info->par; u32 vram_addr = viapar->vram_addr + var->yoffset * info->fix.line_length + var->xoffset * info->var.bits_per_pixel / 8; DEBUG_MSG(KERN_DEBUG "viafb_pan_display, address = %d\n", vram_addr); if (!viafb_dual_fb) { via_set_primary_address(vram_addr); via_set_secondary_address(vram_addr); } else if (viapar->iga_path == IGA1) via_set_primary_address(vram_addr); else via_set_secondary_address(vram_addr); return 0; } static int viafb_blank(int blank_mode, struct 
fb_info *info)
{
	DEBUG_MSG(KERN_INFO "viafb_blank!\n");
	/* clear DPMS setting */

	switch (blank_mode) {
	case FB_BLANK_UNBLANK:
		/* Screen: On, HSync: On, VSync: On */
		/* control CRT monitor power management */
		via_set_state(VIA_CRT, VIA_STATE_ON);
		break;
	case FB_BLANK_HSYNC_SUSPEND:
		/* Screen: Off, HSync: Off, VSync: On */
		/* control CRT monitor power management */
		via_set_state(VIA_CRT, VIA_STATE_STANDBY);
		break;
	case FB_BLANK_VSYNC_SUSPEND:
		/* Screen: Off, HSync: On, VSync: Off */
		/* control CRT monitor power management */
		via_set_state(VIA_CRT, VIA_STATE_SUSPEND);
		break;
	case FB_BLANK_POWERDOWN:
		/* Screen: Off, HSync: Off, VSync: Off */
		/* control CRT monitor power management */
		via_set_state(VIA_CRT, VIA_STATE_OFF);
		break;
	}

	return 0;
}

/*
 * Legacy viafb-private ioctl interface for querying/controlling output
 * devices, SAMM state, gamma tables and panel geometry.  Explicitly
 * deprecated (see the warning below) but kept for old userspace tools.
 */
static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
{
	union {
		struct viafb_ioctl_mode viamode;
		struct viafb_ioctl_samm viasamm;
		struct viafb_driver_version driver_version;
		struct fb_var_screeninfo sec_var;
		struct _panel_size_pos_info panel_pos_size_para;
		struct viafb_ioctl_setting viafb_setting;
		struct device_t active_dev;
	} u;
	u32 state_info = 0;
	u32 *viafb_gamma_table;
	char driver_name[] = "viafb";

	u32 __user *argp = (u32 __user *) arg;
	u32 gpu32;

	DEBUG_MSG(KERN_INFO "viafb_ioctl: 0x%X !!\n", cmd);
	printk(KERN_WARNING "viafb_ioctl: Please avoid this interface as it is unstable and might change or vanish at any time!\n");
	memset(&u, 0, sizeof(u));

	switch (cmd) {
	case VIAFB_GET_CHIP_INFO:
		if (copy_to_user(argp, viaparinfo->chip_info,
				sizeof(struct chip_information)))
			return -EFAULT;
		break;
	case VIAFB_GET_INFO_SIZE:
		return put_user((u32)sizeof(struct viafb_ioctl_info), argp);
	case VIAFB_GET_INFO:
		return viafb_ioctl_get_viafb_info(arg);
	case VIAFB_HOTPLUG:
		return put_user(viafb_ioctl_hotplug(info->var.xres,
					      info->var.yres,
					      info->var.bits_per_pixel), argp);
	case VIAFB_SET_HOTPLUG_FLAG:
		if (copy_from_user(&gpu32, argp, sizeof(gpu32)))
			return -EFAULT;
		viafb_hotplug = (gpu32) ? 1 : 0;
		break;
	case VIAFB_GET_RESOLUTION:
		/* report the current (and, under SAMM, the second head's)
		 * resolution/refresh/bpp back to userspace */
		u.viamode.xres = (u32) viafb_hotplug_Xres;
		u.viamode.yres = (u32) viafb_hotplug_Yres;
		u.viamode.refresh = (u32) viafb_hotplug_refresh;
		u.viamode.bpp = (u32) viafb_hotplug_bpp;
		if (viafb_SAMM_ON == 1) {
			u.viamode.xres_sec = viafb_second_xres;
			u.viamode.yres_sec = viafb_second_yres;
			u.viamode.virtual_xres_sec = viafb_dual_fb ?
				viafbinfo1->var.xres_virtual :
				viafbinfo->var.xres_virtual;
			u.viamode.virtual_yres_sec = viafb_dual_fb ?
				viafbinfo1->var.yres_virtual :
				viafbinfo->var.yres_virtual;
			u.viamode.refresh_sec = viafb_refresh1;
			u.viamode.bpp_sec = viafb_bpp1;
		} else {
			u.viamode.xres_sec = 0;
			u.viamode.yres_sec = 0;
			u.viamode.virtual_xres_sec = 0;
			u.viamode.virtual_yres_sec = 0;
			u.viamode.refresh_sec = 0;
			u.viamode.bpp_sec = 0;
		}
		if (copy_to_user(argp, &u.viamode, sizeof(u.viamode)))
			return -EFAULT;
		break;
	case VIAFB_GET_SAMM_INFO:
		/* describe how video memory is split between the heads */
		u.viasamm.samm_status = viafb_SAMM_ON;

		if (viafb_SAMM_ON == 1) {
			if (viafb_dual_fb) {
				u.viasamm.size_prim = viaparinfo->fbmem_free;
				u.viasamm.size_sec = viaparinfo1->fbmem_free;
			} else {
				if (viafb_second_size) {
					u.viasamm.size_prim =
						viaparinfo->fbmem_free -
						viafb_second_size * 1024 * 1024;
					u.viasamm.size_sec =
						viafb_second_size * 1024 * 1024;
				} else {
					/* no explicit split: halve the pool */
					u.viasamm.size_prim =
						viaparinfo->fbmem_free >> 1;
					u.viasamm.size_sec =
						(viaparinfo->fbmem_free >> 1);
				}
			}
			u.viasamm.mem_base = viaparinfo->fbmem;
			u.viasamm.offset_sec = viafb_second_offset;
		} else {
			u.viasamm.size_prim =
				viaparinfo->memsize - viaparinfo->fbmem_used;
			u.viasamm.size_sec = 0;
			u.viasamm.mem_base = viaparinfo->fbmem;
			u.viasamm.offset_sec = 0;
		}

		if (copy_to_user(argp, &u.viasamm, sizeof(u.viasamm)))
			return -EFAULT;

		break;
	case VIAFB_TURN_ON_OUTPUT_DEVICE:
		if (copy_from_user(&gpu32, argp, sizeof(gpu32)))
			return -EFAULT;
		if (gpu32 & CRT_Device)
			via_set_state(VIA_CRT, VIA_STATE_ON);
		if (gpu32 & DVI_Device)
			viafb_dvi_enable();
		if (gpu32 & LCD_Device)
			viafb_lcd_enable();
		break;
	case VIAFB_TURN_OFF_OUTPUT_DEVICE:
		if (copy_from_user(&gpu32, argp, sizeof(gpu32)))
			return -EFAULT;
		if (gpu32 & CRT_Device)
			via_set_state(VIA_CRT, VIA_STATE_OFF);
		if (gpu32 & DVI_Device)
			viafb_dvi_disable();
		if (gpu32 & LCD_Device)
			viafb_lcd_disable();
		break;
	case VIAFB_GET_DEVICE:
		/* snapshot of the module-parameter driven device config */
		u.active_dev.crt = viafb_CRT_ON;
		u.active_dev.dvi = viafb_DVI_ON;
		u.active_dev.lcd = viafb_LCD_ON;
		u.active_dev.samm = viafb_SAMM_ON;
		u.active_dev.primary_dev = viafb_primary_dev;

		u.active_dev.lcd_dsp_cent = viafb_lcd_dsp_method;
		u.active_dev.lcd_panel_id = viafb_lcd_panel_id;
		u.active_dev.lcd_mode = viafb_lcd_mode;

		u.active_dev.xres = viafb_hotplug_Xres;
		u.active_dev.yres = viafb_hotplug_Yres;

		u.active_dev.xres1 = viafb_second_xres;
		u.active_dev.yres1 = viafb_second_yres;

		u.active_dev.bpp = viafb_bpp;
		u.active_dev.bpp1 = viafb_bpp1;
		u.active_dev.refresh = viafb_refresh;
		u.active_dev.refresh1 = viafb_refresh1;

		u.active_dev.epia_dvi = viafb_platform_epia_dvi;
		u.active_dev.lcd_dual_edge = viafb_device_lcd_dualedge;
		u.active_dev.bus_width = viafb_bus_width;

		if (copy_to_user(argp, &u.active_dev, sizeof(u.active_dev)))
			return -EFAULT;
		break;

	case VIAFB_GET_DRIVER_VERSION:
		u.driver_version.iMajorNum = VERSION_MAJOR;
		u.driver_version.iKernelNum = VERSION_KERNEL;
		u.driver_version.iOSNum = VERSION_OS;
		u.driver_version.iMinorNum = VERSION_MINOR;

		if (copy_to_user(argp, &u.driver_version,
			sizeof(u.driver_version)))
			return -EFAULT;

		break;

	case VIAFB_GET_DEVICE_INFO:
		retrieve_device_setting(&u.viafb_setting);

		if (copy_to_user(argp, &u.viafb_setting,
				 sizeof(u.viafb_setting)))
			return -EFAULT;

		break;

	case VIAFB_GET_DEVICE_SUPPORT:
		viafb_get_device_support_state(&state_info);
		if (put_user(state_info, argp))
			return -EFAULT;
		break;

	case VIAFB_GET_DEVICE_CONNECT:
		viafb_get_device_connect_state(&state_info);
		if (put_user(state_info, argp))
			return -EFAULT;
		break;

	case VIAFB_GET_PANEL_SUPPORT_EXPAND:
		state_info =
		    viafb_lcd_get_support_expand_state(info->var.xres,
						 info->var.yres);
		if (put_user(state_info, argp))
			return -EFAULT;
		break;

	case VIAFB_GET_DRIVER_NAME:
		if
		   (copy_to_user(argp, driver_name, sizeof(driver_name)))
			return -EFAULT;
		break;

	case VIAFB_SET_GAMMA_LUT:
		/* copy a 256-entry table from userspace and load it */
		viafb_gamma_table = memdup_user(argp, 256 * sizeof(u32));
		if (IS_ERR(viafb_gamma_table))
			return PTR_ERR(viafb_gamma_table);
		viafb_set_gamma_table(viafb_bpp, viafb_gamma_table);
		kfree(viafb_gamma_table);
		break;

	case VIAFB_GET_GAMMA_LUT:
		viafb_gamma_table = kmalloc(256 * sizeof(u32), GFP_KERNEL);
		if (!viafb_gamma_table)
			return -ENOMEM;
		viafb_get_gamma_table(viafb_gamma_table);
		if (copy_to_user(argp, viafb_gamma_table,
			256 * sizeof(u32))) {
			kfree(viafb_gamma_table);
			return -EFAULT;
		}
		kfree(viafb_gamma_table);
		break;

	case VIAFB_GET_GAMMA_SUPPORT_STATE:
		viafb_get_gamma_support_state(viafb_bpp, &state_info);
		if (put_user(state_info, argp))
			return -EFAULT;
		break;
	case VIAFB_SYNC_SURFACE:
		DEBUG_MSG(KERN_INFO "lobo VIAFB_SYNC_SURFACE\n");
		break;
	case VIAFB_GET_DRIVER_CAPS:
		break;

	/* The four panel size/position queries below all round-trip the
	 * user struct and simply report x = y = 0. */
	case VIAFB_GET_PANEL_MAX_SIZE:
		if (copy_from_user(&u.panel_pos_size_para, argp,
				   sizeof(u.panel_pos_size_para)))
			return -EFAULT;
		u.panel_pos_size_para.x = u.panel_pos_size_para.y = 0;
		if (copy_to_user(argp, &u.panel_pos_size_para,
		     sizeof(u.panel_pos_size_para)))
			return -EFAULT;
		break;
	case VIAFB_GET_PANEL_MAX_POSITION:
		if (copy_from_user(&u.panel_pos_size_para, argp,
				   sizeof(u.panel_pos_size_para)))
			return -EFAULT;
		u.panel_pos_size_para.x = u.panel_pos_size_para.y = 0;
		if (copy_to_user(argp, &u.panel_pos_size_para,
				 sizeof(u.panel_pos_size_para)))
			return -EFAULT;
		break;
	case VIAFB_GET_PANEL_POSITION:
		if (copy_from_user(&u.panel_pos_size_para, argp,
				   sizeof(u.panel_pos_size_para)))
			return -EFAULT;
		u.panel_pos_size_para.x = u.panel_pos_size_para.y = 0;
		if (copy_to_user(argp, &u.panel_pos_size_para,
				 sizeof(u.panel_pos_size_para)))
			return -EFAULT;
		break;
	case VIAFB_GET_PANEL_SIZE:
		if (copy_from_user(&u.panel_pos_size_para, argp,
				   sizeof(u.panel_pos_size_para)))
			return -EFAULT;
		u.panel_pos_size_para.x = u.panel_pos_size_para.y = 0;
		if (copy_to_user(argp, &u.panel_pos_size_para,
				 sizeof(u.panel_pos_size_para)))
			return -EFAULT;
		break;

	case VIAFB_SET_PANEL_POSITION:
		/* accepted but ignored (no-op) */
		if (copy_from_user(&u.panel_pos_size_para, argp,
				   sizeof(u.panel_pos_size_para)))
			return -EFAULT;
		break;
	case VIAFB_SET_PANEL_SIZE:
		/* accepted but ignored (no-op) */
		if (copy_from_user(&u.panel_pos_size_para, argp,
				   sizeof(u.panel_pos_size_para)))
			return -EFAULT;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/* Fill a rectangle with the 2D engine, falling back to cfb_fillrect. */
static void viafb_fillrect(struct fb_info *info,
	const struct fb_fillrect *rect)
{
	struct viafb_par *viapar = info->par;
	struct viafb_shared *shared = viapar->shared;
	u32 fg_color;
	u8 rop;

	if (info->flags & FBINFO_HWACCEL_DISABLED || !shared->hw_bitblt) {
		cfb_fillrect(info, rect);
		return;
	}

	if (!rect->width || !rect->height)
		return;

	if (info->fix.visual == FB_VISUAL_TRUECOLOR)
		fg_color = ((u32 *)info->pseudo_palette)[rect->color];
	else
		fg_color = rect->color;

	/* hardware ROP codes: 0x5A = XOR, 0xF0 = copy (PATCOPY) */
	if (rect->rop == ROP_XOR)
		rop = 0x5A;
	else
		rop = 0xF0;

	DEBUG_MSG(KERN_DEBUG "viafb 2D engine: fillrect\n");
	if (shared->hw_bitblt(shared->vdev->engine_mmio, VIA_BITBLT_FILL,
		rect->width, rect->height, info->var.bits_per_pixel,
		viapar->vram_addr, info->fix.line_length, rect->dx, rect->dy,
		NULL, 0, 0, 0, 0, fg_color, 0, rop))
		cfb_fillrect(info, rect);
}

/* Screen-to-screen copy with the 2D engine; cfb_copyarea fallback. */
static void viafb_copyarea(struct fb_info *info,
	const struct fb_copyarea *area)
{
	struct viafb_par *viapar = info->par;
	struct viafb_shared *shared = viapar->shared;

	if (info->flags & FBINFO_HWACCEL_DISABLED || !shared->hw_bitblt) {
		cfb_copyarea(info, area);
		return;
	}

	if (!area->width || !area->height)
		return;

	DEBUG_MSG(KERN_DEBUG "viafb 2D engine: copyarea\n");
	if (shared->hw_bitblt(shared->vdev->engine_mmio, VIA_BITBLT_COLOR,
		area->width, area->height, info->var.bits_per_pixel,
		viapar->vram_addr, info->fix.line_length, area->dx, area->dy,
		NULL, viapar->vram_addr, info->fix.line_length,
		area->sx, area->sy, 0, 0, 0))
		cfb_copyarea(info, area);
}

/* Blit a mono or color image with the 2D engine; cfb fallback. */
static void viafb_imageblit(struct fb_info *info,
	const struct fb_image *image)
{
	struct viafb_par *viapar = info->par;
	struct viafb_shared *shared = viapar->shared;
	u32 fg_color = 0, bg_color = 0;
	u8 op;

	/* fall back for disabled accel or a depth the engine can't blit */
	if (info->flags & FBINFO_HWACCEL_DISABLED || !shared->hw_bitblt ||
		(image->depth != 1 && image->depth != viapar->depth)) {
		cfb_imageblit(info, image);
		return;
	}

	if (image->depth == 1) {
		op = VIA_BITBLT_MONO;
		/* expand mono bitmap with fg/bg colors */
		if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
			fg_color =
				((u32 *)info->pseudo_palette)[image->fg_color];
			bg_color =
				((u32 *)info->pseudo_palette)[image->bg_color];
		} else {
			fg_color = image->fg_color;
			bg_color = image->bg_color;
		}
	} else
		op = VIA_BITBLT_COLOR;

	DEBUG_MSG(KERN_DEBUG "viafb 2D engine: imageblit\n");
	if (shared->hw_bitblt(shared->vdev->engine_mmio, op,
		image->width, image->height, info->var.bits_per_pixel,
		viapar->vram_addr, info->fix.line_length,
		image->dx, image->dy, (u32 *)image->data, 0, 0, 0, 0,
		fg_color, bg_color, 0))
		cfb_imageblit(info, image);
}

/*
 * Program the hardware cursor: hot spot, position, size, colors and
 * (optionally) a new shape uploaded to the cursor area in VRAM.
 */
static int viafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
	struct viafb_par *viapar = info->par;
	void __iomem *engine = viapar->shared->vdev->engine_mmio;
	u32 temp, xx, yy, bg_color = 0, fg_color = 0,
		chip_name = viapar->shared->chip_info.gfx_chip_name;
	int i, j = 0, cur_size = 64;

	if (info->flags & FBINFO_HWACCEL_DISABLED || info != viafbinfo)
		return -ENODEV;

	/* LCD ouput does not support hw cursors (at least on VN896) */
	if ((chip_name == UNICHROME_CLE266 && viapar->iga_path == IGA2) ||
	    viafb_LCD_ON)
		return -ENODEV;

	/* hide the cursor while reprogramming it */
	viafb_show_hw_cursor(info, HW_Cursor_OFF);

	if (cursor->set & FB_CUR_SETHOT) {
		temp = (cursor->hot.x << 16) + cursor->hot.y;
		writel(temp, engine + VIA_REG_CURSOR_ORG);
	}

	if (cursor->set & FB_CUR_SETPOS) {
		yy = cursor->image.dy - info->var.yoffset;
		xx = cursor->image.dx - info->var.xoffset;
		temp = yy & 0xFFFF;
		temp |= (xx << 16);
		writel(temp, engine + VIA_REG_CURSOR_POS);
	}

	/* hardware supports only 32x32 or 64x64 cursors */
	if (cursor->image.width <= 32 && cursor->image.height <= 32)
		cur_size = 32;
	else if (cursor->image.width <= 64 && cursor->image.height <= 64)
		cur_size = 64;
	else {
		printk(KERN_WARNING "viafb_cursor: The cursor is too large "
			"%dx%d", cursor->image.width, cursor->image.height);
		return -ENXIO;
	}

	if (cursor->set & FB_CUR_SETSIZE) {
		temp = readl(engine + VIA_REG_CURSOR_MODE);

		if (cur_size == 32)
			temp |= 0x2;
		else
			temp &= ~0x2;

		writel(temp, engine + VIA_REG_CURSOR_MODE);
	}

	if (cursor->set & FB_CUR_SETCMAP) {
		fg_color = cursor->image.fg_color;
		bg_color = cursor->image.bg_color;
		/* newer chips take 10-bit, older 8-bit color components */
		if (chip_name == UNICHROME_CX700 ||
			chip_name == UNICHROME_VX800 ||
			chip_name == UNICHROME_VX855 ||
			chip_name == UNICHROME_VX900) {
			fg_color =
				((info->cmap.red[fg_color] & 0xFFC0) << 14) |
				((info->cmap.green[fg_color] & 0xFFC0) << 4) |
				((info->cmap.blue[fg_color] & 0xFFC0) >> 6);
			bg_color =
				((info->cmap.red[bg_color] & 0xFFC0) << 14) |
				((info->cmap.green[bg_color] & 0xFFC0) << 4) |
				((info->cmap.blue[bg_color] & 0xFFC0) >> 6);
		} else {
			fg_color =
				((info->cmap.red[fg_color] & 0xFF00) << 8) |
				(info->cmap.green[fg_color] & 0xFF00) |
				((info->cmap.blue[fg_color] & 0xFF00) >> 8);
			bg_color =
				((info->cmap.red[bg_color] & 0xFF00) << 8) |
				(info->cmap.green[bg_color] & 0xFF00) |
				((info->cmap.blue[bg_color] & 0xFF00) >> 8);
		}

		writel(bg_color, engine + VIA_REG_CURSOR_BG);
		writel(fg_color, engine + VIA_REG_CURSOR_FG);
	}

	if (cursor->set & FB_CUR_SETSHAPE) {
		struct {
			u8 data[CURSOR_SIZE];
			u32 bak[CURSOR_SIZE / 4];
		} *cr_data = kzalloc(sizeof(*cr_data), GFP_ATOMIC);
		int size = ((cursor->image.width + 7) >> 3) *
			cursor->image.height;

		if (!cr_data)
			return -ENOMEM;

		/* pre-fill the AND/XOR planes to "transparent" */
		if (cur_size == 32) {
			for (i = 0; i < (CURSOR_SIZE / 4); i++) {
				cr_data->bak[i] = 0x0;
				cr_data->bak[i + 1] = 0xFFFFFFFF;
				i += 1;	/* step 2 per iteration with i++ */
			}
		} else {
			for (i = 0; i < (CURSOR_SIZE / 4); i++) {
				cr_data->bak[i] = 0x0;
				cr_data->bak[i + 1] = 0x0;
				cr_data->bak[i + 2] = 0xFFFFFFFF;
				cr_data->bak[i + 3] = 0xFFFFFFFF;
				i += 3;	/* step 4 per iteration with i++ */
			}
		}

		/* NOTE(review): XOR and COPY branches are identical —
		 * both use only cursor->mask; verify against fbcon usage */
		switch (cursor->rop) {
		case ROP_XOR:
			for (i = 0; i < size; i++)
				cr_data->data[i] = cursor->mask[i];
			break;
		case ROP_COPY:

			for (i = 0; i < size; i++)
				cr_data->data[i] = cursor->mask[i];
			break;
		default:
			break;
		}

		/* expand byte mask into the hardware's u32 plane layout */
		if (cur_size == 32) {
			for (i = 0; i < size; i++) {
				cr_data->bak[j] = (u32) cr_data->data[i];
				cr_data->bak[j + 1] = ~cr_data->bak[j];
				j += 2;
			}
		} else {
			for (i = 0; i < size; i++) {
				cr_data->bak[j] = (u32) cr_data->data[i];
				cr_data->bak[j + 1] = 0x0;
				cr_data->bak[j + 2] = ~cr_data->bak[j];
				cr_data->bak[j + 3] = ~cr_data->bak[j + 1];
				j += 4;
			}
		}

		/* upload the expanded shape into the cursor VRAM slot */
		memcpy_toio(viafbinfo->screen_base + viapar->shared->
			cursor_vram_addr, cr_data->bak, CURSOR_SIZE);
		kfree(cr_data);
	}

	if (cursor->enable)
		viafb_show_hw_cursor(info, HW_Cursor_ON);

	return 0;
}

/* Wait for the 2D engine to drain before CPU access to the fb. */
static int viafb_sync(struct fb_info *info)
{
	if (!(info->flags & FBINFO_HWACCEL_DISABLED))
		viafb_wait_engine_idle(info);
	return 0;
}

/* Return the device (CRT/DVI/LCD/LCD2) currently routed through IGA1. */
static int get_primary_device(void)
{
	int primary_device = 0;
	/* Rule: device on iga1 path are the primary device. */
	if (viafb_SAMM_ON) {
		if (viafb_CRT_ON) {
			if (viaparinfo->shared->iga1_devices & VIA_CRT) {
				DEBUG_MSG(KERN_INFO "CRT IGA Path:%d\n",
					IGA1);
				primary_device = CRT_Device;
			}
		}
		if (viafb_DVI_ON) {
			if (viaparinfo->tmds_setting_info->iga_path == IGA1) {
				DEBUG_MSG(KERN_INFO "DVI IGA Path:%d\n",
					viaparinfo->
					tmds_setting_info->iga_path);
				primary_device = DVI_Device;
			}
		}
		if (viafb_LCD_ON) {
			if (viaparinfo->lvds_setting_info->iga_path == IGA1) {
				DEBUG_MSG(KERN_INFO "LCD IGA Path:%d\n",
					viaparinfo->
					lvds_setting_info->iga_path);
				primary_device = LCD_Device;
			}
		}
		if (viafb_LCD2_ON) {
			if (viaparinfo->lvds_setting_info2->iga_path == IGA1) {
				DEBUG_MSG(KERN_INFO "LCD2 IGA Path:%d\n",
					viaparinfo->
					lvds_setting_info2->iga_path);
				primary_device = LCD2_Device;
			}
		}
	}
	return primary_device;
}

/* Fill a viafb_ioctl_setting from the current module-level state. */
static void retrieve_device_setting(struct viafb_ioctl_setting
	*setting_info)
{

	/* get device status */
	if (viafb_CRT_ON == 1)
		setting_info->device_status = CRT_Device;
	if (viafb_DVI_ON == 1)
		setting_info->device_status |= DVI_Device;
	if (viafb_LCD_ON == 1)
		setting_info->device_status |= LCD_Device;
	if (viafb_LCD2_ON == 1)
		setting_info->device_status |= LCD2_Device;

	setting_info->samm_status = viafb_SAMM_ON;
	setting_info->primary_device = get_primary_device();

	setting_info->first_dev_bpp = viafb_bpp;
	setting_info->second_dev_bpp = viafb_bpp1;

	setting_info->first_dev_refresh = viafb_refresh;
	setting_info->second_dev_refresh = viafb_refresh1;

	setting_info->first_dev_hor_res = viafb_hotplug_Xres;
	setting_info->first_dev_ver_res = viafb_hotplug_Yres;
	setting_info->second_dev_hor_res = viafb_second_xres;
	setting_info->second_dev_ver_res = viafb_second_yres;

	/* Get lcd attributes */
	setting_info->lcd_attributes.display_center = viafb_lcd_dsp_method;
	setting_info->lcd_attributes.panel_id = viafb_lcd_panel_id;
	setting_info->lcd_attributes.lcd_mode = viafb_lcd_mode;
}

/*
 * Translate the viafb_active_dev module parameter ("CRT+DVI", "LCD", ...)
 * into the viafb_*_ON flags and the primary device selection.
 */
static int __init parse_active_dev(void)
{
	viafb_CRT_ON = STATE_OFF;
	viafb_DVI_ON = STATE_OFF;
	viafb_LCD_ON = STATE_OFF;
	viafb_LCD2_ON = STATE_OFF;
	/* 1. Modify the active status of devices. */
	/* 2. Keep the order of devices, so we can set corresponding
	   IGA path to devices in SAMM case. */
	/*    Note: The previous of active_dev is primary device,
	   and the following is secondary device. */
	if (!viafb_active_dev) {
		if (machine_is_olpc()) { /* LCD only */
			viafb_LCD_ON = STATE_ON;
			viafb_SAMM_ON = STATE_OFF;
		} else {
			viafb_CRT_ON = STATE_ON;
			viafb_SAMM_ON = STATE_OFF;
		}
	} else if (!strcmp(viafb_active_dev, "CRT+DVI")) {
		/* CRT+DVI */
		viafb_CRT_ON = STATE_ON;
		viafb_DVI_ON = STATE_ON;
		viafb_primary_dev = CRT_Device;
	} else if (!strcmp(viafb_active_dev, "DVI+CRT")) {
		/* DVI+CRT */
		viafb_CRT_ON = STATE_ON;
		viafb_DVI_ON = STATE_ON;
		viafb_primary_dev = DVI_Device;
	} else if (!strcmp(viafb_active_dev, "CRT+LCD")) {
		/* CRT+LCD */
		viafb_CRT_ON = STATE_ON;
		viafb_LCD_ON = STATE_ON;
		viafb_primary_dev = CRT_Device;
	} else if (!strcmp(viafb_active_dev, "LCD+CRT")) {
		/* LCD+CRT */
		viafb_CRT_ON = STATE_ON;
		viafb_LCD_ON = STATE_ON;
		viafb_primary_dev = LCD_Device;
	} else if (!strcmp(viafb_active_dev, "DVI+LCD")) {
		/* DVI+LCD */
		viafb_DVI_ON = STATE_ON;
		viafb_LCD_ON = STATE_ON;
		viafb_primary_dev = DVI_Device;
	} else if (!strcmp(viafb_active_dev, "LCD+DVI")) {
		/* LCD+DVI */
		viafb_DVI_ON = STATE_ON;
		viafb_LCD_ON = STATE_ON;
		viafb_primary_dev = LCD_Device;
	} else if (!strcmp(viafb_active_dev, "LCD+LCD2")) {
		viafb_LCD_ON = STATE_ON;
		viafb_LCD2_ON = STATE_ON;
		viafb_primary_dev = LCD_Device;
	} else if (!strcmp(viafb_active_dev, "LCD2+LCD")) {
		viafb_LCD_ON = STATE_ON;
		viafb_LCD2_ON = STATE_ON;
		viafb_primary_dev = LCD2_Device;
	} else if (!strcmp(viafb_active_dev, "CRT")) {
		/* CRT only */
		viafb_CRT_ON = STATE_ON;
		viafb_SAMM_ON = STATE_OFF;
	} else if (!strcmp(viafb_active_dev, "DVI")) {
		/* DVI only */
		viafb_DVI_ON = STATE_ON;
		viafb_SAMM_ON = STATE_OFF;
	} else if (!strcmp(viafb_active_dev, "LCD")) {
		/* LCD only */
		viafb_LCD_ON = STATE_ON;
		viafb_SAMM_ON = STATE_OFF;
	} else
		return -EINVAL;

	return 0;
}

/* Map a port-name module parameter to an INTERFACE_* constant. */
static int __devinit parse_port(char *opt_str, int *output_interface)
{
	if (!strncmp(opt_str, "DVP0", 4))
		*output_interface = INTERFACE_DVP0;
	else if (!strncmp(opt_str, "DVP1", 4))
		*output_interface = INTERFACE_DVP1;
	else if (!strncmp(opt_str, "DFP_HIGHLOW", 11))
		*output_interface = INTERFACE_DFP;
	else if (!strncmp(opt_str, "DFP_HIGH", 8))
		*output_interface = INTERFACE_DFP_HIGH;
	else if (!strncmp(opt_str, "DFP_LOW", 7))
		*output_interface = INTERFACE_DFP_LOW;
	else
		*output_interface = INTERFACE_NONE;
	return 0;
}

/* Resolve the viafb_lcd_port parameter into the LVDS output interface. */
static void __devinit parse_lcd_port(void)
{
	parse_port(viafb_lcd_port, &viaparinfo->chip_info->lvds_chip_info.
		output_interface);
	/*Initialize to avoid unexpected behavior */
	viaparinfo->chip_info->lvds_chip_info2.output_interface =
	INTERFACE_NONE;

	DEBUG_MSG(KERN_INFO "parse_lcd_port: viafb_lcd_port:%s,interface:%d\n",
		  viafb_lcd_port, viaparinfo->chip_info->lvds_chip_info.
		  output_interface);
}

/* Resolve the viafb_dvi_port parameter into the TMDS output interface. */
static void __devinit parse_dvi_port(void)
{
	parse_port(viafb_dvi_port, &viaparinfo->chip_info->tmds_chip_info.
		output_interface);

	DEBUG_MSG(KERN_INFO "parse_dvi_port: viafb_dvi_port:%s,interface:%d\n",
		  viafb_dvi_port, viaparinfo->chip_info->tmds_chip_info.
output_interface); } #ifdef CONFIG_FB_VIA_DIRECT_PROCFS /* * The proc filesystem read/write function, a simple proc implement to * get/set the value of DPA DVP0, DVP0DataDriving, DVP0ClockDriving, DVP1, * DVP1Driving, DFPHigh, DFPLow CR96, SR2A[5], SR1B[1], SR2A[4], SR1E[2], * CR9B, SR65, CR97, CR99 */ static int viafb_dvp0_proc_show(struct seq_file *m, void *v) { u8 dvp0_data_dri = 0, dvp0_clk_dri = 0, dvp0 = 0; dvp0_data_dri = (viafb_read_reg(VIASR, SR2A) & BIT5) >> 4 | (viafb_read_reg(VIASR, SR1B) & BIT1) >> 1; dvp0_clk_dri = (viafb_read_reg(VIASR, SR2A) & BIT4) >> 3 | (viafb_read_reg(VIASR, SR1E) & BIT2) >> 2; dvp0 = viafb_read_reg(VIACR, CR96) & 0x0f; seq_printf(m, "%x %x %x\n", dvp0, dvp0_data_dri, dvp0_clk_dri); return 0; } static int viafb_dvp0_proc_open(struct inode *inode, struct file *file) { return single_open(file, viafb_dvp0_proc_show, NULL); } static ssize_t viafb_dvp0_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { char buf[20], *value, *pbuf; u8 reg_val = 0; unsigned long length, i; if (count < 1) return -EINVAL; length = count > 20 ? 
20 : count; if (copy_from_user(&buf[0], buffer, length)) return -EFAULT; buf[length - 1] = '\0'; /*Ensure end string */ pbuf = &buf[0]; for (i = 0; i < 3; i++) { value = strsep(&pbuf, " "); if (value != NULL) { strict_strtoul(value, 0, (unsigned long *)&reg_val); DEBUG_MSG(KERN_INFO "DVP0:reg_val[%l]=:%x\n", i, reg_val); switch (i) { case 0: viafb_write_reg_mask(CR96, VIACR, reg_val, 0x0f); break; case 1: viafb_write_reg_mask(SR2A, VIASR, reg_val << 4, BIT5); viafb_write_reg_mask(SR1B, VIASR, reg_val << 1, BIT1); break; case 2: viafb_write_reg_mask(SR2A, VIASR, reg_val << 3, BIT4); viafb_write_reg_mask(SR1E, VIASR, reg_val << 2, BIT2); break; default: break; } } else { break; } } return count; } static const struct file_operations viafb_dvp0_proc_fops = { .owner = THIS_MODULE, .open = viafb_dvp0_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = viafb_dvp0_proc_write, }; static int viafb_dvp1_proc_show(struct seq_file *m, void *v) { u8 dvp1 = 0, dvp1_data_dri = 0, dvp1_clk_dri = 0; dvp1 = viafb_read_reg(VIACR, CR9B) & 0x0f; dvp1_data_dri = (viafb_read_reg(VIASR, SR65) & 0x0c) >> 2; dvp1_clk_dri = viafb_read_reg(VIASR, SR65) & 0x03; seq_printf(m, "%x %x %x\n", dvp1, dvp1_data_dri, dvp1_clk_dri); return 0; } static int viafb_dvp1_proc_open(struct inode *inode, struct file *file) { return single_open(file, viafb_dvp1_proc_show, NULL); } static ssize_t viafb_dvp1_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { char buf[20], *value, *pbuf; u8 reg_val = 0; unsigned long length, i; if (count < 1) return -EINVAL; length = count > 20 ? 
20 : count; if (copy_from_user(&buf[0], buffer, length)) return -EFAULT; buf[length - 1] = '\0'; /*Ensure end string */ pbuf = &buf[0]; for (i = 0; i < 3; i++) { value = strsep(&pbuf, " "); if (value != NULL) { strict_strtoul(value, 0, (unsigned long *)&reg_val); switch (i) { case 0: viafb_write_reg_mask(CR9B, VIACR, reg_val, 0x0f); break; case 1: viafb_write_reg_mask(SR65, VIASR, reg_val << 2, 0x0c); break; case 2: viafb_write_reg_mask(SR65, VIASR, reg_val, 0x03); break; default: break; } } else { break; } } return count; } static const struct file_operations viafb_dvp1_proc_fops = { .owner = THIS_MODULE, .open = viafb_dvp1_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = viafb_dvp1_proc_write, }; static int viafb_dfph_proc_show(struct seq_file *m, void *v) { u8 dfp_high = 0; dfp_high = viafb_read_reg(VIACR, CR97) & 0x0f; seq_printf(m, "%x\n", dfp_high); return 0; } static int viafb_dfph_proc_open(struct inode *inode, struct file *file) { return single_open(file, viafb_dfph_proc_show, NULL); } static ssize_t viafb_dfph_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { char buf[20]; u8 reg_val = 0; unsigned long length; if (count < 1) return -EINVAL; length = count > 20 ? 
20 : count; if (copy_from_user(&buf[0], buffer, length)) return -EFAULT; buf[length - 1] = '\0'; /*Ensure end string */ strict_strtoul(&buf[0], 0, (unsigned long *)&reg_val); viafb_write_reg_mask(CR97, VIACR, reg_val, 0x0f); return count; } static const struct file_operations viafb_dfph_proc_fops = { .owner = THIS_MODULE, .open = viafb_dfph_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = viafb_dfph_proc_write, }; static int viafb_dfpl_proc_show(struct seq_file *m, void *v) { u8 dfp_low = 0; dfp_low = viafb_read_reg(VIACR, CR99) & 0x0f; seq_printf(m, "%x\n", dfp_low); return 0; } static int viafb_dfpl_proc_open(struct inode *inode, struct file *file) { return single_open(file, viafb_dfpl_proc_show, NULL); } static ssize_t viafb_dfpl_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { char buf[20]; u8 reg_val = 0; unsigned long length; if (count < 1) return -EINVAL; length = count > 20 ? 20 : count; if (copy_from_user(&buf[0], buffer, length)) return -EFAULT; buf[length - 1] = '\0'; /*Ensure end string */ strict_strtoul(&buf[0], 0, (unsigned long *)&reg_val); viafb_write_reg_mask(CR99, VIACR, reg_val, 0x0f); return count; } static const struct file_operations viafb_dfpl_proc_fops = { .owner = THIS_MODULE, .open = viafb_dfpl_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = viafb_dfpl_proc_write, }; static int viafb_vt1636_proc_show(struct seq_file *m, void *v) { u8 vt1636_08 = 0, vt1636_09 = 0; switch (viaparinfo->chip_info->lvds_chip_info.lvds_chip_name) { case VT1636_LVDS: vt1636_08 = viafb_gpio_i2c_read_lvds(viaparinfo->lvds_setting_info, &viaparinfo->chip_info->lvds_chip_info, 0x08) & 0x0f; vt1636_09 = viafb_gpio_i2c_read_lvds(viaparinfo->lvds_setting_info, &viaparinfo->chip_info->lvds_chip_info, 0x09) & 0x1f; seq_printf(m, "%x %x\n", vt1636_08, vt1636_09); break; default: break; } switch (viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name) { case 
VT1636_LVDS: vt1636_08 = viafb_gpio_i2c_read_lvds(viaparinfo->lvds_setting_info2, &viaparinfo->chip_info->lvds_chip_info2, 0x08) & 0x0f; vt1636_09 = viafb_gpio_i2c_read_lvds(viaparinfo->lvds_setting_info2, &viaparinfo->chip_info->lvds_chip_info2, 0x09) & 0x1f; seq_printf(m, " %x %x\n", vt1636_08, vt1636_09); break; default: break; } return 0; } static int viafb_vt1636_proc_open(struct inode *inode, struct file *file) { return single_open(file, viafb_vt1636_proc_show, NULL); } static ssize_t viafb_vt1636_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { char buf[30], *value, *pbuf; struct IODATA reg_val; unsigned long length, i; if (count < 1) return -EINVAL; length = count > 30 ? 30 : count; if (copy_from_user(&buf[0], buffer, length)) return -EFAULT; buf[length - 1] = '\0'; /*Ensure end string */ pbuf = &buf[0]; switch (viaparinfo->chip_info->lvds_chip_info.lvds_chip_name) { case VT1636_LVDS: for (i = 0; i < 2; i++) { value = strsep(&pbuf, " "); if (value != NULL) { strict_strtoul(value, 0, (unsigned long *)&reg_val.Data); switch (i) { case 0: reg_val.Index = 0x08; reg_val.Mask = 0x0f; viafb_gpio_i2c_write_mask_lvds (viaparinfo->lvds_setting_info, &viaparinfo-> chip_info->lvds_chip_info, reg_val); break; case 1: reg_val.Index = 0x09; reg_val.Mask = 0x1f; viafb_gpio_i2c_write_mask_lvds (viaparinfo->lvds_setting_info, &viaparinfo-> chip_info->lvds_chip_info, reg_val); break; default: break; } } else { break; } } break; default: break; } switch (viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name) { case VT1636_LVDS: for (i = 0; i < 2; i++) { value = strsep(&pbuf, " "); if (value != NULL) { strict_strtoul(value, 0, (unsigned long *)&reg_val.Data); switch (i) { case 0: reg_val.Index = 0x08; reg_val.Mask = 0x0f; viafb_gpio_i2c_write_mask_lvds (viaparinfo->lvds_setting_info2, &viaparinfo-> chip_info->lvds_chip_info2, reg_val); break; case 1: reg_val.Index = 0x09; reg_val.Mask = 0x1f; viafb_gpio_i2c_write_mask_lvds 
					    (viaparinfo->lvds_setting_info2,
					    &viaparinfo->
					    chip_info->lvds_chip_info2,
					    reg_val);
					break;
				default:
					break;
				}
			} else {
				break;
			}
		}
		break;
	default:
		break;
	}
	return count;
}

static const struct file_operations viafb_vt1636_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= viafb_vt1636_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= viafb_vt1636_proc_write,
};

#endif /* CONFIG_FB_VIA_DIRECT_PROCFS */

/* List the output devices this chip generation can drive. */
static int viafb_sup_odev_proc_show(struct seq_file *m, void *v)
{
	via_odev_to_seq(m, supported_odev_map[
		viaparinfo->shared->chip_info.gfx_chip_name]);
	return 0;
}

static int viafb_sup_odev_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, viafb_sup_odev_proc_show, NULL);
}

static const struct file_operations viafb_sup_odev_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= viafb_sup_odev_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/*
 * Parse an output-device list written to proc and update *odev.
 * A leading '+' adds devices, '-' removes them, otherwise the set
 * replaces the current value.  Returns count or a negative errno.
 */
static ssize_t odev_update(const char __user *buffer, size_t count,
	u32 *odev)
{
	char buf[64], *ptr = buf;
	u32 devices;
	bool add, sub;

	if (count < 1 || count > 63)
		return -EINVAL;
	if (copy_from_user(&buf[0], buffer, count))
		return -EFAULT;
	buf[count] = '\0';
	add = buf[0] == '+';
	sub = buf[0] == '-';
	if (add || sub)
		ptr++;
	devices = via_parse_odev(ptr, &ptr);
	if (*ptr == '\n')
		ptr++;
	if (*ptr != 0)
		return -EINVAL;
	if (add)
		*odev |= devices;
	else if (sub)
		*odev &= ~devices;
	else
		*odev = devices;
	return count;
}

static int viafb_iga1_odev_proc_show(struct seq_file *m, void *v)
{
	via_odev_to_seq(m, viaparinfo->shared->iga1_devices);
	return 0;
}

static int viafb_iga1_odev_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, viafb_iga1_odev_proc_show, NULL);
}

/* Re-route the written device set to IGA1, powering devices on/off. */
static ssize_t viafb_iga1_odev_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	u32 dev_on, dev_off, dev_old, dev_new;
	ssize_t res;

	dev_old = dev_new = viaparinfo->shared->iga1_devices;
	res = odev_update(buffer, count, &dev_new);
	if (res != count)
		return res;
	dev_off = dev_old & ~dev_new;
	dev_on = dev_new & ~dev_old;
	/* a device can only hang off one IGA at a time */
	viaparinfo->shared->iga1_devices = dev_new;
	viaparinfo->shared->iga2_devices &= ~dev_new;
	via_set_state(dev_off, VIA_STATE_OFF);
	via_set_source(dev_new, IGA1);
	via_set_state(dev_on, VIA_STATE_ON);
	return res;
}

static const struct file_operations viafb_iga1_odev_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= viafb_iga1_odev_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= viafb_iga1_odev_proc_write,
};

static int viafb_iga2_odev_proc_show(struct seq_file *m, void *v)
{
	via_odev_to_seq(m, viaparinfo->shared->iga2_devices);
	return 0;
}

static int viafb_iga2_odev_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, viafb_iga2_odev_proc_show, NULL);
}

/* Re-route the written device set to IGA2, powering devices on/off. */
static ssize_t viafb_iga2_odev_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	u32 dev_on, dev_off, dev_old, dev_new;
	ssize_t res;

	dev_old = dev_new = viaparinfo->shared->iga2_devices;
	res = odev_update(buffer, count, &dev_new);
	if (res != count)
		return res;
	dev_off = dev_old & ~dev_new;
	dev_on = dev_new & ~dev_old;
	/* a device can only hang off one IGA at a time */
	viaparinfo->shared->iga2_devices = dev_new;
	viaparinfo->shared->iga1_devices &= ~dev_new;
	via_set_state(dev_off, VIA_STATE_OFF);
	via_set_source(dev_new, IGA2);
	via_set_state(dev_on, VIA_STATE_ON);
	return res;
}

static const struct file_operations viafb_iga2_odev_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= viafb_iga2_odev_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= viafb_iga2_odev_proc_write,
};

#define IS_VT1636(lvds_chip)	((lvds_chip).lvds_chip_name == VT1636_LVDS)

/* Create the /proc/viafb tree (and iga1/iga2 subdirectories). */
static void viafb_init_proc(struct viafb_shared *shared)
{
	struct proc_dir_entry *iga1_entry, *iga2_entry,
		*viafb_entry = proc_mkdir("viafb", NULL);

	shared->proc_entry = viafb_entry;
	if (viafb_entry) {
#ifdef CONFIG_FB_VIA_DIRECT_PROCFS
		proc_create("dvp0", 0, viafb_entry, &viafb_dvp0_proc_fops);
		proc_create("dvp1", 0, viafb_entry, &viafb_dvp1_proc_fops);
		proc_create("dfph", 0, viafb_entry, &viafb_dfph_proc_fops);
		proc_create("dfpl", 0, viafb_entry, &viafb_dfpl_proc_fops);
		if (IS_VT1636(shared->chip_info.lvds_chip_info)
			|| IS_VT1636(shared->chip_info.lvds_chip_info2))
			proc_create("vt1636", 0, viafb_entry,
				&viafb_vt1636_proc_fops);
#endif /* CONFIG_FB_VIA_DIRECT_PROCFS */

		proc_create("supported_output_devices", 0, viafb_entry,
			&viafb_sup_odev_proc_fops);
		iga1_entry = proc_mkdir("iga1", viafb_entry);
		shared->iga1_proc_entry = iga1_entry;
		proc_create("output_devices", 0, iga1_entry,
			&viafb_iga1_odev_proc_fops);
		iga2_entry = proc_mkdir("iga2", viafb_entry);
		shared->iga2_proc_entry = iga2_entry;
		proc_create("output_devices", 0, iga2_entry,
			&viafb_iga2_odev_proc_fops);
	}
}

/* Tear down the /proc/viafb tree created by viafb_init_proc(). */
static void viafb_remove_proc(struct viafb_shared *shared)
{
	struct proc_dir_entry *viafb_entry = shared->proc_entry,
		*iga1_entry = shared->iga1_proc_entry,
		*iga2_entry = shared->iga2_proc_entry;

	if (!viafb_entry)
		return;

	remove_proc_entry("output_devices", iga2_entry);
	remove_proc_entry("iga2", viafb_entry);
	remove_proc_entry("output_devices", iga1_entry);
	remove_proc_entry("iga1", viafb_entry);
	remove_proc_entry("supported_output_devices", viafb_entry);

#ifdef CONFIG_FB_VIA_DIRECT_PROCFS
	remove_proc_entry("dvp0", viafb_entry);/* parent dir */
	remove_proc_entry("dvp1", viafb_entry);
	remove_proc_entry("dfph", viafb_entry);
	remove_proc_entry("dfpl", viafb_entry);
	if (IS_VT1636(shared->chip_info.lvds_chip_info)
		|| IS_VT1636(shared->chip_info.lvds_chip_info2))
		remove_proc_entry("vt1636", viafb_entry);
#endif /* CONFIG_FB_VIA_DIRECT_PROCFS */

	remove_proc_entry("viafb", NULL);
}
#undef IS_VT1636

/*
 * Parse a "WIDTHxHEIGHT" mode string into *xres/*yres.
 * A NULL string selects the platform default (1200x900 on OLPC,
 * 640x480 otherwise).  Returns 0 on success, -EINVAL on bad input.
 */
static int parse_mode(const char *str, u32 *xres, u32 *yres)
{
	char *ptr;

	if (!str) {
		if (machine_is_olpc()) {
			*xres = 1200;
			*yres = 900;
		} else {
			*xres = 640;
			*yres = 480;
		}

		return 0;
	}

	*xres = simple_strtoul(str, &ptr, 10);
	if (ptr[0] != 'x')
		return -EINVAL;

	*yres = simple_strtoul(&ptr[1], &ptr, 10);
	if (ptr[0])
		return -EINVAL;

	return 0;
}
#ifdef CONFIG_PM static int viafb_suspend(void *unused) { console_lock(); fb_set_suspend(viafbinfo, 1); viafb_sync(viafbinfo); console_unlock(); return 0; } static int viafb_resume(void *unused) { console_lock(); if (viaparinfo->shared->vdev->engine_mmio) viafb_reset_engine(viaparinfo); viafb_set_par(viafbinfo); if (viafb_dual_fb) viafb_set_par(viafbinfo1); fb_set_suspend(viafbinfo, 0); console_unlock(); return 0; } static struct viafb_pm_hooks viafb_fb_pm_hooks = { .suspend = viafb_suspend, .resume = viafb_resume }; #endif int __devinit via_fb_pci_probe(struct viafb_dev *vdev) { u32 default_xres, default_yres; struct VideoModeTable *vmode_entry; struct fb_var_screeninfo default_var; int rc; u32 viafb_par_length; DEBUG_MSG(KERN_INFO "VIAFB PCI Probe!!\n"); memset(&default_var, 0, sizeof(default_var)); viafb_par_length = ALIGN(sizeof(struct viafb_par), BITS_PER_LONG/8); /* Allocate fb_info and ***_par here, also including some other needed * variables */ viafbinfo = framebuffer_alloc(viafb_par_length + ALIGN(sizeof(struct viafb_shared), BITS_PER_LONG/8), &vdev->pdev->dev); if (!viafbinfo) { printk(KERN_ERR"Could not allocate memory for viafb_info.\n"); return -ENOMEM; } viaparinfo = (struct viafb_par *)viafbinfo->par; viaparinfo->shared = viafbinfo->par + viafb_par_length; viaparinfo->shared->vdev = vdev; viaparinfo->vram_addr = 0; viaparinfo->tmds_setting_info = &viaparinfo->shared->tmds_setting_info; viaparinfo->lvds_setting_info = &viaparinfo->shared->lvds_setting_info; viaparinfo->lvds_setting_info2 = &viaparinfo->shared->lvds_setting_info2; viaparinfo->chip_info = &viaparinfo->shared->chip_info; if (viafb_dual_fb) viafb_SAMM_ON = 1; parse_lcd_port(); parse_dvi_port(); viafb_init_chip_info(vdev->chip_type); /* * The framebuffer will have been successfully mapped by * the core (or we'd not be here), but we still need to * set up our own accounting. 
*/ viaparinfo->fbmem = vdev->fbmem_start; viaparinfo->memsize = vdev->fbmem_len; viaparinfo->fbmem_free = viaparinfo->memsize; viaparinfo->fbmem_used = 0; viafbinfo->screen_base = vdev->fbmem; viafbinfo->fix.mmio_start = vdev->engine_start; viafbinfo->fix.mmio_len = vdev->engine_len; viafbinfo->node = 0; viafbinfo->fbops = &viafb_ops; viafbinfo->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN; viafbinfo->pseudo_palette = pseudo_pal; if (viafb_accel && !viafb_setup_engine(viafbinfo)) { viafbinfo->flags |= FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT; default_var.accel_flags = FB_ACCELF_TEXT; } else { viafbinfo->flags |= FBINFO_HWACCEL_DISABLED; default_var.accel_flags = 0; } if (viafb_second_size && (viafb_second_size < 8)) { viafb_second_offset = viaparinfo->fbmem_free - viafb_second_size * 1024 * 1024; } else { viafb_second_size = 8; viafb_second_offset = viaparinfo->fbmem_free - viafb_second_size * 1024 * 1024; } parse_mode(viafb_mode, &default_xres, &default_yres); vmode_entry = viafb_get_mode(default_xres, default_yres); if (viafb_SAMM_ON == 1) parse_mode(viafb_mode1, &viafb_second_xres, &viafb_second_yres); default_var.xres = default_xres; default_var.yres = default_yres; default_var.xres_virtual = default_xres; default_var.yres_virtual = default_yres; default_var.bits_per_pixel = viafb_bpp; viafb_fill_var_timing_info(&default_var, viafb_get_refresh( default_var.xres, default_var.yres, viafb_refresh), viafb_get_mode(default_var.xres, default_var.yres)); viafb_setup_fixinfo(&viafbinfo->fix, viaparinfo); viafbinfo->var = default_var; if (viafb_dual_fb) { viafbinfo1 = framebuffer_alloc(viafb_par_length, &vdev->pdev->dev); if (!viafbinfo1) { printk(KERN_ERR "allocate the second framebuffer struct error\n"); rc = -ENOMEM; goto out_fb_release; } viaparinfo1 = viafbinfo1->par; memcpy(viaparinfo1, viaparinfo, viafb_par_length); viaparinfo1->vram_addr = viafb_second_offset; viaparinfo1->memsize = viaparinfo->memsize - viafb_second_offset; 
viaparinfo->memsize = viafb_second_offset; viaparinfo1->fbmem = viaparinfo->fbmem + viafb_second_offset; viaparinfo1->fbmem_used = viaparinfo->fbmem_used; viaparinfo1->fbmem_free = viaparinfo1->memsize - viaparinfo1->fbmem_used; viaparinfo->fbmem_free = viaparinfo->memsize; viaparinfo->fbmem_used = 0; viaparinfo->iga_path = IGA1; viaparinfo1->iga_path = IGA2; memcpy(viafbinfo1, viafbinfo, sizeof(struct fb_info)); viafbinfo1->par = viaparinfo1; viafbinfo1->screen_base = viafbinfo->screen_base + viafb_second_offset; default_var.xres = viafb_second_xres; default_var.yres = viafb_second_yres; default_var.xres_virtual = viafb_second_xres; default_var.yres_virtual = viafb_second_yres; default_var.bits_per_pixel = viafb_bpp1; viafb_fill_var_timing_info(&default_var, viafb_get_refresh( default_var.xres, default_var.yres, viafb_refresh1), viafb_get_mode(default_var.xres, default_var.yres)); viafb_setup_fixinfo(&viafbinfo1->fix, viaparinfo1); viafb_check_var(&default_var, viafbinfo1); viafbinfo1->var = default_var; viafb_update_fix(viafbinfo1); viaparinfo1->depth = fb_get_color_depth(&viafbinfo1->var, &viafbinfo1->fix); } viafb_check_var(&viafbinfo->var, viafbinfo); viafb_update_fix(viafbinfo); viaparinfo->depth = fb_get_color_depth(&viafbinfo->var, &viafbinfo->fix); default_var.activate = FB_ACTIVATE_NOW; rc = fb_alloc_cmap(&viafbinfo->cmap, 256, 0); if (rc) goto out_fb1_release; if (viafb_dual_fb && (viafb_primary_dev == LCD_Device) && (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266)) { rc = register_framebuffer(viafbinfo1); if (rc) goto out_dealloc_cmap; } rc = register_framebuffer(viafbinfo); if (rc) goto out_fb1_unreg_lcd_cle266; if (viafb_dual_fb && ((viafb_primary_dev != LCD_Device) || (viaparinfo->chip_info->gfx_chip_name != UNICHROME_CLE266))) { rc = register_framebuffer(viafbinfo1); if (rc) goto out_fb_unreg; } DEBUG_MSG(KERN_INFO "fb%d: %s frame buffer device %dx%d-%dbpp\n", viafbinfo->node, viafbinfo->fix.id, default_var.xres, default_var.yres, 
default_var.bits_per_pixel); viafb_init_proc(viaparinfo->shared); viafb_init_dac(IGA2); #ifdef CONFIG_PM viafb_pm_register(&viafb_fb_pm_hooks); #endif return 0; out_fb_unreg: unregister_framebuffer(viafbinfo); out_fb1_unreg_lcd_cle266: if (viafb_dual_fb && (viafb_primary_dev == LCD_Device) && (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266)) unregister_framebuffer(viafbinfo1); out_dealloc_cmap: fb_dealloc_cmap(&viafbinfo->cmap); out_fb1_release: if (viafbinfo1) framebuffer_release(viafbinfo1); out_fb_release: framebuffer_release(viafbinfo); return rc; } void __devexit via_fb_pci_remove(struct pci_dev *pdev) { DEBUG_MSG(KERN_INFO "via_pci_remove!\n"); fb_dealloc_cmap(&viafbinfo->cmap); unregister_framebuffer(viafbinfo); if (viafb_dual_fb) unregister_framebuffer(viafbinfo1); viafb_remove_proc(viaparinfo->shared); framebuffer_release(viafbinfo); if (viafb_dual_fb) framebuffer_release(viafbinfo1); } #ifndef MODULE static int __init viafb_setup(void) { char *this_opt; char *options; DEBUG_MSG(KERN_INFO "viafb_setup!\n"); if (fb_get_options("viafb", &options)) return -ENODEV; if (!options || !*options) return 0; while ((this_opt = strsep(&options, ",")) != NULL) { if (!*this_opt) continue; if (!strncmp(this_opt, "viafb_mode1=", 12)) viafb_mode1 = kstrdup(this_opt + 12, GFP_KERNEL); else if (!strncmp(this_opt, "viafb_mode=", 11)) viafb_mode = kstrdup(this_opt + 11, GFP_KERNEL); else if (!strncmp(this_opt, "viafb_bpp1=", 11)) strict_strtoul(this_opt + 11, 0, (unsigned long *)&viafb_bpp1); else if (!strncmp(this_opt, "viafb_bpp=", 10)) strict_strtoul(this_opt + 10, 0, (unsigned long *)&viafb_bpp); else if (!strncmp(this_opt, "viafb_refresh1=", 15)) strict_strtoul(this_opt + 15, 0, (unsigned long *)&viafb_refresh1); else if (!strncmp(this_opt, "viafb_refresh=", 14)) strict_strtoul(this_opt + 14, 0, (unsigned long *)&viafb_refresh); else if (!strncmp(this_opt, "viafb_lcd_dsp_method=", 21)) strict_strtoul(this_opt + 21, 0, (unsigned long *)&viafb_lcd_dsp_method); 
else if (!strncmp(this_opt, "viafb_lcd_panel_id=", 19)) strict_strtoul(this_opt + 19, 0, (unsigned long *)&viafb_lcd_panel_id); else if (!strncmp(this_opt, "viafb_accel=", 12)) strict_strtoul(this_opt + 12, 0, (unsigned long *)&viafb_accel); else if (!strncmp(this_opt, "viafb_SAMM_ON=", 14)) strict_strtoul(this_opt + 14, 0, (unsigned long *)&viafb_SAMM_ON); else if (!strncmp(this_opt, "viafb_active_dev=", 17)) viafb_active_dev = kstrdup(this_opt + 17, GFP_KERNEL); else if (!strncmp(this_opt, "viafb_display_hardware_layout=", 30)) strict_strtoul(this_opt + 30, 0, (unsigned long *)&viafb_display_hardware_layout); else if (!strncmp(this_opt, "viafb_second_size=", 18)) strict_strtoul(this_opt + 18, 0, (unsigned long *)&viafb_second_size); else if (!strncmp(this_opt, "viafb_platform_epia_dvi=", 24)) strict_strtoul(this_opt + 24, 0, (unsigned long *)&viafb_platform_epia_dvi); else if (!strncmp(this_opt, "viafb_device_lcd_dualedge=", 26)) strict_strtoul(this_opt + 26, 0, (unsigned long *)&viafb_device_lcd_dualedge); else if (!strncmp(this_opt, "viafb_bus_width=", 16)) strict_strtoul(this_opt + 16, 0, (unsigned long *)&viafb_bus_width); else if (!strncmp(this_opt, "viafb_lcd_mode=", 15)) strict_strtoul(this_opt + 15, 0, (unsigned long *)&viafb_lcd_mode); else if (!strncmp(this_opt, "viafb_lcd_port=", 15)) viafb_lcd_port = kstrdup(this_opt + 15, GFP_KERNEL); else if (!strncmp(this_opt, "viafb_dvi_port=", 15)) viafb_dvi_port = kstrdup(this_opt + 15, GFP_KERNEL); } return 0; } #endif /* * These are called out of via-core for now. */ int __init viafb_init(void) { u32 dummy_x, dummy_y; int r; if (machine_is_olpc()) /* Apply XO-1.5-specific configuration. 
*/ viafb_lcd_panel_id = 23; #ifndef MODULE r = viafb_setup(); if (r < 0) return r; #endif if (parse_mode(viafb_mode, &dummy_x, &dummy_y) || !viafb_get_mode(dummy_x, dummy_y) || parse_mode(viafb_mode1, &dummy_x, &dummy_y) || !viafb_get_mode(dummy_x, dummy_y) || viafb_bpp < 0 || viafb_bpp > 32 || viafb_bpp1 < 0 || viafb_bpp1 > 32 || parse_active_dev()) return -EINVAL; printk(KERN_INFO "VIA Graphics Integration Chipset framebuffer %d.%d initializing\n", VERSION_MAJOR, VERSION_MINOR); return 0; } void __exit viafb_exit(void) { DEBUG_MSG(KERN_INFO "viafb_exit!\n"); } static struct fb_ops viafb_ops = { .owner = THIS_MODULE, .fb_open = viafb_open, .fb_release = viafb_release, .fb_check_var = viafb_check_var, .fb_set_par = viafb_set_par, .fb_setcolreg = viafb_setcolreg, .fb_pan_display = viafb_pan_display, .fb_blank = viafb_blank, .fb_fillrect = viafb_fillrect, .fb_copyarea = viafb_copyarea, .fb_imageblit = viafb_imageblit, .fb_cursor = viafb_cursor, .fb_ioctl = viafb_ioctl, .fb_sync = viafb_sync, }; #ifdef MODULE module_param(viafb_mode, charp, S_IRUSR); MODULE_PARM_DESC(viafb_mode, "Set resolution (default=640x480)"); module_param(viafb_mode1, charp, S_IRUSR); MODULE_PARM_DESC(viafb_mode1, "Set resolution (default=640x480)"); module_param(viafb_bpp, int, S_IRUSR); MODULE_PARM_DESC(viafb_bpp, "Set color depth (default=32bpp)"); module_param(viafb_bpp1, int, S_IRUSR); MODULE_PARM_DESC(viafb_bpp1, "Set color depth (default=32bpp)"); module_param(viafb_refresh, int, S_IRUSR); MODULE_PARM_DESC(viafb_refresh, "Set CRT viafb_refresh rate (default = 60)"); module_param(viafb_refresh1, int, S_IRUSR); MODULE_PARM_DESC(viafb_refresh1, "Set CRT refresh rate (default = 60)"); module_param(viafb_lcd_panel_id, int, S_IRUSR); MODULE_PARM_DESC(viafb_lcd_panel_id, "Set Flat Panel type(Default=1024x768)"); module_param(viafb_lcd_dsp_method, int, S_IRUSR); MODULE_PARM_DESC(viafb_lcd_dsp_method, "Set Flat Panel display scaling method.(Default=Expandsion)"); module_param(viafb_SAMM_ON, int, 
S_IRUSR); MODULE_PARM_DESC(viafb_SAMM_ON, "Turn on/off flag of SAMM(Default=OFF)"); module_param(viafb_accel, int, S_IRUSR); MODULE_PARM_DESC(viafb_accel, "Set 2D Hardware Acceleration: 0 = OFF, 1 = ON (default)"); module_param(viafb_active_dev, charp, S_IRUSR); MODULE_PARM_DESC(viafb_active_dev, "Specify active devices."); module_param(viafb_display_hardware_layout, int, S_IRUSR); MODULE_PARM_DESC(viafb_display_hardware_layout, "Display Hardware Layout (LCD Only, DVI Only...,etc)"); module_param(viafb_second_size, int, S_IRUSR); MODULE_PARM_DESC(viafb_second_size, "Set secondary device memory size"); module_param(viafb_dual_fb, int, S_IRUSR); MODULE_PARM_DESC(viafb_dual_fb, "Turn on/off flag of dual framebuffer devices.(Default = OFF)"); module_param(viafb_platform_epia_dvi, int, S_IRUSR); MODULE_PARM_DESC(viafb_platform_epia_dvi, "Turn on/off flag of DVI devices on EPIA board.(Default = OFF)"); module_param(viafb_device_lcd_dualedge, int, S_IRUSR); MODULE_PARM_DESC(viafb_device_lcd_dualedge, "Turn on/off flag of dual edge panel.(Default = OFF)"); module_param(viafb_bus_width, int, S_IRUSR); MODULE_PARM_DESC(viafb_bus_width, "Set bus width of panel.(Default = 12)"); module_param(viafb_lcd_mode, int, S_IRUSR); MODULE_PARM_DESC(viafb_lcd_mode, "Set Flat Panel mode(Default=OPENLDI)"); module_param(viafb_lcd_port, charp, S_IRUSR); MODULE_PARM_DESC(viafb_lcd_port, "Specify LCD output port."); module_param(viafb_dvi_port, charp, S_IRUSR); MODULE_PARM_DESC(viafb_dvi_port, "Specify DVI output port."); MODULE_LICENSE("GPL"); #endif
gpl-2.0
francpalm72/linux-imx_3.10.17
drivers/tty/serial/sunhv.c
2103
13705
/* sunhv.c: Serial driver for SUN4V hypervisor console. * * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/major.h> #include <linux/circ_buf.h> #include <linux/serial.h> #include <linux/sysrq.h> #include <linux/console.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/of_device.h> #include <asm/hypervisor.h> #include <asm/spitfire.h> #include <asm/prom.h> #include <asm/irq.h> #include <asm/setup.h> #if defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/serial_core.h> #include <linux/sunserialcore.h> #define CON_BREAK ((long)-1) #define CON_HUP ((long)-2) #define IGNORE_BREAK 0x1 #define IGNORE_ALL 0x2 static char *con_write_page; static char *con_read_page; static int hung_up = 0; static void transmit_chars_putchar(struct uart_port *port, struct circ_buf *xmit) { while (!uart_circ_empty(xmit)) { long status = sun4v_con_putchar(xmit->buf[xmit->tail]); if (status != HV_EOK) break; xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); port->icount.tx++; } } static void transmit_chars_write(struct uart_port *port, struct circ_buf *xmit) { while (!uart_circ_empty(xmit)) { unsigned long ra = __pa(xmit->buf + xmit->tail); unsigned long len, status, sent; len = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); status = sun4v_con_write(ra, len, &sent); if (status != HV_EOK) break; xmit->tail = (xmit->tail + sent) & (UART_XMIT_SIZE - 1); port->icount.tx += sent; } } static int receive_chars_getchar(struct uart_port *port) { int saw_console_brk = 0; int limit = 10000; while (limit-- > 0) { long status; long c = sun4v_con_getchar(&status); if (status == HV_EWOULDBLOCK) break; if (c == CON_BREAK) { if (uart_handle_break(port)) continue; saw_console_brk = 1; c = 0; } if (c == CON_HUP) { hung_up = 1; 
uart_handle_dcd_change(port, 0); } else if (hung_up) { hung_up = 0; uart_handle_dcd_change(port, 1); } if (port->state == NULL) { uart_handle_sysrq_char(port, c); continue; } port->icount.rx++; if (uart_handle_sysrq_char(port, c)) continue; tty_insert_flip_char(&port->state->port, c, TTY_NORMAL); } return saw_console_brk; } static int receive_chars_read(struct uart_port *port) { int saw_console_brk = 0; int limit = 10000; while (limit-- > 0) { unsigned long ra = __pa(con_read_page); unsigned long bytes_read, i; long stat = sun4v_con_read(ra, PAGE_SIZE, &bytes_read); if (stat != HV_EOK) { bytes_read = 0; if (stat == CON_BREAK) { if (uart_handle_break(port)) continue; saw_console_brk = 1; *con_read_page = 0; bytes_read = 1; } else if (stat == CON_HUP) { hung_up = 1; uart_handle_dcd_change(port, 0); continue; } else { /* HV_EWOULDBLOCK, etc. */ break; } } if (hung_up) { hung_up = 0; uart_handle_dcd_change(port, 1); } for (i = 0; i < bytes_read; i++) uart_handle_sysrq_char(port, con_read_page[i]); if (port->state == NULL) continue; port->icount.rx += bytes_read; tty_insert_flip_string(&port->state->port, con_read_page, bytes_read); } return saw_console_brk; } struct sunhv_ops { void (*transmit_chars)(struct uart_port *port, struct circ_buf *xmit); int (*receive_chars)(struct uart_port *port); }; static struct sunhv_ops bychar_ops = { .transmit_chars = transmit_chars_putchar, .receive_chars = receive_chars_getchar, }; static struct sunhv_ops bywrite_ops = { .transmit_chars = transmit_chars_write, .receive_chars = receive_chars_read, }; static struct sunhv_ops *sunhv_ops = &bychar_ops; static struct tty_port *receive_chars(struct uart_port *port) { struct tty_port *tport = NULL; if (port->state != NULL) /* Unopened serial console */ tport = &port->state->port; if (sunhv_ops->receive_chars(port)) sun_do_break(); return tport; } static void transmit_chars(struct uart_port *port) { struct circ_buf *xmit; if (!port->state) return; xmit = &port->state->xmit; if 
(uart_circ_empty(xmit) || uart_tx_stopped(port)) return; sunhv_ops->transmit_chars(port, xmit); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); } static irqreturn_t sunhv_interrupt(int irq, void *dev_id) { struct uart_port *port = dev_id; struct tty_port *tport; unsigned long flags; spin_lock_irqsave(&port->lock, flags); tport = receive_chars(port); transmit_chars(port); spin_unlock_irqrestore(&port->lock, flags); if (tport) tty_flip_buffer_push(tport); return IRQ_HANDLED; } /* port->lock is not held. */ static unsigned int sunhv_tx_empty(struct uart_port *port) { /* Transmitter is always empty for us. If the circ buffer * is non-empty or there is an x_char pending, our caller * will do the right thing and ignore what we return here. */ return TIOCSER_TEMT; } /* port->lock held by caller. */ static void sunhv_set_mctrl(struct uart_port *port, unsigned int mctrl) { return; } /* port->lock is held by caller and interrupts are disabled. */ static unsigned int sunhv_get_mctrl(struct uart_port *port) { return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS; } /* port->lock held by caller. */ static void sunhv_stop_tx(struct uart_port *port) { return; } /* port->lock held by caller. */ static void sunhv_start_tx(struct uart_port *port) { transmit_chars(port); } /* port->lock is not held. */ static void sunhv_send_xchar(struct uart_port *port, char ch) { unsigned long flags; int limit = 10000; spin_lock_irqsave(&port->lock, flags); while (limit-- > 0) { long status = sun4v_con_putchar(ch); if (status == HV_EOK) break; udelay(1); } spin_unlock_irqrestore(&port->lock, flags); } /* port->lock held by caller. */ static void sunhv_stop_rx(struct uart_port *port) { } /* port->lock held by caller. */ static void sunhv_enable_ms(struct uart_port *port) { } /* port->lock is not held. 
*/ static void sunhv_break_ctl(struct uart_port *port, int break_state) { if (break_state) { unsigned long flags; int limit = 10000; spin_lock_irqsave(&port->lock, flags); while (limit-- > 0) { long status = sun4v_con_putchar(CON_BREAK); if (status == HV_EOK) break; udelay(1); } spin_unlock_irqrestore(&port->lock, flags); } } /* port->lock is not held. */ static int sunhv_startup(struct uart_port *port) { return 0; } /* port->lock is not held. */ static void sunhv_shutdown(struct uart_port *port) { } /* port->lock is not held. */ static void sunhv_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000); unsigned int quot = uart_get_divisor(port, baud); unsigned int iflag, cflag; unsigned long flags; spin_lock_irqsave(&port->lock, flags); iflag = termios->c_iflag; cflag = termios->c_cflag; port->ignore_status_mask = 0; if (iflag & IGNBRK) port->ignore_status_mask |= IGNORE_BREAK; if ((cflag & CREAD) == 0) port->ignore_status_mask |= IGNORE_ALL; /* XXX */ uart_update_timeout(port, cflag, (port->uartclk / (16 * quot))); spin_unlock_irqrestore(&port->lock, flags); } static const char *sunhv_type(struct uart_port *port) { return "SUN4V HCONS"; } static void sunhv_release_port(struct uart_port *port) { } static int sunhv_request_port(struct uart_port *port) { return 0; } static void sunhv_config_port(struct uart_port *port, int flags) { } static int sunhv_verify_port(struct uart_port *port, struct serial_struct *ser) { return -EINVAL; } static struct uart_ops sunhv_pops = { .tx_empty = sunhv_tx_empty, .set_mctrl = sunhv_set_mctrl, .get_mctrl = sunhv_get_mctrl, .stop_tx = sunhv_stop_tx, .start_tx = sunhv_start_tx, .send_xchar = sunhv_send_xchar, .stop_rx = sunhv_stop_rx, .enable_ms = sunhv_enable_ms, .break_ctl = sunhv_break_ctl, .startup = sunhv_startup, .shutdown = sunhv_shutdown, .set_termios = sunhv_set_termios, .type = sunhv_type, .release_port = sunhv_release_port, 
.request_port = sunhv_request_port, .config_port = sunhv_config_port, .verify_port = sunhv_verify_port, }; static struct uart_driver sunhv_reg = { .owner = THIS_MODULE, .driver_name = "sunhv", .dev_name = "ttyS", .major = TTY_MAJOR, }; static struct uart_port *sunhv_port; /* Copy 's' into the con_write_page, decoding "\n" into * "\r\n" along the way. We have to return two lengths * because the caller needs to know how much to advance * 's' and also how many bytes to output via con_write_page. */ static int fill_con_write_page(const char *s, unsigned int n, unsigned long *page_bytes) { const char *orig_s = s; char *p = con_write_page; int left = PAGE_SIZE; while (n--) { if (*s == '\n') { if (left < 2) break; *p++ = '\r'; left--; } else if (left < 1) break; *p++ = *s++; left--; } *page_bytes = p - con_write_page; return s - orig_s; } static void sunhv_console_write_paged(struct console *con, const char *s, unsigned n) { struct uart_port *port = sunhv_port; unsigned long flags; int locked = 1; local_irq_save(flags); if (port->sysrq) { locked = 0; } else if (oops_in_progress) { locked = spin_trylock(&port->lock); } else spin_lock(&port->lock); while (n > 0) { unsigned long ra = __pa(con_write_page); unsigned long page_bytes; unsigned int cpy = fill_con_write_page(s, n, &page_bytes); n -= cpy; s += cpy; while (page_bytes > 0) { unsigned long written; int limit = 1000000; while (limit--) { unsigned long stat; stat = sun4v_con_write(ra, page_bytes, &written); if (stat == HV_EOK) break; udelay(1); } if (limit < 0) break; page_bytes -= written; ra += written; } } if (locked) spin_unlock(&port->lock); local_irq_restore(flags); } static inline void sunhv_console_putchar(struct uart_port *port, char c) { int limit = 1000000; while (limit-- > 0) { long status = sun4v_con_putchar(c); if (status == HV_EOK) break; udelay(1); } } static void sunhv_console_write_bychar(struct console *con, const char *s, unsigned n) { struct uart_port *port = sunhv_port; unsigned long flags; int i, 
locked = 1; local_irq_save(flags); if (port->sysrq) { locked = 0; } else if (oops_in_progress) { locked = spin_trylock(&port->lock); } else spin_lock(&port->lock); for (i = 0; i < n; i++) { if (*s == '\n') sunhv_console_putchar(port, '\r'); sunhv_console_putchar(port, *s++); } if (locked) spin_unlock(&port->lock); local_irq_restore(flags); } static struct console sunhv_console = { .name = "ttyHV", .write = sunhv_console_write_bychar, .device = uart_console_device, .flags = CON_PRINTBUFFER, .index = -1, .data = &sunhv_reg, }; static int hv_probe(struct platform_device *op) { struct uart_port *port; unsigned long minor; int err; if (op->archdata.irqs[0] == 0xffffffff) return -ENODEV; port = kzalloc(sizeof(struct uart_port), GFP_KERNEL); if (unlikely(!port)) return -ENOMEM; minor = 1; if (sun4v_hvapi_register(HV_GRP_CORE, 1, &minor) == 0 && minor >= 1) { err = -ENOMEM; con_write_page = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!con_write_page) goto out_free_port; con_read_page = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!con_read_page) goto out_free_con_write_page; sunhv_console.write = sunhv_console_write_paged; sunhv_ops = &bywrite_ops; } sunhv_port = port; port->line = 0; port->ops = &sunhv_pops; port->type = PORT_SUNHV; port->uartclk = ( 29491200 / 16 ); /* arbitrary */ port->membase = (unsigned char __iomem *) __pa(port); port->irq = op->archdata.irqs[0]; port->dev = &op->dev; err = sunserial_register_minors(&sunhv_reg, 1); if (err) goto out_free_con_read_page; sunserial_console_match(&sunhv_console, op->dev.of_node, &sunhv_reg, port->line, false); err = uart_add_one_port(&sunhv_reg, port); if (err) goto out_unregister_driver; err = request_irq(port->irq, sunhv_interrupt, 0, "hvcons", port); if (err) goto out_remove_port; dev_set_drvdata(&op->dev, port); return 0; out_remove_port: uart_remove_one_port(&sunhv_reg, port); out_unregister_driver: sunserial_unregister_minors(&sunhv_reg, 1); out_free_con_read_page: kfree(con_read_page); out_free_con_write_page: 
kfree(con_write_page); out_free_port: kfree(port); sunhv_port = NULL; return err; } static int hv_remove(struct platform_device *dev) { struct uart_port *port = dev_get_drvdata(&dev->dev); free_irq(port->irq, port); uart_remove_one_port(&sunhv_reg, port); sunserial_unregister_minors(&sunhv_reg, 1); kfree(port); sunhv_port = NULL; dev_set_drvdata(&dev->dev, NULL); return 0; } static const struct of_device_id hv_match[] = { { .name = "console", .compatible = "qcn", }, { .name = "console", .compatible = "SUNW,sun4v-console", }, {}, }; MODULE_DEVICE_TABLE(of, hv_match); static struct platform_driver hv_driver = { .driver = { .name = "hv", .owner = THIS_MODULE, .of_match_table = hv_match, }, .probe = hv_probe, .remove = hv_remove, }; static int __init sunhv_init(void) { if (tlb_type != hypervisor) return -ENODEV; return platform_driver_register(&hv_driver); } static void __exit sunhv_exit(void) { platform_driver_unregister(&hv_driver); } module_init(sunhv_init); module_exit(sunhv_exit); MODULE_AUTHOR("David S. Miller"); MODULE_DESCRIPTION("SUN4V Hypervisor console driver"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL");
gpl-2.0
SM-G920P/Vindicator-S6-Sprint
arch/arm/mach-s3c24xx/dma.c
2103
33195
/* * Copyright 2003-2006 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * S3C2410 DMA core * * http://armlinux.simtec.co.uk/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifdef CONFIG_S3C2410_DMA_DEBUG #define DEBUG #endif #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/syscore_ops.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/io.h> #include <asm/irq.h> #include <mach/hardware.h> #include <mach/dma.h> #include <mach/map.h> #include <plat/dma-s3c24xx.h> #include <plat/regs-dma.h> /* io map for dma */ static void __iomem *dma_base; static struct kmem_cache *dma_kmem; static int dma_channels; static struct s3c24xx_dma_selection dma_sel; /* debugging functions */ #define BUF_MAGIC (0xcafebabe) #define dmawarn(fmt...) printk(KERN_DEBUG fmt) #define dma_regaddr(chan, reg) ((chan)->regs + (reg)) #if 1 #define dma_wrreg(chan, reg, val) writel((val), (chan)->regs + (reg)) #else static inline void dma_wrreg(struct s3c2410_dma_chan *chan, int reg, unsigned long val) { pr_debug("writing %08x to register %08x\n",(unsigned int)val,reg); writel(val, dma_regaddr(chan, reg)); } #endif #define dma_rdreg(chan, reg) readl((chan)->regs + (reg)) /* captured register state for debug */ struct s3c2410_dma_regstate { unsigned long dcsrc; unsigned long disrc; unsigned long dstat; unsigned long dcon; unsigned long dmsktrig; }; #ifdef CONFIG_S3C2410_DMA_DEBUG /* dmadbg_showregs * * simple debug routine to print the current state of the dma registers */ static void dmadbg_capture(struct s3c2410_dma_chan *chan, struct s3c2410_dma_regstate *regs) { regs->dcsrc = dma_rdreg(chan, S3C2410_DMA_DCSRC); regs->disrc = dma_rdreg(chan, S3C2410_DMA_DISRC); regs->dstat = dma_rdreg(chan, S3C2410_DMA_DSTAT); regs->dcon = dma_rdreg(chan, 
S3C2410_DMA_DCON); regs->dmsktrig = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG); } static void dmadbg_dumpregs(const char *fname, int line, struct s3c2410_dma_chan *chan, struct s3c2410_dma_regstate *regs) { printk(KERN_DEBUG "dma%d: %s:%d: DCSRC=%08lx, DISRC=%08lx, DSTAT=%08lx DMT=%02lx, DCON=%08lx\n", chan->number, fname, line, regs->dcsrc, regs->disrc, regs->dstat, regs->dmsktrig, regs->dcon); } static void dmadbg_showchan(const char *fname, int line, struct s3c2410_dma_chan *chan) { struct s3c2410_dma_regstate state; dmadbg_capture(chan, &state); printk(KERN_DEBUG "dma%d: %s:%d: ls=%d, cur=%p, %p %p\n", chan->number, fname, line, chan->load_state, chan->curr, chan->next, chan->end); dmadbg_dumpregs(fname, line, chan, &state); } static void dmadbg_showregs(const char *fname, int line, struct s3c2410_dma_chan *chan) { struct s3c2410_dma_regstate state; dmadbg_capture(chan, &state); dmadbg_dumpregs(fname, line, chan, &state); } #define dbg_showregs(chan) dmadbg_showregs(__func__, __LINE__, (chan)) #define dbg_showchan(chan) dmadbg_showchan(__func__, __LINE__, (chan)) #else #define dbg_showregs(chan) do { } while(0) #define dbg_showchan(chan) do { } while(0) #endif /* CONFIG_S3C2410_DMA_DEBUG */ /* s3c2410_dma_stats_timeout * * Update DMA stats from timeout info */ static void s3c2410_dma_stats_timeout(struct s3c2410_dma_stats *stats, int val) { if (stats == NULL) return; if (val > stats->timeout_longest) stats->timeout_longest = val; if (val < stats->timeout_shortest) stats->timeout_shortest = val; stats->timeout_avg += val; } /* s3c2410_dma_waitforload * * wait for the DMA engine to load a buffer, and update the state accordingly */ static int s3c2410_dma_waitforload(struct s3c2410_dma_chan *chan, int line) { int timeout = chan->load_timeout; int took; if (chan->load_state != S3C2410_DMALOAD_1LOADED) { printk(KERN_ERR "dma%d: s3c2410_dma_waitforload() called in loadstate %d from line %d\n", chan->number, chan->load_state, line); return 0; } if (chan->stats != NULL) 
chan->stats->loads++; while (--timeout > 0) { if ((dma_rdreg(chan, S3C2410_DMA_DSTAT) << (32-20)) != 0) { took = chan->load_timeout - timeout; s3c2410_dma_stats_timeout(chan->stats, took); switch (chan->load_state) { case S3C2410_DMALOAD_1LOADED: chan->load_state = S3C2410_DMALOAD_1RUNNING; break; default: printk(KERN_ERR "dma%d: unknown load_state in s3c2410_dma_waitforload() %d\n", chan->number, chan->load_state); } return 1; } } if (chan->stats != NULL) { chan->stats->timeout_failed++; } return 0; } /* s3c2410_dma_loadbuffer * * load a buffer, and update the channel state */ static inline int s3c2410_dma_loadbuffer(struct s3c2410_dma_chan *chan, struct s3c2410_dma_buf *buf) { unsigned long reload; if (buf == NULL) { dmawarn("buffer is NULL\n"); return -EINVAL; } pr_debug("s3c2410_chan_loadbuffer: loading buff %p (0x%08lx,0x%06x)\n", buf, (unsigned long)buf->data, buf->size); /* check the state of the channel before we do anything */ if (chan->load_state == S3C2410_DMALOAD_1LOADED) { dmawarn("load_state is S3C2410_DMALOAD_1LOADED\n"); } if (chan->load_state == S3C2410_DMALOAD_1LOADED_1RUNNING) { dmawarn("state is S3C2410_DMALOAD_1LOADED_1RUNNING\n"); } /* it would seem sensible if we are the last buffer to not bother * with the auto-reload bit, so that the DMA engine will not try * and load another transfer after this one has finished... */ if (chan->load_state == S3C2410_DMALOAD_NONE) { pr_debug("load_state is none, checking for noreload (next=%p)\n", buf->next); reload = (buf->next == NULL) ? 
S3C2410_DCON_NORELOAD : 0; } else { //pr_debug("load_state is %d => autoreload\n", chan->load_state); reload = S3C2410_DCON_AUTORELOAD; } if ((buf->data & 0xf0000000) != 0x30000000) { dmawarn("dmaload: buffer is %p\n", (void *)buf->data); } writel(buf->data, chan->addr_reg); dma_wrreg(chan, S3C2410_DMA_DCON, chan->dcon | reload | (buf->size/chan->xfer_unit)); chan->next = buf->next; /* update the state of the channel */ switch (chan->load_state) { case S3C2410_DMALOAD_NONE: chan->load_state = S3C2410_DMALOAD_1LOADED; break; case S3C2410_DMALOAD_1RUNNING: chan->load_state = S3C2410_DMALOAD_1LOADED_1RUNNING; break; default: dmawarn("dmaload: unknown state %d in loadbuffer\n", chan->load_state); break; } return 0; } /* s3c2410_dma_call_op * * small routine to call the op routine with the given op if it has been * registered */ static void s3c2410_dma_call_op(struct s3c2410_dma_chan *chan, enum s3c2410_chan_op op) { if (chan->op_fn != NULL) { (chan->op_fn)(chan, op); } } /* s3c2410_dma_buffdone * * small wrapper to check if callback routine needs to be called, and * if so, call it */ static inline void s3c2410_dma_buffdone(struct s3c2410_dma_chan *chan, struct s3c2410_dma_buf *buf, enum s3c2410_dma_buffresult result) { #if 0 pr_debug("callback_fn=%p, buf=%p, id=%p, size=%d, result=%d\n", chan->callback_fn, buf, buf->id, buf->size, result); #endif if (chan->callback_fn != NULL) { (chan->callback_fn)(chan, buf->id, buf->size, result); } } /* s3c2410_dma_start * * start a dma channel going */ static int s3c2410_dma_start(struct s3c2410_dma_chan *chan) { unsigned long tmp; unsigned long flags; pr_debug("s3c2410_start_dma: channel=%d\n", chan->number); local_irq_save(flags); if (chan->state == S3C2410_DMA_RUNNING) { pr_debug("s3c2410_start_dma: already running (%d)\n", chan->state); local_irq_restore(flags); return 0; } chan->state = S3C2410_DMA_RUNNING; /* check whether there is anything to load, and if not, see * if we can find anything to load */ if (chan->load_state == 
S3C2410_DMALOAD_NONE) { if (chan->next == NULL) { printk(KERN_ERR "dma%d: channel has nothing loaded\n", chan->number); chan->state = S3C2410_DMA_IDLE; local_irq_restore(flags); return -EINVAL; } s3c2410_dma_loadbuffer(chan, chan->next); } dbg_showchan(chan); /* enable the channel */ if (!chan->irq_enabled) { enable_irq(chan->irq); chan->irq_enabled = 1; } /* start the channel going */ tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG); tmp &= ~S3C2410_DMASKTRIG_STOP; tmp |= S3C2410_DMASKTRIG_ON; dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp); pr_debug("dma%d: %08lx to DMASKTRIG\n", chan->number, tmp); #if 0 /* the dma buffer loads should take care of clearing the AUTO * reloading feature */ tmp = dma_rdreg(chan, S3C2410_DMA_DCON); tmp &= ~S3C2410_DCON_NORELOAD; dma_wrreg(chan, S3C2410_DMA_DCON, tmp); #endif s3c2410_dma_call_op(chan, S3C2410_DMAOP_START); dbg_showchan(chan); /* if we've only loaded one buffer onto the channel, then chec * to see if we have another, and if so, try and load it so when * the first buffer is finished, the new one will be loaded onto * the channel */ if (chan->next != NULL) { if (chan->load_state == S3C2410_DMALOAD_1LOADED) { if (s3c2410_dma_waitforload(chan, __LINE__) == 0) { pr_debug("%s: buff not yet loaded, no more todo\n", __func__); } else { chan->load_state = S3C2410_DMALOAD_1RUNNING; s3c2410_dma_loadbuffer(chan, chan->next); } } else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) { s3c2410_dma_loadbuffer(chan, chan->next); } } local_irq_restore(flags); return 0; } /* s3c2410_dma_canload * * work out if we can queue another buffer into the DMA engine */ static int s3c2410_dma_canload(struct s3c2410_dma_chan *chan) { if (chan->load_state == S3C2410_DMALOAD_NONE || chan->load_state == S3C2410_DMALOAD_1RUNNING) return 1; return 0; } /* s3c2410_dma_enqueue * * queue an given buffer for dma transfer. 
* * id the device driver's id information for this buffer * data the physical address of the buffer data * size the size of the buffer in bytes * * If the channel is not running, then the flag S3C2410_DMAF_AUTOSTART * is checked, and if set, the channel is started. If this flag isn't set, * then an error will be returned. * * It is possible to queue more than one DMA buffer onto a channel at * once, and the code will deal with the re-loading of the next buffer * when necessary. */ int s3c2410_dma_enqueue(enum dma_ch channel, void *id, dma_addr_t data, int size) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); struct s3c2410_dma_buf *buf; unsigned long flags; if (chan == NULL) return -EINVAL; pr_debug("%s: id=%p, data=%08x, size=%d\n", __func__, id, (unsigned int)data, size); buf = kmem_cache_alloc(dma_kmem, GFP_ATOMIC); if (buf == NULL) { pr_debug("%s: out of memory (%ld alloc)\n", __func__, (long)sizeof(*buf)); return -ENOMEM; } //pr_debug("%s: new buffer %p\n", __func__, buf); //dbg_showchan(chan); buf->next = NULL; buf->data = buf->ptr = data; buf->size = size; buf->id = id; buf->magic = BUF_MAGIC; local_irq_save(flags); if (chan->curr == NULL) { /* we've got nothing loaded... 
*/ pr_debug("%s: buffer %p queued onto empty channel\n", __func__, buf); chan->curr = buf; chan->end = buf; chan->next = NULL; } else { pr_debug("dma%d: %s: buffer %p queued onto non-empty channel\n", chan->number, __func__, buf); if (chan->end == NULL) { pr_debug("dma%d: %s: %p not empty, and chan->end==NULL?\n", chan->number, __func__, chan); } else { chan->end->next = buf; chan->end = buf; } } /* if necessary, update the next buffer field */ if (chan->next == NULL) chan->next = buf; /* check to see if we can load a buffer */ if (chan->state == S3C2410_DMA_RUNNING) { if (chan->load_state == S3C2410_DMALOAD_1LOADED && 1) { if (s3c2410_dma_waitforload(chan, __LINE__) == 0) { printk(KERN_ERR "dma%d: loadbuffer:" "timeout loading buffer\n", chan->number); dbg_showchan(chan); local_irq_restore(flags); return -EINVAL; } } while (s3c2410_dma_canload(chan) && chan->next != NULL) { s3c2410_dma_loadbuffer(chan, chan->next); } } else if (chan->state == S3C2410_DMA_IDLE) { if (chan->flags & S3C2410_DMAF_AUTOSTART) { s3c2410_dma_ctrl(chan->number | DMACH_LOW_LEVEL, S3C2410_DMAOP_START); } } local_irq_restore(flags); return 0; } EXPORT_SYMBOL(s3c2410_dma_enqueue); static inline void s3c2410_dma_freebuf(struct s3c2410_dma_buf *buf) { int magicok = (buf->magic == BUF_MAGIC); buf->magic = -1; if (magicok) { kmem_cache_free(dma_kmem, buf); } else { printk("s3c2410_dma_freebuf: buff %p with bad magic\n", buf); } } /* s3c2410_dma_lastxfer * * called when the system is out of buffers, to ensure that the channel * is prepared for shutdown. */ static inline void s3c2410_dma_lastxfer(struct s3c2410_dma_chan *chan) { #if 0 pr_debug("dma%d: s3c2410_dma_lastxfer: load_state %d\n", chan->number, chan->load_state); #endif switch (chan->load_state) { case S3C2410_DMALOAD_NONE: break; case S3C2410_DMALOAD_1LOADED: if (s3c2410_dma_waitforload(chan, __LINE__) == 0) { /* flag error? 
*/ printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n", chan->number, __func__); return; } break; case S3C2410_DMALOAD_1LOADED_1RUNNING: /* I believe in this case we do not have anything to do * until the next buffer comes along, and we turn off the * reload */ return; default: pr_debug("dma%d: lastxfer: unhandled load_state %d with no next\n", chan->number, chan->load_state); return; } /* hopefully this'll shut the damned thing up after the transfer... */ dma_wrreg(chan, S3C2410_DMA_DCON, chan->dcon | S3C2410_DCON_NORELOAD); } #define dmadbg2(x...) static irqreturn_t s3c2410_dma_irq(int irq, void *devpw) { struct s3c2410_dma_chan *chan = (struct s3c2410_dma_chan *)devpw; struct s3c2410_dma_buf *buf; buf = chan->curr; dbg_showchan(chan); /* modify the channel state */ switch (chan->load_state) { case S3C2410_DMALOAD_1RUNNING: /* TODO - if we are running only one buffer, we probably * want to reload here, and then worry about the buffer * callback */ chan->load_state = S3C2410_DMALOAD_NONE; break; case S3C2410_DMALOAD_1LOADED: /* iirc, we should go back to NONE loaded here, we * had a buffer, and it was never verified as being * loaded. */ chan->load_state = S3C2410_DMALOAD_NONE; break; case S3C2410_DMALOAD_1LOADED_1RUNNING: /* we'll worry about checking to see if another buffer is * ready after we've called back the owner. 
This should * ensure we do not wait around too long for the DMA * engine to start the next transfer */ chan->load_state = S3C2410_DMALOAD_1LOADED; break; case S3C2410_DMALOAD_NONE: printk(KERN_ERR "dma%d: IRQ with no loaded buffer?\n", chan->number); break; default: printk(KERN_ERR "dma%d: IRQ in invalid load_state %d\n", chan->number, chan->load_state); break; } if (buf != NULL) { /* update the chain to make sure that if we load any more * buffers when we call the callback function, things should * work properly */ chan->curr = buf->next; buf->next = NULL; if (buf->magic != BUF_MAGIC) { printk(KERN_ERR "dma%d: %s: buf %p incorrect magic\n", chan->number, __func__, buf); return IRQ_HANDLED; } s3c2410_dma_buffdone(chan, buf, S3C2410_RES_OK); /* free resouces */ s3c2410_dma_freebuf(buf); } else { } /* only reload if the channel is still running... our buffer done * routine may have altered the state by requesting the dma channel * to stop or shutdown... */ /* todo: check that when the channel is shut-down from inside this * function, we cope with unsetting reload, etc */ if (chan->next != NULL && chan->state != S3C2410_DMA_IDLE) { unsigned long flags; switch (chan->load_state) { case S3C2410_DMALOAD_1RUNNING: /* don't need to do anything for this state */ break; case S3C2410_DMALOAD_NONE: /* can load buffer immediately */ break; case S3C2410_DMALOAD_1LOADED: if (s3c2410_dma_waitforload(chan, __LINE__) == 0) { /* flag error? */ printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n", chan->number, __func__); return IRQ_HANDLED; } break; case S3C2410_DMALOAD_1LOADED_1RUNNING: goto no_load; default: printk(KERN_ERR "dma%d: unknown load_state in irq, %d\n", chan->number, chan->load_state); return IRQ_HANDLED; } local_irq_save(flags); s3c2410_dma_loadbuffer(chan, chan->next); local_irq_restore(flags); } else { s3c2410_dma_lastxfer(chan); /* see if we can stop this channel.. 
*/ if (chan->load_state == S3C2410_DMALOAD_NONE) { pr_debug("dma%d: end of transfer, stopping channel (%ld)\n", chan->number, jiffies); s3c2410_dma_ctrl(chan->number | DMACH_LOW_LEVEL, S3C2410_DMAOP_STOP); } } no_load: return IRQ_HANDLED; } static struct s3c2410_dma_chan *s3c2410_dma_map_channel(int channel); /* s3c2410_request_dma * * get control of an dma channel */ int s3c2410_dma_request(enum dma_ch channel, struct s3c2410_dma_client *client, void *dev) { struct s3c2410_dma_chan *chan; unsigned long flags; int err; pr_debug("dma%d: s3c2410_request_dma: client=%s, dev=%p\n", channel, client->name, dev); local_irq_save(flags); chan = s3c2410_dma_map_channel(channel); if (chan == NULL) { local_irq_restore(flags); return -EBUSY; } dbg_showchan(chan); chan->client = client; chan->in_use = 1; if (!chan->irq_claimed) { pr_debug("dma%d: %s : requesting irq %d\n", channel, __func__, chan->irq); chan->irq_claimed = 1; local_irq_restore(flags); err = request_irq(chan->irq, s3c2410_dma_irq, IRQF_DISABLED, client->name, (void *)chan); local_irq_save(flags); if (err) { chan->in_use = 0; chan->irq_claimed = 0; local_irq_restore(flags); printk(KERN_ERR "%s: cannot get IRQ %d for DMA %d\n", client->name, chan->irq, chan->number); return err; } chan->irq_enabled = 1; } local_irq_restore(flags); /* need to setup */ pr_debug("%s: channel initialised, %p\n", __func__, chan); return chan->number | DMACH_LOW_LEVEL; } EXPORT_SYMBOL(s3c2410_dma_request); /* s3c2410_dma_free * * release the given channel back to the system, will stop and flush * any outstanding transfers, and ensure the channel is ready for the * next claimant. * * Note, although a warning is currently printed if the freeing client * info is not the same as the registrant's client info, the free is still * allowed to go through. 
*/ int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); unsigned long flags; if (chan == NULL) return -EINVAL; local_irq_save(flags); if (chan->client != client) { printk(KERN_WARNING "dma%d: possible free from different client (channel %p, passed %p)\n", channel, chan->client, client); } /* sort out stopping and freeing the channel */ if (chan->state != S3C2410_DMA_IDLE) { pr_debug("%s: need to stop dma channel %p\n", __func__, chan); /* possibly flush the channel */ s3c2410_dma_ctrl(channel, S3C2410_DMAOP_STOP); } chan->client = NULL; chan->in_use = 0; if (chan->irq_claimed) free_irq(chan->irq, (void *)chan); chan->irq_claimed = 0; if (!(channel & DMACH_LOW_LEVEL)) s3c_dma_chan_map[channel] = NULL; local_irq_restore(flags); return 0; } EXPORT_SYMBOL(s3c2410_dma_free); static int s3c2410_dma_dostop(struct s3c2410_dma_chan *chan) { unsigned long flags; unsigned long tmp; pr_debug("%s:\n", __func__); dbg_showchan(chan); local_irq_save(flags); s3c2410_dma_call_op(chan, S3C2410_DMAOP_STOP); tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG); tmp |= S3C2410_DMASKTRIG_STOP; //tmp &= ~S3C2410_DMASKTRIG_ON; dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp); #if 0 /* should also clear interrupts, according to WinCE BSP */ tmp = dma_rdreg(chan, S3C2410_DMA_DCON); tmp |= S3C2410_DCON_NORELOAD; dma_wrreg(chan, S3C2410_DMA_DCON, tmp); #endif /* should stop do this, or should we wait for flush? 
*/ chan->state = S3C2410_DMA_IDLE; chan->load_state = S3C2410_DMALOAD_NONE; local_irq_restore(flags); return 0; } static void s3c2410_dma_waitforstop(struct s3c2410_dma_chan *chan) { unsigned long tmp; unsigned int timeout = 0x10000; while (timeout-- > 0) { tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG); if (!(tmp & S3C2410_DMASKTRIG_ON)) return; } pr_debug("dma%d: failed to stop?\n", chan->number); } /* s3c2410_dma_flush * * stop the channel, and remove all current and pending transfers */ static int s3c2410_dma_flush(struct s3c2410_dma_chan *chan) { struct s3c2410_dma_buf *buf, *next; unsigned long flags; pr_debug("%s: chan %p (%d)\n", __func__, chan, chan->number); dbg_showchan(chan); local_irq_save(flags); if (chan->state != S3C2410_DMA_IDLE) { pr_debug("%s: stopping channel...\n", __func__ ); s3c2410_dma_ctrl(chan->number, S3C2410_DMAOP_STOP); } buf = chan->curr; if (buf == NULL) buf = chan->next; chan->curr = chan->next = chan->end = NULL; if (buf != NULL) { for ( ; buf != NULL; buf = next) { next = buf->next; pr_debug("%s: free buffer %p, next %p\n", __func__, buf, buf->next); s3c2410_dma_buffdone(chan, buf, S3C2410_RES_ABORT); s3c2410_dma_freebuf(buf); } } dbg_showregs(chan); s3c2410_dma_waitforstop(chan); #if 0 /* should also clear interrupts, according to WinCE BSP */ { unsigned long tmp; tmp = dma_rdreg(chan, S3C2410_DMA_DCON); tmp |= S3C2410_DCON_NORELOAD; dma_wrreg(chan, S3C2410_DMA_DCON, tmp); } #endif dbg_showregs(chan); local_irq_restore(flags); return 0; } static int s3c2410_dma_started(struct s3c2410_dma_chan *chan) { unsigned long flags; local_irq_save(flags); dbg_showchan(chan); /* if we've only loaded one buffer onto the channel, then chec * to see if we have another, and if so, try and load it so when * the first buffer is finished, the new one will be loaded onto * the channel */ if (chan->next != NULL) { if (chan->load_state == S3C2410_DMALOAD_1LOADED) { if (s3c2410_dma_waitforload(chan, __LINE__) == 0) { pr_debug("%s: buff not yet loaded, no 
more todo\n", __func__); } else { chan->load_state = S3C2410_DMALOAD_1RUNNING; s3c2410_dma_loadbuffer(chan, chan->next); } } else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) { s3c2410_dma_loadbuffer(chan, chan->next); } } local_irq_restore(flags); return 0; } int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); if (chan == NULL) return -EINVAL; switch (op) { case S3C2410_DMAOP_START: return s3c2410_dma_start(chan); case S3C2410_DMAOP_STOP: return s3c2410_dma_dostop(chan); case S3C2410_DMAOP_PAUSE: case S3C2410_DMAOP_RESUME: return -ENOENT; case S3C2410_DMAOP_FLUSH: return s3c2410_dma_flush(chan); case S3C2410_DMAOP_STARTED: return s3c2410_dma_started(chan); case S3C2410_DMAOP_TIMEOUT: return 0; } return -ENOENT; /* unknown, don't bother */ } EXPORT_SYMBOL(s3c2410_dma_ctrl); /* DMA configuration for each channel * * DISRCC -> source of the DMA (AHB,APB) * DISRC -> source address of the DMA * DIDSTC -> destination of the DMA (AHB,APD) * DIDST -> destination address of the DMA */ /* s3c2410_dma_config * * xfersize: size of unit in bytes (1,2,4) */ int s3c2410_dma_config(enum dma_ch channel, int xferunit) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); unsigned int dcon; pr_debug("%s: chan=%d, xfer_unit=%d\n", __func__, channel, xferunit); if (chan == NULL) return -EINVAL; dcon = chan->dcon & dma_sel.dcon_mask; pr_debug("%s: dcon is %08x\n", __func__, dcon); switch (chan->req_ch) { case DMACH_I2S_IN: case DMACH_I2S_OUT: case DMACH_PCM_IN: case DMACH_PCM_OUT: case DMACH_MIC_IN: default: dcon |= S3C2410_DCON_HANDSHAKE; dcon |= S3C2410_DCON_SYNC_PCLK; break; case DMACH_SDI: /* note, ensure if need HANDSHAKE or not */ dcon |= S3C2410_DCON_SYNC_PCLK; break; case DMACH_XD0: case DMACH_XD1: dcon |= S3C2410_DCON_HANDSHAKE; dcon |= S3C2410_DCON_SYNC_HCLK; break; } switch (xferunit) { case 1: dcon |= S3C2410_DCON_BYTE; break; case 2: dcon |= S3C2410_DCON_HALFWORD; break; 
case 4: dcon |= S3C2410_DCON_WORD; break; default: pr_debug("%s: bad transfer size %d\n", __func__, xferunit); return -EINVAL; } dcon |= S3C2410_DCON_HWTRIG; dcon |= S3C2410_DCON_INTREQ; pr_debug("%s: dcon now %08x\n", __func__, dcon); chan->dcon = dcon; chan->xfer_unit = xferunit; return 0; } EXPORT_SYMBOL(s3c2410_dma_config); /* s3c2410_dma_devconfig * * configure the dma source/destination hardware type and address * * source: DMA_FROM_DEVICE: source is hardware * DMA_TO_DEVICE: source is memory * * devaddr: physical address of the source */ int s3c2410_dma_devconfig(enum dma_ch channel, enum dma_data_direction source, unsigned long devaddr) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); unsigned int hwcfg; if (chan == NULL) return -EINVAL; pr_debug("%s: source=%d, devaddr=%08lx\n", __func__, (int)source, devaddr); chan->source = source; chan->dev_addr = devaddr; switch (chan->req_ch) { case DMACH_XD0: case DMACH_XD1: hwcfg = 0; /* AHB */ break; default: hwcfg = S3C2410_DISRCC_APB; } /* always assume our peripheral desintation is a fixed * address in memory. 
*/ hwcfg |= S3C2410_DISRCC_INC; switch (source) { case DMA_FROM_DEVICE: /* source is hardware */ pr_debug("%s: hw source, devaddr=%08lx, hwcfg=%d\n", __func__, devaddr, hwcfg); dma_wrreg(chan, S3C2410_DMA_DISRCC, hwcfg & 3); dma_wrreg(chan, S3C2410_DMA_DISRC, devaddr); dma_wrreg(chan, S3C2410_DMA_DIDSTC, (0<<1) | (0<<0)); chan->addr_reg = dma_regaddr(chan, S3C2410_DMA_DIDST); break; case DMA_TO_DEVICE: /* source is memory */ pr_debug("%s: mem source, devaddr=%08lx, hwcfg=%d\n", __func__, devaddr, hwcfg); dma_wrreg(chan, S3C2410_DMA_DISRCC, (0<<1) | (0<<0)); dma_wrreg(chan, S3C2410_DMA_DIDST, devaddr); dma_wrreg(chan, S3C2410_DMA_DIDSTC, hwcfg & 3); chan->addr_reg = dma_regaddr(chan, S3C2410_DMA_DISRC); break; default: printk(KERN_ERR "dma%d: invalid source type (%d)\n", channel, source); return -EINVAL; } if (dma_sel.direction != NULL) (dma_sel.direction)(chan, chan->map, source); return 0; } EXPORT_SYMBOL(s3c2410_dma_devconfig); /* s3c2410_dma_getposition * * returns the current transfer points for the dma source and destination */ int s3c2410_dma_getposition(enum dma_ch channel, dma_addr_t *src, dma_addr_t *dst) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); if (chan == NULL) return -EINVAL; if (src != NULL) *src = dma_rdreg(chan, S3C2410_DMA_DCSRC); if (dst != NULL) *dst = dma_rdreg(chan, S3C2410_DMA_DCDST); return 0; } EXPORT_SYMBOL(s3c2410_dma_getposition); /* system core operations */ #ifdef CONFIG_PM static void s3c2410_dma_suspend_chan(struct s3c2410_dma_chan *cp) { printk(KERN_DEBUG "suspending dma channel %d\n", cp->number); if (dma_rdreg(cp, S3C2410_DMA_DMASKTRIG) & S3C2410_DMASKTRIG_ON) { /* the dma channel is still working, which is probably * a bad thing to do over suspend/resume. We stop the * channel and assume that the client is either going to * retry after resume, or that it is broken. 
*/ printk(KERN_INFO "dma: stopping channel %d due to suspend\n", cp->number); s3c2410_dma_dostop(cp); } } static int s3c2410_dma_suspend(void) { struct s3c2410_dma_chan *cp = s3c2410_chans; int channel; for (channel = 0; channel < dma_channels; cp++, channel++) s3c2410_dma_suspend_chan(cp); return 0; } static void s3c2410_dma_resume_chan(struct s3c2410_dma_chan *cp) { unsigned int no = cp->number | DMACH_LOW_LEVEL; /* restore channel's hardware configuration */ if (!cp->in_use) return; printk(KERN_INFO "dma%d: restoring configuration\n", cp->number); s3c2410_dma_config(no, cp->xfer_unit); s3c2410_dma_devconfig(no, cp->source, cp->dev_addr); /* re-select the dma source for this channel */ if (cp->map != NULL) dma_sel.select(cp, cp->map); } static void s3c2410_dma_resume(void) { struct s3c2410_dma_chan *cp = s3c2410_chans + dma_channels - 1; int channel; for (channel = dma_channels - 1; channel >= 0; cp--, channel--) s3c2410_dma_resume_chan(cp); } #else #define s3c2410_dma_suspend NULL #define s3c2410_dma_resume NULL #endif /* CONFIG_PM */ struct syscore_ops dma_syscore_ops = { .suspend = s3c2410_dma_suspend, .resume = s3c2410_dma_resume, }; /* kmem cache implementation */ static void s3c2410_dma_cache_ctor(void *p) { memset(p, 0, sizeof(struct s3c2410_dma_buf)); } /* initialisation code */ static int __init s3c24xx_dma_syscore_init(void) { register_syscore_ops(&dma_syscore_ops); return 0; } late_initcall(s3c24xx_dma_syscore_init); int __init s3c24xx_dma_init(unsigned int channels, unsigned int irq, unsigned int stride) { struct s3c2410_dma_chan *cp; int channel; int ret; printk("S3C24XX DMA Driver, Copyright 2003-2006 Simtec Electronics\n"); dma_channels = channels; dma_base = ioremap(S3C24XX_PA_DMA, stride * channels); if (dma_base == NULL) { printk(KERN_ERR "dma failed to remap register block\n"); return -ENOMEM; } dma_kmem = kmem_cache_create("dma_desc", sizeof(struct s3c2410_dma_buf), 0, SLAB_HWCACHE_ALIGN, s3c2410_dma_cache_ctor); if (dma_kmem == NULL) { 
printk(KERN_ERR "dma failed to make kmem cache\n"); ret = -ENOMEM; goto err; } for (channel = 0; channel < channels; channel++) { cp = &s3c2410_chans[channel]; memset(cp, 0, sizeof(struct s3c2410_dma_chan)); /* dma channel irqs are in order.. */ cp->number = channel; cp->irq = channel + irq; cp->regs = dma_base + (channel * stride); /* point current stats somewhere */ cp->stats = &cp->stats_store; cp->stats_store.timeout_shortest = LONG_MAX; /* basic channel configuration */ cp->load_timeout = 1<<18; printk("DMA channel %d at %p, irq %d\n", cp->number, cp->regs, cp->irq); } return 0; err: kmem_cache_destroy(dma_kmem); iounmap(dma_base); dma_base = NULL; return ret; } int __init s3c2410_dma_init(void) { return s3c24xx_dma_init(4, IRQ_DMA0, 0x40); } static inline int is_channel_valid(unsigned int channel) { return (channel & DMA_CH_VALID); } static struct s3c24xx_dma_order *dma_order; /* s3c2410_dma_map_channel() * * turn the virtual channel number into a real, and un-used hardware * channel. * * first, try the dma ordering given to us by either the relevant * dma code, or the board. 
Then just find the first usable free * channel */ static struct s3c2410_dma_chan *s3c2410_dma_map_channel(int channel) { struct s3c24xx_dma_order_ch *ord = NULL; struct s3c24xx_dma_map *ch_map; struct s3c2410_dma_chan *dmach; int ch; if (dma_sel.map == NULL || channel > dma_sel.map_size) return NULL; ch_map = dma_sel.map + channel; /* first, try the board mapping */ if (dma_order) { ord = &dma_order->channels[channel]; for (ch = 0; ch < dma_channels; ch++) { int tmp; if (!is_channel_valid(ord->list[ch])) continue; tmp = ord->list[ch] & ~DMA_CH_VALID; if (s3c2410_chans[tmp].in_use == 0) { ch = tmp; goto found; } } if (ord->flags & DMA_CH_NEVER) return NULL; } /* second, search the channel map for first free */ for (ch = 0; ch < dma_channels; ch++) { if (!is_channel_valid(ch_map->channels[ch])) continue; if (s3c2410_chans[ch].in_use == 0) { printk("mapped channel %d to %d\n", channel, ch); break; } } if (ch >= dma_channels) return NULL; /* update our channel mapping */ found: dmach = &s3c2410_chans[ch]; dmach->map = ch_map; dmach->req_ch = channel; s3c_dma_chan_map[channel] = dmach; /* select the channel */ (dma_sel.select)(dmach, ch_map); return dmach; } static int s3c24xx_dma_check_entry(struct s3c24xx_dma_map *map, int ch) { return 0; } int __init s3c24xx_dma_init_map(struct s3c24xx_dma_selection *sel) { struct s3c24xx_dma_map *nmap; size_t map_sz = sizeof(*nmap) * sel->map_size; int ptr; nmap = kmemdup(sel->map, map_sz, GFP_KERNEL); if (nmap == NULL) return -ENOMEM; memcpy(&dma_sel, sel, sizeof(*sel)); dma_sel.map = nmap; for (ptr = 0; ptr < sel->map_size; ptr++) s3c24xx_dma_check_entry(nmap+ptr, ptr); return 0; } int __init s3c24xx_dma_order_set(struct s3c24xx_dma_order *ord) { struct s3c24xx_dma_order *nord = dma_order; if (nord == NULL) nord = kmalloc(sizeof(struct s3c24xx_dma_order), GFP_KERNEL); if (nord == NULL) { printk(KERN_ERR "no memory to store dma channel order\n"); return -ENOMEM; } dma_order = nord; memcpy(nord, ord, sizeof(struct 
s3c24xx_dma_order)); return 0; }
gpl-2.0
MatiasBjorling/lightnvm-moved-to-OpenChannelSSD-Linux
arch/tile/lib/strnlen_32.c
2359
1434
/* * Copyright 2013 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/types.h> #include <linux/string.h> #include <linux/module.h> size_t strnlen(const char *s, size_t count) { /* Get an aligned pointer. */ const uintptr_t s_int = (uintptr_t) s; const uint32_t *p = (const uint32_t *)(s_int & -4); size_t bytes_read = sizeof(*p) - (s_int & (sizeof(*p) - 1)); size_t len; uint32_t v, bits; /* Avoid page fault risk by not reading any bytes when count is 0. */ if (count == 0) return 0; /* Read first word, but force bytes before the string to be nonzero. */ v = *p | ((1 << ((s_int << 3) & 31)) - 1); while ((bits = __insn_seqb(v, 0)) == 0) { if (bytes_read >= count) { /* Read COUNT bytes and didn't find the terminator. */ return count; } v = *++p; bytes_read += sizeof(v); } len = ((const char *) p) + (__insn_ctz(bits) >> 3) - s; return (len < count ? len : count); } EXPORT_SYMBOL(strnlen);
gpl-2.0
pershoot/kernel-2638
drivers/leds/leds-pca955x.c
4151
9879
/*
 * Copyright 2007-2008 Extreme Engineering Solutions, Inc.
 *
 * Author: Nate Case <ncase@xes-inc.com>
 *
 * This file is subject to the terms and conditions of version 2 of
 * the GNU General Public License.  See the file COPYING in the main
 * directory of this archive for more details.
 *
 * LED driver for various PCA955x I2C LED drivers
 *
 * Supported devices:
 *
 *	Device		Description		7-bit slave address
 *	------		-----------		-------------------
 *	PCA9550		2-bit driver		0x60 .. 0x61
 *	PCA9551		8-bit driver		0x60 .. 0x67
 *	PCA9552		16-bit driver		0x60 .. 0x67
 *	PCA9553/01	4-bit driver		0x62
 *	PCA9553/02	4-bit driver		0x63
 *
 * Philips PCA955x LED driver chips follow a register map as shown below:
 *
 *	Control Register		Description
 *	----------------		-----------
 *	0x0				Input register 0
 *					..
 *	NUM_INPUT_REGS - 1		Last Input register X
 *
 *	NUM_INPUT_REGS			Frequency prescaler 0
 *	NUM_INPUT_REGS + 1		PWM register 0
 *	NUM_INPUT_REGS + 2		Frequency prescaler 1
 *	NUM_INPUT_REGS + 3		PWM register 1
 *
 *	NUM_INPUT_REGS + 4		LED selector 0
 *	NUM_INPUT_REGS + 4
 *	    + NUM_LED_REGS - 1		Last LED selector
 *
 *  where NUM_INPUT_REGS and NUM_LED_REGS vary depending on how many
 *  bits the chip supports.
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/leds.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

/* LED select registers determine the source that drives LED outputs */
#define PCA955X_LS_LED_ON	0x0	/* Output LOW */
#define PCA955X_LS_LED_OFF	0x1	/* Output HI-Z */
#define PCA955X_LS_BLINK0	0x2	/* Blink at PWM0 rate */
#define PCA955X_LS_BLINK1	0x3	/* Blink at PWM1 rate */

/* Index into pca955x_chipdefs[]; matches pca955x_id[].driver_data. */
enum pca955x_type {
	pca9550,
	pca9551,
	pca9552,
	pca9553,
};

/* Per-chip-variant description: LED count and legal slave addresses. */
struct pca955x_chipdef {
	int			bits;
	u8			slv_addr;	/* 7-bit slave address mask */
	int			slv_addr_shift;	/* Number of bits to ignore */
};

static struct pca955x_chipdef pca955x_chipdefs[] = {
	[pca9550] = {
		.bits		= 2,
		.slv_addr	= /* 110000x */ 0x60,
		.slv_addr_shift	= 1,
	},
	[pca9551] = {
		.bits		= 8,
		.slv_addr	= /* 1100xxx */ 0x60,
		.slv_addr_shift	= 3,
	},
	[pca9552] = {
		.bits		= 16,
		.slv_addr	= /* 1100xxx */ 0x60,
		.slv_addr_shift	= 3,
	},
	[pca9553] = {
		.bits		= 4,
		.slv_addr	= /* 110001x */ 0x62,
		.slv_addr_shift	= 1,
	},
};

static const struct i2c_device_id pca955x_id[] = {
	{ "pca9550", pca9550 },
	{ "pca9551", pca9551 },
	{ "pca9552", pca9552 },
	{ "pca9553", pca9553 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, pca955x_id);

/*
 * One instance per LED output.  probe() allocates an array of chip->bits
 * of these; [0].chipdef is what remove() consults for the LED count.
 */
struct pca955x_led {
	struct pca955x_chipdef	*chipdef;
	struct i2c_client	*client;
	struct work_struct	work;	/* defers I2C I/O out of brightness_set */
	spinlock_t		lock;	/* guards brightness + work scheduling */
	enum led_brightness	brightness;
	struct led_classdev	led_cdev;
	int			led_num;	/* 0 .. 15 potentially */
	char			name[32];
};

/* 8 bits per input register */
static inline int pca95xx_num_input_regs(int bits)
{
	return (bits + 7) / 8;
}
/* 4 bits per LED selector register */
static inline int pca95xx_num_led_regs(int bits)
{
	return (bits + 3) / 4;
}

/*
 * Return an LED selector register value based on an existing one, with
 * the appropriate 2-bit state value set for the given LED number (0-3).
 */
static inline u8 pca955x_ledsel(u8 oldval, int led_num, int state)
{
	return (oldval & (~(0x3 << (led_num << 1)))) |
		((state & 0x3) << (led_num << 1));
}

/*
 * Write to frequency prescaler register, used to program the
 * period of the PWM output.  period = (PSCx + 1) / 38
 *
 * @n selects prescaler 0 or 1 (register offset NUM_INPUT_REGS + 2*n).
 */
static void pca955x_write_psc(struct i2c_client *client, int n, u8 val)
{
	struct pca955x_led *pca955x = i2c_get_clientdata(client);

	i2c_smbus_write_byte_data(client,
		pca95xx_num_input_regs(pca955x->chipdef->bits) + 2*n,
		val);
}

/*
 * Write to PWM register, which determines the duty cycle of the
 * output.  LED is OFF when the count is less than the value of this
 * register, and ON when it is greater.  If PWMx == 0, LED is always OFF.
 *
 * Duty cycle is (256 - PWMx) / 256
 */
static void pca955x_write_pwm(struct i2c_client *client, int n, u8 val)
{
	struct pca955x_led *pca955x = i2c_get_clientdata(client);

	i2c_smbus_write_byte_data(client,
		pca95xx_num_input_regs(pca955x->chipdef->bits) + 1 + 2*n,
		val);
}

/*
 * Write to LED selector register, which determines the source that
 * drives the LED output.
 *
 * @n is the LSx register index (each covers four LEDs, 2 bits apiece).
 */
static void pca955x_write_ls(struct i2c_client *client, int n, u8 val)
{
	struct pca955x_led *pca955x = i2c_get_clientdata(client);

	i2c_smbus_write_byte_data(client,
		pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n,
		val);
}

/*
 * Read the LED selector register, which determines the source that
 * drives the LED output.
 */
static u8 pca955x_read_ls(struct i2c_client *client, int n)
{
	struct pca955x_led *pca955x = i2c_get_clientdata(client);

	return (u8) i2c_smbus_read_byte_data(client,
		pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n);
}

/*
 * Workqueue handler: apply the cached brightness to the hardware via a
 * read-modify-write of the LED's selector register.  Runs in process
 * context because the smbus accessors above may sleep.
 */
static void pca955x_led_work(struct work_struct *work)
{
	struct pca955x_led *pca955x;
	u8 ls;
	int chip_ls;	/* which LSx to use (0-3 potentially) */
	int ls_led;	/* which set of bits within LSx to use (0-3) */

	pca955x = container_of(work, struct pca955x_led, work);
	chip_ls = pca955x->led_num / 4;
	ls_led = pca955x->led_num % 4;

	ls = pca955x_read_ls(pca955x->client, chip_ls);

	switch (pca955x->brightness) {
	case LED_FULL:
		ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_ON);
		break;
	case LED_OFF:
		ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_OFF);
		break;
	case LED_HALF:
		ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK0);
		break;
	default:
		/*
		 * Use PWM1 for all other values.  This has the unwanted
		 * side effect of making all LEDs on the chip share the
		 * same brightness level if set to a value other than
		 * OFF, HALF, or FULL.  But, this is probably better than
		 * just turning off for all other values.
		 */
		pca955x_write_pwm(pca955x->client, 1, 255-pca955x->brightness);
		ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK1);
		break;
	}

	pca955x_write_ls(pca955x->client, chip_ls, ls);
}

/*
 * led_classdev brightness_set hook.  May be called in atomic context, so
 * it only records the value and schedules the work item above.
 */
static void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness value)
{
	struct pca955x_led *pca955x;

	pca955x = container_of(led_cdev, struct pca955x_led, led_cdev);

	spin_lock(&pca955x->lock);
	pca955x->brightness = value;

	/*
	 * Must use workqueue for the actual I/O since I2C operations
	 * can sleep.
	 */
	schedule_work(&pca955x->work);

	spin_unlock(&pca955x->lock);
}

/*
 * Probe: validate the address/variant combination, register one LED class
 * device per output, and put the hardware in a known state (all LEDs off,
 * PWM0 = 50%, PWM1 = off, fastest blink frequency).
 */
static int __devinit pca955x_probe(struct i2c_client *client,
					const struct i2c_device_id *id)
{
	struct pca955x_led *pca955x;
	struct pca955x_chipdef *chip;
	struct i2c_adapter *adapter;
	struct led_platform_data *pdata;
	int i, err;

	chip = &pca955x_chipdefs[id->driver_data];
	adapter = to_i2c_adapter(client->dev.parent);
	pdata = client->dev.platform_data;

	/* Make sure the slave address / chip type combo given is possible */
	if ((client->addr & ~((1 << chip->slv_addr_shift) - 1)) !=
	    chip->slv_addr) {
		dev_err(&client->dev, "invalid slave address %02x\n",
				client->addr);
		return -ENODEV;
	}

	printk(KERN_INFO "leds-pca955x: Using %s %d-bit LED driver at "
			"slave address 0x%02x\n",
			id->name, chip->bits, client->addr);

	/*
	 * NOTE(review): the driver only issues SMBus byte-data transfers,
	 * yet checks for I2C_FUNC_I2C; this rejects SMBus-only adapters
	 * that could in fact drive the chip -- confirm intended policy.
	 */
	if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
		return -EIO;

	if (pdata) {
		if (pdata->num_leds != chip->bits) {
			dev_err(&client->dev, "board info claims %d LEDs"
					" on a %d-bit chip\n",
					pdata->num_leds, chip->bits);
			return -ENODEV;
		}
	}

	pca955x = kzalloc(sizeof(*pca955x) * chip->bits, GFP_KERNEL);
	if (!pca955x)
		return -ENOMEM;

	i2c_set_clientdata(client, pca955x);

	for (i = 0; i < chip->bits; i++) {
		pca955x[i].chipdef = chip;
		pca955x[i].client = client;
		pca955x[i].led_num = i;

		/* Platform data can specify LED names and default triggers */
		if (pdata) {
			if (pdata->leds[i].name)
				snprintf(pca955x[i].name,
					 sizeof(pca955x[i].name), "pca955x:%s",
					 pdata->leds[i].name);
			if (pdata->leds[i].default_trigger)
				pca955x[i].led_cdev.default_trigger =
					pdata->leds[i].default_trigger;
		} else {
			snprintf(pca955x[i].name, sizeof(pca955x[i].name),
				 "pca955x:%d", i);
		}

		spin_lock_init(&pca955x[i].lock);

		pca955x[i].led_cdev.name = pca955x[i].name;
		pca955x[i].led_cdev.brightness_set = pca955x_led_set;

		INIT_WORK(&pca955x[i].work, pca955x_led_work);

		err = led_classdev_register(&client->dev, &pca955x[i].led_cdev);
		if (err < 0)
			goto exit;
	}

	/* Turn off LEDs */
	for (i = 0; i < pca95xx_num_led_regs(chip->bits); i++)
		pca955x_write_ls(client, i, 0x55);

	/* PWM0 is used for half brightness or 50% duty cycle */
	pca955x_write_pwm(client, 0, 255-LED_HALF);

	/* PWM1 is used for variable brightness, default to OFF */
	pca955x_write_pwm(client, 1, 0);

	/* Set to fast frequency so we do not see flashing */
	pca955x_write_psc(client, 0, 0);
	pca955x_write_psc(client, 1, 0);

	return 0;

exit:
	/* Unwind only the classdevs registered so far, then free the array. */
	while (i--) {
		led_classdev_unregister(&pca955x[i].led_cdev);
		cancel_work_sync(&pca955x[i].work);
	}

	kfree(pca955x);

	return err;
}

/* Remove: unregister every LED classdev, flush pending work, free state. */
static int __devexit pca955x_remove(struct i2c_client *client)
{
	struct pca955x_led *pca955x = i2c_get_clientdata(client);
	int i;

	for (i = 0; i < pca955x->chipdef->bits; i++) {
		led_classdev_unregister(&pca955x[i].led_cdev);
		cancel_work_sync(&pca955x[i].work);
	}

	kfree(pca955x);

	return 0;
}

static struct i2c_driver pca955x_driver = {
	.driver = {
		.name	= "leds-pca955x",
		.owner	= THIS_MODULE,
	},
	.probe	= pca955x_probe,
	.remove	= __devexit_p(pca955x_remove),
	.id_table = pca955x_id,
};

static int __init pca955x_leds_init(void)
{
	return i2c_add_driver(&pca955x_driver);
}

static void __exit pca955x_leds_exit(void)
{
	i2c_del_driver(&pca955x_driver);
}

module_init(pca955x_leds_init);
module_exit(pca955x_leds_exit);

MODULE_AUTHOR("Nate Case <ncase@xes-inc.com>");
MODULE_DESCRIPTION("PCA955x LED driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
gmillz/kernel_lge_msm8974
drivers/acpi/acpica/tbxfroot.c
4919
8378
/******************************************************************************
 *
 * Module Name: tbxfroot - Find the root ACPI table (RSDT)
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2012, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "actables.h"

#define _COMPONENT          ACPI_TABLES
ACPI_MODULE_NAME("tbxfroot")

/* Local prototypes */
static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length);

static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp);

/*******************************************************************************
 *
 * FUNCTION:    acpi_tb_validate_rsdp
 *
 * PARAMETERS:  Rsdp                - Pointer to unvalidated RSDP
 *
 * RETURN:      Status (AE_OK, AE_BAD_SIGNATURE, or AE_BAD_CHECKSUM)
 *
 * DESCRIPTION: Validate the RSDP (ptr)
 *
 ******************************************************************************/

static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
{
	ACPI_FUNCTION_ENTRY();

	/*
	 * The signature and checksum must both be correct
	 *
	 * Note: Sometimes there exists more than one RSDP in memory; the valid
	 * RSDP has a valid checksum, all others have an invalid checksum.
	 */
	if (ACPI_STRNCMP((char *)rsdp, ACPI_SIG_RSDP,
			 sizeof(ACPI_SIG_RSDP) - 1) != 0) {

		/* Nope, BAD Signature */

		return (AE_BAD_SIGNATURE);
	}

	/* Check the standard checksum (covers the ACPI 1.0 portion only) */

	if (acpi_tb_checksum((u8 *) rsdp, ACPI_RSDP_CHECKSUM_LENGTH) != 0) {
		return (AE_BAD_CHECKSUM);
	}

	/* Check extended checksum if table version >= 2 */

	if ((rsdp->revision >= 2) &&
	    (acpi_tb_checksum((u8 *) rsdp, ACPI_RSDP_XCHECKSUM_LENGTH) != 0)) {
		return (AE_BAD_CHECKSUM);
	}

	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_find_root_pointer
 *
 * PARAMETERS:  table_address           - Where the table pointer is returned
 *
 * RETURN:      Status, RSDP physical address
 *
 * DESCRIPTION: Search lower 1_mbyte of memory for the root system descriptor
 *              pointer structure. If it is found, set *RSDP to point to it.
 *
 * NOTE1:       The RSDP must be either in the first 1_k of the Extended
 *              BIOS Data Area or between E0000 and FFFFF (From ACPI Spec.)
 *              Only a 32-bit physical address is necessary.
 *
 * NOTE2:       This function is always available, regardless of the
 *              initialization state of the rest of ACPI.
 *
 ******************************************************************************/

acpi_status acpi_find_root_pointer(acpi_size *table_address)
{
	u8 *table_ptr;
	u8 *mem_rover;
	u32 physical_address;

	ACPI_FUNCTION_TRACE(acpi_find_root_pointer);

	/* 1a) Get the location of the Extended BIOS Data Area (EBDA) */

	table_ptr = acpi_os_map_memory((acpi_physical_address)
				       ACPI_EBDA_PTR_LOCATION,
				       ACPI_EBDA_PTR_LENGTH);
	if (!table_ptr) {
		ACPI_ERROR((AE_INFO,
			    "Could not map memory at 0x%8.8X for length %u",
			    ACPI_EBDA_PTR_LOCATION, ACPI_EBDA_PTR_LENGTH));

		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	ACPI_MOVE_16_TO_32(&physical_address, table_ptr);

	/* Convert segment part to physical address */

	physical_address <<= 4;
	acpi_os_unmap_memory(table_ptr, ACPI_EBDA_PTR_LENGTH);

	/* EBDA present? (segments below 0x40 would overlap the IVT/BDA) */

	if (physical_address > 0x400) {
		/*
		 * 1b) Search EBDA paragraphs (EBDA is required to be a
		 * minimum of 1_k length)
		 */
		table_ptr = acpi_os_map_memory((acpi_physical_address)
					       physical_address,
					       ACPI_EBDA_WINDOW_SIZE);
		if (!table_ptr) {
			ACPI_ERROR((AE_INFO,
				    "Could not map memory at 0x%8.8X for length %u",
				    physical_address, ACPI_EBDA_WINDOW_SIZE));

			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		mem_rover =
		    acpi_tb_scan_memory_for_rsdp(table_ptr,
						 ACPI_EBDA_WINDOW_SIZE);
		acpi_os_unmap_memory(table_ptr, ACPI_EBDA_WINDOW_SIZE);

		if (mem_rover) {

			/* Return the physical address */

			physical_address +=
			    (u32) ACPI_PTR_DIFF(mem_rover, table_ptr);

			*table_address = physical_address;
			return_ACPI_STATUS(AE_OK);
		}
	}

	/*
	 * 2) Search upper memory: 16-byte boundaries in E0000h-FFFFFh
	 */
	table_ptr = acpi_os_map_memory((acpi_physical_address)
				       ACPI_HI_RSDP_WINDOW_BASE,
				       ACPI_HI_RSDP_WINDOW_SIZE);

	if (!table_ptr) {
		ACPI_ERROR((AE_INFO,
			    "Could not map memory at 0x%8.8X for length %u",
			    ACPI_HI_RSDP_WINDOW_BASE,
			    ACPI_HI_RSDP_WINDOW_SIZE));

		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	mem_rover =
	    acpi_tb_scan_memory_for_rsdp(table_ptr, ACPI_HI_RSDP_WINDOW_SIZE);
	acpi_os_unmap_memory(table_ptr, ACPI_HI_RSDP_WINDOW_SIZE);

	if (mem_rover) {

		/* Return the physical address */

		physical_address = (u32)
		    (ACPI_HI_RSDP_WINDOW_BASE +
		     ACPI_PTR_DIFF(mem_rover, table_ptr));

		*table_address = physical_address;
		return_ACPI_STATUS(AE_OK);
	}

	/* A valid RSDP was not found */

	ACPI_ERROR((AE_INFO, "A valid RSDP was not found"));
	return_ACPI_STATUS(AE_NOT_FOUND);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_tb_scan_memory_for_rsdp
 *
 * PARAMETERS:  start_address       - Starting pointer for search
 *              Length              - Maximum length to search
 *
 * RETURN:      Pointer to the RSDP if found, otherwise NULL.
 *
 * DESCRIPTION: Search a block of memory for the RSDP signature.  Steps by
 *              ACPI_RSDP_SCAN_STEP (16-byte paragraphs per the ACPI spec).
 *
 ******************************************************************************/

static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length)
{
	acpi_status status;
	u8 *mem_rover;
	u8 *end_address;

	ACPI_FUNCTION_TRACE(tb_scan_memory_for_rsdp);

	end_address = start_address + length;

	/* Search from given start address for the requested length */

	for (mem_rover = start_address; mem_rover < end_address;
	     mem_rover += ACPI_RSDP_SCAN_STEP) {

		/* The RSDP signature and checksum must both be correct */

		status =
		    acpi_tb_validate_rsdp(ACPI_CAST_PTR
					  (struct acpi_table_rsdp, mem_rover));
		if (ACPI_SUCCESS(status)) {

			/* Sig and checksum valid, we have found a real RSDP */

			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "RSDP located at physical address %p\n",
					  mem_rover));
			return_PTR(mem_rover);
		}

		/* No sig match or bad checksum, keep searching */
	}

	/* Searched entire block, no RSDP was found */

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Searched entire block from %p, valid RSDP was not found\n",
			  start_address));
	return_PTR(NULL);
}
gpl-2.0
sebirdman/m7_kernel_dev
drivers/acpi/acpica/nsaccess.c
4919
19029
/*******************************************************************************
 *
 * Module Name: nsaccess - Top-level functions for accessing ACPI namespace
 *
 ******************************************************************************/

/*
 * Copyright (C) 2000 - 2012, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "amlcode.h"
#include "acnamesp.h"
#include "acdispat.h"

#define _COMPONENT          ACPI_NAMESPACE
ACPI_MODULE_NAME("nsaccess")

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_root_initialize
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Allocate and initialize the default root named objects
 *
 * MUTEX:       Locks namespace for entire execution
 *
 ******************************************************************************/

acpi_status acpi_ns_root_initialize(void)
{
	acpi_status status;
	const struct acpi_predefined_names *init_val = NULL;
	struct acpi_namespace_node *new_node;
	union acpi_operand_object *obj_desc;
	acpi_string val = NULL;

	ACPI_FUNCTION_TRACE(ns_root_initialize);

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * The global root ptr is initially NULL, so a non-NULL value indicates
	 * that acpi_ns_root_initialize() has already been called; just return.
	 */
	if (acpi_gbl_root_node) {
		status = AE_OK;
		goto unlock_and_exit;
	}

	/*
	 * Tell the rest of the subsystem that the root is initialized
	 * (This is OK because the namespace is locked)
	 */
	acpi_gbl_root_node = &acpi_gbl_root_node_struct;

	/* Enter the pre-defined names in the name table */

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Entering predefined entries into namespace\n"));

	for (init_val = acpi_gbl_pre_defined_names; init_val->name; init_val++) {

		/* _OSI is optional for now, will be permanent later */

		if (!ACPI_STRCMP(init_val->name, "_OSI")
		    && !acpi_gbl_create_osi_method) {
			continue;
		}

		status = acpi_ns_lookup(NULL, init_val->name, init_val->type,
					ACPI_IMODE_LOAD_PASS2,
					ACPI_NS_NO_UPSEARCH, NULL, &new_node);
		if (ACPI_FAILURE(status) || (!new_node)) {	/* Must be on same line for code converter */
			ACPI_EXCEPTION((AE_INFO, status,
					"Could not create predefined name %s",
					init_val->name));
		}

		/*
		 * Name entered successfully. If entry in pre_defined_names[] specifies
		 * an initial value, create the initial value.
		 */
		if (init_val->val) {
			status = acpi_os_predefined_override(init_val, &val);
			if (ACPI_FAILURE(status)) {
				ACPI_ERROR((AE_INFO,
					    "Could not override predefined %s",
					    init_val->name));
			}

			if (!val) {
				val = init_val->val;
			}

			/*
			 * Entry requests an initial value, allocate a
			 * descriptor for it.
			 */
			obj_desc =
			    acpi_ut_create_internal_object(init_val->type);
			if (!obj_desc) {
				status = AE_NO_MEMORY;
				goto unlock_and_exit;
			}

			/*
			 * Convert value string from table entry to
			 * internal representation. Only types actually
			 * used for initial values are implemented here.
			 */
			switch (init_val->type) {
			case ACPI_TYPE_METHOD:
				obj_desc->method.param_count =
				    (u8) ACPI_TO_INTEGER(val);
				obj_desc->common.flags |= AOPOBJ_DATA_VALID;

#if defined (ACPI_ASL_COMPILER)

				/* Save the parameter count for the i_aSL compiler */

				new_node->value = obj_desc->method.param_count;
#else
				/* Mark this as a very SPECIAL method */

				obj_desc->method.info_flags =
				    ACPI_METHOD_INTERNAL_ONLY;
				obj_desc->method.dispatch.implementation =
				    acpi_ut_osi_implementation;
#endif
				break;

			case ACPI_TYPE_INTEGER:

				obj_desc->integer.value = ACPI_TO_INTEGER(val);
				break;

			case ACPI_TYPE_STRING:

				/* Build an object around the static string */

				obj_desc->string.length =
				    (u32) ACPI_STRLEN(val);
				obj_desc->string.pointer = val;
				obj_desc->common.flags |= AOPOBJ_STATIC_POINTER;
				break;

			case ACPI_TYPE_MUTEX:

				obj_desc->mutex.node = new_node;
				obj_desc->mutex.sync_level =
				    (u8) (ACPI_TO_INTEGER(val) - 1);

				/* Create a mutex */

				status =
				    acpi_os_create_mutex(&obj_desc->mutex.
							 os_mutex);
				if (ACPI_FAILURE(status)) {
					acpi_ut_remove_reference(obj_desc);
					goto unlock_and_exit;
				}

				/* Special case for ACPI Global Lock */

				if (ACPI_STRCMP(init_val->name, "_GL_") == 0) {
					acpi_gbl_global_lock_mutex = obj_desc;

					/* Create additional counting semaphore for global lock */

					status =
					    acpi_os_create_semaphore(1, 0,
								     &acpi_gbl_global_lock_semaphore);
					if (ACPI_FAILURE(status)) {
						acpi_ut_remove_reference
						    (obj_desc);
						goto unlock_and_exit;
					}
				}
				break;

			default:

				ACPI_ERROR((AE_INFO,
					    "Unsupported initial type value 0x%X",
					    init_val->type));
				acpi_ut_remove_reference(obj_desc);
				obj_desc = NULL;
				continue;
			}

			/* Store pointer to value descriptor in the Node */

			status = acpi_ns_attach_object(new_node, obj_desc,
						       obj_desc->common.type);

			/* Remove local reference to the object */

			acpi_ut_remove_reference(obj_desc);
		}
	}

unlock_and_exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);

	/* Save a handle to "_GPE", it is always present */

	if (ACPI_SUCCESS(status)) {
		status = acpi_ns_get_node(NULL, "\\_GPE", ACPI_NS_NO_UPSEARCH,
					  &acpi_gbl_fadt_gpe_device);
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_lookup
 *
 * PARAMETERS:  scope_info      - Current scope info block
 *              Pathname        - Search pathname, in internal format
 *                                (as represented in the AML stream)
 *              Type            - Type associated with name
 *              interpreter_mode - IMODE_LOAD_PASS2 => add name if not found
 *              Flags           - Flags describing the search restrictions
 *              walk_state      - Current state of the walk
 *              return_node     - Where the Node is placed (if found
 *                                or created successfully)
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Find or enter the passed name in the name space.
 *              Log an error if name not found in Exec mode.
 *
 * MUTEX:       Assumes namespace is locked.
 *
 ******************************************************************************/

acpi_status
acpi_ns_lookup(union acpi_generic_state *scope_info,
	       char *pathname,
	       acpi_object_type type,
	       acpi_interpreter_mode interpreter_mode,
	       u32 flags,
	       struct acpi_walk_state *walk_state,
	       struct acpi_namespace_node **return_node)
{
	acpi_status status;
	char *path = pathname;
	struct acpi_namespace_node *prefix_node;
	struct acpi_namespace_node *current_node = NULL;
	struct acpi_namespace_node *this_node = NULL;
	u32 num_segments;	/* 4-char name segments remaining in path */
	u32 num_carats;		/* parent-prefix (^) count, for debug only */
	acpi_name simple_name;
	acpi_object_type type_to_check_for;
	acpi_object_type this_search_type;
	u32 search_parent_flag = ACPI_NS_SEARCH_PARENT;
	u32 local_flags;

	ACPI_FUNCTION_TRACE(ns_lookup);

	if (!return_node) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	local_flags = flags & ~(ACPI_NS_ERROR_IF_FOUND | ACPI_NS_SEARCH_PARENT);
	*return_node = ACPI_ENTRY_NOT_FOUND;
	acpi_gbl_ns_lookup_count++;

	if (!acpi_gbl_root_node) {
		return_ACPI_STATUS(AE_NO_NAMESPACE);
	}

	/* Get the prefix scope. A null scope means use the root scope */

	if ((!scope_info) || (!scope_info->scope.node)) {
		ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
				  "Null scope prefix, using root node (%p)\n",
				  acpi_gbl_root_node));

		prefix_node = acpi_gbl_root_node;
	} else {
		prefix_node = scope_info->scope.node;
		if (ACPI_GET_DESCRIPTOR_TYPE(prefix_node) !=
		    ACPI_DESC_TYPE_NAMED) {
			ACPI_ERROR((AE_INFO, "%p is not a namespace node [%s]",
				    prefix_node,
				    acpi_ut_get_descriptor_name(prefix_node)));
			return_ACPI_STATUS(AE_AML_INTERNAL);
		}

		if (!(flags & ACPI_NS_PREFIX_IS_SCOPE)) {
			/*
			 * This node might not be a actual "scope" node (such as a
			 * Device/Method, etc.)  It could be a Package or other object
			 * node.  Backup up the tree to find the containing scope node.
			 */
			while (!acpi_ns_opens_scope(prefix_node->type) &&
			       prefix_node->type != ACPI_TYPE_ANY) {
				prefix_node = prefix_node->parent;
			}
		}
	}

	/* Save type. TBD: may be no longer necessary */

	type_to_check_for = type;

	/*
	 * Begin examination of the actual pathname
	 */
	if (!pathname) {

		/* A Null name_path is allowed and refers to the root */

		num_segments = 0;
		this_node = acpi_gbl_root_node;
		path = "";

		ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
				  "Null Pathname (Zero segments), Flags=%X\n",
				  flags));
	} else {
		/*
		 * Name pointer is valid (and must be in internal name format)
		 *
		 * Check for scope prefixes:
		 *
		 * As represented in the AML stream, a namepath consists of an
		 * optional scope prefix followed by a name segment part.
		 *
		 * If present, the scope prefix is either a Root Prefix (in
		 * which case the name is fully qualified), or one or more
		 * Parent Prefixes (in which case the name's scope is relative
		 * to the current scope).
		 */
		if (*path == (u8) AML_ROOT_PREFIX) {

			/* Pathname is fully qualified, start from the root */

			this_node = acpi_gbl_root_node;
			search_parent_flag = ACPI_NS_NO_UPSEARCH;

			/* Point to name segment part */

			path++;

			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
					  "Path is absolute from root [%p]\n",
					  this_node));
		} else {
			/* Pathname is relative to current scope, start there */

			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
					  "Searching relative to prefix scope [%4.4s] (%p)\n",
					  acpi_ut_get_node_name(prefix_node),
					  prefix_node));

			/*
			 * Handle multiple Parent Prefixes (carat) by just getting
			 * the parent node for each prefix instance.
			 */
			this_node = prefix_node;
			num_carats = 0;
			while (*path == (u8) AML_PARENT_PREFIX) {

				/* Name is fully qualified, no search rules apply */

				search_parent_flag = ACPI_NS_NO_UPSEARCH;

				/*
				 * Point past this prefix to the name segment
				 * part or the next Parent Prefix
				 */
				path++;

				/* Backup to the parent node */

				num_carats++;
				this_node = this_node->parent;
				if (!this_node) {

					/* Current scope has no parent scope */

					ACPI_ERROR((AE_INFO,
						    "ACPI path has too many parent prefixes (^) "
						    "- reached beyond root node"));
					return_ACPI_STATUS(AE_NOT_FOUND);
				}
			}

			if (search_parent_flag == ACPI_NS_NO_UPSEARCH) {
				ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
						  "Search scope is [%4.4s], path has %u carat(s)\n",
						  acpi_ut_get_node_name
						  (this_node), num_carats));
			}
		}

		/*
		 * Determine the number of ACPI name segments in this pathname.
		 *
		 * The segment part consists of either:
		 *  - A Null name segment (0)
		 *  - A dual_name_prefix followed by two 4-byte name segments
		 *  - A multi_name_prefix followed by a byte indicating the
		 *    number of segments and the segments themselves.
		 *  - A single 4-byte name segment
		 *
		 * Examine the name prefix opcode, if any, to determine the number of
		 * segments.
		 */
		switch (*path) {
		case 0:
			/*
			 * Null name after a root or parent prefixes. We already
			 * have the correct target node and there are no name segments.
			 */
			num_segments = 0;
			type = this_node->type;

			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
					  "Prefix-only Pathname (Zero name segments), Flags=%X\n",
					  flags));
			break;

		case AML_DUAL_NAME_PREFIX:

			/* More than one name_seg, search rules do not apply */

			search_parent_flag = ACPI_NS_NO_UPSEARCH;

			/* Two segments, point to first name segment */

			num_segments = 2;
			path++;

			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
					  "Dual Pathname (2 segments, Flags=%X)\n",
					  flags));
			break;

		case AML_MULTI_NAME_PREFIX_OP:

			/* More than one name_seg, search rules do not apply */

			search_parent_flag = ACPI_NS_NO_UPSEARCH;

			/* Extract segment count, point to first name segment */

			path++;
			num_segments = (u32) (u8) * path;
			path++;

			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
					  "Multi Pathname (%u Segments, Flags=%X)\n",
					  num_segments, flags));
			break;

		default:
			/*
			 * Not a Null name, no Dual or Multi prefix, hence there is
			 * only one name segment and Pathname is already pointing to it.
			 */
			num_segments = 1;

			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
					  "Simple Pathname (1 segment, Flags=%X)\n",
					  flags));
			break;
		}

		ACPI_DEBUG_EXEC(acpi_ns_print_pathname(num_segments, path));
	}

	/*
	 * Search namespace for each segment of the name. Loop through and
	 * verify (or add to the namespace) each name segment.
	 *
	 * The object type is significant only at the last name
	 * segment. (We don't care about the types along the path, only
	 * the type of the final target object.)
	 */
	this_search_type = ACPI_TYPE_ANY;
	current_node = this_node;
	while (num_segments && current_node) {
		num_segments--;
		if (!num_segments) {

			/* This is the last segment, enable typechecking */

			this_search_type = type;

			/*
			 * Only allow automatic parent search (search rules) if the caller
			 * requested it AND we have a single, non-fully-qualified name_seg
			 */
			if ((search_parent_flag != ACPI_NS_NO_UPSEARCH) &&
			    (flags & ACPI_NS_SEARCH_PARENT)) {
				local_flags |= ACPI_NS_SEARCH_PARENT;
			}

			/* Set error flag according to caller */

			if (flags & ACPI_NS_ERROR_IF_FOUND) {
				local_flags |= ACPI_NS_ERROR_IF_FOUND;
			}
		}

		/* Extract one ACPI name from the front of the pathname */

		ACPI_MOVE_32_TO_32(&simple_name, path);

		/* Try to find the single (4 character) ACPI name */

		status =
		    acpi_ns_search_and_enter(simple_name, walk_state,
					     current_node, interpreter_mode,
					     this_search_type, local_flags,
					     &this_node);
		if (ACPI_FAILURE(status)) {
			if (status == AE_NOT_FOUND) {

				/* Name not found in ACPI namespace */

				ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
						  "Name [%4.4s] not found in scope [%4.4s] %p\n",
						  (char *)&simple_name,
						  (char *)&current_node->name,
						  current_node));
			}

			*return_node = this_node;
			return_ACPI_STATUS(status);
		}

		/* More segments to follow? */

		if (num_segments > 0) {
			/*
			 * If we have an alias to an object that opens a scope (such as a
			 * device or processor), we need to dereference the alias here so
			 * that we can access any children of the original node (via the
			 * remaining segments).
			 */
			if (this_node->type == ACPI_TYPE_LOCAL_ALIAS) {
				if (!this_node->object) {
					return_ACPI_STATUS(AE_NOT_EXIST);
				}

				if (acpi_ns_opens_scope
				    (((struct acpi_namespace_node *)
				      this_node->object)->type)) {
					this_node =
					    (struct acpi_namespace_node *)
					    this_node->object;
				}
			}
		}

		/* Special handling for the last segment (num_segments == 0) */

		else {
			/*
			 * Sanity typecheck of the target object:
			 *
			 * If 1) This is the last segment (num_segments == 0)
			 *    2) And we are looking for a specific type
			 *       (Not checking for TYPE_ANY)
			 *    3) Which is not an alias
			 *    4) Which is not a local type (TYPE_SCOPE)
			 *    5) And the type of target object is known (not TYPE_ANY)
			 *    6) And target object does not match what we are looking for
			 *
			 * Then we have a type mismatch. Just warn and ignore it.
			 */
			if ((type_to_check_for != ACPI_TYPE_ANY) &&
			    (type_to_check_for != ACPI_TYPE_LOCAL_ALIAS) &&
			    (type_to_check_for != ACPI_TYPE_LOCAL_METHOD_ALIAS)
			    && (type_to_check_for != ACPI_TYPE_LOCAL_SCOPE)
			    && (this_node->type != ACPI_TYPE_ANY)
			    && (this_node->type != type_to_check_for)) {

				/* Complain about a type mismatch */

				ACPI_WARNING((AE_INFO,
					      "NsLookup: Type mismatch on %4.4s (%s), searching for (%s)",
					      ACPI_CAST_PTR(char, &simple_name),
					      acpi_ut_get_type_name(this_node->
								    type),
					      acpi_ut_get_type_name
					      (type_to_check_for)));
			}

			/*
			 * If this is the last name segment and we are not looking for a
			 * specific type, but the type of found object is known, use that
			 * type to (later) see if it opens a scope.
			 */
			if (type == ACPI_TYPE_ANY) {
				type = this_node->type;
			}
		}

		/* Point to next name segment and make this node current */

		path += ACPI_NAME_SIZE;
		current_node = this_node;
	}

	/* Always check if we need to open a new scope */

	if (!(flags & ACPI_NS_DONT_OPEN_SCOPE) && (walk_state)) {
		/*
		 * If entry is a type which opens a scope, push the new scope on the
		 * scope stack.
		 */
		if (acpi_ns_opens_scope(type)) {
			status =
			    acpi_ds_scope_stack_push(this_node, type,
						     walk_state);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}
		}
	}

	*return_node = this_node;
	return_ACPI_STATUS(AE_OK);
}
gpl-2.0
pan60157/A830L_JB_KERNEL_214
drivers/acpi/acpica/exmutex.c
4919
15331
/****************************************************************************** * * Module Name: exmutex - ASL Mutex Acquire/Release functions * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acinterp.h" #include "acevents.h" #define _COMPONENT ACPI_EXECUTER ACPI_MODULE_NAME("exmutex") /* Local prototypes */ static void acpi_ex_link_mutex(union acpi_operand_object *obj_desc, struct acpi_thread_state *thread); /******************************************************************************* * * FUNCTION: acpi_ex_unlink_mutex * * PARAMETERS: obj_desc - The mutex to be unlinked * * RETURN: None * * DESCRIPTION: Remove a mutex from the "AcquiredMutex" list * ******************************************************************************/ void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc) { struct acpi_thread_state *thread = obj_desc->mutex.owner_thread; if (!thread) { return; } /* Doubly linked list */ if (obj_desc->mutex.next) { (obj_desc->mutex.next)->mutex.prev = obj_desc->mutex.prev; } if (obj_desc->mutex.prev) { (obj_desc->mutex.prev)->mutex.next = obj_desc->mutex.next; /* * Migrate the previous sync level associated with this mutex to * the previous mutex on the list so that it may be preserved. * This handles the case where several mutexes have been acquired * at the same level, but are not released in opposite order. 
*/ (obj_desc->mutex.prev)->mutex.original_sync_level = obj_desc->mutex.original_sync_level; } else { thread->acquired_mutex_list = obj_desc->mutex.next; } } /******************************************************************************* * * FUNCTION: acpi_ex_link_mutex * * PARAMETERS: obj_desc - The mutex to be linked * Thread - Current executing thread object * * RETURN: None * * DESCRIPTION: Add a mutex to the "AcquiredMutex" list for this walk * ******************************************************************************/ static void acpi_ex_link_mutex(union acpi_operand_object *obj_desc, struct acpi_thread_state *thread) { union acpi_operand_object *list_head; list_head = thread->acquired_mutex_list; /* This object will be the first object in the list */ obj_desc->mutex.prev = NULL; obj_desc->mutex.next = list_head; /* Update old first object to point back to this object */ if (list_head) { list_head->mutex.prev = obj_desc; } /* Update list head */ thread->acquired_mutex_list = obj_desc; } /******************************************************************************* * * FUNCTION: acpi_ex_acquire_mutex_object * * PARAMETERS: Timeout - Timeout in milliseconds * obj_desc - Mutex object * thread_id - Current thread state * * RETURN: Status * * DESCRIPTION: Acquire an AML mutex, low-level interface. Provides a common * path that supports multiple acquires by the same thread. 
* * MUTEX: Interpreter must be locked * * NOTE: This interface is called from three places: * 1) From acpi_ex_acquire_mutex, via an AML Acquire() operator * 2) From acpi_ex_acquire_global_lock when an AML Field access requires the * global lock * 3) From the external interface, acpi_acquire_global_lock * ******************************************************************************/ acpi_status acpi_ex_acquire_mutex_object(u16 timeout, union acpi_operand_object *obj_desc, acpi_thread_id thread_id) { acpi_status status; ACPI_FUNCTION_TRACE_PTR(ex_acquire_mutex_object, obj_desc); if (!obj_desc) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* Support for multiple acquires by the owning thread */ if (obj_desc->mutex.thread_id == thread_id) { /* * The mutex is already owned by this thread, just increment the * acquisition depth */ obj_desc->mutex.acquisition_depth++; return_ACPI_STATUS(AE_OK); } /* Acquire the mutex, wait if necessary. Special case for Global Lock */ if (obj_desc == acpi_gbl_global_lock_mutex) { status = acpi_ev_acquire_global_lock(timeout); } else { status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex, timeout); } if (ACPI_FAILURE(status)) { /* Includes failure from a timeout on time_desc */ return_ACPI_STATUS(status); } /* Acquired the mutex: update mutex object */ obj_desc->mutex.thread_id = thread_id; obj_desc->mutex.acquisition_depth = 1; obj_desc->mutex.original_sync_level = 0; obj_desc->mutex.owner_thread = NULL; /* Used only for AML Acquire() */ return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ex_acquire_mutex * * PARAMETERS: time_desc - Timeout integer * obj_desc - Mutex object * walk_state - Current method execution state * * RETURN: Status * * DESCRIPTION: Acquire an AML mutex * ******************************************************************************/ acpi_status acpi_ex_acquire_mutex(union acpi_operand_object *time_desc, union acpi_operand_object 
*obj_desc, struct acpi_walk_state *walk_state)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE_PTR(ex_acquire_mutex, obj_desc);

	if (!obj_desc) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/* Must have a valid thread state struct */

	if (!walk_state->thread) {
		ACPI_ERROR((AE_INFO,
			    "Cannot acquire Mutex [%4.4s], null thread info",
			    acpi_ut_get_node_name(obj_desc->mutex.node)));
		return_ACPI_STATUS(AE_AML_INTERNAL);
	}

	/*
	 * Current sync level must be less than or equal to the sync level of the
	 * mutex. This mechanism provides some deadlock prevention
	 */
	if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {
		ACPI_ERROR((AE_INFO,
			    "Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%u)",
			    acpi_ut_get_node_name(obj_desc->mutex.node),
			    walk_state->thread->current_sync_level));
		return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
	}

	status = acpi_ex_acquire_mutex_object((u16) time_desc->integer.value,
					      obj_desc,
					      walk_state->thread->thread_id);
	/* Only a first (depth == 1) acquisition updates ownership/sync state */

	if (ACPI_SUCCESS(status) && obj_desc->mutex.acquisition_depth == 1) {

		/* Save Thread object, original/current sync levels */

		obj_desc->mutex.owner_thread = walk_state->thread;
		obj_desc->mutex.original_sync_level =
		    walk_state->thread->current_sync_level;
		walk_state->thread->current_sync_level =
		    obj_desc->mutex.sync_level;

		/* Link the mutex to the current thread for force-unlock at method exit */

		acpi_ex_link_mutex(obj_desc, walk_state->thread);
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_release_mutex_object
 *
 * PARAMETERS:  obj_desc            - The object descriptor for this op
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Release a previously acquired Mutex, low level interface.
 *              Provides a common path that supports multiple releases (after
 *              previous multiple acquires) by the same thread.
*
 * MUTEX:       Interpreter must be locked
 *
 * NOTE: This interface is called from three places:
 * 1) From acpi_ex_release_mutex, via an AML Release() operator
 * 2) From acpi_ex_release_global_lock when an AML Field access requires the
 *    global lock
 * 3) From the external interface, acpi_release_global_lock
 *
 ******************************************************************************/

acpi_status acpi_ex_release_mutex_object(union acpi_operand_object *obj_desc)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ex_release_mutex_object);

	if (obj_desc->mutex.acquisition_depth == 0) {
		/* NOTE(review): bare return skips the function-exit trace that
		 * return_ACPI_STATUS would emit; matches upstream ACPICA. */
		return (AE_NOT_ACQUIRED);
	}

	/* Match multiple Acquires with multiple Releases */

	obj_desc->mutex.acquisition_depth--;
	if (obj_desc->mutex.acquisition_depth != 0) {

		/* Just decrement the depth and return */

		return_ACPI_STATUS(AE_OK);
	}

	if (obj_desc->mutex.owner_thread) {

		/* Unlink the mutex from the owner's list */

		acpi_ex_unlink_mutex(obj_desc);
		obj_desc->mutex.owner_thread = NULL;
	}

	/* Release the mutex, special case for Global Lock */

	if (obj_desc == acpi_gbl_global_lock_mutex) {
		status = acpi_ev_release_global_lock();
	} else {
		acpi_os_release_mutex(obj_desc->mutex.os_mutex);
	}

	/* Clear mutex info */

	obj_desc->mutex.thread_id = 0;
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_release_mutex
 *
 * PARAMETERS:  obj_desc            - The object descriptor for this op
 *              walk_state          - Current method execution state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Release a previously acquired Mutex.
*
 ******************************************************************************/

acpi_status
acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
		      struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;
	u8 previous_sync_level;
	struct acpi_thread_state *owner_thread;

	ACPI_FUNCTION_TRACE(ex_release_mutex);

	if (!obj_desc) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	owner_thread = obj_desc->mutex.owner_thread;

	/* The mutex must have been previously acquired in order to release it */

	if (!owner_thread) {
		ACPI_ERROR((AE_INFO,
			    "Cannot release Mutex [%4.4s], not acquired",
			    acpi_ut_get_node_name(obj_desc->mutex.node)));
		return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED);
	}

	/* Must have a valid thread. */

	if (!walk_state->thread) {
		ACPI_ERROR((AE_INFO,
			    "Cannot release Mutex [%4.4s], null thread info",
			    acpi_ut_get_node_name(obj_desc->mutex.node)));
		return_ACPI_STATUS(AE_AML_INTERNAL);
	}

	/*
	 * The Mutex is owned, but this thread must be the owner.
	 * Special case for Global Lock, any thread can release
	 */
	if ((owner_thread->thread_id != walk_state->thread->thread_id) &&
	    (obj_desc != acpi_gbl_global_lock_mutex)) {
		ACPI_ERROR((AE_INFO,
			    "Thread %u cannot release Mutex [%4.4s] acquired by thread %u",
			    (u32)walk_state->thread->thread_id,
			    acpi_ut_get_node_name(obj_desc->mutex.node),
			    (u32)owner_thread->thread_id));
		return_ACPI_STATUS(AE_AML_NOT_OWNER);
	}

	/*
	 * The sync level of the mutex must be equal to the current sync level. In
	 * other words, the current level means that at least one mutex at that
	 * level is currently being held. Attempting to release a mutex of a
	 * different level can only mean that the mutex ordering rule is being
	 * violated. This behavior is clarified in ACPI 4.0 specification.
*/ if (obj_desc->mutex.sync_level != owner_thread->current_sync_level) { ACPI_ERROR((AE_INFO, "Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %u current %u", acpi_ut_get_node_name(obj_desc->mutex.node), obj_desc->mutex.sync_level, walk_state->thread->current_sync_level)); return_ACPI_STATUS(AE_AML_MUTEX_ORDER); } /* * Get the previous sync_level from the head of the acquired mutex list. * This handles the case where several mutexes at the same level have been * acquired, but are not released in reverse order. */ previous_sync_level = owner_thread->acquired_mutex_list->mutex.original_sync_level; status = acpi_ex_release_mutex_object(obj_desc); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } if (obj_desc->mutex.acquisition_depth == 0) { /* Restore the previous sync_level */ owner_thread->current_sync_level = previous_sync_level; } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_release_all_mutexes * * PARAMETERS: Thread - Current executing thread object * * RETURN: Status * * DESCRIPTION: Release all mutexes held by this thread * * NOTE: This function is called as the thread is exiting the interpreter. * Mutexes are not released when an individual control method is exited, but * only when the parent thread actually exits the interpreter. This allows one * method to acquire a mutex, and a different method to release it, as long as * this is performed underneath a single parent control method. 
*
 ******************************************************************************/

void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
{
	union acpi_operand_object *current;
	union acpi_operand_object *successor;

	ACPI_FUNCTION_ENTRY();

	/* Force-release every mutex still held by this exiting thread */

	for (current = thread->acquired_mutex_list; current;
	     current = successor) {

		/* Capture the forward link before the node is scrubbed */

		successor = current->mutex.next;

		/* Detach from the acquired-mutex list and zero the depth */

		current->mutex.prev = NULL;
		current->mutex.next = NULL;
		current->mutex.acquisition_depth = 0;

		/* Physically release; the Global Lock has its own path */

		if (current == acpi_gbl_global_lock_mutex) {

			/* Ignore errors */

			(void)acpi_ev_release_global_lock();
		} else {
			acpi_os_release_mutex(current->mutex.os_mutex);
		}

		/* Mark the mutex unowned */

		current->mutex.owner_thread = NULL;
		current->mutex.thread_id = 0;

		/* Update Thread sync_level (last/oldest entry is the one that sticks) */

		thread->current_sync_level =
		    current->mutex.original_sync_level;
	}
}
gpl-2.0
mozilla-b2g/kernel_flatfish
drivers/acpi/acpica/exsystem.c
4919
9338
/****************************************************************************** * * Module Name: exsystem - Interface to OS services * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acinterp.h" #define _COMPONENT ACPI_EXECUTER ACPI_MODULE_NAME("exsystem") /******************************************************************************* * * FUNCTION: acpi_ex_system_wait_semaphore * * PARAMETERS: Semaphore - Semaphore to wait on * Timeout - Max time to wait * * RETURN: Status * * DESCRIPTION: Implements a semaphore wait with a check to see if the * semaphore is available immediately. If it is not, the * interpreter is released before waiting. 
*
 ******************************************************************************/

acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ex_system_wait_semaphore);

	/* Fast path: try a non-blocking decrement first */

	status = acpi_os_wait_semaphore(semaphore, 1, ACPI_DO_NOT_WAIT);
	if (ACPI_SUCCESS(status)) {
		return_ACPI_STATUS(status);
	}

	if (status == AE_TIME) {

		/* We must wait, so unlock the interpreter */

		acpi_ex_relinquish_interpreter();

		status = acpi_os_wait_semaphore(semaphore, 1, timeout);

		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "*** Thread awake after blocking, %s\n",
				  acpi_format_exception(status)));

		/* Reacquire the interpreter */

		acpi_ex_reacquire_interpreter();
	}

	/* Any other failure code from the fast path is returned unchanged */

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_system_wait_mutex
 *
 * PARAMETERS:  Mutex               - Mutex to wait on
 *              Timeout             - Max time to wait
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Implements a mutex wait with a check to see if the
 *              mutex is available immediately. If it is not, the
 *              interpreter is released before waiting.
*
 ******************************************************************************/

acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ex_system_wait_mutex);

	/* Optimistic non-blocking attempt first */

	status = acpi_os_acquire_mutex(mutex, ACPI_DO_NOT_WAIT);

	if (status == AE_TIME) {

		/* Mutex is busy: give up the interpreter while we block */

		acpi_ex_relinquish_interpreter();

		status = acpi_os_acquire_mutex(mutex, timeout);

		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "*** Thread awake after blocking, %s\n",
				  acpi_format_exception(status)));

		/* Take the interpreter lock back before returning */

		acpi_ex_reacquire_interpreter();
	}

	/* Success and non-AE_TIME failures fall through unchanged */

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_system_do_stall
 *
 * PARAMETERS:  how_long            - The amount of time to stall,
 *                                    in microseconds
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Suspend running thread for specified amount of time.
 *              Note: ACPI specification requires that Stall() does not
 *              relinquish the processor, and delays longer than 100 usec
 *              should use Sleep() instead. We allow stalls up to 255 usec
 *              for compatibility with other interpreters and existing BIOSs.
*
 ******************************************************************************/

acpi_status acpi_ex_system_do_stall(u32 how_long)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_ENTRY();

	if (how_long > 255) {	/* 255 microseconds */
		/*
		 * Longer than 255 usec, this is an error
		 *
		 * (ACPI specifies 100 usec as max, but this gives some slack in
		 * order to support existing BIOSs)
		 */
		ACPI_ERROR((AE_INFO,
			    "Time parameter is too large (%u)", how_long));
		status = AE_AML_OPERAND_VALUE;
	} else {
		/* Busy-wait; per spec, Stall() never yields the processor */

		acpi_os_stall(how_long);
	}

	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_system_do_sleep
 *
 * PARAMETERS:  how_long            - The amount of time to sleep,
 *                                    in milliseconds
 *
 * RETURN:      Status (always AE_OK)
 *
 * DESCRIPTION: Sleep the running thread for specified amount of time.
 *
 ******************************************************************************/

acpi_status acpi_ex_system_do_sleep(u64 how_long)
{
	ACPI_FUNCTION_ENTRY();

	/* Since this thread will sleep, we must release the interpreter */

	acpi_ex_relinquish_interpreter();

	/*
	 * For compatibility with other ACPI implementations and to prevent
	 * accidental deep sleeps, limit the sleep time to something reasonable.
	 */
	if (how_long > ACPI_MAX_SLEEP) {
		how_long = ACPI_MAX_SLEEP;
	}

	acpi_os_sleep(how_long);

	/* And now we must get the interpreter again */

	acpi_ex_reacquire_interpreter();
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_system_signal_event
 *
 * PARAMETERS:  obj_desc            - The object descriptor for this op
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Provides an access point to perform synchronization operations
 *              within the AML.
*
 ******************************************************************************/

acpi_status acpi_ex_system_signal_event(union acpi_operand_object * obj_desc)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ex_system_signal_event);

	/* A NULL event object is silently treated as success (AE_OK) */

	if (obj_desc) {
		status =
		    acpi_os_signal_semaphore(obj_desc->event.os_semaphore, 1);
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_system_wait_event
 *
 * PARAMETERS:  time_desc           - The 'time to delay' object descriptor
 *              obj_desc            - The object descriptor for this op
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Provides an access point to perform synchronization operations
 *              within the AML. This operation is a request to wait for an
 *              event.
 *
 ******************************************************************************/

acpi_status
acpi_ex_system_wait_event(union acpi_operand_object *time_desc,
			  union acpi_operand_object *obj_desc)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ex_system_wait_event);

	/* Timeout is the AML integer value, truncated to 16 bits of ms */

	if (obj_desc) {
		status =
		    acpi_ex_system_wait_semaphore(obj_desc->event.os_semaphore,
						  (u16) time_desc->integer.
						  value);
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_system_reset_event
 *
 * PARAMETERS:  obj_desc            - The object descriptor for this op
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Reset an event to a known state.
 *
 ******************************************************************************/

acpi_status acpi_ex_system_reset_event(union acpi_operand_object *obj_desc)
{
	acpi_status status = AE_OK;
	acpi_semaphore temp_semaphore;

	ACPI_FUNCTION_ENTRY();

	/*
	 * We are going to simply delete the existing semaphore and
	 * create a new one! The old semaphore is deleted only after the
	 * replacement was created successfully, so a failure leaves the
	 * event object untouched.
	 */
	status =
	    acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0, &temp_semaphore);
	if (ACPI_SUCCESS(status)) {
		(void)acpi_os_delete_semaphore(obj_desc->event.os_semaphore);
		obj_desc->event.os_semaphore = temp_semaphore;
	}

	return (status);
}
gpl-2.0