repo_name
string
path
string
copies
string
size
string
content
string
license
string
Vegaviet-DevTeam/kernel-stock-4.4.2-ef63slk
drivers/power/max8997_charger.c
5067
5421
/* * max8997_charger.c - Power supply consumer driver for the Maxim 8997/8966 * * Copyright (C) 2011 Samsung Electronics * MyungJoo Ham <myungjoo.ham@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/err.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/power_supply.h> #include <linux/mfd/max8997.h> #include <linux/mfd/max8997-private.h> struct charger_data { struct device *dev; struct max8997_dev *iodev; struct power_supply battery; }; static enum power_supply_property max8997_battery_props[] = { POWER_SUPPLY_PROP_STATUS, /* "FULL" or "NOT FULL" only. 
*/ POWER_SUPPLY_PROP_PRESENT, /* the presence of battery */ POWER_SUPPLY_PROP_ONLINE, /* charger is active or not */ }; /* Note that the charger control is done by a current regulator "CHARGER" */ static int max8997_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct charger_data *charger = container_of(psy, struct charger_data, battery); struct i2c_client *i2c = charger->iodev->i2c; int ret; u8 reg; switch (psp) { case POWER_SUPPLY_PROP_STATUS: val->intval = 0; ret = max8997_read_reg(i2c, MAX8997_REG_STATUS4, &reg); if (ret) return ret; if ((reg & (1 << 0)) == 0x1) val->intval = POWER_SUPPLY_STATUS_FULL; break; case POWER_SUPPLY_PROP_PRESENT: val->intval = 0; ret = max8997_read_reg(i2c, MAX8997_REG_STATUS4, &reg); if (ret) return ret; if ((reg & (1 << 2)) == 0x0) val->intval = 1; break; case POWER_SUPPLY_PROP_ONLINE: val->intval = 0; ret = max8997_read_reg(i2c, MAX8997_REG_STATUS4, &reg); if (ret) return ret; /* DCINOK */ if (reg & (1 << 1)) val->intval = 1; break; default: return -EINVAL; } return 0; } static __devinit int max8997_battery_probe(struct platform_device *pdev) { int ret = 0; struct charger_data *charger; struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent); struct max8997_platform_data *pdata = dev_get_platdata(iodev->dev); if (!pdata) return -EINVAL; if (pdata->eoc_mA) { int val = (pdata->eoc_mA - 50) / 10; if (val < 0) val = 0; if (val > 0xf) val = 0xf; ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL5, val, 0xf); if (ret < 0) { dev_err(&pdev->dev, "Cannot use i2c bus.\n"); return ret; } } switch (pdata->timeout) { case 5: ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1, 0x2 << 4, 0x7 << 4); break; case 6: ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1, 0x3 << 4, 0x7 << 4); break; case 7: ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1, 0x4 << 4, 0x7 << 4); break; case 0: ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1, 
0x7 << 4, 0x7 << 4); break; default: dev_err(&pdev->dev, "incorrect timeout value (%d)\n", pdata->timeout); return -EINVAL; } if (ret < 0) { dev_err(&pdev->dev, "Cannot use i2c bus.\n"); return ret; } charger = kzalloc(sizeof(struct charger_data), GFP_KERNEL); if (charger == NULL) { dev_err(&pdev->dev, "Cannot allocate memory.\n"); return -ENOMEM; } platform_set_drvdata(pdev, charger); charger->battery.name = "max8997_pmic"; charger->battery.type = POWER_SUPPLY_TYPE_BATTERY; charger->battery.get_property = max8997_battery_get_property; charger->battery.properties = max8997_battery_props; charger->battery.num_properties = ARRAY_SIZE(max8997_battery_props); charger->dev = &pdev->dev; charger->iodev = iodev; ret = power_supply_register(&pdev->dev, &charger->battery); if (ret) { dev_err(&pdev->dev, "failed: power supply register\n"); goto err; } return 0; err: kfree(charger); return ret; } static int __devexit max8997_battery_remove(struct platform_device *pdev) { struct charger_data *charger = platform_get_drvdata(pdev); power_supply_unregister(&charger->battery); kfree(charger); return 0; } static const struct platform_device_id max8997_battery_id[] = { { "max8997-battery", 0 }, { } }; static struct platform_driver max8997_battery_driver = { .driver = { .name = "max8997-battery", .owner = THIS_MODULE, }, .probe = max8997_battery_probe, .remove = __devexit_p(max8997_battery_remove), .id_table = max8997_battery_id, }; static int __init max8997_battery_init(void) { return platform_driver_register(&max8997_battery_driver); } subsys_initcall(max8997_battery_init); static void __exit max8997_battery_cleanup(void) { platform_driver_unregister(&max8997_battery_driver); } module_exit(max8997_battery_cleanup); MODULE_DESCRIPTION("MAXIM 8997/8966 battery control driver"); MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); MODULE_LICENSE("GPL");
gpl-2.0
AshleyLai/testing1
drivers/uwb/lc-rc.c
5323
11139
/* * Ultra Wide Band * Life cycle of radio controllers * * Copyright (C) 2005-2006 Intel Corporation * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * FIXME: docs * * A UWB radio controller is also a UWB device, so it embeds one... * * List of RCs comes from the 'struct class uwb_rc_class'. */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/device.h> #include <linux/err.h> #include <linux/random.h> #include <linux/kdev_t.h> #include <linux/etherdevice.h> #include <linux/usb.h> #include <linux/slab.h> #include <linux/export.h> #include "uwb-internal.h" static int uwb_rc_index_match(struct device *dev, void *data) { int *index = data; struct uwb_rc *rc = dev_get_drvdata(dev); if (rc->index == *index) return 1; return 0; } static struct uwb_rc *uwb_rc_find_by_index(int index) { struct device *dev; struct uwb_rc *rc = NULL; dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match); if (dev) rc = dev_get_drvdata(dev); return rc; } static int uwb_rc_new_index(void) { int index = 0; for (;;) { if (!uwb_rc_find_by_index(index)) return index; if (++index < 0) index = 0; } } /** * Release the backing device of a uwb_rc that has been dynamically allocated. 
*/ static void uwb_rc_sys_release(struct device *dev) { struct uwb_dev *uwb_dev = container_of(dev, struct uwb_dev, dev); struct uwb_rc *rc = container_of(uwb_dev, struct uwb_rc, uwb_dev); uwb_rc_ie_release(rc); kfree(rc); } void uwb_rc_init(struct uwb_rc *rc) { struct uwb_dev *uwb_dev = &rc->uwb_dev; uwb_dev_init(uwb_dev); rc->uwb_dev.dev.class = &uwb_rc_class; rc->uwb_dev.dev.release = uwb_rc_sys_release; uwb_rc_neh_create(rc); rc->beaconing = -1; rc->scan_type = UWB_SCAN_DISABLED; INIT_LIST_HEAD(&rc->notifs_chain.list); mutex_init(&rc->notifs_chain.mutex); INIT_LIST_HEAD(&rc->uwb_beca.list); mutex_init(&rc->uwb_beca.mutex); uwb_drp_avail_init(rc); uwb_rc_ie_init(rc); uwb_rsv_init(rc); uwb_rc_pal_init(rc); } EXPORT_SYMBOL_GPL(uwb_rc_init); struct uwb_rc *uwb_rc_alloc(void) { struct uwb_rc *rc; rc = kzalloc(sizeof(*rc), GFP_KERNEL); if (rc == NULL) return NULL; uwb_rc_init(rc); return rc; } EXPORT_SYMBOL_GPL(uwb_rc_alloc); static struct attribute *rc_attrs[] = { &dev_attr_mac_address.attr, &dev_attr_scan.attr, &dev_attr_beacon.attr, NULL, }; static struct attribute_group rc_attr_group = { .attrs = rc_attrs, }; /* * Registration of sysfs specific stuff */ static int uwb_rc_sys_add(struct uwb_rc *rc) { return sysfs_create_group(&rc->uwb_dev.dev.kobj, &rc_attr_group); } static void __uwb_rc_sys_rm(struct uwb_rc *rc) { sysfs_remove_group(&rc->uwb_dev.dev.kobj, &rc_attr_group); } /** * uwb_rc_mac_addr_setup - get an RC's EUI-48 address or set it * @rc: the radio controller. * * If the EUI-48 address is 00:00:00:00:00:00 or FF:FF:FF:FF:FF:FF * then a random locally administered EUI-48 is generated and set on * the device. The probability of address collisions is sufficiently * unlikely (1/2^40 = 9.1e-13) that they're not checked for. 
*/ static int uwb_rc_mac_addr_setup(struct uwb_rc *rc) { int result; struct device *dev = &rc->uwb_dev.dev; struct uwb_dev *uwb_dev = &rc->uwb_dev; char devname[UWB_ADDR_STRSIZE]; struct uwb_mac_addr addr; result = uwb_rc_mac_addr_get(rc, &addr); if (result < 0) { dev_err(dev, "cannot retrieve UWB EUI-48 address: %d\n", result); return result; } if (uwb_mac_addr_unset(&addr) || uwb_mac_addr_bcast(&addr)) { addr.data[0] = 0x02; /* locally administered and unicast */ get_random_bytes(&addr.data[1], sizeof(addr.data)-1); result = uwb_rc_mac_addr_set(rc, &addr); if (result < 0) { uwb_mac_addr_print(devname, sizeof(devname), &addr); dev_err(dev, "cannot set EUI-48 address %s: %d\n", devname, result); return result; } } uwb_dev->mac_addr = addr; return 0; } static int uwb_rc_setup(struct uwb_rc *rc) { int result; struct device *dev = &rc->uwb_dev.dev; result = uwb_radio_setup(rc); if (result < 0) { dev_err(dev, "cannot setup UWB radio: %d\n", result); goto error; } result = uwb_rc_mac_addr_setup(rc); if (result < 0) { dev_err(dev, "cannot setup UWB MAC address: %d\n", result); goto error; } result = uwb_rc_dev_addr_assign(rc); if (result < 0) { dev_err(dev, "cannot assign UWB DevAddr: %d\n", result); goto error; } result = uwb_rc_ie_setup(rc); if (result < 0) { dev_err(dev, "cannot setup IE subsystem: %d\n", result); goto error_ie_setup; } result = uwb_rsv_setup(rc); if (result < 0) { dev_err(dev, "cannot setup reservation subsystem: %d\n", result); goto error_rsv_setup; } uwb_dbg_add_rc(rc); return 0; error_rsv_setup: uwb_rc_ie_release(rc); error_ie_setup: error: return result; } /** * Register a new UWB radio controller * * Did you call uwb_rc_init() on your rc? * * We assume that this is being called with a > 0 refcount on * it [through ops->{get|put}_device(). We'll take our own, though. 
* * @parent_dev is our real device, the one that provides the actual UWB device */ int uwb_rc_add(struct uwb_rc *rc, struct device *parent_dev, void *priv) { int result; struct device *dev; char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE]; rc->index = uwb_rc_new_index(); dev = &rc->uwb_dev.dev; dev_set_name(dev, "uwb%d", rc->index); rc->priv = priv; init_waitqueue_head(&rc->uwbd.wq); INIT_LIST_HEAD(&rc->uwbd.event_list); spin_lock_init(&rc->uwbd.event_list_lock); uwbd_start(rc); result = rc->start(rc); if (result < 0) goto error_rc_start; result = uwb_rc_setup(rc); if (result < 0) { dev_err(dev, "cannot setup UWB radio controller: %d\n", result); goto error_rc_setup; } result = uwb_dev_add(&rc->uwb_dev, parent_dev, rc); if (result < 0 && result != -EADDRNOTAVAIL) goto error_dev_add; result = uwb_rc_sys_add(rc); if (result < 0) { dev_err(parent_dev, "cannot register UWB radio controller " "dev attributes: %d\n", result); goto error_sys_add; } uwb_mac_addr_print(macbuf, sizeof(macbuf), &rc->uwb_dev.mac_addr); uwb_dev_addr_print(devbuf, sizeof(devbuf), &rc->uwb_dev.dev_addr); dev_info(dev, "new uwb radio controller (mac %s dev %s) on %s %s\n", macbuf, devbuf, parent_dev->bus->name, dev_name(parent_dev)); rc->ready = 1; return 0; error_sys_add: uwb_dev_rm(&rc->uwb_dev); error_dev_add: error_rc_setup: rc->stop(rc); error_rc_start: uwbd_stop(rc); return result; } EXPORT_SYMBOL_GPL(uwb_rc_add); static int uwb_dev_offair_helper(struct device *dev, void *priv) { struct uwb_dev *uwb_dev = to_uwb_dev(dev); return __uwb_dev_offair(uwb_dev, uwb_dev->rc); } /* * Remove a Radio Controller; stop beaconing/scanning, disconnect all children */ void uwb_rc_rm(struct uwb_rc *rc) { rc->ready = 0; uwb_dbg_del_rc(rc); uwb_rsv_remove_all(rc); uwb_radio_shutdown(rc); rc->stop(rc); uwbd_stop(rc); uwb_rc_neh_destroy(rc); uwb_dev_lock(&rc->uwb_dev); rc->priv = NULL; rc->cmd = NULL; uwb_dev_unlock(&rc->uwb_dev); mutex_lock(&rc->uwb_beca.mutex); uwb_dev_for_each(rc, 
uwb_dev_offair_helper, NULL); __uwb_rc_sys_rm(rc); mutex_unlock(&rc->uwb_beca.mutex); uwb_rsv_cleanup(rc); uwb_beca_release(rc); uwb_dev_rm(&rc->uwb_dev); } EXPORT_SYMBOL_GPL(uwb_rc_rm); static int find_rc_try_get(struct device *dev, void *data) { struct uwb_rc *target_rc = data; struct uwb_rc *rc = dev_get_drvdata(dev); if (rc == NULL) { WARN_ON(1); return 0; } if (rc == target_rc) { if (rc->ready == 0) return 0; else return 1; } return 0; } /** * Given a radio controller descriptor, validate and refcount it * * @returns NULL if the rc does not exist or is quiescing; the ptr to * it otherwise. */ struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc) { struct device *dev; struct uwb_rc *rc = NULL; dev = class_find_device(&uwb_rc_class, NULL, target_rc, find_rc_try_get); if (dev) { rc = dev_get_drvdata(dev); __uwb_rc_get(rc); } return rc; } EXPORT_SYMBOL_GPL(__uwb_rc_try_get); /* * RC get for external refcount acquirers... * * Increments the refcount of the device and it's backend modules */ static inline struct uwb_rc *uwb_rc_get(struct uwb_rc *rc) { if (rc->ready == 0) return NULL; uwb_dev_get(&rc->uwb_dev); return rc; } static int find_rc_grandpa(struct device *dev, void *data) { struct device *grandpa_dev = data; struct uwb_rc *rc = dev_get_drvdata(dev); if (rc->uwb_dev.dev.parent->parent == grandpa_dev) { rc = uwb_rc_get(rc); return 1; } return 0; } /** * Locate and refcount a radio controller given a common grand-parent * * @grandpa_dev Pointer to the 'grandparent' device structure. * @returns NULL If the rc does not exist or is quiescing; the ptr to * it otherwise, properly referenced. * * The Radio Control interface (or the UWB Radio Controller) is always * an interface of a device. The parent is the interface, the * grandparent is the device that encapsulates the interface. 
* * There is no need to lock around as the "grandpa" would be * refcounted by the target, and to remove the referemes, the * uwb_rc_class->sem would have to be taken--we hold it, ergo we * should be safe. */ struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev) { struct device *dev; struct uwb_rc *rc = NULL; dev = class_find_device(&uwb_rc_class, NULL, (void *)grandpa_dev, find_rc_grandpa); if (dev) rc = dev_get_drvdata(dev); return rc; } EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa); /** * Find a radio controller by device address * * @returns the pointer to the radio controller, properly referenced */ static int find_rc_dev(struct device *dev, void *data) { struct uwb_dev_addr *addr = data; struct uwb_rc *rc = dev_get_drvdata(dev); if (rc == NULL) { WARN_ON(1); return 0; } if (!uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, addr)) { rc = uwb_rc_get(rc); return 1; } return 0; } struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr) { struct device *dev; struct uwb_rc *rc = NULL; dev = class_find_device(&uwb_rc_class, NULL, (void *)addr, find_rc_dev); if (dev) rc = dev_get_drvdata(dev); return rc; } EXPORT_SYMBOL_GPL(uwb_rc_get_by_dev); /** * Drop a reference on a radio controller * * This is the version that should be done by entities external to the * UWB Radio Control stack (ie: clients of the API). */ void uwb_rc_put(struct uwb_rc *rc) { __uwb_rc_put(rc); } EXPORT_SYMBOL_GPL(uwb_rc_put);
gpl-2.0
SaberMod/lge-kernel-mako
drivers/mfd/rdc321x-southbridge.c
7883
3401
/* * RDC321x MFD southbrige driver * * Copyright (C) 2007-2010 Florian Fainelli <florian@openwrt.org> * Copyright (C) 2010 Bernhard Loos <bernhardloos@googlemail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/pci.h> #include <linux/mfd/core.h> #include <linux/mfd/rdc321x.h> static struct rdc321x_wdt_pdata rdc321x_wdt_pdata; static struct resource rdc321x_wdt_resource[] = { { .name = "wdt-reg", .start = RDC321X_WDT_CTRL, .end = RDC321X_WDT_CTRL + 0x3, .flags = IORESOURCE_IO, } }; static struct rdc321x_gpio_pdata rdc321x_gpio_pdata = { .max_gpios = RDC321X_MAX_GPIO, }; static struct resource rdc321x_gpio_resources[] = { { .name = "gpio-reg1", .start = RDC321X_GPIO_CTRL_REG1, .end = RDC321X_GPIO_CTRL_REG1 + 0x7, .flags = IORESOURCE_IO, }, { .name = "gpio-reg2", .start = RDC321X_GPIO_CTRL_REG2, .end = RDC321X_GPIO_CTRL_REG2 + 0x7, .flags = IORESOURCE_IO, } }; static struct mfd_cell rdc321x_sb_cells[] = { { .name = "rdc321x-wdt", .resources = rdc321x_wdt_resource, .num_resources = ARRAY_SIZE(rdc321x_wdt_resource), .platform_data = &rdc321x_wdt_pdata, .pdata_size = sizeof(rdc321x_wdt_pdata), }, { .name = "rdc321x-gpio", .resources = rdc321x_gpio_resources, .num_resources = ARRAY_SIZE(rdc321x_gpio_resources), 
.platform_data = &rdc321x_gpio_pdata, .pdata_size = sizeof(rdc321x_gpio_pdata), }, }; static int __devinit rdc321x_sb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { int err; err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "failed to enable device\n"); return err; } rdc321x_gpio_pdata.sb_pdev = pdev; rdc321x_wdt_pdata.sb_pdev = pdev; return mfd_add_devices(&pdev->dev, -1, rdc321x_sb_cells, ARRAY_SIZE(rdc321x_sb_cells), NULL, 0); } static void __devexit rdc321x_sb_remove(struct pci_dev *pdev) { mfd_remove_devices(&pdev->dev); } static DEFINE_PCI_DEVICE_TABLE(rdc321x_sb_table) = { { PCI_DEVICE(PCI_VENDOR_ID_RDC, PCI_DEVICE_ID_RDC_R6030) }, {} }; MODULE_DEVICE_TABLE(pci, rdc321x_sb_table); static struct pci_driver rdc321x_sb_driver = { .name = "RDC321x Southbridge", .id_table = rdc321x_sb_table, .probe = rdc321x_sb_probe, .remove = __devexit_p(rdc321x_sb_remove), }; static int __init rdc321x_sb_init(void) { return pci_register_driver(&rdc321x_sb_driver); } static void __exit rdc321x_sb_exit(void) { pci_unregister_driver(&rdc321x_sb_driver); } module_init(rdc321x_sb_init); module_exit(rdc321x_sb_exit); MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("RDC R-321x MFD southbridge driver");
gpl-2.0
pio-masaki/kernel_at300se
drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
7883
11400
/* * Host AP crypt: host-based CCMP encryption implementation for Host AP driver * * Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. See README and COPYING for * more details. */ //#include <linux/config.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/random.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/if_ether.h> #include <linux/if_arp.h> #include <asm/string.h> #include <linux/wireless.h> #include "ieee80211.h" #include <linux/crypto.h> #include <linux/scatterlist.h> MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("Host AP crypt: CCMP"); MODULE_LICENSE("GPL"); #define AES_BLOCK_LEN 16 #define CCMP_HDR_LEN 8 #define CCMP_MIC_LEN 8 #define CCMP_TK_LEN 16 #define CCMP_PN_LEN 6 struct ieee80211_ccmp_data { u8 key[CCMP_TK_LEN]; int key_set; u8 tx_pn[CCMP_PN_LEN]; u8 rx_pn[CCMP_PN_LEN]; u32 dot11RSNAStatsCCMPFormatErrors; u32 dot11RSNAStatsCCMPReplays; u32 dot11RSNAStatsCCMPDecryptErrors; int key_idx; struct crypto_tfm *tfm; /* scratch buffers for virt_to_page() (crypto API) */ u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN], tx_e[AES_BLOCK_LEN], tx_s0[AES_BLOCK_LEN]; u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN]; }; void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm, const u8 pt[16], u8 ct[16]) { crypto_cipher_encrypt_one((void*)tfm, ct, pt); } static void * ieee80211_ccmp_init(int key_idx) { struct ieee80211_ccmp_data *priv; priv = kzalloc(sizeof(*priv), GFP_ATOMIC); if (priv == NULL) goto fail; priv->key_idx = key_idx; priv->tfm = (void*)crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->tfm)) { printk(KERN_DEBUG "ieee80211_crypt_ccmp: could not allocate " "crypto API aes\n"); priv->tfm = NULL; goto fail; } return priv; fail: if (priv) { if (priv->tfm) 
crypto_free_cipher((void*)priv->tfm); kfree(priv); } return NULL; } static void ieee80211_ccmp_deinit(void *priv) { struct ieee80211_ccmp_data *_priv = priv; if (_priv && _priv->tfm) crypto_free_cipher((void*)_priv->tfm); kfree(priv); } static inline void xor_block(u8 *b, u8 *a, size_t len) { int i; for (i = 0; i < len; i++) b[i] ^= a[i]; } static void ccmp_init_blocks(struct crypto_tfm *tfm, struct ieee80211_hdr_4addr *hdr, u8 *pn, size_t dlen, u8 *b0, u8 *auth, u8 *s0) { u8 *pos, qc = 0; size_t aad_len; u16 fc; int a4_included, qc_included; u8 aad[2 * AES_BLOCK_LEN]; fc = le16_to_cpu(hdr->frame_ctl); a4_included = ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)); /* qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) && (WLAN_FC_GET_STYPE(fc) & 0x08)); */ // fixed by David :2006.9.6 qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) && (WLAN_FC_GET_STYPE(fc) & 0x80)); aad_len = 22; if (a4_included) aad_len += 6; if (qc_included) { pos = (u8 *) &hdr->addr4; if (a4_included) pos += 6; qc = *pos & 0x0f; aad_len += 2; } /* CCM Initial Block: * Flag (Include authentication header, M=3 (8-octet MIC), * L=1 (2-octet Dlen)) * Nonce: 0x00 | A2 | PN * Dlen */ b0[0] = 0x59; b0[1] = qc; memcpy(b0 + 2, hdr->addr2, ETH_ALEN); memcpy(b0 + 8, pn, CCMP_PN_LEN); b0[14] = (dlen >> 8) & 0xff; b0[15] = dlen & 0xff; /* AAD: * FC with bits 4..6 and 11..13 masked to zero; 14 is always one * A1 | A2 | A3 * SC with bits 4..15 (seq#) masked to zero * A4 (if present) * QC (if present) */ pos = (u8 *) hdr; aad[0] = 0; /* aad_len >> 8 */ aad[1] = aad_len & 0xff; aad[2] = pos[0] & 0x8f; aad[3] = pos[1] & 0xc7; memcpy(aad + 4, hdr->addr1, 3 * ETH_ALEN); pos = (u8 *) &hdr->seq_ctl; aad[22] = pos[0] & 0x0f; aad[23] = 0; /* all bits masked */ memset(aad + 24, 0, 8); if (a4_included) memcpy(aad + 24, hdr->addr4, ETH_ALEN); if (qc_included) { aad[a4_included ? 
30 : 24] = qc; /* rest of QC masked */ } /* Start with the first block and AAD */ ieee80211_ccmp_aes_encrypt(tfm, b0, auth); xor_block(auth, aad, AES_BLOCK_LEN); ieee80211_ccmp_aes_encrypt(tfm, auth, auth); xor_block(auth, &aad[AES_BLOCK_LEN], AES_BLOCK_LEN); ieee80211_ccmp_aes_encrypt(tfm, auth, auth); b0[0] &= 0x07; b0[14] = b0[15] = 0; ieee80211_ccmp_aes_encrypt(tfm, b0, s0); } static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct ieee80211_ccmp_data *key = priv; int data_len, i; u8 *pos; struct ieee80211_hdr_4addr *hdr; cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); if (skb_headroom(skb) < CCMP_HDR_LEN || skb_tailroom(skb) < CCMP_MIC_LEN || skb->len < hdr_len) return -1; data_len = skb->len - hdr_len; pos = skb_push(skb, CCMP_HDR_LEN); memmove(pos, pos + CCMP_HDR_LEN, hdr_len); pos += hdr_len; // mic = skb_put(skb, CCMP_MIC_LEN); i = CCMP_PN_LEN - 1; while (i >= 0) { key->tx_pn[i]++; if (key->tx_pn[i] != 0) break; i--; } *pos++ = key->tx_pn[5]; *pos++ = key->tx_pn[4]; *pos++ = 0; *pos++ = (key->key_idx << 6) | (1 << 5) /* Ext IV included */; *pos++ = key->tx_pn[3]; *pos++ = key->tx_pn[2]; *pos++ = key->tx_pn[1]; *pos++ = key->tx_pn[0]; hdr = (struct ieee80211_hdr_4addr *) skb->data; if (!tcb_desc->bHwSec) { int blocks, last, len; u8 *mic; u8 *b0 = key->tx_b0; u8 *b = key->tx_b; u8 *e = key->tx_e; u8 *s0 = key->tx_s0; //mic is moved to here by john mic = skb_put(skb, CCMP_MIC_LEN); ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0); blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN; last = data_len % AES_BLOCK_LEN; for (i = 1; i <= blocks; i++) { len = (i == blocks && last) ? 
last : AES_BLOCK_LEN; /* Authentication */ xor_block(b, pos, len); ieee80211_ccmp_aes_encrypt(key->tfm, b, b); /* Encryption, with counter */ b0[14] = (i >> 8) & 0xff; b0[15] = i & 0xff; ieee80211_ccmp_aes_encrypt(key->tfm, b0, e); xor_block(pos, e, len); pos += len; } for (i = 0; i < CCMP_MIC_LEN; i++) mic[i] = b[i] ^ s0[i]; } return 0; } static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct ieee80211_ccmp_data *key = priv; u8 keyidx, *pos; struct ieee80211_hdr_4addr *hdr; cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); u8 pn[6]; if (skb->len < hdr_len + CCMP_HDR_LEN + CCMP_MIC_LEN) { key->dot11RSNAStatsCCMPFormatErrors++; return -1; } hdr = (struct ieee80211_hdr_4addr *) skb->data; pos = skb->data + hdr_len; keyidx = pos[3]; if (!(keyidx & (1 << 5))) { if (net_ratelimit()) { printk(KERN_DEBUG "CCMP: received packet without ExtIV" " flag from %pM\n", hdr->addr2); } key->dot11RSNAStatsCCMPFormatErrors++; return -2; } keyidx >>= 6; if (key->key_idx != keyidx) { printk(KERN_DEBUG "CCMP: RX tkey->key_idx=%d frame " "keyidx=%d priv=%p\n", key->key_idx, keyidx, priv); return -6; } if (!key->key_set) { if (net_ratelimit()) { printk(KERN_DEBUG "CCMP: received packet from %pM" " with keyid=%d that does not have a configured" " key\n", hdr->addr2, keyidx); } return -3; } pn[0] = pos[7]; pn[1] = pos[6]; pn[2] = pos[5]; pn[3] = pos[4]; pn[4] = pos[1]; pn[5] = pos[0]; pos += 8; if (memcmp(pn, key->rx_pn, CCMP_PN_LEN) <= 0) { if (net_ratelimit()) { printk(KERN_DEBUG "CCMP: replay detected: STA=%pM" " previous PN %pm received PN %pm\n", hdr->addr2, key->rx_pn, pn); } key->dot11RSNAStatsCCMPReplays++; return -4; } if (!tcb_desc->bHwSec) { size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN - CCMP_MIC_LEN; u8 *mic = skb->data + skb->len - CCMP_MIC_LEN; u8 *b0 = key->rx_b0; u8 *b = key->rx_b; u8 *a = key->rx_a; int i, blocks, last, len; ccmp_init_blocks(key->tfm, hdr, pn, data_len, b0, a, b); xor_block(mic, b, CCMP_MIC_LEN); blocks = 
(data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN; last = data_len % AES_BLOCK_LEN; for (i = 1; i <= blocks; i++) { len = (i == blocks && last) ? last : AES_BLOCK_LEN; /* Decrypt, with counter */ b0[14] = (i >> 8) & 0xff; b0[15] = i & 0xff; ieee80211_ccmp_aes_encrypt(key->tfm, b0, b); xor_block(pos, b, len); /* Authentication */ xor_block(a, pos, len); ieee80211_ccmp_aes_encrypt(key->tfm, a, a); pos += len; } if (memcmp(mic, a, CCMP_MIC_LEN) != 0) { if (net_ratelimit()) { printk(KERN_DEBUG "CCMP: decrypt failed: STA=" "%pM\n", hdr->addr2); } key->dot11RSNAStatsCCMPDecryptErrors++; return -5; } memcpy(key->rx_pn, pn, CCMP_PN_LEN); } /* Remove hdr and MIC */ memmove(skb->data + CCMP_HDR_LEN, skb->data, hdr_len); skb_pull(skb, CCMP_HDR_LEN); skb_trim(skb, skb->len - CCMP_MIC_LEN); return keyidx; } static int ieee80211_ccmp_set_key(void *key, int len, u8 *seq, void *priv) { struct ieee80211_ccmp_data *data = priv; int keyidx; struct crypto_tfm *tfm = data->tfm; keyidx = data->key_idx; memset(data, 0, sizeof(*data)); data->key_idx = keyidx; data->tfm = tfm; if (len == CCMP_TK_LEN) { memcpy(data->key, key, CCMP_TK_LEN); data->key_set = 1; if (seq) { data->rx_pn[0] = seq[5]; data->rx_pn[1] = seq[4]; data->rx_pn[2] = seq[3]; data->rx_pn[3] = seq[2]; data->rx_pn[4] = seq[1]; data->rx_pn[5] = seq[0]; } crypto_cipher_setkey((void*)data->tfm, data->key, CCMP_TK_LEN); } else if (len == 0) data->key_set = 0; else return -1; return 0; } static int ieee80211_ccmp_get_key(void *key, int len, u8 *seq, void *priv) { struct ieee80211_ccmp_data *data = priv; if (len < CCMP_TK_LEN) return -1; if (!data->key_set) return 0; memcpy(key, data->key, CCMP_TK_LEN); if (seq) { seq[0] = data->tx_pn[5]; seq[1] = data->tx_pn[4]; seq[2] = data->tx_pn[3]; seq[3] = data->tx_pn[2]; seq[4] = data->tx_pn[1]; seq[5] = data->tx_pn[0]; } return CCMP_TK_LEN; } static char * ieee80211_ccmp_print_stats(char *p, void *priv) { struct ieee80211_ccmp_data *ccmp = priv; p += sprintf(p, "key[%d] alg=CCMP key_set=%d " 
"tx_pn=%pm rx_pn=%pm " "format_errors=%d replays=%d decrypt_errors=%d\n", ccmp->key_idx, ccmp->key_set, ccmp->tx_pn, ccmp->rx_pn, ccmp->dot11RSNAStatsCCMPFormatErrors, ccmp->dot11RSNAStatsCCMPReplays, ccmp->dot11RSNAStatsCCMPDecryptErrors); return p; } void ieee80211_ccmp_null(void) { // printk("============>%s()\n", __FUNCTION__); return; } static struct ieee80211_crypto_ops ieee80211_crypt_ccmp = { .name = "CCMP", .init = ieee80211_ccmp_init, .deinit = ieee80211_ccmp_deinit, .encrypt_mpdu = ieee80211_ccmp_encrypt, .decrypt_mpdu = ieee80211_ccmp_decrypt, .encrypt_msdu = NULL, .decrypt_msdu = NULL, .set_key = ieee80211_ccmp_set_key, .get_key = ieee80211_ccmp_get_key, .print_stats = ieee80211_ccmp_print_stats, .extra_prefix_len = CCMP_HDR_LEN, .extra_postfix_len = CCMP_MIC_LEN, .owner = THIS_MODULE, }; int __init ieee80211_crypto_ccmp_init(void) { return ieee80211_register_crypto_ops(&ieee80211_crypt_ccmp); } void __exit ieee80211_crypto_ccmp_exit(void) { ieee80211_unregister_crypto_ops(&ieee80211_crypt_ccmp); }
gpl-2.0
androidarmv6/android_kernel_samsung_bcm21553-common
arch/parisc/kernel/init_task.c
8395
2689
/* * Static declaration of "init" task data structure. * * Copyright (C) 2000 Paul Bame <bame at parisc-linux.org> * Copyright (C) 2000-2001 John Marvin <jsm at parisc-linux.org> * Copyright (C) 2001 Helge Deller <deller @ parisc-linux.org> * Copyright (C) 2002 Matthew Wilcox <willy with parisc-linux.org> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/mm.h> #include <linux/fs.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/init_task.h> #include <linux/mqueue.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); /* * Initial task structure. * * We need to make sure that this is 16384-byte aligned due to the * way process stacks are handled. This is done by having a special * "init_task" linker map entry.. */ union thread_union init_thread_union __init_task_data __attribute__((aligned(128))) = { INIT_THREAD_INFO(init_task) }; #if PT_NLEVELS == 3 /* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout * with the first pmd adjacent to the pgd and below it. 
gcc doesn't actually * guarantee that global objects will be laid out in memory in the same order * as the order of declaration, so put these in different sections and use * the linker script to order them. */ pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE))); #endif pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE))); pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE))); /* * Initial task structure. * * All other task structs will be allocated on slabs in fork.c */ EXPORT_SYMBOL(init_task); __asm__(".data"); struct task_struct init_task = INIT_TASK(init_task);
gpl-2.0
kingklick/kk-note2-kernel
arch/parisc/kernel/init_task.c
8395
2689
/* * Static declaration of "init" task data structure. * * Copyright (C) 2000 Paul Bame <bame at parisc-linux.org> * Copyright (C) 2000-2001 John Marvin <jsm at parisc-linux.org> * Copyright (C) 2001 Helge Deller <deller @ parisc-linux.org> * Copyright (C) 2002 Matthew Wilcox <willy with parisc-linux.org> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/mm.h> #include <linux/fs.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/init_task.h> #include <linux/mqueue.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); /* * Initial task structure. * * We need to make sure that this is 16384-byte aligned due to the * way process stacks are handled. This is done by having a special * "init_task" linker map entry.. */ union thread_union init_thread_union __init_task_data __attribute__((aligned(128))) = { INIT_THREAD_INFO(init_task) }; #if PT_NLEVELS == 3 /* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout * with the first pmd adjacent to the pgd and below it. 
gcc doesn't actually * guarantee that global objects will be laid out in memory in the same order * as the order of declaration, so put these in different sections and use * the linker script to order them. */ pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE))); #endif pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE))); pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE))); /* * Initial task structure. * * All other task structs will be allocated on slabs in fork.c */ EXPORT_SYMBOL(init_task); __asm__(".data"); struct task_struct init_task = INIT_TASK(init_task);
gpl-2.0
Mirenk/android_kernel_semc_msm7x30
sound/oss/uart401.c
9419
10693
/* * sound/oss/uart401.c * * MPU-401 UART driver (formerly uart401_midi.c) * * * Copyright (C) by Hannu Savolainen 1993-1997 * * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL) * Version 2 (June 1991). See the "COPYING" file distributed with this software * for more info. * * Changes: * Alan Cox Reformatted, removed sound_mem usage, use normal Linux * interrupt allocation. Protect against bogus unload * Fixed to allow IRQ > 15 * Christoph Hellwig Adapted to module_init/module_exit * Arnaldo C. de Melo got rid of check_region * * Status: * Untested */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/spinlock.h> #include "sound_config.h" #include "mpu401.h" typedef struct uart401_devc { int base; int irq; int *osp; void (*midi_input_intr) (int dev, unsigned char data); int opened, disabled; volatile unsigned char input_byte; int my_dev; int share_irq; spinlock_t lock; } uart401_devc; #define DATAPORT (devc->base) #define COMDPORT (devc->base+1) #define STATPORT (devc->base+1) static int uart401_status(uart401_devc * devc) { return inb(STATPORT); } #define input_avail(devc) (!(uart401_status(devc)&INPUT_AVAIL)) #define output_ready(devc) (!(uart401_status(devc)&OUTPUT_READY)) static void uart401_cmd(uart401_devc * devc, unsigned char cmd) { outb((cmd), COMDPORT); } static int uart401_read(uart401_devc * devc) { return inb(DATAPORT); } static void uart401_write(uart401_devc * devc, unsigned char byte) { outb((byte), DATAPORT); } #define OUTPUT_READY 0x40 #define INPUT_AVAIL 0x80 #define MPU_ACK 0xFE #define MPU_RESET 0xFF #define UART_MODE_ON 0x3F static int reset_uart401(uart401_devc * devc); static void enter_uart_mode(uart401_devc * devc); static void uart401_input_loop(uart401_devc * devc) { int work_limit=30000; while (input_avail(devc) && --work_limit) { unsigned char c = uart401_read(devc); if (c == MPU_ACK) devc->input_byte = c; else if (devc->opened & OPEN_READ 
&& devc->midi_input_intr) devc->midi_input_intr(devc->my_dev, c); } if(work_limit==0) printk(KERN_WARNING "Too much work in interrupt on uart401 (0x%X). UART jabbering ??\n", devc->base); } irqreturn_t uart401intr(int irq, void *dev_id) { uart401_devc *devc = dev_id; if (devc == NULL) { printk(KERN_ERR "uart401: bad devc\n"); return IRQ_NONE; } if (input_avail(devc)) uart401_input_loop(devc); return IRQ_HANDLED; } static int uart401_open(int dev, int mode, void (*input) (int dev, unsigned char data), void (*output) (int dev) ) { uart401_devc *devc = (uart401_devc *) midi_devs[dev]->devc; if (devc->opened) return -EBUSY; /* Flush the UART */ while (input_avail(devc)) uart401_read(devc); devc->midi_input_intr = input; devc->opened = mode; enter_uart_mode(devc); devc->disabled = 0; return 0; } static void uart401_close(int dev) { uart401_devc *devc = (uart401_devc *) midi_devs[dev]->devc; reset_uart401(devc); devc->opened = 0; } static int uart401_out(int dev, unsigned char midi_byte) { int timeout; unsigned long flags; uart401_devc *devc = (uart401_devc *) midi_devs[dev]->devc; if (devc->disabled) return 1; /* * Test for input since pending input seems to block the output. */ spin_lock_irqsave(&devc->lock,flags); if (input_avail(devc)) uart401_input_loop(devc); spin_unlock_irqrestore(&devc->lock,flags); /* * Sometimes it takes about 13000 loops before the output becomes ready * (After reset). Normally it takes just about 10 loops. 
*/ for (timeout = 30000; timeout > 0 && !output_ready(devc); timeout--); if (!output_ready(devc)) { printk(KERN_WARNING "uart401: Timeout - Device not responding\n"); devc->disabled = 1; reset_uart401(devc); enter_uart_mode(devc); return 1; } uart401_write(devc, midi_byte); return 1; } static inline int uart401_start_read(int dev) { return 0; } static inline int uart401_end_read(int dev) { return 0; } static inline void uart401_kick(int dev) { } static inline int uart401_buffer_status(int dev) { return 0; } #define MIDI_SYNTH_NAME "MPU-401 UART" #define MIDI_SYNTH_CAPS SYNTH_CAP_INPUT #include "midi_synth.h" static const struct midi_operations uart401_operations = { .owner = THIS_MODULE, .info = {"MPU-401 (UART) MIDI", 0, 0, SNDCARD_MPU401}, .converter = &std_midi_synth, .in_info = {0}, .open = uart401_open, .close = uart401_close, .outputc = uart401_out, .start_read = uart401_start_read, .end_read = uart401_end_read, .kick = uart401_kick, .buffer_status = uart401_buffer_status, }; static void enter_uart_mode(uart401_devc * devc) { int ok, timeout; unsigned long flags; spin_lock_irqsave(&devc->lock,flags); for (timeout = 30000; timeout > 0 && !output_ready(devc); timeout--); devc->input_byte = 0; uart401_cmd(devc, UART_MODE_ON); ok = 0; for (timeout = 50000; timeout > 0 && !ok; timeout--) if (devc->input_byte == MPU_ACK) ok = 1; else if (input_avail(devc)) if (uart401_read(devc) == MPU_ACK) ok = 1; spin_unlock_irqrestore(&devc->lock,flags); } static int reset_uart401(uart401_devc * devc) { int ok, timeout, n; /* * Send the RESET command. Try again if no success at the first time. */ ok = 0; for (n = 0; n < 2 && !ok; n++) { for (timeout = 30000; timeout > 0 && !output_ready(devc); timeout--); devc->input_byte = 0; uart401_cmd(devc, MPU_RESET); /* * Wait at least 25 msec. This method is not accurate so let's make the * loop bit longer. Cannot sleep since this is called during boot. 
*/ for (timeout = 50000; timeout > 0 && !ok; timeout--) { if (devc->input_byte == MPU_ACK) /* Interrupt */ ok = 1; else if (input_avail(devc)) { if (uart401_read(devc) == MPU_ACK) ok = 1; } } } if (ok) { DEB(printk("Reset UART401 OK\n")); } else DDB(printk("Reset UART401 failed - No hardware detected.\n")); if (ok) uart401_input_loop(devc); /* * Flush input before enabling interrupts */ return ok; } int probe_uart401(struct address_info *hw_config, struct module *owner) { uart401_devc *devc; char *name = "MPU-401 (UART) MIDI"; int ok = 0; unsigned long flags; DDB(printk("Entered probe_uart401()\n")); /* Default to "not found" */ hw_config->slots[4] = -1; if (!request_region(hw_config->io_base, 4, "MPU-401 UART")) { printk(KERN_INFO "uart401: could not request_region(%d, 4)\n", hw_config->io_base); return 0; } devc = kmalloc(sizeof(uart401_devc), GFP_KERNEL); if (!devc) { printk(KERN_WARNING "uart401: Can't allocate memory\n"); goto cleanup_region; } devc->base = hw_config->io_base; devc->irq = hw_config->irq; devc->osp = hw_config->osp; devc->midi_input_intr = NULL; devc->opened = 0; devc->input_byte = 0; devc->my_dev = 0; devc->share_irq = 0; spin_lock_init(&devc->lock); spin_lock_irqsave(&devc->lock,flags); ok = reset_uart401(devc); spin_unlock_irqrestore(&devc->lock,flags); if (!ok) goto cleanup_devc; if (hw_config->name) name = hw_config->name; if (devc->irq < 0) { devc->share_irq = 1; devc->irq *= -1; } else devc->share_irq = 0; if (!devc->share_irq) if (request_irq(devc->irq, uart401intr, 0, "MPU-401 UART", devc) < 0) { printk(KERN_WARNING "uart401: Failed to allocate IRQ%d\n", devc->irq); devc->share_irq = 1; } devc->my_dev = sound_alloc_mididev(); enter_uart_mode(devc); if (devc->my_dev == -1) { printk(KERN_INFO "uart401: Too many midi devices detected\n"); goto cleanup_irq; } conf_printf(name, hw_config); midi_devs[devc->my_dev] = kmalloc(sizeof(struct midi_operations), GFP_KERNEL); if (!midi_devs[devc->my_dev]) { printk(KERN_ERR "uart401: Failed to 
allocate memory\n"); goto cleanup_unload_mididev; } memcpy(midi_devs[devc->my_dev], &uart401_operations, sizeof(struct midi_operations)); if (owner) midi_devs[devc->my_dev]->owner = owner; midi_devs[devc->my_dev]->devc = devc; midi_devs[devc->my_dev]->converter = kmalloc(sizeof(struct synth_operations), GFP_KERNEL); if (!midi_devs[devc->my_dev]->converter) { printk(KERN_WARNING "uart401: Failed to allocate memory\n"); goto cleanup_midi_devs; } memcpy(midi_devs[devc->my_dev]->converter, &std_midi_synth, sizeof(struct synth_operations)); strcpy(midi_devs[devc->my_dev]->info.name, name); midi_devs[devc->my_dev]->converter->id = "UART401"; midi_devs[devc->my_dev]->converter->midi_dev = devc->my_dev; if (owner) midi_devs[devc->my_dev]->converter->owner = owner; hw_config->slots[4] = devc->my_dev; sequencer_init(); devc->opened = 0; return 1; cleanup_midi_devs: kfree(midi_devs[devc->my_dev]); cleanup_unload_mididev: sound_unload_mididev(devc->my_dev); cleanup_irq: if (!devc->share_irq) free_irq(devc->irq, devc); cleanup_devc: kfree(devc); cleanup_region: release_region(hw_config->io_base, 4); return 0; } void unload_uart401(struct address_info *hw_config) { uart401_devc *devc; int n=hw_config->slots[4]; /* Not set up */ if(n==-1 || midi_devs[n]==NULL) return; /* Not allocated (erm ??) 
*/ devc = midi_devs[hw_config->slots[4]]->devc; if (devc == NULL) return; reset_uart401(devc); release_region(hw_config->io_base, 4); if (!devc->share_irq) free_irq(devc->irq, devc); if (devc) { kfree(midi_devs[devc->my_dev]->converter); kfree(midi_devs[devc->my_dev]); kfree(devc); devc = NULL; } /* This kills midi_devs[x] */ sound_unload_mididev(hw_config->slots[4]); } EXPORT_SYMBOL(probe_uart401); EXPORT_SYMBOL(unload_uart401); EXPORT_SYMBOL(uart401intr); static struct address_info cfg_mpu; static int io = -1; static int irq = -1; module_param(io, int, 0444); module_param(irq, int, 0444); static int __init init_uart401(void) { cfg_mpu.irq = irq; cfg_mpu.io_base = io; /* Can be loaded either for module use or to provide functions to others */ if (cfg_mpu.io_base != -1 && cfg_mpu.irq != -1) { printk(KERN_INFO "MPU-401 UART driver Copyright (C) Hannu Savolainen 1993-1997"); if (!probe_uart401(&cfg_mpu, THIS_MODULE)) return -ENODEV; } return 0; } static void __exit cleanup_uart401(void) { if (cfg_mpu.io_base != -1 && cfg_mpu.irq != -1) unload_uart401(&cfg_mpu); } module_init(init_uart401); module_exit(cleanup_uart401); #ifndef MODULE static int __init setup_uart401(char *str) { /* io, irq */ int ints[3]; str = get_options(str, ARRAY_SIZE(ints), ints); io = ints[1]; irq = ints[2]; return 1; } __setup("uart401=", setup_uart401); #endif MODULE_LICENSE("GPL");
gpl-2.0
XxXPachaXxX/PachaRX-VS4-3.0.16
arch/m68k/sun3/sun3dvma.c
10699
6997
/* * linux/arch/m68k/sun3/sun3dvma.c * * Copyright (C) 2000 Sam Creasey * * Contains common routines for sun3/sun3x DVMA management. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/mm.h> #include <linux/list.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/dvma.h> #undef DVMA_DEBUG #ifdef CONFIG_SUN3X extern void dvma_unmap_iommu(unsigned long baddr, int len); #else static inline void dvma_unmap_iommu(unsigned long a, int b) { } #endif #ifdef CONFIG_SUN3 extern void sun3_dvma_init(void); #endif static unsigned long iommu_use[IOMMU_TOTAL_ENTRIES]; #define dvma_index(baddr) ((baddr - DVMA_START) >> DVMA_PAGE_SHIFT) #define dvma_entry_use(baddr) (iommu_use[dvma_index(baddr)]) struct hole { unsigned long start; unsigned long end; unsigned long size; struct list_head list; }; static struct list_head hole_list; static struct list_head hole_cache; static struct hole initholes[64]; #ifdef DVMA_DEBUG static unsigned long dvma_allocs; static unsigned long dvma_frees; static unsigned long long dvma_alloc_bytes; static unsigned long long dvma_free_bytes; static void print_use(void) { int i; int j = 0; printk("dvma entry usage:\n"); for(i = 0; i < IOMMU_TOTAL_ENTRIES; i++) { if(!iommu_use[i]) continue; j++; printk("dvma entry: %08lx len %08lx\n", ( i << DVMA_PAGE_SHIFT) + DVMA_START, iommu_use[i]); } printk("%d entries in use total\n", j); printk("allocation/free calls: %lu/%lu\n", dvma_allocs, dvma_frees); printk("allocation/free bytes: %Lx/%Lx\n", dvma_alloc_bytes, dvma_free_bytes); } static void print_holes(struct list_head *holes) { struct list_head *cur; struct hole *hole; printk("listing dvma holes\n"); list_for_each(cur, holes) { hole = list_entry(cur, struct hole, list); if((hole->start == 0) && (hole->end == 0) && (hole->size == 0)) continue; printk("hole: start %08lx end %08lx size %08lx\n", hole->start, hole->end, hole->size); } printk("end of hole listing...\n"); } #endif /* DVMA_DEBUG */ static inline int 
refill(void) { struct hole *hole; struct hole *prev = NULL; struct list_head *cur; int ret = 0; list_for_each(cur, &hole_list) { hole = list_entry(cur, struct hole, list); if(!prev) { prev = hole; continue; } if(hole->end == prev->start) { hole->size += prev->size; hole->end = prev->end; list_move(&(prev->list), &hole_cache); ret++; } } return ret; } static inline struct hole *rmcache(void) { struct hole *ret; if(list_empty(&hole_cache)) { if(!refill()) { printk("out of dvma hole cache!\n"); BUG(); } } ret = list_entry(hole_cache.next, struct hole, list); list_del(&(ret->list)); return ret; } static inline unsigned long get_baddr(int len, unsigned long align) { struct list_head *cur; struct hole *hole; if(list_empty(&hole_list)) { #ifdef DVMA_DEBUG printk("out of dvma holes! (printing hole cache)\n"); print_holes(&hole_cache); print_use(); #endif BUG(); } list_for_each(cur, &hole_list) { unsigned long newlen; hole = list_entry(cur, struct hole, list); if(align > DVMA_PAGE_SIZE) newlen = len + ((hole->end - len) & (align-1)); else newlen = len; if(hole->size > newlen) { hole->end -= newlen; hole->size -= newlen; dvma_entry_use(hole->end) = newlen; #ifdef DVMA_DEBUG dvma_allocs++; dvma_alloc_bytes += newlen; #endif return hole->end; } else if(hole->size == newlen) { list_move(&(hole->list), &hole_cache); dvma_entry_use(hole->start) = newlen; #ifdef DVMA_DEBUG dvma_allocs++; dvma_alloc_bytes += newlen; #endif return hole->start; } } printk("unable to find dvma hole!\n"); BUG(); return 0; } static inline int free_baddr(unsigned long baddr) { unsigned long len; struct hole *hole; struct list_head *cur; unsigned long orig_baddr; orig_baddr = baddr; len = dvma_entry_use(baddr); dvma_entry_use(baddr) = 0; baddr &= DVMA_PAGE_MASK; dvma_unmap_iommu(baddr, len); #ifdef DVMA_DEBUG dvma_frees++; dvma_free_bytes += len; #endif list_for_each(cur, &hole_list) { hole = list_entry(cur, struct hole, list); if(hole->end == baddr) { hole->end += len; hole->size += len; return 0; } else 
if(hole->start == (baddr + len)) { hole->start = baddr; hole->size += len; return 0; } } hole = rmcache(); hole->start = baddr; hole->end = baddr + len; hole->size = len; // list_add_tail(&(hole->list), cur); list_add(&(hole->list), cur); return 0; } void dvma_init(void) { struct hole *hole; int i; INIT_LIST_HEAD(&hole_list); INIT_LIST_HEAD(&hole_cache); /* prepare the hole cache */ for(i = 0; i < 64; i++) list_add(&(initholes[i].list), &hole_cache); hole = rmcache(); hole->start = DVMA_START; hole->end = DVMA_END; hole->size = DVMA_SIZE; list_add(&(hole->list), &hole_list); memset(iommu_use, 0, sizeof(iommu_use)); dvma_unmap_iommu(DVMA_START, DVMA_SIZE); #ifdef CONFIG_SUN3 sun3_dvma_init(); #endif } inline unsigned long dvma_map_align(unsigned long kaddr, int len, int align) { unsigned long baddr; unsigned long off; if(!len) len = 0x800; if(!kaddr || !len) { // printk("error: kaddr %lx len %x\n", kaddr, len); // *(int *)4 = 0; return 0; } #ifdef DEBUG printk("dvma_map request %08lx bytes from %08lx\n", len, kaddr); #endif off = kaddr & ~DVMA_PAGE_MASK; kaddr &= PAGE_MASK; len += off; len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK); if(align == 0) align = DVMA_PAGE_SIZE; else align = ((align + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK); baddr = get_baddr(len, align); // printk("using baddr %lx\n", baddr); if(!dvma_map_iommu(kaddr, baddr, len)) return (baddr + off); printk("dvma_map failed kaddr %lx baddr %lx len %x\n", kaddr, baddr, len); BUG(); return 0; } EXPORT_SYMBOL(dvma_map_align); void dvma_unmap(void *baddr) { unsigned long addr; addr = (unsigned long)baddr; /* check if this is a vme mapping */ if(!(addr & 0x00f00000)) addr |= 0xf00000; free_baddr(addr); return; } EXPORT_SYMBOL(dvma_unmap); void *dvma_malloc_align(unsigned long len, unsigned long align) { unsigned long kaddr; unsigned long baddr; unsigned long vaddr; if(!len) return NULL; #ifdef DEBUG printk("dvma_malloc request %lx bytes\n", len); #endif len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK); 
if((kaddr = __get_free_pages(GFP_ATOMIC, get_order(len))) == 0) return NULL; if((baddr = (unsigned long)dvma_map_align(kaddr, len, align)) == 0) { free_pages(kaddr, get_order(len)); return NULL; } vaddr = dvma_btov(baddr); if(dvma_map_cpu(kaddr, vaddr, len) < 0) { dvma_unmap((void *)baddr); free_pages(kaddr, get_order(len)); return NULL; } #ifdef DEBUG printk("mapped %08lx bytes %08lx kern -> %08lx bus\n", len, kaddr, baddr); #endif return (void *)vaddr; } EXPORT_SYMBOL(dvma_malloc_align); void dvma_free(void *vaddr) { return; } EXPORT_SYMBOL(dvma_free);
gpl-2.0
AOKPSaber/android_external_busybox
coreutils/cp.c
204
6251
/* vi: set sw=4 ts=4: */ /* * Mini cp implementation for busybox * * Copyright (C) 2000 by Matt Kraai <kraai@alumni.carnegiemellon.edu> * SELinux support by Yuichi Nakamura <ynakam@hitachisoft.jp> * * Licensed under GPLv2 or later, see file LICENSE in this source tree. */ /* http://www.opengroup.org/onlinepubs/007904975/utilities/cp.html */ /* Mar 16, 2003 Manuel Novoa III (mjn3@codepoet.org) * * Size reduction. */ //usage:#define cp_trivial_usage //usage: "[OPTIONS] SOURCE... DEST" //usage:#define cp_full_usage "\n\n" //usage: "Copy SOURCE(s) to DEST\n" //usage: "\n -a Same as -dpR" //usage: IF_SELINUX( //usage: "\n -c Preserve security context" //usage: ) //usage: "\n -R,-r Recurse" //usage: "\n -d,-P Preserve symlinks (default if -R)" //usage: "\n -L Follow all symlinks" //usage: "\n -H Follow symlinks on command line" //usage: "\n -p Preserve file attributes if possible" //usage: "\n -f Overwrite" //usage: "\n -i Prompt before overwrite" //usage: "\n -l,-s Create (sym)links" #include "libbb.h" #include "libcoreutils/coreutils.h" /* This is a NOEXEC applet. Be very careful! 
*/ int cp_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; int cp_main(int argc, char **argv) { struct stat source_stat; struct stat dest_stat; const char *last; const char *dest; int s_flags; int d_flags; int flags; int status; enum { OPT_a = 1 << (sizeof(FILEUTILS_CP_OPTSTR)-1), OPT_r = 1 << (sizeof(FILEUTILS_CP_OPTSTR)), OPT_P = 1 << (sizeof(FILEUTILS_CP_OPTSTR)+1), OPT_v = 1 << (sizeof(FILEUTILS_CP_OPTSTR)+2), #if ENABLE_FEATURE_CP_LONG_OPTIONS OPT_parents = 1 << (sizeof(FILEUTILS_CP_OPTSTR)+3), #endif }; // Need at least two arguments // Soft- and hardlinking doesn't mix // -P and -d are the same (-P is POSIX, -d is GNU) // -r and -R are the same // -R (and therefore -r) turns on -d (coreutils does this) // -a = -pdR opt_complementary = "-2:l--s:s--l:Pd:rRd:Rd:apdR"; #if ENABLE_FEATURE_CP_LONG_OPTIONS applet_long_options = "archive\0" No_argument "a" "force\0" No_argument "f" "interactive\0" No_argument "i" "link\0" No_argument "l" "dereference\0" No_argument "L" "no-dereference\0" No_argument "P" "recursive\0" No_argument "R" "symbolic-link\0" No_argument "s" "verbose\0" No_argument "v" "parents\0" No_argument "\xff" ; #endif // -v (--verbose) is ignored flags = getopt32(argv, FILEUTILS_CP_OPTSTR "arPv"); /* Options of cp from GNU coreutils 6.10: * -a, --archive * -f, --force * -i, --interactive * -l, --link * -L, --dereference * -P, --no-dereference * -R, -r, --recursive * -s, --symbolic-link * -v, --verbose * -H follow command-line symbolic links in SOURCE * -d same as --no-dereference --preserve=links * -p same as --preserve=mode,ownership,timestamps * -c same as --preserve=context * --parents * use full source file name under DIRECTORY * NOT SUPPORTED IN BBOX: * --backup[=CONTROL] * make a backup of each existing destination file * -b like --backup but does not accept an argument * --copy-contents * copy contents of special files when recursive * --preserve[=ATTR_LIST] * preserve attributes (default: mode,ownership,timestamps), * if possible 
additional attributes: security context,links,all * --no-preserve=ATTR_LIST * --remove-destination * remove each existing destination file before attempting to open * --sparse=WHEN * control creation of sparse files * --strip-trailing-slashes * remove any trailing slashes from each SOURCE argument * -S, --suffix=SUFFIX * override the usual backup suffix * -t, --target-directory=DIRECTORY * copy all SOURCE arguments into DIRECTORY * -T, --no-target-directory * treat DEST as a normal file * -u, --update * copy only when the SOURCE file is newer than the destination * file or when the destination file is missing * -x, --one-file-system * stay on this file system * -Z, --context=CONTEXT * (SELinux) set SELinux security context of copy to CONTEXT */ argc -= optind; argv += optind; /* Reverse this bit. If there is -d, bit is not set: */ flags ^= FILEUTILS_DEREFERENCE; /* coreutils 6.9 compat: * by default, "cp" derefs symlinks (creates regular dest files), * but "cp -R" does not. We switch off deref if -r or -R (see above). * However, "cp -RL" must still deref symlinks: */ if (flags & FILEUTILS_DEREF_SOFTLINK) /* -L */ flags |= FILEUTILS_DEREFERENCE; #if ENABLE_SELINUX if (flags & FILEUTILS_PRESERVE_SECURITY_CONTEXT) { selinux_or_die(); } #endif status = EXIT_SUCCESS; last = argv[argc - 1]; /* If there are only two arguments and... */ if (argc == 2) { s_flags = cp_mv_stat2(*argv, &source_stat, (flags & FILEUTILS_DEREFERENCE) ? stat : lstat); if (s_flags < 0) return EXIT_FAILURE; d_flags = cp_mv_stat(last, &dest_stat); if (d_flags < 0) return EXIT_FAILURE; #if ENABLE_FEATURE_CP_LONG_OPTIONS if (flags & OPT_parents) { if (!(d_flags & 2)) { bb_error_msg_and_die("with --parents, the destination must be a directory"); } } #endif /* ...if neither is a directory... */ if (!((s_flags | d_flags) & 2) /* ...or: recursing, the 1st is a directory, and the 2nd doesn't exist... 
*/ || ((flags & FILEUTILS_RECUR) && (s_flags & 2) && !d_flags) ) { /* Do a simple copy */ dest = last; goto DO_COPY; /* NB: argc==2 -> *++argv==last */ } } while (1) { #if ENABLE_FEATURE_CP_LONG_OPTIONS if (flags & OPT_parents) { char *dest_dup; char *dest_dir; dest = concat_path_file(last, *argv); dest_dup = xstrdup(dest); dest_dir = dirname(dest_dup); if (bb_make_directory(dest_dir, -1, FILEUTILS_RECUR)) { return EXIT_FAILURE; } free(dest_dup); goto DO_COPY; } #endif dest = concat_path_file(last, bb_get_last_path_component_strip(*argv)); DO_COPY: if (copy_file(*argv, dest, flags) < 0) { status = EXIT_FAILURE; } if (*++argv == last) { /* possibly leaking dest... */ break; } /* don't move up: dest may be == last and not malloced! */ free((void*)dest); } /* Exit. We are NOEXEC, not NOFORK. We do exit at the end of main() */ return status; }
gpl-2.0
neykov/chipidea-device-driver
arch/x86/kernel/paravirt.c
204
12173
/* Paravirtualization interfaces Copyright (C) 2006 Rusty Russell IBM Corporation This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc */ #include <linux/errno.h> #include <linux/module.h> #include <linux/efi.h> #include <linux/bcd.h> #include <linux/highmem.h> #include <asm/bug.h> #include <asm/paravirt.h> #include <asm/debugreg.h> #include <asm/desc.h> #include <asm/setup.h> #include <asm/pgtable.h> #include <asm/time.h> #include <asm/pgalloc.h> #include <asm/irq.h> #include <asm/delay.h> #include <asm/fixmap.h> #include <asm/apic.h> #include <asm/tlbflush.h> #include <asm/timer.h> #include <asm/special_insns.h> /* nop stub */ void _paravirt_nop(void) { } /* identity function, which can be inlined */ u32 _paravirt_ident_32(u32 x) { return x; } u64 _paravirt_ident_64(u64 x) { return x; } void __init default_banner(void) { printk(KERN_INFO "Booting paravirtualized kernel on %s\n", pv_info.name); } /* Simple instruction patching code. */ #define DEF_NATIVE(ops, name, code) \ extern const char start_##ops##_##name[], end_##ops##_##name[]; \ asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":") /* Undefined instruction for dealing with missing ops pointers. 
*/ static const unsigned char ud2a[] = { 0x0f, 0x0b }; unsigned paravirt_patch_nop(void) { return 0; } unsigned paravirt_patch_ignore(unsigned len) { return len; } struct branch { unsigned char opcode; u32 delta; } __attribute__((packed)); unsigned paravirt_patch_call(void *insnbuf, const void *target, u16 tgt_clobbers, unsigned long addr, u16 site_clobbers, unsigned len) { struct branch *b = insnbuf; unsigned long delta = (unsigned long)target - (addr+5); if (tgt_clobbers & ~site_clobbers) return len; /* target would clobber too much for this site */ if (len < 5) return len; /* call too long for patch site */ b->opcode = 0xe8; /* call */ b->delta = delta; BUILD_BUG_ON(sizeof(*b) != 5); return 5; } unsigned paravirt_patch_jmp(void *insnbuf, const void *target, unsigned long addr, unsigned len) { struct branch *b = insnbuf; unsigned long delta = (unsigned long)target - (addr+5); if (len < 5) return len; /* call too long for patch site */ b->opcode = 0xe9; /* jmp */ b->delta = delta; return 5; } /* Neat trick to map patch type back to the call within the * corresponding structure. 
 */

/*
 * Return the native operation registered for patch-site @type.
 * The pv_*_ops structs are gathered into one template and treated as a
 * flat array of function pointers, indexed directly by @type.
 */
static void *get_call_destination(u8 type)
{
	struct paravirt_patch_template tmpl = {
		.pv_init_ops = pv_init_ops,
		.pv_time_ops = pv_time_ops,
		.pv_cpu_ops = pv_cpu_ops,
		.pv_irq_ops = pv_irq_ops,
		.pv_apic_ops = pv_apic_ops,
		.pv_mmu_ops = pv_mmu_ops,
#ifdef CONFIG_PARAVIRT_SPINLOCKS
		.pv_lock_ops = pv_lock_ops,
#endif
	};
	return *((void **)&tmpl + type);
}

/*
 * Default patcher: rewrite the call site at @addr (buffered in @insnbuf,
 * @len bytes available) with the cheapest equivalent of the registered op:
 * ud2a / nop / identity / direct jmp / direct call.  Returns the number of
 * bytes actually emitted.
 */
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
				unsigned long addr, unsigned len)
{
	void *opfunc = get_call_destination(type);
	unsigned ret;

	if (opfunc == NULL)
		/* If there's no function, patch it with a ud2a (BUG) */
		ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
	else if (opfunc == _paravirt_nop)
		/* If the operation is a nop, then nop the callsite */
		ret = paravirt_patch_nop();

	/* identity functions just return their single argument */
	else if (opfunc == _paravirt_ident_32)
		ret = paravirt_patch_ident_32(insnbuf, len);
	else if (opfunc == _paravirt_ident_64)
		ret = paravirt_patch_ident_64(insnbuf, len);

	else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
		 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
		 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) ||
		 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64))
		/* If operation requires a jmp, then jmp */
		ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
	else
		/* Otherwise call the function; assume target could
		   clobber any caller-save reg */
		ret = paravirt_patch_call(insnbuf, opfunc, CLBR_ANY,
					  addr, clobbers, len);

	return ret;
}

/*
 * Copy a replacement instruction sequence [@start, @end) into @insnbuf.
 * If the sequence does not fit in @len bytes (or there is none), nothing
 * is copied and @len is returned so the original site is left alone.
 */
unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
			      const char *start, const char *end)
{
	unsigned insn_len = end - start;

	if (insn_len > len || start == NULL)
		insn_len = len;
	else
		memcpy(insnbuf, start, insn_len);

	return insn_len;
}

/* Native TLB flush, non-global pages only. */
static void native_flush_tlb(void)
{
	__native_flush_tlb();
}

/*
 * Global pages have to be flushed a bit differently. Not a real
 * performance problem because this does not happen often.
 */
static void native_flush_tlb_global(void)
{
	__native_flush_tlb_global();
}

/* Flush a single TLB entry for @addr. */
static void native_flush_tlb_single(unsigned long addr)
{
	__native_flush_tlb_single(addr);
}

struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;

/* Bare hardware has no hypervisor, hence no stolen time. */
static u64 native_steal_clock(int cpu)
{
	return 0;
}

/* These are in entry.S */
extern void native_iret(void);
extern void native_irq_enable_sysexit(void);
extern void native_usergs_sysret32(void);
extern void native_usergs_sysret64(void);

static struct resource reserve_ioports = {
	.start = 0,
	.end = IO_SPACE_LIMIT,
	.name = "paravirt-ioport",
	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

/*
 * Reserve the whole legacy IO space to prevent any legacy drivers
 * from wasting time probing for their hardware. This is a fairly
 * brute-force approach to disabling all non-virtual drivers.
 *
 * Note that this must be called very early to have any effect.
 */
int paravirt_disable_iospace(void)
{
	return request_resource(&ioport_resource, &reserve_ioports);
}

/* Per-CPU lazy-mode state; NONE means ops are issued immediately. */
static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;

/* Enter @mode; nesting lazy sections is a bug. */
static inline void enter_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(this_cpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);

	this_cpu_write(paravirt_lazy_mode, mode);
}

/* Leave @mode; leaving a mode we are not in is a bug. */
static void leave_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(this_cpu_read(paravirt_lazy_mode) != mode);

	this_cpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
}

void paravirt_enter_lazy_mmu(void)
{
	enter_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_leave_lazy_mmu(void)
{
	leave_lazy(PARAVIRT_LAZY_MMU);
}

/*
 * Context switch is starting: if we were batching MMU updates, flush them
 * and remember (via TIF_LAZY_MMU_UPDATES on @prev) to re-enter MMU lazy
 * mode when @prev runs again; then enter CPU lazy mode for the switch.
 */
void paravirt_start_context_switch(struct task_struct *prev)
{
	BUG_ON(preemptible());

	if (this_cpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();
		set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
	}
	enter_lazy(PARAVIRT_LAZY_CPU);
}

/*
 * Context switch is done: leave CPU lazy mode and restore MMU lazy mode
 * for @next if it was interrupted mid-batch (flag set above).
 */
void paravirt_end_context_switch(struct task_struct *next)
{
	BUG_ON(preemptible());

	leave_lazy(PARAVIRT_LAZY_CPU);

	if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
		arch_enter_lazy_mmu_mode();
}

/* Interrupt context never runs lazily, whatever the interrupted task did. */
enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
{
	if (in_interrupt())
		return PARAVIRT_LAZY_NONE;

	return this_cpu_read(paravirt_lazy_mode);
}

/* Flush pending lazy MMU updates by bouncing out of and back into MMU mode. */
void arch_flush_lazy_mmu_mode(void)
{
	preempt_disable();

	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();
		arch_enter_lazy_mmu_mode();
	}

	preempt_enable();
}

/*
 * Default pv_ops tables: every hook points at the native implementation
 * (or paravirt_nop), so bare hardware pays only the patched-out call cost.
 */
struct pv_info pv_info = {
	.name = "bare hardware",
	.paravirt_enabled = 0,
	.kernel_rpl = 0,
	.shared_kernel_pmd = 1,	/* Only used when CONFIG_X86_PAE is set */

#ifdef CONFIG_X86_64
	.extra_user_64bit_cs = __USER_CS,
#endif
};

struct pv_init_ops pv_init_ops = {
	.patch = native_patch,
};

struct pv_time_ops pv_time_ops = {
	.sched_clock = native_sched_clock,
	.steal_clock = native_steal_clock,
};

struct pv_irq_ops pv_irq_ops = {
	.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
	.restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
	.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
	.irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
	.safe_halt = native_safe_halt,
	.halt = native_halt,
#ifdef CONFIG_X86_64
	.adjust_exception_frame = paravirt_nop,
#endif
};

struct pv_cpu_ops pv_cpu_ops = {
	.cpuid = native_cpuid,
	.get_debugreg = native_get_debugreg,
	.set_debugreg = native_set_debugreg,
	.clts = native_clts,
	.read_cr0 = native_read_cr0,
	.write_cr0 = native_write_cr0,
	.read_cr4 = native_read_cr4,
	.read_cr4_safe = native_read_cr4_safe,
	.write_cr4 = native_write_cr4,
#ifdef CONFIG_X86_64
	.read_cr8 = native_read_cr8,
	.write_cr8 = native_write_cr8,
#endif
	.wbinvd = native_wbinvd,
	/* note: the _safe MSR accessors are the defaults here */
	.read_msr = native_read_msr_safe,
	.write_msr = native_write_msr_safe,
	.read_tsc = native_read_tsc,
	.read_pmc = native_read_pmc,
	.read_tscp = native_read_tscp,
	.load_tr_desc = native_load_tr_desc,
	.set_ldt = native_set_ldt,
	.load_gdt = native_load_gdt,
	.load_idt = native_load_idt,
	.store_gdt = native_store_gdt,
	.store_idt = native_store_idt,
	.store_tr = native_store_tr,
	.load_tls = native_load_tls,
#ifdef CONFIG_X86_64
	.load_gs_index = native_load_gs_index,
#endif
	.write_ldt_entry = native_write_ldt_entry,
	.write_gdt_entry = native_write_gdt_entry,
	.write_idt_entry = native_write_idt_entry,

	.alloc_ldt = paravirt_nop,
	.free_ldt = paravirt_nop,

	.load_sp0 = native_load_sp0,

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
	.irq_enable_sysexit = native_irq_enable_sysexit,
#endif
#ifdef CONFIG_X86_64
#ifdef CONFIG_IA32_EMULATION
	.usergs_sysret32 = native_usergs_sysret32,
#endif
	.usergs_sysret64 = native_usergs_sysret64,
#endif
	.iret = native_iret,
	.swapgs = native_swapgs,

	.set_iopl_mask = native_set_iopl_mask,
	.io_delay = native_io_delay,

	.start_context_switch = paravirt_nop,
	.end_context_switch = paravirt_nop,
};

struct pv_apic_ops pv_apic_ops = {
#ifdef CONFIG_X86_LOCAL_APIC
	.startup_ipi_hook = paravirt_nop,
#endif
};

#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
/* 32-bit pagetable entries */
#define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_32)
#else
/* 64-bit pagetable entries */
#define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_64)
#endif

struct pv_mmu_ops pv_mmu_ops = {

	.read_cr2 = native_read_cr2,
	.write_cr2 = native_write_cr2,
	.read_cr3 = native_read_cr3,
	.write_cr3 = native_write_cr3,

	.flush_tlb_user = native_flush_tlb,
	.flush_tlb_kernel = native_flush_tlb_global,
	.flush_tlb_single = native_flush_tlb_single,
	.flush_tlb_others = native_flush_tlb_others,

	.pgd_alloc = __paravirt_pgd_alloc,
	.pgd_free = paravirt_nop,

	.alloc_pte = paravirt_nop,
	.alloc_pmd = paravirt_nop,
	.alloc_pud = paravirt_nop,
	.release_pte = paravirt_nop,
	.release_pmd = paravirt_nop,
	.release_pud = paravirt_nop,

	.set_pte = native_set_pte,
	.set_pte_at = native_set_pte_at,
	.set_pmd = native_set_pmd,
	.set_pmd_at = native_set_pmd_at,
	.pte_update = paravirt_nop,
	.pte_update_defer = paravirt_nop,
	.pmd_update = paravirt_nop,
	.pmd_update_defer = paravirt_nop,

	.ptep_modify_prot_start = __ptep_modify_prot_start,
	.ptep_modify_prot_commit = __ptep_modify_prot_commit,

#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
	.set_pte_atomic = native_set_pte_atomic,
	.pte_clear = native_pte_clear,
	.pmd_clear = native_pmd_clear,
#endif
	.set_pud = native_set_pud,

	.pmd_val = PTE_IDENT,
	.make_pmd = PTE_IDENT,

#if PAGETABLE_LEVELS == 4
	.pud_val = PTE_IDENT,
	.make_pud = PTE_IDENT,

	.set_pgd = native_set_pgd,
#endif
#endif /* PAGETABLE_LEVELS >= 3 */

	.pte_val = PTE_IDENT,
	.pgd_val = PTE_IDENT,

	.make_pte = PTE_IDENT,
	.make_pgd = PTE_IDENT,

	.dup_mmap = paravirt_nop,
	.exit_mmap = paravirt_nop,
	.activate_mm = paravirt_nop,

	.lazy_mode = {
		.enter = paravirt_nop,
		.leave = paravirt_nop,
	},

	.set_fixmap = native_set_fixmap,
};

EXPORT_SYMBOL_GPL(pv_time_ops);
EXPORT_SYMBOL(pv_cpu_ops);
EXPORT_SYMBOL(pv_mmu_ops);
EXPORT_SYMBOL_GPL(pv_apic_ops);
EXPORT_SYMBOL_GPL(pv_info);
EXPORT_SYMBOL(pv_irq_ops);
gpl-2.0
TrustZoneGenericDriver/linux-xlnx
drivers/mfd/menelaus.c
460
31739
/* * Copyright (C) 2004 Texas Instruments, Inc. * * Some parts based tps65010.c: * Copyright (C) 2004 Texas Instruments and * Copyright (C) 2004-2005 David Brownell * * Some parts based on tlv320aic24.c: * Copyright (C) by Kai Svahn <kai.svahn@nokia.com> * * Changes for interrupt handling and clean-up by * Tony Lindgren <tony@atomide.com> and Imre Deak <imre.deak@nokia.com> * Cleanup and generalized support for voltage setting by * Juha Yrjola * Added support for controlling VCORE and regulator sleep states, * Amit Kucheria <amit.kucheria@nokia.com> * Copyright (C) 2005, 2006 Nokia Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/mutex.h> #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/rtc.h> #include <linux/bcd.h> #include <linux/slab.h> #include <linux/mfd/menelaus.h> #include <asm/mach/irq.h> #include <asm/gpio.h> #define DRIVER_NAME "menelaus" #define MENELAUS_I2C_ADDRESS 0x72 #define MENELAUS_REV 0x01 #define MENELAUS_VCORE_CTRL1 0x02 #define MENELAUS_VCORE_CTRL2 0x03 #define MENELAUS_VCORE_CTRL3 0x04 #define MENELAUS_VCORE_CTRL4 0x05 #define MENELAUS_VCORE_CTRL5 0x06 #define MENELAUS_DCDC_CTRL1 0x07 #define MENELAUS_DCDC_CTRL2 0x08 #define MENELAUS_DCDC_CTRL3 0x09 #define MENELAUS_LDO_CTRL1 0x0A #define MENELAUS_LDO_CTRL2 0x0B #define MENELAUS_LDO_CTRL3 0x0C #define MENELAUS_LDO_CTRL4 0x0D #define MENELAUS_LDO_CTRL5 0x0E #define MENELAUS_LDO_CTRL6 0x0F #define MENELAUS_LDO_CTRL7 0x10 #define MENELAUS_LDO_CTRL8 0x11 #define MENELAUS_SLEEP_CTRL1 0x12 #define MENELAUS_SLEEP_CTRL2 0x13 #define MENELAUS_DEVICE_OFF 0x14 #define MENELAUS_OSC_CTRL 0x15 #define MENELAUS_DETECT_CTRL 0x16 #define MENELAUS_INT_MASK1 0x17 #define MENELAUS_INT_MASK2 0x18 #define MENELAUS_INT_STATUS1 0x19 #define MENELAUS_INT_STATUS2 0x1A #define MENELAUS_INT_ACK1 0x1B #define MENELAUS_INT_ACK2 0x1C #define MENELAUS_GPIO_CTRL 0x1D #define MENELAUS_GPIO_IN 0x1E #define MENELAUS_GPIO_OUT 0x1F #define MENELAUS_BBSMS 0x20 #define MENELAUS_RTC_CTRL 0x21 #define MENELAUS_RTC_UPDATE 0x22 #define MENELAUS_RTC_SEC 0x23 #define MENELAUS_RTC_MIN 0x24 #define MENELAUS_RTC_HR 0x25 #define MENELAUS_RTC_DAY 0x26 #define MENELAUS_RTC_MON 0x27 #define MENELAUS_RTC_YR 0x28 #define MENELAUS_RTC_WKDAY 0x29 #define MENELAUS_RTC_AL_SEC 0x2A #define MENELAUS_RTC_AL_MIN 0x2B #define 
MENELAUS_RTC_AL_HR 0x2C #define MENELAUS_RTC_AL_DAY 0x2D #define MENELAUS_RTC_AL_MON 0x2E #define MENELAUS_RTC_AL_YR 0x2F #define MENELAUS_RTC_COMP_MSB 0x30 #define MENELAUS_RTC_COMP_LSB 0x31 #define MENELAUS_S1_PULL_EN 0x32 #define MENELAUS_S1_PULL_DIR 0x33 #define MENELAUS_S2_PULL_EN 0x34 #define MENELAUS_S2_PULL_DIR 0x35 #define MENELAUS_MCT_CTRL1 0x36 #define MENELAUS_MCT_CTRL2 0x37 #define MENELAUS_MCT_CTRL3 0x38 #define MENELAUS_MCT_PIN_ST 0x39 #define MENELAUS_DEBOUNCE1 0x3A #define IH_MENELAUS_IRQS 12 #define MENELAUS_MMC_S1CD_IRQ 0 /* MMC slot 1 card change */ #define MENELAUS_MMC_S2CD_IRQ 1 /* MMC slot 2 card change */ #define MENELAUS_MMC_S1D1_IRQ 2 /* MMC DAT1 low in slot 1 */ #define MENELAUS_MMC_S2D1_IRQ 3 /* MMC DAT1 low in slot 2 */ #define MENELAUS_LOWBAT_IRQ 4 /* Low battery */ #define MENELAUS_HOTDIE_IRQ 5 /* Hot die detect */ #define MENELAUS_UVLO_IRQ 6 /* UVLO detect */ #define MENELAUS_TSHUT_IRQ 7 /* Thermal shutdown */ #define MENELAUS_RTCTMR_IRQ 8 /* RTC timer */ #define MENELAUS_RTCALM_IRQ 9 /* RTC alarm */ #define MENELAUS_RTCERR_IRQ 10 /* RTC error */ #define MENELAUS_PSHBTN_IRQ 11 /* Push button */ #define MENELAUS_RESERVED12_IRQ 12 /* Reserved */ #define MENELAUS_RESERVED13_IRQ 13 /* Reserved */ #define MENELAUS_RESERVED14_IRQ 14 /* Reserved */ #define MENELAUS_RESERVED15_IRQ 15 /* Reserved */ /* VCORE_CTRL1 register */ #define VCORE_CTRL1_BYP_COMP (1 << 5) #define VCORE_CTRL1_HW_NSW (1 << 7) /* GPIO_CTRL register */ #define GPIO_CTRL_SLOTSELEN (1 << 5) #define GPIO_CTRL_SLPCTLEN (1 << 6) #define GPIO1_DIR_INPUT (1 << 0) #define GPIO2_DIR_INPUT (1 << 1) #define GPIO3_DIR_INPUT (1 << 2) /* MCT_CTRL1 register */ #define MCT_CTRL1_S1_CMD_OD (1 << 2) #define MCT_CTRL1_S2_CMD_OD (1 << 3) /* MCT_CTRL2 register */ #define MCT_CTRL2_VS2_SEL_D0 (1 << 0) #define MCT_CTRL2_VS2_SEL_D1 (1 << 1) #define MCT_CTRL2_S1CD_BUFEN (1 << 4) #define MCT_CTRL2_S2CD_BUFEN (1 << 5) #define MCT_CTRL2_S1CD_DBEN (1 << 6) #define MCT_CTRL2_S2CD_BEN (1 << 7) /* 
MCT_CTRL3 register */ #define MCT_CTRL3_SLOT1_EN (1 << 0) #define MCT_CTRL3_SLOT2_EN (1 << 1) #define MCT_CTRL3_S1_AUTO_EN (1 << 2) #define MCT_CTRL3_S2_AUTO_EN (1 << 3) /* MCT_PIN_ST register */ #define MCT_PIN_ST_S1_CD_ST (1 << 0) #define MCT_PIN_ST_S2_CD_ST (1 << 1) static void menelaus_work(struct work_struct *_menelaus); struct menelaus_chip { struct mutex lock; struct i2c_client *client; struct work_struct work; #ifdef CONFIG_RTC_DRV_TWL92330 struct rtc_device *rtc; u8 rtc_control; unsigned uie:1; #endif unsigned vcore_hw_mode:1; u8 mask1, mask2; void (*handlers[16])(struct menelaus_chip *); void (*mmc_callback)(void *data, u8 mask); void *mmc_callback_data; }; static struct menelaus_chip *the_menelaus; static int menelaus_write_reg(int reg, u8 value) { int val = i2c_smbus_write_byte_data(the_menelaus->client, reg, value); if (val < 0) { pr_err(DRIVER_NAME ": write error"); return val; } return 0; } static int menelaus_read_reg(int reg) { int val = i2c_smbus_read_byte_data(the_menelaus->client, reg); if (val < 0) pr_err(DRIVER_NAME ": read error"); return val; } static int menelaus_enable_irq(int irq) { if (irq > 7) { irq -= 8; the_menelaus->mask2 &= ~(1 << irq); return menelaus_write_reg(MENELAUS_INT_MASK2, the_menelaus->mask2); } else { the_menelaus->mask1 &= ~(1 << irq); return menelaus_write_reg(MENELAUS_INT_MASK1, the_menelaus->mask1); } } static int menelaus_disable_irq(int irq) { if (irq > 7) { irq -= 8; the_menelaus->mask2 |= (1 << irq); return menelaus_write_reg(MENELAUS_INT_MASK2, the_menelaus->mask2); } else { the_menelaus->mask1 |= (1 << irq); return menelaus_write_reg(MENELAUS_INT_MASK1, the_menelaus->mask1); } } static int menelaus_ack_irq(int irq) { if (irq > 7) return menelaus_write_reg(MENELAUS_INT_ACK2, 1 << (irq - 8)); else return menelaus_write_reg(MENELAUS_INT_ACK1, 1 << irq); } /* Adds a handler for an interrupt. 
Does not run in interrupt context */ static int menelaus_add_irq_work(int irq, void (*handler)(struct menelaus_chip *)) { int ret = 0; mutex_lock(&the_menelaus->lock); the_menelaus->handlers[irq] = handler; ret = menelaus_enable_irq(irq); mutex_unlock(&the_menelaus->lock); return ret; } /* Removes handler for an interrupt */ static int menelaus_remove_irq_work(int irq) { int ret = 0; mutex_lock(&the_menelaus->lock); ret = menelaus_disable_irq(irq); the_menelaus->handlers[irq] = NULL; mutex_unlock(&the_menelaus->lock); return ret; } /* * Gets scheduled when a card detect interrupt happens. Note that in some cases * this line is wired to card cover switch rather than the card detect switch * in each slot. In this case the cards are not seen by menelaus. * FIXME: Add handling for D1 too */ static void menelaus_mmc_cd_work(struct menelaus_chip *menelaus_hw) { int reg; unsigned char card_mask = 0; reg = menelaus_read_reg(MENELAUS_MCT_PIN_ST); if (reg < 0) return; if (!(reg & 0x1)) card_mask |= MCT_PIN_ST_S1_CD_ST; if (!(reg & 0x2)) card_mask |= MCT_PIN_ST_S2_CD_ST; if (menelaus_hw->mmc_callback) menelaus_hw->mmc_callback(menelaus_hw->mmc_callback_data, card_mask); } /* * Toggles the MMC slots between open-drain and push-pull mode. 
*/ int menelaus_set_mmc_opendrain(int slot, int enable) { int ret, val; if (slot != 1 && slot != 2) return -EINVAL; mutex_lock(&the_menelaus->lock); ret = menelaus_read_reg(MENELAUS_MCT_CTRL1); if (ret < 0) { mutex_unlock(&the_menelaus->lock); return ret; } val = ret; if (slot == 1) { if (enable) val |= MCT_CTRL1_S1_CMD_OD; else val &= ~MCT_CTRL1_S1_CMD_OD; } else { if (enable) val |= MCT_CTRL1_S2_CMD_OD; else val &= ~MCT_CTRL1_S2_CMD_OD; } ret = menelaus_write_reg(MENELAUS_MCT_CTRL1, val); mutex_unlock(&the_menelaus->lock); return ret; } EXPORT_SYMBOL(menelaus_set_mmc_opendrain); int menelaus_set_slot_sel(int enable) { int ret; mutex_lock(&the_menelaus->lock); ret = menelaus_read_reg(MENELAUS_GPIO_CTRL); if (ret < 0) goto out; ret |= GPIO2_DIR_INPUT; if (enable) ret |= GPIO_CTRL_SLOTSELEN; else ret &= ~GPIO_CTRL_SLOTSELEN; ret = menelaus_write_reg(MENELAUS_GPIO_CTRL, ret); out: mutex_unlock(&the_menelaus->lock); return ret; } EXPORT_SYMBOL(menelaus_set_slot_sel); int menelaus_set_mmc_slot(int slot, int enable, int power, int cd_en) { int ret, val; if (slot != 1 && slot != 2) return -EINVAL; if (power >= 3) return -EINVAL; mutex_lock(&the_menelaus->lock); ret = menelaus_read_reg(MENELAUS_MCT_CTRL2); if (ret < 0) goto out; val = ret; if (slot == 1) { if (cd_en) val |= MCT_CTRL2_S1CD_BUFEN | MCT_CTRL2_S1CD_DBEN; else val &= ~(MCT_CTRL2_S1CD_BUFEN | MCT_CTRL2_S1CD_DBEN); } else { if (cd_en) val |= MCT_CTRL2_S2CD_BUFEN | MCT_CTRL2_S2CD_BEN; else val &= ~(MCT_CTRL2_S2CD_BUFEN | MCT_CTRL2_S2CD_BEN); } ret = menelaus_write_reg(MENELAUS_MCT_CTRL2, val); if (ret < 0) goto out; ret = menelaus_read_reg(MENELAUS_MCT_CTRL3); if (ret < 0) goto out; val = ret; if (slot == 1) { if (enable) val |= MCT_CTRL3_SLOT1_EN; else val &= ~MCT_CTRL3_SLOT1_EN; } else { int b; if (enable) val |= MCT_CTRL3_SLOT2_EN; else val &= ~MCT_CTRL3_SLOT2_EN; b = menelaus_read_reg(MENELAUS_MCT_CTRL2); b &= ~(MCT_CTRL2_VS2_SEL_D0 | MCT_CTRL2_VS2_SEL_D1); b |= power; ret = 
menelaus_write_reg(MENELAUS_MCT_CTRL2, b); if (ret < 0) goto out; } /* Disable autonomous shutdown */ val &= ~(MCT_CTRL3_S1_AUTO_EN | MCT_CTRL3_S2_AUTO_EN); ret = menelaus_write_reg(MENELAUS_MCT_CTRL3, val); out: mutex_unlock(&the_menelaus->lock); return ret; } EXPORT_SYMBOL(menelaus_set_mmc_slot); int menelaus_register_mmc_callback(void (*callback)(void *data, u8 card_mask), void *data) { int ret = 0; the_menelaus->mmc_callback_data = data; the_menelaus->mmc_callback = callback; ret = menelaus_add_irq_work(MENELAUS_MMC_S1CD_IRQ, menelaus_mmc_cd_work); if (ret < 0) return ret; ret = menelaus_add_irq_work(MENELAUS_MMC_S2CD_IRQ, menelaus_mmc_cd_work); if (ret < 0) return ret; ret = menelaus_add_irq_work(MENELAUS_MMC_S1D1_IRQ, menelaus_mmc_cd_work); if (ret < 0) return ret; ret = menelaus_add_irq_work(MENELAUS_MMC_S2D1_IRQ, menelaus_mmc_cd_work); return ret; } EXPORT_SYMBOL(menelaus_register_mmc_callback); void menelaus_unregister_mmc_callback(void) { menelaus_remove_irq_work(MENELAUS_MMC_S1CD_IRQ); menelaus_remove_irq_work(MENELAUS_MMC_S2CD_IRQ); menelaus_remove_irq_work(MENELAUS_MMC_S1D1_IRQ); menelaus_remove_irq_work(MENELAUS_MMC_S2D1_IRQ); the_menelaus->mmc_callback = NULL; the_menelaus->mmc_callback_data = NULL; } EXPORT_SYMBOL(menelaus_unregister_mmc_callback); struct menelaus_vtg { const char *name; u8 vtg_reg; u8 vtg_shift; u8 vtg_bits; u8 mode_reg; }; struct menelaus_vtg_value { u16 vtg; u16 val; }; static int menelaus_set_voltage(const struct menelaus_vtg *vtg, int mV, int vtg_val, int mode) { int val, ret; struct i2c_client *c = the_menelaus->client; mutex_lock(&the_menelaus->lock); if (!vtg) goto set_voltage; ret = menelaus_read_reg(vtg->vtg_reg); if (ret < 0) goto out; val = ret & ~(((1 << vtg->vtg_bits) - 1) << vtg->vtg_shift); val |= vtg_val << vtg->vtg_shift; dev_dbg(&c->dev, "Setting voltage '%s'" "to %d mV (reg 0x%02x, val 0x%02x)\n", vtg->name, mV, vtg->vtg_reg, val); ret = menelaus_write_reg(vtg->vtg_reg, val); if (ret < 0) goto out; set_voltage: 
ret = menelaus_write_reg(vtg->mode_reg, mode); out: mutex_unlock(&the_menelaus->lock); if (ret == 0) { /* Wait for voltage to stabilize */ msleep(1); } return ret; } static int menelaus_get_vtg_value(int vtg, const struct menelaus_vtg_value *tbl, int n) { int i; for (i = 0; i < n; i++, tbl++) if (tbl->vtg == vtg) return tbl->val; return -EINVAL; } /* * Vcore can be programmed in two ways: * SW-controlled: Required voltage is programmed into VCORE_CTRL1 * HW-controlled: Required range (roof-floor) is programmed into VCORE_CTRL3 * and VCORE_CTRL4 * * Call correct 'set' function accordingly */ static const struct menelaus_vtg_value vcore_values[] = { { 1000, 0 }, { 1025, 1 }, { 1050, 2 }, { 1075, 3 }, { 1100, 4 }, { 1125, 5 }, { 1150, 6 }, { 1175, 7 }, { 1200, 8 }, { 1225, 9 }, { 1250, 10 }, { 1275, 11 }, { 1300, 12 }, { 1325, 13 }, { 1350, 14 }, { 1375, 15 }, { 1400, 16 }, { 1425, 17 }, { 1450, 18 }, }; int menelaus_set_vcore_sw(unsigned int mV) { int val, ret; struct i2c_client *c = the_menelaus->client; val = menelaus_get_vtg_value(mV, vcore_values, ARRAY_SIZE(vcore_values)); if (val < 0) return -EINVAL; dev_dbg(&c->dev, "Setting VCORE to %d mV (val 0x%02x)\n", mV, val); /* Set SW mode and the voltage in one go. 
*/ mutex_lock(&the_menelaus->lock); ret = menelaus_write_reg(MENELAUS_VCORE_CTRL1, val); if (ret == 0) the_menelaus->vcore_hw_mode = 0; mutex_unlock(&the_menelaus->lock); msleep(1); return ret; } int menelaus_set_vcore_hw(unsigned int roof_mV, unsigned int floor_mV) { int fval, rval, val, ret; struct i2c_client *c = the_menelaus->client; rval = menelaus_get_vtg_value(roof_mV, vcore_values, ARRAY_SIZE(vcore_values)); if (rval < 0) return -EINVAL; fval = menelaus_get_vtg_value(floor_mV, vcore_values, ARRAY_SIZE(vcore_values)); if (fval < 0) return -EINVAL; dev_dbg(&c->dev, "Setting VCORE FLOOR to %d mV and ROOF to %d mV\n", floor_mV, roof_mV); mutex_lock(&the_menelaus->lock); ret = menelaus_write_reg(MENELAUS_VCORE_CTRL3, fval); if (ret < 0) goto out; ret = menelaus_write_reg(MENELAUS_VCORE_CTRL4, rval); if (ret < 0) goto out; if (!the_menelaus->vcore_hw_mode) { val = menelaus_read_reg(MENELAUS_VCORE_CTRL1); /* HW mode, turn OFF byte comparator */ val |= (VCORE_CTRL1_HW_NSW | VCORE_CTRL1_BYP_COMP); ret = menelaus_write_reg(MENELAUS_VCORE_CTRL1, val); the_menelaus->vcore_hw_mode = 1; } msleep(1); out: mutex_unlock(&the_menelaus->lock); return ret; } static const struct menelaus_vtg vmem_vtg = { .name = "VMEM", .vtg_reg = MENELAUS_LDO_CTRL1, .vtg_shift = 0, .vtg_bits = 2, .mode_reg = MENELAUS_LDO_CTRL3, }; static const struct menelaus_vtg_value vmem_values[] = { { 1500, 0 }, { 1800, 1 }, { 1900, 2 }, { 2500, 3 }, }; int menelaus_set_vmem(unsigned int mV) { int val; if (mV == 0) return menelaus_set_voltage(&vmem_vtg, 0, 0, 0); val = menelaus_get_vtg_value(mV, vmem_values, ARRAY_SIZE(vmem_values)); if (val < 0) return -EINVAL; return menelaus_set_voltage(&vmem_vtg, mV, val, 0x02); } EXPORT_SYMBOL(menelaus_set_vmem); static const struct menelaus_vtg vio_vtg = { .name = "VIO", .vtg_reg = MENELAUS_LDO_CTRL1, .vtg_shift = 2, .vtg_bits = 2, .mode_reg = MENELAUS_LDO_CTRL4, }; static const struct menelaus_vtg_value vio_values[] = { { 1500, 0 }, { 1800, 1 }, { 2500, 2 }, { 2800, 
3 }, }; int menelaus_set_vio(unsigned int mV) { int val; if (mV == 0) return menelaus_set_voltage(&vio_vtg, 0, 0, 0); val = menelaus_get_vtg_value(mV, vio_values, ARRAY_SIZE(vio_values)); if (val < 0) return -EINVAL; return menelaus_set_voltage(&vio_vtg, mV, val, 0x02); } EXPORT_SYMBOL(menelaus_set_vio); static const struct menelaus_vtg_value vdcdc_values[] = { { 1500, 0 }, { 1800, 1 }, { 2000, 2 }, { 2200, 3 }, { 2400, 4 }, { 2800, 5 }, { 3000, 6 }, { 3300, 7 }, }; static const struct menelaus_vtg vdcdc2_vtg = { .name = "VDCDC2", .vtg_reg = MENELAUS_DCDC_CTRL1, .vtg_shift = 0, .vtg_bits = 3, .mode_reg = MENELAUS_DCDC_CTRL2, }; static const struct menelaus_vtg vdcdc3_vtg = { .name = "VDCDC3", .vtg_reg = MENELAUS_DCDC_CTRL1, .vtg_shift = 3, .vtg_bits = 3, .mode_reg = MENELAUS_DCDC_CTRL3, }; int menelaus_set_vdcdc(int dcdc, unsigned int mV) { const struct menelaus_vtg *vtg; int val; if (dcdc != 2 && dcdc != 3) return -EINVAL; if (dcdc == 2) vtg = &vdcdc2_vtg; else vtg = &vdcdc3_vtg; if (mV == 0) return menelaus_set_voltage(vtg, 0, 0, 0); val = menelaus_get_vtg_value(mV, vdcdc_values, ARRAY_SIZE(vdcdc_values)); if (val < 0) return -EINVAL; return menelaus_set_voltage(vtg, mV, val, 0x03); } static const struct menelaus_vtg_value vmmc_values[] = { { 1850, 0 }, { 2800, 1 }, { 3000, 2 }, { 3100, 3 }, }; static const struct menelaus_vtg vmmc_vtg = { .name = "VMMC", .vtg_reg = MENELAUS_LDO_CTRL1, .vtg_shift = 6, .vtg_bits = 2, .mode_reg = MENELAUS_LDO_CTRL7, }; int menelaus_set_vmmc(unsigned int mV) { int val; if (mV == 0) return menelaus_set_voltage(&vmmc_vtg, 0, 0, 0); val = menelaus_get_vtg_value(mV, vmmc_values, ARRAY_SIZE(vmmc_values)); if (val < 0) return -EINVAL; return menelaus_set_voltage(&vmmc_vtg, mV, val, 0x02); } EXPORT_SYMBOL(menelaus_set_vmmc); static const struct menelaus_vtg_value vaux_values[] = { { 1500, 0 }, { 1800, 1 }, { 2500, 2 }, { 2800, 3 }, }; static const struct menelaus_vtg vaux_vtg = { .name = "VAUX", .vtg_reg = MENELAUS_LDO_CTRL1, .vtg_shift = 
4, .vtg_bits = 2, .mode_reg = MENELAUS_LDO_CTRL6, }; int menelaus_set_vaux(unsigned int mV) { int val; if (mV == 0) return menelaus_set_voltage(&vaux_vtg, 0, 0, 0); val = menelaus_get_vtg_value(mV, vaux_values, ARRAY_SIZE(vaux_values)); if (val < 0) return -EINVAL; return menelaus_set_voltage(&vaux_vtg, mV, val, 0x02); } EXPORT_SYMBOL(menelaus_set_vaux); int menelaus_get_slot_pin_states(void) { return menelaus_read_reg(MENELAUS_MCT_PIN_ST); } EXPORT_SYMBOL(menelaus_get_slot_pin_states); int menelaus_set_regulator_sleep(int enable, u32 val) { int t, ret; struct i2c_client *c = the_menelaus->client; mutex_lock(&the_menelaus->lock); ret = menelaus_write_reg(MENELAUS_SLEEP_CTRL2, val); if (ret < 0) goto out; dev_dbg(&c->dev, "regulator sleep configuration: %02x\n", val); ret = menelaus_read_reg(MENELAUS_GPIO_CTRL); if (ret < 0) goto out; t = (GPIO_CTRL_SLPCTLEN | GPIO3_DIR_INPUT); if (enable) ret |= t; else ret &= ~t; ret = menelaus_write_reg(MENELAUS_GPIO_CTRL, ret); out: mutex_unlock(&the_menelaus->lock); return ret; } /*-----------------------------------------------------------------------*/ /* Handles Menelaus interrupts. Does not run in interrupt context */ static void menelaus_work(struct work_struct *_menelaus) { struct menelaus_chip *menelaus = container_of(_menelaus, struct menelaus_chip, work); void (*handler)(struct menelaus_chip *menelaus); while (1) { unsigned isr; isr = (menelaus_read_reg(MENELAUS_INT_STATUS2) & ~menelaus->mask2) << 8; isr |= menelaus_read_reg(MENELAUS_INT_STATUS1) & ~menelaus->mask1; if (!isr) break; while (isr) { int irq = fls(isr) - 1; isr &= ~(1 << irq); mutex_lock(&menelaus->lock); menelaus_disable_irq(irq); menelaus_ack_irq(irq); handler = menelaus->handlers[irq]; if (handler) handler(menelaus); menelaus_enable_irq(irq); mutex_unlock(&menelaus->lock); } } enable_irq(menelaus->client->irq); } /* * We cannot use I2C in interrupt context, so we just schedule work. 
*/ static irqreturn_t menelaus_irq(int irq, void *_menelaus) { struct menelaus_chip *menelaus = _menelaus; disable_irq_nosync(irq); (void)schedule_work(&menelaus->work); return IRQ_HANDLED; } /*-----------------------------------------------------------------------*/ /* * The RTC needs to be set once, then it runs on backup battery power. * It supports alarms, including system wake alarms (from some modes); * and 1/second IRQs if requested. */ #ifdef CONFIG_RTC_DRV_TWL92330 #define RTC_CTRL_RTC_EN (1 << 0) #define RTC_CTRL_AL_EN (1 << 1) #define RTC_CTRL_MODE12 (1 << 2) #define RTC_CTRL_EVERY_MASK (3 << 3) #define RTC_CTRL_EVERY_SEC (0 << 3) #define RTC_CTRL_EVERY_MIN (1 << 3) #define RTC_CTRL_EVERY_HR (2 << 3) #define RTC_CTRL_EVERY_DAY (3 << 3) #define RTC_UPDATE_EVERY 0x08 #define RTC_HR_PM (1 << 7) static void menelaus_to_time(char *regs, struct rtc_time *t) { t->tm_sec = bcd2bin(regs[0]); t->tm_min = bcd2bin(regs[1]); if (the_menelaus->rtc_control & RTC_CTRL_MODE12) { t->tm_hour = bcd2bin(regs[2] & 0x1f) - 1; if (regs[2] & RTC_HR_PM) t->tm_hour += 12; } else t->tm_hour = bcd2bin(regs[2] & 0x3f); t->tm_mday = bcd2bin(regs[3]); t->tm_mon = bcd2bin(regs[4]) - 1; t->tm_year = bcd2bin(regs[5]) + 100; } static int time_to_menelaus(struct rtc_time *t, int regnum) { int hour, status; status = menelaus_write_reg(regnum++, bin2bcd(t->tm_sec)); if (status < 0) goto fail; status = menelaus_write_reg(regnum++, bin2bcd(t->tm_min)); if (status < 0) goto fail; if (the_menelaus->rtc_control & RTC_CTRL_MODE12) { hour = t->tm_hour + 1; if (hour > 12) hour = RTC_HR_PM | bin2bcd(hour - 12); else hour = bin2bcd(hour); } else hour = bin2bcd(t->tm_hour); status = menelaus_write_reg(regnum++, hour); if (status < 0) goto fail; status = menelaus_write_reg(regnum++, bin2bcd(t->tm_mday)); if (status < 0) goto fail; status = menelaus_write_reg(regnum++, bin2bcd(t->tm_mon + 1)); if (status < 0) goto fail; status = menelaus_write_reg(regnum++, bin2bcd(t->tm_year - 100)); if (status < 0) goto 
fail; return 0; fail: dev_err(&the_menelaus->client->dev, "rtc write reg %02x, err %d\n", --regnum, status); return status; } static int menelaus_read_time(struct device *dev, struct rtc_time *t) { struct i2c_msg msg[2]; char regs[7]; int status; /* block read date and time registers */ regs[0] = MENELAUS_RTC_SEC; msg[0].addr = MENELAUS_I2C_ADDRESS; msg[0].flags = 0; msg[0].len = 1; msg[0].buf = regs; msg[1].addr = MENELAUS_I2C_ADDRESS; msg[1].flags = I2C_M_RD; msg[1].len = sizeof(regs); msg[1].buf = regs; status = i2c_transfer(the_menelaus->client->adapter, msg, 2); if (status != 2) { dev_err(dev, "%s error %d\n", "read", status); return -EIO; } menelaus_to_time(regs, t); t->tm_wday = bcd2bin(regs[6]); return 0; } static int menelaus_set_time(struct device *dev, struct rtc_time *t) { int status; /* write date and time registers */ status = time_to_menelaus(t, MENELAUS_RTC_SEC); if (status < 0) return status; status = menelaus_write_reg(MENELAUS_RTC_WKDAY, bin2bcd(t->tm_wday)); if (status < 0) { dev_err(&the_menelaus->client->dev, "rtc write reg %02x " "err %d\n", MENELAUS_RTC_WKDAY, status); return status; } /* now commit the write */ status = menelaus_write_reg(MENELAUS_RTC_UPDATE, RTC_UPDATE_EVERY); if (status < 0) dev_err(&the_menelaus->client->dev, "rtc commit time, err %d\n", status); return 0; } static int menelaus_read_alarm(struct device *dev, struct rtc_wkalrm *w) { struct i2c_msg msg[2]; char regs[6]; int status; /* block read alarm registers */ regs[0] = MENELAUS_RTC_AL_SEC; msg[0].addr = MENELAUS_I2C_ADDRESS; msg[0].flags = 0; msg[0].len = 1; msg[0].buf = regs; msg[1].addr = MENELAUS_I2C_ADDRESS; msg[1].flags = I2C_M_RD; msg[1].len = sizeof(regs); msg[1].buf = regs; status = i2c_transfer(the_menelaus->client->adapter, msg, 2); if (status != 2) { dev_err(dev, "%s error %d\n", "alarm read", status); return -EIO; } menelaus_to_time(regs, &w->time); w->enabled = !!(the_menelaus->rtc_control & RTC_CTRL_AL_EN); /* NOTE we *could* check if actually pending... 
*/ w->pending = 0; return 0; } static int menelaus_set_alarm(struct device *dev, struct rtc_wkalrm *w) { int status; if (the_menelaus->client->irq <= 0 && w->enabled) return -ENODEV; /* clear previous alarm enable */ if (the_menelaus->rtc_control & RTC_CTRL_AL_EN) { the_menelaus->rtc_control &= ~RTC_CTRL_AL_EN; status = menelaus_write_reg(MENELAUS_RTC_CTRL, the_menelaus->rtc_control); if (status < 0) return status; } /* write alarm registers */ status = time_to_menelaus(&w->time, MENELAUS_RTC_AL_SEC); if (status < 0) return status; /* enable alarm if requested */ if (w->enabled) { the_menelaus->rtc_control |= RTC_CTRL_AL_EN; status = menelaus_write_reg(MENELAUS_RTC_CTRL, the_menelaus->rtc_control); } return status; } #ifdef CONFIG_RTC_INTF_DEV static void menelaus_rtc_update_work(struct menelaus_chip *m) { /* report 1/sec update */ local_irq_disable(); rtc_update_irq(m->rtc, 1, RTC_IRQF | RTC_UF); local_irq_enable(); } static int menelaus_ioctl(struct device *dev, unsigned cmd, unsigned long arg) { int status; if (the_menelaus->client->irq <= 0) return -ENOIOCTLCMD; switch (cmd) { /* alarm IRQ */ case RTC_AIE_ON: if (the_menelaus->rtc_control & RTC_CTRL_AL_EN) return 0; the_menelaus->rtc_control |= RTC_CTRL_AL_EN; break; case RTC_AIE_OFF: if (!(the_menelaus->rtc_control & RTC_CTRL_AL_EN)) return 0; the_menelaus->rtc_control &= ~RTC_CTRL_AL_EN; break; /* 1/second "update" IRQ */ case RTC_UIE_ON: if (the_menelaus->uie) return 0; status = menelaus_remove_irq_work(MENELAUS_RTCTMR_IRQ); status = menelaus_add_irq_work(MENELAUS_RTCTMR_IRQ, menelaus_rtc_update_work); if (status == 0) the_menelaus->uie = 1; return status; case RTC_UIE_OFF: if (!the_menelaus->uie) return 0; status = menelaus_remove_irq_work(MENELAUS_RTCTMR_IRQ); if (status == 0) the_menelaus->uie = 0; return status; default: return -ENOIOCTLCMD; } return menelaus_write_reg(MENELAUS_RTC_CTRL, the_menelaus->rtc_control); } #else #define menelaus_ioctl NULL #endif /* REVISIT no compensation register support ... 
*/ static const struct rtc_class_ops menelaus_rtc_ops = { .ioctl = menelaus_ioctl, .read_time = menelaus_read_time, .set_time = menelaus_set_time, .read_alarm = menelaus_read_alarm, .set_alarm = menelaus_set_alarm, }; static void menelaus_rtc_alarm_work(struct menelaus_chip *m) { /* report alarm */ local_irq_disable(); rtc_update_irq(m->rtc, 1, RTC_IRQF | RTC_AF); local_irq_enable(); /* then disable it; alarms are oneshot */ the_menelaus->rtc_control &= ~RTC_CTRL_AL_EN; menelaus_write_reg(MENELAUS_RTC_CTRL, the_menelaus->rtc_control); } static inline void menelaus_rtc_init(struct menelaus_chip *m) { int alarm = (m->client->irq > 0); /* assume 32KDETEN pin is pulled high */ if (!(menelaus_read_reg(MENELAUS_OSC_CTRL) & 0x80)) { dev_dbg(&m->client->dev, "no 32k oscillator\n"); return; } /* support RTC alarm; it can issue wakeups */ if (alarm) { if (menelaus_add_irq_work(MENELAUS_RTCALM_IRQ, menelaus_rtc_alarm_work) < 0) { dev_err(&m->client->dev, "can't handle RTC alarm\n"); return; } device_init_wakeup(&m->client->dev, 1); } /* be sure RTC is enabled; allow 1/sec irqs; leave 12hr mode alone */ m->rtc_control = menelaus_read_reg(MENELAUS_RTC_CTRL); if (!(m->rtc_control & RTC_CTRL_RTC_EN) || (m->rtc_control & RTC_CTRL_AL_EN) || (m->rtc_control & RTC_CTRL_EVERY_MASK)) { if (!(m->rtc_control & RTC_CTRL_RTC_EN)) { dev_warn(&m->client->dev, "rtc clock needs setting\n"); m->rtc_control |= RTC_CTRL_RTC_EN; } m->rtc_control &= ~RTC_CTRL_EVERY_MASK; m->rtc_control &= ~RTC_CTRL_AL_EN; menelaus_write_reg(MENELAUS_RTC_CTRL, m->rtc_control); } m->rtc = rtc_device_register(DRIVER_NAME, &m->client->dev, &menelaus_rtc_ops, THIS_MODULE); if (IS_ERR(m->rtc)) { if (alarm) { menelaus_remove_irq_work(MENELAUS_RTCALM_IRQ); device_init_wakeup(&m->client->dev, 0); } dev_err(&m->client->dev, "can't register RTC: %d\n", (int) PTR_ERR(m->rtc)); the_menelaus->rtc = NULL; } } #else static inline void menelaus_rtc_init(struct menelaus_chip *m) { /* nothing */ } #endif 
/*-----------------------------------------------------------------------*/ static struct i2c_driver menelaus_i2c_driver; static int menelaus_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct menelaus_chip *menelaus; int rev = 0, val; int err = 0; struct menelaus_platform_data *menelaus_pdata = dev_get_platdata(&client->dev); if (the_menelaus) { dev_dbg(&client->dev, "only one %s for now\n", DRIVER_NAME); return -ENODEV; } menelaus = devm_kzalloc(&client->dev, sizeof(*menelaus), GFP_KERNEL); if (!menelaus) return -ENOMEM; i2c_set_clientdata(client, menelaus); the_menelaus = menelaus; menelaus->client = client; /* If a true probe check the device */ rev = menelaus_read_reg(MENELAUS_REV); if (rev < 0) { pr_err(DRIVER_NAME ": device not found"); return -ENODEV; } /* Ack and disable all Menelaus interrupts */ menelaus_write_reg(MENELAUS_INT_ACK1, 0xff); menelaus_write_reg(MENELAUS_INT_ACK2, 0xff); menelaus_write_reg(MENELAUS_INT_MASK1, 0xff); menelaus_write_reg(MENELAUS_INT_MASK2, 0xff); menelaus->mask1 = 0xff; menelaus->mask2 = 0xff; /* Set output buffer strengths */ menelaus_write_reg(MENELAUS_MCT_CTRL1, 0x73); if (client->irq > 0) { err = request_irq(client->irq, menelaus_irq, 0, DRIVER_NAME, menelaus); if (err) { dev_dbg(&client->dev, "can't get IRQ %d, err %d\n", client->irq, err); return err; } } mutex_init(&menelaus->lock); INIT_WORK(&menelaus->work, menelaus_work); pr_info("Menelaus rev %d.%d\n", rev >> 4, rev & 0x0f); val = menelaus_read_reg(MENELAUS_VCORE_CTRL1); if (val < 0) goto fail; if (val & (1 << 7)) menelaus->vcore_hw_mode = 1; else menelaus->vcore_hw_mode = 0; if (menelaus_pdata != NULL && menelaus_pdata->late_init != NULL) { err = menelaus_pdata->late_init(&client->dev); if (err < 0) goto fail; } menelaus_rtc_init(menelaus); return 0; fail: free_irq(client->irq, menelaus); flush_work(&menelaus->work); return err; } static int __exit menelaus_remove(struct i2c_client *client) { struct menelaus_chip *menelaus = 
i2c_get_clientdata(client); free_irq(client->irq, menelaus); flush_work(&menelaus->work); the_menelaus = NULL; return 0; } static const struct i2c_device_id menelaus_id[] = { { "menelaus", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, menelaus_id); static struct i2c_driver menelaus_i2c_driver = { .driver = { .name = DRIVER_NAME, }, .probe = menelaus_probe, .remove = __exit_p(menelaus_remove), .id_table = menelaus_id, }; static int __init menelaus_init(void) { int res; res = i2c_add_driver(&menelaus_i2c_driver); if (res < 0) { pr_err(DRIVER_NAME ": driver registration failed\n"); return res; } return 0; } static void __exit menelaus_exit(void) { i2c_del_driver(&menelaus_i2c_driver); /* FIXME: Shutdown menelaus parts that can be shut down */ } MODULE_AUTHOR("Texas Instruments, Inc. (and others)"); MODULE_DESCRIPTION("I2C interface for Menelaus."); MODULE_LICENSE("GPL"); module_init(menelaus_init); module_exit(menelaus_exit);
gpl-2.0
fanyukui/linux3.12.10
drivers/s390/char/vmlogrdr.c
460
22338
/* * character device driver for reading z/VM system service records * * * Copyright IBM Corp. 2004, 2009 * character device driver for reading z/VM system service records, * Version 1.0 * Author(s): Xenia Tkatschow <xenia@us.ibm.com> * Stefan Weinhuber <wein@de.ibm.com> * */ #define KMSG_COMPONENT "vmlogrdr" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/atomic.h> #include <asm/uaccess.h> #include <asm/cpcmd.h> #include <asm/debug.h> #include <asm/ebcdic.h> #include <net/iucv/iucv.h> #include <linux/kmod.h> #include <linux/cdev.h> #include <linux/device.h> #include <linux/string.h> MODULE_AUTHOR ("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n" " Stefan Weinhuber (wein@de.ibm.com)"); MODULE_DESCRIPTION ("Character device driver for reading z/VM " "system service records."); MODULE_LICENSE("GPL"); /* * The size of the buffer for iucv data transfer is one page, * but in addition to the data we read from iucv we also * place an integer and some characters into that buffer, * so the maximum size for record data is a little less then * one page. */ #define NET_BUFFER_SIZE (PAGE_SIZE - sizeof(int) - sizeof(FENCE)) /* * The elements that are concurrently accessed by bottom halves are * connection_established, iucv_path_severed, local_interrupt_buffer * and receive_ready. The first three can be protected by * priv_lock. receive_ready is atomic, so it can be incremented and * decremented without holding a lock. * The variable dev_in_use needs to be protected by the lock, since * it's a flag used by open to make sure that the device is opened only * by one user at the same time. 
*/ struct vmlogrdr_priv_t { char system_service[8]; char internal_name[8]; char recording_name[8]; struct iucv_path *path; int connection_established; int iucv_path_severed; struct iucv_message local_interrupt_buffer; atomic_t receive_ready; int minor_num; char * buffer; char * current_position; int remaining; ulong residual_length; int buffer_free; int dev_in_use; /* 1: already opened, 0: not opened*/ spinlock_t priv_lock; struct device *device; struct device *class_device; int autorecording; int autopurge; }; /* * File operation structure for vmlogrdr devices */ static int vmlogrdr_open(struct inode *, struct file *); static int vmlogrdr_release(struct inode *, struct file *); static ssize_t vmlogrdr_read (struct file *filp, char __user *data, size_t count, loff_t * ppos); static const struct file_operations vmlogrdr_fops = { .owner = THIS_MODULE, .open = vmlogrdr_open, .release = vmlogrdr_release, .read = vmlogrdr_read, .llseek = no_llseek, }; static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 ipuser[16]); static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 ipuser[16]); static void vmlogrdr_iucv_message_pending(struct iucv_path *, struct iucv_message *); static struct iucv_handler vmlogrdr_iucv_handler = { .path_complete = vmlogrdr_iucv_path_complete, .path_severed = vmlogrdr_iucv_path_severed, .message_pending = vmlogrdr_iucv_message_pending, }; static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue); static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue); /* * pointer to system service private structure * minor number 0 --> logrec * minor number 1 --> account * minor number 2 --> symptom */ static struct vmlogrdr_priv_t sys_ser[] = { { .system_service = "*LOGREC ", .internal_name = "logrec", .recording_name = "EREP", .minor_num = 0, .buffer_free = 1, .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock), .autorecording = 1, .autopurge = 1, }, { .system_service = "*ACCOUNT", .internal_name = "account", .recording_name = "ACCOUNT", .minor_num = 1, 
.buffer_free = 1, .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock), .autorecording = 1, .autopurge = 1, }, { .system_service = "*SYMPTOM", .internal_name = "symptom", .recording_name = "SYMPTOM", .minor_num = 2, .buffer_free = 1, .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock), .autorecording = 1, .autopurge = 1, } }; #define MAXMINOR (sizeof(sys_ser)/sizeof(struct vmlogrdr_priv_t)) static char FENCE[] = {"EOR"}; static int vmlogrdr_major = 0; static struct cdev *vmlogrdr_cdev = NULL; static int recording_class_AB; static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 ipuser[16]) { struct vmlogrdr_priv_t * logptr = path->private; spin_lock(&logptr->priv_lock); logptr->connection_established = 1; spin_unlock(&logptr->priv_lock); wake_up(&conn_wait_queue); } static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16]) { struct vmlogrdr_priv_t * logptr = path->private; u8 reason = (u8) ipuser[8]; pr_err("vmlogrdr: connection severed with reason %i\n", reason); iucv_path_sever(path, NULL); kfree(path); logptr->path = NULL; spin_lock(&logptr->priv_lock); logptr->connection_established = 0; logptr->iucv_path_severed = 1; spin_unlock(&logptr->priv_lock); wake_up(&conn_wait_queue); /* just in case we're sleeping waiting for a record */ wake_up_interruptible(&read_wait_queue); } static void vmlogrdr_iucv_message_pending(struct iucv_path *path, struct iucv_message *msg) { struct vmlogrdr_priv_t * logptr = path->private; /* * This function is the bottom half so it should be quick. 
* Copy the external interrupt data into our local eib and increment * the usage count */ spin_lock(&logptr->priv_lock); memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg)); atomic_inc(&logptr->receive_ready); spin_unlock(&logptr->priv_lock); wake_up_interruptible(&read_wait_queue); } static int vmlogrdr_get_recording_class_AB(void) { static const char cp_command[] = "QUERY COMMAND RECORDING "; char cp_response[80]; char *tail; int len,i; cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); len = strnlen(cp_response,sizeof(cp_response)); // now the parsing tail=strnchr(cp_response,len,'='); if (!tail) return 0; tail++; if (!strncmp("ANY",tail,3)) return 1; if (!strncmp("NONE",tail,4)) return 0; /* * expect comma separated list of classes here, if one of them * is A or B return 1 otherwise 0 */ for (i=tail-cp_response; i<len; i++) if ( cp_response[i]=='A' || cp_response[i]=='B' ) return 1; return 0; } static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, int action, int purge) { char cp_command[80]; char cp_response[160]; char *onoff, *qid_string; int rc; onoff = ((action == 1) ? "ON" : "OFF"); qid_string = ((recording_class_AB == 1) ? " QID * " : ""); /* * The recording commands needs to be called with option QID * for guests that have previlege classes A or B. * Purging has to be done as separate step, because recording * can't be switched on as long as records are on the queue. * Doing both at the same time doesn't work. 
*/ if (purge && (action == 1)) { memset(cp_command, 0x00, sizeof(cp_command)); memset(cp_response, 0x00, sizeof(cp_response)); snprintf(cp_command, sizeof(cp_command), "RECORDING %s PURGE %s", logptr->recording_name, qid_string); cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); } memset(cp_command, 0x00, sizeof(cp_command)); memset(cp_response, 0x00, sizeof(cp_response)); snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s", logptr->recording_name, onoff, qid_string); cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); /* The recording command will usually answer with 'Command complete' * on success, but when the specific service was never connected * before then there might be an additional informational message * 'HCPCRC8072I Recording entry not found' before the * 'Command complete'. So I use strstr rather then the strncmp. */ if (strstr(cp_response,"Command complete")) rc = 0; else rc = -EIO; /* * If we turn recording off, we have to purge any remaining records * afterwards, as a large number of queued records may impact z/VM * performance. 
*/ if (purge && (action == 0)) { memset(cp_command, 0x00, sizeof(cp_command)); memset(cp_response, 0x00, sizeof(cp_response)); snprintf(cp_command, sizeof(cp_command), "RECORDING %s PURGE %s", logptr->recording_name, qid_string); cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); } return rc; } static int vmlogrdr_open (struct inode *inode, struct file *filp) { int dev_num = 0; struct vmlogrdr_priv_t * logptr = NULL; int connect_rc = 0; int ret; dev_num = iminor(inode); if (dev_num >= MAXMINOR) return -ENODEV; logptr = &sys_ser[dev_num]; /* * only allow for blocking reads to be open */ if (filp->f_flags & O_NONBLOCK) return -EOPNOTSUPP; /* Besure this device hasn't already been opened */ spin_lock_bh(&logptr->priv_lock); if (logptr->dev_in_use) { spin_unlock_bh(&logptr->priv_lock); return -EBUSY; } logptr->dev_in_use = 1; logptr->connection_established = 0; logptr->iucv_path_severed = 0; atomic_set(&logptr->receive_ready, 0); logptr->buffer_free = 1; spin_unlock_bh(&logptr->priv_lock); /* set the file options */ filp->private_data = logptr; filp->f_op = &vmlogrdr_fops; /* start recording for this service*/ if (logptr->autorecording) { ret = vmlogrdr_recording(logptr,1,logptr->autopurge); if (ret) pr_warning("vmlogrdr: failed to start " "recording automatically\n"); } /* create connection to the system service */ logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL); if (!logptr->path) goto out_dev; connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler, logptr->system_service, NULL, NULL, logptr); if (connect_rc) { pr_err("vmlogrdr: iucv connection to %s " "failed with rc %i \n", logptr->system_service, connect_rc); goto out_path; } /* We've issued the connect and now we must wait for a * ConnectionComplete or ConnectinSevered Interrupt * before we can continue to process. 
*/ wait_event(conn_wait_queue, (logptr->connection_established) || (logptr->iucv_path_severed)); if (logptr->iucv_path_severed) goto out_record; nonseekable_open(inode, filp); return 0; out_record: if (logptr->autorecording) vmlogrdr_recording(logptr,0,logptr->autopurge); out_path: kfree(logptr->path); /* kfree(NULL) is ok. */ logptr->path = NULL; out_dev: logptr->dev_in_use = 0; return -EIO; } static int vmlogrdr_release (struct inode *inode, struct file *filp) { int ret; struct vmlogrdr_priv_t * logptr = filp->private_data; iucv_path_sever(logptr->path, NULL); kfree(logptr->path); logptr->path = NULL; if (logptr->autorecording) { ret = vmlogrdr_recording(logptr,0,logptr->autopurge); if (ret) pr_warning("vmlogrdr: failed to stop " "recording automatically\n"); } logptr->dev_in_use = 0; return 0; } static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) { int rc, *temp; /* we need to keep track of two data sizes here: * The number of bytes we need to receive from iucv and * the total number of bytes we actually write into the buffer. */ int user_data_count, iucv_data_count; char * buffer; if (atomic_read(&priv->receive_ready)) { spin_lock_bh(&priv->priv_lock); if (priv->residual_length){ /* receive second half of a record */ iucv_data_count = priv->residual_length; user_data_count = 0; buffer = priv->buffer; } else { /* receive a new record: * We need to return the total length of the record * + size of FENCE in the first 4 bytes of the buffer. */ iucv_data_count = priv->local_interrupt_buffer.length; user_data_count = sizeof(int); temp = (int*)priv->buffer; *temp= iucv_data_count + sizeof(FENCE); buffer = priv->buffer + sizeof(int); } /* * If the record is bigger than our buffer, we receive only * a part of it. We can get the rest later. 
*/ if (iucv_data_count > NET_BUFFER_SIZE) iucv_data_count = NET_BUFFER_SIZE; rc = iucv_message_receive(priv->path, &priv->local_interrupt_buffer, 0, buffer, iucv_data_count, &priv->residual_length); spin_unlock_bh(&priv->priv_lock); /* An rc of 5 indicates that the record was bigger than * the buffer, which is OK for us. A 9 indicates that the * record was purged befor we could receive it. */ if (rc == 5) rc = 0; if (rc == 9) atomic_set(&priv->receive_ready, 0); } else { rc = 1; } if (!rc) { priv->buffer_free = 0; user_data_count += iucv_data_count; priv->current_position = priv->buffer; if (priv->residual_length == 0){ /* the whole record has been captured, * now add the fence */ atomic_dec(&priv->receive_ready); buffer = priv->buffer + user_data_count; memcpy(buffer, FENCE, sizeof(FENCE)); user_data_count += sizeof(FENCE); } priv->remaining = user_data_count; } return rc; } static ssize_t vmlogrdr_read(struct file *filp, char __user *data, size_t count, loff_t * ppos) { int rc; struct vmlogrdr_priv_t * priv = filp->private_data; while (priv->buffer_free) { rc = vmlogrdr_receive_data(priv); if (rc) { rc = wait_event_interruptible(read_wait_queue, atomic_read(&priv->receive_ready)); if (rc) return rc; } } /* copy only up to end of record */ if (count > priv->remaining) count = priv->remaining; if (copy_to_user(data, priv->current_position, count)) return -EFAULT; *ppos += count; priv->current_position += count; priv->remaining -= count; /* if all data has been transferred, set buffer free */ if (priv->remaining == 0) priv->buffer_free = 1; return count; } static ssize_t vmlogrdr_autopurge_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count) { struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev); ssize_t ret = count; switch (buf[0]) { case '0': priv->autopurge=0; break; case '1': priv->autopurge=1; break; default: ret = -EINVAL; } return ret; } static ssize_t vmlogrdr_autopurge_show(struct device *dev, struct device_attribute 
*attr, char *buf) { struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev); return sprintf(buf, "%u\n", priv->autopurge); } static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show, vmlogrdr_autopurge_store); static ssize_t vmlogrdr_purge_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count) { char cp_command[80]; char cp_response[80]; struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev); if (buf[0] != '1') return -EINVAL; memset(cp_command, 0x00, sizeof(cp_command)); memset(cp_response, 0x00, sizeof(cp_response)); /* * The recording command needs to be called with option QID * for guests that have previlege classes A or B. * Other guests will not recognize the command and we have to * issue the same command without the QID parameter. */ if (recording_class_AB) snprintf(cp_command, sizeof(cp_command), "RECORDING %s PURGE QID * ", priv->recording_name); else snprintf(cp_command, sizeof(cp_command), "RECORDING %s PURGE ", priv->recording_name); cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); return count; } static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store); static ssize_t vmlogrdr_autorecording_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev); ssize_t ret = count; switch (buf[0]) { case '0': priv->autorecording=0; break; case '1': priv->autorecording=1; break; default: ret = -EINVAL; } return ret; } static ssize_t vmlogrdr_autorecording_show(struct device *dev, struct device_attribute *attr, char *buf) { struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev); return sprintf(buf, "%u\n", priv->autorecording); } static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show, vmlogrdr_autorecording_store); static ssize_t vmlogrdr_recording_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count) { struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev); ssize_t ret; switch (buf[0]) { case '0': 
ret = vmlogrdr_recording(priv,0,0); break; case '1': ret = vmlogrdr_recording(priv,1,0); break; default: ret = -EINVAL; } if (ret) return ret; else return count; } static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store); static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver, char *buf) { static const char cp_command[] = "QUERY RECORDING "; int len; cpcmd(cp_command, buf, 4096, NULL); len = strlen(buf); return len; } static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show, NULL); static struct attribute *vmlogrdr_drv_attrs[] = { &driver_attr_recording_status.attr, NULL, }; static struct attribute_group vmlogrdr_drv_attr_group = { .attrs = vmlogrdr_drv_attrs, }; static const struct attribute_group *vmlogrdr_drv_attr_groups[] = { &vmlogrdr_drv_attr_group, NULL, }; static struct attribute *vmlogrdr_attrs[] = { &dev_attr_autopurge.attr, &dev_attr_purge.attr, &dev_attr_autorecording.attr, &dev_attr_recording.attr, NULL, }; static struct attribute_group vmlogrdr_attr_group = { .attrs = vmlogrdr_attrs, }; static const struct attribute_group *vmlogrdr_attr_groups[] = { &vmlogrdr_attr_group, NULL, }; static int vmlogrdr_pm_prepare(struct device *dev) { int rc; struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev); rc = 0; if (priv) { spin_lock_bh(&priv->priv_lock); if (priv->dev_in_use) rc = -EBUSY; spin_unlock_bh(&priv->priv_lock); } if (rc) pr_err("vmlogrdr: device %s is busy. 
Refuse to suspend.\n", dev_name(dev)); return rc; } static const struct dev_pm_ops vmlogrdr_pm_ops = { .prepare = vmlogrdr_pm_prepare, }; static struct class *vmlogrdr_class; static struct device_driver vmlogrdr_driver = { .name = "vmlogrdr", .bus = &iucv_bus, .pm = &vmlogrdr_pm_ops, .groups = vmlogrdr_drv_attr_groups, }; static int vmlogrdr_register_driver(void) { int ret; /* Register with iucv driver */ ret = iucv_register(&vmlogrdr_iucv_handler, 1); if (ret) goto out; ret = driver_register(&vmlogrdr_driver); if (ret) goto out_iucv; vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr"); if (IS_ERR(vmlogrdr_class)) { ret = PTR_ERR(vmlogrdr_class); vmlogrdr_class = NULL; goto out_driver; } return 0; out_driver: driver_unregister(&vmlogrdr_driver); out_iucv: iucv_unregister(&vmlogrdr_iucv_handler, 1); out: return ret; } static void vmlogrdr_unregister_driver(void) { class_destroy(vmlogrdr_class); vmlogrdr_class = NULL; driver_unregister(&vmlogrdr_driver); iucv_unregister(&vmlogrdr_iucv_handler, 1); } static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) { struct device *dev; int ret; dev = kzalloc(sizeof(struct device), GFP_KERNEL); if (dev) { dev_set_name(dev, priv->internal_name); dev->bus = &iucv_bus; dev->parent = iucv_root; dev->driver = &vmlogrdr_driver; dev->groups = vmlogrdr_attr_groups; dev_set_drvdata(dev, priv); /* * The release function could be called after the * module has been unloaded. It's _only_ task is to * free the struct. Therefore, we specify kfree() * directly here. (Probably a little bit obfuscating * but legitime ...). 
*/ dev->release = (void (*)(struct device *))kfree; } else return -ENOMEM; ret = device_register(dev); if (ret) { put_device(dev); return ret; } priv->class_device = device_create(vmlogrdr_class, dev, MKDEV(vmlogrdr_major, priv->minor_num), priv, "%s", dev_name(dev)); if (IS_ERR(priv->class_device)) { ret = PTR_ERR(priv->class_device); priv->class_device=NULL; device_unregister(dev); return ret; } priv->device = dev; return 0; } static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv) { device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num)); if (priv->device != NULL) { device_unregister(priv->device); priv->device=NULL; } return 0; } static int vmlogrdr_register_cdev(dev_t dev) { int rc = 0; vmlogrdr_cdev = cdev_alloc(); if (!vmlogrdr_cdev) { return -ENOMEM; } vmlogrdr_cdev->owner = THIS_MODULE; vmlogrdr_cdev->ops = &vmlogrdr_fops; vmlogrdr_cdev->dev = dev; rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR); if (!rc) return 0; // cleanup: cdev is not fully registered, no cdev_del here! kobject_put(&vmlogrdr_cdev->kobj); vmlogrdr_cdev=NULL; return rc; } static void vmlogrdr_cleanup(void) { int i; if (vmlogrdr_cdev) { cdev_del(vmlogrdr_cdev); vmlogrdr_cdev=NULL; } for (i=0; i < MAXMINOR; ++i ) { vmlogrdr_unregister_device(&sys_ser[i]); free_page((unsigned long)sys_ser[i].buffer); } vmlogrdr_unregister_driver(); if (vmlogrdr_major) { unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR); vmlogrdr_major=0; } } static int __init vmlogrdr_init(void) { int rc; int i; dev_t dev; if (! 
MACHINE_IS_VM) { pr_err("not running under VM, driver not loaded.\n"); return -ENODEV; } recording_class_AB = vmlogrdr_get_recording_class_AB(); rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr"); if (rc) return rc; vmlogrdr_major = MAJOR(dev); rc=vmlogrdr_register_driver(); if (rc) goto cleanup; for (i=0; i < MAXMINOR; ++i ) { sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL); if (!sys_ser[i].buffer) { rc = -ENOMEM; break; } sys_ser[i].current_position = sys_ser[i].buffer; rc=vmlogrdr_register_device(&sys_ser[i]); if (rc) break; } if (rc) goto cleanup; rc = vmlogrdr_register_cdev(dev); if (rc) goto cleanup; return 0; cleanup: vmlogrdr_cleanup(); return rc; } static void __exit vmlogrdr_exit(void) { vmlogrdr_cleanup(); return; } module_init(vmlogrdr_init); module_exit(vmlogrdr_exit);
gpl-2.0
paloda/android_kernel_htc_evita
mm/percpu.c
716
58410
/* * mm/percpu.c - percpu memory allocator * * Copyright (C) 2009 SUSE Linux Products GmbH * Copyright (C) 2009 Tejun Heo <tj@kernel.org> * * This file is released under the GPLv2. * * This is percpu allocator which can handle both static and dynamic * areas. Percpu areas are allocated in chunks. Each chunk is * consisted of boot-time determined number of units and the first * chunk is used for static percpu variables in the kernel image * (special boot time alloc/init handling necessary as these areas * need to be brought up before allocation services are running). * Unit grows as necessary and all units grow or shrink in unison. * When a chunk is filled up, another chunk is allocated. * * c0 c1 c2 * ------------------- ------------------- ------------ * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u * ------------------- ...... ------------------- .... ------------ * * Allocation is done in offset-size areas of single unit space. Ie, * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0, * c1:u1, c1:u2 and c1:u3. On UMA, units corresponds directly to * cpus. On NUMA, the mapping can be non-linear and even sparse. * Percpu access can be done by configuring percpu base registers * according to cpu to unit mapping and pcpu_unit_size. * * There are usually many small percpu allocations many of them being * as small as 4 bytes. The allocator organizes chunks into lists * according to free size and tries to allocate from the fullest one. * Each chunk keeps the maximum contiguous area size hint which is * guaranteed to be equal to or larger than the maximum contiguous * area in the chunk. This helps the allocator not to iterate the * chunk maps unnecessarily. * * Allocation state in each chunk is kept using an array of integers * on chunk->map. A positive value in the map represents a free * region and negative allocated. Allocation inside a chunk is done * by scanning this map sequentially and serving the first matching * entry. 
This is mostly copied from the percpu_modalloc() allocator. * Chunks can be determined from the address using the index field * in the page struct. The index field contains a pointer to the chunk. * * To use this allocator, arch code should do the followings. * * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate * regular address to percpu pointer and back if they need to be * different from the default * * - use pcpu_setup_first_chunk() during percpu area initialization to * setup the first chunk containing the kernel static percpu area */ #include <linux/bitmap.h> #include <linux/bootmem.h> #include <linux/err.h> #include <linux/list.h> #include <linux/log2.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/percpu.h> #include <linux/pfn.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/vmalloc.h> #include <linux/workqueue.h> #include <linux/kmemleak.h> #include <asm/cacheflush.h> #include <asm/sections.h> #include <asm/tlbflush.h> #include <asm/io.h> #define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */ #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */ #ifdef CONFIG_SMP /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */ #ifndef __addr_to_pcpu_ptr #define __addr_to_pcpu_ptr(addr) \ (void __percpu *)((unsigned long)(addr) - \ (unsigned long)pcpu_base_addr + \ (unsigned long)__per_cpu_start) #endif #ifndef __pcpu_ptr_to_addr #define __pcpu_ptr_to_addr(ptr) \ (void __force *)((unsigned long)(ptr) + \ (unsigned long)pcpu_base_addr - \ (unsigned long)__per_cpu_start) #endif #else /* CONFIG_SMP */ /* on UP, it's always identity mapped */ #define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr) #define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr) #endif /* CONFIG_SMP */ struct pcpu_chunk { struct list_head list; /* linked to pcpu_slot lists */ int free_size; /* free bytes in the chunk */ int contig_hint; /* max contiguous size hint */ void 
*base_addr; /* base address of this chunk */ int map_used; /* # of map entries used */ int map_alloc; /* # of map entries allocated */ int *map; /* allocation map */ void *data; /* chunk data */ bool immutable; /* no [de]population allowed */ unsigned long populated[]; /* populated bitmap */ }; static int pcpu_unit_pages __read_mostly; static int pcpu_unit_size __read_mostly; static int pcpu_nr_units __read_mostly; static int pcpu_atom_size __read_mostly; static int pcpu_nr_slots __read_mostly; static size_t pcpu_chunk_struct_size __read_mostly; /* cpus with the lowest and highest unit addresses */ static unsigned int pcpu_low_unit_cpu __read_mostly; static unsigned int pcpu_high_unit_cpu __read_mostly; /* the address of the first chunk which starts with the kernel static area */ void *pcpu_base_addr __read_mostly; EXPORT_SYMBOL_GPL(pcpu_base_addr); static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */ const unsigned long *pcpu_unit_offsets __read_mostly; /* cpu -> unit offset */ /* group information, used for vm allocation */ static int pcpu_nr_groups __read_mostly; static const unsigned long *pcpu_group_offsets __read_mostly; static const size_t *pcpu_group_sizes __read_mostly; /* * The first chunk which always exists. Note that unlike other * chunks, this one can be allocated and mapped in several different * ways and thus often doesn't live in the vmalloc area. */ static struct pcpu_chunk *pcpu_first_chunk; /* * Optional reserved chunk. This chunk reserves part of the first * chunk and serves it for reserved allocations. The amount of * reserved offset is in pcpu_reserved_chunk_limit. When reserved * area doesn't exist, the following variables contain NULL and 0 * respectively. */ static struct pcpu_chunk *pcpu_reserved_chunk; static int pcpu_reserved_chunk_limit; /* * Synchronization rules. * * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former * protects allocation/reclaim paths, chunks, populated bitmap and * vmalloc mapping. 
The latter is a spinlock and protects the index * data structures - chunk slots, chunks and area maps in chunks. * * During allocation, pcpu_alloc_mutex is kept locked all the time and * pcpu_lock is grabbed and released as necessary. All actual memory * allocations are done using GFP_KERNEL with pcpu_lock released. In * general, percpu memory can't be allocated with irq off but * irqsave/restore are still used in alloc path so that it can be used * from early init path - sched_init() specifically. * * Free path accesses and alters only the index data structures, so it * can be safely called from atomic context. When memory needs to be * returned to the system, free path schedules reclaim_work which * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be * reclaimed, release both locks and frees the chunks. Note that it's * necessary to grab both locks to remove a chunk from circulation as * allocation path might be referencing the chunk with only * pcpu_alloc_mutex locked. */ static DEFINE_MUTEX(pcpu_alloc_mutex); /* protects whole alloc and reclaim */ static DEFINE_SPINLOCK(pcpu_lock); /* protects index data structures */ static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */ /* reclaim work to release fully free chunks, scheduled from free path */ static void pcpu_reclaim(struct work_struct *work); static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim); static bool pcpu_addr_in_first_chunk(void *addr) { void *first_start = pcpu_first_chunk->base_addr; return addr >= first_start && addr < first_start + pcpu_unit_size; } static bool pcpu_addr_in_reserved_chunk(void *addr) { void *first_start = pcpu_first_chunk->base_addr; return addr >= first_start && addr < first_start + pcpu_reserved_chunk_limit; } static int __pcpu_size_to_slot(int size) { int highbit = fls(size); /* size is in bytes */ return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1); } static int pcpu_size_to_slot(int size) { if (size == pcpu_unit_size) return pcpu_nr_slots - 1; 
return __pcpu_size_to_slot(size); } static int pcpu_chunk_slot(const struct pcpu_chunk *chunk) { if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int)) return 0; return pcpu_size_to_slot(chunk->free_size); } /* set the pointer to a chunk in a page struct */ static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu) { page->index = (unsigned long)pcpu; } /* obtain pointer to a chunk from a page struct */ static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page) { return (struct pcpu_chunk *)page->index; } static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx) { return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx; } static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk, unsigned int cpu, int page_idx) { return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT); } static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end) { *rs = find_next_zero_bit(chunk->populated, end, *rs); *re = find_next_bit(chunk->populated, end, *rs + 1); } static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end) { *rs = find_next_bit(chunk->populated, end, *rs); *re = find_next_zero_bit(chunk->populated, end, *rs + 1); } /* * (Un)populated page region iterators. Iterate over (un)populated * page regions between @start and @end in @chunk. @rs and @re should * be integer variables and will be set to start and end page index of * the current region. 
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))

/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vzalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size)
{
	/* too early in boot for slab - callers must cope with NULL */
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 * @size selects kfree() vs vfree() and must match the original
 * allocation size.
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		/*
		 * Growing chunks go to the head so they're found
		 * first; shrinking ones to the tail.
		 */
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 *
 * Determine whether area map of @chunk needs to be extended to
 * accommodate a new allocation.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
{
	int new_alloc;

	/*
	 * +2: an allocation may split one free region into up to
	 * three (head / area / tail), adding at most two map entries
	 * - see pcpu_split_block().
	 */
	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
		new_alloc *= 2;

	return new_alloc;
}

/**
 * pcpu_extend_area_map - extend area map of a chunk
 * @chunk: chunk of interest
 * @new_alloc: new target allocation length of the area map
 *
 * Extend area map of @chunk to have @new_alloc entries.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
	int *old = NULL, *new = NULL;
	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
	unsigned long flags;

	new = pcpu_mem_zalloc(new_size);
	if (!new)
		return -ENOMEM;

	/* acquire pcpu_lock and switch to new area map */
	spin_lock_irqsave(&pcpu_lock, flags);

	/*
	 * The caller sampled map_alloc without the lock; someone else
	 * may have extended the map meanwhile.  If so, just discard
	 * the new buffer below.
	 */
	if (new_alloc <= chunk->map_alloc)
		goto out_unlock;

	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
	old = chunk->map;

	memcpy(new, old, old_size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	new = NULL;

out_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * pcpu_mem_free() might end up calling vfree() which uses
	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
	 */
	pcpu_mem_free(old, old_size);
	pcpu_mem_free(new, new_size);

	return 0;
}

/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, @head bytes block is inserted before block @i moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and @tail byte block
 * is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}

/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	/*
	 * Walk the area map.  Map entries are sizes in bytes;
	 * positive means free, negative means allocated, so abs()
	 * advances the running offset either way.
	 */
	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				/* prev is allocated; head joins it */
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			pcpu_split_block(chunk, i, head, tail);
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];	/* negate == mark allocated */

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 *
 * CONTEXT:
 * pcpu_lock.
*/ static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme) { int oslot = pcpu_chunk_slot(chunk); int i, off; for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) if (off == freeme) break; BUG_ON(off != freeme); BUG_ON(chunk->map[i] > 0); chunk->map[i] = -chunk->map[i]; chunk->free_size += chunk->map[i]; /* merge with previous? */ if (i > 0 && chunk->map[i - 1] >= 0) { chunk->map[i - 1] += chunk->map[i]; chunk->map_used--; memmove(&chunk->map[i], &chunk->map[i + 1], (chunk->map_used - i) * sizeof(chunk->map[0])); i--; } /* merge with next? */ if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) { chunk->map[i] += chunk->map[i + 1]; chunk->map_used--; memmove(&chunk->map[i + 1], &chunk->map[i + 2], (chunk->map_used - (i + 1)) * sizeof(chunk->map[0])); } chunk->contig_hint = max(chunk->map[i], chunk->contig_hint); pcpu_chunk_relocate(chunk, oslot); } static struct pcpu_chunk *pcpu_alloc_chunk(void) { struct pcpu_chunk *chunk; chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size); if (!chunk) return NULL; chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0])); if (!chunk->map) { pcpu_mem_free(chunk, pcpu_chunk_struct_size); return NULL; } chunk->map_alloc = PCPU_DFL_MAP_ALLOC; chunk->map[chunk->map_used++] = pcpu_unit_size; INIT_LIST_HEAD(&chunk->list); chunk->free_size = pcpu_unit_size; chunk->contig_hint = pcpu_unit_size; return chunk; } static void pcpu_free_chunk(struct pcpu_chunk *chunk) { if (!chunk) return; pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0])); kfree(chunk); } /* * Chunk management implementation. * * To allow different implementations, chunk alloc/free and * [de]population are implemented in a separate file which is pulled * into this file and compiled together. The following functions * should be implemented. 
 *
 * pcpu_populate_chunk		- populate the specified range of a chunk
 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 * pcpu_create_chunk		- create a new chunk
 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page		- translate address to physical address
 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
static struct pcpu_chunk *pcpu_create_chunk(void);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

/* pick the chunk backend: kernel-memory based or vmalloc based */
#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the first chunk? */
	if (pcpu_addr_in_first_chunk(addr)) {
		/* is it in the reserved area? */
		if (pcpu_addr_in_reserved_chunk(addr))
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
{
	static int warn_limit = 10;	/* cap on failure warnings emitted */
	struct pcpu_chunk *chunk;
	const char *err;
	int slot, off, new_alloc;
	unsigned long flags;
	void __percpu *ptr;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;

		if (size > chunk->contig_hint) {
			err = "alloc from reserved chunk failed";
			goto fail_unlock;
		}

		/*
		 * Extending the map drops pcpu_lock (GFP_KERNEL
		 * alloc), hence the re-check loop.
		 */
		while ((new_alloc = pcpu_need_to_extend(chunk))) {
			spin_unlock_irqrestore(&pcpu_lock, flags);
			if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
				err = "failed to extend area map of reserved chunk";
				goto fail_unlock_mutex;
			}
			spin_lock_irqsave(&pcpu_lock, flags);
		}

		off = pcpu_alloc_area(chunk, size, align);
		if (off >= 0)
			goto area_found;

		err = "alloc from reserved chunk failed";
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			new_alloc = pcpu_need_to_extend(chunk);
			if (new_alloc) {
				spin_unlock_irqrestore(&pcpu_lock, flags);
				if (pcpu_extend_area_map(chunk,
							 new_alloc) < 0) {
					err = "failed to extend area map";
					goto fail_unlock_mutex;
				}
				spin_lock_irqsave(&pcpu_lock, flags);
				/*
				 * pcpu_lock has been dropped, need to
				 * restart cpu_slot list walking.
				 */
				goto restart;
			}

			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
		}
	}

	/* hmmm... no space left, create a new chunk */
	spin_unlock_irqrestore(&pcpu_lock, flags);

	chunk = pcpu_create_chunk();
	if (!chunk) {
		err = "failed to allocate new chunk";
		goto fail_unlock_mutex;
	}

	spin_lock_irqsave(&pcpu_lock, flags);
	pcpu_chunk_relocate(chunk, -1);
	goto restart;

area_found:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_free_area(chunk, off);
		err = "failed to populate";
		goto fail_unlock;
	}

	mutex_unlock(&pcpu_alloc_mutex);

	/* return address relative to base address */
	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
	kmemleak_alloc_percpu(ptr, size);
	return ptr;

fail_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);
fail_unlock_mutex:
	mutex_unlock(&pcpu_alloc_mutex);
	if (warn_limit) {
		pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
			   "%s\n", size, align, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("PERCPU: limit reached, disable warning\n");
	}
	return NULL;
}

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align.
 * Might sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align
 * from reserved percpu area if arch has set it up; otherwise,
 * allocation is served from the same dynamic area.  Might sleep.
 * Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true);
}

/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 *
 * CONTEXT:
 * workqueue context.
 */
static void pcpu_reclaim(struct work_struct *work)
{
	LIST_HEAD(todo);
	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	/* both locks: see the locking comment at the top of the file */
	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
			continue;

		list_move(&chunk->list, &todo);
	}

	spin_unlock_irq(&pcpu_lock);

	/* actual teardown may sleep, done outside pcpu_lock */
	list_for_each_entry_safe(chunk, next, &todo, list) {
		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
		pcpu_destroy_chunk(chunk);
	}

	mutex_unlock(&pcpu_alloc_mutex);
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void __percpu *ptr)
{
	void *addr;
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off;

	if (!ptr)
		return;

	kmemleak_free_percpu(ptr);

	addr = __pcpu_ptr_to_addr(ptr);

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->base_addr;

	pcpu_free_area(chunk, off);

	/* if there are more than one fully free chunks, wake up grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				schedule_work(&pcpu_reclaim_work);
				break;
			}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

/**
 * is_kernel_percpu_address - test whether address is from static percpu area
 * @addr: address to test
 *
 * Test whether @addr belongs to in-kernel static percpu area.  Module
 * static percpu areas are not considered.  For those, use
 * is_module_percpu_address().
 *
 * RETURNS:
 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 */
bool is_kernel_percpu_address(unsigned long addr)
{
#ifdef CONFIG_SMP
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		void *start = per_cpu_ptr(base, cpu);

		if ((void *)addr >= start && (void *)addr < start + static_size)
			return true;
	}
#endif
	/* on UP, can't distinguish from other static vars, always false */
	return false;
}

/**
 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 * @addr: the address to be converted to physical address
 *
 * Given @addr which is dereferenceable address obtained via one of
 * percpu access macros, this function translates it into its physical
 * address.  The caller is responsible for ensuring @addr stays valid
 * until this function finishes.
 *
 * percpu allocator has special setup for the first chunk, which currently
 * supports either embedding in linear address space or vmalloc mapping,
 * and, from the second one, the backing allocator (currently either vm or
 * km) provides translation.
 *
 * The addr can be tranlated simply without checking if it falls into the
 * first chunk.  But the current code reflects better how percpu allocator
 * actually works, and the verification can discover both bugs in percpu
 * allocator itself and per_cpu_ptr_to_phys() callers.  So we keep current
 * code.
 *
 * RETURNS:
 * The physical address for @addr.
 */
phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	bool in_first_chunk = false;
	unsigned long first_low, first_high;
	unsigned int cpu;

	/*
	 * The following test on unit_low/high isn't strictly
	 * necessary but will speed up lookups of addresses which
	 * aren't in the first chunk.
	 */
	first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
	first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
				     pcpu_unit_pages);
	if ((unsigned long)addr >= first_low &&
	    (unsigned long)addr < first_high) {
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(base, cpu);

			if (addr >= start && addr < start + pcpu_unit_size) {
				in_first_chunk = true;
				break;
			}
		}
	}

	if (in_first_chunk) {
		/* linear mapping (embed) vs vmalloc-mapped first chunk */
		if (!is_vmalloc_addr(addr))
			return __pa(addr);
		else
			return page_to_phys(vmalloc_to_page(addr)) +
			       offset_in_page(addr);
	} else
		return page_to_phys(pcpu_addr_to_page(addr)) +
		       offset_in_page(addr);
}

/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
 * pointer of other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
						      int nr_units)
{
	struct pcpu_alloc_info *ai;
	size_t base_size, ai_size;
	void *ptr;
	int unit;

	/* ai struct, group array and cpu_map share one bootmem block */
	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
			  __alignof__(ai->groups[0].cpu_map[0]));
	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

	ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
	if (!ptr)
		return NULL;
	ai = ptr;
	ptr += base_size;

	ai->groups[0].cpu_map = ptr;

	for (unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS;

	ai->nr_groups = nr_groups;
	ai->__ai_size = PFN_ALIGN(ai_size);

	return ai;
}

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
	free_bootmem(__pa(ai), ai->__ai_size);
}

/**
 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
 * @lvl: loglevel
 * @ai: allocation info to dump
 *
 * Print out information about @ai using loglevel @lvl.
 */
static void pcpu_dump_alloc_info(const char *lvl,
				 const struct pcpu_alloc_info *ai)
{
	int group_width = 1, cpu_width = 1, width;
	char empty_str[] = "--------";
	int alloc = 0, alloc_end = 0;
	int group, v;
	int upa, apl;	/* units per alloc, allocs per line */

	/* widths: digits needed for group and cpu numbers */
	v = ai->nr_groups;
	while (v /= 10)
		group_width++;

	v = num_possible_cpus();
	while (v /= 10)
		cpu_width++;
	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';

	upa = ai->alloc_size / ai->unit_size;
	width = upa * (cpu_width + 1) + group_width + 3;
	apl = rounddown_pow_of_two(max(60 / width, 1));

	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);

	for (group = 0; group < ai->nr_groups; group++) {
		const struct pcpu_group_info *gi = &ai->groups[group];
		int unit = 0, unit_end = 0;

		BUG_ON(gi->nr_units % upa);
		for (alloc_end += gi->nr_units / upa;
		     alloc < alloc_end; alloc++) {
			if (!(alloc % apl)) {
				printk(KERN_CONT "\n");
				printk("%spcpu-alloc: ", lvl);
			}
			printk(KERN_CONT "[%0*d] ", group_width, group);

			for (unit_end += upa; unit < unit_end; unit++)
				if (gi->cpu_map[unit] != NR_CPUS)
					printk(KERN_CONT "%0*d ", cpu_width,
					       gi->cpu_map[unit]);
				else
					printk(KERN_CONT "%s ", empty_str);
		}
	}
	printk(KERN_CONT "\n");
}

/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @ai: pcpu_alloc_info describing how to percpu area is shaped
 * @base_addr: mapped address
 *
 * Initialize the first percpu chunk which contains the kernel static
 * perpcu area.  This function is to be called from arch percpu area
 * setup path.
 *
 * @ai contains all information necessary to initialize the first
 * chunk and prime the dynamic percpu allocator.
 *
 * @ai->static_size is the size of static percpu area.
 *
 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk.  This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation.  This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
 * @ai->dyn_size determines the number of bytes available for dynamic
 * allocation in the first chunk.  The area between @ai->static_size +
 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
 *
 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
 * and equal to or larger than @ai->static_size + @ai->reserved_size +
 * @ai->dyn_size.
 *
 * @ai->atom_size is the allocation atom size and used as alignment
 * for vm areas.
 *
 * @ai->alloc_size is the allocation size and always multiple of
 * @ai->atom_size.  This is larger than @ai->atom_size if
 * @ai->unit_size is larger than @ai->atom_size.
 *
 * @ai->nr_groups and @ai->groups describe virtual memory layout of
 * percpu areas.  Units which should be colocated are put into the
 * same group.  Dynamic VM areas will be allocated according to these
 * groupings.  If @ai->nr_groups is zero, a single group containing
 * all units is assumed.
 *
 * The caller should have mapped the first chunk at @base_addr and
 * copied static data to each unit.
 *
 * If the first chunk ends up with both reserved and dynamic areas, it
 * is served by two chunks - one to serve the core static and reserved
 * areas and the other for the dynamic area.  They share the same vm
 * and page map but uses different area allocation map to stay away
 * from each other.  The latter chunk is circulated in the chunk slots
 * and available for dynamic allocation like any other chunks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
				  void *base_addr)
{
	static char cpus_buf[4096] __initdata;
	static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
	static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
	size_t dyn_size = ai->dyn_size;
	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
	struct pcpu_chunk *schunk, *dchunk = NULL;
	unsigned long *group_offsets;
	size_t *group_sizes;
	unsigned long *unit_off;
	unsigned int cpu;
	int *unit_map;
	int group, unit, i;

	cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);

#define PCPU_SETUP_BUG_ON(cond)	do {					\
	if (unlikely(cond)) {						\
		pr_emerg("PERCPU: failed to initialize, %s", #cond);	\
		pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf);	\
		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
		BUG();							\
	}								\
} while (0)

	/* sanity checks */
	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
#ifdef CONFIG_SMP
	PCPU_SETUP_BUG_ON(!ai->static_size);
	PCPU_SETUP_BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK);
#endif
	PCPU_SETUP_BUG_ON(!base_addr);
	PCPU_SETUP_BUG_ON((unsigned long)base_addr & ~PAGE_MASK);
	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
	PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);

	/* process group information and build config tables accordingly */
	group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
	group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
	unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
	unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		unit_map[cpu] = UINT_MAX;

	pcpu_low_unit_cpu = NR_CPUS;
	pcpu_high_unit_cpu = NR_CPUS;

	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
		const struct pcpu_group_info *gi = &ai->groups[group];

		group_offsets[group] = gi->base_offset;
		group_sizes[group] = gi->nr_units * ai->unit_size;

		for (i = 0; i < gi->nr_units; i++) {
			cpu = gi->cpu_map[i];
			if (cpu == NR_CPUS)
				continue;

			PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);

			unit_map[cpu] = unit + i;
			unit_off[cpu] = gi->base_offset + i * ai->unit_size;

			/* determine low/high unit_cpu */
			if (pcpu_low_unit_cpu == NR_CPUS ||
			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
				pcpu_low_unit_cpu = cpu;
			if (pcpu_high_unit_cpu == NR_CPUS ||
			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
				pcpu_high_unit_cpu = cpu;
		}
	}
	pcpu_nr_units = unit;

	for_each_possible_cpu(cpu)
		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);

	/* we're done parsing the input, undefine BUG macro and dump config */
#undef PCPU_SETUP_BUG_ON
	pcpu_dump_alloc_info(KERN_DEBUG, ai);

	pcpu_nr_groups = ai->nr_groups;
	pcpu_group_offsets = group_offsets;
	pcpu_group_sizes = group_sizes;
	pcpu_unit_map = unit_map;
	pcpu_unit_offsets = unit_off;

	/* determine basic parameters */
	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_atom_size = ai->atom_size;
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);

	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

	/*
	 * Initialize static chunk.  If reserved_size is zero, the
	 * static chunk covers static area + dynamic allocation area
	 * in the first chunk.  If reserved_size is not zero, it
	 * covers static area + reserved area (mostly used for module
	 * static percpu allocation).
	 */
	schunk = alloc_bootmem(pcpu_chunk_struct_size);
	INIT_LIST_HEAD(&schunk->list);
	schunk->base_addr = base_addr;
	schunk->map = smap;
	schunk->map_alloc = ARRAY_SIZE(smap);
	schunk->immutable = true;
	bitmap_fill(schunk->populated, pcpu_unit_pages);

	if (ai->reserved_size) {
		schunk->free_size = ai->reserved_size;
		pcpu_reserved_chunk = schunk;
		pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
	} else {
		schunk->free_size = dyn_size;
		dyn_size = 0;			/* dynamic area covered */
	}
	schunk->contig_hint = schunk->free_size;

	/* negative entry marks the static area as permanently allocated */
	schunk->map[schunk->map_used++] = -ai->static_size;
	if (schunk->free_size)
		schunk->map[schunk->map_used++] = schunk->free_size;

	/* init dynamic chunk if necessary */
	if (dyn_size) {
		dchunk = alloc_bootmem(pcpu_chunk_struct_size);
		INIT_LIST_HEAD(&dchunk->list);
		dchunk->base_addr = base_addr;
		dchunk->map = dmap;
		dchunk->map_alloc = ARRAY_SIZE(dmap);
		dchunk->immutable = true;
		bitmap_fill(dchunk->populated, pcpu_unit_pages);

		dchunk->contig_hint = dchunk->free_size = dyn_size;
		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
		dchunk->map[dchunk->map_used++] = dchunk->free_size;
	}

	/* link the first chunk in */
	pcpu_first_chunk = dchunk ?: schunk;
	pcpu_chunk_relocate(pcpu_first_chunk, -1);

	/* we're done */
	pcpu_base_addr = base_addr;
	return 0;
}

#ifdef CONFIG_SMP

const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
	[PCPU_FC_AUTO]	= "auto",
	[PCPU_FC_EMBED]	= "embed",
	[PCPU_FC_PAGE]	= "page",
};

enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;

/* parse the "percpu_alloc=" kernel parameter to pick a first-chunk type */
static int __init percpu_alloc_setup(char *str)
{
	if (0)
		/* nada */;
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
	else if (!strcmp(str, "embed"))
		pcpu_chosen_fc = PCPU_FC_EMBED;
#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
	else if (!strcmp(str, "page"))
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	else
		pr_warning("PERCPU: unknown allocator %s specified\n", str);

	return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);

/*
 * pcpu_embed_first_chunk() is used by the generic percpu setup.
 * Build it if needed by the arch config or the generic setup is going
 * to be used.
 */
#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
#define BUILD_EMBED_FIRST_CHUNK
#endif

/* build pcpu_page_first_chunk() iff needed by the arch config */
#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#define BUILD_PAGE_FIRST_CHUNK
#endif

/* pcpu_build_alloc_info() is used by both embed and page first chunk */
#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
/**
 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 *
 * This function determines grouping of units, their mappings to cpus
 * and other parameters considering needed percpu size, allocation
 * atom size and distances between CPUs.
 *
 * Groups are always mutliples of atom size and CPUs which are of
 * LOCAL_DISTANCE both ways are grouped together and share space for
 * units in the same group.  The returned configuration is guaranteed
 * to have CPUs on different nodes on different groups and >=75% usage
 * of allocated virtual address space.
 *
 * RETURNS:
 * On success, pointer to the new allocation_info is returned.  On
 * failure, ERR_PTR value is returned.
 */
static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
				size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
	static int group_map[NR_CPUS] __initdata;
	static int group_cnt[NR_CPUS] __initdata;
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	int nr_groups = 1, nr_units = 0;
	size_t size_sum, min_unit_size, alloc_size;
	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
	int last_allocs, group, unit;
	unsigned int cpu, tcpu;
	struct pcpu_alloc_info *ai;
	unsigned int *cpu_map;

	/* this function may be called multiple times */
	memset(group_map, 0, sizeof(group_map));
	memset(group_cnt, 0, sizeof(group_cnt));

	/* calculate size_sum and ensure dyn_size is enough for early alloc */
	size_sum = PFN_ALIGN(static_size + reserved_size +
			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
	dyn_size = size_sum - static_size - reserved_size;

	/*
	 * Determine min_unit_size, alloc_size and max_upa such that
	 * alloc_size is multiple of atom_size and is the smallest
	 * which can accommodate 4k aligned segments which are equal to
	 * or larger than min_unit_size.
	 */
	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);

	alloc_size = roundup(min_unit_size, atom_size);
	upa = alloc_size / min_unit_size;
	while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
		upa--;
	max_upa = upa;

	/* group cpus according to their proximity */
	for_each_possible_cpu(cpu) {
		group = 0;
	next_group:
		for_each_possible_cpu(tcpu) {
			if (cpu == tcpu)
				break;
			if (group_map[tcpu] == group && cpu_distance_fn &&
			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
				group++;
				nr_groups = max(nr_groups, group + 1);
				goto next_group;
			}
		}
		group_map[cpu] = group;
		group_cnt[group]++;
	}

	/*
	 * Expand unit size until address space usage goes over 75%
	 * and then as much as possible without using more address
	 * space.
	 */
	last_allocs = INT_MAX;
	for (upa = max_upa; upa; upa--) {
		int allocs = 0, wasted = 0;

		if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
			continue;

		for (group = 0; group < nr_groups; group++) {
			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
			allocs += this_allocs;
			wasted += this_allocs * upa - group_cnt[group];
		}

		/*
		 * Don't accept if wastage is over 1/3.  The
		 * greater-than comparison ensures upa==1 always
		 * passes the following check.
		 */
		if (wasted > num_possible_cpus() / 3)
			continue;

		/* and then don't consume more memory */
		if (allocs > last_allocs)
			break;
		last_allocs = allocs;
		best_upa = upa;
	}
	upa = best_upa;

	/* allocate and fill alloc_info */
	for (group = 0; group < nr_groups; group++)
		nr_units += roundup(group_cnt[group], upa);

	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
	if (!ai)
		return ERR_PTR(-ENOMEM);
	cpu_map = ai->groups[0].cpu_map;

	/* carve the shared cpu_map array up between the groups */
	for (group = 0; group < nr_groups; group++) {
		ai->groups[group].cpu_map = cpu_map;
		cpu_map += roundup(group_cnt[group], upa);
	}

	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	ai->unit_size = alloc_size / upa;
	ai->atom_size = atom_size;
	ai->alloc_size = alloc_size;

	for (group = 0, unit = 0; group_cnt[group]; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];

		/*
		 * Initialize base_offset as if all groups are located
		 * back-to-back.  The caller should update this to
		 * reflect actual allocation.
		 */
		gi->base_offset = unit * ai->unit_size;

		for_each_possible_cpu(cpu)
			if (group_map[cpu] == group)
				gi->cpu_map[gi->nr_units++] = cpu;
		gi->nr_units = roundup(gi->nr_units, upa);
		unit += gi->nr_units;
	}
	BUG_ON(unit != nr_units);

	return ai;
}
#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */

#if defined(BUILD_EMBED_FIRST_CHUNK)
/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 * @alloc_fn: function to allocate percpu page
 * @free_fn: function to free percpu page
 *
 * This is a helper to ease setting up embedded first percpu chunk and
 * can be called where pcpu_setup_first_chunk() is expected.
 *
 * If this function is used to setup the first chunk, it is allocated
 * by calling @alloc_fn and used as-is without being mapped into
 * vmalloc area.  Allocations are always whole multiples of @atom_size
 * aligned to @atom_size.
 *
 * This enables the first chunk to piggy back on the linear physical
 * mapping which often uses larger page size.  Please note that this
 * can result in very sparse cpu->unit mapping on NUMA machines thus
 * requiring large vmalloc address space.  Don't use this allocator if
 * vmalloc space is not orders of magnitude larger than distances
 * between node memory addresses (ie. 32bit NUMA machines).
 *
 * @dyn_size specifies the minimum dynamic area size.
 *
 * If the needed size is smaller than the minimum or specified unit
 * size, the leftover is returned using @free_fn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
*/ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, size_t atom_size, pcpu_fc_cpu_distance_fn_t cpu_distance_fn, pcpu_fc_alloc_fn_t alloc_fn, pcpu_fc_free_fn_t free_fn) { void *base = (void *)ULONG_MAX; void **areas = NULL; struct pcpu_alloc_info *ai; size_t size_sum, areas_size, max_distance; int group, i, rc; ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, cpu_distance_fn); if (IS_ERR(ai)) return PTR_ERR(ai); size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); areas = alloc_bootmem_nopanic(areas_size); if (!areas) { rc = -ENOMEM; goto out_free; } /* allocate, copy and determine base address */ for (group = 0; group < ai->nr_groups; group++) { struct pcpu_group_info *gi = &ai->groups[group]; unsigned int cpu = NR_CPUS; void *ptr; for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) cpu = gi->cpu_map[i]; BUG_ON(cpu == NR_CPUS); /* allocate space for the whole group */ ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size); if (!ptr) { rc = -ENOMEM; goto out_free_areas; } /* kmemleak tracks the percpu allocations separately */ kmemleak_free(ptr); areas[group] = ptr; base = min(ptr, base); } /* * Copy data and free unused parts. This should happen after all * allocations are complete; otherwise, we may end up with * overlapping groups. 
*/ for (group = 0; group < ai->nr_groups; group++) { struct pcpu_group_info *gi = &ai->groups[group]; void *ptr = areas[group]; for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { if (gi->cpu_map[i] == NR_CPUS) { /* unused unit, free whole */ free_fn(ptr, ai->unit_size); continue; } /* copy and return the unused part */ memcpy(ptr, __per_cpu_load, ai->static_size); free_fn(ptr + size_sum, ai->unit_size - size_sum); } } /* base address is now known, determine group base offsets */ max_distance = 0; for (group = 0; group < ai->nr_groups; group++) { ai->groups[group].base_offset = areas[group] - base; max_distance = max_t(size_t, max_distance, ai->groups[group].base_offset); } max_distance += ai->unit_size; /* warn if maximum distance is further than 75% of vmalloc space */ if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) { pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc " "space 0x%lx\n", max_distance, (unsigned long)(VMALLOC_END - VMALLOC_START)); #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK /* and fail if we have fallback */ rc = -EINVAL; goto out_free; #endif } pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n", PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size, ai->dyn_size, ai->unit_size); rc = pcpu_setup_first_chunk(ai, base); goto out_free; out_free_areas: for (group = 0; group < ai->nr_groups; group++) free_fn(areas[group], ai->groups[group].nr_units * ai->unit_size); out_free: pcpu_free_alloc_info(ai); if (areas) free_bootmem(__pa(areas), areas_size); return rc; } #endif /* BUILD_EMBED_FIRST_CHUNK */ #ifdef BUILD_PAGE_FIRST_CHUNK /** * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages * @reserved_size: the size of reserved percpu area in bytes * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE * @free_fn: function to free percpu page, always called with PAGE_SIZE * @populate_pte_fn: function to populate pte * * This is a helper to ease setting up page-remapped 
first percpu * chunk and can be called where pcpu_setup_first_chunk() is expected. * * This is the basic allocator. Static percpu area is allocated * page-by-page into vmalloc area. * * RETURNS: * 0 on success, -errno on failure. */ int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_alloc_fn_t alloc_fn, pcpu_fc_free_fn_t free_fn, pcpu_fc_populate_pte_fn_t populate_pte_fn) { static struct vm_struct vm; struct pcpu_alloc_info *ai; char psize_str[16]; int unit_pages; size_t pages_size; struct page **pages; int unit, i, j, rc; snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL); if (IS_ERR(ai)) return PTR_ERR(ai); BUG_ON(ai->nr_groups != 1); BUG_ON(ai->groups[0].nr_units != num_possible_cpus()); unit_pages = ai->unit_size >> PAGE_SHIFT; /* unaligned allocations can't be freed, round up to page size */ pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * sizeof(pages[0])); pages = alloc_bootmem(pages_size); /* allocate pages */ j = 0; for (unit = 0; unit < num_possible_cpus(); unit++) for (i = 0; i < unit_pages; i++) { unsigned int cpu = ai->groups[0].cpu_map[unit]; void *ptr; ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE); if (!ptr) { pr_warning("PERCPU: failed to allocate %s page " "for cpu%u\n", psize_str, cpu); goto enomem; } /* kmemleak tracks the percpu allocations separately */ kmemleak_free(ptr); pages[j++] = virt_to_page(ptr); } /* allocate vm area, map the pages and copy static data */ vm.flags = VM_ALLOC; vm.size = num_possible_cpus() * ai->unit_size; vm_area_register_early(&vm, PAGE_SIZE); for (unit = 0; unit < num_possible_cpus(); unit++) { unsigned long unit_addr = (unsigned long)vm.addr + unit * ai->unit_size; for (i = 0; i < unit_pages; i++) populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); /* pte already populated, the following shouldn't fail */ rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages], unit_pages); if (rc < 0) panic("failed to map percpu area, 
err=%d\n", rc); /* * FIXME: Archs with virtual cache should flush local * cache for the linear mapping here - something * equivalent to flush_cache_vmap() on the local cpu. * flush_cache_vmap() can't be used as most supporting * data structures are not set up yet. */ /* copy static data */ memcpy((void *)unit_addr, __per_cpu_load, ai->static_size); } /* we're ready, commit */ pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n", unit_pages, psize_str, vm.addr, ai->static_size, ai->reserved_size, ai->dyn_size); rc = pcpu_setup_first_chunk(ai, vm.addr); goto out_free_ar; enomem: while (--j >= 0) free_fn(page_address(pages[j]), PAGE_SIZE); rc = -ENOMEM; out_free_ar: free_bootmem(__pa(pages), pages_size); pcpu_free_alloc_info(ai); return rc; } #endif /* BUILD_PAGE_FIRST_CHUNK */ #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA /* * Generic SMP percpu area setup. * * The embedding helper is used because its behavior closely resembles * the original non-dynamic generic percpu area setup. This is * important because many archs have addressing restrictions and might * fail if the percpu area is located far away from the previous * location. As an added bonus, in non-NUMA cases, embedding is * generally a good idea TLB-wise because percpu area can piggy back * on the physical linear memory mapping which uses large page * mappings on applicable archs. */ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; EXPORT_SYMBOL(__per_cpu_offset); static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size, size_t align) { return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS)); } static void __init pcpu_dfl_fc_free(void *ptr, size_t size) { free_bootmem(__pa(ptr), size); } void __init setup_per_cpu_areas(void) { unsigned long delta; unsigned int cpu; int rc; /* * Always reserve area for module percpu variables. That's * what the legacy allocator did. 
*/ rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, pcpu_dfl_fc_alloc, pcpu_dfl_fc_free); if (rc < 0) panic("Failed to initialize percpu areas."); delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; for_each_possible_cpu(cpu) __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; } #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ #else /* CONFIG_SMP */ /* * UP percpu area setup. * * UP always uses km-based percpu allocator with identity mapping. * Static percpu variables are indistinguishable from the usual static * variables and don't require any special preparation. */ void __init setup_per_cpu_areas(void) { const size_t unit_size = roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE, PERCPU_DYNAMIC_RESERVE)); struct pcpu_alloc_info *ai; void *fc; ai = pcpu_alloc_alloc_info(1, 1); fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); if (!ai || !fc) panic("Failed to allocate memory for percpu areas."); /* kmemleak tracks the percpu allocations separately */ kmemleak_free(fc); ai->dyn_size = unit_size; ai->unit_size = unit_size; ai->atom_size = unit_size; ai->alloc_size = unit_size; ai->groups[0].nr_units = 1; ai->groups[0].cpu_map[0] = 0; if (pcpu_setup_first_chunk(ai, fc) < 0) panic("Failed to initialize percpu areas."); } #endif /* CONFIG_SMP */ /* * First and reserved chunks are initialized with temporary allocation * map in initdata so that they can be used before slab is online. * This function is called after slab is brought up and replaces those * with properly allocated maps. 
*/ void __init percpu_init_late(void) { struct pcpu_chunk *target_chunks[] = { pcpu_first_chunk, pcpu_reserved_chunk, NULL }; struct pcpu_chunk *chunk; unsigned long flags; int i; for (i = 0; (chunk = target_chunks[i]); i++) { int *map; const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]); BUILD_BUG_ON(size > PAGE_SIZE); map = pcpu_mem_zalloc(size); BUG_ON(!map); spin_lock_irqsave(&pcpu_lock, flags); memcpy(map, chunk->map, size); chunk->map = map; spin_unlock_irqrestore(&pcpu_lock, flags); } }
gpl-2.0
AOKP/kernel_lge_mako
drivers/input/misc/mpu3050.c
972
21028
/* * MPU3050 Tri-axis gyroscope driver * * Copyright (C) 2011 Wistron Co.Ltd * Joseph Lai <joseph_lai@wistron.com> * * Trimmed down by Alan Cox <alan@linux.intel.com> to produce this version * * This is a 'lite' version of the driver, while we consider the right way * to present the other features to user space. In particular it requires the * device has an IRQ, and it only provides an input interface, so is not much * use for device orientation. A fuller version is available from the Meego * tree. * * This program is based on bma023.c. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
* */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/input.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/pm_runtime.h> #include <linux/gpio.h> #include <linux/input/mpu3050.h> #include <linux/regulator/consumer.h> #define MPU3050_CHIP_ID 0x69 #define MPU3050_AUTO_DELAY 1000 #define MPU3050_MIN_VALUE -32768 #define MPU3050_MAX_VALUE 32767 #define MPU3050_MIN_POLL_INTERVAL 1 #define MPU3050_MAX_POLL_INTERVAL 250 #define MPU3050_DEFAULT_POLL_INTERVAL 200 #define MPU3050_DEFAULT_FS_RANGE 3 /* Register map */ #define MPU3050_CHIP_ID_REG 0x00 #define MPU3050_SMPLRT_DIV 0x15 #define MPU3050_DLPF_FS_SYNC 0x16 #define MPU3050_INT_CFG 0x17 #define MPU3050_XOUT_H 0x1D #define MPU3050_PWR_MGM 0x3E #define MPU3050_PWR_MGM_POS 6 /* Register bits */ /* DLPF_FS_SYNC */ #define MPU3050_EXT_SYNC_NONE 0x00 #define MPU3050_EXT_SYNC_TEMP 0x20 #define MPU3050_EXT_SYNC_GYROX 0x40 #define MPU3050_EXT_SYNC_GYROY 0x60 #define MPU3050_EXT_SYNC_GYROZ 0x80 #define MPU3050_EXT_SYNC_ACCELX 0xA0 #define MPU3050_EXT_SYNC_ACCELY 0xC0 #define MPU3050_EXT_SYNC_ACCELZ 0xE0 #define MPU3050_EXT_SYNC_MASK 0xE0 #define MPU3050_FS_250DPS 0x00 #define MPU3050_FS_500DPS 0x08 #define MPU3050_FS_1000DPS 0x10 #define MPU3050_FS_2000DPS 0x18 #define MPU3050_FS_MASK 0x18 #define MPU3050_DLPF_CFG_256HZ_NOLPF2 0x00 #define MPU3050_DLPF_CFG_188HZ 0x01 #define MPU3050_DLPF_CFG_98HZ 0x02 #define MPU3050_DLPF_CFG_42HZ 0x03 #define MPU3050_DLPF_CFG_20HZ 0x04 #define MPU3050_DLPF_CFG_10HZ 0x05 #define MPU3050_DLPF_CFG_5HZ 0x06 #define MPU3050_DLPF_CFG_2100HZ_NOLPF 0x07 #define MPU3050_DLPF_CFG_MASK 0x07 /* INT_CFG */ #define MPU3050_RAW_RDY_EN 0x01 #define MPU3050_MPU_RDY_EN 0x04 #define MPU3050_LATCH_INT_EN 0x20 #define MPU3050_OPEN_DRAIN 0x40 #define MPU3050_ACTIVE_LOW 0x80 /* PWR_MGM */ #define MPU3050_PWR_MGM_PLL_X 0x01 #define 
MPU3050_PWR_MGM_PLL_Y 0x02 #define MPU3050_PWR_MGM_PLL_Z 0x03 #define MPU3050_PWR_MGM_CLKSEL 0x07 #define MPU3050_PWR_MGM_STBY_ZG 0x08 #define MPU3050_PWR_MGM_STBY_YG 0x10 #define MPU3050_PWR_MGM_STBY_XG 0x20 #define MPU3050_PWR_MGM_SLEEP 0x40 #define MPU3050_PWR_MGM_RESET 0x80 #define MPU3050_PWR_MGM_MASK 0x40 struct axis_data { s16 x; s16 y; s16 z; }; struct mpu3050_sensor { struct i2c_client *client; struct device *dev; struct input_dev *idev; struct mpu3050_gyro_platform_data *platform_data; struct delayed_work input_work; u32 use_poll; u32 poll_interval; u32 dlpf_index; atomic_t enabled; }; struct sensor_regulator { struct regulator *vreg; const char *name; u32 min_uV; u32 max_uV; }; struct sensor_regulator mpu_vreg[] = { {NULL, "vdd", 2100000, 3600000}, {NULL, "vlogic", 1800000, 1800000}, }; struct dlpf_cfg_tb { u8 cfg; /* cfg index */ u32 lpf_bw; /* low pass filter bandwidth in Hz */ u32 sample_rate; /* analog sample rate in Khz, 1 or 8 */ }; static struct dlpf_cfg_tb dlpf_table[] = { {6, 5, 1}, {5, 10, 1}, {4, 20, 1}, {3, 42, 1}, {2, 98, 1}, {1, 188, 1}, {0, 256, 8}, }; static void mpu3050_set_power_mode(struct i2c_client *client, u8 val); static int mpu3050_start(struct mpu3050_sensor *sensor); static void mpu3050_stop(struct mpu3050_sensor *sensor); static u8 interval_to_dlpf_cfg(u32 interval) { u32 sample_rate = 1000 / interval; u32 i; /* the filter bandwidth needs to be greater or * equal to half of the sample rate */ for (i = 0; i < sizeof(dlpf_table)/sizeof(dlpf_table[0]); i++) { if (dlpf_table[i].lpf_bw * 2 >= sample_rate) return i; } /* return the maximum possible */ return --i; } static int mpu3050_config_regulator(struct i2c_client *client, bool on) { int rc = 0, i; int num_reg = sizeof(mpu_vreg) / sizeof(struct sensor_regulator); if (on) { for (i = 0; i < num_reg; i++) { mpu_vreg[i].vreg = regulator_get(&client->dev, mpu_vreg[i].name); if (IS_ERR(mpu_vreg[i].vreg)) { rc = PTR_ERR(mpu_vreg[i].vreg); pr_err("%s:regulator get failed rc=%d\n", 
__func__, rc); mpu_vreg[i].vreg = NULL; goto error_vdd; } if (regulator_count_voltages(mpu_vreg[i].vreg) > 0) { rc = regulator_set_voltage(mpu_vreg[i].vreg, mpu_vreg[i].min_uV, mpu_vreg[i].max_uV); if (rc) { pr_err("%s:set_voltage failed rc=%d\n", __func__, rc); regulator_put(mpu_vreg[i].vreg); mpu_vreg[i].vreg = NULL; goto error_vdd; } } rc = regulator_enable(mpu_vreg[i].vreg); if (rc) { pr_err("%s: regulator_enable failed rc =%d\n", __func__, rc); if (regulator_count_voltages( mpu_vreg[i].vreg) > 0) { regulator_set_voltage(mpu_vreg[i].vreg, 0, mpu_vreg[i].max_uV); } regulator_put(mpu_vreg[i].vreg); mpu_vreg[i].vreg = NULL; goto error_vdd; } } return rc; } else { i = num_reg; } error_vdd: while (--i >= 0) { if (!IS_ERR_OR_NULL(mpu_vreg[i].vreg)) { if (regulator_count_voltages( mpu_vreg[i].vreg) > 0) { regulator_set_voltage(mpu_vreg[i].vreg, 0, mpu_vreg[i].max_uV); } regulator_disable(mpu_vreg[i].vreg); regulator_put(mpu_vreg[i].vreg); mpu_vreg[i].vreg = NULL; } } return rc; } /** * mpu3050_attr_get_polling_rate - get the sampling rate */ static ssize_t mpu3050_attr_get_polling_rate(struct device *dev, struct device_attribute *attr, char *buf) { int val; struct mpu3050_sensor *sensor = dev_get_drvdata(dev); val = sensor ? 
sensor->poll_interval : 0; return snprintf(buf, 8, "%d\n", val); } /** * mpu3050_attr_set_polling_rate - set the sampling rate */ static ssize_t mpu3050_attr_set_polling_rate(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct mpu3050_sensor *sensor = dev_get_drvdata(dev); unsigned long interval_ms; unsigned int dlpf_index; u8 divider, reg; int ret; if (kstrtoul(buf, 10, &interval_ms)) return -EINVAL; if ((interval_ms < MPU3050_MIN_POLL_INTERVAL) || (interval_ms > MPU3050_MAX_POLL_INTERVAL)) return -EINVAL; dlpf_index = interval_to_dlpf_cfg(interval_ms); divider = interval_ms * dlpf_table[dlpf_index].sample_rate - 1; if (sensor->dlpf_index != dlpf_index) { /* Set low pass filter and full scale */ reg = dlpf_table[dlpf_index].cfg; reg |= MPU3050_DEFAULT_FS_RANGE << 3; reg |= MPU3050_EXT_SYNC_NONE << 5; ret = i2c_smbus_write_byte_data(sensor->client, MPU3050_DLPF_FS_SYNC, reg); if (ret == 0) sensor->dlpf_index = dlpf_index; } if (sensor->poll_interval != interval_ms) { /* Output frequency divider. 
The poll interval */ ret = i2c_smbus_write_byte_data(sensor->client, MPU3050_SMPLRT_DIV, divider); if (ret == 0) sensor->poll_interval = interval_ms; } return size; } static ssize_t mpu3050_attr_get_enable(struct device *dev, struct device_attribute *attr, char *buf) { struct mpu3050_sensor *sensor = dev_get_drvdata(dev); int val = atomic_read(&sensor->enabled); return snprintf(buf, sizeof(val) + 2, "%d\n", val); } static ssize_t mpu3050_attr_set_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct mpu3050_sensor *sensor = dev_get_drvdata(dev); unsigned long val; if (kstrtoul(buf, 10, &val)) return -EINVAL; if (val) mpu3050_start(sensor); else mpu3050_stop(sensor); return size; } static struct device_attribute attributes[] = { __ATTR(pollrate_ms, 0664, mpu3050_attr_get_polling_rate, mpu3050_attr_set_polling_rate), __ATTR(enable, 0664, mpu3050_attr_get_enable, mpu3050_attr_set_enable), }; static int create_sysfs_interfaces(struct device *dev) { int i; int err; for (i = 0; i < ARRAY_SIZE(attributes); i++) { err = device_create_file(dev, attributes + i); if (err) goto error; } return 0; error: for ( ; i >= 0; i--) device_remove_file(dev, attributes + i); dev_err(dev, "%s:Unable to create interface\n", __func__); return err; } static int remove_sysfs_interfaces(struct device *dev) { int i; for (i = 0; i < ARRAY_SIZE(attributes); i++) device_remove_file(dev, attributes + i); return 0; } /** * mpu3050_xyz_read_reg - read the axes values * @buffer: provide register addr and get register * @length: length of register * * Reads the register values in one transaction or returns a negative * error code on failure. */ static int mpu3050_xyz_read_reg(struct i2c_client *client, u8 *buffer, int length) { /* * Annoying we can't make this const because the i2c layer doesn't * declare input buffers const. 
*/ char cmd = MPU3050_XOUT_H; struct i2c_msg msg[] = { { .addr = client->addr, .flags = 0, .len = 1, .buf = &cmd, }, { .addr = client->addr, .flags = I2C_M_RD, .len = length, .buf = buffer, }, }; return i2c_transfer(client->adapter, msg, 2); } /** * mpu3050_read_xyz - get co-ordinates from device * @client: i2c address of sensor * @coords: co-ordinates to update * * Return the converted X Y and Z co-ordinates from the sensor device */ static void mpu3050_read_xyz(struct i2c_client *client, struct axis_data *coords) { u16 buffer[3]; mpu3050_xyz_read_reg(client, (u8 *)buffer, 6); coords->x = be16_to_cpu(buffer[0]); coords->y = be16_to_cpu(buffer[1]); coords->z = be16_to_cpu(buffer[2]); dev_dbg(&client->dev, "%s: x %d, y %d, z %d\n", __func__, coords->x, coords->y, coords->z); } /** * mpu3050_set_power_mode - set the power mode * @client: i2c client for the sensor * @val: value to switch on/off of power, 1: normal power, 0: low power * * Put device to normal-power mode or low-power mode. */ static void mpu3050_set_power_mode(struct i2c_client *client, u8 val) { u8 value; if (val) { mpu3050_config_regulator(client, 1); udelay(10); } value = i2c_smbus_read_byte_data(client, MPU3050_PWR_MGM); value = (value & ~MPU3050_PWR_MGM_MASK) | (((val << MPU3050_PWR_MGM_POS) & MPU3050_PWR_MGM_MASK) ^ MPU3050_PWR_MGM_MASK); i2c_smbus_write_byte_data(client, MPU3050_PWR_MGM, value); if (!val) { udelay(10); mpu3050_config_regulator(client, 0); } } /** * mpu3050_start - called when sensor is enabled via sysfs * @sensor: the sensor * * The function gets called when the sensor is enabled via sysfs. * Interrupts will be enabled and the device will be ready to provide data. 
* */ static int mpu3050_start(struct mpu3050_sensor *sensor) { int error; pm_runtime_get_sync(sensor->dev); /* Enable interrupts */ error = i2c_smbus_write_byte_data(sensor->client, MPU3050_INT_CFG, MPU3050_ACTIVE_LOW | MPU3050_OPEN_DRAIN | MPU3050_RAW_RDY_EN); if (error < 0) { pm_runtime_put(sensor->dev); return error; } if (sensor->use_poll) schedule_delayed_work(&sensor->input_work, msecs_to_jiffies(sensor->poll_interval)); return 0; } /** * mpu3050_stop - called when sensor is disabled via sysfs * @sensor: the sensor * * The function gets called when the sensor is disabled via sysfs. * Device will be pushed to suspend mode. * */ static void mpu3050_stop(struct mpu3050_sensor *sensor) { if (sensor->use_poll) cancel_delayed_work_sync(&sensor->input_work); pm_runtime_put(sensor->dev); } /** * mpu3050_interrupt_thread - handle an IRQ * @irq: interrupt numner * @data: the sensor * * Called by the kernel single threaded after an interrupt occurs. Read * the sensor data and generate an input event for it. 
*/ static irqreturn_t mpu3050_interrupt_thread(int irq, void *data) { struct mpu3050_sensor *sensor = data; struct axis_data axis; mpu3050_read_xyz(sensor->client, &axis); input_report_abs(sensor->idev, ABS_X, axis.x); input_report_abs(sensor->idev, ABS_Y, axis.y); input_report_abs(sensor->idev, ABS_Z, axis.z); input_sync(sensor->idev); return IRQ_HANDLED; } /** * mpu3050_input_work_fn - polling work * @work: the work struct * * Called by the work queue; read sensor data and generate an input * event */ static void mpu3050_input_work_fn(struct work_struct *work) { struct mpu3050_sensor *sensor; struct axis_data axis; sensor = container_of((struct delayed_work *)work, struct mpu3050_sensor, input_work); mpu3050_read_xyz(sensor->client, &axis); input_report_abs(sensor->idev, ABS_X, axis.x); input_report_abs(sensor->idev, ABS_Y, axis.y); input_report_abs(sensor->idev, ABS_Z, axis.z); input_sync(sensor->idev); if (sensor->use_poll) schedule_delayed_work(&sensor->input_work, msecs_to_jiffies(sensor->poll_interval)); } /** * mpu3050_hw_init - initialize hardware * @sensor: the sensor * * Called during device probe; configures the sampling method. */ static int __devinit mpu3050_hw_init(struct mpu3050_sensor *sensor) { struct i2c_client *client = sensor->client; int ret; u8 reg; /* Reset */ ret = i2c_smbus_write_byte_data(client, MPU3050_PWR_MGM, MPU3050_PWR_MGM_RESET); if (ret < 0) return ret; ret = i2c_smbus_read_byte_data(client, MPU3050_PWR_MGM); if (ret < 0) return ret; ret &= ~MPU3050_PWR_MGM_CLKSEL; ret |= MPU3050_PWR_MGM_PLL_Z; ret = i2c_smbus_write_byte_data(client, MPU3050_PWR_MGM, ret); if (ret < 0) return ret; /* Output frequency divider. 
The poll interval */ ret = i2c_smbus_write_byte_data(client, MPU3050_SMPLRT_DIV, sensor->poll_interval - 1); if (ret < 0) return ret; /* Set low pass filter and full scale */ reg = MPU3050_DLPF_CFG_42HZ; reg |= MPU3050_DEFAULT_FS_RANGE << 3; reg |= MPU3050_EXT_SYNC_NONE << 5; ret = i2c_smbus_write_byte_data(client, MPU3050_DLPF_FS_SYNC, reg); if (ret < 0) return ret; return 0; } /** * mpu3050_probe - device detection callback * @client: i2c client of found device * @id: id match information * * The I2C layer calls us when it believes a sensor is present at this * address. Probe to see if this is correct and to validate the device. * * If present install the relevant sysfs interfaces and input device. */ static int __devinit mpu3050_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct mpu3050_sensor *sensor; struct input_dev *idev; int ret; int error; sensor = kzalloc(sizeof(struct mpu3050_sensor), GFP_KERNEL); idev = input_allocate_device(); if (!sensor || !idev) { dev_err(&client->dev, "failed to allocate driver data\n"); error = -ENOMEM; goto err_free_mem; } sensor->client = client; sensor->dev = &client->dev; sensor->idev = idev; sensor->platform_data = client->dev.platform_data; i2c_set_clientdata(client, sensor); if (sensor->platform_data) { u32 interval = sensor->platform_data->poll_interval; if ((interval < MPU3050_MIN_POLL_INTERVAL) || (interval > MPU3050_MAX_POLL_INTERVAL)) sensor->poll_interval = MPU3050_DEFAULT_POLL_INTERVAL; else sensor->poll_interval = interval; } else { sensor->poll_interval = MPU3050_DEFAULT_POLL_INTERVAL; } mpu3050_set_power_mode(client, 1); atomic_set(&sensor->enabled, 1); msleep(10); ret = i2c_smbus_read_byte_data(client, MPU3050_CHIP_ID_REG); if (ret < 0) { dev_err(&client->dev, "failed to detect device\n"); error = -ENXIO; goto err_free_mem; } if (ret != MPU3050_CHIP_ID) { dev_err(&client->dev, "unsupported chip id\n"); error = -ENXIO; goto err_free_mem; } idev->name = "MPU3050"; idev->id.bustype = BUS_I2C; 
idev->dev.parent = &client->dev; __set_bit(EV_ABS, idev->evbit); input_set_abs_params(idev, ABS_X, MPU3050_MIN_VALUE, MPU3050_MAX_VALUE, 0, 0); input_set_abs_params(idev, ABS_Y, MPU3050_MIN_VALUE, MPU3050_MAX_VALUE, 0, 0); input_set_abs_params(idev, ABS_Z, MPU3050_MIN_VALUE, MPU3050_MAX_VALUE, 0, 0); input_set_drvdata(idev, sensor); pm_runtime_set_active(&client->dev); error = mpu3050_hw_init(sensor); if (error) goto err_pm_set_suspended; if (client->irq == 0) { sensor->use_poll = 1; INIT_DELAYED_WORK(&sensor->input_work, mpu3050_input_work_fn); } else { sensor->use_poll = 0; if (gpio_is_valid(sensor->platform_data->gpio_int)) { /* configure interrupt gpio */ ret = gpio_request(sensor->platform_data->gpio_int, "gyro_gpio_int"); if (ret) { pr_err("%s: unable to request interrupt gpio %d\n", __func__, sensor->platform_data->gpio_int); goto err_pm_set_suspended; } ret = gpio_direction_input( sensor->platform_data->gpio_int); if (ret) { pr_err("%s: unable to set direction for gpio %d\n", __func__, sensor->platform_data->gpio_int); goto err_free_gpio; } } error = request_threaded_irq(client->irq, NULL, mpu3050_interrupt_thread, IRQF_TRIGGER_FALLING, "mpu3050", sensor); if (error) { dev_err(&client->dev, "can't get IRQ %d, error %d\n", client->irq, error); goto err_pm_set_suspended; } } error = input_register_device(idev); if (error) { dev_err(&client->dev, "failed to register input device\n"); goto err_free_irq; } error = create_sysfs_interfaces(&client->dev); if (error < 0) { dev_err(&client->dev, "failed to create sysfs\n"); goto err_input_cleanup; } pm_runtime_enable(&client->dev); pm_runtime_set_autosuspend_delay(&client->dev, MPU3050_AUTO_DELAY); return 0; err_input_cleanup: input_unregister_device(idev); err_free_irq: if (client->irq > 0) free_irq(client->irq, sensor); err_free_gpio: if ((client->irq > 0) && (gpio_is_valid(sensor->platform_data->gpio_int))) gpio_free(sensor->platform_data->gpio_int); err_pm_set_suspended: pm_runtime_set_suspended(&client->dev); 
err_free_mem: input_free_device(idev); kfree(sensor); return error; } /** * mpu3050_remove - remove a sensor * @client: i2c client of sensor being removed * * Our sensor is going away, clean up the resources. */ static int __devexit mpu3050_remove(struct i2c_client *client) { struct mpu3050_sensor *sensor = i2c_get_clientdata(client); pm_runtime_disable(&client->dev); pm_runtime_set_suspended(&client->dev); if (client->irq) free_irq(client->irq, sensor); remove_sysfs_interfaces(&client->dev); input_unregister_device(sensor->idev); kfree(sensor); return 0; } #ifdef CONFIG_PM /** * mpu3050_suspend - called on device suspend * @dev: device being suspended * * Put the device into sleep mode before we suspend the machine. */ static int mpu3050_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct mpu3050_sensor *sensor = i2c_get_clientdata(client); if (atomic_cmpxchg(&sensor->enabled, 1, 0)) { mpu3050_set_power_mode (sensor->client, 0); } return 0; } /** * mpu3050_resume - called on device resume * @dev: device being resumed * * Put the device into powered mode on resume. 
*/ static int mpu3050_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct mpu3050_sensor *sensor = i2c_get_clientdata(client); if (!atomic_cmpxchg(&sensor->enabled, 0, 1)) { mpu3050_set_power_mode (sensor->client, 1); } msleep(100); /* wait for gyro chip resume */ return 0; } #endif static UNIVERSAL_DEV_PM_OPS(mpu3050_pm, mpu3050_suspend, mpu3050_resume, NULL); static const struct i2c_device_id mpu3050_ids[] = { { "mpu3050", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, mpu3050_ids); static const struct of_device_id mpu3050_of_match[] = { { .compatible = "invn,mpu3050", }, { }, }; MODULE_DEVICE_TABLE(of, mpu3050_of_match); static struct i2c_driver mpu3050_i2c_driver = { .driver = { .name = "mpu3050", .owner = THIS_MODULE, .pm = &mpu3050_pm, .of_match_table = mpu3050_of_match, }, .probe = mpu3050_probe, .remove = __devexit_p(mpu3050_remove), .id_table = mpu3050_ids, }; module_i2c_driver(mpu3050_i2c_driver); MODULE_AUTHOR("Wistron Corp."); MODULE_DESCRIPTION("MPU3050 Tri-axis gyroscope driver"); MODULE_LICENSE("GPL");
gpl-2.0
Kali-/htc-kernel
fs/jfs/jfs_mount.c
1740
12996
/* * Copyright (C) International Business Machines Corp., 2000-2004 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * Module: jfs_mount.c * * note: file system in transition to aggregate/fileset: * * file system mount is interpreted as the mount of aggregate, * if not already mounted, and mount of the single/only fileset in * the aggregate; * * a file system/aggregate is represented by an internal inode * (aka mount inode) initialized with aggregate superblock; * each vfs represents a fileset, and points to its "fileset inode * allocation map inode" (aka fileset inode): * (an aggregate itself is structured recursively as a filset: * an internal vfs is constructed and points to its "fileset inode * allocation map inode" (aka aggregate inode) where each inode * represents a fileset inode) so that inode number is mapped to * on-disk inode in uniform way at both aggregate and fileset level; * * each vnode/inode of a fileset is linked to its vfs (to facilitate * per fileset inode operations, e.g., unmount of a fileset, etc.); * each inode points to the mount inode (to facilitate access to * per aggregate information, e.g., block size, etc.) as well as * its file set inode. * * aggregate * ipmnt * mntvfs -> fileset ipimap+ -> aggregate ipbmap -> aggregate ipaimap; * fileset vfs -> vp(1) <-> ... 
<-> vp(n) <->vproot; */ #include <linux/fs.h> #include <linux/buffer_head.h> #include "jfs_incore.h" #include "jfs_filsys.h" #include "jfs_superblock.h" #include "jfs_dmap.h" #include "jfs_imap.h" #include "jfs_metapage.h" #include "jfs_debug.h" /* * forward references */ static int chkSuper(struct super_block *); static int logMOUNT(struct super_block *sb); /* * NAME: jfs_mount(sb) * * FUNCTION: vfs_mount() * * PARAMETER: sb - super block * * RETURN: -EBUSY - device already mounted or open for write * -EBUSY - cvrdvp already mounted; * -EBUSY - mount table full * -ENOTDIR- cvrdvp not directory on a device mount * -ENXIO - device open failure */ int jfs_mount(struct super_block *sb) { int rc = 0; /* Return code */ struct jfs_sb_info *sbi = JFS_SBI(sb); struct inode *ipaimap = NULL; struct inode *ipaimap2 = NULL; struct inode *ipimap = NULL; struct inode *ipbmap = NULL; /* * read/validate superblock * (initialize mount inode from the superblock) */ if ((rc = chkSuper(sb))) { goto errout20; } ipaimap = diReadSpecial(sb, AGGREGATE_I, 0); if (ipaimap == NULL) { jfs_err("jfs_mount: Faild to read AGGREGATE_I"); rc = -EIO; goto errout20; } sbi->ipaimap = ipaimap; jfs_info("jfs_mount: ipaimap:0x%p", ipaimap); /* * initialize aggregate inode allocation map */ if ((rc = diMount(ipaimap))) { jfs_err("jfs_mount: diMount(ipaimap) failed w/rc = %d", rc); goto errout21; } /* * open aggregate block allocation map */ ipbmap = diReadSpecial(sb, BMAP_I, 0); if (ipbmap == NULL) { rc = -EIO; goto errout22; } jfs_info("jfs_mount: ipbmap:0x%p", ipbmap); sbi->ipbmap = ipbmap; /* * initialize aggregate block allocation map */ if ((rc = dbMount(ipbmap))) { jfs_err("jfs_mount: dbMount failed w/rc = %d", rc); goto errout22; } /* * open the secondary aggregate inode allocation map * * This is a duplicate of the aggregate inode allocation map. * * hand craft a vfs in the same fashion as we did to read ipaimap. 
* By adding INOSPEREXT (32) to the inode number, we are telling * diReadSpecial that we are reading from the secondary aggregate * inode table. This also creates a unique entry in the inode hash * table. */ if ((sbi->mntflag & JFS_BAD_SAIT) == 0) { ipaimap2 = diReadSpecial(sb, AGGREGATE_I, 1); if (!ipaimap2) { jfs_err("jfs_mount: Faild to read AGGREGATE_I"); rc = -EIO; goto errout35; } sbi->ipaimap2 = ipaimap2; jfs_info("jfs_mount: ipaimap2:0x%p", ipaimap2); /* * initialize secondary aggregate inode allocation map */ if ((rc = diMount(ipaimap2))) { jfs_err("jfs_mount: diMount(ipaimap2) failed, rc = %d", rc); goto errout35; } } else /* Secondary aggregate inode table is not valid */ sbi->ipaimap2 = NULL; /* * mount (the only/single) fileset */ /* * open fileset inode allocation map (aka fileset inode) */ ipimap = diReadSpecial(sb, FILESYSTEM_I, 0); if (ipimap == NULL) { jfs_err("jfs_mount: Failed to read FILESYSTEM_I"); /* open fileset secondary inode allocation map */ rc = -EIO; goto errout40; } jfs_info("jfs_mount: ipimap:0x%p", ipimap); /* map further access of per fileset inodes by the fileset inode */ sbi->ipimap = ipimap; /* initialize fileset inode allocation map */ if ((rc = diMount(ipimap))) { jfs_err("jfs_mount: diMount failed w/rc = %d", rc); goto errout41; } goto out; /* * unwind on error */ errout41: /* close fileset inode allocation map inode */ diFreeSpecial(ipimap); errout40: /* fileset closed */ /* close secondary aggregate inode allocation map */ if (ipaimap2) { diUnmount(ipaimap2, 1); diFreeSpecial(ipaimap2); } errout35: /* close aggregate block allocation map */ dbUnmount(ipbmap, 1); diFreeSpecial(ipbmap); errout22: /* close aggregate inode allocation map */ diUnmount(ipaimap, 1); errout21: /* close aggregate inodes */ diFreeSpecial(ipaimap); errout20: /* aggregate closed */ out: if (rc) jfs_err("Mount JFS Failure: %d", rc); return rc; } /* * NAME: jfs_mount_rw(sb, remount) * * FUNCTION: Completes read-write mount, or remounts read-only volume * 
as read-write */ int jfs_mount_rw(struct super_block *sb, int remount) { struct jfs_sb_info *sbi = JFS_SBI(sb); int rc; /* * If we are re-mounting a previously read-only volume, we want to * re-read the inode and block maps, since fsck.jfs may have updated * them. */ if (remount) { if (chkSuper(sb) || (sbi->state != FM_CLEAN)) return -EINVAL; truncate_inode_pages(sbi->ipimap->i_mapping, 0); truncate_inode_pages(sbi->ipbmap->i_mapping, 0); diUnmount(sbi->ipimap, 1); if ((rc = diMount(sbi->ipimap))) { jfs_err("jfs_mount_rw: diMount failed!"); return rc; } dbUnmount(sbi->ipbmap, 1); if ((rc = dbMount(sbi->ipbmap))) { jfs_err("jfs_mount_rw: dbMount failed!"); return rc; } } /* * open/initialize log */ if ((rc = lmLogOpen(sb))) return rc; /* * update file system superblock; */ if ((rc = updateSuper(sb, FM_MOUNT))) { jfs_err("jfs_mount: updateSuper failed w/rc = %d", rc); lmLogClose(sb); return rc; } /* * write MOUNT log record of the file system */ logMOUNT(sb); return rc; } /* * chkSuper() * * validate the superblock of the file system to be mounted and * get the file system parameters. 
* * returns * 0 with fragsize set if check successful * error code if not successful */ static int chkSuper(struct super_block *sb) { int rc = 0; struct jfs_sb_info *sbi = JFS_SBI(sb); struct jfs_superblock *j_sb; struct buffer_head *bh; int AIM_bytesize, AIT_bytesize; int expected_AIM_bytesize, expected_AIT_bytesize; s64 AIM_byte_addr, AIT_byte_addr, fsckwsp_addr; s64 byte_addr_diff0, byte_addr_diff1; s32 bsize; if ((rc = readSuper(sb, &bh))) return rc; j_sb = (struct jfs_superblock *)bh->b_data; /* * validate superblock */ /* validate fs signature */ if (strncmp(j_sb->s_magic, JFS_MAGIC, 4) || le32_to_cpu(j_sb->s_version) > JFS_VERSION) { rc = -EINVAL; goto out; } bsize = le32_to_cpu(j_sb->s_bsize); #ifdef _JFS_4K if (bsize != PSIZE) { jfs_err("Currently only 4K block size supported!"); rc = -EINVAL; goto out; } #endif /* _JFS_4K */ jfs_info("superblock: flag:0x%08x state:0x%08x size:0x%Lx", le32_to_cpu(j_sb->s_flag), le32_to_cpu(j_sb->s_state), (unsigned long long) le64_to_cpu(j_sb->s_size)); /* validate the descriptors for Secondary AIM and AIT */ if ((j_sb->s_flag & cpu_to_le32(JFS_BAD_SAIT)) != cpu_to_le32(JFS_BAD_SAIT)) { expected_AIM_bytesize = 2 * PSIZE; AIM_bytesize = lengthPXD(&(j_sb->s_aim2)) * bsize; expected_AIT_bytesize = 4 * PSIZE; AIT_bytesize = lengthPXD(&(j_sb->s_ait2)) * bsize; AIM_byte_addr = addressPXD(&(j_sb->s_aim2)) * bsize; AIT_byte_addr = addressPXD(&(j_sb->s_ait2)) * bsize; byte_addr_diff0 = AIT_byte_addr - AIM_byte_addr; fsckwsp_addr = addressPXD(&(j_sb->s_fsckpxd)) * bsize; byte_addr_diff1 = fsckwsp_addr - AIT_byte_addr; if ((AIM_bytesize != expected_AIM_bytesize) || (AIT_bytesize != expected_AIT_bytesize) || (byte_addr_diff0 != AIM_bytesize) || (byte_addr_diff1 <= AIT_bytesize)) j_sb->s_flag |= cpu_to_le32(JFS_BAD_SAIT); } if ((j_sb->s_flag & cpu_to_le32(JFS_GROUPCOMMIT)) != cpu_to_le32(JFS_GROUPCOMMIT)) j_sb->s_flag |= cpu_to_le32(JFS_GROUPCOMMIT); /* validate fs state */ if (j_sb->s_state != cpu_to_le32(FM_CLEAN) && !(sb->s_flags & 
MS_RDONLY)) { jfs_err("jfs_mount: Mount Failure: File System Dirty."); rc = -EINVAL; goto out; } sbi->state = le32_to_cpu(j_sb->s_state); sbi->mntflag = le32_to_cpu(j_sb->s_flag); /* * JFS always does I/O by 4K pages. Don't tell the buffer cache * that we use anything else (leave s_blocksize alone). */ sbi->bsize = bsize; sbi->l2bsize = le16_to_cpu(j_sb->s_l2bsize); /* * For now, ignore s_pbsize, l2bfactor. All I/O going through buffer * cache. */ sbi->nbperpage = PSIZE >> sbi->l2bsize; sbi->l2nbperpage = L2PSIZE - sbi->l2bsize; sbi->l2niperblk = sbi->l2bsize - L2DISIZE; if (sbi->mntflag & JFS_INLINELOG) sbi->logpxd = j_sb->s_logpxd; else { sbi->logdev = new_decode_dev(le32_to_cpu(j_sb->s_logdev)); memcpy(sbi->uuid, j_sb->s_uuid, sizeof(sbi->uuid)); memcpy(sbi->loguuid, j_sb->s_loguuid, sizeof(sbi->uuid)); } sbi->fsckpxd = j_sb->s_fsckpxd; sbi->ait2 = j_sb->s_ait2; out: brelse(bh); return rc; } /* * updateSuper() * * update synchronously superblock if it is mounted read-write. */ int updateSuper(struct super_block *sb, uint state) { struct jfs_superblock *j_sb; struct jfs_sb_info *sbi = JFS_SBI(sb); struct buffer_head *bh; int rc; if (sbi->flag & JFS_NOINTEGRITY) { if (state == FM_DIRTY) { sbi->p_state = state; return 0; } else if (state == FM_MOUNT) { sbi->p_state = sbi->state; state = FM_DIRTY; } else if (state == FM_CLEAN) { state = sbi->p_state; } else jfs_err("updateSuper: bad state"); } else if (sbi->state == FM_DIRTY) return 0; if ((rc = readSuper(sb, &bh))) return rc; j_sb = (struct jfs_superblock *)bh->b_data; j_sb->s_state = cpu_to_le32(state); sbi->state = state; if (state == FM_MOUNT) { /* record log's dev_t and mount serial number */ j_sb->s_logdev = cpu_to_le32(new_encode_dev(sbi->log->bdev->bd_dev)); j_sb->s_logserial = cpu_to_le32(sbi->log->serial); } else if (state == FM_CLEAN) { /* * If this volume is shared with OS/2, OS/2 will need to * recalculate DASD usage, since we don't deal with it. 
*/ if (j_sb->s_flag & cpu_to_le32(JFS_DASD_ENABLED)) j_sb->s_flag |= cpu_to_le32(JFS_DASD_PRIME); } mark_buffer_dirty(bh); sync_dirty_buffer(bh); brelse(bh); return 0; } /* * readSuper() * * read superblock by raw sector address */ int readSuper(struct super_block *sb, struct buffer_head **bpp) { /* read in primary superblock */ *bpp = sb_bread(sb, SUPER1_OFF >> sb->s_blocksize_bits); if (*bpp) return 0; /* read in secondary/replicated superblock */ *bpp = sb_bread(sb, SUPER2_OFF >> sb->s_blocksize_bits); if (*bpp) return 0; return -EIO; } /* * logMOUNT() * * function: write a MOUNT log record for file system. * * MOUNT record keeps logredo() from processing log records * for this file system past this point in log. * it is harmless if mount fails. * * note: MOUNT record is at aggregate level, not at fileset level, * since log records of previous mounts of a fileset * (e.g., AFTER record of extent allocation) have to be processed * to update block allocation map at aggregate level. */ static int logMOUNT(struct super_block *sb) { struct jfs_log *log = JFS_SBI(sb)->log; struct lrd lrd; lrd.logtid = 0; lrd.backchain = 0; lrd.type = cpu_to_le16(LOG_MOUNT); lrd.length = 0; lrd.aggregate = cpu_to_le32(new_encode_dev(sb->s_bdev->bd_dev)); lmLog(log, NULL, &lrd, NULL); return 0; }
gpl-2.0
sohkis/leanKernel-shamu
net/mac80211/util.c
1740
58251
/* * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * utilities for mac80211 */ #include <net/mac80211.h> #include <linux/netdevice.h> #include <linux/export.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/skbuff.h> #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <linux/bitmap.h> #include <linux/crc32.h> #include <net/net_namespace.h> #include <net/cfg80211.h> #include <net/rtnetlink.h> #include "ieee80211_i.h" #include "driver-ops.h" #include "rate.h" #include "mesh.h" #include "wme.h" #include "led.h" #include "wep.h" /* privid for wiphys to determine whether they belong to us or not */ void *mac80211_wiphy_privid = &mac80211_wiphy_privid; struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy) { struct ieee80211_local *local; BUG_ON(!wiphy); local = wiphy_priv(wiphy); return &local->hw; } EXPORT_SYMBOL(wiphy_to_ieee80211_hw); u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, enum nl80211_iftype type) { __le16 fc = hdr->frame_control; /* drop ACK/CTS frames and incorrect hdr len (ctrl) */ if (len < 16) return NULL; if (ieee80211_is_data(fc)) { if (len < 24) /* drop incorrect hdr len (data) */ return NULL; if (ieee80211_has_a4(fc)) return NULL; if (ieee80211_has_tods(fc)) return hdr->addr1; if (ieee80211_has_fromds(fc)) return hdr->addr2; return hdr->addr3; } if (ieee80211_is_mgmt(fc)) { if (len < 24) /* drop incorrect hdr len (mgmt) */ return NULL; return hdr->addr3; } if (ieee80211_is_ctl(fc)) { if(ieee80211_is_pspoll(fc)) return hdr->addr1; if (ieee80211_is_back_req(fc)) { switch (type) { case NL80211_IFTYPE_STATION: return hdr->addr2; case NL80211_IFTYPE_AP: case 
NL80211_IFTYPE_AP_VLAN: return hdr->addr1; default: break; /* fall through to the return */ } } } return NULL; } void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx) { struct sk_buff *skb; struct ieee80211_hdr *hdr; skb_queue_walk(&tx->skbs, skb) { hdr = (struct ieee80211_hdr *) skb->data; hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); } } int ieee80211_frame_duration(enum ieee80211_band band, size_t len, int rate, int erp, int short_preamble) { int dur; /* calculate duration (in microseconds, rounded up to next higher * integer if it includes a fractional microsecond) to send frame of * len bytes (does not include FCS) at the given rate. Duration will * also include SIFS. * * rate is in 100 kbps, so divident is multiplied by 10 in the * DIV_ROUND_UP() operations. */ if (band == IEEE80211_BAND_5GHZ || erp) { /* * OFDM: * * N_DBPS = DATARATE x 4 * N_SYM = Ceiling((16+8xLENGTH+6) / N_DBPS) * (16 = SIGNAL time, 6 = tail bits) * TXTIME = T_PREAMBLE + T_SIGNAL + T_SYM x N_SYM + Signal Ext * * T_SYM = 4 usec * 802.11a - 17.5.2: aSIFSTime = 16 usec * 802.11g - 19.8.4: aSIFSTime = 10 usec + * signal ext = 6 usec */ dur = 16; /* SIFS + signal ext */ dur += 16; /* 17.3.2.3: T_PREAMBLE = 16 usec */ dur += 4; /* 17.3.2.3: T_SIGNAL = 4 usec */ dur += 4 * DIV_ROUND_UP((16 + 8 * (len + 4) + 6) * 10, 4 * rate); /* T_SYM x N_SYM */ } else { /* * 802.11b or 802.11g with 802.11b compatibility: * 18.3.4: TXTIME = PreambleLength + PLCPHeaderTime + * Ceiling(((LENGTH+PBCC)x8)/DATARATE). PBCC=0. * * 802.11 (DS): 15.3.3, 802.11b: 18.3.4 * aSIFSTime = 10 usec * aPreambleLength = 144 usec or 72 usec with short preamble * aPLCPHeaderLength = 48 usec or 24 usec with short preamble */ dur = 10; /* aSIFSTime = 10 usec */ dur += short_preamble ? 
(72 + 24) : (144 + 48); dur += DIV_ROUND_UP(8 * (len + 4) * 10, rate); } return dur; } /* Exported duration function for driver use */ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_band band, size_t frame_len, struct ieee80211_rate *rate) { struct ieee80211_sub_if_data *sdata; u16 dur; int erp; bool short_preamble = false; erp = 0; if (vif) { sdata = vif_to_sdata(vif); short_preamble = sdata->vif.bss_conf.use_short_preamble; if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) erp = rate->flags & IEEE80211_RATE_ERP_G; } dur = ieee80211_frame_duration(band, frame_len, rate->bitrate, erp, short_preamble); return cpu_to_le16(dur); } EXPORT_SYMBOL(ieee80211_generic_frame_duration); __le16 ieee80211_rts_duration(struct ieee80211_hw *hw, struct ieee80211_vif *vif, size_t frame_len, const struct ieee80211_tx_info *frame_txctl) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_rate *rate; struct ieee80211_sub_if_data *sdata; bool short_preamble; int erp; u16 dur; struct ieee80211_supported_band *sband; sband = local->hw.wiphy->bands[frame_txctl->band]; short_preamble = false; rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx]; erp = 0; if (vif) { sdata = vif_to_sdata(vif); short_preamble = sdata->vif.bss_conf.use_short_preamble; if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) erp = rate->flags & IEEE80211_RATE_ERP_G; } /* CTS duration */ dur = ieee80211_frame_duration(sband->band, 10, rate->bitrate, erp, short_preamble); /* Data frame duration */ dur += ieee80211_frame_duration(sband->band, frame_len, rate->bitrate, erp, short_preamble); /* ACK duration */ dur += ieee80211_frame_duration(sband->band, 10, rate->bitrate, erp, short_preamble); return cpu_to_le16(dur); } EXPORT_SYMBOL(ieee80211_rts_duration); __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, struct ieee80211_vif *vif, size_t frame_len, const struct ieee80211_tx_info *frame_txctl) { struct ieee80211_local 
*local = hw_to_local(hw); struct ieee80211_rate *rate; struct ieee80211_sub_if_data *sdata; bool short_preamble; int erp; u16 dur; struct ieee80211_supported_band *sband; sband = local->hw.wiphy->bands[frame_txctl->band]; short_preamble = false; rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx]; erp = 0; if (vif) { sdata = vif_to_sdata(vif); short_preamble = sdata->vif.bss_conf.use_short_preamble; if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) erp = rate->flags & IEEE80211_RATE_ERP_G; } /* Data frame duration */ dur = ieee80211_frame_duration(sband->band, frame_len, rate->bitrate, erp, short_preamble); if (!(frame_txctl->flags & IEEE80211_TX_CTL_NO_ACK)) { /* ACK duration */ dur += ieee80211_frame_duration(sband->band, 10, rate->bitrate, erp, short_preamble); } return cpu_to_le16(dur); } EXPORT_SYMBOL(ieee80211_ctstoself_duration); void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue) { struct ieee80211_sub_if_data *sdata; int n_acs = IEEE80211_NUM_ACS; if (local->hw.queues < IEEE80211_NUM_ACS) n_acs = 1; list_for_each_entry_rcu(sdata, &local->interfaces, list) { int ac; if (!sdata->dev) continue; if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)) continue; if (sdata->vif.cab_queue != IEEE80211_INVAL_HW_QUEUE && local->queue_stop_reasons[sdata->vif.cab_queue] != 0) continue; for (ac = 0; ac < n_acs; ac++) { int ac_queue = sdata->vif.hw_queue[ac]; if (ac_queue == queue || (sdata->vif.cab_queue == queue && local->queue_stop_reasons[ac_queue] == 0 && skb_queue_empty(&local->pending[ac_queue]))) netif_wake_subqueue(sdata->dev, ac); } } } static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, enum queue_stop_reason reason) { struct ieee80211_local *local = hw_to_local(hw); trace_wake_queue(local, queue, reason); if (WARN_ON(queue >= hw->queues)) return; if (!test_bit(reason, &local->queue_stop_reasons[queue])) return; __clear_bit(reason, &local->queue_stop_reasons[queue]); if (local->queue_stop_reasons[queue] 
!= 0) /* someone still has this queue stopped */ return; if (skb_queue_empty(&local->pending[queue])) { rcu_read_lock(); ieee80211_propagate_queue_wake(local, queue); rcu_read_unlock(); } else tasklet_schedule(&local->tx_pending_tasklet); } void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, enum queue_stop_reason reason) { struct ieee80211_local *local = hw_to_local(hw); unsigned long flags; spin_lock_irqsave(&local->queue_stop_reason_lock, flags); __ieee80211_wake_queue(hw, queue, reason); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); } void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue) { ieee80211_wake_queue_by_reason(hw, queue, IEEE80211_QUEUE_STOP_REASON_DRIVER); } EXPORT_SYMBOL(ieee80211_wake_queue); static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue, enum queue_stop_reason reason) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_sub_if_data *sdata; int n_acs = IEEE80211_NUM_ACS; trace_stop_queue(local, queue, reason); if (WARN_ON(queue >= hw->queues)) return; if (test_bit(reason, &local->queue_stop_reasons[queue])) return; __set_bit(reason, &local->queue_stop_reasons[queue]); if (local->hw.queues < IEEE80211_NUM_ACS) n_acs = 1; rcu_read_lock(); list_for_each_entry_rcu(sdata, &local->interfaces, list) { int ac; if (!sdata->dev) continue; for (ac = 0; ac < n_acs; ac++) { if (sdata->vif.hw_queue[ac] == queue || sdata->vif.cab_queue == queue) netif_stop_subqueue(sdata->dev, ac); } } rcu_read_unlock(); } void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, enum queue_stop_reason reason) { struct ieee80211_local *local = hw_to_local(hw); unsigned long flags; spin_lock_irqsave(&local->queue_stop_reason_lock, flags); __ieee80211_stop_queue(hw, queue, reason); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); } void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue) { ieee80211_stop_queue_by_reason(hw, queue, IEEE80211_QUEUE_STOP_REASON_DRIVER); 
} EXPORT_SYMBOL(ieee80211_stop_queue); void ieee80211_add_pending_skb(struct ieee80211_local *local, struct sk_buff *skb) { struct ieee80211_hw *hw = &local->hw; unsigned long flags; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int queue = info->hw_queue; if (WARN_ON(!info->control.vif)) { ieee80211_free_txskb(&local->hw, skb); return; } spin_lock_irqsave(&local->queue_stop_reason_lock, flags); __ieee80211_stop_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD); __skb_queue_tail(&local->pending[queue], skb); __ieee80211_wake_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); } void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, struct sk_buff_head *skbs, void (*fn)(void *data), void *data) { struct ieee80211_hw *hw = &local->hw; struct sk_buff *skb; unsigned long flags; int queue, i; spin_lock_irqsave(&local->queue_stop_reason_lock, flags); while ((skb = skb_dequeue(skbs))) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); if (WARN_ON(!info->control.vif)) { ieee80211_free_txskb(&local->hw, skb); continue; } queue = info->hw_queue; __ieee80211_stop_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD); __skb_queue_tail(&local->pending[queue], skb); } if (fn) fn(data); for (i = 0; i < hw->queues; i++) __ieee80211_wake_queue(hw, i, IEEE80211_QUEUE_STOP_REASON_SKB_ADD); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); } void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, unsigned long queues, enum queue_stop_reason reason) { struct ieee80211_local *local = hw_to_local(hw); unsigned long flags; int i; spin_lock_irqsave(&local->queue_stop_reason_lock, flags); for_each_set_bit(i, &queues, hw->queues) __ieee80211_stop_queue(hw, i, reason); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); } void ieee80211_stop_queues(struct ieee80211_hw *hw) { ieee80211_stop_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, 
IEEE80211_QUEUE_STOP_REASON_DRIVER); } EXPORT_SYMBOL(ieee80211_stop_queues); int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue) { struct ieee80211_local *local = hw_to_local(hw); unsigned long flags; int ret; if (WARN_ON(queue >= hw->queues)) return true; spin_lock_irqsave(&local->queue_stop_reason_lock, flags); ret = test_bit(IEEE80211_QUEUE_STOP_REASON_DRIVER, &local->queue_stop_reasons[queue]); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); return ret; } EXPORT_SYMBOL(ieee80211_queue_stopped); void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, unsigned long queues, enum queue_stop_reason reason) { struct ieee80211_local *local = hw_to_local(hw); unsigned long flags; int i; spin_lock_irqsave(&local->queue_stop_reason_lock, flags); for_each_set_bit(i, &queues, hw->queues) __ieee80211_wake_queue(hw, i, reason); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); } void ieee80211_wake_queues(struct ieee80211_hw *hw) { ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_DRIVER); } EXPORT_SYMBOL(ieee80211_wake_queues); void ieee80211_flush_queues(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { u32 queues; if (!local->ops->flush) return; if (sdata && local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) { int ac; queues = 0; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) queues |= BIT(sdata->vif.hw_queue[ac]); if (sdata->vif.cab_queue != IEEE80211_INVAL_HW_QUEUE) queues |= BIT(sdata->vif.cab_queue); } else { /* all queues */ queues = BIT(local->hw.queues) - 1; } ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_FLUSH); drv_flush(local, queues, false); ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_FLUSH); } void ieee80211_iterate_active_interfaces( struct ieee80211_hw *hw, u32 iter_flags, void (*iterator)(void *data, u8 *mac, struct ieee80211_vif *vif), void *data) { struct 
ieee80211_local *local = hw_to_local(hw); struct ieee80211_sub_if_data *sdata; mutex_lock(&local->iflist_mtx); list_for_each_entry(sdata, &local->interfaces, list) { switch (sdata->vif.type) { case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_AP_VLAN: continue; default: break; } if (!(iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL) && !(sdata->flags & IEEE80211_SDATA_IN_DRIVER)) continue; if (ieee80211_sdata_running(sdata)) iterator(data, sdata->vif.addr, &sdata->vif); } sdata = rcu_dereference_protected(local->monitor_sdata, lockdep_is_held(&local->iflist_mtx)); if (sdata && (iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL || sdata->flags & IEEE80211_SDATA_IN_DRIVER)) iterator(data, sdata->vif.addr, &sdata->vif); mutex_unlock(&local->iflist_mtx); } EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces); void ieee80211_iterate_active_interfaces_atomic( struct ieee80211_hw *hw, u32 iter_flags, void (*iterator)(void *data, u8 *mac, struct ieee80211_vif *vif), void *data) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_sub_if_data *sdata; rcu_read_lock(); list_for_each_entry_rcu(sdata, &local->interfaces, list) { switch (sdata->vif.type) { case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_AP_VLAN: continue; default: break; } if (!(iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL) && !(sdata->flags & IEEE80211_SDATA_IN_DRIVER)) continue; if (ieee80211_sdata_running(sdata)) iterator(data, sdata->vif.addr, &sdata->vif); } sdata = rcu_dereference(local->monitor_sdata); if (sdata && (iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL || sdata->flags & IEEE80211_SDATA_IN_DRIVER)) iterator(data, sdata->vif.addr, &sdata->vif); rcu_read_unlock(); } EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic); /* * Nothing should have been stuffed into the workqueue during * the suspend->resume cycle. 
If this WARN is seen then there * is a bug with either the driver suspend or something in * mac80211 stuffing into the workqueue which we haven't yet * cleared during mac80211's suspend cycle. */ static bool ieee80211_can_queue_work(struct ieee80211_local *local) { if (WARN(local->suspended && !local->resuming, "queueing ieee80211 work while going to suspend\n")) return false; return true; } void ieee80211_queue_work(struct ieee80211_hw *hw, struct work_struct *work) { struct ieee80211_local *local = hw_to_local(hw); if (!ieee80211_can_queue_work(local)) return; queue_work(local->workqueue, work); } EXPORT_SYMBOL(ieee80211_queue_work); void ieee80211_queue_delayed_work(struct ieee80211_hw *hw, struct delayed_work *dwork, unsigned long delay) { struct ieee80211_local *local = hw_to_local(hw); if (!ieee80211_can_queue_work(local)) return; queue_delayed_work(local->workqueue, dwork, delay); } EXPORT_SYMBOL(ieee80211_queue_delayed_work); u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, struct ieee802_11_elems *elems, u64 filter, u32 crc) { size_t left = len; const u8 *pos = start; bool calc_crc = filter != 0; DECLARE_BITMAP(seen_elems, 256); const u8 *ie; bitmap_zero(seen_elems, 256); memset(elems, 0, sizeof(*elems)); elems->ie_start = start; elems->total_len = len; while (left >= 2) { u8 id, elen; bool elem_parse_failed; id = *pos++; elen = *pos++; left -= 2; if (elen > left) { elems->parse_error = true; break; } switch (id) { case WLAN_EID_SSID: case WLAN_EID_SUPP_RATES: case WLAN_EID_FH_PARAMS: case WLAN_EID_DS_PARAMS: case WLAN_EID_CF_PARAMS: case WLAN_EID_TIM: case WLAN_EID_IBSS_PARAMS: case WLAN_EID_CHALLENGE: case WLAN_EID_RSN: case WLAN_EID_ERP_INFO: case WLAN_EID_EXT_SUPP_RATES: case WLAN_EID_HT_CAPABILITY: case WLAN_EID_HT_OPERATION: case WLAN_EID_VHT_CAPABILITY: case WLAN_EID_VHT_OPERATION: case WLAN_EID_MESH_ID: case WLAN_EID_MESH_CONFIG: case WLAN_EID_PEER_MGMT: case WLAN_EID_PREQ: case WLAN_EID_PREP: case WLAN_EID_PERR: case 
WLAN_EID_RANN: case WLAN_EID_CHANNEL_SWITCH: case WLAN_EID_EXT_CHANSWITCH_ANN: case WLAN_EID_COUNTRY: case WLAN_EID_PWR_CONSTRAINT: case WLAN_EID_TIMEOUT_INTERVAL: case WLAN_EID_SECONDARY_CHANNEL_OFFSET: case WLAN_EID_WIDE_BW_CHANNEL_SWITCH: /* * not listing WLAN_EID_CHANNEL_SWITCH_WRAPPER -- it seems possible * that if the content gets bigger it might be needed more than once */ if (test_bit(id, seen_elems)) { elems->parse_error = true; left -= elen; pos += elen; continue; } break; } if (calc_crc && id < 64 && (filter & (1ULL << id))) crc = crc32_be(crc, pos - 2, elen + 2); elem_parse_failed = false; switch (id) { case WLAN_EID_SSID: elems->ssid = pos; elems->ssid_len = elen; break; case WLAN_EID_SUPP_RATES: elems->supp_rates = pos; elems->supp_rates_len = elen; break; case WLAN_EID_DS_PARAMS: if (elen >= 1) elems->ds_params = pos; else elem_parse_failed = true; break; case WLAN_EID_TIM: if (elen >= sizeof(struct ieee80211_tim_ie)) { elems->tim = (void *)pos; elems->tim_len = elen; } else elem_parse_failed = true; break; case WLAN_EID_CHALLENGE: elems->challenge = pos; elems->challenge_len = elen; break; case WLAN_EID_VENDOR_SPECIFIC: if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 && pos[2] == 0xf2) { /* Microsoft OUI (00:50:F2) */ if (calc_crc) crc = crc32_be(crc, pos - 2, elen + 2); if (elen >= 5 && pos[3] == 2) { /* OUI Type 2 - WMM IE */ if (pos[4] == 0) { elems->wmm_info = pos; elems->wmm_info_len = elen; } else if (pos[4] == 1) { elems->wmm_param = pos; elems->wmm_param_len = elen; } } } break; case WLAN_EID_RSN: elems->rsn = pos; elems->rsn_len = elen; break; case WLAN_EID_ERP_INFO: if (elen >= 1) elems->erp_info = pos; else elem_parse_failed = true; break; case WLAN_EID_EXT_SUPP_RATES: elems->ext_supp_rates = pos; elems->ext_supp_rates_len = elen; break; case WLAN_EID_HT_CAPABILITY: if (elen >= sizeof(struct ieee80211_ht_cap)) elems->ht_cap_elem = (void *)pos; else elem_parse_failed = true; break; case WLAN_EID_HT_OPERATION: if (elen >= sizeof(struct 
ieee80211_ht_operation)) elems->ht_operation = (void *)pos; else elem_parse_failed = true; break; case WLAN_EID_VHT_CAPABILITY: if (elen >= sizeof(struct ieee80211_vht_cap)) elems->vht_cap_elem = (void *)pos; else elem_parse_failed = true; break; case WLAN_EID_VHT_OPERATION: if (elen >= sizeof(struct ieee80211_vht_operation)) elems->vht_operation = (void *)pos; else elem_parse_failed = true; break; case WLAN_EID_OPMODE_NOTIF: if (elen > 0) elems->opmode_notif = pos; else elem_parse_failed = true; break; case WLAN_EID_MESH_ID: elems->mesh_id = pos; elems->mesh_id_len = elen; break; case WLAN_EID_MESH_CONFIG: if (elen >= sizeof(struct ieee80211_meshconf_ie)) elems->mesh_config = (void *)pos; else elem_parse_failed = true; break; case WLAN_EID_PEER_MGMT: elems->peering = pos; elems->peering_len = elen; break; case WLAN_EID_MESH_AWAKE_WINDOW: if (elen >= 2) elems->awake_window = (void *)pos; break; case WLAN_EID_PREQ: elems->preq = pos; elems->preq_len = elen; break; case WLAN_EID_PREP: elems->prep = pos; elems->prep_len = elen; break; case WLAN_EID_PERR: elems->perr = pos; elems->perr_len = elen; break; case WLAN_EID_RANN: if (elen >= sizeof(struct ieee80211_rann_ie)) elems->rann = (void *)pos; else elem_parse_failed = true; break; case WLAN_EID_CHANNEL_SWITCH: if (elen != sizeof(struct ieee80211_channel_sw_ie)) { elem_parse_failed = true; break; } elems->ch_switch_ie = (void *)pos; break; case WLAN_EID_EXT_CHANSWITCH_ANN: if (elen != sizeof(struct ieee80211_ext_chansw_ie)) { elem_parse_failed = true; break; } elems->ext_chansw_ie = (void *)pos; break; case WLAN_EID_SECONDARY_CHANNEL_OFFSET: if (elen != sizeof(struct ieee80211_sec_chan_offs_ie)) { elem_parse_failed = true; break; } elems->sec_chan_offs = (void *)pos; break; case WLAN_EID_WIDE_BW_CHANNEL_SWITCH: if (!action || elen != sizeof(*elems->wide_bw_chansw_ie)) { elem_parse_failed = true; break; } elems->wide_bw_chansw_ie = (void *)pos; break; case WLAN_EID_CHANNEL_SWITCH_WRAPPER: if (action) { 
elem_parse_failed = true; break; } /* * This is a bit tricky, but as we only care about * the wide bandwidth channel switch element, so * just parse it out manually. */ ie = cfg80211_find_ie(WLAN_EID_WIDE_BW_CHANNEL_SWITCH, pos, elen); if (ie) { if (ie[1] == sizeof(*elems->wide_bw_chansw_ie)) elems->wide_bw_chansw_ie = (void *)(ie + 2); else elem_parse_failed = true; } break; case WLAN_EID_COUNTRY: elems->country_elem = pos; elems->country_elem_len = elen; break; case WLAN_EID_PWR_CONSTRAINT: if (elen != 1) { elem_parse_failed = true; break; } elems->pwr_constr_elem = pos; break; case WLAN_EID_TIMEOUT_INTERVAL: if (elen >= sizeof(struct ieee80211_timeout_interval_ie)) elems->timeout_int = (void *)pos; else elem_parse_failed = true; break; default: break; } if (elem_parse_failed) elems->parse_error = true; else __set_bit(id, seen_elems); left -= elen; pos += elen; } if (left != 0) elems->parse_error = true; return crc; } void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, bool bss_notify) { struct ieee80211_local *local = sdata->local; struct ieee80211_tx_queue_params qparam; struct ieee80211_chanctx_conf *chanctx_conf; int ac; bool use_11b, enable_qos; int aCWmin, aCWmax; if (!local->ops->conf_tx) return; if (local->hw.queues < IEEE80211_NUM_ACS) return; memset(&qparam, 0, sizeof(qparam)); rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); use_11b = (chanctx_conf && chanctx_conf->def.chan->band == IEEE80211_BAND_2GHZ) && !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE); rcu_read_unlock(); /* * By default disable QoS in STA mode for old access points, which do * not support 802.11e. New APs will provide proper queue parameters, * that we will configure later. 
*/ enable_qos = (sdata->vif.type != NL80211_IFTYPE_STATION); for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { /* Set defaults according to 802.11-2007 Table 7-37 */ aCWmax = 1023; if (use_11b) aCWmin = 31; else aCWmin = 15; if (enable_qos) { switch (ac) { case IEEE80211_AC_BK: qparam.cw_max = aCWmax; qparam.cw_min = aCWmin; qparam.txop = 0; qparam.aifs = 7; break; /* never happens but let's not leave undefined */ default: case IEEE80211_AC_BE: qparam.cw_max = aCWmax; qparam.cw_min = aCWmin; qparam.txop = 0; qparam.aifs = 3; break; case IEEE80211_AC_VI: qparam.cw_max = aCWmin; qparam.cw_min = (aCWmin + 1) / 2 - 1; if (use_11b) qparam.txop = 6016/32; else qparam.txop = 3008/32; qparam.aifs = 2; break; case IEEE80211_AC_VO: qparam.cw_max = (aCWmin + 1) / 2 - 1; qparam.cw_min = (aCWmin + 1) / 4 - 1; if (use_11b) qparam.txop = 3264/32; else qparam.txop = 1504/32; qparam.aifs = 2; break; } } else { /* Confiure old 802.11b/g medium access rules. */ qparam.cw_max = aCWmax; qparam.cw_min = aCWmin; qparam.txop = 0; qparam.aifs = 2; } qparam.uapsd = false; sdata->tx_conf[ac] = qparam; drv_conf_tx(local, sdata, ac, &qparam); } if (sdata->vif.type != NL80211_IFTYPE_MONITOR && sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE) { sdata->vif.bss_conf.qos = enable_qos; if (bss_notify) ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_QOS); } } void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, const size_t supp_rates_len, const u8 *supp_rates) { struct ieee80211_chanctx_conf *chanctx_conf; int i, have_higher_than_11mbit = 0; /* cf. 
IEEE 802.11 9.2.12 */ for (i = 0; i < supp_rates_len; i++) if ((supp_rates[i] & 0x7f) * 5 > 110) have_higher_than_11mbit = 1; rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (chanctx_conf && chanctx_conf->def.chan->band == IEEE80211_BAND_2GHZ && have_higher_than_11mbit) sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; else sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; rcu_read_unlock(); ieee80211_set_wmm_default(sdata, true); } u32 ieee80211_mandatory_rates(struct ieee80211_local *local, enum ieee80211_band band) { struct ieee80211_supported_band *sband; struct ieee80211_rate *bitrates; u32 mandatory_rates; enum ieee80211_rate_flags mandatory_flag; int i; sband = local->hw.wiphy->bands[band]; if (WARN_ON(!sband)) return 1; if (band == IEEE80211_BAND_2GHZ) mandatory_flag = IEEE80211_RATE_MANDATORY_B; else mandatory_flag = IEEE80211_RATE_MANDATORY_A; bitrates = sband->bitrates; mandatory_rates = 0; for (i = 0; i < sband->n_bitrates; i++) if (bitrates[i].flags & mandatory_flag) mandatory_rates |= BIT(i); return mandatory_rates; } void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, u16 transaction, u16 auth_alg, u16 status, const u8 *extra, size_t extra_len, const u8 *da, const u8 *bssid, const u8 *key, u8 key_len, u8 key_idx, u32 tx_flags) { struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; int err; skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 6 + extra_len); if (!skb) return; skb_reserve(skb, local->hw.extra_tx_headroom); mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6); memset(mgmt, 0, 24 + 6); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH); memcpy(mgmt->da, da, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); memcpy(mgmt->bssid, bssid, ETH_ALEN); mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg); mgmt->u.auth.auth_transaction = cpu_to_le16(transaction); mgmt->u.auth.status_code = cpu_to_le16(status); if (extra) 
memcpy(skb_put(skb, extra_len), extra, extra_len); if (auth_alg == WLAN_AUTH_SHARED_KEY && transaction == 3) { mgmt->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); err = ieee80211_wep_encrypt(local, skb, key, key_len, key_idx); WARN_ON(err); } IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT | tx_flags; ieee80211_tx_skb(sdata, skb); } void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, const u8 *bssid, u16 stype, u16 reason, bool send_frame, u8 *frame_buf) { struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ieee80211_mgmt *mgmt = (void *)frame_buf; /* build frame */ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype); mgmt->duration = 0; /* initialize only */ mgmt->seq_ctrl = 0; /* initialize only */ memcpy(mgmt->da, bssid, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); memcpy(mgmt->bssid, bssid, ETH_ALEN); /* u.deauth.reason_code == u.disassoc.reason_code */ mgmt->u.deauth.reason_code = cpu_to_le16(reason); if (send_frame) { skb = dev_alloc_skb(local->hw.extra_tx_headroom + IEEE80211_DEAUTH_FRAME_LEN); if (!skb) return; skb_reserve(skb, local->hw.extra_tx_headroom); /* copy in frame */ memcpy(skb_put(skb, IEEE80211_DEAUTH_FRAME_LEN), mgmt, IEEE80211_DEAUTH_FRAME_LEN); if (sdata->vif.type != NL80211_IFTYPE_STATION || !(sdata->u.mgd.flags & IEEE80211_STA_MFP_ENABLED)) IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; ieee80211_tx_skb(sdata, skb); } } int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, size_t buffer_len, const u8 *ie, size_t ie_len, enum ieee80211_band band, u32 rate_mask, u8 channel) { struct ieee80211_supported_band *sband; u8 *pos = buffer, *end = buffer + buffer_len; size_t offset = 0, noffset; int supp_rates_len, i; u8 rates[32]; int num_rates; int ext_rates_len; sband = local->hw.wiphy->bands[band]; if (WARN_ON_ONCE(!sband)) return 0; num_rates = 0; for (i = 0; i < sband->n_bitrates; i++) { if ((BIT(i) & rate_mask) == 0) 
continue; /* skip rate */ rates[num_rates++] = (u8) (sband->bitrates[i].bitrate / 5); } supp_rates_len = min_t(int, num_rates, 8); if (end - pos < 2 + supp_rates_len) goto out_err; *pos++ = WLAN_EID_SUPP_RATES; *pos++ = supp_rates_len; memcpy(pos, rates, supp_rates_len); pos += supp_rates_len; /* insert "request information" if in custom IEs */ if (ie && ie_len) { static const u8 before_extrates[] = { WLAN_EID_SSID, WLAN_EID_SUPP_RATES, WLAN_EID_REQUEST, }; noffset = ieee80211_ie_split(ie, ie_len, before_extrates, ARRAY_SIZE(before_extrates), offset); if (end - pos < noffset - offset) goto out_err; memcpy(pos, ie + offset, noffset - offset); pos += noffset - offset; offset = noffset; } ext_rates_len = num_rates - supp_rates_len; if (ext_rates_len > 0) { if (end - pos < 2 + ext_rates_len) goto out_err; *pos++ = WLAN_EID_EXT_SUPP_RATES; *pos++ = ext_rates_len; memcpy(pos, rates + supp_rates_len, ext_rates_len); pos += ext_rates_len; } if (channel && sband->band == IEEE80211_BAND_2GHZ) { if (end - pos < 3) goto out_err; *pos++ = WLAN_EID_DS_PARAMS; *pos++ = 1; *pos++ = channel; } /* insert custom IEs that go before HT */ if (ie && ie_len) { static const u8 before_ht[] = { WLAN_EID_SSID, WLAN_EID_SUPP_RATES, WLAN_EID_REQUEST, WLAN_EID_EXT_SUPP_RATES, WLAN_EID_DS_PARAMS, WLAN_EID_SUPPORTED_REGULATORY_CLASSES, }; noffset = ieee80211_ie_split(ie, ie_len, before_ht, ARRAY_SIZE(before_ht), offset); if (end - pos < noffset - offset) goto out_err; memcpy(pos, ie + offset, noffset - offset); pos += noffset - offset; offset = noffset; } if (sband->ht_cap.ht_supported) { if (end - pos < 2 + sizeof(struct ieee80211_ht_cap)) goto out_err; pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap, sband->ht_cap.cap); } /* * If adding more here, adjust code in main.c * that calculates local->scan_ies_len. 
*/ /* add any remaining custom IEs */ if (ie && ie_len) { noffset = ie_len; if (end - pos < noffset - offset) goto out_err; memcpy(pos, ie + offset, noffset - offset); pos += noffset - offset; } if (sband->vht_cap.vht_supported) { if (end - pos < 2 + sizeof(struct ieee80211_vht_cap)) goto out_err; pos = ieee80211_ie_build_vht_cap(pos, &sband->vht_cap, sband->vht_cap.cap); } return pos - buffer; out_err: WARN_ONCE(1, "not enough space for preq IEs\n"); return pos - buffer; } struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, u32 ratemask, struct ieee80211_channel *chan, const u8 *ssid, size_t ssid_len, const u8 *ie, size_t ie_len, bool directed) { struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; u8 chan_no; int ies_len; /* * Do not send DS Channel parameter for directed probe requests * in order to maximize the chance that we get a response. Some * badly-behaved APs don't respond when this parameter is included. 
*/ if (directed) chan_no = 0; else chan_no = ieee80211_frequency_to_channel(chan->center_freq); skb = ieee80211_probereq_get(&local->hw, &sdata->vif, ssid, ssid_len, 100 + ie_len); if (!skb) return NULL; ies_len = ieee80211_build_preq_ies(local, skb_tail_pointer(skb), skb_tailroom(skb), ie, ie_len, chan->band, ratemask, chan_no); skb_put(skb, ies_len); if (dst) { mgmt = (struct ieee80211_mgmt *) skb->data; memcpy(mgmt->da, dst, ETH_ALEN); memcpy(mgmt->bssid, dst, ETH_ALEN); } IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; return skb; } void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, const u8 *ssid, size_t ssid_len, const u8 *ie, size_t ie_len, u32 ratemask, bool directed, u32 tx_flags, struct ieee80211_channel *channel, bool scan) { struct sk_buff *skb; skb = ieee80211_build_probe_req(sdata, dst, ratemask, channel, ssid, ssid_len, ie, ie_len, directed); if (skb) { IEEE80211_SKB_CB(skb)->flags |= tx_flags; if (scan) ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band); else ieee80211_tx_skb(sdata, skb); } } u32 ieee80211_sta_get_rates(struct ieee80211_local *local, struct ieee802_11_elems *elems, enum ieee80211_band band, u32 *basic_rates) { struct ieee80211_supported_band *sband; struct ieee80211_rate *bitrates; size_t num_rates; u32 supp_rates; int i, j; sband = local->hw.wiphy->bands[band]; if (WARN_ON(!sband)) return 1; bitrates = sband->bitrates; num_rates = sband->n_bitrates; supp_rates = 0; for (i = 0; i < elems->supp_rates_len + elems->ext_supp_rates_len; i++) { u8 rate = 0; int own_rate; bool is_basic; if (i < elems->supp_rates_len) rate = elems->supp_rates[i]; else if (elems->ext_supp_rates) rate = elems->ext_supp_rates [i - elems->supp_rates_len]; own_rate = 5 * (rate & 0x7f); is_basic = !!(rate & 0x80); if (is_basic && (rate & 0x7f) == BSS_MEMBERSHIP_SELECTOR_HT_PHY) continue; for (j = 0; j < num_rates; j++) { if (bitrates[j].bitrate == own_rate) { supp_rates |= BIT(j); if (basic_rates && is_basic) 
*basic_rates |= BIT(j); } } } return supp_rates; } void ieee80211_stop_device(struct ieee80211_local *local) { ieee80211_led_radio(local, false); ieee80211_mod_tpt_led_trig(local, 0, IEEE80211_TPT_LEDTRIG_FL_RADIO); cancel_work_sync(&local->reconfig_filter); flush_workqueue(local->workqueue); drv_stop(local); } static void ieee80211_assign_chanctx(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { struct ieee80211_chanctx_conf *conf; struct ieee80211_chanctx *ctx; if (!local->use_chanctx) return; mutex_lock(&local->chanctx_mtx); conf = rcu_dereference_protected(sdata->vif.chanctx_conf, lockdep_is_held(&local->chanctx_mtx)); if (conf) { ctx = container_of(conf, struct ieee80211_chanctx, conf); drv_assign_vif_chanctx(local, sdata, ctx); } mutex_unlock(&local->chanctx_mtx); } int ieee80211_reconfig(struct ieee80211_local *local) { struct ieee80211_hw *hw = &local->hw; struct ieee80211_sub_if_data *sdata; struct ieee80211_chanctx *ctx; struct sta_info *sta; int res, i; bool reconfig_due_to_wowlan = false; #ifdef CONFIG_PM if (local->suspended) local->resuming = true; if (local->wowlan) { local->wowlan = false; res = drv_resume(local); if (res < 0) { local->resuming = false; return res; } if (res == 0) goto wake_up; WARN_ON(res > 1); /* * res is 1, which means the driver requested * to go through a regular reset on wakeup. */ reconfig_due_to_wowlan = true; } #endif /* everything else happens only if HW was up & running */ if (!local->open_count) goto wake_up; /* * Upon resume hardware can sometimes be goofy due to * various platform / driver / bus issues, so restarting * the device may at times not work immediately. Propagate * the error. */ res = drv_start(local); if (res) { WARN(local->suspended, "Hardware became unavailable " "upon resume. 
This could be a software issue " "prior to suspend or a hardware issue.\n"); return res; } /* setup fragmentation threshold */ drv_set_frag_threshold(local, hw->wiphy->frag_threshold); /* setup RTS threshold */ drv_set_rts_threshold(local, hw->wiphy->rts_threshold); /* reset coverage class */ drv_set_coverage_class(local, hw->wiphy->coverage_class); ieee80211_led_radio(local, true); ieee80211_mod_tpt_led_trig(local, IEEE80211_TPT_LEDTRIG_FL_RADIO, 0); /* add interfaces */ sdata = rtnl_dereference(local->monitor_sdata); if (sdata) { /* in HW restart it exists already */ WARN_ON(local->resuming); res = drv_add_interface(local, sdata); if (WARN_ON(res)) { rcu_assign_pointer(local->monitor_sdata, NULL); synchronize_net(); kfree(sdata); } } list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && sdata->vif.type != NL80211_IFTYPE_MONITOR && ieee80211_sdata_running(sdata)) res = drv_add_interface(local, sdata); } /* add channel contexts */ if (local->use_chanctx) { mutex_lock(&local->chanctx_mtx); list_for_each_entry(ctx, &local->chanctx_list, list) WARN_ON(drv_add_chanctx(local, ctx)); mutex_unlock(&local->chanctx_mtx); } list_for_each_entry(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) continue; ieee80211_assign_chanctx(local, sdata); } sdata = rtnl_dereference(local->monitor_sdata); if (sdata && ieee80211_sdata_running(sdata)) ieee80211_assign_chanctx(local, sdata); /* add STAs back */ mutex_lock(&local->sta_mtx); list_for_each_entry(sta, &local->sta_list, list) { enum ieee80211_sta_state state; if (!sta->uploaded) continue; /* AP-mode stations will be added later */ if (sta->sdata->vif.type == NL80211_IFTYPE_AP) continue; for (state = IEEE80211_STA_NOTEXIST; state < sta->sta_state; state++) WARN_ON(drv_sta_state(local, sta->sdata, sta, state, state + 1)); } mutex_unlock(&local->sta_mtx); /* reconfigure tx conf */ if (hw->queues >= IEEE80211_NUM_ACS) { list_for_each_entry(sdata, 
&local->interfaces, list) { if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN || sdata->vif.type == NL80211_IFTYPE_MONITOR || !ieee80211_sdata_running(sdata)) continue; for (i = 0; i < IEEE80211_NUM_ACS; i++) drv_conf_tx(local, sdata, i, &sdata->tx_conf[i]); } } /* reconfigure hardware */ ieee80211_hw_config(local, ~0); ieee80211_configure_filter(local); /* Finally also reconfigure all the BSS information */ list_for_each_entry(sdata, &local->interfaces, list) { u32 changed; if (!ieee80211_sdata_running(sdata)) continue; /* common change flags for all interface types */ changed = BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE | BSS_CHANGED_ERP_SLOT | BSS_CHANGED_HT | BSS_CHANGED_BASIC_RATES | BSS_CHANGED_BEACON_INT | BSS_CHANGED_BSSID | BSS_CHANGED_CQM | BSS_CHANGED_QOS | BSS_CHANGED_IDLE | BSS_CHANGED_TXPOWER; switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: changed |= BSS_CHANGED_ASSOC | BSS_CHANGED_ARP_FILTER | BSS_CHANGED_PS; if (sdata->u.mgd.dtim_period) changed |= BSS_CHANGED_DTIM_PERIOD; mutex_lock(&sdata->u.mgd.mtx); ieee80211_bss_info_change_notify(sdata, changed); mutex_unlock(&sdata->u.mgd.mtx); break; case NL80211_IFTYPE_ADHOC: changed |= BSS_CHANGED_IBSS; /* fall through */ case NL80211_IFTYPE_AP: changed |= BSS_CHANGED_SSID | BSS_CHANGED_P2P_PS; if (sdata->vif.type == NL80211_IFTYPE_AP) { changed |= BSS_CHANGED_AP_PROBE_RESP; if (rcu_access_pointer(sdata->u.ap.beacon)) drv_start_ap(local, sdata); } /* fall through */ case NL80211_IFTYPE_MESH_POINT: if (sdata->vif.bss_conf.enable_beacon) { changed |= BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED; ieee80211_bss_info_change_notify(sdata, changed); } break; case NL80211_IFTYPE_WDS: break; case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_MONITOR: /* ignore virtual */ break; case NL80211_IFTYPE_P2P_DEVICE: changed = BSS_CHANGED_IDLE; break; case NL80211_IFTYPE_UNSPECIFIED: case NUM_NL80211_IFTYPES: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: WARN_ON(1); break; } } 
ieee80211_recalc_ps(local, -1); /* * The sta might be in psm against the ap (e.g. because * this was the state before a hw restart), so we * explicitly send a null packet in order to make sure * it'll sync against the ap (and get out of psm). */ if (!(local->hw.conf.flags & IEEE80211_CONF_PS)) { list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->vif.type != NL80211_IFTYPE_STATION) continue; if (!sdata->u.mgd.associated) continue; ieee80211_send_nullfunc(local, sdata, 0); } } /* APs are now beaconing, add back stations */ mutex_lock(&local->sta_mtx); list_for_each_entry(sta, &local->sta_list, list) { enum ieee80211_sta_state state; if (!sta->uploaded) continue; if (sta->sdata->vif.type != NL80211_IFTYPE_AP) continue; for (state = IEEE80211_STA_NOTEXIST; state < sta->sta_state; state++) WARN_ON(drv_sta_state(local, sta->sdata, sta, state, state + 1)); } mutex_unlock(&local->sta_mtx); /* add back keys */ list_for_each_entry(sdata, &local->interfaces, list) if (ieee80211_sdata_running(sdata)) ieee80211_enable_keys(sdata); wake_up: local->in_reconfig = false; barrier(); if (local->monitors == local->open_count && local->monitors > 0) ieee80211_add_virtual_monitor(local); /* * Clear the WLAN_STA_BLOCK_BA flag so new aggregation * sessions can be established after a resume. * * Also tear down aggregation sessions since reconfiguring * them in a hardware restart scenario is not easily done * right now, and the hardware will have lost information * about the sessions, but we and the AP still think they * are active. This is really a workaround though. 
*/ if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { mutex_lock(&local->sta_mtx); list_for_each_entry(sta, &local->sta_list, list) { ieee80211_sta_tear_down_BA_sessions( sta, AGG_STOP_LOCAL_REQUEST); clear_sta_flag(sta, WLAN_STA_BLOCK_BA); } mutex_unlock(&local->sta_mtx); } ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_SUSPEND); /* * If this is for hw restart things are still running. * We may want to change that later, however. */ if (!local->suspended || reconfig_due_to_wowlan) drv_restart_complete(local); if (!local->suspended) return 0; #ifdef CONFIG_PM /* first set suspended false, then resuming */ local->suspended = false; mb(); local->resuming = false; list_for_each_entry(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) continue; if (sdata->vif.type == NL80211_IFTYPE_STATION) ieee80211_sta_restart(sdata); } mod_timer(&local->sta_cleanup, jiffies + 1); #else WARN_ON(1); #endif return 0; } void ieee80211_resume_disconnect(struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata; struct ieee80211_local *local; struct ieee80211_key *key; if (WARN_ON(!vif)) return; sdata = vif_to_sdata(vif); local = sdata->local; if (WARN_ON(!local->resuming)) return; if (WARN_ON(vif->type != NL80211_IFTYPE_STATION)) return; sdata->flags |= IEEE80211_SDATA_DISCONNECT_RESUME; mutex_lock(&local->key_mtx); list_for_each_entry(key, &sdata->key_list, list) key->flags |= KEY_FLAG_TAINTED; mutex_unlock(&local->key_mtx); } EXPORT_SYMBOL_GPL(ieee80211_resume_disconnect); void ieee80211_recalc_smps(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_chanctx *chanctx; mutex_lock(&local->chanctx_mtx); chanctx_conf = rcu_dereference_protected(sdata->vif.chanctx_conf, lockdep_is_held(&local->chanctx_mtx)); if (WARN_ON_ONCE(!chanctx_conf)) goto unlock; chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf); 
ieee80211_recalc_smps_chanctx(local, chanctx); unlock: mutex_unlock(&local->chanctx_mtx); } static bool ieee80211_id_in_list(const u8 *ids, int n_ids, u8 id) { int i; for (i = 0; i < n_ids; i++) if (ids[i] == id) return true; return false; } /** * ieee80211_ie_split - split an IE buffer according to ordering * * @ies: the IE buffer * @ielen: the length of the IE buffer * @ids: an array with element IDs that are allowed before * the split * @n_ids: the size of the element ID array * @offset: offset where to start splitting in the buffer * * This function splits an IE buffer by updating the @offset * variable to point to the location where the buffer should be * split. * * It assumes that the given IE buffer is well-formed, this * has to be guaranteed by the caller! * * It also assumes that the IEs in the buffer are ordered * correctly, if not the result of using this function will not * be ordered correctly either, i.e. it does no reordering. * * The function returns the offset where the next part of the * buffer starts, which may be @ielen if the entire (remainder) * of the buffer should be used. */ size_t ieee80211_ie_split(const u8 *ies, size_t ielen, const u8 *ids, int n_ids, size_t offset) { size_t pos = offset; while (pos < ielen && ieee80211_id_in_list(ids, n_ids, ies[pos])) pos += 2 + ies[pos + 1]; return pos; } size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset) { size_t pos = offset; while (pos < ielen && ies[pos] != WLAN_EID_VENDOR_SPECIFIC) pos += 2 + ies[pos + 1]; return pos; } static void _ieee80211_enable_rssi_reports(struct ieee80211_sub_if_data *sdata, int rssi_min_thold, int rssi_max_thold) { trace_api_enable_rssi_reports(sdata, rssi_min_thold, rssi_max_thold); if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) return; /* * Scale up threshold values before storing it, as the RSSI averaging * algorithm uses a scaled up value as well. Change this scaling * factor if the RSSI averaging algorithm changes. 
*/ sdata->u.mgd.rssi_min_thold = rssi_min_thold*16; sdata->u.mgd.rssi_max_thold = rssi_max_thold*16; } void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif, int rssi_min_thold, int rssi_max_thold) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); WARN_ON(rssi_min_thold == rssi_max_thold || rssi_min_thold > rssi_max_thold); _ieee80211_enable_rssi_reports(sdata, rssi_min_thold, rssi_max_thold); } EXPORT_SYMBOL(ieee80211_enable_rssi_reports); void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); _ieee80211_enable_rssi_reports(sdata, 0, 0); } EXPORT_SYMBOL(ieee80211_disable_rssi_reports); u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, u16 cap) { __le16 tmp; *pos++ = WLAN_EID_HT_CAPABILITY; *pos++ = sizeof(struct ieee80211_ht_cap); memset(pos, 0, sizeof(struct ieee80211_ht_cap)); /* capability flags */ tmp = cpu_to_le16(cap); memcpy(pos, &tmp, sizeof(u16)); pos += sizeof(u16); /* AMPDU parameters */ *pos++ = ht_cap->ampdu_factor | (ht_cap->ampdu_density << IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT); /* MCS set */ memcpy(pos, &ht_cap->mcs, sizeof(ht_cap->mcs)); pos += sizeof(ht_cap->mcs); /* extended capabilities */ pos += sizeof(__le16); /* BF capabilities */ pos += sizeof(__le32); /* antenna selection */ pos += sizeof(u8); return pos; } u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, u32 cap) { __le32 tmp; *pos++ = WLAN_EID_VHT_CAPABILITY; *pos++ = sizeof(struct ieee80211_vht_cap); memset(pos, 0, sizeof(struct ieee80211_vht_cap)); /* capability flags */ tmp = cpu_to_le32(cap); memcpy(pos, &tmp, sizeof(u32)); pos += sizeof(u32); /* VHT MCS set */ memcpy(pos, &vht_cap->vht_mcs, sizeof(vht_cap->vht_mcs)); pos += sizeof(vht_cap->vht_mcs); return pos; } u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, const struct cfg80211_chan_def *chandef, u16 prot_mode) { struct ieee80211_ht_operation *ht_oper; /* Build 
HT Information */ *pos++ = WLAN_EID_HT_OPERATION; *pos++ = sizeof(struct ieee80211_ht_operation); ht_oper = (struct ieee80211_ht_operation *)pos; ht_oper->primary_chan = ieee80211_frequency_to_channel( chandef->chan->center_freq); switch (chandef->width) { case NL80211_CHAN_WIDTH_160: case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_80: case NL80211_CHAN_WIDTH_40: if (chandef->center_freq1 > chandef->chan->center_freq) ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; else ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW; break; default: ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE; break; } if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 && chandef->width != NL80211_CHAN_WIDTH_20_NOHT && chandef->width != NL80211_CHAN_WIDTH_20) ht_oper->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY; ht_oper->operation_mode = cpu_to_le16(prot_mode); ht_oper->stbc_param = 0x0000; /* It seems that Basic MCS set and Supported MCS set are identical for the first 10 bytes */ memset(&ht_oper->basic_set, 0, 16); memcpy(&ht_oper->basic_set, &ht_cap->mcs, 10); return pos + sizeof(struct ieee80211_ht_operation); } void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan, const struct ieee80211_ht_operation *ht_oper, struct cfg80211_chan_def *chandef) { enum nl80211_channel_type channel_type; if (!ht_oper) { cfg80211_chandef_create(chandef, control_chan, NL80211_CHAN_NO_HT); return; } switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { case IEEE80211_HT_PARAM_CHA_SEC_NONE: channel_type = NL80211_CHAN_HT20; break; case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: channel_type = NL80211_CHAN_HT40PLUS; break; case IEEE80211_HT_PARAM_CHA_SEC_BELOW: channel_type = NL80211_CHAN_HT40MINUS; break; default: channel_type = NL80211_CHAN_NO_HT; } cfg80211_chandef_create(chandef, control_chan, channel_type); } int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, bool need_basic, enum ieee80211_band band) { struct ieee80211_local 
*local = sdata->local; struct ieee80211_supported_band *sband; int rate; u8 i, rates, *pos; u32 basic_rates = sdata->vif.bss_conf.basic_rates; sband = local->hw.wiphy->bands[band]; rates = sband->n_bitrates; if (rates > 8) rates = 8; if (skb_tailroom(skb) < rates + 2) return -ENOMEM; pos = skb_put(skb, rates + 2); *pos++ = WLAN_EID_SUPP_RATES; *pos++ = rates; for (i = 0; i < rates; i++) { u8 basic = 0; if (need_basic && basic_rates & BIT(i)) basic = 0x80; rate = sband->bitrates[i].bitrate; *pos++ = basic | (u8) (rate / 5); } return 0; } int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, bool need_basic, enum ieee80211_band band) { struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; int rate; u8 i, exrates, *pos; u32 basic_rates = sdata->vif.bss_conf.basic_rates; sband = local->hw.wiphy->bands[band]; exrates = sband->n_bitrates; if (exrates > 8) exrates -= 8; else exrates = 0; if (skb_tailroom(skb) < exrates + 2) return -ENOMEM; if (exrates) { pos = skb_put(skb, exrates + 2); *pos++ = WLAN_EID_EXT_SUPP_RATES; *pos++ = exrates; for (i = 8; i < sband->n_bitrates; i++) { u8 basic = 0; if (need_basic && basic_rates & BIT(i)) basic = 0x80; rate = sband->bitrates[i].bitrate; *pos++ = basic | (u8) (rate / 5); } } return 0; } int ieee80211_ave_rssi(struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION)) { /* non-managed type inferfaces */ return 0; } return ifmgd->ave_beacon_signal / 16; } EXPORT_SYMBOL_GPL(ieee80211_ave_rssi); u8 ieee80211_mcs_to_chains(const struct ieee80211_mcs_info *mcs) { if (!mcs) return 1; /* TODO: consider rx_highest */ if (mcs->rx_mask[3]) return 4; if (mcs->rx_mask[2]) return 3; if (mcs->rx_mask[1]) return 2; return 1; } /** * ieee80211_calculate_rx_timestamp - calculate timestamp in frame * @local: mac80211 hw info struct * @status: 
RX status * @mpdu_len: total MPDU length (including FCS) * @mpdu_offset: offset into MPDU to calculate timestamp at * * This function calculates the RX timestamp at the given MPDU offset, taking * into account what the RX timestamp was. An offset of 0 will just normalize * the timestamp to TSF at beginning of MPDU reception. */ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local, struct ieee80211_rx_status *status, unsigned int mpdu_len, unsigned int mpdu_offset) { u64 ts = status->mactime; struct rate_info ri; u16 rate; if (WARN_ON(!ieee80211_have_rx_timestamp(status))) return 0; memset(&ri, 0, sizeof(ri)); /* Fill cfg80211 rate info */ if (status->flag & RX_FLAG_HT) { ri.mcs = status->rate_idx; ri.flags |= RATE_INFO_FLAGS_MCS; if (status->flag & RX_FLAG_40MHZ) ri.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH; if (status->flag & RX_FLAG_SHORT_GI) ri.flags |= RATE_INFO_FLAGS_SHORT_GI; } else if (status->flag & RX_FLAG_VHT) { ri.flags |= RATE_INFO_FLAGS_VHT_MCS; ri.mcs = status->rate_idx; ri.nss = status->vht_nss; if (status->flag & RX_FLAG_40MHZ) ri.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH; if (status->flag & RX_FLAG_80MHZ) ri.flags |= RATE_INFO_FLAGS_80_MHZ_WIDTH; if (status->flag & RX_FLAG_80P80MHZ) ri.flags |= RATE_INFO_FLAGS_80P80_MHZ_WIDTH; if (status->flag & RX_FLAG_160MHZ) ri.flags |= RATE_INFO_FLAGS_160_MHZ_WIDTH; if (status->flag & RX_FLAG_SHORT_GI) ri.flags |= RATE_INFO_FLAGS_SHORT_GI; } else { struct ieee80211_supported_band *sband; sband = local->hw.wiphy->bands[status->band]; ri.legacy = sband->bitrates[status->rate_idx].bitrate; } rate = cfg80211_calculate_bitrate(&ri); if (WARN_ONCE(!rate, "Invalid bitrate: flags=0x%x, idx=%d, vht_nss=%d\n", status->flag, status->rate_idx, status->vht_nss)) return 0; /* rewind from end of MPDU */ if (status->flag & RX_FLAG_MACTIME_END) ts -= mpdu_len * 8 * 10 / rate; ts += mpdu_offset * 8 * 10 / rate; return ts; } void ieee80211_dfs_cac_cancel(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; 
mutex_lock(&local->iflist_mtx); list_for_each_entry(sdata, &local->interfaces, list) { cancel_delayed_work_sync(&sdata->dfs_cac_timer_work); if (sdata->wdev.cac_started) { ieee80211_vif_release_channel(sdata); cfg80211_cac_event(sdata->dev, NL80211_RADAR_CAC_ABORTED, GFP_KERNEL); } } mutex_unlock(&local->iflist_mtx); } void ieee80211_dfs_radar_detected_work(struct work_struct *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, radar_detected_work); struct cfg80211_chan_def chandef; ieee80211_dfs_cac_cancel(local); if (local->use_chanctx) /* currently not handled */ WARN_ON(1); else { chandef = local->hw.conf.chandef; cfg80211_radar_event(local->hw.wiphy, &chandef, GFP_KERNEL); } } void ieee80211_radar_detected(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); trace_api_radar_detected(local); ieee80211_queue_work(hw, &local->radar_detected_work); } EXPORT_SYMBOL(ieee80211_radar_detected);
gpl-2.0
ShinySide/G530P_Permissive
drivers/media/usb/em28xx/em28xx-camera.c
2252
11171
/* em28xx-camera.c - driver for Empia EM25xx/27xx/28xx USB video capture devices Copyright (C) 2009 Mauro Carvalho Chehab <mchehab@infradead.org> Copyright (C) 2013 Frank Schäfer <fschaefer.oss@googlemail.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/i2c.h> #include <media/soc_camera.h> #include <media/mt9v011.h> #include <media/v4l2-common.h> #include "em28xx.h" /* Possible i2c addresses of Micron sensors */ static unsigned short micron_sensor_addrs[] = { 0xb8 >> 1, /* MT9V111, MT9V403 */ 0xba >> 1, /* MT9M001/011/111/112, MT9V011/012/112, MT9D011 */ 0x90 >> 1, /* MT9V012/112, MT9D011 (alternative address) */ I2C_CLIENT_END }; /* Possible i2c addresses of Omnivision sensors */ static unsigned short omnivision_sensor_addrs[] = { 0x42 >> 1, /* OV7725, OV7670/60/48 */ 0x60 >> 1, /* OV2640, OV9650/53/55 */ I2C_CLIENT_END }; static struct soc_camera_link camlink = { .bus_id = 0, .flags = 0, .module_name = "em28xx", }; /* FIXME: Should be replaced by a proper mt9m111 driver */ static int em28xx_initialize_mt9m111(struct em28xx *dev) { int i; unsigned char regs[][3] = { { 0x0d, 0x00, 0x01, }, /* reset and use defaults */ { 0x0d, 0x00, 0x00, }, { 0x0a, 0x00, 0x21, }, { 0x21, 0x04, 0x00, }, /* full readout speed, no row/col skipping */ }; for (i = 0; i < ARRAY_SIZE(regs); i++) i2c_master_send(&dev->i2c_client[dev->def_i2c_bus], &regs[i][0], 3); 
return 0; } /* FIXME: Should be replaced by a proper mt9m001 driver */ static int em28xx_initialize_mt9m001(struct em28xx *dev) { int i; unsigned char regs[][3] = { { 0x0d, 0x00, 0x01, }, { 0x0d, 0x00, 0x00, }, { 0x04, 0x05, 0x00, }, /* hres = 1280 */ { 0x03, 0x04, 0x00, }, /* vres = 1024 */ { 0x20, 0x11, 0x00, }, { 0x06, 0x00, 0x10, }, { 0x2b, 0x00, 0x24, }, { 0x2e, 0x00, 0x24, }, { 0x35, 0x00, 0x24, }, { 0x2d, 0x00, 0x20, }, { 0x2c, 0x00, 0x20, }, { 0x09, 0x0a, 0xd4, }, { 0x35, 0x00, 0x57, }, }; for (i = 0; i < ARRAY_SIZE(regs); i++) i2c_master_send(&dev->i2c_client[dev->def_i2c_bus], &regs[i][0], 3); return 0; } /* * Probes Micron sensors with 8 bit address and 16 bit register width */ static int em28xx_probe_sensor_micron(struct em28xx *dev) { int ret, i; char *name; u8 reg; __be16 id_be; u16 id; struct i2c_client client = dev->i2c_client[dev->def_i2c_bus]; dev->em28xx_sensor = EM28XX_NOSENSOR; for (i = 0; micron_sensor_addrs[i] != I2C_CLIENT_END; i++) { client.addr = micron_sensor_addrs[i]; /* NOTE: i2c_smbus_read_word_data() doesn't work with BE data */ /* Read chip ID from register 0x00 */ reg = 0x00; ret = i2c_master_send(&client, &reg, 1); if (ret < 0) { if (ret != -ENODEV) em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n", client.addr << 1, ret); continue; } ret = i2c_master_recv(&client, (u8 *)&id_be, 2); if (ret < 0) { em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n", client.addr << 1, ret); continue; } id = be16_to_cpu(id_be); /* Read chip ID from register 0xff */ reg = 0xff; ret = i2c_master_send(&client, &reg, 1); if (ret < 0) { em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n", client.addr << 1, ret); continue; } ret = i2c_master_recv(&client, (u8 *)&id_be, 2); if (ret < 0) { em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n", client.addr << 1, ret); continue; } /* Validate chip ID to be sure we have a Micron device */ if (id != be16_to_cpu(id_be)) continue; /* Check chip ID */ id = 
be16_to_cpu(id_be); switch (id) { case 0x1222: name = "MT9V012"; /* MI370 */ /* 640x480 */ break; case 0x1229: name = "MT9V112"; /* 640x480 */ break; case 0x1433: name = "MT9M011"; /* 1280x1024 */ break; case 0x143a: /* found in the ECS G200 */ name = "MT9M111"; /* MI1310 */ /* 1280x1024 */ dev->em28xx_sensor = EM28XX_MT9M111; break; case 0x148c: name = "MT9M112"; /* MI1320 */ /* 1280x1024 */ break; case 0x1511: name = "MT9D011"; /* MI2010 */ /* 1600x1200 */ break; case 0x8232: case 0x8243: /* rev B */ name = "MT9V011"; /* MI360 */ /* 640x480 */ dev->em28xx_sensor = EM28XX_MT9V011; break; case 0x8431: name = "MT9M001"; /* 1280x1024 */ dev->em28xx_sensor = EM28XX_MT9M001; break; default: em28xx_info("unknown Micron sensor detected: 0x%04x\n", id); return 0; } if (dev->em28xx_sensor == EM28XX_NOSENSOR) em28xx_info("unsupported sensor detected: %s\n", name); else em28xx_info("sensor %s detected\n", name); dev->i2c_client[dev->def_i2c_bus].addr = client.addr; return 0; } return -ENODEV; } /* * Probes Omnivision sensors with 8 bit address and register width */ static int em28xx_probe_sensor_omnivision(struct em28xx *dev) { int ret, i; char *name; u8 reg; u16 id; struct i2c_client client = dev->i2c_client[dev->def_i2c_bus]; dev->em28xx_sensor = EM28XX_NOSENSOR; /* NOTE: these devices have the register auto incrementation disabled * by default, so we have to use single byte reads ! 
*/ for (i = 0; omnivision_sensor_addrs[i] != I2C_CLIENT_END; i++) { client.addr = omnivision_sensor_addrs[i]; /* Read manufacturer ID from registers 0x1c-0x1d (BE) */ reg = 0x1c; ret = i2c_smbus_read_byte_data(&client, reg); if (ret < 0) { if (ret != -ENODEV) em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n", client.addr << 1, ret); continue; } id = ret << 8; reg = 0x1d; ret = i2c_smbus_read_byte_data(&client, reg); if (ret < 0) { em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n", client.addr << 1, ret); continue; } id += ret; /* Check manufacturer ID */ if (id != 0x7fa2) continue; /* Read product ID from registers 0x0a-0x0b (BE) */ reg = 0x0a; ret = i2c_smbus_read_byte_data(&client, reg); if (ret < 0) { em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n", client.addr << 1, ret); continue; } id = ret << 8; reg = 0x0b; ret = i2c_smbus_read_byte_data(&client, reg); if (ret < 0) { em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n", client.addr << 1, ret); continue; } id += ret; /* Check product ID */ switch (id) { case 0x2642: name = "OV2640"; dev->em28xx_sensor = EM28XX_OV2640; break; case 0x7648: name = "OV7648"; break; case 0x7660: name = "OV7660"; break; case 0x7673: name = "OV7670"; break; case 0x7720: name = "OV7720"; break; case 0x7721: name = "OV7725"; break; case 0x9648: /* Rev 2 */ case 0x9649: /* Rev 3 */ name = "OV9640"; break; case 0x9650: case 0x9652: /* OV9653 */ name = "OV9650"; break; case 0x9656: /* Rev 4 */ case 0x9657: /* Rev 5 */ name = "OV9655"; break; default: em28xx_info("unknown OmniVision sensor detected: 0x%04x\n", id); return 0; } if (dev->em28xx_sensor == EM28XX_NOSENSOR) em28xx_info("unsupported sensor detected: %s\n", name); else em28xx_info("sensor %s detected\n", name); dev->i2c_client[dev->def_i2c_bus].addr = client.addr; return 0; } return -ENODEV; } int em28xx_detect_sensor(struct em28xx *dev) { int ret; ret = em28xx_probe_sensor_micron(dev); if (dev->em28xx_sensor == 
EM28XX_NOSENSOR && ret < 0) ret = em28xx_probe_sensor_omnivision(dev); /* * NOTE: the Windows driver also probes i2c addresses * 0x22 (Samsung ?) and 0x66 (Kodak ?) */ if (dev->em28xx_sensor == EM28XX_NOSENSOR && ret < 0) { em28xx_info("No sensor detected\n"); return -ENODEV; } return 0; } int em28xx_init_camera(struct em28xx *dev) { switch (dev->em28xx_sensor) { case EM28XX_MT9V011: { struct mt9v011_platform_data pdata; struct i2c_board_info mt9v011_info = { .type = "mt9v011", .addr = dev->i2c_client[dev->def_i2c_bus].addr, .platform_data = &pdata, }; dev->sensor_xres = 640; dev->sensor_yres = 480; /* * FIXME: mt9v011 uses I2S speed as xtal clk - at least with * the Silvercrest cam I have here for testing - for higher * resolutions, a high clock cause horizontal artifacts, so we * need to use a lower xclk frequency. * Yet, it would be possible to adjust xclk depending on the * desired resolution, since this affects directly the * frame rate. */ dev->board.xclk = EM28XX_XCLK_FREQUENCY_4_3MHZ; em28xx_write_reg(dev, EM28XX_R0F_XCLK, dev->board.xclk); dev->sensor_xtal = 4300000; pdata.xtal = dev->sensor_xtal; if (NULL == v4l2_i2c_new_subdev_board(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus], &mt9v011_info, NULL)) return -ENODEV; /* probably means GRGB 16 bit bayer */ dev->vinmode = 0x0d; dev->vinctl = 0x00; break; } case EM28XX_MT9M001: dev->sensor_xres = 1280; dev->sensor_yres = 1024; em28xx_initialize_mt9m001(dev); /* probably means BGGR 16 bit bayer */ dev->vinmode = 0x0c; dev->vinctl = 0x00; break; case EM28XX_MT9M111: dev->sensor_xres = 640; dev->sensor_yres = 512; dev->board.xclk = EM28XX_XCLK_FREQUENCY_48MHZ; em28xx_write_reg(dev, EM28XX_R0F_XCLK, dev->board.xclk); em28xx_initialize_mt9m111(dev); dev->vinmode = 0x0a; dev->vinctl = 0x00; break; case EM28XX_OV2640: { struct v4l2_subdev *subdev; struct i2c_board_info ov2640_info = { .type = "ov2640", .flags = I2C_CLIENT_SCCB, .addr = dev->i2c_client[dev->def_i2c_bus].addr, .platform_data = &camlink, }; struct 
v4l2_mbus_framefmt fmt; /* * FIXME: sensor supports resolutions up to 1600x1200, but * resolution setting/switching needs to be modified to * - switch sensor output resolution (including further * configuration changes) * - adjust bridge xclk * - disable 16 bit (12 bit) output formats on high resolutions */ dev->sensor_xres = 640; dev->sensor_yres = 480; subdev = v4l2_i2c_new_subdev_board(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus], &ov2640_info, NULL); fmt.code = V4L2_MBUS_FMT_YUYV8_2X8; fmt.width = 640; fmt.height = 480; v4l2_subdev_call(subdev, video, s_mbus_fmt, &fmt); /* NOTE: for UXGA=1600x1200 switch to 12MHz */ dev->board.xclk = EM28XX_XCLK_FREQUENCY_24MHZ; em28xx_write_reg(dev, EM28XX_R0F_XCLK, dev->board.xclk); dev->vinmode = 0x08; dev->vinctl = 0x00; break; } case EM28XX_NOSENSOR: default: return -EINVAL; } return 0; }
gpl-2.0
elephone-dev/P8000-Kernel
drivers/staging/crystalhd/crystalhd_cmds.c
2252
26847
/*************************************************************************** * Copyright (c) 2005-2009, Broadcom Corporation. * * Name: crystalhd_cmds . c * * Description: * BCM70010 Linux driver user command interfaces. * * HISTORY: * ********************************************************************** * This file is part of the crystalhd device driver. * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 2 of the License. * * This driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this driver. If not, see <http://www.gnu.org/licenses/>. **********************************************************************/ #include "crystalhd.h" static struct crystalhd_user *bc_cproc_get_uid(struct crystalhd_cmd *ctx) { struct crystalhd_user *user = NULL; int i; for (i = 0; i < BC_LINK_MAX_OPENS; i++) { if (!ctx->user[i].in_use) { user = &ctx->user[i]; break; } } return user; } static int bc_cproc_get_user_count(struct crystalhd_cmd *ctx) { int i, count = 0; for (i = 0; i < BC_LINK_MAX_OPENS; i++) { if (ctx->user[i].in_use) count++; } return count; } static void bc_cproc_mark_pwr_state(struct crystalhd_cmd *ctx) { int i; for (i = 0; i < BC_LINK_MAX_OPENS; i++) { if (!ctx->user[i].in_use) continue; if (ctx->user[i].mode == DTS_DIAG_MODE || ctx->user[i].mode == DTS_PLAYBACK_MODE) { ctx->pwr_state_change = 1; break; } } } static enum BC_STATUS bc_cproc_notify_mode(struct crystalhd_cmd *ctx, struct crystalhd_ioctl_data *idata) { int rc = 0, i = 0; if (!ctx || !idata) { BCMLOG_ERR("Invalid Arg!!\n"); return BC_STS_INV_ARG; } if (ctx->user[idata->u_id].mode != DTS_MODE_INV) { BCMLOG_ERR("Close 
the handle first..\n"); return BC_STS_ERR_USAGE; } if (idata->udata.u.NotifyMode.Mode == DTS_MONITOR_MODE) { ctx->user[idata->u_id].mode = idata->udata.u.NotifyMode.Mode; return BC_STS_SUCCESS; } if (ctx->state != BC_LINK_INVALID) { BCMLOG_ERR("Link invalid state %d\n", ctx->state); return BC_STS_ERR_USAGE; } /* Check for duplicate playback sessions..*/ for (i = 0; i < BC_LINK_MAX_OPENS; i++) { if (ctx->user[i].mode == DTS_DIAG_MODE || ctx->user[i].mode == DTS_PLAYBACK_MODE) { BCMLOG_ERR("multiple playback sessions are not " "supported..\n"); return BC_STS_ERR_USAGE; } } ctx->cin_wait_exit = 0; ctx->user[idata->u_id].mode = idata->udata.u.NotifyMode.Mode; /* Setup mmap pool for uaddr sgl mapping..*/ rc = crystalhd_create_dio_pool(ctx->adp, BC_LINK_MAX_SGLS); if (rc) return BC_STS_ERROR; /* Setup Hardware DMA rings */ return crystalhd_hw_setup_dma_rings(&ctx->hw_ctx); } static enum BC_STATUS bc_cproc_get_version(struct crystalhd_cmd *ctx, struct crystalhd_ioctl_data *idata) { if (!ctx || !idata) { BCMLOG_ERR("Invalid Arg!!\n"); return BC_STS_INV_ARG; } idata->udata.u.VerInfo.DriverMajor = crystalhd_kmod_major; idata->udata.u.VerInfo.DriverMinor = crystalhd_kmod_minor; idata->udata.u.VerInfo.DriverRevision = crystalhd_kmod_rev; return BC_STS_SUCCESS; } static enum BC_STATUS bc_cproc_get_hwtype(struct crystalhd_cmd *ctx, struct crystalhd_ioctl_data *idata) { if (!ctx || !idata) { BCMLOG_ERR("Invalid Arg!!\n"); return BC_STS_INV_ARG; } crystalhd_pci_cfg_rd(ctx->adp, 0, 2, (uint32_t *)&idata->udata.u.hwType.PciVenId); crystalhd_pci_cfg_rd(ctx->adp, 2, 2, (uint32_t *)&idata->udata.u.hwType.PciDevId); crystalhd_pci_cfg_rd(ctx->adp, 8, 1, (uint32_t *)&idata->udata.u.hwType.HwRev); return BC_STS_SUCCESS; } static enum BC_STATUS bc_cproc_reg_rd(struct crystalhd_cmd *ctx, struct crystalhd_ioctl_data *idata) { if (!ctx || !idata) return BC_STS_INV_ARG; idata->udata.u.regAcc.Value = bc_dec_reg_rd(ctx->adp, idata->udata.u.regAcc.Offset); return BC_STS_SUCCESS; } static enum 
BC_STATUS bc_cproc_reg_wr(struct crystalhd_cmd *ctx, struct crystalhd_ioctl_data *idata) { if (!ctx || !idata) return BC_STS_INV_ARG; bc_dec_reg_wr(ctx->adp, idata->udata.u.regAcc.Offset, idata->udata.u.regAcc.Value); return BC_STS_SUCCESS; } static enum BC_STATUS bc_cproc_link_reg_rd(struct crystalhd_cmd *ctx, struct crystalhd_ioctl_data *idata) { if (!ctx || !idata) return BC_STS_INV_ARG; idata->udata.u.regAcc.Value = crystalhd_reg_rd(ctx->adp, idata->udata.u.regAcc.Offset); return BC_STS_SUCCESS; } static enum BC_STATUS bc_cproc_link_reg_wr(struct crystalhd_cmd *ctx, struct crystalhd_ioctl_data *idata) { if (!ctx || !idata) return BC_STS_INV_ARG; crystalhd_reg_wr(ctx->adp, idata->udata.u.regAcc.Offset, idata->udata.u.regAcc.Value); return BC_STS_SUCCESS; } static enum BC_STATUS bc_cproc_mem_rd(struct crystalhd_cmd *ctx, struct crystalhd_ioctl_data *idata) { enum BC_STATUS sts = BC_STS_SUCCESS; if (!ctx || !idata || !idata->add_cdata) return BC_STS_INV_ARG; if (idata->udata.u.devMem.NumDwords > (idata->add_cdata_sz / 4)) { BCMLOG_ERR("insufficient buffer\n"); return BC_STS_INV_ARG; } sts = crystalhd_mem_rd(ctx->adp, idata->udata.u.devMem.StartOff, idata->udata.u.devMem.NumDwords, (uint32_t *)idata->add_cdata); return sts; } static enum BC_STATUS bc_cproc_mem_wr(struct crystalhd_cmd *ctx, struct crystalhd_ioctl_data *idata) { enum BC_STATUS sts = BC_STS_SUCCESS; if (!ctx || !idata || !idata->add_cdata) return BC_STS_INV_ARG; if (idata->udata.u.devMem.NumDwords > (idata->add_cdata_sz / 4)) { BCMLOG_ERR("insufficient buffer\n"); return BC_STS_INV_ARG; } sts = crystalhd_mem_wr(ctx->adp, idata->udata.u.devMem.StartOff, idata->udata.u.devMem.NumDwords, (uint32_t *)idata->add_cdata); return sts; } static enum BC_STATUS bc_cproc_cfg_rd(struct crystalhd_cmd *ctx, struct crystalhd_ioctl_data *idata) { uint32_t ix, cnt, off, len; enum BC_STATUS sts = BC_STS_SUCCESS; uint32_t *temp; if (!ctx || !idata) return BC_STS_INV_ARG; temp = (uint32_t *) 
idata->udata.u.pciCfg.pci_cfg_space; off = idata->udata.u.pciCfg.Offset; len = idata->udata.u.pciCfg.Size; if (len <= 4) return crystalhd_pci_cfg_rd(ctx->adp, off, len, temp); /* Truncate to dword alignment..*/ len = 4; cnt = idata->udata.u.pciCfg.Size / len; for (ix = 0; ix < cnt; ix++) { sts = crystalhd_pci_cfg_rd(ctx->adp, off, len, &temp[ix]); if (sts != BC_STS_SUCCESS) { BCMLOG_ERR("config read : %d\n", sts); return sts; } off += len; } return sts; } static enum BC_STATUS bc_cproc_cfg_wr(struct crystalhd_cmd *ctx, struct crystalhd_ioctl_data *idata) { uint32_t ix, cnt, off, len; enum BC_STATUS sts = BC_STS_SUCCESS; uint32_t *temp; if (!ctx || !idata) return BC_STS_INV_ARG; temp = (uint32_t *) idata->udata.u.pciCfg.pci_cfg_space; off = idata->udata.u.pciCfg.Offset; len = idata->udata.u.pciCfg.Size; if (len <= 4) return crystalhd_pci_cfg_wr(ctx->adp, off, len, temp[0]); /* Truncate to dword alignment..*/ len = 4; cnt = idata->udata.u.pciCfg.Size / len; for (ix = 0; ix < cnt; ix++) { sts = crystalhd_pci_cfg_wr(ctx->adp, off, len, temp[ix]); if (sts != BC_STS_SUCCESS) { BCMLOG_ERR("config write : %d\n", sts); return sts; } off += len; } return sts; } static enum BC_STATUS bc_cproc_download_fw(struct crystalhd_cmd *ctx, struct crystalhd_ioctl_data *idata) { enum BC_STATUS sts = BC_STS_SUCCESS; if (!ctx || !idata || !idata->add_cdata || !idata->add_cdata_sz) { BCMLOG_ERR("Invalid Arg!!\n"); return BC_STS_INV_ARG; } if (ctx->state != BC_LINK_INVALID) { BCMLOG_ERR("Link invalid state %d\n", ctx->state); return BC_STS_ERR_USAGE; } sts = crystalhd_download_fw(ctx->adp, (uint8_t *)idata->add_cdata, idata->add_cdata_sz); if (sts != BC_STS_SUCCESS) BCMLOG_ERR("Firmware Download Failure!! - %d\n", sts); else ctx->state |= BC_LINK_INIT; return sts; } /* * We use the FW_CMD interface to sync up playback state with application * and firmware. This function will perform the required pre and post * processing of the Firmware commands. 
* * Pause - * Disable capture after decoder pause. * Resume - * First enable capture and issue decoder resume command. * Flush - * Abort pending input transfers and issue decoder flush command. * */ static enum BC_STATUS bc_cproc_do_fw_cmd(struct crystalhd_cmd *ctx, struct crystalhd_ioctl_data *idata) { enum BC_STATUS sts; uint32_t *cmd; if (!(ctx->state & BC_LINK_INIT)) { BCMLOG_ERR("Link invalid state %d\n", ctx->state); return BC_STS_ERR_USAGE; } cmd = idata->udata.u.fwCmd.cmd; /* Pre-Process */ if (cmd[0] == eCMD_C011_DEC_CHAN_PAUSE) { if (!cmd[3]) { ctx->state &= ~BC_LINK_PAUSED; crystalhd_hw_unpause(&ctx->hw_ctx); } } else if (cmd[0] == eCMD_C011_DEC_CHAN_FLUSH) { BCMLOG(BCMLOG_INFO, "Flush issued\n"); if (cmd[3]) ctx->cin_wait_exit = 1; } sts = crystalhd_do_fw_cmd(&ctx->hw_ctx, &idata->udata.u.fwCmd); if (sts != BC_STS_SUCCESS) { BCMLOG(BCMLOG_INFO, "fw cmd %x failed\n", cmd[0]); return sts; } /* Post-Process */ if (cmd[0] == eCMD_C011_DEC_CHAN_PAUSE) { if (cmd[3]) { ctx->state |= BC_LINK_PAUSED; crystalhd_hw_pause(&ctx->hw_ctx); } } return sts; } static void bc_proc_in_completion(struct crystalhd_dio_req *dio_hnd, wait_queue_head_t *event, enum BC_STATUS sts) { if (!dio_hnd || !event) { BCMLOG_ERR("Invalid Arg!!\n"); return; } if (sts == BC_STS_IO_USER_ABORT) return; dio_hnd->uinfo.comp_sts = sts; dio_hnd->uinfo.ev_sts = 1; crystalhd_set_event(event); } static enum BC_STATUS bc_cproc_codein_sleep(struct crystalhd_cmd *ctx) { wait_queue_head_t sleep_ev; int rc = 0; if (ctx->state & BC_LINK_SUSPEND) return BC_STS_IO_USER_ABORT; if (ctx->cin_wait_exit) { ctx->cin_wait_exit = 0; return BC_STS_CMD_CANCELLED; } crystalhd_create_event(&sleep_ev); crystalhd_wait_on_event(&sleep_ev, 0, 100, rc, 0); if (rc == -EINTR) return BC_STS_IO_USER_ABORT; return BC_STS_SUCCESS; } static enum BC_STATUS bc_cproc_hw_txdma(struct crystalhd_cmd *ctx, struct crystalhd_ioctl_data *idata, struct crystalhd_dio_req *dio) { uint32_t tx_listid = 0; enum BC_STATUS sts = BC_STS_SUCCESS; 
wait_queue_head_t event; int rc = 0; if (!ctx || !idata || !dio) { BCMLOG_ERR("Invalid Arg!!\n"); return BC_STS_INV_ARG; } crystalhd_create_event(&event); ctx->tx_list_id = 0; /* msleep_interruptible(2000); */ sts = crystalhd_hw_post_tx(&ctx->hw_ctx, dio, bc_proc_in_completion, &event, &tx_listid, idata->udata.u.ProcInput.Encrypted); while (sts == BC_STS_BUSY) { sts = bc_cproc_codein_sleep(ctx); if (sts != BC_STS_SUCCESS) break; sts = crystalhd_hw_post_tx(&ctx->hw_ctx, dio, bc_proc_in_completion, &event, &tx_listid, idata->udata.u.ProcInput.Encrypted); } if (sts != BC_STS_SUCCESS) { BCMLOG(BCMLOG_DBG, "_hw_txdma returning sts:%d\n", sts); return sts; } if (ctx->cin_wait_exit) ctx->cin_wait_exit = 0; ctx->tx_list_id = tx_listid; /* _post() succeeded.. wait for the completion. */ crystalhd_wait_on_event(&event, (dio->uinfo.ev_sts), 3000, rc, 0); ctx->tx_list_id = 0; if (!rc) { return dio->uinfo.comp_sts; } else if (rc == -EBUSY) { BCMLOG(BCMLOG_DBG, "_tx_post() T/O\n"); sts = BC_STS_TIMEOUT; } else if (rc == -EINTR) { BCMLOG(BCMLOG_DBG, "Tx Wait Signal int.\n"); sts = BC_STS_IO_USER_ABORT; } else { sts = BC_STS_IO_ERROR; } /* We are cancelling the IO from the same context as the _post(). * so no need to wait on the event again.. the return itself * ensures the release of our resources. */ crystalhd_hw_cancel_tx(&ctx->hw_ctx, tx_listid); return sts; } /* Helper function to check on user buffers */ static enum BC_STATUS bc_cproc_check_inbuffs(bool pin, void *ubuff, uint32_t ub_sz, uint32_t uv_off, bool en_422) { if (!ubuff || !ub_sz) { BCMLOG_ERR("%s->Invalid Arg %p %x\n", ((pin) ? "TX" : "RX"), ubuff, ub_sz); return BC_STS_INV_ARG; } /* Check for alignment */ if (((uintptr_t)ubuff) & 0x03) { BCMLOG_ERR("%s-->Un-aligned address not implemented yet.. %p\n", ((pin) ? 
"TX" : "RX"), ubuff); return BC_STS_NOT_IMPL; } if (pin) return BC_STS_SUCCESS; if (!en_422 && !uv_off) { BCMLOG_ERR("Need UV offset for 420 mode.\n"); return BC_STS_INV_ARG; } if (en_422 && uv_off) { BCMLOG_ERR("UV offset in 422 mode ??\n"); return BC_STS_INV_ARG; } return BC_STS_SUCCESS; } static enum BC_STATUS bc_cproc_proc_input(struct crystalhd_cmd *ctx, struct crystalhd_ioctl_data *idata) { void *ubuff; uint32_t ub_sz; struct crystalhd_dio_req *dio_hnd = NULL; enum BC_STATUS sts = BC_STS_SUCCESS; if (!ctx || !idata) { BCMLOG_ERR("Invalid Arg!!\n"); return BC_STS_INV_ARG; } ubuff = idata->udata.u.ProcInput.pDmaBuff; ub_sz = idata->udata.u.ProcInput.BuffSz; sts = bc_cproc_check_inbuffs(1, ubuff, ub_sz, 0, 0); if (sts != BC_STS_SUCCESS) return sts; sts = crystalhd_map_dio(ctx->adp, ubuff, ub_sz, 0, 0, 1, &dio_hnd); if (sts != BC_STS_SUCCESS) { BCMLOG_ERR("dio map - %d\n", sts); return sts; } if (!dio_hnd) return BC_STS_ERROR; sts = bc_cproc_hw_txdma(ctx, idata, dio_hnd); crystalhd_unmap_dio(ctx->adp, dio_hnd); return sts; } static enum BC_STATUS bc_cproc_add_cap_buff(struct crystalhd_cmd *ctx, struct crystalhd_ioctl_data *idata) { void *ubuff; uint32_t ub_sz, uv_off; bool en_422; struct crystalhd_dio_req *dio_hnd = NULL; enum BC_STATUS sts = BC_STS_SUCCESS; if (!ctx || !idata) { BCMLOG_ERR("Invalid Arg!!\n"); return BC_STS_INV_ARG; } ubuff = idata->udata.u.RxBuffs.YuvBuff; ub_sz = idata->udata.u.RxBuffs.YuvBuffSz; uv_off = idata->udata.u.RxBuffs.UVbuffOffset; en_422 = idata->udata.u.RxBuffs.b422Mode; sts = bc_cproc_check_inbuffs(0, ubuff, ub_sz, uv_off, en_422); if (sts != BC_STS_SUCCESS) return sts; sts = crystalhd_map_dio(ctx->adp, ubuff, ub_sz, uv_off, en_422, 0, &dio_hnd); if (sts != BC_STS_SUCCESS) { BCMLOG_ERR("dio map - %d\n", sts); return sts; } if (!dio_hnd) return BC_STS_ERROR; sts = crystalhd_hw_add_cap_buffer(&ctx->hw_ctx, dio_hnd, (ctx->state == BC_LINK_READY)); if ((sts != BC_STS_SUCCESS) && (sts != BC_STS_BUSY)) { crystalhd_unmap_dio(ctx->adp, 
dio_hnd); return sts; } return BC_STS_SUCCESS; } static enum BC_STATUS bc_cproc_fmt_change(struct crystalhd_cmd *ctx, struct crystalhd_dio_req *dio) { enum BC_STATUS sts = BC_STS_SUCCESS; sts = crystalhd_hw_add_cap_buffer(&ctx->hw_ctx, dio, 0); if (sts != BC_STS_SUCCESS) return sts; ctx->state |= BC_LINK_FMT_CHG; if (ctx->state == BC_LINK_READY) sts = crystalhd_hw_start_capture(&ctx->hw_ctx); return sts; } static enum BC_STATUS bc_cproc_fetch_frame(struct crystalhd_cmd *ctx, struct crystalhd_ioctl_data *idata) { struct crystalhd_dio_req *dio = NULL; enum BC_STATUS sts = BC_STS_SUCCESS; struct BC_DEC_OUT_BUFF *frame; if (!ctx || !idata) { BCMLOG_ERR("Invalid Arg!!\n"); return BC_STS_INV_ARG; } if (!(ctx->state & BC_LINK_CAP_EN)) { BCMLOG(BCMLOG_DBG, "Capture not enabled..%x\n", ctx->state); return BC_STS_ERR_USAGE; } frame = &idata->udata.u.DecOutData; sts = crystalhd_hw_get_cap_buffer(&ctx->hw_ctx, &frame->PibInfo, &dio); if (sts != BC_STS_SUCCESS) return (ctx->state & BC_LINK_SUSPEND) ? 
BC_STS_IO_USER_ABORT : sts; frame->Flags = dio->uinfo.comp_flags; if (frame->Flags & COMP_FLAG_FMT_CHANGE) return bc_cproc_fmt_change(ctx, dio); frame->OutPutBuffs.YuvBuff = dio->uinfo.xfr_buff; frame->OutPutBuffs.YuvBuffSz = dio->uinfo.xfr_len; frame->OutPutBuffs.UVbuffOffset = dio->uinfo.uv_offset; frame->OutPutBuffs.b422Mode = dio->uinfo.b422mode; frame->OutPutBuffs.YBuffDoneSz = dio->uinfo.y_done_sz; frame->OutPutBuffs.UVBuffDoneSz = dio->uinfo.uv_done_sz; crystalhd_unmap_dio(ctx->adp, dio); return BC_STS_SUCCESS; } static enum BC_STATUS bc_cproc_start_capture(struct crystalhd_cmd *ctx, struct crystalhd_ioctl_data *idata) { ctx->state |= BC_LINK_CAP_EN; if (ctx->state == BC_LINK_READY) return crystalhd_hw_start_capture(&ctx->hw_ctx); return BC_STS_SUCCESS; } static enum BC_STATUS bc_cproc_flush_cap_buffs(struct crystalhd_cmd *ctx, struct crystalhd_ioctl_data *idata) { struct crystalhd_dio_req *dio = NULL; enum BC_STATUS sts = BC_STS_SUCCESS; struct BC_DEC_OUT_BUFF *frame; uint32_t count; if (!ctx || !idata) { BCMLOG_ERR("Invalid Arg!!\n"); return BC_STS_INV_ARG; } if (!(ctx->state & BC_LINK_CAP_EN)) return BC_STS_ERR_USAGE; /* We should ack flush even when we are in paused/suspend state */ if (!(ctx->state & BC_LINK_READY)) return crystalhd_hw_stop_capture(&ctx->hw_ctx); ctx->state &= ~(BC_LINK_CAP_EN|BC_LINK_FMT_CHG); frame = &idata->udata.u.DecOutData; for (count = 0; count < BC_RX_LIST_CNT; count++) { sts = crystalhd_hw_get_cap_buffer(&ctx->hw_ctx, &frame->PibInfo, &dio); if (sts != BC_STS_SUCCESS) break; crystalhd_unmap_dio(ctx->adp, dio); } return crystalhd_hw_stop_capture(&ctx->hw_ctx); } static enum BC_STATUS bc_cproc_get_stats(struct crystalhd_cmd *ctx, struct crystalhd_ioctl_data *idata) { struct BC_DTS_STATS *stats; struct crystalhd_hw_stats hw_stats; if (!ctx || !idata) { BCMLOG_ERR("Invalid Arg!!\n"); return BC_STS_INV_ARG; } crystalhd_hw_stats(&ctx->hw_ctx, &hw_stats); stats = &idata->udata.u.drvStat; stats->drvRLL = hw_stats.rdyq_count; 
stats->drvFLL = hw_stats.freeq_count; stats->DrvTotalFrmDropped = hw_stats.rx_errors; stats->DrvTotalHWErrs = hw_stats.rx_errors + hw_stats.tx_errors; stats->intCount = hw_stats.num_interrupts; stats->DrvIgnIntrCnt = hw_stats.num_interrupts - hw_stats.dev_interrupts; stats->TxFifoBsyCnt = hw_stats.cin_busy; stats->pauseCount = hw_stats.pause_cnt; if (ctx->pwr_state_change) stats->pwr_state_change = 1; if (ctx->state & BC_LINK_PAUSED) stats->DrvPauseTime = 1; return BC_STS_SUCCESS; } static enum BC_STATUS bc_cproc_reset_stats(struct crystalhd_cmd *ctx, struct crystalhd_ioctl_data *idata) { crystalhd_hw_stats(&ctx->hw_ctx, NULL); return BC_STS_SUCCESS; } static enum BC_STATUS bc_cproc_chg_clk(struct crystalhd_cmd *ctx, struct crystalhd_ioctl_data *idata) { struct BC_CLOCK *clock; uint32_t oldClk; enum BC_STATUS sts = BC_STS_SUCCESS; if (!ctx || !idata) { BCMLOG_ERR("Invalid Arg!!\n"); return BC_STS_INV_ARG; } clock = &idata->udata.u.clockValue; oldClk = ctx->hw_ctx.core_clock_mhz; ctx->hw_ctx.core_clock_mhz = clock->clk; if (ctx->state & BC_LINK_READY) { sts = crystalhd_hw_set_core_clock(&ctx->hw_ctx); if (sts == BC_STS_CLK_NOCHG) ctx->hw_ctx.core_clock_mhz = oldClk; } clock->clk = ctx->hw_ctx.core_clock_mhz; return sts; } /*=============== Cmd Proc Table.. 
======================================*/ static const struct crystalhd_cmd_tbl g_crystalhd_cproc_tbl[] = { { BCM_IOC_GET_VERSION, bc_cproc_get_version, 0}, { BCM_IOC_GET_HWTYPE, bc_cproc_get_hwtype, 0}, { BCM_IOC_REG_RD, bc_cproc_reg_rd, 0}, { BCM_IOC_REG_WR, bc_cproc_reg_wr, 0}, { BCM_IOC_FPGA_RD, bc_cproc_link_reg_rd, 0}, { BCM_IOC_FPGA_WR, bc_cproc_link_reg_wr, 0}, { BCM_IOC_MEM_RD, bc_cproc_mem_rd, 0}, { BCM_IOC_MEM_WR, bc_cproc_mem_wr, 0}, { BCM_IOC_RD_PCI_CFG, bc_cproc_cfg_rd, 0}, { BCM_IOC_WR_PCI_CFG, bc_cproc_cfg_wr, 1}, { BCM_IOC_FW_DOWNLOAD, bc_cproc_download_fw, 1}, { BCM_IOC_FW_CMD, bc_cproc_do_fw_cmd, 1}, { BCM_IOC_PROC_INPUT, bc_cproc_proc_input, 1}, { BCM_IOC_ADD_RXBUFFS, bc_cproc_add_cap_buff, 1}, { BCM_IOC_FETCH_RXBUFF, bc_cproc_fetch_frame, 1}, { BCM_IOC_START_RX_CAP, bc_cproc_start_capture, 1}, { BCM_IOC_FLUSH_RX_CAP, bc_cproc_flush_cap_buffs, 1}, { BCM_IOC_GET_DRV_STAT, bc_cproc_get_stats, 0}, { BCM_IOC_RST_DRV_STAT, bc_cproc_reset_stats, 0}, { BCM_IOC_NOTIFY_MODE, bc_cproc_notify_mode, 0}, { BCM_IOC_CHG_CLK, bc_cproc_chg_clk, 0}, { BCM_IOC_END, NULL}, }; /*=============== Cmd Proc Functions.. ===================================*/ /** * crystalhd_suspend - Power management suspend request. * @ctx: Command layer context. * @idata: Iodata - required for internal use. * * Return: * status * * 1. Set the state to Suspend. * 2. Flush the Rx Buffers it will unmap all the buffers and * stop the RxDMA engine. * 3. Cancel The TX Io and Stop Dma Engine. * 4. Put the DDR in to deep sleep. * 5. Stop the hardware putting it in to Reset State. * * Current gstreamer frame work does not provide any power management * related notification to user mode decoder plug-in. As a work-around * we pass on the power mangement notification to our plug-in by completing * all outstanding requests with BC_STS_IO_USER_ABORT return code. 
*/ enum BC_STATUS crystalhd_suspend(struct crystalhd_cmd *ctx, struct crystalhd_ioctl_data *idata) { enum BC_STATUS sts = BC_STS_SUCCESS; if (!ctx || !idata) { BCMLOG_ERR("Invalid Parameters\n"); return BC_STS_ERROR; } if (ctx->state & BC_LINK_SUSPEND) return BC_STS_SUCCESS; if (ctx->state == BC_LINK_INVALID) { BCMLOG(BCMLOG_DBG, "Nothing To Do Suspend Success\n"); return BC_STS_SUCCESS; } ctx->state |= BC_LINK_SUSPEND; bc_cproc_mark_pwr_state(ctx); if (ctx->state & BC_LINK_CAP_EN) { sts = bc_cproc_flush_cap_buffs(ctx, idata); if (sts != BC_STS_SUCCESS) return sts; } if (ctx->tx_list_id) { sts = crystalhd_hw_cancel_tx(&ctx->hw_ctx, ctx->tx_list_id); if (sts != BC_STS_SUCCESS) return sts; } sts = crystalhd_hw_suspend(&ctx->hw_ctx); if (sts != BC_STS_SUCCESS) return sts; BCMLOG(BCMLOG_DBG, "BCM70012 suspend success\n"); return BC_STS_SUCCESS; } /** * crystalhd_resume - Resume frame capture. * @ctx: Command layer contextx. * * Return: * status * * * Resume frame capture. * * PM_Resume can't resume the playback state back to pre-suspend state * because we don't keep video clip related information within driver. * To get back to the pre-suspend state App will re-open the device and * start a new playback session from the pre-suspend clip position. * */ enum BC_STATUS crystalhd_resume(struct crystalhd_cmd *ctx) { BCMLOG(BCMLOG_DBG, "crystalhd_resume Success %x\n", ctx->state); bc_cproc_mark_pwr_state(ctx); return BC_STS_SUCCESS; } /** * crystalhd_user_open - Create application handle. * @ctx: Command layer contextx. * @user_ctx: User ID context. * * Return: * status * * Creates an application specific UID and allocates * application specific resources. HW layer initialization * is done for the first open request. 
*/ enum BC_STATUS crystalhd_user_open(struct crystalhd_cmd *ctx, struct crystalhd_user **user_ctx) { struct crystalhd_user *uc; if (!ctx || !user_ctx) { BCMLOG_ERR("Invalid arg..\n"); return BC_STS_INV_ARG; } uc = bc_cproc_get_uid(ctx); if (!uc) { BCMLOG(BCMLOG_INFO, "No free user context...\n"); return BC_STS_BUSY; } BCMLOG(BCMLOG_INFO, "Opening new user[%x] handle\n", uc->uid); crystalhd_hw_open(&ctx->hw_ctx, ctx->adp); uc->in_use = 1; *user_ctx = uc; return BC_STS_SUCCESS; } /** * crystalhd_user_close - Close application handle. * @ctx: Command layer contextx. * @uc: User ID context. * * Return: * status * * Closer application handle and release app specific * resources. */ enum BC_STATUS crystalhd_user_close(struct crystalhd_cmd *ctx, struct crystalhd_user *uc) { uint32_t mode = uc->mode; ctx->user[uc->uid].mode = DTS_MODE_INV; ctx->user[uc->uid].in_use = 0; ctx->cin_wait_exit = 1; ctx->pwr_state_change = 0; BCMLOG(BCMLOG_INFO, "Closing user[%x] handle\n", uc->uid); if ((mode == DTS_DIAG_MODE) || (mode == DTS_PLAYBACK_MODE)) { crystalhd_hw_free_dma_rings(&ctx->hw_ctx); crystalhd_destroy_dio_pool(ctx->adp); } else if (bc_cproc_get_user_count(ctx)) { return BC_STS_SUCCESS; } crystalhd_hw_close(&ctx->hw_ctx); ctx->state = BC_LINK_INVALID; return BC_STS_SUCCESS; } /** * crystalhd_setup_cmd_context - Setup Command layer resources. * @ctx: Command layer contextx. * @adp: Adapter context * * Return: * status * * Called at the time of driver load. 
*/ enum BC_STATUS crystalhd_setup_cmd_context(struct crystalhd_cmd *ctx, struct crystalhd_adp *adp) { int i = 0; if (!ctx || !adp) { BCMLOG_ERR("Invalid arg!!\n"); return BC_STS_INV_ARG; } if (ctx->adp) BCMLOG(BCMLOG_DBG, "Resetting Cmd context delete missing..\n"); ctx->adp = adp; for (i = 0; i < BC_LINK_MAX_OPENS; i++) { ctx->user[i].uid = i; ctx->user[i].in_use = 0; ctx->user[i].mode = DTS_MODE_INV; } /*Open and Close the Hardware to put it in to sleep state*/ crystalhd_hw_open(&ctx->hw_ctx, ctx->adp); crystalhd_hw_close(&ctx->hw_ctx); return BC_STS_SUCCESS; } /** * crystalhd_delete_cmd_context - Release Command layer resources. * @ctx: Command layer contextx. * * Return: * status * * Called at the time of driver un-load. */ enum BC_STATUS crystalhd_delete_cmd_context(struct crystalhd_cmd *ctx) { BCMLOG(BCMLOG_DBG, "Deleting Command context..\n"); ctx->adp = NULL; return BC_STS_SUCCESS; } /** * crystalhd_get_cmd_proc - Cproc table lookup. * @ctx: Command layer contextx. * @cmd: IOCTL command code. * @uc: User ID context. * * Return: * command proc function pointer * * This function checks the process context, application's * mode of operation and returns the function pointer * from the cproc table. */ crystalhd_cmd_proc crystalhd_get_cmd_proc(struct crystalhd_cmd *ctx, uint32_t cmd, struct crystalhd_user *uc) { crystalhd_cmd_proc cproc = NULL; unsigned int i, tbl_sz; if (!ctx) { BCMLOG_ERR("Invalid arg.. Cmd[%d]\n", cmd); return NULL; } if ((cmd != BCM_IOC_GET_DRV_STAT) && (ctx->state & BC_LINK_SUSPEND)) { BCMLOG_ERR("Invalid State [suspend Set].. 
Cmd[%d]\n", cmd); return NULL; } tbl_sz = sizeof(g_crystalhd_cproc_tbl) / sizeof(struct crystalhd_cmd_tbl); for (i = 0; i < tbl_sz; i++) { if (g_crystalhd_cproc_tbl[i].cmd_id == cmd) { if ((uc->mode == DTS_MONITOR_MODE) && (g_crystalhd_cproc_tbl[i].block_mon)) { BCMLOG(BCMLOG_INFO, "Blocking cmd %d\n", cmd); break; } cproc = g_crystalhd_cproc_tbl[i].cmd_proc; break; } } return cproc; } /** * crystalhd_cmd_interrupt - ISR entry point * @ctx: Command layer contextx. * * Return: * TRUE: If interrupt from bcm70012 device. * * * ISR entry point from OS layer. */ bool crystalhd_cmd_interrupt(struct crystalhd_cmd *ctx) { if (!ctx) { BCMLOG_ERR("Invalid arg..\n"); return 0; } return crystalhd_hw_interrupt(ctx->adp, &ctx->hw_ctx); }
gpl-2.0
sour12/iamroot
drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c
2252
12718
/* * Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher * Mark Cave-Ayland, Carlo E Prelz, Dick Streefland * Copyright (c) 2002, 2003 Tuukka Toivonen * Copyright (c) 2008 Erik Andrén * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * P/N 861037: Sensor HDCS1000 ASIC STV0600 * P/N 861050-0010: Sensor HDCS1000 ASIC STV0600 * P/N 861050-0020: Sensor Photobit PB100 ASIC STV0600-1 - QuickCam Express * P/N 861055: Sensor ST VV6410 ASIC STV0610 - LEGO cam * P/N 861075-0040: Sensor HDCS1000 ASIC * P/N 961179-0700: Sensor ST VV6410 ASIC STV0602 - Dexxa WebCam USB * P/N 861040-0000: Sensor ST VV6410 ASIC STV0610 - QuickCam Web */ /* * The spec file for the PB-0100 suggests the following for best quality * images after the sensor has been reset : * * PB_ADCGAINL = R60 = 0x03 (3 dec) : sets low reference of ADC to produce good black level * PB_PREADCTRL = R32 = 0x1400 (5120 dec) : Enables global gain changes through R53 * PB_ADCMINGAIN = R52 = 0x10 (16 dec) : Sets the minimum gain for auto-exposure * PB_ADCGLOBALGAIN = R53 = 0x10 (16 dec) : Sets the global gain * PB_EXPGAIN = R14 = 0x11 (17 dec) : Sets the auto-exposure value * PB_UPDATEINT = R23 = 0x02 (2 dec) : Sets the speed on auto-exposure routine * PB_CFILLIN = R5 = 0x0E (14 dec) : Sets the frame rate */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include 
"stv06xx_pb0100.h" struct pb0100_ctrls { struct { /* one big happy control cluster... */ struct v4l2_ctrl *autogain; struct v4l2_ctrl *gain; struct v4l2_ctrl *exposure; struct v4l2_ctrl *red; struct v4l2_ctrl *blue; struct v4l2_ctrl *natural; }; struct v4l2_ctrl *target; }; static struct v4l2_pix_format pb0100_mode[] = { /* low res / subsample modes disabled as they are only half res horizontal, halving the vertical resolution does not seem to work */ { 320, 240, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE, .sizeimage = 320 * 240, .bytesperline = 320, .colorspace = V4L2_COLORSPACE_SRGB, .priv = PB0100_CROP_TO_VGA }, { 352, 288, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE, .sizeimage = 352 * 288, .bytesperline = 352, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0 } }; static int pb0100_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *)gspca_dev; struct pb0100_ctrls *ctrls = sd->sensor_priv; int err = -EINVAL; switch (ctrl->id) { case V4L2_CID_AUTOGAIN: err = pb0100_set_autogain(gspca_dev, ctrl->val); if (err) break; if (ctrl->val) break; err = pb0100_set_gain(gspca_dev, ctrls->gain->val); if (err) break; err = pb0100_set_exposure(gspca_dev, ctrls->exposure->val); break; case V4L2_CTRL_CLASS_USER + 0x1001: err = pb0100_set_autogain_target(gspca_dev, ctrl->val); break; } return err; } static const struct v4l2_ctrl_ops pb0100_ctrl_ops = { .s_ctrl = pb0100_s_ctrl, }; static int pb0100_init_controls(struct sd *sd) { struct v4l2_ctrl_handler *hdl = &sd->gspca_dev.ctrl_handler; struct pb0100_ctrls *ctrls; static const struct v4l2_ctrl_config autogain_target = { .ops = &pb0100_ctrl_ops, .id = V4L2_CTRL_CLASS_USER + 0x1000, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Automatic Gain Target", .max = 255, .step = 1, .def = 128, }; static const struct v4l2_ctrl_config natural_light = { .ops = &pb0100_ctrl_ops, .id = V4L2_CTRL_CLASS_USER + 0x1001, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Natural 
Light Source", .max = 1, .step = 1, .def = 1, }; ctrls = kzalloc(sizeof(*ctrls), GFP_KERNEL); if (!ctrls) return -ENOMEM; v4l2_ctrl_handler_init(hdl, 6); ctrls->autogain = v4l2_ctrl_new_std(hdl, &pb0100_ctrl_ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1); ctrls->exposure = v4l2_ctrl_new_std(hdl, &pb0100_ctrl_ops, V4L2_CID_EXPOSURE, 0, 511, 1, 12); ctrls->gain = v4l2_ctrl_new_std(hdl, &pb0100_ctrl_ops, V4L2_CID_GAIN, 0, 255, 1, 128); ctrls->red = v4l2_ctrl_new_std(hdl, &pb0100_ctrl_ops, V4L2_CID_RED_BALANCE, -255, 255, 1, 0); ctrls->blue = v4l2_ctrl_new_std(hdl, &pb0100_ctrl_ops, V4L2_CID_BLUE_BALANCE, -255, 255, 1, 0); ctrls->natural = v4l2_ctrl_new_custom(hdl, &natural_light, NULL); ctrls->target = v4l2_ctrl_new_custom(hdl, &autogain_target, NULL); if (hdl->error) { kfree(ctrls); return hdl->error; } sd->sensor_priv = ctrls; v4l2_ctrl_auto_cluster(5, &ctrls->autogain, 0, false); return 0; } static int pb0100_probe(struct sd *sd) { u16 sensor; int err; err = stv06xx_read_sensor(sd, PB_IDENT, &sensor); if (err < 0) return -ENODEV; if ((sensor >> 8) != 0x64) return -ENODEV; pr_info("Photobit pb0100 sensor detected\n"); sd->gspca_dev.cam.cam_mode = pb0100_mode; sd->gspca_dev.cam.nmodes = ARRAY_SIZE(pb0100_mode); return 0; } static int pb0100_start(struct sd *sd) { int err, packet_size, max_packet_size; struct usb_host_interface *alt; struct usb_interface *intf; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; struct cam *cam = &sd->gspca_dev.cam; u32 mode = cam->cam_mode[sd->gspca_dev.curr_mode].priv; intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface); alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt); if (!alt) return -ENODEV; packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); /* If we don't have enough bandwidth use a lower framerate */ max_packet_size = sd->sensor->max_packet_size[sd->gspca_dev.curr_mode]; if (packet_size < max_packet_size) stv06xx_write_sensor(sd, PB_ROWSPEED, BIT(4)|BIT(3)|BIT(1)); else stv06xx_write_sensor(sd, 
PB_ROWSPEED, BIT(5)|BIT(3)|BIT(1)); /* Setup sensor window */ if (mode & PB0100_CROP_TO_VGA) { stv06xx_write_sensor(sd, PB_RSTART, 30); stv06xx_write_sensor(sd, PB_CSTART, 20); stv06xx_write_sensor(sd, PB_RWSIZE, 240 - 1); stv06xx_write_sensor(sd, PB_CWSIZE, 320 - 1); } else { stv06xx_write_sensor(sd, PB_RSTART, 8); stv06xx_write_sensor(sd, PB_CSTART, 4); stv06xx_write_sensor(sd, PB_RWSIZE, 288 - 1); stv06xx_write_sensor(sd, PB_CWSIZE, 352 - 1); } if (mode & PB0100_SUBSAMPLE) { stv06xx_write_bridge(sd, STV_Y_CTRL, 0x02); /* Wrong, FIXME */ stv06xx_write_bridge(sd, STV_X_CTRL, 0x06); stv06xx_write_bridge(sd, STV_SCAN_RATE, 0x10); } else { stv06xx_write_bridge(sd, STV_Y_CTRL, 0x01); stv06xx_write_bridge(sd, STV_X_CTRL, 0x0a); /* larger -> slower */ stv06xx_write_bridge(sd, STV_SCAN_RATE, 0x20); } err = stv06xx_write_sensor(sd, PB_CONTROL, BIT(5)|BIT(3)|BIT(1)); PDEBUG(D_STREAM, "Started stream, status: %d", err); return (err < 0) ? err : 0; } static int pb0100_stop(struct sd *sd) { struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; int err; err = stv06xx_write_sensor(sd, PB_ABORTFRAME, 1); if (err < 0) goto out; /* Set bit 1 to zero */ err = stv06xx_write_sensor(sd, PB_CONTROL, BIT(5)|BIT(3)); PDEBUG(D_STREAM, "Halting stream"); out: return (err < 0) ? 
err : 0; } /* FIXME: Sort the init commands out and put them into tables, this is only for getting the camera to work */ /* FIXME: No error handling for now, add this once the init has been converted to proper tables */ static int pb0100_init(struct sd *sd) { stv06xx_write_bridge(sd, STV_REG00, 1); stv06xx_write_bridge(sd, STV_SCAN_RATE, 0); /* Reset sensor */ stv06xx_write_sensor(sd, PB_RESET, 1); stv06xx_write_sensor(sd, PB_RESET, 0); /* Disable chip */ stv06xx_write_sensor(sd, PB_CONTROL, BIT(5)|BIT(3)); /* Gain stuff...*/ stv06xx_write_sensor(sd, PB_PREADCTRL, BIT(12)|BIT(10)|BIT(6)); stv06xx_write_sensor(sd, PB_ADCGLOBALGAIN, 12); /* Set up auto-exposure */ /* ADC VREF_HI new setting for a transition from the Expose1 to the Expose2 setting */ stv06xx_write_sensor(sd, PB_R28, 12); /* gain max for autoexposure */ stv06xx_write_sensor(sd, PB_ADCMAXGAIN, 180); /* gain min for autoexposure */ stv06xx_write_sensor(sd, PB_ADCMINGAIN, 12); /* Maximum frame integration time (programmed into R8) allowed for auto-exposure routine */ stv06xx_write_sensor(sd, PB_R54, 3); /* Minimum frame integration time (programmed into R8) allowed for auto-exposure routine */ stv06xx_write_sensor(sd, PB_R55, 0); stv06xx_write_sensor(sd, PB_UPDATEINT, 1); /* R15 Expose0 (maximum that auto-exposure may use) */ stv06xx_write_sensor(sd, PB_R15, 800); /* R17 Expose2 (minimum that auto-exposure may use) */ stv06xx_write_sensor(sd, PB_R17, 10); stv06xx_write_sensor(sd, PB_EXPGAIN, 0); /* 0x14 */ stv06xx_write_sensor(sd, PB_VOFFSET, 0); /* 0x0D */ stv06xx_write_sensor(sd, PB_ADCGAINH, 11); /* Set black level (important!) */ stv06xx_write_sensor(sd, PB_ADCGAINL, 0); /* ??? 
*/ stv06xx_write_bridge(sd, STV_REG00, 0x11); stv06xx_write_bridge(sd, STV_REG03, 0x45); stv06xx_write_bridge(sd, STV_REG04, 0x07); /* Scan/timing for the sensor */ stv06xx_write_sensor(sd, PB_ROWSPEED, BIT(4)|BIT(3)|BIT(1)); stv06xx_write_sensor(sd, PB_CFILLIN, 14); stv06xx_write_sensor(sd, PB_VBL, 0); stv06xx_write_sensor(sd, PB_FINTTIME, 0); stv06xx_write_sensor(sd, PB_RINTTIME, 123); stv06xx_write_bridge(sd, STV_REG01, 0xc2); stv06xx_write_bridge(sd, STV_REG02, 0xb0); return 0; } static int pb0100_dump(struct sd *sd) { return 0; } static int pb0100_set_gain(struct gspca_dev *gspca_dev, __s32 val) { int err; struct sd *sd = (struct sd *) gspca_dev; struct pb0100_ctrls *ctrls = sd->sensor_priv; err = stv06xx_write_sensor(sd, PB_G1GAIN, val); if (!err) err = stv06xx_write_sensor(sd, PB_G2GAIN, val); PDEBUG(D_CONF, "Set green gain to %d, status: %d", val, err); if (!err) err = pb0100_set_red_balance(gspca_dev, ctrls->red->val); if (!err) err = pb0100_set_blue_balance(gspca_dev, ctrls->blue->val); return err; } static int pb0100_set_red_balance(struct gspca_dev *gspca_dev, __s32 val) { int err; struct sd *sd = (struct sd *) gspca_dev; struct pb0100_ctrls *ctrls = sd->sensor_priv; val += ctrls->gain->val; if (val < 0) val = 0; else if (val > 255) val = 255; err = stv06xx_write_sensor(sd, PB_RGAIN, val); PDEBUG(D_CONF, "Set red gain to %d, status: %d", val, err); return err; } static int pb0100_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val) { int err; struct sd *sd = (struct sd *) gspca_dev; struct pb0100_ctrls *ctrls = sd->sensor_priv; val += ctrls->gain->val; if (val < 0) val = 0; else if (val > 255) val = 255; err = stv06xx_write_sensor(sd, PB_BGAIN, val); PDEBUG(D_CONF, "Set blue gain to %d, status: %d", val, err); return err; } static int pb0100_set_exposure(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; int err; err = stv06xx_write_sensor(sd, PB_RINTTIME, val); PDEBUG(D_CONF, "Set exposure to %d, status: %d", val, 
err); return err; } static int pb0100_set_autogain(struct gspca_dev *gspca_dev, __s32 val) { int err; struct sd *sd = (struct sd *) gspca_dev; struct pb0100_ctrls *ctrls = sd->sensor_priv; if (val) { if (ctrls->natural->val) val = BIT(6)|BIT(4)|BIT(0); else val = BIT(4)|BIT(0); } else val = 0; err = stv06xx_write_sensor(sd, PB_EXPGAIN, val); PDEBUG(D_CONF, "Set autogain to %d (natural: %d), status: %d", val, ctrls->natural->val, err); return err; } static int pb0100_set_autogain_target(struct gspca_dev *gspca_dev, __s32 val) { int err, totalpixels, brightpixels, darkpixels; struct sd *sd = (struct sd *) gspca_dev; /* Number of pixels counted by the sensor when subsampling the pixels. * Slightly larger than the real value to avoid oscillation */ totalpixels = gspca_dev->width * gspca_dev->height; totalpixels = totalpixels/(8*8) + totalpixels/(64*64); brightpixels = (totalpixels * val) >> 8; darkpixels = totalpixels - brightpixels; err = stv06xx_write_sensor(sd, PB_R21, brightpixels); if (!err) err = stv06xx_write_sensor(sd, PB_R22, darkpixels); PDEBUG(D_CONF, "Set autogain target to %d, status: %d", val, err); return err; }
gpl-2.0
akshay-shah/android_kernel_samsung_crater
drivers/tty/hvc/hvc_beat.c
4300
3216
/* * Beat hypervisor console driver * * (C) Copyright 2006 TOSHIBA CORPORATION * * This code is based on drivers/char/hvc_rtas.c: * (C) Copyright IBM Corporation 2001-2005 * (C) Copyright Red Hat, Inc. 2005 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/string.h> #include <linux/console.h> #include <asm/prom.h> #include <asm/hvconsole.h> #include <asm/firmware.h> #include "hvc_console.h" extern int64_t beat_get_term_char(uint64_t, uint64_t *, uint64_t *, uint64_t *); extern int64_t beat_put_term_char(uint64_t, uint64_t, uint64_t, uint64_t); struct hvc_struct *hvc_beat_dev = NULL; /* bug: only one queue is available regardless of vtermno */ static int hvc_beat_get_chars(uint32_t vtermno, char *buf, int cnt) { static unsigned char q[sizeof(unsigned long) * 2] __attribute__((aligned(sizeof(unsigned long)))); static int qlen = 0; u64 got; again: if (qlen) { if (qlen > cnt) { memcpy(buf, q, cnt); qlen -= cnt; memmove(q + cnt, q, qlen); return cnt; } else { /* qlen <= cnt */ int r; memcpy(buf, q, qlen); r = qlen; qlen = 0; return r; } } if (beat_get_term_char(vtermno, &got, ((u64 *)q), ((u64 *)q) + 1) == 0) { qlen = got; goto again; } return 0; } static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt) { unsigned long 
kb[2]; int rest, nlen; for (rest = cnt; rest > 0; rest -= nlen) { nlen = (rest > 16) ? 16 : rest; memcpy(kb, buf, nlen); beat_put_term_char(vtermno, nlen, kb[0], kb[1]); buf += nlen; } return cnt; } static const struct hv_ops hvc_beat_get_put_ops = { .get_chars = hvc_beat_get_chars, .put_chars = hvc_beat_put_chars, }; static int hvc_beat_useit = 1; static int hvc_beat_config(char *p) { hvc_beat_useit = simple_strtoul(p, NULL, 0); return 0; } static int __init hvc_beat_console_init(void) { if (hvc_beat_useit && of_machine_is_compatible("Beat")) { hvc_instantiate(0, 0, &hvc_beat_get_put_ops); } return 0; } /* temp */ static int __init hvc_beat_init(void) { struct hvc_struct *hp; if (!firmware_has_feature(FW_FEATURE_BEAT)) return -ENODEV; hp = hvc_alloc(0, NO_IRQ, &hvc_beat_get_put_ops, 16); if (IS_ERR(hp)) return PTR_ERR(hp); hvc_beat_dev = hp; return 0; } static void __exit hvc_beat_exit(void) { if (hvc_beat_dev) hvc_remove(hvc_beat_dev); } module_init(hvc_beat_init); module_exit(hvc_beat_exit); __setup("hvc_beat=", hvc_beat_config); console_initcall(hvc_beat_console_init);
gpl-2.0
Split-Screen/android_kernel_motorola_ghost
arch/frv/mb93090-mb00/pci-vdk.c
4556
12784
/* pci-vdk.c: MB93090-MB00 (VDK) PCI support * * Copyright (C) 2003, 2004 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/delay.h> #include <asm/segment.h> #include <asm/io.h> #include <asm/mb-regs.h> #include <asm/mb86943a.h> #include "pci-frv.h" unsigned int __nongpreldata pci_probe = 1; int __nongpreldata pcibios_last_bus = -1; struct pci_bus *__nongpreldata pci_root_bus; struct pci_ops *__nongpreldata pci_root_ops; /* * The accessible PCI window does not cover the entire CPU address space, but * there are devices we want to access outside of that window, so we need to * insert specific PCI bus resources instead of using the platform-level bus * resources directly for the PCI root bus. * * These are configured and inserted by pcibios_init() and are attached to the * root bus by pcibios_fixup_bus(). 
*/ static struct resource pci_ioport_resource = { .name = "PCI IO", .start = 0, .end = IO_SPACE_LIMIT, .flags = IORESOURCE_IO, }; static struct resource pci_iomem_resource = { .name = "PCI mem", .start = 0, .end = -1, .flags = IORESOURCE_MEM, }; /* * Functions for accessing PCI configuration space */ #define CONFIG_CMD(bus, dev, where) \ (0x80000000 | (bus->number << 16) | (devfn << 8) | (where & ~3)) #define __set_PciCfgAddr(A) writel((A), (volatile void __iomem *) __region_CS1 + 0x80) #define __get_PciCfgDataB(A) readb((volatile void __iomem *) __region_CS1 + 0x88 + ((A) & 3)) #define __get_PciCfgDataW(A) readw((volatile void __iomem *) __region_CS1 + 0x88 + ((A) & 2)) #define __get_PciCfgDataL(A) readl((volatile void __iomem *) __region_CS1 + 0x88) #define __set_PciCfgDataB(A,V) \ writeb((V), (volatile void __iomem *) __region_CS1 + 0x88 + (3 - ((A) & 3))) #define __set_PciCfgDataW(A,V) \ writew((V), (volatile void __iomem *) __region_CS1 + 0x88 + (2 - ((A) & 2))) #define __set_PciCfgDataL(A,V) \ writel((V), (volatile void __iomem *) __region_CS1 + 0x88) #define __get_PciBridgeDataB(A) readb((volatile void __iomem *) __region_CS1 + 0x800 + (A)) #define __get_PciBridgeDataW(A) readw((volatile void __iomem *) __region_CS1 + 0x800 + (A)) #define __get_PciBridgeDataL(A) readl((volatile void __iomem *) __region_CS1 + 0x800 + (A)) #define __set_PciBridgeDataB(A,V) writeb((V), (volatile void __iomem *) __region_CS1 + 0x800 + (A)) #define __set_PciBridgeDataW(A,V) writew((V), (volatile void __iomem *) __region_CS1 + 0x800 + (A)) #define __set_PciBridgeDataL(A,V) writel((V), (volatile void __iomem *) __region_CS1 + 0x800 + (A)) static inline int __query(const struct pci_dev *dev) { // return dev->bus->number==0 && (dev->devfn==PCI_DEVFN(0,0)); // return dev->bus->number==1; // return dev->bus->number==0 && // (dev->devfn==PCI_DEVFN(2,0) || dev->devfn==PCI_DEVFN(3,0)); return 0; } /*****************************************************************************/ /* * */ 
static int pci_frv_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { u32 _value; if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { _value = __get_PciBridgeDataL(where & ~3); } else { __set_PciCfgAddr(CONFIG_CMD(bus, devfn, where)); _value = __get_PciCfgDataL(where & ~3); } switch (size) { case 1: _value = _value >> ((where & 3) * 8); break; case 2: _value = _value >> ((where & 2) * 8); break; case 4: break; default: BUG(); } *val = _value; return PCIBIOS_SUCCESSFUL; } static int pci_frv_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { switch (size) { case 1: if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { __set_PciBridgeDataB(where, value); } else { __set_PciCfgAddr(CONFIG_CMD(bus, devfn, where)); __set_PciCfgDataB(where, value); } break; case 2: if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { __set_PciBridgeDataW(where, value); } else { __set_PciCfgAddr(CONFIG_CMD(bus, devfn, where)); __set_PciCfgDataW(where, value); } break; case 4: if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { __set_PciBridgeDataL(where, value); } else { __set_PciCfgAddr(CONFIG_CMD(bus, devfn, where)); __set_PciCfgDataL(where, value); } break; default: BUG(); } return PCIBIOS_SUCCESSFUL; } static struct pci_ops pci_direct_frv = { pci_frv_read_config, pci_frv_write_config, }; /* * Before we decide to use direct hardware access mechanisms, we try to do some * trivial checks to ensure it at least _seems_ to be working -- we just test * whether bus 00 contains a host bridge (this is similar to checking * techniques used in XFree86, but ours should be more reliable since we * attempt to make use of direct access hints provided by the PCI BIOS). * * This should be close to trivial, but it isn't, because there are buggy * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID. 
*/ static int __init pci_sanity_check(struct pci_ops *o) { struct pci_bus bus; /* Fake bus and device */ u32 id; bus.number = 0; if (o->read(&bus, 0, PCI_VENDOR_ID, 4, &id) == PCIBIOS_SUCCESSFUL) { printk("PCI: VDK Bridge device:vendor: %08x\n", id); if (id == 0x200e10cf) return 1; } printk("PCI: VDK Bridge: Sanity check failed\n"); return 0; } static struct pci_ops * __init pci_check_direct(void) { unsigned long flags; local_irq_save(flags); /* check if access works */ if (pci_sanity_check(&pci_direct_frv)) { local_irq_restore(flags); printk("PCI: Using configuration frv\n"); // request_mem_region(0xBE040000, 256, "FRV bridge"); // request_mem_region(0xBFFFFFF4, 12, "PCI frv"); return &pci_direct_frv; } local_irq_restore(flags); return NULL; } /* * Discover remaining PCI buses in case there are peer host bridges. * We use the number of last PCI bus provided by the PCI BIOS. */ static void __init pcibios_fixup_peer_bridges(void) { struct pci_bus bus; struct pci_dev dev; int n; u16 l; if (pcibios_last_bus <= 0 || pcibios_last_bus >= 0xff) return; printk("PCI: Peer bridge fixup\n"); for (n=0; n <= pcibios_last_bus; n++) { if (pci_find_bus(0, n)) continue; bus.number = n; bus.ops = pci_root_ops; dev.bus = &bus; for(dev.devfn=0; dev.devfn<256; dev.devfn += 8) if (!pci_read_config_word(&dev, PCI_VENDOR_ID, &l) && l != 0x0000 && l != 0xffff) { printk("Found device at %02x:%02x [%04x]\n", n, dev.devfn, l); printk("PCI: Discovered peer bus %02x\n", n); pci_scan_bus(n, pci_root_ops, NULL); break; } } } /* * Exceptions for specific devices. Usually work-arounds for fatal design flaws. */ static void __init pci_fixup_umc_ide(struct pci_dev *d) { /* * UM8886BF IDE controller sets region type bits incorrectly, * therefore they look like memory despite of them being I/O. 
*/ int i; printk("PCI: Fixing base address flags for device %s\n", pci_name(d)); for(i=0; i<4; i++) d->resource[i].flags |= PCI_BASE_ADDRESS_SPACE_IO; } static void __init pci_fixup_ide_bases(struct pci_dev *d) { int i; /* * PCI IDE controllers use non-standard I/O port decoding, respect it. */ if ((d->class >> 8) != PCI_CLASS_STORAGE_IDE) return; printk("PCI: IDE base address fixup for %s\n", pci_name(d)); for(i=0; i<4; i++) { struct resource *r = &d->resource[i]; if ((r->start & ~0x80) == 0x374) { r->start |= 2; r->end = r->start; } } } static void __init pci_fixup_ide_trash(struct pci_dev *d) { int i; /* * There exist PCI IDE controllers which have utter garbage * in first four base registers. Ignore that. */ printk("PCI: IDE base address trash cleared for %s\n", pci_name(d)); for(i=0; i<4; i++) d->resource[i].start = d->resource[i].end = d->resource[i].flags = 0; } static void __devinit pci_fixup_latency(struct pci_dev *d) { /* * SiS 5597 and 5598 chipsets require latency timer set to * at most 32 to avoid lockups. */ DBG("PCI: Setting max latency to 32\n"); pcibios_max_latency = 32; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, pci_fixup_umc_ide); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5513, pci_fixup_ide_trash); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, pci_fixup_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5598, pci_fixup_latency); DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases); /* * Called after each bus is probed, but before its children * are examined. 
*/ void __init pcibios_fixup_bus(struct pci_bus *bus) { #if 0 printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number); #endif pci_read_bridge_bases(bus); if (bus->number == 0) { struct list_head *ln; struct pci_dev *dev; for (ln=bus->devices.next; ln != &bus->devices; ln=ln->next) { dev = pci_dev_b(ln); if (dev->devfn == 0) { dev->resource[0].start = 0; dev->resource[0].end = 0; } } } } /* * Initialization. Try all known PCI access methods. Note that we support * using both PCI BIOS and direct access: in such cases, we use I/O ports * to access config space, but we still keep BIOS order of cards to be * compatible with 2.0.X. This should go away some day. */ int __init pcibios_init(void) { struct pci_ops *dir = NULL; LIST_HEAD(resources); if (!mb93090_mb00_detected) return -ENXIO; __reg_MB86943_sl_ctl |= MB86943_SL_CTL_DRCT_MASTER_SWAP | MB86943_SL_CTL_DRCT_SLAVE_SWAP; __reg_MB86943_ecs_base(1) = ((__region_CS2 + 0x01000000) >> 9) | 0x08000000; __reg_MB86943_ecs_base(2) = ((__region_CS2 + 0x00000000) >> 9) | 0x08000000; *(volatile uint32_t *) (__region_CS1 + 0x848) = 0xe0000000; *(volatile uint32_t *) (__region_CS1 + 0x8b8) = 0x00000000; __reg_MB86943_sl_pci_io_base = (__region_CS2 + 0x04000000) >> 9; __reg_MB86943_sl_pci_mem_base = (__region_CS2 + 0x08000000) >> 9; __reg_MB86943_pci_sl_io_base = __region_CS2 + 0x04000000; __reg_MB86943_pci_sl_mem_base = __region_CS2 + 0x08000000; mb(); /* enable PCI arbitration */ __reg_MB86943_pci_arbiter = MB86943_PCIARB_EN; pci_ioport_resource.start = (__reg_MB86943_sl_pci_io_base << 9) & 0xfffffc00; pci_ioport_resource.end = (__reg_MB86943_sl_pci_io_range << 9) | 0x3ff; pci_ioport_resource.end += pci_ioport_resource.start; printk("PCI IO window: %08llx-%08llx\n", (unsigned long long) pci_ioport_resource.start, (unsigned long long) pci_ioport_resource.end); pci_iomem_resource.start = (__reg_MB86943_sl_pci_mem_base << 9) & 0xfffffc00; pci_iomem_resource.end = (__reg_MB86943_sl_pci_mem_range << 9) | 0x3ff; pci_iomem_resource.end += 
pci_iomem_resource.start; /* Reserve somewhere to write to flush posted writes. This is used by * __flush_PCI_writes() from asm/io.h to force the write FIFO in the * CPU-PCI bridge to flush as this doesn't happen automatically when a * read is performed on the MB93090 development kit motherboard. */ pci_iomem_resource.start += 0x400; printk("PCI MEM window: %08llx-%08llx\n", (unsigned long long) pci_iomem_resource.start, (unsigned long long) pci_iomem_resource.end); printk("PCI DMA memory: %08lx-%08lx\n", dma_coherent_mem_start, dma_coherent_mem_end); if (insert_resource(&iomem_resource, &pci_iomem_resource) < 0) panic("Unable to insert PCI IOMEM resource\n"); if (insert_resource(&ioport_resource, &pci_ioport_resource) < 0) panic("Unable to insert PCI IOPORT resource\n"); if (!pci_probe) return -ENXIO; dir = pci_check_direct(); if (dir) pci_root_ops = dir; else { printk("PCI: No PCI bus detected\n"); return -ENXIO; } printk("PCI: Probing PCI hardware\n"); pci_add_resource(&resources, &pci_ioport_resource); pci_add_resource(&resources, &pci_iomem_resource); pci_root_bus = pci_scan_root_bus(NULL, 0, pci_root_ops, NULL, &resources); pcibios_irq_init(); pcibios_fixup_peer_bridges(); pcibios_fixup_irqs(); pcibios_resource_survey(); return 0; } arch_initcall(pcibios_init); char * __init pcibios_setup(char *str) { if (!strcmp(str, "off")) { pci_probe = 0; return NULL; } else if (!strncmp(str, "lastbus=", 8)) { pcibios_last_bus = simple_strtol(str+8, NULL, 0); return NULL; } return str; } int pcibios_enable_device(struct pci_dev *dev, int mask) { int err; if ((err = pci_enable_resources(dev, mask)) < 0) return err; if (!dev->msi_enabled) pcibios_enable_irq(dev); return 0; }
gpl-2.0
eoghan2t9/kernel_qcom_cfx
sound/soc/codecs/wm8940.c
4812
22564
/* * wm8940.c -- WM8940 ALSA Soc Audio driver * * Author: Jonathan Cameron <jic23@cam.ac.uk> * * Based on wm8510.c * Copyright 2006 Wolfson Microelectronics PLC. * Author: Liam Girdwood <lrg@slimlogic.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Not currently handled: * Notch filter control * AUXMode (inverting vs mixer) * No means to obtain current gain if alc enabled. * No use made of gpio * Fast VMID discharge for power down * Soft Start * DLR and ALR Swaps not enabled * Digital Sidetone not supported */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/i2c.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/initval.h> #include <sound/tlv.h> #include "wm8940.h" struct wm8940_priv { unsigned int sysclk; enum snd_soc_control_type control_type; }; static int wm8940_volatile_register(struct snd_soc_codec *codec, unsigned int reg) { switch (reg) { case WM8940_SOFTRESET: return 1; default: return 0; } } static u16 wm8940_reg_defaults[] = { 0x8940, /* Soft Reset */ 0x0000, /* Power 1 */ 0x0000, /* Power 2 */ 0x0000, /* Power 3 */ 0x0010, /* Interface Control */ 0x0000, /* Companding Control */ 0x0140, /* Clock Control */ 0x0000, /* Additional Controls */ 0x0000, /* GPIO Control */ 0x0002, /* Auto Increment Control */ 0x0000, /* DAC Control */ 0x00FF, /* DAC Volume */ 0, 0, 0x0100, /* ADC Control */ 0x00FF, /* ADC Volume */ 0x0000, /* Notch Filter 1 Control 1 */ 0x0000, /* Notch Filter 1 Control 2 */ 0x0000, /* Notch Filter 2 Control 1 */ 0x0000, /* Notch Filter 2 Control 2 */ 0x0000, /* Notch Filter 3 Control 1 */ 0x0000, /* Notch Filter 3 Control 2 */ 0x0000, /* Notch Filter 4 Control 
1 */ 0x0000, /* Notch Filter 4 Control 2 */ 0x0032, /* DAC Limit Control 1 */ 0x0000, /* DAC Limit Control 2 */ 0, 0, 0, 0, 0, 0, 0x0038, /* ALC Control 1 */ 0x000B, /* ALC Control 2 */ 0x0032, /* ALC Control 3 */ 0x0000, /* Noise Gate */ 0x0041, /* PLLN */ 0x000C, /* PLLK1 */ 0x0093, /* PLLK2 */ 0x00E9, /* PLLK3 */ 0, 0, 0x0030, /* ALC Control 4 */ 0, 0x0002, /* Input Control */ 0x0050, /* PGA Gain */ 0, 0x0002, /* ADC Boost Control */ 0, 0x0002, /* Output Control */ 0x0000, /* Speaker Mixer Control */ 0, 0, 0, 0x0079, /* Speaker Volume */ 0, 0x0000, /* Mono Mixer Control */ }; static const char *wm8940_companding[] = { "Off", "NC", "u-law", "A-law" }; static const struct soc_enum wm8940_adc_companding_enum = SOC_ENUM_SINGLE(WM8940_COMPANDINGCTL, 1, 4, wm8940_companding); static const struct soc_enum wm8940_dac_companding_enum = SOC_ENUM_SINGLE(WM8940_COMPANDINGCTL, 3, 4, wm8940_companding); static const char *wm8940_alc_mode_text[] = {"ALC", "Limiter"}; static const struct soc_enum wm8940_alc_mode_enum = SOC_ENUM_SINGLE(WM8940_ALC3, 8, 2, wm8940_alc_mode_text); static const char *wm8940_mic_bias_level_text[] = {"0.9", "0.65"}; static const struct soc_enum wm8940_mic_bias_level_enum = SOC_ENUM_SINGLE(WM8940_INPUTCTL, 8, 2, wm8940_mic_bias_level_text); static const char *wm8940_filter_mode_text[] = {"Audio", "Application"}; static const struct soc_enum wm8940_filter_mode_enum = SOC_ENUM_SINGLE(WM8940_ADC, 7, 2, wm8940_filter_mode_text); static DECLARE_TLV_DB_SCALE(wm8940_spk_vol_tlv, -5700, 100, 1); static DECLARE_TLV_DB_SCALE(wm8940_att_tlv, -1000, 1000, 0); static DECLARE_TLV_DB_SCALE(wm8940_pga_vol_tlv, -1200, 75, 0); static DECLARE_TLV_DB_SCALE(wm8940_alc_min_tlv, -1200, 600, 0); static DECLARE_TLV_DB_SCALE(wm8940_alc_max_tlv, 675, 600, 0); static DECLARE_TLV_DB_SCALE(wm8940_alc_tar_tlv, -2250, 50, 0); static DECLARE_TLV_DB_SCALE(wm8940_lim_boost_tlv, 0, 100, 0); static DECLARE_TLV_DB_SCALE(wm8940_lim_thresh_tlv, -600, 100, 0); static 
DECLARE_TLV_DB_SCALE(wm8940_adc_tlv, -12750, 50, 1); static DECLARE_TLV_DB_SCALE(wm8940_capture_boost_vol_tlv, 0, 2000, 0); static const struct snd_kcontrol_new wm8940_snd_controls[] = { SOC_SINGLE("Digital Loopback Switch", WM8940_COMPANDINGCTL, 6, 1, 0), SOC_ENUM("DAC Companding", wm8940_dac_companding_enum), SOC_ENUM("ADC Companding", wm8940_adc_companding_enum), SOC_ENUM("ALC Mode", wm8940_alc_mode_enum), SOC_SINGLE("ALC Switch", WM8940_ALC1, 8, 1, 0), SOC_SINGLE_TLV("ALC Capture Max Gain", WM8940_ALC1, 3, 7, 1, wm8940_alc_max_tlv), SOC_SINGLE_TLV("ALC Capture Min Gain", WM8940_ALC1, 0, 7, 0, wm8940_alc_min_tlv), SOC_SINGLE_TLV("ALC Capture Target", WM8940_ALC2, 0, 14, 0, wm8940_alc_tar_tlv), SOC_SINGLE("ALC Capture Hold", WM8940_ALC2, 4, 10, 0), SOC_SINGLE("ALC Capture Decay", WM8940_ALC3, 4, 10, 0), SOC_SINGLE("ALC Capture Attach", WM8940_ALC3, 0, 10, 0), SOC_SINGLE("ALC ZC Switch", WM8940_ALC4, 1, 1, 0), SOC_SINGLE("ALC Capture Noise Gate Switch", WM8940_NOISEGATE, 3, 1, 0), SOC_SINGLE("ALC Capture Noise Gate Threshold", WM8940_NOISEGATE, 0, 7, 0), SOC_SINGLE("DAC Playback Limiter Switch", WM8940_DACLIM1, 8, 1, 0), SOC_SINGLE("DAC Playback Limiter Attack", WM8940_DACLIM1, 0, 9, 0), SOC_SINGLE("DAC Playback Limiter Decay", WM8940_DACLIM1, 4, 11, 0), SOC_SINGLE_TLV("DAC Playback Limiter Threshold", WM8940_DACLIM2, 4, 9, 1, wm8940_lim_thresh_tlv), SOC_SINGLE_TLV("DAC Playback Limiter Boost", WM8940_DACLIM2, 0, 12, 0, wm8940_lim_boost_tlv), SOC_SINGLE("Capture PGA ZC Switch", WM8940_PGAGAIN, 7, 1, 0), SOC_SINGLE_TLV("Capture PGA Volume", WM8940_PGAGAIN, 0, 63, 0, wm8940_pga_vol_tlv), SOC_SINGLE_TLV("Digital Playback Volume", WM8940_DACVOL, 0, 255, 0, wm8940_adc_tlv), SOC_SINGLE_TLV("Digital Capture Volume", WM8940_ADCVOL, 0, 255, 0, wm8940_adc_tlv), SOC_ENUM("Mic Bias Level", wm8940_mic_bias_level_enum), SOC_SINGLE_TLV("Capture Boost Volue", WM8940_ADCBOOST, 8, 1, 0, wm8940_capture_boost_vol_tlv), SOC_SINGLE_TLV("Speaker Playback Volume", WM8940_SPKVOL, 0, 63, 
0, wm8940_spk_vol_tlv), SOC_SINGLE("Speaker Playback Switch", WM8940_SPKVOL, 6, 1, 1), SOC_SINGLE_TLV("Speaker Mixer Line Bypass Volume", WM8940_SPKVOL, 8, 1, 1, wm8940_att_tlv), SOC_SINGLE("Speaker Playback ZC Switch", WM8940_SPKVOL, 7, 1, 0), SOC_SINGLE("Mono Out Switch", WM8940_MONOMIX, 6, 1, 1), SOC_SINGLE_TLV("Mono Mixer Line Bypass Volume", WM8940_MONOMIX, 7, 1, 1, wm8940_att_tlv), SOC_SINGLE("High Pass Filter Switch", WM8940_ADC, 8, 1, 0), SOC_ENUM("High Pass Filter Mode", wm8940_filter_mode_enum), SOC_SINGLE("High Pass Filter Cut Off", WM8940_ADC, 4, 7, 0), SOC_SINGLE("ADC Inversion Switch", WM8940_ADC, 0, 1, 0), SOC_SINGLE("DAC Inversion Switch", WM8940_DAC, 0, 1, 0), SOC_SINGLE("DAC Auto Mute Switch", WM8940_DAC, 2, 1, 0), SOC_SINGLE("ZC Timeout Clock Switch", WM8940_ADDCNTRL, 0, 1, 0), }; static const struct snd_kcontrol_new wm8940_speaker_mixer_controls[] = { SOC_DAPM_SINGLE("Line Bypass Switch", WM8940_SPKMIX, 1, 1, 0), SOC_DAPM_SINGLE("Aux Playback Switch", WM8940_SPKMIX, 5, 1, 0), SOC_DAPM_SINGLE("PCM Playback Switch", WM8940_SPKMIX, 0, 1, 0), }; static const struct snd_kcontrol_new wm8940_mono_mixer_controls[] = { SOC_DAPM_SINGLE("Line Bypass Switch", WM8940_MONOMIX, 1, 1, 0), SOC_DAPM_SINGLE("Aux Playback Switch", WM8940_MONOMIX, 2, 1, 0), SOC_DAPM_SINGLE("PCM Playback Switch", WM8940_MONOMIX, 0, 1, 0), }; static DECLARE_TLV_DB_SCALE(wm8940_boost_vol_tlv, -1500, 300, 1); static const struct snd_kcontrol_new wm8940_input_boost_controls[] = { SOC_DAPM_SINGLE("Mic PGA Switch", WM8940_PGAGAIN, 6, 1, 1), SOC_DAPM_SINGLE_TLV("Aux Volume", WM8940_ADCBOOST, 0, 7, 0, wm8940_boost_vol_tlv), SOC_DAPM_SINGLE_TLV("Mic Volume", WM8940_ADCBOOST, 4, 7, 0, wm8940_boost_vol_tlv), }; static const struct snd_kcontrol_new wm8940_micpga_controls[] = { SOC_DAPM_SINGLE("AUX Switch", WM8940_INPUTCTL, 2, 1, 0), SOC_DAPM_SINGLE("MICP Switch", WM8940_INPUTCTL, 0, 1, 0), SOC_DAPM_SINGLE("MICN Switch", WM8940_INPUTCTL, 1, 1, 0), }; static const struct snd_soc_dapm_widget 
wm8940_dapm_widgets[] = { SND_SOC_DAPM_MIXER("Speaker Mixer", WM8940_POWER3, 2, 0, &wm8940_speaker_mixer_controls[0], ARRAY_SIZE(wm8940_speaker_mixer_controls)), SND_SOC_DAPM_MIXER("Mono Mixer", WM8940_POWER3, 3, 0, &wm8940_mono_mixer_controls[0], ARRAY_SIZE(wm8940_mono_mixer_controls)), SND_SOC_DAPM_DAC("DAC", "HiFi Playback", WM8940_POWER3, 0, 0), SND_SOC_DAPM_PGA("SpkN Out", WM8940_POWER3, 5, 0, NULL, 0), SND_SOC_DAPM_PGA("SpkP Out", WM8940_POWER3, 6, 0, NULL, 0), SND_SOC_DAPM_PGA("Mono Out", WM8940_POWER3, 7, 0, NULL, 0), SND_SOC_DAPM_OUTPUT("MONOOUT"), SND_SOC_DAPM_OUTPUT("SPKOUTP"), SND_SOC_DAPM_OUTPUT("SPKOUTN"), SND_SOC_DAPM_PGA("Aux Input", WM8940_POWER1, 6, 0, NULL, 0), SND_SOC_DAPM_ADC("ADC", "HiFi Capture", WM8940_POWER2, 0, 0), SND_SOC_DAPM_MIXER("Mic PGA", WM8940_POWER2, 2, 0, &wm8940_micpga_controls[0], ARRAY_SIZE(wm8940_micpga_controls)), SND_SOC_DAPM_MIXER("Boost Mixer", WM8940_POWER2, 4, 0, &wm8940_input_boost_controls[0], ARRAY_SIZE(wm8940_input_boost_controls)), SND_SOC_DAPM_MICBIAS("Mic Bias", WM8940_POWER1, 4, 0), SND_SOC_DAPM_INPUT("MICN"), SND_SOC_DAPM_INPUT("MICP"), SND_SOC_DAPM_INPUT("AUX"), }; static const struct snd_soc_dapm_route audio_map[] = { /* Mono output mixer */ {"Mono Mixer", "PCM Playback Switch", "DAC"}, {"Mono Mixer", "Aux Playback Switch", "Aux Input"}, {"Mono Mixer", "Line Bypass Switch", "Boost Mixer"}, /* Speaker output mixer */ {"Speaker Mixer", "PCM Playback Switch", "DAC"}, {"Speaker Mixer", "Aux Playback Switch", "Aux Input"}, {"Speaker Mixer", "Line Bypass Switch", "Boost Mixer"}, /* Outputs */ {"Mono Out", NULL, "Mono Mixer"}, {"MONOOUT", NULL, "Mono Out"}, {"SpkN Out", NULL, "Speaker Mixer"}, {"SpkP Out", NULL, "Speaker Mixer"}, {"SPKOUTN", NULL, "SpkN Out"}, {"SPKOUTP", NULL, "SpkP Out"}, /* Microphone PGA */ {"Mic PGA", "MICN Switch", "MICN"}, {"Mic PGA", "MICP Switch", "MICP"}, {"Mic PGA", "AUX Switch", "AUX"}, /* Boost Mixer */ {"Boost Mixer", "Mic PGA Switch", "Mic PGA"}, {"Boost Mixer", "Mic Volume", "MICP"}, 
{"Boost Mixer", "Aux Volume", "Aux Input"}, {"ADC", NULL, "Boost Mixer"}, }; static int wm8940_add_widgets(struct snd_soc_codec *codec) { struct snd_soc_dapm_context *dapm = &codec->dapm; int ret; ret = snd_soc_dapm_new_controls(dapm, wm8940_dapm_widgets, ARRAY_SIZE(wm8940_dapm_widgets)); if (ret) goto error_ret; ret = snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); error_ret: return ret; } #define wm8940_reset(c) snd_soc_write(c, WM8940_SOFTRESET, 0); static int wm8940_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u16 iface = snd_soc_read(codec, WM8940_IFACE) & 0xFE67; u16 clk = snd_soc_read(codec, WM8940_CLOCK) & 0x1fe; switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: clk |= 1; break; case SND_SOC_DAIFMT_CBS_CFS: break; default: return -EINVAL; } snd_soc_write(codec, WM8940_CLOCK, clk); switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: iface |= (2 << 3); break; case SND_SOC_DAIFMT_LEFT_J: iface |= (1 << 3); break; case SND_SOC_DAIFMT_RIGHT_J: break; case SND_SOC_DAIFMT_DSP_A: iface |= (3 << 3); break; case SND_SOC_DAIFMT_DSP_B: iface |= (3 << 3) | (1 << 7); break; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_NB_IF: iface |= (1 << 7); break; case SND_SOC_DAIFMT_IB_NF: iface |= (1 << 8); break; case SND_SOC_DAIFMT_IB_IF: iface |= (1 << 8) | (1 << 7); break; } snd_soc_write(codec, WM8940_IFACE, iface); return 0; } static int wm8940_i2s_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_codec *codec = rtd->codec; u16 iface = snd_soc_read(codec, WM8940_IFACE) & 0xFD9F; u16 addcntrl = snd_soc_read(codec, WM8940_ADDCNTRL) & 0xFFF1; u16 companding = snd_soc_read(codec, WM8940_COMPANDINGCTL) & 0xFFDF; int ret; /* LoutR control */ if (substream->stream == 
SNDRV_PCM_STREAM_CAPTURE && params_channels(params) == 2) iface |= (1 << 9); switch (params_rate(params)) { case 8000: addcntrl |= (0x5 << 1); break; case 11025: addcntrl |= (0x4 << 1); break; case 16000: addcntrl |= (0x3 << 1); break; case 22050: addcntrl |= (0x2 << 1); break; case 32000: addcntrl |= (0x1 << 1); break; case 44100: case 48000: break; } ret = snd_soc_write(codec, WM8940_ADDCNTRL, addcntrl); if (ret) goto error_ret; switch (params_format(params)) { case SNDRV_PCM_FORMAT_S8: companding = companding | (1 << 5); break; case SNDRV_PCM_FORMAT_S16_LE: break; case SNDRV_PCM_FORMAT_S20_3LE: iface |= (1 << 5); break; case SNDRV_PCM_FORMAT_S24_LE: iface |= (2 << 5); break; case SNDRV_PCM_FORMAT_S32_LE: iface |= (3 << 5); break; } ret = snd_soc_write(codec, WM8940_COMPANDINGCTL, companding); if (ret) goto error_ret; ret = snd_soc_write(codec, WM8940_IFACE, iface); error_ret: return ret; } static int wm8940_mute(struct snd_soc_dai *dai, int mute) { struct snd_soc_codec *codec = dai->codec; u16 mute_reg = snd_soc_read(codec, WM8940_DAC) & 0xffbf; if (mute) mute_reg |= 0x40; return snd_soc_write(codec, WM8940_DAC, mute_reg); } static int wm8940_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { u16 val; u16 pwr_reg = snd_soc_read(codec, WM8940_POWER1) & 0x1F0; int ret = 0; switch (level) { case SND_SOC_BIAS_ON: /* ensure bufioen and biasen */ pwr_reg |= (1 << 2) | (1 << 3); /* Enable thermal shutdown */ val = snd_soc_read(codec, WM8940_OUTPUTCTL); ret = snd_soc_write(codec, WM8940_OUTPUTCTL, val | 0x2); if (ret) break; /* set vmid to 75k */ ret = snd_soc_write(codec, WM8940_POWER1, pwr_reg | 0x1); break; case SND_SOC_BIAS_PREPARE: /* ensure bufioen and biasen */ pwr_reg |= (1 << 2) | (1 << 3); ret = snd_soc_write(codec, WM8940_POWER1, pwr_reg | 0x1); break; case SND_SOC_BIAS_STANDBY: if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { ret = snd_soc_cache_sync(codec); if (ret < 0) { dev_err(codec->dev, "Failed to sync cache: %d\n", ret); 
return ret; } } /* ensure bufioen and biasen */ pwr_reg |= (1 << 2) | (1 << 3); /* set vmid to 300k for standby */ ret = snd_soc_write(codec, WM8940_POWER1, pwr_reg | 0x2); break; case SND_SOC_BIAS_OFF: ret = snd_soc_write(codec, WM8940_POWER1, pwr_reg); break; } codec->dapm.bias_level = level; return ret; } struct pll_ { unsigned int pre_scale:2; unsigned int n:4; unsigned int k; }; static struct pll_ pll_div; /* The size in bits of the pll divide multiplied by 10 * to allow rounding later */ #define FIXED_PLL_SIZE ((1 << 24) * 10) static void pll_factors(unsigned int target, unsigned int source) { unsigned long long Kpart; unsigned int K, Ndiv, Nmod; /* The left shift ist to avoid accuracy loss when right shifting */ Ndiv = target / source; if (Ndiv > 12) { source <<= 1; /* Multiply by 2 */ pll_div.pre_scale = 0; Ndiv = target / source; } else if (Ndiv < 3) { source >>= 2; /* Divide by 4 */ pll_div.pre_scale = 3; Ndiv = target / source; } else if (Ndiv < 6) { source >>= 1; /* divide by 2 */ pll_div.pre_scale = 2; Ndiv = target / source; } else pll_div.pre_scale = 1; if ((Ndiv < 6) || (Ndiv > 12)) printk(KERN_WARNING "WM8940 N value %d outwith recommended range!d\n", Ndiv); pll_div.n = Ndiv; Nmod = target % source; Kpart = FIXED_PLL_SIZE * (long long)Nmod; do_div(Kpart, source); K = Kpart & 0xFFFFFFFF; /* Check if we need to round */ if ((K % 10) >= 5) K += 5; /* Move down to proper range now rounding is done */ K /= 10; pll_div.k = K; } /* Untested at the moment */ static int wm8940_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id, int source, unsigned int freq_in, unsigned int freq_out) { struct snd_soc_codec *codec = codec_dai->codec; u16 reg; /* Turn off PLL */ reg = snd_soc_read(codec, WM8940_POWER1); snd_soc_write(codec, WM8940_POWER1, reg & 0x1df); if (freq_in == 0 || freq_out == 0) { /* Clock CODEC directly from MCLK */ reg = snd_soc_read(codec, WM8940_CLOCK); snd_soc_write(codec, WM8940_CLOCK, reg & 0x0ff); /* Pll power down */ snd_soc_write(codec, 
WM8940_PLLN, (1 << 7)); return 0; } /* Pll is followed by a frequency divide by 4 */ pll_factors(freq_out*4, freq_in); if (pll_div.k) snd_soc_write(codec, WM8940_PLLN, (pll_div.pre_scale << 4) | pll_div.n | (1 << 6)); else /* No factional component */ snd_soc_write(codec, WM8940_PLLN, (pll_div.pre_scale << 4) | pll_div.n); snd_soc_write(codec, WM8940_PLLK1, pll_div.k >> 18); snd_soc_write(codec, WM8940_PLLK2, (pll_div.k >> 9) & 0x1ff); snd_soc_write(codec, WM8940_PLLK3, pll_div.k & 0x1ff); /* Enable the PLL */ reg = snd_soc_read(codec, WM8940_POWER1); snd_soc_write(codec, WM8940_POWER1, reg | 0x020); /* Run CODEC from PLL instead of MCLK */ reg = snd_soc_read(codec, WM8940_CLOCK); snd_soc_write(codec, WM8940_CLOCK, reg | 0x100); return 0; } static int wm8940_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = codec_dai->codec; struct wm8940_priv *wm8940 = snd_soc_codec_get_drvdata(codec); switch (freq) { case 11289600: case 12000000: case 12288000: case 16934400: case 18432000: wm8940->sysclk = freq; return 0; } return -EINVAL; } static int wm8940_set_dai_clkdiv(struct snd_soc_dai *codec_dai, int div_id, int div) { struct snd_soc_codec *codec = codec_dai->codec; u16 reg; int ret = 0; switch (div_id) { case WM8940_BCLKDIV: reg = snd_soc_read(codec, WM8940_CLOCK) & 0xFFE3; ret = snd_soc_write(codec, WM8940_CLOCK, reg | (div << 2)); break; case WM8940_MCLKDIV: reg = snd_soc_read(codec, WM8940_CLOCK) & 0xFF1F; ret = snd_soc_write(codec, WM8940_CLOCK, reg | (div << 5)); break; case WM8940_OPCLKDIV: reg = snd_soc_read(codec, WM8940_GPIO) & 0xFFCF; ret = snd_soc_write(codec, WM8940_GPIO, reg | (div << 4)); break; } return ret; } #define WM8940_RATES SNDRV_PCM_RATE_8000_48000 #define WM8940_FORMATS (SNDRV_PCM_FMTBIT_S8 | \ SNDRV_PCM_FMTBIT_S16_LE | \ SNDRV_PCM_FMTBIT_S20_3LE | \ SNDRV_PCM_FMTBIT_S24_LE | \ SNDRV_PCM_FMTBIT_S32_LE) static const struct snd_soc_dai_ops wm8940_dai_ops = { .hw_params = 
wm8940_i2s_hw_params, .set_sysclk = wm8940_set_dai_sysclk, .digital_mute = wm8940_mute, .set_fmt = wm8940_set_dai_fmt, .set_clkdiv = wm8940_set_dai_clkdiv, .set_pll = wm8940_set_dai_pll, }; static struct snd_soc_dai_driver wm8940_dai = { .name = "wm8940-hifi", .playback = { .stream_name = "Playback", .channels_min = 1, .channels_max = 2, .rates = WM8940_RATES, .formats = WM8940_FORMATS, }, .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 2, .rates = WM8940_RATES, .formats = WM8940_FORMATS, }, .ops = &wm8940_dai_ops, .symmetric_rates = 1, }; static int wm8940_suspend(struct snd_soc_codec *codec) { return wm8940_set_bias_level(codec, SND_SOC_BIAS_OFF); } static int wm8940_resume(struct snd_soc_codec *codec) { wm8940_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return 0; } static int wm8940_probe(struct snd_soc_codec *codec) { struct wm8940_priv *wm8940 = snd_soc_codec_get_drvdata(codec); struct wm8940_setup_data *pdata = codec->dev->platform_data; int ret; u16 reg; ret = snd_soc_codec_set_cache_io(codec, 8, 16, wm8940->control_type); if (ret < 0) { dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret); return ret; } ret = wm8940_reset(codec); if (ret < 0) { dev_err(codec->dev, "Failed to issue reset\n"); return ret; } wm8940_set_bias_level(codec, SND_SOC_BIAS_STANDBY); ret = snd_soc_write(codec, WM8940_POWER1, 0x180); if (ret < 0) return ret; if (!pdata) dev_warn(codec->dev, "No platform data supplied\n"); else { reg = snd_soc_read(codec, WM8940_OUTPUTCTL); ret = snd_soc_write(codec, WM8940_OUTPUTCTL, reg | pdata->vroi); if (ret < 0) return ret; } ret = snd_soc_add_codec_controls(codec, wm8940_snd_controls, ARRAY_SIZE(wm8940_snd_controls)); if (ret) return ret; ret = wm8940_add_widgets(codec); return ret; } static int wm8940_remove(struct snd_soc_codec *codec) { wm8940_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static struct snd_soc_codec_driver soc_codec_dev_wm8940 = { .probe = wm8940_probe, .remove = wm8940_remove, .suspend = 
wm8940_suspend, .resume = wm8940_resume, .set_bias_level = wm8940_set_bias_level, .reg_cache_size = ARRAY_SIZE(wm8940_reg_defaults), .reg_word_size = sizeof(u16), .reg_cache_default = wm8940_reg_defaults, .volatile_register = wm8940_volatile_register, }; static __devinit int wm8940_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct wm8940_priv *wm8940; int ret; wm8940 = devm_kzalloc(&i2c->dev, sizeof(struct wm8940_priv), GFP_KERNEL); if (wm8940 == NULL) return -ENOMEM; i2c_set_clientdata(i2c, wm8940); wm8940->control_type = SND_SOC_I2C; ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wm8940, &wm8940_dai, 1); return ret; } static __devexit int wm8940_i2c_remove(struct i2c_client *client) { snd_soc_unregister_codec(&client->dev); return 0; } static const struct i2c_device_id wm8940_i2c_id[] = { { "wm8940", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, wm8940_i2c_id); static struct i2c_driver wm8940_i2c_driver = { .driver = { .name = "wm8940", .owner = THIS_MODULE, }, .probe = wm8940_i2c_probe, .remove = __devexit_p(wm8940_i2c_remove), .id_table = wm8940_i2c_id, }; static int __init wm8940_modinit(void) { int ret = 0; ret = i2c_add_driver(&wm8940_i2c_driver); if (ret != 0) { printk(KERN_ERR "Failed to register wm8940 I2C driver: %d\n", ret); } return ret; } module_init(wm8940_modinit); static void __exit wm8940_exit(void) { i2c_del_driver(&wm8940_i2c_driver); } module_exit(wm8940_exit); MODULE_DESCRIPTION("ASoC WM8940 driver"); MODULE_AUTHOR("Jonathan Cameron"); MODULE_LICENSE("GPL");
gpl-2.0
Nicklas373/AoiCore-Kernel-MSM8627-CM13
drivers/hwmon/sch5636.c
4812
17153
/*************************************************************************** * Copyright (C) 2011-2012 Hans de Goede <hdegoede@redhat.com> * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the * * Free Software Foundation, Inc., * * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * ***************************************************************************/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/platform_device.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include "sch56xx-common.h" #define DRVNAME "sch5636" #define DEVNAME "theseus" /* We only support one model for now */ #define SCH5636_REG_FUJITSU_ID 0x780 #define SCH5636_REG_FUJITSU_REV 0x783 #define SCH5636_NO_INS 5 #define SCH5636_NO_TEMPS 16 #define SCH5636_NO_FANS 8 static const u16 SCH5636_REG_IN_VAL[SCH5636_NO_INS] = { 0x22, 0x23, 0x24, 0x25, 0x189 }; static const u16 SCH5636_REG_IN_FACTORS[SCH5636_NO_INS] = { 4400, 1500, 4000, 4400, 16000 }; static const char * const SCH5636_IN_LABELS[SCH5636_NO_INS] = { "3.3V", "VREF", "VBAT", "3.3AUX", "12V" }; static const u16 SCH5636_REG_TEMP_VAL[SCH5636_NO_TEMPS] = { 0x2B, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x180, 0x181, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C }; #define SCH5636_REG_TEMP_CTRL(i) (0x790 
+ (i)) #define SCH5636_TEMP_WORKING 0x01 #define SCH5636_TEMP_ALARM 0x02 #define SCH5636_TEMP_DEACTIVATED 0x80 static const u16 SCH5636_REG_FAN_VAL[SCH5636_NO_FANS] = { 0x2C, 0x2E, 0x30, 0x32, 0x62, 0x64, 0x66, 0x68 }; #define SCH5636_REG_FAN_CTRL(i) (0x880 + (i)) /* FAULT in datasheet, but acts as an alarm */ #define SCH5636_FAN_ALARM 0x04 #define SCH5636_FAN_NOT_PRESENT 0x08 #define SCH5636_FAN_DEACTIVATED 0x80 struct sch5636_data { unsigned short addr; struct device *hwmon_dev; struct sch56xx_watchdog_data *watchdog; struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ u8 in[SCH5636_NO_INS]; u8 temp_val[SCH5636_NO_TEMPS]; u8 temp_ctrl[SCH5636_NO_TEMPS]; u16 fan_val[SCH5636_NO_FANS]; u8 fan_ctrl[SCH5636_NO_FANS]; }; static struct sch5636_data *sch5636_update_device(struct device *dev) { struct sch5636_data *data = dev_get_drvdata(dev); struct sch5636_data *ret = data; int i, val; mutex_lock(&data->update_lock); /* Cache the values for 1 second */ if (data->valid && !time_after(jiffies, data->last_updated + HZ)) goto abort; for (i = 0; i < SCH5636_NO_INS; i++) { val = sch56xx_read_virtual_reg(data->addr, SCH5636_REG_IN_VAL[i]); if (unlikely(val < 0)) { ret = ERR_PTR(val); goto abort; } data->in[i] = val; } for (i = 0; i < SCH5636_NO_TEMPS; i++) { if (data->temp_ctrl[i] & SCH5636_TEMP_DEACTIVATED) continue; val = sch56xx_read_virtual_reg(data->addr, SCH5636_REG_TEMP_VAL[i]); if (unlikely(val < 0)) { ret = ERR_PTR(val); goto abort; } data->temp_val[i] = val; val = sch56xx_read_virtual_reg(data->addr, SCH5636_REG_TEMP_CTRL(i)); if (unlikely(val < 0)) { ret = ERR_PTR(val); goto abort; } data->temp_ctrl[i] = val; /* Alarms need to be explicitly write-cleared */ if (val & SCH5636_TEMP_ALARM) { sch56xx_write_virtual_reg(data->addr, SCH5636_REG_TEMP_CTRL(i), val); } } for (i = 0; i < SCH5636_NO_FANS; i++) { if (data->fan_ctrl[i] & SCH5636_FAN_DEACTIVATED) continue; val = 
sch56xx_read_virtual_reg16(data->addr, SCH5636_REG_FAN_VAL[i]); if (unlikely(val < 0)) { ret = ERR_PTR(val); goto abort; } data->fan_val[i] = val; val = sch56xx_read_virtual_reg(data->addr, SCH5636_REG_FAN_CTRL(i)); if (unlikely(val < 0)) { ret = ERR_PTR(val); goto abort; } data->fan_ctrl[i] = val; /* Alarms need to be explicitly write-cleared */ if (val & SCH5636_FAN_ALARM) { sch56xx_write_virtual_reg(data->addr, SCH5636_REG_FAN_CTRL(i), val); } } data->last_updated = jiffies; data->valid = 1; abort: mutex_unlock(&data->update_lock); return ret; } static int reg_to_rpm(u16 reg) { if (reg == 0) return -EIO; if (reg == 0xffff) return 0; return 5400540 / reg; } static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { return snprintf(buf, PAGE_SIZE, "%s\n", DEVNAME); } static ssize_t show_in_value(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct sch5636_data *data = sch5636_update_device(dev); int val; if (IS_ERR(data)) return PTR_ERR(data); val = DIV_ROUND_CLOSEST( data->in[attr->index] * SCH5636_REG_IN_FACTORS[attr->index], 255); return snprintf(buf, PAGE_SIZE, "%d\n", val); } static ssize_t show_in_label(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); return snprintf(buf, PAGE_SIZE, "%s\n", SCH5636_IN_LABELS[attr->index]); } static ssize_t show_temp_value(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct sch5636_data *data = sch5636_update_device(dev); int val; if (IS_ERR(data)) return PTR_ERR(data); val = (data->temp_val[attr->index] - 64) * 1000; return snprintf(buf, PAGE_SIZE, "%d\n", val); } static ssize_t show_temp_fault(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 
struct sch5636_data *data = sch5636_update_device(dev); int val; if (IS_ERR(data)) return PTR_ERR(data); val = (data->temp_ctrl[attr->index] & SCH5636_TEMP_WORKING) ? 0 : 1; return snprintf(buf, PAGE_SIZE, "%d\n", val); } static ssize_t show_temp_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct sch5636_data *data = sch5636_update_device(dev); int val; if (IS_ERR(data)) return PTR_ERR(data); val = (data->temp_ctrl[attr->index] & SCH5636_TEMP_ALARM) ? 1 : 0; return snprintf(buf, PAGE_SIZE, "%d\n", val); } static ssize_t show_fan_value(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct sch5636_data *data = sch5636_update_device(dev); int val; if (IS_ERR(data)) return PTR_ERR(data); val = reg_to_rpm(data->fan_val[attr->index]); if (val < 0) return val; return snprintf(buf, PAGE_SIZE, "%d\n", val); } static ssize_t show_fan_fault(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct sch5636_data *data = sch5636_update_device(dev); int val; if (IS_ERR(data)) return PTR_ERR(data); val = (data->fan_ctrl[attr->index] & SCH5636_FAN_NOT_PRESENT) ? 1 : 0; return snprintf(buf, PAGE_SIZE, "%d\n", val); } static ssize_t show_fan_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct sch5636_data *data = sch5636_update_device(dev); int val; if (IS_ERR(data)) return PTR_ERR(data); val = (data->fan_ctrl[attr->index] & SCH5636_FAN_ALARM) ? 
1 : 0; return snprintf(buf, PAGE_SIZE, "%d\n", val); } static struct sensor_device_attribute sch5636_attr[] = { SENSOR_ATTR(name, 0444, show_name, NULL, 0), SENSOR_ATTR(in0_input, 0444, show_in_value, NULL, 0), SENSOR_ATTR(in0_label, 0444, show_in_label, NULL, 0), SENSOR_ATTR(in1_input, 0444, show_in_value, NULL, 1), SENSOR_ATTR(in1_label, 0444, show_in_label, NULL, 1), SENSOR_ATTR(in2_input, 0444, show_in_value, NULL, 2), SENSOR_ATTR(in2_label, 0444, show_in_label, NULL, 2), SENSOR_ATTR(in3_input, 0444, show_in_value, NULL, 3), SENSOR_ATTR(in3_label, 0444, show_in_label, NULL, 3), SENSOR_ATTR(in4_input, 0444, show_in_value, NULL, 4), SENSOR_ATTR(in4_label, 0444, show_in_label, NULL, 4), }; static struct sensor_device_attribute sch5636_temp_attr[] = { SENSOR_ATTR(temp1_input, 0444, show_temp_value, NULL, 0), SENSOR_ATTR(temp1_fault, 0444, show_temp_fault, NULL, 0), SENSOR_ATTR(temp1_alarm, 0444, show_temp_alarm, NULL, 0), SENSOR_ATTR(temp2_input, 0444, show_temp_value, NULL, 1), SENSOR_ATTR(temp2_fault, 0444, show_temp_fault, NULL, 1), SENSOR_ATTR(temp2_alarm, 0444, show_temp_alarm, NULL, 1), SENSOR_ATTR(temp3_input, 0444, show_temp_value, NULL, 2), SENSOR_ATTR(temp3_fault, 0444, show_temp_fault, NULL, 2), SENSOR_ATTR(temp3_alarm, 0444, show_temp_alarm, NULL, 2), SENSOR_ATTR(temp4_input, 0444, show_temp_value, NULL, 3), SENSOR_ATTR(temp4_fault, 0444, show_temp_fault, NULL, 3), SENSOR_ATTR(temp4_alarm, 0444, show_temp_alarm, NULL, 3), SENSOR_ATTR(temp5_input, 0444, show_temp_value, NULL, 4), SENSOR_ATTR(temp5_fault, 0444, show_temp_fault, NULL, 4), SENSOR_ATTR(temp5_alarm, 0444, show_temp_alarm, NULL, 4), SENSOR_ATTR(temp6_input, 0444, show_temp_value, NULL, 5), SENSOR_ATTR(temp6_fault, 0444, show_temp_fault, NULL, 5), SENSOR_ATTR(temp6_alarm, 0444, show_temp_alarm, NULL, 5), SENSOR_ATTR(temp7_input, 0444, show_temp_value, NULL, 6), SENSOR_ATTR(temp7_fault, 0444, show_temp_fault, NULL, 6), SENSOR_ATTR(temp7_alarm, 0444, show_temp_alarm, NULL, 6), 
SENSOR_ATTR(temp8_input, 0444, show_temp_value, NULL, 7), SENSOR_ATTR(temp8_fault, 0444, show_temp_fault, NULL, 7), SENSOR_ATTR(temp8_alarm, 0444, show_temp_alarm, NULL, 7), SENSOR_ATTR(temp9_input, 0444, show_temp_value, NULL, 8), SENSOR_ATTR(temp9_fault, 0444, show_temp_fault, NULL, 8), SENSOR_ATTR(temp9_alarm, 0444, show_temp_alarm, NULL, 8), SENSOR_ATTR(temp10_input, 0444, show_temp_value, NULL, 9), SENSOR_ATTR(temp10_fault, 0444, show_temp_fault, NULL, 9), SENSOR_ATTR(temp10_alarm, 0444, show_temp_alarm, NULL, 9), SENSOR_ATTR(temp11_input, 0444, show_temp_value, NULL, 10), SENSOR_ATTR(temp11_fault, 0444, show_temp_fault, NULL, 10), SENSOR_ATTR(temp11_alarm, 0444, show_temp_alarm, NULL, 10), SENSOR_ATTR(temp12_input, 0444, show_temp_value, NULL, 11), SENSOR_ATTR(temp12_fault, 0444, show_temp_fault, NULL, 11), SENSOR_ATTR(temp12_alarm, 0444, show_temp_alarm, NULL, 11), SENSOR_ATTR(temp13_input, 0444, show_temp_value, NULL, 12), SENSOR_ATTR(temp13_fault, 0444, show_temp_fault, NULL, 12), SENSOR_ATTR(temp13_alarm, 0444, show_temp_alarm, NULL, 12), SENSOR_ATTR(temp14_input, 0444, show_temp_value, NULL, 13), SENSOR_ATTR(temp14_fault, 0444, show_temp_fault, NULL, 13), SENSOR_ATTR(temp14_alarm, 0444, show_temp_alarm, NULL, 13), SENSOR_ATTR(temp15_input, 0444, show_temp_value, NULL, 14), SENSOR_ATTR(temp15_fault, 0444, show_temp_fault, NULL, 14), SENSOR_ATTR(temp15_alarm, 0444, show_temp_alarm, NULL, 14), SENSOR_ATTR(temp16_input, 0444, show_temp_value, NULL, 15), SENSOR_ATTR(temp16_fault, 0444, show_temp_fault, NULL, 15), SENSOR_ATTR(temp16_alarm, 0444, show_temp_alarm, NULL, 15), }; static struct sensor_device_attribute sch5636_fan_attr[] = { SENSOR_ATTR(fan1_input, 0444, show_fan_value, NULL, 0), SENSOR_ATTR(fan1_fault, 0444, show_fan_fault, NULL, 0), SENSOR_ATTR(fan1_alarm, 0444, show_fan_alarm, NULL, 0), SENSOR_ATTR(fan2_input, 0444, show_fan_value, NULL, 1), SENSOR_ATTR(fan2_fault, 0444, show_fan_fault, NULL, 1), SENSOR_ATTR(fan2_alarm, 0444, show_fan_alarm, 
NULL, 1), SENSOR_ATTR(fan3_input, 0444, show_fan_value, NULL, 2), SENSOR_ATTR(fan3_fault, 0444, show_fan_fault, NULL, 2), SENSOR_ATTR(fan3_alarm, 0444, show_fan_alarm, NULL, 2), SENSOR_ATTR(fan4_input, 0444, show_fan_value, NULL, 3), SENSOR_ATTR(fan4_fault, 0444, show_fan_fault, NULL, 3), SENSOR_ATTR(fan4_alarm, 0444, show_fan_alarm, NULL, 3), SENSOR_ATTR(fan5_input, 0444, show_fan_value, NULL, 4), SENSOR_ATTR(fan5_fault, 0444, show_fan_fault, NULL, 4), SENSOR_ATTR(fan5_alarm, 0444, show_fan_alarm, NULL, 4), SENSOR_ATTR(fan6_input, 0444, show_fan_value, NULL, 5), SENSOR_ATTR(fan6_fault, 0444, show_fan_fault, NULL, 5), SENSOR_ATTR(fan6_alarm, 0444, show_fan_alarm, NULL, 5), SENSOR_ATTR(fan7_input, 0444, show_fan_value, NULL, 6), SENSOR_ATTR(fan7_fault, 0444, show_fan_fault, NULL, 6), SENSOR_ATTR(fan7_alarm, 0444, show_fan_alarm, NULL, 6), SENSOR_ATTR(fan8_input, 0444, show_fan_value, NULL, 7), SENSOR_ATTR(fan8_fault, 0444, show_fan_fault, NULL, 7), SENSOR_ATTR(fan8_alarm, 0444, show_fan_alarm, NULL, 7), }; static int sch5636_remove(struct platform_device *pdev) { struct sch5636_data *data = platform_get_drvdata(pdev); int i; if (data->watchdog) sch56xx_watchdog_unregister(data->watchdog); if (data->hwmon_dev) hwmon_device_unregister(data->hwmon_dev); for (i = 0; i < ARRAY_SIZE(sch5636_attr); i++) device_remove_file(&pdev->dev, &sch5636_attr[i].dev_attr); for (i = 0; i < SCH5636_NO_TEMPS * 3; i++) device_remove_file(&pdev->dev, &sch5636_temp_attr[i].dev_attr); for (i = 0; i < SCH5636_NO_FANS * 3; i++) device_remove_file(&pdev->dev, &sch5636_fan_attr[i].dev_attr); platform_set_drvdata(pdev, NULL); kfree(data); return 0; } static int __devinit sch5636_probe(struct platform_device *pdev) { struct sch5636_data *data; int i, err, val, revision[2]; char id[4]; data = kzalloc(sizeof(struct sch5636_data), GFP_KERNEL); if (!data) return -ENOMEM; data->addr = platform_get_resource(pdev, IORESOURCE_IO, 0)->start; mutex_init(&data->update_lock); platform_set_drvdata(pdev, data); 
for (i = 0; i < 3; i++) { val = sch56xx_read_virtual_reg(data->addr, SCH5636_REG_FUJITSU_ID + i); if (val < 0) { pr_err("Could not read Fujitsu id byte at %#x\n", SCH5636_REG_FUJITSU_ID + i); err = val; goto error; } id[i] = val; } id[i] = '\0'; if (strcmp(id, "THS")) { pr_err("Unknown Fujitsu id: %02x%02x%02x\n", id[0], id[1], id[2]); err = -ENODEV; goto error; } for (i = 0; i < 2; i++) { val = sch56xx_read_virtual_reg(data->addr, SCH5636_REG_FUJITSU_REV + i); if (val < 0) { err = val; goto error; } revision[i] = val; } pr_info("Found %s chip at %#hx, revison: %d.%02d\n", DEVNAME, data->addr, revision[0], revision[1]); /* Read all temp + fan ctrl registers to determine which are active */ for (i = 0; i < SCH5636_NO_TEMPS; i++) { val = sch56xx_read_virtual_reg(data->addr, SCH5636_REG_TEMP_CTRL(i)); if (unlikely(val < 0)) { err = val; goto error; } data->temp_ctrl[i] = val; } for (i = 0; i < SCH5636_NO_FANS; i++) { val = sch56xx_read_virtual_reg(data->addr, SCH5636_REG_FAN_CTRL(i)); if (unlikely(val < 0)) { err = val; goto error; } data->fan_ctrl[i] = val; } for (i = 0; i < ARRAY_SIZE(sch5636_attr); i++) { err = device_create_file(&pdev->dev, &sch5636_attr[i].dev_attr); if (err) goto error; } for (i = 0; i < (SCH5636_NO_TEMPS * 3); i++) { if (data->temp_ctrl[i/3] & SCH5636_TEMP_DEACTIVATED) continue; err = device_create_file(&pdev->dev, &sch5636_temp_attr[i].dev_attr); if (err) goto error; } for (i = 0; i < (SCH5636_NO_FANS * 3); i++) { if (data->fan_ctrl[i/3] & SCH5636_FAN_DEACTIVATED) continue; err = device_create_file(&pdev->dev, &sch5636_fan_attr[i].dev_attr); if (err) goto error; } data->hwmon_dev = hwmon_device_register(&pdev->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); data->hwmon_dev = NULL; goto error; } /* Note failing to register the watchdog is not a fatal error */ data->watchdog = sch56xx_watchdog_register(data->addr, (revision[0] << 8) | revision[1], &data->update_lock, 0); return 0; error: sch5636_remove(pdev); return err; } 
static struct platform_driver sch5636_driver = { .driver = { .owner = THIS_MODULE, .name = DRVNAME, }, .probe = sch5636_probe, .remove = sch5636_remove, }; module_platform_driver(sch5636_driver); MODULE_DESCRIPTION("SMSC SCH5636 Hardware Monitoring Driver"); MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); MODULE_LICENSE("GPL");
gpl-2.0
TeamRegular/android_kernel_zara
net/batman-adv/bat_debugfs.c
4812
9053
/* * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors: * * Marek Lindner * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA * */ #include "main.h" #include <linux/debugfs.h> #include "bat_debugfs.h" #include "translation-table.h" #include "originator.h" #include "hard-interface.h" #include "gateway_common.h" #include "gateway_client.h" #include "soft-interface.h" #include "vis.h" #include "icmp_socket.h" static struct dentry *bat_debugfs; #ifdef CONFIG_BATMAN_ADV_DEBUG #define LOG_BUFF_MASK (log_buff_len-1) #define LOG_BUFF(idx) (debug_log->log_buff[(idx) & LOG_BUFF_MASK]) static int log_buff_len = LOG_BUF_LEN; static void emit_log_char(struct debug_log *debug_log, char c) { LOG_BUFF(debug_log->log_end) = c; debug_log->log_end++; if (debug_log->log_end - debug_log->log_start > log_buff_len) debug_log->log_start = debug_log->log_end - log_buff_len; } __printf(2, 3) static int fdebug_log(struct debug_log *debug_log, const char *fmt, ...) { va_list args; static char debug_log_buf[256]; char *p; if (!debug_log) return 0; spin_lock_bh(&debug_log->lock); va_start(args, fmt); vscnprintf(debug_log_buf, sizeof(debug_log_buf), fmt, args); va_end(args); for (p = debug_log_buf; *p != 0; p++) emit_log_char(debug_log, *p); spin_unlock_bh(&debug_log->lock); wake_up(&debug_log->queue_wait); return 0; } int debug_log(struct bat_priv *bat_priv, const char *fmt, ...) 
{ va_list args; char tmp_log_buf[256]; va_start(args, fmt); vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args); fdebug_log(bat_priv->debug_log, "[%10lu] %s", (jiffies / HZ), tmp_log_buf); va_end(args); return 0; } static int log_open(struct inode *inode, struct file *file) { nonseekable_open(inode, file); file->private_data = inode->i_private; inc_module_count(); return 0; } static int log_release(struct inode *inode, struct file *file) { dec_module_count(); return 0; } static ssize_t log_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct bat_priv *bat_priv = file->private_data; struct debug_log *debug_log = bat_priv->debug_log; int error, i = 0; char c; if ((file->f_flags & O_NONBLOCK) && !(debug_log->log_end - debug_log->log_start)) return -EAGAIN; if (!buf) return -EINVAL; if (count == 0) return 0; if (!access_ok(VERIFY_WRITE, buf, count)) return -EFAULT; error = wait_event_interruptible(debug_log->queue_wait, (debug_log->log_start - debug_log->log_end)); if (error) return error; spin_lock_bh(&debug_log->lock); while ((!error) && (i < count) && (debug_log->log_start != debug_log->log_end)) { c = LOG_BUFF(debug_log->log_start); debug_log->log_start++; spin_unlock_bh(&debug_log->lock); error = __put_user(c, buf); spin_lock_bh(&debug_log->lock); buf++; i++; } spin_unlock_bh(&debug_log->lock); if (!error) return i; return error; } static unsigned int log_poll(struct file *file, poll_table *wait) { struct bat_priv *bat_priv = file->private_data; struct debug_log *debug_log = bat_priv->debug_log; poll_wait(file, &debug_log->queue_wait, wait); if (debug_log->log_end - debug_log->log_start) return POLLIN | POLLRDNORM; return 0; } static const struct file_operations log_fops = { .open = log_open, .release = log_release, .read = log_read, .poll = log_poll, .llseek = no_llseek, }; static int debug_log_setup(struct bat_priv *bat_priv) { struct dentry *d; if (!bat_priv->debug_dir) goto err; bat_priv->debug_log = 
kzalloc(sizeof(*bat_priv->debug_log), GFP_ATOMIC); if (!bat_priv->debug_log) goto err; spin_lock_init(&bat_priv->debug_log->lock); init_waitqueue_head(&bat_priv->debug_log->queue_wait); d = debugfs_create_file("log", S_IFREG | S_IRUSR, bat_priv->debug_dir, bat_priv, &log_fops); if (d) goto err; return 0; err: return 1; } static void debug_log_cleanup(struct bat_priv *bat_priv) { kfree(bat_priv->debug_log); bat_priv->debug_log = NULL; } #else /* CONFIG_BATMAN_ADV_DEBUG */ static int debug_log_setup(struct bat_priv *bat_priv) { bat_priv->debug_log = NULL; return 0; } static void debug_log_cleanup(struct bat_priv *bat_priv) { return; } #endif static int bat_algorithms_open(struct inode *inode, struct file *file) { return single_open(file, bat_algo_seq_print_text, NULL); } static int originators_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; return single_open(file, orig_seq_print_text, net_dev); } static int gateways_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; return single_open(file, gw_client_seq_print_text, net_dev); } static int softif_neigh_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; return single_open(file, softif_neigh_seq_print_text, net_dev); } static int transtable_global_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; return single_open(file, tt_global_seq_print_text, net_dev); } static int transtable_local_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; return single_open(file, tt_local_seq_print_text, net_dev); } static int vis_data_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; return single_open(file, vis_seq_print_text, net_dev); } struct 
bat_debuginfo { struct attribute attr; const struct file_operations fops; }; #define BAT_DEBUGINFO(_name, _mode, _open) \ struct bat_debuginfo bat_debuginfo_##_name = { \ .attr = { .name = __stringify(_name), \ .mode = _mode, }, \ .fops = { .owner = THIS_MODULE, \ .open = _open, \ .read = seq_read, \ .llseek = seq_lseek, \ .release = single_release, \ } \ }; static BAT_DEBUGINFO(routing_algos, S_IRUGO, bat_algorithms_open); static BAT_DEBUGINFO(originators, S_IRUGO, originators_open); static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open); static BAT_DEBUGINFO(softif_neigh, S_IRUGO, softif_neigh_open); static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open); static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open); static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open); static struct bat_debuginfo *mesh_debuginfos[] = { &bat_debuginfo_originators, &bat_debuginfo_gateways, &bat_debuginfo_softif_neigh, &bat_debuginfo_transtable_global, &bat_debuginfo_transtable_local, &bat_debuginfo_vis_data, NULL, }; void debugfs_init(void) { struct bat_debuginfo *bat_debug; struct dentry *file; bat_debugfs = debugfs_create_dir(DEBUGFS_BAT_SUBDIR, NULL); if (bat_debugfs == ERR_PTR(-ENODEV)) bat_debugfs = NULL; if (!bat_debugfs) goto out; bat_debug = &bat_debuginfo_routing_algos; file = debugfs_create_file(bat_debug->attr.name, S_IFREG | bat_debug->attr.mode, bat_debugfs, NULL, &bat_debug->fops); if (!file) pr_err("Can't add debugfs file: %s\n", bat_debug->attr.name); out: return; } void debugfs_destroy(void) { if (bat_debugfs) { debugfs_remove_recursive(bat_debugfs); bat_debugfs = NULL; } } int debugfs_add_meshif(struct net_device *dev) { struct bat_priv *bat_priv = netdev_priv(dev); struct bat_debuginfo **bat_debug; struct dentry *file; if (!bat_debugfs) goto out; bat_priv->debug_dir = debugfs_create_dir(dev->name, bat_debugfs); if (!bat_priv->debug_dir) goto out; bat_socket_setup(bat_priv); debug_log_setup(bat_priv); for (bat_debug = mesh_debuginfos; 
*bat_debug; ++bat_debug) { file = debugfs_create_file(((*bat_debug)->attr).name, S_IFREG | ((*bat_debug)->attr).mode, bat_priv->debug_dir, dev, &(*bat_debug)->fops); if (!file) { bat_err(dev, "Can't add debugfs file: %s/%s\n", dev->name, ((*bat_debug)->attr).name); goto rem_attr; } } return 0; rem_attr: debugfs_remove_recursive(bat_priv->debug_dir); bat_priv->debug_dir = NULL; out: #ifdef CONFIG_DEBUG_FS return -ENOMEM; #else return 0; #endif /* CONFIG_DEBUG_FS */ } void debugfs_del_meshif(struct net_device *dev) { struct bat_priv *bat_priv = netdev_priv(dev); debug_log_cleanup(bat_priv); if (bat_debugfs) { debugfs_remove_recursive(bat_priv->debug_dir); bat_priv->debug_dir = NULL; } }
gpl-2.0
basr/Hammerhead
drivers/media/video/tda9840.c
4812
5608
/* tda9840 - i2c-driver for the tda9840 by SGS Thomson Copyright (C) 1998-2003 Michael Hunold <michael@mihu.de> Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl> The tda9840 is a stereo/dual sound processor with digital identification. It can be found at address 0x84 on the i2c-bus. For detailed informations download the specifications directly from SGS Thomson at http://www.st.com This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/ioctl.h> #include <linux/slab.h> #include <linux/i2c.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> MODULE_AUTHOR("Michael Hunold <michael@mihu.de>"); MODULE_DESCRIPTION("tda9840 driver"); MODULE_LICENSE("GPL"); static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Debug level (0-1)"); #define SWITCH 0x00 #define LEVEL_ADJUST 0x02 #define STEREO_ADJUST 0x03 #define TEST 0x04 #define TDA9840_SET_MUTE 0x00 #define TDA9840_SET_MONO 0x10 #define TDA9840_SET_STEREO 0x2a #define TDA9840_SET_LANG1 0x12 #define TDA9840_SET_LANG2 0x1e #define TDA9840_SET_BOTH 0x1a #define TDA9840_SET_BOTH_R 0x16 #define TDA9840_SET_EXTERNAL 0x7a static void tda9840_write(struct v4l2_subdev *sd, u8 reg, u8 val) { struct i2c_client *client = v4l2_get_subdevdata(sd); if (i2c_smbus_write_byte_data(client, reg, val)) v4l2_dbg(1, debug, sd, "error writing %02x to %02x\n", val, reg); } static int tda9840_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *t) { int byte; if (t->index) return -EINVAL; switch (t->audmode) { case V4L2_TUNER_MODE_STEREO: byte = TDA9840_SET_STEREO; break; case V4L2_TUNER_MODE_LANG1_LANG2: byte = TDA9840_SET_BOTH; break; case V4L2_TUNER_MODE_LANG1: byte = TDA9840_SET_LANG1; break; case V4L2_TUNER_MODE_LANG2: byte = TDA9840_SET_LANG2; break; default: byte = TDA9840_SET_MONO; break; } v4l2_dbg(1, debug, sd, "TDA9840_SWITCH: 0x%02x\n", byte); tda9840_write(sd, SWITCH, byte); return 0; } static int tda9840_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *t) { struct i2c_client *client = v4l2_get_subdevdata(sd); u8 byte; t->rxsubchans = V4L2_TUNER_SUB_MONO; if (1 != i2c_master_recv(client, &byte, 1)) { v4l2_dbg(1, debug, sd, "i2c_master_recv() failed\n"); return -EIO; } if (byte & 0x80) { v4l2_dbg(1, debug, sd, "TDA9840_DETECT: register contents invalid\n"); return -EINVAL; } v4l2_dbg(1, debug, sd, "TDA9840_DETECT: byte: 0x%02x\n", byte); switch (byte & 0x60) { case 0x00: t->rxsubchans = 
V4L2_TUNER_SUB_MONO; break; case 0x20: t->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; break; case 0x40: t->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_MONO; break; default: /* Incorrect detect */ t->rxsubchans = V4L2_TUNER_MODE_MONO; break; } return 0; } static int tda9840_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_TDA9840, 0); } /* ----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops tda9840_core_ops = { .g_chip_ident = tda9840_g_chip_ident, }; static const struct v4l2_subdev_tuner_ops tda9840_tuner_ops = { .s_tuner = tda9840_s_tuner, .g_tuner = tda9840_g_tuner, }; static const struct v4l2_subdev_ops tda9840_ops = { .core = &tda9840_core_ops, .tuner = &tda9840_tuner_ops, }; /* ----------------------------------------------------------------------- */ static int tda9840_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct v4l2_subdev *sd; /* let's see whether this adapter can support what we need */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_BYTE_DATA | I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) return -EIO; v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); if (sd == NULL) return -ENOMEM; v4l2_i2c_subdev_init(sd, client, &tda9840_ops); /* set initial values for level & stereo - adjustment, mode */ tda9840_write(sd, LEVEL_ADJUST, 0); tda9840_write(sd, STEREO_ADJUST, 0); tda9840_write(sd, SWITCH, TDA9840_SET_STEREO); return 0; } static int tda9840_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); kfree(sd); return 0; } static const struct i2c_device_id tda9840_id[] = { { "tda9840", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, tda9840_id); static struct 
i2c_driver tda9840_driver = { .driver = { .owner = THIS_MODULE, .name = "tda9840", }, .probe = tda9840_probe, .remove = tda9840_remove, .id_table = tda9840_id, }; module_i2c_driver(tda9840_driver);
gpl-2.0
SohamJ/android_kernel_motorola_msm8226
sound/soc/codecs/wm8940.c
4812
22564
/* * wm8940.c -- WM8940 ALSA Soc Audio driver * * Author: Jonathan Cameron <jic23@cam.ac.uk> * * Based on wm8510.c * Copyright 2006 Wolfson Microelectronics PLC. * Author: Liam Girdwood <lrg@slimlogic.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Not currently handled: * Notch filter control * AUXMode (inverting vs mixer) * No means to obtain current gain if alc enabled. * No use made of gpio * Fast VMID discharge for power down * Soft Start * DLR and ALR Swaps not enabled * Digital Sidetone not supported */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/i2c.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/initval.h> #include <sound/tlv.h> #include "wm8940.h" struct wm8940_priv { unsigned int sysclk; enum snd_soc_control_type control_type; }; static int wm8940_volatile_register(struct snd_soc_codec *codec, unsigned int reg) { switch (reg) { case WM8940_SOFTRESET: return 1; default: return 0; } } static u16 wm8940_reg_defaults[] = { 0x8940, /* Soft Reset */ 0x0000, /* Power 1 */ 0x0000, /* Power 2 */ 0x0000, /* Power 3 */ 0x0010, /* Interface Control */ 0x0000, /* Companding Control */ 0x0140, /* Clock Control */ 0x0000, /* Additional Controls */ 0x0000, /* GPIO Control */ 0x0002, /* Auto Increment Control */ 0x0000, /* DAC Control */ 0x00FF, /* DAC Volume */ 0, 0, 0x0100, /* ADC Control */ 0x00FF, /* ADC Volume */ 0x0000, /* Notch Filter 1 Control 1 */ 0x0000, /* Notch Filter 1 Control 2 */ 0x0000, /* Notch Filter 2 Control 1 */ 0x0000, /* Notch Filter 2 Control 2 */ 0x0000, /* Notch Filter 3 Control 1 */ 0x0000, /* Notch Filter 3 Control 2 */ 0x0000, /* Notch Filter 4 Control 
1 */ 0x0000, /* Notch Filter 4 Control 2 */ 0x0032, /* DAC Limit Control 1 */ 0x0000, /* DAC Limit Control 2 */ 0, 0, 0, 0, 0, 0, 0x0038, /* ALC Control 1 */ 0x000B, /* ALC Control 2 */ 0x0032, /* ALC Control 3 */ 0x0000, /* Noise Gate */ 0x0041, /* PLLN */ 0x000C, /* PLLK1 */ 0x0093, /* PLLK2 */ 0x00E9, /* PLLK3 */ 0, 0, 0x0030, /* ALC Control 4 */ 0, 0x0002, /* Input Control */ 0x0050, /* PGA Gain */ 0, 0x0002, /* ADC Boost Control */ 0, 0x0002, /* Output Control */ 0x0000, /* Speaker Mixer Control */ 0, 0, 0, 0x0079, /* Speaker Volume */ 0, 0x0000, /* Mono Mixer Control */ }; static const char *wm8940_companding[] = { "Off", "NC", "u-law", "A-law" }; static const struct soc_enum wm8940_adc_companding_enum = SOC_ENUM_SINGLE(WM8940_COMPANDINGCTL, 1, 4, wm8940_companding); static const struct soc_enum wm8940_dac_companding_enum = SOC_ENUM_SINGLE(WM8940_COMPANDINGCTL, 3, 4, wm8940_companding); static const char *wm8940_alc_mode_text[] = {"ALC", "Limiter"}; static const struct soc_enum wm8940_alc_mode_enum = SOC_ENUM_SINGLE(WM8940_ALC3, 8, 2, wm8940_alc_mode_text); static const char *wm8940_mic_bias_level_text[] = {"0.9", "0.65"}; static const struct soc_enum wm8940_mic_bias_level_enum = SOC_ENUM_SINGLE(WM8940_INPUTCTL, 8, 2, wm8940_mic_bias_level_text); static const char *wm8940_filter_mode_text[] = {"Audio", "Application"}; static const struct soc_enum wm8940_filter_mode_enum = SOC_ENUM_SINGLE(WM8940_ADC, 7, 2, wm8940_filter_mode_text); static DECLARE_TLV_DB_SCALE(wm8940_spk_vol_tlv, -5700, 100, 1); static DECLARE_TLV_DB_SCALE(wm8940_att_tlv, -1000, 1000, 0); static DECLARE_TLV_DB_SCALE(wm8940_pga_vol_tlv, -1200, 75, 0); static DECLARE_TLV_DB_SCALE(wm8940_alc_min_tlv, -1200, 600, 0); static DECLARE_TLV_DB_SCALE(wm8940_alc_max_tlv, 675, 600, 0); static DECLARE_TLV_DB_SCALE(wm8940_alc_tar_tlv, -2250, 50, 0); static DECLARE_TLV_DB_SCALE(wm8940_lim_boost_tlv, 0, 100, 0); static DECLARE_TLV_DB_SCALE(wm8940_lim_thresh_tlv, -600, 100, 0); static 
DECLARE_TLV_DB_SCALE(wm8940_adc_tlv, -12750, 50, 1); static DECLARE_TLV_DB_SCALE(wm8940_capture_boost_vol_tlv, 0, 2000, 0); static const struct snd_kcontrol_new wm8940_snd_controls[] = { SOC_SINGLE("Digital Loopback Switch", WM8940_COMPANDINGCTL, 6, 1, 0), SOC_ENUM("DAC Companding", wm8940_dac_companding_enum), SOC_ENUM("ADC Companding", wm8940_adc_companding_enum), SOC_ENUM("ALC Mode", wm8940_alc_mode_enum), SOC_SINGLE("ALC Switch", WM8940_ALC1, 8, 1, 0), SOC_SINGLE_TLV("ALC Capture Max Gain", WM8940_ALC1, 3, 7, 1, wm8940_alc_max_tlv), SOC_SINGLE_TLV("ALC Capture Min Gain", WM8940_ALC1, 0, 7, 0, wm8940_alc_min_tlv), SOC_SINGLE_TLV("ALC Capture Target", WM8940_ALC2, 0, 14, 0, wm8940_alc_tar_tlv), SOC_SINGLE("ALC Capture Hold", WM8940_ALC2, 4, 10, 0), SOC_SINGLE("ALC Capture Decay", WM8940_ALC3, 4, 10, 0), SOC_SINGLE("ALC Capture Attach", WM8940_ALC3, 0, 10, 0), SOC_SINGLE("ALC ZC Switch", WM8940_ALC4, 1, 1, 0), SOC_SINGLE("ALC Capture Noise Gate Switch", WM8940_NOISEGATE, 3, 1, 0), SOC_SINGLE("ALC Capture Noise Gate Threshold", WM8940_NOISEGATE, 0, 7, 0), SOC_SINGLE("DAC Playback Limiter Switch", WM8940_DACLIM1, 8, 1, 0), SOC_SINGLE("DAC Playback Limiter Attack", WM8940_DACLIM1, 0, 9, 0), SOC_SINGLE("DAC Playback Limiter Decay", WM8940_DACLIM1, 4, 11, 0), SOC_SINGLE_TLV("DAC Playback Limiter Threshold", WM8940_DACLIM2, 4, 9, 1, wm8940_lim_thresh_tlv), SOC_SINGLE_TLV("DAC Playback Limiter Boost", WM8940_DACLIM2, 0, 12, 0, wm8940_lim_boost_tlv), SOC_SINGLE("Capture PGA ZC Switch", WM8940_PGAGAIN, 7, 1, 0), SOC_SINGLE_TLV("Capture PGA Volume", WM8940_PGAGAIN, 0, 63, 0, wm8940_pga_vol_tlv), SOC_SINGLE_TLV("Digital Playback Volume", WM8940_DACVOL, 0, 255, 0, wm8940_adc_tlv), SOC_SINGLE_TLV("Digital Capture Volume", WM8940_ADCVOL, 0, 255, 0, wm8940_adc_tlv), SOC_ENUM("Mic Bias Level", wm8940_mic_bias_level_enum), SOC_SINGLE_TLV("Capture Boost Volue", WM8940_ADCBOOST, 8, 1, 0, wm8940_capture_boost_vol_tlv), SOC_SINGLE_TLV("Speaker Playback Volume", WM8940_SPKVOL, 0, 63, 
0, wm8940_spk_vol_tlv), SOC_SINGLE("Speaker Playback Switch", WM8940_SPKVOL, 6, 1, 1), SOC_SINGLE_TLV("Speaker Mixer Line Bypass Volume", WM8940_SPKVOL, 8, 1, 1, wm8940_att_tlv), SOC_SINGLE("Speaker Playback ZC Switch", WM8940_SPKVOL, 7, 1, 0), SOC_SINGLE("Mono Out Switch", WM8940_MONOMIX, 6, 1, 1), SOC_SINGLE_TLV("Mono Mixer Line Bypass Volume", WM8940_MONOMIX, 7, 1, 1, wm8940_att_tlv), SOC_SINGLE("High Pass Filter Switch", WM8940_ADC, 8, 1, 0), SOC_ENUM("High Pass Filter Mode", wm8940_filter_mode_enum), SOC_SINGLE("High Pass Filter Cut Off", WM8940_ADC, 4, 7, 0), SOC_SINGLE("ADC Inversion Switch", WM8940_ADC, 0, 1, 0), SOC_SINGLE("DAC Inversion Switch", WM8940_DAC, 0, 1, 0), SOC_SINGLE("DAC Auto Mute Switch", WM8940_DAC, 2, 1, 0), SOC_SINGLE("ZC Timeout Clock Switch", WM8940_ADDCNTRL, 0, 1, 0), }; static const struct snd_kcontrol_new wm8940_speaker_mixer_controls[] = { SOC_DAPM_SINGLE("Line Bypass Switch", WM8940_SPKMIX, 1, 1, 0), SOC_DAPM_SINGLE("Aux Playback Switch", WM8940_SPKMIX, 5, 1, 0), SOC_DAPM_SINGLE("PCM Playback Switch", WM8940_SPKMIX, 0, 1, 0), }; static const struct snd_kcontrol_new wm8940_mono_mixer_controls[] = { SOC_DAPM_SINGLE("Line Bypass Switch", WM8940_MONOMIX, 1, 1, 0), SOC_DAPM_SINGLE("Aux Playback Switch", WM8940_MONOMIX, 2, 1, 0), SOC_DAPM_SINGLE("PCM Playback Switch", WM8940_MONOMIX, 0, 1, 0), }; static DECLARE_TLV_DB_SCALE(wm8940_boost_vol_tlv, -1500, 300, 1); static const struct snd_kcontrol_new wm8940_input_boost_controls[] = { SOC_DAPM_SINGLE("Mic PGA Switch", WM8940_PGAGAIN, 6, 1, 1), SOC_DAPM_SINGLE_TLV("Aux Volume", WM8940_ADCBOOST, 0, 7, 0, wm8940_boost_vol_tlv), SOC_DAPM_SINGLE_TLV("Mic Volume", WM8940_ADCBOOST, 4, 7, 0, wm8940_boost_vol_tlv), }; static const struct snd_kcontrol_new wm8940_micpga_controls[] = { SOC_DAPM_SINGLE("AUX Switch", WM8940_INPUTCTL, 2, 1, 0), SOC_DAPM_SINGLE("MICP Switch", WM8940_INPUTCTL, 0, 1, 0), SOC_DAPM_SINGLE("MICN Switch", WM8940_INPUTCTL, 1, 1, 0), }; static const struct snd_soc_dapm_widget 
wm8940_dapm_widgets[] = { SND_SOC_DAPM_MIXER("Speaker Mixer", WM8940_POWER3, 2, 0, &wm8940_speaker_mixer_controls[0], ARRAY_SIZE(wm8940_speaker_mixer_controls)), SND_SOC_DAPM_MIXER("Mono Mixer", WM8940_POWER3, 3, 0, &wm8940_mono_mixer_controls[0], ARRAY_SIZE(wm8940_mono_mixer_controls)), SND_SOC_DAPM_DAC("DAC", "HiFi Playback", WM8940_POWER3, 0, 0), SND_SOC_DAPM_PGA("SpkN Out", WM8940_POWER3, 5, 0, NULL, 0), SND_SOC_DAPM_PGA("SpkP Out", WM8940_POWER3, 6, 0, NULL, 0), SND_SOC_DAPM_PGA("Mono Out", WM8940_POWER3, 7, 0, NULL, 0), SND_SOC_DAPM_OUTPUT("MONOOUT"), SND_SOC_DAPM_OUTPUT("SPKOUTP"), SND_SOC_DAPM_OUTPUT("SPKOUTN"), SND_SOC_DAPM_PGA("Aux Input", WM8940_POWER1, 6, 0, NULL, 0), SND_SOC_DAPM_ADC("ADC", "HiFi Capture", WM8940_POWER2, 0, 0), SND_SOC_DAPM_MIXER("Mic PGA", WM8940_POWER2, 2, 0, &wm8940_micpga_controls[0], ARRAY_SIZE(wm8940_micpga_controls)), SND_SOC_DAPM_MIXER("Boost Mixer", WM8940_POWER2, 4, 0, &wm8940_input_boost_controls[0], ARRAY_SIZE(wm8940_input_boost_controls)), SND_SOC_DAPM_MICBIAS("Mic Bias", WM8940_POWER1, 4, 0), SND_SOC_DAPM_INPUT("MICN"), SND_SOC_DAPM_INPUT("MICP"), SND_SOC_DAPM_INPUT("AUX"), }; static const struct snd_soc_dapm_route audio_map[] = { /* Mono output mixer */ {"Mono Mixer", "PCM Playback Switch", "DAC"}, {"Mono Mixer", "Aux Playback Switch", "Aux Input"}, {"Mono Mixer", "Line Bypass Switch", "Boost Mixer"}, /* Speaker output mixer */ {"Speaker Mixer", "PCM Playback Switch", "DAC"}, {"Speaker Mixer", "Aux Playback Switch", "Aux Input"}, {"Speaker Mixer", "Line Bypass Switch", "Boost Mixer"}, /* Outputs */ {"Mono Out", NULL, "Mono Mixer"}, {"MONOOUT", NULL, "Mono Out"}, {"SpkN Out", NULL, "Speaker Mixer"}, {"SpkP Out", NULL, "Speaker Mixer"}, {"SPKOUTN", NULL, "SpkN Out"}, {"SPKOUTP", NULL, "SpkP Out"}, /* Microphone PGA */ {"Mic PGA", "MICN Switch", "MICN"}, {"Mic PGA", "MICP Switch", "MICP"}, {"Mic PGA", "AUX Switch", "AUX"}, /* Boost Mixer */ {"Boost Mixer", "Mic PGA Switch", "Mic PGA"}, {"Boost Mixer", "Mic Volume", "MICP"}, 
{"Boost Mixer", "Aux Volume", "Aux Input"}, {"ADC", NULL, "Boost Mixer"}, }; static int wm8940_add_widgets(struct snd_soc_codec *codec) { struct snd_soc_dapm_context *dapm = &codec->dapm; int ret; ret = snd_soc_dapm_new_controls(dapm, wm8940_dapm_widgets, ARRAY_SIZE(wm8940_dapm_widgets)); if (ret) goto error_ret; ret = snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); error_ret: return ret; } #define wm8940_reset(c) snd_soc_write(c, WM8940_SOFTRESET, 0); static int wm8940_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u16 iface = snd_soc_read(codec, WM8940_IFACE) & 0xFE67; u16 clk = snd_soc_read(codec, WM8940_CLOCK) & 0x1fe; switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: clk |= 1; break; case SND_SOC_DAIFMT_CBS_CFS: break; default: return -EINVAL; } snd_soc_write(codec, WM8940_CLOCK, clk); switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: iface |= (2 << 3); break; case SND_SOC_DAIFMT_LEFT_J: iface |= (1 << 3); break; case SND_SOC_DAIFMT_RIGHT_J: break; case SND_SOC_DAIFMT_DSP_A: iface |= (3 << 3); break; case SND_SOC_DAIFMT_DSP_B: iface |= (3 << 3) | (1 << 7); break; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_NB_IF: iface |= (1 << 7); break; case SND_SOC_DAIFMT_IB_NF: iface |= (1 << 8); break; case SND_SOC_DAIFMT_IB_IF: iface |= (1 << 8) | (1 << 7); break; } snd_soc_write(codec, WM8940_IFACE, iface); return 0; } static int wm8940_i2s_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_codec *codec = rtd->codec; u16 iface = snd_soc_read(codec, WM8940_IFACE) & 0xFD9F; u16 addcntrl = snd_soc_read(codec, WM8940_ADDCNTRL) & 0xFFF1; u16 companding = snd_soc_read(codec, WM8940_COMPANDINGCTL) & 0xFFDF; int ret; /* LoutR control */ if (substream->stream == 
SNDRV_PCM_STREAM_CAPTURE && params_channels(params) == 2) iface |= (1 << 9); switch (params_rate(params)) { case 8000: addcntrl |= (0x5 << 1); break; case 11025: addcntrl |= (0x4 << 1); break; case 16000: addcntrl |= (0x3 << 1); break; case 22050: addcntrl |= (0x2 << 1); break; case 32000: addcntrl |= (0x1 << 1); break; case 44100: case 48000: break; } ret = snd_soc_write(codec, WM8940_ADDCNTRL, addcntrl); if (ret) goto error_ret; switch (params_format(params)) { case SNDRV_PCM_FORMAT_S8: companding = companding | (1 << 5); break; case SNDRV_PCM_FORMAT_S16_LE: break; case SNDRV_PCM_FORMAT_S20_3LE: iface |= (1 << 5); break; case SNDRV_PCM_FORMAT_S24_LE: iface |= (2 << 5); break; case SNDRV_PCM_FORMAT_S32_LE: iface |= (3 << 5); break; } ret = snd_soc_write(codec, WM8940_COMPANDINGCTL, companding); if (ret) goto error_ret; ret = snd_soc_write(codec, WM8940_IFACE, iface); error_ret: return ret; } static int wm8940_mute(struct snd_soc_dai *dai, int mute) { struct snd_soc_codec *codec = dai->codec; u16 mute_reg = snd_soc_read(codec, WM8940_DAC) & 0xffbf; if (mute) mute_reg |= 0x40; return snd_soc_write(codec, WM8940_DAC, mute_reg); } static int wm8940_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { u16 val; u16 pwr_reg = snd_soc_read(codec, WM8940_POWER1) & 0x1F0; int ret = 0; switch (level) { case SND_SOC_BIAS_ON: /* ensure bufioen and biasen */ pwr_reg |= (1 << 2) | (1 << 3); /* Enable thermal shutdown */ val = snd_soc_read(codec, WM8940_OUTPUTCTL); ret = snd_soc_write(codec, WM8940_OUTPUTCTL, val | 0x2); if (ret) break; /* set vmid to 75k */ ret = snd_soc_write(codec, WM8940_POWER1, pwr_reg | 0x1); break; case SND_SOC_BIAS_PREPARE: /* ensure bufioen and biasen */ pwr_reg |= (1 << 2) | (1 << 3); ret = snd_soc_write(codec, WM8940_POWER1, pwr_reg | 0x1); break; case SND_SOC_BIAS_STANDBY: if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { ret = snd_soc_cache_sync(codec); if (ret < 0) { dev_err(codec->dev, "Failed to sync cache: %d\n", ret); 
return ret; } } /* ensure bufioen and biasen */ pwr_reg |= (1 << 2) | (1 << 3); /* set vmid to 300k for standby */ ret = snd_soc_write(codec, WM8940_POWER1, pwr_reg | 0x2); break; case SND_SOC_BIAS_OFF: ret = snd_soc_write(codec, WM8940_POWER1, pwr_reg); break; } codec->dapm.bias_level = level; return ret; } struct pll_ { unsigned int pre_scale:2; unsigned int n:4; unsigned int k; }; static struct pll_ pll_div; /* The size in bits of the pll divide multiplied by 10 * to allow rounding later */ #define FIXED_PLL_SIZE ((1 << 24) * 10) static void pll_factors(unsigned int target, unsigned int source) { unsigned long long Kpart; unsigned int K, Ndiv, Nmod; /* The left shift ist to avoid accuracy loss when right shifting */ Ndiv = target / source; if (Ndiv > 12) { source <<= 1; /* Multiply by 2 */ pll_div.pre_scale = 0; Ndiv = target / source; } else if (Ndiv < 3) { source >>= 2; /* Divide by 4 */ pll_div.pre_scale = 3; Ndiv = target / source; } else if (Ndiv < 6) { source >>= 1; /* divide by 2 */ pll_div.pre_scale = 2; Ndiv = target / source; } else pll_div.pre_scale = 1; if ((Ndiv < 6) || (Ndiv > 12)) printk(KERN_WARNING "WM8940 N value %d outwith recommended range!d\n", Ndiv); pll_div.n = Ndiv; Nmod = target % source; Kpart = FIXED_PLL_SIZE * (long long)Nmod; do_div(Kpart, source); K = Kpart & 0xFFFFFFFF; /* Check if we need to round */ if ((K % 10) >= 5) K += 5; /* Move down to proper range now rounding is done */ K /= 10; pll_div.k = K; } /* Untested at the moment */ static int wm8940_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id, int source, unsigned int freq_in, unsigned int freq_out) { struct snd_soc_codec *codec = codec_dai->codec; u16 reg; /* Turn off PLL */ reg = snd_soc_read(codec, WM8940_POWER1); snd_soc_write(codec, WM8940_POWER1, reg & 0x1df); if (freq_in == 0 || freq_out == 0) { /* Clock CODEC directly from MCLK */ reg = snd_soc_read(codec, WM8940_CLOCK); snd_soc_write(codec, WM8940_CLOCK, reg & 0x0ff); /* Pll power down */ snd_soc_write(codec, 
WM8940_PLLN, (1 << 7)); return 0; } /* Pll is followed by a frequency divide by 4 */ pll_factors(freq_out*4, freq_in); if (pll_div.k) snd_soc_write(codec, WM8940_PLLN, (pll_div.pre_scale << 4) | pll_div.n | (1 << 6)); else /* No factional component */ snd_soc_write(codec, WM8940_PLLN, (pll_div.pre_scale << 4) | pll_div.n); snd_soc_write(codec, WM8940_PLLK1, pll_div.k >> 18); snd_soc_write(codec, WM8940_PLLK2, (pll_div.k >> 9) & 0x1ff); snd_soc_write(codec, WM8940_PLLK3, pll_div.k & 0x1ff); /* Enable the PLL */ reg = snd_soc_read(codec, WM8940_POWER1); snd_soc_write(codec, WM8940_POWER1, reg | 0x020); /* Run CODEC from PLL instead of MCLK */ reg = snd_soc_read(codec, WM8940_CLOCK); snd_soc_write(codec, WM8940_CLOCK, reg | 0x100); return 0; } static int wm8940_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = codec_dai->codec; struct wm8940_priv *wm8940 = snd_soc_codec_get_drvdata(codec); switch (freq) { case 11289600: case 12000000: case 12288000: case 16934400: case 18432000: wm8940->sysclk = freq; return 0; } return -EINVAL; } static int wm8940_set_dai_clkdiv(struct snd_soc_dai *codec_dai, int div_id, int div) { struct snd_soc_codec *codec = codec_dai->codec; u16 reg; int ret = 0; switch (div_id) { case WM8940_BCLKDIV: reg = snd_soc_read(codec, WM8940_CLOCK) & 0xFFE3; ret = snd_soc_write(codec, WM8940_CLOCK, reg | (div << 2)); break; case WM8940_MCLKDIV: reg = snd_soc_read(codec, WM8940_CLOCK) & 0xFF1F; ret = snd_soc_write(codec, WM8940_CLOCK, reg | (div << 5)); break; case WM8940_OPCLKDIV: reg = snd_soc_read(codec, WM8940_GPIO) & 0xFFCF; ret = snd_soc_write(codec, WM8940_GPIO, reg | (div << 4)); break; } return ret; } #define WM8940_RATES SNDRV_PCM_RATE_8000_48000 #define WM8940_FORMATS (SNDRV_PCM_FMTBIT_S8 | \ SNDRV_PCM_FMTBIT_S16_LE | \ SNDRV_PCM_FMTBIT_S20_3LE | \ SNDRV_PCM_FMTBIT_S24_LE | \ SNDRV_PCM_FMTBIT_S32_LE) static const struct snd_soc_dai_ops wm8940_dai_ops = { .hw_params = 
wm8940_i2s_hw_params, .set_sysclk = wm8940_set_dai_sysclk, .digital_mute = wm8940_mute, .set_fmt = wm8940_set_dai_fmt, .set_clkdiv = wm8940_set_dai_clkdiv, .set_pll = wm8940_set_dai_pll, }; static struct snd_soc_dai_driver wm8940_dai = { .name = "wm8940-hifi", .playback = { .stream_name = "Playback", .channels_min = 1, .channels_max = 2, .rates = WM8940_RATES, .formats = WM8940_FORMATS, }, .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 2, .rates = WM8940_RATES, .formats = WM8940_FORMATS, }, .ops = &wm8940_dai_ops, .symmetric_rates = 1, }; static int wm8940_suspend(struct snd_soc_codec *codec) { return wm8940_set_bias_level(codec, SND_SOC_BIAS_OFF); } static int wm8940_resume(struct snd_soc_codec *codec) { wm8940_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return 0; } static int wm8940_probe(struct snd_soc_codec *codec) { struct wm8940_priv *wm8940 = snd_soc_codec_get_drvdata(codec); struct wm8940_setup_data *pdata = codec->dev->platform_data; int ret; u16 reg; ret = snd_soc_codec_set_cache_io(codec, 8, 16, wm8940->control_type); if (ret < 0) { dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret); return ret; } ret = wm8940_reset(codec); if (ret < 0) { dev_err(codec->dev, "Failed to issue reset\n"); return ret; } wm8940_set_bias_level(codec, SND_SOC_BIAS_STANDBY); ret = snd_soc_write(codec, WM8940_POWER1, 0x180); if (ret < 0) return ret; if (!pdata) dev_warn(codec->dev, "No platform data supplied\n"); else { reg = snd_soc_read(codec, WM8940_OUTPUTCTL); ret = snd_soc_write(codec, WM8940_OUTPUTCTL, reg | pdata->vroi); if (ret < 0) return ret; } ret = snd_soc_add_codec_controls(codec, wm8940_snd_controls, ARRAY_SIZE(wm8940_snd_controls)); if (ret) return ret; ret = wm8940_add_widgets(codec); return ret; } static int wm8940_remove(struct snd_soc_codec *codec) { wm8940_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static struct snd_soc_codec_driver soc_codec_dev_wm8940 = { .probe = wm8940_probe, .remove = wm8940_remove, .suspend = 
wm8940_suspend, .resume = wm8940_resume, .set_bias_level = wm8940_set_bias_level, .reg_cache_size = ARRAY_SIZE(wm8940_reg_defaults), .reg_word_size = sizeof(u16), .reg_cache_default = wm8940_reg_defaults, .volatile_register = wm8940_volatile_register, }; static __devinit int wm8940_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct wm8940_priv *wm8940; int ret; wm8940 = devm_kzalloc(&i2c->dev, sizeof(struct wm8940_priv), GFP_KERNEL); if (wm8940 == NULL) return -ENOMEM; i2c_set_clientdata(i2c, wm8940); wm8940->control_type = SND_SOC_I2C; ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wm8940, &wm8940_dai, 1); return ret; } static __devexit int wm8940_i2c_remove(struct i2c_client *client) { snd_soc_unregister_codec(&client->dev); return 0; } static const struct i2c_device_id wm8940_i2c_id[] = { { "wm8940", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, wm8940_i2c_id); static struct i2c_driver wm8940_i2c_driver = { .driver = { .name = "wm8940", .owner = THIS_MODULE, }, .probe = wm8940_i2c_probe, .remove = __devexit_p(wm8940_i2c_remove), .id_table = wm8940_i2c_id, }; static int __init wm8940_modinit(void) { int ret = 0; ret = i2c_add_driver(&wm8940_i2c_driver); if (ret != 0) { printk(KERN_ERR "Failed to register wm8940 I2C driver: %d\n", ret); } return ret; } module_init(wm8940_modinit); static void __exit wm8940_exit(void) { i2c_del_driver(&wm8940_i2c_driver); } module_exit(wm8940_exit); MODULE_DESCRIPTION("ASoC WM8940 driver"); MODULE_AUTHOR("Jonathan Cameron"); MODULE_LICENSE("GPL");
gpl-2.0
AlbertXingZhang/android_kernel_sony_msm8x60
drivers/media/video/pvrusb2/pvrusb2-v4l2.c
4812
32645
/* * * * Copyright (C) 2005 Mike Isely <isely@pobox.com> * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/version.h> #include "pvrusb2-context.h" #include "pvrusb2-hdw.h" #include "pvrusb2.h" #include "pvrusb2-debug.h" #include "pvrusb2-v4l2.h" #include "pvrusb2-ioread.h" #include <linux/videodev2.h> #include <linux/module.h> #include <media/v4l2-dev.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> struct pvr2_v4l2_dev; struct pvr2_v4l2_fh; struct pvr2_v4l2; struct pvr2_v4l2_dev { struct video_device devbase; /* MUST be first! 
*/ struct pvr2_v4l2 *v4lp; struct pvr2_context_stream *stream; /* Information about this device: */ enum pvr2_config config; /* Expected stream format */ int v4l_type; /* V4L defined type for this device node */ enum pvr2_v4l_type minor_type; /* pvr2-understood minor device type */ }; struct pvr2_v4l2_fh { struct pvr2_channel channel; struct pvr2_v4l2_dev *pdi; enum v4l2_priority prio; struct pvr2_ioread *rhp; struct file *file; struct pvr2_v4l2 *vhead; struct pvr2_v4l2_fh *vnext; struct pvr2_v4l2_fh *vprev; wait_queue_head_t wait_data; int fw_mode_flag; /* Map contiguous ordinal value to input id */ unsigned char *input_map; unsigned int input_cnt; }; struct pvr2_v4l2 { struct pvr2_channel channel; struct pvr2_v4l2_fh *vfirst; struct pvr2_v4l2_fh *vlast; struct v4l2_prio_state prio; /* streams - Note that these must be separately, individually, * allocated pointers. This is because the v4l core is going to * manage their deletion - separately, individually... */ struct pvr2_v4l2_dev *dev_video; struct pvr2_v4l2_dev *dev_radio; }; static int video_nr[PVR_NUM] = {[0 ... PVR_NUM-1] = -1}; module_param_array(video_nr, int, NULL, 0444); MODULE_PARM_DESC(video_nr, "Offset for device's video dev minor"); static int radio_nr[PVR_NUM] = {[0 ... PVR_NUM-1] = -1}; module_param_array(radio_nr, int, NULL, 0444); MODULE_PARM_DESC(radio_nr, "Offset for device's radio dev minor"); static int vbi_nr[PVR_NUM] = {[0 ... 
PVR_NUM-1] = -1}; module_param_array(vbi_nr, int, NULL, 0444); MODULE_PARM_DESC(vbi_nr, "Offset for device's vbi dev minor"); static struct v4l2_capability pvr_capability ={ .driver = "pvrusb2", .card = "Hauppauge WinTV pvr-usb2", .bus_info = "usb", .version = LINUX_VERSION_CODE, .capabilities = (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_RADIO | V4L2_CAP_READWRITE), }; static struct v4l2_fmtdesc pvr_fmtdesc [] = { { .index = 0, .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .flags = V4L2_FMT_FLAG_COMPRESSED, .description = "MPEG1/2", // This should really be V4L2_PIX_FMT_MPEG, but xawtv // breaks when I do that. .pixelformat = 0, // V4L2_PIX_FMT_MPEG, .reserved = { 0, 0, 0, 0 } } }; #define PVR_FORMAT_PIX 0 #define PVR_FORMAT_VBI 1 static struct v4l2_format pvr_format [] = { [PVR_FORMAT_PIX] = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .fmt = { .pix = { .width = 720, .height = 576, // This should really be V4L2_PIX_FMT_MPEG, // but xawtv breaks when I do that. .pixelformat = 0, // V4L2_PIX_FMT_MPEG, .field = V4L2_FIELD_INTERLACED, .bytesperline = 0, // doesn't make sense // here //FIXME : Don't know what to put here... .sizeimage = (32*1024), .colorspace = 0, // doesn't make sense here .priv = 0 } } }, [PVR_FORMAT_VBI] = { .type = V4L2_BUF_TYPE_VBI_CAPTURE, .fmt = { .vbi = { .sampling_rate = 27000000, .offset = 248, .samples_per_line = 1443, .sample_format = V4L2_PIX_FMT_GREY, .start = { 0, 0 }, .count = { 0, 0 }, .flags = 0, .reserved = { 0, 0 } } } } }; /* * pvr_ioctl() * * This is part of Video 4 Linux API. The procedure handles ioctl() calls. 
* */ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg) { struct pvr2_v4l2_fh *fh = file->private_data; struct pvr2_v4l2 *vp = fh->vhead; struct pvr2_v4l2_dev *pdi = fh->pdi; struct pvr2_hdw *hdw = fh->channel.mc_head->hdw; long ret = -EINVAL; if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) { v4l_print_ioctl(pvr2_hdw_get_driver_name(hdw),cmd); } if (!pvr2_hdw_dev_ok(hdw)) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "ioctl failed - bad or no context"); return -EFAULT; } /* check priority */ switch (cmd) { case VIDIOC_S_CTRL: case VIDIOC_S_STD: case VIDIOC_S_INPUT: case VIDIOC_S_TUNER: case VIDIOC_S_FREQUENCY: ret = v4l2_prio_check(&vp->prio, fh->prio); if (ret) return ret; } switch (cmd) { case VIDIOC_QUERYCAP: { struct v4l2_capability *cap = arg; memcpy(cap, &pvr_capability, sizeof(struct v4l2_capability)); strlcpy(cap->bus_info,pvr2_hdw_get_bus_info(hdw), sizeof(cap->bus_info)); strlcpy(cap->card,pvr2_hdw_get_desc(hdw),sizeof(cap->card)); ret = 0; break; } case VIDIOC_G_PRIORITY: { enum v4l2_priority *p = arg; *p = v4l2_prio_max(&vp->prio); ret = 0; break; } case VIDIOC_S_PRIORITY: { enum v4l2_priority *prio = arg; ret = v4l2_prio_change(&vp->prio, &fh->prio, *prio); break; } case VIDIOC_ENUMSTD: { struct v4l2_standard *vs = (struct v4l2_standard *)arg; int idx = vs->index; ret = pvr2_hdw_get_stdenum_value(hdw,vs,idx+1); break; } case VIDIOC_QUERYSTD: { v4l2_std_id *std = arg; *std = V4L2_STD_ALL; ret = pvr2_hdw_get_detected_std(hdw, std); break; } case VIDIOC_G_STD: { int val = 0; ret = pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDCUR),&val); *(v4l2_std_id *)arg = val; break; } case VIDIOC_S_STD: { ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDCUR), *(v4l2_std_id *)arg); break; } case VIDIOC_ENUMINPUT: { struct pvr2_ctrl *cptr; struct v4l2_input *vi = (struct v4l2_input *)arg; struct v4l2_input tmp; unsigned int cnt; int val; cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT); memset(&tmp,0,sizeof(tmp)); tmp.index 
= vi->index; ret = 0; if (vi->index >= fh->input_cnt) { ret = -EINVAL; break; } val = fh->input_map[vi->index]; switch (val) { case PVR2_CVAL_INPUT_TV: case PVR2_CVAL_INPUT_DTV: case PVR2_CVAL_INPUT_RADIO: tmp.type = V4L2_INPUT_TYPE_TUNER; break; case PVR2_CVAL_INPUT_SVIDEO: case PVR2_CVAL_INPUT_COMPOSITE: tmp.type = V4L2_INPUT_TYPE_CAMERA; break; default: ret = -EINVAL; break; } if (ret < 0) break; cnt = 0; pvr2_ctrl_get_valname(cptr,val, tmp.name,sizeof(tmp.name)-1,&cnt); tmp.name[cnt] = 0; /* Don't bother with audioset, since this driver currently always switches the audio whenever the video is switched. */ /* Handling std is a tougher problem. It doesn't make sense in cases where a device might be multi-standard. We could just copy out the current value for the standard, but it can change over time. For now just leave it zero. */ memcpy(vi, &tmp, sizeof(tmp)); ret = 0; break; } case VIDIOC_G_INPUT: { unsigned int idx; struct pvr2_ctrl *cptr; struct v4l2_input *vi = (struct v4l2_input *)arg; int val; cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT); val = 0; ret = pvr2_ctrl_get_value(cptr,&val); vi->index = 0; for (idx = 0; idx < fh->input_cnt; idx++) { if (fh->input_map[idx] == val) { vi->index = idx; break; } } break; } case VIDIOC_S_INPUT: { struct v4l2_input *vi = (struct v4l2_input *)arg; if (vi->index >= fh->input_cnt) { ret = -ERANGE; break; } ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT), fh->input_map[vi->index]); break; } case VIDIOC_ENUMAUDIO: { /* pkt: FIXME: We are returning one "fake" input here which could very well be called "whatever_we_like". This is for apps that want to see an audio input just to feel comfortable, as well as to test if it can do stereo or sth. There is actually no guarantee that the actual audio input cannot change behind the app's back, but most applications should not mind that either. 
Hopefully, mplayer people will work with us on this (this whole mess is to support mplayer pvr://), or Hans will come up with a more standard way to say "we have inputs but we don 't want you to change them independent of video" which will sort this mess. */ struct v4l2_audio *vin = arg; ret = -EINVAL; if (vin->index > 0) break; strncpy(vin->name, "PVRUSB2 Audio",14); vin->capability = V4L2_AUDCAP_STEREO; ret = 0; break; break; } case VIDIOC_G_AUDIO: { /* pkt: FIXME: see above comment (VIDIOC_ENUMAUDIO) */ struct v4l2_audio *vin = arg; memset(vin,0,sizeof(*vin)); vin->index = 0; strncpy(vin->name, "PVRUSB2 Audio",14); vin->capability = V4L2_AUDCAP_STEREO; ret = 0; break; } case VIDIOC_G_TUNER: { struct v4l2_tuner *vt = (struct v4l2_tuner *)arg; if (vt->index != 0) break; /* Only answer for the 1st tuner */ pvr2_hdw_execute_tuner_poll(hdw); ret = pvr2_hdw_get_tuner_status(hdw,vt); break; } case VIDIOC_S_TUNER: { struct v4l2_tuner *vt=(struct v4l2_tuner *)arg; if (vt->index != 0) break; ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_AUDIOMODE), vt->audmode); break; } case VIDIOC_S_FREQUENCY: { const struct v4l2_frequency *vf = (struct v4l2_frequency *)arg; unsigned long fv; struct v4l2_tuner vt; int cur_input; struct pvr2_ctrl *ctrlp; ret = pvr2_hdw_get_tuner_status(hdw,&vt); if (ret != 0) break; ctrlp = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT); ret = pvr2_ctrl_get_value(ctrlp,&cur_input); if (ret != 0) break; if (vf->type == V4L2_TUNER_RADIO) { if (cur_input != PVR2_CVAL_INPUT_RADIO) { pvr2_ctrl_set_value(ctrlp, PVR2_CVAL_INPUT_RADIO); } } else { if (cur_input == PVR2_CVAL_INPUT_RADIO) { pvr2_ctrl_set_value(ctrlp, PVR2_CVAL_INPUT_TV); } } fv = vf->frequency; if (vt.capability & V4L2_TUNER_CAP_LOW) { fv = (fv * 125) / 2; } else { fv = fv * 62500; } ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_FREQUENCY),fv); break; } case VIDIOC_G_FREQUENCY: { struct v4l2_frequency *vf = (struct v4l2_frequency *)arg; int val = 0; int cur_input; 
struct v4l2_tuner vt; ret = pvr2_hdw_get_tuner_status(hdw,&vt); if (ret != 0) break; ret = pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_FREQUENCY), &val); if (ret != 0) break; pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT), &cur_input); if (cur_input == PVR2_CVAL_INPUT_RADIO) { vf->type = V4L2_TUNER_RADIO; } else { vf->type = V4L2_TUNER_ANALOG_TV; } if (vt.capability & V4L2_TUNER_CAP_LOW) { val = (val * 2) / 125; } else { val /= 62500; } vf->frequency = val; break; } case VIDIOC_ENUM_FMT: { struct v4l2_fmtdesc *fd = (struct v4l2_fmtdesc *)arg; /* Only one format is supported : mpeg.*/ if (fd->index != 0) break; memcpy(fd, pvr_fmtdesc, sizeof(struct v4l2_fmtdesc)); ret = 0; break; } case VIDIOC_G_FMT: { struct v4l2_format *vf = (struct v4l2_format *)arg; int val; switch(vf->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: memcpy(vf, &pvr_format[PVR_FORMAT_PIX], sizeof(struct v4l2_format)); val = 0; pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_HRES), &val); vf->fmt.pix.width = val; val = 0; pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_VRES), &val); vf->fmt.pix.height = val; ret = 0; break; case V4L2_BUF_TYPE_VBI_CAPTURE: // ????? 
Still need to figure out to do VBI correctly ret = -EINVAL; break; default: ret = -EINVAL; break; } break; } case VIDIOC_TRY_FMT: case VIDIOC_S_FMT: { struct v4l2_format *vf = (struct v4l2_format *)arg; ret = 0; switch(vf->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: { int lmin,lmax,ldef; struct pvr2_ctrl *hcp,*vcp; int h = vf->fmt.pix.height; int w = vf->fmt.pix.width; hcp = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_HRES); vcp = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_VRES); lmin = pvr2_ctrl_get_min(hcp); lmax = pvr2_ctrl_get_max(hcp); pvr2_ctrl_get_def(hcp, &ldef); if (w == -1) { w = ldef; } else if (w < lmin) { w = lmin; } else if (w > lmax) { w = lmax; } lmin = pvr2_ctrl_get_min(vcp); lmax = pvr2_ctrl_get_max(vcp); pvr2_ctrl_get_def(vcp, &ldef); if (h == -1) { h = ldef; } else if (h < lmin) { h = lmin; } else if (h > lmax) { h = lmax; } memcpy(vf, &pvr_format[PVR_FORMAT_PIX], sizeof(struct v4l2_format)); vf->fmt.pix.width = w; vf->fmt.pix.height = h; if (cmd == VIDIOC_S_FMT) { pvr2_ctrl_set_value(hcp,vf->fmt.pix.width); pvr2_ctrl_set_value(vcp,vf->fmt.pix.height); } } break; case V4L2_BUF_TYPE_VBI_CAPTURE: // ????? Still need to figure out to do VBI correctly ret = -EINVAL; break; default: ret = -EINVAL; break; } break; } case VIDIOC_STREAMON: { if (!fh->pdi->stream) { /* No stream defined for this node. This means that we're not currently allowed to stream from this node. */ ret = -EPERM; break; } ret = pvr2_hdw_set_stream_type(hdw,pdi->config); if (ret < 0) return ret; ret = pvr2_hdw_set_streaming(hdw,!0); break; } case VIDIOC_STREAMOFF: { if (!fh->pdi->stream) { /* No stream defined for this node. This means that we're not currently allowed to stream from this node. 
*/ ret = -EPERM; break; } ret = pvr2_hdw_set_streaming(hdw,0); break; } case VIDIOC_QUERYCTRL: { struct pvr2_ctrl *cptr; int val; struct v4l2_queryctrl *vc = (struct v4l2_queryctrl *)arg; ret = 0; if (vc->id & V4L2_CTRL_FLAG_NEXT_CTRL) { cptr = pvr2_hdw_get_ctrl_nextv4l( hdw,(vc->id & ~V4L2_CTRL_FLAG_NEXT_CTRL)); if (cptr) vc->id = pvr2_ctrl_get_v4lid(cptr); } else { cptr = pvr2_hdw_get_ctrl_v4l(hdw,vc->id); } if (!cptr) { pvr2_trace(PVR2_TRACE_V4LIOCTL, "QUERYCTRL id=0x%x not implemented here", vc->id); ret = -EINVAL; break; } pvr2_trace(PVR2_TRACE_V4LIOCTL, "QUERYCTRL id=0x%x mapping name=%s (%s)", vc->id,pvr2_ctrl_get_name(cptr), pvr2_ctrl_get_desc(cptr)); strlcpy(vc->name,pvr2_ctrl_get_desc(cptr),sizeof(vc->name)); vc->flags = pvr2_ctrl_get_v4lflags(cptr); pvr2_ctrl_get_def(cptr, &val); vc->default_value = val; switch (pvr2_ctrl_get_type(cptr)) { case pvr2_ctl_enum: vc->type = V4L2_CTRL_TYPE_MENU; vc->minimum = 0; vc->maximum = pvr2_ctrl_get_cnt(cptr) - 1; vc->step = 1; break; case pvr2_ctl_bool: vc->type = V4L2_CTRL_TYPE_BOOLEAN; vc->minimum = 0; vc->maximum = 1; vc->step = 1; break; case pvr2_ctl_int: vc->type = V4L2_CTRL_TYPE_INTEGER; vc->minimum = pvr2_ctrl_get_min(cptr); vc->maximum = pvr2_ctrl_get_max(cptr); vc->step = 1; break; default: pvr2_trace(PVR2_TRACE_V4LIOCTL, "QUERYCTRL id=0x%x name=%s not mappable", vc->id,pvr2_ctrl_get_name(cptr)); ret = -EINVAL; break; } break; } case VIDIOC_QUERYMENU: { struct v4l2_querymenu *vm = (struct v4l2_querymenu *)arg; unsigned int cnt = 0; ret = pvr2_ctrl_get_valname(pvr2_hdw_get_ctrl_v4l(hdw,vm->id), vm->index, vm->name,sizeof(vm->name)-1, &cnt); vm->name[cnt] = 0; break; } case VIDIOC_G_CTRL: { struct v4l2_control *vc = (struct v4l2_control *)arg; int val = 0; ret = pvr2_ctrl_get_value(pvr2_hdw_get_ctrl_v4l(hdw,vc->id), &val); vc->value = val; break; } case VIDIOC_S_CTRL: { struct v4l2_control *vc = (struct v4l2_control *)arg; ret = pvr2_ctrl_set_value(pvr2_hdw_get_ctrl_v4l(hdw,vc->id), vc->value); break; } case 
VIDIOC_G_EXT_CTRLS: { struct v4l2_ext_controls *ctls = (struct v4l2_ext_controls *)arg; struct v4l2_ext_control *ctrl; unsigned int idx; int val; ret = 0; for (idx = 0; idx < ctls->count; idx++) { ctrl = ctls->controls + idx; ret = pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id),&val); if (ret) { ctls->error_idx = idx; break; } /* Ensure that if read as a 64 bit value, the user will still get a hopefully sane value */ ctrl->value64 = 0; ctrl->value = val; } break; } case VIDIOC_S_EXT_CTRLS: { struct v4l2_ext_controls *ctls = (struct v4l2_ext_controls *)arg; struct v4l2_ext_control *ctrl; unsigned int idx; ret = 0; for (idx = 0; idx < ctls->count; idx++) { ctrl = ctls->controls + idx; ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id), ctrl->value); if (ret) { ctls->error_idx = idx; break; } } break; } case VIDIOC_TRY_EXT_CTRLS: { struct v4l2_ext_controls *ctls = (struct v4l2_ext_controls *)arg; struct v4l2_ext_control *ctrl; struct pvr2_ctrl *pctl; unsigned int idx; /* For the moment just validate that the requested control actually exists. 
*/ ret = 0; for (idx = 0; idx < ctls->count; idx++) { ctrl = ctls->controls + idx; pctl = pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id); if (!pctl) { ret = -EINVAL; ctls->error_idx = idx; break; } } break; } case VIDIOC_CROPCAP: { struct v4l2_cropcap *cap = (struct v4l2_cropcap *)arg; if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { ret = -EINVAL; break; } ret = pvr2_hdw_get_cropcap(hdw, cap); cap->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; /* paranoia */ break; } case VIDIOC_G_CROP: { struct v4l2_crop *crop = (struct v4l2_crop *)arg; int val = 0; if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { ret = -EINVAL; break; } ret = pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPL), &val); if (ret != 0) { ret = -EINVAL; break; } crop->c.left = val; ret = pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPT), &val); if (ret != 0) { ret = -EINVAL; break; } crop->c.top = val; ret = pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPW), &val); if (ret != 0) { ret = -EINVAL; break; } crop->c.width = val; ret = pvr2_ctrl_get_value( pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPH), &val); if (ret != 0) { ret = -EINVAL; break; } crop->c.height = val; } case VIDIOC_S_CROP: { struct v4l2_crop *crop = (struct v4l2_crop *)arg; if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { ret = -EINVAL; break; } ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPL), crop->c.left); if (ret != 0) { ret = -EINVAL; break; } ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPT), crop->c.top); if (ret != 0) { ret = -EINVAL; break; } ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPW), crop->c.width); if (ret != 0) { ret = -EINVAL; break; } ret = pvr2_ctrl_set_value( pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPH), crop->c.height); if (ret != 0) { ret = -EINVAL; break; } } case VIDIOC_LOG_STATUS: { pvr2_hdw_trigger_module_log(hdw); ret = 0; break; } #ifdef CONFIG_VIDEO_ADV_DEBUG case VIDIOC_DBG_S_REGISTER: case VIDIOC_DBG_G_REGISTER: { 
u64 val; struct v4l2_dbg_register *req = (struct v4l2_dbg_register *)arg; if (cmd == VIDIOC_DBG_S_REGISTER) val = req->val; ret = pvr2_hdw_register_access( hdw, &req->match, req->reg, cmd == VIDIOC_DBG_S_REGISTER, &val); if (cmd == VIDIOC_DBG_G_REGISTER) req->val = val; break; } #endif default : ret = -ENOTTY; break; } pvr2_hdw_commit_ctl(hdw); if (ret < 0) { if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) { pvr2_trace(PVR2_TRACE_V4LIOCTL, "pvr2_v4l2_do_ioctl failure, ret=%ld", ret); } else { if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) { pvr2_trace(PVR2_TRACE_V4LIOCTL, "pvr2_v4l2_do_ioctl failure, ret=%ld" " command was:", ret); v4l_print_ioctl(pvr2_hdw_get_driver_name(hdw), cmd); } } } else { pvr2_trace(PVR2_TRACE_V4LIOCTL, "pvr2_v4l2_do_ioctl complete, ret=%ld (0x%lx)", ret, ret); } return ret; } static void pvr2_v4l2_dev_destroy(struct pvr2_v4l2_dev *dip) { struct pvr2_hdw *hdw = dip->v4lp->channel.mc_head->hdw; enum pvr2_config cfg = dip->config; char msg[80]; unsigned int mcnt; /* Construct the unregistration message *before* we actually perform the unregistration step. By doing it this way we don't have to worry about potentially touching deleted resources. */ mcnt = scnprintf(msg, sizeof(msg) - 1, "pvrusb2: unregistered device %s [%s]", video_device_node_name(&dip->devbase), pvr2_config_get_name(cfg)); msg[mcnt] = 0; pvr2_hdw_v4l_store_minor_number(hdw,dip->minor_type,-1); /* Paranoia */ dip->v4lp = NULL; dip->stream = NULL; /* Actual deallocation happens later when all internal references are gone. 
*/ video_unregister_device(&dip->devbase); printk(KERN_INFO "%s\n", msg); } static void pvr2_v4l2_dev_disassociate_parent(struct pvr2_v4l2_dev *dip) { if (!dip) return; if (!dip->devbase.parent) return; dip->devbase.parent = NULL; device_move(&dip->devbase.dev, NULL, DPM_ORDER_NONE); } static void pvr2_v4l2_destroy_no_lock(struct pvr2_v4l2 *vp) { if (vp->dev_video) { pvr2_v4l2_dev_destroy(vp->dev_video); vp->dev_video = NULL; } if (vp->dev_radio) { pvr2_v4l2_dev_destroy(vp->dev_radio); vp->dev_radio = NULL; } pvr2_trace(PVR2_TRACE_STRUCT,"Destroying pvr2_v4l2 id=%p",vp); pvr2_channel_done(&vp->channel); kfree(vp); } static void pvr2_video_device_release(struct video_device *vdev) { struct pvr2_v4l2_dev *dev; dev = container_of(vdev,struct pvr2_v4l2_dev,devbase); kfree(dev); } static void pvr2_v4l2_internal_check(struct pvr2_channel *chp) { struct pvr2_v4l2 *vp; vp = container_of(chp,struct pvr2_v4l2,channel); if (!vp->channel.mc_head->disconnect_flag) return; pvr2_v4l2_dev_disassociate_parent(vp->dev_video); pvr2_v4l2_dev_disassociate_parent(vp->dev_radio); if (vp->vfirst) return; pvr2_v4l2_destroy_no_lock(vp); } static long pvr2_v4l2_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return video_usercopy(file, cmd, arg, pvr2_v4l2_do_ioctl); } static int pvr2_v4l2_release(struct file *file) { struct pvr2_v4l2_fh *fhp = file->private_data; struct pvr2_v4l2 *vp = fhp->vhead; struct pvr2_hdw *hdw = fhp->channel.mc_head->hdw; pvr2_trace(PVR2_TRACE_OPEN_CLOSE,"pvr2_v4l2_release"); if (fhp->rhp) { struct pvr2_stream *sp; pvr2_hdw_set_streaming(hdw,0); sp = pvr2_ioread_get_stream(fhp->rhp); if (sp) pvr2_stream_set_callback(sp,NULL,NULL); pvr2_ioread_destroy(fhp->rhp); fhp->rhp = NULL; } v4l2_prio_close(&vp->prio, fhp->prio); file->private_data = NULL; if (fhp->vnext) { fhp->vnext->vprev = fhp->vprev; } else { vp->vlast = fhp->vprev; } if (fhp->vprev) { fhp->vprev->vnext = fhp->vnext; } else { vp->vfirst = fhp->vnext; } fhp->vnext = NULL; fhp->vprev = NULL; 
fhp->vhead = NULL; pvr2_channel_done(&fhp->channel); pvr2_trace(PVR2_TRACE_STRUCT, "Destroying pvr_v4l2_fh id=%p",fhp); if (fhp->input_map) { kfree(fhp->input_map); fhp->input_map = NULL; } kfree(fhp); if (vp->channel.mc_head->disconnect_flag && !vp->vfirst) { pvr2_v4l2_destroy_no_lock(vp); } return 0; } static int pvr2_v4l2_open(struct file *file) { struct pvr2_v4l2_dev *dip; /* Our own context pointer */ struct pvr2_v4l2_fh *fhp; struct pvr2_v4l2 *vp; struct pvr2_hdw *hdw; unsigned int input_mask = 0; unsigned int input_cnt,idx; int ret = 0; dip = container_of(video_devdata(file),struct pvr2_v4l2_dev,devbase); vp = dip->v4lp; hdw = vp->channel.hdw; pvr2_trace(PVR2_TRACE_OPEN_CLOSE,"pvr2_v4l2_open"); if (!pvr2_hdw_dev_ok(hdw)) { pvr2_trace(PVR2_TRACE_OPEN_CLOSE, "pvr2_v4l2_open: hardware not ready"); return -EIO; } fhp = kzalloc(sizeof(*fhp),GFP_KERNEL); if (!fhp) { return -ENOMEM; } init_waitqueue_head(&fhp->wait_data); fhp->pdi = dip; pvr2_trace(PVR2_TRACE_STRUCT,"Creating pvr_v4l2_fh id=%p",fhp); pvr2_channel_init(&fhp->channel,vp->channel.mc_head); if (dip->v4l_type == VFL_TYPE_RADIO) { /* Opening device as a radio, legal input selection subset is just the radio. */ input_mask = (1 << PVR2_CVAL_INPUT_RADIO); } else { /* Opening the main V4L device, legal input selection subset includes all analog inputs. 
*/ input_mask = ((1 << PVR2_CVAL_INPUT_RADIO) | (1 << PVR2_CVAL_INPUT_TV) | (1 << PVR2_CVAL_INPUT_COMPOSITE) | (1 << PVR2_CVAL_INPUT_SVIDEO)); } ret = pvr2_channel_limit_inputs(&fhp->channel,input_mask); if (ret) { pvr2_channel_done(&fhp->channel); pvr2_trace(PVR2_TRACE_STRUCT, "Destroying pvr_v4l2_fh id=%p (input mask error)", fhp); kfree(fhp); return ret; } input_mask &= pvr2_hdw_get_input_available(hdw); input_cnt = 0; for (idx = 0; idx < (sizeof(input_mask) << 3); idx++) { if (input_mask & (1 << idx)) input_cnt++; } fhp->input_cnt = input_cnt; fhp->input_map = kzalloc(input_cnt,GFP_KERNEL); if (!fhp->input_map) { pvr2_channel_done(&fhp->channel); pvr2_trace(PVR2_TRACE_STRUCT, "Destroying pvr_v4l2_fh id=%p (input map failure)", fhp); kfree(fhp); return -ENOMEM; } input_cnt = 0; for (idx = 0; idx < (sizeof(input_mask) << 3); idx++) { if (!(input_mask & (1 << idx))) continue; fhp->input_map[input_cnt++] = idx; } fhp->vnext = NULL; fhp->vprev = vp->vlast; if (vp->vlast) { vp->vlast->vnext = fhp; } else { vp->vfirst = fhp; } vp->vlast = fhp; fhp->vhead = vp; fhp->file = file; file->private_data = fhp; v4l2_prio_open(&vp->prio, &fhp->prio); fhp->fw_mode_flag = pvr2_hdw_cpufw_get_enabled(hdw); return 0; } static void pvr2_v4l2_notify(struct pvr2_v4l2_fh *fhp) { wake_up(&fhp->wait_data); } static int pvr2_v4l2_iosetup(struct pvr2_v4l2_fh *fh) { int ret; struct pvr2_stream *sp; struct pvr2_hdw *hdw; if (fh->rhp) return 0; if (!fh->pdi->stream) { /* No stream defined for this node. This means that we're not currently allowed to stream from this node. */ return -EPERM; } /* First read() attempt. Try to claim the stream and start it... 
*/ if ((ret = pvr2_channel_claim_stream(&fh->channel, fh->pdi->stream)) != 0) { /* Someone else must already have it */ return ret; } fh->rhp = pvr2_channel_create_mpeg_stream(fh->pdi->stream); if (!fh->rhp) { pvr2_channel_claim_stream(&fh->channel,NULL); return -ENOMEM; } hdw = fh->channel.mc_head->hdw; sp = fh->pdi->stream->stream; pvr2_stream_set_callback(sp,(pvr2_stream_callback)pvr2_v4l2_notify,fh); pvr2_hdw_set_stream_type(hdw,fh->pdi->config); if ((ret = pvr2_hdw_set_streaming(hdw,!0)) < 0) return ret; return pvr2_ioread_set_enabled(fh->rhp,!0); } static ssize_t pvr2_v4l2_read(struct file *file, char __user *buff, size_t count, loff_t *ppos) { struct pvr2_v4l2_fh *fh = file->private_data; int ret; if (fh->fw_mode_flag) { struct pvr2_hdw *hdw = fh->channel.mc_head->hdw; char *tbuf; int c1,c2; int tcnt = 0; unsigned int offs = *ppos; tbuf = kmalloc(PAGE_SIZE,GFP_KERNEL); if (!tbuf) return -ENOMEM; while (count) { c1 = count; if (c1 > PAGE_SIZE) c1 = PAGE_SIZE; c2 = pvr2_hdw_cpufw_get(hdw,offs,tbuf,c1); if (c2 < 0) { tcnt = c2; break; } if (!c2) break; if (copy_to_user(buff,tbuf,c2)) { tcnt = -EFAULT; break; } offs += c2; tcnt += c2; buff += c2; count -= c2; *ppos += c2; } kfree(tbuf); return tcnt; } if (!fh->rhp) { ret = pvr2_v4l2_iosetup(fh); if (ret) { return ret; } } for (;;) { ret = pvr2_ioread_read(fh->rhp,buff,count); if (ret >= 0) break; if (ret != -EAGAIN) break; if (file->f_flags & O_NONBLOCK) break; /* Doing blocking I/O. Wait here. 
*/ ret = wait_event_interruptible( fh->wait_data, pvr2_ioread_avail(fh->rhp) >= 0); if (ret < 0) break; } return ret; } static unsigned int pvr2_v4l2_poll(struct file *file, poll_table *wait) { unsigned int mask = 0; struct pvr2_v4l2_fh *fh = file->private_data; int ret; if (fh->fw_mode_flag) { mask |= POLLIN | POLLRDNORM; return mask; } if (!fh->rhp) { ret = pvr2_v4l2_iosetup(fh); if (ret) return POLLERR; } poll_wait(file,&fh->wait_data,wait); if (pvr2_ioread_avail(fh->rhp) >= 0) { mask |= POLLIN | POLLRDNORM; } return mask; } static const struct v4l2_file_operations vdev_fops = { .owner = THIS_MODULE, .open = pvr2_v4l2_open, .release = pvr2_v4l2_release, .read = pvr2_v4l2_read, .ioctl = pvr2_v4l2_ioctl, .poll = pvr2_v4l2_poll, }; static struct video_device vdev_template = { .fops = &vdev_fops, }; static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip, struct pvr2_v4l2 *vp, int v4l_type) { struct usb_device *usbdev; int mindevnum; int unit_number; int *nr_ptr = NULL; dip->v4lp = vp; usbdev = pvr2_hdw_get_dev(vp->channel.mc_head->hdw); dip->v4l_type = v4l_type; switch (v4l_type) { case VFL_TYPE_GRABBER: dip->stream = &vp->channel.mc_head->video_stream; dip->config = pvr2_config_mpeg; dip->minor_type = pvr2_v4l_type_video; nr_ptr = video_nr; if (!dip->stream) { pr_err(KBUILD_MODNAME ": Failed to set up pvrusb2 v4l video dev" " due to missing stream instance\n"); return; } break; case VFL_TYPE_VBI: dip->config = pvr2_config_vbi; dip->minor_type = pvr2_v4l_type_vbi; nr_ptr = vbi_nr; break; case VFL_TYPE_RADIO: dip->stream = &vp->channel.mc_head->video_stream; dip->config = pvr2_config_mpeg; dip->minor_type = pvr2_v4l_type_radio; nr_ptr = radio_nr; break; default: /* Bail out (this should be impossible) */ pr_err(KBUILD_MODNAME ": Failed to set up pvrusb2 v4l dev" " due to unrecognized config\n"); return; } memcpy(&dip->devbase,&vdev_template,sizeof(vdev_template)); dip->devbase.release = pvr2_video_device_release; mindevnum = -1; unit_number = 
pvr2_hdw_get_unit_number(vp->channel.mc_head->hdw); if (nr_ptr && (unit_number >= 0) && (unit_number < PVR_NUM)) { mindevnum = nr_ptr[unit_number]; } dip->devbase.parent = &usbdev->dev; if ((video_register_device(&dip->devbase, dip->v4l_type, mindevnum) < 0) && (video_register_device(&dip->devbase, dip->v4l_type, -1) < 0)) { pr_err(KBUILD_MODNAME ": Failed to register pvrusb2 v4l device\n"); } printk(KERN_INFO "pvrusb2: registered device %s [%s]\n", video_device_node_name(&dip->devbase), pvr2_config_get_name(dip->config)); pvr2_hdw_v4l_store_minor_number(vp->channel.mc_head->hdw, dip->minor_type,dip->devbase.minor); } struct pvr2_v4l2 *pvr2_v4l2_create(struct pvr2_context *mnp) { struct pvr2_v4l2 *vp; vp = kzalloc(sizeof(*vp),GFP_KERNEL); if (!vp) return vp; pvr2_channel_init(&vp->channel,mnp); pvr2_trace(PVR2_TRACE_STRUCT,"Creating pvr2_v4l2 id=%p",vp); vp->channel.check_func = pvr2_v4l2_internal_check; /* register streams */ vp->dev_video = kzalloc(sizeof(*vp->dev_video),GFP_KERNEL); if (!vp->dev_video) goto fail; pvr2_v4l2_dev_init(vp->dev_video,vp,VFL_TYPE_GRABBER); if (pvr2_hdw_get_input_available(vp->channel.mc_head->hdw) & (1 << PVR2_CVAL_INPUT_RADIO)) { vp->dev_radio = kzalloc(sizeof(*vp->dev_radio),GFP_KERNEL); if (!vp->dev_radio) goto fail; pvr2_v4l2_dev_init(vp->dev_radio,vp,VFL_TYPE_RADIO); } return vp; fail: pvr2_trace(PVR2_TRACE_STRUCT,"Failure creating pvr2_v4l2 id=%p",vp); pvr2_v4l2_destroy_no_lock(vp); return NULL; } /* Stuff for Emacs to see, in order to encourage consistent editing style: *** Local Variables: *** *** mode: c *** *** fill-column: 75 *** *** tab-width: 8 *** *** c-basic-offset: 8 *** *** End: *** */
gpl-2.0
Dm47021/LGE_Kernel_F6mt
drivers/power/wm831x_backup.c
5068
5643
/* * Backup battery driver for Wolfson Microelectronics wm831x PMICs * * Copyright 2009 Wolfson Microelectronics PLC. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/power_supply.h> #include <linux/slab.h> #include <linux/mfd/wm831x/core.h> #include <linux/mfd/wm831x/auxadc.h> #include <linux/mfd/wm831x/pmu.h> #include <linux/mfd/wm831x/pdata.h> struct wm831x_backup { struct wm831x *wm831x; struct power_supply backup; char name[20]; }; static int wm831x_backup_read_voltage(struct wm831x *wm831x, enum wm831x_auxadc src, union power_supply_propval *val) { int ret; ret = wm831x_auxadc_read_uv(wm831x, src); if (ret >= 0) val->intval = ret; return ret; } /********************************************************************* * Backup supply properties *********************************************************************/ static void wm831x_config_backup(struct wm831x *wm831x) { struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data; struct wm831x_backup_pdata *pdata; int ret, reg; if (!wm831x_pdata || !wm831x_pdata->backup) { dev_warn(wm831x->dev, "No backup battery charger configuration\n"); return; } pdata = wm831x_pdata->backup; reg = 0; if (pdata->charger_enable) reg |= WM831X_BKUP_CHG_ENA | WM831X_BKUP_BATT_DET_ENA; if (pdata->no_constant_voltage) reg |= WM831X_BKUP_CHG_MODE; switch (pdata->vlim) { case 2500: break; case 3100: reg |= WM831X_BKUP_CHG_VLIM; break; default: dev_err(wm831x->dev, "Invalid backup voltage limit %dmV\n", pdata->vlim); } switch (pdata->ilim) { case 100: break; case 200: reg |= 1; break; case 300: reg |= 2; break; case 400: reg |= 3; break; default: dev_err(wm831x->dev, "Invalid backup current limit %duA\n", pdata->ilim); } ret = wm831x_reg_unlock(wm831x); if (ret != 0) { dev_err(wm831x->dev, 
"Failed to unlock registers: %d\n", ret); return; } ret = wm831x_set_bits(wm831x, WM831X_BACKUP_CHARGER_CONTROL, WM831X_BKUP_CHG_ENA_MASK | WM831X_BKUP_CHG_MODE_MASK | WM831X_BKUP_BATT_DET_ENA_MASK | WM831X_BKUP_CHG_VLIM_MASK | WM831X_BKUP_CHG_ILIM_MASK, reg); if (ret != 0) dev_err(wm831x->dev, "Failed to set backup charger config: %d\n", ret); wm831x_reg_lock(wm831x); } static int wm831x_backup_get_prop(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct wm831x_backup *devdata = dev_get_drvdata(psy->dev->parent); struct wm831x *wm831x = devdata->wm831x; int ret = 0; ret = wm831x_reg_read(wm831x, WM831X_BACKUP_CHARGER_CONTROL); if (ret < 0) return ret; switch (psp) { case POWER_SUPPLY_PROP_STATUS: if (ret & WM831X_BKUP_CHG_STS) val->intval = POWER_SUPPLY_STATUS_CHARGING; else val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: ret = wm831x_backup_read_voltage(wm831x, WM831X_AUX_BKUP_BATT, val); break; case POWER_SUPPLY_PROP_PRESENT: if (ret & WM831X_BKUP_CHG_STS) val->intval = 1; else val->intval = 0; break; default: ret = -EINVAL; break; } return ret; } static enum power_supply_property wm831x_backup_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_PRESENT, }; /********************************************************************* * Initialisation *********************************************************************/ static __devinit int wm831x_backup_probe(struct platform_device *pdev) { struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data; struct wm831x_backup *devdata; struct power_supply *backup; int ret; devdata = kzalloc(sizeof(struct wm831x_backup), GFP_KERNEL); if (devdata == NULL) return -ENOMEM; devdata->wm831x = wm831x; platform_set_drvdata(pdev, devdata); backup = &devdata->backup; /* We ignore configuration failures since we can still read * back the status 
without enabling the charger (which may * already be enabled anyway). */ wm831x_config_backup(wm831x); if (wm831x_pdata && wm831x_pdata->wm831x_num) snprintf(devdata->name, sizeof(devdata->name), "wm831x-backup.%d", wm831x_pdata->wm831x_num); else snprintf(devdata->name, sizeof(devdata->name), "wm831x-backup"); backup->name = devdata->name; backup->type = POWER_SUPPLY_TYPE_BATTERY; backup->properties = wm831x_backup_props; backup->num_properties = ARRAY_SIZE(wm831x_backup_props); backup->get_property = wm831x_backup_get_prop; ret = power_supply_register(&pdev->dev, backup); if (ret) goto err_kmalloc; return ret; err_kmalloc: kfree(devdata); return ret; } static __devexit int wm831x_backup_remove(struct platform_device *pdev) { struct wm831x_backup *devdata = platform_get_drvdata(pdev); power_supply_unregister(&devdata->backup); kfree(devdata->backup.name); kfree(devdata); return 0; } static struct platform_driver wm831x_backup_driver = { .probe = wm831x_backup_probe, .remove = __devexit_p(wm831x_backup_remove), .driver = { .name = "wm831x-backup", }, }; module_platform_driver(wm831x_backup_driver); MODULE_DESCRIPTION("Backup battery charger driver for WM831x PMICs"); MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:wm831x-backup");
gpl-2.0
vickyvca/MindEater-dior
drivers/staging/comedi/drivers/comedi_test.c
5580
15339
/* comedi/drivers/comedi_test.c Generates fake waveform signals that can be read through the command interface. It does _not_ read from any board; it just generates deterministic waveforms. Useful for various testing purposes. Copyright (C) 2002 Joachim Wuttke <Joachim.Wuttke@icn.siemens.de> Copyright (C) 2002 Frank Mori Hess <fmhess@users.sourceforge.net> COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2000 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ************************************************************************/ /* Driver: comedi_test Description: generates fake waveforms Author: Joachim Wuttke <Joachim.Wuttke@icn.siemens.de>, Frank Mori Hess <fmhess@users.sourceforge.net>, ds Devices: Status: works Updated: Sat, 16 Mar 2002 17:34:48 -0800 This driver is mainly for testing purposes, but can also be used to generate sample waveforms on systems that don't have data acquisition hardware. Configuration options: [0] - Amplitude in microvolts for fake waveforms (default 1 volt) [1] - Period in microseconds for fake waveforms (default 0.1 sec) Generates a sawtooth wave on channel 0, square wave on channel 1, additional waveforms could be added to other channels (currently they return flatline zero volts). 
*/ #include "../comedidev.h" #include <asm/div64.h> #include "comedi_fc.h" #include <linux/timer.h> /* Board descriptions */ struct waveform_board { const char *name; int ai_chans; int ai_bits; int have_dio; }; #define N_CHANS 8 static const struct waveform_board waveform_boards[] = { { .name = "comedi_test", .ai_chans = N_CHANS, .ai_bits = 16, .have_dio = 0, }, }; #define thisboard ((const struct waveform_board *)dev->board_ptr) /* Data unique to this driver */ struct waveform_private { struct timer_list timer; struct timeval last; /* time at which last timer interrupt occurred */ unsigned int uvolt_amplitude; /* waveform amplitude in microvolts */ unsigned long usec_period; /* waveform period in microseconds */ unsigned long usec_current; /* current time (modulo waveform period) */ unsigned long usec_remainder; /* usec since last scan; */ unsigned long ai_count; /* number of conversions remaining */ unsigned int scan_period; /* scan period in usec */ unsigned int convert_period; /* conversion period in usec */ unsigned timer_running:1; unsigned int ao_loopbacks[N_CHANS]; }; #define devpriv ((struct waveform_private *)dev->private) static int waveform_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int waveform_detach(struct comedi_device *dev); static struct comedi_driver driver_waveform = { .driver_name = "comedi_test", .module = THIS_MODULE, .attach = waveform_attach, .detach = waveform_detach, .board_name = &waveform_boards[0].name, .offset = sizeof(struct waveform_board), .num_names = ARRAY_SIZE(waveform_boards), }; static int __init driver_waveform_init_module(void) { return comedi_driver_register(&driver_waveform); } static void __exit driver_waveform_cleanup_module(void) { comedi_driver_unregister(&driver_waveform); } module_init(driver_waveform_init_module); module_exit(driver_waveform_cleanup_module); static int waveform_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); static int 
waveform_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s); static int waveform_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static int waveform_ai_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int waveform_ao_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static short fake_sawtooth(struct comedi_device *dev, unsigned int range, unsigned long current_time); static short fake_squarewave(struct comedi_device *dev, unsigned int range, unsigned long current_time); static short fake_flatline(struct comedi_device *dev, unsigned int range, unsigned long current_time); static short fake_waveform(struct comedi_device *dev, unsigned int channel, unsigned int range, unsigned long current_time); /* 1000 nanosec in a microsec */ static const int nano_per_micro = 1000; /* fake analog input ranges */ static const struct comedi_lrange waveform_ai_ranges = { 2, { BIP_RANGE(10), BIP_RANGE(5), } }; /* This is the background routine used to generate arbitrary data. It should run in the background; therefore it is scheduled by a timer mechanism. 
*/ static void waveform_ai_interrupt(unsigned long arg) { struct comedi_device *dev = (struct comedi_device *)arg; struct comedi_async *async = dev->read_subdev->async; struct comedi_cmd *cmd = &async->cmd; unsigned int i, j; /* all times in microsec */ unsigned long elapsed_time; unsigned int num_scans; struct timeval now; do_gettimeofday(&now); elapsed_time = 1000000 * (now.tv_sec - devpriv->last.tv_sec) + now.tv_usec - devpriv->last.tv_usec; devpriv->last = now; num_scans = (devpriv->usec_remainder + elapsed_time) / devpriv->scan_period; devpriv->usec_remainder = (devpriv->usec_remainder + elapsed_time) % devpriv->scan_period; async->events = 0; for (i = 0; i < num_scans; i++) { for (j = 0; j < cmd->chanlist_len; j++) { cfc_write_to_buffer(dev->read_subdev, fake_waveform(dev, CR_CHAN(cmd-> chanlist[j]), CR_RANGE(cmd-> chanlist[j]), devpriv-> usec_current + i * devpriv->scan_period + j * devpriv-> convert_period)); } devpriv->ai_count++; if (cmd->stop_src == TRIG_COUNT && devpriv->ai_count >= cmd->stop_arg) { async->events |= COMEDI_CB_EOA; break; } } devpriv->usec_current += elapsed_time; devpriv->usec_current %= devpriv->usec_period; if ((async->events & COMEDI_CB_EOA) == 0 && devpriv->timer_running) mod_timer(&devpriv->timer, jiffies + 1); else del_timer(&devpriv->timer); comedi_event(dev, dev->read_subdev); } static int waveform_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; int amplitude = it->options[0]; int period = it->options[1]; int i; dev->board_name = thisboard->name; if (alloc_private(dev, sizeof(struct waveform_private)) < 0) return -ENOMEM; /* set default amplitude and period */ if (amplitude <= 0) amplitude = 1000000; /* 1 volt */ if (period <= 0) period = 100000; /* 0.1 sec */ devpriv->uvolt_amplitude = amplitude; devpriv->usec_period = period; dev->n_subdevices = 2; if (alloc_subdevices(dev, dev->n_subdevices) < 0) return -ENOMEM; s = dev->subdevices + 0; dev->read_subdev = s; /* analog input 
subdevice */ s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ; s->n_chan = thisboard->ai_chans; s->maxdata = (1 << thisboard->ai_bits) - 1; s->range_table = &waveform_ai_ranges; s->len_chanlist = s->n_chan * 2; s->insn_read = waveform_ai_insn_read; s->do_cmd = waveform_ai_cmd; s->do_cmdtest = waveform_ai_cmdtest; s->cancel = waveform_ai_cancel; s = dev->subdevices + 1; dev->write_subdev = s; /* analog output subdevice (loopback) */ s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITEABLE | SDF_GROUND; s->n_chan = thisboard->ai_chans; s->maxdata = (1 << thisboard->ai_bits) - 1; s->range_table = &waveform_ai_ranges; s->len_chanlist = s->n_chan * 2; s->insn_write = waveform_ao_insn_write; s->do_cmd = NULL; s->do_cmdtest = NULL; s->cancel = NULL; /* Our default loopback value is just a 0V flatline */ for (i = 0; i < s->n_chan; i++) devpriv->ao_loopbacks[i] = s->maxdata / 2; init_timer(&(devpriv->timer)); devpriv->timer.function = waveform_ai_interrupt; devpriv->timer.data = (unsigned long)dev; printk(KERN_INFO "comedi%d: comedi_test: " "%i microvolt, %li microsecond waveform attached\n", dev->minor, devpriv->uvolt_amplitude, devpriv->usec_period); return 1; } static int waveform_detach(struct comedi_device *dev) { printk("comedi%d: comedi_test: remove\n", dev->minor); if (dev->private) waveform_ai_cancel(dev, dev->read_subdev); return 0; } static int waveform_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0; int tmp; /* step 1: make sure trigger sources are trivially valid */ tmp = cmd->start_src; cmd->start_src &= TRIG_NOW; if (!cmd->start_src || tmp != cmd->start_src) err++; tmp = cmd->scan_begin_src; cmd->scan_begin_src &= TRIG_TIMER; if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) err++; tmp = cmd->convert_src; cmd->convert_src &= TRIG_NOW | TRIG_TIMER; if (!cmd->convert_src || tmp != cmd->convert_src) err++; tmp = cmd->scan_end_src; cmd->scan_end_src &= TRIG_COUNT; 
if (!cmd->scan_end_src || tmp != cmd->scan_end_src) err++; tmp = cmd->stop_src; cmd->stop_src &= TRIG_COUNT | TRIG_NONE; if (!cmd->stop_src || tmp != cmd->stop_src) err++; if (err) return 1; /* * step 2: make sure trigger sources are unique and mutually compatible */ if (cmd->convert_src != TRIG_NOW && cmd->convert_src != TRIG_TIMER) err++; if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE) err++; if (err) return 2; /* step 3: make sure arguments are trivially compatible */ if (cmd->start_arg != 0) { cmd->start_arg = 0; err++; } if (cmd->convert_src == TRIG_NOW) { if (cmd->convert_arg != 0) { cmd->convert_arg = 0; err++; } } if (cmd->scan_begin_src == TRIG_TIMER) { if (cmd->scan_begin_arg < nano_per_micro) { cmd->scan_begin_arg = nano_per_micro; err++; } if (cmd->convert_src == TRIG_TIMER && cmd->scan_begin_arg < cmd->convert_arg * cmd->chanlist_len) { cmd->scan_begin_arg = cmd->convert_arg * cmd->chanlist_len; err++; } } /* * XXX these checks are generic and should go in core if not there * already */ if (!cmd->chanlist_len) { cmd->chanlist_len = 1; err++; } if (cmd->scan_end_arg != cmd->chanlist_len) { cmd->scan_end_arg = cmd->chanlist_len; err++; } if (cmd->stop_src == TRIG_COUNT) { if (!cmd->stop_arg) { cmd->stop_arg = 1; err++; } } else { /* TRIG_NONE */ if (cmd->stop_arg != 0) { cmd->stop_arg = 0; err++; } } if (err) return 3; /* step 4: fix up any arguments */ if (cmd->scan_begin_src == TRIG_TIMER) { tmp = cmd->scan_begin_arg; /* round to nearest microsec */ cmd->scan_begin_arg = nano_per_micro * ((tmp + (nano_per_micro / 2)) / nano_per_micro); if (tmp != cmd->scan_begin_arg) err++; } if (cmd->convert_src == TRIG_TIMER) { tmp = cmd->convert_arg; /* round to nearest microsec */ cmd->convert_arg = nano_per_micro * ((tmp + (nano_per_micro / 2)) / nano_per_micro); if (tmp != cmd->convert_arg) err++; } if (err) return 4; return 0; } static int waveform_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct comedi_cmd *cmd = 
&s->async->cmd; if (cmd->flags & TRIG_RT) { comedi_error(dev, "commands at RT priority not supported in this driver"); return -1; } devpriv->timer_running = 1; devpriv->ai_count = 0; devpriv->scan_period = cmd->scan_begin_arg / nano_per_micro; if (cmd->convert_src == TRIG_NOW) devpriv->convert_period = 0; else if (cmd->convert_src == TRIG_TIMER) devpriv->convert_period = cmd->convert_arg / nano_per_micro; else { comedi_error(dev, "bug setting conversion period"); return -1; } do_gettimeofday(&devpriv->last); devpriv->usec_current = devpriv->last.tv_usec % devpriv->usec_period; devpriv->usec_remainder = 0; devpriv->timer.expires = jiffies + 1; add_timer(&devpriv->timer); return 0; } static int waveform_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { devpriv->timer_running = 0; del_timer(&devpriv->timer); return 0; } static short fake_sawtooth(struct comedi_device *dev, unsigned int range_index, unsigned long current_time) { struct comedi_subdevice *s = dev->read_subdev; unsigned int offset = s->maxdata / 2; u64 value; const struct comedi_krange *krange = &s->range_table->range[range_index]; u64 binary_amplitude; binary_amplitude = s->maxdata; binary_amplitude *= devpriv->uvolt_amplitude; do_div(binary_amplitude, krange->max - krange->min); current_time %= devpriv->usec_period; value = current_time; value *= binary_amplitude * 2; do_div(value, devpriv->usec_period); value -= binary_amplitude; /* get rid of sawtooth's dc offset */ return offset + value; } static short fake_squarewave(struct comedi_device *dev, unsigned int range_index, unsigned long current_time) { struct comedi_subdevice *s = dev->read_subdev; unsigned int offset = s->maxdata / 2; u64 value; const struct comedi_krange *krange = &s->range_table->range[range_index]; current_time %= devpriv->usec_period; value = s->maxdata; value *= devpriv->uvolt_amplitude; do_div(value, krange->max - krange->min); if (current_time < devpriv->usec_period / 2) value *= -1; return offset + value; } 
static short fake_flatline(struct comedi_device *dev, unsigned int range_index, unsigned long current_time) { return dev->read_subdev->maxdata / 2; } /* generates a different waveform depending on what channel is read */ static short fake_waveform(struct comedi_device *dev, unsigned int channel, unsigned int range, unsigned long current_time) { enum { SAWTOOTH_CHAN, SQUARE_CHAN, }; switch (channel) { case SAWTOOTH_CHAN: return fake_sawtooth(dev, range, current_time); break; case SQUARE_CHAN: return fake_squarewave(dev, range, current_time); break; default: break; } return fake_flatline(dev, range, current_time); } static int waveform_ai_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i, chan = CR_CHAN(insn->chanspec); for (i = 0; i < insn->n; i++) data[i] = devpriv->ao_loopbacks[chan]; return insn->n; } static int waveform_ao_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i, chan = CR_CHAN(insn->chanspec); for (i = 0; i < insn->n; i++) devpriv->ao_loopbacks[chan] = data[i]; return insn->n; } MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
compulab/cm-fx6-kernel
drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
12748
16088
/* * Aic7xxx SCSI host adapter firmware assembler symbol table implementation * * Copyright (c) 1997 Justin T. Gibbs. * Copyright (c) 2002 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
* * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_symbol.c#24 $ * * $FreeBSD$ */ #include <sys/types.h> #ifdef __linux__ #include "aicdb.h" #else #include <db.h> #endif #include <fcntl.h> #include <inttypes.h> #include <regex.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sysexits.h> #include "aicasm_symbol.h" #include "aicasm.h" static DB *symtable; symbol_t * symbol_create(char *name) { symbol_t *new_symbol; new_symbol = (symbol_t *)malloc(sizeof(symbol_t)); if (new_symbol == NULL) { perror("Unable to create new symbol"); exit(EX_SOFTWARE); } memset(new_symbol, 0, sizeof(*new_symbol)); new_symbol->name = strdup(name); if (new_symbol->name == NULL) stop("Unable to strdup symbol name", EX_SOFTWARE); new_symbol->type = UNINITIALIZED; new_symbol->count = 1; return (new_symbol); } void symbol_delete(symbol_t *symbol) { if (symtable != NULL) { DBT key; key.data = symbol->name; key.size = strlen(symbol->name); symtable->del(symtable, &key, /*flags*/0); } switch(symbol->type) { case SCBLOC: case SRAMLOC: case REGISTER: if (symbol->info.rinfo != NULL) free(symbol->info.rinfo); break; case ALIAS: if (symbol->info.ainfo != NULL) free(symbol->info.ainfo); break; case MASK: case FIELD: case ENUM: case ENUM_ENTRY: if (symbol->info.finfo != NULL) { symlist_free(&symbol->info.finfo->symrefs); free(symbol->info.finfo); } break; case DOWNLOAD_CONST: case CONST: if (symbol->info.cinfo != NULL) free(symbol->info.cinfo); break; case LABEL: if (symbol->info.linfo != NULL) free(symbol->info.linfo); break; case UNINITIALIZED: default: break; } free(symbol->name); free(symbol); } void symtable_open() { symtable = dbopen(/*filename*/NULL, O_CREAT | O_NONBLOCK | O_RDWR, /*mode*/0, DB_HASH, /*openinfo*/NULL); if (symtable == NULL) { perror("Symbol table creation failed"); exit(EX_SOFTWARE); /* NOTREACHED */ } } void symtable_close() { if (symtable != NULL) { DBT key; DBT data; while (symtable->seq(symtable, &key, &data, R_FIRST) == 0) { symbol_t *stored_ptr; 
memcpy(&stored_ptr, data.data, sizeof(stored_ptr)); symbol_delete(stored_ptr); } symtable->close(symtable); } } /* * The semantics of get is to return an uninitialized symbol entry * if a lookup fails. */ symbol_t * symtable_get(char *name) { symbol_t *stored_ptr; DBT key; DBT data; int retval; key.data = (void *)name; key.size = strlen(name); if ((retval = symtable->get(symtable, &key, &data, /*flags*/0)) != 0) { if (retval == -1) { perror("Symbol table get operation failed"); exit(EX_SOFTWARE); /* NOTREACHED */ } else if (retval == 1) { /* Symbol wasn't found, so create a new one */ symbol_t *new_symbol; new_symbol = symbol_create(name); data.data = &new_symbol; data.size = sizeof(new_symbol); if (symtable->put(symtable, &key, &data, /*flags*/0) !=0) { perror("Symtable put failed"); exit(EX_SOFTWARE); } return (new_symbol); } else { perror("Unexpected return value from db get routine"); exit(EX_SOFTWARE); /* NOTREACHED */ } } memcpy(&stored_ptr, data.data, sizeof(stored_ptr)); stored_ptr->count++; data.data = &stored_ptr; if (symtable->put(symtable, &key, &data, /*flags*/0) !=0) { perror("Symtable put failed"); exit(EX_SOFTWARE); } return (stored_ptr); } symbol_node_t * symlist_search(symlist_t *symlist, char *symname) { symbol_node_t *curnode; curnode = SLIST_FIRST(symlist); while(curnode != NULL) { if (strcmp(symname, curnode->symbol->name) == 0) break; curnode = SLIST_NEXT(curnode, links); } return (curnode); } void symlist_add(symlist_t *symlist, symbol_t *symbol, int how) { symbol_node_t *newnode; newnode = (symbol_node_t *)malloc(sizeof(symbol_node_t)); if (newnode == NULL) { stop("symlist_add: Unable to malloc symbol_node", EX_SOFTWARE); /* NOTREACHED */ } newnode->symbol = symbol; if (how == SYMLIST_SORT) { symbol_node_t *curnode; int field; field = FALSE; switch(symbol->type) { case REGISTER: case SCBLOC: case SRAMLOC: break; case FIELD: case MASK: case ENUM: case ENUM_ENTRY: field = TRUE; break; default: stop("symlist_add: Invalid symbol type for 
sorting", EX_SOFTWARE); /* NOTREACHED */ } curnode = SLIST_FIRST(symlist); if (curnode == NULL || (field && (curnode->symbol->type > newnode->symbol->type || (curnode->symbol->type == newnode->symbol->type && (curnode->symbol->info.finfo->value > newnode->symbol->info.finfo->value)))) || (!field && (curnode->symbol->info.rinfo->address > newnode->symbol->info.rinfo->address))) { SLIST_INSERT_HEAD(symlist, newnode, links); return; } while (1) { if (SLIST_NEXT(curnode, links) == NULL) { SLIST_INSERT_AFTER(curnode, newnode, links); break; } else { symbol_t *cursymbol; cursymbol = SLIST_NEXT(curnode, links)->symbol; if ((field && (cursymbol->type > symbol->type || (cursymbol->type == symbol->type && (cursymbol->info.finfo->value > symbol->info.finfo->value)))) || (!field && (cursymbol->info.rinfo->address > symbol->info.rinfo->address))) { SLIST_INSERT_AFTER(curnode, newnode, links); break; } } curnode = SLIST_NEXT(curnode, links); } } else { SLIST_INSERT_HEAD(symlist, newnode, links); } } void symlist_free(symlist_t *symlist) { symbol_node_t *node1, *node2; node1 = SLIST_FIRST(symlist); while (node1 != NULL) { node2 = SLIST_NEXT(node1, links); free(node1); node1 = node2; } SLIST_INIT(symlist); } void symlist_merge(symlist_t *symlist_dest, symlist_t *symlist_src1, symlist_t *symlist_src2) { symbol_node_t *node; *symlist_dest = *symlist_src1; while((node = SLIST_FIRST(symlist_src2)) != NULL) { SLIST_REMOVE_HEAD(symlist_src2, links); SLIST_INSERT_HEAD(symlist_dest, node, links); } /* These are now empty */ SLIST_INIT(symlist_src1); SLIST_INIT(symlist_src2); } void aic_print_file_prologue(FILE *ofile) { if (ofile == NULL) return; fprintf(ofile, "/*\n" " * DO NOT EDIT - This file is automatically generated\n" " * from the following source files:\n" " *\n" "%s */\n", versions); } void aic_print_include(FILE *dfile, char *include_file) { if (dfile == NULL) return; fprintf(dfile, "\n#include \"%s\"\n\n", include_file); } void aic_print_reg_dump_types(FILE *ofile) { if (ofile 
== NULL) return; fprintf(ofile, "typedef int (%sreg_print_t)(u_int, u_int *, u_int);\n" "typedef struct %sreg_parse_entry {\n" " char *name;\n" " uint8_t value;\n" " uint8_t mask;\n" "} %sreg_parse_entry_t;\n" "\n", prefix, prefix, prefix); } static void aic_print_reg_dump_start(FILE *dfile, symbol_node_t *regnode) { if (dfile == NULL) return; fprintf(dfile, "static const %sreg_parse_entry_t %s_parse_table[] = {\n", prefix, regnode->symbol->name); } static void aic_print_reg_dump_end(FILE *ofile, FILE *dfile, symbol_node_t *regnode, u_int num_entries) { char *lower_name; char *letter; lower_name = strdup(regnode->symbol->name); if (lower_name == NULL) stop("Unable to strdup symbol name", EX_SOFTWARE); for (letter = lower_name; *letter != '\0'; letter++) *letter = tolower(*letter); if (dfile != NULL) { if (num_entries != 0) fprintf(dfile, "\n" "};\n" "\n"); fprintf(dfile, "int\n" "%s%s_print(u_int regvalue, u_int *cur_col, u_int wrap)\n" "{\n" " return (%sprint_register(%s%s, %d, \"%s\",\n" " 0x%02x, regvalue, cur_col, wrap));\n" "}\n" "\n", prefix, lower_name, prefix, num_entries != 0 ? regnode->symbol->name : "NULL", num_entries != 0 ? 
"_parse_table" : "", num_entries, regnode->symbol->name, regnode->symbol->info.rinfo->address); } fprintf(ofile, "#if AIC_DEBUG_REGISTERS\n" "%sreg_print_t %s%s_print;\n" "#else\n" "#define %s%s_print(regvalue, cur_col, wrap) \\\n" " %sprint_register(NULL, 0, \"%s\", 0x%02x, regvalue, cur_col, wrap)\n" "#endif\n" "\n", prefix, prefix, lower_name, prefix, lower_name, prefix, regnode->symbol->name, regnode->symbol->info.rinfo->address); } static void aic_print_reg_dump_entry(FILE *dfile, symbol_node_t *curnode) { int num_tabs; if (dfile == NULL) return; fprintf(dfile, " { \"%s\",", curnode->symbol->name); num_tabs = 3 - (strlen(curnode->symbol->name) + 5) / 8; while (num_tabs-- > 0) fputc('\t', dfile); fprintf(dfile, "0x%02x, 0x%02x }", curnode->symbol->info.finfo->value, curnode->symbol->info.finfo->mask); } void symtable_dump(FILE *ofile, FILE *dfile) { /* * Sort the registers by address with a simple insertion sort. * Put bitmasks next to the first register that defines them. * Put constants at the end. 
*/ symlist_t registers; symlist_t masks; symlist_t constants; symlist_t download_constants; symlist_t aliases; symlist_t exported_labels; symbol_node_t *curnode; symbol_node_t *regnode; DBT key; DBT data; int flag; int reg_count = 0, reg_used = 0; u_int i; if (symtable == NULL) return; SLIST_INIT(&registers); SLIST_INIT(&masks); SLIST_INIT(&constants); SLIST_INIT(&download_constants); SLIST_INIT(&aliases); SLIST_INIT(&exported_labels); flag = R_FIRST; while (symtable->seq(symtable, &key, &data, flag) == 0) { symbol_t *cursym; memcpy(&cursym, data.data, sizeof(cursym)); switch(cursym->type) { case REGISTER: case SCBLOC: case SRAMLOC: symlist_add(&registers, cursym, SYMLIST_SORT); break; case MASK: case FIELD: case ENUM: case ENUM_ENTRY: symlist_add(&masks, cursym, SYMLIST_SORT); break; case CONST: symlist_add(&constants, cursym, SYMLIST_INSERT_HEAD); break; case DOWNLOAD_CONST: symlist_add(&download_constants, cursym, SYMLIST_INSERT_HEAD); break; case ALIAS: symlist_add(&aliases, cursym, SYMLIST_INSERT_HEAD); break; case LABEL: if (cursym->info.linfo->exported == 0) break; symlist_add(&exported_labels, cursym, SYMLIST_INSERT_HEAD); break; default: break; } flag = R_NEXT; } /* Register dianostic functions/declarations first. 
*/ aic_print_file_prologue(ofile); aic_print_reg_dump_types(ofile); aic_print_file_prologue(dfile); aic_print_include(dfile, stock_include_file); SLIST_FOREACH(curnode, &registers, links) { if (curnode->symbol->dont_generate_debug_code) continue; switch(curnode->symbol->type) { case REGISTER: case SCBLOC: case SRAMLOC: { symlist_t *fields; symbol_node_t *fieldnode; int num_entries; num_entries = 0; reg_count++; if (curnode->symbol->count == 1) break; fields = &curnode->symbol->info.rinfo->fields; SLIST_FOREACH(fieldnode, fields, links) { if (num_entries == 0) aic_print_reg_dump_start(dfile, curnode); else if (dfile != NULL) fputs(",\n", dfile); num_entries++; aic_print_reg_dump_entry(dfile, fieldnode); } aic_print_reg_dump_end(ofile, dfile, curnode, num_entries); reg_used++; } default: break; } } fprintf(stderr, "%s: %d of %d register definitions used\n", appname, reg_used, reg_count); /* Fold in the masks and bits */ while (SLIST_FIRST(&masks) != NULL) { char *regname; curnode = SLIST_FIRST(&masks); SLIST_REMOVE_HEAD(&masks, links); regnode = SLIST_FIRST(&curnode->symbol->info.finfo->symrefs); regname = regnode->symbol->name; regnode = symlist_search(&registers, regname); SLIST_INSERT_AFTER(regnode, curnode, links); } /* Add the aliases */ while (SLIST_FIRST(&aliases) != NULL) { char *regname; curnode = SLIST_FIRST(&aliases); SLIST_REMOVE_HEAD(&aliases, links); regname = curnode->symbol->info.ainfo->parent->name; regnode = symlist_search(&registers, regname); SLIST_INSERT_AFTER(regnode, curnode, links); } /* Output generated #defines. 
*/ while (SLIST_FIRST(&registers) != NULL) { symbol_node_t *curnode; u_int value; char *tab_str; char *tab_str2; curnode = SLIST_FIRST(&registers); SLIST_REMOVE_HEAD(&registers, links); switch(curnode->symbol->type) { case REGISTER: case SCBLOC: case SRAMLOC: fprintf(ofile, "\n"); value = curnode->symbol->info.rinfo->address; tab_str = "\t"; tab_str2 = "\t\t"; break; case ALIAS: { symbol_t *parent; parent = curnode->symbol->info.ainfo->parent; value = parent->info.rinfo->address; tab_str = "\t"; tab_str2 = "\t\t"; break; } case MASK: case FIELD: case ENUM: case ENUM_ENTRY: value = curnode->symbol->info.finfo->value; tab_str = "\t\t"; tab_str2 = "\t"; break; default: value = 0; /* Quiet compiler */ tab_str = NULL; tab_str2 = NULL; stop("symtable_dump: Invalid symbol type " "encountered", EX_SOFTWARE); break; } fprintf(ofile, "#define%s%-16s%s0x%02x\n", tab_str, curnode->symbol->name, tab_str2, value); free(curnode); } fprintf(ofile, "\n\n"); while (SLIST_FIRST(&constants) != NULL) { symbol_node_t *curnode; curnode = SLIST_FIRST(&constants); SLIST_REMOVE_HEAD(&constants, links); fprintf(ofile, "#define\t%-8s\t0x%02x\n", curnode->symbol->name, curnode->symbol->info.cinfo->value); free(curnode); } fprintf(ofile, "\n\n/* Downloaded Constant Definitions */\n"); for (i = 0; SLIST_FIRST(&download_constants) != NULL; i++) { symbol_node_t *curnode; curnode = SLIST_FIRST(&download_constants); SLIST_REMOVE_HEAD(&download_constants, links); fprintf(ofile, "#define\t%-8s\t0x%02x\n", curnode->symbol->name, curnode->symbol->info.cinfo->value); free(curnode); } fprintf(ofile, "#define\tDOWNLOAD_CONST_COUNT\t0x%02x\n", i); fprintf(ofile, "\n\n/* Exported Labels */\n"); while (SLIST_FIRST(&exported_labels) != NULL) { symbol_node_t *curnode; curnode = SLIST_FIRST(&exported_labels); SLIST_REMOVE_HEAD(&exported_labels, links); fprintf(ofile, "#define\tLABEL_%-8s\t0x%02x\n", curnode->symbol->name, curnode->symbol->info.linfo->address); free(curnode); } }
gpl-2.0
VegaDevTeam/android_kernel_pantech_ef60s
drivers/char/agp/compat_ioctl.c
12748
7555
/* * AGPGART driver frontend compatibility ioctls * Copyright (C) 2004 Silicon Graphics, Inc. * Copyright (C) 2002-2003 Dave Jones * Copyright (C) 1999 Jeff Hartmann * Copyright (C) 1999 Precision Insight, Inc. * Copyright (C) 1999 Xi Graphics, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/fs.h> #include <linux/agpgart.h> #include <linux/slab.h> #include <asm/uaccess.h> #include "agp.h" #include "compat_ioctl.h" static int compat_agpioc_info_wrap(struct agp_file_private *priv, void __user *arg) { struct agp_info32 userinfo; struct agp_kern_info kerninfo; agp_copy_info(agp_bridge, &kerninfo); userinfo.version.major = kerninfo.version.major; userinfo.version.minor = kerninfo.version.minor; userinfo.bridge_id = kerninfo.device->vendor | (kerninfo.device->device << 16); userinfo.agp_mode = kerninfo.mode; userinfo.aper_base = (compat_long_t)kerninfo.aper_base; userinfo.aper_size = kerninfo.aper_size; userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory; userinfo.pg_used = kerninfo.current_memory; if (copy_to_user(arg, &userinfo, sizeof(userinfo))) return -EFAULT; return 0; } static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg) { struct agp_region32 ureserve; struct agp_region kreserve; struct agp_client *client; struct agp_file_private *client_priv; DBG(""); if (copy_from_user(&ureserve, arg, sizeof(ureserve))) return -EFAULT; if ((unsigned) ureserve.seg_count >= ~0U/sizeof(struct agp_segment32)) return -EFAULT; kreserve.pid = ureserve.pid; kreserve.seg_count = ureserve.seg_count; client = agp_find_client_by_pid(kreserve.pid); if (kreserve.seg_count == 0) { /* remove a client */ client_priv = agp_find_private(kreserve.pid); if (client_priv != NULL) { set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags); set_bit(AGP_FF_IS_VALID, &client_priv->access_flags); } if (client == NULL) { /* client is already removed */ return 0; } return agp_remove_client(kreserve.pid); } else { struct agp_segment32 *usegment; struct agp_segment *ksegment; int seg; if (ureserve.seg_count >= 16384) return -EINVAL; usegment = kmalloc(sizeof(*usegment) * ureserve.seg_count, GFP_KERNEL); if (!usegment) return -ENOMEM; ksegment = kmalloc(sizeof(*ksegment) * 
kreserve.seg_count, GFP_KERNEL); if (!ksegment) { kfree(usegment); return -ENOMEM; } if (copy_from_user(usegment, (void __user *) ureserve.seg_list, sizeof(*usegment) * ureserve.seg_count)) { kfree(usegment); kfree(ksegment); return -EFAULT; } for (seg = 0; seg < ureserve.seg_count; seg++) { ksegment[seg].pg_start = usegment[seg].pg_start; ksegment[seg].pg_count = usegment[seg].pg_count; ksegment[seg].prot = usegment[seg].prot; } kfree(usegment); kreserve.seg_list = ksegment; if (client == NULL) { /* Create the client and add the segment */ client = agp_create_client(kreserve.pid); if (client == NULL) { kfree(ksegment); return -ENOMEM; } client_priv = agp_find_private(kreserve.pid); if (client_priv != NULL) { set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags); set_bit(AGP_FF_IS_VALID, &client_priv->access_flags); } } return agp_create_segment(client, &kreserve); } /* Will never really happen */ return -EINVAL; } static int compat_agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg) { struct agp_memory *memory; struct agp_allocate32 alloc; DBG(""); if (copy_from_user(&alloc, arg, sizeof(alloc))) return -EFAULT; memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type); if (memory == NULL) return -ENOMEM; alloc.key = memory->key; alloc.physical = memory->physical; if (copy_to_user(arg, &alloc, sizeof(alloc))) { agp_free_memory_wrap(memory); return -EFAULT; } return 0; } static int compat_agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg) { struct agp_bind32 bind_info; struct agp_memory *memory; DBG(""); if (copy_from_user(&bind_info, arg, sizeof(bind_info))) return -EFAULT; memory = agp_find_mem_by_key(bind_info.key); if (memory == NULL) return -EINVAL; return agp_bind_memory(memory, bind_info.pg_start); } static int compat_agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg) { struct agp_memory *memory; struct agp_unbind32 unbind; DBG(""); if (copy_from_user(&unbind, arg, sizeof(unbind))) return -EFAULT; memory = 
agp_find_mem_by_key(unbind.key); if (memory == NULL) return -EINVAL; return agp_unbind_memory(memory); } long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct agp_file_private *curr_priv = file->private_data; int ret_val = -ENOTTY; mutex_lock(&(agp_fe.agp_mutex)); if ((agp_fe.current_controller == NULL) && (cmd != AGPIOC_ACQUIRE32)) { ret_val = -EINVAL; goto ioctl_out; } if ((agp_fe.backend_acquired != true) && (cmd != AGPIOC_ACQUIRE32)) { ret_val = -EBUSY; goto ioctl_out; } if (cmd != AGPIOC_ACQUIRE32) { if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) { ret_val = -EPERM; goto ioctl_out; } /* Use the original pid of the controller, * in case it's threaded */ if (agp_fe.current_controller->pid != curr_priv->my_pid) { ret_val = -EBUSY; goto ioctl_out; } } switch (cmd) { case AGPIOC_INFO32: ret_val = compat_agpioc_info_wrap(curr_priv, (void __user *) arg); break; case AGPIOC_ACQUIRE32: ret_val = agpioc_acquire_wrap(curr_priv); break; case AGPIOC_RELEASE32: ret_val = agpioc_release_wrap(curr_priv); break; case AGPIOC_SETUP32: ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg); break; case AGPIOC_RESERVE32: ret_val = compat_agpioc_reserve_wrap(curr_priv, (void __user *) arg); break; case AGPIOC_PROTECT32: ret_val = agpioc_protect_wrap(curr_priv); break; case AGPIOC_ALLOCATE32: ret_val = compat_agpioc_allocate_wrap(curr_priv, (void __user *) arg); break; case AGPIOC_DEALLOCATE32: ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg); break; case AGPIOC_BIND32: ret_val = compat_agpioc_bind_wrap(curr_priv, (void __user *) arg); break; case AGPIOC_UNBIND32: ret_val = compat_agpioc_unbind_wrap(curr_priv, (void __user *) arg); break; case AGPIOC_CHIPSET_FLUSH32: break; } ioctl_out: DBG("ioctl returns %d\n", ret_val); mutex_unlock(&(agp_fe.agp_mutex)); return ret_val; }
gpl-2.0
agrabren/android_kernel_htc_shooter
arch/sparc/oprofile/init.c
13772
1669
/** * @file init.c * * @remark Copyright 2002 OProfile authors * @remark Read the file COPYING * * @author John Levon <levon@movementarian.org> */ #include <linux/kernel.h> #include <linux/oprofile.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/param.h> /* for HZ */ #ifdef CONFIG_SPARC64 #include <linux/notifier.h> #include <linux/rcupdate.h> #include <linux/kdebug.h> #include <asm/nmi.h> static int profile_timer_exceptions_notify(struct notifier_block *self, unsigned long val, void *data) { struct die_args *args = data; int ret = NOTIFY_DONE; switch (val) { case DIE_NMI: oprofile_add_sample(args->regs, 0); ret = NOTIFY_STOP; break; default: break; } return ret; } static struct notifier_block profile_timer_exceptions_nb = { .notifier_call = profile_timer_exceptions_notify, }; static int timer_start(void) { if (register_die_notifier(&profile_timer_exceptions_nb)) return 1; nmi_adjust_hz(HZ); return 0; } static void timer_stop(void) { nmi_adjust_hz(1); unregister_die_notifier(&profile_timer_exceptions_nb); synchronize_sched(); /* Allow already-started NMIs to complete. */ } static int op_nmi_timer_init(struct oprofile_operations *ops) { if (atomic_read(&nmi_active) <= 0) return -ENODEV; ops->start = timer_start; ops->stop = timer_stop; ops->cpu_type = "timer"; printk(KERN_INFO "oprofile: Using perfctr NMI timer interrupt.\n"); return 0; } #endif int __init oprofile_arch_init(struct oprofile_operations *ops) { int ret = -ENODEV; #ifdef CONFIG_SPARC64 ret = op_nmi_timer_init(ops); if (!ret) return ret; #endif return ret; } void oprofile_arch_exit(void) { }
gpl-2.0
goodwinos/linux-latest
drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c
205
16050
/****************************************************************************** * * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * ******************************************************************************/ /****************************************************************************** * * * Module: rtl8192c_rf6052.c (Source C File) * * Note: Provide RF 6052 series relative API. * * Function: * * Export: * * Abbrev: * * History: * Data Who Remark * * 09/25/2008 MHC Create initial version. * 11/05/2008 MHC Add API for tw power setting. * * ******************************************************************************/ #define _RTL8723A_RF6052_C_ #include <osdep_service.h> #include <drv_types.h> #include <rtl8723a_hal.h> #include <usb_ops_linux.h> /*----------------------------------------------------------------------------- * Function: PHY_RF6052SetBandwidth() * * Overview: This function is called by SetBWMode23aCallback8190Pci() only * * Input: struct rtw_adapter * Adapter * WIRELESS_BANDWIDTH_E Bandwidth 20M or 40M * * Output: NONE * * Return: NONE * * Note: For RF type 0222D *---------------------------------------------------------------------------*/ void rtl8723a_phy_rf6052set_bw(struct rtw_adapter *Adapter, enum ht_channel_width Bandwidth) /* 20M or 40M */ { struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter); switch (Bandwidth) { case HT_CHANNEL_WIDTH_20: pHalData->RfRegChnlVal[0] = (pHalData->RfRegChnlVal[0] & 0xfffff3ff) | 0x0400; PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, 
pHalData->RfRegChnlVal[0]); break; case HT_CHANNEL_WIDTH_40: pHalData->RfRegChnlVal[0] = (pHalData->RfRegChnlVal[0] & 0xfffff3ff); PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]); break; default: break; } } /*----------------------------------------------------------------------------- * Function: PHY_RF6052SetCckTxPower * * Overview: * * Input: NONE * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 11/05/2008 MHC Simulate 8192series.. * *---------------------------------------------------------------------------*/ void rtl823a_phy_rf6052setccktxpower(struct rtw_adapter *Adapter, u8 *pPowerlevel) { struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter); struct dm_priv *pdmpriv = &pHalData->dmpriv; struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv; u32 TxAGC[2] = {0, 0}, tmpval = 0; bool TurboScanOff = false; u8 idx1, idx2; u8 *ptr; /* According to SD3 eechou's suggestion, we need to disable turbo scan for RU. */ /* Otherwise, external PA will be broken if power index > 0x20. */ if (pHalData->EEPROMRegulatory != 0 || pHalData->ExternalPA) TurboScanOff = true; if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) { TxAGC[RF_PATH_A] = 0x3f3f3f3f; TxAGC[RF_PATH_B] = 0x3f3f3f3f; TurboScanOff = true;/* disable turbo scan */ if (TurboScanOff) { for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) { TxAGC[idx1] = pPowerlevel[idx1] | (pPowerlevel[idx1] << 8) | (pPowerlevel[idx1] << 16) | (pPowerlevel[idx1] << 24); /* 2010/10/18 MH For external PA module. We need to limit power index to be less than 0x20. */ if (TxAGC[idx1] > 0x20 && pHalData->ExternalPA) TxAGC[idx1] = 0x20; } } } else { /* 20100427 Joseph: Driver dynamic Tx power shall not affect Tx * power. It shall be determined by power training mechanism. */ /* Currently, we cannot fully disable driver dynamic tx power * mechanism because it is referenced by BT coexist mechanism. 
*/ /* In the future, two mechanism shall be separated from each other * and maintained independantly. Thanks for Lanhsin's reminder. */ if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1) { TxAGC[RF_PATH_A] = 0x10101010; TxAGC[RF_PATH_B] = 0x10101010; } else if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level2) { TxAGC[RF_PATH_A] = 0x00000000; TxAGC[RF_PATH_B] = 0x00000000; } else { for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) { TxAGC[idx1] = pPowerlevel[idx1] | (pPowerlevel[idx1] << 8) | (pPowerlevel[idx1] << 16) | (pPowerlevel[idx1] << 24); } if (pHalData->EEPROMRegulatory == 0) { tmpval = (pHalData->MCSTxPowerLevelOriginalOffset[0][6]) + (pHalData->MCSTxPowerLevelOriginalOffset[0][7]<<8); TxAGC[RF_PATH_A] += tmpval; tmpval = (pHalData->MCSTxPowerLevelOriginalOffset[0][14]) + (pHalData->MCSTxPowerLevelOriginalOffset[0][15]<<24); TxAGC[RF_PATH_B] += tmpval; } } } for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) { ptr = (u8 *)(&TxAGC[idx1]); for (idx2 = 0; idx2 < 4; idx2++) { if (*ptr > RF6052_MAX_TX_PWR) *ptr = RF6052_MAX_TX_PWR; ptr++; } } /* rf-A cck tx power */ tmpval = TxAGC[RF_PATH_A] & 0xff; PHY_SetBBReg(Adapter, rTxAGC_A_CCK1_Mcs32, bMaskByte1, tmpval); tmpval = TxAGC[RF_PATH_A] >> 8; PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval); /* rf-B cck tx power */ tmpval = TxAGC[RF_PATH_B] >> 24; PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, bMaskByte0, tmpval); tmpval = TxAGC[RF_PATH_B] & 0x00ffffff; PHY_SetBBReg(Adapter, rTxAGC_B_CCK1_55_Mcs32, 0xffffff00, tmpval); } /* PHY_RF6052SetCckTxPower */ /* powerbase0 for OFDM rates */ /* powerbase1 for HT MCS rates */ static void getPowerBase(struct rtw_adapter *Adapter, u8 *pPowerLevel, u8 Channel, u32 *OfdmBase, u32 *MCSBase) { struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter); u32 powerBase0, powerBase1; u8 Legacy_pwrdiff = 0; s8 HT20_pwrdiff = 0; u8 i, powerlevel[2]; for (i = 0; i < 2; i++) { powerlevel[i] = pPowerLevel[i]; Legacy_pwrdiff = 
pHalData->TxPwrLegacyHtDiff[i][Channel-1]; powerBase0 = powerlevel[i] + Legacy_pwrdiff; powerBase0 = powerBase0 << 24 | powerBase0 << 16 | powerBase0 << 8 | powerBase0; *(OfdmBase + i) = powerBase0; } for (i = 0; i < 2; i++) { /* Check HT20 to HT40 diff */ if (pHalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20) { HT20_pwrdiff = pHalData->TxPwrHt20Diff[i][Channel-1]; powerlevel[i] += HT20_pwrdiff; } powerBase1 = powerlevel[i]; powerBase1 = powerBase1 << 24 | powerBase1 << 16 | powerBase1 << 8 | powerBase1; *(MCSBase + i) = powerBase1; } } static void getTxPowerWriteValByRegulatory(struct rtw_adapter *Adapter, u8 Channel, u8 index, u32 *powerBase0, u32 *powerBase1, u32 *pOutWriteVal) { struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter); struct dm_priv *pdmpriv = &pHalData->dmpriv; u8 i, chnlGroup = 0, pwr_diff_limit[4]; u32 writeVal, customer_limit, rf; /* Index 0 & 1 = legacy OFDM, 2-5 = HT_MCS rate */ for (rf = 0; rf < 2; rf++) { switch (pHalData->EEPROMRegulatory) { case 0: /* Realtek better performance */ /* increase power diff defined by Realtek for * large power */ chnlGroup = 0; writeVal = pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index+(rf?8:0)] + ((index < 2) ? powerBase0[rf] : powerBase1[rf]); break; case 1: /* Realtek regulatory */ /* increase power diff defined by Realtek for * regulatory */ if (pHalData->pwrGroupCnt == 1) chnlGroup = 0; if (pHalData->pwrGroupCnt >= 3) { if (Channel <= 3) chnlGroup = 0; else if (Channel >= 4 && Channel <= 9) chnlGroup = 1; else if (Channel > 9) chnlGroup = 2; if (pHalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20) chnlGroup++; else chnlGroup += 4; } writeVal = pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index+(rf?8:0)] + ((index < 2) ? powerBase0[rf] : powerBase1[rf]); break; case 2: /* Better regulatory */ /* don't increase any power diff */ writeVal = ((index < 2) ? powerBase0[rf] : powerBase1[rf]); break; case 3: /* Customer defined power diff. 
*/ chnlGroup = 0; for (i = 0; i < 4; i++) { pwr_diff_limit[i] = (u8)((pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index + (rf ? 8 : 0)]&(0x7f << (i*8))) >> (i*8)); if (pHalData->CurrentChannelBW == HT_CHANNEL_WIDTH_40) { if (pwr_diff_limit[i] > pHalData->PwrGroupHT40[rf][Channel-1]) pwr_diff_limit[i] = pHalData->PwrGroupHT40[rf][Channel-1]; } else { if (pwr_diff_limit[i] > pHalData->PwrGroupHT20[rf][Channel-1]) pwr_diff_limit[i] = pHalData->PwrGroupHT20[rf][Channel-1]; } } customer_limit = (pwr_diff_limit[3]<<24) | (pwr_diff_limit[2]<<16) | (pwr_diff_limit[1]<<8) | (pwr_diff_limit[0]); writeVal = customer_limit + ((index<2)?powerBase0[rf]:powerBase1[rf]); break; default: chnlGroup = 0; writeVal = pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index+(rf?8:0)] + ((index < 2) ? powerBase0[rf] : powerBase1[rf]); break; } /* 20100427 Joseph: Driver dynamic Tx power shall not affect Tx power. It shall be determined by power training mechanism. */ /* Currently, we cannot fully disable driver dynamic tx power mechanism because it is referenced by BT coexist mechanism. */ /* In the future, two mechanism shall be separated from each other and maintained independantly. Thanks for Lanhsin's reminder. */ if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1) writeVal = 0x14141414; else if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level2) writeVal = 0x00000000; /* 20100628 Joseph: High power mode for BT-Coexist mechanism. */ /* This mechanism is only applied when Driver-Highpower-Mechanism is OFF. 
*/ if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_BT1) writeVal = writeVal - 0x06060606; else if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_BT2) writeVal = writeVal; *(pOutWriteVal + rf) = writeVal; } } static void writeOFDMPowerReg(struct rtw_adapter *Adapter, u8 index, u32 *pValue) { struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter); u16 RegOffset_A[6] = { rTxAGC_A_Rate18_06, rTxAGC_A_Rate54_24, rTxAGC_A_Mcs03_Mcs00, rTxAGC_A_Mcs07_Mcs04, rTxAGC_A_Mcs11_Mcs08, rTxAGC_A_Mcs15_Mcs12 }; u16 RegOffset_B[6] = { rTxAGC_B_Rate18_06, rTxAGC_B_Rate54_24, rTxAGC_B_Mcs03_Mcs00, rTxAGC_B_Mcs07_Mcs04, rTxAGC_B_Mcs11_Mcs08, rTxAGC_B_Mcs15_Mcs12 }; u8 i, rf, pwr_val[4]; u32 writeVal; u16 RegOffset; for (rf = 0; rf < 2; rf++) { writeVal = pValue[rf]; for (i = 0; i < 4; i++) { pwr_val[i] = (u8)((writeVal & (0x7f << (i * 8))) >> (i * 8)); if (pwr_val[i] > RF6052_MAX_TX_PWR) pwr_val[i] = RF6052_MAX_TX_PWR; } writeVal = pwr_val[3] << 24 | pwr_val[2] << 16 | pwr_val[1] << 8 | pwr_val[0]; if (rf == 0) RegOffset = RegOffset_A[index]; else RegOffset = RegOffset_B[index]; PHY_SetBBReg(Adapter, RegOffset, bMaskDWord, writeVal); /* 201005115 Joseph: Set Tx Power diff for Tx power training mechanism. */ if (((pHalData->rf_type == RF_2T2R) && (RegOffset == rTxAGC_A_Mcs15_Mcs12 || RegOffset == rTxAGC_B_Mcs15_Mcs12)) || ((pHalData->rf_type != RF_2T2R) && (RegOffset == rTxAGC_A_Mcs07_Mcs04 || RegOffset == rTxAGC_B_Mcs07_Mcs04))) { writeVal = pwr_val[3]; if (RegOffset == rTxAGC_A_Mcs15_Mcs12 || RegOffset == rTxAGC_A_Mcs07_Mcs04) RegOffset = 0xc90; if (RegOffset == rTxAGC_B_Mcs15_Mcs12 || RegOffset == rTxAGC_B_Mcs07_Mcs04) RegOffset = 0xc98; for (i = 0; i < 3; i++) { if (i != 2) writeVal = (writeVal > 8) ? (writeVal - 8) : 0; else writeVal = (writeVal > 6) ? 
(writeVal - 6) : 0; rtl8723au_write8(Adapter, RegOffset + i, (u8)writeVal); } } } } /*----------------------------------------------------------------------------- * Function: PHY_RF6052SetOFDMTxPower * * Overview: For legacy and HY OFDM, we must read EEPROM TX power index for * different channel and read original value in TX power * register area from 0xe00. We increase offset and * original value to be correct tx pwr. * * Input: NONE * * Output: NONE * * Return: NONE * * Revised History: * When Remark * 11/05/2008 MHC Simulate 8192 series method. * 01/06/2009 MHC 1. Prevent Path B tx power overflow or * underflow dure to A/B pwr difference or * legacy/HT pwr diff. * 2. We concern with path B legacy/HT OFDM difference. * 01/22/2009 MHC Support new EPRO format from SD3. * *---------------------------------------------------------------------------*/ void rtl8723a_PHY_RF6052SetOFDMTxPower(struct rtw_adapter *Adapter, u8 *pPowerLevel, u8 Channel) { u32 writeVal[2], powerBase0[2], powerBase1[2]; u8 index = 0; getPowerBase(Adapter, pPowerLevel, Channel, &powerBase0[0], &powerBase1[0]); for (index = 0; index < 6; index++) { getTxPowerWriteValByRegulatory(Adapter, Channel, index, &powerBase0[0], &powerBase1[0], &writeVal[0]); writeOFDMPowerReg(Adapter, index, &writeVal[0]); } } static int phy_RF6052_Config_ParaFile(struct rtw_adapter *Adapter) { u32 u4RegValue = 0; u8 eRFPath; struct bb_reg_define *pPhyReg; int rtStatus = _SUCCESS; struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter); /* 3----------------------------------------------------------------- */ /* 3 <2> Initialize RF */ /* 3----------------------------------------------------------------- */ for (eRFPath = 0; eRFPath < pHalData->NumTotalRFPath; eRFPath++) { pPhyReg = &pHalData->PHYRegDef[eRFPath]; /*----Store original RFENV control type----*/ switch (eRFPath) { case RF_PATH_A: u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV); break; case RF_PATH_B: u4RegValue = PHY_QueryBBReg(Adapter, 
pPhyReg->rfintfs, bRFSI_RFENV << 16); break; } /*----Set RF_ENV enable----*/ PHY_SetBBReg(Adapter, pPhyReg->rfintfe, bRFSI_RFENV << 16, 0x1); udelay(1);/* PlatformStallExecution(1); */ /*----Set RF_ENV output high----*/ PHY_SetBBReg(Adapter, pPhyReg->rfintfo, bRFSI_RFENV, 0x1); udelay(1);/* PlatformStallExecution(1); */ /* Set bit number of Address and Data for RF register */ PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); /* Set 1 to 4 bits for 8255 */ udelay(1);/* PlatformStallExecution(1); */ PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0); /* Set 0 to 12 bits for 8255 */ udelay(1);/* PlatformStallExecution(1); */ /*----Initialize RF fom connfiguration file----*/ switch (eRFPath) { case RF_PATH_A: ODM_ReadAndConfig_RadioA_1T_8723A(&pHalData->odmpriv); break; case RF_PATH_B: break; } /*----Restore RFENV control type----*/; switch (eRFPath) { case RF_PATH_A: PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV, u4RegValue); break; case RF_PATH_B: PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV << 16, u4RegValue); break; } if (rtStatus != _SUCCESS) { goto phy_RF6052_Config_ParaFile_Fail; } } phy_RF6052_Config_ParaFile_Fail: return rtStatus; } int PHY_RF6052_Config8723A(struct rtw_adapter *Adapter) { struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter); int rtStatus = _SUCCESS; /* Initialize general global value */ /* TODO: Extend RF_PATH_C and RF_PATH_D in the future */ if (pHalData->rf_type == RF_1T1R) pHalData->NumTotalRFPath = 1; else pHalData->NumTotalRFPath = 2; /* Config BB and RF */ rtStatus = phy_RF6052_Config_ParaFile(Adapter); return rtStatus; } /* End of HalRf6052.c */
gpl-2.0
kaylorchen/Linux_for_mini2440
tools/perf/builtin-record.c
461
16801
/* * builtin-record.c * * Builtin record command: Record the profile of a workload * (or a CPU, or a PID) into the perf.data output file - for * later analysis via perf report. */ #include "builtin.h" #include "perf.h" #include "util/util.h" #include "util/parse-options.h" #include "util/parse-events.h" #include "util/string.h" #include "util/header.h" #include "util/event.h" #include "util/debug.h" #include "util/trace-event.h" #include <unistd.h> #include <sched.h> #define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1) #define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask)) static int fd[MAX_NR_CPUS][MAX_COUNTERS]; static long default_interval = 100000; static int nr_cpus = 0; static unsigned int page_size; static unsigned int mmap_pages = 128; static int freq = 0; static int output; static const char *output_name = "perf.data"; static int group = 0; static unsigned int realtime_prio = 0; static int raw_samples = 0; static int system_wide = 0; static int profile_cpu = -1; static pid_t target_pid = -1; static pid_t child_pid = -1; static int inherit = 1; static int force = 0; static int append_file = 0; static int call_graph = 0; static int inherit_stat = 0; static int no_samples = 0; static int sample_address = 0; static int multiplex = 0; static int multiplex_fd = -1; static long samples; static struct timeval last_read; static struct timeval this_read; static u64 bytes_written; static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; static int nr_poll; static int nr_cpu; static int file_new = 1; struct perf_header *header; struct mmap_data { int counter; void *base; unsigned int mask; unsigned int prev; }; static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; static unsigned long mmap_read_head(struct mmap_data *md) { struct perf_event_mmap_page *pc = md->base; long head; head = pc->data_head; rmb(); return head; } static void mmap_write_tail(struct mmap_data *md, unsigned long tail) { struct perf_event_mmap_page *pc = md->base; /* * ensure all reads 
are done before we write the tail out. */ /* mb(); */ pc->data_tail = tail; } static void write_output(void *buf, size_t size) { while (size) { int ret = write(output, buf, size); if (ret < 0) die("failed to write"); size -= ret; buf += ret; bytes_written += ret; } } static void mmap_read(struct mmap_data *md) { unsigned int head = mmap_read_head(md); unsigned int old = md->prev; unsigned char *data = md->base + page_size; unsigned long size; void *buf; int diff; gettimeofday(&this_read, NULL); /* * If we're further behind than half the buffer, there's a chance * the writer will bite our tail and mess up the samples under us. * * If we somehow ended up ahead of the head, we got messed up. * * In either case, truncate and restart at head. */ diff = head - old; if (diff < 0) { struct timeval iv; unsigned long msecs; timersub(&this_read, &last_read, &iv); msecs = iv.tv_sec*1000 + iv.tv_usec/1000; fprintf(stderr, "WARNING: failed to keep up with mmap data." " Last read %lu msecs ago.\n", msecs); /* * head points to a known good entry, start there. 
*/ old = head; } last_read = this_read; if (old != head) samples++; size = head - old; if ((old & md->mask) + size != (head & md->mask)) { buf = &data[old & md->mask]; size = md->mask + 1 - (old & md->mask); old += size; write_output(buf, size); } buf = &data[old & md->mask]; size = head - old; old += size; write_output(buf, size); md->prev = old; mmap_write_tail(md, old); } static volatile int done = 0; static volatile int signr = -1; static void sig_handler(int sig) { done = 1; signr = sig; } static void sig_atexit(void) { if (child_pid != -1) kill(child_pid, SIGTERM); if (signr == -1) return; signal(signr, SIG_DFL); kill(getpid(), signr); } static pid_t pid_synthesize_comm_event(pid_t pid, int full) { struct comm_event comm_ev; char filename[PATH_MAX]; char bf[BUFSIZ]; FILE *fp; size_t size = 0; DIR *tasks; struct dirent dirent, *next; pid_t tgid = 0; snprintf(filename, sizeof(filename), "/proc/%d/status", pid); fp = fopen(filename, "r"); if (fp == NULL) { /* * We raced with a task exiting - just return: */ if (verbose) fprintf(stderr, "couldn't open %s\n", filename); return 0; } memset(&comm_ev, 0, sizeof(comm_ev)); while (!comm_ev.comm[0] || !comm_ev.pid) { if (fgets(bf, sizeof(bf), fp) == NULL) goto out_failure; if (memcmp(bf, "Name:", 5) == 0) { char *name = bf + 5; while (*name && isspace(*name)) ++name; size = strlen(name) - 1; memcpy(comm_ev.comm, name, size++); } else if (memcmp(bf, "Tgid:", 5) == 0) { char *tgids = bf + 5; while (*tgids && isspace(*tgids)) ++tgids; tgid = comm_ev.pid = atoi(tgids); } } comm_ev.header.type = PERF_RECORD_COMM; size = ALIGN(size, sizeof(u64)); comm_ev.header.size = sizeof(comm_ev) - (sizeof(comm_ev.comm) - size); if (!full) { comm_ev.tid = pid; write_output(&comm_ev, comm_ev.header.size); goto out_fclose; } snprintf(filename, sizeof(filename), "/proc/%d/task", pid); tasks = opendir(filename); while (!readdir_r(tasks, &dirent, &next) && next) { char *end; pid = strtol(dirent.d_name, &end, 10); if (*end) continue; 
comm_ev.tid = pid; write_output(&comm_ev, comm_ev.header.size); } closedir(tasks); out_fclose: fclose(fp); return tgid; out_failure: fprintf(stderr, "couldn't get COMM and pgid, malformed %s\n", filename); exit(EXIT_FAILURE); } static void pid_synthesize_mmap_samples(pid_t pid, pid_t tgid) { char filename[PATH_MAX]; FILE *fp; snprintf(filename, sizeof(filename), "/proc/%d/maps", pid); fp = fopen(filename, "r"); if (fp == NULL) { /* * We raced with a task exiting - just return: */ if (verbose) fprintf(stderr, "couldn't open %s\n", filename); return; } while (1) { char bf[BUFSIZ], *pbf = bf; struct mmap_event mmap_ev = { .header = { .type = PERF_RECORD_MMAP }, }; int n; size_t size; if (fgets(bf, sizeof(bf), fp) == NULL) break; /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ n = hex2u64(pbf, &mmap_ev.start); if (n < 0) continue; pbf += n + 1; n = hex2u64(pbf, &mmap_ev.len); if (n < 0) continue; pbf += n + 3; if (*pbf == 'x') { /* vm_exec */ char *execname = strchr(bf, '/'); /* Catch VDSO */ if (execname == NULL) execname = strstr(bf, "[vdso]"); if (execname == NULL) continue; size = strlen(execname); execname[size - 1] = '\0'; /* Remove \n */ memcpy(mmap_ev.filename, execname, size); size = ALIGN(size, sizeof(u64)); mmap_ev.len -= mmap_ev.start; mmap_ev.header.size = (sizeof(mmap_ev) - (sizeof(mmap_ev.filename) - size)); mmap_ev.pid = tgid; mmap_ev.tid = pid; write_output(&mmap_ev, mmap_ev.header.size); } } fclose(fp); } static void synthesize_all(void) { DIR *proc; struct dirent dirent, *next; proc = opendir("/proc"); while (!readdir_r(proc, &dirent, &next) && next) { char *end; pid_t pid, tgid; pid = strtol(dirent.d_name, &end, 10); if (*end) /* only interested in proper numerical dirents */ continue; tgid = pid_synthesize_comm_event(pid, 1); pid_synthesize_mmap_samples(pid, tgid); } closedir(proc); } static int group_fd; static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int nr) { struct perf_header_attr *h_attr; if (nr < 
header->attrs) { h_attr = header->attr[nr]; } else { h_attr = perf_header_attr__new(a); perf_header__add_attr(header, h_attr); } return h_attr; } static void create_counter(int counter, int cpu, pid_t pid) { struct perf_event_attr *attr = attrs + counter; struct perf_header_attr *h_attr; int track = !counter; /* only the first counter needs these */ struct { u64 count; u64 time_enabled; u64 time_running; u64 id; } read_data; attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING | PERF_FORMAT_ID; attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID; if (freq) { attr->sample_type |= PERF_SAMPLE_PERIOD; attr->freq = 1; attr->sample_freq = freq; } if (no_samples) attr->sample_freq = 0; if (inherit_stat) attr->inherit_stat = 1; if (sample_address) attr->sample_type |= PERF_SAMPLE_ADDR; if (call_graph) attr->sample_type |= PERF_SAMPLE_CALLCHAIN; if (raw_samples) { attr->sample_type |= PERF_SAMPLE_TIME; attr->sample_type |= PERF_SAMPLE_RAW; attr->sample_type |= PERF_SAMPLE_CPU; } attr->mmap = track; attr->comm = track; attr->inherit = (cpu < 0) && inherit; attr->disabled = 1; try_again: fd[nr_cpu][counter] = sys_perf_event_open(attr, pid, cpu, group_fd, 0); if (fd[nr_cpu][counter] < 0) { int err = errno; if (err == EPERM || err == EACCES) die("Permission error - are you root?\n"); else if (err == ENODEV && profile_cpu != -1) die("No such device - did you specify an out-of-range profile CPU?\n"); /* * If it's cycles then fall back to hrtimer * based cpu-clock-tick sw counter, which * is always available even if no PMU support: */ if (attr->type == PERF_TYPE_HARDWARE && attr->config == PERF_COUNT_HW_CPU_CYCLES) { if (verbose) warning(" ... 
trying to fall back to cpu-clock-ticks\n"); attr->type = PERF_TYPE_SOFTWARE; attr->config = PERF_COUNT_SW_CPU_CLOCK; goto try_again; } printf("\n"); error("perfcounter syscall returned with %d (%s)\n", fd[nr_cpu][counter], strerror(err)); die("No CONFIG_PERF_EVENTS=y kernel support configured?\n"); exit(-1); } h_attr = get_header_attr(attr, counter); if (!file_new) { if (memcmp(&h_attr->attr, attr, sizeof(*attr))) { fprintf(stderr, "incompatible append\n"); exit(-1); } } if (read(fd[nr_cpu][counter], &read_data, sizeof(read_data)) == -1) { perror("Unable to read perf file descriptor\n"); exit(-1); } perf_header_attr__add_id(h_attr, read_data.id); assert(fd[nr_cpu][counter] >= 0); fcntl(fd[nr_cpu][counter], F_SETFL, O_NONBLOCK); /* * First counter acts as the group leader: */ if (group && group_fd == -1) group_fd = fd[nr_cpu][counter]; if (multiplex && multiplex_fd == -1) multiplex_fd = fd[nr_cpu][counter]; if (multiplex && fd[nr_cpu][counter] != multiplex_fd) { int ret; ret = ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_SET_OUTPUT, multiplex_fd); assert(ret != -1); } else { event_array[nr_poll].fd = fd[nr_cpu][counter]; event_array[nr_poll].events = POLLIN; nr_poll++; mmap_array[nr_cpu][counter].counter = counter; mmap_array[nr_cpu][counter].prev = 0; mmap_array[nr_cpu][counter].mask = mmap_pages*page_size - 1; mmap_array[nr_cpu][counter].base = mmap(NULL, (mmap_pages+1)*page_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd[nr_cpu][counter], 0); if (mmap_array[nr_cpu][counter].base == MAP_FAILED) { error("failed to mmap with %d (%s)\n", errno, strerror(errno)); exit(-1); } } ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_ENABLE); } static void open_counters(int cpu, pid_t pid) { int counter; group_fd = -1; for (counter = 0; counter < nr_counters; counter++) create_counter(counter, cpu, pid); nr_cpu++; } static void atexit_header(void) { header->data_size += bytes_written; perf_header__write(header, output); } static int __cmd_record(int argc, const char **argv) { int i, counter; 
struct stat st; pid_t pid = 0; int flags; int ret; unsigned long waking = 0; page_size = sysconf(_SC_PAGE_SIZE); nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); assert(nr_cpus <= MAX_NR_CPUS); assert(nr_cpus >= 0); atexit(sig_atexit); signal(SIGCHLD, sig_handler); signal(SIGINT, sig_handler); if (!stat(output_name, &st) && st.st_size) { if (!force && !append_file) { fprintf(stderr, "Error, output file %s exists, use -A to append or -f to overwrite.\n", output_name); exit(-1); } } else { append_file = 0; } flags = O_CREAT|O_RDWR; if (append_file) file_new = 0; else flags |= O_TRUNC; output = open(output_name, flags, S_IRUSR|S_IWUSR); if (output < 0) { perror("failed to create output file"); exit(-1); } if (!file_new) header = perf_header__read(output); else header = perf_header__new(); if (raw_samples) { read_tracing_data(attrs, nr_counters); } else { for (i = 0; i < nr_counters; i++) { if (attrs[i].sample_type & PERF_SAMPLE_RAW) { read_tracing_data(attrs, nr_counters); break; } } } atexit(atexit_header); if (!system_wide) { pid = target_pid; if (pid == -1) pid = getpid(); open_counters(profile_cpu, pid); } else { if (profile_cpu != -1) { open_counters(profile_cpu, target_pid); } else { for (i = 0; i < nr_cpus; i++) open_counters(i, target_pid); } } if (file_new) perf_header__write(header, output); if (!system_wide) { pid_t tgid = pid_synthesize_comm_event(pid, 0); pid_synthesize_mmap_samples(pid, tgid); } else synthesize_all(); if (target_pid == -1 && argc) { pid = fork(); if (pid < 0) perror("failed to fork"); if (!pid) { if (execvp(argv[0], (char **)argv)) { perror(argv[0]); exit(-1); } } child_pid = pid; } if (realtime_prio) { struct sched_param param; param.sched_priority = realtime_prio; if (sched_setscheduler(0, SCHED_FIFO, &param)) { printf("Could not set realtime priority.\n"); exit(-1); } } for (;;) { int hits = samples; for (i = 0; i < nr_cpu; i++) { for (counter = 0; counter < nr_counters; counter++) { if (mmap_array[i][counter].base) 
mmap_read(&mmap_array[i][counter]); } } if (hits == samples) { if (done) break; ret = poll(event_array, nr_poll, -1); waking++; } if (done) { for (i = 0; i < nr_cpu; i++) { for (counter = 0; counter < nr_counters; counter++) ioctl(fd[i][counter], PERF_EVENT_IOC_DISABLE); } } } fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking); /* * Approximate RIP event size: 24 bytes. */ fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s (~%lld samples) ]\n", (double)bytes_written / 1024.0 / 1024.0, output_name, bytes_written / 24); return 0; } static const char * const record_usage[] = { "perf record [<options>] [<command>]", "perf record [<options>] -- <command> [<options>]", NULL }; static const struct option options[] = { OPT_CALLBACK('e', "event", NULL, "event", "event selector. use 'perf list' to list available events", parse_events), OPT_INTEGER('p', "pid", &target_pid, "record events on existing pid"), OPT_INTEGER('r', "realtime", &realtime_prio, "collect data with this RT SCHED_FIFO priority"), OPT_BOOLEAN('R', "raw-samples", &raw_samples, "collect raw sample records from all opened counters"), OPT_BOOLEAN('a', "all-cpus", &system_wide, "system-wide collection from all CPUs"), OPT_BOOLEAN('A', "append", &append_file, "append to the output file to do incremental profiling"), OPT_INTEGER('C', "profile_cpu", &profile_cpu, "CPU to profile on"), OPT_BOOLEAN('f', "force", &force, "overwrite existing data file"), OPT_LONG('c', "count", &default_interval, "event period to sample"), OPT_STRING('o', "output", &output_name, "file", "output file name"), OPT_BOOLEAN('i', "inherit", &inherit, "child tasks inherit counters"), OPT_INTEGER('F', "freq", &freq, "profile at this frequency"), OPT_INTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"), OPT_BOOLEAN('g', "call-graph", &call_graph, "do call-graph (stack chain/backtrace) recording"), OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose (show counter open errors, etc)"), 
OPT_BOOLEAN('s', "stat", &inherit_stat, "per thread counts"), OPT_BOOLEAN('d', "data", &sample_address, "Sample addresses"), OPT_BOOLEAN('n', "no-samples", &no_samples, "don't sample"), OPT_BOOLEAN('M', "multiplex", &multiplex, "multiplex counter output in a single channel"), OPT_END() }; int cmd_record(int argc, const char **argv, const char *prefix __used) { int counter; argc = parse_options(argc, argv, options, record_usage, PARSE_OPT_STOP_AT_NON_OPTION); if (!argc && target_pid == -1 && !system_wide) usage_with_options(record_usage, options); if (!nr_counters) { nr_counters = 1; attrs[0].type = PERF_TYPE_HARDWARE; attrs[0].config = PERF_COUNT_HW_CPU_CYCLES; } for (counter = 0; counter < nr_counters; counter++) { if (attrs[counter].sample_period) continue; attrs[counter].sample_period = default_interval; } return __cmd_record(argc, argv); }
gpl-2.0
gdayton/linux-cmps107
arch/alpha/oprofile/op_model_ev5.c
1485
5427
/** * @file arch/alpha/oprofile/op_model_ev5.c * * @remark Copyright 2002 OProfile authors * @remark Read the file COPYING * * @author Richard Henderson <rth@twiddle.net> */ #include <linux/oprofile.h> #include <linux/smp.h> #include <asm/ptrace.h> #include "op_impl.h" /* Compute all of the registers in preparation for enabling profiling. The 21164 (EV5) and 21164PC (PCA65) vary in the bit placement and meaning of the "CBOX" events. Given that we don't care about meaning at this point, arrange for the difference in bit placement to be handled by common code. */ static void common_reg_setup(struct op_register_config *reg, struct op_counter_config *ctr, struct op_system_config *sys, int cbox1_ofs, int cbox2_ofs) { int i, ctl, reset, need_reset; /* Select desired events. The event numbers are selected such that they map directly into the event selection fields: PCSEL0: 0, 1 PCSEL1: 24-39 CBOX1: 40-47 PCSEL2: 48-63 CBOX2: 64-71 There are two special cases, in that CYCLES can be measured on PCSEL[02], and SCACHE_WRITE can be measured on CBOX[12]. These event numbers are canonicalizes to their first appearance. */ ctl = 0; for (i = 0; i < 3; ++i) { unsigned long event = ctr[i].event; if (!ctr[i].enabled) continue; /* Remap the duplicate events, as described above. */ if (i == 2) { if (event == 0) event = 12+48; else if (event == 2+41) event = 4+65; } /* Convert the event numbers onto mux_select bit mask. */ if (event < 2) ctl |= event << 31; else if (event < 24) /* error */; else if (event < 40) ctl |= (event - 24) << 4; else if (event < 48) ctl |= (event - 40) << cbox1_ofs | 15 << 4; else if (event < 64) ctl |= event - 48; else if (event < 72) ctl |= (event - 64) << cbox2_ofs | 15; } reg->mux_select = ctl; /* Select processor mode. */ /* ??? Need to come up with some mechanism to trace only selected processes. For now select from pal, kernel and user mode. 
*/ ctl = 0; ctl |= !sys->enable_pal << 9; ctl |= !sys->enable_kernel << 8; ctl |= !sys->enable_user << 30; reg->proc_mode = ctl; /* Select interrupt frequencies. Take the interrupt count selected by the user, and map it onto one of the possible counter widths. If the user value is in between, compute a value to which the counter is reset at each interrupt. */ ctl = reset = need_reset = 0; for (i = 0; i < 3; ++i) { unsigned long max, hilo, count = ctr[i].count; if (!ctr[i].enabled) continue; if (count <= 256) count = 256, hilo = 3, max = 256; else { max = (i == 2 ? 16384 : 65536); hilo = 2; if (count > max) count = max; } ctr[i].count = count; ctl |= hilo << (8 - i*2); reset |= (max - count) << (48 - 16*i); if (count != max) need_reset |= 1 << i; } reg->freq = ctl; reg->reset_values = reset; reg->need_reset = need_reset; } static void ev5_reg_setup(struct op_register_config *reg, struct op_counter_config *ctr, struct op_system_config *sys) { common_reg_setup(reg, ctr, sys, 19, 22); } static void pca56_reg_setup(struct op_register_config *reg, struct op_counter_config *ctr, struct op_system_config *sys) { common_reg_setup(reg, ctr, sys, 8, 11); } /* Program all of the registers in preparation for enabling profiling. */ static void ev5_cpu_setup (void *x) { struct op_register_config *reg = x; wrperfmon(2, reg->mux_select); wrperfmon(3, reg->proc_mode); wrperfmon(4, reg->freq); wrperfmon(6, reg->reset_values); } /* CTR is a counter for which the user has requested an interrupt count in between one of the widths selectable in hardware. Reset the count for CTR to the value stored in REG->RESET_VALUES. For EV5, this means disabling profiling, reading the current values, masking in the value for the desired register, writing, then turning profiling back on. This can be streamlined if profiling is only enabled for user mode. In that case we know that the counters are not currently incrementing (due to being in kernel mode). 
*/ static void ev5_reset_ctr(struct op_register_config *reg, unsigned long ctr) { unsigned long values, mask, not_pk, reset_values; mask = (ctr == 0 ? 0xfffful << 48 : ctr == 1 ? 0xfffful << 32 : 0x3fff << 16); not_pk = 1 << 9 | 1 << 8; reset_values = reg->reset_values; if ((reg->proc_mode & not_pk) == not_pk) { values = wrperfmon(5, 0); values = (reset_values & mask) | (values & ~mask & -2); wrperfmon(6, values); } else { wrperfmon(0, -1); values = wrperfmon(5, 0); values = (reset_values & mask) | (values & ~mask & -2); wrperfmon(6, values); wrperfmon(1, reg->enable); } } static void ev5_handle_interrupt(unsigned long which, struct pt_regs *regs, struct op_counter_config *ctr) { /* Record the sample. */ oprofile_add_sample(regs, which); } struct op_axp_model op_model_ev5 = { .reg_setup = ev5_reg_setup, .cpu_setup = ev5_cpu_setup, .reset_ctr = ev5_reset_ctr, .handle_interrupt = ev5_handle_interrupt, .cpu_type = "alpha/ev5", .num_counters = 3, .can_set_proc_mode = 1, }; struct op_axp_model op_model_pca56 = { .reg_setup = pca56_reg_setup, .cpu_setup = ev5_cpu_setup, .reset_ctr = ev5_reset_ctr, .handle_interrupt = ev5_handle_interrupt, .cpu_type = "alpha/pca56", .num_counters = 3, .can_set_proc_mode = 1, };
gpl-2.0
idl3r/ktsan
arch/alpha/oprofile/op_model_ev67.c
1485
7138
/** * @file arch/alpha/oprofile/op_model_ev67.c * * @remark Copyright 2002 OProfile authors * @remark Read the file COPYING * * @author Richard Henderson <rth@twiddle.net> * @author Falk Hueffner <falk@debian.org> */ #include <linux/oprofile.h> #include <linux/smp.h> #include <asm/ptrace.h> #include "op_impl.h" /* Compute all of the registers in preparation for enabling profiling. */ static void ev67_reg_setup(struct op_register_config *reg, struct op_counter_config *ctr, struct op_system_config *sys) { unsigned long ctl, reset, need_reset, i; /* Select desired events. */ ctl = 1UL << 4; /* Enable ProfileMe mode. */ /* The event numbers are chosen so we can use them directly if PCTR1 is enabled. */ if (ctr[1].enabled) { ctl |= (ctr[1].event & 3) << 2; } else { if (ctr[0].event == 0) /* cycles */ ctl |= 1UL << 2; } reg->mux_select = ctl; /* Select logging options. */ /* ??? Need to come up with some mechanism to trace only selected processes. EV67 does not have a mechanism to select kernel or user mode only. For now, enable always. */ reg->proc_mode = 0; /* EV67 cannot change the width of the counters as with the other implementations. But fortunately, we can write to the counters and set the value such that it will overflow at the right time. */ reset = need_reset = 0; for (i = 0; i < 2; ++i) { unsigned long count = ctr[i].count; if (!ctr[i].enabled) continue; if (count > 0x100000) count = 0x100000; ctr[i].count = count; reset |= (0x100000 - count) << (i ? 6 : 28); if (count != 0x100000) need_reset |= 1 << i; } reg->reset_values = reset; reg->need_reset = need_reset; } /* Program all of the registers in preparation for enabling profiling. */ static void ev67_cpu_setup (void *x) { struct op_register_config *reg = x; wrperfmon(2, reg->mux_select); wrperfmon(3, reg->proc_mode); wrperfmon(6, reg->reset_values | 3); } /* CTR is a counter for which the user has requested an interrupt count in between one of the widths selectable in hardware. 
Reset the count for CTR to the value stored in REG->RESET_VALUES. */ static void ev67_reset_ctr(struct op_register_config *reg, unsigned long ctr) { wrperfmon(6, reg->reset_values | (1 << ctr)); } /* ProfileMe conditions which will show up as counters. We can also detect the following, but it seems unlikely that anybody is interested in counting them: * Reset * MT_FPCR (write to floating point control register) * Arithmetic trap * Dstream Fault * Machine Check (ECC fault, etc.) * OPCDEC (illegal opcode) * Floating point disabled * Differentiate between DTB single/double misses and 3 or 4 level page tables * Istream access violation * Interrupt * Icache Parity Error. * Instruction killed (nop, trapb) Unfortunately, there seems to be no way to detect Dcache and Bcache misses; the latter could be approximated by making the counter count Bcache misses, but that is not precise. We model this as 20 counters: * PCTR0 * PCTR1 * 9 ProfileMe events, induced by PCTR0 * 9 ProfileMe events, induced by PCTR1 */ enum profileme_counters { PM_STALLED, /* Stalled for at least one cycle between the fetch and map stages */ PM_TAKEN, /* Conditional branch taken */ PM_MISPREDICT, /* Branch caused mispredict trap */ PM_ITB_MISS, /* ITB miss */ PM_DTB_MISS, /* DTB miss */ PM_REPLAY, /* Replay trap */ PM_LOAD_STORE, /* Load-store order trap */ PM_ICACHE_MISS, /* Icache miss */ PM_UNALIGNED, /* Unaligned Load/Store */ PM_NUM_COUNTERS }; static inline void op_add_pm(unsigned long pc, int kern, unsigned long counter, struct op_counter_config *ctr, unsigned long event) { unsigned long fake_counter = 2 + event; if (counter == 1) fake_counter += PM_NUM_COUNTERS; if (ctr[fake_counter].enabled) oprofile_add_pc(pc, kern, fake_counter); } static void ev67_handle_interrupt(unsigned long which, struct pt_regs *regs, struct op_counter_config *ctr) { unsigned long pmpc, pctr_ctl; int kern = !user_mode(regs); int mispredict = 0; union { unsigned long v; struct { unsigned reserved: 30; /* 0-29 */ unsigned 
overcount: 3; /* 30-32 */ unsigned icache_miss: 1; /* 33 */ unsigned trap_type: 4; /* 34-37 */ unsigned load_store: 1; /* 38 */ unsigned trap: 1; /* 39 */ unsigned mispredict: 1; /* 40 */ } fields; } i_stat; enum trap_types { TRAP_REPLAY, TRAP_INVALID0, TRAP_DTB_DOUBLE_MISS_3, TRAP_DTB_DOUBLE_MISS_4, TRAP_FP_DISABLED, TRAP_UNALIGNED, TRAP_DTB_SINGLE_MISS, TRAP_DSTREAM_FAULT, TRAP_OPCDEC, TRAP_INVALID1, TRAP_MACHINE_CHECK, TRAP_INVALID2, TRAP_ARITHMETIC, TRAP_INVALID3, TRAP_MT_FPCR, TRAP_RESET }; pmpc = wrperfmon(9, 0); /* ??? Don't know how to handle physical-mode PALcode address. */ if (pmpc & 1) return; pmpc &= ~2; /* clear reserved bit */ i_stat.v = wrperfmon(8, 0); if (i_stat.fields.trap) { switch (i_stat.fields.trap_type) { case TRAP_INVALID1: case TRAP_INVALID2: case TRAP_INVALID3: /* Pipeline redirection occurred. PMPC points to PALcode. Recognize ITB miss by PALcode offset address, and get actual PC from EXC_ADDR. */ oprofile_add_pc(regs->pc, kern, which); if ((pmpc & ((1 << 15) - 1)) == 581) op_add_pm(regs->pc, kern, which, ctr, PM_ITB_MISS); /* Most other bit and counter values will be those for the first instruction in the fault handler, so we're done. */ return; case TRAP_REPLAY: op_add_pm(pmpc, kern, which, ctr, (i_stat.fields.load_store ? PM_LOAD_STORE : PM_REPLAY)); break; case TRAP_DTB_DOUBLE_MISS_3: case TRAP_DTB_DOUBLE_MISS_4: case TRAP_DTB_SINGLE_MISS: op_add_pm(pmpc, kern, which, ctr, PM_DTB_MISS); break; case TRAP_UNALIGNED: op_add_pm(pmpc, kern, which, ctr, PM_UNALIGNED); break; case TRAP_INVALID0: case TRAP_FP_DISABLED: case TRAP_DSTREAM_FAULT: case TRAP_OPCDEC: case TRAP_MACHINE_CHECK: case TRAP_ARITHMETIC: case TRAP_MT_FPCR: case TRAP_RESET: break; } /* ??? JSR/JMP/RET/COR or HW_JSR/HW_JMP/HW_RET/HW_COR mispredicts do not set this bit but can be recognized by the presence of one of these instructions at the PMPC location with bit 39 set. 
*/ if (i_stat.fields.mispredict) { mispredict = 1; op_add_pm(pmpc, kern, which, ctr, PM_MISPREDICT); } } oprofile_add_pc(pmpc, kern, which); pctr_ctl = wrperfmon(5, 0); if (pctr_ctl & (1UL << 27)) op_add_pm(pmpc, kern, which, ctr, PM_STALLED); /* Unfortunately, TAK is undefined on mispredicted branches. ??? It is also undefined for non-cbranch insns, should check that. */ if (!mispredict && pctr_ctl & (1UL << 0)) op_add_pm(pmpc, kern, which, ctr, PM_TAKEN); } struct op_axp_model op_model_ev67 = { .reg_setup = ev67_reg_setup, .cpu_setup = ev67_cpu_setup, .reset_ctr = ev67_reset_ctr, .handle_interrupt = ev67_handle_interrupt, .cpu_type = "alpha/ev67", .num_counters = 20, .can_set_proc_mode = 0, };
gpl-2.0
esialb/yocto-3.10-edison
drivers/usb/phy/phy-fsm-usb.c
1997
9680
/* * OTG Finite State Machine from OTG spec * * Copyright (C) 2007,2008 Freescale Semiconductor, Inc. * * Author: Li Yang <LeoLi@freescale.com> * Jerry Huang <Chang-Ming.Huang@freescale.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/usb.h> #include <linux/usb/gadget.h> #include <linux/usb/otg.h> #include "phy-fsm-usb.h" /* Change USB protocol when there is a protocol change */ static int otg_set_protocol(struct otg_fsm *fsm, int protocol) { int ret = 0; if (fsm->protocol != protocol) { VDBG("Changing role fsm->protocol= %d; new protocol= %d\n", fsm->protocol, protocol); /* stop old protocol */ if (fsm->protocol == PROTO_HOST) ret = fsm->ops->start_host(fsm, 0); else if (fsm->protocol == PROTO_GADGET) ret = fsm->ops->start_gadget(fsm, 0); if (ret) return ret; /* start new protocol */ if (protocol == PROTO_HOST) ret = fsm->ops->start_host(fsm, 1); else if (protocol == PROTO_GADGET) ret = fsm->ops->start_gadget(fsm, 1); if (ret) return ret; fsm->protocol = protocol; return 0; } return 0; } static int state_changed; /* Called when leaving a state. 
Do state clean up jobs here */ void otg_leave_state(struct otg_fsm *fsm, enum usb_otg_state old_state) { switch (old_state) { case OTG_STATE_B_IDLE: otg_del_timer(fsm, b_se0_srp_tmr); fsm->b_se0_srp = 0; break; case OTG_STATE_B_SRP_INIT: fsm->b_srp_done = 0; break; case OTG_STATE_B_PERIPHERAL: break; case OTG_STATE_B_WAIT_ACON: otg_del_timer(fsm, b_ase0_brst_tmr); fsm->b_ase0_brst_tmout = 0; break; case OTG_STATE_B_HOST: break; case OTG_STATE_A_IDLE: break; case OTG_STATE_A_WAIT_VRISE: otg_del_timer(fsm, a_wait_vrise_tmr); fsm->a_wait_vrise_tmout = 0; break; case OTG_STATE_A_WAIT_BCON: otg_del_timer(fsm, a_wait_bcon_tmr); fsm->a_wait_bcon_tmout = 0; break; case OTG_STATE_A_HOST: otg_del_timer(fsm, a_wait_enum_tmr); break; case OTG_STATE_A_SUSPEND: otg_del_timer(fsm, a_aidl_bdis_tmr); fsm->a_aidl_bdis_tmout = 0; fsm->a_suspend_req = 0; break; case OTG_STATE_A_PERIPHERAL: break; case OTG_STATE_A_WAIT_VFALL: otg_del_timer(fsm, a_wait_vrise_tmr); break; case OTG_STATE_A_VBUS_ERR: break; default: break; } } /* Called when entering a state */ int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state) { state_changed = 1; if (fsm->otg->phy->state == new_state) return 0; VDBG("Set state: %s\n", usb_otg_state_string(new_state)); otg_leave_state(fsm, fsm->otg->phy->state); switch (new_state) { case OTG_STATE_B_IDLE: otg_drv_vbus(fsm, 0); otg_chrg_vbus(fsm, 0); otg_loc_conn(fsm, 0); otg_loc_sof(fsm, 0); otg_set_protocol(fsm, PROTO_UNDEF); otg_add_timer(fsm, b_se0_srp_tmr); break; case OTG_STATE_B_SRP_INIT: otg_start_pulse(fsm); otg_loc_sof(fsm, 0); otg_set_protocol(fsm, PROTO_UNDEF); otg_add_timer(fsm, b_srp_fail_tmr); break; case OTG_STATE_B_PERIPHERAL: otg_chrg_vbus(fsm, 0); otg_loc_conn(fsm, 1); otg_loc_sof(fsm, 0); otg_set_protocol(fsm, PROTO_GADGET); break; case OTG_STATE_B_WAIT_ACON: otg_chrg_vbus(fsm, 0); otg_loc_conn(fsm, 0); otg_loc_sof(fsm, 0); otg_set_protocol(fsm, PROTO_HOST); otg_add_timer(fsm, b_ase0_brst_tmr); fsm->a_bus_suspend = 0; break; case 
OTG_STATE_B_HOST: otg_chrg_vbus(fsm, 0); otg_loc_conn(fsm, 0); otg_loc_sof(fsm, 1); otg_set_protocol(fsm, PROTO_HOST); usb_bus_start_enum(fsm->otg->host, fsm->otg->host->otg_port); break; case OTG_STATE_A_IDLE: otg_drv_vbus(fsm, 0); otg_chrg_vbus(fsm, 0); otg_loc_conn(fsm, 0); otg_loc_sof(fsm, 0); otg_set_protocol(fsm, PROTO_HOST); break; case OTG_STATE_A_WAIT_VRISE: otg_drv_vbus(fsm, 1); otg_loc_conn(fsm, 0); otg_loc_sof(fsm, 0); otg_set_protocol(fsm, PROTO_HOST); otg_add_timer(fsm, a_wait_vrise_tmr); break; case OTG_STATE_A_WAIT_BCON: otg_drv_vbus(fsm, 1); otg_loc_conn(fsm, 0); otg_loc_sof(fsm, 0); otg_set_protocol(fsm, PROTO_HOST); otg_add_timer(fsm, a_wait_bcon_tmr); break; case OTG_STATE_A_HOST: otg_drv_vbus(fsm, 1); otg_loc_conn(fsm, 0); otg_loc_sof(fsm, 1); otg_set_protocol(fsm, PROTO_HOST); /* * When HNP is triggered while a_bus_req = 0, a_host will * suspend too fast to complete a_set_b_hnp_en */ if (!fsm->a_bus_req || fsm->a_suspend_req) otg_add_timer(fsm, a_wait_enum_tmr); break; case OTG_STATE_A_SUSPEND: otg_drv_vbus(fsm, 1); otg_loc_conn(fsm, 0); otg_loc_sof(fsm, 0); otg_set_protocol(fsm, PROTO_HOST); otg_add_timer(fsm, a_aidl_bdis_tmr); break; case OTG_STATE_A_PERIPHERAL: otg_loc_conn(fsm, 1); otg_loc_sof(fsm, 0); otg_set_protocol(fsm, PROTO_GADGET); otg_drv_vbus(fsm, 1); break; case OTG_STATE_A_WAIT_VFALL: otg_drv_vbus(fsm, 0); otg_loc_conn(fsm, 0); otg_loc_sof(fsm, 0); otg_set_protocol(fsm, PROTO_HOST); break; case OTG_STATE_A_VBUS_ERR: otg_drv_vbus(fsm, 0); otg_loc_conn(fsm, 0); otg_loc_sof(fsm, 0); otg_set_protocol(fsm, PROTO_UNDEF); break; default: break; } fsm->otg->phy->state = new_state; return 0; } /* State change judgement */ int otg_statemachine(struct otg_fsm *fsm) { enum usb_otg_state state; unsigned long flags; spin_lock_irqsave(&fsm->lock, flags); state = fsm->otg->phy->state; state_changed = 0; /* State machine state change judgement */ switch (state) { case OTG_STATE_UNDEFINED: VDBG("fsm->id = %d\n", fsm->id); if (fsm->id) 
otg_set_state(fsm, OTG_STATE_B_IDLE); else otg_set_state(fsm, OTG_STATE_A_IDLE); break; case OTG_STATE_B_IDLE: if (!fsm->id) otg_set_state(fsm, OTG_STATE_A_IDLE); else if (fsm->b_sess_vld && fsm->otg->gadget) otg_set_state(fsm, OTG_STATE_B_PERIPHERAL); else if (fsm->b_bus_req && fsm->b_sess_end && fsm->b_se0_srp) otg_set_state(fsm, OTG_STATE_B_SRP_INIT); break; case OTG_STATE_B_SRP_INIT: if (!fsm->id || fsm->b_srp_done) otg_set_state(fsm, OTG_STATE_B_IDLE); break; case OTG_STATE_B_PERIPHERAL: if (!fsm->id || !fsm->b_sess_vld) otg_set_state(fsm, OTG_STATE_B_IDLE); else if (fsm->b_bus_req && fsm->otg-> gadget->b_hnp_enable && fsm->a_bus_suspend) otg_set_state(fsm, OTG_STATE_B_WAIT_ACON); break; case OTG_STATE_B_WAIT_ACON: if (fsm->a_conn) otg_set_state(fsm, OTG_STATE_B_HOST); else if (!fsm->id || !fsm->b_sess_vld) otg_set_state(fsm, OTG_STATE_B_IDLE); else if (fsm->a_bus_resume || fsm->b_ase0_brst_tmout) { fsm->b_ase0_brst_tmout = 0; otg_set_state(fsm, OTG_STATE_B_PERIPHERAL); } break; case OTG_STATE_B_HOST: if (!fsm->id || !fsm->b_sess_vld) otg_set_state(fsm, OTG_STATE_B_IDLE); else if (!fsm->b_bus_req || !fsm->a_conn) otg_set_state(fsm, OTG_STATE_B_PERIPHERAL); break; case OTG_STATE_A_IDLE: if (fsm->id) otg_set_state(fsm, OTG_STATE_B_IDLE); else if (!fsm->a_bus_drop && (fsm->a_bus_req || fsm->a_srp_det)) otg_set_state(fsm, OTG_STATE_A_WAIT_VRISE); break; case OTG_STATE_A_WAIT_VRISE: if (fsm->id || fsm->a_bus_drop || fsm->a_vbus_vld || fsm->a_wait_vrise_tmout) { otg_set_state(fsm, OTG_STATE_A_WAIT_BCON); } break; case OTG_STATE_A_WAIT_BCON: if (!fsm->a_vbus_vld) otg_set_state(fsm, OTG_STATE_A_VBUS_ERR); else if (fsm->b_conn) otg_set_state(fsm, OTG_STATE_A_HOST); else if (fsm->id | fsm->a_bus_drop | fsm->a_wait_bcon_tmout) otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL); break; case OTG_STATE_A_HOST: if ((!fsm->a_bus_req || fsm->a_suspend_req) && fsm->otg->host->b_hnp_enable) otg_set_state(fsm, OTG_STATE_A_SUSPEND); else if (fsm->id || !fsm->b_conn || fsm->a_bus_drop) 
otg_set_state(fsm, OTG_STATE_A_WAIT_BCON); else if (!fsm->a_vbus_vld) otg_set_state(fsm, OTG_STATE_A_VBUS_ERR); break; case OTG_STATE_A_SUSPEND: if (!fsm->b_conn && fsm->otg->host->b_hnp_enable) otg_set_state(fsm, OTG_STATE_A_PERIPHERAL); else if (!fsm->b_conn && !fsm->otg->host->b_hnp_enable) otg_set_state(fsm, OTG_STATE_A_WAIT_BCON); else if (fsm->a_bus_req || fsm->b_bus_resume) otg_set_state(fsm, OTG_STATE_A_HOST); else if (fsm->id || fsm->a_bus_drop || fsm->a_aidl_bdis_tmout) otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL); else if (!fsm->a_vbus_vld) otg_set_state(fsm, OTG_STATE_A_VBUS_ERR); break; case OTG_STATE_A_PERIPHERAL: if (fsm->id || fsm->a_bus_drop) otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL); else if (fsm->b_bus_suspend) otg_set_state(fsm, OTG_STATE_A_WAIT_BCON); else if (!fsm->a_vbus_vld) otg_set_state(fsm, OTG_STATE_A_VBUS_ERR); break; case OTG_STATE_A_WAIT_VFALL: if (fsm->id || fsm->a_bus_req || (!fsm->a_sess_vld && !fsm->b_conn)) otg_set_state(fsm, OTG_STATE_A_IDLE); break; case OTG_STATE_A_VBUS_ERR: if (fsm->id || fsm->a_bus_drop || fsm->a_clr_err) otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL); break; default: break; } spin_unlock_irqrestore(&fsm->lock, flags); VDBG("quit statemachine, changed = %d\n", state_changed); return state_changed; }
gpl-2.0
davidmueller13/f2fs-backport
arch/arm/kernel/stacktrace.c
1997
3346
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>

#include <asm/stacktrace.h>

#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
/*
 * Unwind the current stack frame and store the new register values in the
 * structure passed as argument. Unwinding is equivalent to a function return,
 * hence the new PC value rather than LR should be used for backtrace.
 *
 * With framepointer enabled, a simple function prologue looks like this:
 *	mov	ip, sp
 *	stmdb	sp!, {fp, ip, lr, pc}
 *	sub	fp, ip, #4
 *
 * A simple function epilogue looks like this:
 *	ldm	sp, {fp, sp, pc}
 *
 * Note that with framepointer enabled, even the leaf functions have the same
 * prologue and epilogue, therefore we can ignore the LR value in this case.
 */
int notrace unwind_frame(struct stackframe *frame)
{
	unsigned long high, low;
	unsigned long fp = frame->fp;

	/* only go to a higher address on the stack */
	low = frame->sp;
	high = ALIGN(low, THREAD_SIZE);

	/*
	 * check current frame pointer is within bounds:
	 * the saved {fp, sp, pc} triple lives at fp-12..fp-4, so fp must be
	 * at least low+12 and the last word must stay below the stack top.
	 */
	if (fp < (low + 12) || fp + 4 >= high)
		return -EINVAL;

	/* restore the registers from the stack frame */
	frame->fp = *(unsigned long *)(fp - 12);
	frame->sp = *(unsigned long *)(fp - 8);
	frame->pc = *(unsigned long *)(fp - 4);

	return 0;
}
#endif

/*
 * Walk the stack one frame at a time, invoking fn() on each frame.
 * Stops when fn() returns non-zero or when the frame can no longer
 * be unwound (unwind_frame() returns an error).
 */
void notrace walk_stackframe(struct stackframe *frame,
		     int (*fn)(struct stackframe *, void *), void *data)
{
	while (1) {
		int ret;

		if (fn(frame, data))
			break;
		ret = unwind_frame(frame);
		if (ret < 0)
			break;
	}
}
EXPORT_SYMBOL(walk_stackframe);

#ifdef CONFIG_STACKTRACE
/* Per-walk context handed to save_trace() through walk_stackframe(). */
struct stack_trace_data {
	struct stack_trace *trace;
	unsigned int no_sched_functions;	/* skip scheduler internals when set */
	unsigned int skip;			/* leading frames still to discard */
};

/*
 * walk_stackframe() callback: record one PC value into the trace buffer.
 * Returns non-zero (stop walking) once the buffer is full.
 */
static int save_trace(struct stackframe *frame, void *d)
{
	struct stack_trace_data *data = d;
	struct stack_trace *trace = data->trace;
	unsigned long addr = frame->pc;

	if (data->no_sched_functions && in_sched_functions(addr))
		return 0;
	if (data->skip) {
		data->skip--;
		return 0;
	}

	trace->entries[trace->nr_entries++] = addr;

	return trace->nr_entries >= trace->max_entries;
}

/*
 * Capture the call trace of @tsk into @trace. For the current task the
 * walk starts from this function's own frame; for other tasks it starts
 * from the context saved at the last switch (UP only - see below).
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;

	if (tsk != current) {
#ifdef CONFIG_SMP
		/*
		 * What guarantees do we have here that 'tsk' is not
		 * running on another CPU?  For now, ignore it as we
		 * can't guarantee we won't explode.
		 */
		if (trace->nr_entries < trace->max_entries)
			trace->entries[trace->nr_entries++] = ULONG_MAX;
		return;
#else
		data.no_sched_functions = 1;
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.lr = 0;		/* recovered from the stack */
		frame.pc = thread_saved_pc(tsk);
#endif
	} else {
		/* Tracing ourselves: seed the frame from our own registers. */
		register unsigned long current_sp asm ("sp");

		data.no_sched_functions = 0;
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_sp;
		frame.lr = (unsigned long)__builtin_return_address(0);
		frame.pc = (unsigned long)save_stack_trace_tsk;
	}

	walk_stackframe(&frame, save_trace, &data);
	/* Terminate the trace with ULONG_MAX if there is room. */
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}

void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_tsk(current, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
gpl-2.0
mikehollinger/linux
drivers/staging/crystalhd/crystalhd_misc.c
2253
24573
/***************************************************************************
 * Copyright (c) 2005-2009, Broadcom Corporation.
 *
 *  Name: crystalhd_misc . c
 *
 *  Description:
 *		BCM70012 Linux driver misc routines.
 *
 *  HISTORY:
 *
 **********************************************************************
 * This file is part of the crystalhd device driver.
 *
 * This driver is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, version 2 of the License.
 *
 * This driver is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this driver.  If not, see <http://www.gnu.org/licenses/>.
 **********************************************************************/

#include "crystalhd.h"

#include <linux/slab.h>

/* Global driver log verbosity, consumed by the BCMLOG macros. */
uint32_t g_linklog_level;

/*
 * Read one dword from device DRAM: select the 512K window via
 * DCI_DRAM_BASE_ADDR (mem_off >> 19), then read through the windowed
 * decoder range (mask 0x0007FFFF keeps the in-window offset).
 */
static inline uint32_t crystalhd_dram_rd(struct crystalhd_adp *adp, uint32_t mem_off)
{
	crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (mem_off >> 19));
	return bc_dec_reg_rd(adp, (0x00380000 | (mem_off & 0x0007FFFF)));
}

/* Write one dword to device DRAM (same windowing scheme as the read). */
static inline void crystalhd_dram_wr(struct crystalhd_adp *adp, uint32_t mem_off, uint32_t val)
{
	crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (mem_off >> 19));
	bc_dec_reg_wr(adp, (0x00380000 | (mem_off & 0x0007FFFF)), val);
}

/* DRAM range-check stub: currently accepts any range unconditionally. */
static inline enum BC_STATUS bc_chk_dram_range(struct crystalhd_adp *adp,
					 uint32_t start_off, uint32_t cnt)
{
	return BC_STS_SUCCESS;
}

/*
 * Pop one DIO request from the adapter's singly-linked free list
 * (spinlock protected). Returns NULL when the pool is exhausted or
 * @adp is invalid.
 */
static struct crystalhd_dio_req *crystalhd_alloc_dio(struct crystalhd_adp *adp)
{
	unsigned long flags = 0;
	struct crystalhd_dio_req *temp = NULL;

	if (!adp) {
		BCMLOG_ERR("Invalid Arg!!\n");
		return temp;
	}

	spin_lock_irqsave(&adp->lock, flags);
	temp = adp->ua_map_free_head;
	if (temp)
		adp->ua_map_free_head = adp->ua_map_free_head->next;
	spin_unlock_irqrestore(&adp->lock, flags);

	return temp;
}

/*
 * Return a DIO request to the free list, clearing its per-transfer
 * state first so a stale mapping can never be reused.
 */
static void crystalhd_free_dio(struct crystalhd_adp *adp, struct crystalhd_dio_req *dio)
{
	unsigned long flags = 0;

	if (!adp || !dio)
		return;
	spin_lock_irqsave(&adp->lock, flags);
	dio->sig = crystalhd_dio_inv;
	dio->page_cnt = 0;
	dio->fb_size = 0;
	memset(&dio->uinfo, 0, sizeof(dio->uinfo));
	dio->next = adp->ua_map_free_head;
	adp->ua_map_free_head = dio;
	spin_unlock_irqrestore(&adp->lock, flags);
}

/* Pop one element from the adapter's element pool, zeroed before return. */
static struct crystalhd_elem *crystalhd_alloc_elem(struct crystalhd_adp *adp)
{
	unsigned long flags = 0;
	struct crystalhd_elem *temp = NULL;

	if (!adp)
		return temp;
	spin_lock_irqsave(&adp->lock, flags);
	temp = adp->elem_pool_head;
	if (temp) {
		adp->elem_pool_head = adp->elem_pool_head->flink;
		memset(temp, 0, sizeof(*temp));
	}
	spin_unlock_irqrestore(&adp->lock, flags);

	return temp;
}

/* Return a list element to the pool (LIFO push, spinlock protected). */
static void crystalhd_free_elem(struct crystalhd_adp *adp,
				struct crystalhd_elem *elem)
{
	unsigned long flags = 0;

	if (!adp || !elem)
		return;
	spin_lock_irqsave(&adp->lock, flags);
	elem->flink = adp->elem_pool_head;
	adp->elem_pool_head = elem;
	spin_unlock_irqrestore(&adp->lock, flags);
}

/* sg_set_page() wrapper; on x86_64 the length is mirrored into dma_length. */
static inline void crystalhd_set_sg(struct scatterlist *sg, struct page *page,
				  unsigned int len, unsigned int offset)
{
	sg_set_page(sg, page, len, offset);
#ifdef CONFIG_X86_64
	sg->dma_length = len;
#endif
}

static inline void crystalhd_init_sg(struct scatterlist *sg, unsigned int entries)
{
	/* http://lkml.org/lkml/2007/11/27/68 */
	sg_init_table(sg, entries);
}

/*========================== Extern ========================================*/
/**
 * bc_dec_reg_rd - Read 7412's device register.
 * @adp: Adapter instance
 * @reg_off: Register offset.
 *
 * Return:
 *	32bit value read
 *
 * 7412's device register read routine. This interface use
 * 7412's device access range mapped from BAR-2 (4M) of PCIe
 * configuration space.
 */
uint32_t bc_dec_reg_rd(struct crystalhd_adp *adp, uint32_t reg_off)
{
	/*
	 * NOTE(review): reg_off == pci_mem_len passes this check, yet the
	 * readl() below would touch bytes beyond the mapped range. Confirm
	 * whether the bound should be >= (the same question applies to the
	 * three accessors that follow).
	 */
	if (!adp || (reg_off > adp->pci_mem_len)) {
		BCMLOG_ERR("dec_rd_reg_off outof range: 0x%08x\n", reg_off);
		return 0;
	}

	return readl(adp->addr + reg_off);
}

/**
 * bc_dec_reg_wr - Write 7412's device register
 * @adp: Adapter instance
 * @reg_off: Register offset.
 * @val: Dword value to be written.
 *
 * Return:
 *	none.
 *
 * 7412's device register write routine. This interface use
 * 7412's device access range mapped from BAR-2 (4M) of PCIe
 * configuration space.
 */
void bc_dec_reg_wr(struct crystalhd_adp *adp, uint32_t reg_off, uint32_t val)
{
	if (!adp || (reg_off > adp->pci_mem_len)) {
		BCMLOG_ERR("dec_wr_reg_off outof range: 0x%08x\n", reg_off);
		return;
	}
	writel(val, adp->addr + reg_off);
	/* presumably a hardware settle time after decoder writes -- TODO confirm */
	udelay(8);
}

/**
 * crystalhd_reg_rd - Read Link's device register.
 * @adp: Adapter instance
 * @reg_off: Register offset.
 *
 * Return:
 *	32bit value read
 *
 * Link device register read routine. This interface use
 * Link's device access range mapped from BAR-1 (64K) of PCIe
 * configuration space.
 */
uint32_t crystalhd_reg_rd(struct crystalhd_adp *adp, uint32_t reg_off)
{
	if (!adp || (reg_off > adp->pci_i2o_len)) {
		BCMLOG_ERR("link_rd_reg_off outof range: 0x%08x\n", reg_off);
		return 0;
	}
	return readl(adp->i2o_addr + reg_off);
}

/**
 * crystalhd_reg_wr - Write Link's device register
 * @adp: Adapter instance
 * @reg_off: Register offset.
 * @val: Dword value to be written.
 *
 * Return:
 *	none.
 *
 * Link device register write routine. This interface use
 * Link's device access range mapped from BAR-1 (64K) of PCIe
 * configuration space.
 */
void crystalhd_reg_wr(struct crystalhd_adp *adp, uint32_t reg_off, uint32_t val)
{
	if (!adp || (reg_off > adp->pci_i2o_len)) {
		BCMLOG_ERR("link_wr_reg_off outof range: 0x%08x\n", reg_off);
		return;
	}
	writel(val, adp->i2o_addr + reg_off);
}

/**
 * crystalhd_mem_rd - Read data from 7412's DRAM area.
 * @adp: Adapter instance
 * @start_off: Start offset.
 * @dw_cnt: Count in dwords.
 * @rd_buff: Buffer to copy the data from dram.
 *
 * Return:
 *	Status.
 *
 * 7412's Dram read routine.
 */
enum BC_STATUS crystalhd_mem_rd(struct crystalhd_adp *adp, uint32_t start_off,
			      uint32_t dw_cnt, uint32_t *rd_buff)
{
	uint32_t ix = 0;

	if (!adp || !rd_buff ||
	    (bc_chk_dram_range(adp, start_off, dw_cnt) != BC_STS_SUCCESS)) {
		BCMLOG_ERR("Invalid arg\n");
		return BC_STS_INV_ARG;
	}
	/* one windowed dword access per iteration */
	for (ix = 0; ix < dw_cnt; ix++)
		rd_buff[ix] = crystalhd_dram_rd(adp, (start_off + (ix * 4)));

	return BC_STS_SUCCESS;
}

/**
 * crystalhd_mem_wr - Write data to 7412's DRAM area.
 * @adp: Adapter instance
 * @start_off: Start offset.
 * @dw_cnt: Count in dwords.
 * @wr_buff: Data Buffer to be written.
 *
 * Return:
 *	Status.
 *
 * 7412's Dram write routine.
 */
enum BC_STATUS crystalhd_mem_wr(struct crystalhd_adp *adp, uint32_t start_off,
			      uint32_t dw_cnt, uint32_t *wr_buff)
{
	uint32_t ix = 0;

	if (!adp || !wr_buff ||
	    (bc_chk_dram_range(adp, start_off, dw_cnt) != BC_STS_SUCCESS)) {
		BCMLOG_ERR("Invalid arg\n");
		return BC_STS_INV_ARG;
	}

	for (ix = 0; ix < dw_cnt; ix++)
		crystalhd_dram_wr(adp, (start_off + (ix * 4)), wr_buff[ix]);

	return BC_STS_SUCCESS;
}
/**
 * crystalhd_pci_cfg_rd - PCIe config read
 * @adp: Adapter instance
 * @off: PCI config space offset.
 * @len: Size -- Byte, Word & dword.
 * @val: Value read
 *
 * Return:
 *	Status.
 *
 * Get value from Link's PCIe config space.
*/ enum BC_STATUS crystalhd_pci_cfg_rd(struct crystalhd_adp *adp, uint32_t off, uint32_t len, uint32_t *val) { enum BC_STATUS sts = BC_STS_SUCCESS; int rc = 0; if (!adp || !val) { BCMLOG_ERR("Invalid arg\n"); return BC_STS_INV_ARG; } switch (len) { case 1: rc = pci_read_config_byte(adp->pdev, off, (u8 *)val); break; case 2: rc = pci_read_config_word(adp->pdev, off, (u16 *)val); break; case 4: rc = pci_read_config_dword(adp->pdev, off, (u32 *)val); break; default: rc = -EINVAL; sts = BC_STS_INV_ARG; BCMLOG_ERR("Invalid len:%d\n", len); } if (rc && (sts == BC_STS_SUCCESS)) sts = BC_STS_ERROR; return sts; } /** * crystalhd_pci_cfg_wr - PCIe config write * @adp: Adapter instance * @off: PCI config space offset. * @len: Size -- Byte, Word & dword. * @val: Value to be written * * Return: * Status. * * Set value to Link's PCIe config space. */ enum BC_STATUS crystalhd_pci_cfg_wr(struct crystalhd_adp *adp, uint32_t off, uint32_t len, uint32_t val) { enum BC_STATUS sts = BC_STS_SUCCESS; int rc = 0; if (!adp || !val) { BCMLOG_ERR("Invalid arg\n"); return BC_STS_INV_ARG; } switch (len) { case 1: rc = pci_write_config_byte(adp->pdev, off, (u8)val); break; case 2: rc = pci_write_config_word(adp->pdev, off, (u16)val); break; case 4: rc = pci_write_config_dword(adp->pdev, off, val); break; default: rc = -EINVAL; sts = BC_STS_INV_ARG; BCMLOG_ERR("Invalid len:%d\n", len); } if (rc && (sts == BC_STS_SUCCESS)) sts = BC_STS_ERROR; return sts; } /** * bc_kern_dma_alloc - Allocate memory for Dma rings * @adp: Adapter instance * @sz: Size of the memory to allocate. * @phy_addr: Physical address of the memory allocated. * Typedef to system's dma_addr_t (u64) * * Return: * Pointer to allocated memory.. * * Wrapper to Linux kernel interface. 
 *
 */
void *bc_kern_dma_alloc(struct crystalhd_adp *adp, uint32_t sz,
			dma_addr_t *phy_addr)
{
	void *temp = NULL;

	if (!adp || !sz || !phy_addr) {
		BCMLOG_ERR("Invalide Arg..\n");
		return temp;
	}

	temp = pci_alloc_consistent(adp->pdev, sz, phy_addr);
	if (temp)
		memset(temp, 0, sz);

	return temp;
}

/**
 * bc_kern_dma_free - Release Dma ring memory.
 * @adp: Adapter instance
 * @sz: Size of the memory to allocate.
 * @ka: Kernel virtual address returned during _dio_alloc()
 * @phy_addr: Physical address of the memory allocated.
 *	Typedef to system's dma_addr_t (u64)
 *
 * Return:
 *	none.
 */
void bc_kern_dma_free(struct crystalhd_adp *adp, uint32_t sz, void *ka,
		      dma_addr_t phy_addr)
{
	if (!adp || !ka || !sz || !phy_addr) {
		BCMLOG_ERR("Invalide Arg..\n");
		return;
	}

	pci_free_consistent(adp->pdev, sz, ka, phy_addr);
}

/**
 * crystalhd_create_dioq - Create Generic DIO queue
 * @adp: Adapter instance
 * @dioq_hnd: Handle to the dio queue created
 * @cb : Optional - Call back To free the element.
 * @cbctx: Context to pass to callback.
 *
 * Return:
 *	status
 *
 * Initialize Generic DIO queue to hold any data. Callback
 * will be used to free elements while deleting the queue.
 */
enum BC_STATUS crystalhd_create_dioq(struct crystalhd_adp *adp,
				   struct crystalhd_dioq **dioq_hnd,
				   crystalhd_data_free_cb cb, void *cbctx)
{
	struct crystalhd_dioq *dioq = NULL;

	if (!adp || !dioq_hnd) {
		BCMLOG_ERR("Invalid arg!!\n");
		return BC_STS_INV_ARG;
	}

	dioq = kzalloc(sizeof(*dioq), GFP_KERNEL);
	if (!dioq)
		return BC_STS_INSUFF_RES;

	spin_lock_init(&dioq->lock);
	dioq->sig = BC_LINK_DIOQ_SIG;
	/*
	 * Circular doubly-linked list: head/tail initially point at the
	 * queue structure itself, which acts as the list sentinel.
	 */
	dioq->head = (struct crystalhd_elem *)&dioq->head;
	dioq->tail = (struct crystalhd_elem *)&dioq->head;
	crystalhd_create_event(&dioq->event);
	dioq->adp = adp;
	dioq->data_rel_cb = cb;
	dioq->cb_context = cbctx;
	*dioq_hnd = dioq;

	return BC_STS_SUCCESS;
}

/**
 * crystalhd_delete_dioq - Delete Generic DIO queue
 * @adp: Adapter instance
 * @dioq: DIOQ instance..
 *
 * Return:
 *	None.
 *
 * Release Generic DIO queue. This function will remove
 * all the entries from the Queue and will release data
 * by calling the call back provided during creation.
 *
 */
void crystalhd_delete_dioq(struct crystalhd_adp *adp, struct crystalhd_dioq *dioq)
{
	void *temp;

	if (!dioq || (dioq->sig != BC_LINK_DIOQ_SIG))
		return;

	/* Drain the queue, handing each payload to the release callback. */
	do {
		temp = crystalhd_dioq_fetch(dioq);
		if (temp && dioq->data_rel_cb)
			dioq->data_rel_cb(dioq->cb_context, temp);
	} while (temp);
	dioq->sig = 0;	/* invalidate signature before freeing */
	kfree(dioq);
}

/**
 * crystalhd_dioq_add - Add new DIO request element.
 * @ioq: DIO queue instance
 * @data: DIO request to be added.
 * @wake: True - Wake up suspended process.
 * @tag: Special tag to assign - For search and get.
 *
 * Return:
 *	Status.
 *
 * Insert new element to Q tail.
 */
enum BC_STATUS crystalhd_dioq_add(struct crystalhd_dioq *ioq, void *data,
				bool wake, uint32_t tag)
{
	unsigned long flags = 0;
	struct crystalhd_elem *tmp;

	if (!ioq || (ioq->sig != BC_LINK_DIOQ_SIG) || !data) {
		BCMLOG_ERR("Invalid arg!!\n");
		return BC_STS_INV_ARG;
	}

	tmp = crystalhd_alloc_elem(ioq->adp);
	if (!tmp) {
		BCMLOG_ERR("No free elements.\n");
		return BC_STS_INSUFF_RES;
	}

	tmp->data = data;
	tmp->tag = tag;
	spin_lock_irqsave(&ioq->lock, flags);
	/* splice the new element in just before the sentinel (= tail). */
	tmp->flink = (struct crystalhd_elem *)&ioq->head;
	tmp->blink = ioq->tail;
	tmp->flink->blink = tmp;
	tmp->blink->flink = tmp;
	ioq->count++;
	spin_unlock_irqrestore(&ioq->lock, flags);

	if (wake)
		crystalhd_set_event(&ioq->event);

	return BC_STS_SUCCESS;
}

/**
 * crystalhd_dioq_fetch - Fetch element from head.
 * @ioq: DIO queue instance
 *
 * Return:
 *	data element from the head..
 *
 * Remove an element from Queue.
 */
void *crystalhd_dioq_fetch(struct crystalhd_dioq *ioq)
{
	unsigned long flags = 0;
	struct crystalhd_elem *tmp;
	struct crystalhd_elem *ret = NULL;
	void *data = NULL;

	if (!ioq || (ioq->sig != BC_LINK_DIOQ_SIG)) {
		BCMLOG_ERR("Invalid arg!!\n");
		return data;
	}

	spin_lock_irqsave(&ioq->lock, flags);
	tmp = ioq->head;
	/* head == sentinel means the queue is empty */
	if (tmp != (struct crystalhd_elem *)&ioq->head) {
		ret = tmp;
		tmp->flink->blink = tmp->blink;
		tmp->blink->flink = tmp->flink;
		ioq->count--;
	}
	spin_unlock_irqrestore(&ioq->lock, flags);
	if (ret) {
		data = ret->data;
		crystalhd_free_elem(ioq->adp, ret);
	}

	return data;
}

/**
 * crystalhd_dioq_find_and_fetch - Search the tag and Fetch element
 * @ioq: DIO queue instance
 * @tag: Tag to search for.
 *
 * Return:
 *	element from the head..
 *
 * Search TAG and remove the element.
 */
void *crystalhd_dioq_find_and_fetch(struct crystalhd_dioq *ioq, uint32_t tag)
{
	unsigned long flags = 0;
	struct crystalhd_elem *tmp;
	struct crystalhd_elem *ret = NULL;
	void *data = NULL;

	if (!ioq || (ioq->sig != BC_LINK_DIOQ_SIG)) {
		BCMLOG_ERR("Invalid arg!!\n");
		return data;
	}

	spin_lock_irqsave(&ioq->lock, flags);
	tmp = ioq->head;
	/* linear scan until we wrap back to the sentinel */
	while (tmp != (struct crystalhd_elem *)&ioq->head) {
		if (tmp->tag == tag) {
			ret = tmp;
			tmp->flink->blink = tmp->blink;
			tmp->blink->flink = tmp->flink;
			ioq->count--;
			break;
		}
		tmp = tmp->flink;
	}
	spin_unlock_irqrestore(&ioq->lock, flags);

	if (ret) {
		data = ret->data;
		crystalhd_free_elem(ioq->adp, ret);
	}

	return data;
}

/**
 * crystalhd_dioq_fetch_wait - Fetch element from Head.
 * @ioq: DIO queue instance
 * @to_secs: Wait timeout in seconds..
 *
 * Return:
 *	element from the head..
 *
 * Return element from head if Q is not empty. Wait for new element
 * if Q is empty for Timeout seconds.
 */
void *crystalhd_dioq_fetch_wait(struct crystalhd_dioq *ioq, uint32_t to_secs,
			      uint32_t *sig_pend)
{
	unsigned long flags = 0;
	int rc = 0, count;
	void *tmp = NULL;

	if (!ioq || (ioq->sig != BC_LINK_DIOQ_SIG) || !to_secs || !sig_pend) {
		BCMLOG_ERR("Invalid arg!!\n");
		return tmp;
	}

	/* wait in 1000ms slices; one slice per remaining second */
	count = to_secs;
	spin_lock_irqsave(&ioq->lock, flags);
	while ((ioq->count == 0) && count) {
		spin_unlock_irqrestore(&ioq->lock, flags);

		crystalhd_wait_on_event(&ioq->event, (ioq->count > 0), 1000, rc, 0);
		if (rc == 0) {
			/* event fired: attempt the fetch below */
			goto out;
		} else if (rc == -EINTR) {
			BCMLOG(BCMLOG_INFO, "Cancelling fetch wait\n");
			*sig_pend = 1;
			return tmp;
		}
		spin_lock_irqsave(&ioq->lock, flags);
		count--;
	}
	spin_unlock_irqrestore(&ioq->lock, flags);

out:
	return crystalhd_dioq_fetch(ioq);
}

/**
 * crystalhd_map_dio - Map user address for DMA
 * @adp:	Adapter instance
 * @ubuff:	User buffer to map.
 * @ubuff_sz:	User buffer size.
 * @uv_offset:	UV buffer offset.
 * @en_422mode: TRUE:422 FALSE:420 Capture mode.
 * @dir_tx:	TRUE for Tx (To device from host)
 * @dio_hnd:	Handle to mapped DIO request.
 *
 * Return:
 *	Status.
 *
 * This routine maps user address and lock pages for DMA.
 *
 */
enum BC_STATUS crystalhd_map_dio(struct crystalhd_adp *adp, void *ubuff,
			       uint32_t ubuff_sz, uint32_t uv_offset,
			       bool en_422mode, bool dir_tx,
			       struct crystalhd_dio_req **dio_hnd)
{
	struct crystalhd_dio_req	*dio;
	/* FIXME: jarod: should some of these unsigned longs be uint32_t or uintptr_t? */
	unsigned long start = 0, end = 0, uaddr = 0, count = 0;
	unsigned long spsz = 0, uv_start = 0;
	int i = 0, rw = 0, res = 0, nr_pages = 0, skip_fb_sg = 0;

	if (!adp || !ubuff || !ubuff_sz || !dio_hnd) {
		BCMLOG_ERR("Invalid arg\n");
		return BC_STS_INV_ARG;
	}
	/* Compute pages */
	uaddr = (unsigned long)ubuff;
	count = (unsigned long)ubuff_sz;
	end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = uaddr >> PAGE_SHIFT;
	nr_pages = end - start;

	if (!count || ((uaddr + count) < uaddr)) {
		BCMLOG_ERR("User addr overflow!!\n");
		return BC_STS_INV_ARG;
	}

	dio = crystalhd_alloc_dio(adp);
	if (!dio) {
		BCMLOG_ERR("dio pool empty..\n");
		return BC_STS_INSUFF_RES;
	}

	if (dir_tx) {
		rw = WRITE;
		dio->direction = DMA_TO_DEVICE;
	} else {
		rw = READ;
		dio->direction = DMA_FROM_DEVICE;
	}

	if (nr_pages > dio->max_pages) {
		BCMLOG_ERR("max_pages(%d) exceeded(%d)!!\n",
			 dio->max_pages, nr_pages);
		crystalhd_unmap_dio(adp, dio);
		return BC_STS_INSUFF_RES;
	}

	if (uv_offset) {
		/* record where the UV plane starts within the sg list */
		uv_start = (uaddr + (unsigned long)uv_offset) >> PAGE_SHIFT;
		dio->uinfo.uv_sg_ix = uv_start - start;
		dio->uinfo.uv_sg_off = ((uaddr + (unsigned long)uv_offset) & ~PAGE_MASK);
	}

	/* trailing 1-3 bytes that don't fill a dword go via the fill-byte buffer */
	dio->fb_size = ubuff_sz & 0x03;
	if (dio->fb_size) {
		res = copy_from_user(dio->fb_va,
				     (void *)(uaddr + count - dio->fb_size),
				     dio->fb_size);
		if (res) {
			BCMLOG_ERR("failed %d to copy %u fill bytes from %p\n",
				   res, dio->fb_size,
				   (void *)(uaddr + count-dio->fb_size));
			crystalhd_unmap_dio(adp, dio);
			return BC_STS_INSUFF_RES;
		}
	}

	/* pin the user pages; pages are written to only when receiving (READ) */
	down_read(&current->mm->mmap_sem);
	res = get_user_pages(current, current->mm, uaddr, nr_pages, rw == READ,
			     0, dio->pages, NULL);
	up_read(&current->mm->mmap_sem);

	/* Save for release..*/
	dio->sig = crystalhd_dio_locked;
	if (res < nr_pages) {
		BCMLOG_ERR("get pages failed: %d-%d\n", nr_pages, res);
		dio->page_cnt = res;
		crystalhd_unmap_dio(adp, dio);
		return BC_STS_ERROR;
	}

	dio->page_cnt = nr_pages;
	/* Get scatter/gather */
	crystalhd_init_sg(dio->sg, dio->page_cnt);
	crystalhd_set_sg(&dio->sg[0], dio->pages[0], 0, uaddr & ~PAGE_MASK);
	if (nr_pages > 1) {
		dio->sg[0].length = PAGE_SIZE - dio->sg[0].offset;

#ifdef CONFIG_X86_64
		dio->sg[0].dma_length = dio->sg[0].length;
#endif
		count -= dio->sg[0].length;
		for (i = 1; i < nr_pages; i++) {
			if (count < 4) {
				/* tail < 4 bytes is handled by the fill-byte sg */
				spsz = count;
				skip_fb_sg = 1;
			} else {
				spsz = (count < PAGE_SIZE) ?
					(count & ~0x03) : PAGE_SIZE;
			}
			crystalhd_set_sg(&dio->sg[i], dio->pages[i], spsz, 0);
			count -= spsz;
		}
	} else {
		if (count < 4) {
			dio->sg[0].length = count;
			skip_fb_sg = 1;
		} else {
			dio->sg[0].length = count - dio->fb_size;
		}
#ifdef CONFIG_X86_64
		dio->sg[0].dma_length = dio->sg[0].length;
#endif
	}
	dio->sg_cnt = pci_map_sg(adp->pdev, dio->sg,
				 dio->page_cnt, dio->direction);
	if (dio->sg_cnt <= 0) {
		BCMLOG_ERR("sg map %d-%d\n",
			   dio->sg_cnt, dio->page_cnt);
		crystalhd_unmap_dio(adp, dio);
		return BC_STS_ERROR;
	}
	if (dio->sg_cnt && skip_fb_sg)
		dio->sg_cnt -= 1;
	dio->sig = crystalhd_dio_sg_mapped;
	/* Fill in User info.. */
	dio->uinfo.xfr_len = ubuff_sz;
	dio->uinfo.xfr_buff = ubuff;
	dio->uinfo.uv_offset = uv_offset;
	dio->uinfo.b422mode = en_422mode;
	dio->uinfo.dir_tx = dir_tx;

	*dio_hnd = dio;

	return BC_STS_SUCCESS;
}

/**
 * crystalhd_unmap_sgl - Release mapped resources
 * @adp: Adapter instance
 * @dio: DIO request instance
 *
 * Return:
 *	Status.
 *
 * This routine is to unmap the user buffer pages.
 */
enum BC_STATUS crystalhd_unmap_dio(struct crystalhd_adp *adp, struct crystalhd_dio_req *dio)
{
	struct page *page = NULL;
	int j = 0;

	if (!adp || !dio) {
		BCMLOG_ERR("Invalid arg\n");
		return BC_STS_INV_ARG;
	}

	if ((dio->page_cnt > 0) && (dio->sig != crystalhd_dio_inv)) {
		for (j = 0; j < dio->page_cnt; j++) {
			page = dio->pages[j];
			if (page) {
				/* pages DMA'd from the device may be dirty */
				if (!PageReserved(page) &&
				    (dio->direction == DMA_FROM_DEVICE))
					SetPageDirty(page);
				page_cache_release(page);
			}
		}
	}
	if (dio->sig == crystalhd_dio_sg_mapped)
		pci_unmap_sg(adp->pdev, dio->sg, dio->page_cnt, dio->direction);

	crystalhd_free_dio(adp, dio);

	return BC_STS_SUCCESS;
}

/**
 * crystalhd_create_dio_pool - Allocate mem pool for DIO management.
* @adp: Adapter instance * @max_pages: Max pages for size calculation. * * Return: * system error. * * This routine creates a memory pool to hold dio context for * for HW Direct IO operation. */ int crystalhd_create_dio_pool(struct crystalhd_adp *adp, uint32_t max_pages) { uint32_t asz = 0, i = 0; uint8_t *temp; struct crystalhd_dio_req *dio; if (!adp || !max_pages) { BCMLOG_ERR("Invalid Arg!!\n"); return -EINVAL; } /* Get dma memory for fill byte handling..*/ adp->fill_byte_pool = pci_pool_create("crystalhd_fbyte", adp->pdev, 8, 8, 0); if (!adp->fill_byte_pool) { BCMLOG_ERR("failed to create fill byte pool\n"); return -ENOMEM; } /* Get the max size from user based on 420/422 modes */ asz = (sizeof(*dio->pages) * max_pages) + (sizeof(*dio->sg) * max_pages) + sizeof(*dio); BCMLOG(BCMLOG_DBG, "Initializing Dio pool %d %d %x %p\n", BC_LINK_SG_POOL_SZ, max_pages, asz, adp->fill_byte_pool); for (i = 0; i < BC_LINK_SG_POOL_SZ; i++) { temp = kzalloc(asz, GFP_KERNEL); if ((temp) == NULL) { BCMLOG_ERR("Failed to alloc %d mem\n", asz); return -ENOMEM; } dio = (struct crystalhd_dio_req *)temp; temp += sizeof(*dio); dio->pages = (struct page **)temp; temp += (sizeof(*dio->pages) * max_pages); dio->sg = (struct scatterlist *)temp; dio->max_pages = max_pages; dio->fb_va = pci_pool_alloc(adp->fill_byte_pool, GFP_KERNEL, &dio->fb_pa); if (!dio->fb_va) { BCMLOG_ERR("fill byte alloc failed.\n"); return -ENOMEM; } crystalhd_free_dio(adp, dio); } return 0; } /** * crystalhd_destroy_dio_pool - Release DIO mem pool. * @adp: Adapter instance * * Return: * none. * * This routine releases dio memory pool during close. 
*/ void crystalhd_destroy_dio_pool(struct crystalhd_adp *adp) { struct crystalhd_dio_req *dio; int count = 0; if (!adp) { BCMLOG_ERR("Invalid Arg!!\n"); return; } do { dio = crystalhd_alloc_dio(adp); if (dio) { if (dio->fb_va) pci_pool_free(adp->fill_byte_pool, dio->fb_va, dio->fb_pa); count++; kfree(dio); } } while (dio); if (adp->fill_byte_pool) { pci_pool_destroy(adp->fill_byte_pool); adp->fill_byte_pool = NULL; } BCMLOG(BCMLOG_DBG, "Released dio pool %d\n", count); } /** * crystalhd_create_elem_pool - List element pool creation. * @adp: Adapter instance * @pool_size: Number of elements in the pool. * * Return: * 0 - success, <0 error * * Create general purpose list element pool to hold pending, * and active requests. */ int crystalhd_create_elem_pool(struct crystalhd_adp *adp, uint32_t pool_size) { uint32_t i; struct crystalhd_elem *temp; if (!adp || !pool_size) return -EINVAL; for (i = 0; i < pool_size; i++) { temp = kzalloc(sizeof(*temp), GFP_KERNEL); if (!temp) { BCMLOG_ERR("kalloc failed\n"); return -ENOMEM; } crystalhd_free_elem(adp, temp); } BCMLOG(BCMLOG_DBG, "allocated %d elem\n", pool_size); return 0; } /** * crystalhd_delete_elem_pool - List element pool deletion. * @adp: Adapter instance * * Return: * none * * Delete general purpose list element pool. */ void crystalhd_delete_elem_pool(struct crystalhd_adp *adp) { struct crystalhd_elem *temp; int dbg_cnt = 0; if (!adp) return; do { temp = crystalhd_alloc_elem(adp); if (temp) { kfree(temp); dbg_cnt++; } } while (temp); BCMLOG(BCMLOG_DBG, "released %d elem\n", dbg_cnt); } /*================ Debug support routines.. 
================================*/ void crystalhd_show_buffer(uint32_t off, uint8_t *buff, uint32_t dwcount) { uint32_t i, k = 1; for (i = 0; i < dwcount; i++) { if (k == 1) BCMLOG(BCMLOG_DATA, "0x%08X : ", off); BCMLOG(BCMLOG_DATA, " 0x%08X ", *((uint32_t *)buff)); buff += sizeof(uint32_t); off += sizeof(uint32_t); k++; if ((i == dwcount - 1) || (k > 4)) { BCMLOG(BCMLOG_DATA, "\n"); k = 1; } } }
gpl-2.0
TeamRegular/android_kernel_amazon_ford
drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c
2253
21004
/* cx231xx-pcb-config.c - driver for Conexant Cx23100/101/102 USB video capture devices Copyright (C) 2008 <srinivasa.deevi at conexant dot com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "cx231xx.h" #include "cx231xx-conf-reg.h" static unsigned int pcb_debug; module_param(pcb_debug, int, 0644); MODULE_PARM_DESC(pcb_debug, "enable pcb config debug messages [video]"); /******************************************************************************/ struct pcb_config cx231xx_Scenario[] = { { INDEX_SELFPOWER_DIGITAL_ONLY, /* index */ USB_SELF_POWER, /* power_type */ 0, /* speed , not decide yet */ MOD_DIGITAL, /* mode */ SOURCE_TS_BDA, /* ts1_source, digital tv only */ NOT_SUPPORTED, /* ts2_source */ NOT_SUPPORTED, /* analog source */ 0, /* digital_index */ 0, /* analog index */ 0, /* dif_index */ 0, /* external_index */ 1, /* only one configuration */ { { 0, /* config index */ { 0, /* interrupt ep index */ 1, /* ts1 index */ NOT_SUPPORTED, /* TS2 index */ NOT_SUPPORTED, /* AUDIO */ NOT_SUPPORTED, /* VIDEO */ NOT_SUPPORTED, /* VANC */ NOT_SUPPORTED, /* HANC */ NOT_SUPPORTED /* ir_index */ } , } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } } , /* 
full-speed config */ { { 0, /* config index */ { 0, /* interrupt ep index */ 1, /* ts1 index */ NOT_SUPPORTED, /* TS2 index */ NOT_SUPPORTED, /* AUDIO */ NOT_SUPPORTED, /* VIDEO */ NOT_SUPPORTED, /* VANC */ NOT_SUPPORTED, /* HANC */ NOT_SUPPORTED /* ir_index */ } } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } } } , { INDEX_SELFPOWER_DUAL_DIGITAL, /* index */ USB_SELF_POWER, /* power_type */ 0, /* speed , not decide yet */ MOD_DIGITAL, /* mode */ SOURCE_TS_BDA, /* ts1_source, digital tv only */ 0, /* ts2_source,need update from register */ NOT_SUPPORTED, /* analog source */ 0, /* digital_index */ 0, /* analog index */ 0, /* dif_index */ 0, /* external_index */ 1, /* only one configuration */ { { 0, /* config index */ { 0, /* interrupt ep index */ 1, /* ts1 index */ 2, /* TS2 index */ NOT_SUPPORTED, /* AUDIO */ NOT_SUPPORTED, /* VIDEO */ NOT_SUPPORTED, /* VANC */ NOT_SUPPORTED, /* HANC */ NOT_SUPPORTED /* ir_index */ } } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } } , /* full-speed */ { { 0, /* config index */ { 0, /* interrupt ep index */ 1, /* ts1 index */ 2, /* TS2 index */ NOT_SUPPORTED, /* AUDIO */ NOT_SUPPORTED, /* VIDEO */ NOT_SUPPORTED, /* VANC */ NOT_SUPPORTED, /* HANC */ NOT_SUPPORTED /* ir_index */ } } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } } } , { INDEX_SELFPOWER_ANALOG_ONLY, /* index */ USB_SELF_POWER, /* power_type */ 0, /* speed , not 
decide yet */ MOD_ANALOG | MOD_DIF | MOD_EXTERNAL, /* mode ,analog tv only */ NOT_SUPPORTED, /* ts1_source, NOT SUPPORT */ NOT_SUPPORTED, /* ts2_source,NOT SUPPORT */ 0, /* analog source, need update */ 0, /* digital_index */ 0, /* analog index */ 0, /* dif_index */ 0, /* external_index */ 1, /* only one configuration */ { { 0, /* config index */ { 0, /* interrupt ep index */ NOT_SUPPORTED, /* ts1 index */ NOT_SUPPORTED, /* TS2 index */ 1, /* AUDIO */ 2, /* VIDEO */ 3, /* VANC */ 4, /* HANC */ NOT_SUPPORTED /* ir_index */ } } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } } , /* full-speed */ { { 0, /* config index */ { 0, /* interrupt ep index */ NOT_SUPPORTED, /* ts1 index */ NOT_SUPPORTED, /* TS2 index */ 1, /* AUDIO */ 2, /* VIDEO */ NOT_SUPPORTED, /* VANC */ NOT_SUPPORTED, /* HANC */ NOT_SUPPORTED /* ir_index */ } } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } } } , { INDEX_SELFPOWER_DUAL, /* index */ USB_SELF_POWER, /* power_type */ 0, /* speed , not decide yet */ /* mode ,analog tv and digital path */ MOD_ANALOG | MOD_DIF | MOD_DIGITAL | MOD_EXTERNAL, 0, /* ts1_source,will update in register */ NOT_SUPPORTED, /* ts2_source,NOT SUPPORT */ 0, /* analog source need update */ 0, /* digital_index */ 0, /* analog index */ 0, /* dif_index */ 0, /* external_index */ 1, /* only one configuration */ { { 0, /* config index */ { 0, /* interrupt ep index */ 1, /* ts1 index */ NOT_SUPPORTED, /* TS2 index */ 2, /* AUDIO */ 3, /* VIDEO */ 4, /* VANC */ 5, /* HANC */ NOT_SUPPORTED /* ir_index */ } } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, 
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } } , /* full-speed */ { { 0, /* config index */ { 0, /* interrupt ep index */ 1, /* ts1 index */ NOT_SUPPORTED, /* TS2 index */ 2, /* AUDIO */ 3, /* VIDEO */ NOT_SUPPORTED, /* VANC */ NOT_SUPPORTED, /* HANC */ NOT_SUPPORTED /* ir_index */ } } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } } } , { INDEX_SELFPOWER_TRIPLE, /* index */ USB_SELF_POWER, /* power_type */ 0, /* speed , not decide yet */ /* mode ,analog tv and digital path */ MOD_ANALOG | MOD_DIF | MOD_DIGITAL | MOD_EXTERNAL, 0, /* ts1_source, update in register */ 0, /* ts2_source,update in register */ 0, /* analog source, need update */ 0, /* digital_index */ 0, /* analog index */ 0, /* dif_index */ 0, /* external_index */ 1, /* only one configuration */ { { 0, /* config index */ { 0, /* interrupt ep index */ 1, /* ts1 index */ 2, /* TS2 index */ 3, /* AUDIO */ 4, /* VIDEO */ 5, /* VANC */ 6, /* HANC */ NOT_SUPPORTED /* ir_index */ } } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } } , /* full-speed */ { { 0, /* config index */ { 0, /* interrupt ep index */ 1, /* ts1 index */ 2, /* TS2 index */ 3, /* AUDIO */ 4, /* VIDEO */ NOT_SUPPORTED, /* VANC */ NOT_SUPPORTED, /* HANC */ NOT_SUPPORTED /* ir_index */ } } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, 
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } } } , { INDEX_SELFPOWER_COMPRESSOR, /* index */ USB_SELF_POWER, /* power_type */ 0, /* speed , not decide yet */ /* mode ,analog tv AND DIGITAL path */ MOD_ANALOG | MOD_DIF | MOD_DIGITAL | MOD_EXTERNAL, NOT_SUPPORTED, /* ts1_source, disable */ SOURCE_TS_BDA, /* ts2_source */ 0, /* analog source,need update */ 0, /* digital_index */ 0, /* analog index */ 0, /* dif_index */ 0, /* external_index */ 1, /* only one configuration */ { { 0, /* config index */ { 0, /* interrupt ep index */ NOT_SUPPORTED, /* ts1 index */ 1, /* TS2 index */ 2, /* AUDIO */ 3, /* VIDEO */ 4, /* VANC */ 5, /* HANC */ NOT_SUPPORTED /* ir_index */ } } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } } , /* full-speed */ { { 0, /* config index */ { 0, /* interrupt ep index */ NOT_SUPPORTED, /* ts1 index */ 1, /* TS2 index */ 2, /* AUDIO */ 3, /* VIDEO */ NOT_SUPPORTED, /* VANC */ NOT_SUPPORTED, /* HANC */ NOT_SUPPORTED /* ir_index */ } } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } } } , { INDEX_BUSPOWER_DIGITAL_ONLY, /* index */ USB_BUS_POWER, /* power_type */ 0, /* speed , not decide yet */ MOD_DIGITAL, /* mode ,analog tv AND DIGITAL path */ SOURCE_TS_BDA, /* ts1_source, disable */ NOT_SUPPORTED, /* ts2_source */ NOT_SUPPORTED, /* analog source */ 0, /* digital_index */ 0, /* analog index */ 0, /* dif_index */ 0, /* external_index */ 1, /* only one configuration */ { { 0, /* config index */ { 0, /* interrupt ep index = 2 */ 1, /* ts1 index */ NOT_SUPPORTED, /* TS2 index */ NOT_SUPPORTED, /* AUDIO */ NOT_SUPPORTED, /* VIDEO */ NOT_SUPPORTED, /* 
VANC */ NOT_SUPPORTED, /* HANC */ NOT_SUPPORTED /* ir_index */ } } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } } , /* full-speed */ { { 0, /* config index */ { 0, /* interrupt ep index = 2 */ 1, /* ts1 index */ NOT_SUPPORTED, /* TS2 index */ NOT_SUPPORTED, /* AUDIO */ NOT_SUPPORTED, /* VIDEO */ NOT_SUPPORTED, /* VANC */ NOT_SUPPORTED, /* HANC */ NOT_SUPPORTED /* ir_index */ } } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } } } , { INDEX_BUSPOWER_ANALOG_ONLY, /* index */ USB_BUS_POWER, /* power_type */ 0, /* speed , not decide yet */ MOD_ANALOG, /* mode ,analog tv AND DIGITAL path */ NOT_SUPPORTED, /* ts1_source, disable */ NOT_SUPPORTED, /* ts2_source */ SOURCE_ANALOG, /* analog source--analog */ 0, /* digital_index */ 0, /* analog index */ 0, /* dif_index */ 0, /* external_index */ 1, /* only one configuration */ { { 0, /* config index */ { 0, /* interrupt ep index */ NOT_SUPPORTED, /* ts1 index */ NOT_SUPPORTED, /* TS2 index */ 1, /* AUDIO */ 2, /* VIDEO */ 3, /* VANC */ 4, /* HANC */ NOT_SUPPORTED /* ir_index */ } } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } } , { /* full-speed */ { 0, /* config index */ { 0, /* interrupt ep index */ NOT_SUPPORTED, /* ts1 index */ NOT_SUPPORTED, /* TS2 index */ 1, /* AUDIO */ 2, /* VIDEO */ NOT_SUPPORTED, /* VANC */ NOT_SUPPORTED, /* HANC */ NOT_SUPPORTED /* ir_index */ } } , {NOT_SUPPORTED, {NOT_SUPPORTED, 
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } } } , { INDEX_BUSPOWER_DIF_ONLY, /* index */ USB_BUS_POWER, /* power_type */ 0, /* speed , not decide yet */ /* mode ,analog tv AND DIGITAL path */ MOD_DIF | MOD_ANALOG | MOD_DIGITAL | MOD_EXTERNAL, SOURCE_TS_BDA, /* ts1_source, disable */ NOT_SUPPORTED, /* ts2_source */ SOURCE_DIF | SOURCE_ANALOG | SOURCE_EXTERNAL, /* analog source, dif */ 0, /* digital_index */ 0, /* analog index */ 0, /* dif_index */ 0, /* external_index */ 1, /* only one configuration */ { { 0, /* config index */ { 0, /* interrupt ep index */ 1, /* ts1 index */ NOT_SUPPORTED, /* TS2 index */ 2, /* AUDIO */ 3, /* VIDEO */ 4, /* VANC */ 5, /* HANC */ NOT_SUPPORTED /* ir_index */ } } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } } , { /* full speed */ { 0, /* config index */ { 0, /* interrupt ep index */ 1, /* ts1 index */ NOT_SUPPORTED, /* TS2 index */ 2, /* AUDIO */ 3, /* VIDEO */ NOT_SUPPORTED, /* VANC */ NOT_SUPPORTED, /* HANC */ NOT_SUPPORTED /* ir_index */ } } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } , {NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED} } } } , }; /*****************************************************************/ u32 initialize_cx231xx(struct cx231xx *dev) { u32 config_info = 0; struct pcb_config *p_pcb_info; u8 usb_speed = 1; /* from register,1--HS, 0--FS */ u8 data[4] = { 0, 0, 0, 0 }; u32 ts1_source = 0; u32 ts2_source = 0; u32 analog_source = 0; u8 _current_scenario_idx = 0xff; ts1_source = 
SOURCE_TS_BDA; ts2_source = SOURCE_TS_BDA; /* read board config register to find out which pcb config it is related to */ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, BOARD_CFG_STAT, data, 4); config_info = le32_to_cpu(*((u32 *) data)); usb_speed = (u8) (config_info & 0x1); /* Verify this device belongs to Bus power or Self power device */ if (config_info & BUS_POWER) { /* bus-power */ switch (config_info & BUSPOWER_MASK) { case TS1_PORT | BUS_POWER: cx231xx_Scenario[INDEX_BUSPOWER_DIGITAL_ONLY].speed = usb_speed; p_pcb_info = &cx231xx_Scenario[INDEX_BUSPOWER_DIGITAL_ONLY]; _current_scenario_idx = INDEX_BUSPOWER_DIGITAL_ONLY; break; case AVDEC_ENABLE | BUS_POWER: cx231xx_Scenario[INDEX_BUSPOWER_ANALOG_ONLY].speed = usb_speed; p_pcb_info = &cx231xx_Scenario[INDEX_BUSPOWER_ANALOG_ONLY]; _current_scenario_idx = INDEX_BUSPOWER_ANALOG_ONLY; break; case AVDEC_ENABLE | BUS_POWER | TS1_PORT: cx231xx_Scenario[INDEX_BUSPOWER_DIF_ONLY].speed = usb_speed; p_pcb_info = &cx231xx_Scenario[INDEX_BUSPOWER_DIF_ONLY]; _current_scenario_idx = INDEX_BUSPOWER_DIF_ONLY; break; default: cx231xx_info("bad config in buspower!!!!\n"); cx231xx_info("config_info=%x\n", (config_info & BUSPOWER_MASK)); return 1; } } else { /* self-power */ switch (config_info & SELFPOWER_MASK) { case TS1_PORT | SELF_POWER: cx231xx_Scenario[INDEX_SELFPOWER_DIGITAL_ONLY].speed = usb_speed; p_pcb_info = &cx231xx_Scenario[INDEX_SELFPOWER_DIGITAL_ONLY]; _current_scenario_idx = INDEX_SELFPOWER_DIGITAL_ONLY; break; case TS1_TS2_PORT | SELF_POWER: cx231xx_Scenario[INDEX_SELFPOWER_DUAL_DIGITAL].speed = usb_speed; cx231xx_Scenario[INDEX_SELFPOWER_DUAL_DIGITAL]. ts2_source = ts2_source; p_pcb_info = &cx231xx_Scenario[INDEX_SELFPOWER_DUAL_DIGITAL]; _current_scenario_idx = INDEX_SELFPOWER_DUAL_DIGITAL; break; case AVDEC_ENABLE | SELF_POWER: cx231xx_Scenario[INDEX_SELFPOWER_ANALOG_ONLY].speed = usb_speed; cx231xx_Scenario[INDEX_SELFPOWER_ANALOG_ONLY]. 
analog_source = analog_source; p_pcb_info = &cx231xx_Scenario[INDEX_SELFPOWER_ANALOG_ONLY]; _current_scenario_idx = INDEX_SELFPOWER_ANALOG_ONLY; break; case AVDEC_ENABLE | TS1_PORT | SELF_POWER: cx231xx_Scenario[INDEX_SELFPOWER_DUAL].speed = usb_speed; cx231xx_Scenario[INDEX_SELFPOWER_DUAL].ts1_source = ts1_source; cx231xx_Scenario[INDEX_SELFPOWER_DUAL].analog_source = analog_source; p_pcb_info = &cx231xx_Scenario[INDEX_SELFPOWER_DUAL]; _current_scenario_idx = INDEX_SELFPOWER_DUAL; break; case AVDEC_ENABLE | TS1_TS2_PORT | SELF_POWER: cx231xx_Scenario[INDEX_SELFPOWER_TRIPLE].speed = usb_speed; cx231xx_Scenario[INDEX_SELFPOWER_TRIPLE].ts1_source = ts1_source; cx231xx_Scenario[INDEX_SELFPOWER_TRIPLE].ts2_source = ts2_source; cx231xx_Scenario[INDEX_SELFPOWER_TRIPLE].analog_source = analog_source; p_pcb_info = &cx231xx_Scenario[INDEX_SELFPOWER_TRIPLE]; _current_scenario_idx = INDEX_SELFPOWER_TRIPLE; break; case AVDEC_ENABLE | TS1VIP_TS2_PORT | SELF_POWER: cx231xx_Scenario[INDEX_SELFPOWER_COMPRESSOR].speed = usb_speed; cx231xx_Scenario[INDEX_SELFPOWER_COMPRESSOR]. 
analog_source = analog_source; p_pcb_info = &cx231xx_Scenario[INDEX_SELFPOWER_COMPRESSOR]; _current_scenario_idx = INDEX_SELFPOWER_COMPRESSOR; break; default: cx231xx_info("bad senario!!!!!\n"); cx231xx_info("config_info=%x\n", (config_info & SELFPOWER_MASK)); return 1; } } dev->current_scenario_idx = _current_scenario_idx; memcpy(&dev->current_pcb_config, p_pcb_info, sizeof(struct pcb_config)); if (pcb_debug) { cx231xx_info("SC(0x00) register = 0x%x\n", config_info); cx231xx_info("scenario %d\n", (dev->current_pcb_config.index) + 1); cx231xx_info("type=%x\n", dev->current_pcb_config.type); cx231xx_info("mode=%x\n", dev->current_pcb_config.mode); cx231xx_info("speed=%x\n", dev->current_pcb_config.speed); cx231xx_info("ts1_source=%x\n", dev->current_pcb_config.ts1_source); cx231xx_info("ts2_source=%x\n", dev->current_pcb_config.ts2_source); cx231xx_info("analog_source=%x\n", dev->current_pcb_config.analog_source); } return 0; }
gpl-2.0
ubuntustudio-kernel/ubuntu-quantal-lowlatency
kernel/trace/trace_output.c
4045
28440
/*
 * trace_output.c
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>

#include "trace_output.h"

/* must be a power of 2 */
#define EVENT_HASHSIZE	128

DECLARE_RWSEM(trace_event_mutex);

/* hash of registered trace events, keyed by event type id */
static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

/* next dynamically assigned event type id */
static int next_event_type = __TRACE_LAST_TYPE + 1;

/* Flush the trace_seq buffer (at most one page) into a seq_file. */
int trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
	int ret;

	ret = seq_write(m, s->buffer, len);

	/*
	 * Only reset this buffer if we successfully wrote to the
	 * seq_file buffer.
	 */
	if (!ret)
		trace_seq_init(s);

	return ret;
}

/* Print only the message part of a binary-printk (bprint) entry. */
enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct bprint_entry *field;
	int ret;

	trace_assign_type(field, entry);

	ret = trace_seq_bprintf(s, field->fmt, field->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/* Print only the message part of a printk (print) entry. */
enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct print_entry *field;
	int ret;

	trace_assign_type(field, entry);

	ret = trace_seq_printf(s, "%s", field->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * It returns 0 if the trace oversizes the buffer's free
 * space, 1 otherwise.
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formating of a trace
 * trace_seq_printf is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
	int len = (PAGE_SIZE - 1) - s->len;
	va_list ap;
	int ret;

	/* s->full means an earlier write already overflowed the page */
	if (s->full || !len)
		return 0;

	va_start(ap, fmt);
	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
	va_end(ap);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len) {
		s->full = 1;
		return 0;
	}

	s->len += ret;

	return 1;
}
EXPORT_SYMBOL_GPL(trace_seq_printf);

/**
 * trace_seq_vprintf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formating of a trace
 * trace_seq_printf is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
{
	int len = (PAGE_SIZE - 1) - s->len;
	int ret;

	if (s->full || !len)
		return 0;

	ret = vsnprintf(s->buffer + s->len, len, fmt, args);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len) {
		s->full = 1;
		return 0;
	}

	s->len += ret;

	/* NOTE(review): returns the pre-write free space, not bytes written */
	return len;
}
EXPORT_SYMBOL_GPL(trace_seq_vprintf);

/* Like trace_seq_printf() but takes pre-decoded binary printk arguments. */
int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
{
	int len = (PAGE_SIZE - 1) - s->len;
	int ret;

	if (s->full || !len)
		return 0;

	ret = bstr_printf(s->buffer + s->len, len, fmt, binary);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len) {
		s->full = 1;
		return 0;
	}

	s->len += ret;

	return len;
}

/**
 * trace_seq_puts - trace sequence printing of simple string
 * @s: trace sequence descriptor
 * @str: simple string to record
 *
 * The tracer may use either the sequence operations or its own
 * copy to user routines. This function records a simple string
 * into a special buffer (@s) for later retrieval by a sequencer
 * or other mechanism.
 */
int trace_seq_puts(struct trace_seq *s, const char *str)
{
	int len = strlen(str);

	if (s->full)
		return 0;

	if (len > ((PAGE_SIZE - 1) - s->len)) {
		s->full = 1;
		return 0;
	}

	memcpy(s->buffer + s->len, str, len);
	s->len += len;

	return len;
}

/* Append one character; returns 1 on success, 0 when the buffer is full. */
int trace_seq_putc(struct trace_seq *s, unsigned char c)
{
	if (s->full)
		return 0;

	if (s->len >= (PAGE_SIZE - 1)) {
		s->full = 1;
		return 0;
	}

	s->buffer[s->len++] = c;

	return 1;
}
EXPORT_SYMBOL(trace_seq_putc);

/* Append @len raw bytes from @mem. */
int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
{
	if (s->full)
		return 0;

	if (len > ((PAGE_SIZE - 1) - s->len)) {
		s->full = 1;
		return 0;
	}

	memcpy(s->buffer + s->len, mem, len);
	s->len += len;

	return len;
}

/* Append @len bytes of @mem as hex digits followed by a space. */
int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
{
	unsigned char hex[HEX_CHARS];
	const unsigned char *data = mem;
	int i, j;

	if (s->full)
		return 0;

#ifdef __BIG_ENDIAN
	for (i = 0, j = 0; i < len; i++) {
#else
	/* little endian: walk bytes backwards so output is MSB first */
	for (i = len-1, j = 0; i >= 0; i--) {
#endif
		hex[j++] = hex_asc_hi(data[i]);
		hex[j++] = hex_asc_lo(data[i]);
	}
	hex[j++] = ' ';

	return trace_seq_putmem(s, hex, j);
}

/* Reserve @len bytes in the buffer and return a pointer to them. */
void *trace_seq_reserve(struct trace_seq *s, size_t len)
{
	void *ret;

	if (s->full)
		return NULL;

	if (len > ((PAGE_SIZE - 1) - s->len)) {
		s->full = 1;
		return NULL;
	}

	ret = s->buffer + s->len;
	s->len += len;

	return ret;
}

/* Append the pathname of @path; emits '?' when d_path() fails. */
int trace_seq_path(struct trace_seq *s, const struct path *path)
{
	unsigned char *p;

	if (s->full)
		return 0;

	if (s->len >= (PAGE_SIZE - 1)) {
		s->full = 1;
		return 0;
	}

	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
	if (!IS_ERR(p)) {
		p = mangle_path(s->buffer + s->len, p, "\n");
		if (p) {
			s->len = p - s->buffer;
			return 1;
		}
	} else {
		s->buffer[s->len++] = '?';
		return 1;
	}

	s->full = 1;
	return 0;
}

/*
 * Render @flags as delimiter-separated names from @flag_array; any bits
 * with no matching name are printed in hex at the end.  Returns the start
 * of the NUL-terminated string inside @p's buffer.
 */
const char *
ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
		       unsigned long flags,
		       const struct trace_print_flags *flag_array)
{
	unsigned long mask;
	const char *str;
	const char *ret = p->buffer + p->len;
	int i, first = 1;

	for (i = 0; flag_array[i].name && flags; i++) {

		mask = flag_array[i].mask;
		if ((flags & mask) != mask)
			continue;

		str = flag_array[i].name;
		flags &= ~mask;
		if (!first && delim)
			trace_seq_puts(p, delim);
		else
			first = 0;
		trace_seq_puts(p, str);
	}

	/* check for left over flags */
	if (flags) {
		if (!first && delim)
			trace_seq_puts(p, delim);
		trace_seq_printf(p, "0x%lx", flags);
	}

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_flags_seq);

/* Render @val as its symbolic name from @symbol_array, or hex if unknown. */
const char *
ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
			 const struct trace_print_flags *symbol_array)
{
	int i;
	const char *ret = p->buffer + p->len;

	for (i = 0; symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	/* nothing was written: no symbol matched, fall back to hex */
	if (ret == (const char *)(p->buffer + p->len))
		trace_seq_printf(p, "0x%lx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq);

#if BITS_PER_LONG == 32
/* 64-bit value variant of ftrace_print_symbols_seq() for 32-bit kernels. */
const char *
ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
			 const struct trace_print_flags_u64 *symbol_array)
{
	int i;
	const char *ret = p->buffer + p->len;

	for (i = 0; symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	if (ret == (const char *)(p->buffer + p->len))
		trace_seq_printf(p, "0x%llx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq_u64);
#endif

/* Render @buf_len bytes of @buf as space-separated hex byte pairs. */
const char *
ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
{
	int i;
	const char *ret = p->buffer + p->len;

	for (i = 0; i < buf_len; i++)
		trace_seq_printf(p, "%s%2.2x", i == 0 ?
"" : " ", buf[i]); trace_seq_putc(p, 0); return ret; } EXPORT_SYMBOL(ftrace_print_hex_seq); #ifdef CONFIG_KRETPROBES static inline const char *kretprobed(const char *name) { static const char tramp_name[] = "kretprobe_trampoline"; int size = sizeof(tramp_name); if (strncmp(tramp_name, name, size) == 0) return "[unknown/kretprobe'd]"; return name; } #else static inline const char *kretprobed(const char *name) { return name; } #endif /* CONFIG_KRETPROBES */ static int seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address) { #ifdef CONFIG_KALLSYMS char str[KSYM_SYMBOL_LEN]; const char *name; kallsyms_lookup(address, NULL, NULL, NULL, str); name = kretprobed(str); return trace_seq_printf(s, fmt, name); #endif return 1; } static int seq_print_sym_offset(struct trace_seq *s, const char *fmt, unsigned long address) { #ifdef CONFIG_KALLSYMS char str[KSYM_SYMBOL_LEN]; const char *name; sprint_symbol(str, address); name = kretprobed(str); return trace_seq_printf(s, fmt, name); #endif return 1; } #ifndef CONFIG_64BIT # define IP_FMT "%08lx" #else # define IP_FMT "%016lx" #endif int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm, unsigned long ip, unsigned long sym_flags) { struct file *file = NULL; unsigned long vmstart = 0; int ret = 1; if (s->full) return 0; if (mm) { const struct vm_area_struct *vma; down_read(&mm->mmap_sem); vma = find_vma(mm, ip); if (vma) { file = vma->vm_file; vmstart = vma->vm_start; } if (file) { ret = trace_seq_path(s, &file->f_path); if (ret) ret = trace_seq_printf(s, "[+0x%lx]", ip - vmstart); } up_read(&mm->mmap_sem); } if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file)) ret = trace_seq_printf(s, " <" IP_FMT ">", ip); return ret; } int seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s, unsigned long sym_flags) { struct mm_struct *mm = NULL; int ret = 1; unsigned int i; if (trace_flags & TRACE_ITER_SYM_USEROBJ) { struct task_struct *task; /* * we do the lookup on the thread 
group leader, * since individual threads might have already quit! */ rcu_read_lock(); task = find_task_by_vpid(entry->tgid); if (task) mm = get_task_mm(task); rcu_read_unlock(); } for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { unsigned long ip = entry->caller[i]; if (ip == ULONG_MAX || !ret) break; if (ret) ret = trace_seq_puts(s, " => "); if (!ip) { if (ret) ret = trace_seq_puts(s, "??"); if (ret) ret = trace_seq_puts(s, "\n"); continue; } if (!ret) break; if (ret) ret = seq_print_user_ip(s, mm, ip, sym_flags); ret = trace_seq_puts(s, "\n"); } if (mm) mmput(mm); return ret; } int seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) { int ret; if (!ip) return trace_seq_printf(s, "0"); if (sym_flags & TRACE_ITER_SYM_OFFSET) ret = seq_print_sym_offset(s, "%s", ip); else ret = seq_print_sym_short(s, "%s", ip); if (!ret) return 0; if (sym_flags & TRACE_ITER_SYM_ADDR) ret = trace_seq_printf(s, " <" IP_FMT ">", ip); return ret; } /** * trace_print_lat_fmt - print the irq, preempt and lockdep fields * @s: trace seq struct to write to * @entry: The trace entry field from the ring buffer * * Prints the generic fields of irqs off, in hard or softirq, preempt * count. */ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) { char hardsoft_irq; char need_resched; char irqs_off; int hardirq; int softirq; int ret; hardirq = entry->flags & TRACE_FLAG_HARDIRQ; softirq = entry->flags & TRACE_FLAG_SOFTIRQ; irqs_off = (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.'; need_resched = (entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'; hardsoft_irq = (hardirq && softirq) ? 'H' : hardirq ? 'h' : softirq ? 
's' : '.';

	if (!trace_seq_printf(s, "%c%c%c",
			      irqs_off, need_resched, hardsoft_irq))
		return 0;

	if (entry->preempt_count)
		ret = trace_seq_printf(s, "%x", entry->preempt_count);
	else
		ret = trace_seq_putc(s, '.');

	return ret;
}

/* Print "comm-pid cpu" plus the latency flag characters. */
static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	if (!trace_seq_printf(s, "%8.8s-%-5d %3d",
			      comm, entry->pid, cpu))
		return 0;

	return trace_print_lat_fmt(s, entry);
}

/* relative-usec delta above which a '!' marker is printed */
static unsigned long preempt_mark_thresh = 100;

static int
lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
		    unsigned long rel_usecs)
{
	return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
				rel_usecs > preempt_mark_thresh ? '!' :
				  rel_usecs > 1 ? '+' : ' ');
}

/* Print the default "comm-pid [cpu] (flags) secs.usecs:" context. */
int trace_print_context(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	unsigned long long t = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(t, USEC_PER_SEC);
	unsigned long secs = (unsigned long)t;
	char comm[TASK_COMM_LEN];
	int ret;

	trace_find_cmdline(entry->pid, comm);

	ret = trace_seq_printf(s, "%16s-%-5d [%03d] ",
			       comm, entry->pid, iter->cpu);
	if (!ret)
		return 0;

	if (trace_flags & TRACE_ITER_IRQ_INFO) {
		ret = trace_print_lat_fmt(s, entry);
		if (!ret)
			return 0;
	}

	return trace_seq_printf(s, " %5lu.%06lu: ",
				secs, usec_rem);
}

/* Print the latency-trace context (verbose or compact form). */
int trace_print_lat_context(struct trace_iterator *iter)
{
	u64 next_ts;
	int ret;
	/* trace_find_next_entry will reset ent_size */
	int ent_size = iter->ent_size;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent,
			   *next_entry = trace_find_next_entry(iter, NULL,
							       &next_ts);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
	unsigned long rel_usecs;

	/* Restore the original ent_size */
	iter->ent_size = ent_size;

	if (!next_entry)
		next_ts = iter->ts;
	rel_usecs = ns2usecs(next_ts - iter->ts);

	if (verbose) {
		char comm[TASK_COMM_LEN];

		trace_find_cmdline(entry->pid, comm);

		ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
				       " %ld.%03ldms (+%ld.%03ldms): ", comm,
				       entry->pid, iter->cpu, entry->flags,
				       entry->preempt_count, iter->idx,
				       ns2usecs(iter->ts),
				       abs_usecs / USEC_PER_MSEC,
				       abs_usecs % USEC_PER_MSEC,
				       rel_usecs / USEC_PER_MSEC,
				       rel_usecs % USEC_PER_MSEC);
	} else {
		ret = lat_print_generic(s, entry, iter->cpu);
		if (ret)
			ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
	}

	return ret;
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

/* Map a task state bitmask to its single-character code ('?' if unknown). */
static int task_state_char(unsigned long state)
{
	int bit = state ? __ffs(state) + 1 : 0;

	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}

/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type otherwise NULL
 * Called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;
	struct hlist_node *n;
	unsigned key;

	key = type & (EVENT_HASHSIZE - 1);

	hlist_for_each_entry(event, n, &event_hash[key], node) {
		if (event->type == type)
			return event;
	}

	return NULL;
}

static LIST_HEAD(ftrace_event_list);

/* Find a free event type id by scanning for a gap in the sorted list. */
static int trace_search_list(struct list_head **list)
{
	struct trace_event *e;
	int last = __TRACE_LAST_TYPE;

	if (list_empty(&ftrace_event_list)) {
		*list = &ftrace_event_list;
		return last + 1;
	}

	/*
	 * We used up all possible max events,
	 * lets see if somebody freed one.
	 */
	list_for_each_entry(e, &ftrace_event_list, list) {
		if (e->type != last + 1)
			break;
		last++;
	}

	/* Did we used up all 65 thousand events??? */
	if ((last + 1) > FTRACE_MAX_EVENT)
		return 0;

	*list = &e->list;
	return last + 1;
}

void trace_event_read_lock(void)
{
	down_read(&trace_event_mutex);
}

void trace_event_read_unlock(void)
{
	up_read(&trace_event_mutex);
}

/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	down_write(&trace_event_mutex);

	if (WARN_ON(!event))
		goto out;

	if (WARN_ON(!event->funcs))
		goto out;

	INIT_LIST_HEAD(&event->list);

	if (!event->type) {
		struct list_head *list = NULL;

		if (next_event_type > FTRACE_MAX_EVENT) {

			/* sequential ids exhausted: reuse a freed one */
			event->type = trace_search_list(&list);
			if (!event->type)
				goto out;

		} else {

			event->type = next_event_type++;
			list = &ftrace_event_list;
		}

		if (WARN_ON(ftrace_find_event(event->type)))
			goto out;

		list_add_tail(&event->list, list);

	} else if (event->type > __TRACE_LAST_TYPE) {
		printk(KERN_WARNING "Need to add type to trace.h\n");
		WARN_ON(1);
		goto out;
	} else {
		/* Is this event already used */
		if (ftrace_find_event(event->type))
			goto out;
	}

	/* fill in default printers for any callbacks left unset */
	if (event->funcs->trace == NULL)
		event->funcs->trace = trace_nop_print;
	if (event->funcs->raw == NULL)
		event->funcs->raw = trace_nop_print;
	if (event->funcs->hex == NULL)
		event->funcs->hex = trace_nop_print;
	if (event->funcs->binary == NULL)
		event->funcs->binary = trace_nop_print;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head(&event->node, &event_hash[key]);

	ret = event->type;
 out:
	up_write(&trace_event_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_event);

/*
 * Used by module code with the trace_event_mutex held for write.
 */
int __unregister_ftrace_event(struct trace_event *event)
{
	hlist_del(&event->node);
	list_del(&event->list);
	return 0;
}

/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 */
int unregister_ftrace_event(struct trace_event *event)
{
	down_write(&trace_event_mutex);
	__unregister_ftrace_event(event);
	up_write(&trace_event_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_event);

/*
 * Standard events
 */

/* Default printer: just emits the raw event type number. */
enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
				  struct trace_event *event)
{
	if (!trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/* TRACE_FN */
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
		if (!trace_seq_printf(s, " <-"))
			goto partial;
		if (!seq_print_ip_sym(s, field->parent_ip, flags))
			goto partial;
	}
	if (!trace_seq_printf(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
			      field->ip, field->parent_ip))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD_RET(s, field->ip);
	SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->ip);
	SEQ_PUT_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}

static struct trace_event_functions trace_fn_funcs = {
	.trace		= trace_fn_trace,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};

static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.funcs		= &trace_fn_funcs,
};

/* TRACE_CTX an TRACE_WAKE */

/* Shared printer for context-switch and wakeup entries; @delim separates
 * the previous and next task columns. */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
					     char *delim)
{
	struct ctx_switch_entry *field;
	char comm[TASK_COMM_LEN];
	int S, T;

	trace_assign_type(field, iter->ent);

	T = task_state_char(field->next_state);
	S = task_state_char(field->prev_state);
	trace_find_cmdline(field->next_pid, comm);
	if (!trace_seq_printf(&iter->seq,
			      " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
			      field->prev_pid,
			      field->prev_prio,
			      S, delim,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T, comm))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	return trace_ctxwake_print(iter, "==>");
}

static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags, struct trace_event *event)
{
	return trace_ctxwake_print(iter, " +");
}

/* Raw form; S == 0 means "derive the prev-state char in here". */
static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);
	if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
			      field->prev_pid,
			      field->prev_prio,
			      S,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_raw(iter, 0);
}

static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int
flags, struct trace_event *event) { return trace_ctxwake_raw(iter, '+'); } static int trace_ctxwake_hex(struct trace_iterator *iter, char S) { struct ctx_switch_entry *field; struct trace_seq *s = &iter->seq; int T; trace_assign_type(field, iter->ent); if (!S) S = task_state_char(field->prev_state); T = task_state_char(field->next_state); SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio); SEQ_PUT_HEX_FIELD_RET(s, S); SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu); SEQ_PUT_HEX_FIELD_RET(s, field->next_pid); SEQ_PUT_HEX_FIELD_RET(s, field->next_prio); SEQ_PUT_HEX_FIELD_RET(s, T); return TRACE_TYPE_HANDLED; } static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags, struct trace_event *event) { return trace_ctxwake_hex(iter, 0); } static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags, struct trace_event *event) { return trace_ctxwake_hex(iter, '+'); } static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter, int flags, struct trace_event *event) { struct ctx_switch_entry *field; struct trace_seq *s = &iter->seq; trace_assign_type(field, iter->ent); SEQ_PUT_FIELD_RET(s, field->prev_pid); SEQ_PUT_FIELD_RET(s, field->prev_prio); SEQ_PUT_FIELD_RET(s, field->prev_state); SEQ_PUT_FIELD_RET(s, field->next_pid); SEQ_PUT_FIELD_RET(s, field->next_prio); SEQ_PUT_FIELD_RET(s, field->next_state); return TRACE_TYPE_HANDLED; } static struct trace_event_functions trace_ctx_funcs = { .trace = trace_ctx_print, .raw = trace_ctx_raw, .hex = trace_ctx_hex, .binary = trace_ctxwake_bin, }; static struct trace_event trace_ctx_event = { .type = TRACE_CTX, .funcs = &trace_ctx_funcs, }; static struct trace_event_functions trace_wake_funcs = { .trace = trace_wake_print, .raw = trace_wake_raw, .hex = trace_wake_hex, .binary = trace_ctxwake_bin, }; static struct trace_event trace_wake_event = { .type = TRACE_WAKE, .funcs = &trace_wake_funcs, }; /* TRACE_STACK */ static enum print_line_t 
trace_stack_print(struct trace_iterator *iter, int flags, struct trace_event *event) { struct stack_entry *field; struct trace_seq *s = &iter->seq; unsigned long *p; unsigned long *end; trace_assign_type(field, iter->ent); end = (unsigned long *)((long)iter->ent + iter->ent_size); if (!trace_seq_puts(s, "<stack trace>\n")) goto partial; for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) { if (!trace_seq_puts(s, " => ")) goto partial; if (!seq_print_ip_sym(s, *p, flags)) goto partial; if (!trace_seq_puts(s, "\n")) goto partial; } return TRACE_TYPE_HANDLED; partial: return TRACE_TYPE_PARTIAL_LINE; } static struct trace_event_functions trace_stack_funcs = { .trace = trace_stack_print, }; static struct trace_event trace_stack_event = { .type = TRACE_STACK, .funcs = &trace_stack_funcs, }; /* TRACE_USER_STACK */ static enum print_line_t trace_user_stack_print(struct trace_iterator *iter, int flags, struct trace_event *event) { struct userstack_entry *field; struct trace_seq *s = &iter->seq; trace_assign_type(field, iter->ent); if (!trace_seq_puts(s, "<user stack trace>\n")) goto partial; if (!seq_print_userip_objs(field, s, flags)) goto partial; return TRACE_TYPE_HANDLED; partial: return TRACE_TYPE_PARTIAL_LINE; } static struct trace_event_functions trace_user_stack_funcs = { .trace = trace_user_stack_print, }; static struct trace_event trace_user_stack_event = { .type = TRACE_USER_STACK, .funcs = &trace_user_stack_funcs, }; /* TRACE_BPRINT */ static enum print_line_t trace_bprint_print(struct trace_iterator *iter, int flags, struct trace_event *event) { struct trace_entry *entry = iter->ent; struct trace_seq *s = &iter->seq; struct bprint_entry *field; trace_assign_type(field, entry); if (!seq_print_ip_sym(s, field->ip, flags)) goto partial; if (!trace_seq_puts(s, ": ")) goto partial; if (!trace_seq_bprintf(s, field->fmt, field->buf)) goto partial; return TRACE_TYPE_HANDLED; partial: return TRACE_TYPE_PARTIAL_LINE; } static enum print_line_t 
trace_bprint_raw(struct trace_iterator *iter, int flags, struct trace_event *event) { struct bprint_entry *field; struct trace_seq *s = &iter->seq; trace_assign_type(field, iter->ent); if (!trace_seq_printf(s, ": %lx : ", field->ip)) goto partial; if (!trace_seq_bprintf(s, field->fmt, field->buf)) goto partial; return TRACE_TYPE_HANDLED; partial: return TRACE_TYPE_PARTIAL_LINE; } static struct trace_event_functions trace_bprint_funcs = { .trace = trace_bprint_print, .raw = trace_bprint_raw, }; static struct trace_event trace_bprint_event = { .type = TRACE_BPRINT, .funcs = &trace_bprint_funcs, }; /* TRACE_PRINT */ static enum print_line_t trace_print_print(struct trace_iterator *iter, int flags, struct trace_event *event) { struct print_entry *field; struct trace_seq *s = &iter->seq; trace_assign_type(field, iter->ent); if (!seq_print_ip_sym(s, field->ip, flags)) goto partial; if (!trace_seq_printf(s, ": %s", field->buf)) goto partial; return TRACE_TYPE_HANDLED; partial: return TRACE_TYPE_PARTIAL_LINE; } static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags, struct trace_event *event) { struct print_entry *field; trace_assign_type(field, iter->ent); if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf)) goto partial; return TRACE_TYPE_HANDLED; partial: return TRACE_TYPE_PARTIAL_LINE; } static struct trace_event_functions trace_print_funcs = { .trace = trace_print_print, .raw = trace_print_raw, }; static struct trace_event trace_print_event = { .type = TRACE_PRINT, .funcs = &trace_print_funcs, }; static struct trace_event *events[] __initdata = { &trace_fn_event, &trace_ctx_event, &trace_wake_event, &trace_stack_event, &trace_user_stack_event, &trace_bprint_event, &trace_print_event, NULL }; __init static int init_events(void) { struct trace_event *event; int i, ret; for (i = 0; events[i]; i++) { event = events[i]; ret = register_ftrace_event(event); if (!ret) { printk(KERN_WARNING "event %d failed to register\n", 
event->type); WARN_ON_ONCE(1); } } return 0; } device_initcall(init_events);
gpl-2.0
grouper-tests/android_kernel_asus_grouper
drivers/isdn/hardware/eicon/maintidi.c
4301
68416
/* * Copyright (c) Eicon Networks, 2000. * This source file is supplied for the use with Eicon Networks range of DIVA Server Adapters. * Eicon File Revision : 1.9 * This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. * You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include "platform.h" #include "kst_ifc.h" #include "di_defs.h" #include "maintidi.h" #include "pc.h" #include "man_defs.h" extern void diva_mnt_internal_dprintf (dword drv_id, dword type, char* p, ...); #define MODEM_PARSE_ENTRIES 16 /* amount of variables of interest */ #define FAX_PARSE_ENTRIES 12 /* amount of variables of interest */ #define LINE_PARSE_ENTRIES 15 /* amount of variables of interest */ #define STAT_PARSE_ENTRIES 70 /* amount of variables of interest */ /* LOCAL FUNCTIONS */ static int DivaSTraceLibraryStart (void* hLib); static int DivaSTraceLibraryStop (void* hLib); static int SuperTraceLibraryFinit (void* hLib); static void* SuperTraceGetHandle (void* hLib); static int SuperTraceMessageInput (void* hLib); static int SuperTraceSetAudioTap (void* hLib, int Channel, int on); static int SuperTraceSetBChannel (void* hLib, int Channel, int on); static int SuperTraceSetDChannel (void* hLib, int on); static int SuperTraceSetInfo (void* hLib, int on); static int SuperTraceClearCall (void* hLib, int Channel); static int SuperTraceGetOutgoingCallStatistics (void* hLib); static int SuperTraceGetIncomingCallStatistics (void* hLib); static 
int SuperTraceGetModemStatistics (void* hLib); static int SuperTraceGetFaxStatistics (void* hLib); static int SuperTraceGetBLayer1Statistics (void* hLib); static int SuperTraceGetBLayer2Statistics (void* hLib); static int SuperTraceGetDLayer1Statistics (void* hLib); static int SuperTraceGetDLayer2Statistics (void* hLib); /* LOCAL FUNCTIONS */ static int ScheduleNextTraceRequest (diva_strace_context_t* pLib); static int process_idi_event (diva_strace_context_t* pLib, diva_man_var_header_t* pVar); static int process_idi_info (diva_strace_context_t* pLib, diva_man_var_header_t* pVar); static int diva_modem_event (diva_strace_context_t* pLib, int Channel); static int diva_fax_event (diva_strace_context_t* pLib, int Channel); static int diva_line_event (diva_strace_context_t* pLib, int Channel); static int diva_modem_info (diva_strace_context_t* pLib, int Channel, diva_man_var_header_t* pVar); static int diva_fax_info (diva_strace_context_t* pLib, int Channel, diva_man_var_header_t* pVar); static int diva_line_info (diva_strace_context_t* pLib, int Channel, diva_man_var_header_t* pVar); static int diva_ifc_statistics (diva_strace_context_t* pLib, diva_man_var_header_t* pVar); static diva_man_var_header_t* get_next_var (diva_man_var_header_t* pVar); static diva_man_var_header_t* find_var (diva_man_var_header_t* pVar, const char* name); static int diva_strace_read_int (diva_man_var_header_t* pVar, int* var); static int diva_strace_read_uint (diva_man_var_header_t* pVar, dword* var); static int diva_strace_read_asz (diva_man_var_header_t* pVar, char* var); static int diva_strace_read_asc (diva_man_var_header_t* pVar, char* var); static int diva_strace_read_ie (diva_man_var_header_t* pVar, diva_trace_ie_t* var); static void diva_create_parse_table (diva_strace_context_t* pLib); static void diva_trace_error (diva_strace_context_t* pLib, int error, const char* file, int line); static void diva_trace_notify_user (diva_strace_context_t* pLib, int Channel, int notify_subject); 
static int diva_trace_read_variable (diva_man_var_header_t* pVar, void* variable); /* Initialize the library and return context of the created trace object that will represent the IDI adapter. Return 0 on error. */ diva_strace_library_interface_t* DivaSTraceLibraryCreateInstance (int Adapter, const diva_trace_library_user_interface_t* user_proc, byte* pmem) { diva_strace_context_t* pLib = (diva_strace_context_t*)pmem; int i; if (!pLib) { return NULL; } pmem += sizeof(*pLib); memset(pLib, 0x00, sizeof(*pLib)); pLib->Adapter = Adapter; /* Set up Library Interface */ pLib->instance.hLib = pLib; pLib->instance.DivaSTraceLibraryStart = DivaSTraceLibraryStart; pLib->instance.DivaSTraceLibraryStop = DivaSTraceLibraryStop; pLib->instance.DivaSTraceLibraryFinit = SuperTraceLibraryFinit; pLib->instance.DivaSTraceMessageInput = SuperTraceMessageInput; pLib->instance.DivaSTraceGetHandle = SuperTraceGetHandle; pLib->instance.DivaSTraceSetAudioTap = SuperTraceSetAudioTap; pLib->instance.DivaSTraceSetBChannel = SuperTraceSetBChannel; pLib->instance.DivaSTraceSetDChannel = SuperTraceSetDChannel; pLib->instance.DivaSTraceSetInfo = SuperTraceSetInfo; pLib->instance.DivaSTraceGetOutgoingCallStatistics = \ SuperTraceGetOutgoingCallStatistics; pLib->instance.DivaSTraceGetIncomingCallStatistics = \ SuperTraceGetIncomingCallStatistics; pLib->instance.DivaSTraceGetModemStatistics = \ SuperTraceGetModemStatistics; pLib->instance.DivaSTraceGetFaxStatistics = \ SuperTraceGetFaxStatistics; pLib->instance.DivaSTraceGetBLayer1Statistics = \ SuperTraceGetBLayer1Statistics; pLib->instance.DivaSTraceGetBLayer2Statistics = \ SuperTraceGetBLayer2Statistics; pLib->instance.DivaSTraceGetDLayer1Statistics = \ SuperTraceGetDLayer1Statistics; pLib->instance.DivaSTraceGetDLayer2Statistics = \ SuperTraceGetDLayer2Statistics; pLib->instance.DivaSTraceClearCall = SuperTraceClearCall; if (user_proc) { pLib->user_proc_table.user_context = user_proc->user_context; pLib->user_proc_table.notify_proc = 
user_proc->notify_proc; pLib->user_proc_table.trace_proc = user_proc->trace_proc; pLib->user_proc_table.error_notify_proc = user_proc->error_notify_proc; } if (!(pLib->hAdapter = SuperTraceOpenAdapter (Adapter))) { diva_mnt_internal_dprintf (0, DLI_ERR, "Can not open XDI adapter"); return NULL; } pLib->Channels = SuperTraceGetNumberOfChannels (pLib->hAdapter); /* Calculate amount of parte table entites necessary to translate information from all events of onterest */ pLib->parse_entries = (MODEM_PARSE_ENTRIES + FAX_PARSE_ENTRIES + \ STAT_PARSE_ENTRIES + \ LINE_PARSE_ENTRIES + 1) * pLib->Channels; pLib->parse_table = (diva_strace_path2action_t*)pmem; for (i = 0; i < 30; i++) { pLib->lines[i].pInterface = &pLib->Interface; pLib->lines[i].pInterfaceStat = &pLib->InterfaceStat; } pLib->e.R = &pLib->RData; pLib->req_busy = 1; pLib->rc_ok = ASSIGN_OK; diva_create_parse_table (pLib); return ((diva_strace_library_interface_t*)pLib); } static int DivaSTraceLibraryStart (void* hLib) { diva_strace_context_t* pLib = (diva_strace_context_t*)hLib; return (SuperTraceASSIGN (pLib->hAdapter, pLib->buffer)); } /* Return (-1) on error Return (0) if was initiated or pending Return (1) if removal is complete */ static int DivaSTraceLibraryStop (void* hLib) { diva_strace_context_t* pLib = (diva_strace_context_t*)hLib; if (!pLib->e.Id) { /* Was never started/assigned */ return (1); } switch (pLib->removal_state) { case 0: pLib->removal_state = 1; ScheduleNextTraceRequest(pLib); break; case 3: return (1); } return (0); } static int SuperTraceLibraryFinit (void* hLib) { diva_strace_context_t* pLib = (diva_strace_context_t*)hLib; if (pLib) { if (pLib->hAdapter) { SuperTraceCloseAdapter (pLib->hAdapter); } return (0); } return (-1); } static void* SuperTraceGetHandle (void* hLib) { diva_strace_context_t* pLib = (diva_strace_context_t*)hLib; return (&pLib->e); } /* After library handle object is gone in signaled state this function should be called and will pick up incoming IDI messages 
(return codes and indications). */ static int SuperTraceMessageInput (void* hLib) { diva_strace_context_t* pLib = (diva_strace_context_t*)hLib; int ret = 0; byte Rc, Ind; if (pLib->e.complete == 255) { /* Process return code */ pLib->req_busy = 0; Rc = pLib->e.Rc; pLib->e.Rc = 0; if (pLib->removal_state == 2) { pLib->removal_state = 3; return (0); } if (Rc != pLib->rc_ok) { int ignore = 0; /* Auto-detect amount of events/channels and features */ if (pLib->general_b_ch_event == 1) { pLib->general_b_ch_event = 2; ignore = 1; } else if (pLib->general_fax_event == 1) { pLib->general_fax_event = 2; ignore = 1; } else if (pLib->general_mdm_event == 1) { pLib->general_mdm_event = 2; ignore = 1; } else if ((pLib->ChannelsTraceActive < pLib->Channels) && pLib->ChannelsTraceActive) { pLib->ChannelsTraceActive = pLib->Channels; ignore = 1; } else if (pLib->ModemTraceActive < pLib->Channels) { pLib->ModemTraceActive = pLib->Channels; ignore = 1; } else if (pLib->FaxTraceActive < pLib->Channels) { pLib->FaxTraceActive = pLib->Channels; ignore = 1; } else if (pLib->audio_trace_init == 2) { ignore = 1; pLib->audio_trace_init = 1; } else if (pLib->eye_pattern_pending) { pLib->eye_pattern_pending = 0; ignore = 1; } else if (pLib->audio_tap_pending) { pLib->audio_tap_pending = 0; ignore = 1; } if (!ignore) { return (-1); /* request failed */ } } else { if (pLib->general_b_ch_event == 1) { pLib->ChannelsTraceActive = pLib->Channels; pLib->general_b_ch_event = 2; } else if (pLib->general_fax_event == 1) { pLib->general_fax_event = 2; pLib->FaxTraceActive = pLib->Channels; } else if (pLib->general_mdm_event == 1) { pLib->general_mdm_event = 2; pLib->ModemTraceActive = pLib->Channels; } } if (pLib->audio_trace_init == 2) { pLib->audio_trace_init = 1; } pLib->rc_ok = 0xff; /* default OK after assign was done */ if ((ret = ScheduleNextTraceRequest(pLib))) { return (-1); } } else { /* Process indication Always 'RNR' indication if return code is pending */ Ind = pLib->e.Ind; pLib->e.Ind = 
0; if (pLib->removal_state) { pLib->e.RNum = 0; pLib->e.RNR = 2; } else if (pLib->req_busy) { pLib->e.RNum = 0; pLib->e.RNR = 1; } else { if (pLib->e.complete != 0x02) { /* Look-ahead call, set up buffers */ pLib->e.RNum = 1; pLib->e.R->P = (byte*)&pLib->buffer[0]; pLib->e.R->PLength = (word)(sizeof(pLib->buffer) - 1); } else { /* Indication reception complete, process it now */ byte* p = (byte*)&pLib->buffer[0]; pLib->buffer[pLib->e.R->PLength] = 0; /* terminate I.E. with zero */ switch (Ind) { case MAN_COMBI_IND: { int total_length = pLib->e.R->PLength; word this_ind_length; while (total_length > 3 && *p) { Ind = *p++; this_ind_length = (word)p[0] | ((word)p[1] << 8); p += 2; switch (Ind) { case MAN_INFO_IND: if (process_idi_info (pLib, (diva_man_var_header_t*)p)) { return (-1); } break; case MAN_EVENT_IND: if (process_idi_event (pLib, (diva_man_var_header_t*)p)) { return (-1); } break; case MAN_TRACE_IND: if (pLib->trace_on == 1) { /* Ignore first trace event that is result of EVENT_ON operation */ pLib->trace_on++; } else { /* Delivery XLOG buffer to application */ if (pLib->user_proc_table.trace_proc) { (*(pLib->user_proc_table.trace_proc))(pLib->user_proc_table.user_context, &pLib->instance, pLib->Adapter, p, this_ind_length); } } break; default: diva_mnt_internal_dprintf (0, DLI_ERR, "Unknown IDI Ind (DMA mode): %02x", Ind); } p += (this_ind_length+1); total_length -= (4 + this_ind_length); } } break; case MAN_INFO_IND: if (process_idi_info (pLib, (diva_man_var_header_t*)p)) { return (-1); } break; case MAN_EVENT_IND: if (process_idi_event (pLib, (diva_man_var_header_t*)p)) { return (-1); } break; case MAN_TRACE_IND: if (pLib->trace_on == 1) { /* Ignore first trace event that is result of EVENT_ON operation */ pLib->trace_on++; } else { /* Delivery XLOG buffer to application */ if (pLib->user_proc_table.trace_proc) { (*(pLib->user_proc_table.trace_proc))(pLib->user_proc_table.user_context, &pLib->instance, pLib->Adapter, p, pLib->e.R->PLength); } } break; 
default: diva_mnt_internal_dprintf (0, DLI_ERR, "Unknown IDI Ind: %02x", Ind); } } } } if ((ret = ScheduleNextTraceRequest(pLib))) { return (-1); } return (ret); } /* Internal state machine responsible for scheduling of requests */ static int ScheduleNextTraceRequest (diva_strace_context_t* pLib) { char name[64]; int ret = 0; int i; if (pLib->req_busy) { return (0); } if (pLib->removal_state == 1) { if (SuperTraceREMOVE (pLib->hAdapter)) { pLib->removal_state = 3; } else { pLib->req_busy = 1; pLib->removal_state = 2; } return (0); } if (pLib->removal_state) { return (0); } if (!pLib->general_b_ch_event) { if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, "State\\B Event", pLib->buffer))) { return (-1); } pLib->general_b_ch_event = 1; pLib->req_busy = 1; return (0); } if (!pLib->general_fax_event) { if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, "State\\FAX Event", pLib->buffer))) { return (-1); } pLib->general_fax_event = 1; pLib->req_busy = 1; return (0); } if (!pLib->general_mdm_event) { if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, "State\\Modem Event", pLib->buffer))) { return (-1); } pLib->general_mdm_event = 1; pLib->req_busy = 1; return (0); } if (pLib->ChannelsTraceActive < pLib->Channels) { pLib->ChannelsTraceActive++; sprintf (name, "State\\B%d\\Line", pLib->ChannelsTraceActive); if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, name, pLib->buffer))) { pLib->ChannelsTraceActive--; return (-1); } pLib->req_busy = 1; return (0); } if (pLib->ModemTraceActive < pLib->Channels) { pLib->ModemTraceActive++; sprintf (name, "State\\B%d\\Modem\\Event", pLib->ModemTraceActive); if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, name, pLib->buffer))) { pLib->ModemTraceActive--; return (-1); } pLib->req_busy = 1; return (0); } if (pLib->FaxTraceActive < pLib->Channels) { pLib->FaxTraceActive++; sprintf (name, "State\\B%d\\FAX\\Event", pLib->FaxTraceActive); if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, name, pLib->buffer))) { 
pLib->FaxTraceActive--; return (-1); } pLib->req_busy = 1; return (0); } if (!pLib->trace_mask_init) { word tmp = 0x0000; if (SuperTraceWriteVar (pLib->hAdapter, pLib->buffer, "Trace\\Event Enable", &tmp, 0x87, /* MI_BITFLD */ sizeof(tmp))) { return (-1); } pLib->trace_mask_init = 1; pLib->req_busy = 1; return (0); } if (!pLib->audio_trace_init) { dword tmp = 0x00000000; if (SuperTraceWriteVar (pLib->hAdapter, pLib->buffer, "Trace\\AudioCh# Enable", &tmp, 0x87, /* MI_BITFLD */ sizeof(tmp))) { return (-1); } pLib->audio_trace_init = 2; pLib->req_busy = 1; return (0); } if (!pLib->bchannel_init) { dword tmp = 0x00000000; if (SuperTraceWriteVar (pLib->hAdapter, pLib->buffer, "Trace\\B-Ch# Enable", &tmp, 0x87, /* MI_BITFLD */ sizeof(tmp))) { return (-1); } pLib->bchannel_init = 1; pLib->req_busy = 1; return (0); } if (!pLib->trace_length_init) { word tmp = 30; if (SuperTraceWriteVar (pLib->hAdapter, pLib->buffer, "Trace\\Max Log Length", &tmp, 0x82, /* MI_UINT */ sizeof(tmp))) { return (-1); } pLib->trace_length_init = 1; pLib->req_busy = 1; return (0); } if (!pLib->trace_on) { if (SuperTraceTraceOnRequest (pLib->hAdapter, "Trace\\Log Buffer", pLib->buffer)) { return (-1); } pLib->trace_on = 1; pLib->req_busy = 1; return (0); } if (pLib->trace_event_mask != pLib->current_trace_event_mask) { if (SuperTraceWriteVar (pLib->hAdapter, pLib->buffer, "Trace\\Event Enable", &pLib->trace_event_mask, 0x87, /* MI_BITFLD */ sizeof(pLib->trace_event_mask))) { return (-1); } pLib->current_trace_event_mask = pLib->trace_event_mask; pLib->req_busy = 1; return (0); } if ((pLib->audio_tap_pending >= 0) && (pLib->audio_tap_mask != pLib->current_audio_tap_mask)) { if (SuperTraceWriteVar (pLib->hAdapter, pLib->buffer, "Trace\\AudioCh# Enable", &pLib->audio_tap_mask, 0x87, /* MI_BITFLD */ sizeof(pLib->audio_tap_mask))) { return (-1); } pLib->current_audio_tap_mask = pLib->audio_tap_mask; pLib->audio_tap_pending = 1; pLib->req_busy = 1; return (0); } if ((pLib->eye_pattern_pending >= 0) && 
(pLib->audio_tap_mask != pLib->current_eye_pattern_mask)) { if (SuperTraceWriteVar (pLib->hAdapter, pLib->buffer, "Trace\\EyeCh# Enable", &pLib->audio_tap_mask, 0x87, /* MI_BITFLD */ sizeof(pLib->audio_tap_mask))) { return (-1); } pLib->current_eye_pattern_mask = pLib->audio_tap_mask; pLib->eye_pattern_pending = 1; pLib->req_busy = 1; return (0); } if (pLib->bchannel_trace_mask != pLib->current_bchannel_trace_mask) { if (SuperTraceWriteVar (pLib->hAdapter, pLib->buffer, "Trace\\B-Ch# Enable", &pLib->bchannel_trace_mask, 0x87, /* MI_BITFLD */ sizeof(pLib->bchannel_trace_mask))) { return (-1); } pLib->current_bchannel_trace_mask = pLib->bchannel_trace_mask; pLib->req_busy = 1; return (0); } if (!pLib->trace_events_down) { if (SuperTraceTraceOnRequest (pLib->hAdapter, "Events Down", pLib->buffer)) { return (-1); } pLib->trace_events_down = 1; pLib->req_busy = 1; return (0); } if (!pLib->l1_trace) { if (SuperTraceTraceOnRequest (pLib->hAdapter, "State\\Layer1", pLib->buffer)) { return (-1); } pLib->l1_trace = 1; pLib->req_busy = 1; return (0); } if (!pLib->l2_trace) { if (SuperTraceTraceOnRequest (pLib->hAdapter, "State\\Layer2 No1", pLib->buffer)) { return (-1); } pLib->l2_trace = 1; pLib->req_busy = 1; return (0); } for (i = 0; i < 30; i++) { if (pLib->pending_line_status & (1L << i)) { sprintf (name, "State\\B%d", i+1); if (SuperTraceReadRequest (pLib->hAdapter, name, pLib->buffer)) { return (-1); } pLib->pending_line_status &= ~(1L << i); pLib->req_busy = 1; return (0); } if (pLib->pending_modem_status & (1L << i)) { sprintf (name, "State\\B%d\\Modem", i+1); if (SuperTraceReadRequest (pLib->hAdapter, name, pLib->buffer)) { return (-1); } pLib->pending_modem_status &= ~(1L << i); pLib->req_busy = 1; return (0); } if (pLib->pending_fax_status & (1L << i)) { sprintf (name, "State\\B%d\\FAX", i+1); if (SuperTraceReadRequest (pLib->hAdapter, name, pLib->buffer)) { return (-1); } pLib->pending_fax_status &= ~(1L << i); pLib->req_busy = 1; return (0); } if 
(pLib->clear_call_command & (1L << i)) { sprintf (name, "State\\B%d\\Clear Call", i+1); if (SuperTraceExecuteRequest (pLib->hAdapter, name, pLib->buffer)) { return (-1); } pLib->clear_call_command &= ~(1L << i); pLib->req_busy = 1; return (0); } } if (pLib->outgoing_ifc_stats) { if (SuperTraceReadRequest (pLib->hAdapter, "Statistics\\Outgoing Calls", pLib->buffer)) { return (-1); } pLib->outgoing_ifc_stats = 0; pLib->req_busy = 1; return (0); } if (pLib->incoming_ifc_stats) { if (SuperTraceReadRequest (pLib->hAdapter, "Statistics\\Incoming Calls", pLib->buffer)) { return (-1); } pLib->incoming_ifc_stats = 0; pLib->req_busy = 1; return (0); } if (pLib->modem_ifc_stats) { if (SuperTraceReadRequest (pLib->hAdapter, "Statistics\\Modem", pLib->buffer)) { return (-1); } pLib->modem_ifc_stats = 0; pLib->req_busy = 1; return (0); } if (pLib->fax_ifc_stats) { if (SuperTraceReadRequest (pLib->hAdapter, "Statistics\\FAX", pLib->buffer)) { return (-1); } pLib->fax_ifc_stats = 0; pLib->req_busy = 1; return (0); } if (pLib->b1_ifc_stats) { if (SuperTraceReadRequest (pLib->hAdapter, "Statistics\\B-Layer1", pLib->buffer)) { return (-1); } pLib->b1_ifc_stats = 0; pLib->req_busy = 1; return (0); } if (pLib->b2_ifc_stats) { if (SuperTraceReadRequest (pLib->hAdapter, "Statistics\\B-Layer2", pLib->buffer)) { return (-1); } pLib->b2_ifc_stats = 0; pLib->req_busy = 1; return (0); } if (pLib->d1_ifc_stats) { if (SuperTraceReadRequest (pLib->hAdapter, "Statistics\\D-Layer1", pLib->buffer)) { return (-1); } pLib->d1_ifc_stats = 0; pLib->req_busy = 1; return (0); } if (pLib->d2_ifc_stats) { if (SuperTraceReadRequest (pLib->hAdapter, "Statistics\\D-Layer2", pLib->buffer)) { return (-1); } pLib->d2_ifc_stats = 0; pLib->req_busy = 1; return (0); } if (!pLib->IncomingCallsCallsActive) { pLib->IncomingCallsCallsActive = 1; sprintf (name, "%s", "Statistics\\Incoming Calls\\Calls"); if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, name, pLib->buffer))) { pLib->IncomingCallsCallsActive = 0; 
return (-1); } pLib->req_busy = 1; return (0); } if (!pLib->IncomingCallsConnectedActive) { pLib->IncomingCallsConnectedActive = 1; sprintf (name, "%s", "Statistics\\Incoming Calls\\Connected"); if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, name, pLib->buffer))) { pLib->IncomingCallsConnectedActive = 0; return (-1); } pLib->req_busy = 1; return (0); } if (!pLib->OutgoingCallsCallsActive) { pLib->OutgoingCallsCallsActive = 1; sprintf (name, "%s", "Statistics\\Outgoing Calls\\Calls"); if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, name, pLib->buffer))) { pLib->OutgoingCallsCallsActive = 0; return (-1); } pLib->req_busy = 1; return (0); } if (!pLib->OutgoingCallsConnectedActive) { pLib->OutgoingCallsConnectedActive = 1; sprintf (name, "%s", "Statistics\\Outgoing Calls\\Connected"); if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, name, pLib->buffer))) { pLib->OutgoingCallsConnectedActive = 0; return (-1); } pLib->req_busy = 1; return (0); } return (0); } static int process_idi_event (diva_strace_context_t* pLib, diva_man_var_header_t* pVar) { const char* path = (char*)&pVar->path_length+1; char name[64]; int i; if (!strncmp("State\\B Event", path, pVar->path_length)) { dword ch_id; if (!diva_trace_read_variable (pVar, &ch_id)) { if (!pLib->line_init_event && !pLib->pending_line_status) { for (i = 1; i <= pLib->Channels; i++) { diva_line_event(pLib, i); } return (0); } else if (ch_id && ch_id <= pLib->Channels) { return (diva_line_event(pLib, (int)ch_id)); } return (0); } return (-1); } if (!strncmp("State\\FAX Event", path, pVar->path_length)) { dword ch_id; if (!diva_trace_read_variable (pVar, &ch_id)) { if (!pLib->pending_fax_status && !pLib->fax_init_event) { for (i = 1; i <= pLib->Channels; i++) { diva_fax_event(pLib, i); } return (0); } else if (ch_id && ch_id <= pLib->Channels) { return (diva_fax_event(pLib, (int)ch_id)); } return (0); } return (-1); } if (!strncmp("State\\Modem Event", path, pVar->path_length)) { dword ch_id; if 
(!diva_trace_read_variable (pVar, &ch_id)) { if (!pLib->pending_modem_status && !pLib->modem_init_event) { for (i = 1; i <= pLib->Channels; i++) { diva_modem_event(pLib, i); } return (0); } else if (ch_id && ch_id <= pLib->Channels) { return (diva_modem_event(pLib, (int)ch_id)); } return (0); } return (-1); } /* First look for Line Event */ for (i = 1; i <= pLib->Channels; i++) { sprintf (name, "State\\B%d\\Line", i); if (find_var (pVar, name)) { return (diva_line_event(pLib, i)); } } /* Look for Moden Progress Event */ for (i = 1; i <= pLib->Channels; i++) { sprintf (name, "State\\B%d\\Modem\\Event", i); if (find_var (pVar, name)) { return (diva_modem_event (pLib, i)); } } /* Look for Fax Event */ for (i = 1; i <= pLib->Channels; i++) { sprintf (name, "State\\B%d\\FAX\\Event", i); if (find_var (pVar, name)) { return (diva_fax_event (pLib, i)); } } /* Notification about loss of events */ if (!strncmp("Events Down", path, pVar->path_length)) { if (pLib->trace_events_down == 1) { pLib->trace_events_down = 2; } else { diva_trace_error (pLib, 1, "Events Down", 0); } return (0); } if (!strncmp("State\\Layer1", path, pVar->path_length)) { diva_strace_read_asz (pVar, &pLib->lines[0].pInterface->Layer1[0]); if (pLib->l1_trace == 1) { pLib->l1_trace = 2; } else { diva_trace_notify_user (pLib, 0, DIVA_SUPER_TRACE_INTERFACE_CHANGE); } return (0); } if (!strncmp("State\\Layer2 No1", path, pVar->path_length)) { char* tmp = &pLib->lines[0].pInterface->Layer2[0]; dword l2_state; if (diva_strace_read_uint(pVar, &l2_state)) return -1; switch (l2_state) { case 0: strcpy (tmp, "Idle"); break; case 1: strcpy (tmp, "Layer2 UP"); break; case 2: strcpy (tmp, "Layer2 Disconnecting"); break; case 3: strcpy (tmp, "Layer2 Connecting"); break; case 4: strcpy (tmp, "SPID Initializing"); break; case 5: strcpy (tmp, "SPID Initialised"); break; case 6: strcpy (tmp, "Layer2 Connecting"); break; case 7: strcpy (tmp, "Auto SPID Stopped"); break; case 8: strcpy (tmp, "Auto SPID Idle"); break; case 9: 
strcpy (tmp, "Auto SPID Requested"); break; case 10: strcpy (tmp, "Auto SPID Delivery"); break; case 11: strcpy (tmp, "Auto SPID Complete"); break; default: sprintf (tmp, "U:%d", (int)l2_state); } if (pLib->l2_trace == 1) { pLib->l2_trace = 2; } else { diva_trace_notify_user (pLib, 0, DIVA_SUPER_TRACE_INTERFACE_CHANGE); } return (0); } if (!strncmp("Statistics\\Incoming Calls\\Calls", path, pVar->path_length) || !strncmp("Statistics\\Incoming Calls\\Connected", path, pVar->path_length)) { return (SuperTraceGetIncomingCallStatistics (pLib)); } if (!strncmp("Statistics\\Outgoing Calls\\Calls", path, pVar->path_length) || !strncmp("Statistics\\Outgoing Calls\\Connected", path, pVar->path_length)) { return (SuperTraceGetOutgoingCallStatistics (pLib)); } return (-1); } static int diva_line_event (diva_strace_context_t* pLib, int Channel) { pLib->pending_line_status |= (1L << (Channel-1)); return (0); } static int diva_modem_event (diva_strace_context_t* pLib, int Channel) { pLib->pending_modem_status |= (1L << (Channel-1)); return (0); } static int diva_fax_event (diva_strace_context_t* pLib, int Channel) { pLib->pending_fax_status |= (1L << (Channel-1)); return (0); } /* Process INFO indications that arrive from the card Uses path of first I.E. 
to detect the source of the infication */ static int process_idi_info (diva_strace_context_t* pLib, diva_man_var_header_t* pVar) { const char* path = (char*)&pVar->path_length+1; char name[64]; int i, len; /* First look for Modem Status Info */ for (i = pLib->Channels; i > 0; i--) { len = sprintf (name, "State\\B%d\\Modem", i); if (!strncmp(name, path, len)) { return (diva_modem_info (pLib, i, pVar)); } } /* Look for Fax Status Info */ for (i = pLib->Channels; i > 0; i--) { len = sprintf (name, "State\\B%d\\FAX", i); if (!strncmp(name, path, len)) { return (diva_fax_info (pLib, i, pVar)); } } /* Look for Line Status Info */ for (i = pLib->Channels; i > 0; i--) { len = sprintf (name, "State\\B%d", i); if (!strncmp(name, path, len)) { return (diva_line_info (pLib, i, pVar)); } } if (!diva_ifc_statistics (pLib, pVar)) { return (0); } return (-1); } /* MODEM INSTANCE STATE UPDATE Update Modem Status Information and issue notification to user, that will inform about change in the state of modem instance, that is associuated with this channel */ static int diva_modem_info (diva_strace_context_t* pLib, int Channel, diva_man_var_header_t* pVar) { diva_man_var_header_t* cur; int i, nr = Channel - 1; for (i = pLib->modem_parse_entry_first[nr]; i <= pLib->modem_parse_entry_last[nr]; i++) { if ((cur = find_var (pVar, pLib->parse_table[i].path))) { if (diva_trace_read_variable (cur, pLib->parse_table[i].variable)) { diva_trace_error (pLib, -3 , __FILE__, __LINE__); return (-1); } } else { diva_trace_error (pLib, -2 , __FILE__, __LINE__); return (-1); } } /* We do not use first event to notify user - this is the event that is generated as result of EVENT ON operation and is used only to initialize internal variables of application */ if (pLib->modem_init_event & (1L << nr)) { diva_trace_notify_user (pLib, nr, DIVA_SUPER_TRACE_NOTIFY_MODEM_CHANGE); } else { pLib->modem_init_event |= (1L << nr); } return (0); } static int diva_fax_info (diva_strace_context_t* pLib, int Channel, 
/* (continuation of the diva_fax_info parameter list begun on the previous
   chunk line) */
                          diva_man_var_header_t* pVar) {
	diva_man_var_header_t* cur;
	int i, nr = Channel - 1;

	/* Copy every registered fax variable for this channel out of the
	   indication; a missing or unreadable variable is a fatal parse error. */
	for (i = pLib->fax_parse_entry_first[nr];
	     i <= pLib->fax_parse_entry_last[nr]; i++) {
		if ((cur = find_var (pVar, pLib->parse_table[i].path))) {
			if (diva_trace_read_variable (cur, pLib->parse_table[i].variable)) {
				diva_trace_error (pLib, -3 , __FILE__, __LINE__);
				return (-1);
			}
		} else {
			diva_trace_error (pLib, -2 , __FILE__, __LINE__);
			return (-1);
		}
	}

	/*
		We do not use first event to notify user - this is the event that is
		generated as result of EVENT ON operation and is used only to
		initialize internal variables of application
		*/
	if (pLib->fax_init_event & (1L << nr)) {
		diva_trace_notify_user (pLib, nr, DIVA_SUPER_TRACE_NOTIFY_FAX_CHANGE);
	} else {
		pLib->fax_init_event |= (1L << nr);
	}

	return (0);
}

/*
	LINE STATE UPDATE

	Update Line Status Information and issue notification to user,
	that will inform about change in the line state.
	*/
static int diva_line_info (diva_strace_context_t* pLib,
                           int Channel,
                           diva_man_var_header_t* pVar) {
	diva_man_var_header_t* cur;
	int i, nr = Channel - 1;

	for (i = pLib->line_parse_entry_first[nr];
	     i <= pLib->line_parse_entry_last[nr]; i++) {
		if ((cur = find_var (pVar, pLib->parse_table[i].path))) {
			if (diva_trace_read_variable (cur, pLib->parse_table[i].variable)) {
				diva_trace_error (pLib, -3 , __FILE__, __LINE__);
				return (-1);
			}
		} else {
			diva_trace_error (pLib, -2 , __FILE__, __LINE__);
			return (-1);
		}
	}

	/*
		We do not use first event to notify user - this is the event that is
		generated as result of EVENT ON operation and is used only to
		initialize internal variables of application.
		Exception is if the line is "online". In this case we have to
		notify user about this condition.
		*/
	if (pLib->line_init_event & (1L << nr)) {
		diva_trace_notify_user (pLib, nr, DIVA_SUPER_TRACE_NOTIFY_LINE_CHANGE);
	} else {
		pLib->line_init_event |= (1L << nr);
		if (strcmp (&pLib->lines[nr].Line[0], "Idle")) {
			diva_trace_notify_user (pLib, nr, DIVA_SUPER_TRACE_NOTIFY_LINE_CHANGE);
		}
	}

	return (0);
}

/*
	Move position to next variable in the chain.
	Each entry starts with an ESC byte followed by a length byte; returns
	NULL when the current or the following entry lacks the ESC marker.
	*/
static diva_man_var_header_t* get_next_var (diva_man_var_header_t* pVar) {
	byte* msg = (byte*)pVar;
	byte* start;
	int msg_length;

	if (*msg != ESC) return NULL;

	start = msg + 2;
	msg_length = *(msg+1);
	msg = (start+msg_length);

	if (*msg != ESC) return NULL;

	return ((diva_man_var_header_t*)msg);
}

/*
	Move position to variable with given name.
	Returns NULL when the chain is exhausted without a match.
	*/
static diva_man_var_header_t* find_var (diva_man_var_header_t* pVar,
                                        const char* name) {
	const char* path;

	do {
		path = (char*)&pVar->path_length+1;

		if (!strncmp (name, path, pVar->path_length)) {
			break;
		}
	} while ((pVar = get_next_var (pVar)));

	return (pVar);
}

/* Register the "State\\B<n>\\..." management paths of one line (0-based
   'Channel') in the parse table, paired with the destination variables
   they are copied to when an indication arrives. */
static void diva_create_line_parse_table (diva_strace_context_t* pLib,
                                          int Channel) {
	diva_trace_line_state_t* pLine = &pLib->lines[Channel];
	int nr = Channel+1;

	/* guard against parse-table overflow */
	if ((pLib->cur_parse_entry + LINE_PARSE_ENTRIES) >= pLib->parse_entries) {
		diva_trace_error (pLib, -1, __FILE__, __LINE__);
		return;
	}

	pLine->ChannelNumber = nr;

	pLib->line_parse_entry_first[Channel] = pLib->cur_parse_entry;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Framing", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->Framing[0];

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Line", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->Line[0];

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Layer2", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->Layer2[0];

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Layer3", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->Layer3[0];

	sprintf
/* (continuation of the sprintf call begun on the previous chunk line) */
	        (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Remote Address", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	         &pLine->RemoteAddress[0];

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Remote SubAddr", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	         &pLine->RemoteSubAddress[0];

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Local Address", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	         &pLine->LocalAddress[0];

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Local SubAddr", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	         &pLine->LocalSubAddress[0];

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\BC", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->call_BC;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\HLC", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->call_HLC;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\LLC", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->call_LLC;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Charges", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->Charges;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Call Reference", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->CallReference;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Last Disc Cause", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	         &pLine->LastDisconnecCause;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\User ID", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->UserID[0];

	pLib->line_parse_entry_last[Channel] = pLib->cur_parse_entry - 1;
}

/* Register the "State\\B<n>\\FAX\\..." management paths of one channel
   (0-based 'Channel') in the parse table. */
static void diva_create_fax_parse_table (diva_strace_context_t* pLib,
                                         int Channel) {
	diva_trace_fax_state_t* pFax = &pLib->lines[Channel].fax;
	int nr = Channel+1;

	/* guard against parse-table overflow */
	if ((pLib->cur_parse_entry + FAX_PARSE_ENTRIES) >= pLib->parse_entries) {
		diva_trace_error (pLib, -1, __FILE__, __LINE__);
		return;
	}

	pFax->ChannelNumber = nr;

	pLib->fax_parse_entry_first[Channel] = pLib->cur_parse_entry;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\FAX\\Event", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Event;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\FAX\\Page Counter", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Page_Counter;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\FAX\\Features", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Features;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\FAX\\Station ID", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Station_ID[0];

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\FAX\\Subaddress", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Subaddress[0];

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\FAX\\Password", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Password[0];

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\FAX\\Speed", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Speed;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\FAX\\Resolution", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Resolution;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\FAX\\Paper Width", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Paper_Width;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\FAX\\Paper Length", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Paper_Length;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\FAX\\Scanline Time", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Scanline_Time;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\FAX\\Disc Reason", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Disc_Reason;

	pLib->fax_parse_entry_last[Channel] = pLib->cur_parse_entry - 1;
}

/* Register the "State\\B<n>\\Modem\\..." management paths of one channel
   (0-based 'Channel') in the parse table. */
static void diva_create_modem_parse_table (diva_strace_context_t* pLib,
                                           int Channel) {
	diva_trace_modem_state_t* pModem = &pLib->lines[Channel].modem;
	int nr = Channel+1;

	/* guard against parse-table overflow */
	if ((pLib->cur_parse_entry + MODEM_PARSE_ENTRIES) >= pLib->parse_entries) {
		diva_trace_error (pLib, -1, __FILE__, __LINE__);
		return;
	}

	pModem->ChannelNumber = nr;

	pLib->modem_parse_entry_first[Channel] = pLib->cur_parse_entry;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Modem\\Event", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->Event;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Modem\\Norm", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->Norm;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Modem\\Options", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->Options;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Modem\\TX Speed", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->TxSpeed;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Modem\\RX Speed", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->RxSpeed;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Modem\\Roundtrip ms", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->RoundtripMsec;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Modem\\Symbol Rate", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->SymbolRate;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Modem\\RX Level dBm", nr);
	/* (continuation of the modem parse table begun on the previous chunk line) */
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->RxLeveldBm;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Modem\\Echo Level dBm", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->EchoLeveldBm;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Modem\\SNR dB", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->SNRdb;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Modem\\MAE", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->MAE;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Modem\\Local Retrains", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->LocalRetrains;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Modem\\Remote Retrains", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->RemoteRetrains;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Modem\\Local Resyncs", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->LocalResyncs;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Modem\\Remote Resyncs", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->RemoteResyncs;

	sprintf (pLib->parse_table[pLib->cur_parse_entry].path,
	         "State\\B%d\\Modem\\Disc Reason", nr);
	pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->DiscReason;

	pLib->modem_parse_entry_last[Channel] = pLib->cur_parse_entry - 1;
}

/* Build the complete parse table: per-channel line/modem/fax sections,
   then the interface-wide statistics counters, recording the first/last
   index of every section so later lookups can be range-limited. */
static void diva_create_parse_table (diva_strace_context_t* pLib) {
	int i;

	for (i = 0; i < pLib->Channels; i++) {
		diva_create_line_parse_table (pLib, i);
		diva_create_modem_parse_table (pLib, i);
		diva_create_fax_parse_table (pLib, i);
	}

	pLib->statistic_parse_first = pLib->cur_parse_entry;

	/* Outgoing Calls */
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\Outgoing Calls\\Calls");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.outg.Calls;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\Outgoing Calls\\Connected");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.outg.Connected;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\Outgoing Calls\\User Busy");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.outg.User_Busy;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\Outgoing Calls\\No Answer");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.outg.No_Answer;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\Outgoing Calls\\Wrong Number");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.outg.Wrong_Number;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\Outgoing Calls\\Call Rejected");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.outg.Call_Rejected;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\Outgoing Calls\\Other Failures");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.outg.Other_Failures;

	/* Incoming Calls */
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\Incoming Calls\\Calls");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.inc.Calls;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\Incoming Calls\\Connected");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.inc.Connected;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\Incoming Calls\\User Busy");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.inc.User_Busy;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\Incoming Calls\\Call Rejected");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.inc.Call_Rejected;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\Incoming Calls\\Wrong Number");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.inc.Wrong_Number;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\Incoming Calls\\Incompatible Dst");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.inc.Incompatible_Dst;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\Incoming Calls\\Out of Order");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.inc.Out_of_Order;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\Incoming Calls\\Ignored");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.inc.Ignored;

	/* Modem Statistics */
	pLib->mdm_statistic_parse_first = pLib->cur_parse_entry;

	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\Modem\\Disc Normal");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.mdm.Disc_Normal;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\Modem\\Disc Unspecified");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.mdm.Disc_Unspecified;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\Modem\\Disc Busy Tone");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.mdm.Disc_Busy_Tone;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\Modem\\Disc Congestion");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.mdm.Disc_Congestion;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\Modem\\Disc Carr. Wait");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.mdm.Disc_Carr_Wait;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\Modem\\Disc Trn Timeout");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.mdm.Disc_Trn_Timeout;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\Modem\\Disc Incompat.");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.mdm.Disc_Incompat;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\Modem\\Disc Frame Rej.");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.mdm.Disc_Frame_Rej;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\Modem\\Disc V42bis");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.mdm.Disc_V42bis;

	pLib->mdm_statistic_parse_last = pLib->cur_parse_entry - 1;

	/* Fax Statistics */
	pLib->fax_statistic_parse_first = pLib->cur_parse_entry;

	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\FAX\\Disc Normal");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.fax.Disc_Normal;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\FAX\\Disc Not Ident.");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.fax.Disc_Not_Ident;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\FAX\\Disc No Response");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.fax.Disc_No_Response;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\FAX\\Disc Retries");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.fax.Disc_Retries;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\FAX\\Disc Unexp. Msg.");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.fax.Disc_Unexp_Msg;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\FAX\\Disc No Polling.");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.fax.Disc_No_Polling;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\FAX\\Disc Training");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.fax.Disc_Training;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\FAX\\Disc Unexpected");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.fax.Disc_Unexpected;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\FAX\\Disc Application");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.fax.Disc_Application;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\FAX\\Disc Incompat.");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.fax.Disc_Incompat;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\FAX\\Disc No Command");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.fax.Disc_No_Command;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\FAX\\Disc Long Msg");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.fax.Disc_Long_Msg;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\FAX\\Disc Supervisor");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.fax.Disc_Supervisor;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\FAX\\Disc SUB SEP PWD");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.fax.Disc_SUB_SEP_PWD;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\FAX\\Disc Invalid Msg");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.fax.Disc_Invalid_Msg;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\FAX\\Disc Page Coding");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.fax.Disc_Page_Coding;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\FAX\\Disc App Timeout");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.fax.Disc_App_Timeout;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\FAX\\Disc Unspecified");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.fax.Disc_Unspecified;

	pLib->fax_statistic_parse_last = pLib->cur_parse_entry - 1;

	/* B-Layer1 */
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\B-Layer1\\X-Frames");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.b1.X_Frames;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\B-Layer1\\X-Bytes");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.b1.X_Bytes;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\B-Layer1\\X-Errors");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.b1.X_Errors;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\B-Layer1\\R-Frames");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.b1.R_Frames;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\B-Layer1\\R-Bytes");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.b1.R_Bytes;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\B-Layer1\\R-Errors");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.b1.R_Errors;

	/* B-Layer2 */
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\B-Layer2\\X-Frames");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.b2.X_Frames;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\B-Layer2\\X-Bytes");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.b2.X_Bytes;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\B-Layer2\\X-Errors");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.b2.X_Errors;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\B-Layer2\\R-Frames");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.b2.R_Frames;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\B-Layer2\\R-Bytes");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.b2.R_Bytes;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\B-Layer2\\R-Errors");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.b2.R_Errors;

	/* D-Layer1 */
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\D-Layer1\\X-Frames");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.d1.X_Frames;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\D-Layer1\\X-Bytes");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.d1.X_Bytes;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\D-Layer1\\X-Errors");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.d1.X_Errors;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\D-Layer1\\R-Frames");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.d1.R_Frames;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\D-Layer1\\R-Bytes");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.d1.R_Bytes;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\D-Layer1\\R-Errors");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.d1.R_Errors;

	/* D-Layer2 */
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\D-Layer2\\X-Frames");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.d2.X_Frames;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\D-Layer2\\X-Bytes");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.d2.X_Bytes;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\D-Layer2\\X-Errors");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.d2.X_Errors;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\D-Layer2\\R-Frames");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.d2.R_Frames;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\D-Layer2\\R-Bytes");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.d2.R_Bytes;
	strcpy (pLib->parse_table[pLib->cur_parse_entry].path,
	        "Statistics\\D-Layer2\\R-Errors");
	pLib->parse_table[pLib->cur_parse_entry++].variable =
	        &pLib->InterfaceStat.d2.R_Errors;

	pLib->statistic_parse_last = pLib->cur_parse_entry - 1;
}

/* Report an internal parse/protocol error to the user's error callback,
   if one was registered. */
static void diva_trace_error (diva_strace_context_t* pLib,
                              int error, const char* file, int line) {
	if (pLib->user_proc_table.error_notify_proc) {
		(*(pLib->user_proc_table.error_notify_proc))(
		        pLib->user_proc_table.user_context,
		        &pLib->instance, pLib->Adapter,
		        error, file, line);
	}
}

/*
	Delivery notification to user
	*/
static void diva_trace_notify_user (diva_strace_context_t* pLib,
                                    int Channel,
                                    int notify_subject) {
	if (pLib->user_proc_table.notify_proc) {
		(*(pLib->user_proc_table.notify_proc))(pLib->user_proc_table.user_context,
		                                       &pLib->instance, pLib->Adapter,
		                                       &pLib->lines[Channel],
		                                       notify_subject);
	}
}

/*
	Read variable value to the destination based on the variable type
	*/
static int diva_trace_read_variable (diva_man_var_header_t* pVar,
                                     void* variable) {
	switch (pVar->type) {
	case 0x03: /* MI_ASCIIZ - string */
		return (diva_strace_read_asz (pVar,
/* (continuation of the diva_strace_read_asz call begun on the previous
   chunk line) */
                              (char*)variable));
	case 0x04: /* MI_ASCII - string */
		return (diva_strace_read_asc (pVar, (char*)variable));
	case 0x05: /* MI_NUMBER - counted sequence of bytes */
		return (diva_strace_read_ie (pVar, (diva_trace_ie_t*)variable));
	case 0x81: /* MI_INT - signed integer */
		return (diva_strace_read_int (pVar, (int*)variable));
	case 0x82: /* MI_UINT - unsigned integer */
		return (diva_strace_read_uint (pVar, (dword*)variable));
	case 0x83: /* MI_HINT - unsigned integer, hex representation */
		return (diva_strace_read_uint (pVar, (dword*)variable));
	case 0x87: /* MI_BITFLD - unsigned integer, bit representation */
		return (diva_strace_read_uint (pVar, (dword*)variable));
	}

	/*
		This type of variable is not handled, indicate error.
		Or one problem in management interface, or in application recoding
		table, or this application should handle it.
		*/
	return (-1);
}

/* Read signed integer to destination (1-, 2- or 4-byte source value;
   any other length is rejected). */
static int diva_strace_read_int (diva_man_var_header_t* pVar, int* var) {
	byte* ptr = (char*)&pVar->path_length;
	int value;

	/* value bytes follow the path-length byte and the path itself */
	ptr += (pVar->path_length + 1);

	switch (pVar->value_length) {
	case 1:
		value = *(char*)ptr;
		break;
	case 2:
		value = (short)GET_WORD(ptr);
		break;
	case 4:
		value = (int)GET_DWORD(ptr);
		break;
	default:
		return (-1);
	}

	*var = value;

	return (0);
}

/* Read unsigned integer to destination (1-, 2-, 3- or 4-byte source value;
   3-byte values are masked to the low 24 bits). */
static int diva_strace_read_uint (diva_man_var_header_t* pVar, dword* var) {
	byte* ptr = (char*)&pVar->path_length;
	dword value;

	ptr += (pVar->path_length + 1);

	switch (pVar->value_length) {
	case 1:
		value = (byte)(*ptr);
		break;
	case 2:
		value = (word)GET_WORD(ptr);
		break;
	case 3:
		value = (dword)GET_DWORD(ptr);
		value &= 0x00ffffff;
		break;
	case 4:
		value = (dword)GET_DWORD(ptr);
		break;
	default:
		return (-1);
	}

	*var = value;

	return (0);
}

/*
	Read zero terminated ASCII string (falls back to strlen when the
	header carries no explicit value length)
	*/
static int diva_strace_read_asz (diva_man_var_header_t* pVar, char* var) {
	char* ptr = (char*)&pVar->path_length;
	int length;

	ptr += (pVar->path_length + 1);

	if (!(length = pVar->value_length)) {
		length = strlen (ptr);
	}
	memcpy (var, ptr, length);
	var[length] = 0;

	return (0);
}

/*
	Read counted (with leading length byte) ASCII string
	*/
static int diva_strace_read_asc (diva_man_var_header_t* pVar, char* var) {
	char* ptr = (char*)&pVar->path_length;

	ptr += (pVar->path_length + 1);
	memcpy (var, ptr+1, *ptr);
	var[(int)*ptr] = 0;

	return (0);
}

/*
	Read one information element - i.e. one string of byte values with
	one length byte in front
	*/
static int diva_strace_read_ie (diva_man_var_header_t* pVar,
                                diva_trace_ie_t* var) {
	char* ptr = (char*)&pVar->path_length;

	ptr += (pVar->path_length + 1);

	var->length = *ptr;
	memcpy (&var->data[0], ptr+1, *ptr);

	return (0);
}

/* Enable/disable the audio tap (EYE pattern) trace for one B-channel
   (1-based) and reschedule the trace request. */
static int SuperTraceSetAudioTap (void* hLib, int Channel, int on) {
	diva_strace_context_t* pLib = (diva_strace_context_t*)hLib;

	if ((Channel < 1) || (Channel > pLib->Channels)) {
		return (-1);
	}
	Channel--;

	if (on) {
		pLib->audio_tap_mask |= (1L << Channel);
	} else {
		pLib->audio_tap_mask &= ~(1L << Channel);
	}

	/* EYE patterns have TM_M_DATA set as additional condition */
	if (pLib->audio_tap_mask) {
		pLib->trace_event_mask |= TM_M_DATA;
	} else {
		pLib->trace_event_mask &= ~TM_M_DATA;
	}

	return (ScheduleNextTraceRequest (pLib));
}

/* Enable/disable B-channel tracing for one channel (1-based). */
static int SuperTraceSetBChannel (void* hLib, int Channel, int on) {
	diva_strace_context_t* pLib = (diva_strace_context_t*)hLib;

	if ((Channel < 1) || (Channel > pLib->Channels)) {
		return (-1);
	}
	Channel--;

	if (on) {
		pLib->bchannel_trace_mask |= (1L << Channel);
	} else {
		pLib->bchannel_trace_mask &= ~(1L << Channel);
	}

	return (ScheduleNextTraceRequest (pLib));
}

/* Enable/disable D-channel tracing (D-channel, call comments, data-link
   errors and layer-1 events are switched together). */
static int SuperTraceSetDChannel (void* hLib, int on) {
	diva_strace_context_t* pLib = (diva_strace_context_t*)hLib;

	if (on) {
		pLib->trace_event_mask |=
		        (TM_D_CHAN | TM_C_COMM | TM_DL_ERR | TM_LAYER1);
	} else {
		pLib->trace_event_mask &=
		        ~(TM_D_CHAN | TM_C_COMM | TM_DL_ERR | TM_LAYER1);
	}

	return (ScheduleNextTraceRequest (pLib));
}

/* Enable/disable string (info) tracing.
   (Function body continues on the next chunk line.) */
static int SuperTraceSetInfo (void* hLib, int on) {
	diva_strace_context_t* pLib = (diva_strace_context_t*)hLib;

	if (on) {
		pLib->trace_event_mask |= TM_STRING;
	}
	/* (continuation of SuperTraceSetInfo begun on the previous chunk line) */
	else {
		pLib->trace_event_mask &= ~TM_STRING;
	}

	return (ScheduleNextTraceRequest (pLib));
}

/* Request a clear-call on one B-channel (1-based) and reschedule the
   trace request. */
static int SuperTraceClearCall (void* hLib, int Channel) {
	diva_strace_context_t* pLib = (diva_strace_context_t*)hLib;

	if ((Channel < 1) || (Channel > pLib->Channels)) {
		return (-1);
	}
	Channel--;

	pLib->clear_call_command |= (1L << Channel);

	return (ScheduleNextTraceRequest (pLib));
}

/*
	Parse and update cumulative statistics
	*/
static int diva_ifc_statistics (diva_strace_context_t* pLib,
                                diva_man_var_header_t* pVar) {
	diva_man_var_header_t* cur;
	int i, one_updated = 0, mdm_updated = 0, fax_updated = 0;

	/* Walk the statistics section of the parse table; unlike the per-channel
	   handlers, a variable missing from the indication is not an error here -
	   only an unreadable one is. */
	for (i = pLib->statistic_parse_first; i <= pLib->statistic_parse_last; i++) {
		if ((cur = find_var (pVar, pLib->parse_table[i].path))) {
			if (diva_trace_read_variable (cur, pLib->parse_table[i].variable)) {
				diva_trace_error (pLib, -3 , __FILE__, __LINE__);
				return (-1);
			}
			one_updated = 1;
			if ((i >= pLib->mdm_statistic_parse_first) &&
			    (i <= pLib->mdm_statistic_parse_last)) {
				mdm_updated = 1;
			}
			if ((i >= pLib->fax_statistic_parse_first) &&
			    (i <= pLib->fax_statistic_parse_last)) {
				fax_updated = 1;
			}
		}
	}

	/*
		We do not use first event to notify user - this is the event that is
		generated as result of EVENT ON operation and is used only to
		initialize internal variables of application
		*/
	if (mdm_updated) {
		diva_trace_notify_user (pLib, 0, DIVA_SUPER_TRACE_NOTIFY_MDM_STAT_CHANGE);
	} else if (fax_updated) {
		diva_trace_notify_user (pLib, 0, DIVA_SUPER_TRACE_NOTIFY_FAX_STAT_CHANGE);
	} else if (one_updated) {
		diva_trace_notify_user (pLib, 0, DIVA_SUPER_TRACE_NOTIFY_STAT_CHANGE);
	}

	return (one_updated ? 0 : -1);
}

/* The helpers below each flag one statistics group for (re-)query and
   schedule the next trace request towards the card. */

static int SuperTraceGetOutgoingCallStatistics (void* hLib) {
	diva_strace_context_t* pLib = (diva_strace_context_t*)hLib;
	pLib->outgoing_ifc_stats = 1;
	return (ScheduleNextTraceRequest (pLib));
}

static int SuperTraceGetIncomingCallStatistics (void* hLib) {
	diva_strace_context_t* pLib = (diva_strace_context_t*)hLib;
	pLib->incoming_ifc_stats = 1;
	return (ScheduleNextTraceRequest (pLib));
}

static int SuperTraceGetModemStatistics (void* hLib) {
	diva_strace_context_t* pLib = (diva_strace_context_t*)hLib;
	pLib->modem_ifc_stats = 1;
	return (ScheduleNextTraceRequest (pLib));
}

static int SuperTraceGetFaxStatistics (void* hLib) {
	diva_strace_context_t* pLib = (diva_strace_context_t*)hLib;
	pLib->fax_ifc_stats = 1;
	return (ScheduleNextTraceRequest (pLib));
}

static int SuperTraceGetBLayer1Statistics (void* hLib) {
	diva_strace_context_t* pLib = (diva_strace_context_t*)hLib;
	pLib->b1_ifc_stats = 1;
	return (ScheduleNextTraceRequest (pLib));
}

static int SuperTraceGetBLayer2Statistics (void* hLib) {
	diva_strace_context_t* pLib = (diva_strace_context_t*)hLib;
	pLib->b2_ifc_stats = 1;
	return (ScheduleNextTraceRequest (pLib));
}

static int SuperTraceGetDLayer1Statistics (void* hLib) {
	diva_strace_context_t* pLib = (diva_strace_context_t*)hLib;
	pLib->d1_ifc_stats = 1;
	return (ScheduleNextTraceRequest (pLib));
}

static int SuperTraceGetDLayer2Statistics (void* hLib) {
	diva_strace_context_t* pLib = (diva_strace_context_t*)hLib;
	pLib->d2_ifc_stats = 1;
	return (ScheduleNextTraceRequest (pLib));
}

/* Size in bytes a caller must provide for a trace context serving
   'channels' B-channels: the context structure plus the per-channel parse
   entries.  NOTE(review): "Memoty" is the historical exported spelling of
   this public symbol - do not rename. */
dword DivaSTraceGetMemotyRequirement (int channels) {
	dword parse_entries = (MODEM_PARSE_ENTRIES + FAX_PARSE_ENTRIES +
	                       STAT_PARSE_ENTRIES +
	                       LINE_PARSE_ENTRIES + 1) * channels;
	return (sizeof(diva_strace_context_t) +
	        (parse_entries * sizeof(diva_strace_path2action_t)));
}
gpl-2.0
RenderBroken/msm8974_OPO_render_kernel
sound/soc/au1x/psc-ac97.c
5069
12576
/* * Au12x0/Au1550 PSC ALSA ASoC audio support. * * (c) 2007-2009 MSC Vertriebsges.m.b.H., * Manuel Lauss <manuel.lauss@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Au1xxx-PSC AC97 glue. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/suspend.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/initval.h> #include <sound/soc.h> #include <asm/mach-au1x00/au1000.h> #include <asm/mach-au1x00/au1xxx_psc.h> #include "psc.h" /* how often to retry failed codec register reads/writes */ #define AC97_RW_RETRIES 5 #define AC97_DIR \ (SND_SOC_DAIDIR_PLAYBACK | SND_SOC_DAIDIR_CAPTURE) #define AC97_RATES \ SNDRV_PCM_RATE_8000_48000 #define AC97_FMTS \ (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3BE) #define AC97PCR_START(stype) \ ((stype) == SNDRV_PCM_STREAM_PLAYBACK ? PSC_AC97PCR_TS : PSC_AC97PCR_RS) #define AC97PCR_STOP(stype) \ ((stype) == SNDRV_PCM_STREAM_PLAYBACK ? PSC_AC97PCR_TP : PSC_AC97PCR_RP) #define AC97PCR_CLRFIFO(stype) \ ((stype) == SNDRV_PCM_STREAM_PLAYBACK ? PSC_AC97PCR_TC : PSC_AC97PCR_RC) #define AC97STAT_BUSY(stype) \ ((stype) == SNDRV_PCM_STREAM_PLAYBACK ? PSC_AC97STAT_TB : PSC_AC97STAT_RB) /* instance data. There can be only one, MacLeod!!!! */ static struct au1xpsc_audio_data *au1xpsc_ac97_workdata; #if 0 /* this could theoretically work, but ac97->bus->card->private_data can be NULL * when snd_ac97_mixer() is called; I don't know if the rest further down the * chain are always valid either. 
*/ static inline struct au1xpsc_audio_data *ac97_to_pscdata(struct snd_ac97 *x) { struct snd_soc_card *c = x->bus->card->private_data; return snd_soc_dai_get_drvdata(c->rtd->cpu_dai); } #else #define ac97_to_pscdata(x) au1xpsc_ac97_workdata #endif /* AC97 controller reads codec register */ static unsigned short au1xpsc_ac97_read(struct snd_ac97 *ac97, unsigned short reg) { struct au1xpsc_audio_data *pscdata = ac97_to_pscdata(ac97); unsigned short retry, tmo; unsigned long data; au_writel(PSC_AC97EVNT_CD, AC97_EVNT(pscdata)); au_sync(); retry = AC97_RW_RETRIES; do { mutex_lock(&pscdata->lock); au_writel(PSC_AC97CDC_RD | PSC_AC97CDC_INDX(reg), AC97_CDC(pscdata)); au_sync(); tmo = 20; do { udelay(21); if (au_readl(AC97_EVNT(pscdata)) & PSC_AC97EVNT_CD) break; } while (--tmo); data = au_readl(AC97_CDC(pscdata)); au_writel(PSC_AC97EVNT_CD, AC97_EVNT(pscdata)); au_sync(); mutex_unlock(&pscdata->lock); if (reg != ((data >> 16) & 0x7f)) tmo = 1; /* wrong register, try again */ } while (--retry && !tmo); return retry ? 
data & 0xffff : 0xffff; } /* AC97 controller writes to codec register */ static void au1xpsc_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val) { struct au1xpsc_audio_data *pscdata = ac97_to_pscdata(ac97); unsigned int tmo, retry; au_writel(PSC_AC97EVNT_CD, AC97_EVNT(pscdata)); au_sync(); retry = AC97_RW_RETRIES; do { mutex_lock(&pscdata->lock); au_writel(PSC_AC97CDC_INDX(reg) | (val & 0xffff), AC97_CDC(pscdata)); au_sync(); tmo = 20; do { udelay(21); if (au_readl(AC97_EVNT(pscdata)) & PSC_AC97EVNT_CD) break; } while (--tmo); au_writel(PSC_AC97EVNT_CD, AC97_EVNT(pscdata)); au_sync(); mutex_unlock(&pscdata->lock); } while (--retry && !tmo); } /* AC97 controller asserts a warm reset */ static void au1xpsc_ac97_warm_reset(struct snd_ac97 *ac97) { struct au1xpsc_audio_data *pscdata = ac97_to_pscdata(ac97); au_writel(PSC_AC97RST_SNC, AC97_RST(pscdata)); au_sync(); msleep(10); au_writel(0, AC97_RST(pscdata)); au_sync(); } static void au1xpsc_ac97_cold_reset(struct snd_ac97 *ac97) { struct au1xpsc_audio_data *pscdata = ac97_to_pscdata(ac97); int i; /* disable PSC during cold reset */ au_writel(0, AC97_CFG(au1xpsc_ac97_workdata)); au_sync(); au_writel(PSC_CTRL_DISABLE, PSC_CTRL(pscdata)); au_sync(); /* issue cold reset */ au_writel(PSC_AC97RST_RST, AC97_RST(pscdata)); au_sync(); msleep(500); au_writel(0, AC97_RST(pscdata)); au_sync(); /* enable PSC */ au_writel(PSC_CTRL_ENABLE, PSC_CTRL(pscdata)); au_sync(); /* wait for PSC to indicate it's ready */ i = 1000; while (!((au_readl(AC97_STAT(pscdata)) & PSC_AC97STAT_SR)) && (--i)) msleep(1); if (i == 0) { printk(KERN_ERR "au1xpsc-ac97: PSC not ready!\n"); return; } /* enable the ac97 function */ au_writel(pscdata->cfg | PSC_AC97CFG_DE_ENABLE, AC97_CFG(pscdata)); au_sync(); /* wait for AC97 core to become ready */ i = 1000; while (!((au_readl(AC97_STAT(pscdata)) & PSC_AC97STAT_DR)) && (--i)) msleep(1); if (i == 0) printk(KERN_ERR "au1xpsc-ac97: AC97 ctrl not ready\n"); } /* AC97 controller operations */ 
/* codec register access ops handed to the ASoC AC97 core */
struct snd_ac97_bus_ops soc_ac97_ops = {
	.read		= au1xpsc_ac97_read,
	.write		= au1xpsc_ac97_write,
	.reset		= au1xpsc_ac97_cold_reset,
	.warm_reset	= au1xpsc_ac97_warm_reset,
};
EXPORT_SYMBOL_GPL(soc_ac97_ops);

/*
 * Apply stream parameters (sample width, slots) to the AC97 config
 * register.  If the controller is already running, the requested
 * parameters must match the active configuration (-EINVAL otherwise).
 * Reprogramming requires a disable/write/re-enable sequence with polls
 * of the device-ready bit, performed under pscdata->lock.
 */
static int au1xpsc_ac97_hw_params(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params *params,
				  struct snd_soc_dai *dai)
{
	struct au1xpsc_audio_data *pscdata = snd_soc_dai_get_drvdata(dai);
	unsigned long r, ro, stat;
	int chans, t, stype = substream->stream;

	chans = params_channels(params);

	r = ro = au_readl(AC97_CFG(pscdata));
	stat = au_readl(AC97_STAT(pscdata));

	/* already active? */
	if (stat & (PSC_AC97STAT_TB | PSC_AC97STAT_RB)) {
		/* reject parameters not currently set up */
		if ((PSC_AC97CFG_GET_LEN(r) != params->msbits) ||
		    (pscdata->rate != params_rate(params)))
			return -EINVAL;
	} else {
		/* set sample bitdepth: REG[24:21]=(BITS-2)/2 */
		r &= ~PSC_AC97CFG_LEN_MASK;
		r |= PSC_AC97CFG_SET_LEN(params->msbits);

		/* channels: enable slots for front L/R channel */
		if (stype == SNDRV_PCM_STREAM_PLAYBACK) {
			r &= ~PSC_AC97CFG_TXSLOT_MASK;
			r |= PSC_AC97CFG_TXSLOT_ENA(3);
			r |= PSC_AC97CFG_TXSLOT_ENA(4);
		} else {
			r &= ~PSC_AC97CFG_RXSLOT_MASK;
			r |= PSC_AC97CFG_RXSLOT_ENA(3);
			r |= PSC_AC97CFG_RXSLOT_ENA(4);
		}

		/* do we need to poke the hardware? */
		if (!(r ^ ro))
			goto out;

		/* ac97 engine is about to be disabled */
		mutex_lock(&pscdata->lock);

		/* disable AC97 device controller first... */
		au_writel(r & ~PSC_AC97CFG_DE_ENABLE, AC97_CFG(pscdata));
		au_sync();

		/* ...wait for it... */
		t = 100;
		while ((au_readl(AC97_STAT(pscdata)) & PSC_AC97STAT_DR) && --t)
			msleep(1);

		if (!t)
			printk(KERN_ERR "PSC-AC97: can't disable!\n");

		/* ...write config... */
		au_writel(r, AC97_CFG(pscdata));
		au_sync();

		/* ...enable the AC97 controller again... */
		au_writel(r | PSC_AC97CFG_DE_ENABLE, AC97_CFG(pscdata));
		au_sync();

		/* ...and wait for ready bit */
		t = 100;
		while ((!(au_readl(AC97_STAT(pscdata)) & PSC_AC97STAT_DR)) && --t)
			msleep(1);

		if (!t)
			printk(KERN_ERR "PSC-AC97: can't enable!\n");

		mutex_unlock(&pscdata->lock);

		/* cache active config for cold_reset/restart paths */
		pscdata->cfg = r;
		pscdata->rate = params_rate(params);
	}

out:
	return 0;
}

/*
 * Start/stop the PCM engine for the stream direction.  On stop this
 * busy-waits for the hardware busy bit to clear before flushing the
 * FIFO (no sleeping allowed in the trigger callback).
 */
static int au1xpsc_ac97_trigger(struct snd_pcm_substream *substream,
				int cmd, struct snd_soc_dai *dai)
{
	struct au1xpsc_audio_data *pscdata = snd_soc_dai_get_drvdata(dai);
	int ret, stype = substream->stream;

	ret = 0;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		au_writel(AC97PCR_CLRFIFO(stype), AC97_PCR(pscdata));
		au_sync();
		au_writel(AC97PCR_START(stype), AC97_PCR(pscdata));
		au_sync();
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		au_writel(AC97PCR_STOP(stype), AC97_PCR(pscdata));
		au_sync();

		while (au_readl(AC97_STAT(pscdata)) & AC97STAT_BUSY(stype))
			asm volatile ("nop");

		au_writel(AC97PCR_CLRFIFO(stype), AC97_PCR(pscdata));
		au_sync();

		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}

/* hand the per-direction DMA ids to the PCM layer at stream open */
static int au1xpsc_ac97_startup(struct snd_pcm_substream *substream,
				struct snd_soc_dai *dai)
{
	struct au1xpsc_audio_data *pscdata = snd_soc_dai_get_drvdata(dai);
	snd_soc_dai_set_dma_data(dai, substream, &pscdata->dmaids[0]);
	return 0;
}

/* DAI probe: succeed only if the platform device has been set up */
static int au1xpsc_ac97_probe(struct snd_soc_dai *dai)
{
	return au1xpsc_ac97_workdata ?
0 : -ENODEV;
}

static const struct snd_soc_dai_ops au1xpsc_ac97_dai_ops = {
	.startup	= au1xpsc_ac97_startup,
	.trigger	= au1xpsc_ac97_trigger,
	.hw_params	= au1xpsc_ac97_hw_params,
};

/* template copied per-instance in drvprobe so the name can be patched */
static const struct snd_soc_dai_driver au1xpsc_ac97_dai_template = {
	.ac97_control		= 1,
	.probe			= au1xpsc_ac97_probe,
	.playback = {
		.rates		= AC97_RATES,
		.formats	= AC97_FMTS,
		.channels_min	= 2,
		.channels_max	= 2,
	},
	.capture = {
		.rates		= AC97_RATES,
		.formats	= AC97_FMTS,
		.channels_min	= 2,
		.channels_max	= 2,
	},
	.ops = &au1xpsc_ac97_dai_ops,
};

/*
 * Platform probe: map the PSC register block, fetch both DMA ids,
 * switch the PSC into AC97 mode (preserving the platform's clock
 * source selection) and register the DAI.  All allocations are
 * devm-managed, so error paths simply return.
 */
static int __devinit au1xpsc_ac97_drvprobe(struct platform_device *pdev)
{
	int ret;
	struct resource *iores, *dmares;
	unsigned long sel;
	struct au1xpsc_audio_data *wd;

	wd = devm_kzalloc(&pdev->dev, sizeof(struct au1xpsc_audio_data),
			  GFP_KERNEL);
	if (!wd)
		return -ENOMEM;

	mutex_init(&wd->lock);

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iores)
		return -ENODEV;

	if (!devm_request_mem_region(&pdev->dev, iores->start,
				     resource_size(iores),
				     pdev->name))
		return -EBUSY;

	wd->mmio = devm_ioremap(&pdev->dev, iores->start,
				resource_size(iores));
	if (!wd->mmio)
		return -EBUSY;

	/* DMA resource 0 = playback, 1 = capture */
	dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!dmares)
		return -EBUSY;
	wd->dmaids[SNDRV_PCM_STREAM_PLAYBACK] = dmares->start;

	dmares = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!dmares)
		return -EBUSY;
	wd->dmaids[SNDRV_PCM_STREAM_CAPTURE] = dmares->start;

	/* configuration: max dma trigger threshold, enable ac97 */
	wd->cfg = PSC_AC97CFG_RT_FIFO8 | PSC_AC97CFG_TT_FIFO8 |
		  PSC_AC97CFG_DE_ENABLE;

	/* preserve PSC clock source set up by platform */
	sel = au_readl(PSC_SEL(wd)) & PSC_SEL_CLK_MASK;
	au_writel(PSC_CTRL_DISABLE, PSC_CTRL(wd));
	au_sync();
	au_writel(0, PSC_SEL(wd));
	au_sync();
	au_writel(PSC_SEL_PS_AC97MODE | sel, PSC_SEL(wd));
	au_sync();

	/* name the DAI like this device instance ("au1xpsc-ac97.PSCINDEX") */
	memcpy(&wd->dai_drv, &au1xpsc_ac97_dai_template,
	       sizeof(struct snd_soc_dai_driver));
	wd->dai_drv.name = dev_name(&pdev->dev);

	platform_set_drvdata(pdev, wd);

	ret = snd_soc_register_dai(&pdev->dev, &wd->dai_drv);
	if (ret)
		return ret;

	/* publish the instance for ac97_to_pscdata() / dai probe */
	au1xpsc_ac97_workdata = wd;
	return 0;
}

/* unregister the DAI, quiesce the PSC and drop the global instance */
static int __devexit au1xpsc_ac97_drvremove(struct platform_device *pdev)
{
	struct au1xpsc_audio_data *wd = platform_get_drvdata(pdev);

	snd_soc_unregister_dai(&pdev->dev);

	/* disable PSC completely */
	au_writel(0, AC97_CFG(wd));
	au_sync();
	au_writel(PSC_CTRL_DISABLE, PSC_CTRL(wd));
	au_sync();

	au1xpsc_ac97_workdata = NULL;	/* MDEV */

	return 0;
}

#ifdef CONFIG_PM
/* suspend: remember the clock selection, then shut the PSC down */
static int au1xpsc_ac97_drvsuspend(struct device *dev)
{
	struct au1xpsc_audio_data *wd = dev_get_drvdata(dev);

	/* save interesting registers and disable PSC */
	wd->pm[0] = au_readl(PSC_SEL(wd));

	au_writel(0, AC97_CFG(wd));
	au_sync();
	au_writel(PSC_CTRL_DISABLE, PSC_CTRL(wd));
	au_sync();

	return 0;
}

static int au1xpsc_ac97_drvresume(struct device *dev)
{
	struct au1xpsc_audio_data *wd = dev_get_drvdata(dev);

	/* restore PSC clock config */
	au_writel(wd->pm[0] | PSC_SEL_PS_AC97MODE, PSC_SEL(wd));
	au_sync();

	/* after this point the ac97 core will cold-reset the codec.
	 * During cold-reset the PSC is reinitialized and the last
	 * configuration set up in hw_params() is restored.
	 */
	return 0;
}

static struct dev_pm_ops au1xpscac97_pmops = {
	.suspend	= au1xpsc_ac97_drvsuspend,
	.resume		= au1xpsc_ac97_drvresume,
};

#define AU1XPSCAC97_PMOPS &au1xpscac97_pmops

#else

#define AU1XPSCAC97_PMOPS NULL

#endif

static struct platform_driver au1xpsc_ac97_driver = {
	.driver	= {
		.name	= "au1xpsc_ac97",
		.owner	= THIS_MODULE,
		.pm	= AU1XPSCAC97_PMOPS,
	},
	.probe		= au1xpsc_ac97_drvprobe,
	.remove		= __devexit_p(au1xpsc_ac97_drvremove),
};

static int __init au1xpsc_ac97_load(void)
{
	au1xpsc_ac97_workdata = NULL;
	return platform_driver_register(&au1xpsc_ac97_driver);
}

static void __exit au1xpsc_ac97_unload(void)
{
	platform_driver_unregister(&au1xpsc_ac97_driver);
}

module_init(au1xpsc_ac97_load);
module_exit(au1xpsc_ac97_unload);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Au12x0/Au1550 PSC AC97 ALSA ASoC audio driver");
MODULE_AUTHOR("Manuel Lauss");
gpl-2.0
xiaolvmu/villec2-kernel
sound/isa/es18xx.c
5069
70959
/* * Driver for generic ESS AudioDrive ES18xx soundcards * Copyright (c) by Christian Fischbach <fishbach@pool.informatik.rwth-aachen.de> * Copyright (c) by Abramo Bagnara <abramo@alsa-project.org> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /* GENERAL NOTES: * * BUGS: * - There are pops (we can't delay in trigger function, cause midlevel * often need to trigger down and then up very quickly). * Any ideas? * - Support for 16 bit DMA seems to be broken. I've no hardware to tune it. */ /* * ES1868 NOTES: * - The chip has one half duplex pcm (with very limited full duplex support). * * - Duplex stereophonic sound is impossible. * - Record and playback must share the same frequency rate. * * - The driver use dma2 for playback and dma1 for capture. */ /* * ES1869 NOTES: * * - there are a first full duplex pcm and a second playback only pcm * (incompatible with first pcm capture) * * - there is support for the capture volume and ESS Spatializer 3D effect. * * - contrarily to some pages in DS_1869.PDF the rates can be set * independently. * * - Zoom Video is implemented by sharing the FM DAC, thus the user can * have either FM playback or Video playback but not both simultaneously. * The Video Playback Switch mixer control toggles this choice. 
 *
 * BUGS:
 *
 * - There is a major trouble I noted:
 *
 *   using both channel for playback stereo 16 bit samples at 44100 Hz
 *   the second pcm (Audio1) DMA slows down irregularly and sound is garbled.
 *
 *   The same happens using Audio1 for captureing.
 *
 *   The Windows driver does not suffer of this (although it use Audio1
 *   only for captureing). I'm unable to discover why.
 *
 */
/*
 * ES1879 NOTES:
 * - When Zoom Video is enabled (reg 0x71 bit 6 toggled on) the PCM playback
 *   seems to be effected (speaker_test plays a lower frequency). Can't find
 *   anything in the datasheet to account for this, so a Video Playback Switch
 *   control has been included to allow ZV to be enabled only when necessary.
 *   Then again on at least one test system the 0x71 bit 6 enable bit is not
 *   needed for ZV, so maybe the datasheet is entirely wrong here.
 */

#include <linux/init.h>
#include <linux/err.h>
#include <linux/isa.h>
#include <linux/pnp.h>
#include <linux/isapnp.h>
#include <linux/module.h>
#include <linux/delay.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/mpu401.h>
#include <sound/opl3.h>
#define SNDRV_LEGACY_FIND_FREE_IRQ
#define SNDRV_LEGACY_FIND_FREE_DMA
#include <sound/initval.h>

#define PFX "es18xx: "

/* per-card state for one ES18xx chip */
struct snd_es18xx {
	unsigned long port;		/* port of ESS chip */
	unsigned long ctrl_port;	/* Control port of ESS chip */
	struct resource *res_port;
	struct resource *res_mpu_port;
	struct resource *res_ctrl_port;
	int irq;			/* IRQ number of ESS chip */
	int dma1;			/* DMA1 */
	int dma2;			/* DMA2 */
	unsigned short version;		/* version of ESS chip */
	int caps;			/* Chip capabilities */
	unsigned short audio2_vol;	/* volume level of audio2 */

	unsigned short active;		/* active channel mask (DAC1/ADC1/DAC2) */
	unsigned int dma1_shift;	/* bytes->frames shift for dma1 */
	unsigned int dma2_shift;	/* bytes->frames shift for dma2 */

	struct snd_pcm *pcm;
	struct snd_pcm_substream *playback_a_substream;
	struct snd_pcm_substream *capture_a_substream;
	struct snd_pcm_substream *playback_b_substream;

	struct snd_rawmidi *rmidi;

	/* mixer controls the IRQ handler must notify about */
	struct snd_kcontrol *hw_volume;
	struct snd_kcontrol *hw_switch;
	struct snd_kcontrol *master_volume;
	struct snd_kcontrol *master_switch;

	spinlock_t reg_lock;		/* serializes DSP register access */
	spinlock_t mixer_lock;		/* serializes mixer port access */
#ifdef CONFIG_PM
	unsigned char pm_reg;
#endif
#ifdef CONFIG_PNP
	struct pnp_dev *dev;
	struct pnp_dev *devc;
#endif
};

/* interrupt source bits as reported by the chip */
#define AUDIO1_IRQ	0x01
#define AUDIO2_IRQ	0x02
#define HWV_IRQ		0x04
#define MPU_IRQ		0x08

/* capability flags stored in snd_es18xx.caps */
#define ES18XX_PCM2	0x0001	/* Has two useable PCM */
#define ES18XX_SPATIALIZER 0x0002	/* Has 3D Spatializer */
#define ES18XX_RECMIX	0x0004	/* Has record mixer */
#define ES18XX_DUPLEX_MONO 0x0008	/* Has mono duplex only */
#define ES18XX_DUPLEX_SAME 0x0010	/* Playback and record must share the same rate */
#define ES18XX_NEW_RATE	0x0020	/* More precise rate setting */
#define ES18XX_AUXB	0x0040	/* AuxB mixer control */
#define ES18XX_HWV	0x0080	/* Has separate hardware volume mixer controls*/
#define ES18XX_MONO	0x0100	/* Mono_in mixer control */
#define ES18XX_I2S	0x0200	/* I2S mixer control */
#define ES18XX_MUTEREC	0x0400	/* Record source can be muted */
#define ES18XX_CONTROL	0x0800	/* Has control ports */

/* Power Management */
#define ES18XX_PM	0x07
#define ES18XX_PM_GPO0	0x01
#define ES18XX_PM_GPO1	0x02
#define ES18XX_PM_PDR	0x04
#define ES18XX_PM_ANA	0x08
#define ES18XX_PM_FM	0x020
#define ES18XX_PM_SUS	0x080

/* Lowlevel */
#define DAC1 0x01
#define ADC1 0x02
#define DAC2 0x04
#define MILLISECOND 10000

/* Write one byte into the DSP command port, polling the busy bit first. */
static int snd_es18xx_dsp_command(struct snd_es18xx *chip, unsigned char val)
{
        int i;

	/* wait for bit 7 (busy) of the write-status port to clear */
        for(i = MILLISECOND; i; i--)
                if ((inb(chip->port + 0x0C) & 0x80) == 0) {
                        outb(val, chip->port + 0x0C);
                        return 0;
                }
        snd_printk(KERN_ERR "dsp_command: timeout (0x%x)\n", val);
        return -EINVAL;
}

/* Read one byte from the DSP data port once data-available is signalled. */
static int snd_es18xx_dsp_get_byte(struct snd_es18xx *chip)
{
        int i;

        for(i = MILLISECOND/10; i; i--)
                if (inb(chip->port + 0x0C) & 0x40)
                        return inb(chip->port + 0x0A);
        snd_printk(KERN_ERR "dsp_get_byte failed: 0x%lx = 0x%x!!!\n",
		   chip->port + 0x0A, inb(chip->port + 0x0A));
        return -ENODEV;
}

#undef REG_DEBUG

/* Write a DSP controller register (reg, then data) under reg_lock. */
static int snd_es18xx_write(struct snd_es18xx *chip,
			    unsigned char reg, unsigned char data)
{
	unsigned long flags;
	int ret;
	
        spin_lock_irqsave(&chip->reg_lock, flags);
	ret = snd_es18xx_dsp_command(chip, reg);
	if (ret < 0)
		goto end;
        ret = snd_es18xx_dsp_command(chip, data);
 end:
        spin_unlock_irqrestore(&chip->reg_lock, flags);
#ifdef REG_DEBUG
	snd_printk(KERN_DEBUG "Reg %02x set to %02x\n", reg, data);
#endif
	return ret;
}

/* Read a DSP controller register via the 0xC0 read-back command. */
static int snd_es18xx_read(struct snd_es18xx *chip, unsigned char reg)
{
	unsigned long flags;
	int ret, data;
        spin_lock_irqsave(&chip->reg_lock, flags);
	ret = snd_es18xx_dsp_command(chip, 0xC0);
	if (ret < 0)
		goto end;
        ret = snd_es18xx_dsp_command(chip, reg);
	if (ret < 0)
		goto end;
	data = snd_es18xx_dsp_get_byte(chip);
	ret = data;
#ifdef REG_DEBUG
	snd_printk(KERN_DEBUG "Reg %02x now is %02x (%d)\n", reg, data, ret);
#endif
 end:
        spin_unlock_irqrestore(&chip->reg_lock, flags);
	return ret;
}

/*
 * Read-modify-write of a DSP register: only the bits in 'mask' are
 * replaced by 'val'. Return old value (or negative error).
 */
static int snd_es18xx_bits(struct snd_es18xx *chip, unsigned char reg,
			   unsigned char mask, unsigned char val)
{
        int ret;
	unsigned char old, new, oval;
	unsigned long flags;
        spin_lock_irqsave(&chip->reg_lock, flags);
        ret = snd_es18xx_dsp_command(chip, 0xC0);
	if (ret < 0)
		goto end;
        ret = snd_es18xx_dsp_command(chip, reg);
	if (ret < 0)
		goto end;
	ret = snd_es18xx_dsp_get_byte(chip);
	if (ret < 0) {
		goto end;
	}
	old = ret;
	oval = old & mask;
	/* skip the write when the masked bits already hold 'val' */
	if (val != oval) {
		ret = snd_es18xx_dsp_command(chip, reg);
		if (ret < 0)
			goto end;
		new = (old & ~mask) | (val & mask);
		ret = snd_es18xx_dsp_command(chip, new);
		if (ret < 0)
			goto end;
#ifdef REG_DEBUG
		snd_printk(KERN_DEBUG "Reg %02x was %02x, set to %02x (%d)\n",
			   reg, old, new, ret);
#endif
	}
	ret = oval;
 end:
        spin_unlock_irqrestore(&chip->reg_lock, flags);
	return ret;
}

/* Write a mixer register (index port 0x04, data port 0x05). */
static inline void snd_es18xx_mixer_write(struct snd_es18xx *chip,
			    unsigned char reg, unsigned char data)
{
	unsigned long flags;
        spin_lock_irqsave(&chip->mixer_lock, flags);
        outb(reg, chip->port + 0x04);
        outb(data, chip->port + 0x05);
        spin_unlock_irqrestore(&chip->mixer_lock, flags);
#ifdef REG_DEBUG
	snd_printk(KERN_DEBUG "Mixer reg %02x set to %02x\n", reg, data);
#endif
}

/* Read a mixer register. */
static inline int snd_es18xx_mixer_read(struct snd_es18xx *chip, unsigned char reg)
{
	unsigned long flags;
	int data;
        spin_lock_irqsave(&chip->mixer_lock, flags);
        outb(reg, chip->port + 0x04);
	data = inb(chip->port + 0x05);
        spin_unlock_irqrestore(&chip->mixer_lock, flags);
#ifdef REG_DEBUG
	snd_printk(KERN_DEBUG "Mixer reg %02x now is %02x\n", reg, data);
#endif
        return data;
}

/* Read-modify-write of a mixer register. Return old (masked) value. */
static inline int snd_es18xx_mixer_bits(struct snd_es18xx *chip, unsigned char reg,
					unsigned char mask, unsigned char val)
{
	unsigned char old, new, oval;
	unsigned long flags;
        spin_lock_irqsave(&chip->mixer_lock, flags);
        outb(reg, chip->port + 0x04);
	old = inb(chip->port + 0x05);
	oval = old & mask;
	if (val != oval) {
		new = (old & ~mask) | (val & mask);
		outb(new, chip->port + 0x05);
#ifdef REG_DEBUG
		snd_printk(KERN_DEBUG "Mixer reg %02x was %02x, set to %02x\n",
			   reg, old, new);
#endif
	}
        spin_unlock_irqrestore(&chip->mixer_lock, flags);
	return oval;
}

/*
 * Probe helper: toggle the bits in 'mask' and read back to discover
 * whether the mixer register is actually writable on this chip.
 * Returns non-zero when the readback matched the written value.
 * NOTE: leaves the register with the toggled value.
 */
static inline int snd_es18xx_mixer_writable(struct snd_es18xx *chip, unsigned char reg,
					    unsigned char mask)
{
	int old, expected, new;
	unsigned long flags;
        spin_lock_irqsave(&chip->mixer_lock, flags);
        outb(reg, chip->port + 0x04);
	old = inb(chip->port + 0x05);
	expected = old ^ mask;
	outb(expected, chip->port + 0x05);
	new = inb(chip->port + 0x05);
        spin_unlock_irqrestore(&chip->mixer_lock, flags);
#ifdef REG_DEBUG
	snd_printk(KERN_DEBUG "Mixer reg %02x was %02x, set to %02x, now is %02x\n",
		   reg, old, expected, new);
#endif
	return expected == new;
}

/*
 * Full DSP reset; the chip answers 0xAA on the data port when alive.
 * Returns 0 on success, -1 when the chip did not respond.
 */
static int __devinit snd_es18xx_reset(struct snd_es18xx *chip)
{
	int i;
        outb(0x03, chip->port + 0x06);
        inb(chip->port + 0x06);
        outb(0x00, chip->port + 0x06);
        for(i = 0; i < MILLISECOND && !(inb(chip->port + 0x0E) & 0x80); i++);
        if (inb(chip->port + 0x0A) != 0xAA)
                return -1;
	return 0;
}

static int
snd_es18xx_reset_fifo(struct snd_es18xx *chip)
{
	/* FIFO-only reset (command 0x02), does not re-probe the DSP */
        outb(0x02, chip->port + 0x06);
        inb(chip->port + 0x06);
        outb(0x00, chip->port + 0x06);
	return 0;
}

/* master clock dividers for chips with ES18XX_NEW_RATE */
static struct snd_ratnum new_clocks[2] = {
	{
		.num = 793800,
		.den_min = 1,
		.den_max = 128,
		.den_step = 1,
	},
	{
		.num = 768000,
		.den_min = 1,
		.den_max = 128,
		.den_step = 1,
	}
};

static struct snd_pcm_hw_constraint_ratnums new_hw_constraints_clocks  = {
	.nrats = 2,
	.rats = new_clocks,
};

/* master clock dividers for the older rate-setting scheme */
static struct snd_ratnum old_clocks[2] = {
	{
		.num = 795444,
		.den_min = 1,
		.den_max = 128,
		.den_step = 1,
	},
	{
		.num = 397722,
		.den_min = 1,
		.den_max = 128,
		.den_step = 1,
	}
};

static struct snd_pcm_hw_constraint_ratnums old_hw_constraints_clocks = {
	.nrats = 2,
	.rats = old_clocks,
};

/*
 * Program the sample-rate divisor and filter divider for the stream.
 * 'mode' selects the target converter (DAC2 uses the mixer registers
 * 0x70/0x72 on two-PCM chips, everything else uses DSP regs 0xA1/0xA2).
 * The encoding of 'bits' depends on which clock table matched the rate.
 */
static void snd_es18xx_rate_set(struct snd_es18xx *chip, 
				struct snd_pcm_substream *substream,
				int mode)
{
	unsigned int bits, div0;
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (chip->caps & ES18XX_NEW_RATE) {
		if (runtime->rate_num == new_clocks[0].num)
			bits = 128 - runtime->rate_den;
		else
			bits = 256 - runtime->rate_den;
	} else {
		if (runtime->rate_num == old_clocks[0].num)
			bits = 256 - runtime->rate_den;
		else
			bits = 128 - runtime->rate_den;
	}

	/* set filter register */
	div0 = 256 - 7160000*20/(8*82*runtime->rate);
		
	if ((chip->caps & ES18XX_PCM2) && mode == DAC2) {
		snd_es18xx_mixer_write(chip, 0x70, bits);
		/*
		 * Comment from kernel oss driver:
		 * FKS: fascinating: 0x72 doesn't seem to work.
		 */
		snd_es18xx_write(chip, 0xA2, div0);
		snd_es18xx_mixer_write(chip, 0x72, div0);
	} else {
		snd_es18xx_write(chip, 0xA1, bits);
		snd_es18xx_write(chip, 0xA2, div0);
	}
}

/*
 * hw_params for playback: compute the bytes->frames shift, reject a
 * stereo request on mono-duplex chips while capture is open, and
 * allocate the DMA buffer.
 */
static int snd_es18xx_playback_hw_params(struct snd_pcm_substream *substream,
					 struct snd_pcm_hw_params *hw_params)
{
	struct snd_es18xx *chip = snd_pcm_substream_chip(substream);
	int shift, err;

	shift = 0;
	if (params_channels(hw_params) == 2)
		shift++;
	if (snd_pcm_format_width(params_format(hw_params)) == 16)
		shift++;

	/* substream 0 on two-PCM chips runs on DMA2 (Audio 2) */
	if (substream->number == 0 && (chip->caps & ES18XX_PCM2)) {
		if ((chip->caps & ES18XX_DUPLEX_MONO) &&
		    (chip->capture_a_substream) &&
		    params_channels(hw_params) != 1) {
			_snd_pcm_hw_param_setempty(hw_params, SNDRV_PCM_HW_PARAM_CHANNELS);
			return -EBUSY;
		}
		chip->dma2_shift = shift;
	} else {
		chip->dma1_shift = shift;
	}
	if ((err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params))) < 0)
		return err;
	return 0;
}

/* release the DMA buffer allocated in hw_params */
static int snd_es18xx_pcm_hw_free(struct snd_pcm_substream *substream)
{
	return snd_pcm_lib_free_pages(substream);
}

/*
 * Prepare the Audio 2 (DAC2) playback path: program rate, transfer
 * count reload, sample format bits in mixer reg 0x7A and the ISA DMA
 * controller (auto-init write mode).
 */
static int snd_es18xx_playback1_prepare(struct snd_es18xx *chip,
					struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int size = snd_pcm_lib_buffer_bytes(substream);
	unsigned int count = snd_pcm_lib_period_bytes(substream);

	snd_es18xx_rate_set(chip, substream, DAC2);

        /* Transfer Count Reload */
        count = 0x10000 - count;
        snd_es18xx_mixer_write(chip, 0x74, count & 0xff);
        snd_es18xx_mixer_write(chip, 0x76, count >> 8);

	/* Set format */
        snd_es18xx_mixer_bits(chip, 0x7A, 0x07,
			      ((runtime->channels == 1) ? 0x00 : 0x02) |
			      (snd_pcm_format_width(runtime->format) == 16 ? 0x01 : 0x00) |
			      (snd_pcm_format_unsigned(runtime->format) ? 0x00 : 0x04));

        /* Set DMA controller */
        snd_dma_program(chip->dma2, runtime->dma_addr, size, DMA_MODE_WRITE | DMA_AUTOINIT);

	return 0;
}

/*
 * Start/stop Audio 2 DMA via mixer reg 0x78; 'active' guards against
 * redundant triggers. AVOID_POPS ramps the Audio 2 volume around the
 * transition (compiled out by default).
 */
static int snd_es18xx_playback1_trigger(struct snd_es18xx *chip,
					struct snd_pcm_substream *substream,
					int cmd)
{
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		if (chip->active & DAC2)
			return 0;
		chip->active |= DAC2;
                /* Start DMA */
		if (chip->dma2 >= 4)
			snd_es18xx_mixer_write(chip, 0x78, 0xb3);
		else
			snd_es18xx_mixer_write(chip, 0x78, 0x93);
#ifdef AVOID_POPS
		/* Avoid pops */
                udelay(100000);
		if (chip->caps & ES18XX_PCM2)
			/* Restore Audio 2 volume */
			snd_es18xx_mixer_write(chip, 0x7C, chip->audio2_vol);
		else
			/* Enable PCM output */
			snd_es18xx_dsp_command(chip, 0xD1);
#endif
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		if (!(chip->active & DAC2))
			return 0;
		chip->active &= ~DAC2;
                /* Stop DMA */
                snd_es18xx_mixer_write(chip, 0x78, 0x00);
#ifdef AVOID_POPS
                udelay(25000);
		if (chip->caps & ES18XX_PCM2)
			/* Set Audio 2 volume to 0 */
			snd_es18xx_mixer_write(chip, 0x7C, 0);
		else
			/* Disable PCM output */
			snd_es18xx_dsp_command(chip, 0xD3);
#endif
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * hw_params for capture (always DMA1): same shift computation and
 * mono-duplex guard as the playback side.
 */
static int snd_es18xx_capture_hw_params(struct snd_pcm_substream *substream,
					struct snd_pcm_hw_params *hw_params)
{
	struct snd_es18xx *chip = snd_pcm_substream_chip(substream);
	int shift, err;

	shift = 0;
	if ((chip->caps & ES18XX_DUPLEX_MONO) &&
	    chip->playback_a_substream &&
	    params_channels(hw_params) != 1) {
		_snd_pcm_hw_param_setempty(hw_params, SNDRV_PCM_HW_PARAM_CHANNELS);
		return -EBUSY;
	}
	if (params_channels(hw_params) == 2)
		shift++;
	if (snd_pcm_format_width(params_format(hw_params)) == 16)
		shift++;
	chip->dma1_shift = shift;
	if ((err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params))) < 0)
		return err;
	return 0;
}

/* Prepare the Audio 1 (ADC1) capture path; body continues below. */
static int snd_es18xx_capture_prepare(struct snd_pcm_substream *substream)
{
	struct snd_es18xx *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime =
substream->runtime;
	unsigned int size = snd_pcm_lib_buffer_bytes(substream);
	unsigned int count = snd_pcm_lib_period_bytes(substream);

	snd_es18xx_reset_fifo(chip);

        /* Set stereo/mono */
        snd_es18xx_bits(chip, 0xA8, 0x03, runtime->channels == 1 ? 0x02 : 0x01);

	snd_es18xx_rate_set(chip, substream, ADC1);

        /* Transfer Count Reload */
	count = 0x10000 - count;
	snd_es18xx_write(chip, 0xA4, count & 0xff);
	snd_es18xx_write(chip, 0xA5, count >> 8);

#ifdef AVOID_POPS
	udelay(100000);
#endif

        /* Set format: first signedness (0xB7), then full mode byte */
        snd_es18xx_write(chip, 0xB7, 
                         snd_pcm_format_unsigned(runtime->format) ? 0x51 : 0x71);
        snd_es18xx_write(chip, 0xB7, 0x90 |
                         ((runtime->channels == 1) ? 0x40 : 0x08) |
                         (snd_pcm_format_width(runtime->format) == 16 ? 0x04 : 0x00) |
                         (snd_pcm_format_unsigned(runtime->format) ? 0x00 : 0x20));

        /* Set DMA controller */
        snd_dma_program(chip->dma1, runtime->dma_addr, size, DMA_MODE_READ | DMA_AUTOINIT);

	return 0;
}

/* Start/stop Audio 1 capture DMA via DSP reg 0xB8. */
static int snd_es18xx_capture_trigger(struct snd_pcm_substream *substream,
				      int cmd)
{
	struct snd_es18xx *chip = snd_pcm_substream_chip(substream);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		if (chip->active & ADC1)
			return 0;
		chip->active |= ADC1;
                /* Start DMA */
                snd_es18xx_write(chip, 0xB8, 0x0f);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		if (!(chip->active & ADC1))
			return 0;
		chip->active &= ~ADC1;
                /* Stop DMA */
                snd_es18xx_write(chip, 0xB8, 0x00);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Prepare the Audio 1 (DAC1) playback path — the second, playback-only
 * PCM which shares DMA1 with capture.
 */
static int snd_es18xx_playback2_prepare(struct snd_es18xx *chip,
					struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int size = snd_pcm_lib_buffer_bytes(substream);
	unsigned int count = snd_pcm_lib_period_bytes(substream);

	snd_es18xx_reset_fifo(chip);

        /* Set stereo/mono */
        snd_es18xx_bits(chip, 0xA8, 0x03, runtime->channels == 1 ? 0x02 : 0x01);

	snd_es18xx_rate_set(chip, substream, DAC1);

        /* Transfer Count Reload */
	count = 0x10000 - count;
	snd_es18xx_write(chip, 0xA4, count & 0xff);
	snd_es18xx_write(chip, 0xA5, count >> 8);

        /* Set format */
        snd_es18xx_write(chip, 0xB6,
                         snd_pcm_format_unsigned(runtime->format) ? 0x80 : 0x00);
        snd_es18xx_write(chip, 0xB7, 
                         snd_pcm_format_unsigned(runtime->format) ? 0x51 : 0x71);
        snd_es18xx_write(chip, 0xB7, 0x90 |
                         (runtime->channels == 1 ? 0x40 : 0x08) |
                         (snd_pcm_format_width(runtime->format) == 16 ? 0x04 : 0x00) |
                         (snd_pcm_format_unsigned(runtime->format) ? 0x00 : 0x20));

        /* Set DMA controller */
        snd_dma_program(chip->dma1, runtime->dma_addr, size, DMA_MODE_WRITE | DMA_AUTOINIT);

	return 0;
}

/* Start/stop Audio 1 playback DMA via DSP reg 0xB8. */
static int snd_es18xx_playback2_trigger(struct snd_es18xx *chip,
					struct snd_pcm_substream *substream,
					int cmd)
{
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		if (chip->active & DAC1)
			return 0;
		chip->active |= DAC1;
                /* Start DMA */
                snd_es18xx_write(chip, 0xB8, 0x05);
#ifdef AVOID_POPS
		/* Avoid pops */
                udelay(100000);
                /* Enable Audio 1 */
                snd_es18xx_dsp_command(chip, 0xD1);
#endif
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		if (!(chip->active & DAC1))
			return 0;
		chip->active &= ~DAC1;
                /* Stop DMA */
                snd_es18xx_write(chip, 0xB8, 0x00);
#ifdef AVOID_POPS
		/* Avoid pops */
                udelay(25000);
                /* Disable Audio 1 */
                snd_es18xx_dsp_command(chip, 0xD3);
#endif
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Dispatch prepare to Audio 2 (substream 0 on PCM2 chips) or Audio 1. */
static int snd_es18xx_playback_prepare(struct snd_pcm_substream *substream)
{
	struct snd_es18xx *chip = snd_pcm_substream_chip(substream);
	if (substream->number == 0 && (chip->caps & ES18XX_PCM2))
		return snd_es18xx_playback1_prepare(chip, substream);
	else
		return snd_es18xx_playback2_prepare(chip, substream);
}

/* Dispatch trigger to Audio 2 (substream 0 on PCM2 chips) or Audio 1. */
static int snd_es18xx_playback_trigger(struct snd_pcm_substream *substream,
				       int cmd)
{
	struct snd_es18xx *chip = snd_pcm_substream_chip(substream);
	if (substream->number == 0 && (chip->caps & ES18XX_PCM2))
		return
snd_es18xx_playback1_trigger(chip, substream, cmd); else return snd_es18xx_playback2_trigger(chip, substream, cmd); } static irqreturn_t snd_es18xx_interrupt(int irq, void *dev_id) { struct snd_card *card = dev_id; struct snd_es18xx *chip = card->private_data; unsigned char status; if (chip->caps & ES18XX_CONTROL) { /* Read Interrupt status */ status = inb(chip->ctrl_port + 6); } else { /* Read Interrupt status */ status = snd_es18xx_mixer_read(chip, 0x7f) >> 4; } #if 0 else { status = 0; if (inb(chip->port + 0x0C) & 0x01) status |= AUDIO1_IRQ; if (snd_es18xx_mixer_read(chip, 0x7A) & 0x80) status |= AUDIO2_IRQ; if ((chip->caps & ES18XX_HWV) && snd_es18xx_mixer_read(chip, 0x64) & 0x10) status |= HWV_IRQ; } #endif /* Audio 1 & Audio 2 */ if (status & AUDIO2_IRQ) { if (chip->active & DAC2) snd_pcm_period_elapsed(chip->playback_a_substream); /* ack interrupt */ snd_es18xx_mixer_bits(chip, 0x7A, 0x80, 0x00); } if (status & AUDIO1_IRQ) { /* ok.. capture is active */ if (chip->active & ADC1) snd_pcm_period_elapsed(chip->capture_a_substream); /* ok.. 
playback2 is active */ else if (chip->active & DAC1) snd_pcm_period_elapsed(chip->playback_b_substream); /* ack interrupt */ inb(chip->port + 0x0E); } /* MPU */ if ((status & MPU_IRQ) && chip->rmidi) snd_mpu401_uart_interrupt(irq, chip->rmidi->private_data); /* Hardware volume */ if (status & HWV_IRQ) { int split = 0; if (chip->caps & ES18XX_HWV) { split = snd_es18xx_mixer_read(chip, 0x64) & 0x80; snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &chip->hw_switch->id); snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &chip->hw_volume->id); } if (!split) { snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &chip->master_switch->id); snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &chip->master_volume->id); } /* ack interrupt */ snd_es18xx_mixer_write(chip, 0x66, 0x00); } return IRQ_HANDLED; } static snd_pcm_uframes_t snd_es18xx_playback_pointer(struct snd_pcm_substream *substream) { struct snd_es18xx *chip = snd_pcm_substream_chip(substream); unsigned int size = snd_pcm_lib_buffer_bytes(substream); int pos; if (substream->number == 0 && (chip->caps & ES18XX_PCM2)) { if (!(chip->active & DAC2)) return 0; pos = snd_dma_pointer(chip->dma2, size); return pos >> chip->dma2_shift; } else { if (!(chip->active & DAC1)) return 0; pos = snd_dma_pointer(chip->dma1, size); return pos >> chip->dma1_shift; } } static snd_pcm_uframes_t snd_es18xx_capture_pointer(struct snd_pcm_substream *substream) { struct snd_es18xx *chip = snd_pcm_substream_chip(substream); unsigned int size = snd_pcm_lib_buffer_bytes(substream); int pos; if (!(chip->active & ADC1)) return 0; pos = snd_dma_pointer(chip->dma1, size); return pos >> chip->dma1_shift; } static struct snd_pcm_hardware snd_es18xx_playback = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID), .formats = (SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE), .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 
4000,
	.rate_max =		48000,
	.channels_min =		1,
	.channels_max =		2,
	.buffer_bytes_max =	65536,
	.period_bytes_min =	64,
	.period_bytes_max =	65536,
	.periods_min =		1,
	.periods_max =		1024,
	.fifo_size =		0,
};

/* Capture capabilities: same limits as playback (ADC1 path). */
static struct snd_pcm_hardware snd_es18xx_capture =
{
	.info =			(SNDRV_PCM_INFO_MMAP |
				 SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_RESUME |
				 SNDRV_PCM_INFO_MMAP_VALID),
	.formats =		(SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S8 |
				 SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE),
	.rates =		SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
	.rate_min =		4000,
	.rate_max =		48000,
	.channels_min =		1,
	.channels_max =		2,
	.buffer_bytes_max =	65536,
	.period_bytes_min =	64,
	.period_bytes_max =	65536,
	.periods_min =		1,
	.periods_max =		1024,
	.fifo_size =		0,
};

/*
 * Open a playback substream.
 *
 * Substream 0 on PCM2-capable chips claims the Audio 2 (DAC2) path;
 * otherwise the Audio 1 (DAC1) path is used, which is shared with
 * capture and therefore fails with -EAGAIN while capture is open.
 * On DUPLEX_MONO chips duplex is only possible when the other
 * direction runs in mono.
 */
static int snd_es18xx_playback_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_es18xx *chip = snd_pcm_substream_chip(substream);

	if (substream->number == 0 && (chip->caps & ES18XX_PCM2)) {
		if ((chip->caps & ES18XX_DUPLEX_MONO) &&
		    chip->capture_a_substream &&
		    chip->capture_a_substream->runtime->channels != 1)
			return -EAGAIN;
		chip->playback_a_substream = substream;
	} else if (substream->number <= 1) {
		if (chip->capture_a_substream)
			return -EAGAIN;
		chip->playback_b_substream = substream;
	} else {
		snd_BUG();
		return -EINVAL;
	}
	substream->runtime->hw = snd_es18xx_playback;
	/* constrain the sample rate to what the chip clock can generate */
	snd_pcm_hw_constraint_ratnums(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
				      (chip->caps & ES18XX_NEW_RATE) ?
				      &new_hw_constraints_clocks :
				      &old_hw_constraints_clocks);
	return 0;
}

/*
 * Open a capture substream (always the ADC1/Audio 1 path).  Fails with
 * -EAGAIN while the Audio 1 playback path is busy or while a
 * DUPLEX_MONO conflict exists with Audio 2 playback.
 */
static int snd_es18xx_capture_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_es18xx *chip = snd_pcm_substream_chip(substream);

	if (chip->playback_b_substream)
		return -EAGAIN;
	if ((chip->caps & ES18XX_DUPLEX_MONO) &&
	    chip->playback_a_substream &&
	    chip->playback_a_substream->runtime->channels != 1)
		return -EAGAIN;
	chip->capture_a_substream = substream;
	substream->runtime->hw = snd_es18xx_capture;
	/* constrain the sample rate to what the chip clock can generate */
	snd_pcm_hw_constraint_ratnums(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
				      (chip->caps & ES18XX_NEW_RATE) ?
				      &new_hw_constraints_clocks :
				      &old_hw_constraints_clocks);
	return 0;
}

/* Drop the playback substream pointer and free its preallocated pages. */
static int snd_es18xx_playback_close(struct snd_pcm_substream *substream)
{
	struct snd_es18xx *chip = snd_pcm_substream_chip(substream);

	if (substream->number == 0 && (chip->caps & ES18XX_PCM2))
		chip->playback_a_substream = NULL;
	else
		chip->playback_b_substream = NULL;
	snd_pcm_lib_free_pages(substream);
	return 0;
}

/* Drop the capture substream pointer and free its preallocated pages. */
static int snd_es18xx_capture_close(struct snd_pcm_substream *substream)
{
	struct snd_es18xx *chip = snd_pcm_substream_chip(substream);

	chip->capture_a_substream = NULL;
	snd_pcm_lib_free_pages(substream);
	return 0;
}

/*
 * MIXER part
 */

/* Record source mux routines:
 * Depending on the chipset this mux switches between 4, 5, or 8 possible inputs.
 * bit table for the 4/5 source mux:
 * reg 1C:
 *  b2 b1 b0   muxSource
 *   x  0  x   microphone
 *   0  1  x   CD
 *   1  1  0   line
 *   1  1  1   mixer
 * if it's "mixer" and it's a 5 source mux chipset then reg 7A bit 3 determines
 * either the play mixer or the capture mixer.
 *
 * "map4Source" translates from source number to reg bit pattern
 * "invMap4Source" translates from reg bit pattern to source number
 */

/*
 * Describe the capture source mux control: the item count and label set
 * depend on the chip generation (4, 5, or 8 selectable inputs).
 */
static int snd_es18xx_info_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	static char *texts5Source[5] = {
		"Mic", "CD", "Line", "Master", "Mix"
	};
	static char *texts8Source[8] = {
		"Mic", "Mic Master", "CD", "AOUT",
		"Mic1", "Mix", "Line", "Master"
	};
	struct snd_es18xx *chip = snd_kcontrol_chip(kcontrol);

	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->count = 1;
	switch (chip->version) {
	case 0x1868:
	case 0x1878:
		uinfo->value.enumerated.items = 4;
		if (uinfo->value.enumerated.item > 3)
			uinfo->value.enumerated.item = 3;
		strcpy(uinfo->value.enumerated.name,
		       texts5Source[uinfo->value.enumerated.item]);
		break;
	case 0x1887:
	case 0x1888:
		uinfo->value.enumerated.items = 5;
		if (uinfo->value.enumerated.item > 4)
			uinfo->value.enumerated.item = 4;
		strcpy(uinfo->value.enumerated.name,
		       texts5Source[uinfo->value.enumerated.item]);
		break;
	case 0x1869: /* DS somewhat contradictory for 1869: could be 5 or 8 */
	case 0x1879:
		uinfo->value.enumerated.items = 8;
		if (uinfo->value.enumerated.item > 7)
			uinfo->value.enumerated.item = 7;
		strcpy(uinfo->value.enumerated.name,
		       texts8Source[uinfo->value.enumerated.item]);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * Read back the current capture source selection from mixer reg 1C
 * (and, on 5-source chips, the play/capture-mixer bit in reg 7A).
 */
static int snd_es18xx_get_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	static unsigned char invMap4Source[8] = {0, 0, 1, 1, 0, 0, 2, 3};
	struct snd_es18xx *chip = snd_kcontrol_chip(kcontrol);
	int muxSource = snd_es18xx_mixer_read(chip, 0x1c) & 0x07;

	if (!(chip->version == 0x1869 || chip->version == 0x1879)) {
		muxSource = invMap4Source[muxSource];
		/* 5-source chips: "mixer" + reg 7A bit 3 means "Mix" (item 4) */
		if (muxSource == 3 &&
		    (chip->version == 0x1887 || chip->version == 0x1888) &&
		    (snd_es18xx_mixer_read(chip, 0x7a) & 0x08))
			muxSource = 4;
	}
	ucontrol->value.enumerated.item[0] = muxSource;
	return 0;
}

/*
 * Program the capture source mux.  Returns 1 when a register actually
 * changed, 0 when nothing changed, -EINVAL on an out-of-range item.
 */
static int snd_es18xx_put_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	static unsigned char map4Source[4] = {0, 2, 6, 7};
	struct snd_es18xx *chip = snd_kcontrol_chip(kcontrol);
	unsigned char val = ucontrol->value.enumerated.item[0];
	unsigned char retVal = 0;

	switch (chip->version) {
	/* 5 source chips */
	case 0x1887:
	case 0x1888:
		if (val > 4)
			return -EINVAL;
		if (val == 4) {
			/* "Mix" = reg 1C "mixer" pattern plus reg 7A bit 3 */
			retVal = snd_es18xx_mixer_bits(chip, 0x7a, 0x08,
						       0x08) != 0x08;
			val = 3;
		} else
			retVal = snd_es18xx_mixer_bits(chip, 0x7a, 0x08,
						       0x00) != 0x00;
		/* fall through: reuse the 4-source bit mapping below */
	/* 4 source chips */
	case 0x1868:
	case 0x1878:
		if (val > 3)
			return -EINVAL;
		val = map4Source[val];
		break;
	/* 8 source chips */
	case 0x1869:
	case 0x1879:
		if (val > 7)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	return (snd_es18xx_mixer_bits(chip, 0x1c, 0x07, val) != val) || retVal;
}

#define snd_es18xx_info_spatializer_enable	snd_ctl_boolean_mono_info

/* Spatializer (3D) enable state: bit 3 of mixer reg 0x50. */
static int snd_es18xx_get_spatializer_enable(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_es18xx *chip = snd_kcontrol_chip(kcontrol);
	unsigned char val = snd_es18xx_mixer_read(chip, 0x50);
	ucontrol->value.integer.value[0] = !!(val & 8);
	return 0;
}

static int snd_es18xx_put_spatializer_enable(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_es18xx *chip = snd_kcontrol_chip(kcontrol);
	unsigned char oval, nval;
	int change;
	nval = ucontrol->value.integer.value[0] ?
0x0c : 0x04; oval = snd_es18xx_mixer_read(chip, 0x50) & 0x0c; change = nval != oval; if (change) { snd_es18xx_mixer_write(chip, 0x50, nval & ~0x04); snd_es18xx_mixer_write(chip, 0x50, nval); } return change; } static int snd_es18xx_info_hw_volume(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = 63; return 0; } static int snd_es18xx_get_hw_volume(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_es18xx *chip = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = snd_es18xx_mixer_read(chip, 0x61) & 0x3f; ucontrol->value.integer.value[1] = snd_es18xx_mixer_read(chip, 0x63) & 0x3f; return 0; } #define snd_es18xx_info_hw_switch snd_ctl_boolean_stereo_info static int snd_es18xx_get_hw_switch(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_es18xx *chip = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = !(snd_es18xx_mixer_read(chip, 0x61) & 0x40); ucontrol->value.integer.value[1] = !(snd_es18xx_mixer_read(chip, 0x63) & 0x40); return 0; } static void snd_es18xx_hwv_free(struct snd_kcontrol *kcontrol) { struct snd_es18xx *chip = snd_kcontrol_chip(kcontrol); chip->master_volume = NULL; chip->master_switch = NULL; chip->hw_volume = NULL; chip->hw_switch = NULL; } static int snd_es18xx_reg_bits(struct snd_es18xx *chip, unsigned char reg, unsigned char mask, unsigned char val) { if (reg < 0xa0) return snd_es18xx_mixer_bits(chip, reg, mask, val); else return snd_es18xx_bits(chip, reg, mask, val); } static int snd_es18xx_reg_read(struct snd_es18xx *chip, unsigned char reg) { if (reg < 0xa0) return snd_es18xx_mixer_read(chip, reg); else return snd_es18xx_read(chip, reg); } #define ES18XX_SINGLE(xname, xindex, reg, shift, mask, invert) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \ .info = snd_es18xx_info_single, \ .get = 
snd_es18xx_get_single, .put = snd_es18xx_put_single, \ .private_value = reg | (shift << 8) | (mask << 16) | (invert << 24) } static int snd_es18xx_info_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int mask = (kcontrol->private_value >> 16) & 0xff; uinfo->type = mask == 1 ? SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = mask; return 0; } static int snd_es18xx_get_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_es18xx *chip = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value & 0xff; int shift = (kcontrol->private_value >> 8) & 0xff; int mask = (kcontrol->private_value >> 16) & 0xff; int invert = (kcontrol->private_value >> 24) & 0xff; int val; val = snd_es18xx_reg_read(chip, reg); ucontrol->value.integer.value[0] = (val >> shift) & mask; if (invert) ucontrol->value.integer.value[0] = mask - ucontrol->value.integer.value[0]; return 0; } static int snd_es18xx_put_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_es18xx *chip = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value & 0xff; int shift = (kcontrol->private_value >> 8) & 0xff; int mask = (kcontrol->private_value >> 16) & 0xff; int invert = (kcontrol->private_value >> 24) & 0xff; unsigned char val; val = (ucontrol->value.integer.value[0] & mask); if (invert) val = mask - val; mask <<= shift; val <<= shift; return snd_es18xx_reg_bits(chip, reg, mask, val) != val; } #define ES18XX_DOUBLE(xname, xindex, left_reg, right_reg, shift_left, shift_right, mask, invert) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \ .info = snd_es18xx_info_double, \ .get = snd_es18xx_get_double, .put = snd_es18xx_put_double, \ .private_value = left_reg | (right_reg << 8) | (shift_left << 16) | (shift_right << 19) | (mask << 24) | (invert << 22) } static int snd_es18xx_info_double(struct 
snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int mask = (kcontrol->private_value >> 24) & 0xff; uinfo->type = mask == 1 ? SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = mask; return 0; } static int snd_es18xx_get_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_es18xx *chip = snd_kcontrol_chip(kcontrol); int left_reg = kcontrol->private_value & 0xff; int right_reg = (kcontrol->private_value >> 8) & 0xff; int shift_left = (kcontrol->private_value >> 16) & 0x07; int shift_right = (kcontrol->private_value >> 19) & 0x07; int mask = (kcontrol->private_value >> 24) & 0xff; int invert = (kcontrol->private_value >> 22) & 1; unsigned char left, right; left = snd_es18xx_reg_read(chip, left_reg); if (left_reg != right_reg) right = snd_es18xx_reg_read(chip, right_reg); else right = left; ucontrol->value.integer.value[0] = (left >> shift_left) & mask; ucontrol->value.integer.value[1] = (right >> shift_right) & mask; if (invert) { ucontrol->value.integer.value[0] = mask - ucontrol->value.integer.value[0]; ucontrol->value.integer.value[1] = mask - ucontrol->value.integer.value[1]; } return 0; } static int snd_es18xx_put_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_es18xx *chip = snd_kcontrol_chip(kcontrol); int left_reg = kcontrol->private_value & 0xff; int right_reg = (kcontrol->private_value >> 8) & 0xff; int shift_left = (kcontrol->private_value >> 16) & 0x07; int shift_right = (kcontrol->private_value >> 19) & 0x07; int mask = (kcontrol->private_value >> 24) & 0xff; int invert = (kcontrol->private_value >> 22) & 1; int change; unsigned char val1, val2, mask1, mask2; val1 = ucontrol->value.integer.value[0] & mask; val2 = ucontrol->value.integer.value[1] & mask; if (invert) { val1 = mask - val1; val2 = mask - val2; } val1 <<= shift_left; val2 <<= shift_right; mask1 = mask << shift_left; mask2 = 
mask << shift_right; if (left_reg != right_reg) { change = 0; if (snd_es18xx_reg_bits(chip, left_reg, mask1, val1) != val1) change = 1; if (snd_es18xx_reg_bits(chip, right_reg, mask2, val2) != val2) change = 1; } else { change = (snd_es18xx_reg_bits(chip, left_reg, mask1 | mask2, val1 | val2) != (val1 | val2)); } return change; } /* Mixer controls * These arrays contain setup data for mixer controls. * * The controls that are universal to all chipsets are fully initialized * here. */ static struct snd_kcontrol_new snd_es18xx_base_controls[] = { ES18XX_DOUBLE("Master Playback Volume", 0, 0x60, 0x62, 0, 0, 63, 0), ES18XX_DOUBLE("Master Playback Switch", 0, 0x60, 0x62, 6, 6, 1, 1), ES18XX_DOUBLE("Line Playback Volume", 0, 0x3e, 0x3e, 4, 0, 15, 0), ES18XX_DOUBLE("CD Playback Volume", 0, 0x38, 0x38, 4, 0, 15, 0), ES18XX_DOUBLE("FM Playback Volume", 0, 0x36, 0x36, 4, 0, 15, 0), ES18XX_DOUBLE("Mic Playback Volume", 0, 0x1a, 0x1a, 4, 0, 15, 0), ES18XX_DOUBLE("Aux Playback Volume", 0, 0x3a, 0x3a, 4, 0, 15, 0), ES18XX_SINGLE("Record Monitor", 0, 0xa8, 3, 1, 0), ES18XX_DOUBLE("Capture Volume", 0, 0xb4, 0xb4, 4, 0, 15, 0), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Capture Source", .info = snd_es18xx_info_mux, .get = snd_es18xx_get_mux, .put = snd_es18xx_put_mux, } }; static struct snd_kcontrol_new snd_es18xx_recmix_controls[] = { ES18XX_DOUBLE("PCM Capture Volume", 0, 0x69, 0x69, 4, 0, 15, 0), ES18XX_DOUBLE("Mic Capture Volume", 0, 0x68, 0x68, 4, 0, 15, 0), ES18XX_DOUBLE("Line Capture Volume", 0, 0x6e, 0x6e, 4, 0, 15, 0), ES18XX_DOUBLE("FM Capture Volume", 0, 0x6b, 0x6b, 4, 0, 15, 0), ES18XX_DOUBLE("CD Capture Volume", 0, 0x6a, 0x6a, 4, 0, 15, 0), ES18XX_DOUBLE("Aux Capture Volume", 0, 0x6c, 0x6c, 4, 0, 15, 0) }; /* * The chipset specific mixer controls */ static struct snd_kcontrol_new snd_es18xx_opt_speaker = ES18XX_SINGLE("Beep Playback Volume", 0, 0x3c, 0, 7, 0); static struct snd_kcontrol_new snd_es18xx_opt_1869[] = { ES18XX_SINGLE("Capture Switch", 0, 0x1c, 4, 1, 
1), ES18XX_SINGLE("Video Playback Switch", 0, 0x7f, 0, 1, 0), ES18XX_DOUBLE("Mono Playback Volume", 0, 0x6d, 0x6d, 4, 0, 15, 0), ES18XX_DOUBLE("Mono Capture Volume", 0, 0x6f, 0x6f, 4, 0, 15, 0) }; static struct snd_kcontrol_new snd_es18xx_opt_1878 = ES18XX_DOUBLE("Video Playback Volume", 0, 0x68, 0x68, 4, 0, 15, 0); static struct snd_kcontrol_new snd_es18xx_opt_1879[] = { ES18XX_SINGLE("Video Playback Switch", 0, 0x71, 6, 1, 0), ES18XX_DOUBLE("Video Playback Volume", 0, 0x6d, 0x6d, 4, 0, 15, 0), ES18XX_DOUBLE("Video Capture Volume", 0, 0x6f, 0x6f, 4, 0, 15, 0) }; static struct snd_kcontrol_new snd_es18xx_pcm1_controls[] = { ES18XX_DOUBLE("PCM Playback Volume", 0, 0x14, 0x14, 4, 0, 15, 0), }; static struct snd_kcontrol_new snd_es18xx_pcm2_controls[] = { ES18XX_DOUBLE("PCM Playback Volume", 0, 0x7c, 0x7c, 4, 0, 15, 0), ES18XX_DOUBLE("PCM Playback Volume", 1, 0x14, 0x14, 4, 0, 15, 0) }; static struct snd_kcontrol_new snd_es18xx_spatializer_controls[] = { ES18XX_SINGLE("3D Control - Level", 0, 0x52, 0, 63, 0), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "3D Control - Switch", .info = snd_es18xx_info_spatializer_enable, .get = snd_es18xx_get_spatializer_enable, .put = snd_es18xx_put_spatializer_enable, } }; static struct snd_kcontrol_new snd_es18xx_micpre1_control = ES18XX_SINGLE("Mic Boost (+26dB)", 0, 0xa9, 2, 1, 0); static struct snd_kcontrol_new snd_es18xx_micpre2_control = ES18XX_SINGLE("Mic Boost (+26dB)", 0, 0x7d, 3, 1, 0); static struct snd_kcontrol_new snd_es18xx_hw_volume_controls[] = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Hardware Master Playback Volume", .access = SNDRV_CTL_ELEM_ACCESS_READ, .info = snd_es18xx_info_hw_volume, .get = snd_es18xx_get_hw_volume, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Hardware Master Playback Switch", .access = SNDRV_CTL_ELEM_ACCESS_READ, .info = snd_es18xx_info_hw_switch, .get = snd_es18xx_get_hw_switch, }, ES18XX_SINGLE("Hardware Master Volume Split", 0, 0x64, 7, 1, 0), }; static int __devinit 
snd_es18xx_config_read(struct snd_es18xx *chip, unsigned char reg) { int data; outb(reg, chip->ctrl_port); data = inb(chip->ctrl_port + 1); return data; } static void __devinit snd_es18xx_config_write(struct snd_es18xx *chip, unsigned char reg, unsigned char data) { /* No need for spinlocks, this function is used only in otherwise protected init code */ outb(reg, chip->ctrl_port); outb(data, chip->ctrl_port + 1); #ifdef REG_DEBUG snd_printk(KERN_DEBUG "Config reg %02x set to %02x\n", reg, data); #endif } static int __devinit snd_es18xx_initialize(struct snd_es18xx *chip, unsigned long mpu_port, unsigned long fm_port) { int mask = 0; /* enable extended mode */ snd_es18xx_dsp_command(chip, 0xC6); /* Reset mixer registers */ snd_es18xx_mixer_write(chip, 0x00, 0x00); /* Audio 1 DMA demand mode (4 bytes/request) */ snd_es18xx_write(chip, 0xB9, 2); if (chip->caps & ES18XX_CONTROL) { /* Hardware volume IRQ */ snd_es18xx_config_write(chip, 0x27, chip->irq); if (fm_port > 0 && fm_port != SNDRV_AUTO_PORT) { /* FM I/O */ snd_es18xx_config_write(chip, 0x62, fm_port >> 8); snd_es18xx_config_write(chip, 0x63, fm_port & 0xff); } if (mpu_port > 0 && mpu_port != SNDRV_AUTO_PORT) { /* MPU-401 I/O */ snd_es18xx_config_write(chip, 0x64, mpu_port >> 8); snd_es18xx_config_write(chip, 0x65, mpu_port & 0xff); /* MPU-401 IRQ */ snd_es18xx_config_write(chip, 0x28, chip->irq); } /* Audio1 IRQ */ snd_es18xx_config_write(chip, 0x70, chip->irq); /* Audio2 IRQ */ snd_es18xx_config_write(chip, 0x72, chip->irq); /* Audio1 DMA */ snd_es18xx_config_write(chip, 0x74, chip->dma1); /* Audio2 DMA */ snd_es18xx_config_write(chip, 0x75, chip->dma2); /* Enable Audio 1 IRQ */ snd_es18xx_write(chip, 0xB1, 0x50); /* Enable Audio 2 IRQ */ snd_es18xx_mixer_write(chip, 0x7A, 0x40); /* Enable Audio 1 DMA */ snd_es18xx_write(chip, 0xB2, 0x50); /* Enable MPU and hardware volume interrupt */ snd_es18xx_mixer_write(chip, 0x64, 0x42); /* Enable ESS wavetable input */ snd_es18xx_mixer_bits(chip, 0x48, 0x10, 0x10); } 
else { int irqmask, dma1mask, dma2mask; switch (chip->irq) { case 2: case 9: irqmask = 0; break; case 5: irqmask = 1; break; case 7: irqmask = 2; break; case 10: irqmask = 3; break; default: snd_printk(KERN_ERR "invalid irq %d\n", chip->irq); return -ENODEV; } switch (chip->dma1) { case 0: dma1mask = 1; break; case 1: dma1mask = 2; break; case 3: dma1mask = 3; break; default: snd_printk(KERN_ERR "invalid dma1 %d\n", chip->dma1); return -ENODEV; } switch (chip->dma2) { case 0: dma2mask = 0; break; case 1: dma2mask = 1; break; case 3: dma2mask = 2; break; case 5: dma2mask = 3; break; default: snd_printk(KERN_ERR "invalid dma2 %d\n", chip->dma2); return -ENODEV; } /* Enable and set Audio 1 IRQ */ snd_es18xx_write(chip, 0xB1, 0x50 | (irqmask << 2)); /* Enable and set Audio 1 DMA */ snd_es18xx_write(chip, 0xB2, 0x50 | (dma1mask << 2)); /* Set Audio 2 DMA */ snd_es18xx_mixer_bits(chip, 0x7d, 0x07, 0x04 | dma2mask); /* Enable Audio 2 IRQ and DMA Set capture mixer input */ snd_es18xx_mixer_write(chip, 0x7A, 0x68); /* Enable and set hardware volume interrupt */ snd_es18xx_mixer_write(chip, 0x64, 0x06); if (mpu_port > 0 && mpu_port != SNDRV_AUTO_PORT) { /* MPU401 share irq with audio Joystick enabled FM enabled */ snd_es18xx_mixer_write(chip, 0x40, 0x43 | (mpu_port & 0xf0) >> 1); } snd_es18xx_mixer_write(chip, 0x7f, ((irqmask + 1) << 1) | 0x01); } if (chip->caps & ES18XX_NEW_RATE) { /* Change behaviour of register A1 4x oversampling 2nd channel DAC asynchronous */ snd_es18xx_mixer_write(chip, 0x71, 0x32); } if (!(chip->caps & ES18XX_PCM2)) { /* Enable DMA FIFO */ snd_es18xx_write(chip, 0xB7, 0x80); } if (chip->caps & ES18XX_SPATIALIZER) { /* Set spatializer parameters to recommended values */ snd_es18xx_mixer_write(chip, 0x54, 0x8f); snd_es18xx_mixer_write(chip, 0x56, 0x95); snd_es18xx_mixer_write(chip, 0x58, 0x94); snd_es18xx_mixer_write(chip, 0x5a, 0x80); } /* Flip the "enable I2S" bits for those chipsets that need it */ switch (chip->version) { case 0x1879: //Leaving I2S 
enabled on the 1879 screws up the PCM playback (rate effected somehow) //so a Switch control has been added to toggle this 0x71 bit on/off: //snd_es18xx_mixer_bits(chip, 0x71, 0x40, 0x40); /* Note: we fall through on purpose here. */ case 0x1878: snd_es18xx_config_write(chip, 0x29, snd_es18xx_config_read(chip, 0x29) | 0x40); break; } /* Mute input source */ if (chip->caps & ES18XX_MUTEREC) mask = 0x10; if (chip->caps & ES18XX_RECMIX) snd_es18xx_mixer_write(chip, 0x1c, 0x05 | mask); else { snd_es18xx_mixer_write(chip, 0x1c, 0x00 | mask); snd_es18xx_write(chip, 0xb4, 0x00); } #ifndef AVOID_POPS /* Enable PCM output */ snd_es18xx_dsp_command(chip, 0xD1); #endif return 0; } static int __devinit snd_es18xx_identify(struct snd_es18xx *chip) { int hi,lo; /* reset */ if (snd_es18xx_reset(chip) < 0) { snd_printk(KERN_ERR "reset at 0x%lx failed!!!\n", chip->port); return -ENODEV; } snd_es18xx_dsp_command(chip, 0xe7); hi = snd_es18xx_dsp_get_byte(chip); if (hi < 0) { return hi; } lo = snd_es18xx_dsp_get_byte(chip); if ((lo & 0xf0) != 0x80) { return -ENODEV; } if (hi == 0x48) { chip->version = 0x488; return 0; } if (hi != 0x68) { return -ENODEV; } if ((lo & 0x0f) < 8) { chip->version = 0x688; return 0; } outb(0x40, chip->port + 0x04); udelay(10); hi = inb(chip->port + 0x05); udelay(10); lo = inb(chip->port + 0x05); if (hi != lo) { chip->version = hi << 8 | lo; chip->ctrl_port = inb(chip->port + 0x05) << 8; udelay(10); chip->ctrl_port += inb(chip->port + 0x05); if ((chip->res_ctrl_port = request_region(chip->ctrl_port, 8, "ES18xx - CTRL")) == NULL) { snd_printk(KERN_ERR PFX "unable go grab port 0x%lx\n", chip->ctrl_port); return -EBUSY; } return 0; } /* If has Hardware volume */ if (snd_es18xx_mixer_writable(chip, 0x64, 0x04)) { /* If has Audio2 */ if (snd_es18xx_mixer_writable(chip, 0x70, 0x7f)) { /* If has volume count */ if (snd_es18xx_mixer_writable(chip, 0x64, 0x20)) { chip->version = 0x1887; } else { chip->version = 0x1888; } } else { chip->version = 0x1788; } } else 
chip->version = 0x1688; return 0; } static int __devinit snd_es18xx_probe(struct snd_es18xx *chip, unsigned long mpu_port, unsigned long fm_port) { if (snd_es18xx_identify(chip) < 0) { snd_printk(KERN_ERR PFX "[0x%lx] ESS chip not found\n", chip->port); return -ENODEV; } switch (chip->version) { case 0x1868: chip->caps = ES18XX_DUPLEX_MONO | ES18XX_DUPLEX_SAME | ES18XX_CONTROL; break; case 0x1869: chip->caps = ES18XX_PCM2 | ES18XX_SPATIALIZER | ES18XX_RECMIX | ES18XX_NEW_RATE | ES18XX_AUXB | ES18XX_MONO | ES18XX_MUTEREC | ES18XX_CONTROL | ES18XX_HWV; break; case 0x1878: chip->caps = ES18XX_DUPLEX_MONO | ES18XX_DUPLEX_SAME | ES18XX_I2S | ES18XX_CONTROL; break; case 0x1879: chip->caps = ES18XX_PCM2 | ES18XX_SPATIALIZER | ES18XX_RECMIX | ES18XX_NEW_RATE | ES18XX_AUXB | ES18XX_I2S | ES18XX_CONTROL | ES18XX_HWV; break; case 0x1887: case 0x1888: chip->caps = ES18XX_PCM2 | ES18XX_RECMIX | ES18XX_AUXB | ES18XX_DUPLEX_SAME; break; default: snd_printk(KERN_ERR "[0x%lx] unsupported chip ES%x\n", chip->port, chip->version); return -ENODEV; } snd_printd("[0x%lx] ESS%x chip found\n", chip->port, chip->version); if (chip->dma1 == chip->dma2) chip->caps &= ~(ES18XX_PCM2 | ES18XX_DUPLEX_SAME); return snd_es18xx_initialize(chip, mpu_port, fm_port); } static struct snd_pcm_ops snd_es18xx_playback_ops = { .open = snd_es18xx_playback_open, .close = snd_es18xx_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_es18xx_playback_hw_params, .hw_free = snd_es18xx_pcm_hw_free, .prepare = snd_es18xx_playback_prepare, .trigger = snd_es18xx_playback_trigger, .pointer = snd_es18xx_playback_pointer, }; static struct snd_pcm_ops snd_es18xx_capture_ops = { .open = snd_es18xx_capture_open, .close = snd_es18xx_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_es18xx_capture_hw_params, .hw_free = snd_es18xx_pcm_hw_free, .prepare = snd_es18xx_capture_prepare, .trigger = snd_es18xx_capture_trigger, .pointer = snd_es18xx_capture_pointer, }; static int __devinit snd_es18xx_pcm(struct 
snd_card *card, int device, struct snd_pcm **rpcm) { struct snd_es18xx *chip = card->private_data; struct snd_pcm *pcm; char str[16]; int err; if (rpcm) *rpcm = NULL; sprintf(str, "ES%x", chip->version); if (chip->caps & ES18XX_PCM2) err = snd_pcm_new(card, str, device, 2, 1, &pcm); else err = snd_pcm_new(card, str, device, 1, 1, &pcm); if (err < 0) return err; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_es18xx_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_es18xx_capture_ops); /* global setup */ pcm->private_data = chip; pcm->info_flags = 0; if (chip->caps & ES18XX_DUPLEX_SAME) pcm->info_flags |= SNDRV_PCM_INFO_JOINT_DUPLEX; if (! (chip->caps & ES18XX_PCM2)) pcm->info_flags |= SNDRV_PCM_INFO_HALF_DUPLEX; sprintf(pcm->name, "ESS AudioDrive ES%x", chip->version); chip->pcm = pcm; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_isa_data(), 64*1024, chip->dma1 > 3 || chip->dma2 > 3 ? 128*1024 : 64*1024); if (rpcm) *rpcm = pcm; return 0; } /* Power Management support functions */ #ifdef CONFIG_PM static int snd_es18xx_suspend(struct snd_card *card, pm_message_t state) { struct snd_es18xx *chip = card->private_data; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); snd_pcm_suspend_all(chip->pcm); /* power down */ chip->pm_reg = (unsigned char)snd_es18xx_read(chip, ES18XX_PM); chip->pm_reg |= (ES18XX_PM_FM | ES18XX_PM_SUS); snd_es18xx_write(chip, ES18XX_PM, chip->pm_reg); snd_es18xx_write(chip, ES18XX_PM, chip->pm_reg ^= ES18XX_PM_SUS); return 0; } static int snd_es18xx_resume(struct snd_card *card) { struct snd_es18xx *chip = card->private_data; /* restore PM register, we won't wake till (not 0x07) i/o activity though */ snd_es18xx_write(chip, ES18XX_PM, chip->pm_reg ^= ES18XX_PM_FM); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif /* CONFIG_PM */ static int snd_es18xx_free(struct snd_card *card) { struct snd_es18xx *chip = card->private_data; release_and_free_resource(chip->res_port); 
release_and_free_resource(chip->res_ctrl_port); release_and_free_resource(chip->res_mpu_port); if (chip->irq >= 0) free_irq(chip->irq, (void *) card); if (chip->dma1 >= 0) { disable_dma(chip->dma1); free_dma(chip->dma1); } if (chip->dma2 >= 0 && chip->dma1 != chip->dma2) { disable_dma(chip->dma2); free_dma(chip->dma2); } return 0; } static int snd_es18xx_dev_free(struct snd_device *device) { return snd_es18xx_free(device->card); } static int __devinit snd_es18xx_new_device(struct snd_card *card, unsigned long port, unsigned long mpu_port, unsigned long fm_port, int irq, int dma1, int dma2) { struct snd_es18xx *chip = card->private_data; static struct snd_device_ops ops = { .dev_free = snd_es18xx_dev_free, }; int err; spin_lock_init(&chip->reg_lock); spin_lock_init(&chip->mixer_lock); chip->port = port; chip->irq = -1; chip->dma1 = -1; chip->dma2 = -1; chip->audio2_vol = 0x00; chip->active = 0; chip->res_port = request_region(port, 16, "ES18xx"); if (chip->res_port == NULL) { snd_es18xx_free(card); snd_printk(KERN_ERR PFX "unable to grap ports 0x%lx-0x%lx\n", port, port + 16 - 1); return -EBUSY; } if (request_irq(irq, snd_es18xx_interrupt, 0, "ES18xx", (void *) card)) { snd_es18xx_free(card); snd_printk(KERN_ERR PFX "unable to grap IRQ %d\n", irq); return -EBUSY; } chip->irq = irq; if (request_dma(dma1, "ES18xx DMA 1")) { snd_es18xx_free(card); snd_printk(KERN_ERR PFX "unable to grap DMA1 %d\n", dma1); return -EBUSY; } chip->dma1 = dma1; if (dma2 != dma1 && request_dma(dma2, "ES18xx DMA 2")) { snd_es18xx_free(card); snd_printk(KERN_ERR PFX "unable to grap DMA2 %d\n", dma2); return -EBUSY; } chip->dma2 = dma2; if (snd_es18xx_probe(chip, mpu_port, fm_port) < 0) { snd_es18xx_free(card); return -ENODEV; } err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); if (err < 0) { snd_es18xx_free(card); return err; } return 0; } static int __devinit snd_es18xx_mixer(struct snd_card *card) { struct snd_es18xx *chip = card->private_data; int err; unsigned int idx; 
strcpy(card->mixername, chip->pcm->name); for (idx = 0; idx < ARRAY_SIZE(snd_es18xx_base_controls); idx++) { struct snd_kcontrol *kctl; kctl = snd_ctl_new1(&snd_es18xx_base_controls[idx], chip); if (chip->caps & ES18XX_HWV) { switch (idx) { case 0: chip->master_volume = kctl; kctl->private_free = snd_es18xx_hwv_free; break; case 1: chip->master_switch = kctl; kctl->private_free = snd_es18xx_hwv_free; break; } } if ((err = snd_ctl_add(card, kctl)) < 0) return err; } if (chip->caps & ES18XX_PCM2) { for (idx = 0; idx < ARRAY_SIZE(snd_es18xx_pcm2_controls); idx++) { if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_pcm2_controls[idx], chip))) < 0) return err; } } else { for (idx = 0; idx < ARRAY_SIZE(snd_es18xx_pcm1_controls); idx++) { if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_pcm1_controls[idx], chip))) < 0) return err; } } if (chip->caps & ES18XX_RECMIX) { for (idx = 0; idx < ARRAY_SIZE(snd_es18xx_recmix_controls); idx++) { if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_recmix_controls[idx], chip))) < 0) return err; } } switch (chip->version) { default: if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_micpre1_control, chip))) < 0) return err; break; case 0x1869: case 0x1879: if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_micpre2_control, chip))) < 0) return err; break; } if (chip->caps & ES18XX_SPATIALIZER) { for (idx = 0; idx < ARRAY_SIZE(snd_es18xx_spatializer_controls); idx++) { if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_spatializer_controls[idx], chip))) < 0) return err; } } if (chip->caps & ES18XX_HWV) { for (idx = 0; idx < ARRAY_SIZE(snd_es18xx_hw_volume_controls); idx++) { struct snd_kcontrol *kctl; kctl = snd_ctl_new1(&snd_es18xx_hw_volume_controls[idx], chip); if (idx == 0) chip->hw_volume = kctl; else chip->hw_switch = kctl; kctl->private_free = snd_es18xx_hwv_free; if ((err = snd_ctl_add(card, kctl)) < 0) return err; } } /* finish initializing other chipset specific controls */ if (chip->version != 0x1868) { 
err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_opt_speaker, chip)); if (err < 0) return err; } if (chip->version == 0x1869) { for (idx = 0; idx < ARRAY_SIZE(snd_es18xx_opt_1869); idx++) { err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_opt_1869[idx], chip)); if (err < 0) return err; } } else if (chip->version == 0x1878) { err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_opt_1878, chip)); if (err < 0) return err; } else if (chip->version == 0x1879) { for (idx = 0; idx < ARRAY_SIZE(snd_es18xx_opt_1879); idx++) { err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_opt_1879[idx], chip)); if (err < 0) return err; } } return 0; } /* Card level */ MODULE_AUTHOR("Christian Fischbach <fishbach@pool.informatik.rwth-aachen.de>, Abramo Bagnara <abramo@alsa-project.org>"); MODULE_DESCRIPTION("ESS ES18xx AudioDrive"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{ESS,ES1868 PnP AudioDrive}," "{ESS,ES1869 PnP AudioDrive}," "{ESS,ES1878 PnP AudioDrive}," "{ESS,ES1879 PnP AudioDrive}," "{ESS,ES1887 PnP AudioDrive}," "{ESS,ES1888 PnP AudioDrive}," "{ESS,ES1887 AudioDrive}," "{ESS,ES1888 AudioDrive}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_ISAPNP; /* Enable this card */ #ifdef CONFIG_PNP static bool isapnp[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_ISAPNP; #endif static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* 0x220,0x240,0x260,0x280 */ #ifndef CONFIG_PNP static long mpu_port[SNDRV_CARDS] = {[0 ... 
(SNDRV_CARDS - 1)] = -1}; #else static long mpu_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; #endif static long fm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 5,7,9,10 */ static int dma1[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 0,1,3 */ static int dma2[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 0,1,3 */ module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for ES18xx soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for ES18xx soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable ES18xx soundcard."); #ifdef CONFIG_PNP module_param_array(isapnp, bool, NULL, 0444); MODULE_PARM_DESC(isapnp, "PnP detection for specified soundcard."); #endif module_param_array(port, long, NULL, 0444); MODULE_PARM_DESC(port, "Port # for ES18xx driver."); module_param_array(mpu_port, long, NULL, 0444); MODULE_PARM_DESC(mpu_port, "MPU-401 port # for ES18xx driver."); module_param_array(fm_port, long, NULL, 0444); MODULE_PARM_DESC(fm_port, "FM port # for ES18xx driver."); module_param_array(irq, int, NULL, 0444); MODULE_PARM_DESC(irq, "IRQ # for ES18xx driver."); module_param_array(dma1, int, NULL, 0444); MODULE_PARM_DESC(dma1, "DMA 1 # for ES18xx driver."); module_param_array(dma2, int, NULL, 0444); MODULE_PARM_DESC(dma2, "DMA 2 # for ES18xx driver."); #ifdef CONFIG_PNP static int isa_registered; static int pnp_registered; static int pnpc_registered; static struct pnp_device_id snd_audiodrive_pnpbiosids[] = { { .id = "ESS1869" }, { .id = "ESS1879" }, { .id = "" } /* end */ }; MODULE_DEVICE_TABLE(pnp, snd_audiodrive_pnpbiosids); /* PnP main device initialization */ static int __devinit snd_audiodrive_pnp_init_main(int dev, struct pnp_dev *pdev) { if (pnp_activate_dev(pdev) < 0) { snd_printk(KERN_ERR PFX "PnP configure failure (out of resources?)\n"); return -EBUSY; } /* ok. 
hack using Vendor-Defined Card-Level registers */ /* skip csn and logdev initialization - already done in isapnp_configure */ if (pnp_device_is_isapnp(pdev)) { isapnp_cfg_begin(isapnp_card_number(pdev), isapnp_csn_number(pdev)); isapnp_write_byte(0x27, pnp_irq(pdev, 0)); /* Hardware Volume IRQ Number */ if (mpu_port[dev] != SNDRV_AUTO_PORT) isapnp_write_byte(0x28, pnp_irq(pdev, 0)); /* MPU-401 IRQ Number */ isapnp_write_byte(0x72, pnp_irq(pdev, 0)); /* second IRQ */ isapnp_cfg_end(); } port[dev] = pnp_port_start(pdev, 0); fm_port[dev] = pnp_port_start(pdev, 1); mpu_port[dev] = pnp_port_start(pdev, 2); dma1[dev] = pnp_dma(pdev, 0); dma2[dev] = pnp_dma(pdev, 1); irq[dev] = pnp_irq(pdev, 0); snd_printdd("PnP ES18xx: port=0x%lx, fm port=0x%lx, mpu port=0x%lx\n", port[dev], fm_port[dev], mpu_port[dev]); snd_printdd("PnP ES18xx: dma1=%i, dma2=%i, irq=%i\n", dma1[dev], dma2[dev], irq[dev]); return 0; } static int __devinit snd_audiodrive_pnp(int dev, struct snd_es18xx *chip, struct pnp_dev *pdev) { chip->dev = pdev; if (snd_audiodrive_pnp_init_main(dev, chip->dev) < 0) return -EBUSY; return 0; } static struct pnp_card_device_id snd_audiodrive_pnpids[] = { /* ESS 1868 (integrated on Compaq dual P-Pro motherboard and Genius 18PnP 3D) */ { .id = "ESS1868", .devs = { { "ESS1868" }, { "ESS0000" } } }, /* ESS 1868 (integrated on Maxisound Cards) */ { .id = "ESS1868", .devs = { { "ESS8601" }, { "ESS8600" } } }, /* ESS 1868 (integrated on Maxisound Cards) */ { .id = "ESS1868", .devs = { { "ESS8611" }, { "ESS8610" } } }, /* ESS ES1869 Plug and Play AudioDrive */ { .id = "ESS0003", .devs = { { "ESS1869" }, { "ESS0006" } } }, /* ESS 1869 */ { .id = "ESS1869", .devs = { { "ESS1869" }, { "ESS0006" } } }, /* ESS 1878 */ { .id = "ESS1878", .devs = { { "ESS1878" }, { "ESS0004" } } }, /* ESS 1879 */ { .id = "ESS1879", .devs = { { "ESS1879" }, { "ESS0009" } } }, /* --- */ { .id = "" } /* end */ }; MODULE_DEVICE_TABLE(pnp_card, snd_audiodrive_pnpids); static int __devinit 
snd_audiodrive_pnpc(int dev, struct snd_es18xx *chip, struct pnp_card_link *card, const struct pnp_card_device_id *id) { chip->dev = pnp_request_card_device(card, id->devs[0].id, NULL); if (chip->dev == NULL) return -EBUSY; chip->devc = pnp_request_card_device(card, id->devs[1].id, NULL); if (chip->devc == NULL) return -EBUSY; /* Control port initialization */ if (pnp_activate_dev(chip->devc) < 0) { snd_printk(KERN_ERR PFX "PnP control configure failure (out of resources?)\n"); return -EAGAIN; } snd_printdd("pnp: port=0x%llx\n", (unsigned long long)pnp_port_start(chip->devc, 0)); if (snd_audiodrive_pnp_init_main(dev, chip->dev) < 0) return -EBUSY; return 0; } #endif /* CONFIG_PNP */ #ifdef CONFIG_PNP #define is_isapnp_selected(dev) isapnp[dev] #else #define is_isapnp_selected(dev) 0 #endif static int snd_es18xx_card_new(int dev, struct snd_card **cardp) { return snd_card_create(index[dev], id[dev], THIS_MODULE, sizeof(struct snd_es18xx), cardp); } static int __devinit snd_audiodrive_probe(struct snd_card *card, int dev) { struct snd_es18xx *chip = card->private_data; struct snd_opl3 *opl3; int err; err = snd_es18xx_new_device(card, port[dev], mpu_port[dev], fm_port[dev], irq[dev], dma1[dev], dma2[dev]); if (err < 0) return err; sprintf(card->driver, "ES%x", chip->version); sprintf(card->shortname, "ESS AudioDrive ES%x", chip->version); if (dma1[dev] != dma2[dev]) sprintf(card->longname, "%s at 0x%lx, irq %d, dma1 %d, dma2 %d", card->shortname, chip->port, irq[dev], dma1[dev], dma2[dev]); else sprintf(card->longname, "%s at 0x%lx, irq %d, dma %d", card->shortname, chip->port, irq[dev], dma1[dev]); err = snd_es18xx_pcm(card, 0, NULL); if (err < 0) return err; err = snd_es18xx_mixer(card); if (err < 0) return err; if (fm_port[dev] > 0 && fm_port[dev] != SNDRV_AUTO_PORT) { if (snd_opl3_create(card, fm_port[dev], fm_port[dev] + 2, OPL3_HW_OPL3, 0, &opl3) < 0) { snd_printk(KERN_WARNING PFX "opl3 not detected at 0x%lx\n", fm_port[dev]); } else { err = 
snd_opl3_hwdep_new(opl3, 0, 1, NULL); if (err < 0) return err; } } if (mpu_port[dev] > 0 && mpu_port[dev] != SNDRV_AUTO_PORT) { err = snd_mpu401_uart_new(card, 0, MPU401_HW_ES18XX, mpu_port[dev], MPU401_INFO_IRQ_HOOK, -1, &chip->rmidi); if (err < 0) return err; } return snd_card_register(card); } static int __devinit snd_es18xx_isa_match(struct device *pdev, unsigned int dev) { return enable[dev] && !is_isapnp_selected(dev); } static int __devinit snd_es18xx_isa_probe1(int dev, struct device *devptr) { struct snd_card *card; int err; err = snd_es18xx_card_new(dev, &card); if (err < 0) return err; snd_card_set_dev(card, devptr); if ((err = snd_audiodrive_probe(card, dev)) < 0) { snd_card_free(card); return err; } dev_set_drvdata(devptr, card); return 0; } static int __devinit snd_es18xx_isa_probe(struct device *pdev, unsigned int dev) { int err; static int possible_irqs[] = {5, 9, 10, 7, 11, 12, -1}; static int possible_dmas[] = {1, 0, 3, 5, -1}; if (irq[dev] == SNDRV_AUTO_IRQ) { if ((irq[dev] = snd_legacy_find_free_irq(possible_irqs)) < 0) { snd_printk(KERN_ERR PFX "unable to find a free IRQ\n"); return -EBUSY; } } if (dma1[dev] == SNDRV_AUTO_DMA) { if ((dma1[dev] = snd_legacy_find_free_dma(possible_dmas)) < 0) { snd_printk(KERN_ERR PFX "unable to find a free DMA1\n"); return -EBUSY; } } if (dma2[dev] == SNDRV_AUTO_DMA) { if ((dma2[dev] = snd_legacy_find_free_dma(possible_dmas)) < 0) { snd_printk(KERN_ERR PFX "unable to find a free DMA2\n"); return -EBUSY; } } if (port[dev] != SNDRV_AUTO_PORT) { return snd_es18xx_isa_probe1(dev, pdev); } else { static unsigned long possible_ports[] = {0x220, 0x240, 0x260, 0x280}; int i; for (i = 0; i < ARRAY_SIZE(possible_ports); i++) { port[dev] = possible_ports[i]; err = snd_es18xx_isa_probe1(dev, pdev); if (! 
err) return 0; } return err; } } static int __devexit snd_es18xx_isa_remove(struct device *devptr, unsigned int dev) { snd_card_free(dev_get_drvdata(devptr)); dev_set_drvdata(devptr, NULL); return 0; } #ifdef CONFIG_PM static int snd_es18xx_isa_suspend(struct device *dev, unsigned int n, pm_message_t state) { return snd_es18xx_suspend(dev_get_drvdata(dev), state); } static int snd_es18xx_isa_resume(struct device *dev, unsigned int n) { return snd_es18xx_resume(dev_get_drvdata(dev)); } #endif #define DEV_NAME "es18xx" static struct isa_driver snd_es18xx_isa_driver = { .match = snd_es18xx_isa_match, .probe = snd_es18xx_isa_probe, .remove = __devexit_p(snd_es18xx_isa_remove), #ifdef CONFIG_PM .suspend = snd_es18xx_isa_suspend, .resume = snd_es18xx_isa_resume, #endif .driver = { .name = DEV_NAME }, }; #ifdef CONFIG_PNP static int __devinit snd_audiodrive_pnp_detect(struct pnp_dev *pdev, const struct pnp_device_id *id) { static int dev; int err; struct snd_card *card; if (pnp_device_is_isapnp(pdev)) return -ENOENT; /* we have another procedure - card */ for (; dev < SNDRV_CARDS; dev++) { if (enable[dev] && isapnp[dev]) break; } if (dev >= SNDRV_CARDS) return -ENODEV; err = snd_es18xx_card_new(dev, &card); if (err < 0) return err; if ((err = snd_audiodrive_pnp(dev, card->private_data, pdev)) < 0) { snd_card_free(card); return err; } snd_card_set_dev(card, &pdev->dev); if ((err = snd_audiodrive_probe(card, dev)) < 0) { snd_card_free(card); return err; } pnp_set_drvdata(pdev, card); dev++; return 0; } static void __devexit snd_audiodrive_pnp_remove(struct pnp_dev * pdev) { snd_card_free(pnp_get_drvdata(pdev)); pnp_set_drvdata(pdev, NULL); } #ifdef CONFIG_PM static int snd_audiodrive_pnp_suspend(struct pnp_dev *pdev, pm_message_t state) { return snd_es18xx_suspend(pnp_get_drvdata(pdev), state); } static int snd_audiodrive_pnp_resume(struct pnp_dev *pdev) { return snd_es18xx_resume(pnp_get_drvdata(pdev)); } #endif static struct pnp_driver es18xx_pnp_driver = { .name = 
"es18xx-pnpbios", .id_table = snd_audiodrive_pnpbiosids, .probe = snd_audiodrive_pnp_detect, .remove = __devexit_p(snd_audiodrive_pnp_remove), #ifdef CONFIG_PM .suspend = snd_audiodrive_pnp_suspend, .resume = snd_audiodrive_pnp_resume, #endif }; static int __devinit snd_audiodrive_pnpc_detect(struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) { static int dev; struct snd_card *card; int res; for ( ; dev < SNDRV_CARDS; dev++) { if (enable[dev] && isapnp[dev]) break; } if (dev >= SNDRV_CARDS) return -ENODEV; res = snd_es18xx_card_new(dev, &card); if (res < 0) return res; if ((res = snd_audiodrive_pnpc(dev, card->private_data, pcard, pid)) < 0) { snd_card_free(card); return res; } snd_card_set_dev(card, &pcard->card->dev); if ((res = snd_audiodrive_probe(card, dev)) < 0) { snd_card_free(card); return res; } pnp_set_card_drvdata(pcard, card); dev++; return 0; } static void __devexit snd_audiodrive_pnpc_remove(struct pnp_card_link * pcard) { snd_card_free(pnp_get_card_drvdata(pcard)); pnp_set_card_drvdata(pcard, NULL); } #ifdef CONFIG_PM static int snd_audiodrive_pnpc_suspend(struct pnp_card_link *pcard, pm_message_t state) { return snd_es18xx_suspend(pnp_get_card_drvdata(pcard), state); } static int snd_audiodrive_pnpc_resume(struct pnp_card_link *pcard) { return snd_es18xx_resume(pnp_get_card_drvdata(pcard)); } #endif static struct pnp_card_driver es18xx_pnpc_driver = { .flags = PNP_DRIVER_RES_DISABLE, .name = "es18xx", .id_table = snd_audiodrive_pnpids, .probe = snd_audiodrive_pnpc_detect, .remove = __devexit_p(snd_audiodrive_pnpc_remove), #ifdef CONFIG_PM .suspend = snd_audiodrive_pnpc_suspend, .resume = snd_audiodrive_pnpc_resume, #endif }; #endif /* CONFIG_PNP */ static int __init alsa_card_es18xx_init(void) { int err; err = isa_register_driver(&snd_es18xx_isa_driver, SNDRV_CARDS); #ifdef CONFIG_PNP if (!err) isa_registered = 1; err = pnp_register_driver(&es18xx_pnp_driver); if (!err) pnp_registered = 1; err = 
pnp_register_card_driver(&es18xx_pnpc_driver); if (!err) pnpc_registered = 1; if (isa_registered || pnp_registered) err = 0; #endif return err; } static void __exit alsa_card_es18xx_exit(void) { #ifdef CONFIG_PNP if (pnpc_registered) pnp_unregister_card_driver(&es18xx_pnpc_driver); if (pnp_registered) pnp_unregister_driver(&es18xx_pnp_driver); if (isa_registered) #endif isa_unregister_driver(&snd_es18xx_isa_driver); } module_init(alsa_card_es18xx_init) module_exit(alsa_card_es18xx_exit)
gpl-2.0
curbthepain/revkernel_ubers5
drivers/rtc/rtc-pcf50633.c
5069
7850
/* NXP PCF50633 RTC Driver
 *
 * (C) 2006-2008 by Openmoko, Inc.
 * Author: Balaji Rao <balajirrao@openmoko.org>
 * All rights reserved.
 *
 * Broken down from monstrous PCF50633 driver mainly by
 * Harald Welte, Andy Green and Werner Almesberger
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/err.h>

#include <linux/mfd/pcf50633/core.h>

/*
 * RTC register map on the PCF50633.  The time and alarm registers each
 * form a contiguous 7-byte block (second..year), which lets us transfer
 * a whole timestamp with a single block read/write starting at RTCSC or
 * RTCSCA respectively.  All values are BCD-encoded.
 */
#define PCF50633_REG_RTCSC	0x59 /* Second */
#define PCF50633_REG_RTCMN	0x5a /* Minute */
#define PCF50633_REG_RTCHR	0x5b /* Hour */
#define PCF50633_REG_RTCWD	0x5c /* Weekday */
#define PCF50633_REG_RTCDT	0x5d /* Day */
#define PCF50633_REG_RTCMT	0x5e /* Month */
#define PCF50633_REG_RTCYR	0x5f /* Year */
#define PCF50633_REG_RTCSCA	0x60 /* Alarm Second */
#define PCF50633_REG_RTCMNA	0x61 /* Alarm Minute */
#define PCF50633_REG_RTCHRA	0x62 /* Alarm Hour */
#define PCF50633_REG_RTCWDA	0x63 /* Alarm Weekday */
#define PCF50633_REG_RTCDTA	0x64 /* Alarm Day */
#define PCF50633_REG_RTCMTA	0x65 /* Alarm Month */
#define PCF50633_REG_RTCYRA	0x66 /* Alarm Year */

/* Byte offsets within a 7-byte register block; mirrors the register order. */
enum pcf50633_time_indexes {
	PCF50633_TI_SEC,
	PCF50633_TI_MIN,
	PCF50633_TI_HOUR,
	PCF50633_TI_WKDAY,
	PCF50633_TI_DAY,
	PCF50633_TI_MONTH,
	PCF50633_TI_YEAR,
	PCF50633_TI_EXTENT /* always last */
};

/* Raw BCD snapshot of one 7-byte time/alarm register block. */
struct pcf50633_time {
	u_int8_t time[PCF50633_TI_EXTENT];
};

/* Per-device driver state. */
struct pcf50633_rtc {
	int alarm_enabled;	/* cached state of the ALARM irq enable */
	int alarm_pending;	/* set by the irq handler, cleared on disable */

	struct pcf50633 *pcf;	/* parent MFD device */
	struct rtc_device *rtc_dev;
};

/*
 * Convert a raw BCD register snapshot into struct rtc_time.
 * tm_mon is 0-based, so the 1-based hardware month is decremented;
 * tm_year counts from 1900 while the chip stores a two-digit year,
 * so +100 maps hardware years onto 2000-2099.
 */
static void pcf2rtc_time(struct rtc_time *rtc, struct pcf50633_time *pcf)
{
	rtc->tm_sec = bcd2bin(pcf->time[PCF50633_TI_SEC]);
	rtc->tm_min = bcd2bin(pcf->time[PCF50633_TI_MIN]);
	rtc->tm_hour = bcd2bin(pcf->time[PCF50633_TI_HOUR]);
	rtc->tm_wday = bcd2bin(pcf->time[PCF50633_TI_WKDAY]);
	rtc->tm_mday = bcd2bin(pcf->time[PCF50633_TI_DAY]);
	rtc->tm_mon = bcd2bin(pcf->time[PCF50633_TI_MONTH]) - 1;
	rtc->tm_year = bcd2bin(pcf->time[PCF50633_TI_YEAR]) + 100;
}

/* Inverse of pcf2rtc_time(): struct rtc_time -> BCD register block. */
static void rtc2pcf_time(struct pcf50633_time *pcf, struct rtc_time *rtc)
{
	pcf->time[PCF50633_TI_SEC] = bin2bcd(rtc->tm_sec);
	pcf->time[PCF50633_TI_MIN] = bin2bcd(rtc->tm_min);
	pcf->time[PCF50633_TI_HOUR] = bin2bcd(rtc->tm_hour);
	pcf->time[PCF50633_TI_WKDAY] = bin2bcd(rtc->tm_wday);
	pcf->time[PCF50633_TI_DAY] = bin2bcd(rtc->tm_mday);
	pcf->time[PCF50633_TI_MONTH] = bin2bcd(rtc->tm_mon + 1);
	pcf->time[PCF50633_TI_YEAR] = bin2bcd(rtc->tm_year % 100);
}

/*
 * Enable/disable alarm interrupt delivery by (un)masking the MFD's
 * ALARM irq; the cached alarm_enabled flag is only updated on success.
 */
static int pcf50633_rtc_alarm_irq_enable(struct device *dev,
					unsigned int enabled)
{
	struct pcf50633_rtc *rtc = dev_get_drvdata(dev);
	int err;

	if (enabled)
		err = pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM);
	else
		err = pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_ALARM);

	if (err < 0)
		return err;

	rtc->alarm_enabled = enabled;

	return 0;
}

/* Read the current time with one 7-byte block read starting at RTCSC. */
static int pcf50633_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	struct pcf50633_rtc *rtc;
	struct pcf50633_time pcf_tm;
	int ret;

	rtc = dev_get_drvdata(dev);
	/* pcf50633_read_block() returns the number of bytes transferred */
	ret = pcf50633_read_block(rtc->pcf, PCF50633_REG_RTCSC,
					    PCF50633_TI_EXTENT,
					    &pcf_tm.time[0]);

	if (ret != PCF50633_TI_EXTENT) {
		dev_err(dev, "Failed to read time\n");
		return -EIO;
	}

	dev_dbg(dev, "PCF_TIME: %02x.%02x.%02x %02x:%02x:%02x\n",
		pcf_tm.time[PCF50633_TI_DAY],
		pcf_tm.time[PCF50633_TI_MONTH],
		pcf_tm.time[PCF50633_TI_YEAR],
		pcf_tm.time[PCF50633_TI_HOUR],
		pcf_tm.time[PCF50633_TI_MIN],
		pcf_tm.time[PCF50633_TI_SEC]);

	pcf2rtc_time(tm, &pcf_tm);

	dev_dbg(dev, "RTC_TIME: %u.%u.%u %u:%u:%u\n",
		tm->tm_mday, tm->tm_mon, tm->tm_year,
		tm->tm_hour, tm->tm_min, tm->tm_sec);

	return rtc_valid_tm(tm);
}

/*
 * Set the time with one block write.  The ALARM irq is masked around the
 * write (and restored afterwards) so a spurious alarm can't fire while
 * the time registers are in a half-updated state.
 */
static int pcf50633_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	struct pcf50633_rtc *rtc;
	struct pcf50633_time pcf_tm;
	int alarm_masked, ret = 0;

	rtc = dev_get_drvdata(dev);

	dev_dbg(dev, "RTC_TIME: %u.%u.%u %u:%u:%u\n",
		tm->tm_mday, tm->tm_mon, tm->tm_year,
		tm->tm_hour, tm->tm_min, tm->tm_sec);

	rtc2pcf_time(&pcf_tm, tm);

	dev_dbg(dev, "PCF_TIME: %02x.%02x.%02x %02x:%02x:%02x\n",
		pcf_tm.time[PCF50633_TI_DAY],
		pcf_tm.time[PCF50633_TI_MONTH],
		pcf_tm.time[PCF50633_TI_YEAR],
		pcf_tm.time[PCF50633_TI_HOUR],
		pcf_tm.time[PCF50633_TI_MIN],
		pcf_tm.time[PCF50633_TI_SEC]);

	alarm_masked = pcf50633_irq_mask_get(rtc->pcf, PCF50633_IRQ_ALARM);

	if (!alarm_masked)
		pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_ALARM);

	/* Returns 0 on success */
	ret = pcf50633_write_block(rtc->pcf, PCF50633_REG_RTCSC,
					     PCF50633_TI_EXTENT,
					     &pcf_tm.time[0]);

	if (!alarm_masked)
		pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM);

	return ret;
}

/* Read the alarm registers plus the cached enabled/pending flags. */
static int pcf50633_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct pcf50633_rtc *rtc;
	struct pcf50633_time pcf_tm;
	int ret = 0;

	rtc = dev_get_drvdata(dev);

	alrm->enabled = rtc->alarm_enabled;
	alrm->pending = rtc->alarm_pending;

	ret = pcf50633_read_block(rtc->pcf, PCF50633_REG_RTCSCA,
				PCF50633_TI_EXTENT, &pcf_tm.time[0]);

	if (ret != PCF50633_TI_EXTENT) {
		dev_err(dev, "Failed to read time\n");
		return -EIO;
	}

	pcf2rtc_time(&alrm->time, &pcf_tm);

	return rtc_valid_tm(&alrm->time);
}

/*
 * Program the alarm registers, again masking the ALARM irq around the
 * block write.  The irq is re-enabled afterwards if it either was
 * enabled before or the caller asked for an enabled alarm.
 */
static int pcf50633_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct pcf50633_rtc *rtc;
	struct pcf50633_time pcf_tm;
	int alarm_masked, ret = 0;

	rtc = dev_get_drvdata(dev);

	rtc2pcf_time(&pcf_tm, &alrm->time);

	/* do like mktime does and ignore tm_wday */
	pcf_tm.time[PCF50633_TI_WKDAY] = 7;

	alarm_masked = pcf50633_irq_mask_get(rtc->pcf, PCF50633_IRQ_ALARM);

	/* disable alarm interrupt */
	if (!alarm_masked)
		pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_ALARM);

	/* Returns 0 on success */
	ret = pcf50633_write_block(rtc->pcf, PCF50633_REG_RTCSCA,
				PCF50633_TI_EXTENT, &pcf_tm.time[0]);
	if (!alrm->enabled)
		rtc->alarm_pending = 0;

	if (!alarm_masked || alrm->enabled)
		pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM);
	rtc->alarm_enabled = alrm->enabled;

	return ret;
}

static struct rtc_class_ops pcf50633_rtc_ops = {
	.read_time		= pcf50633_rtc_read_time,
	.set_time		= pcf50633_rtc_set_time,
	.read_alarm		= pcf50633_rtc_read_alarm,
	.set_alarm		= pcf50633_rtc_set_alarm,
	.alarm_irq_enable	= pcf50633_rtc_alarm_irq_enable,
};

/* ALARM irq handler: forward the event to the RTC core and latch it. */
static void pcf50633_rtc_irq(int irq, void *data)
{
	struct pcf50633_rtc *rtc = data;

	rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF);
	rtc->alarm_pending = 1;
}

/* Allocate state, register the RTC class device and hook the ALARM irq. */
static int __devinit pcf50633_rtc_probe(struct platform_device *pdev)
{
	struct pcf50633_rtc *rtc;

	rtc = kzalloc(sizeof(*rtc), GFP_KERNEL);
	if (!rtc)
		return -ENOMEM;

	rtc->pcf = dev_to_pcf50633(pdev->dev.parent);
	platform_set_drvdata(pdev, rtc);
	rtc->rtc_dev = rtc_device_register("pcf50633-rtc", &pdev->dev,
				&pcf50633_rtc_ops, THIS_MODULE);

	if (IS_ERR(rtc->rtc_dev)) {
		int ret =  PTR_ERR(rtc->rtc_dev);
		kfree(rtc);
		return ret;
	}

	pcf50633_register_irq(rtc->pcf, PCF50633_IRQ_ALARM,
					pcf50633_rtc_irq, rtc);
	return 0;
}

/* Tear down in reverse order of probe. */
static int __devexit pcf50633_rtc_remove(struct platform_device *pdev)
{
	struct pcf50633_rtc *rtc;

	rtc = platform_get_drvdata(pdev);

	pcf50633_free_irq(rtc->pcf, PCF50633_IRQ_ALARM);

	rtc_device_unregister(rtc->rtc_dev);
	kfree(rtc);

	return 0;
}

static struct platform_driver pcf50633_rtc_driver = {
	.driver = {
		.name = "pcf50633-rtc",
	},
	.probe = pcf50633_rtc_probe,
	.remove = __devexit_p(pcf50633_rtc_remove),
};

module_platform_driver(pcf50633_rtc_driver);

MODULE_DESCRIPTION("PCF50633 RTC driver");
MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
MODULE_LICENSE("GPL");
gpl-2.0
kimjh4930/linuxkernel
arch/mips/ar7/gpio.c
8397
8354
/* * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org> * Copyright (C) 2007 Eugene Konev <ejka@openwrt.org> * Copyright (C) 2009-2010 Florian Fainelli <florian@openwrt.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/module.h> #include <linux/gpio.h> #include <asm/mach-ar7/gpio.h> struct ar7_gpio_chip { void __iomem *regs; struct gpio_chip chip; }; static int ar7_gpio_get_value(struct gpio_chip *chip, unsigned gpio) { struct ar7_gpio_chip *gpch = container_of(chip, struct ar7_gpio_chip, chip); void __iomem *gpio_in = gpch->regs + AR7_GPIO_INPUT; return readl(gpio_in) & (1 << gpio); } static int titan_gpio_get_value(struct gpio_chip *chip, unsigned gpio) { struct ar7_gpio_chip *gpch = container_of(chip, struct ar7_gpio_chip, chip); void __iomem *gpio_in0 = gpch->regs + TITAN_GPIO_INPUT_0; void __iomem *gpio_in1 = gpch->regs + TITAN_GPIO_INPUT_1; return readl(gpio >> 5 ? 
gpio_in1 : gpio_in0) & (1 << (gpio & 0x1f)); } static void ar7_gpio_set_value(struct gpio_chip *chip, unsigned gpio, int value) { struct ar7_gpio_chip *gpch = container_of(chip, struct ar7_gpio_chip, chip); void __iomem *gpio_out = gpch->regs + AR7_GPIO_OUTPUT; unsigned tmp; tmp = readl(gpio_out) & ~(1 << gpio); if (value) tmp |= 1 << gpio; writel(tmp, gpio_out); } static void titan_gpio_set_value(struct gpio_chip *chip, unsigned gpio, int value) { struct ar7_gpio_chip *gpch = container_of(chip, struct ar7_gpio_chip, chip); void __iomem *gpio_out0 = gpch->regs + TITAN_GPIO_OUTPUT_0; void __iomem *gpio_out1 = gpch->regs + TITAN_GPIO_OUTPUT_1; unsigned tmp; tmp = readl(gpio >> 5 ? gpio_out1 : gpio_out0) & ~(1 << (gpio & 0x1f)); if (value) tmp |= 1 << (gpio & 0x1f); writel(tmp, gpio >> 5 ? gpio_out1 : gpio_out0); } static int ar7_gpio_direction_input(struct gpio_chip *chip, unsigned gpio) { struct ar7_gpio_chip *gpch = container_of(chip, struct ar7_gpio_chip, chip); void __iomem *gpio_dir = gpch->regs + AR7_GPIO_DIR; writel(readl(gpio_dir) | (1 << gpio), gpio_dir); return 0; } static int titan_gpio_direction_input(struct gpio_chip *chip, unsigned gpio) { struct ar7_gpio_chip *gpch = container_of(chip, struct ar7_gpio_chip, chip); void __iomem *gpio_dir0 = gpch->regs + TITAN_GPIO_DIR_0; void __iomem *gpio_dir1 = gpch->regs + TITAN_GPIO_DIR_1; if (gpio >= TITAN_GPIO_MAX) return -EINVAL; writel(readl(gpio >> 5 ? gpio_dir1 : gpio_dir0) | (1 << (gpio & 0x1f)), gpio >> 5 ? 
gpio_dir1 : gpio_dir0); return 0; } static int ar7_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, int value) { struct ar7_gpio_chip *gpch = container_of(chip, struct ar7_gpio_chip, chip); void __iomem *gpio_dir = gpch->regs + AR7_GPIO_DIR; ar7_gpio_set_value(chip, gpio, value); writel(readl(gpio_dir) & ~(1 << gpio), gpio_dir); return 0; } static int titan_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, int value) { struct ar7_gpio_chip *gpch = container_of(chip, struct ar7_gpio_chip, chip); void __iomem *gpio_dir0 = gpch->regs + TITAN_GPIO_DIR_0; void __iomem *gpio_dir1 = gpch->regs + TITAN_GPIO_DIR_1; if (gpio >= TITAN_GPIO_MAX) return -EINVAL; titan_gpio_set_value(chip, gpio, value); writel(readl(gpio >> 5 ? gpio_dir1 : gpio_dir0) & ~(1 << (gpio & 0x1f)), gpio >> 5 ? gpio_dir1 : gpio_dir0); return 0; } static struct ar7_gpio_chip ar7_gpio_chip = { .chip = { .label = "ar7-gpio", .direction_input = ar7_gpio_direction_input, .direction_output = ar7_gpio_direction_output, .set = ar7_gpio_set_value, .get = ar7_gpio_get_value, .base = 0, .ngpio = AR7_GPIO_MAX, } }; static struct ar7_gpio_chip titan_gpio_chip = { .chip = { .label = "titan-gpio", .direction_input = titan_gpio_direction_input, .direction_output = titan_gpio_direction_output, .set = titan_gpio_set_value, .get = titan_gpio_get_value, .base = 0, .ngpio = TITAN_GPIO_MAX, } }; static inline int ar7_gpio_enable_ar7(unsigned gpio) { void __iomem *gpio_en = ar7_gpio_chip.regs + AR7_GPIO_ENABLE; writel(readl(gpio_en) | (1 << gpio), gpio_en); return 0; } static inline int ar7_gpio_enable_titan(unsigned gpio) { void __iomem *gpio_en0 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_0; void __iomem *gpio_en1 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_1; writel(readl(gpio >> 5 ? gpio_en1 : gpio_en0) | (1 << (gpio & 0x1f)), gpio >> 5 ? gpio_en1 : gpio_en0); return 0; } int ar7_gpio_enable(unsigned gpio) { return ar7_is_titan() ? 
ar7_gpio_enable_titan(gpio) : ar7_gpio_enable_ar7(gpio); } EXPORT_SYMBOL(ar7_gpio_enable); static inline int ar7_gpio_disable_ar7(unsigned gpio) { void __iomem *gpio_en = ar7_gpio_chip.regs + AR7_GPIO_ENABLE; writel(readl(gpio_en) & ~(1 << gpio), gpio_en); return 0; } static inline int ar7_gpio_disable_titan(unsigned gpio) { void __iomem *gpio_en0 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_0; void __iomem *gpio_en1 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_1; writel(readl(gpio >> 5 ? gpio_en1 : gpio_en0) & ~(1 << (gpio & 0x1f)), gpio >> 5 ? gpio_en1 : gpio_en0); return 0; } int ar7_gpio_disable(unsigned gpio) { return ar7_is_titan() ? ar7_gpio_disable_titan(gpio) : ar7_gpio_disable_ar7(gpio); } EXPORT_SYMBOL(ar7_gpio_disable); struct titan_gpio_cfg { u32 reg; u32 shift; u32 func; }; static const struct titan_gpio_cfg titan_gpio_table[] = { /* reg, start bit, mux value */ {4, 24, 1}, {4, 26, 1}, {4, 28, 1}, {4, 30, 1}, {5, 6, 1}, {5, 8, 1}, {5, 10, 1}, {5, 12, 1}, {7, 14, 3}, {7, 16, 3}, {7, 18, 3}, {7, 20, 3}, {7, 22, 3}, {7, 26, 3}, {7, 28, 3}, {7, 30, 3}, {8, 0, 3}, {8, 2, 3}, {8, 4, 3}, {8, 10, 3}, {8, 14, 3}, {8, 16, 3}, {8, 18, 3}, {8, 20, 3}, {9, 8, 3}, {9, 10, 3}, {9, 12, 3}, {9, 14, 3}, {9, 18, 3}, {9, 20, 3}, {9, 24, 3}, {9, 26, 3}, {9, 28, 3}, {9, 30, 3}, {10, 0, 3}, {10, 2, 3}, {10, 8, 3}, {10, 10, 3}, {10, 12, 3}, {10, 14, 3}, {13, 12, 3}, {13, 14, 3}, {13, 16, 3}, {13, 18, 3}, {13, 24, 3}, {13, 26, 3}, {13, 28, 3}, {13, 30, 3}, {14, 2, 3}, {14, 6, 3}, {14, 8, 3}, {14, 12, 3} }; static int titan_gpio_pinsel(unsigned gpio) { struct titan_gpio_cfg gpio_cfg; u32 mux_status, pin_sel_reg, tmp; void __iomem *pin_sel = (void __iomem *)KSEG1ADDR(AR7_REGS_PINSEL); if (gpio >= ARRAY_SIZE(titan_gpio_table)) return -EINVAL; gpio_cfg = titan_gpio_table[gpio]; pin_sel_reg = gpio_cfg.reg - 1; mux_status = (readl(pin_sel + pin_sel_reg) >> gpio_cfg.shift) & 0x3; /* Check the mux status */ if (!((mux_status == 0) || (mux_status == gpio_cfg.func))) return 0; /* Set the pin 
sel value */ tmp = readl(pin_sel + pin_sel_reg); tmp |= ((gpio_cfg.func & 0x3) << gpio_cfg.shift); writel(tmp, pin_sel + pin_sel_reg); return 0; } /* Perform minimal Titan GPIO configuration */ static void titan_gpio_init(void) { unsigned i; for (i = 44; i < 48; i++) { titan_gpio_pinsel(i); ar7_gpio_enable_titan(i); titan_gpio_direction_input(&titan_gpio_chip.chip, i); } } int __init ar7_gpio_init(void) { int ret; struct ar7_gpio_chip *gpch; unsigned size; if (!ar7_is_titan()) { gpch = &ar7_gpio_chip; size = 0x10; } else { gpch = &titan_gpio_chip; size = 0x1f; } gpch->regs = ioremap_nocache(AR7_REGS_GPIO, size); if (!gpch->regs) { printk(KERN_ERR "%s: failed to ioremap regs\n", gpch->chip.label); return -ENOMEM; } ret = gpiochip_add(&gpch->chip); if (ret) { printk(KERN_ERR "%s: failed to add gpiochip\n", gpch->chip.label); return ret; } printk(KERN_INFO "%s: registered %d GPIOs\n", gpch->chip.label, gpch->chip.ngpio); if (ar7_is_titan()) titan_gpio_init(); return ret; }
gpl-2.0
Pandza/Hammerhead
arch/arm/kernel/early_printk.c
9421
1138
/* * linux/arch/arm/kernel/early_printk.c * * Copyright (C) 2009 Sascha Hauer <s.hauer@pengutronix.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/console.h> #include <linux/init.h> extern void printch(int); static void early_write(const char *s, unsigned n) { while (n-- > 0) { if (*s == '\n') printch('\r'); printch(*s); s++; } } static void early_console_write(struct console *con, const char *s, unsigned n) { early_write(s, n); } static struct console early_console = { .name = "earlycon", .write = early_console_write, .flags = CON_PRINTBUFFER | CON_BOOT, .index = -1, }; asmlinkage void early_printk(const char *fmt, ...) { char buf[512]; int n; va_list ap; va_start(ap, fmt); n = vscnprintf(buf, sizeof(buf), fmt, ap); early_write(buf, n); va_end(ap); } static int __init setup_early_printk(char *buf) { register_console(&early_console); return 0; } early_param("earlyprintk", setup_early_printk);
gpl-2.0
Pantech-Discover/android_kernel_pantech_magnus
arch/powerpc/platforms/embedded6xx/gamecube.c
10445
2145
/*
 * arch/powerpc/platforms/embedded6xx/gamecube.c
 *
 * Nintendo GameCube board-specific support
 * Copyright (C) 2004-2009 The GameCube Linux Team
 * Copyright (C) 2007,2008,2009 Albert Herranz
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/seq_file.h>
#include <linux/of_platform.h>

#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/time.h>
#include <asm/udbg.h>

#include "flipper-pic.h"
#include "usbgecko_udbg.h"

/* Park the CPU forever; there is no software power-off on this board. */
static void gamecube_spin(void)
{
	/* spin until power button pressed */
	for (;;)
		cpu_relax();
}

/* Restart via the Flipper chipset reset, then spin with IRQs off. */
static void gamecube_restart(char *cmd)
{
	local_irq_disable();
	flipper_platform_reset();
	gamecube_spin();
}

/* "Power off": disable interrupts and spin until the user cuts power. */
static void gamecube_power_off(void)
{
	local_irq_disable();
	gamecube_spin();
}

/* Halt is implemented as a restart with no command string. */
static void gamecube_halt(void)
{
	gamecube_restart(NULL);
}

/* Bring up the USB Gecko early debug output as soon as possible. */
static void __init gamecube_init_early(void)
{
	ug_udbg_init();
}

/* Match against the "nintendo,gamecube" flat device tree root. */
static int __init gamecube_probe(void)
{
	unsigned long dt_root;

	dt_root = of_get_flat_dt_root();
	if (!of_flat_dt_is_compatible(dt_root, "nintendo,gamecube"))
		return 0;

	return 1;
}

/* Quiesce the Flipper interrupt controller before shutdown/kexec. */
static void gamecube_shutdown(void)
{
	flipper_quiesce();
}

define_machine(gamecube) {
	.name			= "gamecube",
	.probe			= gamecube_probe,
	.init_early		= gamecube_init_early,
	.restart		= gamecube_restart,
	.power_off		= gamecube_power_off,
	.halt			= gamecube_halt,
	.init_IRQ		= flipper_pic_probe,
	.get_irq		= flipper_pic_get_irq,
	.calibrate_decr		= generic_calibrate_decr,
	.progress		= udbg_progress,
	.machine_shutdown	= gamecube_shutdown,
};

/* Bus nodes to scan for platform devices. */
static struct of_device_id gamecube_of_bus[] = {
	{ .compatible = "nintendo,flipper", },
	{ },
};

/* Register platform devices found under the Flipper bus node. */
static int __init gamecube_device_probe(void)
{
	if (!machine_is(gamecube))
		return 0;

	of_platform_bus_probe(NULL, gamecube_of_bus, NULL);
	return 0;
}
device_initcall(gamecube_device_probe);
gpl-2.0
cleech/linux
drivers/clk/sunxi-ng/ccu-sun8i-r.c
206
7948
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2016 Icenowy Zheng <icenowy@aosc.xyz> */ #include <linux/clk-provider.h> #include <linux/of_address.h> #include <linux/platform_device.h> #include "ccu_common.h" #include "ccu_reset.h" #include "ccu_div.h" #include "ccu_gate.h" #include "ccu_mp.h" #include "ccu_nm.h" #include "ccu-sun8i-r.h" static const struct clk_parent_data ar100_parents[] = { { .fw_name = "losc" }, { .fw_name = "hosc" }, { .fw_name = "pll-periph" }, { .fw_name = "iosc" }, }; static const struct ccu_mux_var_prediv ar100_predivs[] = { { .index = 2, .shift = 8, .width = 5 }, }; static struct ccu_div ar100_clk = { .div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO), .mux = { .shift = 16, .width = 2, .var_predivs = ar100_predivs, .n_var_predivs = ARRAY_SIZE(ar100_predivs), }, .common = { .reg = 0x00, .features = CCU_FEATURE_VARIABLE_PREDIV, .hw.init = CLK_HW_INIT_PARENTS_DATA("ar100", ar100_parents, &ccu_div_ops, 0), }, }; static CLK_FIXED_FACTOR_HW(ahb0_clk, "ahb0", &ar100_clk.common.hw, 1, 1, 0); static SUNXI_CCU_M(apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0); /* * Define the parent as an array that can be reused to save space * instead of having compound literals for each gate. Also have it * non-const so we can change it on the A83T. 
*/ static const struct clk_hw *apb0_gate_parent[] = { &apb0_clk.common.hw }; static SUNXI_CCU_GATE_HWS(apb0_pio_clk, "apb0-pio", apb0_gate_parent, 0x28, BIT(0), 0); static SUNXI_CCU_GATE_HWS(apb0_ir_clk, "apb0-ir", apb0_gate_parent, 0x28, BIT(1), 0); static SUNXI_CCU_GATE_HWS(apb0_timer_clk, "apb0-timer", apb0_gate_parent, 0x28, BIT(2), 0); static SUNXI_CCU_GATE_HWS(apb0_rsb_clk, "apb0-rsb", apb0_gate_parent, 0x28, BIT(3), 0); static SUNXI_CCU_GATE_HWS(apb0_uart_clk, "apb0-uart", apb0_gate_parent, 0x28, BIT(4), 0); static SUNXI_CCU_GATE_HWS(apb0_i2c_clk, "apb0-i2c", apb0_gate_parent, 0x28, BIT(6), 0); static SUNXI_CCU_GATE_HWS(apb0_twd_clk, "apb0-twd", apb0_gate_parent, 0x28, BIT(7), 0); static const char * const r_mod0_default_parents[] = { "osc32k", "osc24M" }; static SUNXI_CCU_MP_WITH_MUX_GATE(ir_clk, "ir", r_mod0_default_parents, 0x54, 0, 4, /* M */ 16, 2, /* P */ 24, 2, /* mux */ BIT(31), /* gate */ 0); static const struct clk_parent_data a83t_r_mod0_parents[] = { { .fw_name = "iosc" }, { .fw_name = "hosc" }, }; static const struct ccu_mux_fixed_prediv a83t_ir_predivs[] = { { .index = 0, .div = 16 }, }; static struct ccu_mp a83t_ir_clk = { .enable = BIT(31), .m = _SUNXI_CCU_DIV(0, 4), .p = _SUNXI_CCU_DIV(16, 2), .mux = { .shift = 24, .width = 2, .fixed_predivs = a83t_ir_predivs, .n_predivs = ARRAY_SIZE(a83t_ir_predivs), }, .common = { .reg = 0x54, .features = CCU_FEATURE_VARIABLE_PREDIV, .hw.init = CLK_HW_INIT_PARENTS_DATA("ir", a83t_r_mod0_parents, &ccu_mp_ops, 0), }, }; static struct ccu_common *sun8i_a83t_r_ccu_clks[] = { &ar100_clk.common, &apb0_clk.common, &apb0_pio_clk.common, &apb0_ir_clk.common, &apb0_timer_clk.common, &apb0_rsb_clk.common, &apb0_uart_clk.common, &apb0_i2c_clk.common, &apb0_twd_clk.common, &a83t_ir_clk.common, }; static struct ccu_common *sun8i_h3_r_ccu_clks[] = { &ar100_clk.common, &apb0_clk.common, &apb0_pio_clk.common, &apb0_ir_clk.common, &apb0_timer_clk.common, &apb0_uart_clk.common, &apb0_i2c_clk.common, &apb0_twd_clk.common, 
&ir_clk.common, }; static struct ccu_common *sun50i_a64_r_ccu_clks[] = { &ar100_clk.common, &apb0_clk.common, &apb0_pio_clk.common, &apb0_ir_clk.common, &apb0_timer_clk.common, &apb0_rsb_clk.common, &apb0_uart_clk.common, &apb0_i2c_clk.common, &apb0_twd_clk.common, &ir_clk.common, }; static struct clk_hw_onecell_data sun8i_a83t_r_hw_clks = { .hws = { [CLK_AR100] = &ar100_clk.common.hw, [CLK_AHB0] = &ahb0_clk.hw, [CLK_APB0] = &apb0_clk.common.hw, [CLK_APB0_PIO] = &apb0_pio_clk.common.hw, [CLK_APB0_IR] = &apb0_ir_clk.common.hw, [CLK_APB0_TIMER] = &apb0_timer_clk.common.hw, [CLK_APB0_RSB] = &apb0_rsb_clk.common.hw, [CLK_APB0_UART] = &apb0_uart_clk.common.hw, [CLK_APB0_I2C] = &apb0_i2c_clk.common.hw, [CLK_APB0_TWD] = &apb0_twd_clk.common.hw, [CLK_IR] = &a83t_ir_clk.common.hw, }, .num = CLK_NUMBER, }; static struct clk_hw_onecell_data sun8i_h3_r_hw_clks = { .hws = { [CLK_AR100] = &ar100_clk.common.hw, [CLK_AHB0] = &ahb0_clk.hw, [CLK_APB0] = &apb0_clk.common.hw, [CLK_APB0_PIO] = &apb0_pio_clk.common.hw, [CLK_APB0_IR] = &apb0_ir_clk.common.hw, [CLK_APB0_TIMER] = &apb0_timer_clk.common.hw, [CLK_APB0_UART] = &apb0_uart_clk.common.hw, [CLK_APB0_I2C] = &apb0_i2c_clk.common.hw, [CLK_APB0_TWD] = &apb0_twd_clk.common.hw, [CLK_IR] = &ir_clk.common.hw, }, .num = CLK_NUMBER, }; static struct clk_hw_onecell_data sun50i_a64_r_hw_clks = { .hws = { [CLK_AR100] = &ar100_clk.common.hw, [CLK_AHB0] = &ahb0_clk.hw, [CLK_APB0] = &apb0_clk.common.hw, [CLK_APB0_PIO] = &apb0_pio_clk.common.hw, [CLK_APB0_IR] = &apb0_ir_clk.common.hw, [CLK_APB0_TIMER] = &apb0_timer_clk.common.hw, [CLK_APB0_RSB] = &apb0_rsb_clk.common.hw, [CLK_APB0_UART] = &apb0_uart_clk.common.hw, [CLK_APB0_I2C] = &apb0_i2c_clk.common.hw, [CLK_APB0_TWD] = &apb0_twd_clk.common.hw, [CLK_IR] = &ir_clk.common.hw, }, .num = CLK_NUMBER, }; static struct ccu_reset_map sun8i_a83t_r_ccu_resets[] = { [RST_APB0_IR] = { 0xb0, BIT(1) }, [RST_APB0_TIMER] = { 0xb0, BIT(2) }, [RST_APB0_RSB] = { 0xb0, BIT(3) }, [RST_APB0_UART] = { 0xb0, BIT(4) }, 
[RST_APB0_I2C] = { 0xb0, BIT(6) }, }; static struct ccu_reset_map sun8i_h3_r_ccu_resets[] = { [RST_APB0_IR] = { 0xb0, BIT(1) }, [RST_APB0_TIMER] = { 0xb0, BIT(2) }, [RST_APB0_UART] = { 0xb0, BIT(4) }, [RST_APB0_I2C] = { 0xb0, BIT(6) }, }; static struct ccu_reset_map sun50i_a64_r_ccu_resets[] = { [RST_APB0_IR] = { 0xb0, BIT(1) }, [RST_APB0_TIMER] = { 0xb0, BIT(2) }, [RST_APB0_RSB] = { 0xb0, BIT(3) }, [RST_APB0_UART] = { 0xb0, BIT(4) }, [RST_APB0_I2C] = { 0xb0, BIT(6) }, }; static const struct sunxi_ccu_desc sun8i_a83t_r_ccu_desc = { .ccu_clks = sun8i_a83t_r_ccu_clks, .num_ccu_clks = ARRAY_SIZE(sun8i_a83t_r_ccu_clks), .hw_clks = &sun8i_a83t_r_hw_clks, .resets = sun8i_a83t_r_ccu_resets, .num_resets = ARRAY_SIZE(sun8i_a83t_r_ccu_resets), }; static const struct sunxi_ccu_desc sun8i_h3_r_ccu_desc = { .ccu_clks = sun8i_h3_r_ccu_clks, .num_ccu_clks = ARRAY_SIZE(sun8i_h3_r_ccu_clks), .hw_clks = &sun8i_h3_r_hw_clks, .resets = sun8i_h3_r_ccu_resets, .num_resets = ARRAY_SIZE(sun8i_h3_r_ccu_resets), }; static const struct sunxi_ccu_desc sun50i_a64_r_ccu_desc = { .ccu_clks = sun50i_a64_r_ccu_clks, .num_ccu_clks = ARRAY_SIZE(sun50i_a64_r_ccu_clks), .hw_clks = &sun50i_a64_r_hw_clks, .resets = sun50i_a64_r_ccu_resets, .num_resets = ARRAY_SIZE(sun50i_a64_r_ccu_resets), }; static void __init sunxi_r_ccu_init(struct device_node *node, const struct sunxi_ccu_desc *desc) { void __iomem *reg; reg = of_io_request_and_map(node, 0, of_node_full_name(node)); if (IS_ERR(reg)) { pr_err("%pOF: Could not map the clock registers\n", node); return; } sunxi_ccu_probe(node, reg, desc); } static void __init sun8i_a83t_r_ccu_setup(struct device_node *node) { sunxi_r_ccu_init(node, &sun8i_a83t_r_ccu_desc); } CLK_OF_DECLARE(sun8i_a83t_r_ccu, "allwinner,sun8i-a83t-r-ccu", sun8i_a83t_r_ccu_setup); static void __init sun8i_h3_r_ccu_setup(struct device_node *node) { sunxi_r_ccu_init(node, &sun8i_h3_r_ccu_desc); } CLK_OF_DECLARE(sun8i_h3_r_ccu, "allwinner,sun8i-h3-r-ccu", sun8i_h3_r_ccu_setup); static void 
__init sun50i_a64_r_ccu_setup(struct device_node *node) { sunxi_r_ccu_init(node, &sun50i_a64_r_ccu_desc); } CLK_OF_DECLARE(sun50i_a64_r_ccu, "allwinner,sun50i-a64-r-ccu", sun50i_a64_r_ccu_setup);
gpl-2.0
JustBeYou/linux
drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
206
6860
/****************************************************************************** * * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * ******************************************************************************/ #define _USB_OPS_LINUX_C_ #include <drv_types.h> #include <usb_ops_linux.h> #include <rtw_sreset.h> void rtl8723au_read_port_cancel(struct rtw_adapter *padapter) { struct recv_buf *precvbuf; int i; precvbuf = (struct recv_buf *)padapter->recvpriv.precv_buf; DBG_8723A("%s\n", __func__); padapter->bReadPortCancel = true; for (i = 0; i < NR_RECVBUFF ; i++) { if (precvbuf->purb) usb_kill_urb(precvbuf->purb); precvbuf++; } usb_kill_urb(padapter->recvpriv.int_in_urb); } static void usb_write_port23a_complete(struct urb *purb) { struct xmit_buf *pxmitbuf = (struct xmit_buf *)purb->context; struct rtw_adapter *padapter = pxmitbuf->padapter; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct hal_data_8723a *phaldata; unsigned long irqL; switch (pxmitbuf->flags) { case HIGH_QUEUE_INX: #ifdef CONFIG_8723AU_AP_MODE rtw_chk_hi_queue_cmd23a(padapter); #endif break; default: break; } if (padapter->bSurpriseRemoved || padapter->bDriverStopped || padapter->bWritePortCancel) { RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port23a_complete:bDriverStopped(%d) OR " "bSurpriseRemoved(%d)", padapter->bDriverStopped, padapter->bSurpriseRemoved)); DBG_8723A("%s(): TX Warning! 
bDriverStopped(%d) OR " "bSurpriseRemoved(%d) bWritePortCancel(%d) " "pxmitbuf->ext_tag(%x)\n", __func__, padapter->bDriverStopped, padapter->bSurpriseRemoved, padapter->bReadPortCancel, pxmitbuf->ext_tag); goto check_completion; } if (purb->status) { RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port23a_complete : purb->status(%d) " "!= 0\n", purb->status)); DBG_8723A("###=> urb_write_port_complete status(%d)\n", purb->status); if (purb->status == -EPIPE || purb->status == -EPROTO) { } else if (purb->status == -EINPROGRESS) { RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port23a_complete: EINPROGESS\n")); goto check_completion; } else if (purb->status == -ENOENT) { DBG_8723A("%s: -ENOENT\n", __func__); goto check_completion; } else if (purb->status == -ECONNRESET) { DBG_8723A("%s: -ECONNRESET\n", __func__); goto check_completion; } else if (purb->status == -ESHUTDOWN) { RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port23a_complete: ESHUTDOWN\n")); padapter->bDriverStopped = true; RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port23a_complete:bDriverStopped " "= true\n")); goto check_completion; } else { padapter->bSurpriseRemoved = true; DBG_8723A("bSurpriseRemoved = true\n"); RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port23a_complete:bSurpriseRemoved " "= true\n")); goto check_completion; } } phaldata = GET_HAL_DATA(padapter); phaldata->srestpriv.last_tx_complete_time = jiffies; check_completion: spin_lock_irqsave(&pxmitpriv->lock_sctx, irqL); rtw23a_sctx_done_err(&pxmitbuf->sctx, purb->status ? 
RTW_SCTX_DONE_WRITE_PORT_ERR : RTW_SCTX_DONE_SUCCESS); spin_unlock_irqrestore(&pxmitpriv->lock_sctx, irqL); rtw_free_xmitbuf23a(pxmitpriv, pxmitbuf); tasklet_hi_schedule(&pxmitpriv->xmit_tasklet); } int rtl8723au_write_port(struct rtw_adapter *padapter, u32 addr, u32 cnt, struct xmit_buf *pxmitbuf) { struct urb *purb = NULL; struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter); struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct xmit_frame *pxmitframe; struct usb_device *pusbd = pdvobj->pusbdev; unsigned long irqL; unsigned int pipe, ep_num; int status; int ret = _FAIL; RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("+usb_write_port23a\n")); if (padapter->bDriverStopped || padapter->bSurpriseRemoved) { RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("%s:(padapter->bDriverStopped || " "padapter->bSurpriseRemoved)!!!\n", __func__)); rtw23a_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_TX_DENY); goto exit; } pxmitframe = (struct xmit_frame *)pxmitbuf->priv_data; spin_lock_irqsave(&pxmitpriv->lock, irqL); switch (addr) { case VO_QUEUE_INX: pxmitbuf->flags = VO_QUEUE_INX; break; case VI_QUEUE_INX: pxmitbuf->flags = VI_QUEUE_INX; break; case BE_QUEUE_INX: pxmitbuf->flags = BE_QUEUE_INX; break; case BK_QUEUE_INX: pxmitbuf->flags = BK_QUEUE_INX; break; case HIGH_QUEUE_INX: pxmitbuf->flags = HIGH_QUEUE_INX; break; default: pxmitbuf->flags = MGT_QUEUE_INX; break; } spin_unlock_irqrestore(&pxmitpriv->lock, irqL); purb = pxmitbuf->pxmit_urb[0]; /* translate DMA FIFO addr to pipehandle */ ep_num = pdvobj->Queue2Pipe[addr]; pipe = usb_sndbulkpipe(pusbd, ep_num); usb_fill_bulk_urb(purb, pusbd, pipe, pxmitframe->buf_addr, /* pxmitbuf->pbuf */ cnt, usb_write_port23a_complete, pxmitbuf);/* context is pxmitbuf */ status = usb_submit_urb(purb, GFP_ATOMIC); if (!status) { struct hal_data_8723a *phaldata = GET_HAL_DATA(padapter); phaldata->srestpriv.last_tx_time = jiffies; } else { rtw23a_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_WRITE_PORT_ERR); DBG_8723A("usb_write_port23a, status 
=%d\n", status); RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port23a(): usb_submit_urb, status =%x\n", status)); switch (status) { case -ENODEV: padapter->bDriverStopped = true; break; default: break; } goto exit; } ret = _SUCCESS; RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("-usb_write_port23a\n")); exit: if (ret != _SUCCESS) rtw_free_xmitbuf23a(pxmitpriv, pxmitbuf); return ret; } void rtl8723au_write_port_cancel(struct rtw_adapter *padapter) { struct xmit_buf *pxmitbuf; struct list_head *plist; int j; DBG_8723A("%s\n", __func__); padapter->bWritePortCancel = true; list_for_each(plist, &padapter->xmitpriv.xmitbuf_list) { pxmitbuf = container_of(plist, struct xmit_buf, list2); for (j = 0; j < 8; j++) { if (pxmitbuf->pxmit_urb[j]) usb_kill_urb(pxmitbuf->pxmit_urb[j]); } } list_for_each(plist, &padapter->xmitpriv.xmitextbuf_list) { pxmitbuf = container_of(plist, struct xmit_buf, list2); for (j = 0; j < 8; j++) { if (pxmitbuf->pxmit_urb[j]) usb_kill_urb(pxmitbuf->pxmit_urb[j]); } } }
gpl-2.0
krizky82/Xperia-2011-Kernel-2.6.32.X-ICS
arch/arm/mach-msm/proc_comm_test.c
718
2790
/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * */ /* * PROC COMM TEST Driver source file */ #include <linux/types.h> #include <linux/uaccess.h> #include <linux/debugfs.h> #include <linux/module.h> #include "proc_comm.h" static struct dentry *dent; static int proc_comm_test_res; static int proc_comm_reverse_test(void) { uint32_t data1, data2; int rc; data1 = 10; data2 = 20; rc = msm_proc_comm(PCOM_OEM_TEST_CMD, &data1, &data2); if (rc) return rc; if ((data1 != 20) || (data2 != 10)) return -1; return 0; } static ssize_t debug_read(struct file *fp, char __user *buf, size_t count, loff_t *pos) { char _buf[16]; snprintf(_buf, sizeof(_buf), "%i\n", proc_comm_test_res); return simple_read_from_buffer(buf, count, pos, _buf, strlen(_buf)); } static ssize_t debug_write(struct file *fp, const char __user *buf, size_t count, loff_t *pos) { unsigned char cmd[64]; int len; if (count < 1) return 0; len = count > 63 ? 
63 : count; if (copy_from_user(cmd, buf, len)) return -EFAULT; cmd[len] = 0; if (cmd[len-1] == '\n') { cmd[len-1] = 0; len--; } if (!strncmp(cmd, "reverse_test", 64)) proc_comm_test_res = proc_comm_reverse_test(); else proc_comm_test_res = -EINVAL; if (proc_comm_test_res) pr_err("proc comm test fail %d\n", proc_comm_test_res); else pr_info("proc comm test passed\n"); return count; } static int debug_release(struct inode *ip, struct file *fp) { return 0; } static int debug_open(struct inode *ip, struct file *fp) { return 0; } static const struct file_operations debug_ops = { .owner = THIS_MODULE, .open = debug_open, .release = debug_release, .read = debug_read, .write = debug_write, }; static void __exit proc_comm_test_mod_exit(void) { debugfs_remove(dent); } static int __init proc_comm_test_mod_init(void) { dent = debugfs_create_file("proc_comm", 0444, 0, NULL, &debug_ops); proc_comm_test_res = -1; return 0; } module_init(proc_comm_test_mod_init); module_exit(proc_comm_test_mod_exit); MODULE_DESCRIPTION("PROC COMM TEST Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
vidyaravipati/net-next-rocker
drivers/mtd/afs.c
1742
6407
/*====================================================================== drivers/mtd/afs.c: ARM Flash Layout/Partitioning Copyright © 2000 ARM Limited This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA This is access code for flashes using ARM's flash partitioning standards. ======================================================================*/ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/init.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> struct footer_struct { u32 image_info_base; /* Address of first word of ImageFooter */ u32 image_start; /* Start of area reserved by this footer */ u32 signature; /* 'Magic' number proves it's a footer */ u32 type; /* Area type: ARM Image, SIB, customer */ u32 checksum; /* Just this structure */ }; struct image_info_struct { u32 bootFlags; /* Boot flags, compression etc. */ u32 imageNumber; /* Unique number, selects for boot etc. */ u32 loadAddress; /* Address program should be loaded to */ u32 length; /* Actual size of image */ u32 address; /* Image is executed from here */ char name[16]; /* Null terminated */ u32 headerBase; /* Flash Address of any stripped header */ u32 header_length; /* Length of header in memory */ u32 headerType; /* AIF, RLF, s-record etc. 
*/ u32 checksum; /* Image checksum (inc. this struct) */ }; static u32 word_sum(void *words, int num) { u32 *p = words; u32 sum = 0; while (num--) sum += *p++; return sum; } static int afs_read_footer(struct mtd_info *mtd, u_int *img_start, u_int *iis_start, u_int off, u_int mask) { struct footer_struct fs; u_int ptr = off + mtd->erasesize - sizeof(fs); size_t sz; int ret; ret = mtd_read(mtd, ptr, sizeof(fs), &sz, (u_char *)&fs); if (ret >= 0 && sz != sizeof(fs)) ret = -EINVAL; if (ret < 0) { printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n", ptr, ret); return ret; } ret = 1; /* * Does it contain the magic number? */ if (fs.signature != 0xa0ffff9f) ret = 0; /* * Check the checksum. */ if (word_sum(&fs, sizeof(fs) / sizeof(u32)) != 0xffffffff) ret = 0; /* * Don't touch the SIB. */ if (fs.type == 2) ret = 0; *iis_start = fs.image_info_base & mask; *img_start = fs.image_start & mask; /* * Check the image info base. This can not * be located after the footer structure. */ if (*iis_start >= ptr) ret = 0; /* * Check the start of this image. The image * data can not be located after this block. */ if (*img_start > off) ret = 0; return ret; } static int afs_read_iis(struct mtd_info *mtd, struct image_info_struct *iis, u_int ptr) { size_t sz; int ret, i; memset(iis, 0, sizeof(*iis)); ret = mtd_read(mtd, ptr, sizeof(*iis), &sz, (u_char *)iis); if (ret < 0) goto failed; if (sz != sizeof(*iis)) { ret = -EINVAL; goto failed; } ret = 0; /* * Validate the name - it must be NUL terminated. */ for (i = 0; i < sizeof(iis->name); i++) if (iis->name[i] == '\0') break; if (i < sizeof(iis->name)) ret = 1; return ret; failed: printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n", ptr, ret); return ret; } static int parse_afs_partitions(struct mtd_info *mtd, struct mtd_partition **pparts, struct mtd_part_parser_data *data) { struct mtd_partition *parts; u_int mask, off, idx, sz; int ret = 0; char *str; /* * This is the address mask; we use this to mask off out of * range address bits. 
*/ mask = mtd->size - 1; /* * First, calculate the size of the array we need for the * partition information. We include in this the size of * the strings. */ for (idx = off = sz = 0; off < mtd->size; off += mtd->erasesize) { struct image_info_struct iis; u_int iis_ptr, img_ptr; ret = afs_read_footer(mtd, &img_ptr, &iis_ptr, off, mask); if (ret < 0) break; if (ret == 0) continue; ret = afs_read_iis(mtd, &iis, iis_ptr); if (ret < 0) break; if (ret == 0) continue; sz += sizeof(struct mtd_partition); sz += strlen(iis.name) + 1; idx += 1; } if (!sz) return ret; parts = kzalloc(sz, GFP_KERNEL); if (!parts) return -ENOMEM; str = (char *)(parts + idx); /* * Identify the partitions */ for (idx = off = 0; off < mtd->size; off += mtd->erasesize) { struct image_info_struct iis; u_int iis_ptr, img_ptr; /* Read the footer. */ ret = afs_read_footer(mtd, &img_ptr, &iis_ptr, off, mask); if (ret < 0) break; if (ret == 0) continue; /* Read the image info block */ ret = afs_read_iis(mtd, &iis, iis_ptr); if (ret < 0) break; if (ret == 0) continue; strcpy(str, iis.name); parts[idx].name = str; parts[idx].size = (iis.length + mtd->erasesize - 1) & ~(mtd->erasesize - 1); parts[idx].offset = img_ptr; parts[idx].mask_flags = 0; printk(" mtd%d: at 0x%08x, %5lluKiB, %8u, %s\n", idx, img_ptr, parts[idx].size / 1024, iis.imageNumber, str); idx += 1; str = str + strlen(iis.name) + 1; } if (!idx) { kfree(parts); parts = NULL; } *pparts = parts; return idx ? idx : ret; } static struct mtd_part_parser afs_parser = { .owner = THIS_MODULE, .parse_fn = parse_afs_partitions, .name = "afs", }; static int __init afs_parser_init(void) { register_mtd_parser(&afs_parser); return 0; } static void __exit afs_parser_exit(void) { deregister_mtd_parser(&afs_parser); } module_init(afs_parser_init); module_exit(afs_parser_exit); MODULE_AUTHOR("ARM Ltd"); MODULE_DESCRIPTION("ARM Firmware Suite partition parser"); MODULE_LICENSE("GPL");
gpl-2.0
krzk/tizen-tv-rpi-linux
arch/x86/crypto/aes_glue.c
2510
1711
/* * Glue Code for the asm optimized version of the AES Cipher Algorithm * */ #include <linux/module.h> #include <crypto/aes.h> #include <asm/crypto/aes.h> asmlinkage void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in); asmlinkage void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in); void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) { aes_enc_blk(ctx, dst, src); } EXPORT_SYMBOL_GPL(crypto_aes_encrypt_x86); void crypto_aes_decrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) { aes_dec_blk(ctx, dst, src); } EXPORT_SYMBOL_GPL(crypto_aes_decrypt_x86); static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { aes_enc_blk(crypto_tfm_ctx(tfm), dst, src); } static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { aes_dec_blk(crypto_tfm_ctx(tfm), dst, src); } static struct crypto_alg aes_alg = { .cra_name = "aes", .cra_driver_name = "aes-asm", .cra_priority = 200, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crypto_aes_ctx), .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = AES_MIN_KEY_SIZE, .cia_max_keysize = AES_MAX_KEY_SIZE, .cia_setkey = crypto_aes_set_key, .cia_encrypt = aes_encrypt, .cia_decrypt = aes_decrypt } } }; static int __init aes_init(void) { return crypto_register_alg(&aes_alg); } static void __exit aes_fini(void) { crypto_unregister_alg(&aes_alg); } module_init(aes_init); module_exit(aes_fini); MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, asm optimized"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CRYPTO("aes"); MODULE_ALIAS_CRYPTO("aes-asm");
gpl-2.0
ARMP/ARMP-i9300
drivers/net/tulip/winbond-840.c
2766
48870
/* winbond-840.c: A Linux PCI network adapter device driver. */ /* Written 1998-2001 by Donald Becker. This software may be used and distributed according to the terms of the GNU General Public License (GPL), incorporated herein by reference. Drivers based on or derived from this code fall under the GPL and must retain the authorship, copyright and license notice. This file is not a complete program and may only be used when the entire operating system is licensed under the GPL. The author may be reached as becker@scyld.com, or C/O Scyld Computing Corporation 410 Severn Ave., Suite 210 Annapolis MD 21403 Support and updates available at http://www.scyld.com/network/drivers.html Do not remove the copyright information. Do not change the version information unless an improvement has been made. Merely removing my name, as Compex has done in the past, does not count as an improvement. Changelog: * ported to 2.4 ??? * spin lock update, memory barriers, new style dma mappings limit each tx buffer to < 1024 bytes remove DescIntr from Rx descriptors (that's an Tx flag) remove next pointer from Tx descriptors synchronize tx_q_bytes software reset in tx_timeout Copyright (C) 2000 Manfred Spraul * further cleanups power management. support for big endian descriptors Copyright (C) 2001 Manfred Spraul * ethtool support (jgarzik) * Replace some MII-related magic numbers with constants (jgarzik) TODO: * enable pci_power_off * Wake-On-LAN */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define DRV_NAME "winbond-840" #define DRV_VERSION "1.01-e" #define DRV_RELDATE "Sep-11-2006" /* Automatically extracted configuration info: probe-func: winbond840_probe config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840 c-help-name: Winbond W89c840 PCI Ethernet support c-help-symbol: CONFIG_WINBOND_840 c-help: This driver is for the Winbond W89c840 chip. It also works with c-help: the TX9882 chip on the Compex RL100-ATX board. 
c-help: More specific information and updates are available from c-help: http://www.scyld.com/network/drivers.html */ /* The user-configurable values. These may be modified when a driver module is loaded.*/ static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ static int max_interrupt_work = 20; /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). The '840 uses a 64 element hash table based on the Ethernet CRC. */ static int multicast_filter_limit = 32; /* Set the copy breakpoint for the copy-only-tiny-frames scheme. Setting to > 1518 effectively disables this feature. */ static int rx_copybreak; /* Used to pass the media type, etc. Both 'options[]' and 'full_duplex[]' should exist for driver interoperability. The media type is usually passed in 'options[]'. */ #define MAX_UNITS 8 /* More are supported, limit only on options */ static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; /* Operational parameters that are set at compile time. */ /* Keep the ring sizes a power of two for compile efficiency. The compiler will convert <unsigned>'%'<2^N> into a bit mask. Making the Tx ring too large decreases the effectiveness of channel bonding and packet priority. There are no ill effects from too-large receive rings. */ #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */ #define TX_QUEUE_LEN_RESTART 5 #define TX_BUFLIMIT (1024-128) /* The presumed FIFO size for working around the Tx-FIFO-overflow bug. To avoid overflowing we don't queue again until we have room for a full-size packet. */ #define TX_FIFO_SIZE (2048) #define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16) /* Operational parameters that usually are not changed. */ /* Time in jiffies before concluding the transmitter is hung. */ #define TX_TIMEOUT (2*HZ) /* Include files, designed to support most kernel versions 2.0.0 and later. 
*/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/rtnetlink.h> #include <linux/crc32.h> #include <linux/bitops.h> #include <asm/uaccess.h> #include <asm/processor.h> /* Processor type for cache alignment. */ #include <asm/io.h> #include <asm/irq.h> #include "tulip.h" #undef PKT_BUF_SZ /* tulip.h also defines this */ #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ /* These identify the driver base version and may not be removed. */ static const char version[] __initconst = "v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n" " http://www.scyld.com/network/drivers.html\n"; MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); module_param(max_interrupt_work, int, 0); module_param(debug, int, 0); module_param(rx_copybreak, int, 0); module_param(multicast_filter_limit, int, 0); module_param_array(options, int, NULL, 0); module_param_array(full_duplex, int, NULL, 0); MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt"); MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)"); MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames"); MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses"); MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex"); MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)"); /* Theory of Operation I. 
Board Compatibility This driver is for the Winbond w89c840 chip. II. Board-specific settings None. III. Driver operation This chip is very similar to the Digital 21*4* "Tulip" family. The first twelve registers and the descriptor format are nearly identical. Read a Tulip manual for operational details. A significant difference is that the multicast filter and station address are stored in registers rather than loaded through a pseudo-transmit packet. Unlike the Tulip, transmit buffers are limited to 1KB. To transmit a full-sized packet we must use both data buffers in a descriptor. Thus the driver uses ring mode where descriptors are implicitly sequential in memory, rather than using the second descriptor address as a chain pointer to subsequent descriptors. IV. Notes If you are going to almost clone a Tulip, why not go all the way and avoid the need for a new driver? IVb. References http://www.scyld.com/expert/100mbps.html http://www.scyld.com/expert/NWay.html http://www.winbond.com.tw/ IVc. Errata A horrible bug exists in the transmit FIFO. Apparently the chip doesn't correctly detect a full FIFO, and queuing more than 2048 bytes may result in silent data corruption. Test with 'ping -s 10000' on a fast computer. */ /* PCI probe table. */ enum chip_capability_flags { CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8, }; static DEFINE_PCI_DEVICE_TABLE(w840_pci_tbl) = { { 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 }, { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, { 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, { } }; MODULE_DEVICE_TABLE(pci, w840_pci_tbl); enum { netdev_res_size = 128, /* size of PCI BAR resource */ }; struct pci_id_info { const char *name; int drv_flags; /* Driver use, intended as capability flags. */ }; static const struct pci_id_info pci_id_tbl[] __devinitdata = { { /* Sometime a Level-One switch card. 
				 */
	  "Winbond W89c840",	CanHaveMII | HasBrokenTx | FDXOnNoMII},
	{ "Winbond W89c840",	CanHaveMII | HasBrokenTx},
	{ "Compex RL100-ATX",	CanHaveMII | HasBrokenTx},
	{ }	/* terminate list. */
};

/* This driver was written to use PCI memory space, however some x86 systems
   work only with I/O space accesses. See CONFIG_TULIP_MMIO in .config */

/* Offsets to the Command and Status Registers, "CSRs".
   While similar to the Tulip, these registers are longword aligned.
   Note: It's not useful to define symbolic names for every register bit in
   the device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read. */
enum w840_offsets {
	PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
	RxRingPtr=0x0C, TxRingPtr=0x10,
	IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
	RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
	CurRxDescAddr=0x30, CurRxBufAddr=0x34,	/* Debug use */
	MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
	CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
};

/* Bits in the NetworkConfig register. */
enum rx_mode_bits {
	AcceptErr=0x80,
	RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
	RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
};

/* Bit positions in the MIICtrl register used to bit-bang MDIO. */
enum mii_reg_bits {
	MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
	MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
};

/* The Tulip Rx and Tx buffer descriptors.
   Layout is dictated by the hardware; status is signed so that the
   DescOwned (sign) bit can be tested with "< 0". */
struct w840_rx_desc {
	s32 status;
	s32 length;
	u32 buffer1;
	u32 buffer2;
};

struct w840_tx_desc {
	s32 status;
	s32 length;
	u32 buffer1, buffer2;
};

#define MII_CNT		1 /* winbond only supports one MII */

/* Driver-private state, stored in netdev_priv(dev). */
struct netdev_private {
	struct w840_rx_desc *rx_ring;
	dma_addr_t	rx_addr[RX_RING_SIZE];
	struct w840_tx_desc *tx_ring;
	dma_addr_t	tx_addr[TX_RING_SIZE];
	dma_addr_t ring_dma_addr;	/* DMA address of the combined ring block */
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	struct net_device_stats stats;
	struct timer_list timer;	/* Media monitoring timer. */
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	int chip_id, drv_flags;
	struct pci_dev *pci_dev;
	int csr6;			/* cached copy of the NetworkConfig register */
	struct w840_rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	unsigned int cur_tx, dirty_tx;
	unsigned int tx_q_bytes;	/* bytes queued, for the Tx FIFO bug workaround */
	unsigned int tx_full;		/* The Tx queue is full. */
	/* MII transceiver section. */
	int mii_cnt;			/* MII device addresses. */
	unsigned char phys[MII_CNT];	/* MII device addresses, but only the first is used */
	u32 mii;			/* cached PHY ID (PHYSID1 << 16 | PHYSID2) */
	struct mii_if_info mii_if;
	void __iomem *base_addr;	/* mapped chip registers */
};

static int eeprom_read(void __iomem *ioaddr, int location);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static int update_link(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void init_rxtx_rings(struct net_device *dev);
static void free_rxtx_rings(struct netdev_private *np);
static void init_registers(struct net_device *dev);
static void tx_timeout(struct net_device *dev);
static int alloc_ringdesc(struct net_device *dev);
static void free_ringdesc(struct netdev_private *np);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void netdev_error(struct net_device *dev, int intr_status);
static int netdev_rx(struct net_device *dev);
static u32 __set_rx_mode(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int netdev_close(struct
net_device *dev);

/* Hook table wiring the generic netdev layer to this driver; MTU change
   and MAC-address handling use the stock ethernet helpers. */
static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_multicast_list	= set_rx_mode,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

/*
 * PCI probe: enable the device, map its registers, read the station
 * address from the serial EEPROM, reset the chip, register the
 * net_device, and (when the board variant can have one) scan for an
 * MII PHY.
 */
static int __devinit w840_probe1 (struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int find_cnt;	/* instance counter across all probe calls */
	int chip_idx = ent->driver_data;
	int irq;
	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
	void __iomem *ioaddr;

	i = pci_enable_device(pdev);
	if (i)
		return i;

	pci_set_master(pdev);

	irq = pdev->irq;

	/* The chip can only do 32-bit DMA addressing. */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		pr_warn("Device %s disabled due to DMA limitations\n",
			pci_name(pdev));
		return -EIO;
	}
	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
	if (!ioaddr)
		goto err_out_free_res;

	/* Station address is stored as three little-endian EEPROM words. */
	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));

	/* Reset the chip to erase previous misconfiguration.
	   No hold time required! */
	iowrite32(0x00000001, ioaddr + PCIBusCfg);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;

	np = netdev_priv(dev);
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
	spin_lock_init(&np->lock);
	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->base_addr = ioaddr;

	pci_set_drvdata(pdev, dev);

	/* A boot-time mem_start overrides the module 'options' parameter. */
	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			np->mii_if.full_duplex = 1;
		if (option & 15)
			dev_info(&dev->dev,
				 "ignoring user supplied media type %d",
				 option & 15);
	}
	if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
		np->mii_if.full_duplex = 1;

	if (np->mii_if.full_duplex)
		np->mii_if.force_media = 1;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	i = register_netdev(dev);
	if (i)
		goto err_out_cleardev;

	dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n",
		 pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq);

	if (np->drv_flags & CanHaveMII) {
		int phy, phy_idx = 0;
		/* Probe addresses 1..31; only the first MII_CNT PHYs are kept. */
		for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			/* 0xffff/0x0000 mean no transceiver at this address. */
			if (mii_status != 0xffff && mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
				np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
						mdio_read(dev, phy, MII_PHYSID2);
				dev_info(&dev->dev,
					 "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n",
					 np->mii, phy, mii_status,
					 np->mii_if.advertising);
			}
		}
		np->mii_cnt = phy_idx;
		np->mii_if.phy_id = np->phys[0];
		if (phy_idx == 0) {
			dev_warn(&dev->dev,
				 "MII PHY not found -- this device may not operate correctly\n");
		}
	}

	find_cnt++;
	return 0;

err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev (dev);
	return -ENODEV;
}

/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.  These are
   often serial bit streams generated by the host processor.
   The example below is for the common 93c46 EEPROM, 64 16 bit words. */

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
   a delay.  Note that pre-2.0.34 kernels had a cache-alignment bug that
   made udelay() unreliable.
   The old method of using an ISA access as a delay, __SLOW_DOWN_IO__,
   is deprecated. */
#define eeprom_delay(ee_addr)	ioread32(ee_addr)

/* Bit patterns driven onto the EECtrl register to talk to the 93c46. */
enum EEPROM_Ctrl_Bits {
	EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
	EE_ChipSelect=0x801, EE_DataIn=0x08,
};

/* The EEPROM commands include the always-set leading bit. */
enum EEPROM_Cmds {
	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};

/*
 * Bit-bang one 16-bit word out of the serial EEPROM.
 * @addr: mapped register base.  @location: word index to read.
 * Clocks the 11-bit read command out MSB-first, then clocks 16 data
 * bits back in, and finally deselects the chip.
 */
static int eeprom_read(void __iomem *addr, int location)
{
	int i;
	int retval = 0;
	void __iomem *ee_addr = addr + EECtrl;
	int read_cmd = location | EE_ReadCmd;

	iowrite32(EE_ChipSelect, ee_addr);

	/* Shift the read command bits out. */
	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
		iowrite32(dataval, ee_addr);
		eeprom_delay(ee_addr);
		iowrite32(dataval | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
	}
	iowrite32(EE_ChipSelect, ee_addr);
	eeprom_delay(ee_addr);

	for (i = 16; i > 0; i--) {
		iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0);
		iowrite32(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	/* Terminate the EEPROM access. */
	iowrite32(0, ee_addr);
	return retval;
}

/*  MII transceiver control section.
	Read and write the MII registers using software-generated serial
	MDIO protocol.  See the MII specifications or DP83840A data sheet
	for details.

	The maximum data clock rate is 2.5 Mhz.  The minimum timing is usually
	met by back-to-back 33Mhz PCI cycles. */
#define mdio_delay(mdio_addr) ioread32(mdio_addr)

/* Set iff a MII transceiver on any interface requires mdio preamble.
   This only set with older transceivers, so the extra
   code size of a per-interface flag is not worthwhile. */
static char mii_preamble_required = 1;

#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)

/* Generate the preamble required for initial synchronization and
   a few older transceivers.
 */
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		iowrite32(MDIO_WRITE1, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
}

/*
 * Bit-bang a read of one MII PHY register over the MDIO lines.
 * Returns the 16-bit register value.
 */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base_addr + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite32(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 20; i > 0; i--) {
		iowrite32(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0);
		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Drop the trailing wire-idle bit; keep the 16 data bits. */
	return (retval>>1) & 0xffff;
}

/*
 * Bit-bang a write of one MII PHY register.  Writes to the advertising
 * register (4) of the primary PHY are mirrored into np->mii_if so the
 * cached negotiation state stays in sync.
 */
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base_addr + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (location == 4 && phy_id == np->phys[0])
		np->mii_if.advertising = value;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite32(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite32(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
}

/*
 * net_device open: reset the chip, grab the (shared) IRQ, allocate and
 * initialize the descriptor rings, program the registers, and start
 * the media-monitoring timer.
 */
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int i;

	iowrite32(0x00000001, ioaddr + PCIBusCfg);	/* Reset */

	netif_device_detach(dev);
	i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (i)
		goto out_err;

	if (debug > 1)
		netdev_dbg(dev, "w89c840_open() irq %d\n", dev->irq);

	if((i=alloc_ringdesc(dev)))
		goto out_err;

	spin_lock_irq(&np->lock);
	netif_device_attach(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);

	netif_start_queue(dev);
	if (debug > 2)
		netdev_dbg(dev, "Done netdev_open()\n");

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 1*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = netdev_timer;	/* timer handler */
	add_timer(&np->timer);
	return 0;
out_err:
	netif_device_attach(dev);
	return i;
}

#define MII_DAVICOM_DM9101	0x0181b800

/*
 * Sample the PHY and compute the csr6 value matching the current link
 * speed/duplex.  Returns the desired csr6; the caller applies it with
 * update_csr6().  Returns the current csr6 unchanged when the PHY has
 * vanished (reads as 0xffff) or reports no link.
 */
static int update_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int duplex, fasteth, result, mii_reg;

	/* BSMR */
	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);

	if (mii_reg == 0xffff)
		return np->csr6;
	/* reread: the link status bit is sticky */
	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
	if (!(mii_reg & 0x4)) {
		if (netif_carrier_ok(dev)) {
			if (debug)
				dev_info(&dev->dev,
					 "MII #%d reports no link. Disabling watchdog\n",
					 np->phys[0]);
			netif_carrier_off(dev);
		}
		return np->csr6;
	}
	if (!netif_carrier_ok(dev)) {
		if (debug)
			dev_info(&dev->dev,
				 "MII #%d link is back. Enabling watchdog\n",
				 np->phys[0]);
		netif_carrier_on(dev);
	}

	if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
		/* If the link partner doesn't support autonegotiation
		 * the MII detects its abilities with the "parallel detection".
		 * Some MIIs update the LPA register to the result of the
		 * parallel detection, some don't.
		 * The Davicom PHY [at least 0181b800] doesn't.
		 * Instead bit 9 and 13 of the BMCR are updated to the result
		 * of the negotiation..
		 */
		mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
		duplex = mii_reg & BMCR_FULLDPLX;
		fasteth = mii_reg & BMCR_SPEED100;
	} else {
		int negotiated;

		mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
		negotiated = mii_reg & np->mii_if.advertising;

		duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
		fasteth = negotiated & 0x380;
	}
	duplex |= np->mii_if.force_media;
	/* remove fastether and fullduplex */
	result = np->csr6 & ~0x20000200;
	if (duplex)
		result |= 0x200;
	if (fasteth)
		result |= 0x20000000;
	if (result != np->csr6 && debug)
		dev_info(&dev->dev,
			 "Setting %dMBit-%s-duplex based on MII#%d\n",
			 fasteth ? 100 : 10, duplex ? "full" : "half",
			 np->phys[0]);
	return result;
}

#define RXTX_TIMEOUT	2000

/*
 * Safely change csr6: stop both DMA engines, wait (bounded by
 * RXTX_TIMEOUT iterations of udelay(1)) for them to report idle, then
 * write the new configuration and restart them.  Forces csr6 to 0 when
 * the device has been detached (suspend).  Caller holds np->lock.
 */
static inline void update_csr6(struct net_device *dev, int new)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int limit = RXTX_TIMEOUT;

	if (!netif_device_present(dev))
		new = 0;
	if (new==np->csr6)
		return;
	/* stop both Tx and Rx processes */
	iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
	/* wait until they have really stopped */
	for (;;) {
		int csr5 = ioread32(ioaddr + IntrStatus);
		int t;

		/* Bits 19:17 are the Rx state, 22:20 the Tx state;
		   0/1 mean stopped or suspended. */
		t = (csr5 >> 17) & 0x07;
		if (t==0||t==1) {
			/* rx stopped */
			t = (csr5 >> 20) & 0x07;
			if (t==0||t==1)
				break;
		}

		limit--;
		if(!limit) {
			dev_info(&dev->dev,
				 "couldn't stop rxtx, IntrStatus %xh\n", csr5);
			break;
		}
		udelay(1);
	}
	np->csr6 = new;
	/* and restart them with the new configuration */
	iowrite32(np->csr6, ioaddr + NetworkConfig);
	if (new & 0x200)
		np->mii_if.full_duplex = 1;
}

/*
 * Periodic (10 s) media-monitoring timer: re-evaluate the link, apply
 * any resulting csr6 change, and re-arm.
 */
static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	if (debug > 2)
		netdev_dbg(dev, "Media selection timer tick, status %08x config %08x\n",
			   ioread32(ioaddr + IntrStatus),
			   ioread32(ioaddr + NetworkConfig));
	spin_lock_irq(&np->lock);
	update_csr6(dev, update_link(dev));
	spin_unlock_irq(&np->lock);
	np->timer.expires = jiffies + 10*HZ;
	add_timer(&np->timer);
}

/*
 * Lay out and fill both descriptor rings.  The Tx ring lives
 * immediately after the Rx ring inside the single coherent DMA
 * allocation.  Rx buffer allocation failure is tolerated: dirty_rx
 * records how far we got and the refill loop in netdev_rx() retries.
 */
static void init_rxtx_rings(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->rx_head_desc = &np->rx_ring[0];
	np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];

	/* Initial all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].length = np->rx_buf_sz;
		np->rx_ring[i].status = 0;
		np->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[i-1].length |= DescEndRing;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
					np->rx_buf_sz,PCI_DMA_FROMDEVICE);

		np->rx_ring[i].buffer1 = np->rx_addr[i];
		np->rx_ring[i].status = DescOwned;
	}

	np->cur_rx = 0;
	/* Negative offset when some buffers failed to allocate. */
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* Initialize the Tx descriptors */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
	np->tx_full = 0;
	np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;

	iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
	iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
		  np->base_addr + TxRingPtr);

}

/*
 * Release every skb (and its DMA mapping) still attached to either
 * ring.  Used on close and on tx_timeout reset.
 */
static void free_rxtx_rings(struct netdev_private* np)
{
	int i;

	/* Free all the skbuffs in the Rx queue.
	 */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
						np->rx_addr[i],
						np->rx_skbuff[i]->len,
						PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
		}
		np->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (np->tx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
						np->tx_addr[i],
						np->tx_skbuff[i]->len,
						PCI_DMA_TODEVICE);
			dev_kfree_skb(np->tx_skbuff[i]);
		}
		np->tx_skbuff[i] = NULL;
	}
}

/*
 * Program the chip for operation: station address, bus configuration
 * (burst length / cache alignment tuned per architecture), csr6 via
 * update_csr6(), and the interrupt mask.  Caller holds np->lock with
 * the device attached.
 */
static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
#ifdef __BIG_ENDIAN
	i = (1<<20);	/* Big-endian descriptors */
#else
	i = 0;
#endif
	i |= (0x04<<2);		/* skip length 4 u32 */
	i |= 0x02;		/* give Rx priority */

	/* Configure the PCI bus bursts and FIFO thresholds.
	   486: Set 8 longword cache alignment, 8 longword burst.
	   586: Set 16 longword cache alignment, no burst limit.
	   Cache alignment bits 15:14	     Burst length 13:8
		0000	<not allowed>		0000 align to cache	0800 8 longwords
		4000	8 longwords		0100 1 longword		1000 16 longwords
		8000	16 longwords		0200 2 longwords	2000 32 longwords
		C000	32 longwords		0400 4 longwords */

#if defined (__i386__) && !defined(MODULE)
	/* When not a module we can work around broken '486 PCI boards. */
	if (boot_cpu_data.x86 <= 4) {
		i |= 0x4800;
		dev_info(&dev->dev,
			 "This is a 386/486 PCI system, setting cache alignment to 8 longwords\n");
	} else {
		i |= 0xE000;
	}
#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
	i |= 0xE000;
#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC)
	i |= 0x4800;
#else
#warning Processor architecture undefined
	i |= 0x4800;
#endif
	iowrite32(i, ioaddr + PCIBusCfg);

	np->csr6 = 0;
	/* 128 byte Tx threshold;
		Transmit on; Receive on; */
	update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));

	/* Clear and Enable interrupts by setting the interrupt mask. */
	iowrite32(0x1A0F5, ioaddr + IntrStatus);
	iowrite32(0x1A0F5, ioaddr + IntrEnable);

	iowrite32(0, ioaddr + RxStartDemand);
}

/*
 * Watchdog: the transmitter hung.  Dump diagnostic ring state, then
 * perform a full software reset and re-initialize rings and registers
 * with the IRQ disabled.
 */
static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
		 ioread32(ioaddr + IntrStatus));

	{
		int i;
		printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
		printk(KERN_CONT "\n");
		printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_CONT " %08x", np->tx_ring[i].status);
		printk(KERN_CONT "\n");
	}
	printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n",
	       np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
	printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C));

	disable_irq(dev->irq);
	spin_lock_irq(&np->lock);
	/*
	 * Under high load dirty_tx and the internal tx descriptor pointer
	 * come out of sync, thus perform a software reset and reinitialize
	 * everything.
	 */

	iowrite32(1, np->base_addr+PCIBusCfg);
	udelay(1);

	free_rxtx_rings(np);
	init_rxtx_rings(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);
	enable_irq(dev->irq);

	netif_wake_queue(dev);
	dev->trans_start = jiffies; /* prevent tx timeout */
	np->stats.tx_errors++;
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static int alloc_ringdesc(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	/* One coherent allocation holds both rings back to back. */
	np->rx_ring = pci_alloc_consistent(np->pci_dev,
			sizeof(struct w840_rx_desc)*RX_RING_SIZE +
			sizeof(struct w840_tx_desc)*TX_RING_SIZE,
			&np->ring_dma_addr);
	if(!np->rx_ring)
		return -ENOMEM;
	init_rxtx_rings(dev);
	return 0;
}

/* Release the coherent allocation holding both rings. */
static void free_ringdesc(struct netdev_private *np)
{
	pci_free_consistent(np->pci_dev,
			sizeof(struct w840_rx_desc)*RX_RING_SIZE +
			sizeof(struct w840_tx_desc)*TX_RING_SIZE,
			np->rx_ring, np->ring_dma_addr);

}

/*
 * Queue one skb for transmission.  Packets longer than TX_BUFLIMIT are
 * split across the descriptor's two buffers because the chip limits
 * each buffer to 1KB.
 */
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_addr[entry] = pci_map_single(np->pci_dev,
				skb->data,skb->len, PCI_DMA_TODEVICE);
	np->tx_skbuff[entry] = skb;

	np->tx_ring[entry].buffer1 = np->tx_addr[entry];
	if (skb->len < TX_BUFLIMIT) {
		np->tx_ring[entry].length = DescWholePkt | skb->len;
	} else {
		int len = skb->len - TX_BUFLIMIT;

		np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
		np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
	}
	if(entry == TX_RING_SIZE-1)
		np->tx_ring[entry].length |= DescEndRing;

	/* Now acquire the irq spinlock.
	 * The difficult race is the ordering between
	 * increasing np->cur_tx and setting DescOwned:
	 * - if np->cur_tx is increased first the interrupt
	 *   handler could consider the packet as transmitted
	 *   since DescOwned is cleared.
	 * - If DescOwned is set first the NIC could report the
	 *   packet as sent, but the interrupt handler would ignore it
	 *   since the np->cur_tx was not yet increased.
	 */
	spin_lock_irq(&np->lock);
	np->cur_tx++;

	wmb(); /* flush length, buffer1, buffer2 */
	np->tx_ring[entry].status = DescOwned;
	wmb(); /* flush status and kick the hardware */
	iowrite32(0, np->base_addr + TxStartDemand);
	np->tx_q_bytes += skb->len;

	/* Work around horrible bug in the chip by marking the queue as full
	   when we do not have FIFO room for a maximum sized packet. */
	if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
		((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
		netif_stop_queue(dev);
		wmb();
		np->tx_full = 1;
	}
	spin_unlock_irq(&np->lock);

	if (debug > 4) {
		netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
			   np->cur_tx, entry);
	}
	return NETDEV_TX_OK;
}

/*
 * Reap completed Tx descriptors: record statistics, unmap and free the
 * skbs, and wake the queue once both the descriptor count and the
 * FIFO-workaround byte count have drained.  Called from the interrupt
 * handler with np->lock held.
 */
static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		int tx_status = np->tx_ring[entry].status;

		/* Negative status means the chip still owns the descriptor. */
		if (tx_status < 0)
			break;
		if (tx_status & 0x8000) { 	/* There was an error, log it. */
#ifndef final_version
			if (debug > 1)
				netdev_dbg(dev, "Transmit error, Tx status %08x\n",
					   tx_status);
#endif
			np->stats.tx_errors++;
			if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
			if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
			if (tx_status & 0x0200) np->stats.tx_window_errors++;
			if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
			if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
				np->stats.tx_heartbeat_errors++;
		} else {
#ifndef final_version
			if (debug > 3)
				netdev_dbg(dev, "Transmit slot %d ok, Tx status %08x\n",
					   entry, tx_status);
#endif
			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
			np->stats.collisions += (tx_status >> 3) & 15;
			np->stats.tx_packets++;
		}
		/* Free the original skb. */
		pci_unmap_single(np->pci_dev,np->tx_addr[entry],
					np->tx_skbuff[entry]->len,
					PCI_DMA_TODEVICE);
		np->tx_q_bytes -= np->tx_skbuff[entry]->len;
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	if (np->tx_full &&
		np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
		np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
		/* The ring is no longer full, clear tbusy. */
		np->tx_full = 0;
		wmb();
		netif_wake_queue(dev);
	}
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int work_limit = max_interrupt_work;
	int handled = 0;

	/* Shared IRQ: the device may be detached (suspended). */
	if (!netif_device_present(dev))
		return IRQ_NONE;
	do {
		u32 intr_status = ioread32(ioaddr + IntrStatus);

		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);

		if (debug > 4)
			netdev_dbg(dev, "Interrupt, status %04x\n", intr_status);

		if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
			break;

		handled = 1;

		if (intr_status & (RxIntr | RxNoBuf))
			netdev_rx(dev);
		if (intr_status & RxNoBuf)
			iowrite32(0, ioaddr + RxStartDemand);

		if (intr_status & (TxNoBuf | TxIntr) &&
			np->cur_tx != np->dirty_tx) {
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SystemError |
				   TimerInt | TxDied))
			netdev_error(dev, intr_status);

		if (--work_limit < 0) {
			dev_warn(&dev->dev,
				 "Too much work at interrupt, status=0x%04x\n",
				 intr_status);
			/* Set the timer to re-enable the other interrupts after
			   10*82usec ticks.
			 */
			spin_lock(&np->lock);
			if (netif_device_present(dev)) {
				iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
				iowrite32(10, ioaddr + GPTimer);
			}
			spin_unlock(&np->lock);
			break;
		}
	} while (1);

	if (debug > 3)
		netdev_dbg(dev, "exiting interrupt, status=%#4.4x\n",
			   ioread32(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}

/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	if (debug > 4) {
		netdev_dbg(dev, " In netdev_rx(), entry %d status %04x\n",
			   entry, np->rx_ring[entry].status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (--work_limit >= 0) {
		struct w840_rx_desc *desc = np->rx_head_desc;
		s32 status = desc->status;

		if (debug > 4)
			netdev_dbg(dev, " netdev_rx() status was %08x\n", status);
		/* Negative status: descriptor still owned by the chip. */
		if (status < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					dev_warn(&dev->dev,
						 "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n",
						 np->cur_rx, status);
					np->stats.rx_length_errors++;
				}
			} else if (status & 0x8000) {
				/* There was a fatal error. */
				if (debug > 2)
					netdev_dbg(dev, "Receive error, Rx status %08x\n",
						   status);
				np->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) np->stats.rx_length_errors++;
				if (status & 0x004C) np->stats.rx_frame_errors++;
				if (status & 0x0002) np->stats.rx_crc_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			int pkt_len = ((status >> 16) & 0x7ff) - 4;

#ifndef final_version
			if (debug > 4)
				netdev_dbg(dev, " netdev_rx() normal Rx pkt length %d status %x\n",
					   pkt_len, status);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
							    np->rx_skbuff[entry]->len,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
							       np->rx_skbuff[entry]->len,
							       PCI_DMA_FROMDEVICE);
			} else {
				/* Pass the ring buffer up directly; the refill
				   loop below replaces it. */
				pci_unmap_single(np->pci_dev,np->rx_addr[entry],
							np->rx_skbuff[entry]->len,
							PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
#ifndef final_version				/* Remove after testing. */
			/* You will want this info for the initial debug. */
			if (debug > 5)
				netdev_dbg(dev, " Rx data %pM %pM %02x%02x %pI4\n",
					   &skb->data[0], &skb->data[6],
					   skb->data[12], skb->data[13],
					   &skb->data[14]);
#endif
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;			/* Better luck next round. */
			np->rx_addr[entry] = pci_map_single(np->pci_dev,
							skb->data,
							np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].buffer1 = np->rx_addr[entry];
		}
		wmb();
		np->rx_ring[entry].status = DescOwned;
	}

	return 0;
}

/*
 * Handle the abnormal-interrupt summary: Tx FIFO underflow (bump the
 * Tx start threshold), missed Rx frames, and the mitigation timer
 * that re-enables interrupts after an interrupt storm.
 */
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	if (debug > 2)
		netdev_dbg(dev, "Abnormal event, %08x\n", intr_status);
	/* All-ones: the card is gone (hot unplug). */
	if (intr_status == 0xffffffff)
		return;
	spin_lock(&np->lock);
	if (intr_status & TxFIFOUnderflow) {
		int new;
		/* Bump up the Tx threshold */
#if 0
		/* This causes lots of dropped packets,
		 * and under high load even tx_timeouts */
		new = np->csr6 + 0x4000;
#else
		new = (np->csr6 >> 14)&0x7f;
		if (new < 64)
			new *= 2;
		else
			new = 127; /* load full packet before starting */
		new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
#endif
		netdev_dbg(dev, "Tx underflow, new csr6 %08x\n", new);
		update_csr6(dev, new);
	}
	if (intr_status & RxDied) {		/* Missed a Rx frame. */
		np->stats.rx_errors++;
	}
	if (intr_status & TimerInt) {
		/* Re-enable other interrupts. */
		if (netif_device_present(dev))
			iowrite32(0x1A0F5, ioaddr + IntrEnable);
	}
	np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
	iowrite32(0, ioaddr + RxStartDemand);
	spin_unlock(&np->lock);
}

/*
 * Return accumulated statistics, folding in the hardware missed-frame
 * counter when the device is running and present.
 */
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	/* The chip only need report frame silently dropped. */
	spin_lock_irq(&np->lock);
	if (netif_running(dev) && netif_device_present(dev))
		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
	spin_unlock_irq(&np->lock);

	return &np->stats;
}

/*
 * Compute the Rx-mode bits for csr6 and program the multicast hash
 * filter registers.  Returns the mode bits; the caller (set_rx_mode
 * or init_registers) folds them into csr6.
 */
static u32 __set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	u32 mc_filter[2];			/* Multicast hash filter */
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous.
*/ memset(mc_filter, 0xff, sizeof(mc_filter)); rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys | AcceptMyPhys; } else if ((netdev_mc_count(dev) > multicast_filter_limit) || (dev->flags & IFF_ALLMULTI)) { /* Too many to match, or accept all multicasts. */ memset(mc_filter, 0xff, sizeof(mc_filter)); rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys; } else { struct netdev_hw_addr *ha; memset(mc_filter, 0, sizeof(mc_filter)); netdev_for_each_mc_addr(ha, dev) { int filbit; filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F; filbit &= 0x3f; mc_filter[filbit >> 5] |= 1 << (filbit & 31); } rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys; } iowrite32(mc_filter[0], ioaddr + MulticastFilter0); iowrite32(mc_filter[1], ioaddr + MulticastFilter1); return rx_mode; } static void set_rx_mode(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); u32 rx_mode = __set_rx_mode(dev); spin_lock_irq(&np->lock); update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode); spin_unlock_irq(&np->lock); } static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) { struct netdev_private *np = netdev_priv(dev); strcpy (info->driver, DRV_NAME); strcpy (info->version, DRV_VERSION); strcpy (info->bus_info, pci_name(np->pci_dev)); } static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct netdev_private *np = netdev_priv(dev); int rc; spin_lock_irq(&np->lock); rc = mii_ethtool_gset(&np->mii_if, cmd); spin_unlock_irq(&np->lock); return rc; } static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct netdev_private *np = netdev_priv(dev); int rc; spin_lock_irq(&np->lock); rc = mii_ethtool_sset(&np->mii_if, cmd); spin_unlock_irq(&np->lock); return rc; } static int netdev_nway_reset(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); return mii_nway_restart(&np->mii_if); } static u32 netdev_get_link(struct net_device *dev) { struct netdev_private 
*np = netdev_priv(dev); return mii_link_ok(&np->mii_if); } static u32 netdev_get_msglevel(struct net_device *dev) { return debug; } static void netdev_set_msglevel(struct net_device *dev, u32 value) { debug = value; } static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, .get_settings = netdev_get_settings, .set_settings = netdev_set_settings, .nway_reset = netdev_nway_reset, .get_link = netdev_get_link, .get_msglevel = netdev_get_msglevel, .set_msglevel = netdev_set_msglevel, }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct mii_ioctl_data *data = if_mii(rq); struct netdev_private *np = netdev_priv(dev); switch(cmd) { case SIOCGMIIPHY: /* Get address of MII PHY in use. */ data->phy_id = ((struct netdev_private *)netdev_priv(dev))->phys[0] & 0x1f; /* Fall Through */ case SIOCGMIIREG: /* Read MII PHY register. */ spin_lock_irq(&np->lock); data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f); spin_unlock_irq(&np->lock); return 0; case SIOCSMIIREG: /* Write MII PHY register. */ spin_lock_irq(&np->lock); mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in); spin_unlock_irq(&np->lock); return 0; default: return -EOPNOTSUPP; } } static int netdev_close(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base_addr; netif_stop_queue(dev); if (debug > 1) { netdev_dbg(dev, "Shutting down ethercard, status was %08x Config %08x\n", ioread32(ioaddr + IntrStatus), ioread32(ioaddr + NetworkConfig)); netdev_dbg(dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n", np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx); } /* Stop the chip's Tx and Rx processes. 
*/ spin_lock_irq(&np->lock); netif_device_detach(dev); update_csr6(dev, 0); iowrite32(0x0000, ioaddr + IntrEnable); spin_unlock_irq(&np->lock); free_irq(dev->irq, dev); wmb(); netif_device_attach(dev); if (ioread32(ioaddr + NetworkConfig) != 0xffffffff) np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; #ifdef __i386__ if (debug > 2) { int i; printk(KERN_DEBUG" Tx ring at %p:\n", np->tx_ring); for (i = 0; i < TX_RING_SIZE; i++) printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n", i, np->tx_ring[i].length, np->tx_ring[i].status, np->tx_ring[i].buffer1); printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring); for (i = 0; i < RX_RING_SIZE; i++) { printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n", i, np->rx_ring[i].length, np->rx_ring[i].status, np->rx_ring[i].buffer1); } } #endif /* __i386__ debugging only */ del_timer_sync(&np->timer); free_rxtx_rings(np); free_ringdesc(np); return 0; } static void __devexit w840_remove1 (struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); if (dev) { struct netdev_private *np = netdev_priv(dev); unregister_netdev(dev); pci_release_regions(pdev); pci_iounmap(pdev, np->base_addr); free_netdev(dev); } pci_set_drvdata(pdev, NULL); } #ifdef CONFIG_PM /* * suspend/resume synchronization: * - open, close, do_ioctl: * rtnl_lock, & netif_device_detach after the rtnl_unlock. * - get_stats: * spin_lock_irq(np->lock), doesn't touch hw if not present * - start_xmit: * synchronize_irq + netif_tx_disable; * - tx_timeout: * netif_device_detach + netif_tx_disable; * - set_multicast_list * netif_device_detach + netif_tx_disable; * - interrupt handler * doesn't touch hw if not present, synchronize_irq waits for * running instances of the interrupt handler. * * Disabling hw requires clearing csr6 & IntrEnable. * update_csr6 & all function that write IntrEnable check netif_device_present * before settings any bits. * * Detach must occur under spin_unlock_irq(), interrupts from a detached * device would cause an irq storm. 
*/ static int w840_suspend (struct pci_dev *pdev, pm_message_t state) { struct net_device *dev = pci_get_drvdata (pdev); struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base_addr; rtnl_lock(); if (netif_running (dev)) { del_timer_sync(&np->timer); spin_lock_irq(&np->lock); netif_device_detach(dev); update_csr6(dev, 0); iowrite32(0, ioaddr + IntrEnable); spin_unlock_irq(&np->lock); synchronize_irq(dev->irq); netif_tx_disable(dev); np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; /* no more hardware accesses behind this line. */ BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable)); /* pci_power_off(pdev, -1); */ free_rxtx_rings(np); } else { netif_device_detach(dev); } rtnl_unlock(); return 0; } static int w840_resume (struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata (pdev); struct netdev_private *np = netdev_priv(dev); int retval = 0; rtnl_lock(); if (netif_device_present(dev)) goto out; /* device not suspended */ if (netif_running(dev)) { if ((retval = pci_enable_device(pdev))) { dev_err(&dev->dev, "pci_enable_device failed in resume\n"); goto out; } spin_lock_irq(&np->lock); iowrite32(1, np->base_addr+PCIBusCfg); ioread32(np->base_addr+PCIBusCfg); udelay(1); netif_device_attach(dev); init_rxtx_rings(dev); init_registers(dev); spin_unlock_irq(&np->lock); netif_wake_queue(dev); mod_timer(&np->timer, jiffies + 1*HZ); } else { netif_device_attach(dev); } out: rtnl_unlock(); return retval; } #endif static struct pci_driver w840_driver = { .name = DRV_NAME, .id_table = w840_pci_tbl, .probe = w840_probe1, .remove = __devexit_p(w840_remove1), #ifdef CONFIG_PM .suspend = w840_suspend, .resume = w840_resume, #endif }; static int __init w840_init(void) { printk(version); return pci_register_driver(&w840_driver); } static void __exit w840_exit(void) { pci_unregister_driver(&w840_driver); } module_init(w840_init); module_exit(w840_exit);
gpl-2.0
Tommy-Geenexus/android_kernel_sony_apq8064_yuga_5.x
net/batman-adv/routing.c
2766
30040
/* * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA * */ #include "main.h" #include "routing.h" #include "send.h" #include "soft-interface.h" #include "hard-interface.h" #include "icmp_socket.h" #include "translation-table.h" #include "originator.h" #include "vis.h" #include "unicast.h" void slide_own_bcast_window(struct hard_iface *hard_iface) { struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); struct hashtable_t *hash = bat_priv->orig_hash; struct hlist_node *node; struct hlist_head *head; struct orig_node *orig_node; unsigned long *word; uint32_t i; size_t word_index; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { spin_lock_bh(&orig_node->ogm_cnt_lock); word_index = hard_iface->if_num * NUM_WORDS; word = &(orig_node->bcast_own[word_index]); bit_get_packet(bat_priv, word, 1, 0); orig_node->bcast_own_sum[hard_iface->if_num] = bit_packet_count(word); spin_unlock_bh(&orig_node->ogm_cnt_lock); } rcu_read_unlock(); } } static void _update_route(struct bat_priv *bat_priv, struct orig_node *orig_node, struct neigh_node *neigh_node) { struct neigh_node *curr_router; curr_router = orig_node_get_router(orig_node); /* route deleted */ if ((curr_router) && (!neigh_node)) { bat_dbg(DBG_ROUTES, 
bat_priv, "Deleting route towards: %pM\n", orig_node->orig); tt_global_del_orig(bat_priv, orig_node, "Deleted route towards originator"); /* route added */ } else if ((!curr_router) && (neigh_node)) { bat_dbg(DBG_ROUTES, bat_priv, "Adding route towards: %pM (via %pM)\n", orig_node->orig, neigh_node->addr); /* route changed */ } else if (neigh_node && curr_router) { bat_dbg(DBG_ROUTES, bat_priv, "Changing route towards: %pM (now via %pM - was via %pM)\n", orig_node->orig, neigh_node->addr, curr_router->addr); } if (curr_router) neigh_node_free_ref(curr_router); /* increase refcount of new best neighbor */ if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount)) neigh_node = NULL; spin_lock_bh(&orig_node->neigh_list_lock); rcu_assign_pointer(orig_node->router, neigh_node); spin_unlock_bh(&orig_node->neigh_list_lock); /* decrease refcount of previous best neighbor */ if (curr_router) neigh_node_free_ref(curr_router); } void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node, struct neigh_node *neigh_node) { struct neigh_node *router = NULL; if (!orig_node) goto out; router = orig_node_get_router(orig_node); if (router != neigh_node) _update_route(bat_priv, orig_node, neigh_node); out: if (router) neigh_node_free_ref(router); } /* caller must hold the neigh_list_lock */ void bonding_candidate_del(struct orig_node *orig_node, struct neigh_node *neigh_node) { /* this neighbor is not part of our candidate list */ if (list_empty(&neigh_node->bonding_list)) goto out; list_del_rcu(&neigh_node->bonding_list); INIT_LIST_HEAD(&neigh_node->bonding_list); neigh_node_free_ref(neigh_node); atomic_dec(&orig_node->bond_candidates); out: return; } void bonding_candidate_add(struct orig_node *orig_node, struct neigh_node *neigh_node) { struct hlist_node *node; struct neigh_node *tmp_neigh_node, *router = NULL; uint8_t interference_candidate = 0; spin_lock_bh(&orig_node->neigh_list_lock); /* only consider if it has the same primary address ... 
*/ if (!compare_eth(orig_node->orig, neigh_node->orig_node->primary_addr)) goto candidate_del; router = orig_node_get_router(orig_node); if (!router) goto candidate_del; /* ... and is good enough to be considered */ if (neigh_node->tq_avg < router->tq_avg - BONDING_TQ_THRESHOLD) goto candidate_del; /** * check if we have another candidate with the same mac address or * interface. If we do, we won't select this candidate because of * possible interference. */ hlist_for_each_entry_rcu(tmp_neigh_node, node, &orig_node->neigh_list, list) { if (tmp_neigh_node == neigh_node) continue; /* we only care if the other candidate is even * considered as candidate. */ if (list_empty(&tmp_neigh_node->bonding_list)) continue; if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) || (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) { interference_candidate = 1; break; } } /* don't care further if it is an interference candidate */ if (interference_candidate) goto candidate_del; /* this neighbor already is part of our candidate list */ if (!list_empty(&neigh_node->bonding_list)) goto out; if (!atomic_inc_not_zero(&neigh_node->refcount)) goto out; list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list); atomic_inc(&orig_node->bond_candidates); goto out; candidate_del: bonding_candidate_del(orig_node, neigh_node); out: spin_unlock_bh(&orig_node->neigh_list_lock); if (router) neigh_node_free_ref(router); } /* copy primary address for bonding */ void bonding_save_primary(const struct orig_node *orig_node, struct orig_node *orig_neigh_node, const struct batman_ogm_packet *batman_ogm_packet) { if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP)) return; memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN); } /* checks whether the host restarted and is in the protection time. * returns: * 0 if the packet is to be accepted * 1 if the packet is to be ignored. 
*/ int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff, unsigned long *last_reset) { if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) { if (has_timed_out(*last_reset, RESET_PROTECTION_MS)) { *last_reset = jiffies; bat_dbg(DBG_BATMAN, bat_priv, "old packet received, start protection\n"); return 0; } else { return 1; } } return 0; } int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *hard_iface) { struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); struct ethhdr *ethhdr; /* drop packet if it has not necessary minimum size */ if (unlikely(!pskb_may_pull(skb, BATMAN_OGM_LEN))) return NET_RX_DROP; ethhdr = (struct ethhdr *)skb_mac_header(skb); /* packet with broadcast indication but unicast recipient */ if (!is_broadcast_ether_addr(ethhdr->h_dest)) return NET_RX_DROP; /* packet with broadcast sender address */ if (is_broadcast_ether_addr(ethhdr->h_source)) return NET_RX_DROP; /* create a copy of the skb, if needed, to modify it. 
*/ if (skb_cow(skb, 0) < 0) return NET_RX_DROP; /* keep skb linear */ if (skb_linearize(skb) < 0) return NET_RX_DROP; bat_priv->bat_algo_ops->bat_ogm_receive(hard_iface, skb); kfree_skb(skb); return NET_RX_SUCCESS; } static int recv_my_icmp_packet(struct bat_priv *bat_priv, struct sk_buff *skb, size_t icmp_len) { struct hard_iface *primary_if = NULL; struct orig_node *orig_node = NULL; struct neigh_node *router = NULL; struct icmp_packet_rr *icmp_packet; int ret = NET_RX_DROP; icmp_packet = (struct icmp_packet_rr *)skb->data; /* add data to device queue */ if (icmp_packet->msg_type != ECHO_REQUEST) { bat_socket_receive_packet(icmp_packet, icmp_len); goto out; } primary_if = primary_if_get_selected(bat_priv); if (!primary_if) goto out; /* answer echo request (ping) */ /* get routing information */ orig_node = orig_hash_find(bat_priv, icmp_packet->orig); if (!orig_node) goto out; router = orig_node_get_router(orig_node); if (!router) goto out; /* create a copy of the skb, if needed, to modify it. 
*/ if (skb_cow(skb, sizeof(struct ethhdr)) < 0) goto out; icmp_packet = (struct icmp_packet_rr *)skb->data; memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN); icmp_packet->msg_type = ECHO_REPLY; icmp_packet->header.ttl = TTL; send_skb_packet(skb, router->if_incoming, router->addr); ret = NET_RX_SUCCESS; out: if (primary_if) hardif_free_ref(primary_if); if (router) neigh_node_free_ref(router); if (orig_node) orig_node_free_ref(orig_node); return ret; } static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv, struct sk_buff *skb) { struct hard_iface *primary_if = NULL; struct orig_node *orig_node = NULL; struct neigh_node *router = NULL; struct icmp_packet *icmp_packet; int ret = NET_RX_DROP; icmp_packet = (struct icmp_packet *)skb->data; /* send TTL exceeded if packet is an echo request (traceroute) */ if (icmp_packet->msg_type != ECHO_REQUEST) { pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n", icmp_packet->orig, icmp_packet->dst); goto out; } primary_if = primary_if_get_selected(bat_priv); if (!primary_if) goto out; /* get routing information */ orig_node = orig_hash_find(bat_priv, icmp_packet->orig); if (!orig_node) goto out; router = orig_node_get_router(orig_node); if (!router) goto out; /* create a copy of the skb, if needed, to modify it. 
*/ if (skb_cow(skb, sizeof(struct ethhdr)) < 0) goto out; icmp_packet = (struct icmp_packet *)skb->data; memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN); icmp_packet->msg_type = TTL_EXCEEDED; icmp_packet->header.ttl = TTL; send_skb_packet(skb, router->if_incoming, router->addr); ret = NET_RX_SUCCESS; out: if (primary_if) hardif_free_ref(primary_if); if (router) neigh_node_free_ref(router); if (orig_node) orig_node_free_ref(orig_node); return ret; } int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if) { struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); struct icmp_packet_rr *icmp_packet; struct ethhdr *ethhdr; struct orig_node *orig_node = NULL; struct neigh_node *router = NULL; int hdr_size = sizeof(struct icmp_packet); int ret = NET_RX_DROP; /** * we truncate all incoming icmp packets if they don't match our size */ if (skb->len >= sizeof(struct icmp_packet_rr)) hdr_size = sizeof(struct icmp_packet_rr); /* drop packet if it has not necessary minimum size */ if (unlikely(!pskb_may_pull(skb, hdr_size))) goto out; ethhdr = (struct ethhdr *)skb_mac_header(skb); /* packet with unicast indication but broadcast recipient */ if (is_broadcast_ether_addr(ethhdr->h_dest)) goto out; /* packet with broadcast sender address */ if (is_broadcast_ether_addr(ethhdr->h_source)) goto out; /* not for me */ if (!is_my_mac(ethhdr->h_dest)) goto out; icmp_packet = (struct icmp_packet_rr *)skb->data; /* add record route information if not full */ if ((hdr_size == sizeof(struct icmp_packet_rr)) && (icmp_packet->rr_cur < BAT_RR_LEN)) { memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]), ethhdr->h_dest, ETH_ALEN); icmp_packet->rr_cur++; } /* packet for me */ if (is_my_mac(icmp_packet->dst)) return recv_my_icmp_packet(bat_priv, skb, hdr_size); /* TTL exceeded */ if (icmp_packet->header.ttl < 2) return recv_icmp_ttl_exceeded(bat_priv, skb); /* get routing information */ orig_node = 
orig_hash_find(bat_priv, icmp_packet->dst); if (!orig_node) goto out; router = orig_node_get_router(orig_node); if (!router) goto out; /* create a copy of the skb, if needed, to modify it. */ if (skb_cow(skb, sizeof(struct ethhdr)) < 0) goto out; icmp_packet = (struct icmp_packet_rr *)skb->data; /* decrement ttl */ icmp_packet->header.ttl--; /* route it */ send_skb_packet(skb, router->if_incoming, router->addr); ret = NET_RX_SUCCESS; out: if (router) neigh_node_free_ref(router); if (orig_node) orig_node_free_ref(orig_node); return ret; } /* In the bonding case, send the packets in a round * robin fashion over the remaining interfaces. * * This method rotates the bonding list and increases the * returned router's refcount. */ static struct neigh_node *find_bond_router(struct orig_node *primary_orig, const struct hard_iface *recv_if) { struct neigh_node *tmp_neigh_node; struct neigh_node *router = NULL, *first_candidate = NULL; rcu_read_lock(); list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list, bonding_list) { if (!first_candidate) first_candidate = tmp_neigh_node; /* recv_if == NULL on the first node. */ if (tmp_neigh_node->if_incoming == recv_if) continue; if (!atomic_inc_not_zero(&tmp_neigh_node->refcount)) continue; router = tmp_neigh_node; break; } /* use the first candidate if nothing was found. */ if (!router && first_candidate && atomic_inc_not_zero(&first_candidate->refcount)) router = first_candidate; if (!router) goto out; /* selected should point to the next element * after the current router */ spin_lock_bh(&primary_orig->neigh_list_lock); /* this is a list_move(), which unfortunately * does not exist as rcu version */ list_del_rcu(&primary_orig->bond_list); list_add_rcu(&primary_orig->bond_list, &router->bonding_list); spin_unlock_bh(&primary_orig->neigh_list_lock); out: rcu_read_unlock(); return router; } /* Interface Alternating: Use the best of the * remaining candidates which are not using * this interface. 
* * Increases the returned router's refcount */ static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig, const struct hard_iface *recv_if) { struct neigh_node *tmp_neigh_node; struct neigh_node *router = NULL, *first_candidate = NULL; rcu_read_lock(); list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list, bonding_list) { if (!first_candidate) first_candidate = tmp_neigh_node; /* recv_if == NULL on the first node. */ if (tmp_neigh_node->if_incoming == recv_if) continue; if (!atomic_inc_not_zero(&tmp_neigh_node->refcount)) continue; /* if we don't have a router yet * or this one is better, choose it. */ if ((!router) || (tmp_neigh_node->tq_avg > router->tq_avg)) { /* decrement refcount of * previously selected router */ if (router) neigh_node_free_ref(router); router = tmp_neigh_node; atomic_inc_not_zero(&router->refcount); } neigh_node_free_ref(tmp_neigh_node); } /* use the first candidate if nothing was found. */ if (!router && first_candidate && atomic_inc_not_zero(&first_candidate->refcount)) router = first_candidate; rcu_read_unlock(); return router; } int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if) { struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); struct tt_query_packet *tt_query; uint16_t tt_len; struct ethhdr *ethhdr; /* drop packet if it has not necessary minimum size */ if (unlikely(!pskb_may_pull(skb, sizeof(struct tt_query_packet)))) goto out; /* I could need to modify it */ if (skb_cow(skb, sizeof(struct tt_query_packet)) < 0) goto out; ethhdr = (struct ethhdr *)skb_mac_header(skb); /* packet with unicast indication but broadcast recipient */ if (is_broadcast_ether_addr(ethhdr->h_dest)) goto out; /* packet with broadcast sender address */ if (is_broadcast_ether_addr(ethhdr->h_source)) goto out; tt_query = (struct tt_query_packet *)skb->data; tt_query->tt_data = ntohs(tt_query->tt_data); switch (tt_query->flags & TT_QUERY_TYPE_MASK) { case TT_REQUEST: /* If we cannot provide an answer the 
tt_request is * forwarded */ if (!send_tt_response(bat_priv, tt_query)) { bat_dbg(DBG_TT, bat_priv, "Routing TT_REQUEST to %pM [%c]\n", tt_query->dst, (tt_query->flags & TT_FULL_TABLE ? 'F' : '.')); tt_query->tt_data = htons(tt_query->tt_data); return route_unicast_packet(skb, recv_if); } break; case TT_RESPONSE: if (is_my_mac(tt_query->dst)) { /* packet needs to be linearized to access the TT * changes */ if (skb_linearize(skb) < 0) goto out; tt_len = tt_query->tt_data * sizeof(struct tt_change); /* Ensure we have all the claimed data */ if (unlikely(skb_headlen(skb) < sizeof(struct tt_query_packet) + tt_len)) goto out; handle_tt_response(bat_priv, tt_query); } else { bat_dbg(DBG_TT, bat_priv, "Routing TT_RESPONSE to %pM [%c]\n", tt_query->dst, (tt_query->flags & TT_FULL_TABLE ? 'F' : '.')); tt_query->tt_data = htons(tt_query->tt_data); return route_unicast_packet(skb, recv_if); } break; } out: /* returning NET_RX_DROP will make the caller function kfree the skb */ return NET_RX_DROP; } int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if) { struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); struct roam_adv_packet *roam_adv_packet; struct orig_node *orig_node; struct ethhdr *ethhdr; /* drop packet if it has not necessary minimum size */ if (unlikely(!pskb_may_pull(skb, sizeof(struct roam_adv_packet)))) goto out; ethhdr = (struct ethhdr *)skb_mac_header(skb); /* packet with unicast indication but broadcast recipient */ if (is_broadcast_ether_addr(ethhdr->h_dest)) goto out; /* packet with broadcast sender address */ if (is_broadcast_ether_addr(ethhdr->h_source)) goto out; roam_adv_packet = (struct roam_adv_packet *)skb->data; if (!is_my_mac(roam_adv_packet->dst)) return route_unicast_packet(skb, recv_if); orig_node = orig_hash_find(bat_priv, roam_adv_packet->src); if (!orig_node) goto out; bat_dbg(DBG_TT, bat_priv, "Received ROAMING_ADV from %pM (client %pM)\n", roam_adv_packet->src, roam_adv_packet->client); tt_global_add(bat_priv, 
orig_node, roam_adv_packet->client, atomic_read(&orig_node->last_ttvn) + 1, true, false); /* Roaming phase starts: I have new information but the ttvn has not * been incremented yet. This flag will make me check all the incoming * packets for the correct destination. */ bat_priv->tt_poss_change = true; orig_node_free_ref(orig_node); out: /* returning NET_RX_DROP will make the caller function kfree the skb */ return NET_RX_DROP; } /* find a suitable router for this originator, and use * bonding if possible. increases the found neighbors * refcount.*/ struct neigh_node *find_router(struct bat_priv *bat_priv, struct orig_node *orig_node, const struct hard_iface *recv_if) { struct orig_node *primary_orig_node; struct orig_node *router_orig; struct neigh_node *router; static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; int bonding_enabled; if (!orig_node) return NULL; router = orig_node_get_router(orig_node); if (!router) goto err; /* without bonding, the first node should * always choose the default router. */ bonding_enabled = atomic_read(&bat_priv->bonding); rcu_read_lock(); /* select default router to output */ router_orig = router->orig_node; if (!router_orig) goto err_unlock; if ((!recv_if) && (!bonding_enabled)) goto return_router; /* if we have something in the primary_addr, we can search * for a potential bonding candidate. */ if (compare_eth(router_orig->primary_addr, zero_mac)) goto return_router; /* find the orig_node which has the primary interface. might * even be the same as our router_orig in many cases */ if (compare_eth(router_orig->primary_addr, router_orig->orig)) { primary_orig_node = router_orig; } else { primary_orig_node = orig_hash_find(bat_priv, router_orig->primary_addr); if (!primary_orig_node) goto return_router; orig_node_free_ref(primary_orig_node); } /* with less than 2 candidates, we can't do any * bonding and prefer the original router. 
*/ if (atomic_read(&primary_orig_node->bond_candidates) < 2) goto return_router; /* all nodes between should choose a candidate which * is is not on the interface where the packet came * in. */ neigh_node_free_ref(router); if (bonding_enabled) router = find_bond_router(primary_orig_node, recv_if); else router = find_ifalter_router(primary_orig_node, recv_if); return_router: if (router && router->if_incoming->if_status != IF_ACTIVE) goto err_unlock; rcu_read_unlock(); return router; err_unlock: rcu_read_unlock(); err: if (router) neigh_node_free_ref(router); return NULL; } static int check_unicast_packet(struct sk_buff *skb, int hdr_size) { struct ethhdr *ethhdr; /* drop packet if it has not necessary minimum size */ if (unlikely(!pskb_may_pull(skb, hdr_size))) return -1; ethhdr = (struct ethhdr *)skb_mac_header(skb); /* packet with unicast indication but broadcast recipient */ if (is_broadcast_ether_addr(ethhdr->h_dest)) return -1; /* packet with broadcast sender address */ if (is_broadcast_ether_addr(ethhdr->h_source)) return -1; /* not for me */ if (!is_my_mac(ethhdr->h_dest)) return -1; return 0; } int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) { struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); struct orig_node *orig_node = NULL; struct neigh_node *neigh_node = NULL; struct unicast_packet *unicast_packet; struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb); int ret = NET_RX_DROP; struct sk_buff *new_skb; unicast_packet = (struct unicast_packet *)skb->data; /* TTL exceeded */ if (unicast_packet->header.ttl < 2) { pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n", ethhdr->h_source, unicast_packet->dest); goto out; } /* get routing information */ orig_node = orig_hash_find(bat_priv, unicast_packet->dest); if (!orig_node) goto out; /* find_router() increases neigh_nodes refcount if found. 
*/ neigh_node = find_router(bat_priv, orig_node, recv_if); if (!neigh_node) goto out; /* create a copy of the skb, if needed, to modify it. */ if (skb_cow(skb, sizeof(struct ethhdr)) < 0) goto out; unicast_packet = (struct unicast_packet *)skb->data; if (unicast_packet->header.packet_type == BAT_UNICAST && atomic_read(&bat_priv->fragmentation) && skb->len > neigh_node->if_incoming->net_dev->mtu) { ret = frag_send_skb(skb, bat_priv, neigh_node->if_incoming, neigh_node->addr); goto out; } if (unicast_packet->header.packet_type == BAT_UNICAST_FRAG && frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) { ret = frag_reassemble_skb(skb, bat_priv, &new_skb); if (ret == NET_RX_DROP) goto out; /* packet was buffered for late merge */ if (!new_skb) { ret = NET_RX_SUCCESS; goto out; } skb = new_skb; unicast_packet = (struct unicast_packet *)skb->data; } /* decrement ttl */ unicast_packet->header.ttl--; /* route it */ send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); ret = NET_RX_SUCCESS; out: if (neigh_node) neigh_node_free_ref(neigh_node); if (orig_node) orig_node_free_ref(orig_node); return ret; } static int check_unicast_ttvn(struct bat_priv *bat_priv, struct sk_buff *skb) { uint8_t curr_ttvn; struct orig_node *orig_node; struct ethhdr *ethhdr; struct hard_iface *primary_if; struct unicast_packet *unicast_packet; bool tt_poss_change; /* I could need to modify it */ if (skb_cow(skb, sizeof(struct unicast_packet)) < 0) return 0; unicast_packet = (struct unicast_packet *)skb->data; if (is_my_mac(unicast_packet->dest)) { tt_poss_change = bat_priv->tt_poss_change; curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); } else { orig_node = orig_hash_find(bat_priv, unicast_packet->dest); if (!orig_node) return 0; curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); tt_poss_change = orig_node->tt_poss_change; orig_node_free_ref(orig_node); } /* Check whether I have to reroute the packet */ if (seq_before(unicast_packet->ttvn, curr_ttvn) || 
tt_poss_change) { /* Linearize the skb before accessing it */ if (skb_linearize(skb) < 0) return 0; ethhdr = (struct ethhdr *)(skb->data + sizeof(struct unicast_packet)); orig_node = transtable_search(bat_priv, NULL, ethhdr->h_dest); if (!orig_node) { if (!is_my_client(bat_priv, ethhdr->h_dest)) return 0; primary_if = primary_if_get_selected(bat_priv); if (!primary_if) return 0; memcpy(unicast_packet->dest, primary_if->net_dev->dev_addr, ETH_ALEN); hardif_free_ref(primary_if); } else { memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN); curr_ttvn = (uint8_t) atomic_read(&orig_node->last_ttvn); orig_node_free_ref(orig_node); } bat_dbg(DBG_ROUTES, bat_priv, "TTVN mismatch (old_ttvn %u new_ttvn %u)! Rerouting unicast packet (for %pM) to %pM\n", unicast_packet->ttvn, curr_ttvn, ethhdr->h_dest, unicast_packet->dest); unicast_packet->ttvn = curr_ttvn; } return 1; } int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) { struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); struct unicast_packet *unicast_packet; int hdr_size = sizeof(*unicast_packet); if (check_unicast_packet(skb, hdr_size) < 0) return NET_RX_DROP; if (!check_unicast_ttvn(bat_priv, skb)) return NET_RX_DROP; unicast_packet = (struct unicast_packet *)skb->data; /* packet for me */ if (is_my_mac(unicast_packet->dest)) { interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); return NET_RX_SUCCESS; } return route_unicast_packet(skb, recv_if); } int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if) { struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); struct unicast_frag_packet *unicast_packet; int hdr_size = sizeof(*unicast_packet); struct sk_buff *new_skb = NULL; int ret; if (check_unicast_packet(skb, hdr_size) < 0) return NET_RX_DROP; if (!check_unicast_ttvn(bat_priv, skb)) return NET_RX_DROP; unicast_packet = (struct unicast_frag_packet *)skb->data; /* packet for me */ if (is_my_mac(unicast_packet->dest)) { ret = frag_reassemble_skb(skb, 
bat_priv, &new_skb); if (ret == NET_RX_DROP) return NET_RX_DROP; /* packet was buffered for late merge */ if (!new_skb) return NET_RX_SUCCESS; interface_rx(recv_if->soft_iface, new_skb, recv_if, sizeof(struct unicast_packet)); return NET_RX_SUCCESS; } return route_unicast_packet(skb, recv_if); } int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if) { struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); struct orig_node *orig_node = NULL; struct bcast_packet *bcast_packet; struct ethhdr *ethhdr; int hdr_size = sizeof(*bcast_packet); int ret = NET_RX_DROP; int32_t seq_diff; /* drop packet if it has not necessary minimum size */ if (unlikely(!pskb_may_pull(skb, hdr_size))) goto out; ethhdr = (struct ethhdr *)skb_mac_header(skb); /* packet with broadcast indication but unicast recipient */ if (!is_broadcast_ether_addr(ethhdr->h_dest)) goto out; /* packet with broadcast sender address */ if (is_broadcast_ether_addr(ethhdr->h_source)) goto out; /* ignore broadcasts sent by myself */ if (is_my_mac(ethhdr->h_source)) goto out; bcast_packet = (struct bcast_packet *)skb->data; /* ignore broadcasts originated by myself */ if (is_my_mac(bcast_packet->orig)) goto out; if (bcast_packet->header.ttl < 2) goto out; orig_node = orig_hash_find(bat_priv, bcast_packet->orig); if (!orig_node) goto out; spin_lock_bh(&orig_node->bcast_seqno_lock); /* check whether the packet is a duplicate */ if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno, ntohl(bcast_packet->seqno))) goto spin_unlock; seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno; /* check whether the packet is old and the host just restarted. */ if (window_protected(bat_priv, seq_diff, &orig_node->bcast_seqno_reset)) goto spin_unlock; /* mark broadcast in flood history, update window position * if required. 
*/ if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1)) orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno); spin_unlock_bh(&orig_node->bcast_seqno_lock); /* rebroadcast packet */ add_bcast_packet_to_list(bat_priv, skb, 1); /* broadcast for me */ interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); ret = NET_RX_SUCCESS; goto out; spin_unlock: spin_unlock_bh(&orig_node->bcast_seqno_lock); out: if (orig_node) orig_node_free_ref(orig_node); return ret; } int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if) { struct vis_packet *vis_packet; struct ethhdr *ethhdr; struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); int hdr_size = sizeof(*vis_packet); /* keep skb linear */ if (skb_linearize(skb) < 0) return NET_RX_DROP; if (unlikely(!pskb_may_pull(skb, hdr_size))) return NET_RX_DROP; vis_packet = (struct vis_packet *)skb->data; ethhdr = (struct ethhdr *)skb_mac_header(skb); /* not for me */ if (!is_my_mac(ethhdr->h_dest)) return NET_RX_DROP; /* ignore own packets */ if (is_my_mac(vis_packet->vis_orig)) return NET_RX_DROP; if (is_my_mac(vis_packet->sender_orig)) return NET_RX_DROP; switch (vis_packet->vis_type) { case VIS_TYPE_SERVER_SYNC: receive_server_sync_packet(bat_priv, vis_packet, skb_headlen(skb)); break; case VIS_TYPE_CLIENT_UPDATE: receive_client_update_packet(bat_priv, vis_packet, skb_headlen(skb)); break; default: /* ignore unknown packet */ break; } /* We take a copy of the data in the packet, so we should always free the skbuf. */ return NET_RX_DROP; }
gpl-2.0
Happy-Ferret/Kernel-Experiments
drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
2766
16273
/* * Copyright © 2010 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Authors: * Jackie Li<yaodong.li@intel.com> */ #include <linux/freezer.h> #include "mdfld_dsi_output.h" #include "mdfld_dsi_pkg_sender.h" #include "mdfld_dsi_dpi.h" #define MDFLD_DSI_READ_MAX_COUNT 5000 enum data_type { DSI_DT_GENERIC_SHORT_WRITE_0 = 0x03, DSI_DT_GENERIC_SHORT_WRITE_1 = 0x13, DSI_DT_GENERIC_SHORT_WRITE_2 = 0x23, DSI_DT_GENERIC_READ_0 = 0x04, DSI_DT_GENERIC_READ_1 = 0x14, DSI_DT_GENERIC_READ_2 = 0x24, DSI_DT_GENERIC_LONG_WRITE = 0x29, DSI_DT_DCS_SHORT_WRITE_0 = 0x05, DSI_DT_DCS_SHORT_WRITE_1 = 0x15, DSI_DT_DCS_READ = 0x06, DSI_DT_DCS_LONG_WRITE = 0x39, }; enum { MDFLD_DSI_PANEL_MODE_SLEEP = 0x1, }; enum { MDFLD_DSI_PKG_SENDER_FREE = 0x0, MDFLD_DSI_PKG_SENDER_BUSY = 0x1, }; static const char *const dsi_errors[] = { "RX SOT Error", "RX SOT Sync Error", "RX EOT Sync Error", "RX Escape Mode Entry Error", "RX LP TX Sync Error", "RX HS Receive Timeout Error", "RX False Control Error", "RX ECC Single Bit Error", "RX ECC Multibit Error", "RX Checksum Error", "RX DSI Data Type Not Recognised", "RX DSI VC ID Invalid", "TX False Control Error", "TX ECC Single Bit Error", "TX ECC Multibit Error", "TX Checksum Error", "TX DSI Data Type Not Recognised", "TX DSI VC ID invalid", "High Contention", "Low contention", "DPI FIFO Under run", "HS TX Timeout", "LP RX Timeout", "Turn Around ACK Timeout", "ACK With No Error", "RX Invalid TX Length", "RX Prot Violation", "HS Generic Write FIFO Full", "LP Generic Write FIFO Full", "Generic Read Data Avail" "Special Packet Sent", "Tearing Effect", }; static inline int wait_for_gen_fifo_empty(struct mdfld_dsi_pkg_sender *sender, u32 mask) { struct drm_device *dev = sender->dev; u32 gen_fifo_stat_reg = sender->mipi_gen_fifo_stat_reg; int retry = 0xffff; while (retry--) { if ((mask & REG_READ(gen_fifo_stat_reg)) == mask) return 0; udelay(100); } DRM_ERROR("fifo is NOT empty 0x%08x\n", REG_READ(gen_fifo_stat_reg)); return -EIO; } static int wait_for_all_fifos_empty(struct mdfld_dsi_pkg_sender *sender) { return 
wait_for_gen_fifo_empty(sender, (BIT(2) | BIT(10) | BIT(18) | BIT(26) | BIT(27) | BIT(28))); } static int wait_for_lp_fifos_empty(struct mdfld_dsi_pkg_sender *sender) { return wait_for_gen_fifo_empty(sender, (BIT(10) | BIT(26))); } static int wait_for_hs_fifos_empty(struct mdfld_dsi_pkg_sender *sender) { return wait_for_gen_fifo_empty(sender, (BIT(2) | BIT(18))); } static int handle_dsi_error(struct mdfld_dsi_pkg_sender *sender, u32 mask) { u32 intr_stat_reg = sender->mipi_intr_stat_reg; struct drm_device *dev = sender->dev; dev_dbg(sender->dev->dev, "Handling error 0x%08x\n", mask); switch (mask) { case BIT(0): case BIT(1): case BIT(2): case BIT(3): case BIT(4): case BIT(5): case BIT(6): case BIT(7): case BIT(8): case BIT(9): case BIT(10): case BIT(11): case BIT(12): case BIT(13): dev_dbg(sender->dev->dev, "No Action required\n"); break; case BIT(14): /*wait for all fifo empty*/ /*wait_for_all_fifos_empty(sender)*/; break; case BIT(15): dev_dbg(sender->dev->dev, "No Action required\n"); break; case BIT(16): break; case BIT(17): break; case BIT(18): case BIT(19): dev_dbg(sender->dev->dev, "High/Low contention detected\n"); /*wait for contention recovery time*/ /*mdelay(10);*/ /*wait for all fifo empty*/ if (0) wait_for_all_fifos_empty(sender); break; case BIT(20): dev_dbg(sender->dev->dev, "No Action required\n"); break; case BIT(21): /*wait for all fifo empty*/ /*wait_for_all_fifos_empty(sender);*/ break; case BIT(22): break; case BIT(23): case BIT(24): case BIT(25): case BIT(26): case BIT(27): dev_dbg(sender->dev->dev, "HS Gen fifo full\n"); REG_WRITE(intr_stat_reg, mask); wait_for_hs_fifos_empty(sender); break; case BIT(28): dev_dbg(sender->dev->dev, "LP Gen fifo full\n"); REG_WRITE(intr_stat_reg, mask); wait_for_lp_fifos_empty(sender); break; case BIT(29): case BIT(30): case BIT(31): dev_dbg(sender->dev->dev, "No Action required\n"); break; } if (mask & REG_READ(intr_stat_reg)) dev_dbg(sender->dev->dev, "Cannot clean interrupt 0x%08x\n", mask); return 0; } 
static int dsi_error_handler(struct mdfld_dsi_pkg_sender *sender) { struct drm_device *dev = sender->dev; u32 intr_stat_reg = sender->mipi_intr_stat_reg; u32 mask; u32 intr_stat; int i; int err = 0; intr_stat = REG_READ(intr_stat_reg); for (i = 0; i < 32; i++) { mask = (0x00000001UL) << i; if (intr_stat & mask) { dev_dbg(sender->dev->dev, "[DSI]: %s\n", dsi_errors[i]); err = handle_dsi_error(sender, mask); if (err) DRM_ERROR("Cannot handle error\n"); } } return err; } static int send_short_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type, u8 cmd, u8 param, bool hs) { struct drm_device *dev = sender->dev; u32 ctrl_reg; u32 val; u8 virtual_channel = 0; if (hs) { ctrl_reg = sender->mipi_hs_gen_ctrl_reg; /* FIXME: wait_for_hs_fifos_empty(sender); */ } else { ctrl_reg = sender->mipi_lp_gen_ctrl_reg; /* FIXME: wait_for_lp_fifos_empty(sender); */ } val = FLD_VAL(param, 23, 16) | FLD_VAL(cmd, 15, 8) | FLD_VAL(virtual_channel, 7, 6) | FLD_VAL(data_type, 5, 0); REG_WRITE(ctrl_reg, val); return 0; } static int send_long_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type, u8 *data, int len, bool hs) { struct drm_device *dev = sender->dev; u32 ctrl_reg; u32 data_reg; u32 val; u8 *p; u8 b1, b2, b3, b4; u8 virtual_channel = 0; int i; if (hs) { ctrl_reg = sender->mipi_hs_gen_ctrl_reg; data_reg = sender->mipi_hs_gen_data_reg; /* FIXME: wait_for_hs_fifos_empty(sender); */ } else { ctrl_reg = sender->mipi_lp_gen_ctrl_reg; data_reg = sender->mipi_lp_gen_data_reg; /* FIXME: wait_for_lp_fifos_empty(sender); */ } p = data; for (i = 0; i < len / 4; i++) { b1 = *p++; b2 = *p++; b3 = *p++; b4 = *p++; REG_WRITE(data_reg, b4 << 24 | b3 << 16 | b2 << 8 | b1); } i = len % 4; if (i) { b1 = 0; b2 = 0; b3 = 0; switch (i) { case 3: b1 = *p++; b2 = *p++; b3 = *p++; break; case 2: b1 = *p++; b2 = *p++; break; case 1: b1 = *p++; break; } REG_WRITE(data_reg, b3 << 16 | b2 << 8 | b1); } val = FLD_VAL(len, 23, 8) | FLD_VAL(virtual_channel, 7, 6) | FLD_VAL(data_type, 5, 0); REG_WRITE(ctrl_reg, 
val); return 0; } static int send_pkg_prepare(struct mdfld_dsi_pkg_sender *sender, u8 data_type, u8 *data, u16 len) { u8 cmd; switch (data_type) { case DSI_DT_DCS_SHORT_WRITE_0: case DSI_DT_DCS_SHORT_WRITE_1: case DSI_DT_DCS_LONG_WRITE: cmd = *data; break; default: return 0; } /*this prevents other package sending while doing msleep*/ sender->status = MDFLD_DSI_PKG_SENDER_BUSY; /*wait for 120 milliseconds in case exit_sleep_mode just be sent*/ if (unlikely(cmd == DCS_ENTER_SLEEP_MODE)) { /*TODO: replace it with msleep later*/ mdelay(120); } if (unlikely(cmd == DCS_EXIT_SLEEP_MODE)) { /*TODO: replace it with msleep later*/ mdelay(120); } return 0; } static int send_pkg_done(struct mdfld_dsi_pkg_sender *sender, u8 data_type, u8 *data, u16 len) { u8 cmd; switch (data_type) { case DSI_DT_DCS_SHORT_WRITE_0: case DSI_DT_DCS_SHORT_WRITE_1: case DSI_DT_DCS_LONG_WRITE: cmd = *data; break; default: return 0; } /*update panel status*/ if (unlikely(cmd == DCS_ENTER_SLEEP_MODE)) { sender->panel_mode |= MDFLD_DSI_PANEL_MODE_SLEEP; /*TODO: replace it with msleep later*/ mdelay(120); } else if (unlikely(cmd == DCS_EXIT_SLEEP_MODE)) { sender->panel_mode &= ~MDFLD_DSI_PANEL_MODE_SLEEP; /*TODO: replace it with msleep later*/ mdelay(120); } else if (unlikely(cmd == DCS_SOFT_RESET)) { /*TODO: replace it with msleep later*/ mdelay(5); } sender->status = MDFLD_DSI_PKG_SENDER_FREE; return 0; } static int send_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type, u8 *data, u16 len, bool hs) { int ret; /*handle DSI error*/ ret = dsi_error_handler(sender); if (ret) { DRM_ERROR("Error handling failed\n"); return -EAGAIN; } /* send pkg */ if (sender->status == MDFLD_DSI_PKG_SENDER_BUSY) { DRM_ERROR("sender is busy\n"); return -EAGAIN; } ret = send_pkg_prepare(sender, data_type, data, len); if (ret) { DRM_ERROR("send_pkg_prepare error\n"); return ret; } switch (data_type) { case DSI_DT_GENERIC_SHORT_WRITE_0: case DSI_DT_GENERIC_SHORT_WRITE_1: case DSI_DT_GENERIC_SHORT_WRITE_2: case 
DSI_DT_GENERIC_READ_0: case DSI_DT_GENERIC_READ_1: case DSI_DT_GENERIC_READ_2: case DSI_DT_DCS_SHORT_WRITE_0: case DSI_DT_DCS_SHORT_WRITE_1: case DSI_DT_DCS_READ: ret = send_short_pkg(sender, data_type, data[0], data[1], hs); break; case DSI_DT_GENERIC_LONG_WRITE: case DSI_DT_DCS_LONG_WRITE: ret = send_long_pkg(sender, data_type, data, len, hs); break; } send_pkg_done(sender, data_type, data, len); /*FIXME: should I query complete and fifo empty here?*/ return ret; } int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender, u8 *data, u32 len, bool hs) { unsigned long flags; if (!sender || !data || !len) { DRM_ERROR("Invalid parameters\n"); return -EINVAL; } spin_lock_irqsave(&sender->lock, flags); send_pkg(sender, DSI_DT_DCS_LONG_WRITE, data, len, hs); spin_unlock_irqrestore(&sender->lock, flags); return 0; } int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender, u8 cmd, u8 param, u8 param_num, bool hs) { u8 data[2]; unsigned long flags; u8 data_type; if (!sender) { DRM_ERROR("Invalid parameter\n"); return -EINVAL; } data[0] = cmd; if (param_num) { data_type = DSI_DT_DCS_SHORT_WRITE_1; data[1] = param; } else { data_type = DSI_DT_DCS_SHORT_WRITE_0; data[1] = 0; } spin_lock_irqsave(&sender->lock, flags); send_pkg(sender, data_type, data, sizeof(data), hs); spin_unlock_irqrestore(&sender->lock, flags); return 0; } int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender, u8 param0, u8 param1, u8 param_num, bool hs) { u8 data[2]; unsigned long flags; u8 data_type; if (!sender || param_num > 2) { DRM_ERROR("Invalid parameter\n"); return -EINVAL; } switch (param_num) { case 0: data_type = DSI_DT_GENERIC_SHORT_WRITE_0; data[0] = 0; data[1] = 0; break; case 1: data_type = DSI_DT_GENERIC_SHORT_WRITE_1; data[0] = param0; data[1] = 0; break; case 2: data_type = DSI_DT_GENERIC_SHORT_WRITE_2; data[0] = param0; data[1] = param1; break; } spin_lock_irqsave(&sender->lock, flags); send_pkg(sender, data_type, data, sizeof(data), hs); 
spin_unlock_irqrestore(&sender->lock, flags); return 0; } int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender, u8 *data, u32 len, bool hs) { unsigned long flags; if (!sender || !data || !len) { DRM_ERROR("Invalid parameters\n"); return -EINVAL; } spin_lock_irqsave(&sender->lock, flags); send_pkg(sender, DSI_DT_GENERIC_LONG_WRITE, data, len, hs); spin_unlock_irqrestore(&sender->lock, flags); return 0; } static int __read_panel_data(struct mdfld_dsi_pkg_sender *sender, u8 data_type, u8 *data, u16 len, u32 *data_out, u16 len_out, bool hs) { unsigned long flags; struct drm_device *dev = sender->dev; int i; u32 gen_data_reg; int retry = MDFLD_DSI_READ_MAX_COUNT; if (!sender || !data_out || !len_out) { DRM_ERROR("Invalid parameters\n"); return -EINVAL; } /** * do reading. * 0) send out generic read request * 1) polling read data avail interrupt * 2) read data */ spin_lock_irqsave(&sender->lock, flags); REG_WRITE(sender->mipi_intr_stat_reg, BIT(29)); if ((REG_READ(sender->mipi_intr_stat_reg) & BIT(29))) DRM_ERROR("Can NOT clean read data valid interrupt\n"); /*send out read request*/ send_pkg(sender, data_type, data, len, hs); /*polling read data avail interrupt*/ while (retry && !(REG_READ(sender->mipi_intr_stat_reg) & BIT(29))) { udelay(100); retry--; } if (!retry) { spin_unlock_irqrestore(&sender->lock, flags); return -ETIMEDOUT; } REG_WRITE(sender->mipi_intr_stat_reg, BIT(29)); /*read data*/ if (hs) gen_data_reg = sender->mipi_hs_gen_data_reg; else gen_data_reg = sender->mipi_lp_gen_data_reg; for (i = 0; i < len_out; i++) *(data_out + i) = REG_READ(gen_data_reg); spin_unlock_irqrestore(&sender->lock, flags); return 0; } int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender, u8 cmd, u32 *data, u16 len, bool hs) { if (!sender || !data || !len) { DRM_ERROR("Invalid parameters\n"); return -EINVAL; } return __read_panel_data(sender, DSI_DT_DCS_READ, &cmd, 1, data, len, hs); } int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector, int 
pipe) { struct mdfld_dsi_pkg_sender *pkg_sender; struct mdfld_dsi_config *dsi_config = mdfld_dsi_get_config(dsi_connector); struct drm_device *dev = dsi_config->dev; struct drm_psb_private *dev_priv = dev->dev_private; const struct psb_offset *map = &dev_priv->regmap[pipe]; u32 mipi_val = 0; if (!dsi_connector) { DRM_ERROR("Invalid parameter\n"); return -EINVAL; } pkg_sender = dsi_connector->pkg_sender; if (!pkg_sender || IS_ERR(pkg_sender)) { pkg_sender = kzalloc(sizeof(struct mdfld_dsi_pkg_sender), GFP_KERNEL); if (!pkg_sender) { DRM_ERROR("Create DSI pkg sender failed\n"); return -ENOMEM; } dsi_connector->pkg_sender = (void *)pkg_sender; } pkg_sender->dev = dev; pkg_sender->dsi_connector = dsi_connector; pkg_sender->pipe = pipe; pkg_sender->pkg_num = 0; pkg_sender->panel_mode = 0; pkg_sender->status = MDFLD_DSI_PKG_SENDER_FREE; /*init regs*/ /* FIXME: should just copy the regmap ptr ? */ pkg_sender->dpll_reg = map->dpll; pkg_sender->dspcntr_reg = map->cntr; pkg_sender->pipeconf_reg = map->conf; pkg_sender->dsplinoff_reg = map->linoff; pkg_sender->dspsurf_reg = map->surf; pkg_sender->pipestat_reg = map->status; pkg_sender->mipi_intr_stat_reg = MIPI_INTR_STAT_REG(pipe); pkg_sender->mipi_lp_gen_data_reg = MIPI_LP_GEN_DATA_REG(pipe); pkg_sender->mipi_hs_gen_data_reg = MIPI_HS_GEN_DATA_REG(pipe); pkg_sender->mipi_lp_gen_ctrl_reg = MIPI_LP_GEN_CTRL_REG(pipe); pkg_sender->mipi_hs_gen_ctrl_reg = MIPI_HS_GEN_CTRL_REG(pipe); pkg_sender->mipi_gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe); pkg_sender->mipi_data_addr_reg = MIPI_DATA_ADD_REG(pipe); pkg_sender->mipi_data_len_reg = MIPI_DATA_LEN_REG(pipe); pkg_sender->mipi_cmd_addr_reg = MIPI_CMD_ADD_REG(pipe); pkg_sender->mipi_cmd_len_reg = MIPI_CMD_LEN_REG(pipe); /*init lock*/ spin_lock_init(&pkg_sender->lock); if (mdfld_get_panel_type(dev, pipe) != TC35876X) { /** * For video mode, don't enable DPI timing output here, * will init the DPI timing output during mode setting. 
*/ mipi_val = PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX; if (pipe == 0) mipi_val |= 0x2; REG_WRITE(MIPI_PORT_CONTROL(pipe), mipi_val); REG_READ(MIPI_PORT_CONTROL(pipe)); /* do dsi controller init */ mdfld_dsi_controller_init(dsi_config, pipe); } return 0; } void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender) { if (!sender || IS_ERR(sender)) return; /*free*/ kfree(sender); }
gpl-2.0
TiaLuna/Nexus_S_kernel
drivers/staging/vt6656/wmgr.c
2766
171178
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * * File: wmgr.c * * Purpose: Handles the 802.11 management functions * * Author: Lyndon Chen * * Date: May 8, 2002 * * Functions: * nsMgrObjectInitial - Initialize Management Objet data structure * vMgrObjectReset - Reset Management Objet data structure * vMgrAssocBeginSta - Start associate function * vMgrReAssocBeginSta - Start reassociate function * vMgrDisassocBeginSta - Start disassociate function * s_vMgrRxAssocRequest - Handle Rcv associate_request * s_vMgrRxAssocResponse - Handle Rcv associate_response * vMrgAuthenBeginSta - Start authentication function * vMgrDeAuthenDeginSta - Start deauthentication function * s_vMgrRxAuthentication - Handle Rcv authentication * s_vMgrRxAuthenSequence_1 - Handle Rcv authentication sequence 1 * s_vMgrRxAuthenSequence_2 - Handle Rcv authentication sequence 2 * s_vMgrRxAuthenSequence_3 - Handle Rcv authentication sequence 3 * s_vMgrRxAuthenSequence_4 - Handle Rcv authentication sequence 4 * s_vMgrRxDisassociation - Handle Rcv disassociation * s_vMgrRxBeacon - Handle Rcv Beacon * vMgrCreateOwnIBSS - Create ad_hoc IBSS or AP BSS * vMgrJoinBSSBegin - Join BSS function * s_vMgrSynchBSS - Synch & adopt BSS parameters * s_MgrMakeBeacon - Create Baecon 
frame * s_MgrMakeProbeResponse - Create Probe Response frame * s_MgrMakeAssocRequest - Create Associate Request frame * s_MgrMakeReAssocRequest - Create ReAssociate Request frame * s_vMgrRxProbeResponse - Handle Rcv probe_response * s_vMrgRxProbeRequest - Handle Rcv probe_request * bMgrPrepareBeaconToSend - Prepare Beacon frame * s_vMgrLogStatus - Log 802.11 Status * vMgrRxManagePacket - Rcv management frame dispatch function * s_vMgrFormatTIM- Assember TIM field of beacon * vMgrTimerInit- Initial 1-sec and command call back funtions * * Revision History: * */ #include "tmacro.h" #include "desc.h" #include "device.h" #include "card.h" #include "80211hdr.h" #include "80211mgr.h" #include "wmgr.h" #include "wcmd.h" #include "mac.h" #include "bssdb.h" #include "power.h" #include "datarate.h" #include "baseband.h" #include "rxtx.h" #include "wpa.h" #include "rf.h" #include "iowpa.h" #include "control.h" #include "rndis.h" /*--------------------- Static Definitions -------------------------*/ /*--------------------- Static Classes ----------------------------*/ /*--------------------- Static Variables --------------------------*/ static int msglevel =MSG_LEVEL_INFO; //static int msglevel =MSG_LEVEL_DEBUG; /*--------------------- Static Functions --------------------------*/ static BOOL ChannelExceedZoneType( PSDevice pDevice, BYTE byCurrChannel ); // Association/diassociation functions static PSTxMgmtPacket s_MgrMakeAssocRequest( PSDevice pDevice, PSMgmtObject pMgmt, PBYTE pDAddr, WORD wCurrCapInfo, WORD wListenInterval, PWLAN_IE_SSID pCurrSSID, PWLAN_IE_SUPP_RATES pCurrRates, PWLAN_IE_SUPP_RATES pCurrExtSuppRates ); static void s_vMgrRxAssocRequest( PSDevice pDevice, PSMgmtObject pMgmt, PSRxMgmtPacket pRxPacket, unsigned int uNodeIndex ); static PSTxMgmtPacket s_MgrMakeReAssocRequest( PSDevice pDevice, PSMgmtObject pMgmt, PBYTE pDAddr, WORD wCurrCapInfo, WORD wListenInterval, PWLAN_IE_SSID pCurrSSID, PWLAN_IE_SUPP_RATES pCurrRates, PWLAN_IE_SUPP_RATES pCurrExtSuppRates 
); static void s_vMgrRxAssocResponse( PSDevice pDevice, PSMgmtObject pMgmt, PSRxMgmtPacket pRxPacket, BOOL bReAssocType ); static void s_vMgrRxDisassociation( PSDevice pDevice, PSMgmtObject pMgmt, PSRxMgmtPacket pRxPacket ); // Authentication/deauthen functions static void s_vMgrRxAuthenSequence_1( PSDevice pDevice, PSMgmtObject pMgmt, PWLAN_FR_AUTHEN pFrame ); static void s_vMgrRxAuthenSequence_2( PSDevice pDevice, PSMgmtObject pMgmt, PWLAN_FR_AUTHEN pFrame ); static void s_vMgrRxAuthenSequence_3( PSDevice pDevice, PSMgmtObject pMgmt, PWLAN_FR_AUTHEN pFrame ); static void s_vMgrRxAuthenSequence_4( PSDevice pDevice, PSMgmtObject pMgmt, PWLAN_FR_AUTHEN pFrame ); static void s_vMgrRxAuthentication( PSDevice pDevice, PSMgmtObject pMgmt, PSRxMgmtPacket pRxPacket ); static void s_vMgrRxDeauthentication( PSDevice pDevice, PSMgmtObject pMgmt, PSRxMgmtPacket pRxPacket ); // Scan functions // probe request/response functions static void s_vMgrRxProbeRequest( PSDevice pDevice, PSMgmtObject pMgmt, PSRxMgmtPacket pRxPacket ); static void s_vMgrRxProbeResponse( PSDevice pDevice, PSMgmtObject pMgmt, PSRxMgmtPacket pRxPacket ); // beacon functions static void s_vMgrRxBeacon( PSDevice pDevice, PSMgmtObject pMgmt, PSRxMgmtPacket pRxPacket, BOOL bInScan ); static void s_vMgrFormatTIM( PSMgmtObject pMgmt, PWLAN_IE_TIM pTIM ); static PSTxMgmtPacket s_MgrMakeBeacon( PSDevice pDevice, PSMgmtObject pMgmt, WORD wCurrCapInfo, WORD wCurrBeaconPeriod, unsigned int uCurrChannel, WORD wCurrATIMWinodw, PWLAN_IE_SSID pCurrSSID, PBYTE pCurrBSSID, PWLAN_IE_SUPP_RATES pCurrSuppRates, PWLAN_IE_SUPP_RATES pCurrExtSuppRates ); // Association response static PSTxMgmtPacket s_MgrMakeAssocResponse( PSDevice pDevice, PSMgmtObject pMgmt, WORD wCurrCapInfo, WORD wAssocStatus, WORD wAssocAID, PBYTE pDstAddr, PWLAN_IE_SUPP_RATES pCurrSuppRates, PWLAN_IE_SUPP_RATES pCurrExtSuppRates ); // ReAssociation response static PSTxMgmtPacket s_MgrMakeReAssocResponse( PSDevice pDevice, PSMgmtObject pMgmt, WORD 
wCurrCapInfo, WORD wAssocStatus, WORD wAssocAID, PBYTE pDstAddr, PWLAN_IE_SUPP_RATES pCurrSuppRates, PWLAN_IE_SUPP_RATES pCurrExtSuppRates ); // Probe response static PSTxMgmtPacket s_MgrMakeProbeResponse( PSDevice pDevice, PSMgmtObject pMgmt, WORD wCurrCapInfo, WORD wCurrBeaconPeriod, unsigned int uCurrChannel, WORD wCurrATIMWinodw, PBYTE pDstAddr, PWLAN_IE_SSID pCurrSSID, PBYTE pCurrBSSID, PWLAN_IE_SUPP_RATES pCurrSuppRates, PWLAN_IE_SUPP_RATES pCurrExtSuppRates, BYTE byPHYType ); // received status static void s_vMgrLogStatus( PSMgmtObject pMgmt, WORD wStatus ); static void s_vMgrSynchBSS ( PSDevice pDevice, unsigned int uBSSMode, PKnownBSS pCurr, PCMD_STATUS pStatus ); static BOOL s_bCipherMatch ( PKnownBSS pBSSNode, NDIS_802_11_ENCRYPTION_STATUS EncStatus, PBYTE pbyCCSPK, PBYTE pbyCCSGK ); static void Encyption_Rebuild( PSDevice pDevice, PKnownBSS pCurr ); /*--------------------- Export Variables --------------------------*/ /*--------------------- Export Functions --------------------------*/ /*+ * * Routine Description: * Allocates and initializes the Management object. * * Return Value: * Ndis_staus. 
* -*/ void vMgrObjectInit(void *hDeviceContext) { PSDevice pDevice = (PSDevice)hDeviceContext; PSMgmtObject pMgmt = &(pDevice->sMgmtObj); int ii; pMgmt->pbyPSPacketPool = &pMgmt->byPSPacketPool[0]; pMgmt->pbyMgmtPacketPool = &pMgmt->byMgmtPacketPool[0]; pMgmt->uCurrChannel = pDevice->uChannel; for (ii = 0; ii < WLAN_BSSID_LEN; ii++) pMgmt->abyDesireBSSID[ii] = 0xFF; pMgmt->sAssocInfo.AssocInfo.Length = sizeof(NDIS_802_11_ASSOCIATION_INFORMATION); //memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN +1); pMgmt->byCSSPK = KEY_CTL_NONE; pMgmt->byCSSGK = KEY_CTL_NONE; pMgmt->wIBSSBeaconPeriod = DEFAULT_IBSS_BI; BSSvClearBSSList((void *) pDevice, FALSE); init_timer(&pMgmt->sTimerSecondCallback); pMgmt->sTimerSecondCallback.data = (unsigned long)pDevice; pMgmt->sTimerSecondCallback.function = (TimerFunction)BSSvSecondCallBack; pMgmt->sTimerSecondCallback.expires = RUN_AT(HZ); init_timer(&pDevice->sTimerCommand); pDevice->sTimerCommand.data = (unsigned long)pDevice; pDevice->sTimerCommand.function = (TimerFunction)vRunCommand; pDevice->sTimerCommand.expires = RUN_AT(HZ); init_timer(&pDevice->sTimerTxData); pDevice->sTimerTxData.data = (unsigned long)pDevice; pDevice->sTimerTxData.function = (TimerFunction)BSSvSecondTxData; pDevice->sTimerTxData.expires = RUN_AT(10*HZ); //10s callback pDevice->fTxDataInSleep = FALSE; pDevice->IsTxDataTrigger = FALSE; pDevice->nTxDataTimeCout = 0; pDevice->cbFreeCmdQueue = CMD_Q_SIZE; pDevice->uCmdDequeueIdx = 0; pDevice->uCmdEnqueueIdx = 0; pDevice->eCommandState = WLAN_CMD_IDLE; pDevice->bCmdRunning = FALSE; pDevice->bCmdClear = FALSE; return; } /*+ * * Routine Description: * Start the station association procedure. Namely, send an * association request frame to the AP. * * Return Value: * None. 
* -*/ void vMgrAssocBeginSta(void *hDeviceContext, PSMgmtObject pMgmt, PCMD_STATUS pStatus) { PSDevice pDevice = (PSDevice)hDeviceContext; PSTxMgmtPacket pTxPacket; pMgmt->wCurrCapInfo = 0; pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_ESS(1); if (pDevice->bEncryptionEnable) { pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_PRIVACY(1); } // always allow receive short preamble //if (pDevice->byPreambleType == 1) { // pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1); //} pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1); if (pMgmt->wListenInterval == 0) pMgmt->wListenInterval = 1; // at least one. // ERP Phy (802.11g) should support short preamble. if (pMgmt->eCurrentPHYMode == PHY_TYPE_11G) { pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1); if (pDevice->bShortSlotTime == TRUE) pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTSLOTTIME(1); } else if (pMgmt->eCurrentPHYMode == PHY_TYPE_11B) { if (pDevice->byPreambleType == 1) { pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1); } } if (pMgmt->b11hEnable == TRUE) pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SPECTRUMMNG(1); // build an assocreq frame and send it pTxPacket = s_MgrMakeAssocRequest ( pDevice, pMgmt, pMgmt->abyCurrBSSID, pMgmt->wCurrCapInfo, pMgmt->wListenInterval, (PWLAN_IE_SSID)pMgmt->abyCurrSSID, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates ); if (pTxPacket != NULL ){ // send the frame *pStatus = csMgmt_xmit(pDevice, pTxPacket); if (*pStatus == CMD_STATUS_PENDING) { pMgmt->eCurrState = WMAC_STATE_ASSOCPENDING; *pStatus = CMD_STATUS_SUCCESS; } } else *pStatus = CMD_STATUS_RESOURCES; return ; } /*+ * * Routine Description: * Start the station re-association procedure. * * Return Value: * None. 
* -*/ void vMgrReAssocBeginSta(void *hDeviceContext, PSMgmtObject pMgmt, PCMD_STATUS pStatus) { PSDevice pDevice = (PSDevice)hDeviceContext; PSTxMgmtPacket pTxPacket; pMgmt->wCurrCapInfo = 0; pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_ESS(1); if (pDevice->bEncryptionEnable) { pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_PRIVACY(1); } //if (pDevice->byPreambleType == 1) { // pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1); //} pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1); if (pMgmt->wListenInterval == 0) pMgmt->wListenInterval = 1; // at least one. // ERP Phy (802.11g) should support short preamble. if (pMgmt->eCurrentPHYMode == PHY_TYPE_11G) { pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1); if (pDevice->bShortSlotTime == TRUE) pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTSLOTTIME(1); } else if (pMgmt->eCurrentPHYMode == PHY_TYPE_11B) { if (pDevice->byPreambleType == 1) { pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1); } } if (pMgmt->b11hEnable == TRUE) pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SPECTRUMMNG(1); pTxPacket = s_MgrMakeReAssocRequest ( pDevice, pMgmt, pMgmt->abyCurrBSSID, pMgmt->wCurrCapInfo, pMgmt->wListenInterval, (PWLAN_IE_SSID)pMgmt->abyCurrSSID, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates ); if (pTxPacket != NULL ){ // send the frame *pStatus = csMgmt_xmit(pDevice, pTxPacket); if (*pStatus != CMD_STATUS_PENDING) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Mgt:Reassociation tx failed.\n"); } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Mgt:Reassociation tx sending.\n"); } } return ; } /*+ * * Routine Description: * Send an dis-association request frame to the AP. * * Return Value: * None. 
* -*/ void vMgrDisassocBeginSta(void *hDeviceContext, PSMgmtObject pMgmt, PBYTE abyDestAddress, WORD wReason, PCMD_STATUS pStatus) { PSDevice pDevice = (PSDevice)hDeviceContext; PSTxMgmtPacket pTxPacket = NULL; WLAN_FR_DISASSOC sFrame; pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool; memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_DISASSOC_FR_MAXLEN); pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket)); // Setup the sFrame structure sFrame.pBuf = (PBYTE)pTxPacket->p80211Header; sFrame.len = WLAN_DISASSOC_FR_MAXLEN; // format fixed field frame structure vMgrEncodeDisassociation(&sFrame); // Setup the header sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16( ( WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) | WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_DISASSOC) )); memcpy( sFrame.pHdr->sA3.abyAddr1, abyDestAddress, WLAN_ADDR_LEN); memcpy( sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN); memcpy( sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN); // Set reason code *(sFrame.pwReason) = cpu_to_le16(wReason); pTxPacket->cbMPDULen = sFrame.len; pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN; // send the frame *pStatus = csMgmt_xmit(pDevice, pTxPacket); if (*pStatus == CMD_STATUS_PENDING) { pMgmt->eCurrState = WMAC_STATE_IDLE; *pStatus = CMD_STATUS_SUCCESS; } return; } /*+ * * Routine Description:(AP function) * Handle incoming station association request frames. * * Return Value: * None. 
* -*/

/*
 * s_vMgrRxAssocRequest - AP-side handler for an association request.
 *
 * Records the requesting STA's capabilities, listen interval, power-save
 * flag and supported rates in the node database entry at uNodeIndex
 * (only when that node has already reached NODE_AUTH), then builds and
 * transmits an association response.  When hostapd is managing the
 * interface the response is left to user space.
 *
 * uNodeIndex is the caller-resolved node DB slot; 0 means "not found"
 * and the frame is ignored.
 */
static void s_vMgrRxAssocRequest(
	PSDevice pDevice,
	PSMgmtObject pMgmt,
	PSRxMgmtPacket pRxPacket,
	unsigned int uNodeIndex
	)
{
	WLAN_FR_ASSOCREQ sFrame;
	CMD_STATUS Status;
	PSTxMgmtPacket pTxPacket;
	WORD wAssocStatus = 0;
	WORD wAssocAID = 0;
	unsigned int uRateLen = WLAN_RATES_MAXLEN;
	BYTE abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
	BYTE abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];

	if (pMgmt->eCurrMode != WMAC_MODE_ESS_AP)
		return;
	// node index not found
	if (!uNodeIndex)
		return;

	//check if node is authenticated
	//decode the frame
	memset(&sFrame, 0, sizeof(WLAN_FR_ASSOCREQ));
	memset(abyCurrSuppRates, 0, WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
	memset(abyCurrExtSuppRates, 0, WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
	sFrame.len = pRxPacket->cbMPDULen;
	sFrame.pBuf = (PBYTE)pRxPacket->p80211Header;
	vMgrDecodeAssocRequest(&sFrame);

	if (pMgmt->sNodeDBTable[uNodeIndex].eNodeState >= NODE_AUTH) {
		pMgmt->sNodeDBTable[uNodeIndex].eNodeState = NODE_ASSOC;
		pMgmt->sNodeDBTable[uNodeIndex].wCapInfo = cpu_to_le16(*sFrame.pwCapInfo);
		pMgmt->sNodeDBTable[uNodeIndex].wListenInterval = cpu_to_le16(*sFrame.pwListenInterval);
		pMgmt->sNodeDBTable[uNodeIndex].bPSEnable =
			WLAN_GET_FC_PWRMGT(sFrame.pHdr->sA3.wFrameCtl) ? TRUE : FALSE;
		// Todo: check sta basic rate, if ap can't support, set status code
		if (pDevice->byBBType == BB_TYPE_11B) {
			/* 11B PHY: cap the rate IE length to the 11B maximum. */
			uRateLen = WLAN_RATES_MAXLEN_11B;
		}
		abyCurrSuppRates[0] = WLAN_EID_SUPP_RATES;
		abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pSuppRates,
						 (PWLAN_IE_SUPP_RATES)abyCurrSuppRates,
						 uRateLen);
		abyCurrExtSuppRates[0] = WLAN_EID_EXTSUPP_RATES;
		if (pDevice->byBBType == BB_TYPE_11G) {
			abyCurrExtSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pExtSuppRates,
							    (PWLAN_IE_SUPP_RATES)abyCurrExtSuppRates,
							    uRateLen);
		} else {
			abyCurrExtSuppRates[1] = 0;
		}

		RATEvParseMaxRate((void *)pDevice,
				  (PWLAN_IE_SUPP_RATES)abyCurrSuppRates,
				  (PWLAN_IE_SUPP_RATES)abyCurrExtSuppRates,
				  FALSE, // do not change our basic rate
				  &(pMgmt->sNodeDBTable[uNodeIndex].wMaxBasicRate),
				  &(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate),
				  &(pMgmt->sNodeDBTable[uNodeIndex].wSuppRate),
				  &(pMgmt->sNodeDBTable[uNodeIndex].byTopCCKBasicRate),
				  &(pMgmt->sNodeDBTable[uNodeIndex].byTopOFDMBasicRate)
				 );

		// set max tx rate
		pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate =
			pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate;
		// Todo: check sta preamble, if ap can't support, set status code
		pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble =
			WLAN_GET_CAP_INFO_SHORTPREAMBLE(*sFrame.pwCapInfo);
		pMgmt->sNodeDBTable[uNodeIndex].bShortSlotTime =
			WLAN_GET_CAP_INFO_SHORTSLOTTIME(*sFrame.pwCapInfo);
		pMgmt->sNodeDBTable[uNodeIndex].wAID = (WORD)uNodeIndex;
		wAssocStatus = WLAN_MGMT_STATUS_SUCCESS;
		wAssocAID = (WORD)uNodeIndex;

		// check if ERP support
		if(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate > RATE_11M)
			pMgmt->sNodeDBTable[uNodeIndex].bERPExist = TRUE;

		if (pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate <= RATE_11M) {
			// B only STA join: enable protection for the whole BSS
			pDevice->bProtectMode = TRUE;
			pDevice->bNonERPPresent = TRUE;
		}
		if (pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble == FALSE) {
			pDevice->bBarkerPreambleMd = TRUE;
		}

		DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "Associate AID= %d \n", wAssocAID);
		DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "MAC=%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X \n",
			sFrame.pHdr->sA3.abyAddr2[0],
			sFrame.pHdr->sA3.abyAddr2[1],
			sFrame.pHdr->sA3.abyAddr2[2],
			sFrame.pHdr->sA3.abyAddr2[3],
			sFrame.pHdr->sA3.abyAddr2[4],
			sFrame.pHdr->sA3.abyAddr2[5]
			) ;
		DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "Max Support rate = %d \n",
			pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate);
	}

	// assoc response reply..
	pTxPacket = s_MgrMakeAssocResponse
		(
		  pDevice,
		  pMgmt,
		  pMgmt->wCurrCapInfo,
		  wAssocStatus,
		  wAssocAID,
		  sFrame.pHdr->sA3.abyAddr2,
		  (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
		  (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates
		);
	if (pTxPacket != NULL ){
		/* hostapd sends its own response from user space. */
		if (pDevice->bEnableHostapd) {
			return;
		}
		/* send the frame */
		Status = csMgmt_xmit(pDevice, pTxPacket);
		if (Status != CMD_STATUS_PENDING) {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Mgt:Assoc response tx failed\n");
		}
		else {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Mgt:Assoc response tx sending..\n");
		}
	}
	return;
}

/*+
 *
 * Description:(AP function)
 *      Handle incoming station re-association request frames.
 *
 * Parameters:
 *  In:
 *      pMgmt     - Management Object structure
 *      pRxPacket - Received Packet
 *  Out:
 *      none
 *
 * Return Value: None.
* -*/

/*
 * s_vMgrRxReAssocRequest - AP-side handler for a re-association request.
 *
 * Mirrors s_vMgrRxAssocRequest(): refreshes the node DB entry for an
 * already-authenticated STA and replies with a re-association response
 * (unless hostapd is active).
 *
 * NOTE(review): unlike s_vMgrRxAssocRequest(), the local rate buffers
 * abyCurrSuppRates/abyCurrExtSuppRates are not zeroed here before use;
 * RATEuSetIE() appears to fill them, but confirm it writes every byte
 * that is later read.
 */
static void s_vMgrRxReAssocRequest(
	PSDevice pDevice,
	PSMgmtObject pMgmt,
	PSRxMgmtPacket pRxPacket,
	unsigned int uNodeIndex
	)
{
	WLAN_FR_REASSOCREQ sFrame;
	CMD_STATUS Status;
	PSTxMgmtPacket pTxPacket;
	WORD wAssocStatus = 0;
	WORD wAssocAID = 0;
	unsigned int uRateLen = WLAN_RATES_MAXLEN;
	BYTE abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
	BYTE abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];

	if (pMgmt->eCurrMode != WMAC_MODE_ESS_AP)
		return;
	// node index not found
	if (!uNodeIndex)
		return;

	//check if node is authenticated
	//decode the frame
	memset(&sFrame, 0, sizeof(WLAN_FR_REASSOCREQ));
	sFrame.len = pRxPacket->cbMPDULen;
	sFrame.pBuf = (PBYTE)pRxPacket->p80211Header;
	vMgrDecodeReassocRequest(&sFrame);

	if (pMgmt->sNodeDBTable[uNodeIndex].eNodeState >= NODE_AUTH) {
		pMgmt->sNodeDBTable[uNodeIndex].eNodeState = NODE_ASSOC;
		pMgmt->sNodeDBTable[uNodeIndex].wCapInfo = cpu_to_le16(*sFrame.pwCapInfo);
		pMgmt->sNodeDBTable[uNodeIndex].wListenInterval = cpu_to_le16(*sFrame.pwListenInterval);
		pMgmt->sNodeDBTable[uNodeIndex].bPSEnable =
			WLAN_GET_FC_PWRMGT(sFrame.pHdr->sA3.wFrameCtl) ? TRUE : FALSE;
		// Todo: check sta basic rate, if ap can't support, set status code
		if (pDevice->byBBType == BB_TYPE_11B) {
			/* 11B PHY: cap the rate IE length to the 11B maximum. */
			uRateLen = WLAN_RATES_MAXLEN_11B;
		}
		abyCurrSuppRates[0] = WLAN_EID_SUPP_RATES;
		abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pSuppRates,
						 (PWLAN_IE_SUPP_RATES)abyCurrSuppRates,
						 uRateLen);
		abyCurrExtSuppRates[0] = WLAN_EID_EXTSUPP_RATES;
		if (pDevice->byBBType == BB_TYPE_11G) {
			abyCurrExtSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pExtSuppRates,
							    (PWLAN_IE_SUPP_RATES)abyCurrExtSuppRates,
							    uRateLen);
		} else {
			abyCurrExtSuppRates[1] = 0;
		}

		RATEvParseMaxRate((void *)pDevice,
				  (PWLAN_IE_SUPP_RATES)abyCurrSuppRates,
				  (PWLAN_IE_SUPP_RATES)abyCurrExtSuppRates,
				  FALSE, // do not change our basic rate
				  &(pMgmt->sNodeDBTable[uNodeIndex].wMaxBasicRate),
				  &(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate),
				  &(pMgmt->sNodeDBTable[uNodeIndex].wSuppRate),
				  &(pMgmt->sNodeDBTable[uNodeIndex].byTopCCKBasicRate),
				  &(pMgmt->sNodeDBTable[uNodeIndex].byTopOFDMBasicRate)
				 );

		// set max tx rate
		pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate =
			pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate;
		// Todo: check sta preamble, if ap can't support, set status code
		pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble =
			WLAN_GET_CAP_INFO_SHORTPREAMBLE(*sFrame.pwCapInfo);
		pMgmt->sNodeDBTable[uNodeIndex].bShortSlotTime =
			WLAN_GET_CAP_INFO_SHORTSLOTTIME(*sFrame.pwCapInfo);
		pMgmt->sNodeDBTable[uNodeIndex].wAID = (WORD)uNodeIndex;
		wAssocStatus = WLAN_MGMT_STATUS_SUCCESS;
		wAssocAID = (WORD)uNodeIndex;

		// if support ERP
		if(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate > RATE_11M)
			pMgmt->sNodeDBTable[uNodeIndex].bERPExist = TRUE;

		if (pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate <= RATE_11M) {
			// B only STA join: enable protection for the whole BSS
			pDevice->bProtectMode = TRUE;
			pDevice->bNonERPPresent = TRUE;
		}
		if (pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble == FALSE) {
			pDevice->bBarkerPreambleMd = TRUE;
		}

		DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "Rx ReAssociate AID= %d \n", wAssocAID);
		DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "MAC=%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X \n",
			sFrame.pHdr->sA3.abyAddr2[0],
			sFrame.pHdr->sA3.abyAddr2[1],
			sFrame.pHdr->sA3.abyAddr2[2],
			sFrame.pHdr->sA3.abyAddr2[3],
			sFrame.pHdr->sA3.abyAddr2[4],
			sFrame.pHdr->sA3.abyAddr2[5]
			) ;
		DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "Max Support rate = %d \n",
			pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate);
	}

	// assoc response reply..
	pTxPacket = s_MgrMakeReAssocResponse
		(
		  pDevice,
		  pMgmt,
		  pMgmt->wCurrCapInfo,
		  wAssocStatus,
		  wAssocAID,
		  sFrame.pHdr->sA3.abyAddr2,
		  (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
		  (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates
		);

	if (pTxPacket != NULL ){
		/* send the frame */
		if (pDevice->bEnableHostapd) {
			/* hostapd sends its own response from user space. */
			return;
		}
		Status = csMgmt_xmit(pDevice, pTxPacket);
		if (Status != CMD_STATUS_PENDING) {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Mgt:ReAssoc response tx failed\n");
		}
		else {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Mgt:ReAssoc response tx sending..\n");
		}
	}
	return;
}

/*+
 *
 * Routine Description:
 *    Handle incoming association response frames.
 *
 * Return Value:
 *    None.
* -*/

/*
 * s_vMgrRxAssocResponse - STA-side handler for an (re)association response.
 *
 * Only processed while in WMAC_STATE_ASSOCPENDING or WMAC_STATE_ASSOC.
 * On success: records the AID, moves to WMAC_STATE_ASSOC, updates the AP
 * node entry, and forwards the request/response IEs to user space via the
 * wpadev skb path and (if compiled in) wireless-extensions events.
 * On failure: falls back to WMAC_STATE_AUTH (or WMAC_STATE_IDLE when the
 * response was to a re-association, bReAssocType TRUE) and logs the
 * status code.
 */
static void s_vMgrRxAssocResponse(
	PSDevice pDevice,
	PSMgmtObject pMgmt,
	PSRxMgmtPacket pRxPacket,
	BOOL bReAssocType
	)
{
	WLAN_FR_ASSOCRESP sFrame;
	PWLAN_IE_SSID pItemSSID;
	PBYTE pbyIEs;
	viawget_wpa_header *wpahdr;

	if (pMgmt->eCurrState == WMAC_STATE_ASSOCPENDING ||
	    pMgmt->eCurrState == WMAC_STATE_ASSOC) {
		sFrame.len = pRxPacket->cbMPDULen;
		sFrame.pBuf = (PBYTE)pRxPacket->p80211Header;
		// decode the frame
		vMgrDecodeAssocResponse(&sFrame);
		/* All fixed fields must have decoded; otherwise drop. */
		if ((sFrame.pwCapInfo == NULL) ||
		    (sFrame.pwStatus == NULL) ||
		    (sFrame.pwAid == NULL) ||
		    (sFrame.pSuppRates == NULL)) {
			DBG_PORT80(0xCC);
			return;
		}

		pMgmt->sAssocInfo.AssocInfo.ResponseFixedIEs.Capabilities = *(sFrame.pwCapInfo);
		pMgmt->sAssocInfo.AssocInfo.ResponseFixedIEs.StatusCode = *(sFrame.pwStatus);
		pMgmt->sAssocInfo.AssocInfo.ResponseFixedIEs.AssociationId = *(sFrame.pwAid);
		pMgmt->sAssocInfo.AssocInfo.AvailableResponseFixedIEs |= 0x07;
		/* 24 = 802.11 addr3 header, 6 = the three fixed WORD fields
		 * above; the rest of the MPDU is the response IE blob. */
		pMgmt->sAssocInfo.AssocInfo.ResponseIELength = sFrame.len - 24 - 6;
		pMgmt->sAssocInfo.AssocInfo.OffsetResponseIEs =
			pMgmt->sAssocInfo.AssocInfo.OffsetRequestIEs +
			pMgmt->sAssocInfo.AssocInfo.RequestIELength;
		pbyIEs = pMgmt->sAssocInfo.abyIEs;
		pbyIEs += pMgmt->sAssocInfo.AssocInfo.RequestIELength;
		/* Response IEs are appended right after the request IEs. */
		memcpy(pbyIEs, (sFrame.pBuf + 24 +6),
		       pMgmt->sAssocInfo.AssocInfo.ResponseIELength);

		// save values and set current BSS state
		if (cpu_to_le16((*(sFrame.pwStatus))) == WLAN_MGMT_STATUS_SUCCESS ){
			// set AID
			pMgmt->wCurrAID = cpu_to_le16((*(sFrame.pwAid)));
			if ( (pMgmt->wCurrAID >> 14) != (BIT0 | BIT1) )
			{
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "AID from AP, has two msb clear.\n");
			}
			DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "Association Successful, AID=%d.\n",
				pMgmt->wCurrAID & ~(BIT14|BIT15));
			pMgmt->eCurrState = WMAC_STATE_ASSOC;
			BSSvUpdateAPNode((void *) pDevice, sFrame.pwCapInfo,
					 sFrame.pSuppRates, sFrame.pExtSuppRates);
			pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
			DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "Link with AP(SSID): %s\n",
				pItemSSID->abySSID);
			pDevice->bLinkPass = TRUE;
			ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);

			/* Forward assoc info to the wpa user-space device. */
			if ((pDevice->bWPADEVUp) && (pDevice->skb != NULL)) {
				if(skb_tailroom(pDevice->skb) <(sizeof(viawget_wpa_header)+pMgmt->sAssocInfo.AssocInfo.ResponseIELength+ pMgmt->sAssocInfo.AssocInfo.RequestIELength)) {
					//data room not enough
					dev_kfree_skb(pDevice->skb);
					pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
				}
				wpahdr = (viawget_wpa_header *)pDevice->skb->data;
				wpahdr->type = VIAWGET_ASSOC_MSG;
				wpahdr->resp_ie_len = pMgmt->sAssocInfo.AssocInfo.ResponseIELength;
				wpahdr->req_ie_len = pMgmt->sAssocInfo.AssocInfo.RequestIELength;
				memcpy(pDevice->skb->data + sizeof(viawget_wpa_header),
				       pMgmt->sAssocInfo.abyIEs, wpahdr->req_ie_len);
				memcpy(pDevice->skb->data + sizeof(viawget_wpa_header) + wpahdr->req_ie_len,
				       pbyIEs, wpahdr->resp_ie_len );
				skb_put(pDevice->skb, sizeof(viawget_wpa_header) + wpahdr->resp_ie_len + wpahdr->req_ie_len);
				pDevice->skb->dev = pDevice->wpadev;
				skb_reset_mac_header(pDevice->skb);
				pDevice->skb->pkt_type = PACKET_HOST;
				pDevice->skb->protocol = htons(ETH_P_802_2);
				memset(pDevice->skb->cb, 0, sizeof(pDevice->skb->cb));
				netif_rx(pDevice->skb);
				pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
			}

#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
			//if(pDevice->bWPASuppWextEnabled == TRUE)
			{
				BYTE buf[512];
				size_t len;
				union iwreq_data wrqu;
				int we_event;

				/* Report request IEs, response IEs, then the
				 * new AP address to wireless extensions. */
				memset(buf, 0, 512);
				len = pMgmt->sAssocInfo.AssocInfo.RequestIELength;
				if(len)	{
					memcpy(buf, pMgmt->sAssocInfo.abyIEs, len);
					memset(&wrqu, 0, sizeof (wrqu));
					wrqu.data.length = len;
					we_event = IWEVASSOCREQIE;
					PRINT_K("wireless_send_event--->IWEVASSOCREQIE\n");
					wireless_send_event(pDevice->dev, we_event, &wrqu, buf);
				}
				memset(buf, 0, 512);
				len = pMgmt->sAssocInfo.AssocInfo.ResponseIELength;
				if(len)	{
					memcpy(buf, pbyIEs, len);
					memset(&wrqu, 0, sizeof (wrqu));
					wrqu.data.length = len;
					we_event = IWEVASSOCRESPIE;
					PRINT_K("wireless_send_event--->IWEVASSOCRESPIE\n");
					wireless_send_event(pDevice->dev, we_event, &wrqu, buf);
				}
				memset(&wrqu, 0, sizeof (wrqu));
				memcpy(wrqu.ap_addr.sa_data, &pMgmt->abyCurrBSSID[0], ETH_ALEN);
				wrqu.ap_addr.sa_family = ARPHRD_ETHER;
				PRINT_K("wireless_send_event--->SIOCGIWAP(associated)\n");
				wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
			}
#endif //#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
		}
		else {
			if (bReAssocType) {
				pMgmt->eCurrState = WMAC_STATE_IDLE;
			}
			else {
				// jump back to the auth state and indicate the error
				pMgmt->eCurrState = WMAC_STATE_AUTH;
			}
			s_vMgrLogStatus(pMgmt,cpu_to_le16((*(sFrame.pwStatus))));
		}
	}

#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
	//need clear flags related to Networkmanager
	pDevice->bwextstep0 = FALSE;
	pDevice->bwextstep1 = FALSE;
	pDevice->bwextstep2 = FALSE;
	pDevice->bwextstep3 = FALSE;
	pDevice->bWPASuppWextEnabled = FALSE;
#endif

	if(pMgmt->eCurrState == WMAC_STATE_ASSOC)
		timer_expire(pDevice->sTimerCommand, 0);
	return;
}

/*+
 *
 * Routine Description:
 *    Start the station authentication procedure.  Namely, send an
 *    authentication frame to the AP.
 *
 * Return Value:
 *    None.
* -*/

/*
 * vMgrAuthenBeginSta - send the first (sequence 1) authentication frame
 * to the current BSSID.
 *
 * Chooses shared-key or open-system algorithm from
 * pMgmt->bShareKeyAlgorithm.  On a pending transmit the state machine
 * advances to WMAC_STATE_AUTHPENDING and *pStatus is reported as
 * CMD_STATUS_SUCCESS.
 */
void vMgrAuthenBeginSta(void *hDeviceContext, PSMgmtObject pMgmt, PCMD_STATUS pStatus)
{
	PSDevice pDevice = (PSDevice)hDeviceContext;
	WLAN_FR_AUTHEN sFrame;
	PSTxMgmtPacket pTxPacket = NULL;

	/* Frame is built in the shared management packet pool. */
	pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
	memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_AUTHEN_FR_MAXLEN);
	pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
	sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
	sFrame.len = WLAN_AUTHEN_FR_MAXLEN;
	vMgrEncodeAuthen(&sFrame);

	/* insert values */
	sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
		(
		WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
		WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_AUTHEN)
		));
	memcpy( sFrame.pHdr->sA3.abyAddr1, pMgmt->abyCurrBSSID, WLAN_ADDR_LEN);
	memcpy( sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
	memcpy( sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
	if (pMgmt->bShareKeyAlgorithm)
		*(sFrame.pwAuthAlgorithm) = cpu_to_le16(WLAN_AUTH_ALG_SHAREDKEY);
	else
		*(sFrame.pwAuthAlgorithm) = cpu_to_le16(WLAN_AUTH_ALG_OPENSYSTEM);
	*(sFrame.pwAuthSequence) = cpu_to_le16(1);

	/* Adjust the length fields */
	pTxPacket->cbMPDULen = sFrame.len;
	pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;

	*pStatus = csMgmt_xmit(pDevice, pTxPacket);
	if (*pStatus == CMD_STATUS_PENDING){
		pMgmt->eCurrState = WMAC_STATE_AUTHPENDING;
		*pStatus = CMD_STATUS_SUCCESS;
	}

	return ;
}

/*+
 *
 * Routine Description:
 *    Start the station(AP) deauthentication procedure.  Namely, send an
 *    deauthentication frame to the AP or Sta.
 *
 * Return Value:
 *    None.
* -*/

/*
 * vMgrDeAuthenBeginSta - build and send a deauthentication frame.
 *
 * abyDestAddress - destination MAC (Address 1)
 * wReason        - 802.11 reason code, stored little-endian
 * pStatus        - out: CMD_STATUS_SUCCESS when queued, otherwise the
 *                  raw csMgmt_xmit() status
 *
 * Unlike the disassoc/authen variants, no management state is changed
 * here; only the transmit status is normalized.
 */
void vMgrDeAuthenBeginSta(void *hDeviceContext, PSMgmtObject pMgmt, PBYTE abyDestAddress, WORD wReason, PCMD_STATUS pStatus)
{
	PSDevice pDevice = (PSDevice)hDeviceContext;
	WLAN_FR_DEAUTHEN sFrame;
	PSTxMgmtPacket pTxPacket = NULL;

	/* Frame is built in the shared management packet pool. */
	pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
	memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_DEAUTHEN_FR_MAXLEN);
	pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
	sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
	sFrame.len = WLAN_DEAUTHEN_FR_MAXLEN;
	vMgrEncodeDeauthen(&sFrame);

	/* insert values */
	sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
		(
		WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
		WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_DEAUTHEN)
		));
	memcpy( sFrame.pHdr->sA3.abyAddr1, abyDestAddress, WLAN_ADDR_LEN);
	memcpy( sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
	memcpy( sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);

	*(sFrame.pwReason) = cpu_to_le16(wReason);	// deauthen. bcs left BSS

	/* Adjust the length fields */
	pTxPacket->cbMPDULen = sFrame.len;
	pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;

	*pStatus = csMgmt_xmit(pDevice, pTxPacket);
	if (*pStatus == CMD_STATUS_PENDING){
		*pStatus = CMD_STATUS_SUCCESS;
	}

	return ;
}

/*+
 *
 * Routine Description:
 *    Handle incoming authentication frames.
 *
 * Return Value:
 *    None.
* -*/ static void s_vMgrRxAuthentication( PSDevice pDevice, PSMgmtObject pMgmt, PSRxMgmtPacket pRxPacket ) { WLAN_FR_AUTHEN sFrame; // we better be an AP or a STA in AUTHPENDING otherwise ignore if (!(pMgmt->eCurrMode == WMAC_MODE_ESS_AP || pMgmt->eCurrState == WMAC_STATE_AUTHPENDING)) { return; } // decode the frame sFrame.len = pRxPacket->cbMPDULen; sFrame.pBuf = (PBYTE)pRxPacket->p80211Header; vMgrDecodeAuthen(&sFrame); switch (cpu_to_le16((*(sFrame.pwAuthSequence )))){ case 1: //AP funciton s_vMgrRxAuthenSequence_1(pDevice,pMgmt, &sFrame); break; case 2: s_vMgrRxAuthenSequence_2(pDevice, pMgmt, &sFrame); break; case 3: //AP funciton s_vMgrRxAuthenSequence_3(pDevice, pMgmt, &sFrame); break; case 4: s_vMgrRxAuthenSequence_4(pDevice, pMgmt, &sFrame); break; default: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Auth Sequence error, seq = %d\n", cpu_to_le16((*(sFrame.pwAuthSequence)))); break; } return; } /*+ * * Routine Description: * Handles incoming authen frames with sequence 1. Currently * assumes we're an AP. So far, no one appears to use authentication * in Ad-Hoc mode. * * Return Value: * None. 
* -*/

/*
 * s_vMgrRxAuthenSequence_1 - AP-side handler for authentication frame 1.
 *
 * Creates a node DB entry for the STA if it does not exist, replies with
 * sequence 2 carrying a matching or rejecting status code, and — in the
 * shared-key case — appends a challenge IE whose text is generated by
 * RC4-keystreaming a zeroed buffer with the group key (when available).
 * When hostapd is enabled the reply is suppressed.
 */
static void s_vMgrRxAuthenSequence_1(
	PSDevice pDevice,
	PSMgmtObject pMgmt,
	PWLAN_FR_AUTHEN pFrame
	)
{
	PSTxMgmtPacket pTxPacket = NULL;
	unsigned int uNodeIndex;
	WLAN_FR_AUTHEN sFrame;
	PSKeyItem pTransmitKey;

	// Insert a Node entry
	if (!BSSbIsSTAInNodeDB(pDevice, pFrame->pHdr->sA3.abyAddr2, &uNodeIndex)) {
		BSSvCreateOneNode((PSDevice)pDevice, &uNodeIndex);
		memcpy(pMgmt->sNodeDBTable[uNodeIndex].abyMACAddr,
		       pFrame->pHdr->sA3.abyAddr2, WLAN_ADDR_LEN);
	}

	if (pMgmt->bShareKeyAlgorithm) {
		/* Shared key: STA is only "known" until it answers the
		 * challenge in sequence 3. */
		pMgmt->sNodeDBTable[uNodeIndex].eNodeState = NODE_KNOWN;
		pMgmt->sNodeDBTable[uNodeIndex].byAuthSequence = 1;
	}
	else {
		/* Open system: authenticated immediately. */
		pMgmt->sNodeDBTable[uNodeIndex].eNodeState = NODE_AUTH;
	}

	// send auth reply
	pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
	memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_AUTHEN_FR_MAXLEN);
	pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket));
	sFrame.pBuf = (PBYTE)pTxPacket->p80211Header;
	sFrame.len = WLAN_AUTHEN_FR_MAXLEN;
	// format buffer structure
	vMgrEncodeAuthen(&sFrame);
	// insert values
	sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
		(
		WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
		WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_AUTHEN)|
		WLAN_SET_FC_ISWEP(0)
		));
	memcpy( sFrame.pHdr->sA3.abyAddr1, pFrame->pHdr->sA3.abyAddr2, WLAN_ADDR_LEN);
	memcpy( sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
	memcpy( sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
	*(sFrame.pwAuthAlgorithm) = *(pFrame->pwAuthAlgorithm);
	*(sFrame.pwAuthSequence) = cpu_to_le16(2);

	/* Accept only when the requested algorithm matches ours. */
	if (cpu_to_le16(*(pFrame->pwAuthAlgorithm)) == WLAN_AUTH_ALG_SHAREDKEY) {
		if (pMgmt->bShareKeyAlgorithm)
			*(sFrame.pwStatus) = cpu_to_le16(WLAN_MGMT_STATUS_SUCCESS);
		else
			*(sFrame.pwStatus) = cpu_to_le16(WLAN_MGMT_STATUS_UNSUPPORTED_AUTHALG);
	}
	else {
		if (pMgmt->bShareKeyAlgorithm)
			*(sFrame.pwStatus) = cpu_to_le16(WLAN_MGMT_STATUS_UNSUPPORTED_AUTHALG);
		else
			*(sFrame.pwStatus) = cpu_to_le16(WLAN_MGMT_STATUS_SUCCESS);
	}

	if (pMgmt->bShareKeyAlgorithm &&
	    (cpu_to_le16(*(sFrame.pwStatus)) == WLAN_MGMT_STATUS_SUCCESS)) {
		/* Append a challenge IE; the text is cached in
		 * pMgmt->abyChallenge for verification in sequence 3. */
		sFrame.pChallenge = (PWLAN_IE_CHALLENGE)(sFrame.pBuf + sFrame.len);
		sFrame.len += WLAN_CHALLENGE_IE_LEN;
		sFrame.pChallenge->byElementID = WLAN_EID_CHALLENGE;
		sFrame.pChallenge->len = WLAN_CHALLENGE_LEN;
		memset(pMgmt->abyChallenge, 0, WLAN_CHALLENGE_LEN);
		// get group key
		if(KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBroadcastAddr, GROUP_KEY, &pTransmitKey) == TRUE) {
			rc4_init(&pDevice->SBox, pDevice->abyPRNG, pTransmitKey->uKeyLength+3);
			rc4_encrypt(&pDevice->SBox, pMgmt->abyChallenge, pMgmt->abyChallenge, WLAN_CHALLENGE_LEN);
		}
		memcpy(sFrame.pChallenge->abyChallenge, pMgmt->abyChallenge , WLAN_CHALLENGE_LEN);
	}

	/* Adjust the length fields */
	pTxPacket->cbMPDULen = sFrame.len;
	pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
	// send the frame
	if (pDevice->bEnableHostapd) {
		return;
	}
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Mgt:Authreq_reply sequence_1 tx.. \n");
	if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Mgt:Authreq_reply sequence_1 tx failed.\n");
	}
	return;
}

/*+
 *
 * Routine Description:
 *    Handles incoming auth frames with sequence number 2.  Currently
 *    assumes we're a station.
 *
 *
 * Return Value:
 *    None.
* -*/ static void s_vMgrRxAuthenSequence_2( PSDevice pDevice, PSMgmtObject pMgmt, PWLAN_FR_AUTHEN pFrame ) { WLAN_FR_AUTHEN sFrame; PSTxMgmtPacket pTxPacket = NULL; switch (cpu_to_le16((*(pFrame->pwAuthAlgorithm)))) { case WLAN_AUTH_ALG_OPENSYSTEM: if ( cpu_to_le16((*(pFrame->pwStatus))) == WLAN_MGMT_STATUS_SUCCESS ){ DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "802.11 Authen (OPEN) Successful.\n"); pMgmt->eCurrState = WMAC_STATE_AUTH; timer_expire(pDevice->sTimerCommand, 0); } else { DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "802.11 Authen (OPEN) Failed.\n"); s_vMgrLogStatus(pMgmt, cpu_to_le16((*(pFrame->pwStatus)))); pMgmt->eCurrState = WMAC_STATE_IDLE; } if (pDevice->eCommandState == WLAN_AUTHENTICATE_WAIT) { /* spin_unlock_irq(&pDevice->lock); vCommandTimerWait((void *) pDevice, 0); spin_lock_irq(&pDevice->lock); */ } break; case WLAN_AUTH_ALG_SHAREDKEY: if (cpu_to_le16((*(pFrame->pwStatus))) == WLAN_MGMT_STATUS_SUCCESS) { pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool; memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_AUTHEN_FR_MAXLEN); pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket)); sFrame.pBuf = (PBYTE)pTxPacket->p80211Header; sFrame.len = WLAN_AUTHEN_FR_MAXLEN; // format buffer structure vMgrEncodeAuthen(&sFrame); // insert values sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16( ( WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) | WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_AUTHEN)| WLAN_SET_FC_ISWEP(1) )); memcpy( sFrame.pHdr->sA3.abyAddr1, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN); memcpy( sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN); memcpy( sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN); *(sFrame.pwAuthAlgorithm) = *(pFrame->pwAuthAlgorithm); *(sFrame.pwAuthSequence) = cpu_to_le16(3); *(sFrame.pwStatus) = cpu_to_le16(WLAN_MGMT_STATUS_SUCCESS); sFrame.pChallenge = (PWLAN_IE_CHALLENGE)(sFrame.pBuf + sFrame.len); sFrame.len += WLAN_CHALLENGE_IE_LEN; sFrame.pChallenge->byElementID = WLAN_EID_CHALLENGE; sFrame.pChallenge->len = 
WLAN_CHALLENGE_LEN; memcpy( sFrame.pChallenge->abyChallenge, pFrame->pChallenge->abyChallenge, WLAN_CHALLENGE_LEN); // Adjust the length fields pTxPacket->cbMPDULen = sFrame.len; pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN; // send the frame if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Mgt:Auth_reply sequence_2 tx failed.\n"); } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Mgt:Auth_reply sequence_2 tx ...\n"); } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Mgt:rx Auth_reply sequence_2 status error ...\n"); if ( pDevice->eCommandState == WLAN_AUTHENTICATE_WAIT ) { /* spin_unlock_irq(&pDevice->lock); vCommandTimerWait((void *) pDevice, 0); spin_lock_irq(&pDevice->lock); */ } s_vMgrLogStatus(pMgmt, cpu_to_le16((*(pFrame->pwStatus)))); } break; default: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Mgt: rx auth.seq = 2 unknown AuthAlgorithm=%d\n", cpu_to_le16((*(pFrame->pwAuthAlgorithm)))); break; } return; } /*+ * * Routine Description: * Handles incoming authen frames with sequence 3. Currently * assumes we're an AP. This function assumes the frame has * already been successfully decrypted. * * * Return Value: * None. 
* -*/ static void s_vMgrRxAuthenSequence_3( PSDevice pDevice, PSMgmtObject pMgmt, PWLAN_FR_AUTHEN pFrame ) { PSTxMgmtPacket pTxPacket = NULL; unsigned int uStatusCode = 0 ; unsigned int uNodeIndex = 0; WLAN_FR_AUTHEN sFrame; if (!WLAN_GET_FC_ISWEP(pFrame->pHdr->sA3.wFrameCtl)) { uStatusCode = WLAN_MGMT_STATUS_CHALLENGE_FAIL; goto reply; } if (BSSbIsSTAInNodeDB(pDevice, pFrame->pHdr->sA3.abyAddr2, &uNodeIndex)) { if (pMgmt->sNodeDBTable[uNodeIndex].byAuthSequence != 1) { uStatusCode = WLAN_MGMT_STATUS_RX_AUTH_NOSEQ; goto reply; } if (memcmp(pMgmt->abyChallenge, pFrame->pChallenge->abyChallenge, WLAN_CHALLENGE_LEN) != 0) { uStatusCode = WLAN_MGMT_STATUS_CHALLENGE_FAIL; goto reply; } } else { uStatusCode = WLAN_MGMT_STATUS_UNSPEC_FAILURE; goto reply; } if (uNodeIndex) { pMgmt->sNodeDBTable[uNodeIndex].eNodeState = NODE_AUTH; pMgmt->sNodeDBTable[uNodeIndex].byAuthSequence = 0; } uStatusCode = WLAN_MGMT_STATUS_SUCCESS; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Challenge text check ok..\n"); reply: // send auth reply pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool; memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_AUTHEN_FR_MAXLEN); pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket)); sFrame.pBuf = (PBYTE)pTxPacket->p80211Header; sFrame.len = WLAN_AUTHEN_FR_MAXLEN; // format buffer structure vMgrEncodeAuthen(&sFrame); /* insert values */ sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16( ( WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) | WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_AUTHEN)| WLAN_SET_FC_ISWEP(0) )); memcpy( sFrame.pHdr->sA3.abyAddr1, pFrame->pHdr->sA3.abyAddr2, WLAN_ADDR_LEN); memcpy( sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN); memcpy( sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN); *(sFrame.pwAuthAlgorithm) = *(pFrame->pwAuthAlgorithm); *(sFrame.pwAuthSequence) = cpu_to_le16(4); *(sFrame.pwStatus) = cpu_to_le16(uStatusCode); /* Adjust the length fields */ pTxPacket->cbMPDULen = sFrame.len; pTxPacket->cbPayloadLen = 
sFrame.len - WLAN_HDR_ADDR3_LEN; // send the frame if (pDevice->bEnableHostapd) { return; } if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Mgt:Authreq_reply sequence_4 tx failed.\n"); } return; } /*+ * * Routine Description: * Handles incoming authen frames with sequence 4 * * * Return Value: * None. * -*/ static void s_vMgrRxAuthenSequence_4( PSDevice pDevice, PSMgmtObject pMgmt, PWLAN_FR_AUTHEN pFrame ) { if ( cpu_to_le16((*(pFrame->pwStatus))) == WLAN_MGMT_STATUS_SUCCESS ){ DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "802.11 Authen (SHAREDKEY) Successful.\n"); pMgmt->eCurrState = WMAC_STATE_AUTH; timer_expire(pDevice->sTimerCommand, 0); } else{ DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "802.11 Authen (SHAREDKEY) Failed.\n"); s_vMgrLogStatus(pMgmt, cpu_to_le16((*(pFrame->pwStatus))) ); pMgmt->eCurrState = WMAC_STATE_IDLE; } if ( pDevice->eCommandState == WLAN_AUTHENTICATE_WAIT ) { /* spin_unlock_irq(&pDevice->lock); vCommandTimerWait((void *) pDevice, 0); spin_lock_irq(&pDevice->lock); */ } } /*+ * * Routine Description: * Handles incoming disassociation frames * * * Return Value: * None. * -*/ static void s_vMgrRxDisassociation( PSDevice pDevice, PSMgmtObject pMgmt, PSRxMgmtPacket pRxPacket ) { WLAN_FR_DISASSOC sFrame; unsigned int uNodeIndex = 0; CMD_STATUS CmdStatus; viawget_wpa_header *wpahdr; if ( pMgmt->eCurrMode == WMAC_MODE_ESS_AP ){ // if is acting an AP.. // a STA is leaving this BSS.. 
sFrame.len = pRxPacket->cbMPDULen; sFrame.pBuf = (PBYTE)pRxPacket->p80211Header; if (BSSbIsSTAInNodeDB(pDevice, pRxPacket->p80211Header->sA3.abyAddr2, &uNodeIndex)) { BSSvRemoveOneNode(pDevice, uNodeIndex); } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Rx disassoc, sta not found\n"); } } else if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA ){ sFrame.len = pRxPacket->cbMPDULen; sFrame.pBuf = (PBYTE)pRxPacket->p80211Header; vMgrDecodeDisassociation(&sFrame); DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "AP disassociated me, reason=%d.\n", cpu_to_le16(*(sFrame.pwReason))); pDevice->fWPA_Authened = FALSE; if ((pDevice->bWPADEVUp) && (pDevice->skb != NULL)) { wpahdr = (viawget_wpa_header *)pDevice->skb->data; wpahdr->type = VIAWGET_DISASSOC_MSG; wpahdr->resp_ie_len = 0; wpahdr->req_ie_len = 0; skb_put(pDevice->skb, sizeof(viawget_wpa_header)); pDevice->skb->dev = pDevice->wpadev; skb_reset_mac_header(pDevice->skb); pDevice->skb->pkt_type = PACKET_HOST; pDevice->skb->protocol = htons(ETH_P_802_2); memset(pDevice->skb->cb, 0, sizeof(pDevice->skb->cb)); netif_rx(pDevice->skb); pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz); } //TODO: do something let upper layer know or //try to send associate packet again because of inactivity timeout if (pMgmt->eCurrState == WMAC_STATE_ASSOC) { pDevice->bLinkPass = FALSE; pMgmt->sNodeDBTable[0].bActive = FALSE; pDevice->byReAssocCount = 0; pMgmt->eCurrState = WMAC_STATE_AUTH; // jump back to the auth state! pDevice->eCommandState = WLAN_ASSOCIATE_WAIT; vMgrReAssocBeginSta((PSDevice)pDevice, pMgmt, &CmdStatus); if(CmdStatus == CMD_STATUS_PENDING) { pDevice->byReAssocCount ++; return; //mike add: you'll retry for many times, so it cann't be regarded as disconnected! 
} } #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT // if(pDevice->bWPASuppWextEnabled == TRUE) { union iwreq_data wrqu; memset(&wrqu, 0, sizeof (wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated)\n"); wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL); } #endif } /* else, ignore it */ return; } /*+ * * Routine Description: * Handles incoming deauthentication frames * * * Return Value: * None. * -*/ static void s_vMgrRxDeauthentication( PSDevice pDevice, PSMgmtObject pMgmt, PSRxMgmtPacket pRxPacket ) { WLAN_FR_DEAUTHEN sFrame; unsigned int uNodeIndex = 0; viawget_wpa_header *wpahdr; if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP ){ //Todo: // if is acting an AP.. // a STA is leaving this BSS.. sFrame.len = pRxPacket->cbMPDULen; sFrame.pBuf = (PBYTE)pRxPacket->p80211Header; if (BSSbIsSTAInNodeDB(pDevice, pRxPacket->p80211Header->sA3.abyAddr2, &uNodeIndex)) { BSSvRemoveOneNode(pDevice, uNodeIndex); } else { DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Rx deauth, sta not found\n"); } } else { if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA ) { sFrame.len = pRxPacket->cbMPDULen; sFrame.pBuf = (PBYTE)pRxPacket->p80211Header; vMgrDecodeDeauthen(&sFrame); pDevice->fWPA_Authened = FALSE; DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "AP deauthed me, reason=%d.\n", cpu_to_le16((*(sFrame.pwReason)))); // TODO: update BSS list for specific BSSID if pre-authentication case if (!compare_ether_addr(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID)) { if (pMgmt->eCurrState >= WMAC_STATE_AUTHPENDING) { pMgmt->sNodeDBTable[0].bActive = FALSE; pMgmt->eCurrMode = WMAC_MODE_STANDBY; pMgmt->eCurrState = WMAC_STATE_IDLE; netif_stop_queue(pDevice->dev); pDevice->bLinkPass = FALSE; ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW); } } if ((pDevice->bWPADEVUp) && (pDevice->skb != NULL)) { wpahdr = (viawget_wpa_header *)pDevice->skb->data; wpahdr->type = VIAWGET_DISASSOC_MSG; wpahdr->resp_ie_len = 0; wpahdr->req_ie_len = 0; 
skb_put(pDevice->skb, sizeof(viawget_wpa_header)); pDevice->skb->dev = pDevice->wpadev; skb_reset_mac_header(pDevice->skb); pDevice->skb->pkt_type = PACKET_HOST; pDevice->skb->protocol = htons(ETH_P_802_2); memset(pDevice->skb->cb, 0, sizeof(pDevice->skb->cb)); netif_rx(pDevice->skb); pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz); } #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT // if(pDevice->bWPASuppWextEnabled == TRUE) { union iwreq_data wrqu; memset(&wrqu, 0, sizeof (wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; PRINT_K("wireless_send_event--->SIOCGIWAP(disauthen)\n"); wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL); } #endif } /* else, ignore it. TODO: IBSS authentication service would be implemented here */ }; return; } /*+ * * Routine Description: * check if current channel is match ZoneType. *for USA:1~11; * Japan:1~13; * Europe:1~13 * Return Value: * True:exceed; * False:normal case -*/ static BOOL ChannelExceedZoneType( PSDevice pDevice, BYTE byCurrChannel ) { BOOL exceed=FALSE; switch(pDevice->byZoneType) { case 0x00: //USA:1~11 if((byCurrChannel<1) ||(byCurrChannel>11)) exceed = TRUE; break; case 0x01: //Japan:1~13 case 0x02: //Europe:1~13 if((byCurrChannel<1) ||(byCurrChannel>13)) exceed = TRUE; break; default: //reserve for other zonetype break; } return exceed; } /*+ * * Routine Description: * Handles and analysis incoming beacon frames. * * * Return Value: * None. 
* -*/ static void s_vMgrRxBeacon( PSDevice pDevice, PSMgmtObject pMgmt, PSRxMgmtPacket pRxPacket, BOOL bInScan ) { PKnownBSS pBSSList; WLAN_FR_BEACON sFrame; QWORD qwTSFOffset; BOOL bIsBSSIDEqual = FALSE; BOOL bIsSSIDEqual = FALSE; BOOL bTSFLargeDiff = FALSE; BOOL bTSFOffsetPostive = FALSE; BOOL bUpdateTSF = FALSE; BOOL bIsAPBeacon = FALSE; BOOL bIsChannelEqual = FALSE; unsigned int uLocateByteIndex; BYTE byTIMBitOn = 0; WORD wAIDNumber = 0; unsigned int uNodeIndex; QWORD qwTimestamp, qwLocalTSF; QWORD qwCurrTSF; WORD wStartIndex = 0; WORD wAIDIndex = 0; BYTE byCurrChannel = pRxPacket->byRxChannel; ERPObject sERP; unsigned int uRateLen = WLAN_RATES_MAXLEN; BOOL bChannelHit = FALSE; BYTE byOldPreambleType; if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) return; memset(&sFrame, 0, sizeof(WLAN_FR_BEACON)); sFrame.len = pRxPacket->cbMPDULen; sFrame.pBuf = (PBYTE)pRxPacket->p80211Header; // decode the beacon frame vMgrDecodeBeacon(&sFrame); if ((sFrame.pwBeaconInterval == NULL) || (sFrame.pwCapInfo == NULL) || (sFrame.pSSID == NULL) || (sFrame.pSuppRates == NULL)) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Rx beacon frame error\n"); return; } if( byCurrChannel > CB_MAX_CHANNEL_24G ) { if (sFrame.pDSParms != NULL) { if (byCurrChannel == RFaby11aChannelIndex[sFrame.pDSParms->byCurrChannel-1]) bChannelHit = TRUE; byCurrChannel = RFaby11aChannelIndex[sFrame.pDSParms->byCurrChannel-1]; } else { bChannelHit = TRUE; } } else { if (sFrame.pDSParms != NULL) { if (byCurrChannel == sFrame.pDSParms->byCurrChannel) bChannelHit = TRUE; byCurrChannel = sFrame.pDSParms->byCurrChannel; } else { bChannelHit = TRUE; } } if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE) return; if (sFrame.pERP != NULL) { sERP.byERP = sFrame.pERP->byContext; sERP.bERPExist = TRUE; } else { sERP.bERPExist = FALSE; sERP.byERP = 0; } pBSSList = BSSpAddrIsInBSSList((void *) pDevice, sFrame.pHdr->sA3.abyAddr3, sFrame.pSSID); if (pBSSList == NULL) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Beacon/insert: RxChannel = : %d\n", 
byCurrChannel); BSSbInsertToBSSList((void *) pDevice, sFrame.pHdr->sA3.abyAddr3, *sFrame.pqwTimestamp, *sFrame.pwBeaconInterval, *sFrame.pwCapInfo, byCurrChannel, sFrame.pSSID, sFrame.pSuppRates, sFrame.pExtSuppRates, &sERP, sFrame.pRSN, sFrame.pRSNWPA, sFrame.pIE_Country, sFrame.pIE_Quiet, sFrame.len - WLAN_HDR_ADDR3_LEN, sFrame.pHdr->sA4.abyAddr4, // payload of beacon (void *) pRxPacket); } else { // DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"update bcn: RxChannel = : %d\n", byCurrChannel); BSSbUpdateToBSSList((void *) pDevice, *sFrame.pqwTimestamp, *sFrame.pwBeaconInterval, *sFrame.pwCapInfo, byCurrChannel, bChannelHit, sFrame.pSSID, sFrame.pSuppRates, sFrame.pExtSuppRates, &sERP, sFrame.pRSN, sFrame.pRSNWPA, sFrame.pIE_Country, sFrame.pIE_Quiet, pBSSList, sFrame.len - WLAN_HDR_ADDR3_LEN, sFrame.pHdr->sA4.abyAddr4, // payload of probresponse (void *) pRxPacket); } if (bInScan) { return; } if(byCurrChannel == (BYTE)pMgmt->uCurrChannel) bIsChannelEqual = TRUE; if (bIsChannelEqual && (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)) { // if rx beacon without ERP field if (sERP.bERPExist) { if (WLAN_GET_ERP_USE_PROTECTION(sERP.byERP)){ pDevice->byERPFlag |= WLAN_SET_ERP_USE_PROTECTION(1); pDevice->wUseProtectCntDown = USE_PROTECT_PERIOD; } } else { pDevice->byERPFlag |= WLAN_SET_ERP_USE_PROTECTION(1); pDevice->wUseProtectCntDown = USE_PROTECT_PERIOD; } if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) { if(!WLAN_GET_CAP_INFO_SHORTPREAMBLE(*sFrame.pwCapInfo)) pDevice->byERPFlag |= WLAN_SET_ERP_BARKER_MODE(1); if(!sERP.bERPExist) pDevice->byERPFlag |= WLAN_SET_ERP_NONERP_PRESENT(1); } } // check if BSSID the same if (memcmp(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN) == 0) { bIsBSSIDEqual = TRUE; pDevice->uCurrRSSI = pRxPacket->uRSSI; pDevice->byCurrSQ = pRxPacket->bySQ; if (pMgmt->sNodeDBTable[0].uInActiveCount != 0) { pMgmt->sNodeDBTable[0].uInActiveCount = 0; //DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BCN:Wake Count= [%d]\n", pMgmt->wCountToWakeUp); } } // check if SSID the 
same if (sFrame.pSSID->len == ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len) { if (memcmp(sFrame.pSSID->abySSID, ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->abySSID, sFrame.pSSID->len ) == 0) { bIsSSIDEqual = TRUE; } } if ((WLAN_GET_CAP_INFO_ESS(*sFrame.pwCapInfo)== TRUE) && (bIsBSSIDEqual == TRUE) && (bIsSSIDEqual == TRUE) && (pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) { // add state check to prevent reconnect fail since we'll receive Beacon bIsAPBeacon = TRUE; if (pBSSList != NULL) { // Sync ERP field if ((pBSSList->sERP.bERPExist == TRUE) && (pDevice->byBBType == BB_TYPE_11G)) { if ((pBSSList->sERP.byERP & WLAN_EID_ERP_USE_PROTECTION) != pDevice->bProtectMode) {//0000 0010 pDevice->bProtectMode = (pBSSList->sERP.byERP & WLAN_EID_ERP_USE_PROTECTION); if (pDevice->bProtectMode) { MACvEnableProtectMD(pDevice); } else { MACvDisableProtectMD(pDevice); } vUpdateIFS(pDevice); } if ((pBSSList->sERP.byERP & WLAN_EID_ERP_NONERP_PRESENT) != pDevice->bNonERPPresent) {//0000 0001 pDevice->bNonERPPresent = (pBSSList->sERP.byERP & WLAN_EID_ERP_USE_PROTECTION); } if ((pBSSList->sERP.byERP & WLAN_EID_ERP_BARKER_MODE) != pDevice->bBarkerPreambleMd) {//0000 0100 pDevice->bBarkerPreambleMd = (pBSSList->sERP.byERP & WLAN_EID_ERP_BARKER_MODE); //BarkerPreambleMd has higher priority than shortPreamble bit in Cap if (pDevice->bBarkerPreambleMd) { MACvEnableBarkerPreambleMd(pDevice); } else { MACvDisableBarkerPreambleMd(pDevice); } } } // Sync Short Slot Time if (WLAN_GET_CAP_INFO_SHORTSLOTTIME(pBSSList->wCapInfo) != pDevice->bShortSlotTime) { BOOL bShortSlotTime; bShortSlotTime = WLAN_GET_CAP_INFO_SHORTSLOTTIME(pBSSList->wCapInfo); //DBG_PRN_WLAN05(("Set Short Slot Time: %d\n", pDevice->bShortSlotTime)); //Kyle check if it is OK to set G. 
if (pDevice->byBBType == BB_TYPE_11A) { bShortSlotTime = TRUE; } else if (pDevice->byBBType == BB_TYPE_11B) { bShortSlotTime = FALSE; } if (bShortSlotTime != pDevice->bShortSlotTime) { pDevice->bShortSlotTime = bShortSlotTime; BBvSetShortSlotTime(pDevice); vUpdateIFS(pDevice); } } // // Preamble may change dynamiclly // byOldPreambleType = pDevice->byPreambleType; if (WLAN_GET_CAP_INFO_SHORTPREAMBLE(pBSSList->wCapInfo)) { pDevice->byPreambleType = pDevice->byShortPreamble; } else { pDevice->byPreambleType = 0; } if (pDevice->byPreambleType != byOldPreambleType) CARDvSetRSPINF(pDevice, (BYTE)pDevice->byBBType); // // Basic Rate Set may change dynamiclly // if (pBSSList->eNetworkTypeInUse == PHY_TYPE_11B) { uRateLen = WLAN_RATES_MAXLEN_11B; } pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pBSSList->abySuppRates, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates, uRateLen); pMgmt->abyCurrExtSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pBSSList->abyExtSuppRates, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates, uRateLen); RATEvParseMaxRate((void *)pDevice, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates, TRUE, &(pMgmt->sNodeDBTable[0].wMaxBasicRate), &(pMgmt->sNodeDBTable[0].wMaxSuppRate), &(pMgmt->sNodeDBTable[0].wSuppRate), &(pMgmt->sNodeDBTable[0].byTopCCKBasicRate), &(pMgmt->sNodeDBTable[0].byTopOFDMBasicRate) ); } } // DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Beacon 2 \n"); // check if CF field exisit if (WLAN_GET_CAP_INFO_ESS(*sFrame.pwCapInfo)) { if (sFrame.pCFParms->wCFPDurRemaining > 0) { // TODO: deal with CFP period to set NAV } } HIDWORD(qwTimestamp) = cpu_to_le32(HIDWORD(*sFrame.pqwTimestamp)); LODWORD(qwTimestamp) = cpu_to_le32(LODWORD(*sFrame.pqwTimestamp)); HIDWORD(qwLocalTSF) = HIDWORD(pRxPacket->qwLocalTSF); LODWORD(qwLocalTSF) = LODWORD(pRxPacket->qwLocalTSF); // check if beacon TSF larger or small than our local TSF if (HIDWORD(qwTimestamp) == HIDWORD(qwLocalTSF)) { if (LODWORD(qwTimestamp) >= 
LODWORD(qwLocalTSF)) { bTSFOffsetPostive = TRUE; } else { bTSFOffsetPostive = FALSE; } } else if (HIDWORD(qwTimestamp) > HIDWORD(qwLocalTSF)) { bTSFOffsetPostive = TRUE; } else if (HIDWORD(qwTimestamp) < HIDWORD(qwLocalTSF)) { bTSFOffsetPostive = FALSE; } if (bTSFOffsetPostive) { qwTSFOffset = CARDqGetTSFOffset(pRxPacket->byRxRate, (qwTimestamp), (qwLocalTSF)); } else { qwTSFOffset = CARDqGetTSFOffset(pRxPacket->byRxRate, (qwLocalTSF), (qwTimestamp)); } if (HIDWORD(qwTSFOffset) != 0 || (LODWORD(qwTSFOffset) > TRIVIAL_SYNC_DIFFERENCE )) { bTSFLargeDiff = TRUE; } // if infra mode if (bIsAPBeacon == TRUE) { // Infra mode: Local TSF always follow AP's TSF if Difference huge. if (bTSFLargeDiff) bUpdateTSF = TRUE; if ((pDevice->bEnablePSMode == TRUE) && (sFrame.pTIM)) { /* deal with DTIM, analysis TIM */ pMgmt->bMulticastTIM = WLAN_MGMT_IS_MULTICAST_TIM(sFrame.pTIM->byBitMapCtl) ? TRUE : FALSE ; pMgmt->byDTIMCount = sFrame.pTIM->byDTIMCount; pMgmt->byDTIMPeriod = sFrame.pTIM->byDTIMPeriod; wAIDNumber = pMgmt->wCurrAID & ~(BIT14|BIT15); // check if AID in TIM field bit on // wStartIndex = N1 wStartIndex = WLAN_MGMT_GET_TIM_OFFSET(sFrame.pTIM->byBitMapCtl) << 1; // AIDIndex = N2 wAIDIndex = (wAIDNumber >> 3); if ((wAIDNumber > 0) && (wAIDIndex >= wStartIndex)) { uLocateByteIndex = wAIDIndex - wStartIndex; // len = byDTIMCount + byDTIMPeriod + byDTIMPeriod + byVirtBitMap[0~250] if (sFrame.pTIM->len >= (uLocateByteIndex + 4)) { byTIMBitOn = (0x01) << ((wAIDNumber) % 8); pMgmt->bInTIM = sFrame.pTIM->byVirtBitMap[uLocateByteIndex] & byTIMBitOn ? 
TRUE : FALSE; } else { pMgmt->bInTIM = FALSE; }; } else { pMgmt->bInTIM = FALSE; }; if (pMgmt->bInTIM || (pMgmt->bMulticastTIM && (pMgmt->byDTIMCount == 0))) { pMgmt->bInTIMWake = TRUE; // send out ps-poll packet // DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BCN:In TIM\n"); if (pMgmt->bInTIM) { PSvSendPSPOLL((PSDevice)pDevice); // DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BCN:PS-POLL sent..\n"); } } else { pMgmt->bInTIMWake = FALSE; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BCN: Not In TIM..\n"); if (pDevice->bPWBitOn == FALSE) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BCN: Send Null Packet\n"); if (PSbSendNullPacket(pDevice)) pDevice->bPWBitOn = TRUE; } if(PSbConsiderPowerDown(pDevice, FALSE, FALSE)) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BCN: Power down now...\n"); } } } } // if adhoc mode if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && !bIsAPBeacon && bIsChannelEqual) { if (bIsBSSIDEqual) { // Use sNodeDBTable[0].uInActiveCount as IBSS beacons received count. if (pMgmt->sNodeDBTable[0].uInActiveCount != 0) pMgmt->sNodeDBTable[0].uInActiveCount = 0; // adhoc mode:TSF updated only when beacon larger then local TSF if (bTSFLargeDiff && bTSFOffsetPostive && (pMgmt->eCurrState == WMAC_STATE_JOINTED)) bUpdateTSF = TRUE; // During dpc, already in spinlocked. if (BSSbIsSTAInNodeDB(pDevice, sFrame.pHdr->sA3.abyAddr2, &uNodeIndex)) { // Update the STA, (Techically the Beacons of all the IBSS nodes // should be identical, but that's not happening in practice. 
pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pSuppRates, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates, WLAN_RATES_MAXLEN_11B); RATEvParseMaxRate((void *)pDevice, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates, NULL, TRUE, &(pMgmt->sNodeDBTable[uNodeIndex].wMaxBasicRate), &(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate), &(pMgmt->sNodeDBTable[uNodeIndex].wSuppRate), &(pMgmt->sNodeDBTable[uNodeIndex].byTopCCKBasicRate), &(pMgmt->sNodeDBTable[uNodeIndex].byTopOFDMBasicRate) ); pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble = WLAN_GET_CAP_INFO_SHORTPREAMBLE(*sFrame.pwCapInfo); pMgmt->sNodeDBTable[uNodeIndex].bShortSlotTime = WLAN_GET_CAP_INFO_SHORTSLOTTIME(*sFrame.pwCapInfo); pMgmt->sNodeDBTable[uNodeIndex].uInActiveCount = 0; } else { // Todo, initial Node content BSSvCreateOneNode((PSDevice)pDevice, &uNodeIndex); pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pSuppRates, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates, WLAN_RATES_MAXLEN_11B); RATEvParseMaxRate((void *)pDevice, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates, NULL, TRUE, &(pMgmt->sNodeDBTable[uNodeIndex].wMaxBasicRate), &(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate), &(pMgmt->sNodeDBTable[uNodeIndex].wSuppRate), &(pMgmt->sNodeDBTable[uNodeIndex].byTopCCKBasicRate), &(pMgmt->sNodeDBTable[uNodeIndex].byTopOFDMBasicRate) ); memcpy(pMgmt->sNodeDBTable[uNodeIndex].abyMACAddr, sFrame.pHdr->sA3.abyAddr2, WLAN_ADDR_LEN); pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble = WLAN_GET_CAP_INFO_SHORTPREAMBLE(*sFrame.pwCapInfo); pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate = pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate; /* pMgmt->sNodeDBTable[uNodeIndex].bShortSlotTime = WLAN_GET_CAP_INFO_SHORTSLOTTIME(*sFrame.pwCapInfo); if(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate > RATE_11M) pMgmt->sNodeDBTable[uNodeIndex].bERPExist = TRUE; */ } // if other stations jointed, indicate connect to upper layer.. 
if (pMgmt->eCurrState == WMAC_STATE_STARTED) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Current IBSS State: [Started]........to: [Jointed] \n"); pMgmt->eCurrState = WMAC_STATE_JOINTED; pDevice->bLinkPass = TRUE; ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER); if (netif_queue_stopped(pDevice->dev)){ netif_wake_queue(pDevice->dev); } pMgmt->sNodeDBTable[0].bActive = TRUE; pMgmt->sNodeDBTable[0].uInActiveCount = 0; } } else if (bIsSSIDEqual) { // See other adhoc sta with the same SSID but BSSID is different. // adpot this vars only when TSF larger then us. if (bTSFLargeDiff && bTSFOffsetPostive) { // we don't support ATIM under adhoc mode // if ( sFrame.pIBSSParms->wATIMWindow == 0) { // adpot this vars // TODO: check sFrame cap if privacy on, and support rate syn memcpy(pMgmt->abyCurrBSSID, sFrame.pHdr->sA3.abyAddr3, WLAN_BSSID_LEN); memcpy(pDevice->abyBSSID, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN); pMgmt->wCurrATIMWindow = cpu_to_le16(sFrame.pIBSSParms->wATIMWindow); pMgmt->wCurrBeaconPeriod = cpu_to_le16(*sFrame.pwBeaconInterval); pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pSuppRates, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates, WLAN_RATES_MAXLEN_11B); // set HW beacon interval and re-synchronizing.... DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Rejoining to Other Adhoc group with same SSID........\n"); MACvWriteBeaconInterval(pDevice, pMgmt->wCurrBeaconPeriod); CARDvAdjustTSF(pDevice, pRxPacket->byRxRate, qwTimestamp, pRxPacket->qwLocalTSF); CARDvUpdateNextTBTT(pDevice, qwTimestamp, pMgmt->wCurrBeaconPeriod); // Turn off bssid filter to avoid filter others adhoc station which bssid is different. 
MACvWriteBSSIDAddress(pDevice, pMgmt->abyCurrBSSID); byOldPreambleType = pDevice->byPreambleType; if (WLAN_GET_CAP_INFO_SHORTPREAMBLE(*sFrame.pwCapInfo)) { pDevice->byPreambleType = pDevice->byShortPreamble; } else { pDevice->byPreambleType = 0; } if (pDevice->byPreambleType != byOldPreambleType) CARDvSetRSPINF(pDevice, (BYTE)pDevice->byBBType); // MACvRegBitsOff(pDevice->PortOffset, MAC_REG_RCR, RCR_BSSID); // set highest basic rate // s_vSetHighestBasicRate(pDevice, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates); // Prepare beacon frame bMgrPrepareBeaconToSend((void *) pDevice, pMgmt); // } } } } // endian issue ??? // Update TSF if (bUpdateTSF) { CARDbGetCurrentTSF(pDevice, &qwCurrTSF); CARDvAdjustTSF(pDevice, pRxPacket->byRxRate, qwTimestamp , pRxPacket->qwLocalTSF); CARDbGetCurrentTSF(pDevice, &qwCurrTSF); CARDvUpdateNextTBTT(pDevice, qwTimestamp, pMgmt->wCurrBeaconPeriod); } return; } /*+ * * Routine Description: * Instructs the hw to create a bss using the supplied * attributes. Note that this implementation only supports Ad-Hoc * BSS creation. 
 *
 *
 * Return Value:
 *    CMD_STATUS
 *
-*/

void
vMgrCreateOwnIBSS(
    void *hDeviceContext,
    PCMD_STATUS pStatus
    )
{
    PSDevice pDevice = (PSDevice)hDeviceContext;
    PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
    WORD wMaxBasicRate;
    WORD wMaxSuppRate;
    BYTE byTopCCKBasicRate;
    BYTE byTopOFDMBasicRate;
    QWORD qwCurrTSF;
    unsigned int ii;
    // combined CCK + OFDM rate table (802.11 rate codes; 0x80 bit = basic rate)
    BYTE abyRATE[] = {0x82, 0x84, 0x8B, 0x96, 0x24, 0x30, 0x48, 0x6C, 0x0C, 0x12, 0x18, 0x60};
    // 1M,   2M,   5.5M, 11M (CCK, all basic)
    BYTE abyCCK_RATE[] = {0x82, 0x84, 0x8B, 0x96};
    // 6M,   9M,   12M,  18M,  24M,  36M,  48M,  54M (OFDM)
    BYTE abyOFDM_RATE[] = {0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
    WORD wSuppRate;

    HIDWORD(qwCurrTSF) = 0;
    LODWORD(qwCurrTSF) = 0;

    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Create Basic Service Set .......\n");

    // WPA-None ad-hoc requires TKIP or CCMP; refuse other cipher settings.
    if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) {
        if ((pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) &&
            (pDevice->eEncryptionStatus != Ndis802_11Encryption2Enabled) &&
            (pDevice->eEncryptionStatus != Ndis802_11Encryption3Enabled)) {
            // encryption mode error
            *pStatus = CMD_STATUS_FAILURE;
            return;
        }
    }

    pMgmt->abyCurrSuppRates[0] = WLAN_EID_SUPP_RATES;
    pMgmt->abyCurrExtSuppRates[0] = WLAN_EID_EXTSUPP_RATES;

    // Pick the PHY mode: AP mode uses the configured AP baseband type,
    // otherwise follow the device's current baseband type.
    if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
        pMgmt->eCurrentPHYMode = pMgmt->byAPBBType;
    } else {
        if (pDevice->byBBType == BB_TYPE_11G)
            pMgmt->eCurrentPHYMode = PHY_TYPE_11G;
        if (pDevice->byBBType == BB_TYPE_11B)
            pMgmt->eCurrentPHYMode = PHY_TYPE_11B;
        if (pDevice->byBBType == BB_TYPE_11A)
            pMgmt->eCurrentPHYMode = PHY_TYPE_11A;
    }

    // Build the Supported Rates IE for the chosen PHY mode.
    if (pMgmt->eCurrentPHYMode != PHY_TYPE_11A) {
        // 11B (or pre-11G default): 4 CCK rates only
        pMgmt->abyCurrSuppRates[1] = WLAN_RATES_MAXLEN_11B;
        pMgmt->abyCurrExtSuppRates[1] = 0;
        for (ii = 0; ii < 4; ii++)
            pMgmt->abyCurrSuppRates[2+ii] = abyRATE[ii];
    } else {
        // NOTE(review): abyRATE[0..7] starts with CCK rates, which are not
        // valid for 11a — abyOFDM_RATE looks like the intended table here;
        // left unchanged pending confirmation against hardware behavior.
        pMgmt->abyCurrSuppRates[1] = 8;
        pMgmt->abyCurrExtSuppRates[1] = 0;
        for (ii = 0; ii < 8; ii++)
            pMgmt->abyCurrSuppRates[2+ii] = abyRATE[ii];
    }

    if (pMgmt->eCurrentPHYMode == PHY_TYPE_11G) {
        // 11G: 4 CCK + 4 OFDM in the Supported Rates IE,
        // remaining 4 OFDM rates in the Extended Supported Rates IE
        pMgmt->abyCurrSuppRates[1] = 8;
        pMgmt->abyCurrExtSuppRates[1] = 4;
        for (ii = 0; ii < 4; ii++)
            pMgmt->abyCurrSuppRates[2+ii] = abyCCK_RATE[ii];
        for (ii = 4; ii < 8; ii++)
            pMgmt->abyCurrSuppRates[2+ii] = abyOFDM_RATE[ii-4];
        for (ii = 0; ii < 4; ii++)
            pMgmt->abyCurrExtSuppRates[2+ii] = abyOFDM_RATE[ii+4];
    }

    // Disable Protect Mode
    pDevice->bProtectMode = 0;
    MACvDisableProtectMD(pDevice);
    pDevice->bBarkerPreambleMd = 0;
    MACvDisableBarkerPreambleMd(pDevice);

    // Kyle Test 2003.11.04
    // set HW beacon interval
    if (pMgmt->wIBSSBeaconPeriod == 0)
        pMgmt->wIBSSBeaconPeriod = DEFAULT_IBSS_BI;
    MACvWriteBeaconInterval(pDevice, pMgmt->wIBSSBeaconPeriod);

    // snapshot the TSF (later used to randomize the IBSS BSSID)
    CARDbGetCurrentTSF(pDevice, &qwCurrTSF);
    // clear TSF counter
    CARDbClearCurrentTSF(pDevice);
    // enable TSF counter
    MACvRegBitsOn(pDevice,MAC_REG_TFTCTL,TFTCTL_TSFCNTREN);
    // set Next TBTT
    CARDvSetFirstNextTBTT(pDevice, pMgmt->wIBSSBeaconPeriod);

    pMgmt->uIBSSChannel = pDevice->uChannel;
    if (pMgmt->uIBSSChannel == 0)
        pMgmt->uIBSSChannel = DEFAULT_IBSS_CHANNEL;

    // set channel and clear NAV
    CARDbSetMediaChannel(pDevice, pMgmt->uIBSSChannel);
    pMgmt->uCurrChannel = pMgmt->uIBSSChannel;
    pDevice->byPreambleType = pDevice->byShortPreamble;

    // set basic rate
    RATEvParseMaxRate((void *)pDevice, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
                      (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates, TRUE,
                      &wMaxBasicRate, &wMaxSuppRate, &wSuppRate,
                      &byTopCCKBasicRate, &byTopOFDMBasicRate);

    // short slot time is mandatory on 11a, unavailable otherwise for own IBSS
    if (pDevice->byBBType == BB_TYPE_11A) {
        pDevice->bShortSlotTime = TRUE;
    } else {
        pDevice->bShortSlotTime = FALSE;
    }
    BBvSetShortSlotTime(pDevice);
    // vUpdateIFS() use pDevice->bShortSlotTime as parameter so it must be called
    // after setting ShortSlotTime.
    // CARDvSetBSSMode call vUpdateIFS()
    CARDvSetBSSMode(pDevice);

    if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
        MACvRegBitsOn(pDevice, MAC_REG_HOSTCR, HOSTCR_AP);
        pMgmt->eCurrMode = WMAC_MODE_ESS_AP;
    }

    if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) {
        MACvRegBitsOn(pDevice, MAC_REG_HOSTCR, HOSTCR_ADHOC);
        pMgmt->eCurrMode = WMAC_MODE_IBSS_STA;
    }

    // Adopt pre-configured IBSS vars to current vars
    pMgmt->eCurrState = WMAC_STATE_STARTED;
    pMgmt->wCurrBeaconPeriod = pMgmt->wIBSSBeaconPeriod;
    pMgmt->uCurrChannel = pMgmt->uIBSSChannel;
    pMgmt->wCurrATIMWindow = pMgmt->wIBSSATIMWindow;
    pDevice->uCurrRSSI = 0;
    pDevice->byCurrSQ = 0;
    memcpy(pMgmt->abyDesireSSID, pMgmt->abyAdHocSSID,
           ((PWLAN_IE_SSID)pMgmt->abyAdHocSSID)->len + WLAN_IEHDR_LEN);
    memset(pMgmt->abyCurrSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
    memcpy(pMgmt->abyCurrSSID,
           pMgmt->abyDesireSSID,
           ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len + WLAN_IEHDR_LEN
           );

    if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
        // AP mode BSSID = MAC addr
        memcpy(pMgmt->abyCurrBSSID, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
        DBG_PRT(MSG_LEVEL_INFO, KERN_INFO"AP beacon created BSSID:%02x-%02x-%02x-%02x-%02x-%02x \n",
                pMgmt->abyCurrBSSID[0],
                pMgmt->abyCurrBSSID[1],
                pMgmt->abyCurrBSSID[2],
                pMgmt->abyCurrBSSID[3],
                pMgmt->abyCurrBSSID[4],
                pMgmt->abyCurrBSSID[5]
                );
    }

    if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
        // BSSID selected must be randomized as spec 11.1.3
        // (derive from the TSF snapshot XORed with the MAC address,
        //  then force locally-administered / unicast address bits)
        pMgmt->abyCurrBSSID[5] = (BYTE) (LODWORD(qwCurrTSF)& 0x000000ff);
        pMgmt->abyCurrBSSID[4] = (BYTE)((LODWORD(qwCurrTSF)& 0x0000ff00) >> 8);
        pMgmt->abyCurrBSSID[3] = (BYTE)((LODWORD(qwCurrTSF)& 0x00ff0000) >> 16);
        pMgmt->abyCurrBSSID[2] = (BYTE)((LODWORD(qwCurrTSF)& 0x00000ff0) >> 4);
        pMgmt->abyCurrBSSID[1] = (BYTE)((LODWORD(qwCurrTSF)& 0x000ff000) >> 12);
        pMgmt->abyCurrBSSID[0] = (BYTE)((LODWORD(qwCurrTSF)& 0x0ff00000) >> 20);
        pMgmt->abyCurrBSSID[5] ^= pMgmt->abyMACAddr[0];
        pMgmt->abyCurrBSSID[4] ^= pMgmt->abyMACAddr[1];
        pMgmt->abyCurrBSSID[3] ^= pMgmt->abyMACAddr[2];
        pMgmt->abyCurrBSSID[2] ^= pMgmt->abyMACAddr[3];
        pMgmt->abyCurrBSSID[1] ^= pMgmt->abyMACAddr[4];
        pMgmt->abyCurrBSSID[0] ^= pMgmt->abyMACAddr[5];
        pMgmt->abyCurrBSSID[0] &= ~IEEE_ADDR_GROUP;
        pMgmt->abyCurrBSSID[0] |= IEEE_ADDR_UNIVERSAL;

        DBG_PRT(MSG_LEVEL_INFO, KERN_INFO"Adhoc beacon created bssid:%02x-%02x-%02x-%02x-%02x-%02x \n",
                pMgmt->abyCurrBSSID[0],
                pMgmt->abyCurrBSSID[1],
                pMgmt->abyCurrBSSID[2],
                pMgmt->abyCurrBSSID[3],
                pMgmt->abyCurrBSSID[4],
                pMgmt->abyCurrBSSID[5]
                );
    }

    // set BSSID filter
    MACvWriteBSSIDAddress(pDevice, pMgmt->abyCurrBSSID);
    memcpy(pDevice->abyBSSID, pMgmt->abyCurrBSSID, WLAN_ADDR_LEN);
    MACvRegBitsOn(pDevice, MAC_REG_RCR, RCR_BSSID);
    pDevice->byRxMode |= RCR_BSSID;
    pMgmt->bCurrBSSIDFilterOn = TRUE;

    // Set Capability Info
    pMgmt->wCurrCapInfo = 0;
    if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
        pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_ESS(1);
        pMgmt->byDTIMPeriod = DEFAULT_DTIM_PERIOD;
        pMgmt->byDTIMCount = pMgmt->byDTIMPeriod - 1;
        pDevice->eOPMode = OP_MODE_AP;
    }

    if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
        pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_IBSS(1);
        pDevice->eOPMode = OP_MODE_ADHOC;
    }

    // Select pairwise/group cipher suites according to the encryption setting.
    if (pDevice->bEncryptionEnable) {
        pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_PRIVACY(1);
        if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
            if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
                pMgmt->byCSSPK = KEY_CTL_CCMP;
                pMgmt->byCSSGK = KEY_CTL_CCMP;
            } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
                pMgmt->byCSSPK = KEY_CTL_TKIP;
                pMgmt->byCSSGK = KEY_CTL_TKIP;
            } else {
                pMgmt->byCSSPK = KEY_CTL_NONE;
                pMgmt->byCSSGK = KEY_CTL_WEP;
            }
        } else {
            pMgmt->byCSSPK = KEY_CTL_WEP;
            pMgmt->byCSSGK = KEY_CTL_WEP;
        }
    }

    pMgmt->byERPContext = 0;

    // advertise short preamble capability when enabled locally
    if (pDevice->byPreambleType == 1) {
        pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
    } else {
        pMgmt->wCurrCapInfo &= (~WLAN_SET_CAP_INFO_SHORTPREAMBLE(1));
    }

    pMgmt->eCurrState = WMAC_STATE_STARTED;
    // Prepare beacon to send
    if (bMgrPrepareBeaconToSend((void *) pDevice, pMgmt))
        *pStatus = CMD_STATUS_SUCCESS;

    return;
}

/*+
 *
* Routine Description: * Instructs wmac to join a bss using the supplied attributes. * The arguments may the BSSID or SSID and the rest of the * attributes are obtained from the scan result of known bss list. * * * Return Value: * None. * -*/ void vMgrJoinBSSBegin(void *hDeviceContext, PCMD_STATUS pStatus) { PSDevice pDevice = (PSDevice)hDeviceContext; PSMgmtObject pMgmt = &(pDevice->sMgmtObj); PKnownBSS pCurr = NULL; unsigned int ii, uu; PWLAN_IE_SUPP_RATES pItemRates = NULL; PWLAN_IE_SUPP_RATES pItemExtRates = NULL; PWLAN_IE_SSID pItemSSID; unsigned int uRateLen = WLAN_RATES_MAXLEN; WORD wMaxBasicRate = RATE_1M; WORD wMaxSuppRate = RATE_1M; WORD wSuppRate; BYTE byTopCCKBasicRate = RATE_1M; BYTE byTopOFDMBasicRate = RATE_1M; BOOL bShortSlotTime = FALSE; for (ii = 0; ii < MAX_BSS_NUM; ii++) { if (pMgmt->sBSSList[ii].bActive == TRUE) break; } if (ii == MAX_BSS_NUM) { *pStatus = CMD_STATUS_RESOURCES; DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "BSS finding:BSS list is empty.\n"); return; } // memset(pMgmt->abyDesireBSSID, 0, WLAN_BSSID_LEN); // Search known BSS list for prefer BSSID or SSID pCurr = BSSpSearchBSSList(pDevice, pMgmt->abyDesireBSSID, pMgmt->abyDesireSSID, pDevice->eConfigPHYMode ); if (pCurr == NULL){ *pStatus = CMD_STATUS_RESOURCES; pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID; DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Scanning [%s] not found, disconnected !\n", pItemSSID->abySSID); return; } DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "AP(BSS) finding:Found a AP(BSS)..\n"); if (WLAN_GET_CAP_INFO_ESS(cpu_to_le16(pCurr->wCapInfo))){ if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA) || (pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK)) { /* if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) { if (WPA_SearchRSN(0, WPA_TKIP, pCurr) == FALSE) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"No match RSN info. 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n"); // encryption mode error pMgmt->eCurrState = WMAC_STATE_IDLE; return; } } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) { if (WPA_SearchRSN(0, WPA_AESCCMP, pCurr) == FALSE) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"No match RSN info. ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n"); // encryption mode error pMgmt->eCurrState = WMAC_STATE_IDLE; return; } } */ } #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT //if(pDevice->bWPASuppWextEnabled == TRUE) Encyption_Rebuild(pDevice, pCurr); #endif // Infrastructure BSS s_vMgrSynchBSS(pDevice, WMAC_MODE_ESS_STA, pCurr, pStatus ); if (*pStatus == CMD_STATUS_SUCCESS){ // Adopt this BSS state vars in Mgmt Object pMgmt->uCurrChannel = pCurr->uChannel; memset(pMgmt->abyCurrSuppRates, 0 , WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1); memset(pMgmt->abyCurrExtSuppRates, 0 , WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1); if (pCurr->eNetworkTypeInUse == PHY_TYPE_11B) { uRateLen = WLAN_RATES_MAXLEN_11B; } pItemRates = (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates; pItemExtRates = (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates; // Parse Support Rate IE pItemRates->byElementID = WLAN_EID_SUPP_RATES; pItemRates->len = RATEuSetIE((PWLAN_IE_SUPP_RATES)pCurr->abySuppRates, pItemRates, uRateLen); // Parse Extension Support Rate IE pItemExtRates->byElementID = WLAN_EID_EXTSUPP_RATES; pItemExtRates->len = RATEuSetIE((PWLAN_IE_SUPP_RATES)pCurr->abyExtSuppRates, pItemExtRates, uRateLen); // Stuffing Rate IE if ((pItemExtRates->len > 0) && (pItemRates->len < 8)) { for (ii = 0; ii < (unsigned int) (8 - pItemRates->len); ) { pItemRates->abyRates[pItemRates->len + ii] = pItemExtRates->abyRates[ii]; ii++; if (pItemExtRates->len <= ii) break; } pItemRates->len += (BYTE)ii; if (pItemExtRates->len - ii > 0) { pItemExtRates->len -= (BYTE)ii; for (uu = 0; uu < pItemExtRates->len; uu ++) { pItemExtRates->abyRates[uu] = pItemExtRates->abyRates[uu + ii]; } } else { 
pItemExtRates->len = 0; } } RATEvParseMaxRate((void *)pDevice, pItemRates, pItemExtRates, TRUE, &wMaxBasicRate, &wMaxSuppRate, &wSuppRate, &byTopCCKBasicRate, &byTopOFDMBasicRate); vUpdateIFS(pDevice); // TODO: deal with if wCapInfo the privacy is on, but station WEP is off // TODO: deal with if wCapInfo the PS-Pollable is on. pMgmt->wCurrBeaconPeriod = pCurr->wBeaconInterval; memset(pMgmt->abyCurrSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1); memcpy(pMgmt->abyCurrBSSID, pCurr->abyBSSID, WLAN_BSSID_LEN); memcpy(pMgmt->abyCurrSSID, pCurr->abySSID, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1); pMgmt->eCurrMode = WMAC_MODE_ESS_STA; pMgmt->eCurrState = WMAC_STATE_JOINTED; // Adopt BSS state in Adapter Device Object pDevice->eOPMode = OP_MODE_INFRASTRUCTURE; memcpy(pDevice->abyBSSID, pCurr->abyBSSID, WLAN_BSSID_LEN); // Add current BSS to Candidate list // This should only works for WPA2 BSS, and WPA2 BSS check must be done before. if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2) { BOOL bResult = bAdd_PMKID_Candidate((void *) pDevice, pMgmt->abyCurrBSSID, &pCurr->sRSNCapObj); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"bAdd_PMKID_Candidate: 1(%d)\n", bResult); if (bResult == FALSE) { vFlush_PMKID_Candidate((void *) pDevice); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "vFlush_PMKID_Candidate: 4\n"); bAdd_PMKID_Candidate((void *) pDevice, pMgmt->abyCurrBSSID, &pCurr->sRSNCapObj); } } // Preamble type auto-switch: if AP can receive short-preamble cap, // we can turn on too. 
if (WLAN_GET_CAP_INFO_SHORTPREAMBLE(pCurr->wCapInfo)) { pDevice->byPreambleType = pDevice->byShortPreamble; } else { pDevice->byPreambleType = 0; } // Change PreambleType must set RSPINF again CARDvSetRSPINF(pDevice, (BYTE)pDevice->byBBType); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Join ESS\n"); if (pCurr->eNetworkTypeInUse == PHY_TYPE_11G) { if ((pCurr->sERP.byERP & WLAN_EID_ERP_USE_PROTECTION) != pDevice->bProtectMode) {//0000 0010 pDevice->bProtectMode = (pCurr->sERP.byERP & WLAN_EID_ERP_USE_PROTECTION); if (pDevice->bProtectMode) { MACvEnableProtectMD(pDevice); } else { MACvDisableProtectMD(pDevice); } vUpdateIFS(pDevice); } if ((pCurr->sERP.byERP & WLAN_EID_ERP_NONERP_PRESENT) != pDevice->bNonERPPresent) {//0000 0001 pDevice->bNonERPPresent = (pCurr->sERP.byERP & WLAN_EID_ERP_USE_PROTECTION); } if ((pCurr->sERP.byERP & WLAN_EID_ERP_BARKER_MODE) != pDevice->bBarkerPreambleMd) {//0000 0100 pDevice->bBarkerPreambleMd = (pCurr->sERP.byERP & WLAN_EID_ERP_BARKER_MODE); //BarkerPreambleMd has higher priority than shortPreamble bit in Cap if (pDevice->bBarkerPreambleMd) { MACvEnableBarkerPreambleMd(pDevice); } else { MACvDisableBarkerPreambleMd(pDevice); } } } //DBG_PRN_WLAN05(("wCapInfo: %X\n", pCurr->wCapInfo)); if (WLAN_GET_CAP_INFO_SHORTSLOTTIME(pCurr->wCapInfo) != pDevice->bShortSlotTime) { if (pDevice->byBBType == BB_TYPE_11A) { bShortSlotTime = TRUE; } else if (pDevice->byBBType == BB_TYPE_11B) { bShortSlotTime = FALSE; } else { bShortSlotTime = WLAN_GET_CAP_INFO_SHORTSLOTTIME(pCurr->wCapInfo); } //DBG_PRN_WLAN05(("Set Short Slot Time: %d\n", pDevice->bShortSlotTime)); if (bShortSlotTime != pDevice->bShortSlotTime) { pDevice->bShortSlotTime = bShortSlotTime; BBvSetShortSlotTime(pDevice); vUpdateIFS(pDevice); } } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"End of Join AP -- A/B/G Action\n"); } else { pMgmt->eCurrState = WMAC_STATE_IDLE; }; } else { // ad-hoc mode BSS if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) { if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) 
{ /* if (WPA_SearchRSN(0, WPA_TKIP, pCurr) == FALSE) { // encryption mode error pMgmt->eCurrState = WMAC_STATE_IDLE; return; } */ } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) { /* if (WPA_SearchRSN(0, WPA_AESCCMP, pCurr) == FALSE) { // encryption mode error pMgmt->eCurrState = WMAC_STATE_IDLE; return; } */ } else { // encryption mode error pMgmt->eCurrState = WMAC_STATE_IDLE; return; } } s_vMgrSynchBSS(pDevice, WMAC_MODE_IBSS_STA, pCurr, pStatus ); if (*pStatus == CMD_STATUS_SUCCESS){ // Adopt this BSS state vars in Mgmt Object // TODO: check if CapInfo privacy on, but we don't.. pMgmt->uCurrChannel = pCurr->uChannel; // Parse Support Rate IE pMgmt->abyCurrSuppRates[0] = WLAN_EID_SUPP_RATES; pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pCurr->abySuppRates, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates, WLAN_RATES_MAXLEN_11B); // set basic rate RATEvParseMaxRate((void *)pDevice, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates, NULL, TRUE, &wMaxBasicRate, &wMaxSuppRate, &wSuppRate, &byTopCCKBasicRate, &byTopOFDMBasicRate); vUpdateIFS(pDevice); pMgmt->wCurrCapInfo = pCurr->wCapInfo; pMgmt->wCurrBeaconPeriod = pCurr->wBeaconInterval; memset(pMgmt->abyCurrSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN); memcpy(pMgmt->abyCurrBSSID, pCurr->abyBSSID, WLAN_BSSID_LEN); memcpy(pMgmt->abyCurrSSID, pCurr->abySSID, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN); // pMgmt->wCurrATIMWindow = pCurr->wATIMWindow; pMgmt->eCurrMode = WMAC_MODE_IBSS_STA; pMgmt->eCurrState = WMAC_STATE_STARTED; // Adopt BSS state in Adapter Device Object pDevice->eOPMode = OP_MODE_ADHOC; pDevice->bLinkPass = TRUE; ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER); memcpy(pDevice->abyBSSID, pCurr->abyBSSID, WLAN_BSSID_LEN); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Join IBSS ok:%02x-%02x-%02x-%02x-%02x-%02x \n", pMgmt->abyCurrBSSID[0], pMgmt->abyCurrBSSID[1], pMgmt->abyCurrBSSID[2], pMgmt->abyCurrBSSID[3], pMgmt->abyCurrBSSID[4], 
pMgmt->abyCurrBSSID[5] ); // Preamble type auto-switch: if AP can receive short-preamble cap, // and if registry setting is short preamble we can turn on too. if (WLAN_GET_CAP_INFO_SHORTPREAMBLE(pCurr->wCapInfo)) { pDevice->byPreambleType = pDevice->byShortPreamble; } else { pDevice->byPreambleType = 0; } // Change PreambleType must set RSPINF again CARDvSetRSPINF(pDevice, (BYTE)pDevice->byBBType); // Prepare beacon bMgrPrepareBeaconToSend((void *) pDevice, pMgmt); } else { pMgmt->eCurrState = WMAC_STATE_IDLE; }; }; return; } /*+ * * Routine Description: * Set HW to synchronize a specific BSS from known BSS list. * * * Return Value: * PCM_STATUS * -*/ static void s_vMgrSynchBSS ( PSDevice pDevice, unsigned int uBSSMode, PKnownBSS pCurr, PCMD_STATUS pStatus ) { PSMgmtObject pMgmt = &(pDevice->sMgmtObj); //1M, 2M, 5M, 11M, 18M, 24M, 36M, 54M BYTE abyCurrSuppRatesG[] = {WLAN_EID_SUPP_RATES, 8, 0x02, 0x04, 0x0B, 0x16, 0x24, 0x30, 0x48, 0x6C}; BYTE abyCurrExtSuppRatesG[] = {WLAN_EID_EXTSUPP_RATES, 4, 0x0C, 0x12, 0x18, 0x60}; //6M, 9M, 12M, 48M BYTE abyCurrSuppRatesA[] = {WLAN_EID_SUPP_RATES, 8, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C}; BYTE abyCurrSuppRatesB[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16}; *pStatus = CMD_STATUS_FAILURE; if (s_bCipherMatch(pCurr, pDevice->eEncryptionStatus, &(pMgmt->byCSSPK), &(pMgmt->byCSSGK)) == FALSE) { DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "s_bCipherMatch Fail .......\n"); return; } pMgmt->pCurrBSS = pCurr; // if previous mode is IBSS. 
if(pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) { MACvRegBitsOff(pDevice, MAC_REG_TCR, TCR_AUTOBCNTX); } // Init the BSS informations pDevice->bCCK = TRUE; pDevice->bProtectMode = FALSE; MACvDisableProtectMD(pDevice); pDevice->bBarkerPreambleMd = FALSE; MACvDisableBarkerPreambleMd(pDevice); pDevice->bNonERPPresent = FALSE; pDevice->byPreambleType = 0; pDevice->wBasicRate = 0; // Set Basic Rate CARDbAddBasicRate((void *)pDevice, RATE_1M); // calculate TSF offset // TSF Offset = Received Timestamp TSF - Marked Local's TSF CARDvAdjustTSF(pDevice, pCurr->byRxRate, pCurr->qwBSSTimestamp, pCurr->qwLocalTSF); // set HW beacon interval MACvWriteBeaconInterval(pDevice, pCurr->wBeaconInterval); // set Next TBTT // Next TBTT = ((local_current_TSF / beacon_interval) + 1 ) * beacon_interval CARDvSetFirstNextTBTT(pDevice, pCurr->wBeaconInterval); // set BSSID MACvWriteBSSIDAddress(pDevice, pCurr->abyBSSID); memcpy(pMgmt->abyCurrBSSID, pCurr->abyBSSID, 6); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Sync:set CurrBSSID address = %02x-%02x-%02x=%02x-%02x-%02x\n", pMgmt->abyCurrBSSID[0], pMgmt->abyCurrBSSID[1], pMgmt->abyCurrBSSID[2], pMgmt->abyCurrBSSID[3], pMgmt->abyCurrBSSID[4], pMgmt->abyCurrBSSID[5]); if (pCurr->eNetworkTypeInUse == PHY_TYPE_11A) { if ((pDevice->eConfigPHYMode == PHY_TYPE_11A) || (pDevice->eConfigPHYMode == PHY_TYPE_AUTO)) { pDevice->byBBType = BB_TYPE_11A; pMgmt->eCurrentPHYMode = PHY_TYPE_11A; pDevice->bShortSlotTime = TRUE; BBvSetShortSlotTime(pDevice); CARDvSetBSSMode(pDevice); } else { return; } } else if (pCurr->eNetworkTypeInUse == PHY_TYPE_11B) { if ((pDevice->eConfigPHYMode == PHY_TYPE_11B) || (pDevice->eConfigPHYMode == PHY_TYPE_11G) || (pDevice->eConfigPHYMode == PHY_TYPE_AUTO)) { pDevice->byBBType = BB_TYPE_11B; pMgmt->eCurrentPHYMode = PHY_TYPE_11B; pDevice->bShortSlotTime = FALSE; BBvSetShortSlotTime(pDevice); CARDvSetBSSMode(pDevice); } else { return; } } else { if ((pDevice->eConfigPHYMode == PHY_TYPE_11G) || (pDevice->eConfigPHYMode == PHY_TYPE_AUTO)) { 
pDevice->byBBType = BB_TYPE_11G; pMgmt->eCurrentPHYMode = PHY_TYPE_11G; pDevice->bShortSlotTime = TRUE; BBvSetShortSlotTime(pDevice); CARDvSetBSSMode(pDevice); } else if (pDevice->eConfigPHYMode == PHY_TYPE_11B) { pDevice->byBBType = BB_TYPE_11B; pDevice->bShortSlotTime = FALSE; BBvSetShortSlotTime(pDevice); CARDvSetBSSMode(pDevice); } else { return; } } if (uBSSMode == WMAC_MODE_ESS_STA) { MACvRegBitsOff(pDevice, MAC_REG_HOSTCR, HOSTCR_ADHOC); MACvRegBitsOn(pDevice, MAC_REG_RCR, RCR_BSSID); pDevice->byRxMode |= RCR_BSSID; pMgmt->bCurrBSSIDFilterOn = TRUE; } // set channel and clear NAV CARDbSetMediaChannel(pDevice, pCurr->uChannel); pMgmt->uCurrChannel = pCurr->uChannel; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "<----s_bSynchBSS Set Channel [%d]\n", pCurr->uChannel); if ((pDevice->bUpdateBBVGA) && (pDevice->byBBVGACurrent != pDevice->abyBBVGA[0])) { pDevice->byBBVGACurrent = pDevice->abyBBVGA[0]; BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent); BBvSetShortSlotTime(pDevice); } // // Notes: // 1. In Ad-hoc mode : check if received others beacon as jointed indication, // otherwise we will start own IBSS. // 2. In Infra mode : Supposed we already synchronized with AP right now. 
if (uBSSMode == WMAC_MODE_IBSS_STA) { MACvRegBitsOn(pDevice, MAC_REG_HOSTCR, HOSTCR_ADHOC); MACvRegBitsOn(pDevice, MAC_REG_RCR, RCR_BSSID); pDevice->byRxMode |= RCR_BSSID; pMgmt->bCurrBSSIDFilterOn = TRUE; } if (pDevice->byBBType == BB_TYPE_11A) { memcpy(pMgmt->abyCurrSuppRates, &abyCurrSuppRatesA[0], sizeof(abyCurrSuppRatesA)); pMgmt->abyCurrExtSuppRates[1] = 0; } else if (pDevice->byBBType == BB_TYPE_11B) { memcpy(pMgmt->abyCurrSuppRates, &abyCurrSuppRatesB[0], sizeof(abyCurrSuppRatesB)); pMgmt->abyCurrExtSuppRates[1] = 0; } else { memcpy(pMgmt->abyCurrSuppRates, &abyCurrSuppRatesG[0], sizeof(abyCurrSuppRatesG)); memcpy(pMgmt->abyCurrExtSuppRates, &abyCurrExtSuppRatesG[0], sizeof(abyCurrExtSuppRatesG)); } pMgmt->byERPContext = pCurr->sERP.byERP; *pStatus = CMD_STATUS_SUCCESS; return; }; //mike add: fix NetworkManager 0.7.0 hidden ssid mode in WPA encryption // ,need reset eAuthenMode and eEncryptionStatus static void Encyption_Rebuild( PSDevice pDevice, PKnownBSS pCurr ) { PSMgmtObject pMgmt = &(pDevice->sMgmtObj); /* unsigned int ii, uSameBssidNum=0; */ // if( uSameBssidNum>=2) { //we only check AP in hidden sssid mode if ((pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) || //networkmanager 0.7.0 does not give the pairwise-key selsection, (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) { // so we need re-selsect it according to real pairwise-key info. 
if(pCurr->bWPAValid == TRUE) { //WPA-PSK pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK; if(pCurr->abyPKType[0] == WPA_TKIP) { pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled; //TKIP PRINT_K("Encyption_Rebuild--->ssid reset config to [WPAPSK-TKIP]\n"); } else if(pCurr->abyPKType[0] == WPA_AESCCMP) { pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled; //AES PRINT_K("Encyption_Rebuild--->ssid reset config to [WPAPSK-AES]\n"); } } else if(pCurr->bWPA2Valid == TRUE) { //WPA2-PSK pMgmt->eAuthenMode = WMAC_AUTH_WPA2PSK; if(pCurr->abyCSSPK[0] == WLAN_11i_CSS_TKIP) { pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled; //TKIP PRINT_K("Encyption_Rebuild--->ssid reset config to [WPA2PSK-TKIP]\n"); } else if(pCurr->abyCSSPK[0] == WLAN_11i_CSS_CCMP) { pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled; //AES PRINT_K("Encyption_Rebuild--->ssid reset config to [WPA2PSK-AES]\n"); } } } // } return; } /*+ * * Routine Description: * Format TIM field * * * Return Value: * void * -*/ static void s_vMgrFormatTIM( PSMgmtObject pMgmt, PWLAN_IE_TIM pTIM ) { BYTE byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80}; BYTE byMap; unsigned int ii, jj; BOOL bStartFound = FALSE; BOOL bMulticast = FALSE; WORD wStartIndex = 0; WORD wEndIndex = 0; // Find size of partial virtual bitmap for (ii = 0; ii < (MAX_NODE_NUM + 1); ii++) { byMap = pMgmt->abyPSTxMap[ii]; if (!ii) { // Mask out the broadcast bit which is indicated separately. 
bMulticast = (byMap & byMask[0]) != 0; if(bMulticast) { pMgmt->sNodeDBTable[0].bRxPSPoll = TRUE; } byMap = 0; } if (byMap) { if (!bStartFound) { bStartFound = TRUE; wStartIndex = (WORD)ii; } wEndIndex = (WORD)ii; } } // Round start index down to nearest even number wStartIndex &= ~BIT0; // Round end index up to nearest even number wEndIndex = ((wEndIndex + 1) & ~BIT0); // Size of element payload pTIM->len = 3 + (wEndIndex - wStartIndex) + 1; // Fill in the Fixed parts of the TIM pTIM->byDTIMCount = pMgmt->byDTIMCount; pTIM->byDTIMPeriod = pMgmt->byDTIMPeriod; pTIM->byBitMapCtl = (bMulticast ? TIM_MULTICAST_MASK : 0) | (((wStartIndex >> 1) << 1) & TIM_BITMAPOFFSET_MASK); // Append variable part of TIM for (ii = wStartIndex, jj =0 ; ii <= wEndIndex; ii++, jj++) { pTIM->byVirtBitMap[jj] = pMgmt->abyPSTxMap[ii]; } // Aid = 0 don't used. pTIM->byVirtBitMap[0] &= ~BIT0; } /*+ * * Routine Description: * Constructs an Beacon frame( Ad-hoc mode) * * * Return Value: * PTR to frame; or NULL on allocation failue * -*/ static PSTxMgmtPacket s_MgrMakeBeacon( PSDevice pDevice, PSMgmtObject pMgmt, WORD wCurrCapInfo, WORD wCurrBeaconPeriod, unsigned int uCurrChannel, WORD wCurrATIMWinodw, PWLAN_IE_SSID pCurrSSID, PBYTE pCurrBSSID, PWLAN_IE_SUPP_RATES pCurrSuppRates, PWLAN_IE_SUPP_RATES pCurrExtSuppRates ) { PSTxMgmtPacket pTxPacket = NULL; WLAN_FR_BEACON sFrame; BYTE abyBroadcastAddr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; // prepare beacon frame pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool; memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_BEACON_FR_MAXLEN); pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket)); // Setup the sFrame structure. 
sFrame.pBuf = (PBYTE)pTxPacket->p80211Header; sFrame.len = WLAN_BEACON_FR_MAXLEN; vMgrEncodeBeacon(&sFrame); // Setup the header sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16( ( WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) | WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_BEACON) )); if (pDevice->bEnablePSMode) { sFrame.pHdr->sA3.wFrameCtl |= cpu_to_le16((WORD)WLAN_SET_FC_PWRMGT(1)); } memcpy( sFrame.pHdr->sA3.abyAddr1, abyBroadcastAddr, WLAN_ADDR_LEN); memcpy( sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN); memcpy( sFrame.pHdr->sA3.abyAddr3, pCurrBSSID, WLAN_BSSID_LEN); *sFrame.pwBeaconInterval = cpu_to_le16(wCurrBeaconPeriod); *sFrame.pwCapInfo = cpu_to_le16(wCurrCapInfo); // Copy SSID sFrame.pSSID = (PWLAN_IE_SSID)(sFrame.pBuf + sFrame.len); sFrame.len += ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len + WLAN_IEHDR_LEN; memcpy(sFrame.pSSID, pCurrSSID, ((PWLAN_IE_SSID)pCurrSSID)->len + WLAN_IEHDR_LEN ); // Copy the rate set sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len); sFrame.len += ((PWLAN_IE_SUPP_RATES)pCurrSuppRates)->len + WLAN_IEHDR_LEN; memcpy(sFrame.pSuppRates, pCurrSuppRates, ((PWLAN_IE_SUPP_RATES)pCurrSuppRates)->len + WLAN_IEHDR_LEN ); // DS parameter if (pDevice->byBBType != BB_TYPE_11A) { sFrame.pDSParms = (PWLAN_IE_DS_PARMS)(sFrame.pBuf + sFrame.len); sFrame.len += (1) + WLAN_IEHDR_LEN; sFrame.pDSParms->byElementID = WLAN_EID_DS_PARMS; sFrame.pDSParms->len = 1; sFrame.pDSParms->byCurrChannel = (BYTE)uCurrChannel; } // TIM field if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) { sFrame.pTIM = (PWLAN_IE_TIM)(sFrame.pBuf + sFrame.len); sFrame.pTIM->byElementID = WLAN_EID_TIM; s_vMgrFormatTIM(pMgmt, sFrame.pTIM); sFrame.len += (WLAN_IEHDR_LEN + sFrame.pTIM->len); } if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) { // IBSS parameter sFrame.pIBSSParms = (PWLAN_IE_IBSS_PARMS)(sFrame.pBuf + sFrame.len); sFrame.len += (2) + WLAN_IEHDR_LEN; sFrame.pIBSSParms->byElementID = WLAN_EID_IBSS_PARMS; sFrame.pIBSSParms->len = 2; sFrame.pIBSSParms->wATIMWindow = wCurrATIMWinodw; if 
(pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) { /* RSN parameter */ sFrame.pRSNWPA = (PWLAN_IE_RSN_EXT)(sFrame.pBuf + sFrame.len); sFrame.pRSNWPA->byElementID = WLAN_EID_RSN_WPA; sFrame.pRSNWPA->len = 12; sFrame.pRSNWPA->abyOUI[0] = 0x00; sFrame.pRSNWPA->abyOUI[1] = 0x50; sFrame.pRSNWPA->abyOUI[2] = 0xf2; sFrame.pRSNWPA->abyOUI[3] = 0x01; sFrame.pRSNWPA->wVersion = 1; sFrame.pRSNWPA->abyMulticast[0] = 0x00; sFrame.pRSNWPA->abyMulticast[1] = 0x50; sFrame.pRSNWPA->abyMulticast[2] = 0xf2; if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) sFrame.pRSNWPA->abyMulticast[3] = 0x04;//AES else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) sFrame.pRSNWPA->abyMulticast[3] = 0x02;//TKIP else if (pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled) sFrame.pRSNWPA->abyMulticast[3] = 0x01;//WEP40 else sFrame.pRSNWPA->abyMulticast[3] = 0x00;//NONE // Pairwise Key Cipher Suite sFrame.pRSNWPA->wPKCount = 0; // Auth Key Management Suite *((PWORD)(sFrame.pBuf + sFrame.len + sFrame.pRSNWPA->len))=0; sFrame.pRSNWPA->len +=2; // RSN Capabilites *((PWORD)(sFrame.pBuf + sFrame.len + sFrame.pRSNWPA->len))=0; sFrame.pRSNWPA->len +=2; sFrame.len += sFrame.pRSNWPA->len + WLAN_IEHDR_LEN; } } if (pMgmt->eCurrentPHYMode == PHY_TYPE_11G) { sFrame.pERP = (PWLAN_IE_ERP)(sFrame.pBuf + sFrame.len); sFrame.len += 1 + WLAN_IEHDR_LEN; sFrame.pERP->byElementID = WLAN_EID_ERP; sFrame.pERP->len = 1; sFrame.pERP->byContext = 0; if (pDevice->bProtectMode == TRUE) sFrame.pERP->byContext |= WLAN_EID_ERP_USE_PROTECTION; if (pDevice->bNonERPPresent == TRUE) sFrame.pERP->byContext |= WLAN_EID_ERP_NONERP_PRESENT; if (pDevice->bBarkerPreambleMd == TRUE) sFrame.pERP->byContext |= WLAN_EID_ERP_BARKER_MODE; } if (((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len != 0) { sFrame.pExtSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len); sFrame.len += ((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len + WLAN_IEHDR_LEN; memcpy(sFrame.pExtSuppRates, pCurrExtSuppRates, 
((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len + WLAN_IEHDR_LEN ); } // hostapd wpa/wpa2 IE if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->bEnableHostapd == TRUE)) { if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) { if (pMgmt->wWPAIELen != 0) { sFrame.pRSN = (PWLAN_IE_RSN)(sFrame.pBuf + sFrame.len); memcpy(sFrame.pRSN, pMgmt->abyWPAIE, pMgmt->wWPAIELen); sFrame.len += pMgmt->wWPAIELen; } } } /* Adjust the length fields */ pTxPacket->cbMPDULen = sFrame.len; pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN; return pTxPacket; } /*+ * * Routine Description: * Constructs an Prob-response frame * * * Return Value: * PTR to frame; or NULL on allocation failue * -*/ PSTxMgmtPacket s_MgrMakeProbeResponse( PSDevice pDevice, PSMgmtObject pMgmt, WORD wCurrCapInfo, WORD wCurrBeaconPeriod, unsigned int uCurrChannel, WORD wCurrATIMWinodw, PBYTE pDstAddr, PWLAN_IE_SSID pCurrSSID, PBYTE pCurrBSSID, PWLAN_IE_SUPP_RATES pCurrSuppRates, PWLAN_IE_SUPP_RATES pCurrExtSuppRates, BYTE byPHYType ) { PSTxMgmtPacket pTxPacket = NULL; WLAN_FR_PROBERESP sFrame; pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool; memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_PROBERESP_FR_MAXLEN); pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket)); // Setup the sFrame structure. 
sFrame.pBuf = (PBYTE)pTxPacket->p80211Header; sFrame.len = WLAN_PROBERESP_FR_MAXLEN; vMgrEncodeProbeResponse(&sFrame); // Setup the header sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16( ( WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) | WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_PROBERESP) )); memcpy( sFrame.pHdr->sA3.abyAddr1, pDstAddr, WLAN_ADDR_LEN); memcpy( sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN); memcpy( sFrame.pHdr->sA3.abyAddr3, pCurrBSSID, WLAN_BSSID_LEN); *sFrame.pwBeaconInterval = cpu_to_le16(wCurrBeaconPeriod); *sFrame.pwCapInfo = cpu_to_le16(wCurrCapInfo); if (byPHYType == BB_TYPE_11B) { *sFrame.pwCapInfo &= cpu_to_le16((WORD)~(WLAN_SET_CAP_INFO_SHORTSLOTTIME(1))); } // Copy SSID sFrame.pSSID = (PWLAN_IE_SSID)(sFrame.pBuf + sFrame.len); sFrame.len += ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len + WLAN_IEHDR_LEN; memcpy(sFrame.pSSID, pCurrSSID, ((PWLAN_IE_SSID)pCurrSSID)->len + WLAN_IEHDR_LEN ); // Copy the rate set sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len); sFrame.len += ((PWLAN_IE_SUPP_RATES)pCurrSuppRates)->len + WLAN_IEHDR_LEN; memcpy(sFrame.pSuppRates, pCurrSuppRates, ((PWLAN_IE_SUPP_RATES)pCurrSuppRates)->len + WLAN_IEHDR_LEN ); // DS parameter if (pDevice->byBBType != BB_TYPE_11A) { sFrame.pDSParms = (PWLAN_IE_DS_PARMS)(sFrame.pBuf + sFrame.len); sFrame.len += (1) + WLAN_IEHDR_LEN; sFrame.pDSParms->byElementID = WLAN_EID_DS_PARMS; sFrame.pDSParms->len = 1; sFrame.pDSParms->byCurrChannel = (BYTE)uCurrChannel; } if (pMgmt->eCurrMode != WMAC_MODE_ESS_AP) { // IBSS parameter sFrame.pIBSSParms = (PWLAN_IE_IBSS_PARMS)(sFrame.pBuf + sFrame.len); sFrame.len += (2) + WLAN_IEHDR_LEN; sFrame.pIBSSParms->byElementID = WLAN_EID_IBSS_PARMS; sFrame.pIBSSParms->len = 2; sFrame.pIBSSParms->wATIMWindow = 0; } if (pDevice->byBBType == BB_TYPE_11G) { sFrame.pERP = (PWLAN_IE_ERP)(sFrame.pBuf + sFrame.len); sFrame.len += 1 + WLAN_IEHDR_LEN; sFrame.pERP->byElementID = WLAN_EID_ERP; sFrame.pERP->len = 1; sFrame.pERP->byContext = 0; if (pDevice->bProtectMode 
== TRUE) sFrame.pERP->byContext |= WLAN_EID_ERP_USE_PROTECTION; if (pDevice->bNonERPPresent == TRUE) sFrame.pERP->byContext |= WLAN_EID_ERP_NONERP_PRESENT; if (pDevice->bBarkerPreambleMd == TRUE) sFrame.pERP->byContext |= WLAN_EID_ERP_BARKER_MODE; } if (((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len != 0) { sFrame.pExtSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len); sFrame.len += ((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len + WLAN_IEHDR_LEN; memcpy(sFrame.pExtSuppRates, pCurrExtSuppRates, ((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len + WLAN_IEHDR_LEN ); } // hostapd wpa/wpa2 IE if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->bEnableHostapd == TRUE)) { if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) { if (pMgmt->wWPAIELen != 0) { sFrame.pRSN = (PWLAN_IE_RSN)(sFrame.pBuf + sFrame.len); memcpy(sFrame.pRSN, pMgmt->abyWPAIE, pMgmt->wWPAIELen); sFrame.len += pMgmt->wWPAIELen; } } } // Adjust the length fields pTxPacket->cbMPDULen = sFrame.len; pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN; return pTxPacket; } /*+ * * Routine Description: * Constructs an association request frame * * * Return Value: * A ptr to frame or NULL on allocation failue * -*/ PSTxMgmtPacket s_MgrMakeAssocRequest( PSDevice pDevice, PSMgmtObject pMgmt, PBYTE pDAddr, WORD wCurrCapInfo, WORD wListenInterval, PWLAN_IE_SSID pCurrSSID, PWLAN_IE_SUPP_RATES pCurrRates, PWLAN_IE_SUPP_RATES pCurrExtSuppRates ) { PSTxMgmtPacket pTxPacket = NULL; WLAN_FR_ASSOCREQ sFrame; PBYTE pbyIEs; PBYTE pbyRSN; pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool; memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_ASSOCREQ_FR_MAXLEN); pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket)); // Setup the sFrame structure. 
sFrame.pBuf = (PBYTE)pTxPacket->p80211Header; sFrame.len = WLAN_ASSOCREQ_FR_MAXLEN; // format fixed field frame structure vMgrEncodeAssocRequest(&sFrame); // Setup the header sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16( ( WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) | WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_ASSOCREQ) )); memcpy( sFrame.pHdr->sA3.abyAddr1, pDAddr, WLAN_ADDR_LEN); memcpy( sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN); memcpy( sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN); // Set the capibility and listen interval *(sFrame.pwCapInfo) = cpu_to_le16(wCurrCapInfo); *(sFrame.pwListenInterval) = cpu_to_le16(wListenInterval); // sFrame.len point to end of fixed field sFrame.pSSID = (PWLAN_IE_SSID)(sFrame.pBuf + sFrame.len); sFrame.len += pCurrSSID->len + WLAN_IEHDR_LEN; memcpy(sFrame.pSSID, pCurrSSID, pCurrSSID->len + WLAN_IEHDR_LEN); pMgmt->sAssocInfo.AssocInfo.RequestIELength = pCurrSSID->len + WLAN_IEHDR_LEN; pMgmt->sAssocInfo.AssocInfo.OffsetRequestIEs = sizeof(NDIS_802_11_ASSOCIATION_INFORMATION); pbyIEs = pMgmt->sAssocInfo.abyIEs; memcpy(pbyIEs, pCurrSSID, pCurrSSID->len + WLAN_IEHDR_LEN); pbyIEs += pCurrSSID->len + WLAN_IEHDR_LEN; // Copy the rate set sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len); if ((pDevice->byBBType == BB_TYPE_11B) && (pCurrRates->len > 4)) sFrame.len += 4 + WLAN_IEHDR_LEN; else sFrame.len += pCurrRates->len + WLAN_IEHDR_LEN; memcpy(sFrame.pSuppRates, pCurrRates, pCurrRates->len + WLAN_IEHDR_LEN); // Copy the extension rate set if ((pDevice->byBBType == BB_TYPE_11G) && (pCurrExtSuppRates->len > 0)) { sFrame.pExtSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len); sFrame.len += pCurrExtSuppRates->len + WLAN_IEHDR_LEN; memcpy(sFrame.pExtSuppRates, pCurrExtSuppRates, pCurrExtSuppRates->len + WLAN_IEHDR_LEN); } pMgmt->sAssocInfo.AssocInfo.RequestIELength += pCurrRates->len + WLAN_IEHDR_LEN; memcpy(pbyIEs, pCurrRates, pCurrRates->len + WLAN_IEHDR_LEN); pbyIEs += pCurrRates->len + WLAN_IEHDR_LEN; 
if (((pMgmt->eAuthenMode == WMAC_AUTH_WPA) || (pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) || (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE)) && (pMgmt->pCurrBSS != NULL)) { /* WPA IE */ sFrame.pRSNWPA = (PWLAN_IE_RSN_EXT)(sFrame.pBuf + sFrame.len); sFrame.pRSNWPA->byElementID = WLAN_EID_RSN_WPA; sFrame.pRSNWPA->len = 16; sFrame.pRSNWPA->abyOUI[0] = 0x00; sFrame.pRSNWPA->abyOUI[1] = 0x50; sFrame.pRSNWPA->abyOUI[2] = 0xf2; sFrame.pRSNWPA->abyOUI[3] = 0x01; sFrame.pRSNWPA->wVersion = 1; //Group Key Cipher Suite sFrame.pRSNWPA->abyMulticast[0] = 0x00; sFrame.pRSNWPA->abyMulticast[1] = 0x50; sFrame.pRSNWPA->abyMulticast[2] = 0xf2; if (pMgmt->byCSSGK == KEY_CTL_WEP) { sFrame.pRSNWPA->abyMulticast[3] = pMgmt->pCurrBSS->byGKType; } else if (pMgmt->byCSSGK == KEY_CTL_TKIP) { sFrame.pRSNWPA->abyMulticast[3] = WPA_TKIP; } else if (pMgmt->byCSSGK == KEY_CTL_CCMP) { sFrame.pRSNWPA->abyMulticast[3] = WPA_AESCCMP; } else { sFrame.pRSNWPA->abyMulticast[3] = WPA_NONE; } // Pairwise Key Cipher Suite sFrame.pRSNWPA->wPKCount = 1; sFrame.pRSNWPA->PKSList[0].abyOUI[0] = 0x00; sFrame.pRSNWPA->PKSList[0].abyOUI[1] = 0x50; sFrame.pRSNWPA->PKSList[0].abyOUI[2] = 0xf2; if (pMgmt->byCSSPK == KEY_CTL_TKIP) { sFrame.pRSNWPA->PKSList[0].abyOUI[3] = WPA_TKIP; } else if (pMgmt->byCSSPK == KEY_CTL_CCMP) { sFrame.pRSNWPA->PKSList[0].abyOUI[3] = WPA_AESCCMP; } else { sFrame.pRSNWPA->PKSList[0].abyOUI[3] = WPA_NONE; } // Auth Key Management Suite pbyRSN = (PBYTE)(sFrame.pBuf + sFrame.len + 2 + sFrame.pRSNWPA->len); *pbyRSN++=0x01; *pbyRSN++=0x00; *pbyRSN++=0x00; *pbyRSN++=0x50; *pbyRSN++=0xf2; if (pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) { *pbyRSN++=WPA_AUTH_PSK; } else if (pMgmt->eAuthenMode == WMAC_AUTH_WPA) { *pbyRSN++=WPA_AUTH_IEEE802_1X; } else { *pbyRSN++=WPA_NONE; } sFrame.pRSNWPA->len +=6; // RSN Capabilites *pbyRSN++=0x00; *pbyRSN++=0x00; sFrame.pRSNWPA->len +=2; sFrame.len += sFrame.pRSNWPA->len + WLAN_IEHDR_LEN; // copy to AssocInfo. 
for OID_802_11_ASSOCIATION_INFORMATION pMgmt->sAssocInfo.AssocInfo.RequestIELength += sFrame.pRSNWPA->len + WLAN_IEHDR_LEN; memcpy(pbyIEs, sFrame.pRSNWPA, sFrame.pRSNWPA->len + WLAN_IEHDR_LEN); pbyIEs += sFrame.pRSNWPA->len + WLAN_IEHDR_LEN; } else if (((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) || (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) && (pMgmt->pCurrBSS != NULL)) { unsigned int ii; PWORD pwPMKID; // WPA IE sFrame.pRSN = (PWLAN_IE_RSN)(sFrame.pBuf + sFrame.len); sFrame.pRSN->byElementID = WLAN_EID_RSN; sFrame.pRSN->len = 6; //Version(2)+GK(4) sFrame.pRSN->wVersion = 1; //Group Key Cipher Suite sFrame.pRSN->abyRSN[0] = 0x00; sFrame.pRSN->abyRSN[1] = 0x0F; sFrame.pRSN->abyRSN[2] = 0xAC; if (pMgmt->byCSSGK == KEY_CTL_WEP) { sFrame.pRSN->abyRSN[3] = pMgmt->pCurrBSS->byCSSGK; } else if (pMgmt->byCSSGK == KEY_CTL_TKIP) { sFrame.pRSN->abyRSN[3] = WLAN_11i_CSS_TKIP; } else if (pMgmt->byCSSGK == KEY_CTL_CCMP) { sFrame.pRSN->abyRSN[3] = WLAN_11i_CSS_CCMP; } else { sFrame.pRSN->abyRSN[3] = WLAN_11i_CSS_UNKNOWN; } // Pairwise Key Cipher Suite sFrame.pRSN->abyRSN[4] = 1; sFrame.pRSN->abyRSN[5] = 0; sFrame.pRSN->abyRSN[6] = 0x00; sFrame.pRSN->abyRSN[7] = 0x0F; sFrame.pRSN->abyRSN[8] = 0xAC; if (pMgmt->byCSSPK == KEY_CTL_TKIP) { sFrame.pRSN->abyRSN[9] = WLAN_11i_CSS_TKIP; } else if (pMgmt->byCSSPK == KEY_CTL_CCMP) { sFrame.pRSN->abyRSN[9] = WLAN_11i_CSS_CCMP; } else if (pMgmt->byCSSPK == KEY_CTL_NONE) { sFrame.pRSN->abyRSN[9] = WLAN_11i_CSS_USE_GROUP; } else { sFrame.pRSN->abyRSN[9] = WLAN_11i_CSS_UNKNOWN; } sFrame.pRSN->len += 6; // Auth Key Management Suite sFrame.pRSN->abyRSN[10] = 1; sFrame.pRSN->abyRSN[11] = 0; sFrame.pRSN->abyRSN[12] = 0x00; sFrame.pRSN->abyRSN[13] = 0x0F; sFrame.pRSN->abyRSN[14] = 0xAC; if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK) { sFrame.pRSN->abyRSN[15] = WLAN_11i_AKMSS_PSK; } else if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2) { sFrame.pRSN->abyRSN[15] = WLAN_11i_AKMSS_802_1X; } else { sFrame.pRSN->abyRSN[15] = WLAN_11i_AKMSS_UNKNOWN; } sFrame.pRSN->len 
+=6; // RSN Capabilites if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist == TRUE) { memcpy(&sFrame.pRSN->abyRSN[16], &pMgmt->pCurrBSS->sRSNCapObj.wRSNCap, 2); } else { sFrame.pRSN->abyRSN[16] = 0; sFrame.pRSN->abyRSN[17] = 0; } sFrame.pRSN->len +=2; if ((pDevice->gsPMKID.BSSIDInfoCount > 0) && (pDevice->bRoaming == TRUE) && (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) { // RSN PMKID pbyRSN = &sFrame.pRSN->abyRSN[18]; pwPMKID = (PWORD)pbyRSN; // Point to PMKID count *pwPMKID = 0; // Initialize PMKID count pbyRSN += 2; // Point to PMKID list for (ii = 0; ii < pDevice->gsPMKID.BSSIDInfoCount; ii++) { if (!memcmp(&pDevice->gsPMKID.BSSIDInfo[ii].BSSID[0], pMgmt->abyCurrBSSID, ETH_ALEN)) { (*pwPMKID)++; memcpy(pbyRSN, pDevice->gsPMKID.BSSIDInfo[ii].PMKID, 16); pbyRSN += 16; } } if (*pwPMKID != 0) { sFrame.pRSN->len += (2 + (*pwPMKID)*16); } } sFrame.len += sFrame.pRSN->len + WLAN_IEHDR_LEN; // copy to AssocInfo. for OID_802_11_ASSOCIATION_INFORMATION pMgmt->sAssocInfo.AssocInfo.RequestIELength += sFrame.pRSN->len + WLAN_IEHDR_LEN; memcpy(pbyIEs, sFrame.pRSN, sFrame.pRSN->len + WLAN_IEHDR_LEN); pbyIEs += sFrame.pRSN->len + WLAN_IEHDR_LEN; } // Adjust the length fields pTxPacket->cbMPDULen = sFrame.len; pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN; return pTxPacket; } /*+ * * Routine Description: * Constructs an re-association request frame * * * Return Value: * A ptr to frame or NULL on allocation failue * -*/ PSTxMgmtPacket s_MgrMakeReAssocRequest( PSDevice pDevice, PSMgmtObject pMgmt, PBYTE pDAddr, WORD wCurrCapInfo, WORD wListenInterval, PWLAN_IE_SSID pCurrSSID, PWLAN_IE_SUPP_RATES pCurrRates, PWLAN_IE_SUPP_RATES pCurrExtSuppRates ) { PSTxMgmtPacket pTxPacket = NULL; WLAN_FR_REASSOCREQ sFrame; PBYTE pbyIEs; PBYTE pbyRSN; pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool; memset( pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_REASSOCREQ_FR_MAXLEN); pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket)); /* Setup the sFrame structure. 
*/ sFrame.pBuf = (PBYTE)pTxPacket->p80211Header; sFrame.len = WLAN_REASSOCREQ_FR_MAXLEN; // format fixed field frame structure vMgrEncodeReassocRequest(&sFrame); /* Setup the header */ sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16( ( WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) | WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_REASSOCREQ) )); memcpy( sFrame.pHdr->sA3.abyAddr1, pDAddr, WLAN_ADDR_LEN); memcpy( sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN); memcpy( sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN); /* Set the capibility and listen interval */ *(sFrame.pwCapInfo) = cpu_to_le16(wCurrCapInfo); *(sFrame.pwListenInterval) = cpu_to_le16(wListenInterval); memcpy(sFrame.pAddrCurrAP, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN); /* Copy the SSID */ /* sFrame.len point to end of fixed field */ sFrame.pSSID = (PWLAN_IE_SSID)(sFrame.pBuf + sFrame.len); sFrame.len += pCurrSSID->len + WLAN_IEHDR_LEN; memcpy(sFrame.pSSID, pCurrSSID, pCurrSSID->len + WLAN_IEHDR_LEN); pMgmt->sAssocInfo.AssocInfo.RequestIELength = pCurrSSID->len + WLAN_IEHDR_LEN; pMgmt->sAssocInfo.AssocInfo.OffsetRequestIEs = sizeof(NDIS_802_11_ASSOCIATION_INFORMATION); pbyIEs = pMgmt->sAssocInfo.abyIEs; memcpy(pbyIEs, pCurrSSID, pCurrSSID->len + WLAN_IEHDR_LEN); pbyIEs += pCurrSSID->len + WLAN_IEHDR_LEN; /* Copy the rate set */ /* sFrame.len point to end of SSID */ sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len); sFrame.len += pCurrRates->len + WLAN_IEHDR_LEN; memcpy(sFrame.pSuppRates, pCurrRates, pCurrRates->len + WLAN_IEHDR_LEN); // Copy the extension rate set if ((pMgmt->eCurrentPHYMode == PHY_TYPE_11G) && (pCurrExtSuppRates->len > 0)) { sFrame.pExtSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len); sFrame.len += pCurrExtSuppRates->len + WLAN_IEHDR_LEN; memcpy(sFrame.pExtSuppRates, pCurrExtSuppRates, pCurrExtSuppRates->len + WLAN_IEHDR_LEN); } pMgmt->sAssocInfo.AssocInfo.RequestIELength += pCurrRates->len + WLAN_IEHDR_LEN; memcpy(pbyIEs, pCurrRates, pCurrRates->len + 
WLAN_IEHDR_LEN); pbyIEs += pCurrRates->len + WLAN_IEHDR_LEN; if (((pMgmt->eAuthenMode == WMAC_AUTH_WPA) || (pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) || (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE)) && (pMgmt->pCurrBSS != NULL)) { /* WPA IE */ sFrame.pRSNWPA = (PWLAN_IE_RSN_EXT)(sFrame.pBuf + sFrame.len); sFrame.pRSNWPA->byElementID = WLAN_EID_RSN_WPA; sFrame.pRSNWPA->len = 16; sFrame.pRSNWPA->abyOUI[0] = 0x00; sFrame.pRSNWPA->abyOUI[1] = 0x50; sFrame.pRSNWPA->abyOUI[2] = 0xf2; sFrame.pRSNWPA->abyOUI[3] = 0x01; sFrame.pRSNWPA->wVersion = 1; //Group Key Cipher Suite sFrame.pRSNWPA->abyMulticast[0] = 0x00; sFrame.pRSNWPA->abyMulticast[1] = 0x50; sFrame.pRSNWPA->abyMulticast[2] = 0xf2; if (pMgmt->byCSSGK == KEY_CTL_WEP) { sFrame.pRSNWPA->abyMulticast[3] = pMgmt->pCurrBSS->byGKType; } else if (pMgmt->byCSSGK == KEY_CTL_TKIP) { sFrame.pRSNWPA->abyMulticast[3] = WPA_TKIP; } else if (pMgmt->byCSSGK == KEY_CTL_CCMP) { sFrame.pRSNWPA->abyMulticast[3] = WPA_AESCCMP; } else { sFrame.pRSNWPA->abyMulticast[3] = WPA_NONE; } // Pairwise Key Cipher Suite sFrame.pRSNWPA->wPKCount = 1; sFrame.pRSNWPA->PKSList[0].abyOUI[0] = 0x00; sFrame.pRSNWPA->PKSList[0].abyOUI[1] = 0x50; sFrame.pRSNWPA->PKSList[0].abyOUI[2] = 0xf2; if (pMgmt->byCSSPK == KEY_CTL_TKIP) { sFrame.pRSNWPA->PKSList[0].abyOUI[3] = WPA_TKIP; } else if (pMgmt->byCSSPK == KEY_CTL_CCMP) { sFrame.pRSNWPA->PKSList[0].abyOUI[3] = WPA_AESCCMP; } else { sFrame.pRSNWPA->PKSList[0].abyOUI[3] = WPA_NONE; } // Auth Key Management Suite pbyRSN = (PBYTE)(sFrame.pBuf + sFrame.len + 2 + sFrame.pRSNWPA->len); *pbyRSN++=0x01; *pbyRSN++=0x00; *pbyRSN++=0x00; *pbyRSN++=0x50; *pbyRSN++=0xf2; if (pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) { *pbyRSN++=WPA_AUTH_PSK; } else if (pMgmt->eAuthenMode == WMAC_AUTH_WPA) { *pbyRSN++=WPA_AUTH_IEEE802_1X; } else { *pbyRSN++=WPA_NONE; } sFrame.pRSNWPA->len +=6; // RSN Capabilites *pbyRSN++=0x00; *pbyRSN++=0x00; sFrame.pRSNWPA->len +=2; sFrame.len += sFrame.pRSNWPA->len + WLAN_IEHDR_LEN; // copy to AssocInfo. 
for OID_802_11_ASSOCIATION_INFORMATION pMgmt->sAssocInfo.AssocInfo.RequestIELength += sFrame.pRSNWPA->len + WLAN_IEHDR_LEN; memcpy(pbyIEs, sFrame.pRSNWPA, sFrame.pRSNWPA->len + WLAN_IEHDR_LEN); pbyIEs += sFrame.pRSNWPA->len + WLAN_IEHDR_LEN; } else if (((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) || (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) && (pMgmt->pCurrBSS != NULL)) { unsigned int ii; PWORD pwPMKID; /* WPA IE */ sFrame.pRSN = (PWLAN_IE_RSN)(sFrame.pBuf + sFrame.len); sFrame.pRSN->byElementID = WLAN_EID_RSN; sFrame.pRSN->len = 6; //Version(2)+GK(4) sFrame.pRSN->wVersion = 1; //Group Key Cipher Suite sFrame.pRSN->abyRSN[0] = 0x00; sFrame.pRSN->abyRSN[1] = 0x0F; sFrame.pRSN->abyRSN[2] = 0xAC; if (pMgmt->byCSSGK == KEY_CTL_WEP) { sFrame.pRSN->abyRSN[3] = pMgmt->pCurrBSS->byCSSGK; } else if (pMgmt->byCSSGK == KEY_CTL_TKIP) { sFrame.pRSN->abyRSN[3] = WLAN_11i_CSS_TKIP; } else if (pMgmt->byCSSGK == KEY_CTL_CCMP) { sFrame.pRSN->abyRSN[3] = WLAN_11i_CSS_CCMP; } else { sFrame.pRSN->abyRSN[3] = WLAN_11i_CSS_UNKNOWN; } // Pairwise Key Cipher Suite sFrame.pRSN->abyRSN[4] = 1; sFrame.pRSN->abyRSN[5] = 0; sFrame.pRSN->abyRSN[6] = 0x00; sFrame.pRSN->abyRSN[7] = 0x0F; sFrame.pRSN->abyRSN[8] = 0xAC; if (pMgmt->byCSSPK == KEY_CTL_TKIP) { sFrame.pRSN->abyRSN[9] = WLAN_11i_CSS_TKIP; } else if (pMgmt->byCSSPK == KEY_CTL_CCMP) { sFrame.pRSN->abyRSN[9] = WLAN_11i_CSS_CCMP; } else if (pMgmt->byCSSPK == KEY_CTL_NONE) { sFrame.pRSN->abyRSN[9] = WLAN_11i_CSS_USE_GROUP; } else { sFrame.pRSN->abyRSN[9] = WLAN_11i_CSS_UNKNOWN; } sFrame.pRSN->len += 6; // Auth Key Management Suite sFrame.pRSN->abyRSN[10] = 1; sFrame.pRSN->abyRSN[11] = 0; sFrame.pRSN->abyRSN[12] = 0x00; sFrame.pRSN->abyRSN[13] = 0x0F; sFrame.pRSN->abyRSN[14] = 0xAC; if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK) { sFrame.pRSN->abyRSN[15] = WLAN_11i_AKMSS_PSK; } else if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2) { sFrame.pRSN->abyRSN[15] = WLAN_11i_AKMSS_802_1X; } else { sFrame.pRSN->abyRSN[15] = WLAN_11i_AKMSS_UNKNOWN; } 
sFrame.pRSN->len +=6; // RSN Capabilites if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist == TRUE) { memcpy(&sFrame.pRSN->abyRSN[16], &pMgmt->pCurrBSS->sRSNCapObj.wRSNCap, 2); } else { sFrame.pRSN->abyRSN[16] = 0; sFrame.pRSN->abyRSN[17] = 0; } sFrame.pRSN->len +=2; if ((pDevice->gsPMKID.BSSIDInfoCount > 0) && (pDevice->bRoaming == TRUE) && (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) { // RSN PMKID pbyRSN = &sFrame.pRSN->abyRSN[18]; pwPMKID = (PWORD)pbyRSN; // Point to PMKID count *pwPMKID = 0; // Initialize PMKID count pbyRSN += 2; // Point to PMKID list for (ii = 0; ii < pDevice->gsPMKID.BSSIDInfoCount; ii++) { if (!memcmp(&pDevice->gsPMKID.BSSIDInfo[ii].BSSID[0], pMgmt->abyCurrBSSID, ETH_ALEN)) { (*pwPMKID)++; memcpy(pbyRSN, pDevice->gsPMKID.BSSIDInfo[ii].PMKID, 16); pbyRSN += 16; } } if (*pwPMKID != 0) { sFrame.pRSN->len += (2 + (*pwPMKID)*16); } } sFrame.len += sFrame.pRSN->len + WLAN_IEHDR_LEN; // copy to AssocInfo. for OID_802_11_ASSOCIATION_INFORMATION pMgmt->sAssocInfo.AssocInfo.RequestIELength += sFrame.pRSN->len + WLAN_IEHDR_LEN; memcpy(pbyIEs, sFrame.pRSN, sFrame.pRSN->len + WLAN_IEHDR_LEN); pbyIEs += sFrame.pRSN->len + WLAN_IEHDR_LEN; } /* Adjust the length fields */ pTxPacket->cbMPDULen = sFrame.len; pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN; return pTxPacket; } /*+ * * Routine Description: * Constructs an assoc-response frame * * * Return Value: * PTR to frame; or NULL on allocation failue * -*/ PSTxMgmtPacket s_MgrMakeAssocResponse( PSDevice pDevice, PSMgmtObject pMgmt, WORD wCurrCapInfo, WORD wAssocStatus, WORD wAssocAID, PBYTE pDstAddr, PWLAN_IE_SUPP_RATES pCurrSuppRates, PWLAN_IE_SUPP_RATES pCurrExtSuppRates ) { PSTxMgmtPacket pTxPacket = NULL; WLAN_FR_ASSOCRESP sFrame; pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool; memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_ASSOCREQ_FR_MAXLEN); pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket)); // Setup the sFrame structure sFrame.pBuf = 
(PBYTE)pTxPacket->p80211Header; sFrame.len = WLAN_REASSOCRESP_FR_MAXLEN; vMgrEncodeAssocResponse(&sFrame); // Setup the header sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16( ( WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) | WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_ASSOCRESP) )); memcpy( sFrame.pHdr->sA3.abyAddr1, pDstAddr, WLAN_ADDR_LEN); memcpy( sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN); memcpy( sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN); *sFrame.pwCapInfo = cpu_to_le16(wCurrCapInfo); *sFrame.pwStatus = cpu_to_le16(wAssocStatus); *sFrame.pwAid = cpu_to_le16((WORD)(wAssocAID | BIT14 | BIT15)); // Copy the rate set sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len); sFrame.len += ((PWLAN_IE_SUPP_RATES)pCurrSuppRates)->len + WLAN_IEHDR_LEN; memcpy(sFrame.pSuppRates, pCurrSuppRates, ((PWLAN_IE_SUPP_RATES)pCurrSuppRates)->len + WLAN_IEHDR_LEN ); if (((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len != 0) { sFrame.pExtSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len); sFrame.len += ((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len + WLAN_IEHDR_LEN; memcpy(sFrame.pExtSuppRates, pCurrExtSuppRates, ((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len + WLAN_IEHDR_LEN ); } // Adjust the length fields pTxPacket->cbMPDULen = sFrame.len; pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN; return pTxPacket; } /*+ * * Routine Description: * Constructs an reassoc-response frame * * * Return Value: * PTR to frame; or NULL on allocation failue * -*/ PSTxMgmtPacket s_MgrMakeReAssocResponse( PSDevice pDevice, PSMgmtObject pMgmt, WORD wCurrCapInfo, WORD wAssocStatus, WORD wAssocAID, PBYTE pDstAddr, PWLAN_IE_SUPP_RATES pCurrSuppRates, PWLAN_IE_SUPP_RATES pCurrExtSuppRates ) { PSTxMgmtPacket pTxPacket = NULL; WLAN_FR_REASSOCRESP sFrame; pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool; memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_ASSOCREQ_FR_MAXLEN); pTxPacket->p80211Header = (PUWLAN_80211HDR)((PBYTE)pTxPacket + sizeof(STxMgmtPacket)); // Setup 
the sFrame structure sFrame.pBuf = (PBYTE)pTxPacket->p80211Header; sFrame.len = WLAN_REASSOCRESP_FR_MAXLEN; vMgrEncodeReassocResponse(&sFrame); // Setup the header sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16( ( WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) | WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_REASSOCRESP) )); memcpy( sFrame.pHdr->sA3.abyAddr1, pDstAddr, WLAN_ADDR_LEN); memcpy( sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN); memcpy( sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN); *sFrame.pwCapInfo = cpu_to_le16(wCurrCapInfo); *sFrame.pwStatus = cpu_to_le16(wAssocStatus); *sFrame.pwAid = cpu_to_le16((WORD)(wAssocAID | BIT14 | BIT15)); // Copy the rate set sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len); sFrame.len += ((PWLAN_IE_SUPP_RATES)pCurrSuppRates)->len + WLAN_IEHDR_LEN; memcpy(sFrame.pSuppRates, pCurrSuppRates, ((PWLAN_IE_SUPP_RATES)pCurrSuppRates)->len + WLAN_IEHDR_LEN ); if (((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len != 0) { sFrame.pExtSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len); sFrame.len += ((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len + WLAN_IEHDR_LEN; memcpy(sFrame.pExtSuppRates, pCurrExtSuppRates, ((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len + WLAN_IEHDR_LEN ); } // Adjust the length fields pTxPacket->cbMPDULen = sFrame.len; pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN; return pTxPacket; } /*+ * * Routine Description: * Handles probe response management frames. * * * Return Value: * none. 
* -*/ static void s_vMgrRxProbeResponse( PSDevice pDevice, PSMgmtObject pMgmt, PSRxMgmtPacket pRxPacket ) { PKnownBSS pBSSList = NULL; WLAN_FR_PROBERESP sFrame; BYTE byCurrChannel = pRxPacket->byRxChannel; ERPObject sERP; BOOL bChannelHit = TRUE; memset(&sFrame, 0, sizeof(WLAN_FR_PROBERESP)); // decode the frame sFrame.len = pRxPacket->cbMPDULen; sFrame.pBuf = (PBYTE)pRxPacket->p80211Header; vMgrDecodeProbeResponse(&sFrame); if ((sFrame.pqwTimestamp == NULL) || (sFrame.pwBeaconInterval == NULL) || (sFrame.pwCapInfo == NULL) || (sFrame.pSSID == NULL) || (sFrame.pSuppRates == NULL)) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Probe resp:Fail addr:[%p]\n", pRxPacket->p80211Header); DBG_PORT80(0xCC); return; } if(sFrame.pSSID->len == 0) DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Rx Probe resp: SSID len = 0 \n"); //{{ RobertYu:20050201, 11a byCurrChannel != sFrame.pDSParms->byCurrChannel mapping if( byCurrChannel > CB_MAX_CHANNEL_24G ) { if (sFrame.pDSParms) { if (byCurrChannel == RFaby11aChannelIndex[sFrame.pDSParms->byCurrChannel-1]) bChannelHit = TRUE; byCurrChannel = RFaby11aChannelIndex[sFrame.pDSParms->byCurrChannel-1]; } else { bChannelHit = TRUE; } } else { if (sFrame.pDSParms) { if (byCurrChannel == sFrame.pDSParms->byCurrChannel) bChannelHit = TRUE; byCurrChannel = sFrame.pDSParms->byCurrChannel; } else { bChannelHit = TRUE; } } //RobertYu:20050201 if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE) return; if (sFrame.pERP) { sERP.byERP = sFrame.pERP->byContext; sERP.bERPExist = TRUE; } else { sERP.bERPExist = FALSE; sERP.byERP = 0; } // update or insert the bss pBSSList = BSSpAddrIsInBSSList((void *) pDevice, sFrame.pHdr->sA3.abyAddr3, sFrame.pSSID); if (pBSSList) { BSSbUpdateToBSSList((void *) pDevice, *sFrame.pqwTimestamp, *sFrame.pwBeaconInterval, *sFrame.pwCapInfo, byCurrChannel, bChannelHit, sFrame.pSSID, sFrame.pSuppRates, sFrame.pExtSuppRates, &sERP, sFrame.pRSN, sFrame.pRSNWPA, sFrame.pIE_Country, sFrame.pIE_Quiet, pBSSList, sFrame.len - WLAN_HDR_ADDR3_LEN, /* 
payload of probresponse */ sFrame.pHdr->sA4.abyAddr4, (void *) pRxPacket); } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Probe resp/insert: RxChannel = : %d\n", byCurrChannel); BSSbInsertToBSSList((void *) pDevice, sFrame.pHdr->sA3.abyAddr3, *sFrame.pqwTimestamp, *sFrame.pwBeaconInterval, *sFrame.pwCapInfo, byCurrChannel, sFrame.pSSID, sFrame.pSuppRates, sFrame.pExtSuppRates, &sERP, sFrame.pRSN, sFrame.pRSNWPA, sFrame.pIE_Country, sFrame.pIE_Quiet, sFrame.len - WLAN_HDR_ADDR3_LEN, sFrame.pHdr->sA4.abyAddr4, /* payload of beacon */ (void *) pRxPacket); } return; } /*+ * * Routine Description:(AP)or(Ad-hoc STA) * Handles probe request management frames. * * * Return Value: * none. * -*/ static void s_vMgrRxProbeRequest( PSDevice pDevice, PSMgmtObject pMgmt, PSRxMgmtPacket pRxPacket ) { WLAN_FR_PROBEREQ sFrame; CMD_STATUS Status; PSTxMgmtPacket pTxPacket; BYTE byPHYType = BB_TYPE_11B; // STA in Ad-hoc mode: when latest TBTT beacon transmit success, // STA have to response this request. if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) || ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && pDevice->bBeaconSent)) { memset(&sFrame, 0, sizeof(WLAN_FR_PROBEREQ)); // decode the frame sFrame.len = pRxPacket->cbMPDULen; sFrame.pBuf = (PBYTE)pRxPacket->p80211Header; vMgrDecodeProbeRequest(&sFrame); /* DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Probe request rx:MAC addr:%02x-%02x-%02x=%02x-%02x-%02x \n", sFrame.pHdr->sA3.abyAddr2[0], sFrame.pHdr->sA3.abyAddr2[1], sFrame.pHdr->sA3.abyAddr2[2], sFrame.pHdr->sA3.abyAddr2[3], sFrame.pHdr->sA3.abyAddr2[4], sFrame.pHdr->sA3.abyAddr2[5] ); */ if (sFrame.pSSID->len != 0) { if (sFrame.pSSID->len != ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len) return; if (memcmp(sFrame.pSSID->abySSID, ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->abySSID, ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len) != 0) { return; } } if ((sFrame.pSuppRates->len > 4) || (sFrame.pExtSuppRates != NULL)) { byPHYType = BB_TYPE_11G; } // Probe response reply.. 
pTxPacket = s_MgrMakeProbeResponse ( pDevice, pMgmt, pMgmt->wCurrCapInfo, pMgmt->wCurrBeaconPeriod, pMgmt->uCurrChannel, 0, sFrame.pHdr->sA3.abyAddr2, (PWLAN_IE_SSID)pMgmt->abyCurrSSID, (PBYTE)pMgmt->abyCurrBSSID, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates, byPHYType ); if (pTxPacket != NULL ){ /* send the frame */ Status = csMgmt_xmit(pDevice, pTxPacket); if (Status != CMD_STATUS_PENDING) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Mgt:Probe response tx failed\n"); } else { // DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Mgt:Probe response tx sending..\n"); } } } return; } /*+ * * Routine Description: * * Entry point for the reception and handling of 802.11 management * frames. Makes a determination of the frame type and then calls * the appropriate function. * * * Return Value: * none. * -*/ void vMgrRxManagePacket(void *hDeviceContext, PSMgmtObject pMgmt, PSRxMgmtPacket pRxPacket) { PSDevice pDevice = (PSDevice)hDeviceContext; BOOL bInScan = FALSE; unsigned int uNodeIndex = 0; NODE_STATE eNodeState = 0; CMD_STATUS Status; if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) { if (BSSbIsSTAInNodeDB(pDevice, pRxPacket->p80211Header->sA3.abyAddr2, &uNodeIndex)) eNodeState = pMgmt->sNodeDBTable[uNodeIndex].eNodeState; } switch( WLAN_GET_FC_FSTYPE((pRxPacket->p80211Header->sA3.wFrameCtl)) ){ case WLAN_FSTYPE_ASSOCREQ: // Frame Clase = 2 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "rx assocreq\n"); if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (eNodeState < NODE_AUTH)) { // send deauth notification // reason = (6) class 2 received from nonauth sta vMgrDeAuthenBeginSta(pDevice, pMgmt, pRxPacket->p80211Header->sA3.abyAddr2, (6), &Status ); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wmgr: send vMgrDeAuthenBeginSta 1\n"); } else { s_vMgrRxAssocRequest(pDevice, pMgmt, pRxPacket, uNodeIndex); } break; case WLAN_FSTYPE_ASSOCRESP: // Frame Clase = 2 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "rx assocresp1\n"); s_vMgrRxAssocResponse(pDevice, pMgmt, pRxPacket, FALSE); 
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "rx assocresp2\n"); break; case WLAN_FSTYPE_REASSOCREQ: // Frame Clase = 2 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "rx reassocreq\n"); // Todo: reassoc if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (eNodeState < NODE_AUTH)) { // send deauth notification // reason = (6) class 2 received from nonauth sta vMgrDeAuthenBeginSta(pDevice, pMgmt, pRxPacket->p80211Header->sA3.abyAddr2, (6), &Status ); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wmgr: send vMgrDeAuthenBeginSta 2\n"); } s_vMgrRxReAssocRequest(pDevice, pMgmt, pRxPacket, uNodeIndex); break; case WLAN_FSTYPE_REASSOCRESP: // Frame Clase = 2 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "rx reassocresp\n"); s_vMgrRxAssocResponse(pDevice, pMgmt, pRxPacket, TRUE); break; case WLAN_FSTYPE_PROBEREQ: // Frame Clase = 0 //DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "rx probereq\n"); s_vMgrRxProbeRequest(pDevice, pMgmt, pRxPacket); break; case WLAN_FSTYPE_PROBERESP: // Frame Clase = 0 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "rx proberesp\n"); s_vMgrRxProbeResponse(pDevice, pMgmt, pRxPacket); break; case WLAN_FSTYPE_BEACON: // Frame Clase = 0 //DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "rx beacon\n"); if (pMgmt->eScanState != WMAC_NO_SCANNING) { bInScan = TRUE; } s_vMgrRxBeacon(pDevice, pMgmt, pRxPacket, bInScan); break; case WLAN_FSTYPE_ATIM: // Frame Clase = 1 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "rx atim\n"); break; case WLAN_FSTYPE_DISASSOC: // Frame Clase = 2 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "rx disassoc\n"); if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (eNodeState < NODE_AUTH)) { // send deauth notification // reason = (6) class 2 received from nonauth sta vMgrDeAuthenBeginSta(pDevice, pMgmt, pRxPacket->p80211Header->sA3.abyAddr2, (6), &Status ); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wmgr: send vMgrDeAuthenBeginSta 3\n"); } s_vMgrRxDisassociation(pDevice, pMgmt, pRxPacket); break; case WLAN_FSTYPE_AUTHEN: // Frame Clase = 1 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "rx authen\n"); s_vMgrRxAuthentication(pDevice, pMgmt, pRxPacket); 
break; case WLAN_FSTYPE_DEAUTHEN: // Frame Clase = 1 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "rx deauthen\n"); s_vMgrRxDeauthentication(pDevice, pMgmt, pRxPacket); break; default: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "rx unknown mgmt\n"); } return; } /*+ * * Routine Description: * * * Prepare beacon to send * * Return Value: * TRUE if success; FALSE if failed. * -*/ BOOL bMgrPrepareBeaconToSend(void *hDeviceContext, PSMgmtObject pMgmt) { PSDevice pDevice = (PSDevice)hDeviceContext; PSTxMgmtPacket pTxPacket; // pDevice->bBeaconBufReady = FALSE; if (pDevice->bEncryptionEnable || pDevice->bEnable8021x){ pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_PRIVACY(1); } else { pMgmt->wCurrCapInfo &= ~WLAN_SET_CAP_INFO_PRIVACY(1); } pTxPacket = s_MgrMakeBeacon ( pDevice, pMgmt, pMgmt->wCurrCapInfo, pMgmt->wCurrBeaconPeriod, pMgmt->uCurrChannel, pMgmt->wCurrATIMWindow, //0, (PWLAN_IE_SSID)pMgmt->abyCurrSSID, (PBYTE)pMgmt->abyCurrBSSID, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates ); if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->abyCurrBSSID[0] == 0)) return FALSE; csBeacon_xmit(pDevice, pTxPacket); MACvRegBitsOn(pDevice, MAC_REG_TCR, TCR_AUTOBCNTX); return TRUE; } /*+ * * Routine Description: * * Log a warning message based on the contents of the Status * Code field of an 802.11 management frame. Defines are * derived from 802.11-1997 SPEC. * * Return Value: * none. 
* -*/ static void s_vMgrLogStatus( PSMgmtObject pMgmt, WORD wStatus ) { switch( wStatus ){ case WLAN_MGMT_STATUS_UNSPEC_FAILURE: DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Status code == Unspecified error.\n"); break; case WLAN_MGMT_STATUS_CAPS_UNSUPPORTED: DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Status code == Can't support all requested capabilities.\n"); break; case WLAN_MGMT_STATUS_REASSOC_NO_ASSOC: DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Status code == Reassoc denied, can't confirm original Association.\n"); break; case WLAN_MGMT_STATUS_ASSOC_DENIED_UNSPEC: DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Status code == Assoc denied, undefine in spec\n"); break; case WLAN_MGMT_STATUS_UNSUPPORTED_AUTHALG: DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Status code == Peer doesn't support authen algorithm.\n"); break; case WLAN_MGMT_STATUS_RX_AUTH_NOSEQ: DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Status code == Authen frame received out of sequence.\n"); break; case WLAN_MGMT_STATUS_CHALLENGE_FAIL: DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Status code == Authen rejected, challenge failure.\n"); break; case WLAN_MGMT_STATUS_AUTH_TIMEOUT: DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Status code == Authen rejected, timeout waiting for next frame.\n"); break; case WLAN_MGMT_STATUS_ASSOC_DENIED_BUSY: DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Status code == Assoc denied, AP too busy.\n"); break; case WLAN_MGMT_STATUS_ASSOC_DENIED_RATES: DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Status code == Assoc denied, we haven't enough basic rates.\n"); break; case WLAN_MGMT_STATUS_ASSOC_DENIED_SHORTPREAMBLE: DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Status code == Assoc denied, we do not support short preamble.\n"); break; case WLAN_MGMT_STATUS_ASSOC_DENIED_PBCC: DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Status code == Assoc denied, we do not support PBCC.\n"); break; case WLAN_MGMT_STATUS_ASSOC_DENIED_AGILITY: DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Status code == Assoc denied, we do not support channel agility.\n"); break; default: 
DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Unknown status code %d.\n", wStatus); break; } } /* * * Description: * Add BSSID in PMKID Candidate list. * * Parameters: * In: * hDeviceContext - device structure point * pbyBSSID - BSSID address for adding * wRSNCap - BSS's RSN capability * Out: * none * * Return Value: none. * -*/ BOOL bAdd_PMKID_Candidate(void *hDeviceContext, PBYTE pbyBSSID, PSRSNCapObject psRSNCapObj) { PSDevice pDevice = (PSDevice)hDeviceContext; PPMKID_CANDIDATE pCandidateList; unsigned int ii = 0; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"bAdd_PMKID_Candidate START: (%d)\n", (int)pDevice->gsPMKIDCandidate.NumCandidates); if ((pDevice == NULL) || (pbyBSSID == NULL) || (psRSNCapObj == NULL)) return FALSE; if (pDevice->gsPMKIDCandidate.NumCandidates >= MAX_PMKIDLIST) return FALSE; // Update Old Candidate for (ii = 0; ii < pDevice->gsPMKIDCandidate.NumCandidates; ii++) { pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[ii]; if (!memcmp(pCandidateList->BSSID, pbyBSSID, ETH_ALEN)) { if ((psRSNCapObj->bRSNCapExist == TRUE) && (psRSNCapObj->wRSNCap & BIT0)) { pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED; } else { pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED); } return TRUE; } } // New Candidate pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[pDevice->gsPMKIDCandidate.NumCandidates]; if ((psRSNCapObj->bRSNCapExist == TRUE) && (psRSNCapObj->wRSNCap & BIT0)) { pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED; } else { pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED); } memcpy(pCandidateList->BSSID, pbyBSSID, ETH_ALEN); pDevice->gsPMKIDCandidate.NumCandidates++; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"NumCandidates:%d\n", (int)pDevice->gsPMKIDCandidate.NumCandidates); return TRUE; } /* * * Description: * Flush PMKID Candidate list. * * Parameters: * In: * hDeviceContext - device structure point * Out: * none * * Return Value: none. 
* -*/ void vFlush_PMKID_Candidate(void *hDeviceContext) { PSDevice pDevice = (PSDevice)hDeviceContext; if (pDevice == NULL) return; memset(&pDevice->gsPMKIDCandidate, 0, sizeof(SPMKIDCandidateEvent)); } static BOOL s_bCipherMatch ( PKnownBSS pBSSNode, NDIS_802_11_ENCRYPTION_STATUS EncStatus, PBYTE pbyCCSPK, PBYTE pbyCCSGK ) { BYTE byMulticastCipher = KEY_CTL_INVALID; BYTE byCipherMask = 0x00; int i; if (pBSSNode == NULL) return FALSE; // check cap. of BSS if ((WLAN_GET_CAP_INFO_PRIVACY(pBSSNode->wCapInfo) != 0) && (EncStatus == Ndis802_11Encryption1Enabled)) { // default is WEP only byMulticastCipher = KEY_CTL_WEP; } if ((WLAN_GET_CAP_INFO_PRIVACY(pBSSNode->wCapInfo) != 0) && (pBSSNode->bWPA2Valid == TRUE) && ((EncStatus == Ndis802_11Encryption3Enabled) || (EncStatus == Ndis802_11Encryption2Enabled))) { //WPA2 // check Group Key Cipher if ((pBSSNode->byCSSGK == WLAN_11i_CSS_WEP40) || (pBSSNode->byCSSGK == WLAN_11i_CSS_WEP104)) { byMulticastCipher = KEY_CTL_WEP; } else if (pBSSNode->byCSSGK == WLAN_11i_CSS_TKIP) { byMulticastCipher = KEY_CTL_TKIP; } else if (pBSSNode->byCSSGK == WLAN_11i_CSS_CCMP) { byMulticastCipher = KEY_CTL_CCMP; } else { byMulticastCipher = KEY_CTL_INVALID; } /* check Pairwise Key Cipher */ for (i = 0; i < pBSSNode->wCSSPKCount; i++) { if ((pBSSNode->abyCSSPK[i] == WLAN_11i_CSS_WEP40) || (pBSSNode->abyCSSPK[i] == WLAN_11i_CSS_WEP104)) { /* this should not happen as defined 802.11i */ byCipherMask |= 0x01; } else if (pBSSNode->abyCSSPK[i] == WLAN_11i_CSS_TKIP) { byCipherMask |= 0x02; } else if (pBSSNode->abyCSSPK[i] == WLAN_11i_CSS_CCMP) { byCipherMask |= 0x04; } else if (pBSSNode->abyCSSPK[i] == WLAN_11i_CSS_USE_GROUP) { /* use group key only ignore all others */ byCipherMask = 0; i = pBSSNode->wCSSPKCount; } } } else if ((WLAN_GET_CAP_INFO_PRIVACY(pBSSNode->wCapInfo) != 0) && (pBSSNode->bWPAValid == TRUE) && ((EncStatus == Ndis802_11Encryption2Enabled) || (EncStatus == Ndis802_11Encryption3Enabled))) { //WPA // check Group Key Cipher if 
((pBSSNode->byGKType == WPA_WEP40) || (pBSSNode->byGKType == WPA_WEP104)) { byMulticastCipher = KEY_CTL_WEP; } else if (pBSSNode->byGKType == WPA_TKIP) { byMulticastCipher = KEY_CTL_TKIP; } else if (pBSSNode->byGKType == WPA_AESCCMP) { byMulticastCipher = KEY_CTL_CCMP; } else { byMulticastCipher = KEY_CTL_INVALID; } /* check Pairwise Key Cipher */ for (i = 0; i < pBSSNode->wPKCount; i++) { if (pBSSNode->abyPKType[i] == WPA_TKIP) { byCipherMask |= 0x02; } else if (pBSSNode->abyPKType[i] == WPA_AESCCMP) { byCipherMask |= 0x04; } else if (pBSSNode->abyPKType[i] == WPA_NONE) { /* use group key only ignore all others */ byCipherMask = 0; i = pBSSNode->wPKCount; } } } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%d, %d, %d, %d, EncStatus:%d\n", byMulticastCipher, byCipherMask, pBSSNode->bWPAValid, pBSSNode->bWPA2Valid, EncStatus); // mask our cap. with BSS if (EncStatus == Ndis802_11Encryption1Enabled) { // For supporting Cisco migration mode, don't care pairwise key cipher //if ((byMulticastCipher == KEY_CTL_WEP) && // (byCipherMask == 0)) { if ((byMulticastCipher == KEY_CTL_WEP) && (byCipherMask == 0)) { *pbyCCSGK = KEY_CTL_WEP; *pbyCCSPK = KEY_CTL_NONE; return TRUE; } else { return FALSE; } } else if (EncStatus == Ndis802_11Encryption2Enabled) { if ((byMulticastCipher == KEY_CTL_TKIP) && (byCipherMask == 0)) { *pbyCCSGK = KEY_CTL_TKIP; *pbyCCSPK = KEY_CTL_NONE; return TRUE; } else if ((byMulticastCipher == KEY_CTL_WEP) && ((byCipherMask & 0x02) != 0)) { *pbyCCSGK = KEY_CTL_WEP; *pbyCCSPK = KEY_CTL_TKIP; return TRUE; } else if ((byMulticastCipher == KEY_CTL_TKIP) && ((byCipherMask & 0x02) != 0)) { *pbyCCSGK = KEY_CTL_TKIP; *pbyCCSPK = KEY_CTL_TKIP; return TRUE; } else { return FALSE; } } else if (EncStatus == Ndis802_11Encryption3Enabled) { if ((byMulticastCipher == KEY_CTL_CCMP) && (byCipherMask == 0)) { // When CCMP is enable, "Use group cipher suite" shall not be a valid option. 
return FALSE; } else if ((byMulticastCipher == KEY_CTL_WEP) && ((byCipherMask & 0x04) != 0)) { *pbyCCSGK = KEY_CTL_WEP; *pbyCCSPK = KEY_CTL_CCMP; return TRUE; } else if ((byMulticastCipher == KEY_CTL_TKIP) && ((byCipherMask & 0x04) != 0)) { *pbyCCSGK = KEY_CTL_TKIP; *pbyCCSPK = KEY_CTL_CCMP; return TRUE; } else if ((byMulticastCipher == KEY_CTL_CCMP) && ((byCipherMask & 0x04) != 0)) { *pbyCCSGK = KEY_CTL_CCMP; *pbyCCSPK = KEY_CTL_CCMP; return TRUE; } else { return FALSE; } } return TRUE; }
gpl-2.0
NovaFusion/twrp_kernel
sound/soc/pxa/mioa701_wm9713.c
3022
7438
/* * Handles the Mitac mioa701 SoC system * * Copyright (C) 2008 Robert Jarzmik * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation in version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * This is a little schema of the sound interconnections : * * Sagem X200 Wolfson WM9713 * +--------+ +-------------------+ Rear Speaker * | | | | /-+ * | +--->----->---+MONOIN SPKL+--->----+-+ | * | GSM | | | | | | * | +--->----->---+PCBEEP SPKR+--->----+-+ | * | CHIP | | | \-+ * | +---<-----<---+MONO | * | | | | Front Speaker * +--------+ | | /-+ * | HPL+--->----+-+ | * | | | | | * | OUT3+--->----+-+ | * | | \-+ * | | * | | Front Micro * | | + * | MIC1+-----<--+o+ * | | + * +-------------------+ --- */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/platform_device.h> #include <asm/mach-types.h> #include <mach/audio.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <sound/initval.h> #include <sound/ac97_codec.h> #include "pxa2xx-ac97.h" #include "../codecs/wm9713.h" #define ARRAY_AND_SIZE(x) (x), ARRAY_SIZE(x) #define AC97_GPIO_PULL 0x58 /* Use GPIO8 for rear speaker amplifier */ static int rear_amp_power(struct snd_soc_codec *codec, int power) { unsigned short reg; if (power) { reg = snd_soc_read(codec, AC97_GPIO_CFG); snd_soc_write(codec, AC97_GPIO_CFG, reg | 0x0100); reg = snd_soc_read(codec, AC97_GPIO_PULL); snd_soc_write(codec, AC97_GPIO_PULL, reg | (1<<15)); } else { reg = 
snd_soc_read(codec, AC97_GPIO_CFG); snd_soc_write(codec, AC97_GPIO_CFG, reg & ~0x0100); reg = snd_soc_read(codec, AC97_GPIO_PULL); snd_soc_write(codec, AC97_GPIO_PULL, reg & ~(1<<15)); } return 0; } static int rear_amp_event(struct snd_soc_dapm_widget *widget, struct snd_kcontrol *kctl, int event) { struct snd_soc_codec *codec = widget->codec; return rear_amp_power(codec, SND_SOC_DAPM_EVENT_ON(event)); } /* mioa701 machine dapm widgets */ static const struct snd_soc_dapm_widget mioa701_dapm_widgets[] = { SND_SOC_DAPM_SPK("Front Speaker", NULL), SND_SOC_DAPM_SPK("Rear Speaker", rear_amp_event), SND_SOC_DAPM_MIC("Headset", NULL), SND_SOC_DAPM_LINE("GSM Line Out", NULL), SND_SOC_DAPM_LINE("GSM Line In", NULL), SND_SOC_DAPM_MIC("Headset Mic", NULL), SND_SOC_DAPM_MIC("Front Mic", NULL), }; static const struct snd_soc_dapm_route audio_map[] = { /* Call Mic */ {"Mic Bias", NULL, "Front Mic"}, {"MIC1", NULL, "Mic Bias"}, /* Headset Mic */ {"LINEL", NULL, "Headset Mic"}, {"LINER", NULL, "Headset Mic"}, /* GSM Module */ {"MONOIN", NULL, "GSM Line Out"}, {"PCBEEP", NULL, "GSM Line Out"}, {"GSM Line In", NULL, "MONO"}, /* headphone connected to HPL, HPR */ {"Headset", NULL, "HPL"}, {"Headset", NULL, "HPR"}, /* front speaker connected to HPL, OUT3 */ {"Front Speaker", NULL, "HPL"}, {"Front Speaker", NULL, "OUT3"}, /* rear speaker connected to SPKL, SPKR */ {"Rear Speaker", NULL, "SPKL"}, {"Rear Speaker", NULL, "SPKR"}, }; static int mioa701_wm9713_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; struct snd_soc_dapm_context *dapm = &codec->dapm; unsigned short reg; /* Add mioa701 specific widgets */ snd_soc_dapm_new_controls(dapm, ARRAY_AND_SIZE(mioa701_dapm_widgets)); /* Set up mioa701 specific audio path audio_mapnects */ snd_soc_dapm_add_routes(dapm, ARRAY_AND_SIZE(audio_map)); /* Prepare GPIO8 for rear speaker amplifier */ reg = codec->driver->read(codec, AC97_GPIO_CFG); codec->driver->write(codec, AC97_GPIO_CFG, reg | 0x0100); /* Prepare MIC 
input */ reg = codec->driver->read(codec, AC97_3D_CONTROL); codec->driver->write(codec, AC97_3D_CONTROL, reg | 0xc000); snd_soc_dapm_enable_pin(dapm, "Front Speaker"); snd_soc_dapm_enable_pin(dapm, "Rear Speaker"); snd_soc_dapm_enable_pin(dapm, "Front Mic"); snd_soc_dapm_enable_pin(dapm, "GSM Line In"); snd_soc_dapm_enable_pin(dapm, "GSM Line Out"); snd_soc_dapm_sync(dapm); return 0; } static struct snd_soc_ops mioa701_ops; static struct snd_soc_dai_link mioa701_dai[] = { { .name = "AC97", .stream_name = "AC97 HiFi", .cpu_dai_name = "pxa2xx-ac97", .codec_dai_name = "wm9713-hifi", .codec_name = "wm9713-codec", .init = mioa701_wm9713_init, .platform_name = "pxa-pcm-audio", .ops = &mioa701_ops, }, { .name = "AC97 Aux", .stream_name = "AC97 Aux", .cpu_dai_name = "pxa2xx-ac97-aux", .codec_dai_name ="wm9713-aux", .codec_name = "wm9713-codec", .platform_name = "pxa-pcm-audio", .ops = &mioa701_ops, }, }; static struct snd_soc_card mioa701 = { .name = "MioA701", .dai_link = mioa701_dai, .num_links = ARRAY_SIZE(mioa701_dai), }; static struct platform_device *mioa701_snd_device; static int mioa701_wm9713_probe(struct platform_device *pdev) { int ret; if (!machine_is_mioa701()) return -ENODEV; dev_warn(&pdev->dev, "Be warned that incorrect mixers/muxes setup will" "lead to overheating and possible destruction of your device." 
"Do not use without a good knowledge of mio's board design!\n"); mioa701_snd_device = platform_device_alloc("soc-audio", -1); if (!mioa701_snd_device) return -ENOMEM; platform_set_drvdata(mioa701_snd_device, &mioa701); ret = platform_device_add(mioa701_snd_device); if (!ret) return 0; platform_device_put(mioa701_snd_device); return ret; } static int __devexit mioa701_wm9713_remove(struct platform_device *pdev) { platform_device_unregister(mioa701_snd_device); return 0; } static struct platform_driver mioa701_wm9713_driver = { .probe = mioa701_wm9713_probe, .remove = __devexit_p(mioa701_wm9713_remove), .driver = { .name = "mioa701-wm9713", .owner = THIS_MODULE, }, }; static int __init mioa701_asoc_init(void) { return platform_driver_register(&mioa701_wm9713_driver); } static void __exit mioa701_asoc_exit(void) { platform_driver_unregister(&mioa701_wm9713_driver); } module_init(mioa701_asoc_init); module_exit(mioa701_asoc_exit); /* Module information */ MODULE_AUTHOR("Robert Jarzmik (rjarzmik@free.fr)"); MODULE_DESCRIPTION("ALSA SoC WM9713 MIO A701"); MODULE_LICENSE("GPL");
gpl-2.0
yinquan529/pandaboard
drivers/mtd/devices/phram.c
3022
5844
/** * Copyright (c) ???? Jochen Schäuble <psionic@psionic.de> * Copyright (c) 2003-2004 Joern Engel <joern@wh.fh-wedel.de> * * Usage: * * one commend line parameter per device, each in the form: * phram=<name>,<start>,<len> * <name> may be up to 63 characters. * <start> and <len> can be octal, decimal or hexadecimal. If followed * by "ki", "Mi" or "Gi", the numbers will be interpreted as kilo, mega or * gigabytes. * * Example: * phram=swap,64Mi,128Mi phram=test,900Mi,1Mi */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <asm/io.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/mtd/mtd.h> struct phram_mtd_list { struct mtd_info mtd; struct list_head list; }; static LIST_HEAD(phram_list); static int phram_erase(struct mtd_info *mtd, struct erase_info *instr) { u_char *start = mtd->priv; if (instr->addr + instr->len > mtd->size) return -EINVAL; memset(start + instr->addr, 0xff, instr->len); /* This'll catch a few races. Free the thing before returning :) * I don't feel at all ashamed. This kind of thing is possible anyway * with flash, but unlikely. */ instr->state = MTD_ERASE_DONE; mtd_erase_callback(instr); return 0; } static int phram_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, void **virt, resource_size_t *phys) { if (from + len > mtd->size) return -EINVAL; /* can we return a physical address with this driver? 
*/ if (phys) return -EINVAL; *virt = mtd->priv + from; *retlen = len; return 0; } static void phram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) { } static int phram_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { u_char *start = mtd->priv; if (from >= mtd->size) return -EINVAL; if (len > mtd->size - from) len = mtd->size - from; memcpy(buf, start + from, len); *retlen = len; return 0; } static int phram_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { u_char *start = mtd->priv; if (to >= mtd->size) return -EINVAL; if (len > mtd->size - to) len = mtd->size - to; memcpy(start + to, buf, len); *retlen = len; return 0; } static void unregister_devices(void) { struct phram_mtd_list *this, *safe; list_for_each_entry_safe(this, safe, &phram_list, list) { mtd_device_unregister(&this->mtd); iounmap(this->mtd.priv); kfree(this->mtd.name); kfree(this); } } static int register_device(char *name, unsigned long start, unsigned long len) { struct phram_mtd_list *new; int ret = -ENOMEM; new = kzalloc(sizeof(*new), GFP_KERNEL); if (!new) goto out0; ret = -EIO; new->mtd.priv = ioremap(start, len); if (!new->mtd.priv) { pr_err("ioremap failed\n"); goto out1; } new->mtd.name = name; new->mtd.size = len; new->mtd.flags = MTD_CAP_RAM; new->mtd.erase = phram_erase; new->mtd.point = phram_point; new->mtd.unpoint = phram_unpoint; new->mtd.read = phram_read; new->mtd.write = phram_write; new->mtd.owner = THIS_MODULE; new->mtd.type = MTD_RAM; new->mtd.erasesize = PAGE_SIZE; new->mtd.writesize = 1; ret = -EAGAIN; if (mtd_device_register(&new->mtd, NULL, 0)) { pr_err("Failed to register new device\n"); goto out2; } list_add_tail(&new->list, &phram_list); return 0; out2: iounmap(new->mtd.priv); out1: kfree(new); out0: return ret; } static int ustrtoul(const char *cp, char **endp, unsigned int base) { unsigned long result = simple_strtoul(cp, endp, base); switch (**endp) { case 'G': result *= 1024; case 'M': 
result *= 1024; case 'k': result *= 1024; /* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */ if ((*endp)[1] == 'i') (*endp) += 2; } return result; } static int parse_num32(uint32_t *num32, const char *token) { char *endp; unsigned long n; n = ustrtoul(token, &endp, 0); if (*endp) return -EINVAL; *num32 = n; return 0; } static int parse_name(char **pname, const char *token) { size_t len; char *name; len = strlen(token) + 1; if (len > 64) return -ENOSPC; name = kmalloc(len, GFP_KERNEL); if (!name) return -ENOMEM; strcpy(name, token); *pname = name; return 0; } static inline void kill_final_newline(char *str) { char *newline = strrchr(str, '\n'); if (newline && !newline[1]) *newline = 0; } #define parse_err(fmt, args...) do { \ pr_err(fmt , ## args); \ return 1; \ } while (0) static int phram_setup(const char *val, struct kernel_param *kp) { char buf[64+12+12], *str = buf; char *token[3]; char *name; uint32_t start; uint32_t len; int i, ret; if (strnlen(val, sizeof(buf)) >= sizeof(buf)) parse_err("parameter too long\n"); strcpy(str, val); kill_final_newline(str); for (i=0; i<3; i++) token[i] = strsep(&str, ","); if (str) parse_err("too many arguments\n"); if (!token[2]) parse_err("not enough arguments\n"); ret = parse_name(&name, token[0]); if (ret) return ret; ret = parse_num32(&start, token[1]); if (ret) { kfree(name); parse_err("illegal start address\n"); } ret = parse_num32(&len, token[2]); if (ret) { kfree(name); parse_err("illegal device length\n"); } ret = register_device(name, start, len); if (!ret) pr_info("%s device: %#x at %#x\n", name, len, start); else kfree(name); return ret; } module_param_call(phram, phram_setup, NULL, NULL, 000); MODULE_PARM_DESC(phram, "Memory region to map. 
\"phram=<name>,<start>,<length>\""); static int __init init_phram(void) { return 0; } static void __exit cleanup_phram(void) { unregister_devices(); } module_init(init_phram); module_exit(cleanup_phram); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Joern Engel <joern@wh.fh-wedel.de>"); MODULE_DESCRIPTION("MTD driver for physical RAM");
gpl-2.0
delafer/YP-GI1CW
arch/mips/kernel/spinlock_test.c
3790
2381
#include <linux/init.h> #include <linux/kthread.h> #include <linux/hrtimer.h> #include <linux/fs.h> #include <linux/debugfs.h> #include <linux/module.h> #include <linux/spinlock.h> static int ss_get(void *data, u64 *val) { ktime_t start, finish; int loops; int cont; DEFINE_RAW_SPINLOCK(ss_spin); loops = 1000000; cont = 1; start = ktime_get(); while (cont) { raw_spin_lock(&ss_spin); loops--; if (loops == 0) cont = 0; raw_spin_unlock(&ss_spin); } finish = ktime_get(); *val = ktime_us_delta(finish, start); return 0; } DEFINE_SIMPLE_ATTRIBUTE(fops_ss, ss_get, NULL, "%llu\n"); struct spin_multi_state { raw_spinlock_t lock; atomic_t start_wait; atomic_t enter_wait; atomic_t exit_wait; int loops; }; struct spin_multi_per_thread { struct spin_multi_state *state; ktime_t start; }; static int multi_other(void *data) { int loops; int cont; struct spin_multi_per_thread *pt = data; struct spin_multi_state *s = pt->state; loops = s->loops; cont = 1; atomic_dec(&s->enter_wait); while (atomic_read(&s->enter_wait)) ; /* spin */ pt->start = ktime_get(); atomic_dec(&s->start_wait); while (atomic_read(&s->start_wait)) ; /* spin */ while (cont) { raw_spin_lock(&s->lock); loops--; if (loops == 0) cont = 0; raw_spin_unlock(&s->lock); } atomic_dec(&s->exit_wait); while (atomic_read(&s->exit_wait)) ; /* spin */ return 0; } static int multi_get(void *data, u64 *val) { ktime_t finish; struct spin_multi_state ms; struct spin_multi_per_thread t1, t2; ms.lock = __RAW_SPIN_LOCK_UNLOCKED("multi_get"); ms.loops = 1000000; atomic_set(&ms.start_wait, 2); atomic_set(&ms.enter_wait, 2); atomic_set(&ms.exit_wait, 2); t1.state = &ms; t2.state = &ms; kthread_run(multi_other, &t2, "multi_get"); multi_other(&t1); finish = ktime_get(); *val = ktime_us_delta(finish, t1.start); return 0; } DEFINE_SIMPLE_ATTRIBUTE(fops_multi, multi_get, NULL, "%llu\n"); extern struct dentry *mips_debugfs_dir; static int __init spinlock_test(void) { struct dentry *d; if (!mips_debugfs_dir) return -ENODEV; d = 
debugfs_create_file("spin_single", S_IRUGO, mips_debugfs_dir, NULL, &fops_ss); if (!d) return -ENOMEM; d = debugfs_create_file("spin_multi", S_IRUGO, mips_debugfs_dir, NULL, &fops_multi); if (!d) return -ENOMEM; return 0; } device_initcall(spinlock_test);
gpl-2.0
loli10K/linux-sunxi
arch/mips/txx9/generic/setup_tx4939.c
4558
16986
/* * TX4939 setup routines * Based on linux/arch/mips/txx9/generic/setup_tx4938.c, * and RBTX49xx patch from CELF patch archive. * * 2003-2005 (c) MontaVista Software, Inc. * (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007 * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/notifier.h> #include <linux/device.h> #include <linux/ethtool.h> #include <linux/param.h> #include <linux/ptrace.h> #include <linux/mtd/physmap.h> #include <linux/platform_device.h> #include <asm/bootinfo.h> #include <asm/reboot.h> #include <asm/traps.h> #include <asm/txx9irq.h> #include <asm/txx9tmr.h> #include <asm/txx9/generic.h> #include <asm/txx9/ndfmc.h> #include <asm/txx9/dmac.h> #include <asm/txx9/tx4939.h> static void __init tx4939_wdr_init(void) { /* report watchdog reset status */ if (____raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_WDRST) pr_warning("Watchdog reset detected at 0x%lx\n", read_c0_errorepc()); /* clear WatchDogReset (W1C) */ tx4939_ccfg_set(TX4939_CCFG_WDRST); /* do reset on watchdog */ tx4939_ccfg_set(TX4939_CCFG_WR); } void __init tx4939_wdt_init(void) { txx9_wdt_init(TX4939_TMR_REG(2) & 0xfffffffffULL); } static void tx4939_machine_restart(char *command) { local_irq_disable(); pr_emerg("Rebooting (with %s watchdog reset)...\n", (____raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_WDREXEN) ? "external" : "internal"); /* clear watchdog status */ tx4939_ccfg_set(TX4939_CCFG_WDRST); /* W1C */ txx9_wdt_now(TX4939_TMR_REG(2) & 0xfffffffffULL); while (!(____raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_WDRST)) ; mdelay(10); if (____raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_WDREXEN) { pr_emerg("Rebooting (with internal watchdog reset)...\n"); /* External WDRST failed. 
Do internal watchdog reset */ tx4939_ccfg_clear(TX4939_CCFG_WDREXEN); } /* fallback */ (*_machine_halt)(); } void show_registers(struct pt_regs *regs); static int tx4939_be_handler(struct pt_regs *regs, int is_fixup) { int data = regs->cp0_cause & 4; console_verbose(); pr_err("%cBE exception at %#lx\n", data ? 'D' : 'I', regs->cp0_epc); pr_err("ccfg:%llx, toea:%llx\n", (unsigned long long)____raw_readq(&tx4939_ccfgptr->ccfg), (unsigned long long)____raw_readq(&tx4939_ccfgptr->toea)); #ifdef CONFIG_PCI tx4927_report_pcic_status(); #endif show_registers(regs); panic("BusError!"); } static void __init tx4939_be_init(void) { board_be_handler = tx4939_be_handler; } static struct resource tx4939_sdram_resource[4]; static struct resource tx4939_sram_resource; #define TX4939_SRAM_SIZE 0x800 void __init tx4939_add_memory_regions(void) { int i; unsigned long start, size; u64 win; for (i = 0; i < 4; i++) { if (!((__u32)____raw_readq(&tx4939_ddrcptr->winen) & (1 << i))) continue; win = ____raw_readq(&tx4939_ddrcptr->win[i]); start = (unsigned long)(win >> 48); size = (((unsigned long)(win >> 32) & 0xffff) + 1) - start; add_memory_region(start << 20, size << 20, BOOT_MEM_RAM); } } void __init tx4939_setup(void) { int i; __u32 divmode; __u64 pcfg; unsigned int cpuclk = 0; txx9_reg_res_init(TX4939_REV_PCODE(), TX4939_REG_BASE, TX4939_REG_SIZE); set_c0_config(TX49_CONF_CWFON); /* SDRAMC,EBUSC are configured by PROM */ for (i = 0; i < 4; i++) { if (!(TX4939_EBUSC_CR(i) & 0x8)) continue; /* disabled */ txx9_ce_res[i].start = (unsigned long)TX4939_EBUSC_BA(i); txx9_ce_res[i].end = txx9_ce_res[i].start + TX4939_EBUSC_SIZE(i) - 1; request_resource(&iomem_resource, &txx9_ce_res[i]); } /* clocks */ if (txx9_master_clock) { /* calculate cpu_clock from master_clock */ divmode = (__u32)____raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_MULCLK_MASK; cpuclk = txx9_master_clock * 20 / 2; switch (divmode) { case TX4939_CCFG_MULCLK_8: cpuclk = cpuclk / 3 * 4 /* / 6 * 8 */; break; case 
TX4939_CCFG_MULCLK_9: cpuclk = cpuclk / 2 * 3 /* / 6 * 9 */; break; case TX4939_CCFG_MULCLK_10: cpuclk = cpuclk / 3 * 5 /* / 6 * 10 */; break; case TX4939_CCFG_MULCLK_11: cpuclk = cpuclk / 6 * 11; break; case TX4939_CCFG_MULCLK_12: cpuclk = cpuclk * 2 /* / 6 * 12 */; break; case TX4939_CCFG_MULCLK_13: cpuclk = cpuclk / 6 * 13; break; case TX4939_CCFG_MULCLK_14: cpuclk = cpuclk / 3 * 7 /* / 6 * 14 */; break; case TX4939_CCFG_MULCLK_15: cpuclk = cpuclk / 2 * 5 /* / 6 * 15 */; break; } txx9_cpu_clock = cpuclk; } else { if (txx9_cpu_clock == 0) txx9_cpu_clock = 400000000; /* 400MHz */ /* calculate master_clock from cpu_clock */ cpuclk = txx9_cpu_clock; divmode = (__u32)____raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_MULCLK_MASK; switch (divmode) { case TX4939_CCFG_MULCLK_8: txx9_master_clock = cpuclk * 6 / 8; break; case TX4939_CCFG_MULCLK_9: txx9_master_clock = cpuclk * 6 / 9; break; case TX4939_CCFG_MULCLK_10: txx9_master_clock = cpuclk * 6 / 10; break; case TX4939_CCFG_MULCLK_11: txx9_master_clock = cpuclk * 6 / 11; break; case TX4939_CCFG_MULCLK_12: txx9_master_clock = cpuclk * 6 / 12; break; case TX4939_CCFG_MULCLK_13: txx9_master_clock = cpuclk * 6 / 13; break; case TX4939_CCFG_MULCLK_14: txx9_master_clock = cpuclk * 6 / 14; break; case TX4939_CCFG_MULCLK_15: txx9_master_clock = cpuclk * 6 / 15; break; } txx9_master_clock /= 10; /* * 2 / 20 */ } /* calculate gbus_clock from cpu_clock */ divmode = (__u32)____raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_YDIVMODE_MASK; txx9_gbus_clock = txx9_cpu_clock; switch (divmode) { case TX4939_CCFG_YDIVMODE_2: txx9_gbus_clock /= 2; break; case TX4939_CCFG_YDIVMODE_3: txx9_gbus_clock /= 3; break; case TX4939_CCFG_YDIVMODE_5: txx9_gbus_clock /= 5; break; case TX4939_CCFG_YDIVMODE_6: txx9_gbus_clock /= 6; break; } /* change default value to udelay/mdelay take reasonable time */ loops_per_jiffy = txx9_cpu_clock / HZ / 2; /* CCFG */ tx4939_wdr_init(); /* clear BusErrorOnWrite flag (W1C) */ tx4939_ccfg_set(TX4939_CCFG_WDRST | 
TX4939_CCFG_BEOW); /* enable Timeout BusError */ if (txx9_ccfg_toeon) tx4939_ccfg_set(TX4939_CCFG_TOE); /* DMA selection */ txx9_clear64(&tx4939_ccfgptr->pcfg, TX4939_PCFG_DMASEL_ALL); /* Use external clock for external arbiter */ if (!(____raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_PCIARB)) txx9_clear64(&tx4939_ccfgptr->pcfg, TX4939_PCFG_PCICLKEN_ALL); pr_info("%s -- %dMHz(M%dMHz,G%dMHz) CRIR:%08x CCFG:%llx PCFG:%llx\n", txx9_pcode_str, (cpuclk + 500000) / 1000000, (txx9_master_clock + 500000) / 1000000, (txx9_gbus_clock + 500000) / 1000000, (__u32)____raw_readq(&tx4939_ccfgptr->crir), (unsigned long long)____raw_readq(&tx4939_ccfgptr->ccfg), (unsigned long long)____raw_readq(&tx4939_ccfgptr->pcfg)); pr_info("%s DDRC -- EN:%08x", txx9_pcode_str, (__u32)____raw_readq(&tx4939_ddrcptr->winen)); for (i = 0; i < 4; i++) { __u64 win = ____raw_readq(&tx4939_ddrcptr->win[i]); if (!((__u32)____raw_readq(&tx4939_ddrcptr->winen) & (1 << i))) continue; /* disabled */ printk(KERN_CONT " #%d:%016llx", i, (unsigned long long)win); tx4939_sdram_resource[i].name = "DDR SDRAM"; tx4939_sdram_resource[i].start = (unsigned long)(win >> 48) << 20; tx4939_sdram_resource[i].end = ((((unsigned long)(win >> 32) & 0xffff) + 1) << 20) - 1; tx4939_sdram_resource[i].flags = IORESOURCE_MEM; request_resource(&iomem_resource, &tx4939_sdram_resource[i]); } printk(KERN_CONT "\n"); /* SRAM */ if (____raw_readq(&tx4939_sramcptr->cr) & 1) { unsigned int size = TX4939_SRAM_SIZE; tx4939_sram_resource.name = "SRAM"; tx4939_sram_resource.start = (____raw_readq(&tx4939_sramcptr->cr) >> (39-11)) & ~(size - 1); tx4939_sram_resource.end = tx4939_sram_resource.start + TX4939_SRAM_SIZE - 1; tx4939_sram_resource.flags = IORESOURCE_MEM; request_resource(&iomem_resource, &tx4939_sram_resource); } /* TMR */ /* disable all timers */ for (i = 0; i < TX4939_NR_TMR; i++) txx9_tmr_init(TX4939_TMR_REG(i) & 0xfffffffffULL); /* set PCIC1 reset (required to prevent hangup on BIST) */ txx9_set64(&tx4939_ccfgptr->clkctr, 
TX4939_CLKCTR_PCI1RST); pcfg = ____raw_readq(&tx4939_ccfgptr->pcfg); if (pcfg & (TX4939_PCFG_ET0MODE | TX4939_PCFG_ET1MODE)) { mdelay(1); /* at least 128 cpu clock */ /* clear PCIC1 reset */ txx9_clear64(&tx4939_ccfgptr->clkctr, TX4939_CLKCTR_PCI1RST); } else { pr_info("%s: stop PCIC1\n", txx9_pcode_str); /* stop PCIC1 */ txx9_set64(&tx4939_ccfgptr->clkctr, TX4939_CLKCTR_PCI1CKD); } if (!(pcfg & TX4939_PCFG_ET0MODE)) { pr_info("%s: stop ETH0\n", txx9_pcode_str); txx9_set64(&tx4939_ccfgptr->clkctr, TX4939_CLKCTR_ETH0RST); txx9_set64(&tx4939_ccfgptr->clkctr, TX4939_CLKCTR_ETH0CKD); } if (!(pcfg & TX4939_PCFG_ET1MODE)) { pr_info("%s: stop ETH1\n", txx9_pcode_str); txx9_set64(&tx4939_ccfgptr->clkctr, TX4939_CLKCTR_ETH1RST); txx9_set64(&tx4939_ccfgptr->clkctr, TX4939_CLKCTR_ETH1CKD); } _machine_restart = tx4939_machine_restart; board_be_init = tx4939_be_init; } void __init tx4939_time_init(unsigned int tmrnr) { if (____raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_TINTDIS) txx9_clockevent_init(TX4939_TMR_REG(tmrnr) & 0xfffffffffULL, TXX9_IRQ_BASE + TX4939_IR_TMR(tmrnr), TXX9_IMCLK); } void __init tx4939_sio_init(unsigned int sclk, unsigned int cts_mask) { int i; unsigned int ch_mask = 0; __u64 pcfg = __raw_readq(&tx4939_ccfgptr->pcfg); cts_mask |= ~1; /* only SIO0 have RTS/CTS */ if ((pcfg & TX4939_PCFG_SIO2MODE_MASK) != TX4939_PCFG_SIO2MODE_SIO0) cts_mask |= 1 << 0; /* disable SIO0 RTS/CTS by PCFG setting */ if ((pcfg & TX4939_PCFG_SIO2MODE_MASK) != TX4939_PCFG_SIO2MODE_SIO2) ch_mask |= 1 << 2; /* disable SIO2 by PCFG setting */ if (pcfg & TX4939_PCFG_SIO3MODE) ch_mask |= 1 << 3; /* disable SIO3 by PCFG setting */ for (i = 0; i < 4; i++) { if ((1 << i) & ch_mask) continue; txx9_sio_init(TX4939_SIO_REG(i) & 0xfffffffffULL, TXX9_IRQ_BASE + TX4939_IR_SIO(i), i, sclk, (1 << i) & cts_mask); } } #if defined(CONFIG_TC35815) || defined(CONFIG_TC35815_MODULE) static u32 tx4939_get_eth_speed(struct net_device *dev) { struct ethtool_cmd cmd; if (__ethtool_get_settings(dev, 
&cmd)) return 100; /* default 100Mbps */ return ethtool_cmd_speed(&cmd); } static int tx4939_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = ptr; if (event == NETDEV_CHANGE && netif_carrier_ok(dev)) { __u64 bit = 0; if (dev->irq == TXX9_IRQ_BASE + TX4939_IR_ETH(0)) bit = TX4939_PCFG_SPEED0; else if (dev->irq == TXX9_IRQ_BASE + TX4939_IR_ETH(1)) bit = TX4939_PCFG_SPEED1; if (bit) { if (tx4939_get_eth_speed(dev) == 100) txx9_set64(&tx4939_ccfgptr->pcfg, bit); else txx9_clear64(&tx4939_ccfgptr->pcfg, bit); } } return NOTIFY_DONE; } static struct notifier_block tx4939_netdev_notifier = { .notifier_call = tx4939_netdev_event, .priority = 1, }; void __init tx4939_ethaddr_init(unsigned char *addr0, unsigned char *addr1) { u64 pcfg = __raw_readq(&tx4939_ccfgptr->pcfg); if (addr0 && (pcfg & TX4939_PCFG_ET0MODE)) txx9_ethaddr_init(TXX9_IRQ_BASE + TX4939_IR_ETH(0), addr0); if (addr1 && (pcfg & TX4939_PCFG_ET1MODE)) txx9_ethaddr_init(TXX9_IRQ_BASE + TX4939_IR_ETH(1), addr1); register_netdevice_notifier(&tx4939_netdev_notifier); } #else void __init tx4939_ethaddr_init(unsigned char *addr0, unsigned char *addr1) { } #endif void __init tx4939_mtd_init(int ch) { struct physmap_flash_data pdata = { .width = TX4939_EBUSC_WIDTH(ch) / 8, }; unsigned long start = txx9_ce_res[ch].start; unsigned long size = txx9_ce_res[ch].end - start + 1; if (!(TX4939_EBUSC_CR(ch) & 0x8)) return; /* disabled */ txx9_physmap_flash_init(ch, start, size, &pdata); } #define TX4939_ATA_REG_PHYS(ch) (TX4939_ATA_REG(ch) & 0xfffffffffULL) void __init tx4939_ata_init(void) { static struct resource ata0_res[] = { { .start = TX4939_ATA_REG_PHYS(0), .end = TX4939_ATA_REG_PHYS(0) + 0x1000 - 1, .flags = IORESOURCE_MEM, }, { .start = TXX9_IRQ_BASE + TX4939_IR_ATA(0), .flags = IORESOURCE_IRQ, }, }; static struct resource ata1_res[] = { { .start = TX4939_ATA_REG_PHYS(1), .end = TX4939_ATA_REG_PHYS(1) + 0x1000 - 1, .flags = IORESOURCE_MEM, }, { .start = TXX9_IRQ_BASE 
+ TX4939_IR_ATA(1), .flags = IORESOURCE_IRQ, }, }; static struct platform_device ata0_dev = { .name = "tx4939ide", .id = 0, .num_resources = ARRAY_SIZE(ata0_res), .resource = ata0_res, }; static struct platform_device ata1_dev = { .name = "tx4939ide", .id = 1, .num_resources = ARRAY_SIZE(ata1_res), .resource = ata1_res, }; __u64 pcfg = __raw_readq(&tx4939_ccfgptr->pcfg); if (pcfg & TX4939_PCFG_ATA0MODE) platform_device_register(&ata0_dev); if ((pcfg & (TX4939_PCFG_ATA1MODE | TX4939_PCFG_ET1MODE | TX4939_PCFG_ET0MODE)) == TX4939_PCFG_ATA1MODE) platform_device_register(&ata1_dev); } void __init tx4939_rtc_init(void) { static struct resource res[] = { { .start = TX4939_RTC_REG & 0xfffffffffULL, .end = (TX4939_RTC_REG & 0xfffffffffULL) + 0x100 - 1, .flags = IORESOURCE_MEM, }, { .start = TXX9_IRQ_BASE + TX4939_IR_RTC, .flags = IORESOURCE_IRQ, }, }; static struct platform_device rtc_dev = { .name = "tx4939rtc", .id = -1, .num_resources = ARRAY_SIZE(res), .resource = res, }; platform_device_register(&rtc_dev); } void __init tx4939_ndfmc_init(unsigned int hold, unsigned int spw, unsigned char ch_mask, unsigned char wide_mask) { struct txx9ndfmc_platform_data plat_data = { .shift = 1, .gbus_clock = txx9_gbus_clock, .hold = hold, .spw = spw, .flags = NDFMC_PLAT_FLAG_NO_RSTR | NDFMC_PLAT_FLAG_HOLDADD | NDFMC_PLAT_FLAG_DUMMYWRITE, .ch_mask = ch_mask, .wide_mask = wide_mask, }; txx9_ndfmc_init(TX4939_NDFMC_REG & 0xfffffffffULL, &plat_data); } void __init tx4939_dmac_init(int memcpy_chan0, int memcpy_chan1) { struct txx9dmac_platform_data plat_data = { .have_64bit_regs = true, }; int i; for (i = 0; i < 2; i++) { plat_data.memcpy_chan = i ? 
memcpy_chan1 : memcpy_chan0; txx9_dmac_init(i, TX4939_DMA_REG(i) & 0xfffffffffULL, TXX9_IRQ_BASE + TX4939_IR_DMA(i, 0), &plat_data); } } void __init tx4939_aclc_init(void) { u64 pcfg = __raw_readq(&tx4939_ccfgptr->pcfg); if ((pcfg & TX4939_PCFG_I2SMODE_MASK) == TX4939_PCFG_I2SMODE_ACLC) txx9_aclc_init(TX4939_ACLC_REG & 0xfffffffffULL, TXX9_IRQ_BASE + TX4939_IR_ACLC, 1, 0, 1); } void __init tx4939_sramc_init(void) { if (tx4939_sram_resource.start) txx9_sramc_init(&tx4939_sram_resource); } void __init tx4939_rng_init(void) { static struct resource res = { .start = TX4939_RNG_REG & 0xfffffffffULL, .end = (TX4939_RNG_REG & 0xfffffffffULL) + 0x30 - 1, .flags = IORESOURCE_MEM, }; static struct platform_device pdev = { .name = "tx4939-rng", .id = -1, .num_resources = 1, .resource = &res, }; platform_device_register(&pdev); } static void __init tx4939_stop_unused_modules(void) { __u64 pcfg, rst = 0, ckd = 0; char buf[128]; buf[0] = '\0'; local_irq_disable(); pcfg = ____raw_readq(&tx4939_ccfgptr->pcfg); if ((pcfg & TX4939_PCFG_I2SMODE_MASK) != TX4939_PCFG_I2SMODE_ACLC) { rst |= TX4939_CLKCTR_ACLRST; ckd |= TX4939_CLKCTR_ACLCKD; strcat(buf, " ACLC"); } if ((pcfg & TX4939_PCFG_I2SMODE_MASK) != TX4939_PCFG_I2SMODE_I2S && (pcfg & TX4939_PCFG_I2SMODE_MASK) != TX4939_PCFG_I2SMODE_I2S_ALT) { rst |= TX4939_CLKCTR_I2SRST; ckd |= TX4939_CLKCTR_I2SCKD; strcat(buf, " I2S"); } if (!(pcfg & TX4939_PCFG_ATA0MODE)) { rst |= TX4939_CLKCTR_ATA0RST; ckd |= TX4939_CLKCTR_ATA0CKD; strcat(buf, " ATA0"); } if (!(pcfg & TX4939_PCFG_ATA1MODE)) { rst |= TX4939_CLKCTR_ATA1RST; ckd |= TX4939_CLKCTR_ATA1CKD; strcat(buf, " ATA1"); } if (pcfg & TX4939_PCFG_SPIMODE) { rst |= TX4939_CLKCTR_SPIRST; ckd |= TX4939_CLKCTR_SPICKD; strcat(buf, " SPI"); } if (!(pcfg & (TX4939_PCFG_VSSMODE | TX4939_PCFG_VPSMODE))) { rst |= TX4939_CLKCTR_VPCRST; ckd |= TX4939_CLKCTR_VPCCKD; strcat(buf, " VPC"); } if ((pcfg & TX4939_PCFG_SIO2MODE_MASK) != TX4939_PCFG_SIO2MODE_SIO2) { rst |= TX4939_CLKCTR_SIO2RST; ckd |= 
TX4939_CLKCTR_SIO2CKD; strcat(buf, " SIO2"); } if (pcfg & TX4939_PCFG_SIO3MODE) { rst |= TX4939_CLKCTR_SIO3RST; ckd |= TX4939_CLKCTR_SIO3CKD; strcat(buf, " SIO3"); } if (rst | ckd) { txx9_set64(&tx4939_ccfgptr->clkctr, rst); txx9_set64(&tx4939_ccfgptr->clkctr, ckd); } local_irq_enable(); if (buf[0]) pr_info("%s: stop%s\n", txx9_pcode_str, buf); } static int __init tx4939_late_init(void) { if (txx9_pcode != 0x4939) return -ENODEV; tx4939_stop_unused_modules(); return 0; } late_initcall(tx4939_late_init);
gpl-2.0
Entropy512/android_kernel_motorola_msm8226
drivers/mtd/tests/mtd_torturetest.c
5070
13371
/* * Copyright (C) 2006-2008 Artem Bityutskiy * Copyright (C) 2006-2008 Jarkko Lavinen * Copyright (C) 2006-2008 Adrian Hunter * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; see the file COPYING. If not, write to the Free Software * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Authors: Artem Bityutskiy, Jarkko Lavinen, Adria Hunter * * WARNING: this test program may kill your flash and your device. Do not * use it unless you know what you do. Authors are not responsible for any * damage caused by this program. 
*/ #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/err.h> #include <linux/mtd/mtd.h> #include <linux/slab.h> #include <linux/sched.h> #define PRINT_PREF KERN_INFO "mtd_torturetest: " #define RETRIES 3 static int eb = 8; module_param(eb, int, S_IRUGO); MODULE_PARM_DESC(eb, "eraseblock number within the selected MTD device"); static int ebcnt = 32; module_param(ebcnt, int, S_IRUGO); MODULE_PARM_DESC(ebcnt, "number of consecutive eraseblocks to torture"); static int pgcnt; module_param(pgcnt, int, S_IRUGO); MODULE_PARM_DESC(pgcnt, "number of pages per eraseblock to torture (0 => all)"); static int dev = -EINVAL; module_param(dev, int, S_IRUGO); MODULE_PARM_DESC(dev, "MTD device number to use"); static int gran = 512; module_param(gran, int, S_IRUGO); MODULE_PARM_DESC(gran, "how often the status information should be printed"); static int check = 1; module_param(check, int, S_IRUGO); MODULE_PARM_DESC(check, "if the written data should be checked"); static unsigned int cycles_count; module_param(cycles_count, uint, S_IRUGO); MODULE_PARM_DESC(cycles_count, "how many erase cycles to do " "(infinite by default)"); static struct mtd_info *mtd; /* This buffer contains 0x555555...0xAAAAAA... pattern */ static unsigned char *patt_5A5; /* This buffer contains 0xAAAAAA...0x555555... pattern */ static unsigned char *patt_A5A; /* This buffer contains all 0xFF bytes */ static unsigned char *patt_FF; /* This a temporary buffer is use when checking data */ static unsigned char *check_buf; /* How many erase cycles were done */ static unsigned int erase_cycles; static int pgsize; static struct timeval start, finish; static void report_corrupt(unsigned char *read, unsigned char *written); static inline void start_timing(void) { do_gettimeofday(&start); } static inline void stop_timing(void) { do_gettimeofday(&finish); } /* * Erase eraseblock number @ebnum. 
*/ static inline int erase_eraseblock(int ebnum) { int err; struct erase_info ei; loff_t addr = ebnum * mtd->erasesize; memset(&ei, 0, sizeof(struct erase_info)); ei.mtd = mtd; ei.addr = addr; ei.len = mtd->erasesize; err = mtd_erase(mtd, &ei); if (err) { printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); return err; } if (ei.state == MTD_ERASE_FAILED) { printk(PRINT_PREF "some erase error occurred at EB %d\n", ebnum); return -EIO; } return 0; } /* * Check that the contents of eraseblock number @enbum is equivalent to the * @buf buffer. */ static inline int check_eraseblock(int ebnum, unsigned char *buf) { int err, retries = 0; size_t read; loff_t addr = ebnum * mtd->erasesize; size_t len = mtd->erasesize; if (pgcnt) { addr = (ebnum + 1) * mtd->erasesize - pgcnt * pgsize; len = pgcnt * pgsize; } retry: err = mtd_read(mtd, addr, len, &read, check_buf); if (mtd_is_bitflip(err)) printk(PRINT_PREF "single bit flip occurred at EB %d " "MTD reported that it was fixed.\n", ebnum); else if (err) { printk(PRINT_PREF "error %d while reading EB %d, " "read %zd\n", err, ebnum, read); return err; } if (read != len) { printk(PRINT_PREF "failed to read %zd bytes from EB %d, " "read only %zd, but no error reported\n", len, ebnum, read); return -EIO; } if (memcmp(buf, check_buf, len)) { printk(PRINT_PREF "read wrong data from EB %d\n", ebnum); report_corrupt(check_buf, buf); if (retries++ < RETRIES) { /* Try read again */ yield(); printk(PRINT_PREF "re-try reading data from EB %d\n", ebnum); goto retry; } else { printk(PRINT_PREF "retried %d times, still errors, " "give-up\n", RETRIES); return -EINVAL; } } if (retries != 0) printk(PRINT_PREF "only attempt number %d was OK (!!!)\n", retries); return 0; } static inline int write_pattern(int ebnum, void *buf) { int err; size_t written; loff_t addr = ebnum * mtd->erasesize; size_t len = mtd->erasesize; if (pgcnt) { addr = (ebnum + 1) * mtd->erasesize - pgcnt * pgsize; len = pgcnt * pgsize; } err = mtd_write(mtd, addr, 
len, &written, buf); if (err) { printk(PRINT_PREF "error %d while writing EB %d, written %zd" " bytes\n", err, ebnum, written); return err; } if (written != len) { printk(PRINT_PREF "written only %zd bytes of %zd, but no error" " reported\n", written, len); return -EIO; } return 0; } static int __init tort_init(void) { int err = 0, i, infinite = !cycles_count; int bad_ebs[ebcnt]; printk(KERN_INFO "\n"); printk(KERN_INFO "=================================================\n"); printk(PRINT_PREF "Warning: this program is trying to wear out your " "flash, stop it if this is not wanted.\n"); if (dev < 0) { printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n"); return -EINVAL; } printk(PRINT_PREF "MTD device: %d\n", dev); printk(PRINT_PREF "torture %d eraseblocks (%d-%d) of mtd%d\n", ebcnt, eb, eb + ebcnt - 1, dev); if (pgcnt) printk(PRINT_PREF "torturing just %d pages per eraseblock\n", pgcnt); printk(PRINT_PREF "write verify %s\n", check ? 
"enabled" : "disabled"); mtd = get_mtd_device(NULL, dev); if (IS_ERR(mtd)) { err = PTR_ERR(mtd); printk(PRINT_PREF "error: cannot get MTD device\n"); return err; } if (mtd->writesize == 1) { printk(PRINT_PREF "not NAND flash, assume page size is 512 " "bytes.\n"); pgsize = 512; } else pgsize = mtd->writesize; if (pgcnt && (pgcnt > mtd->erasesize / pgsize || pgcnt < 0)) { printk(PRINT_PREF "error: invalid pgcnt value %d\n", pgcnt); goto out_mtd; } err = -ENOMEM; patt_5A5 = kmalloc(mtd->erasesize, GFP_KERNEL); if (!patt_5A5) { printk(PRINT_PREF "error: cannot allocate memory\n"); goto out_mtd; } patt_A5A = kmalloc(mtd->erasesize, GFP_KERNEL); if (!patt_A5A) { printk(PRINT_PREF "error: cannot allocate memory\n"); goto out_patt_5A5; } patt_FF = kmalloc(mtd->erasesize, GFP_KERNEL); if (!patt_FF) { printk(PRINT_PREF "error: cannot allocate memory\n"); goto out_patt_A5A; } check_buf = kmalloc(mtd->erasesize, GFP_KERNEL); if (!check_buf) { printk(PRINT_PREF "error: cannot allocate memory\n"); goto out_patt_FF; } err = 0; /* Initialize patterns */ memset(patt_FF, 0xFF, mtd->erasesize); for (i = 0; i < mtd->erasesize / pgsize; i++) { if (!(i & 1)) { memset(patt_5A5 + i * pgsize, 0x55, pgsize); memset(patt_A5A + i * pgsize, 0xAA, pgsize); } else { memset(patt_5A5 + i * pgsize, 0xAA, pgsize); memset(patt_A5A + i * pgsize, 0x55, pgsize); } } /* * Check if there is a bad eraseblock among those we are going to test. */ memset(&bad_ebs[0], 0, sizeof(int) * ebcnt); if (mtd_can_have_bb(mtd)) { for (i = eb; i < eb + ebcnt; i++) { err = mtd_block_isbad(mtd, (loff_t)i * mtd->erasesize); if (err < 0) { printk(PRINT_PREF "block_isbad() returned %d " "for EB %d\n", err, i); goto out; } if (err) { printk("EB %d is bad. 
Skip it.\n", i); bad_ebs[i - eb] = 1; } } } start_timing(); while (1) { int i; void *patt; /* Erase all eraseblocks */ for (i = eb; i < eb + ebcnt; i++) { if (bad_ebs[i - eb]) continue; err = erase_eraseblock(i); if (err) goto out; cond_resched(); } /* Check if the eraseblocks contain only 0xFF bytes */ if (check) { for (i = eb; i < eb + ebcnt; i++) { if (bad_ebs[i - eb]) continue; err = check_eraseblock(i, patt_FF); if (err) { printk(PRINT_PREF "verify failed" " for 0xFF... pattern\n"); goto out; } cond_resched(); } } /* Write the pattern */ for (i = eb; i < eb + ebcnt; i++) { if (bad_ebs[i - eb]) continue; if ((eb + erase_cycles) & 1) patt = patt_5A5; else patt = patt_A5A; err = write_pattern(i, patt); if (err) goto out; cond_resched(); } /* Verify what we wrote */ if (check) { for (i = eb; i < eb + ebcnt; i++) { if (bad_ebs[i - eb]) continue; if ((eb + erase_cycles) & 1) patt = patt_5A5; else patt = patt_A5A; err = check_eraseblock(i, patt); if (err) { printk(PRINT_PREF "verify failed for %s" " pattern\n", ((eb + erase_cycles) & 1) ? "0x55AA55..." 
: "0xAA55AA..."); goto out; } cond_resched(); } } erase_cycles += 1; if (erase_cycles % gran == 0) { long ms; stop_timing(); ms = (finish.tv_sec - start.tv_sec) * 1000 + (finish.tv_usec - start.tv_usec) / 1000; printk(PRINT_PREF "%08u erase cycles done, took %lu " "milliseconds (%lu seconds)\n", erase_cycles, ms, ms / 1000); start_timing(); } if (!infinite && --cycles_count == 0) break; } out: printk(PRINT_PREF "finished after %u erase cycles\n", erase_cycles); kfree(check_buf); out_patt_FF: kfree(patt_FF); out_patt_A5A: kfree(patt_A5A); out_patt_5A5: kfree(patt_5A5); out_mtd: put_mtd_device(mtd); if (err) printk(PRINT_PREF "error %d occurred during torturing\n", err); printk(KERN_INFO "=================================================\n"); return err; } module_init(tort_init); static void __exit tort_exit(void) { return; } module_exit(tort_exit); static int countdiffs(unsigned char *buf, unsigned char *check_buf, unsigned offset, unsigned len, unsigned *bytesp, unsigned *bitsp); static void print_bufs(unsigned char *read, unsigned char *written, int start, int len); /* * Report the detailed information about how the read EB differs from what was * written. 
*/ static void report_corrupt(unsigned char *read, unsigned char *written) { int i; int bytes, bits, pages, first; int offset, len; size_t check_len = mtd->erasesize; if (pgcnt) check_len = pgcnt * pgsize; bytes = bits = pages = 0; for (i = 0; i < check_len; i += pgsize) if (countdiffs(written, read, i, pgsize, &bytes, &bits) >= 0) pages++; printk(PRINT_PREF "verify fails on %d pages, %d bytes/%d bits\n", pages, bytes, bits); printk(PRINT_PREF "The following is a list of all differences between" " what was read from flash and what was expected\n"); for (i = 0; i < check_len; i += pgsize) { cond_resched(); bytes = bits = 0; first = countdiffs(written, read, i, pgsize, &bytes, &bits); if (first < 0) continue; printk("-------------------------------------------------------" "----------------------------------\n"); printk(PRINT_PREF "Page %zd has %d bytes/%d bits failing verify," " starting at offset 0x%x\n", (mtd->erasesize - check_len + i) / pgsize, bytes, bits, first); offset = first & ~0x7; len = ((first + bytes) | 0x7) + 1 - offset; print_bufs(read, written, offset, len); } } static void print_bufs(unsigned char *read, unsigned char *written, int start, int len) { int i = 0, j1, j2; char *diff; printk("Offset Read Written\n"); while (i < len) { printk("0x%08x: ", start + i); diff = " "; for (j1 = 0; j1 < 8 && i + j1 < len; j1++) { printk(" %02x", read[start + i + j1]); if (read[start + i + j1] != written[start + i + j1]) diff = "***"; } while (j1 < 8) { printk(" "); j1 += 1; } printk(" %s ", diff); for (j2 = 0; j2 < 8 && i + j2 < len; j2++) printk(" %02x", written[start + i + j2]); printk("\n"); i += 8; } } /* * Count the number of differing bytes and bits and return the first differing * offset. 
*/ static int countdiffs(unsigned char *buf, unsigned char *check_buf, unsigned offset, unsigned len, unsigned *bytesp, unsigned *bitsp) { unsigned i, bit; int first = -1; for (i = offset; i < offset + len; i++) if (buf[i] != check_buf[i]) { first = i; break; } while (i < offset + len) { if (buf[i] != check_buf[i]) { (*bytesp)++; bit = 1; while (bit < 256) { if ((buf[i] & bit) != (check_buf[i] & bit)) (*bitsp)++; bit <<= 1; } } i++; } return first; } MODULE_DESCRIPTION("Eraseblock torturing module"); MODULE_AUTHOR("Artem Bityutskiy, Jarkko Lavinen, Adrian Hunter"); MODULE_LICENSE("GPL");
gpl-2.0
fcooper/sitara-ti-linux-kernel
drivers/platform/x86/ibm_rtl.c
5582
8273
/*
 * IBM Real-Time Linux driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2010
 *
 * Author: Keith Mannthey <kmannth@us.ibm.com>
 *         Vernon Mauery <vernux@us.ibm.com>
 *
 */

/* Prefix every log line with the module name. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/mutex.h>
#include <asm/bios_ebda.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>

/* Module parameter: skip the DMI vendor check and load anyway. */
static bool force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Force driver load, ignore DMI data");

/* Module parameter: enable RTL_DEBUG() output (runtime-writable). */
static bool debug;
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Show debug output");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Keith Mannthey <kmmanth@us.ibm.com>");
MODULE_AUTHOR("Vernon Mauery <vernux@us.ibm.com>");

/* Values of ibm_rtl_table.cmd_address_type: command port is I/O or MMIO. */
#define RTL_ADDR_TYPE_IO	1
#define RTL_ADDR_TYPE_MMIO	2

/* Commands written to ibm_rtl_table.command (real-time mode enter/exit). */
#define RTL_CMD_ENTER_PRTM	1
#define RTL_CMD_EXIT_PRTM	2

/* The RTL table as presented by the EBDA: */
struct ibm_rtl_table {
	char signature[5]; /* signature should be "_RTL_" */
	u8 version;
	u8 rt_status;
	u8 command;
	u8 command_status;
	u8 cmd_address_type;
	u8 cmd_granularity;
	u8 cmd_offset;
	u16 reserve1;
	u32 cmd_port_address; /* platform dependent address */
	u32 cmd_port_value; /* platform dependent value */
} __attribute__((packed));

/* to locate "_RTL_" signature do a masked 5-byte integer compare */
#define RTL_SIGNATURE 0x0000005f4c54525fULL
#define RTL_MASK      0x000000ffffffffffULL

/* Debug print, gated on the "debug" module parameter. */
#define RTL_DEBUG(fmt, ...)				\
do {							\
	if (debug)					\
		pr_info(fmt, ##__VA_ARGS__);		\
} while (0)

/* Serializes mode-switch requests issued through ibm_rtl_write(). */
static DEFINE_MUTEX(rtl_lock);
/* Mapped view of the "_RTL_" table found inside the EBDA. */
static struct ibm_rtl_table __iomem *rtl_table;
/* Mapping of the EBDA region itself (first 4 bytes, then the whole table). */
static void __iomem *ebda_map;
/* Mapped command port the firmware watches for mode-switch writes. */
static void __iomem *rtl_cmd_addr;
static u8 rtl_cmd_type;  /* RTL_ADDR_TYPE_IO or RTL_ADDR_TYPE_MMIO */
static u8 rtl_cmd_width; /* command port width in bits: 8, 16 or 32 */

/* Map the command port as MMIO or I/O-port space, per rtl_cmd_type. */
static void __iomem *rtl_port_map(phys_addr_t addr, unsigned long len)
{
	if (rtl_cmd_type == RTL_ADDR_TYPE_MMIO)
		return ioremap(addr, len);
	return ioport_map(addr, len);
}

/* Undo rtl_port_map(); tolerates a NULL mapping. */
static void rtl_port_unmap(void __iomem *addr)
{
	if (addr && rtl_cmd_type == RTL_ADDR_TYPE_MMIO)
		iounmap(addr);
	else
		ioport_unmap(addr);
}

/*
 * Ask the firmware to enter (value == 1) or exit premium real-time mode.
 *
 * Writes the command byte into the RTL table, pokes the platform command
 * port with the table-supplied value at the table-supplied width, then
 * polls the command byte (10 ms steps, ~5 s budget) until the firmware
 * clears it.  Returns 0 on success, -EIO on timeout or if the firmware
 * reports a failed command via command_status.
 */
static int ibm_rtl_write(u8 value)
{
	int ret = 0, count = 0;
	static u32 cmd_port_val;

	RTL_DEBUG("%s(%d)\n", __func__, value);

	value = value == 1 ? RTL_CMD_ENTER_PRTM : RTL_CMD_EXIT_PRTM;

	mutex_lock(&rtl_lock);

	/* Only issue the command if we are not already in the target state. */
	if (ioread8(&rtl_table->rt_status) != value) {
		iowrite8(value, &rtl_table->command);

		switch (rtl_cmd_width) {
		case 8:
			cmd_port_val = ioread8(&rtl_table->cmd_port_value);
			RTL_DEBUG("cmd_port_val = %u\n", cmd_port_val);
			iowrite8((u8)cmd_port_val, rtl_cmd_addr);
			break;
		case 16:
			cmd_port_val = ioread16(&rtl_table->cmd_port_value);
			RTL_DEBUG("cmd_port_val = %u\n", cmd_port_val);
			iowrite16((u16)cmd_port_val, rtl_cmd_addr);
			break;
		case 32:
			cmd_port_val = ioread32(&rtl_table->cmd_port_value);
			RTL_DEBUG("cmd_port_val = %u\n", cmd_port_val);
			iowrite32(cmd_port_val, rtl_cmd_addr);
			break;
		}

		/* Firmware clears the command byte when it has acted on it. */
		while (ioread8(&rtl_table->command)) {
			msleep(10);
			if (count++ > 500) {
				pr_err("Hardware not responding to "
						"mode switch request\n");
				ret = -EIO;
				break;
			}
		}

		if (ioread8(&rtl_table->command_status)) {
			RTL_DEBUG("command_status reports failed command\n");
			ret = -EIO;
		}
	}

	mutex_unlock(&rtl_lock);
	return ret;
}

/* sysfs "version" attribute: RTL table version byte. */
static ssize_t rtl_show_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", (int)ioread8(&rtl_table->version));
}

/* sysfs "state" attribute (read): current rt_status byte. */
static ssize_t rtl_show_state(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	return sprintf(buf, "%d\n", ioread8(&rtl_table->rt_status));
}

/* sysfs "state" attribute (write): accepts "0" or "1" (optionally with \n). */
static ssize_t rtl_set_state(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf,
			     size_t count)
{
	ssize_t ret;

	if (count < 1 || count > 2)
		return -EINVAL;

	switch (buf[0]) {
	case '0':
		ret = ibm_rtl_write(0);
		break;
	case '1':
		ret = ibm_rtl_write(1);
		break;
	default:
		ret = -EINVAL;
	}
	if (ret >= 0)
		ret = count;

	return ret;
}

/* Virtual bus/subsystem this driver's attributes hang off: /sys/devices/ibm_rtl */
static struct bus_type rtl_subsys = {
	.name = "ibm_rtl",
	.dev_name = "ibm_rtl",
};

static DEVICE_ATTR(version, S_IRUGO, rtl_show_version, NULL);
static DEVICE_ATTR(state, 0600, rtl_show_state, rtl_set_state);

/* NULL-terminated list used by setup/teardown below. */
static struct device_attribute *rtl_attributes[] = {
	&dev_attr_version,
	&dev_attr_state,
	NULL
};

/*
 * Register the ibm_rtl subsystem and create its attribute files.
 * NOTE(review): device_create_file() return values are ignored here,
 * so a failed attribute creation goes unreported.
 */
static int rtl_setup_sysfs(void)
{
	int ret, i;

	ret = subsys_system_register(&rtl_subsys, NULL);
	if (!ret) {
		for (i = 0; rtl_attributes[i]; i++)
			device_create_file(rtl_subsys.dev_root,
					   rtl_attributes[i]);
	}
	return ret;
}

/* Remove the attribute files and unregister the subsystem. */
static void rtl_teardown_sysfs(void)
{
	int i;

	for (i = 0; rtl_attributes[i]; i++)
		device_remove_file(rtl_subsys.dev_root, rtl_attributes[i]);
	bus_unregister(&rtl_subsys);
}

/* Only load on machines whose DMI vendor string is "IBM" (unless force=1). */
static struct dmi_system_id __initdata ibm_rtl_dmi_table[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
		},
	},
	{ }
};

/*
 * Module init: locate the "_RTL_" table in the EBDA, map the platform
 * command port it describes, and expose the version/state sysfs files.
 * Returns -ENODEV when the hardware or table is absent, -ENOMEM on
 * mapping failures.
 */
static int __init ibm_rtl_init(void)
{
	unsigned long ebda_addr, ebda_size;
	unsigned int ebda_kb;
	int ret = -ENODEV, i;

	if (force)
		pr_warn("module loaded by force\n");
	/* first ensure that we are running on IBM HW */
	else if (efi_enabled(EFI_BOOT) || !dmi_check_system(ibm_rtl_dmi_table))
		return -ENODEV;

	/* Get the address for the Extended BIOS Data Area */
	ebda_addr = get_bios_ebda();
	if (!ebda_addr) {
		RTL_DEBUG("no BIOS EBDA found\n");
		return -ENODEV;
	}

	/* Map just enough to read the EBDA size field. */
	ebda_map = ioremap(ebda_addr, 4);
	if (!ebda_map)
		return -ENOMEM;

	/* First word in the EDBA is the Size in KB */
	ebda_kb = ioread16(ebda_map);
	RTL_DEBUG("EBDA is %d kB\n", ebda_kb);

	if (ebda_kb == 0)
		goto out;

	iounmap(ebda_map);
	ebda_size = ebda_kb*1024;

	/* Remap the whole table */
	ebda_map = ioremap(ebda_addr, ebda_size);
	if (!ebda_map)
		return -ENOMEM;

	/* search for the _RTL_ signature at the start of the table */
	/* NOTE(review): void-pointer arithmetic advances 1 byte per step
	 * while the bound divides by sizeof(unsigned int) — confirm the
	 * intended scan range against upstream. */
	for (i = 0 ; i < ebda_size/sizeof(unsigned int); i++) {
		struct ibm_rtl_table __iomem * tmp;
		tmp = (struct ibm_rtl_table __iomem *) (ebda_map+i);
		if ((readq(&tmp->signature) & RTL_MASK) == RTL_SIGNATURE) {
			phys_addr_t addr;
			unsigned int plen;
			RTL_DEBUG("found RTL_SIGNATURE at %p\n", tmp);
			rtl_table = tmp;
			/* The address, value, width and offset are platform
			 * dependent and found in the ibm_rtl_table */
			rtl_cmd_width = ioread8(&rtl_table->cmd_granularity);
			rtl_cmd_type = ioread8(&rtl_table->cmd_address_type);
			RTL_DEBUG("rtl_cmd_width = %u, rtl_cmd_type = %u\n",
				rtl_cmd_width, rtl_cmd_type);
			addr = ioread32(&rtl_table->cmd_port_address);
			RTL_DEBUG("addr = %#llx\n", (unsigned long long)addr);
			plen = rtl_cmd_width/sizeof(char);
			rtl_cmd_addr = rtl_port_map(addr, plen);
			RTL_DEBUG("rtl_cmd_addr = %p\n", rtl_cmd_addr);
			if (!rtl_cmd_addr) {
				ret = -ENOMEM;
				break;
			}
			ret = rtl_setup_sysfs();
			break;
		}
	}

out:
	/* On any failure, release whatever was mapped. */
	if (ret) {
		iounmap(ebda_map);
		rtl_port_unmap(rtl_cmd_addr);
	}

	return ret;
}

/* Module exit: leave real-time mode, then tear down sysfs and mappings. */
static void __exit ibm_rtl_exit(void)
{
	if (rtl_table) {
		RTL_DEBUG("cleaning up");
		/* do not leave the machine in SMI-free mode */
		ibm_rtl_write(0);
		/* unmap, unlink and remove all traces */
		rtl_teardown_sysfs();
		iounmap(ebda_map);
		rtl_port_unmap(rtl_cmd_addr);
	}
}

module_init(ibm_rtl_init);
module_exit(ibm_rtl_exit);
gpl-2.0
multirom-m8/kernel_htc_m8gpe
drivers/staging/winbond/mto.c
7886
5141
/*
 * ============================================================================
 *  MTO.C -
 *
 *  Description:
 *    MAC Throughput Optimization for W89C33 802.11g WLAN STA.
 *
 *    The following MIB attributes or internal variables will be affected
 *    while the MTO is being executed:
 *       dot11FragmentationThreshold,
 *       dot11RTSThreshold,
 *       transmission rate and PLCP preamble type,
 *       CCA mode,
 *       antenna diversity.
 *
 *  Copyright (c) 2003 Winbond Electronics Corp. All rights reserved.
 * ============================================================================
 */
#include "sme_api.h"
#include "wbhal.h"
#include "wb35reg_f.h"
#include "core.h"

/* Declare SQ3 to rate and fragmentation threshold table */
/* Declare fragmentation thresholds table */
#define MTO_MAX_FRAG_TH_LEVELS		5
#define MTO_MAX_DATA_RATE_LEVELS	12

/* Selectable fragmentation thresholds, in bytes, indexed by level. */
u16 MTO_Frag_Th_Tbl[MTO_MAX_FRAG_TH_LEVELS] = {
	256, 384, 512, 768, 1536
};

/*
 * Declare data rate table:
 * The following table will be changed at anytime if the opration rate
 * supported by AP don't match the table
 */
static u8 MTO_Data_Rate_Tbl[MTO_MAX_DATA_RATE_LEVELS] = {
	2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108
};

/* this record the retry rate at different data rate */
static int retryrate_rec[MTO_MAX_DATA_RATE_LEVELS];

/* Flag consulted by MTO_SetTxCount(); set elsewhere (not in this file). */
static u8 boSparseTxTraffic;

void MTO_Init(struct wbsoft_priv *adapter);
void TxRateReductionCtrl(struct wbsoft_priv *adapter);
void MTO_SetTxCount(struct wbsoft_priv *adapter, u8 t0, u8 index);
void MTO_TxFailed(struct wbsoft_priv *adapter);
void hal_get_dto_para(struct wbsoft_priv *adapter, char *buffer);

/*
 * ===========================================================================
 * MTO_Init --
 *
 *  Description:
 *    Initialize MTO parameters.
 *
 *    This function should be invoked during system initialization.
 *
 *  Arguments:
 *    adapter - The pointer to the Miniport adapter Context
 *
 *  Note: the MTO_*() / MTOPARA_*() accessors are macros defined in the
 *  included headers (presumably expanding to adapter state fields —
 *  confirm against mto.h).
 * ===========================================================================
 */
void MTO_Init(struct wbsoft_priv *adapter)
{
	int i;

	MTO_PREAMBLE_TYPE() = MTO_PREAMBLE_SHORT; /* for test */

	/* Reset antenna-diversity counters and signal-quality accumulators. */
	MTO_CNT_ANT(0) = 0;
	MTO_CNT_ANT(1) = 0;
	MTO_SQ_ANT(0) = 0;
	MTO_SQ_ANT(1) = 0;

	MTO_AGING_TIMEOUT() = 0;

	/* The following parameters should be initialized to the values set by user */
	MTO_RATE_LEVEL() = 0;
	MTO_FRAG_TH_LEVEL() = 4;
	/* RTS threshold starts just above the fragmentation threshold. */
	MTO_RTS_THRESHOLD() = MTO_FRAG_TH() + 1;
	MTO_RTS_THRESHOLD_SETUP() = MTO_FRAG_TH() + 1;
	MTO_RATE_CHANGE_ENABLE() = 1;
	MTO_FRAG_CHANGE_ENABLE() = 0;
	MTO_POWER_CHANGE_ENABLE() = 1;
	MTO_PREAMBLE_CHANGE_ENABLE() = 1;
	MTO_RTS_CHANGE_ENABLE() = 0;

	for (i = 0; i < MTO_MAX_DATA_RATE_LEVELS; i++)
		retryrate_rec[i] = 5;

	MTO_TXFLOWCOUNT() = 0;
	/* --------- DTO threshold parameters ------------- */
	MTOPARA_PERIODIC_CHECK_CYCLE() = 10;
	MTOPARA_RSSI_TH_FOR_ANTDIV() = 10;
	MTOPARA_TXCOUNT_TH_FOR_CALC_RATE() = 50;
	MTOPARA_TXRATE_INC_TH() = 10;
	MTOPARA_TXRATE_DEC_TH() = 30;
	MTOPARA_TXRATE_EQ_TH() = 40;
	MTOPARA_TXRATE_BACKOFF() = 12;
	MTOPARA_TXRETRYRATE_REDUCE() = 6;

	/* Pick a default TX power index per RF chip unless EEPROM supplies one. */
	if (MTO_TXPOWER_FROM_EEPROM == 0xff) {
		switch (MTO_HAL()->phy_type) {
		case RF_AIROHA_2230:
		case RF_AIROHA_2230S:
			MTOPARA_TXPOWER_INDEX() = 46; /* MAX-8 @@ Only for AL 2230 */
			break;
		case RF_AIROHA_7230:
			MTOPARA_TXPOWER_INDEX() = 49;
			break;
		case RF_WB_242:
			MTOPARA_TXPOWER_INDEX() = 10;
			break;
		case RF_WB_242_1:
			MTOPARA_TXPOWER_INDEX() = 24;
			break;
		}
	} else { /* follow the setting from EEPROM */
		MTOPARA_TXPOWER_INDEX() = MTO_TXPOWER_FROM_EEPROM;
	}
	RFSynthesizer_SetPowerIndex(MTO_HAL(), (u8) MTOPARA_TXPOWER_INDEX());
	/* ------------------------------------------------ */

	/* For RSSI turning -- Cancel load from EEPROM */
	MTO_DATA().RSSI_high = -41;
	MTO_DATA().RSSI_low = -60;
}

/* ===========================================================================
 * Description:
 *	If we enable DTO, we will ignore the tx count with different tx rate
 *	from DTO rate. This is because when we adjust DTO tx rate, there could
 *	be some packets in the tx queue with previous tx rate
 *
 *	index is the per-packet retry count; retries are capped at 7 (or 3
 *	for the reduced-rate 48 path) when folded into the HAL counters.
 */
void MTO_SetTxCount(struct wbsoft_priv *adapter, u8 tx_rate, u8 index)
{
	MTO_TXFLOWCOUNT()++;
	if ((MTO_ENABLE == 1) && (MTO_RATE_CHANGE_ENABLE() == 1)) {
		if (tx_rate == MTO_DATA_RATE()) {
			if (index == 0) {
				/* No retries: count whole-cycle credit under
				 * sparse traffic, otherwise one fragment. */
				if (boSparseTxTraffic)
					MTO_HAL()->dto_tx_frag_count +=
						MTOPARA_PERIODIC_CHECK_CYCLE();
				else
					MTO_HAL()->dto_tx_frag_count += 1;
			} else {
				if (index < 8) {
					MTO_HAL()->dto_tx_retry_count += index;
					MTO_HAL()->dto_tx_frag_count +=
						(index + 1);
				} else {
					MTO_HAL()->dto_tx_retry_count += 7;
					MTO_HAL()->dto_tx_frag_count += 7;
				}
			}
		} else if (MTO_DATA_RATE() > 48 && tx_rate == 48) {
			/* for reducing data rate scheme, do not calculate
			   different data rate. 3 is the reducing data rate at
			   retry. */
			if (index < 3) {
				MTO_HAL()->dto_tx_retry_count += index;
				MTO_HAL()->dto_tx_frag_count += (index + 1);
			} else {
				MTO_HAL()->dto_tx_retry_count += 3;
				MTO_HAL()->dto_tx_frag_count += 3;
			}
		}
	} else {
		/* DTO disabled: fold every transmission in unconditionally. */
		MTO_HAL()->dto_tx_retry_count += index;
		MTO_HAL()->dto_tx_frag_count += (index + 1);
	}
}
gpl-2.0
jinankjain/linux
arch/avr32/mach-at32ap/clock.c
8142
6452
/*
 * Clock management for AT32AP CPUs
 *
 * Copyright (C) 2006 Atmel Corporation
 *
 * Based on arch/arm/mach-at91/clock.c
 *   Copyright (C) 2005 David Brownell
 *   Copyright (C) 2005 Ivan Kokshaysky
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/list.h>

#include <mach/chip.h>

#include "clock.h"

/* at32 clock list */
static LIST_HEAD(at32_clock_list);

/* clk_lock guards enable counts and rate changes; clk_list_lock guards
 * membership of at32_clock_list. */
static DEFINE_SPINLOCK(clk_lock);
static DEFINE_SPINLOCK(clk_list_lock);

/* Register a platform clock so clk_get() can find it by (dev, name). */
void at32_clk_register(struct clk *clk)
{
	spin_lock(&clk_list_lock);
	/* add the new item to the end of the list */
	list_add_tail(&clk->list, &at32_clock_list);
	spin_unlock(&clk_list_lock);
}

/* Linear lookup by owning device and clock name; caller holds
 * clk_list_lock. Returns ERR_PTR(-ENOENT) when no entry matches. */
static struct clk *__clk_get(struct device *dev, const char *id)
{
	struct clk *clk;

	list_for_each_entry(clk, &at32_clock_list, list) {
		if (clk->dev == dev && strcmp(id, clk->name) == 0) {
			return clk;
		}
	}

	return ERR_PTR(-ENOENT);
}

/* Public clk_get(): locked wrapper around __clk_get(). */
struct clk *clk_get(struct device *dev, const char *id)
{
	struct clk *clk;

	spin_lock(&clk_list_lock);
	clk = __clk_get(dev, id);
	spin_unlock(&clk_list_lock);

	return clk;
}
EXPORT_SYMBOL(clk_get);

void clk_put(struct clk *clk)
{
	/* clocks are static for now, we can't free them */
}
EXPORT_SYMBOL(clk_put);

/* Recursively enable parents first, then gate this clock on at first use.
 * Caller holds clk_lock. */
static void __clk_enable(struct clk *clk)
{
	if (clk->parent)
		__clk_enable(clk->parent);
	if (clk->users++ == 0 && clk->mode)
		clk->mode(clk, 1);
}

int clk_enable(struct clk *clk)
{
	unsigned long flags;

	spin_lock_irqsave(&clk_lock, flags);
	__clk_enable(clk);
	spin_unlock_irqrestore(&clk_lock, flags);

	return 0;
}
EXPORT_SYMBOL(clk_enable);

/* Mirror of __clk_enable(): gate off at last user, then release parents.
 * Warns (and bails) on an unbalanced disable. Caller holds clk_lock. */
static void __clk_disable(struct clk *clk)
{
	if (clk->users == 0) {
		printk(KERN_ERR "%s: mismatched disable\n", clk->name);
		WARN_ON(1);
		return;
	}

	if (--clk->users == 0 && clk->mode)
		clk->mode(clk, 0);
	if (clk->parent)
		__clk_disable(clk->parent);
}

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	spin_lock_irqsave(&clk_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clk_lock, flags);
}
EXPORT_SYMBOL(clk_disable);

/* Query the current rate via the clock's get_rate callback, under clk_lock. */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long flags;
	unsigned long rate;

	spin_lock_irqsave(&clk_lock, flags);
	rate = clk->get_rate(clk);
	spin_unlock_irqrestore(&clk_lock, flags);

	return rate;
}
EXPORT_SYMBOL(clk_get_rate);

/* Dry-run rate change: set_rate callback invoked with apply == 0, so the
 * hardware is not touched; returns the rate that would be achieved. */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags, actual_rate;

	if (!clk->set_rate)
		return -ENOSYS;

	spin_lock_irqsave(&clk_lock, flags);
	actual_rate = clk->set_rate(clk, rate, 0);
	spin_unlock_irqrestore(&clk_lock, flags);

	return actual_rate;
}
EXPORT_SYMBOL(clk_round_rate);

/* Apply a rate change (set_rate callback with apply == 1). Returns 0 on
 * success or the callback's negative error. */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	long ret;

	if (!clk->set_rate)
		return -ENOSYS;

	spin_lock_irqsave(&clk_lock, flags);
	ret = clk->set_rate(clk, rate, 1);
	spin_unlock_irqrestore(&clk_lock, flags);

	return (ret < 0) ? ret : 0;
}
EXPORT_SYMBOL(clk_set_rate);

/* Reparent the clock if it supports it; -ENOSYS otherwise. */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	int ret;

	if (!clk->set_parent)
		return -ENOSYS;

	spin_lock_irqsave(&clk_lock, flags);
	ret = clk->set_parent(clk, parent);
	spin_unlock_irqrestore(&clk_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL(clk_get_parent);



#ifdef CONFIG_DEBUG_FS

/* /sys/kernel/debug/at32ap_clk */

#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include "pm.h"

/* Indentation step and maximum nesting for the tree dump below. */
#define	NEST_DELTA	2
#define	NEST_MAX	6

/* Cursor carried through the recursive dump: output file + current indent. */
struct clkinf {
	struct seq_file	*s;
	unsigned	nest;
};

/* Print one clock line, then recurse over all clocks whose parent is
 * this one (scans the whole list per level). */
static void
dump_clock(struct clk *parent, struct clkinf *r)
{
	unsigned	nest = r->nest;
	char		buf[16 + NEST_MAX];
	struct clk	*clk;
	unsigned	i;

	/* skip clocks coupled to devices that aren't registered */
	if (parent->dev && !dev_name(parent->dev) && !parent->users)
		return;

	/* <nest spaces> name <pad to end> */
	memset(buf, ' ', sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = 0;
	i = strlen(parent->name);
	memcpy(buf + nest, parent->name,
			min(i, (unsigned)(sizeof(buf) - 1 - nest)));

	seq_printf(r->s, "%s%c users=%2d %-3s %9ld Hz",
		buf, parent->set_parent ? '*' : ' ',
		parent->users,
		parent->users ? "on" : "off",	/* NOTE: not-paranoid!! */
		clk_get_rate(parent));
	if (parent->dev)
		seq_printf(r->s, ", for %s", dev_name(parent->dev));
	seq_printf(r->s, "\n");

	/* cost of this scan is small, but not linear... */
	r->nest = nest + NEST_DELTA;
	list_for_each_entry(clk, &at32_clock_list, list) {
		if (clk->parent == parent)
			dump_clock(clk, r);
	}
	r->nest = nest;
}

/* debugfs show: dump PM registers, then the clock tree rooted at the
 * three oscillators. */
static int clk_show(struct seq_file *s, void *unused)
{
	struct clkinf	r;
	int		i;
	struct clk	*clk;

	/* show all the power manager registers */
	seq_printf(s, "MCCTRL = %8x\n", pm_readl(MCCTRL));
	seq_printf(s, "CKSEL = %8x\n", pm_readl(CKSEL));
	seq_printf(s, "CPUMASK = %8x\n", pm_readl(CPU_MASK));
	seq_printf(s, "HSBMASK = %8x\n", pm_readl(HSB_MASK));
	seq_printf(s, "PBAMASK = %8x\n", pm_readl(PBA_MASK));
	seq_printf(s, "PBBMASK = %8x\n", pm_readl(PBB_MASK));
	seq_printf(s, "PLL0 = %8x\n", pm_readl(PLL0));
	seq_printf(s, "PLL1 = %8x\n", pm_readl(PLL1));
	seq_printf(s, "IMR = %8x\n", pm_readl(IMR));
	for (i = 0; i < 8; i++) {
		/* NOTE(review): index 5 is skipped — presumably an
		 * unimplemented GCCTRL slot; confirm against the datasheet. */
		if (i == 5)
			continue;
		seq_printf(s, "GCCTRL%d = %8x\n", i, pm_readl(GCCTRL(i)));
	}

	seq_printf(s, "\n");

	r.s = s;
	r.nest = 0;
	/* protected from changes on the list while dumping */
	spin_lock(&clk_list_lock);

	/* show clock tree as derived from the three oscillators */
	clk = __clk_get(NULL, "osc32k");
	dump_clock(clk, &r);
	clk_put(clk);

	clk = __clk_get(NULL, "osc0");
	dump_clock(clk, &r);
	clk_put(clk);

	clk = __clk_get(NULL, "osc1");
	dump_clock(clk, &r);
	clk_put(clk);

	spin_unlock(&clk_list_lock);

	return 0;
}

static int clk_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_show, NULL);
}

static const struct file_operations clk_operations = {
	.open		= clk_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Create the debugfs entry; the return value of debugfs_create_file is
 * deliberately discarded (debugfs is best-effort). */
static int __init clk_debugfs_init(void)
{
	(void) debugfs_create_file("at32ap_clk", S_IFREG | S_IRUGO,
			NULL, NULL, &clk_operations);

	return 0;
}
postcore_initcall(clk_debugfs_init);

#endif
gpl-2.0
SoloProject/platform_kernel_lge_hammerhead
sound/pci/asihpi/hpi6000.c
8398
49704
/****************************************************************************** AudioScience HPI driver Copyright (C) 1997-2011 AudioScience Inc. <support@audioscience.com> This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation; This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Hardware Programming Interface (HPI) for AudioScience ASI6200 series adapters. These PCI bus adapters are based on the TI C6711 DSP. Exported functions: void HPI_6000(struct hpi_message *phm, struct hpi_response *phr) #defines HIDE_PCI_ASSERTS to show the PCI asserts PROFILE_DSP2 get profile data from DSP2 if present (instead of DSP 1) (C) Copyright AudioScience Inc. 
1998-2003 *******************************************************************************/ #define SOURCEFILE_NAME "hpi6000.c" #include "hpi_internal.h" #include "hpimsginit.h" #include "hpidebug.h" #include "hpi6000.h" #include "hpidspcd.h" #include "hpicmn.h" #define HPI_HIF_BASE (0x00000200) /* start of C67xx internal RAM */ #define HPI_HIF_ADDR(member) \ (HPI_HIF_BASE + offsetof(struct hpi_hif_6000, member)) #define HPI_HIF_ERROR_MASK 0x4000 /* HPI6000 specific error codes */ #define HPI6000_ERROR_BASE 900 /* not actually used anywhere */ /* operational/messaging errors */ #define HPI6000_ERROR_MSG_RESP_IDLE_TIMEOUT 901 #define HPI6000_ERROR_MSG_RESP_GET_RESP_ACK 903 #define HPI6000_ERROR_MSG_GET_ADR 904 #define HPI6000_ERROR_RESP_GET_ADR 905 #define HPI6000_ERROR_MSG_RESP_BLOCKWRITE32 906 #define HPI6000_ERROR_MSG_RESP_BLOCKREAD32 907 #define HPI6000_ERROR_CONTROL_CACHE_PARAMS 909 #define HPI6000_ERROR_SEND_DATA_IDLE_TIMEOUT 911 #define HPI6000_ERROR_SEND_DATA_ACK 912 #define HPI6000_ERROR_SEND_DATA_ADR 913 #define HPI6000_ERROR_SEND_DATA_TIMEOUT 914 #define HPI6000_ERROR_SEND_DATA_CMD 915 #define HPI6000_ERROR_SEND_DATA_WRITE 916 #define HPI6000_ERROR_SEND_DATA_IDLECMD 917 #define HPI6000_ERROR_GET_DATA_IDLE_TIMEOUT 921 #define HPI6000_ERROR_GET_DATA_ACK 922 #define HPI6000_ERROR_GET_DATA_CMD 923 #define HPI6000_ERROR_GET_DATA_READ 924 #define HPI6000_ERROR_GET_DATA_IDLECMD 925 #define HPI6000_ERROR_CONTROL_CACHE_ADDRLEN 951 #define HPI6000_ERROR_CONTROL_CACHE_READ 952 #define HPI6000_ERROR_CONTROL_CACHE_FLUSH 953 #define HPI6000_ERROR_MSG_RESP_GETRESPCMD 961 #define HPI6000_ERROR_MSG_RESP_IDLECMD 962 /* Initialisation/bootload errors */ #define HPI6000_ERROR_UNHANDLED_SUBSYS_ID 930 /* can't access PCI2040 */ #define HPI6000_ERROR_INIT_PCI2040 931 /* can't access DSP HPI i/f */ #define HPI6000_ERROR_INIT_DSPHPI 932 /* can't access internal DSP memory */ #define HPI6000_ERROR_INIT_DSPINTMEM 933 /* can't access SDRAM - test#1 */ #define 
HPI6000_ERROR_INIT_SDRAM1 934 /* can't access SDRAM - test#2 */ #define HPI6000_ERROR_INIT_SDRAM2 935 #define HPI6000_ERROR_INIT_VERIFY 938 #define HPI6000_ERROR_INIT_NOACK 939 #define HPI6000_ERROR_INIT_PLDTEST1 941 #define HPI6000_ERROR_INIT_PLDTEST2 942 /* local defines */ #define HIDE_PCI_ASSERTS #define PROFILE_DSP2 /* for PCI2040 i/f chip */ /* HPI CSR registers */ /* word offsets from CSR base */ /* use when io addresses defined as u32 * */ #define INTERRUPT_EVENT_SET 0 #define INTERRUPT_EVENT_CLEAR 1 #define INTERRUPT_MASK_SET 2 #define INTERRUPT_MASK_CLEAR 3 #define HPI_ERROR_REPORT 4 #define HPI_RESET 5 #define HPI_DATA_WIDTH 6 #define MAX_DSPS 2 /* HPI registers, spaced 8K bytes = 2K words apart */ #define DSP_SPACING 0x800 #define CONTROL 0x0000 #define ADDRESS 0x0200 #define DATA_AUTOINC 0x0400 #define DATA 0x0600 #define TIMEOUT 500000 struct dsp_obj { __iomem u32 *prHPI_control; __iomem u32 *prHPI_address; __iomem u32 *prHPI_data; __iomem u32 *prHPI_data_auto_inc; char c_dsp_rev; /*A, B */ u32 control_cache_address_on_dsp; u32 control_cache_length_on_dsp; struct hpi_adapter_obj *pa_parent_adapter; }; struct hpi_hw_obj { __iomem u32 *dw2040_HPICSR; __iomem u32 *dw2040_HPIDSP; u16 num_dsp; struct dsp_obj ado[MAX_DSPS]; u32 message_buffer_address_on_dsp; u32 response_buffer_address_on_dsp; u32 pCI2040HPI_error_count; struct hpi_control_cache_single control_cache[HPI_NMIXER_CONTROLS]; struct hpi_control_cache *p_cache; }; static u16 hpi6000_dsp_block_write32(struct hpi_adapter_obj *pao, u16 dsp_index, u32 hpi_address, u32 *source, u32 count); static u16 hpi6000_dsp_block_read32(struct hpi_adapter_obj *pao, u16 dsp_index, u32 hpi_address, u32 *dest, u32 count); static short hpi6000_adapter_boot_load_dsp(struct hpi_adapter_obj *pao, u32 *pos_error_code); static short hpi6000_check_PCI2040_error_flag(struct hpi_adapter_obj *pao, u16 read_or_write); #define H6READ 1 #define H6WRITE 0 static short hpi6000_update_control_cache(struct hpi_adapter_obj *pao, 
struct hpi_message *phm); static short hpi6000_message_response_sequence(struct hpi_adapter_obj *pao, u16 dsp_index, struct hpi_message *phm, struct hpi_response *phr); static void hw_message(struct hpi_adapter_obj *pao, struct hpi_message *phm, struct hpi_response *phr); static short hpi6000_wait_dsp_ack(struct hpi_adapter_obj *pao, u16 dsp_index, u32 ack_value); static short hpi6000_send_host_command(struct hpi_adapter_obj *pao, u16 dsp_index, u32 host_cmd); static void hpi6000_send_dsp_interrupt(struct dsp_obj *pdo); static short hpi6000_send_data(struct hpi_adapter_obj *pao, u16 dsp_index, struct hpi_message *phm, struct hpi_response *phr); static short hpi6000_get_data(struct hpi_adapter_obj *pao, u16 dsp_index, struct hpi_message *phm, struct hpi_response *phr); static void hpi_write_word(struct dsp_obj *pdo, u32 address, u32 data); static u32 hpi_read_word(struct dsp_obj *pdo, u32 address); static void hpi_write_block(struct dsp_obj *pdo, u32 address, u32 *pdata, u32 length); static void hpi_read_block(struct dsp_obj *pdo, u32 address, u32 *pdata, u32 length); static void subsys_create_adapter(struct hpi_message *phm, struct hpi_response *phr); static void adapter_delete(struct hpi_adapter_obj *pao, struct hpi_message *phm, struct hpi_response *phr); static void adapter_get_asserts(struct hpi_adapter_obj *pao, struct hpi_message *phm, struct hpi_response *phr); static short create_adapter_obj(struct hpi_adapter_obj *pao, u32 *pos_error_code); static void delete_adapter_obj(struct hpi_adapter_obj *pao); /* local globals */ static u16 gw_pci_read_asserts; /* used to count PCI2040 errors */ static u16 gw_pci_write_asserts; /* used to count PCI2040 errors */ static void subsys_message(struct hpi_message *phm, struct hpi_response *phr) { switch (phm->function) { case HPI_SUBSYS_CREATE_ADAPTER: subsys_create_adapter(phm, phr); break; default: phr->error = HPI_ERROR_INVALID_FUNC; break; } } static void control_message(struct hpi_adapter_obj *pao, struct hpi_message 
*phm, struct hpi_response *phr) { struct hpi_hw_obj *phw = pao->priv; switch (phm->function) { case HPI_CONTROL_GET_STATE: if (pao->has_control_cache) { u16 err; err = hpi6000_update_control_cache(pao, phm); if (err) { if (err >= HPI_ERROR_BACKEND_BASE) { phr->error = HPI_ERROR_CONTROL_CACHING; phr->specific_error = err; } else { phr->error = err; } break; } if (hpi_check_control_cache(phw->p_cache, phm, phr)) break; } hw_message(pao, phm, phr); break; case HPI_CONTROL_SET_STATE: hw_message(pao, phm, phr); hpi_cmn_control_cache_sync_to_msg(phw->p_cache, phm, phr); break; case HPI_CONTROL_GET_INFO: default: hw_message(pao, phm, phr); break; } } static void adapter_message(struct hpi_adapter_obj *pao, struct hpi_message *phm, struct hpi_response *phr) { switch (phm->function) { case HPI_ADAPTER_GET_ASSERT: adapter_get_asserts(pao, phm, phr); break; case HPI_ADAPTER_DELETE: adapter_delete(pao, phm, phr); break; default: hw_message(pao, phm, phr); break; } } static void outstream_message(struct hpi_adapter_obj *pao, struct hpi_message *phm, struct hpi_response *phr) { switch (phm->function) { case HPI_OSTREAM_HOSTBUFFER_ALLOC: case HPI_OSTREAM_HOSTBUFFER_FREE: /* Don't let these messages go to the HW function because * they're called without locking the spinlock. * For the HPI6000 adapters the HW would return * HPI_ERROR_INVALID_FUNC anyway. */ phr->error = HPI_ERROR_INVALID_FUNC; break; default: hw_message(pao, phm, phr); return; } } static void instream_message(struct hpi_adapter_obj *pao, struct hpi_message *phm, struct hpi_response *phr) { switch (phm->function) { case HPI_ISTREAM_HOSTBUFFER_ALLOC: case HPI_ISTREAM_HOSTBUFFER_FREE: /* Don't let these messages go to the HW function because * they're called without locking the spinlock. * For the HPI6000 adapters the HW would return * HPI_ERROR_INVALID_FUNC anyway. 
*/ phr->error = HPI_ERROR_INVALID_FUNC; break; default: hw_message(pao, phm, phr); return; } } /************************************************************************/ /** HPI_6000() * Entry point from HPIMAN * All calls to the HPI start here */ void HPI_6000(struct hpi_message *phm, struct hpi_response *phr) { struct hpi_adapter_obj *pao = NULL; if (phm->object != HPI_OBJ_SUBSYSTEM) { pao = hpi_find_adapter(phm->adapter_index); if (!pao) { hpi_init_response(phr, phm->object, phm->function, HPI_ERROR_BAD_ADAPTER_NUMBER); HPI_DEBUG_LOG(DEBUG, "invalid adapter index: %d \n", phm->adapter_index); return; } /* Don't even try to communicate with crashed DSP */ if (pao->dsp_crashed >= 10) { hpi_init_response(phr, phm->object, phm->function, HPI_ERROR_DSP_HARDWARE); HPI_DEBUG_LOG(DEBUG, "adapter %d dsp crashed\n", phm->adapter_index); return; } } /* Init default response including the size field */ if (phm->function != HPI_SUBSYS_CREATE_ADAPTER) hpi_init_response(phr, phm->object, phm->function, HPI_ERROR_PROCESSING_MESSAGE); switch (phm->type) { case HPI_TYPE_REQUEST: switch (phm->object) { case HPI_OBJ_SUBSYSTEM: subsys_message(phm, phr); break; case HPI_OBJ_ADAPTER: phr->size = sizeof(struct hpi_response_header) + sizeof(struct hpi_adapter_res); adapter_message(pao, phm, phr); break; case HPI_OBJ_CONTROL: control_message(pao, phm, phr); break; case HPI_OBJ_OSTREAM: outstream_message(pao, phm, phr); break; case HPI_OBJ_ISTREAM: instream_message(pao, phm, phr); break; default: hw_message(pao, phm, phr); break; } break; default: phr->error = HPI_ERROR_INVALID_TYPE; break; } } /************************************************************************/ /* SUBSYSTEM */ /* create an adapter object and initialise it based on resource information * passed in in the message * NOTE - you cannot use this function AND the FindAdapters function at the * same time, the application must use only one of them to get the adapters */ static void subsys_create_adapter(struct hpi_message 
*phm, struct hpi_response *phr) { /* create temp adapter obj, because we don't know what index yet */ struct hpi_adapter_obj ao; struct hpi_adapter_obj *pao; u32 os_error_code; u16 err = 0; u32 dsp_index = 0; HPI_DEBUG_LOG(VERBOSE, "subsys_create_adapter\n"); memset(&ao, 0, sizeof(ao)); ao.priv = kzalloc(sizeof(struct hpi_hw_obj), GFP_KERNEL); if (!ao.priv) { HPI_DEBUG_LOG(ERROR, "can't get mem for adapter object\n"); phr->error = HPI_ERROR_MEMORY_ALLOC; return; } /* create the adapter object based on the resource information */ ao.pci = *phm->u.s.resource.r.pci; err = create_adapter_obj(&ao, &os_error_code); if (err) { delete_adapter_obj(&ao); if (err >= HPI_ERROR_BACKEND_BASE) { phr->error = HPI_ERROR_DSP_BOOTLOAD; phr->specific_error = err; } else { phr->error = err; } phr->u.s.data = os_error_code; return; } /* need to update paParentAdapter */ pao = hpi_find_adapter(ao.index); if (!pao) { /* We just added this adapter, why can't we find it!? */ HPI_DEBUG_LOG(ERROR, "lost adapter after boot\n"); phr->error = HPI_ERROR_BAD_ADAPTER; return; } for (dsp_index = 0; dsp_index < MAX_DSPS; dsp_index++) { struct hpi_hw_obj *phw = pao->priv; phw->ado[dsp_index].pa_parent_adapter = pao; } phr->u.s.adapter_type = ao.type; phr->u.s.adapter_index = ao.index; phr->error = 0; } static void adapter_delete(struct hpi_adapter_obj *pao, struct hpi_message *phm, struct hpi_response *phr) { delete_adapter_obj(pao); hpi_delete_adapter(pao); phr->error = 0; } /* this routine is called from SubSysFindAdapter and SubSysCreateAdapter */ static short create_adapter_obj(struct hpi_adapter_obj *pao, u32 *pos_error_code) { short boot_error = 0; u32 dsp_index = 0; u32 control_cache_size = 0; u32 control_cache_count = 0; struct hpi_hw_obj *phw = pao->priv; /* The PCI2040 has the following address map */ /* BAR0 - 4K = HPI control and status registers on PCI2040 (HPI CSR) */ /* BAR1 - 32K = HPI registers on DSP */ phw->dw2040_HPICSR = pao->pci.ap_mem_base[0]; phw->dw2040_HPIDSP = 
pao->pci.ap_mem_base[1]; HPI_DEBUG_LOG(VERBOSE, "csr %p, dsp %p\n", phw->dw2040_HPICSR, phw->dw2040_HPIDSP); /* set addresses for the possible DSP HPI interfaces */ for (dsp_index = 0; dsp_index < MAX_DSPS; dsp_index++) { phw->ado[dsp_index].prHPI_control = phw->dw2040_HPIDSP + (CONTROL + DSP_SPACING * dsp_index); phw->ado[dsp_index].prHPI_address = phw->dw2040_HPIDSP + (ADDRESS + DSP_SPACING * dsp_index); phw->ado[dsp_index].prHPI_data = phw->dw2040_HPIDSP + (DATA + DSP_SPACING * dsp_index); phw->ado[dsp_index].prHPI_data_auto_inc = phw->dw2040_HPIDSP + (DATA_AUTOINC + DSP_SPACING * dsp_index); HPI_DEBUG_LOG(VERBOSE, "ctl %p, adr %p, dat %p, dat++ %p\n", phw->ado[dsp_index].prHPI_control, phw->ado[dsp_index].prHPI_address, phw->ado[dsp_index].prHPI_data, phw->ado[dsp_index].prHPI_data_auto_inc); phw->ado[dsp_index].pa_parent_adapter = pao; } phw->pCI2040HPI_error_count = 0; pao->has_control_cache = 0; /* Set the default number of DSPs on this card */ /* This is (conditionally) adjusted after bootloading */ /* of the first DSP in the bootload section. 
*/ phw->num_dsp = 1; boot_error = hpi6000_adapter_boot_load_dsp(pao, pos_error_code); if (boot_error) return boot_error; HPI_DEBUG_LOG(INFO, "bootload DSP OK\n"); phw->message_buffer_address_on_dsp = 0L; phw->response_buffer_address_on_dsp = 0L; /* get info about the adapter by asking the adapter */ /* send a HPI_ADAPTER_GET_INFO message */ { struct hpi_message hm; struct hpi_response hr0; /* response from DSP 0 */ struct hpi_response hr1; /* response from DSP 1 */ u16 error = 0; HPI_DEBUG_LOG(VERBOSE, "send ADAPTER_GET_INFO\n"); memset(&hm, 0, sizeof(hm)); hm.type = HPI_TYPE_REQUEST; hm.size = sizeof(struct hpi_message); hm.object = HPI_OBJ_ADAPTER; hm.function = HPI_ADAPTER_GET_INFO; hm.adapter_index = 0; memset(&hr0, 0, sizeof(hr0)); memset(&hr1, 0, sizeof(hr1)); hr0.size = sizeof(hr0); hr1.size = sizeof(hr1); error = hpi6000_message_response_sequence(pao, 0, &hm, &hr0); if (hr0.error) { HPI_DEBUG_LOG(DEBUG, "message error %d\n", hr0.error); return hr0.error; } if (phw->num_dsp == 2) { error = hpi6000_message_response_sequence(pao, 1, &hm, &hr1); if (error) return error; } pao->type = hr0.u.ax.info.adapter_type; pao->index = hr0.u.ax.info.adapter_index; } memset(&phw->control_cache[0], 0, sizeof(struct hpi_control_cache_single) * HPI_NMIXER_CONTROLS); /* Read the control cache length to figure out if it is turned on */ control_cache_size = hpi_read_word(&phw->ado[0], HPI_HIF_ADDR(control_cache_size_in_bytes)); if (control_cache_size) { control_cache_count = hpi_read_word(&phw->ado[0], HPI_HIF_ADDR(control_cache_count)); phw->p_cache = hpi_alloc_control_cache(control_cache_count, control_cache_size, (unsigned char *) &phw->control_cache[0] ); if (phw->p_cache) pao->has_control_cache = 1; } HPI_DEBUG_LOG(DEBUG, "get adapter info ASI%04X index %d\n", pao->type, pao->index); if (phw->p_cache) phw->p_cache->adap_idx = pao->index; return hpi_add_adapter(pao); } static void delete_adapter_obj(struct hpi_adapter_obj *pao) { struct hpi_hw_obj *phw = pao->priv; if 
(pao->has_control_cache) hpi_free_control_cache(phw->p_cache); /* reset DSPs on adapter */ iowrite32(0x0003000F, phw->dw2040_HPICSR + HPI_RESET); kfree(phw); } /************************************************************************/ /* ADAPTER */ static void adapter_get_asserts(struct hpi_adapter_obj *pao, struct hpi_message *phm, struct hpi_response *phr) { #ifndef HIDE_PCI_ASSERTS /* if we have PCI2040 asserts then collect them */ if ((gw_pci_read_asserts > 0) || (gw_pci_write_asserts > 0)) { phr->u.ax.assert.p1 = gw_pci_read_asserts * 100 + gw_pci_write_asserts; phr->u.ax.assert.p2 = 0; phr->u.ax.assert.count = 1; /* assert count */ phr->u.ax.assert.dsp_index = -1; /* "dsp index" */ strcpy(phr->u.ax.assert.sz_message, "PCI2040 error"); phr->u.ax.assert.dsp_msg_addr = 0; gw_pci_read_asserts = 0; gw_pci_write_asserts = 0; phr->error = 0; } else #endif hw_message(pao, phm, phr); /*get DSP asserts */ return; } /************************************************************************/ /* LOW-LEVEL */ static short hpi6000_adapter_boot_load_dsp(struct hpi_adapter_obj *pao, u32 *pos_error_code) { struct hpi_hw_obj *phw = pao->priv; short error; u32 timeout; u32 read = 0; u32 i = 0; u32 data = 0; u32 j = 0; u32 test_addr = 0x80000000; u32 test_data = 0x00000001; u32 dw2040_reset = 0; u32 dsp_index = 0; u32 endian = 0; u32 adapter_info = 0; u32 delay = 0; struct dsp_code dsp_code; u16 boot_load_family = 0; /* NOTE don't use wAdapterType in this routine. 
It is not setup yet */ switch (pao->pci.pci_dev->subsystem_device) { case 0x5100: case 0x5110: /* ASI5100 revB or higher with C6711D */ case 0x5200: /* ASI5200 PCIe version of ASI5100 */ case 0x6100: case 0x6200: boot_load_family = HPI_ADAPTER_FAMILY_ASI(0x6200); break; default: return HPI6000_ERROR_UNHANDLED_SUBSYS_ID; } /* reset all DSPs, indicate two DSPs are present * set RST3-=1 to disconnect HAD8 to set DSP in little endian mode */ endian = 0; dw2040_reset = 0x0003000F; iowrite32(dw2040_reset, phw->dw2040_HPICSR + HPI_RESET); /* read back register to make sure PCI2040 chip is functioning * note that bits 4..15 are read-only and so should always return zero, * even though we wrote 1 to them */ hpios_delay_micro_seconds(1000); delay = ioread32(phw->dw2040_HPICSR + HPI_RESET); if (delay != dw2040_reset) { HPI_DEBUG_LOG(ERROR, "INIT_PCI2040 %x %x\n", dw2040_reset, delay); return HPI6000_ERROR_INIT_PCI2040; } /* Indicate that DSP#0,1 is a C6X */ iowrite32(0x00000003, phw->dw2040_HPICSR + HPI_DATA_WIDTH); /* set Bit30 and 29 - which will prevent Target aborts from being * issued upon HPI or GP error */ iowrite32(0x60000000, phw->dw2040_HPICSR + INTERRUPT_MASK_SET); /* isolate DSP HAD8 line from PCI2040 so that * Little endian can be set by pullup */ dw2040_reset = dw2040_reset & (~(endian << 3)); iowrite32(dw2040_reset, phw->dw2040_HPICSR + HPI_RESET); phw->ado[0].c_dsp_rev = 'B'; /* revB */ phw->ado[1].c_dsp_rev = 'B'; /* revB */ /*Take both DSPs out of reset, setting HAD8 to the correct Endian */ dw2040_reset = dw2040_reset & (~0x00000001); /* start DSP 0 */ iowrite32(dw2040_reset, phw->dw2040_HPICSR + HPI_RESET); dw2040_reset = dw2040_reset & (~0x00000002); /* start DSP 1 */ iowrite32(dw2040_reset, phw->dw2040_HPICSR + HPI_RESET); /* set HAD8 back to PCI2040, now that DSP set to little endian mode */ dw2040_reset = dw2040_reset & (~0x00000008); iowrite32(dw2040_reset, phw->dw2040_HPICSR + HPI_RESET); /*delay to allow DSP to get going */ 
hpios_delay_micro_seconds(100); /* loop through all DSPs, downloading DSP code */ for (dsp_index = 0; dsp_index < phw->num_dsp; dsp_index++) { struct dsp_obj *pdo = &phw->ado[dsp_index]; /* configure DSP so that we download code into the SRAM */ /* set control reg for little endian, HWOB=1 */ iowrite32(0x00010001, pdo->prHPI_control); /* test access to the HPI address register (HPIA) */ test_data = 0x00000001; for (j = 0; j < 32; j++) { iowrite32(test_data, pdo->prHPI_address); data = ioread32(pdo->prHPI_address); if (data != test_data) { HPI_DEBUG_LOG(ERROR, "INIT_DSPHPI %x %x %x\n", test_data, data, dsp_index); return HPI6000_ERROR_INIT_DSPHPI; } test_data = test_data << 1; } /* if C6713 the setup PLL to generate 225MHz from 25MHz. * Since the PLLDIV1 read is sometimes wrong, even on a C6713, * we're going to do this unconditionally */ /* PLLDIV1 should have a value of 8000 after reset */ /* if (HpiReadWord(pdo,0x01B7C118) == 0x8000) */ { /* C6713 datasheet says we cannot program PLL from HPI, * and indeed if we try to set the PLL multiply from the * HPI, the PLL does not seem to lock, * so we enable the PLL and use the default of x 7 */ /* bypass PLL */ hpi_write_word(pdo, 0x01B7C100, 0x0000); hpios_delay_micro_seconds(100); /* ** use default of PLL x7 ** */ /* EMIF = 225/3=75MHz */ hpi_write_word(pdo, 0x01B7C120, 0x8002); hpios_delay_micro_seconds(100); /* peri = 225/2 */ hpi_write_word(pdo, 0x01B7C11C, 0x8001); hpios_delay_micro_seconds(100); /* cpu = 225/1 */ hpi_write_word(pdo, 0x01B7C118, 0x8000); /* ~2ms delay */ hpios_delay_micro_seconds(2000); /* PLL not bypassed */ hpi_write_word(pdo, 0x01B7C100, 0x0001); /* ~2ms delay */ hpios_delay_micro_seconds(2000); } /* test r/w to internal DSP memory * C6711 has L2 cache mapped to 0x0 when reset * * revB - because of bug 3.0.1 last HPI read * (before HPI address issued) must be non-autoinc */ /* test each bit in the 32bit word */ for (i = 0; i < 100; i++) { test_addr = 0x00000000; test_data = 0x00000001; for (j = 
0; j < 32; j++) { hpi_write_word(pdo, test_addr + i, test_data); data = hpi_read_word(pdo, test_addr + i); if (data != test_data) { HPI_DEBUG_LOG(ERROR, "DSP mem %x %x %x %x\n", test_addr + i, test_data, data, dsp_index); return HPI6000_ERROR_INIT_DSPINTMEM; } test_data = test_data << 1; } } /* memory map of ASI6200 00000000-0000FFFF 16Kx32 internal program 01800000-019FFFFF Internal peripheral 80000000-807FFFFF CE0 2Mx32 SDRAM running @ 100MHz 90000000-9000FFFF CE1 Async peripherals: EMIF config ------------ Global EMIF control 0 - 1 - 2 - 3 CLK2EN = 1 CLKOUT2 enabled 4 CLK1EN = 0 CLKOUT1 disabled 5 EKEN = 1 <--!! C6713 specific, enables ECLKOUT 6 - 7 NOHOLD = 1 external HOLD disabled 8 HOLDA = 0 HOLDA output is low 9 HOLD = 0 HOLD input is low 10 ARDY = 1 ARDY input is high 11 BUSREQ = 0 BUSREQ output is low 12,13 Reserved = 1 */ hpi_write_word(pdo, 0x01800000, 0x34A8); /* EMIF CE0 setup - 2Mx32 Sync DRAM 31..28 Wr setup 27..22 Wr strobe 21..20 Wr hold 19..16 Rd setup 15..14 - 13..8 Rd strobe 7..4 MTYPE 0011 Sync DRAM 32bits 3 Wr hold MSB 2..0 Rd hold */ hpi_write_word(pdo, 0x01800008, 0x00000030); /* EMIF SDRAM Extension 31-21 0 20 WR2RD = 0 19-18 WR2DEAC = 1 17 WR2WR = 0 16-15 R2WDQM = 2 14-12 RD2WR = 4 11-10 RD2DEAC = 1 9 RD2RD = 1 8-7 THZP = 10b 6-5 TWR = 2-1 = 01b (tWR = 10ns) 4 TRRD = 0b = 2 ECLK (tRRD = 14ns) 3-1 TRAS = 5-1 = 100b (Tras=42ns = 5 ECLK) 1 CAS latency = 3 ECLK (for Micron 2M32-7 operating at 100Mhz) */ /* need to use this else DSP code crashes */ hpi_write_word(pdo, 0x01800020, 0x001BDF29); /* EMIF SDRAM control - set up for a 2Mx32 SDRAM (512x32x4 bank) 31 - - 30 SDBSZ 1 4 bank 29..28 SDRSZ 00 11 row address pins 27..26 SDCSZ 01 8 column address pins 25 RFEN 1 refersh enabled 24 INIT 1 init SDRAM 23..20 TRCD 0001 19..16 TRP 0001 15..12 TRC 0110 11..0 - - */ /* need to use this else DSP code crashes */ hpi_write_word(pdo, 0x01800018, 0x47117000); /* EMIF SDRAM Refresh Timing */ hpi_write_word(pdo, 0x0180001C, 0x00000410); /*MIF CE1 setup - 
Async peripherals @100MHz bus speed, each cycle is 10ns, 31..28 Wr setup = 1 27..22 Wr strobe = 3 30ns 21..20 Wr hold = 1 19..16 Rd setup =1 15..14 Ta = 2 13..8 Rd strobe = 3 30ns 7..4 MTYPE 0010 Async 32bits 3 Wr hold MSB =0 2..0 Rd hold = 1 */ { u32 cE1 = (1L << 28) | (3L << 22) | (1L << 20) | (1L << 16) | (2L << 14) | (3L << 8) | (2L << 4) | 1L; hpi_write_word(pdo, 0x01800004, cE1); } /* delay a little to allow SDRAM and DSP to "get going" */ hpios_delay_micro_seconds(1000); /* test access to SDRAM */ { test_addr = 0x80000000; test_data = 0x00000001; /* test each bit in the 32bit word */ for (j = 0; j < 32; j++) { hpi_write_word(pdo, test_addr, test_data); data = hpi_read_word(pdo, test_addr); if (data != test_data) { HPI_DEBUG_LOG(ERROR, "DSP dram %x %x %x %x\n", test_addr, test_data, data, dsp_index); return HPI6000_ERROR_INIT_SDRAM1; } test_data = test_data << 1; } /* test every Nth address in the DRAM */ #define DRAM_SIZE_WORDS 0x200000 /*2_mx32 */ #define DRAM_INC 1024 test_addr = 0x80000000; test_data = 0x0; for (i = 0; i < DRAM_SIZE_WORDS; i = i + DRAM_INC) { hpi_write_word(pdo, test_addr + i, test_data); test_data++; } test_addr = 0x80000000; test_data = 0x0; for (i = 0; i < DRAM_SIZE_WORDS; i = i + DRAM_INC) { data = hpi_read_word(pdo, test_addr + i); if (data != test_data) { HPI_DEBUG_LOG(ERROR, "DSP dram %x %x %x %x\n", test_addr + i, test_data, data, dsp_index); return HPI6000_ERROR_INIT_SDRAM2; } test_data++; } } /* write the DSP code down into the DSPs memory */ error = hpi_dsp_code_open(boot_load_family, pao->pci.pci_dev, &dsp_code, pos_error_code); if (error) return error; while (1) { u32 length; u32 address; u32 type; u32 *pcode; error = hpi_dsp_code_read_word(&dsp_code, &length); if (error) break; if (length == 0xFFFFFFFF) break; /* end of code */ error = hpi_dsp_code_read_word(&dsp_code, &address); if (error) break; error = hpi_dsp_code_read_word(&dsp_code, &type); if (error) break; error = hpi_dsp_code_read_block(length, &dsp_code, &pcode); 
if (error) break; error = hpi6000_dsp_block_write32(pao, (u16)dsp_index, address, pcode, length); if (error) break; } if (error) { hpi_dsp_code_close(&dsp_code); return error; } /* verify that code was written correctly */ /* this time through, assume no errors in DSP code file/array */ hpi_dsp_code_rewind(&dsp_code); while (1) { u32 length; u32 address; u32 type; u32 *pcode; hpi_dsp_code_read_word(&dsp_code, &length); if (length == 0xFFFFFFFF) break; /* end of code */ hpi_dsp_code_read_word(&dsp_code, &address); hpi_dsp_code_read_word(&dsp_code, &type); hpi_dsp_code_read_block(length, &dsp_code, &pcode); for (i = 0; i < length; i++) { data = hpi_read_word(pdo, address); if (data != *pcode) { error = HPI6000_ERROR_INIT_VERIFY; HPI_DEBUG_LOG(ERROR, "DSP verify %x %x %x %x\n", address, *pcode, data, dsp_index); break; } pcode++; address += 4; } if (error) break; } hpi_dsp_code_close(&dsp_code); if (error) return error; /* zero out the hostmailbox */ { u32 address = HPI_HIF_ADDR(host_cmd); for (i = 0; i < 4; i++) { hpi_write_word(pdo, address, 0); address += 4; } } /* write the DSP number into the hostmailbox */ /* structure before starting the DSP */ hpi_write_word(pdo, HPI_HIF_ADDR(dsp_number), dsp_index); /* write the DSP adapter Info into the */ /* hostmailbox before starting the DSP */ if (dsp_index > 0) hpi_write_word(pdo, HPI_HIF_ADDR(adapter_info), adapter_info); /* step 3. Start code by sending interrupt */ iowrite32(0x00030003, pdo->prHPI_control); hpios_delay_micro_seconds(10000); /* wait for a non-zero value in hostcmd - * indicating initialization is complete * * Init could take a while if DSP checks SDRAM memory * Was 200000. Increased to 2000000 for ASI8801 so we * don't get 938 errors. 
*/ timeout = 2000000; while (timeout) { do { read = hpi_read_word(pdo, HPI_HIF_ADDR(host_cmd)); } while (--timeout && hpi6000_check_PCI2040_error_flag(pao, H6READ)); if (read) break; /* The following is a workaround for bug #94: * Bluescreen on install and subsequent boots on a * DELL PowerEdge 600SC PC with 1.8GHz P4 and * ServerWorks chipset. Without this delay the system * locks up with a bluescreen (NOT GPF or pagefault). */ else hpios_delay_micro_seconds(10000); } if (timeout == 0) return HPI6000_ERROR_INIT_NOACK; /* read the DSP adapter Info from the */ /* hostmailbox structure after starting the DSP */ if (dsp_index == 0) { /*u32 dwTestData=0; */ u32 mask = 0; adapter_info = hpi_read_word(pdo, HPI_HIF_ADDR(adapter_info)); if (HPI_ADAPTER_FAMILY_ASI (HPI_HIF_ADAPTER_INFO_EXTRACT_ADAPTER (adapter_info)) == HPI_ADAPTER_FAMILY_ASI(0x6200)) /* all 6200 cards have this many DSPs */ phw->num_dsp = 2; /* test that the PLD is programmed */ /* and we can read/write 24bits */ #define PLD_BASE_ADDRESS 0x90000000L /*for ASI6100/6200/8800 */ switch (boot_load_family) { case HPI_ADAPTER_FAMILY_ASI(0x6200): /* ASI6100/6200 has 24bit path to FPGA */ mask = 0xFFFFFF00L; /* ASI5100 uses AX6 code, */ /* but has no PLD r/w register to test */ if (HPI_ADAPTER_FAMILY_ASI(pao->pci.pci_dev-> subsystem_device) == HPI_ADAPTER_FAMILY_ASI(0x5100)) mask = 0x00000000L; /* ASI5200 uses AX6 code, */ /* but has no PLD r/w register to test */ if (HPI_ADAPTER_FAMILY_ASI(pao->pci.pci_dev-> subsystem_device) == HPI_ADAPTER_FAMILY_ASI(0x5200)) mask = 0x00000000L; break; case HPI_ADAPTER_FAMILY_ASI(0x8800): /* ASI8800 has 16bit path to FPGA */ mask = 0xFFFF0000L; break; } test_data = 0xAAAAAA00L & mask; /* write to 24 bit Debug register (D31-D8) */ hpi_write_word(pdo, PLD_BASE_ADDRESS + 4L, test_data); read = hpi_read_word(pdo, PLD_BASE_ADDRESS + 4L) & mask; if (read != test_data) { HPI_DEBUG_LOG(ERROR, "PLD %x %x\n", test_data, read); return HPI6000_ERROR_INIT_PLDTEST1; } test_data = 0x55555500L 
& mask; hpi_write_word(pdo, PLD_BASE_ADDRESS + 4L, test_data); read = hpi_read_word(pdo, PLD_BASE_ADDRESS + 4L) & mask; if (read != test_data) { HPI_DEBUG_LOG(ERROR, "PLD %x %x\n", test_data, read); return HPI6000_ERROR_INIT_PLDTEST2; } } } /* for numDSP */ return 0; } #define PCI_TIMEOUT 100 static int hpi_set_address(struct dsp_obj *pdo, u32 address) { u32 timeout = PCI_TIMEOUT; do { iowrite32(address, pdo->prHPI_address); } while (hpi6000_check_PCI2040_error_flag(pdo->pa_parent_adapter, H6WRITE) && --timeout); if (timeout) return 0; return 1; } /* write one word to the HPI port */ static void hpi_write_word(struct dsp_obj *pdo, u32 address, u32 data) { if (hpi_set_address(pdo, address)) return; iowrite32(data, pdo->prHPI_data); } /* read one word from the HPI port */ static u32 hpi_read_word(struct dsp_obj *pdo, u32 address) { u32 data = 0; if (hpi_set_address(pdo, address)) return 0; /*? No way to return error */ /* take care of errata in revB DSP (2.0.1) */ data = ioread32(pdo->prHPI_data); return data; } /* write a block of 32bit words to the DSP HPI port using auto-inc mode */ static void hpi_write_block(struct dsp_obj *pdo, u32 address, u32 *pdata, u32 length) { u16 length16 = length - 1; if (length == 0) return; if (hpi_set_address(pdo, address)) return; iowrite32_rep(pdo->prHPI_data_auto_inc, pdata, length16); /* take care of errata in revB DSP (2.0.1) */ /* must end with non auto-inc */ iowrite32(*(pdata + length - 1), pdo->prHPI_data); } /** read a block of 32bit words from the DSP HPI port using auto-inc mode */ static void hpi_read_block(struct dsp_obj *pdo, u32 address, u32 *pdata, u32 length) { u16 length16 = length - 1; if (length == 0) return; if (hpi_set_address(pdo, address)) return; ioread32_rep(pdo->prHPI_data_auto_inc, pdata, length16); /* take care of errata in revB DSP (2.0.1) */ /* must end with non auto-inc */ *(pdata + length - 1) = ioread32(pdo->prHPI_data); } static u16 hpi6000_dsp_block_write32(struct hpi_adapter_obj *pao, u16 
dsp_index, u32 hpi_address, u32 *source, u32 count) { struct hpi_hw_obj *phw = pao->priv; struct dsp_obj *pdo = &phw->ado[dsp_index]; u32 time_out = PCI_TIMEOUT; int c6711_burst_size = 128; u32 local_hpi_address = hpi_address; int local_count = count; int xfer_size; u32 *pdata = source; while (local_count) { if (local_count > c6711_burst_size) xfer_size = c6711_burst_size; else xfer_size = local_count; time_out = PCI_TIMEOUT; do { hpi_write_block(pdo, local_hpi_address, pdata, xfer_size); } while (hpi6000_check_PCI2040_error_flag(pao, H6WRITE) && --time_out); if (!time_out) break; pdata += xfer_size; local_hpi_address += sizeof(u32) * xfer_size; local_count -= xfer_size; } if (time_out) return 0; else return 1; } static u16 hpi6000_dsp_block_read32(struct hpi_adapter_obj *pao, u16 dsp_index, u32 hpi_address, u32 *dest, u32 count) { struct hpi_hw_obj *phw = pao->priv; struct dsp_obj *pdo = &phw->ado[dsp_index]; u32 time_out = PCI_TIMEOUT; int c6711_burst_size = 16; u32 local_hpi_address = hpi_address; int local_count = count; int xfer_size; u32 *pdata = dest; u32 loop_count = 0; while (local_count) { if (local_count > c6711_burst_size) xfer_size = c6711_burst_size; else xfer_size = local_count; time_out = PCI_TIMEOUT; do { hpi_read_block(pdo, local_hpi_address, pdata, xfer_size); } while (hpi6000_check_PCI2040_error_flag(pao, H6READ) && --time_out); if (!time_out) break; pdata += xfer_size; local_hpi_address += sizeof(u32) * xfer_size; local_count -= xfer_size; loop_count++; } if (time_out) return 0; else return 1; } static short hpi6000_message_response_sequence(struct hpi_adapter_obj *pao, u16 dsp_index, struct hpi_message *phm, struct hpi_response *phr) { struct hpi_hw_obj *phw = pao->priv; struct dsp_obj *pdo = &phw->ado[dsp_index]; u32 timeout; u16 ack; u32 address; u32 length; u32 *p_data; u16 error = 0; ack = hpi6000_wait_dsp_ack(pao, dsp_index, HPI_HIF_IDLE); if (ack & HPI_HIF_ERROR_MASK) { pao->dsp_crashed++; return HPI6000_ERROR_MSG_RESP_IDLE_TIMEOUT; } 
pao->dsp_crashed = 0; /* get the message address and size */ if (phw->message_buffer_address_on_dsp == 0) { timeout = TIMEOUT; do { address = hpi_read_word(pdo, HPI_HIF_ADDR(message_buffer_address)); phw->message_buffer_address_on_dsp = address; } while (hpi6000_check_PCI2040_error_flag(pao, H6READ) && --timeout); if (!timeout) return HPI6000_ERROR_MSG_GET_ADR; } else address = phw->message_buffer_address_on_dsp; length = phm->size; /* send the message */ p_data = (u32 *)phm; if (hpi6000_dsp_block_write32(pao, dsp_index, address, p_data, (u16)length / 4)) return HPI6000_ERROR_MSG_RESP_BLOCKWRITE32; if (hpi6000_send_host_command(pao, dsp_index, HPI_HIF_GET_RESP)) return HPI6000_ERROR_MSG_RESP_GETRESPCMD; hpi6000_send_dsp_interrupt(pdo); ack = hpi6000_wait_dsp_ack(pao, dsp_index, HPI_HIF_GET_RESP); if (ack & HPI_HIF_ERROR_MASK) return HPI6000_ERROR_MSG_RESP_GET_RESP_ACK; /* get the response address */ if (phw->response_buffer_address_on_dsp == 0) { timeout = TIMEOUT; do { address = hpi_read_word(pdo, HPI_HIF_ADDR(response_buffer_address)); } while (hpi6000_check_PCI2040_error_flag(pao, H6READ) && --timeout); phw->response_buffer_address_on_dsp = address; if (!timeout) return HPI6000_ERROR_RESP_GET_ADR; } else address = phw->response_buffer_address_on_dsp; /* read the length of the response back from the DSP */ timeout = TIMEOUT; do { length = hpi_read_word(pdo, HPI_HIF_ADDR(length)); } while (hpi6000_check_PCI2040_error_flag(pao, H6READ) && --timeout); if (!timeout) length = sizeof(struct hpi_response); /* get the response */ p_data = (u32 *)phr; if (hpi6000_dsp_block_read32(pao, dsp_index, address, p_data, (u16)length / 4)) return HPI6000_ERROR_MSG_RESP_BLOCKREAD32; /* set i/f back to idle */ if (hpi6000_send_host_command(pao, dsp_index, HPI_HIF_IDLE)) return HPI6000_ERROR_MSG_RESP_IDLECMD; hpi6000_send_dsp_interrupt(pdo); error = hpi_validate_response(phm, phr); return error; } /* have to set up the below defines to match stuff in the MAP file */ #define 
MSG_ADDRESS (HPI_HIF_BASE+0x18) #define MSG_LENGTH 11 #define RESP_ADDRESS (HPI_HIF_BASE+0x44) #define RESP_LENGTH 16 #define QUEUE_START (HPI_HIF_BASE+0x88) #define QUEUE_SIZE 0x8000 static short hpi6000_send_data_check_adr(u32 address, u32 length_in_dwords) { /*#define CHECKING // comment this line in to enable checking */ #ifdef CHECKING if (address < (u32)MSG_ADDRESS) return 0; if (address > (u32)(QUEUE_START + QUEUE_SIZE)) return 0; if ((address + (length_in_dwords << 2)) > (u32)(QUEUE_START + QUEUE_SIZE)) return 0; #else (void)address; (void)length_in_dwords; return 1; #endif } static short hpi6000_send_data(struct hpi_adapter_obj *pao, u16 dsp_index, struct hpi_message *phm, struct hpi_response *phr) { struct hpi_hw_obj *phw = pao->priv; struct dsp_obj *pdo = &phw->ado[dsp_index]; u32 data_sent = 0; u16 ack; u32 length, address; u32 *p_data = (u32 *)phm->u.d.u.data.pb_data; u16 time_out = 8; (void)phr; /* round dwDataSize down to nearest 4 bytes */ while ((data_sent < (phm->u.d.u.data.data_size & ~3L)) && --time_out) { ack = hpi6000_wait_dsp_ack(pao, dsp_index, HPI_HIF_IDLE); if (ack & HPI_HIF_ERROR_MASK) return HPI6000_ERROR_SEND_DATA_IDLE_TIMEOUT; if (hpi6000_send_host_command(pao, dsp_index, HPI_HIF_SEND_DATA)) return HPI6000_ERROR_SEND_DATA_CMD; hpi6000_send_dsp_interrupt(pdo); ack = hpi6000_wait_dsp_ack(pao, dsp_index, HPI_HIF_SEND_DATA); if (ack & HPI_HIF_ERROR_MASK) return HPI6000_ERROR_SEND_DATA_ACK; do { /* get the address and size */ address = hpi_read_word(pdo, HPI_HIF_ADDR(address)); /* DSP returns number of DWORDS */ length = hpi_read_word(pdo, HPI_HIF_ADDR(length)); } while (hpi6000_check_PCI2040_error_flag(pao, H6READ)); if (!hpi6000_send_data_check_adr(address, length)) return HPI6000_ERROR_SEND_DATA_ADR; /* send the data. break data into 512 DWORD blocks (2K bytes) * and send using block write. 
2Kbytes is the max as this is the * memory window given to the HPI data register by the PCI2040 */ { u32 len = length; u32 blk_len = 512; while (len) { if (len < blk_len) blk_len = len; if (hpi6000_dsp_block_write32(pao, dsp_index, address, p_data, blk_len)) return HPI6000_ERROR_SEND_DATA_WRITE; address += blk_len * 4; p_data += blk_len; len -= blk_len; } } if (hpi6000_send_host_command(pao, dsp_index, HPI_HIF_IDLE)) return HPI6000_ERROR_SEND_DATA_IDLECMD; hpi6000_send_dsp_interrupt(pdo); data_sent += length * 4; } if (!time_out) return HPI6000_ERROR_SEND_DATA_TIMEOUT; return 0; } static short hpi6000_get_data(struct hpi_adapter_obj *pao, u16 dsp_index, struct hpi_message *phm, struct hpi_response *phr) { struct hpi_hw_obj *phw = pao->priv; struct dsp_obj *pdo = &phw->ado[dsp_index]; u32 data_got = 0; u16 ack; u32 length, address; u32 *p_data = (u32 *)phm->u.d.u.data.pb_data; (void)phr; /* this parameter not used! */ /* round dwDataSize down to nearest 4 bytes */ while (data_got < (phm->u.d.u.data.data_size & ~3L)) { ack = hpi6000_wait_dsp_ack(pao, dsp_index, HPI_HIF_IDLE); if (ack & HPI_HIF_ERROR_MASK) return HPI6000_ERROR_GET_DATA_IDLE_TIMEOUT; if (hpi6000_send_host_command(pao, dsp_index, HPI_HIF_GET_DATA)) return HPI6000_ERROR_GET_DATA_CMD; hpi6000_send_dsp_interrupt(pdo); ack = hpi6000_wait_dsp_ack(pao, dsp_index, HPI_HIF_GET_DATA); if (ack & HPI_HIF_ERROR_MASK) return HPI6000_ERROR_GET_DATA_ACK; /* get the address and size */ do { address = hpi_read_word(pdo, HPI_HIF_ADDR(address)); length = hpi_read_word(pdo, HPI_HIF_ADDR(length)); } while (hpi6000_check_PCI2040_error_flag(pao, H6READ)); /* read the data */ { u32 len = length; u32 blk_len = 512; while (len) { if (len < blk_len) blk_len = len; if (hpi6000_dsp_block_read32(pao, dsp_index, address, p_data, blk_len)) return HPI6000_ERROR_GET_DATA_READ; address += blk_len * 4; p_data += blk_len; len -= blk_len; } } if (hpi6000_send_host_command(pao, dsp_index, HPI_HIF_IDLE)) return HPI6000_ERROR_GET_DATA_IDLECMD; 
hpi6000_send_dsp_interrupt(pdo); data_got += length * 4; } return 0; } static void hpi6000_send_dsp_interrupt(struct dsp_obj *pdo) { iowrite32(0x00030003, pdo->prHPI_control); /* DSPINT */ } static short hpi6000_send_host_command(struct hpi_adapter_obj *pao, u16 dsp_index, u32 host_cmd) { struct hpi_hw_obj *phw = pao->priv; struct dsp_obj *pdo = &phw->ado[dsp_index]; u32 timeout = TIMEOUT; /* set command */ do { hpi_write_word(pdo, HPI_HIF_ADDR(host_cmd), host_cmd); /* flush the FIFO */ hpi_set_address(pdo, HPI_HIF_ADDR(host_cmd)); } while (hpi6000_check_PCI2040_error_flag(pao, H6WRITE) && --timeout); /* reset the interrupt bit */ iowrite32(0x00040004, pdo->prHPI_control); if (timeout) return 0; else return 1; } /* if the PCI2040 has recorded an HPI timeout, reset the error and return 1 */ static short hpi6000_check_PCI2040_error_flag(struct hpi_adapter_obj *pao, u16 read_or_write) { u32 hPI_error; struct hpi_hw_obj *phw = pao->priv; /* read the error bits from the PCI2040 */ hPI_error = ioread32(phw->dw2040_HPICSR + HPI_ERROR_REPORT); if (hPI_error) { /* reset the error flag */ iowrite32(0L, phw->dw2040_HPICSR + HPI_ERROR_REPORT); phw->pCI2040HPI_error_count++; if (read_or_write == 1) gw_pci_read_asserts++; /************* inc global */ else gw_pci_write_asserts++; return 1; } else return 0; } static short hpi6000_wait_dsp_ack(struct hpi_adapter_obj *pao, u16 dsp_index, u32 ack_value) { struct hpi_hw_obj *phw = pao->priv; struct dsp_obj *pdo = &phw->ado[dsp_index]; u32 ack = 0L; u32 timeout; u32 hPIC = 0L; /* wait for host interrupt to signal ack is ready */ timeout = TIMEOUT; while (--timeout) { hPIC = ioread32(pdo->prHPI_control); if (hPIC & 0x04) /* 0x04 = HINT from DSP */ break; } if (timeout == 0) return HPI_HIF_ERROR_MASK; /* wait for dwAckValue */ timeout = TIMEOUT; while (--timeout) { /* read the ack mailbox */ ack = hpi_read_word(pdo, HPI_HIF_ADDR(dsp_ack)); if (ack == ack_value) break; if ((ack & HPI_HIF_ERROR_MASK) && 
!hpi6000_check_PCI2040_error_flag(pao, H6READ)) break; /*for (i=0;i<1000;i++) */ /* dwPause=i+1; */ } if (ack & HPI_HIF_ERROR_MASK) /* indicates bad read from DSP - typically 0xffffff is read for some reason */ ack = HPI_HIF_ERROR_MASK; if (timeout == 0) ack = HPI_HIF_ERROR_MASK; return (short)ack; } static short hpi6000_update_control_cache(struct hpi_adapter_obj *pao, struct hpi_message *phm) { const u16 dsp_index = 0; struct hpi_hw_obj *phw = pao->priv; struct dsp_obj *pdo = &phw->ado[dsp_index]; u32 timeout; u32 cache_dirty_flag; u16 err; hpios_dsplock_lock(pao); timeout = TIMEOUT; do { cache_dirty_flag = hpi_read_word((struct dsp_obj *)pdo, HPI_HIF_ADDR(control_cache_is_dirty)); } while (hpi6000_check_PCI2040_error_flag(pao, H6READ) && --timeout); if (!timeout) { err = HPI6000_ERROR_CONTROL_CACHE_PARAMS; goto unlock; } if (cache_dirty_flag) { /* read the cached controls */ u32 address; u32 length; timeout = TIMEOUT; if (pdo->control_cache_address_on_dsp == 0) { do { address = hpi_read_word((struct dsp_obj *)pdo, HPI_HIF_ADDR(control_cache_address)); length = hpi_read_word((struct dsp_obj *)pdo, HPI_HIF_ADDR (control_cache_size_in_bytes)); } while (hpi6000_check_PCI2040_error_flag(pao, H6READ) && --timeout); if (!timeout) { err = HPI6000_ERROR_CONTROL_CACHE_ADDRLEN; goto unlock; } pdo->control_cache_address_on_dsp = address; pdo->control_cache_length_on_dsp = length; } else { address = pdo->control_cache_address_on_dsp; length = pdo->control_cache_length_on_dsp; } if (hpi6000_dsp_block_read32(pao, dsp_index, address, (u32 *)&phw->control_cache[0], length / sizeof(u32))) { err = HPI6000_ERROR_CONTROL_CACHE_READ; goto unlock; } do { hpi_write_word((struct dsp_obj *)pdo, HPI_HIF_ADDR(control_cache_is_dirty), 0); /* flush the FIFO */ hpi_set_address(pdo, HPI_HIF_ADDR(host_cmd)); } while (hpi6000_check_PCI2040_error_flag(pao, H6WRITE) && --timeout); if (!timeout) { err = HPI6000_ERROR_CONTROL_CACHE_FLUSH; goto unlock; } } err = 0; unlock: hpios_dsplock_unlock(pao); 
return err; } /** Get dsp index for multi DSP adapters only */ static u16 get_dsp_index(struct hpi_adapter_obj *pao, struct hpi_message *phm) { u16 ret = 0; switch (phm->object) { case HPI_OBJ_ISTREAM: if (phm->obj_index < 2) ret = 1; break; case HPI_OBJ_PROFILE: ret = phm->obj_index; break; default: break; } return ret; } /** Complete transaction with DSP Send message, get response, send or get stream data if any. */ static void hw_message(struct hpi_adapter_obj *pao, struct hpi_message *phm, struct hpi_response *phr) { u16 error = 0; u16 dsp_index = 0; struct hpi_hw_obj *phw = pao->priv; u16 num_dsp = phw->num_dsp; if (num_dsp < 2) dsp_index = 0; else { dsp_index = get_dsp_index(pao, phm); /* is this checked on the DSP anyway? */ if ((phm->function == HPI_ISTREAM_GROUP_ADD) || (phm->function == HPI_OSTREAM_GROUP_ADD)) { struct hpi_message hm; u16 add_index; hm.obj_index = phm->u.d.u.stream.stream_index; hm.object = phm->u.d.u.stream.object_type; add_index = get_dsp_index(pao, &hm); if (add_index != dsp_index) { phr->error = HPI_ERROR_NO_INTERDSP_GROUPS; return; } } } hpios_dsplock_lock(pao); error = hpi6000_message_response_sequence(pao, dsp_index, phm, phr); if (error) /* something failed in the HPI/DSP interface */ goto err; if (phr->error) /* something failed in the DSP */ goto out; switch (phm->function) { case HPI_OSTREAM_WRITE: case HPI_ISTREAM_ANC_WRITE: error = hpi6000_send_data(pao, dsp_index, phm, phr); break; case HPI_ISTREAM_READ: case HPI_OSTREAM_ANC_READ: error = hpi6000_get_data(pao, dsp_index, phm, phr); break; case HPI_ADAPTER_GET_ASSERT: phr->u.ax.assert.dsp_index = 0; /* dsp 0 default */ if (num_dsp == 2) { if (!phr->u.ax.assert.count) { /* no assert from dsp 0, check dsp 1 */ error = hpi6000_message_response_sequence(pao, 1, phm, phr); phr->u.ax.assert.dsp_index = 1; } } } err: if (error) { if (error >= HPI_ERROR_BACKEND_BASE) { phr->error = HPI_ERROR_DSP_COMMUNICATION; phr->specific_error = error; } else { phr->error = error; } /* just the 
header of the response is valid */ phr->size = sizeof(struct hpi_response_header); } out: hpios_dsplock_unlock(pao); return; }
gpl-2.0
CyanogenMod/android_kernel_motorola_msm8960dt-common
arch/arm/plat-s3c24xx/s3c2410-iotiming.c
9166
12013
/* linux/arch/arm/plat-s3c24xx/s3c2410-iotiming.c
 *
 * Copyright (c) 2006-2009 Simtec Electronics
 *	http://armlinux.simtec.co.uk/
 *	Ben Dooks <ben@simtec.co.uk>
 *
 * S3C24XX CPU Frequency scaling - IO timing for S3C2410/S3C2440/S3C2442
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/cpufreq.h>
#include <linux/seq_file.h>
#include <linux/io.h>
#include <linux/slab.h>

#include <mach/map.h>
#include <mach/regs-mem.h>
#include <mach/regs-clock.h>

#include <plat/cpu-freq-core.h>

/* All cycle times in this file are held in 10ths of nanoseconds;
 * print_ns() expands one such value into the two arguments needed
 * for a "%d.%d" printf format. */
#define print_ns(x) ((x) / 10), ((x) % 10)

/**
 * s3c2410_print_timing - print bank timing data for debug purposes
 * @pfx: The prefix to put on the output
 * @timings: The timing information to print.
 */
static void s3c2410_print_timing(const char *pfx,
				 struct s3c_iotimings *timings)
{
	struct s3c2410_iobank_timing *bt;
	int bank;

	for (bank = 0; bank < MAX_BANKS; bank++) {
		bt = timings->bank[bank].io_2410;
		if (!bt)
			continue;

		printk(KERN_DEBUG "%s %d: Tacs=%d.%d, Tcos=%d.%d, Tacc=%d.%d, "
		       "Tcoh=%d.%d, Tcah=%d.%d\n", pfx, bank,
		       print_ns(bt->tacs),
		       print_ns(bt->tcos),
		       print_ns(bt->tacc),
		       print_ns(bt->tcoh),
		       print_ns(bt->tcah));
	}
}

/**
 * bank_reg - convert bank number to pointer to the control register.
 * @bank: The IO bank number.
 */
static inline void __iomem *bank_reg(unsigned int bank)
{
	/* BANKCON registers are contiguous, 4 bytes apart. */
	return S3C2410_BANKCON0 + (bank << 2);
}

/**
 * bank_is_io - test whether bank is used for IO
 * @bankcon: The bank control register.
 *
 * This is a simplistic test to see if any BANKCON[x] is not an IO
 * bank. It currently does not take into account whether BWSCON has
 * an illegal width-setting in it, or if the pin connected to nCS[x]
 * is actually being handled as a chip-select.
 */
static inline int bank_is_io(unsigned long bankcon)
{
	return !(bankcon & S3C2410_BANKCON_SDRAM);
}

/**
 * to_div - convert cycle time to divisor
 * @cyc: The cycle time, in 10ths of nanoseconds.
 * @hclk_tns: The cycle time for HCLK, in 10ths of nanoseconds.
 *
 * Convert the given cycle time into the divisor to use to obtain it from
 * HCLK.
 */
static inline unsigned int to_div(unsigned int cyc, unsigned int hclk_tns)
{
	if (cyc == 0)
		return 0;

	/* Round up so the resulting cycle time is never shorter than asked. */
	return DIV_ROUND_UP(cyc, hclk_tns);
}

/**
 * calc_0124 - calculate divisor control for divisors that do /0, /1. /2 and /4
 * @cyc: The cycle time, in 10ths of nanoseconds.
 * @hclk_tns: The cycle time for HCLK, in 10ths of nanoseconds.
 * @v: Pointer to register to alter.
 * @shift: The shift to get to the control bits.
 *
 * Calculate the divisor, and turn it into the correct control bits to
 * set in the result, @v.
 *
 * Returns 0 on success, or -1 if the requested divisor cannot be
 * represented by this field.
 */
static int calc_0124(unsigned int cyc, unsigned long hclk_tns,
		     unsigned long *v, int shift)
{
	unsigned int div = to_div(cyc, hclk_tns);
	unsigned long val;

	s3c_freq_iodbg("%s: cyc=%d, hclk=%lu, shift=%d => div %d\n",
		       __func__, cyc, hclk_tns, shift, div);

	switch (div) {
	case 0:
		val = 0;
		break;
	case 1:
		val = 1;
		break;
	case 2:
		val = 2;
		break;
	case 3:
	case 4:
		/* encoding 3 selects a /4 divisor; /3 rounds up to it. */
		val = 3;
		break;
	default:
		return -1;
	}

	*v |= val << shift;
	return 0;
}

/**
 * calc_tacp - calculate divisor control for tACP (page mode access)
 * @cyc: The cycle time, in 10ths of nanoseconds.
 * @hclk: The cycle time for HCLK, in 10ths of nanoseconds.
 * @v: Pointer to register to alter.
 *
 * Currently no support for Tacp calculations; always succeeds without
 * touching @v.
 */
int calc_tacp(unsigned int cyc, unsigned long hclk, unsigned long *v)
{
	/* Currently no support for Tacp calculations. */
	return 0;
}

/**
 * calc_tacc - calculate divisor control for tacc.
 * @cyc: The cycle time, in 10ths of nanoseconds.
 * @nwait_en: IS nWAIT enabled for this bank.
 * @hclk_tns: The cycle time for HCLK, in 10ths of nanoseconds.
 * @v: Pointer to register to alter.
 *
 * Calculate the divisor control for tACC, taking into account whether
 * the bank has nWAIT enabled. The result is used to modify the value
 * pointed to by @v.
 */
static int calc_tacc(unsigned int cyc, int nwait_en,
		     unsigned long hclk_tns, unsigned long *v)
{
	unsigned int div = to_div(cyc, hclk_tns);
	unsigned long val;

	s3c_freq_iodbg("%s: cyc=%u, nwait=%d, hclk=%lu => div=%u\n",
		       __func__, cyc, nwait_en, hclk_tns, div);

	/* if nWait enabled on a bank, Tacc must be at-least 4 cycles. */
	if (nwait_en && div < 4)
		div = 4;

	switch (div) {
	case 0:
		val = 0;
		break;

	case 1:
	case 2:
	case 3:
	case 4:
		val = div - 1;
		break;

	case 5:
	case 6:
		val = 4;	/* encoding 4 => 6 cycles */
		break;

	case 7:
	case 8:
		val = 5;	/* encoding 5 => 8 cycles */
		break;

	case 9:
	case 10:
		val = 6;	/* encoding 6 => 10 cycles */
		break;

	case 11:
	case 12:
	case 13:
	case 14:
		val = 7;	/* encoding 7 => 14 cycles */
		break;

	default:
		return -1;
	}

	*v |= val << 8;
	return 0;
}

/**
 * s3c2410_calc_bank - calculate bank timing information
 * @cfg: The configuration we need to calculate for.
 * @bt: The bank timing information.
 *
 * Given the cycle timing for a bank @bt, calculate the new BANKCON
 * setting for the @cfg timing. This updates the timing information
 * ready for the cpu frequency change.
 */
static int s3c2410_calc_bank(struct s3c_cpufreq_config *cfg,
			     struct s3c2410_iobank_timing *bt)
{
	unsigned long hclk = cfg->freq.hclk_tns;
	unsigned long res;
	int ret;

	res  = bt->bankcon;
	/* preserve only the non-timing configuration bits. */
	res &= (S3C2410_BANKCON_SDRAM | S3C2410_BANKCON_PMC16);

	/* tacp: 2,3,4,5 */
	/* tcah: 0,1,2,4 */
	/* tcoh: 0,1,2,4 */
	/* tacc: 1,2,3,4,6,8,10,14 (>4 for nwait) */
	/* tcos: 0,1,2,4 */
	/* tacs: 0,1,2,4 */

	ret  = calc_0124(bt->tacs, hclk, &res, S3C2410_BANKCON_Tacs_SHIFT);
	ret |= calc_0124(bt->tcos, hclk, &res, S3C2410_BANKCON_Tcos_SHIFT);
	ret |= calc_0124(bt->tcah, hclk, &res, S3C2410_BANKCON_Tcah_SHIFT);
	ret |= calc_0124(bt->tcoh, hclk, &res, S3C2410_BANKCON_Tcoh_SHIFT);

	if (ret)
		return -EINVAL;

	ret |= calc_tacp(bt->tacp, hclk, &res);
	ret |= calc_tacc(bt->tacc, bt->nwait_en, hclk, &res);

	if (ret)
		return -EINVAL;

	bt->bankcon = res;
	return 0;
}

/* Decode table for the tACC field: encoding => HCLK cycles.
 * This is the inverse of the mapping performed by calc_tacc()
 * (encoding 5 covers divisors 7 and 8, i.e. 8 cycles). */
static unsigned int tacc_tab[] = {
	[0]	= 1,
	[1]	= 2,
	[2]	= 3,
	[3]	= 4,
	[4]	= 6,
	[5]	= 8,
	[6]	= 10,
	[7]	= 14,
};

/**
 * get_tacc - turn tACC value into cycle time
 * @hclk_tns: The cycle time for HCLK, in 10ths of nanoseconds.
 * @val: The bank timing register value, shifted down.
 */
static unsigned int get_tacc(unsigned long hclk_tns,
			     unsigned long val)
{
	val &= 7;
	return hclk_tns * tacc_tab[val];
}

/**
 * get_0124 - turn 0/1/2/4 divider into cycle time
 * @hclk_tns: The cycle time for HCLK, in 10ths of nanoseconds.
 * @val: The bank timing register value, shifted down.
 */
static unsigned int get_0124(unsigned long hclk_tns,
			     unsigned long val)
{
	val &= 3;
	return hclk_tns * ((val == 3) ? 4 : val);
}

/**
 * s3c2410_iotiming_getbank - turn BANKCON into cycle time information
 * @cfg: The frequency configuration
 * @bt: The bank timing to fill in (uses cached BANKCON)
 *
 * Given the BANKCON setting in @bt and the current frequency settings
 * in @cfg, update the cycle timing information.
 */
void s3c2410_iotiming_getbank(struct s3c_cpufreq_config *cfg,
			      struct s3c2410_iobank_timing *bt)
{
	unsigned long bankcon = bt->bankcon;
	unsigned long hclk = cfg->freq.hclk_tns;

	bt->tcah = get_0124(hclk, bankcon >> S3C2410_BANKCON_Tcah_SHIFT);
	bt->tcoh = get_0124(hclk, bankcon >> S3C2410_BANKCON_Tcoh_SHIFT);
	bt->tcos = get_0124(hclk, bankcon >> S3C2410_BANKCON_Tcos_SHIFT);
	bt->tacs = get_0124(hclk, bankcon >> S3C2410_BANKCON_Tacs_SHIFT);
	bt->tacc = get_tacc(hclk, bankcon >> S3C2410_BANKCON_Tacc_SHIFT);
}

/**
 * s3c2410_iotiming_debugfs - debugfs show io bank timing information
 * @seq: The seq_file to write output to using seq_printf().
 * @cfg: The current configuration.
 * @iob: The IO bank information to decode.
 */
void s3c2410_iotiming_debugfs(struct seq_file *seq,
			      struct s3c_cpufreq_config *cfg,
			      union s3c_iobank *iob)
{
	struct s3c2410_iobank_timing *bt = iob->io_2410;
	unsigned long bankcon = bt->bankcon;
	unsigned long hclk = cfg->freq.hclk_tns;
	unsigned int tacs;
	unsigned int tcos;
	unsigned int tacc;
	unsigned int tcoh;
	unsigned int tcah;

	seq_printf(seq, "BANKCON=0x%08lx\n", bankcon);

	/* decode the current register contents at the current HCLK... */
	tcah = get_0124(hclk, bankcon >> S3C2410_BANKCON_Tcah_SHIFT);
	tcoh = get_0124(hclk, bankcon >> S3C2410_BANKCON_Tcoh_SHIFT);
	tcos = get_0124(hclk, bankcon >> S3C2410_BANKCON_Tcos_SHIFT);
	tacs = get_0124(hclk, bankcon >> S3C2410_BANKCON_Tacs_SHIFT);
	tacc = get_tacc(hclk, bankcon >> S3C2410_BANKCON_Tacc_SHIFT);

	/* ...and show both the cached ("Read") and decoded ("Set") values. */
	seq_printf(seq,
		   "\tRead: Tacs=%d.%d, Tcos=%d.%d, Tacc=%d.%d, Tcoh=%d.%d, Tcah=%d.%d\n",
		   print_ns(bt->tacs),
		   print_ns(bt->tcos),
		   print_ns(bt->tacc),
		   print_ns(bt->tcoh),
		   print_ns(bt->tcah));

	seq_printf(seq,
		   "\t Set: Tacs=%d.%d, Tcos=%d.%d, Tacc=%d.%d, Tcoh=%d.%d, Tcah=%d.%d\n",
		   print_ns(tacs),
		   print_ns(tcos),
		   print_ns(tacc),
		   print_ns(tcoh),
		   print_ns(tcah));
}

/**
 * s3c2410_iotiming_calc - Calculate bank timing for frequency change.
 * @cfg: The frequency configuration
 * @iot: The IO timing information to fill out.
 *
 * Calculate the new values for the banks in @iot based on the new
 * frequency information in @cfg. This is then used by s3c2410_iotiming_set()
 * to update the timing when necessary.
 */
int s3c2410_iotiming_calc(struct s3c_cpufreq_config *cfg,
			  struct s3c_iotimings *iot)
{
	struct s3c2410_iobank_timing *bt;
	unsigned long bankcon;
	int bank;
	int ret;

	for (bank = 0; bank < MAX_BANKS; bank++) {
		bankcon = __raw_readl(bank_reg(bank));
		bt = iot->bank[bank].io_2410;

		if (!bt)
			continue;

		bt->bankcon = bankcon;

		ret = s3c2410_calc_bank(cfg, bt);
		if (ret) {
			printk(KERN_ERR "%s: cannot calculate bank %d io\n",
			       __func__, bank);
			goto err;
		}

		s3c_freq_iodbg("%s: bank %d: con=%08lx\n",
			       __func__, bank, bt->bankcon);
	}

	return 0;
 err:
	return ret;
}

/**
 * s3c2410_iotiming_set - set the IO timings from the given setup.
 * @cfg: The frequency configuration
 * @iot: The IO timing information to use.
 *
 * Set all the currently used IO bank timing information generated
 * by s3c2410_iotiming_calc() once the core has validated that all
 * the new values are within permitted bounds.
 */
void s3c2410_iotiming_set(struct s3c_cpufreq_config *cfg,
			  struct s3c_iotimings *iot)
{
	struct s3c2410_iobank_timing *bt;
	int bank;

	/* set the io timings from the specifier */

	for (bank = 0; bank < MAX_BANKS; bank++) {
		bt = iot->bank[bank].io_2410;
		if (!bt)
			continue;

		__raw_writel(bt->bankcon, bank_reg(bank));
	}
}

/**
 * s3c2410_iotiming_get - Get the timing information from current registers.
 * @cfg: The frequency configuration
 * @timings: The IO timing information to fill out.
 *
 * Calculate the @timings timing information from the current frequency
 * information in @cfg, and the new frequency configuration, by going
 * through all the IO banks, reading the state and then updating @timings
 * as necessary.
 *
 * This is used at the moment on initialisation to get the current
 * configuration so that boards do not have to carry their own setup
 * if the timings are correct on initialisation.
 */
int s3c2410_iotiming_get(struct s3c_cpufreq_config *cfg,
			 struct s3c_iotimings *timings)
{
	struct s3c2410_iobank_timing *bt;
	unsigned long bankcon;
	unsigned long bwscon;
	int bank;

	bwscon = __raw_readl(S3C2410_BWSCON);

	/* look through all banks to see what is currently set. */

	for (bank = 0; bank < MAX_BANKS; bank++) {
		bankcon = __raw_readl(bank_reg(bank));

		if (!bank_is_io(bankcon))
			continue;

		s3c_freq_iodbg("%s: bank %d: con %08lx\n",
			       __func__, bank, bankcon);

		bt = kzalloc(sizeof(struct s3c2410_iobank_timing), GFP_KERNEL);
		if (!bt) {
			printk(KERN_ERR "%s: no memory for bank\n", __func__);
			return -ENOMEM;
		}

		/* find out if nWait is enabled for the bank. */

		if (bank != 0) {
			unsigned long tmp = S3C2410_BWSCON_GET(bwscon, bank);
			if (tmp & S3C2410_BWSCON_WS)
				bt->nwait_en = 1;
		}

		timings->bank[bank].io_2410 = bt;
		bt->bankcon = bankcon;

		s3c2410_iotiming_getbank(cfg, bt);
	}

	s3c2410_print_timing("get", timings);
	return 0;
}
gpl-2.0
drhonk/SGH-T959V-GB
arch/ia64/kernel/paravirt_patchlist.c
12750
2312
/******************************************************************************
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/paravirt.h>

/* Declare the start/end marker symbols of the native gate-page patch
 * lists; the symbols themselves are emitted by the gate assembly/linker
 * script elsewhere in the tree. */
#define DECLARE(name)						\
	extern unsigned long					\
		__ia64_native_start_gate_##name##_patchlist[];	\
	extern unsigned long					\
		__ia64_native_end_gate_##name##_patchlist[]

DECLARE(fsyscall);
DECLARE(brl_fsys_bubble_down);
DECLARE(vtop);
DECLARE(mckinley_e9);

extern unsigned long __start_gate_section[];

/* Fill one start/end patchlist pair in struct pv_patchdata from the
 * corresponding native marker symbols. */
#define ASSIGN(name)							    \
	.start_##name##_patchlist =					    \
		(unsigned long)__ia64_native_start_gate_##name##_patchlist, \
	.end_##name##_patchlist =					    \
		(unsigned long)__ia64_native_end_gate_##name##_patchlist

/* Default (native) patch data; a paravirtualized platform may override
 * this before the gate page is patched. */
struct pv_patchdata pv_patchdata __initdata = {
	ASSIGN(fsyscall),
	ASSIGN(brl_fsys_bubble_down),
	ASSIGN(vtop),
	ASSIGN(mckinley_e9),

	.gate_section = (void*)__start_gate_section,
};

/* Return the address of the requested gate patchlist boundary.
 * BUG()s on an unknown @type. */
unsigned long __init
paravirt_get_gate_patchlist(enum pv_gate_patchlist type)
{

/* Expand to the start/end case labels for one patchlist. */
#define CASE(NAME, name)					\
	case PV_GATE_START_##NAME:				\
		return pv_patchdata.start_##name##_patchlist;	\
	case PV_GATE_END_##NAME:				\
		return pv_patchdata.end_##name##_patchlist;	\

	switch (type) {
	CASE(FSYSCALL, fsyscall);
	CASE(BRL_FSYS_BUBBLE_DOWN, brl_fsys_bubble_down);
	CASE(VTOP, vtop);
	CASE(MCKINLEY_E9, mckinley_e9);
	default:
		BUG();
		break;
	}
	return 0;
}

/* Return the base of the gate section to be patched. */
void * __init
paravirt_get_gate_section(void)
{
	return pv_patchdata.gate_section;
}
gpl-2.0
loxdegio/Android_kernel_samsung_msm7x27a-1
arch/m68k/sun3/idprom.c
13262
4541
/* idprom.c: Routines to load the idprom into kernel addresses and
 *           interpret the data contained within.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Sun3/3x models added by David Monro (davidm@psrg.cs.usyd.edu.au)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/string.h>

#include <asm/oplib.h>
#include <asm/idprom.h>
#include <asm/machines.h>  /* Fun with Sun released architectures. */

/* Kernel-wide pointer to the validated IDPROM copy (set by idprom_init). */
struct idprom *idprom;
EXPORT_SYMBOL(idprom);

/* Static backing store for the IDPROM copy read from the PROM. */
static struct idprom idprom_buffer;

/* Here is the master table of Sun machines which use some implementation
 * of the Sparc CPU and have a meaningful IDPROM machtype value that we
 * know about.  See asm-sparc/machines.h for empirical constants.
 */
static struct Sun_Machine_Models Sun_Machines[NUM_SUN_MACHINES] = {
/* First, Sun3's */
	{ .name = "Sun 3/160 Series", .id_machtype = (SM_SUN3 | SM_3_160) },
	{ .name = "Sun 3/50", .id_machtype = (SM_SUN3 | SM_3_50) },
	{ .name = "Sun 3/260 Series", .id_machtype = (SM_SUN3 | SM_3_260) },
	{ .name = "Sun 3/110 Series", .id_machtype = (SM_SUN3 | SM_3_110) },
	{ .name = "Sun 3/60", .id_machtype = (SM_SUN3 | SM_3_60) },
	{ .name = "Sun 3/E", .id_machtype = (SM_SUN3 | SM_3_E) },
/* Now, Sun3x's */
	{ .name = "Sun 3/460 Series", .id_machtype = (SM_SUN3X | SM_3_460) },
	{ .name = "Sun 3/80", .id_machtype = (SM_SUN3X | SM_3_80) },
/* Then, Sun4's */
// { .name = "Sun 4/100 Series", .id_machtype = (SM_SUN4 | SM_4_110) },
// { .name = "Sun 4/200 Series", .id_machtype = (SM_SUN4 | SM_4_260) },
// { .name = "Sun 4/300 Series", .id_machtype = (SM_SUN4 | SM_4_330) },
// { .name = "Sun 4/400 Series", .id_machtype = (SM_SUN4 | SM_4_470) },
/* And now, Sun4c's */
// { .name = "Sun4c SparcStation 1", .id_machtype = (SM_SUN4C | SM_4C_SS1) },
// { .name = "Sun4c SparcStation IPC", .id_machtype = (SM_SUN4C | SM_4C_IPC) },
// { .name = "Sun4c SparcStation 1+", .id_machtype = (SM_SUN4C | SM_4C_SS1PLUS) },
// { .name = "Sun4c SparcStation SLC", .id_machtype = (SM_SUN4C | SM_4C_SLC) },
// { .name = "Sun4c SparcStation 2", .id_machtype = (SM_SUN4C | SM_4C_SS2) },
// { .name = "Sun4c SparcStation ELC", .id_machtype = (SM_SUN4C | SM_4C_ELC) },
// { .name = "Sun4c SparcStation IPX", .id_machtype = (SM_SUN4C | SM_4C_IPX) },
/* Finally, early Sun4m's */
// { .name = "Sun4m SparcSystem600", .id_machtype = (SM_SUN4M | SM_4M_SS60) },
// { .name = "Sun4m SparcStation10/20", .id_machtype = (SM_SUN4M | SM_4M_SS50) },
// { .name = "Sun4m SparcStation5", .id_machtype = (SM_SUN4M | SM_4M_SS40) },
/* One entry for the OBP arch's which are sun4d, sun4e, and newer sun4m's */
// { .name = "Sun4M OBP based system", .id_machtype = (SM_SUN4M_OBP | 0x0) }
};

/* Look up @machtype in Sun_Machines and print the model name; halts
 * the machine via the PROM if the machtype is not recognised. */
static void __init display_system_type(unsigned char machtype)
{
	register int i;

	for (i = 0; i < NUM_SUN_MACHINES; i++) {
		if(Sun_Machines[i].id_machtype == machtype) {
			if (machtype != (SM_SUN4M_OBP | 0x00))
				printk("TYPE: %s\n", Sun_Machines[i].name);
			else {
#if 0
				prom_getproperty(prom_root_node, "banner-name",
						 sysname, sizeof(sysname));
				printk("TYPE: %s\n", sysname);
#endif
			}
			return;
		}
	}

	prom_printf("IDPROM: Bogus id_machtype value, 0x%x\n", machtype);
	prom_halt();
}

/* Copy the model name for the current machine into @model.
 * NOTE(review): @model is left untouched if the machtype is unknown —
 * callers appear to be expected to pre-initialise the buffer. */
void sun3_get_model(unsigned char* model)
{
	register int i;

	for (i = 0; i < NUM_SUN_MACHINES; i++) {
		if(Sun_Machines[i].id_machtype == idprom->id_machtype) {
			strcpy(model, Sun_Machines[i].name);
			return;
		}
	}
}

/* Calculate the IDPROM checksum (xor of the data bytes). */
static unsigned char __init calc_idprom_cksum(struct idprom *idprom)
{
	unsigned char cksum, i, *ptr = (unsigned char *)idprom;

	/* xor bytes 0x00..0x0E; byte 0x0F holds the stored checksum. */
	for (i = cksum = 0; i <= 0x0E; i++)
		cksum ^= *ptr++;

	return cksum;
}

/* Create a local IDPROM copy, verify integrity, and display information.
 * Halts via the PROM on an unknown format or a checksum mismatch. */
void __init idprom_init(void)
{
	prom_get_idprom((char *) &idprom_buffer, sizeof(idprom_buffer));

	idprom = &idprom_buffer;

	if (idprom->id_format != 0x01)  {
		prom_printf("IDPROM: Unknown format type!\n");
		prom_halt();
	}

	if (idprom->id_cksum != calc_idprom_cksum(idprom)) {
		prom_printf("IDPROM: Checksum failure (nvram=%x, calc=%x)!\n",
			    idprom->id_cksum, calc_idprom_cksum(idprom));
		prom_halt();
	}

	display_system_type(idprom->id_machtype);

	printk("Ethernet address: %x:%x:%x:%x:%x:%x\n",
		    idprom->id_ethaddr[0], idprom->id_ethaddr[1],
		    idprom->id_ethaddr[2], idprom->id_ethaddr[3],
		    idprom->id_ethaddr[4], idprom->id_ethaddr[5]);
}
gpl-2.0
mangelajo/linux-2.6.28.2-lpc313x-nbee
arch/arm/mach-s3c2410/h1940-bluetooth.c
207
3237
/* * arch/arm/mach-s3c2410/h1940-bluetooth.c * Copyright (c) Arnaud Patard <arnaud.patard@rtp-net.org> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * * S3C2410 bluetooth "driver" * */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/leds.h> #include <mach/regs-gpio.h> #include <mach/hardware.h> #include <mach/h1940-latch.h> #define DRV_NAME "h1940-bt" #ifdef CONFIG_LEDS_H1940 DEFINE_LED_TRIGGER(bt_led_trigger); #endif static int state; /* Bluetooth control */ static void h1940bt_enable(int on) { if (on) { #ifdef CONFIG_LEDS_H1940 /* flashing Blue */ led_trigger_event(bt_led_trigger, LED_HALF); #endif /* Power on the chip */ h1940_latch_control(0, H1940_LATCH_BLUETOOTH_POWER); /* Reset the chip */ mdelay(10); s3c2410_gpio_setpin(S3C2410_GPH1, 1); mdelay(10); s3c2410_gpio_setpin(S3C2410_GPH1, 0); state = 1; } else { #ifdef CONFIG_LEDS_H1940 led_trigger_event(bt_led_trigger, 0); #endif s3c2410_gpio_setpin(S3C2410_GPH1, 1); mdelay(10); s3c2410_gpio_setpin(S3C2410_GPH1, 0); mdelay(10); h1940_latch_control(H1940_LATCH_BLUETOOTH_POWER, 0); state = 0; } } static ssize_t h1940bt_show(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", state); } static ssize_t h1940bt_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int new_state; char *endp; new_state = simple_strtoul(buf, &endp, 0); if (*endp && !isspace(*endp)) return -EINVAL; h1940bt_enable(new_state); return count; } static DEVICE_ATTR(enable, 0644, h1940bt_show, h1940bt_store); static int __init h1940bt_probe(struct platform_device *pdev) { /* Configures BT serial port GPIOs */ s3c2410_gpio_cfgpin(S3C2410_GPH0, S3C2410_GPH0_nCTS0); s3c2410_gpio_pullup(S3C2410_GPH0, 1); s3c2410_gpio_cfgpin(S3C2410_GPH1, 
S3C2410_GPH1_OUTP); s3c2410_gpio_pullup(S3C2410_GPH1, 1); s3c2410_gpio_cfgpin(S3C2410_GPH2, S3C2410_GPH2_TXD0); s3c2410_gpio_pullup(S3C2410_GPH2, 1); s3c2410_gpio_cfgpin(S3C2410_GPH3, S3C2410_GPH3_RXD0); s3c2410_gpio_pullup(S3C2410_GPH3, 1); #ifdef CONFIG_LEDS_H1940 led_trigger_register_simple("h1940-bluetooth", &bt_led_trigger); #endif /* disable BT by default */ h1940bt_enable(0); return device_create_file(&pdev->dev, &dev_attr_enable); } static int h1940bt_remove(struct platform_device *pdev) { #ifdef CONFIG_LEDS_H1940 led_trigger_unregister_simple(bt_led_trigger); #endif return 0; } static struct platform_driver h1940bt_driver = { .driver = { .name = DRV_NAME, }, .probe = h1940bt_probe, .remove = h1940bt_remove, }; static int __init h1940bt_init(void) { return platform_driver_register(&h1940bt_driver); } static void __exit h1940bt_exit(void) { platform_driver_unregister(&h1940bt_driver); } module_init(h1940bt_init); module_exit(h1940bt_exit); MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>"); MODULE_DESCRIPTION("Driver for the iPAQ H1940 bluetooth chip"); MODULE_LICENSE("GPL");
gpl-2.0
Prometheus1408/android_kernel_cyanogen_msm8916
drivers/staging/prima/CORE/MAC/src/pe/lim/limSendMessages.c
207
36645
/* * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * This file was originally distributed by Qualcomm Atheros, Inc. * under proprietary terms before Copyright ownership was assigned * to the Linux Foundation. */ /* * * limSendMessages.c: Provides functions to send messages or Indications to HAL. * Author: Sunit Bhatia * Date: 09/21/2006 * History:- * Date Modified by Modification Information * * -------------------------------------------------------------------------- * */ #include "limSendMessages.h" #include "cfgApi.h" #include "limTrace.h" #ifdef FEATURE_WLAN_DIAG_SUPPORT_LIM //FEATURE_WLAN_DIAG_SUPPORT #include "vos_diag_core_log.h" #endif //FEATURE_WLAN_DIAG_SUPPORT /* When beacon filtering is enabled, firmware will * analyze the selected beacons received during BMPS, * and monitor any changes in the IEs as listed below. 
* The format of the table is: * - EID * - Check for IE presence * - Byte offset * - Byte value * - Bit Mask * - Byte refrence */ static tBeaconFilterIe beaconFilterTable[] = { {SIR_MAC_DS_PARAM_SET_EID, 0, {0, 0, DS_PARAM_CHANNEL_MASK, 0}}, {SIR_MAC_ERP_INFO_EID, 0, {0, 0, ERP_FILTER_MASK, 0}}, {SIR_MAC_EDCA_PARAM_SET_EID, 0, {0, 0, EDCA_FILTER_MASK, 0}}, {SIR_MAC_QOS_CAPABILITY_EID, 0, {0, 0, QOS_FILTER_MASK, 0}}, {SIR_MAC_CHNL_SWITCH_ANN_EID, 1, {0, 0, 0, 0}}, {SIR_MAC_HT_INFO_EID, 0, {0, 0, HT_BYTE0_FILTER_MASK, 0}}, //primary channel {SIR_MAC_HT_INFO_EID, 0, {1, 0, HT_BYTE1_FILTER_MASK, 0}}, //Secondary Channel {SIR_MAC_HT_INFO_EID, 0, {2, 0, HT_BYTE2_FILTER_MASK, 0}}, //HT protection {SIR_MAC_HT_INFO_EID, 0, {5, 0, HT_BYTE5_FILTER_MASK, 0}} #if defined WLAN_FEATURE_VOWIFI ,{SIR_MAC_PWR_CONSTRAINT_EID, 0, {0, 0, 0, 0}} #endif #ifdef WLAN_FEATURE_11AC ,{SIR_MAC_VHT_OPMODE_EID, 0, {0, 0, 0, 0}} ,{SIR_MAC_VHT_OPERATION_EID, 0, {0, 0, VHTOP_CHWIDTH_MASK, 0}} #endif ,{SIR_MAC_RSN_EID, 1, {0, 0, 0, 0}} ,{SIR_MAC_WPA_EID, 1, {0, 0, 0, 0}} }; /** * limSendCFParams() * *FUNCTION: * This function is called to send CFP Parameters to WDA, when they are changed. * *LOGIC: * *ASSUMPTIONS: * NA * *NOTE: * NA * * @param pMac pointer to Global Mac structure. * @param bssIdx Bss Index of the BSS to which STA is associated. * @param cfpCount CFP Count, if that is changed. * @param cfpPeriod CFP Period if that is changed. * * @return success if message send is ok, else false. 
*/ tSirRetStatus limSendCFParams(tpAniSirGlobal pMac, tANI_U8 bssIdx, tANI_U8 cfpCount, tANI_U8 cfpPeriod) { tpUpdateCFParams pCFParams = NULL; tSirRetStatus retCode = eSIR_SUCCESS; tSirMsgQ msgQ; pCFParams = vos_mem_malloc(sizeof( tUpdateCFParams )); if ( NULL == pCFParams ) { limLog( pMac, LOGP, FL( "Unable to allocate memory during Update CF Params" )); retCode = eSIR_MEM_ALLOC_FAILED; goto returnFailure; } vos_mem_set( (tANI_U8 *) pCFParams, sizeof(tUpdateCFParams), 0); pCFParams->cfpCount = cfpCount; pCFParams->cfpPeriod = cfpPeriod; pCFParams->bssIdx = bssIdx; msgQ.type = WDA_UPDATE_CF_IND; msgQ.reserved = 0; msgQ.bodyptr = pCFParams; msgQ.bodyval = 0; limLog( pMac, LOG3, FL( "Sending WDA_UPDATE_CF_IND..." )); MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type)); if( eSIR_SUCCESS != (retCode = wdaPostCtrlMsg( pMac, &msgQ ))) { vos_mem_free(pCFParams); limLog( pMac, LOGP, FL("Posting WDA_UPDATE_CF_IND to WDA failed, reason=%X"), retCode ); } returnFailure: return retCode; } /** * limSendBeaconParams() * *FUNCTION: * This function is called to send beacnon interval, short preamble or other * parameters to WDA, which are changed and indication is received in beacon. * *LOGIC: * *ASSUMPTIONS: * NA * *NOTE: * NA * * @param pMac pointer to Global Mac structure. * @param tpUpdateBeaconParams pointer to the structure, which contains the beacon parameters which are changed. * * @return success if message send is ok, else false. 
*/ tSirRetStatus limSendBeaconParams(tpAniSirGlobal pMac, tpUpdateBeaconParams pUpdatedBcnParams, tpPESession psessionEntry ) { tpUpdateBeaconParams pBcnParams = NULL; tSirRetStatus retCode = eSIR_SUCCESS; tSirMsgQ msgQ; pBcnParams = vos_mem_malloc(sizeof(*pBcnParams)); if ( NULL == pBcnParams ) { limLog( pMac, LOGP, FL( "Unable to allocate memory during Update Beacon Params" )); return eSIR_MEM_ALLOC_FAILED; } vos_mem_copy((tANI_U8 *) pBcnParams, pUpdatedBcnParams, sizeof(*pBcnParams)); msgQ.type = WDA_UPDATE_BEACON_IND; msgQ.reserved = 0; msgQ.bodyptr = pBcnParams; msgQ.bodyval = 0; PELOG3(limLog( pMac, LOG3, FL( "Sending WDA_UPDATE_BEACON_IND, paramChangeBitmap in hex = %x" ), pUpdatedBcnParams->paramChangeBitmap);) if(NULL == psessionEntry) { MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type)); } else { MTRACE(macTraceMsgTx(pMac, psessionEntry->peSessionId, msgQ.type)); } if( eSIR_SUCCESS != (retCode = wdaPostCtrlMsg( pMac, &msgQ ))) { vos_mem_free(pBcnParams); limLog( pMac, LOGP, FL("Posting WDA_UPDATE_BEACON_IND to WDA failed, reason=%X"), retCode ); } limSendBeaconInd(pMac, psessionEntry); return retCode; } /** * limSendSwitchChnlParams() * *FUNCTION: * This function is called to send Channel Switch Indication to WDA * *LOGIC: * *ASSUMPTIONS: * NA * *NOTE: * NA * * @param pMac pointer to Global Mac structure. * @param chnlNumber New Channel Number to be switched to. * @param secondaryChnlOffset an enum for secondary channel offset. * @param localPowerConstraint 11h local power constraint value * * @return success if message send is ok, else false. 
*/ #if !defined WLAN_FEATURE_VOWIFI tSirRetStatus limSendSwitchChnlParams(tpAniSirGlobal pMac, tANI_U8 chnlNumber, ePhyChanBondState secondaryChnlOffset, tANI_U8 localPwrConstraint, tANI_U8 peSessionId) #else tSirRetStatus limSendSwitchChnlParams(tpAniSirGlobal pMac, tANI_U8 chnlNumber, ePhyChanBondState secondaryChnlOffset, tPowerdBm maxTxPower, tANI_U8 peSessionId) #endif { tpSwitchChannelParams pChnlParams = NULL; tSirRetStatus retCode = eSIR_SUCCESS; tSirMsgQ msgQ; tpPESession pSessionEntry; if((pSessionEntry = peFindSessionBySessionId(pMac, peSessionId)) == NULL) { limLog( pMac, LOGP, FL( "Unable to get Session for session Id %d" ), peSessionId); return eSIR_FAILURE; } pChnlParams = vos_mem_malloc(sizeof( tSwitchChannelParams )); if ( NULL == pChnlParams ) { limLog( pMac, LOGP, FL( "Unable to allocate memory during Switch Channel Params" )); retCode = eSIR_MEM_ALLOC_FAILED; goto returnFailure; } vos_mem_set((tANI_U8 *) pChnlParams, sizeof(tSwitchChannelParams), 0); pChnlParams->secondaryChannelOffset = secondaryChnlOffset; pChnlParams->channelNumber= chnlNumber; #if defined WLAN_FEATURE_VOWIFI pChnlParams->maxTxPower = maxTxPower; vos_mem_copy( pChnlParams->selfStaMacAddr, pSessionEntry->selfMacAddr, sizeof(tSirMacAddr) ); #else pChnlParams->localPowerConstraint = localPwrConstraint; #endif vos_mem_copy( pChnlParams->bssId, pSessionEntry->bssId, sizeof(tSirMacAddr) ); pChnlParams->peSessionId = peSessionId; if (LIM_SWITCH_CHANNEL_CSA == pSessionEntry->channelChangeCSA ) { pChnlParams->channelSwitchSrc = eHAL_CHANNEL_SWITCH_SOURCE_CSA; pSessionEntry->channelChangeCSA = 0; } //we need to defer the message until we get the response back from WDA. 
SET_LIM_PROCESS_DEFD_MESGS(pMac, false); msgQ.type = WDA_CHNL_SWITCH_REQ; msgQ.reserved = 0; msgQ.bodyptr = pChnlParams; msgQ.bodyval = 0; #if defined WLAN_FEATURE_VOWIFI limLog( pMac, LOG1, FL( "Sending WDA_CHNL_SWITCH_REQ with SecondaryChnOffset - %d," " ChannelNumber - %d, maxTxPower - %d"), pChnlParams->secondaryChannelOffset, pChnlParams->channelNumber, pChnlParams->maxTxPower); #else limLog( pMac, LOG1, FL( "Sending WDA_CHNL_SWITCH_REQ with SecondaryChnOffset - %d, " "ChannelNumber - %d, LocalPowerConstraint - %d"), pChnlParams->secondaryChannelOffset, pChnlParams->channelNumber, pChnlParams->localPowerConstraint); #endif MTRACE(macTraceMsgTx(pMac, peSessionId, msgQ.type)); limLog(pMac,LOG1,"SessionId:%d WDA_CHNL_SWITCH_REQ for SSID:%s",peSessionId, pSessionEntry->ssId.ssId); if( eSIR_SUCCESS != (retCode = wdaPostCtrlMsg( pMac, &msgQ ))) { vos_mem_free(pChnlParams); limLog( pMac, LOGP, FL("Posting WDA_CHNL_SWITCH_REQ to WDA failed, reason=%X"), retCode ); } returnFailure: return retCode; } /** * limSendEdcaParams() * *FUNCTION: * This function is called to send dynamically changing EDCA Parameters to WDA. * *LOGIC: * *ASSUMPTIONS: * NA * *NOTE: * NA * * @param pMac pointer to Global Mac structure. * @param tpUpdatedEdcaParams pointer to the structure which contains * dynamically changing EDCA parameters. * @param highPerformance If the peer is Airgo (taurus) then switch to highPerformance is true. * * @return success if message send is ok, else false. 
*/ tSirRetStatus limSendEdcaParams(tpAniSirGlobal pMac, tSirMacEdcaParamRecord *pUpdatedEdcaParams, tANI_U16 bssIdx, tANI_BOOLEAN highPerformance) { tEdcaParams *pEdcaParams = NULL; tSirRetStatus retCode = eSIR_SUCCESS; tSirMsgQ msgQ; pEdcaParams = vos_mem_malloc(sizeof(tEdcaParams)); if ( NULL == pEdcaParams ) { limLog( pMac, LOGP, FL( "Unable to allocate memory during Update EDCA Params" )); retCode = eSIR_MEM_ALLOC_FAILED; return retCode; } pEdcaParams->bssIdx = bssIdx; pEdcaParams->acbe = pUpdatedEdcaParams[EDCA_AC_BE]; pEdcaParams->acbk = pUpdatedEdcaParams[EDCA_AC_BK]; pEdcaParams->acvi = pUpdatedEdcaParams[EDCA_AC_VI]; pEdcaParams->acvo = pUpdatedEdcaParams[EDCA_AC_VO]; pEdcaParams->highPerformance = highPerformance; msgQ.type = WDA_UPDATE_EDCA_PROFILE_IND; msgQ.reserved = 0; msgQ.bodyptr = pEdcaParams; msgQ.bodyval = 0; { tANI_U8 i; PELOG1(limLog( pMac, LOG1,FL("Sending WDA_UPDATE_EDCA_PROFILE_IND with EDCA Parameters:" ));) for(i=0; i<MAX_NUM_AC; i++) { PELOG1(limLog(pMac, LOG1, FL("AC[%d]: AIFSN %d, ACM %d, CWmin %d, CWmax %d, TxOp %d "), i, pUpdatedEdcaParams[i].aci.aifsn, pUpdatedEdcaParams[i].aci.acm, pUpdatedEdcaParams[i].cw.min, pUpdatedEdcaParams[i].cw.max, pUpdatedEdcaParams[i].txoplimit);) } } MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type)); if( eSIR_SUCCESS != (retCode = wdaPostCtrlMsg( pMac, &msgQ ))) { vos_mem_free(pEdcaParams); limLog( pMac, LOGP, FL("Posting WDA_UPDATE_EDCA_PROFILE_IND to WDA failed, reason=%X"), retCode ); } return retCode; } /** * limSetActiveEdcaParams() * * FUNCTION: * This function is called to set the most up-to-date EDCA parameters * given the default local EDCA parameters. The rules are as following: * - If ACM bit is set for all ACs, then downgrade everything to Best Effort. * - If ACM is not set for any AC, then PE will use the default EDCA * parameters as advertised by AP. * - If ACM is set in any of the ACs, PE will use the EDCA parameters * from the next best AC for which ACM is not enabled. 
 *
 * @param pMac pointer to Global Mac structure.
 * @param plocalEdcaParams pointer to the local EDCA parameters
 * @param psessionEntry pointer to the session entry
 * @return none
 */
void limSetActiveEdcaParams(tpAniSirGlobal pMac, tSirMacEdcaParamRecord *plocalEdcaParams, tpPESession psessionEntry)
{
    tANI_U8 ac, newAc, i;
    tANI_U8 acAdmitted;
#ifdef FEATURE_WLAN_DIAG_SUPPORT_LIM //FEATURE_WLAN_DIAG_SUPPORT
    vos_log_qos_edca_pkt_type *log_ptr = NULL;
#endif //FEATURE_WLAN_DIAG_SUPPORT

    // Initialize gLimEdcaParamsActive[] to be same as localEdcaParams
    psessionEntry->gLimEdcaParamsActive[EDCA_AC_BE] = plocalEdcaParams[EDCA_AC_BE];
    psessionEntry->gLimEdcaParamsActive[EDCA_AC_BK] = plocalEdcaParams[EDCA_AC_BK];
    psessionEntry->gLimEdcaParamsActive[EDCA_AC_VI] = plocalEdcaParams[EDCA_AC_VI];
    psessionEntry->gLimEdcaParamsActive[EDCA_AC_VO] = plocalEdcaParams[EDCA_AC_VO];
    /* An AC requires downgrade if the ACM bit is set, and the AC has not
     * yet been admitted in uplink or bi-directions.
     * If an AC requires downgrade, it will downgrade to the next beset AC
     * for which ACM is not enabled.
     *
     * - There's no need to downgrade AC_BE since it IS the lowest AC. Hence
     *   start the for loop with AC_BK.
     * - If ACM bit is set for an AC, initially downgrade it to AC_BE. Then
     *   traverse thru the AC list. If we do find the next best AC which is
     *   better than AC_BE, then use that one. For example, if ACM bits are set
     *   such that: BE_ACM=1, BK_ACM=1, VI_ACM=1, VO_ACM=0
     *   then all AC will be downgraded to AC_BE.
     */
    limLog(pMac, LOG1, FL("adAdmitMask[UPLINK] = 0x%x "), pMac->lim.gAcAdmitMask[SIR_MAC_DIRECTION_UPLINK] );
    limLog(pMac, LOG1, FL("adAdmitMask[DOWNLINK] = 0x%x "), pMac->lim.gAcAdmitMask[SIR_MAC_DIRECTION_DNLINK] );
    for (ac = EDCA_AC_BK; ac <= EDCA_AC_VO; ac++)
    {
        /* acAdmitted is 1 only if this AC was admitted in the uplink direction */
        acAdmitted = ( (pMac->lim.gAcAdmitMask[SIR_MAC_DIRECTION_UPLINK] & (1 << ac)) >> ac );
        limLog(pMac, LOG1, FL("For AC[%d]: acm=%d, acAdmit=%d "), ac, plocalEdcaParams[ac].aci.acm, acAdmitted);
        if ( (plocalEdcaParams[ac].aci.acm == 1) && (acAdmitted == 0) )
        {
            limLog(pMac, LOG1, FL("We need to downgrade AC %d!! "), ac);
            /* Default to BE; then pick the highest lower AC without ACM set. */
            newAc = EDCA_AC_BE;
            for (i=ac-1; i>0; i--)
            {
                if (plocalEdcaParams[i].aci.acm == 0)
                {
                    newAc = i;
                    break;
                }
            }
            limLog(pMac, LOGW, FL("Downgrading AC %d ---> AC %d "), ac, newAc);
            psessionEntry->gLimEdcaParamsActive[ac] = plocalEdcaParams[newAc];
        }
    }
    //log: LOG_WLAN_QOS_EDCA_C
#ifdef FEATURE_WLAN_DIAG_SUPPORT_LIM //FEATURE_WLAN_DIAG_SUPPORT
    /* Report the final (possibly downgraded) EDCA set to the diag log. */
    WLAN_VOS_DIAG_LOG_ALLOC(log_ptr, vos_log_qos_edca_pkt_type, LOG_WLAN_QOS_EDCA_C);
    if(log_ptr)
    {
        log_ptr->aci_be = psessionEntry->gLimEdcaParamsActive[EDCA_AC_BE].aci.aci;
        log_ptr->cw_be = psessionEntry->gLimEdcaParamsActive[EDCA_AC_BE].cw.max << 4 | psessionEntry->gLimEdcaParamsActive[EDCA_AC_BE].cw.min;
        log_ptr->txoplimit_be = psessionEntry->gLimEdcaParamsActive[EDCA_AC_BE].txoplimit;
        log_ptr->aci_bk = psessionEntry->gLimEdcaParamsActive[EDCA_AC_BK].aci.aci;
        log_ptr->cw_bk = psessionEntry->gLimEdcaParamsActive[EDCA_AC_BK].cw.max << 4 | psessionEntry->gLimEdcaParamsActive[EDCA_AC_BK].cw.min;
        log_ptr->txoplimit_bk = psessionEntry->gLimEdcaParamsActive[EDCA_AC_BK].txoplimit;
        log_ptr->aci_vi = psessionEntry->gLimEdcaParamsActive[EDCA_AC_VI].aci.aci;
        log_ptr->cw_vi = psessionEntry->gLimEdcaParamsActive[EDCA_AC_VI].cw.max << 4 | psessionEntry->gLimEdcaParamsActive[EDCA_AC_VI].cw.min;
        log_ptr->txoplimit_vi = psessionEntry->gLimEdcaParamsActive[EDCA_AC_VI].txoplimit;
        log_ptr->aci_vo = psessionEntry->gLimEdcaParamsActive[EDCA_AC_VO].aci.aci;
        log_ptr->cw_vo = psessionEntry->gLimEdcaParamsActive[EDCA_AC_VO].cw.max << 4 | psessionEntry->gLimEdcaParamsActive[EDCA_AC_VO].cw.min;
        log_ptr->txoplimit_vo = psessionEntry->gLimEdcaParamsActive[EDCA_AC_VO].txoplimit;
    }
    WLAN_VOS_DIAG_LOG_REPORT(log_ptr);
#endif //FEATURE_WLAN_DIAG_SUPPORT

    return;
 }

/** ---------------------------------------------------------
\fn      limSetLinkState
\brief   LIM sends a message to WDA to set the link state
\param   tpAniSirGlobal  pMac
\param   tSirLinkState   state
\return  None
  -----------------------------------------------------------*/
//Original code with out anu's change
#if 0
tSirRetStatus limSetLinkState(tpAniSirGlobal pMac, tSirLinkState state,tSirMacAddr bssId)
{
    tSirMsgQ msg;
    tSirRetStatus retCode;
    msg.type = WDA_SET_LINK_STATE;
    msg.bodyval = (tANI_U32) state;
    msg.bodyptr = NULL;
    MTRACE(macTraceMsgTx(pMac, 0, msg.type));
    retCode = wdaPostCtrlMsg(pMac, &msg);
    if (retCode != eSIR_SUCCESS)
        /* NOTE(review): dead (#if 0) code; the format string has two
         * conversions but only retCode is passed. */
        limLog(pMac, LOGP, FL("Posting link state %d failed, reason = %x "), retCode);
    return retCode;
}
#endif //0
/* Post WDA_SET_LINK_STATE to WDA; ownership of the allocated
 * tLinkStateParams passes to WDA on success, freed here on failure. */
tSirRetStatus limSetLinkState(tpAniSirGlobal pMac, tSirLinkState state,tSirMacAddr bssId, tSirMacAddr selfMacAddr, tpSetLinkStateCallback callback, void *callbackArg)
{
    tSirMsgQ msgQ;
    tSirRetStatus retCode;
    tpLinkStateParams pLinkStateParams = NULL;
    // Allocate memory.
    pLinkStateParams = vos_mem_malloc(sizeof(tLinkStateParams));
    if ( NULL == pLinkStateParams )
    {
        limLog( pMac, LOGP, FL( "Unable to allocate memory while sending Set Link State" ));
        retCode = eSIR_SME_RESOURCES_UNAVAILABLE;
        return retCode;
    }
    vos_mem_set((tANI_U8 *) pLinkStateParams, sizeof(tLinkStateParams), 0);
    pLinkStateParams->state = state;
    pLinkStateParams->callback = callback;
    pLinkStateParams->callbackArg = callbackArg;

    /* Copy Mac address */
    sirCopyMacAddr(pLinkStateParams->bssid,bssId);
    sirCopyMacAddr(pLinkStateParams->selfMacAddr, selfMacAddr);

    msgQ.type = WDA_SET_LINK_STATE;
    msgQ.reserved = 0;
    msgQ.bodyptr = pLinkStateParams;
    msgQ.bodyval = 0;

    MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type));

    retCode = (tANI_U32)wdaPostCtrlMsg(pMac, &msgQ);
    if (retCode != eSIR_SUCCESS)
    {
        /* Posting failed: WDA never took ownership, free the params here. */
        vos_mem_free(pLinkStateParams);
        limLog(pMac, LOGP, FL("Posting link state %d failed, reason = %x "), state, retCode);
    }
    return retCode;
}

#ifdef WLAN_FEATURE_VOWIFI_11R
/* 11r (fast transition) variant of limSetLinkState: also carries the PE
 * session so WDA can associate the state change with the FT session. */
extern tSirRetStatus limSetLinkStateFT(tpAniSirGlobal pMac, tSirLinkState state,tSirMacAddr bssId, tSirMacAddr selfMacAddr, int ft, tpPESession psessionEntry)
{
    tSirMsgQ msgQ;
    tSirRetStatus retCode;
    tpLinkStateParams pLinkStateParams = NULL;
    // Allocate memory.
    pLinkStateParams = vos_mem_malloc(sizeof(tLinkStateParams));
    if ( NULL == pLinkStateParams )
    {
        limLog( pMac, LOGP, FL( "Unable to allocate memory while sending Set Link State" ));
        retCode = eSIR_SME_RESOURCES_UNAVAILABLE;
        return retCode;
    }
    vos_mem_set((tANI_U8 *) pLinkStateParams, sizeof(tLinkStateParams), 0);
    pLinkStateParams->state = state;
    /* Copy Mac address */
    sirCopyMacAddr(pLinkStateParams->bssid,bssId);
    sirCopyMacAddr(pLinkStateParams->selfMacAddr, selfMacAddr);
    /* NOTE(review): the 'ft' parameter is ignored and the flag is always
     * set to 1 — confirm whether callers rely on passing ft=0. */
    pLinkStateParams->ft = 1;
    pLinkStateParams->session = psessionEntry;

    msgQ.type = WDA_SET_LINK_STATE;
    msgQ.reserved = 0;
    msgQ.bodyptr = pLinkStateParams;
    msgQ.bodyval = 0;
    if(NULL == psessionEntry)
    {
        MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type));
    }
    else
    {
        MTRACE(macTraceMsgTx(pMac, psessionEntry->peSessionId, msgQ.type));
    }
    retCode = (tANI_U32)wdaPostCtrlMsg(pMac, &msgQ);
    if (retCode != eSIR_SUCCESS)
    {
        vos_mem_free(pLinkStateParams);
        limLog(pMac, LOGP, FL("Posting link state %d failed, reason = %x "), state, retCode);
    }
    return retCode;
}
#endif

/** ---------------------------------------------------------
\fn      limSendSetTxPowerReq
\brief   LIM sends a WDA_SET_TX_POWER_REQ message to WDA
\param   tpAniSirGlobal      pMac
\param   tpSirSetTxPowerReq  request message
\return  None
  -----------------------------------------------------------*/
tSirRetStatus limSendSetTxPowerReq(tpAniSirGlobal pMac, tANI_U32 *pMsgBuf)
{
    tSirSetTxPowerReq *txPowerReq;
    tSirRetStatus retCode = eSIR_SUCCESS;
    tSirMsgQ msgQ;
    tpPESession psessionEntry;
    tANI_U8 sessionId = 0;

    if (NULL == pMsgBuf)
        return eSIR_FAILURE;

    /* Take a private copy of the request so the caller's buffer can be
     * released independently of WDA's processing. */
    txPowerReq = vos_mem_malloc(sizeof(tSirSetTxPowerReq));
    if ( NULL == txPowerReq )
    {
        return eSIR_FAILURE;
    }
    vos_mem_copy(txPowerReq, (tSirSetTxPowerReq *)pMsgBuf, sizeof(tSirSetTxPowerReq));

    /* Found corresponding seesion to find BSS IDX */
    psessionEntry = peFindSessionByBssid(pMac, txPowerReq->bssId, &sessionId);
    if (NULL == psessionEntry)
    {
        vos_mem_free(txPowerReq);
        limLog(pMac, LOGE, FL("Session does not exist for given BSSID"));
        return eSIR_FAILURE;
    }

    /* FW API requests BSS IDX */
    txPowerReq->bssIdx = psessionEntry->bssIdx;

    msgQ.type = WDA_SET_TX_POWER_REQ;
    msgQ.reserved = 0;
    msgQ.bodyptr = txPowerReq;
    msgQ.bodyval = 0;
    PELOGW(limLog(pMac, LOGW, FL("Sending WDA_SET_TX_POWER_REQ to WDA"));)
    MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type));
    retCode = wdaPostCtrlMsg(pMac, &msgQ);
    if (eSIR_SUCCESS != retCode)
    {
        limLog(pMac, LOGP, FL("Posting WDA_SET_TX_POWER_REQ to WDA failed, reason=%X"), retCode);
        vos_mem_free(txPowerReq);
        return retCode;
    }
    return retCode;
}

/** ---------------------------------------------------------
\fn      limSendHT40OBSSScanInd
\brief   LIM sends a WDA_HT40_OBSS_SCAN_IND message to WDA
\param   tpAniSirGlobal  pMac
\param   psessionEntry   session Entry
\return  None
  -----------------------------------------------------------*/
tSirRetStatus limSendHT40OBSSScanInd(tpAniSirGlobal pMac, tpPESession psessionEntry)
{
    tSirRetStatus retCode = eSIR_SUCCESS;
    tSirHT40OBSSScanInd *ht40OBSSScanInd;
    tANI_U32 validChannelNum;
    tSirMsgQ msgQ;
    tANI_U8 validChanList[WNI_CFG_VALID_CHANNEL_LIST_LEN];
    tANI_U8 channel24GNum, count;

    ht40OBSSScanInd = vos_mem_malloc(sizeof(tSirHT40OBSSScanInd));
    if ( NULL == ht40OBSSScanInd)
    {
        return eSIR_FAILURE;
    }
    VOS_TRACE(VOS_MODULE_ID_PE,VOS_TRACE_LEVEL_INFO, "OBSS Scan Indication bssIdx- %d staId %d", psessionEntry->bssIdx, psessionEntry->staId);

    /* Copy the session's OBSS HT40 scan parameters into the indication. */
    ht40OBSSScanInd->cmdType = HT40_OBSS_SCAN_PARAM_START;
    ht40OBSSScanInd->scanType = eSIR_ACTIVE_SCAN;
    ht40OBSSScanInd->OBSSScanPassiveDwellTime = psessionEntry->obssHT40ScanParam.OBSSScanPassiveDwellTime;
    ht40OBSSScanInd->OBSSScanActiveDwellTime = psessionEntry->obssHT40ScanParam.OBSSScanActiveDwellTime;
    ht40OBSSScanInd->BSSChannelWidthTriggerScanInterval = psessionEntry->obssHT40ScanParam.BSSChannelWidthTriggerScanInterval;
    ht40OBSSScanInd->OBSSScanPassiveTotalPerChannel = psessionEntry->obssHT40ScanParam.OBSSScanPassiveTotalPerChannel;
    ht40OBSSScanInd->OBSSScanActiveTotalPerChannel = psessionEntry->obssHT40ScanParam.OBSSScanActiveTotalPerChannel;
    ht40OBSSScanInd->BSSWidthChannelTransitionDelayFactor = psessionEntry->obssHT40ScanParam.BSSWidthChannelTransitionDelayFactor;
    ht40OBSSScanInd->OBSSScanActivityThreshold = psessionEntry->obssHT40ScanParam.OBSSScanActivityThreshold;
    /* TODO update it from the associated BSS*/
    ht40OBSSScanInd->currentOperatingClass = 81;

    validChannelNum = WNI_CFG_VALID_CHANNEL_LIST_LEN;
    if (wlan_cfgGetStr(pMac, WNI_CFG_VALID_CHANNEL_LIST, validChanList, &validChannelNum) != eSIR_SUCCESS)
    {
        limLog(pMac, LOGE, FL("could not retrieve Valid channel list"));
        vos_mem_free(ht40OBSSScanInd);
        return eSIR_FAILURE;
    }
    /* Extract 24G channel list */
    channel24GNum = 0;
    for( count =0 ;count < validChannelNum && (channel24GNum < SIR_ROAM_MAX_CHANNELS);count++)
    {
        /* NOTE(review): the test is strictly > RF_CHAN_1 and < RF_CHAN_14,
         * so channels 1 and 14 are excluded — confirm this is intended. */
        if ((validChanList[count]> RF_CHAN_1) && (validChanList[count] < RF_CHAN_14))
        {
            ht40OBSSScanInd->channels[channel24GNum] = validChanList[count];
            channel24GNum++;
        }
    }
    ht40OBSSScanInd->channelCount = channel24GNum;
    /* FW API requests BSS IDX */
    ht40OBSSScanInd->selfStaIdx = psessionEntry->staId;
    ht40OBSSScanInd->bssIdx = psessionEntry->bssIdx;
    ht40OBSSScanInd->fortyMHZIntolerent = 0;
    ht40OBSSScanInd->ieFieldLen = 0;

    msgQ.type = WDA_HT40_OBSS_SCAN_IND;
    msgQ.reserved = 0;
    msgQ.bodyptr = (void *)ht40OBSSScanInd;
    msgQ.bodyval = 0;
    limLog(pMac, LOG1, FL("Sending WDA_HT40_OBSS_SCAN_IND to WDA" "Obss Scan trigger width = %d, delay factor = %d"), ht40OBSSScanInd->BSSChannelWidthTriggerScanInterval, ht40OBSSScanInd->BSSWidthChannelTransitionDelayFactor);
    MTRACE(macTraceMsgTx(pMac, psessionEntry->peSessionId, msgQ.type));
    retCode = wdaPostCtrlMsg(pMac, &msgQ);
    if (eSIR_SUCCESS != retCode)
    {
        limLog(pMac, LOGP, FL("Posting WDA_HT40_OBSS_SCAN_IND " "to WDA failed, reason=%X"), retCode);
        vos_mem_free(ht40OBSSScanInd);
        return retCode;
    }
    return retCode;
}

/** ---------------------------------------------------------
\fn      limSendHT40OBSSScanInd
\brief   LIM sends a WDA_HT40_OBSS_SCAN_IND message to WDA
\        Stop command support is only for debugging
\        As per 802.11 spec OBSS scan is mandatory while
\        operating in HT40 on 2.4GHz band
\param   tpAniSirGlobal  pMac
\param   psessionEntry   Session entry
\return  None
  -----------------------------------------------------------*/
tSirRetStatus limSendHT40OBSSStopScanInd(tpAniSirGlobal pMac, tpPESession psessionEntry)
{
    tSirRetStatus retCode = eSIR_SUCCESS;
    tSirMsgQ msgQ;
    tANI_U8 bssIdx;

    bssIdx = psessionEntry->bssIdx;
    VOS_TRACE (VOS_MODULE_ID_PE,VOS_TRACE_LEVEL_INFO, " Sending STOP OBSS cmd, bssid %d staid %d ", psessionEntry->bssIdx, psessionEntry->staId);

    msgQ.type = WDA_HT40_OBSS_STOP_SCAN_IND;
    msgQ.reserved = 0;
    /* NOTE(review): bodyptr points at the on-stack 'bssIdx'; this assumes
     * the receiver consumes it before this function returns — verify
     * against the WDA message handling, otherwise this is use-after-return. */
    msgQ.bodyptr = (void *)&bssIdx;
    msgQ.bodyval = 0;
    PELOGW(limLog(pMac, LOGW, FL("Sending WDA_HT40_OBSS_STOP_SCAN_IND to WDA"));)
    MTRACE(macTraceMsgTx(pMac, psessionEntry->peSessionId, msgQ.type));
    retCode = wdaPostCtrlMsg(pMac, &msgQ);
    if (eSIR_SUCCESS != retCode)
    {
        limLog(pMac, LOGE, FL("Posting WDA_HT40_OBSS_SCAN_IND " "to WDA failed, reason=%X"), retCode);
        return retCode;
    }
    return retCode;
}

/** ---------------------------------------------------------
\fn      limSendGetTxPowerReq
\brief   LIM sends a WDA_GET_TX_POWER_REQ message to WDA
\param   tpAniSirGlobal      pMac
\param   tpSirGetTxPowerReq  request message
\return  None
  -----------------------------------------------------------*/
tSirRetStatus limSendGetTxPowerReq(tpAniSirGlobal pMac, tpSirGetTxPowerReq pTxPowerReq)
{
    tSirRetStatus retCode = eSIR_SUCCESS;
    tSirMsgQ msgQ;
    if (NULL == pTxPowerReq)
        return retCode;
    msgQ.type = WDA_GET_TX_POWER_REQ;
    msgQ.reserved = 0;
    msgQ.bodyptr = pTxPowerReq;
    msgQ.bodyval = 0;
    PELOGW(limLog(pMac, LOGW, FL( "Sending WDA_GET_TX_POWER_REQ to WDA"));)
    MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type));
    if( eSIR_SUCCESS != (retCode = wdaPostCtrlMsg( pMac, &msgQ )))
    {
        limLog( pMac, LOGP, FL("Posting WDA_GET_TX_POWER_REQ to WDA failed, reason=%X"), retCode );
        /* Posting failed: caller's request buffer is freed here. */
        if (NULL != pTxPowerReq)
        {
            vos_mem_free(pTxPowerReq);
        }
        return retCode;
    }
    return retCode;
}

/** ---------------------------------------------------------
\fn      limSendBeaconFilterInfo
\brief   LIM sends beacon filtering info to WDA
\param   tpAniSirGlobal  pMac
\return  None
  -----------------------------------------------------------*/
tSirRetStatus limSendBeaconFilterInfo(tpAniSirGlobal pMac,tpPESession psessionEntry)
{
    tpBeaconFilterMsg pBeaconFilterMsg = NULL;
    tSirRetStatus retCode = eSIR_SUCCESS;
    tSirMsgQ msgQ;
    tANI_U8 *ptr;
    tANI_U32 i;
    tANI_U32 msgSize;
    tpBeaconFilterIe pIe;

    if( psessionEntry == NULL )
    {
        limLog( pMac, LOGE, FL("Fail to find the right session "));
        retCode = eSIR_FAILURE;
        return retCode;
    }
    /*
     * Dont send the WPA and RSN iE in filter if FW doesnt support
     * IS_FEATURE_BCN_FLT_DELTA_ENABLE,
     * else host will get all beacons which have RSN IE or WPA IE
     */
    if(IS_FEATURE_BCN_FLT_DELTA_ENABLE)
        msgSize = sizeof(tBeaconFilterMsg) + sizeof(beaconFilterTable);
    else
        msgSize = sizeof(tBeaconFilterMsg) + sizeof(beaconFilterTable) - (2 * sizeof(tBeaconFilterIe));

    pBeaconFilterMsg = vos_mem_malloc(msgSize);
    if ( NULL == pBeaconFilterMsg )
    {
        limLog( pMac, LOGP, FL("Fail to allocate memory for beaconFiilterMsg "));
        retCode = eSIR_MEM_ALLOC_FAILED;
        return retCode;
    }
    vos_mem_set((tANI_U8 *) pBeaconFilterMsg, msgSize, 0);
    // Fill in capability Info and mask
    //TBD-RAJESH get the BSS capability from session.
    //Don't send this message if no active Infra session is found.
    pBeaconFilterMsg->capabilityInfo = psessionEntry->limCurrentBssCaps;
    pBeaconFilterMsg->capabilityMask = CAPABILITY_FILTER_MASK;
    pBeaconFilterMsg->beaconInterval = (tANI_U16) psessionEntry->beaconParams.beaconInterval;
    // Fill in number of IEs in beaconFilterTable
    /*
     * Dont send the WPA and RSN iE in filter if FW doesnt support
     * IS_FEATURE_BCN_FLT_DELTA_ENABLE,
     * else host will get all beacons which have RSN IE or WPA IE
     */
    if(IS_FEATURE_BCN_FLT_DELTA_ENABLE)
        pBeaconFilterMsg->ieNum = (tANI_U16) (sizeof(beaconFilterTable) / sizeof(tBeaconFilterIe));
    else
        pBeaconFilterMsg->ieNum = (tANI_U16) ((sizeof(beaconFilterTable) / sizeof(tBeaconFilterIe)) - 2);

    //Fill the BSSIDX
    pBeaconFilterMsg->bssIdx = psessionEntry->bssIdx;

    //Fill message with info contained in the beaconFilterTable
    /* The IE records are appended immediately after the fixed-size header. */
    ptr = (tANI_U8 *)pBeaconFilterMsg + sizeof(tBeaconFilterMsg);
    for(i=0; i < (pBeaconFilterMsg->ieNum); i++)
    {
        pIe = (tpBeaconFilterIe) ptr;
        pIe->elementId = beaconFilterTable[i].elementId;
        pIe->checkIePresence = beaconFilterTable[i].checkIePresence;
        pIe->byte.offset = beaconFilterTable[i].byte.offset;
        pIe->byte.value = beaconFilterTable[i].byte.value;
        pIe->byte.bitMask = beaconFilterTable[i].byte.bitMask;
        pIe->byte.ref = beaconFilterTable[i].byte.ref;
        ptr += sizeof(tBeaconFilterIe);
    }
    msgQ.type = WDA_BEACON_FILTER_IND;
    msgQ.reserved = 0;
    msgQ.bodyptr = pBeaconFilterMsg;
    msgQ.bodyval = 0;
    limLog( pMac, LOG3, FL( "Sending WDA_BEACON_FILTER_IND..." ));
    MTRACE(macTraceMsgTx(pMac, psessionEntry->peSessionId, msgQ.type));
    if( eSIR_SUCCESS != (retCode = wdaPostCtrlMsg( pMac, &msgQ )))
    {
        vos_mem_free(pBeaconFilterMsg);
        limLog( pMac, LOGP, FL("Posting WDA_BEACON_FILTER_IND to WDA failed, reason=%X"), retCode );
        return retCode;
    }
    return retCode;
}

/**
 * \brief Send CB mode update to WDA
 *
 * \param pMac Pointer to the global MAC structure
 *
 * \param psessionEntry session entry
 *        pTempParam CB mode
 * \return eSIR_SUCCESS on success, eSIR_FAILURE else
 */
tSirRetStatus limSendModeUpdate(tpAniSirGlobal pMac, tUpdateVHTOpMode *pTempParam, tpPESession psessionEntry )
{
    tUpdateVHTOpMode *pVhtOpMode = NULL;
    tSirRetStatus retCode = eSIR_SUCCESS;
    tSirMsgQ msgQ;

    pVhtOpMode = vos_mem_malloc(sizeof(tUpdateVHTOpMode));
    if ( NULL == pVhtOpMode )
    {
        limLog( pMac, LOGP, FL( "Unable to allocate memory during Update Op Mode" ));
        return eSIR_MEM_ALLOC_FAILED;
    }
    vos_mem_copy((tANI_U8 *)pVhtOpMode, pTempParam, sizeof(tUpdateVHTOpMode));
    msgQ.type = WDA_UPDATE_OP_MODE;
    msgQ.reserved = 0;
    msgQ.bodyptr = pVhtOpMode;
    msgQ.bodyval = 0;
    limLog( pMac, LOG1, FL( "Sending WDA_UPDATE_OP_MODE, opMode = %d staid = %d" ), pVhtOpMode->opMode,pVhtOpMode->staId);
    if(NULL == psessionEntry)
    {
        MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type));
    }
    else
    {
        MTRACE(macTraceMsgTx(pMac, psessionEntry->peSessionId, msgQ.type));
    }
    if( eSIR_SUCCESS != (retCode = wdaPostCtrlMsg( pMac, &msgQ )))
    {
        vos_mem_free(pVhtOpMode);
        limLog( pMac, LOGP, FL("Posting WDA_UPDATE_OP_MODE to WDA failed, reason=%X"), retCode );
    }
    return retCode;
}

#ifdef WLAN_FEATURE_11W
/** ---------------------------------------------------------
\fn      limSendExcludeUnencryptInd
\brief   LIM sends a message to HAL to indicate whether to
         ignore or indicate the unprotected packet error
\param   tpAniSirGlobal  pMac
\param   tANI_BOOLEAN excludeUnenc - true: ignore, false: indicate
\param   tpPESession  psessionEntry - session context
\return  status
  -----------------------------------------------------------*/
tSirRetStatus limSendExcludeUnencryptInd(tpAniSirGlobal pMac, tANI_BOOLEAN excludeUnenc, tpPESession psessionEntry) { tSirRetStatus retCode = eSIR_SUCCESS; tSirMsgQ msgQ; tSirWlanExcludeUnencryptParam * pExcludeUnencryptParam; pExcludeUnencryptParam = vos_mem_malloc(sizeof(tSirWlanExcludeUnencryptParam)); if ( NULL == pExcludeUnencryptParam ) { limLog(pMac, LOGP, FL( "Unable to allocate memory during limSendExcludeUnencryptInd")); return eSIR_MEM_ALLOC_FAILED; } pExcludeUnencryptParam->excludeUnencrypt = excludeUnenc; sirCopyMacAddr(pExcludeUnencryptParam->bssId, psessionEntry->bssId); msgQ.type = WDA_EXCLUDE_UNENCRYPTED_IND; msgQ.reserved = 0; msgQ.bodyptr = pExcludeUnencryptParam; msgQ.bodyval = 0; limLog(pMac, LOG1, FL("Sending WDA_EXCLUDE_UNENCRYPTED_IND")); MTRACE(macTraceMsgTx(pMac, psessionEntry->peSessionId, msgQ.type)); retCode = wdaPostCtrlMsg(pMac, &msgQ); if (eSIR_SUCCESS != retCode) { vos_mem_free(pExcludeUnencryptParam); limLog(pMac, LOGP, FL("Posting WDA_EXCLUDE_UNENCRYPTED_IND to WDA failed, reason=%X"), retCode); } return retCode; } #endif
gpl-2.0
mfornero/linux
drivers/net/ethernet/chelsio/cxgb/pm3393.c
1743
30258
/*****************************************************************************
 *                                                                           *
 * File: pm3393.c                                                            *
 * $Revision: 1.16 $                                                         *
 * $Date: 2005/05/14 00:59:32 $                                              *
 * Description:                                                              *
 *  PMC/SIERRA (pm3393) MAC-PHY functionality.                               *
 *  part of the Chelsio 10Gb Ethernet Driver.                                *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify      *
 * it under the terms of the GNU General Public License, version 2, as       *
 * published by the Free Software Foundation.                                *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along   *
 * with this program; if not, see <http://www.gnu.org/licenses/>.            *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
 *                                                                           *
 * http://www.chelsio.com                                                    *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
 * All rights reserved.                                                      *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                      *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
 *          Tina Yang               <tainay@chelsio.com>                     *
 *          Felix Marti             <felix@chelsio.com>                      *
 *          Scott Bardone           <sbardone@chelsio.com>                   *
 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
 *          Frank DiMambro          <frank@chelsio.com>                      *
 *                                                                           *
 * History:                                                                  *
 *                                                                           *
 ****************************************************************************/

#include "common.h"
#include "regs.h"
#include "gmac.h"
#include "elmer0.h"
#include "suni1x10gexp_regs.h"

#include <linux/crc32.h>
#include <linux/slab.h>

/* PM3393 registers are accessed through the TPI; register index -> TPI offset. */
#define OFFSET(REG_ADDR)    ((REG_ADDR) << 2)

/* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */
#define MAX_FRAME_SIZE  9600

#define IPG 12
#define TXXG_CONF1_VAL ((IPG << SUNI1x10GEXP_BITOFF_TXXG_IPGT) | \
	SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN | SUNI1x10GEXP_BITMSK_TXXG_CRCEN | \
	SUNI1x10GEXP_BITMSK_TXXG_PADEN)
#define RXXG_CONF1_VAL (SUNI1x10GEXP_BITMSK_RXXG_PUREP | 0x14 | \
	SUNI1x10GEXP_BITMSK_RXXG_FLCHK | SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP)

/* Update statistics every 15 minutes */
#define STATS_TICK_SECS (15 * 60)

enum {                     /* RMON registers */
	RxOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW,
	RxUnicastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW,
	RxMulticastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW,
	RxBroadcastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW,
	RxPAUSEMACCtrlFramesReceived = SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW,
	RxFrameCheckSequenceErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW,
	RxFramesLostDueToInternalMACErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW,
	RxSymbolErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW,
	RxInRangeLengthErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW,
	RxFramesTooLongErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW,
	RxJabbers = SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW,
	RxFragments = SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW,
	RxUndersizedFrames = SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW,
	RxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_25_LOW,
	RxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_26_LOW,

	TxOctetsTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW,
	TxFramesLostDueToInternalMACTransmissionError = SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW,
	TxTransmitSystemError = SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW,
	TxUnicastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW,
	TxMulticastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW,
	TxBroadcastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW,
	TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW,
	TxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_51_LOW,
	TxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_52_LOW
};

/* Per-MAC driver state: enabled directions, flow-control flags, MAC address. */
struct _cmac_instance {
	u8 enabled;
	u8 fc;
	u8 mac_addr[6];
};

/* Read a PM3393 register via the adapter's TPI interface. */
static int pmread(struct cmac *cmac, u32 reg, u32 * data32)
{
	t1_tpi_read(cmac->adapter, OFFSET(reg), data32);
	return 0;
}

/* Write a PM3393 register via the adapter's TPI interface. */
static int pmwrite(struct cmac *cmac, u32 reg, u32 data32)
{
	t1_tpi_write(cmac->adapter, OFFSET(reg), data32);
	return 0;
}

/* Port reset. */
static int pm3393_reset(struct cmac *cmac)
{
	return 0;
}

/*
 * Enable interrupts for the PM3393
 *
 * 1. Enable PM3393 BLOCK interrupts.
 * 2. Enable PM3393 Master Interrupt bit(INTE)
 * 3. Enable ELMER's PM3393 bit.
 * 4. Enable Terminator external interrupt.
 */
static int pm3393_interrupt_enable(struct cmac *cmac)
{
	u32 pl_intr;

	/* PM3393 - Enabling all hardware block interrupts. */
	pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0xffff);
	pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0xffff);
	pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0xffff);
	pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0xffff);

	/* Don't interrupt on statistics overflow, we are polling */
	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);

	pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0xffff);
	pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0xffff);
	pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0xffff);
	pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0xffff);
	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0xffff);
	pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0xffff);
	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0xffff);
	pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0xffff);
	pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0xffff);

	/* PM3393 - Global interrupt enable */
	/* TBD XXX Disable for now until we figure out why error interrupts
	 * keep asserting.
	 */
	pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE,
		0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ );

	/* TERMINATOR - PL_INTERUPTS_EXT */
	pl_intr = readl(cmac->adapter->regs + A_PL_ENABLE);
	pl_intr |= F_PL_INTR_EXT;
	writel(pl_intr, cmac->adapter->regs + A_PL_ENABLE);
	return 0;
}

static int pm3393_interrupt_disable(struct cmac *cmac)
{
	u32 elmer;

	/* PM3393 - Enabling HW interrupt blocks. */
	pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0);
	pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0);
	pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0);
	pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0);
	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
	pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0);
	pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0);
	pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0);
	pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0);
	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0);
	pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0);
	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0);
	pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0);
	pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0);

	/* PM3393 - Global interrupt enable */
	pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE, 0);

	/* ELMER - External chip interrupts. */
	t1_tpi_read(cmac->adapter, A_ELMER0_INT_ENABLE, &elmer);
	elmer &= ~ELMER0_GP_BIT1;
	t1_tpi_write(cmac->adapter, A_ELMER0_INT_ENABLE, elmer);

	/* TERMINATOR - PL_INTERUPTS_EXT */
	/* DO NOT DISABLE TERMINATOR's EXTERNAL INTERRUPTS. ANOTHER CHIP
	 * COULD WANT THEM ENABLED. We disable PM3393 at the ELMER level.
	 */

	return 0;
}

static int pm3393_interrupt_clear(struct cmac *cmac)
{
	u32 elmer;
	u32 pl_intr;
	u32 val32;

	/* PM3393 - Clearing HW interrupt blocks. Note, this assumes
	 *          bit WCIMODE=0 for a clear-on-read.
	 */
	pmread(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS, &val32);
	pmread(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS, &val32);
	pmread(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS, &val32);
	pmread(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS, &val32);
	pmread(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT, &val32);
	pmread(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS, &val32);
	pmread(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT, &val32);
	pmread(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS, &val32);
	pmread(cmac, SUNI1x10GEXP_REG_RXXG_INTERRUPT, &val32);
	pmread(cmac, SUNI1x10GEXP_REG_TXXG_INTERRUPT, &val32);
	pmread(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT, &val32);
	pmread(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION, &val32);
	pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS, &val32);
	pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE, &val32);

	/* PM3393 - Global interrupt status */
	pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, &val32);

	/* ELMER - External chip interrupts. */
	t1_tpi_read(cmac->adapter, A_ELMER0_INT_CAUSE, &elmer);
	elmer |= ELMER0_GP_BIT1;
	t1_tpi_write(cmac->adapter, A_ELMER0_INT_CAUSE, elmer);

	/* TERMINATOR - PL_INTERUPTS_EXT */
	pl_intr = readl(cmac->adapter->regs + A_PL_CAUSE);
	pl_intr |= F_PL_INTR_EXT;
	writel(pl_intr, cmac->adapter->regs + A_PL_CAUSE);

	return 0;
}

/* Interrupt handler */
static int pm3393_interrupt_handler(struct cmac *cmac)
{
	u32 master_intr_status;

	/* Read the master interrupt status register. */
	pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, &master_intr_status);
	if (netif_msg_intr(cmac->adapter))
		dev_dbg(&cmac->adapter->pdev->dev, "PM3393 intr cause 0x%x\n",
			master_intr_status);

	/* TBD XXX Lets just clear everything for now */
	pm3393_interrupt_clear(cmac);

	return 0;
}

/* Enable the RX and/or TX MAC paths selected by 'which' and record them
 * in the instance's 'enabled' mask. */
static int pm3393_enable(struct cmac *cmac, int which)
{
	if (which & MAC_DIRECTION_RX)
		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1,
			(RXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_RXXG_RXEN));

	if (which & MAC_DIRECTION_TX) {
		u32 val = TXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_TXXG_TXEN0;

		if (cmac->instance->fc & PAUSE_RX)
			val |= SUNI1x10GEXP_BITMSK_TXXG_FCRX;
		if (cmac->instance->fc & PAUSE_TX)
			val |= SUNI1x10GEXP_BITMSK_TXXG_FCTX;
		pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, val);
	}

	cmac->instance->enabled |= which;
	return 0;
}

static int pm3393_enable_port(struct cmac *cmac, int which)
{
	/* Clear port statistics */
	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
		SUNI1x10GEXP_BITMSK_MSTAT_CLEAR);
	udelay(2);
	memset(&cmac->stats, 0, sizeof(struct cmac_statistics));

	pm3393_enable(cmac, which);

	/*
	 * XXX This should be done by the PHY and preferably not at all.
	 * The PHY doesn't give us link status indication on its own so have
	 * the link management code query it instead.
	 */
	t1_link_changed(cmac->adapter, 0);
	return 0;
}

/* Disable the RX and/or TX MAC paths by rewriting the base config values
 * (without the enable bits). */
static int pm3393_disable(struct cmac *cmac, int which)
{
	if (which & MAC_DIRECTION_RX)
		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1, RXXG_CONF1_VAL);
	if (which & MAC_DIRECTION_TX)
		pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, TXXG_CONF1_VAL);

	/*
	 * The disable is graceful. Give the PM3393 time. Can't wait very
	 * long here, we may be holding locks.
	 */
	udelay(20);

	cmac->instance->enabled &= ~which;
	return 0;
}

static int pm3393_loopback_enable(struct cmac *cmac)
{
	return 0;
}

static int pm3393_loopback_disable(struct cmac *cmac)
{
	return 0;
}

/* Program the maximum RX/TX frame length from an MTU value. */
static int pm3393_set_mtu(struct cmac *cmac, int mtu)
{
	int enabled = cmac->instance->enabled;

	/* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */
	mtu += 14 + 4;
	if (mtu > MAX_FRAME_SIZE)
		return -EINVAL;

	/* Disable Rx/Tx MAC before configuring it. */
	if (enabled)
		pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);

	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH, mtu);
	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE, mtu);

	if (enabled)
		pm3393_enable(cmac, enabled);
	return 0;
}

/* Configure promiscuous / all-multicast / multicast-hash RX filtering. */
static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
{
	int enabled = cmac->instance->enabled & MAC_DIRECTION_RX;
	u32 rx_mode;

	/* Disable MAC RX before reconfiguring it */
	if (enabled)
		pm3393_disable(cmac, MAC_DIRECTION_RX);

	pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, &rx_mode);
	rx_mode &= ~(SUNI1x10GEXP_BITMSK_RXXG_PMODE |
		     SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN);
	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2,
		(u16)rx_mode);

	if (t1_rx_mode_promisc(rm)) {
		/* Promiscuous mode. */
		rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_PMODE;
	}
	if (t1_rx_mode_allmulti(rm)) {
		/* Accept all multicast. */
		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, 0xffff);
		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, 0xffff);
		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, 0xffff);
		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, 0xffff);
		rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
	} else if (t1_rx_mode_mc_cnt(rm)) {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		int bit;
		u16 mc_filter[4] = { 0, };

		netdev_for_each_mc_addr(ha, t1_get_netdev(rm)) {
			/* bit[23:28] */
			bit = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x3f;
			mc_filter[bit >> 4] |= 1 << (bit & 0xf);
		}
		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, mc_filter[1]);
		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, mc_filter[2]);
		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, mc_filter[3]);
		rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
	}

	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, (u16)rx_mode);

	if (enabled)
		pm3393_enable(cmac, MAC_DIRECTION_RX);

	return 0;
}

/* 10G link: speed/duplex are fixed; only flow control is per-instance. */
static int pm3393_get_speed_duplex_fc(struct cmac *cmac, int *speed,
				      int *duplex, int *fc)
{
	if (speed)
		*speed = SPEED_10000;
	if (duplex)
		*duplex = DUPLEX_FULL;
	if (fc)
		*fc = cmac->instance->fc;
	return 0;
}

static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex,
				      int fc)
{
	if (speed >= 0 && speed != SPEED_10000)
		return -1;
	if (duplex >= 0 && duplex != DUPLEX_FULL)
		return -1;
	if (fc & ~(PAUSE_TX | PAUSE_RX))
		return -1;

	if (fc != cmac->instance->fc) {
		cmac->instance->fc = (u8) fc;
		/* Re-program TXXG_CONFIG_1 so new pause settings take effect. */
		if (cmac->instance->enabled & MAC_DIRECTION_TX)
			pm3393_enable(cmac, MAC_DIRECTION_TX);
	}
	return 0;
}

/* Fold a 40-bit hardware counter (3 16-bit TPI reads) into the 64-bit
 * software counter, bumping bit 40 when the rollover register flags it. */
#define RMON_UPDATE(mac, name, stat_name) \
{ \
	t1_tpi_read((mac)->adapter, OFFSET(name), &val0);     \
	t1_tpi_read((mac)->adapter, OFFSET((name)+1), &val1); \
	t1_tpi_read((mac)->adapter, OFFSET((name)+2), &val2); \
	(mac)->stats.stat_name = (u64)(val0 & 0xffff) | \
				 ((u64)(val1 & 0xffff) << 16) | \
				 ((u64)(val2 & 0xff) << 32) | \
				 ((mac)->stats.stat_name & \
					0xffffff0000000000ULL); \
	if (ro & \
	    (1ULL << ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2))) \
		(mac)->stats.stat_name += 1ULL << 40; \
}

static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
							      int flag)
{
	u64	ro;
	u32	val0, val1, val2, val3;

	/* Snap the counters */
	pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
		SUNI1x10GEXP_BITMSK_MSTAT_SNAP);

	/* Counter rollover, clear on read */
	pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0, &val0);
	pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1, &val1);
	pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2, &val2);
	pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3, &val3);
	ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) |
		(((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48);

	/* Rx stats */
	RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK);
	RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK);
	RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK);
	RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK);
	RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames);
	RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors);
	RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors,
		    RxInternalMACRcvError);
	RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
	RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors);
	RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors);
	RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
	RMON_UPDATE(mac, RxFragments, RxRuntErrors);
	RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
	RMON_UPDATE(mac, RxJumboFramesReceivedOK, RxJumboFramesOK);
	RMON_UPDATE(mac, RxJumboOctetsReceivedOK, RxJumboOctetsOK);

	/* Tx stats */
	RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
	RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError,
		    TxInternalMACXmitError);
	RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors);
	RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK);
	RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
	RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
	RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
	RMON_UPDATE(mac, TxJumboFramesReceivedOK, TxJumboFramesOK);
	RMON_UPDATE(mac, TxJumboOctetsReceivedOK, TxJumboOctetsOK);

	return &mac->stats;
}

static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
{
	memcpy(mac_addr, cmac->instance->mac_addr, ETH_ALEN);
	return 0;
}

static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
{
	u32 val, lo, mid, hi, enabled = cmac->instance->enabled;

	/*
	 * MAC addr: 00:07:43:00:13:09
	 *
	 * ma[5] = 0x09
	 * ma[4] = 0x13
	 * ma[3] = 0x00
	 * ma[2] = 0x43
	 * ma[1] = 0x07
	 * ma[0] = 0x00
	 *
	 * The PM3393 requires byte swapping and reverse order entry
	 * when programming MAC addresses:
	 *
	 * low_bits[15:0]    = ma[1]:ma[0]
	 * mid_bits[31:16]   = ma[3]:ma[2]
	 * high_bits[47:32]  = ma[5]:ma[4]
	 */

	/* Store local copy */
	memcpy(cmac->instance->mac_addr, ma, ETH_ALEN);

	lo  = ((u32) ma[1] << 8) | (u32) ma[0];
	mid = ((u32) ma[3] << 8) | (u32) ma[2];
	hi  = ((u32) ma[5] << 8) | (u32) ma[4];

	/* Disable Rx/Tx MAC before configuring it. */
	if (enabled)
		pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);

	/* Set RXXG Station Address */
	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_15_0, lo);
	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_31_16, mid);
	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_47_32, hi);

	/* Set TXXG Station Address */
	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_15_0, lo);
	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_31_16, mid);
	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_47_32, hi);

	/* Setup Exact Match Filter 1 with our MAC address
	 *
	 * Must disable exact match filter before configuring it.
	 */
	pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, &val);
	val &= 0xff0f;
	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);

	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW, lo);
	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID, mid);
	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH, hi);

	val |= 0x0090;
	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);

	if (enabled)
		pm3393_enable(cmac, enabled);
	return 0;
}

static void pm3393_destroy(struct cmac *cmac)
{
	kfree(cmac);
}

static struct cmac_ops pm3393_ops = {
	.destroy                 = pm3393_destroy,
	.reset                   = pm3393_reset,
	.interrupt_enable        = pm3393_interrupt_enable,
	.interrupt_disable       = pm3393_interrupt_disable,
	.interrupt_clear         = pm3393_interrupt_clear,
	.interrupt_handler       = pm3393_interrupt_handler,
	.enable                  = pm3393_enable_port,
	.disable                 = pm3393_disable,
	.loopback_enable         = pm3393_loopback_enable,
	.loopback_disable        = pm3393_loopback_disable,
	.set_mtu                 = pm3393_set_mtu,
	.set_rx_mode             = pm3393_set_rx_mode,
	.get_speed_duplex_fc     = pm3393_get_speed_duplex_fc,
	.set_speed_duplex_fc     = pm3393_set_speed_duplex_fc,
	.statistics_update       = pm3393_update_statistics,
	.macaddress_get          = pm3393_macaddress_get,
	.macaddress_set          = pm3393_macaddress_set
};

static struct cmac *pm3393_mac_create(adapter_t *adapter, int index)
{
	struct cmac *cmac;

	cmac = kzalloc(sizeof(*cmac) + sizeof(cmac_instance), GFP_KERNEL);
	if (!cmac)
		return NULL;

	cmac->ops = &pm3393_ops;
	cmac->instance = (cmac_instance *) (cmac + 1);
	cmac->adapter = adapter;
	cmac->instance->fc = PAUSE_TX | PAUSE_RX;

	t1_tpi_write(adapter, OFFSET(0x0001), 0x00008000);
	t1_tpi_write(adapter, OFFSET(0x0001), 0x00000000);
	t1_tpi_write(adapter, OFFSET(0x2308), 0x00009800);
	t1_tpi_write(adapter, OFFSET(0x2305), 0x00001001);   /* PL4IO Enable */
	t1_tpi_write(adapter, OFFSET(0x2320), 0x00008800);
	t1_tpi_write(adapter, OFFSET(0x2321), 0x00008800);
	t1_tpi_write(adapter, OFFSET(0x2322), 0x00008800);
	t1_tpi_write(adapter, OFFSET(0x2323),
0x00008800); t1_tpi_write(adapter, OFFSET(0x2324), 0x00008800); t1_tpi_write(adapter, OFFSET(0x2325), 0x00008800); t1_tpi_write(adapter, OFFSET(0x2326), 0x00008800); t1_tpi_write(adapter, OFFSET(0x2327), 0x00008800); t1_tpi_write(adapter, OFFSET(0x2328), 0x00008800); t1_tpi_write(adapter, OFFSET(0x2329), 0x00008800); t1_tpi_write(adapter, OFFSET(0x232a), 0x00008800); t1_tpi_write(adapter, OFFSET(0x232b), 0x00008800); t1_tpi_write(adapter, OFFSET(0x232c), 0x00008800); t1_tpi_write(adapter, OFFSET(0x232d), 0x00008800); t1_tpi_write(adapter, OFFSET(0x232e), 0x00008800); t1_tpi_write(adapter, OFFSET(0x232f), 0x00008800); t1_tpi_write(adapter, OFFSET(0x230d), 0x00009c00); t1_tpi_write(adapter, OFFSET(0x2304), 0x00000202); /* PL4IO Calendar Repetitions */ t1_tpi_write(adapter, OFFSET(0x3200), 0x00008080); /* EFLX Enable */ t1_tpi_write(adapter, OFFSET(0x3210), 0x00000000); /* EFLX Channel Deprovision */ t1_tpi_write(adapter, OFFSET(0x3203), 0x00000000); /* EFLX Low Limit */ t1_tpi_write(adapter, OFFSET(0x3204), 0x00000040); /* EFLX High Limit */ t1_tpi_write(adapter, OFFSET(0x3205), 0x000002cc); /* EFLX Almost Full */ t1_tpi_write(adapter, OFFSET(0x3206), 0x00000199); /* EFLX Almost Empty */ t1_tpi_write(adapter, OFFSET(0x3207), 0x00000240); /* EFLX Cut Through Threshold */ t1_tpi_write(adapter, OFFSET(0x3202), 0x00000000); /* EFLX Indirect Register Update */ t1_tpi_write(adapter, OFFSET(0x3210), 0x00000001); /* EFLX Channel Provision */ t1_tpi_write(adapter, OFFSET(0x3208), 0x0000ffff); /* EFLX Undocumented */ t1_tpi_write(adapter, OFFSET(0x320a), 0x0000ffff); /* EFLX Undocumented */ t1_tpi_write(adapter, OFFSET(0x320c), 0x0000ffff); /* EFLX enable overflow interrupt The other bit are undocumented */ t1_tpi_write(adapter, OFFSET(0x320e), 0x0000ffff); /* EFLX Undocumented */ t1_tpi_write(adapter, OFFSET(0x2200), 0x0000c000); /* IFLX Configuration - enable */ t1_tpi_write(adapter, OFFSET(0x2201), 0x00000000); /* IFLX Channel Deprovision */ t1_tpi_write(adapter, 
OFFSET(0x220e), 0x00000000); /* IFLX Low Limit */ t1_tpi_write(adapter, OFFSET(0x220f), 0x00000100); /* IFLX High Limit */ t1_tpi_write(adapter, OFFSET(0x2210), 0x00000c00); /* IFLX Almost Full Limit */ t1_tpi_write(adapter, OFFSET(0x2211), 0x00000599); /* IFLX Almost Empty Limit */ t1_tpi_write(adapter, OFFSET(0x220d), 0x00000000); /* IFLX Indirect Register Update */ t1_tpi_write(adapter, OFFSET(0x2201), 0x00000001); /* IFLX Channel Provision */ t1_tpi_write(adapter, OFFSET(0x2203), 0x0000ffff); /* IFLX Undocumented */ t1_tpi_write(adapter, OFFSET(0x2205), 0x0000ffff); /* IFLX Undocumented */ t1_tpi_write(adapter, OFFSET(0x2209), 0x0000ffff); /* IFLX Enable overflow interrupt. The other bit are undocumented */ t1_tpi_write(adapter, OFFSET(0x2241), 0xfffffffe); /* PL4MOS Undocumented */ t1_tpi_write(adapter, OFFSET(0x2242), 0x0000ffff); /* PL4MOS Undocumented */ t1_tpi_write(adapter, OFFSET(0x2243), 0x00000008); /* PL4MOS Starving Burst Size */ t1_tpi_write(adapter, OFFSET(0x2244), 0x00000008); /* PL4MOS Hungry Burst Size */ t1_tpi_write(adapter, OFFSET(0x2245), 0x00000008); /* PL4MOS Transfer Size */ t1_tpi_write(adapter, OFFSET(0x2240), 0x00000005); /* PL4MOS Disable */ t1_tpi_write(adapter, OFFSET(0x2280), 0x00002103); /* PL4ODP Training Repeat and SOP rule */ t1_tpi_write(adapter, OFFSET(0x2284), 0x00000000); /* PL4ODP MAX_T setting */ t1_tpi_write(adapter, OFFSET(0x3280), 0x00000087); /* PL4IDU Enable data forward, port state machine. Set ALLOW_NON_ZERO_OLB */ t1_tpi_write(adapter, OFFSET(0x3282), 0x0000001f); /* PL4IDU Enable Dip4 check error interrupts */ t1_tpi_write(adapter, OFFSET(0x3040), 0x0c32); /* # TXXG Config */ /* For T1 use timer based Mac flow control. 
*/ t1_tpi_write(adapter, OFFSET(0x304d), 0x8000); t1_tpi_write(adapter, OFFSET(0x2040), 0x059c); /* # RXXG Config */ t1_tpi_write(adapter, OFFSET(0x2049), 0x0001); /* # RXXG Cut Through */ t1_tpi_write(adapter, OFFSET(0x2070), 0x0000); /* # Disable promiscuous mode */ /* Setup Exact Match Filter 0 to allow broadcast packets. */ t1_tpi_write(adapter, OFFSET(0x206e), 0x0000); /* # Disable Match Enable bit */ t1_tpi_write(adapter, OFFSET(0x204a), 0xffff); /* # low addr */ t1_tpi_write(adapter, OFFSET(0x204b), 0xffff); /* # mid addr */ t1_tpi_write(adapter, OFFSET(0x204c), 0xffff); /* # high addr */ t1_tpi_write(adapter, OFFSET(0x206e), 0x0009); /* # Enable Match Enable bit */ t1_tpi_write(adapter, OFFSET(0x0003), 0x0000); /* # NO SOP/ PAD_EN setup */ t1_tpi_write(adapter, OFFSET(0x0100), 0x0ff0); /* # RXEQB disabled */ t1_tpi_write(adapter, OFFSET(0x0101), 0x0f0f); /* # No Preemphasis */ return cmac; } static int pm3393_mac_reset(adapter_t * adapter) { u32 val; u32 x; u32 is_pl4_reset_finished; u32 is_pl4_outof_lock; u32 is_xaui_mabc_pll_locked; u32 successful_reset; int i; /* The following steps are required to properly reset * the PM3393. This information is provided in the * PM3393 datasheet (Issue 2: November 2002) * section 13.1 -- Device Reset. * * The PM3393 has three types of components that are * individually reset: * * DRESETB - Digital circuitry * PL4_ARESETB - PL4 analog circuitry * XAUI_ARESETB - XAUI bus analog circuitry * * Steps to reset PM3393 using RSTB pin: * * 1. Assert RSTB pin low ( write 0 ) * 2. Wait at least 1ms to initiate a complete initialization of device. * 3. Wait until all external clocks and REFSEL are stable. * 4. Wait minimum of 1ms. (after external clocks and REFEL are stable) * 5. De-assert RSTB ( write 1 ) * 6. Wait until internal timers to expires after ~14ms. * - Allows analog clock synthesizer(PL4CSU) to stabilize to * selected reference frequency before allowing the digital * portion of the device to operate. * 7. 
Wait at least 200us for XAUI interface to stabilize. * 8. Verify the PM3393 came out of reset successfully. * Set successful reset flag if everything worked else try again * a few more times. */ successful_reset = 0; for (i = 0; i < 3 && !successful_reset; i++) { /* 1 */ t1_tpi_read(adapter, A_ELMER0_GPO, &val); val &= ~1; t1_tpi_write(adapter, A_ELMER0_GPO, val); /* 2 */ msleep(1); /* 3 */ msleep(1); /* 4 */ msleep(2 /*1 extra ms for safety */ ); /* 5 */ val |= 1; t1_tpi_write(adapter, A_ELMER0_GPO, val); /* 6 */ msleep(15 /*1 extra ms for safety */ ); /* 7 */ msleep(1); /* 8 */ /* Has PL4 analog block come out of reset correctly? */ t1_tpi_read(adapter, OFFSET(SUNI1x10GEXP_REG_DEVICE_STATUS), &val); is_pl4_reset_finished = (val & SUNI1x10GEXP_BITMSK_TOP_EXPIRED); /* TBD XXX SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL gets locked later in the init sequence * figure out why? */ /* Have all PL4 block clocks locked? */ x = (SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL /*| SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL */ | SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL | SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL | SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL); is_pl4_outof_lock = (val & x); /* ??? If this fails, might be able to software reset the XAUI part * and try to recover... thus saving us from doing another HW reset */ /* Has the XAUI MABC PLL circuitry stablized? */ is_xaui_mabc_pll_locked = (val & SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED); successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock && is_xaui_mabc_pll_locked); if (netif_msg_hw(adapter)) dev_dbg(&adapter->pdev->dev, "PM3393 HW reset %d: pl4_reset 0x%x, val 0x%x, " "is_pl4_outof_lock 0x%x, xaui_locked 0x%x\n", i, is_pl4_reset_finished, val, is_pl4_outof_lock, is_xaui_mabc_pll_locked); } return successful_reset ? 0 : 1; } const struct gmac t1_pm3393_ops = { .stats_update_period = STATS_TICK_SECS, .create = pm3393_mac_create, .reset = pm3393_mac_reset, };
gpl-2.0
STS-Dev-Team/kernel_omap4_xt910s
net/ipv6/icmp.c
1743
22567
/* * Internet Control Message Protocol (ICMPv6) * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * * Based on net/ipv4/icmp.c * * RFC 1885 * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ /* * Changes: * * Andi Kleen : exception handling * Andi Kleen add rate limits. never reply to a icmp. * add more length checks and other fixes. * yoshfuji : ensure to sent parameter problem for * fragments. * YOSHIFUJI Hideaki @USAGI: added sysctl for icmp rate limit. * Randy Dunlap and * YOSHIFUJI Hideaki @USAGI: Per-interface statistics support * Kazunori MIYAZAWA @USAGI: change output process to use ip6_append_data */ #include <linux/module.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/netfilter.h> #include <linux/slab.h> #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> #endif #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/icmpv6.h> #include <net/ip.h> #include <net/sock.h> #include <net/ipv6.h> #include <net/ip6_checksum.h> #include <net/protocol.h> #include <net/raw.h> #include <net/rawv6.h> #include <net/transp_v6.h> #include <net/ip6_route.h> #include <net/addrconf.h> #include <net/icmp.h> #include <net/xfrm.h> #include <net/inet_common.h> #include <asm/uaccess.h> #include <asm/system.h> /* * The ICMP socket(s). This is the most convenient way to flow control * our ICMP output as well as maintain a clean interface throughout * all layers. All Socketless IP sends will soon be gone. * * On SMP we have one ICMP socket per-cpu. 
*/ static inline struct sock *icmpv6_sk(struct net *net) { return net->ipv6.icmp_sk[smp_processor_id()]; } static int icmpv6_rcv(struct sk_buff *skb); static const struct inet6_protocol icmpv6_protocol = { .handler = icmpv6_rcv, .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, }; static __inline__ struct sock *icmpv6_xmit_lock(struct net *net) { struct sock *sk; local_bh_disable(); sk = icmpv6_sk(net); if (unlikely(!spin_trylock(&sk->sk_lock.slock))) { /* This can happen if the output path (f.e. SIT or * ip6ip6 tunnel) signals dst_link_failure() for an * outgoing ICMP6 packet. */ local_bh_enable(); return NULL; } return sk; } static __inline__ void icmpv6_xmit_unlock(struct sock *sk) { spin_unlock_bh(&sk->sk_lock.slock); } /* * Slightly more convenient version of icmpv6_send. */ void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos) { icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos); kfree_skb(skb); } /* * Figure out, may we reply to this packet with icmp error. * * We do not reply, if: * - it was icmp error message. * - it is truncated, so that it is known, that protocol is ICMPV6 * (i.e. in the middle of some exthdr) * * --ANK (980726) */ static int is_ineligible(struct sk_buff *skb) { int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data; int len = skb->len - ptr; __u8 nexthdr = ipv6_hdr(skb)->nexthdr; if (len < 0) return 1; ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr); if (ptr < 0) return 0; if (nexthdr == IPPROTO_ICMPV6) { u8 _type, *tp; tp = skb_header_pointer(skb, ptr+offsetof(struct icmp6hdr, icmp6_type), sizeof(_type), &_type); if (tp == NULL || !(*tp & ICMPV6_INFOMSG_MASK)) return 1; } return 0; } /* * Check the ICMP output rate limit */ static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type, struct flowi6 *fl6) { struct dst_entry *dst; struct net *net = sock_net(sk); bool res = false; /* Informational messages are not limited. */ if (type & ICMPV6_INFOMSG_MASK) return true; /* Do not limit pmtu discovery, it would break it. 
*/ if (type == ICMPV6_PKT_TOOBIG) return true; /* * Look up the output route. * XXX: perhaps the expire for routing entries cloned by * this lookup should be more aggressive (not longer than timeout). */ dst = ip6_route_output(net, sk, fl6); if (dst->error) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); } else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) { res = true; } else { struct rt6_info *rt = (struct rt6_info *)dst; int tmo = net->ipv6.sysctl.icmpv6_time; /* Give more bandwidth to wider prefixes. */ if (rt->rt6i_dst.plen < 128) tmo >>= ((128 - rt->rt6i_dst.plen)>>5); if (!rt->rt6i_peer) rt6_bind_peer(rt, 1); res = inet_peer_xrlim_allow(rt->rt6i_peer, tmo); } dst_release(dst); return res; } /* * an inline helper for the "simple" if statement below * checks if parameter problem report is caused by an * unrecognized IPv6 option that has the Option Type * highest-order two bits set to 10 */ static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset) { u8 _optval, *op; offset += skb_network_offset(skb); op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval); if (op == NULL) return 1; return (*op & 0xC0) == 0x80; } static int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, struct icmp6hdr *thdr, int len) { struct sk_buff *skb; struct icmp6hdr *icmp6h; int err = 0; if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) goto out; icmp6h = icmp6_hdr(skb); memcpy(icmp6h, thdr, sizeof(struct icmp6hdr)); icmp6h->icmp6_cksum = 0; if (skb_queue_len(&sk->sk_write_queue) == 1) { skb->csum = csum_partial(icmp6h, sizeof(struct icmp6hdr), skb->csum); icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr, &fl6->daddr, len, fl6->flowi6_proto, skb->csum); } else { __wsum tmp_csum = 0; skb_queue_walk(&sk->sk_write_queue, skb) { tmp_csum = csum_add(tmp_csum, skb->csum); } tmp_csum = csum_partial(icmp6h, sizeof(struct icmp6hdr), tmp_csum); icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr, &fl6->daddr, len, fl6->flowi6_proto, tmp_csum); } 
ip6_push_pending_frames(sk); out: return err; } struct icmpv6_msg { struct sk_buff *skb; int offset; uint8_t type; }; static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) { struct icmpv6_msg *msg = (struct icmpv6_msg *) from; struct sk_buff *org_skb = msg->skb; __wsum csum = 0; csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset, to, len, csum); skb->csum = csum_block_add(skb->csum, csum, odd); if (!(msg->type & ICMPV6_INFOMSG_MASK)) nf_ct_attach(skb, org_skb); return 0; } #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) static void mip6_addr_swap(struct sk_buff *skb) { struct ipv6hdr *iph = ipv6_hdr(skb); struct inet6_skb_parm *opt = IP6CB(skb); struct ipv6_destopt_hao *hao; struct in6_addr tmp; int off; if (opt->dsthao) { off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO); if (likely(off >= 0)) { hao = (struct ipv6_destopt_hao *) (skb_network_header(skb) + off); ipv6_addr_copy(&tmp, &iph->saddr); ipv6_addr_copy(&iph->saddr, &hao->addr); ipv6_addr_copy(&hao->addr, &tmp); } } } #else static inline void mip6_addr_swap(struct sk_buff *skb) {} #endif static struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb, struct sock *sk, struct flowi6 *fl6) { struct dst_entry *dst, *dst2; struct flowi6 fl2; int err; err = ip6_dst_lookup(sk, &dst, fl6); if (err) return ERR_PTR(err); /* * We won't send icmp if the destination is known * anycast. */ if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) { LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: acast source\n"); dst_release(dst); return ERR_PTR(-EINVAL); } /* No need to clone since we're just using its address. 
*/ dst2 = dst; dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), sk, 0); if (!IS_ERR(dst)) { if (dst != dst2) return dst; } else { if (PTR_ERR(dst) == -EPERM) dst = NULL; else return dst; } err = xfrm_decode_session_reverse(skb, flowi6_to_flowi(&fl2), AF_INET6); if (err) goto relookup_failed; err = ip6_dst_lookup(sk, &dst2, &fl2); if (err) goto relookup_failed; dst2 = xfrm_lookup(net, dst2, flowi6_to_flowi(&fl2), sk, XFRM_LOOKUP_ICMP); if (!IS_ERR(dst2)) { dst_release(dst); dst = dst2; } else { err = PTR_ERR(dst2); if (err == -EPERM) { dst_release(dst); return dst2; } else goto relookup_failed; } relookup_failed: if (dst) return dst; return ERR_PTR(err); } /* * Send an ICMP message in response to a packet in error */ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) { struct net *net = dev_net(skb->dev); struct inet6_dev *idev = NULL; struct ipv6hdr *hdr = ipv6_hdr(skb); struct sock *sk; struct ipv6_pinfo *np; const struct in6_addr *saddr = NULL; struct dst_entry *dst; struct icmp6hdr tmp_hdr; struct flowi6 fl6; struct icmpv6_msg msg; int iif = 0; int addr_type = 0; int len; int hlimit; int err = 0; if ((u8 *)hdr < skb->head || (skb->network_header + sizeof(*hdr)) > skb->tail) return; /* * Make sure we respect the rules * i.e. RFC 1885 2.4(e) * Rule (e.1) is enforced by not using icmpv6_send * in any code that processes icmp errors. */ addr_type = ipv6_addr_type(&hdr->daddr); if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0)) saddr = &hdr->daddr; /* * Dest addr check */ if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) { if (type != ICMPV6_PKT_TOOBIG && !(type == ICMPV6_PARAMPROB && code == ICMPV6_UNK_OPTION && (opt_unrec(skb, info)))) return; saddr = NULL; } addr_type = ipv6_addr_type(&hdr->saddr); /* * Source addr check */ if (addr_type & IPV6_ADDR_LINKLOCAL) iif = skb->dev->ifindex; /* * Must not send error if the source does not uniquely * identify a single node (RFC2463 Section 2.4). 
* We check unspecified / multicast addresses here, * and anycast addresses will be checked later. */ if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) { LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: addr_any/mcast source\n"); return; } /* * Never answer to a ICMP packet. */ if (is_ineligible(skb)) { LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: no reply to icmp error\n"); return; } mip6_addr_swap(skb); memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_ICMPV6; ipv6_addr_copy(&fl6.daddr, &hdr->saddr); if (saddr) ipv6_addr_copy(&fl6.saddr, saddr); fl6.flowi6_oif = iif; fl6.fl6_icmp_type = type; fl6.fl6_icmp_code = code; security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); sk = icmpv6_xmit_lock(net); if (sk == NULL) return; np = inet6_sk(sk); if (!icmpv6_xrlim_allow(sk, type, &fl6)) goto out; tmp_hdr.icmp6_type = type; tmp_hdr.icmp6_code = code; tmp_hdr.icmp6_cksum = 0; tmp_hdr.icmp6_pointer = htonl(info); if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) fl6.flowi6_oif = np->mcast_oif; dst = icmpv6_route_lookup(net, skb, sk, &fl6); if (IS_ERR(dst)) goto out; if (ipv6_addr_is_multicast(&fl6.daddr)) hlimit = np->mcast_hops; else hlimit = np->hop_limit; if (hlimit < 0) hlimit = ip6_dst_hoplimit(dst); msg.skb = skb; msg.offset = skb_network_offset(skb); msg.type = type; len = skb->len - msg.offset; len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr)); if (len < 0) { LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n"); goto out_dst_release; } idev = in6_dev_get(skb->dev); err = ip6_append_data(sk, icmpv6_getfrag, &msg, len + sizeof(struct icmp6hdr), sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl6, (struct rt6_info*)dst, MSG_DONTWAIT, np->dontfrag); if (err) { ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); ip6_flush_pending_frames(sk); goto out_put; } err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, len + sizeof(struct icmp6hdr)); out_put: if (likely(idev != NULL)) in6_dev_put(idev); 
out_dst_release: dst_release(dst); out: icmpv6_xmit_unlock(sk); } EXPORT_SYMBOL(icmpv6_send); static void icmpv6_echo_reply(struct sk_buff *skb) { struct net *net = dev_net(skb->dev); struct sock *sk; struct inet6_dev *idev; struct ipv6_pinfo *np; const struct in6_addr *saddr = NULL; struct icmp6hdr *icmph = icmp6_hdr(skb); struct icmp6hdr tmp_hdr; struct flowi6 fl6; struct icmpv6_msg msg; struct dst_entry *dst; int err = 0; int hlimit; saddr = &ipv6_hdr(skb)->daddr; if (!ipv6_unicast_destination(skb)) saddr = NULL; memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr)); tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY; memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_ICMPV6; ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr); if (saddr) ipv6_addr_copy(&fl6.saddr, saddr); fl6.flowi6_oif = skb->dev->ifindex; fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY; security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); sk = icmpv6_xmit_lock(net); if (sk == NULL) return; np = inet6_sk(sk); if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) fl6.flowi6_oif = np->mcast_oif; err = ip6_dst_lookup(sk, &dst, &fl6); if (err) goto out; dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0); if (IS_ERR(dst)) goto out; if (ipv6_addr_is_multicast(&fl6.daddr)) hlimit = np->mcast_hops; else hlimit = np->hop_limit; if (hlimit < 0) hlimit = ip6_dst_hoplimit(dst); idev = in6_dev_get(skb->dev); msg.skb = skb; msg.offset = 0; msg.type = ICMPV6_ECHO_REPLY; err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl6, (struct rt6_info*)dst, MSG_DONTWAIT, np->dontfrag); if (err) { ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); ip6_flush_pending_frames(sk); goto out_put; } err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, skb->len + sizeof(struct icmp6hdr)); out_put: if (likely(idev != NULL)) in6_dev_put(idev); dst_release(dst); out: icmpv6_xmit_unlock(sk); } static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, 
__be32 info) { const struct inet6_protocol *ipprot; int inner_offset; int hash; u8 nexthdr; if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) return; nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr; if (ipv6_ext_hdr(nexthdr)) { /* now skip over extension headers */ inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr); if (inner_offset<0) return; } else { inner_offset = sizeof(struct ipv6hdr); } /* Checkin header including 8 bytes of inner protocol header. */ if (!pskb_may_pull(skb, inner_offset+8)) return; /* BUGGG_FUTURE: we should try to parse exthdrs in this packet. Without this we will not able f.e. to make source routed pmtu discovery. Corresponding argument (opt) to notifiers is already added. --ANK (980726) */ hash = nexthdr & (MAX_INET_PROTOS - 1); rcu_read_lock(); ipprot = rcu_dereference(inet6_protos[hash]); if (ipprot && ipprot->err_handler) ipprot->err_handler(skb, NULL, type, code, inner_offset, info); rcu_read_unlock(); raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info); } /* * Handle icmp messages */ static int icmpv6_rcv(struct sk_buff *skb) { struct net_device *dev = skb->dev; struct inet6_dev *idev = __in6_dev_get(dev); const struct in6_addr *saddr, *daddr; const struct ipv6hdr *orig_hdr; struct icmp6hdr *hdr; u8 type; if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { struct sec_path *sp = skb_sec_path(skb); int nh; if (!(sp && sp->xvec[sp->len - 1]->props.flags & XFRM_STATE_ICMP)) goto drop_no_count; if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(*orig_hdr))) goto drop_no_count; nh = skb_network_offset(skb); skb_set_network_header(skb, sizeof(*hdr)); if (!xfrm6_policy_check_reverse(NULL, XFRM_POLICY_IN, skb)) goto drop_no_count; skb_set_network_header(skb, nh); } ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INMSGS); saddr = &ipv6_hdr(skb)->saddr; daddr = &ipv6_hdr(skb)->daddr; /* Perform checksum. 
*/ switch (skb->ip_summed) { case CHECKSUM_COMPLETE: if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6, skb->csum)) break; /* fall through */ case CHECKSUM_NONE: skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6, 0)); if (__skb_checksum_complete(skb)) { LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [%pI6 > %pI6]\n", saddr, daddr); goto discard_it; } } if (!pskb_pull(skb, sizeof(*hdr))) goto discard_it; hdr = icmp6_hdr(skb); type = hdr->icmp6_type; ICMP6MSGIN_INC_STATS_BH(dev_net(dev), idev, type); switch (type) { case ICMPV6_ECHO_REQUEST: icmpv6_echo_reply(skb); break; case ICMPV6_ECHO_REPLY: /* we couldn't care less */ break; case ICMPV6_PKT_TOOBIG: /* BUGGG_FUTURE: if packet contains rthdr, we cannot update standard destination cache. Seems, only "advanced" destination cache will allow to solve this problem --ANK (980726) */ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) goto discard_it; hdr = icmp6_hdr(skb); orig_hdr = (struct ipv6hdr *) (hdr + 1); rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev, ntohl(hdr->icmp6_mtu)); /* * Drop through to notify */ case ICMPV6_DEST_UNREACH: case ICMPV6_TIME_EXCEED: case ICMPV6_PARAMPROB: icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu); break; case NDISC_ROUTER_SOLICITATION: case NDISC_ROUTER_ADVERTISEMENT: case NDISC_NEIGHBOUR_SOLICITATION: case NDISC_NEIGHBOUR_ADVERTISEMENT: case NDISC_REDIRECT: ndisc_rcv(skb); break; case ICMPV6_MGM_QUERY: igmp6_event_query(skb); break; case ICMPV6_MGM_REPORT: igmp6_event_report(skb); break; case ICMPV6_MGM_REDUCTION: case ICMPV6_NI_QUERY: case ICMPV6_NI_REPLY: case ICMPV6_MLD2_REPORT: case ICMPV6_DHAAD_REQUEST: case ICMPV6_DHAAD_REPLY: case ICMPV6_MOBILE_PREFIX_SOL: case ICMPV6_MOBILE_PREFIX_ADV: break; default: LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n"); /* informational */ if (type & ICMPV6_INFOMSG_MASK) break; /* * error of unknown type. 
* must pass to upper level */ icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu); } kfree_skb(skb); return 0; discard_it: ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INERRORS); drop_no_count: kfree_skb(skb); return 0; } void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6, u8 type, const struct in6_addr *saddr, const struct in6_addr *daddr, int oif) { memset(fl6, 0, sizeof(*fl6)); ipv6_addr_copy(&fl6->saddr, saddr); ipv6_addr_copy(&fl6->daddr, daddr); fl6->flowi6_proto = IPPROTO_ICMPV6; fl6->fl6_icmp_type = type; fl6->fl6_icmp_code = 0; fl6->flowi6_oif = oif; security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); } /* * Special lock-class for __icmpv6_sk: */ static struct lock_class_key icmpv6_socket_sk_dst_lock_key; static int __net_init icmpv6_sk_init(struct net *net) { struct sock *sk; int err, i, j; net->ipv6.icmp_sk = kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL); if (net->ipv6.icmp_sk == NULL) return -ENOMEM; for_each_possible_cpu(i) { err = inet_ctl_sock_create(&sk, PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, net); if (err < 0) { printk(KERN_ERR "Failed to initialize the ICMP6 control socket " "(err %d).\n", err); goto fail; } net->ipv6.icmp_sk[i] = sk; /* * Split off their lock-class, because sk->sk_dst_lock * gets used from softirqs, which is safe for * __icmpv6_sk (because those never get directly used * via userspace syscalls), but unsafe for normal sockets. */ lockdep_set_class(&sk->sk_dst_lock, &icmpv6_socket_sk_dst_lock_key); /* Enough space for 2 64K ICMP packets, including * sk_buff struct overhead. 
*/ sk->sk_sndbuf = (2 * ((64 * 1024) + sizeof(struct sk_buff))); } return 0; fail: for (j = 0; j < i; j++) inet_ctl_sock_destroy(net->ipv6.icmp_sk[j]); kfree(net->ipv6.icmp_sk); return err; } static void __net_exit icmpv6_sk_exit(struct net *net) { int i; for_each_possible_cpu(i) { inet_ctl_sock_destroy(net->ipv6.icmp_sk[i]); } kfree(net->ipv6.icmp_sk); } static struct pernet_operations icmpv6_sk_ops = { .init = icmpv6_sk_init, .exit = icmpv6_sk_exit, }; int __init icmpv6_init(void) { int err; err = register_pernet_subsys(&icmpv6_sk_ops); if (err < 0) return err; err = -EAGAIN; if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0) goto fail; return 0; fail: printk(KERN_ERR "Failed to register ICMP6 protocol\n"); unregister_pernet_subsys(&icmpv6_sk_ops); return err; } void icmpv6_cleanup(void) { unregister_pernet_subsys(&icmpv6_sk_ops); inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6); } static const struct icmp6_err { int err; int fatal; } tab_unreach[] = { { /* NOROUTE */ .err = ENETUNREACH, .fatal = 0, }, { /* ADM_PROHIBITED */ .err = EACCES, .fatal = 1, }, { /* Was NOT_NEIGHBOUR, now reserved */ .err = EHOSTUNREACH, .fatal = 0, }, { /* ADDR_UNREACH */ .err = EHOSTUNREACH, .fatal = 0, }, { /* PORT_UNREACH */ .err = ECONNREFUSED, .fatal = 1, }, }; int icmpv6_err_convert(u8 type, u8 code, int *err) { int fatal = 0; *err = EPROTO; switch (type) { case ICMPV6_DEST_UNREACH: fatal = 1; if (code <= ICMPV6_PORT_UNREACH) { *err = tab_unreach[code].err; fatal = tab_unreach[code].fatal; } break; case ICMPV6_PKT_TOOBIG: *err = EMSGSIZE; break; case ICMPV6_PARAMPROB: *err = EPROTO; fatal = 1; break; case ICMPV6_TIME_EXCEED: *err = EHOSTUNREACH; break; } return fatal; } EXPORT_SYMBOL(icmpv6_err_convert); #ifdef CONFIG_SYSCTL ctl_table ipv6_icmp_table_template[] = { { .procname = "ratelimit", .data = &init_net.ipv6.sysctl.icmpv6_time, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_ms_jiffies, }, { }, }; struct ctl_table * __net_init 
ipv6_icmp_sysctl_init(struct net *net) { struct ctl_table *table; table = kmemdup(ipv6_icmp_table_template, sizeof(ipv6_icmp_table_template), GFP_KERNEL); if (table) table[0].data = &net->ipv6.sysctl.icmpv6_time; return table; } #endif
gpl-2.0
pranav01/android_kernel_xiaomi_ferrari
drivers/pinctrl/mvebu/pinctrl-armada-370.c
1999
13701
/*
 * Marvell Armada 370 pinctrl driver based on mvebu pinctrl core
 *
 * Copyright (C) 2012 Marvell
 *
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>

#include "pinctrl-mvebu.h"

/*
 * MPP (multi-purpose pin) mux table for the MV88F6710 (Armada 370).
 * Each MPP_MODE() entry lists the selectable functions for one pad;
 * selector 0x0 is always plain GPIO ("gpo" marks output-only pads).
 */
static struct mvebu_mpp_mode mv88f6710_mpp_modes[] = {
	MPP_MODE(0,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "uart0", "rxd")),
	MPP_MODE(1,
	   MPP_FUNCTION(0x0, "gpo", NULL),
	   MPP_FUNCTION(0x1, "uart0", "txd")),
	MPP_MODE(2,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "i2c0", "sck"),
	   MPP_FUNCTION(0x2, "uart0", "txd")),
	MPP_MODE(3,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "i2c0", "sda"),
	   MPP_FUNCTION(0x2, "uart0", "rxd")),
	MPP_MODE(4,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "cpu_pd", "vdd")),
	MPP_MODE(5,
	   MPP_FUNCTION(0x0, "gpo", NULL),
	   MPP_FUNCTION(0x1, "ge0", "txclko"),
	   MPP_FUNCTION(0x2, "uart1", "txd"),
	   MPP_FUNCTION(0x4, "spi1", "clk"),
	   MPP_FUNCTION(0x5, "audio", "mclk")),
	MPP_MODE(6,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "ge0", "txd0"),
	   MPP_FUNCTION(0x2, "sata0", "prsnt"),
	   MPP_FUNCTION(0x4, "tdm", "rst"),
	   MPP_FUNCTION(0x5, "audio", "sdo")),
	MPP_MODE(7,
	   MPP_FUNCTION(0x0, "gpo", NULL),
	   MPP_FUNCTION(0x1, "ge0", "txd1"),
	   MPP_FUNCTION(0x4, "tdm", "tdx"),
	   MPP_FUNCTION(0x5, "audio", "lrclk")),
	MPP_MODE(8,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "ge0", "txd2"),
	   MPP_FUNCTION(0x2, "uart0", "rts"),
	   MPP_FUNCTION(0x4, "tdm", "drx"),
	   MPP_FUNCTION(0x5, "audio", "bclk")),
	MPP_MODE(9,
	   MPP_FUNCTION(0x0, "gpo", NULL),
	   MPP_FUNCTION(0x1, "ge0", "txd3"),
	   MPP_FUNCTION(0x2, "uart1", "txd"),
	   MPP_FUNCTION(0x3, "sd0", "clk"),
	   MPP_FUNCTION(0x5, "audio", "spdifo")),
	MPP_MODE(10,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "ge0", "txctl"),
	   MPP_FUNCTION(0x2, "uart0", "cts"),
	   MPP_FUNCTION(0x4, "tdm", "fsync"),
	   MPP_FUNCTION(0x5, "audio", "sdi")),
	MPP_MODE(11,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "ge0", "rxd0"),
	   MPP_FUNCTION(0x2, "uart1", "rxd"),
	   MPP_FUNCTION(0x3, "sd0", "cmd"),
	   MPP_FUNCTION(0x4, "spi0", "cs1"),
	   MPP_FUNCTION(0x5, "sata1", "prsnt"),
	   MPP_FUNCTION(0x6, "spi1", "cs1")),
	MPP_MODE(12,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "ge0", "rxd1"),
	   MPP_FUNCTION(0x2, "i2c1", "sda"),
	   MPP_FUNCTION(0x3, "sd0", "d0"),
	   MPP_FUNCTION(0x4, "spi1", "cs0"),
	   MPP_FUNCTION(0x5, "audio", "spdifi")),
	MPP_MODE(13,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "ge0", "rxd2"),
	   MPP_FUNCTION(0x2, "i2c1", "sck"),
	   MPP_FUNCTION(0x3, "sd0", "d1"),
	   MPP_FUNCTION(0x4, "tdm", "pclk"),
	   MPP_FUNCTION(0x5, "audio", "rmclk")),
	MPP_MODE(14,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "ge0", "rxd3"),
	   MPP_FUNCTION(0x2, "pcie", "clkreq0"),
	   MPP_FUNCTION(0x3, "sd0", "d2"),
	   MPP_FUNCTION(0x4, "spi1", "mosi"),
	   MPP_FUNCTION(0x5, "spi0", "cs2")),
	MPP_MODE(15,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "ge0", "rxctl"),
	   MPP_FUNCTION(0x2, "pcie", "clkreq1"),
	   MPP_FUNCTION(0x3, "sd0", "d3"),
	   MPP_FUNCTION(0x4, "spi1", "miso"),
	   MPP_FUNCTION(0x5, "spi0", "cs3")),
	MPP_MODE(16,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "ge0", "rxclk"),
	   MPP_FUNCTION(0x2, "uart1", "rxd"),
	   MPP_FUNCTION(0x4, "tdm", "int"),
	   MPP_FUNCTION(0x5, "audio", "extclk")),
	MPP_MODE(17,
	   MPP_FUNCTION(0x0, "gpo", NULL),
	   MPP_FUNCTION(0x1, "ge", "mdc")),
	MPP_MODE(18,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "ge", "mdio")),
	MPP_MODE(19,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "ge0", "txclk"),
	   MPP_FUNCTION(0x2, "ge1", "txclkout"),
	   MPP_FUNCTION(0x4, "tdm", "pclk")),
	MPP_MODE(20,
	   MPP_FUNCTION(0x0, "gpo", NULL),
	   MPP_FUNCTION(0x1, "ge0", "txd4"),
	   MPP_FUNCTION(0x2, "ge1", "txd0")),
	MPP_MODE(21,
	   MPP_FUNCTION(0x0, "gpo", NULL),
	   MPP_FUNCTION(0x1, "ge0", "txd5"),
	   MPP_FUNCTION(0x2, "ge1", "txd1"),
	   MPP_FUNCTION(0x4, "uart1", "txd")),
	MPP_MODE(22,
	   MPP_FUNCTION(0x0, "gpo", NULL),
	   MPP_FUNCTION(0x1, "ge0", "txd6"),
	   MPP_FUNCTION(0x2, "ge1", "txd2"),
	   MPP_FUNCTION(0x4, "uart0", "rts")),
	MPP_MODE(23,
	   MPP_FUNCTION(0x0, "gpo", NULL),
	   MPP_FUNCTION(0x1, "ge0", "txd7"),
	   MPP_FUNCTION(0x2, "ge1", "txd3"),
	   MPP_FUNCTION(0x4, "spi1", "mosi")),
	MPP_MODE(24,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "ge0", "col"),
	   MPP_FUNCTION(0x2, "ge1", "txctl"),
	   MPP_FUNCTION(0x4, "spi1", "cs0")),
	MPP_MODE(25,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "ge0", "rxerr"),
	   MPP_FUNCTION(0x2, "ge1", "rxd0"),
	   MPP_FUNCTION(0x4, "uart1", "rxd")),
	MPP_MODE(26,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "ge0", "crs"),
	   MPP_FUNCTION(0x2, "ge1", "rxd1"),
	   MPP_FUNCTION(0x4, "spi1", "miso")),
	MPP_MODE(27,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "ge0", "rxd4"),
	   MPP_FUNCTION(0x2, "ge1", "rxd2"),
	   MPP_FUNCTION(0x4, "uart0", "cts")),
	MPP_MODE(28,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "ge0", "rxd5"),
	   MPP_FUNCTION(0x2, "ge1", "rxd3")),
	MPP_MODE(29,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "ge0", "rxd6"),
	   MPP_FUNCTION(0x2, "ge1", "rxctl"),
	   MPP_FUNCTION(0x4, "i2c1", "sda")),
	MPP_MODE(30,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "ge0", "rxd7"),
	   MPP_FUNCTION(0x2, "ge1", "rxclk"),
	   MPP_FUNCTION(0x4, "i2c1", "sck")),
	MPP_MODE(31,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x3, "tclk", NULL),
	   MPP_FUNCTION(0x4, "ge0", "txerr")),
	MPP_MODE(32,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "spi0", "cs0")),
	MPP_MODE(33,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "dev", "bootcs"),
	   MPP_FUNCTION(0x2, "spi0", "cs0")),
	MPP_MODE(34,
	   MPP_FUNCTION(0x0, "gpo", NULL),
	   MPP_FUNCTION(0x1, "dev", "wen0"),
	   MPP_FUNCTION(0x2, "spi0", "mosi")),
	MPP_MODE(35,
	   MPP_FUNCTION(0x0, "gpo", NULL),
	   MPP_FUNCTION(0x1, "dev", "oen"),
	   MPP_FUNCTION(0x2, "spi0", "sck")),
	MPP_MODE(36,
	   MPP_FUNCTION(0x0, "gpo", NULL),
	   MPP_FUNCTION(0x1, "dev", "a1"),
	   MPP_FUNCTION(0x2, "spi0", "miso")),
	MPP_MODE(37,
	   MPP_FUNCTION(0x0, "gpo", NULL),
	   MPP_FUNCTION(0x1, "dev", "a0"),
	   MPP_FUNCTION(0x2, "sata0", "prsnt")),
	MPP_MODE(38,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "dev", "ready"),
	   MPP_FUNCTION(0x2, "uart1", "cts"),
	   MPP_FUNCTION(0x3, "uart0", "cts")),
	MPP_MODE(39,
	   MPP_FUNCTION(0x0, "gpo", NULL),
	   MPP_FUNCTION(0x1, "dev", "ad0"),
	   MPP_FUNCTION(0x2, "audio", "spdifo")),
	MPP_MODE(40,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "dev", "ad1"),
	   MPP_FUNCTION(0x2, "uart1", "rts"),
	   MPP_FUNCTION(0x3, "uart0", "rts")),
	MPP_MODE(41,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "dev", "ad2"),
	   MPP_FUNCTION(0x2, "uart1", "rxd")),
	MPP_MODE(42,
	   MPP_FUNCTION(0x0, "gpo", NULL),
	   MPP_FUNCTION(0x1, "dev", "ad3"),
	   MPP_FUNCTION(0x2, "uart1", "txd")),
	MPP_MODE(43,
	   MPP_FUNCTION(0x0, "gpo", NULL),
	   MPP_FUNCTION(0x1, "dev", "ad4"),
	   MPP_FUNCTION(0x2, "audio", "bclk")),
	MPP_MODE(44,
	   MPP_FUNCTION(0x0, "gpo", NULL),
	   MPP_FUNCTION(0x1, "dev", "ad5"),
	   MPP_FUNCTION(0x2, "audio", "mclk")),
	MPP_MODE(45,
	   MPP_FUNCTION(0x0, "gpo", NULL),
	   MPP_FUNCTION(0x1, "dev", "ad6"),
	   MPP_FUNCTION(0x2, "audio", "lrclk")),
	MPP_MODE(46,
	   MPP_FUNCTION(0x0, "gpo", NULL),
	   MPP_FUNCTION(0x1, "dev", "ad7"),
	   MPP_FUNCTION(0x2, "audio", "sdo")),
	MPP_MODE(47,
	   MPP_FUNCTION(0x0, "gpo", NULL),
	   MPP_FUNCTION(0x1, "dev", "ad8"),
	   MPP_FUNCTION(0x3, "sd0", "clk"),
	   MPP_FUNCTION(0x5, "audio", "spdifo")),
	MPP_MODE(48,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "dev", "ad9"),
	   MPP_FUNCTION(0x2, "uart0", "rts"),
	   MPP_FUNCTION(0x3, "sd0", "cmd"),
	   MPP_FUNCTION(0x4, "sata1", "prsnt"),
	   MPP_FUNCTION(0x5, "spi0", "cs1")),
	MPP_MODE(49,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "dev", "ad10"),
	   MPP_FUNCTION(0x2, "pcie", "clkreq1"),
	   MPP_FUNCTION(0x3, "sd0", "d0"),
	   MPP_FUNCTION(0x4, "spi1", "cs0"),
	   MPP_FUNCTION(0x5, "audio", "spdifi")),
	MPP_MODE(50,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "dev", "ad11"),
	   MPP_FUNCTION(0x2, "uart0", "cts"),
	   MPP_FUNCTION(0x3, "sd0", "d1"),
	   MPP_FUNCTION(0x4, "spi1", "miso"),
	   MPP_FUNCTION(0x5, "audio", "rmclk")),
	MPP_MODE(51,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "dev", "ad12"),
	   MPP_FUNCTION(0x2, "i2c1", "sda"),
	   MPP_FUNCTION(0x3, "sd0", "d2"),
	   MPP_FUNCTION(0x4, "spi1", "mosi")),
	MPP_MODE(52,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "dev", "ad13"),
	   MPP_FUNCTION(0x2, "i2c1", "sck"),
	   MPP_FUNCTION(0x3, "sd0", "d3"),
	   MPP_FUNCTION(0x4, "spi1", "sck")),
	MPP_MODE(53,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "dev", "ad14"),
	   MPP_FUNCTION(0x2, "sd0", "clk"),
	   MPP_FUNCTION(0x3, "tdm", "pclk"),
	   MPP_FUNCTION(0x4, "spi0", "cs2"),
	   MPP_FUNCTION(0x5, "pcie", "clkreq1")),
	MPP_MODE(54,
	   MPP_FUNCTION(0x0, "gpo", NULL),
	   MPP_FUNCTION(0x1, "dev", "ad15"),
	   MPP_FUNCTION(0x3, "tdm", "dtx")),
	MPP_MODE(55,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "dev", "cs1"),
	   MPP_FUNCTION(0x2, "uart1", "txd"),
	   MPP_FUNCTION(0x3, "tdm", "rst"),
	   MPP_FUNCTION(0x4, "sata1", "prsnt"),
	   MPP_FUNCTION(0x5, "sata0", "prsnt")),
	MPP_MODE(56,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "dev", "cs2"),
	   MPP_FUNCTION(0x2, "uart1", "cts"),
	   MPP_FUNCTION(0x3, "uart0", "cts"),
	   MPP_FUNCTION(0x4, "spi0", "cs3"),
	   MPP_FUNCTION(0x5, "pcie", "clkreq0"),
	   MPP_FUNCTION(0x6, "spi1", "cs1")),
	MPP_MODE(57,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "dev", "cs3"),
	   MPP_FUNCTION(0x2, "uart1", "rxd"),
	   MPP_FUNCTION(0x3, "tdm", "fsync"),
	   MPP_FUNCTION(0x4, "sata0", "prsnt"),
	   MPP_FUNCTION(0x5, "audio", "sdo")),
	MPP_MODE(58,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "dev", "cs0"),
	   MPP_FUNCTION(0x2, "uart1", "rts"),
	   MPP_FUNCTION(0x3, "tdm", "int"),
	   MPP_FUNCTION(0x5, "audio", "extclk"),
	   MPP_FUNCTION(0x6, "uart0", "rts")),
	MPP_MODE(59,
	   MPP_FUNCTION(0x0, "gpo", NULL),
	   MPP_FUNCTION(0x1, "dev", "ale0"),
	   MPP_FUNCTION(0x2, "uart1", "rts"),
	   MPP_FUNCTION(0x3, "uart0", "rts"),
	   MPP_FUNCTION(0x5, "audio", "bclk")),
	MPP_MODE(60,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "dev", "ale1"),
	   MPP_FUNCTION(0x2, "uart1", "rxd"),
	   MPP_FUNCTION(0x3, "sata0", "prsnt"),
	   MPP_FUNCTION(0x4, "pcie", "rst-out"),
	   MPP_FUNCTION(0x5, "audio", "sdi")),
	MPP_MODE(61,
	   MPP_FUNCTION(0x0, "gpo", NULL),
	   MPP_FUNCTION(0x1, "dev", "wen1"),
	   MPP_FUNCTION(0x2, "uart1", "txd"),
	   MPP_FUNCTION(0x5, "audio", "rclk")),
	MPP_MODE(62,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "dev", "a2"),
	   MPP_FUNCTION(0x2, "uart1", "cts"),
	   MPP_FUNCTION(0x3, "tdm", "drx"),
	   MPP_FUNCTION(0x4, "pcie", "clkreq0"),
	   MPP_FUNCTION(0x5, "audio", "mclk"),
	   MPP_FUNCTION(0x6, "uart0", "cts")),
	MPP_MODE(63,
	   MPP_FUNCTION(0x0, "gpo", NULL),
	   MPP_FUNCTION(0x1, "spi0", "sck"),
	   MPP_FUNCTION(0x2, "tclk", NULL)),
	MPP_MODE(64,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "spi0", "miso"),
	   MPP_FUNCTION(0x2, "spi0-1", "cs1")),
	MPP_MODE(65,
	   MPP_FUNCTION(0x0, "gpio", NULL),
	   MPP_FUNCTION(0x1, "spi0", "mosi"),
	   MPP_FUNCTION(0x2, "spi0-1", "cs2")),
};

/* Filled in by the probe routine and handed to the mvebu pinctrl core. */
static struct mvebu_pinctrl_soc_info armada_370_pinctrl_info;

/*
 * FIX: the match table is now const (the driver core only ever reads it)
 * and registered with MODULE_DEVICE_TABLE() so the module can be
 * auto-loaded from the device-tree compatible string.
 */
static const struct of_device_id armada_370_pinctrl_of_match[] = {
	{ .compatible = "marvell,mv88f6710-pinctrl" },
	{ },
};
MODULE_DEVICE_TABLE(of, armada_370_pinctrl_of_match);

/* A single linear MPP control register bank covering pins 0..65. */
static struct mvebu_mpp_ctrl mv88f6710_mpp_controls[] = {
	MPP_REG_CTRL(0, 65),
};

/* GPIO banks: two full 32-pin banks plus a 2-pin tail bank. */
static struct pinctrl_gpio_range mv88f6710_mpp_gpio_ranges[] = {
	MPP_GPIO_RANGE(0,  0,  0, 32),
	MPP_GPIO_RANGE(1, 32, 32, 32),
	MPP_GPIO_RANGE(2, 64, 64,  2),
};

/*
 * Fill in the static SoC description for the Armada 370 and delegate to
 * the common mvebu pinctrl probe.  Returns 0 on success or a negative
 * errno from mvebu_pinctrl_probe().
 */
static int armada_370_pinctrl_probe(struct platform_device *pdev)
{
	struct mvebu_pinctrl_soc_info *soc = &armada_370_pinctrl_info;

	soc->variant = 0; /* no variants for Armada 370 */
	soc->controls = mv88f6710_mpp_controls;
	soc->ncontrols = ARRAY_SIZE(mv88f6710_mpp_controls);
	soc->modes = mv88f6710_mpp_modes;
	soc->nmodes = ARRAY_SIZE(mv88f6710_mpp_modes);
	soc->gpioranges = mv88f6710_mpp_gpio_ranges;
	soc->ngpioranges = ARRAY_SIZE(mv88f6710_mpp_gpio_ranges);

	pdev->dev.platform_data = soc;

	return mvebu_pinctrl_probe(pdev);
}

/* Tear down is fully handled by the mvebu pinctrl core. */
static int armada_370_pinctrl_remove(struct platform_device *pdev)
{
	return mvebu_pinctrl_remove(pdev);
}

static struct platform_driver armada_370_pinctrl_driver = {
	.driver = {
		.name = "armada-370-pinctrl",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(armada_370_pinctrl_of_match),
	},
	.probe = armada_370_pinctrl_probe,
	.remove = armada_370_pinctrl_remove,
};

module_platform_driver(armada_370_pinctrl_driver);

MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_DESCRIPTION("Marvell Armada 370 pinctrl driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
jkoelker/pandaboard-kernel
arch/arm/mach-exynos4/clock.c
1999
28645
/* linux/arch/arm/mach-exynos4/clock.c * * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. * http://www.samsung.com * * EXYNOS4 - Clock support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/err.h> #include <linux/io.h> #include <plat/cpu-freq.h> #include <plat/clock.h> #include <plat/cpu.h> #include <plat/pll.h> #include <plat/s5p-clock.h> #include <plat/clock-clksrc.h> #include <mach/map.h> #include <mach/regs-clock.h> #include <mach/sysmmu.h> static struct clk clk_sclk_hdmi27m = { .name = "sclk_hdmi27m", .id = -1, .rate = 27000000, }; static struct clk clk_sclk_hdmiphy = { .name = "sclk_hdmiphy", .id = -1, }; static struct clk clk_sclk_usbphy0 = { .name = "sclk_usbphy0", .id = -1, .rate = 27000000, }; static struct clk clk_sclk_usbphy1 = { .name = "sclk_usbphy1", .id = -1, }; static int exynos4_clksrc_mask_top_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKSRC_MASK_TOP, clk, enable); } static int exynos4_clksrc_mask_cam_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKSRC_MASK_CAM, clk, enable); } static int exynos4_clksrc_mask_lcd0_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKSRC_MASK_LCD0, clk, enable); } static int exynos4_clksrc_mask_lcd1_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKSRC_MASK_LCD1, clk, enable); } static int exynos4_clksrc_mask_fsys_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKSRC_MASK_FSYS, clk, enable); } static int exynos4_clksrc_mask_peril0_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKSRC_MASK_PERIL0, clk, enable); } static int exynos4_clksrc_mask_peril1_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKSRC_MASK_PERIL1, clk, enable); } static int exynos4_clk_ip_mfc_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_IP_MFC, 
clk, enable); } static int exynos4_clk_ip_cam_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_IP_CAM, clk, enable); } static int exynos4_clk_ip_tv_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_IP_TV, clk, enable); } static int exynos4_clk_ip_image_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_IP_IMAGE, clk, enable); } static int exynos4_clk_ip_lcd0_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_IP_LCD0, clk, enable); } static int exynos4_clk_ip_lcd1_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_IP_LCD1, clk, enable); } static int exynos4_clk_ip_fsys_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_IP_FSYS, clk, enable); } static int exynos4_clk_ip_peril_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_IP_PERIL, clk, enable); } static int exynos4_clk_ip_perir_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_IP_PERIR, clk, enable); } /* Core list of CMU_CPU side */ static struct clksrc_clk clk_mout_apll = { .clk = { .name = "mout_apll", .id = -1, }, .sources = &clk_src_apll, .reg_src = { .reg = S5P_CLKSRC_CPU, .shift = 0, .size = 1 }, }; static struct clksrc_clk clk_sclk_apll = { .clk = { .name = "sclk_apll", .id = -1, .parent = &clk_mout_apll.clk, }, .reg_div = { .reg = S5P_CLKDIV_CPU, .shift = 24, .size = 3 }, }; static struct clksrc_clk clk_mout_epll = { .clk = { .name = "mout_epll", .id = -1, }, .sources = &clk_src_epll, .reg_src = { .reg = S5P_CLKSRC_TOP0, .shift = 4, .size = 1 }, }; static struct clksrc_clk clk_mout_mpll = { .clk = { .name = "mout_mpll", .id = -1, }, .sources = &clk_src_mpll, .reg_src = { .reg = S5P_CLKSRC_CPU, .shift = 8, .size = 1 }, }; static struct clk *clkset_moutcore_list[] = { [0] = &clk_mout_apll.clk, [1] = &clk_mout_mpll.clk, }; static struct clksrc_sources clkset_moutcore = { .sources = clkset_moutcore_list, .nr_sources = ARRAY_SIZE(clkset_moutcore_list), }; static struct 
clksrc_clk clk_moutcore = { .clk = { .name = "moutcore", .id = -1, }, .sources = &clkset_moutcore, .reg_src = { .reg = S5P_CLKSRC_CPU, .shift = 16, .size = 1 }, }; static struct clksrc_clk clk_coreclk = { .clk = { .name = "core_clk", .id = -1, .parent = &clk_moutcore.clk, }, .reg_div = { .reg = S5P_CLKDIV_CPU, .shift = 0, .size = 3 }, }; static struct clksrc_clk clk_armclk = { .clk = { .name = "armclk", .id = -1, .parent = &clk_coreclk.clk, }, }; static struct clksrc_clk clk_aclk_corem0 = { .clk = { .name = "aclk_corem0", .id = -1, .parent = &clk_coreclk.clk, }, .reg_div = { .reg = S5P_CLKDIV_CPU, .shift = 4, .size = 3 }, }; static struct clksrc_clk clk_aclk_cores = { .clk = { .name = "aclk_cores", .id = -1, .parent = &clk_coreclk.clk, }, .reg_div = { .reg = S5P_CLKDIV_CPU, .shift = 4, .size = 3 }, }; static struct clksrc_clk clk_aclk_corem1 = { .clk = { .name = "aclk_corem1", .id = -1, .parent = &clk_coreclk.clk, }, .reg_div = { .reg = S5P_CLKDIV_CPU, .shift = 8, .size = 3 }, }; static struct clksrc_clk clk_periphclk = { .clk = { .name = "periphclk", .id = -1, .parent = &clk_coreclk.clk, }, .reg_div = { .reg = S5P_CLKDIV_CPU, .shift = 12, .size = 3 }, }; /* Core list of CMU_CORE side */ static struct clk *clkset_corebus_list[] = { [0] = &clk_mout_mpll.clk, [1] = &clk_sclk_apll.clk, }; static struct clksrc_sources clkset_mout_corebus = { .sources = clkset_corebus_list, .nr_sources = ARRAY_SIZE(clkset_corebus_list), }; static struct clksrc_clk clk_mout_corebus = { .clk = { .name = "mout_corebus", .id = -1, }, .sources = &clkset_mout_corebus, .reg_src = { .reg = S5P_CLKSRC_DMC, .shift = 4, .size = 1 }, }; static struct clksrc_clk clk_sclk_dmc = { .clk = { .name = "sclk_dmc", .id = -1, .parent = &clk_mout_corebus.clk, }, .reg_div = { .reg = S5P_CLKDIV_DMC0, .shift = 12, .size = 3 }, }; static struct clksrc_clk clk_aclk_cored = { .clk = { .name = "aclk_cored", .id = -1, .parent = &clk_sclk_dmc.clk, }, .reg_div = { .reg = S5P_CLKDIV_DMC0, .shift = 16, .size = 3 }, }; 
static struct clksrc_clk clk_aclk_corep = { .clk = { .name = "aclk_corep", .id = -1, .parent = &clk_aclk_cored.clk, }, .reg_div = { .reg = S5P_CLKDIV_DMC0, .shift = 20, .size = 3 }, }; static struct clksrc_clk clk_aclk_acp = { .clk = { .name = "aclk_acp", .id = -1, .parent = &clk_mout_corebus.clk, }, .reg_div = { .reg = S5P_CLKDIV_DMC0, .shift = 0, .size = 3 }, }; static struct clksrc_clk clk_pclk_acp = { .clk = { .name = "pclk_acp", .id = -1, .parent = &clk_aclk_acp.clk, }, .reg_div = { .reg = S5P_CLKDIV_DMC0, .shift = 4, .size = 3 }, }; /* Core list of CMU_TOP side */ static struct clk *clkset_aclk_top_list[] = { [0] = &clk_mout_mpll.clk, [1] = &clk_sclk_apll.clk, }; static struct clksrc_sources clkset_aclk = { .sources = clkset_aclk_top_list, .nr_sources = ARRAY_SIZE(clkset_aclk_top_list), }; static struct clksrc_clk clk_aclk_200 = { .clk = { .name = "aclk_200", .id = -1, }, .sources = &clkset_aclk, .reg_src = { .reg = S5P_CLKSRC_TOP0, .shift = 12, .size = 1 }, .reg_div = { .reg = S5P_CLKDIV_TOP, .shift = 0, .size = 3 }, }; static struct clksrc_clk clk_aclk_100 = { .clk = { .name = "aclk_100", .id = -1, }, .sources = &clkset_aclk, .reg_src = { .reg = S5P_CLKSRC_TOP0, .shift = 16, .size = 1 }, .reg_div = { .reg = S5P_CLKDIV_TOP, .shift = 4, .size = 4 }, }; static struct clksrc_clk clk_aclk_160 = { .clk = { .name = "aclk_160", .id = -1, }, .sources = &clkset_aclk, .reg_src = { .reg = S5P_CLKSRC_TOP0, .shift = 20, .size = 1 }, .reg_div = { .reg = S5P_CLKDIV_TOP, .shift = 8, .size = 3 }, }; static struct clksrc_clk clk_aclk_133 = { .clk = { .name = "aclk_133", .id = -1, }, .sources = &clkset_aclk, .reg_src = { .reg = S5P_CLKSRC_TOP0, .shift = 24, .size = 1 }, .reg_div = { .reg = S5P_CLKDIV_TOP, .shift = 12, .size = 3 }, }; static struct clk *clkset_vpllsrc_list[] = { [0] = &clk_fin_vpll, [1] = &clk_sclk_hdmi27m, }; static struct clksrc_sources clkset_vpllsrc = { .sources = clkset_vpllsrc_list, .nr_sources = ARRAY_SIZE(clkset_vpllsrc_list), }; static struct 
clksrc_clk clk_vpllsrc = { .clk = { .name = "vpll_src", .id = -1, .enable = exynos4_clksrc_mask_top_ctrl, .ctrlbit = (1 << 0), }, .sources = &clkset_vpllsrc, .reg_src = { .reg = S5P_CLKSRC_TOP1, .shift = 0, .size = 1 }, }; static struct clk *clkset_sclk_vpll_list[] = { [0] = &clk_vpllsrc.clk, [1] = &clk_fout_vpll, }; static struct clksrc_sources clkset_sclk_vpll = { .sources = clkset_sclk_vpll_list, .nr_sources = ARRAY_SIZE(clkset_sclk_vpll_list), }; static struct clksrc_clk clk_sclk_vpll = { .clk = { .name = "sclk_vpll", .id = -1, }, .sources = &clkset_sclk_vpll, .reg_src = { .reg = S5P_CLKSRC_TOP0, .shift = 8, .size = 1 }, }; static struct clk init_clocks_off[] = { { .name = "timers", .id = -1, .parent = &clk_aclk_100.clk, .enable = exynos4_clk_ip_peril_ctrl, .ctrlbit = (1<<24), }, { .name = "csis", .id = 0, .enable = exynos4_clk_ip_cam_ctrl, .ctrlbit = (1 << 4), }, { .name = "csis", .id = 1, .enable = exynos4_clk_ip_cam_ctrl, .ctrlbit = (1 << 5), }, { .name = "fimc", .id = 0, .enable = exynos4_clk_ip_cam_ctrl, .ctrlbit = (1 << 0), }, { .name = "fimc", .id = 1, .enable = exynos4_clk_ip_cam_ctrl, .ctrlbit = (1 << 1), }, { .name = "fimc", .id = 2, .enable = exynos4_clk_ip_cam_ctrl, .ctrlbit = (1 << 2), }, { .name = "fimc", .id = 3, .enable = exynos4_clk_ip_cam_ctrl, .ctrlbit = (1 << 3), }, { .name = "fimd", .id = 0, .enable = exynos4_clk_ip_lcd0_ctrl, .ctrlbit = (1 << 0), }, { .name = "fimd", .id = 1, .enable = exynos4_clk_ip_lcd1_ctrl, .ctrlbit = (1 << 0), }, { .name = "sataphy", .id = -1, .parent = &clk_aclk_133.clk, .enable = exynos4_clk_ip_fsys_ctrl, .ctrlbit = (1 << 3), }, { .name = "hsmmc", .id = 0, .parent = &clk_aclk_133.clk, .enable = exynos4_clk_ip_fsys_ctrl, .ctrlbit = (1 << 5), }, { .name = "hsmmc", .id = 1, .parent = &clk_aclk_133.clk, .enable = exynos4_clk_ip_fsys_ctrl, .ctrlbit = (1 << 6), }, { .name = "hsmmc", .id = 2, .parent = &clk_aclk_133.clk, .enable = exynos4_clk_ip_fsys_ctrl, .ctrlbit = (1 << 7), }, { .name = "hsmmc", .id = 3, .parent = 
&clk_aclk_133.clk, .enable = exynos4_clk_ip_fsys_ctrl, .ctrlbit = (1 << 8), }, { .name = "hsmmc", .id = 4, .parent = &clk_aclk_133.clk, .enable = exynos4_clk_ip_fsys_ctrl, .ctrlbit = (1 << 9), }, { .name = "sata", .id = -1, .parent = &clk_aclk_133.clk, .enable = exynos4_clk_ip_fsys_ctrl, .ctrlbit = (1 << 10), }, { .name = "pdma", .id = 0, .enable = exynos4_clk_ip_fsys_ctrl, .ctrlbit = (1 << 0), }, { .name = "pdma", .id = 1, .enable = exynos4_clk_ip_fsys_ctrl, .ctrlbit = (1 << 1), }, { .name = "adc", .id = -1, .enable = exynos4_clk_ip_peril_ctrl, .ctrlbit = (1 << 15), }, { .name = "keypad", .id = -1, .enable = exynos4_clk_ip_perir_ctrl, .ctrlbit = (1 << 16), }, { .name = "rtc", .id = -1, .enable = exynos4_clk_ip_perir_ctrl, .ctrlbit = (1 << 15), }, { .name = "watchdog", .id = -1, .parent = &clk_aclk_100.clk, .enable = exynos4_clk_ip_perir_ctrl, .ctrlbit = (1 << 14), }, { .name = "usbhost", .id = -1, .enable = exynos4_clk_ip_fsys_ctrl , .ctrlbit = (1 << 12), }, { .name = "otg", .id = -1, .enable = exynos4_clk_ip_fsys_ctrl, .ctrlbit = (1 << 13), }, { .name = "spi", .id = 0, .enable = exynos4_clk_ip_peril_ctrl, .ctrlbit = (1 << 16), }, { .name = "spi", .id = 1, .enable = exynos4_clk_ip_peril_ctrl, .ctrlbit = (1 << 17), }, { .name = "spi", .id = 2, .enable = exynos4_clk_ip_peril_ctrl, .ctrlbit = (1 << 18), }, { .name = "iis", .id = 0, .enable = exynos4_clk_ip_peril_ctrl, .ctrlbit = (1 << 19), }, { .name = "iis", .id = 1, .enable = exynos4_clk_ip_peril_ctrl, .ctrlbit = (1 << 20), }, { .name = "iis", .id = 2, .enable = exynos4_clk_ip_peril_ctrl, .ctrlbit = (1 << 21), }, { .name = "ac97", .id = -1, .enable = exynos4_clk_ip_peril_ctrl, .ctrlbit = (1 << 27), }, { .name = "fimg2d", .id = -1, .enable = exynos4_clk_ip_image_ctrl, .ctrlbit = (1 << 0), }, { .name = "i2c", .id = 0, .parent = &clk_aclk_100.clk, .enable = exynos4_clk_ip_peril_ctrl, .ctrlbit = (1 << 6), }, { .name = "i2c", .id = 1, .parent = &clk_aclk_100.clk, .enable = exynos4_clk_ip_peril_ctrl, .ctrlbit = (1 << 7), 
}, { .name = "i2c", .id = 2, .parent = &clk_aclk_100.clk, .enable = exynos4_clk_ip_peril_ctrl, .ctrlbit = (1 << 8), }, { .name = "i2c", .id = 3, .parent = &clk_aclk_100.clk, .enable = exynos4_clk_ip_peril_ctrl, .ctrlbit = (1 << 9), }, { .name = "i2c", .id = 4, .parent = &clk_aclk_100.clk, .enable = exynos4_clk_ip_peril_ctrl, .ctrlbit = (1 << 10), }, { .name = "i2c", .id = 5, .parent = &clk_aclk_100.clk, .enable = exynos4_clk_ip_peril_ctrl, .ctrlbit = (1 << 11), }, { .name = "i2c", .id = 6, .parent = &clk_aclk_100.clk, .enable = exynos4_clk_ip_peril_ctrl, .ctrlbit = (1 << 12), }, { .name = "i2c", .id = 7, .parent = &clk_aclk_100.clk, .enable = exynos4_clk_ip_peril_ctrl, .ctrlbit = (1 << 13), }, { .name = "SYSMMU_MDMA", .id = -1, .enable = exynos4_clk_ip_image_ctrl, .ctrlbit = (1 << 5), }, { .name = "SYSMMU_FIMC0", .id = -1, .enable = exynos4_clk_ip_cam_ctrl, .ctrlbit = (1 << 7), }, { .name = "SYSMMU_FIMC1", .id = -1, .enable = exynos4_clk_ip_cam_ctrl, .ctrlbit = (1 << 8), }, { .name = "SYSMMU_FIMC2", .id = -1, .enable = exynos4_clk_ip_cam_ctrl, .ctrlbit = (1 << 9), }, { .name = "SYSMMU_FIMC3", .id = -1, .enable = exynos4_clk_ip_cam_ctrl, .ctrlbit = (1 << 10), }, { .name = "SYSMMU_JPEG", .id = -1, .enable = exynos4_clk_ip_cam_ctrl, .ctrlbit = (1 << 11), }, { .name = "SYSMMU_FIMD0", .id = -1, .enable = exynos4_clk_ip_lcd0_ctrl, .ctrlbit = (1 << 4), }, { .name = "SYSMMU_FIMD1", .id = -1, .enable = exynos4_clk_ip_lcd1_ctrl, .ctrlbit = (1 << 4), }, { .name = "SYSMMU_PCIe", .id = -1, .enable = exynos4_clk_ip_fsys_ctrl, .ctrlbit = (1 << 18), }, { .name = "SYSMMU_G2D", .id = -1, .enable = exynos4_clk_ip_image_ctrl, .ctrlbit = (1 << 3), }, { .name = "SYSMMU_ROTATOR", .id = -1, .enable = exynos4_clk_ip_image_ctrl, .ctrlbit = (1 << 4), }, { .name = "SYSMMU_TV", .id = -1, .enable = exynos4_clk_ip_tv_ctrl, .ctrlbit = (1 << 4), }, { .name = "SYSMMU_MFC_L", .id = -1, .enable = exynos4_clk_ip_mfc_ctrl, .ctrlbit = (1 << 1), }, { .name = "SYSMMU_MFC_R", .id = -1, .enable = 
exynos4_clk_ip_mfc_ctrl, .ctrlbit = (1 << 2), } }; static struct clk init_clocks[] = { { .name = "uart", .id = 0, .enable = exynos4_clk_ip_peril_ctrl, .ctrlbit = (1 << 0), }, { .name = "uart", .id = 1, .enable = exynos4_clk_ip_peril_ctrl, .ctrlbit = (1 << 1), }, { .name = "uart", .id = 2, .enable = exynos4_clk_ip_peril_ctrl, .ctrlbit = (1 << 2), }, { .name = "uart", .id = 3, .enable = exynos4_clk_ip_peril_ctrl, .ctrlbit = (1 << 3), }, { .name = "uart", .id = 4, .enable = exynos4_clk_ip_peril_ctrl, .ctrlbit = (1 << 4), }, { .name = "uart", .id = 5, .enable = exynos4_clk_ip_peril_ctrl, .ctrlbit = (1 << 5), } }; static struct clk *clkset_group_list[] = { [0] = &clk_ext_xtal_mux, [1] = &clk_xusbxti, [2] = &clk_sclk_hdmi27m, [3] = &clk_sclk_usbphy0, [4] = &clk_sclk_usbphy1, [5] = &clk_sclk_hdmiphy, [6] = &clk_mout_mpll.clk, [7] = &clk_mout_epll.clk, [8] = &clk_sclk_vpll.clk, }; static struct clksrc_sources clkset_group = { .sources = clkset_group_list, .nr_sources = ARRAY_SIZE(clkset_group_list), }; static struct clk *clkset_mout_g2d0_list[] = { [0] = &clk_mout_mpll.clk, [1] = &clk_sclk_apll.clk, }; static struct clksrc_sources clkset_mout_g2d0 = { .sources = clkset_mout_g2d0_list, .nr_sources = ARRAY_SIZE(clkset_mout_g2d0_list), }; static struct clksrc_clk clk_mout_g2d0 = { .clk = { .name = "mout_g2d0", .id = -1, }, .sources = &clkset_mout_g2d0, .reg_src = { .reg = S5P_CLKSRC_IMAGE, .shift = 0, .size = 1 }, }; static struct clk *clkset_mout_g2d1_list[] = { [0] = &clk_mout_epll.clk, [1] = &clk_sclk_vpll.clk, }; static struct clksrc_sources clkset_mout_g2d1 = { .sources = clkset_mout_g2d1_list, .nr_sources = ARRAY_SIZE(clkset_mout_g2d1_list), }; static struct clksrc_clk clk_mout_g2d1 = { .clk = { .name = "mout_g2d1", .id = -1, }, .sources = &clkset_mout_g2d1, .reg_src = { .reg = S5P_CLKSRC_IMAGE, .shift = 4, .size = 1 }, }; static struct clk *clkset_mout_g2d_list[] = { [0] = &clk_mout_g2d0.clk, [1] = &clk_mout_g2d1.clk, }; static struct clksrc_sources clkset_mout_g2d = { 
.sources = clkset_mout_g2d_list, .nr_sources = ARRAY_SIZE(clkset_mout_g2d_list), }; static struct clksrc_clk clk_dout_mmc0 = { .clk = { .name = "dout_mmc0", .id = -1, }, .sources = &clkset_group, .reg_src = { .reg = S5P_CLKSRC_FSYS, .shift = 0, .size = 4 }, .reg_div = { .reg = S5P_CLKDIV_FSYS1, .shift = 0, .size = 4 }, }; static struct clksrc_clk clk_dout_mmc1 = { .clk = { .name = "dout_mmc1", .id = -1, }, .sources = &clkset_group, .reg_src = { .reg = S5P_CLKSRC_FSYS, .shift = 4, .size = 4 }, .reg_div = { .reg = S5P_CLKDIV_FSYS1, .shift = 16, .size = 4 }, }; static struct clksrc_clk clk_dout_mmc2 = { .clk = { .name = "dout_mmc2", .id = -1, }, .sources = &clkset_group, .reg_src = { .reg = S5P_CLKSRC_FSYS, .shift = 8, .size = 4 }, .reg_div = { .reg = S5P_CLKDIV_FSYS2, .shift = 0, .size = 4 }, }; static struct clksrc_clk clk_dout_mmc3 = { .clk = { .name = "dout_mmc3", .id = -1, }, .sources = &clkset_group, .reg_src = { .reg = S5P_CLKSRC_FSYS, .shift = 12, .size = 4 }, .reg_div = { .reg = S5P_CLKDIV_FSYS2, .shift = 16, .size = 4 }, }; static struct clksrc_clk clk_dout_mmc4 = { .clk = { .name = "dout_mmc4", .id = -1, }, .sources = &clkset_group, .reg_src = { .reg = S5P_CLKSRC_FSYS, .shift = 16, .size = 4 }, .reg_div = { .reg = S5P_CLKDIV_FSYS3, .shift = 0, .size = 4 }, }; static struct clksrc_clk clksrcs[] = { { .clk = { .name = "uclk1", .id = 0, .enable = exynos4_clksrc_mask_peril0_ctrl, .ctrlbit = (1 << 0), }, .sources = &clkset_group, .reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 0, .size = 4 }, .reg_div = { .reg = S5P_CLKDIV_PERIL0, .shift = 0, .size = 4 }, }, { .clk = { .name = "uclk1", .id = 1, .enable = exynos4_clksrc_mask_peril0_ctrl, .ctrlbit = (1 << 4), }, .sources = &clkset_group, .reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 4, .size = 4 }, .reg_div = { .reg = S5P_CLKDIV_PERIL0, .shift = 4, .size = 4 }, }, { .clk = { .name = "uclk1", .id = 2, .enable = exynos4_clksrc_mask_peril0_ctrl, .ctrlbit = (1 << 8), }, .sources = &clkset_group, .reg_src = { .reg = 
S5P_CLKSRC_PERIL0, .shift = 8, .size = 4 }, .reg_div = { .reg = S5P_CLKDIV_PERIL0, .shift = 8, .size = 4 }, }, { .clk = { .name = "uclk1", .id = 3, .enable = exynos4_clksrc_mask_peril0_ctrl, .ctrlbit = (1 << 12), }, .sources = &clkset_group, .reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 12, .size = 4 }, .reg_div = { .reg = S5P_CLKDIV_PERIL0, .shift = 12, .size = 4 }, }, { .clk = { .name = "sclk_pwm", .id = -1, .enable = exynos4_clksrc_mask_peril0_ctrl, .ctrlbit = (1 << 24), }, .sources = &clkset_group, .reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 24, .size = 4 }, .reg_div = { .reg = S5P_CLKDIV_PERIL3, .shift = 0, .size = 4 }, }, { .clk = { .name = "sclk_csis", .id = 0, .enable = exynos4_clksrc_mask_cam_ctrl, .ctrlbit = (1 << 24), }, .sources = &clkset_group, .reg_src = { .reg = S5P_CLKSRC_CAM, .shift = 24, .size = 4 }, .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 24, .size = 4 }, }, { .clk = { .name = "sclk_csis", .id = 1, .enable = exynos4_clksrc_mask_cam_ctrl, .ctrlbit = (1 << 28), }, .sources = &clkset_group, .reg_src = { .reg = S5P_CLKSRC_CAM, .shift = 28, .size = 4 }, .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 28, .size = 4 }, }, { .clk = { .name = "sclk_cam", .id = 0, .enable = exynos4_clksrc_mask_cam_ctrl, .ctrlbit = (1 << 16), }, .sources = &clkset_group, .reg_src = { .reg = S5P_CLKSRC_CAM, .shift = 16, .size = 4 }, .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 16, .size = 4 }, }, { .clk = { .name = "sclk_cam", .id = 1, .enable = exynos4_clksrc_mask_cam_ctrl, .ctrlbit = (1 << 20), }, .sources = &clkset_group, .reg_src = { .reg = S5P_CLKSRC_CAM, .shift = 20, .size = 4 }, .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 20, .size = 4 }, }, { .clk = { .name = "sclk_fimc", .id = 0, .enable = exynos4_clksrc_mask_cam_ctrl, .ctrlbit = (1 << 0), }, .sources = &clkset_group, .reg_src = { .reg = S5P_CLKSRC_CAM, .shift = 0, .size = 4 }, .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 0, .size = 4 }, }, { .clk = { .name = "sclk_fimc", .id = 1, .enable = 
exynos4_clksrc_mask_cam_ctrl, .ctrlbit = (1 << 4), }, .sources = &clkset_group, .reg_src = { .reg = S5P_CLKSRC_CAM, .shift = 4, .size = 4 }, .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 4, .size = 4 }, }, { .clk = { .name = "sclk_fimc", .id = 2, .enable = exynos4_clksrc_mask_cam_ctrl, .ctrlbit = (1 << 8), }, .sources = &clkset_group, .reg_src = { .reg = S5P_CLKSRC_CAM, .shift = 8, .size = 4 }, .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 8, .size = 4 }, }, { .clk = { .name = "sclk_fimc", .id = 3, .enable = exynos4_clksrc_mask_cam_ctrl, .ctrlbit = (1 << 12), }, .sources = &clkset_group, .reg_src = { .reg = S5P_CLKSRC_CAM, .shift = 12, .size = 4 }, .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 12, .size = 4 }, }, { .clk = { .name = "sclk_fimd", .id = 0, .enable = exynos4_clksrc_mask_lcd0_ctrl, .ctrlbit = (1 << 0), }, .sources = &clkset_group, .reg_src = { .reg = S5P_CLKSRC_LCD0, .shift = 0, .size = 4 }, .reg_div = { .reg = S5P_CLKDIV_LCD0, .shift = 0, .size = 4 }, }, { .clk = { .name = "sclk_fimd", .id = 1, .enable = exynos4_clksrc_mask_lcd1_ctrl, .ctrlbit = (1 << 0), }, .sources = &clkset_group, .reg_src = { .reg = S5P_CLKSRC_LCD1, .shift = 0, .size = 4 }, .reg_div = { .reg = S5P_CLKDIV_LCD1, .shift = 0, .size = 4 }, }, { .clk = { .name = "sclk_sata", .id = -1, .enable = exynos4_clksrc_mask_fsys_ctrl, .ctrlbit = (1 << 24), }, .sources = &clkset_mout_corebus, .reg_src = { .reg = S5P_CLKSRC_FSYS, .shift = 24, .size = 1 }, .reg_div = { .reg = S5P_CLKDIV_FSYS0, .shift = 20, .size = 4 }, }, { .clk = { .name = "sclk_spi", .id = 0, .enable = exynos4_clksrc_mask_peril1_ctrl, .ctrlbit = (1 << 16), }, .sources = &clkset_group, .reg_src = { .reg = S5P_CLKSRC_PERIL1, .shift = 16, .size = 4 }, .reg_div = { .reg = S5P_CLKDIV_PERIL1, .shift = 0, .size = 4 }, }, { .clk = { .name = "sclk_spi", .id = 1, .enable = exynos4_clksrc_mask_peril1_ctrl, .ctrlbit = (1 << 20), }, .sources = &clkset_group, .reg_src = { .reg = S5P_CLKSRC_PERIL1, .shift = 20, .size = 4 }, .reg_div = { .reg = 
S5P_CLKDIV_PERIL1, .shift = 16, .size = 4 }, }, { .clk = { .name = "sclk_spi", .id = 2, .enable = exynos4_clksrc_mask_peril1_ctrl, .ctrlbit = (1 << 24), }, .sources = &clkset_group, .reg_src = { .reg = S5P_CLKSRC_PERIL1, .shift = 24, .size = 4 }, .reg_div = { .reg = S5P_CLKDIV_PERIL2, .shift = 0, .size = 4 }, }, { .clk = { .name = "sclk_fimg2d", .id = -1, }, .sources = &clkset_mout_g2d, .reg_src = { .reg = S5P_CLKSRC_IMAGE, .shift = 8, .size = 1 }, .reg_div = { .reg = S5P_CLKDIV_IMAGE, .shift = 0, .size = 4 }, }, { .clk = { .name = "sclk_mmc", .id = 0, .parent = &clk_dout_mmc0.clk, .enable = exynos4_clksrc_mask_fsys_ctrl, .ctrlbit = (1 << 0), }, .reg_div = { .reg = S5P_CLKDIV_FSYS1, .shift = 8, .size = 8 }, }, { .clk = { .name = "sclk_mmc", .id = 1, .parent = &clk_dout_mmc1.clk, .enable = exynos4_clksrc_mask_fsys_ctrl, .ctrlbit = (1 << 4), }, .reg_div = { .reg = S5P_CLKDIV_FSYS1, .shift = 24, .size = 8 }, }, { .clk = { .name = "sclk_mmc", .id = 2, .parent = &clk_dout_mmc2.clk, .enable = exynos4_clksrc_mask_fsys_ctrl, .ctrlbit = (1 << 8), }, .reg_div = { .reg = S5P_CLKDIV_FSYS2, .shift = 8, .size = 8 }, }, { .clk = { .name = "sclk_mmc", .id = 3, .parent = &clk_dout_mmc3.clk, .enable = exynos4_clksrc_mask_fsys_ctrl, .ctrlbit = (1 << 12), }, .reg_div = { .reg = S5P_CLKDIV_FSYS2, .shift = 24, .size = 8 }, }, { .clk = { .name = "sclk_mmc", .id = 4, .parent = &clk_dout_mmc4.clk, .enable = exynos4_clksrc_mask_fsys_ctrl, .ctrlbit = (1 << 16), }, .reg_div = { .reg = S5P_CLKDIV_FSYS3, .shift = 8, .size = 8 }, } }; /* Clock initialization code */ static struct clksrc_clk *sysclks[] = { &clk_mout_apll, &clk_sclk_apll, &clk_mout_epll, &clk_mout_mpll, &clk_moutcore, &clk_coreclk, &clk_armclk, &clk_aclk_corem0, &clk_aclk_cores, &clk_aclk_corem1, &clk_periphclk, &clk_mout_corebus, &clk_sclk_dmc, &clk_aclk_cored, &clk_aclk_corep, &clk_aclk_acp, &clk_pclk_acp, &clk_vpllsrc, &clk_sclk_vpll, &clk_aclk_200, &clk_aclk_100, &clk_aclk_160, &clk_aclk_133, &clk_dout_mmc0, &clk_dout_mmc1, 
&clk_dout_mmc2, &clk_dout_mmc3, &clk_dout_mmc4, }; static int xtal_rate; static unsigned long exynos4_fout_apll_get_rate(struct clk *clk) { return s5p_get_pll45xx(xtal_rate, __raw_readl(S5P_APLL_CON0), pll_4508); } static struct clk_ops exynos4_fout_apll_ops = { .get_rate = exynos4_fout_apll_get_rate, }; void __init_or_cpufreq exynos4_setup_clocks(void) { struct clk *xtal_clk; unsigned long apll; unsigned long mpll; unsigned long epll; unsigned long vpll; unsigned long vpllsrc; unsigned long xtal; unsigned long armclk; unsigned long sclk_dmc; unsigned long aclk_200; unsigned long aclk_100; unsigned long aclk_160; unsigned long aclk_133; unsigned int ptr; printk(KERN_DEBUG "%s: registering clocks\n", __func__); xtal_clk = clk_get(NULL, "xtal"); BUG_ON(IS_ERR(xtal_clk)); xtal = clk_get_rate(xtal_clk); xtal_rate = xtal; clk_put(xtal_clk); printk(KERN_DEBUG "%s: xtal is %ld\n", __func__, xtal); apll = s5p_get_pll45xx(xtal, __raw_readl(S5P_APLL_CON0), pll_4508); mpll = s5p_get_pll45xx(xtal, __raw_readl(S5P_MPLL_CON0), pll_4508); epll = s5p_get_pll46xx(xtal, __raw_readl(S5P_EPLL_CON0), __raw_readl(S5P_EPLL_CON1), pll_4600); vpllsrc = clk_get_rate(&clk_vpllsrc.clk); vpll = s5p_get_pll46xx(vpllsrc, __raw_readl(S5P_VPLL_CON0), __raw_readl(S5P_VPLL_CON1), pll_4650); clk_fout_apll.ops = &exynos4_fout_apll_ops; clk_fout_mpll.rate = mpll; clk_fout_epll.rate = epll; clk_fout_vpll.rate = vpll; printk(KERN_INFO "EXYNOS4: PLL settings, A=%ld, M=%ld, E=%ld V=%ld", apll, mpll, epll, vpll); armclk = clk_get_rate(&clk_armclk.clk); sclk_dmc = clk_get_rate(&clk_sclk_dmc.clk); aclk_200 = clk_get_rate(&clk_aclk_200.clk); aclk_100 = clk_get_rate(&clk_aclk_100.clk); aclk_160 = clk_get_rate(&clk_aclk_160.clk); aclk_133 = clk_get_rate(&clk_aclk_133.clk); printk(KERN_INFO "EXYNOS4: ARMCLK=%ld, DMC=%ld, ACLK200=%ld\n" "ACLK100=%ld, ACLK160=%ld, ACLK133=%ld\n", armclk, sclk_dmc, aclk_200, aclk_100, aclk_160, aclk_133); clk_f.rate = armclk; clk_h.rate = sclk_dmc; clk_p.rate = aclk_100; for (ptr = 
0; ptr < ARRAY_SIZE(clksrcs); ptr++) s3c_set_clksrc(&clksrcs[ptr], true); } static struct clk *clks[] __initdata = { /* Nothing here yet */ }; void __init exynos4_register_clocks(void) { int ptr; s3c24xx_register_clocks(clks, ARRAY_SIZE(clks)); for (ptr = 0; ptr < ARRAY_SIZE(sysclks); ptr++) s3c_register_clksrc(sysclks[ptr], 1); s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs)); s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks)); s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); s3c_pwmclk_init(); }
gpl-2.0
ColDReaVeR/android_kernel_samsung_coriplus
fs/ext3/dir.c
3279
13921
/* * linux/fs/ext3/dir.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/dir.c * * Copyright (C) 1991, 1992 Linus Torvalds * * ext3 directory handling functions * * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 * * Hash Tree Directory indexing (c) 2001 Daniel Phillips * */ #include <linux/fs.h> #include <linux/jbd.h> #include <linux/ext3_fs.h> #include <linux/buffer_head.h> #include <linux/slab.h> #include <linux/rbtree.h> static unsigned char ext3_filetype_table[] = { DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK }; static int ext3_readdir(struct file *, void *, filldir_t); static int ext3_dx_readdir(struct file * filp, void * dirent, filldir_t filldir); static int ext3_release_dir (struct inode * inode, struct file * filp); const struct file_operations ext3_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .readdir = ext3_readdir, /* we take BKL. 
needed?*/ .unlocked_ioctl = ext3_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ext3_compat_ioctl, #endif .fsync = ext3_sync_file, /* BKL held */ .release = ext3_release_dir, }; static unsigned char get_dtype(struct super_block *sb, int filetype) { if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_FILETYPE) || (filetype >= EXT3_FT_MAX)) return DT_UNKNOWN; return (ext3_filetype_table[filetype]); } int ext3_check_dir_entry (const char * function, struct inode * dir, struct ext3_dir_entry_2 * de, struct buffer_head * bh, unsigned long offset) { const char * error_msg = NULL; const int rlen = ext3_rec_len_from_disk(de->rec_len); if (unlikely(rlen < EXT3_DIR_REC_LEN(1))) error_msg = "rec_len is smaller than minimal"; else if (unlikely(rlen % 4 != 0)) error_msg = "rec_len % 4 != 0"; else if (unlikely(rlen < EXT3_DIR_REC_LEN(de->name_len))) error_msg = "rec_len is too small for name_len"; else if (unlikely((((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize))) error_msg = "directory entry across blocks"; else if (unlikely(le32_to_cpu(de->inode) > le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count))) error_msg = "inode out of bounds"; if (unlikely(error_msg != NULL)) ext3_error (dir->i_sb, function, "bad entry in directory #%lu: %s - " "offset=%lu, inode=%lu, rec_len=%d, name_len=%d", dir->i_ino, error_msg, offset, (unsigned long) le32_to_cpu(de->inode), rlen, de->name_len); return error_msg == NULL ? 
1 : 0; } static int ext3_readdir(struct file * filp, void * dirent, filldir_t filldir) { int error = 0; unsigned long offset; int i, stored; struct ext3_dir_entry_2 *de; struct super_block *sb; int err; struct inode *inode = filp->f_path.dentry->d_inode; int ret = 0; int dir_has_error = 0; sb = inode->i_sb; if (EXT3_HAS_COMPAT_FEATURE(inode->i_sb, EXT3_FEATURE_COMPAT_DIR_INDEX) && ((EXT3_I(inode)->i_flags & EXT3_INDEX_FL) || ((inode->i_size >> sb->s_blocksize_bits) == 1))) { err = ext3_dx_readdir(filp, dirent, filldir); if (err != ERR_BAD_DX_DIR) { ret = err; goto out; } /* * We don't set the inode dirty flag since it's not * critical that it get flushed back to the disk. */ EXT3_I(filp->f_path.dentry->d_inode)->i_flags &= ~EXT3_INDEX_FL; } stored = 0; offset = filp->f_pos & (sb->s_blocksize - 1); while (!error && !stored && filp->f_pos < inode->i_size) { unsigned long blk = filp->f_pos >> EXT3_BLOCK_SIZE_BITS(sb); struct buffer_head map_bh; struct buffer_head *bh = NULL; map_bh.b_state = 0; err = ext3_get_blocks_handle(NULL, inode, blk, 1, &map_bh, 0); if (err > 0) { pgoff_t index = map_bh.b_blocknr >> (PAGE_CACHE_SHIFT - inode->i_blkbits); if (!ra_has_index(&filp->f_ra, index)) page_cache_sync_readahead( sb->s_bdev->bd_inode->i_mapping, &filp->f_ra, filp, index, 1); filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; bh = ext3_bread(NULL, inode, blk, 0, &err); } /* * We ignore I/O errors on directories so users have a chance * of recovering data when there's a bad sector */ if (!bh) { if (!dir_has_error) { ext3_error(sb, __func__, "directory #%lu " "contains a hole at offset %lld", inode->i_ino, filp->f_pos); dir_has_error = 1; } /* corrupt size? Maybe no more blocks to read */ if (filp->f_pos > inode->i_blocks << 9) break; filp->f_pos += sb->s_blocksize - offset; continue; } revalidate: /* If the dir block has changed since the last call to * readdir(2), then we might be pointing to an invalid * dirent right now. 
Scan from the start of the block * to make sure. */ if (filp->f_version != inode->i_version) { for (i = 0; i < sb->s_blocksize && i < offset; ) { de = (struct ext3_dir_entry_2 *) (bh->b_data + i); /* It's too expensive to do a full * dirent test each time round this * loop, but we do have to test at * least that it is non-zero. A * failure will be detected in the * dirent test below. */ if (ext3_rec_len_from_disk(de->rec_len) < EXT3_DIR_REC_LEN(1)) break; i += ext3_rec_len_from_disk(de->rec_len); } offset = i; filp->f_pos = (filp->f_pos & ~(sb->s_blocksize - 1)) | offset; filp->f_version = inode->i_version; } while (!error && filp->f_pos < inode->i_size && offset < sb->s_blocksize) { de = (struct ext3_dir_entry_2 *) (bh->b_data + offset); if (!ext3_check_dir_entry ("ext3_readdir", inode, de, bh, offset)) { /* On error, skip the f_pos to the next block. */ filp->f_pos = (filp->f_pos | (sb->s_blocksize - 1)) + 1; brelse (bh); ret = stored; goto out; } offset += ext3_rec_len_from_disk(de->rec_len); if (le32_to_cpu(de->inode)) { /* We might block in the next section * if the data destination is * currently swapped out. So, use a * version stamp to detect whether or * not the directory has been modified * during the copy operation. */ u64 version = filp->f_version; error = filldir(dirent, de->name, de->name_len, filp->f_pos, le32_to_cpu(de->inode), get_dtype(sb, de->file_type)); if (error) break; if (version != filp->f_version) goto revalidate; stored ++; } filp->f_pos += ext3_rec_len_from_disk(de->rec_len); } offset = 0; brelse (bh); } out: return ret; } /* * These functions convert from the major/minor hash to an f_pos * value. * * Currently we only use major hash numer. 
This is unfortunate, but * on 32-bit machines, the same VFS interface is used for lseek and * llseek, so if we use the 64 bit offset, then the 32-bit versions of * lseek/telldir/seekdir will blow out spectacularly, and from within * the ext2 low-level routine, we don't know if we're being called by * a 64-bit version of the system call or the 32-bit version of the * system call. Worse yet, NFSv2 only allows for a 32-bit readdir * cookie. Sigh. */ #define hash2pos(major, minor) (major >> 1) #define pos2maj_hash(pos) ((pos << 1) & 0xffffffff) #define pos2min_hash(pos) (0) /* * This structure holds the nodes of the red-black tree used to store * the directory entry in hash order. */ struct fname { __u32 hash; __u32 minor_hash; struct rb_node rb_hash; struct fname *next; __u32 inode; __u8 name_len; __u8 file_type; char name[0]; }; /* * This functoin implements a non-recursive way of freeing all of the * nodes in the red-black tree. */ static void free_rb_tree_fname(struct rb_root *root) { struct rb_node *n = root->rb_node; struct rb_node *parent; struct fname *fname; while (n) { /* Do the node's children first */ if (n->rb_left) { n = n->rb_left; continue; } if (n->rb_right) { n = n->rb_right; continue; } /* * The node has no children; free it, and then zero * out parent's link to it. Finally go to the * beginning of the loop and try to free the parent * node. 
*/ parent = rb_parent(n); fname = rb_entry(n, struct fname, rb_hash); while (fname) { struct fname * old = fname; fname = fname->next; kfree (old); } if (!parent) *root = RB_ROOT; else if (parent->rb_left == n) parent->rb_left = NULL; else if (parent->rb_right == n) parent->rb_right = NULL; n = parent; } } static struct dir_private_info *ext3_htree_create_dir_info(loff_t pos) { struct dir_private_info *p; p = kzalloc(sizeof(struct dir_private_info), GFP_KERNEL); if (!p) return NULL; p->curr_hash = pos2maj_hash(pos); p->curr_minor_hash = pos2min_hash(pos); return p; } void ext3_htree_free_dir_info(struct dir_private_info *p) { free_rb_tree_fname(&p->root); kfree(p); } /* * Given a directory entry, enter it into the fname rb tree. */ int ext3_htree_store_dirent(struct file *dir_file, __u32 hash, __u32 minor_hash, struct ext3_dir_entry_2 *dirent) { struct rb_node **p, *parent = NULL; struct fname * fname, *new_fn; struct dir_private_info *info; int len; info = (struct dir_private_info *) dir_file->private_data; p = &info->root.rb_node; /* Create and allocate the fname structure */ len = sizeof(struct fname) + dirent->name_len + 1; new_fn = kzalloc(len, GFP_KERNEL); if (!new_fn) return -ENOMEM; new_fn->hash = hash; new_fn->minor_hash = minor_hash; new_fn->inode = le32_to_cpu(dirent->inode); new_fn->name_len = dirent->name_len; new_fn->file_type = dirent->file_type; memcpy(new_fn->name, dirent->name, dirent->name_len); new_fn->name[dirent->name_len] = 0; while (*p) { parent = *p; fname = rb_entry(parent, struct fname, rb_hash); /* * If the hash and minor hash match up, then we put * them on a linked list. This rarely happens... 
*/ if ((new_fn->hash == fname->hash) && (new_fn->minor_hash == fname->minor_hash)) { new_fn->next = fname->next; fname->next = new_fn; return 0; } if (new_fn->hash < fname->hash) p = &(*p)->rb_left; else if (new_fn->hash > fname->hash) p = &(*p)->rb_right; else if (new_fn->minor_hash < fname->minor_hash) p = &(*p)->rb_left; else /* if (new_fn->minor_hash > fname->minor_hash) */ p = &(*p)->rb_right; } rb_link_node(&new_fn->rb_hash, parent, p); rb_insert_color(&new_fn->rb_hash, &info->root); return 0; } /* * This is a helper function for ext3_dx_readdir. It calls filldir * for all entres on the fname linked list. (Normally there is only * one entry on the linked list, unless there are 62 bit hash collisions.) */ static int call_filldir(struct file * filp, void * dirent, filldir_t filldir, struct fname *fname) { struct dir_private_info *info = filp->private_data; loff_t curr_pos; struct inode *inode = filp->f_path.dentry->d_inode; struct super_block * sb; int error; sb = inode->i_sb; if (!fname) { printk("call_filldir: called with null fname?!?\n"); return 0; } curr_pos = hash2pos(fname->hash, fname->minor_hash); while (fname) { error = filldir(dirent, fname->name, fname->name_len, curr_pos, fname->inode, get_dtype(sb, fname->file_type)); if (error) { filp->f_pos = curr_pos; info->extra_fname = fname; return error; } fname = fname->next; } return 0; } static int ext3_dx_readdir(struct file * filp, void * dirent, filldir_t filldir) { struct dir_private_info *info = filp->private_data; struct inode *inode = filp->f_path.dentry->d_inode; struct fname *fname; int ret; if (!info) { info = ext3_htree_create_dir_info(filp->f_pos); if (!info) return -ENOMEM; filp->private_data = info; } if (filp->f_pos == EXT3_HTREE_EOF) return 0; /* EOF */ /* Some one has messed with f_pos; reset the world */ if (info->last_pos != filp->f_pos) { free_rb_tree_fname(&info->root); info->curr_node = NULL; info->extra_fname = NULL; info->curr_hash = pos2maj_hash(filp->f_pos); 
info->curr_minor_hash = pos2min_hash(filp->f_pos); } /* * If there are any leftover names on the hash collision * chain, return them first. */ if (info->extra_fname) { if (call_filldir(filp, dirent, filldir, info->extra_fname)) goto finished; info->extra_fname = NULL; goto next_node; } else if (!info->curr_node) info->curr_node = rb_first(&info->root); while (1) { /* * Fill the rbtree if we have no more entries, * or the inode has changed since we last read in the * cached entries. */ if ((!info->curr_node) || (filp->f_version != inode->i_version)) { info->curr_node = NULL; free_rb_tree_fname(&info->root); filp->f_version = inode->i_version; ret = ext3_htree_fill_tree(filp, info->curr_hash, info->curr_minor_hash, &info->next_hash); if (ret < 0) return ret; if (ret == 0) { filp->f_pos = EXT3_HTREE_EOF; break; } info->curr_node = rb_first(&info->root); } fname = rb_entry(info->curr_node, struct fname, rb_hash); info->curr_hash = fname->hash; info->curr_minor_hash = fname->minor_hash; if (call_filldir(filp, dirent, filldir, fname)) break; next_node: info->curr_node = rb_next(info->curr_node); if (info->curr_node) { fname = rb_entry(info->curr_node, struct fname, rb_hash); info->curr_hash = fname->hash; info->curr_minor_hash = fname->minor_hash; } else { if (info->next_hash == ~0) { filp->f_pos = EXT3_HTREE_EOF; break; } info->curr_hash = info->next_hash; info->curr_minor_hash = 0; } } finished: info->last_pos = filp->f_pos; return 0; } static int ext3_release_dir (struct inode * inode, struct file * filp) { if (filp->private_data) ext3_htree_free_dir_info(filp->private_data); return 0; }
gpl-2.0
samno1607/FreshBake
drivers/media/dvb/frontends/s5h1409.c
4303
23559
/* Samsung S5H1409 VSB/QAM demodulator driver Copyright (C) 2006 Steven Toth <stoth@linuxtv.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/delay.h> #include "dvb_frontend.h" #include "s5h1409.h" struct s5h1409_state { struct i2c_adapter *i2c; /* configuration settings */ const struct s5h1409_config *config; struct dvb_frontend frontend; /* previous uncorrected block counter */ fe_modulation_t current_modulation; u32 current_frequency; int if_freq; u32 is_qam_locked; /* QAM tuning state goes through the following state transitions */ #define QAM_STATE_UNTUNED 0 #define QAM_STATE_TUNING_STARTED 1 #define QAM_STATE_INTERLEAVE_SET 2 #define QAM_STATE_QAM_OPTIMIZED_L1 3 #define QAM_STATE_QAM_OPTIMIZED_L2 4 #define QAM_STATE_QAM_OPTIMIZED_L3 5 u8 qam_state; }; static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Enable verbose debug messages"); #define dprintk if (debug) printk /* Register values to initialise the demod, this will set VSB by default */ static struct init_tab { u8 reg; u16 data; } init_tab[] = { { 0x00, 0x0071, }, { 0x01, 0x3213, }, { 0x09, 0x0025, }, { 0x1c, 0x001d, }, { 0x1f, 0x002d, }, { 0x20, 0x001d, }, { 0x22, 0x0022, }, { 0x23, 0x0020, }, { 0x29, 0x110f, }, { 0x2a, 0x10b4, }, { 
0x2b, 0x10ae, }, { 0x2c, 0x0031, }, { 0x31, 0x010d, }, { 0x32, 0x0100, }, { 0x44, 0x0510, }, { 0x54, 0x0104, }, { 0x58, 0x2222, }, { 0x59, 0x1162, }, { 0x5a, 0x3211, }, { 0x5d, 0x0370, }, { 0x5e, 0x0296, }, { 0x61, 0x0010, }, { 0x63, 0x4a00, }, { 0x65, 0x0800, }, { 0x71, 0x0003, }, { 0x72, 0x0470, }, { 0x81, 0x0002, }, { 0x82, 0x0600, }, { 0x86, 0x0002, }, { 0x8a, 0x2c38, }, { 0x8b, 0x2a37, }, { 0x92, 0x302f, }, { 0x93, 0x3332, }, { 0x96, 0x000c, }, { 0x99, 0x0101, }, { 0x9c, 0x2e37, }, { 0x9d, 0x2c37, }, { 0x9e, 0x2c37, }, { 0xab, 0x0100, }, { 0xac, 0x1003, }, { 0xad, 0x103f, }, { 0xe2, 0x0100, }, { 0xe3, 0x1000, }, { 0x28, 0x1010, }, { 0xb1, 0x000e, }, }; /* VSB SNR lookup table */ static struct vsb_snr_tab { u16 val; u16 data; } vsb_snr_tab[] = { { 924, 300, }, { 923, 300, }, { 918, 295, }, { 915, 290, }, { 911, 285, }, { 906, 280, }, { 901, 275, }, { 896, 270, }, { 891, 265, }, { 885, 260, }, { 879, 255, }, { 873, 250, }, { 864, 245, }, { 858, 240, }, { 850, 235, }, { 841, 230, }, { 832, 225, }, { 823, 220, }, { 812, 215, }, { 802, 210, }, { 788, 205, }, { 778, 200, }, { 767, 195, }, { 753, 190, }, { 740, 185, }, { 725, 180, }, { 707, 175, }, { 689, 170, }, { 671, 165, }, { 656, 160, }, { 637, 155, }, { 616, 150, }, { 542, 145, }, { 519, 140, }, { 507, 135, }, { 497, 130, }, { 492, 125, }, { 474, 120, }, { 300, 111, }, { 0, 0, }, }; /* QAM64 SNR lookup table */ static struct qam64_snr_tab { u16 val; u16 data; } qam64_snr_tab[] = { { 1, 0, }, { 12, 300, }, { 15, 290, }, { 18, 280, }, { 22, 270, }, { 23, 268, }, { 24, 266, }, { 25, 264, }, { 27, 262, }, { 28, 260, }, { 29, 258, }, { 30, 256, }, { 32, 254, }, { 33, 252, }, { 34, 250, }, { 35, 249, }, { 36, 248, }, { 37, 247, }, { 38, 246, }, { 39, 245, }, { 40, 244, }, { 41, 243, }, { 42, 241, }, { 43, 240, }, { 44, 239, }, { 45, 238, }, { 46, 237, }, { 47, 236, }, { 48, 235, }, { 49, 234, }, { 50, 233, }, { 51, 232, }, { 52, 231, }, { 53, 230, }, { 55, 229, }, { 56, 228, }, { 57, 227, }, { 58, 226, }, { 59, 225, 
}, { 60, 224, }, { 62, 223, }, { 63, 222, }, { 65, 221, }, { 66, 220, }, { 68, 219, }, { 69, 218, }, { 70, 217, }, { 72, 216, }, { 73, 215, }, { 75, 214, }, { 76, 213, }, { 78, 212, }, { 80, 211, }, { 81, 210, }, { 83, 209, }, { 84, 208, }, { 85, 207, }, { 87, 206, }, { 89, 205, }, { 91, 204, }, { 93, 203, }, { 95, 202, }, { 96, 201, }, { 104, 200, }, { 255, 0, }, }; /* QAM256 SNR lookup table */ static struct qam256_snr_tab { u16 val; u16 data; } qam256_snr_tab[] = { { 1, 0, }, { 12, 400, }, { 13, 390, }, { 15, 380, }, { 17, 360, }, { 19, 350, }, { 22, 348, }, { 23, 346, }, { 24, 344, }, { 25, 342, }, { 26, 340, }, { 27, 336, }, { 28, 334, }, { 29, 332, }, { 30, 330, }, { 31, 328, }, { 32, 326, }, { 33, 325, }, { 34, 322, }, { 35, 320, }, { 37, 318, }, { 39, 316, }, { 40, 314, }, { 41, 312, }, { 42, 310, }, { 43, 308, }, { 46, 306, }, { 47, 304, }, { 49, 302, }, { 51, 300, }, { 53, 298, }, { 54, 297, }, { 55, 296, }, { 56, 295, }, { 57, 294, }, { 59, 293, }, { 60, 292, }, { 61, 291, }, { 63, 290, }, { 64, 289, }, { 65, 288, }, { 66, 287, }, { 68, 286, }, { 69, 285, }, { 71, 284, }, { 72, 283, }, { 74, 282, }, { 75, 281, }, { 76, 280, }, { 77, 279, }, { 78, 278, }, { 81, 277, }, { 83, 276, }, { 84, 275, }, { 86, 274, }, { 87, 273, }, { 89, 272, }, { 90, 271, }, { 92, 270, }, { 93, 269, }, { 95, 268, }, { 96, 267, }, { 98, 266, }, { 100, 265, }, { 102, 264, }, { 104, 263, }, { 105, 262, }, { 106, 261, }, { 110, 260, }, { 255, 0, }, }; /* 8 bit registers, 16 bit values */ static int s5h1409_writereg(struct s5h1409_state *state, u8 reg, u16 data) { int ret; u8 buf[] = { reg, data >> 8, data & 0xff }; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 3 }; ret = i2c_transfer(state->i2c, &msg, 1); if (ret != 1) printk(KERN_ERR "%s: error (reg == 0x%02x, val == 0x%04x, " "ret == %i)\n", __func__, reg, data, ret); return (ret != 1) ? 
-1 : 0; } static u16 s5h1409_readreg(struct s5h1409_state *state, u8 reg) { int ret; u8 b0[] = { reg }; u8 b1[] = { 0, 0 }; struct i2c_msg msg[] = { { .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 1 }, { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 2 } }; ret = i2c_transfer(state->i2c, msg, 2); if (ret != 2) printk("%s: readreg error (ret == %i)\n", __func__, ret); return (b1[0] << 8) | b1[1]; } static int s5h1409_softreset(struct dvb_frontend *fe) { struct s5h1409_state *state = fe->demodulator_priv; dprintk("%s()\n", __func__); s5h1409_writereg(state, 0xf5, 0); s5h1409_writereg(state, 0xf5, 1); state->is_qam_locked = 0; state->qam_state = QAM_STATE_UNTUNED; return 0; } #define S5H1409_VSB_IF_FREQ 5380 #define S5H1409_QAM_IF_FREQ (state->config->qam_if) static int s5h1409_set_if_freq(struct dvb_frontend *fe, int KHz) { struct s5h1409_state *state = fe->demodulator_priv; dprintk("%s(%d KHz)\n", __func__, KHz); switch (KHz) { case 4000: s5h1409_writereg(state, 0x87, 0x014b); s5h1409_writereg(state, 0x88, 0x0cb5); s5h1409_writereg(state, 0x89, 0x03e2); break; case 5380: case 44000: default: s5h1409_writereg(state, 0x87, 0x01be); s5h1409_writereg(state, 0x88, 0x0436); s5h1409_writereg(state, 0x89, 0x054d); break; } state->if_freq = KHz; return 0; } static int s5h1409_set_spectralinversion(struct dvb_frontend *fe, int inverted) { struct s5h1409_state *state = fe->demodulator_priv; dprintk("%s(%d)\n", __func__, inverted); if (inverted == 1) return s5h1409_writereg(state, 0x1b, 0x1101); /* Inverted */ else return s5h1409_writereg(state, 0x1b, 0x0110); /* Normal */ } static int s5h1409_enable_modulation(struct dvb_frontend *fe, fe_modulation_t m) { struct s5h1409_state *state = fe->demodulator_priv; dprintk("%s(0x%08x)\n", __func__, m); switch (m) { case VSB_8: dprintk("%s() VSB_8\n", __func__); if (state->if_freq != S5H1409_VSB_IF_FREQ) s5h1409_set_if_freq(fe, S5H1409_VSB_IF_FREQ); s5h1409_writereg(state, 0xf4, 0); 
break; case QAM_64: case QAM_256: case QAM_AUTO: dprintk("%s() QAM_AUTO (64/256)\n", __func__); if (state->if_freq != S5H1409_QAM_IF_FREQ) s5h1409_set_if_freq(fe, S5H1409_QAM_IF_FREQ); s5h1409_writereg(state, 0xf4, 1); s5h1409_writereg(state, 0x85, 0x110); break; default: dprintk("%s() Invalid modulation\n", __func__); return -EINVAL; } state->current_modulation = m; s5h1409_softreset(fe); return 0; } static int s5h1409_i2c_gate_ctrl(struct dvb_frontend *fe, int enable) { struct s5h1409_state *state = fe->demodulator_priv; dprintk("%s(%d)\n", __func__, enable); if (enable) return s5h1409_writereg(state, 0xf3, 1); else return s5h1409_writereg(state, 0xf3, 0); } static int s5h1409_set_gpio(struct dvb_frontend *fe, int enable) { struct s5h1409_state *state = fe->demodulator_priv; dprintk("%s(%d)\n", __func__, enable); if (enable) return s5h1409_writereg(state, 0xe3, s5h1409_readreg(state, 0xe3) | 0x1100); else return s5h1409_writereg(state, 0xe3, s5h1409_readreg(state, 0xe3) & 0xfeff); } static int s5h1409_sleep(struct dvb_frontend *fe, int enable) { struct s5h1409_state *state = fe->demodulator_priv; dprintk("%s(%d)\n", __func__, enable); return s5h1409_writereg(state, 0xf2, enable); } static int s5h1409_register_reset(struct dvb_frontend *fe) { struct s5h1409_state *state = fe->demodulator_priv; dprintk("%s()\n", __func__); return s5h1409_writereg(state, 0xfa, 0); } static void s5h1409_set_qam_amhum_mode(struct dvb_frontend *fe) { struct s5h1409_state *state = fe->demodulator_priv; u16 reg; if (state->qam_state < QAM_STATE_INTERLEAVE_SET) { /* We should not perform amhum optimization until the interleave mode has been configured */ return; } if (state->qam_state == QAM_STATE_QAM_OPTIMIZED_L3) { /* We've already reached the maximum optimization level, so dont bother banging on the status registers */ return; } /* QAM EQ lock check */ reg = s5h1409_readreg(state, 0xf0); if ((reg >> 13) & 0x1) { reg &= 0xff; s5h1409_writereg(state, 0x96, 0x000c); if (reg < 0x68) { if 
(state->qam_state < QAM_STATE_QAM_OPTIMIZED_L3) { dprintk("%s() setting QAM state to OPT_L3\n", __func__); s5h1409_writereg(state, 0x93, 0x3130); s5h1409_writereg(state, 0x9e, 0x2836); state->qam_state = QAM_STATE_QAM_OPTIMIZED_L3; } } else { if (state->qam_state < QAM_STATE_QAM_OPTIMIZED_L2) { dprintk("%s() setting QAM state to OPT_L2\n", __func__); s5h1409_writereg(state, 0x93, 0x3332); s5h1409_writereg(state, 0x9e, 0x2c37); state->qam_state = QAM_STATE_QAM_OPTIMIZED_L2; } } } else { if (state->qam_state < QAM_STATE_QAM_OPTIMIZED_L1) { dprintk("%s() setting QAM state to OPT_L1\n", __func__); s5h1409_writereg(state, 0x96, 0x0008); s5h1409_writereg(state, 0x93, 0x3332); s5h1409_writereg(state, 0x9e, 0x2c37); state->qam_state = QAM_STATE_QAM_OPTIMIZED_L1; } } } static void s5h1409_set_qam_amhum_mode_legacy(struct dvb_frontend *fe) { struct s5h1409_state *state = fe->demodulator_priv; u16 reg; if (state->is_qam_locked) return; /* QAM EQ lock check */ reg = s5h1409_readreg(state, 0xf0); if ((reg >> 13) & 0x1) { state->is_qam_locked = 1; reg &= 0xff; s5h1409_writereg(state, 0x96, 0x00c); if ((reg < 0x38) || (reg > 0x68)) { s5h1409_writereg(state, 0x93, 0x3332); s5h1409_writereg(state, 0x9e, 0x2c37); } else { s5h1409_writereg(state, 0x93, 0x3130); s5h1409_writereg(state, 0x9e, 0x2836); } } else { s5h1409_writereg(state, 0x96, 0x0008); s5h1409_writereg(state, 0x93, 0x3332); s5h1409_writereg(state, 0x9e, 0x2c37); } } static void s5h1409_set_qam_interleave_mode(struct dvb_frontend *fe) { struct s5h1409_state *state = fe->demodulator_priv; u16 reg, reg1, reg2; if (state->qam_state >= QAM_STATE_INTERLEAVE_SET) { /* We've done the optimization already */ return; } reg = s5h1409_readreg(state, 0xf1); /* Master lock */ if ((reg >> 15) & 0x1) { if (state->qam_state == QAM_STATE_UNTUNED || state->qam_state == QAM_STATE_TUNING_STARTED) { dprintk("%s() setting QAM state to INTERLEAVE_SET\n", __func__); reg1 = s5h1409_readreg(state, 0xb2); reg2 = s5h1409_readreg(state, 0xad); 
s5h1409_writereg(state, 0x96, 0x0020); s5h1409_writereg(state, 0xad, (((reg1 & 0xf000) >> 4) | (reg2 & 0xf0ff))); state->qam_state = QAM_STATE_INTERLEAVE_SET; } } else { if (state->qam_state == QAM_STATE_UNTUNED) { dprintk("%s() setting QAM state to TUNING_STARTED\n", __func__); s5h1409_writereg(state, 0x96, 0x08); s5h1409_writereg(state, 0xab, s5h1409_readreg(state, 0xab) | 0x1001); state->qam_state = QAM_STATE_TUNING_STARTED; } } } static void s5h1409_set_qam_interleave_mode_legacy(struct dvb_frontend *fe) { struct s5h1409_state *state = fe->demodulator_priv; u16 reg, reg1, reg2; reg = s5h1409_readreg(state, 0xf1); /* Master lock */ if ((reg >> 15) & 0x1) { if (state->qam_state != 2) { state->qam_state = 2; reg1 = s5h1409_readreg(state, 0xb2); reg2 = s5h1409_readreg(state, 0xad); s5h1409_writereg(state, 0x96, 0x20); s5h1409_writereg(state, 0xad, (((reg1 & 0xf000) >> 4) | (reg2 & 0xf0ff))); s5h1409_writereg(state, 0xab, s5h1409_readreg(state, 0xab) & 0xeffe); } } else { if (state->qam_state != 1) { state->qam_state = 1; s5h1409_writereg(state, 0x96, 0x08); s5h1409_writereg(state, 0xab, s5h1409_readreg(state, 0xab) | 0x1001); } } } /* Talk to the demod, set the FEC, GUARD, QAM settings etc */ static int s5h1409_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *p) { struct s5h1409_state *state = fe->demodulator_priv; dprintk("%s(frequency=%d)\n", __func__, p->frequency); s5h1409_softreset(fe); state->current_frequency = p->frequency; s5h1409_enable_modulation(fe, p->u.vsb.modulation); if (fe->ops.tuner_ops.set_params) { if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); fe->ops.tuner_ops.set_params(fe, p); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } /* Issue a reset to the demod so it knows to resync against the newly tuned frequency */ s5h1409_softreset(fe); /* Optimize the demod for QAM */ if (state->current_modulation != VSB_8) { /* This almost certainly applies to all boards, but for now only do it for the HVR-1600. 
Once the other boards are tested, the "legacy" versions can just go away */ if (state->config->hvr1600_opt == S5H1409_HVR1600_OPTIMIZE) { s5h1409_set_qam_interleave_mode(fe); s5h1409_set_qam_amhum_mode(fe); } else { s5h1409_set_qam_amhum_mode_legacy(fe); s5h1409_set_qam_interleave_mode_legacy(fe); } } return 0; } static int s5h1409_set_mpeg_timing(struct dvb_frontend *fe, int mode) { struct s5h1409_state *state = fe->demodulator_priv; u16 val; dprintk("%s(%d)\n", __func__, mode); val = s5h1409_readreg(state, 0xac) & 0xcfff; switch (mode) { case S5H1409_MPEGTIMING_CONTINOUS_INVERTING_CLOCK: val |= 0x0000; break; case S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK: dprintk("%s(%d) Mode1 or Defaulting\n", __func__, mode); val |= 0x1000; break; case S5H1409_MPEGTIMING_NONCONTINOUS_INVERTING_CLOCK: val |= 0x2000; break; case S5H1409_MPEGTIMING_NONCONTINOUS_NONINVERTING_CLOCK: val |= 0x3000; break; default: return -EINVAL; } /* Configure MPEG Signal Timing charactistics */ return s5h1409_writereg(state, 0xac, val); } /* Reset the demod hardware and reset all of the configuration registers to a default state. */ static int s5h1409_init(struct dvb_frontend *fe) { int i; struct s5h1409_state *state = fe->demodulator_priv; dprintk("%s()\n", __func__); s5h1409_sleep(fe, 0); s5h1409_register_reset(fe); for (i = 0; i < ARRAY_SIZE(init_tab); i++) s5h1409_writereg(state, init_tab[i].reg, init_tab[i].data); /* The datasheet says that after initialisation, VSB is default */ state->current_modulation = VSB_8; /* Optimize for the HVR-1600 if appropriate. Note that some of these may get folded into the generic case after testing with other devices */ if (state->config->hvr1600_opt == S5H1409_HVR1600_OPTIMIZE) { /* VSB AGC REF */ s5h1409_writereg(state, 0x09, 0x0050); /* Unknown but Windows driver does it... 
*/ s5h1409_writereg(state, 0x21, 0x0001); s5h1409_writereg(state, 0x50, 0x030e); /* QAM AGC REF */ s5h1409_writereg(state, 0x82, 0x0800); } if (state->config->output_mode == S5H1409_SERIAL_OUTPUT) s5h1409_writereg(state, 0xab, s5h1409_readreg(state, 0xab) | 0x100); /* Serial */ else s5h1409_writereg(state, 0xab, s5h1409_readreg(state, 0xab) & 0xfeff); /* Parallel */ s5h1409_set_spectralinversion(fe, state->config->inversion); s5h1409_set_if_freq(fe, state->if_freq); s5h1409_set_gpio(fe, state->config->gpio); s5h1409_set_mpeg_timing(fe, state->config->mpeg_timing); s5h1409_softreset(fe); /* Note: Leaving the I2C gate closed. */ s5h1409_i2c_gate_ctrl(fe, 0); return 0; } static int s5h1409_read_status(struct dvb_frontend *fe, fe_status_t *status) { struct s5h1409_state *state = fe->demodulator_priv; u16 reg; u32 tuner_status = 0; *status = 0; /* Optimize the demod for QAM */ if (state->current_modulation != VSB_8) { /* This almost certainly applies to all boards, but for now only do it for the HVR-1600. 
Once the other boards are tested, the "legacy" versions can just go away */ if (state->config->hvr1600_opt == S5H1409_HVR1600_OPTIMIZE) { s5h1409_set_qam_interleave_mode(fe); s5h1409_set_qam_amhum_mode(fe); } } /* Get the demodulator status */ reg = s5h1409_readreg(state, 0xf1); if (reg & 0x1000) *status |= FE_HAS_VITERBI; if (reg & 0x8000) *status |= FE_HAS_LOCK | FE_HAS_SYNC; switch (state->config->status_mode) { case S5H1409_DEMODLOCKING: if (*status & FE_HAS_VITERBI) *status |= FE_HAS_CARRIER | FE_HAS_SIGNAL; break; case S5H1409_TUNERLOCKING: /* Get the tuner status */ if (fe->ops.tuner_ops.get_status) { if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); fe->ops.tuner_ops.get_status(fe, &tuner_status); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } if (tuner_status) *status |= FE_HAS_CARRIER | FE_HAS_SIGNAL; break; } dprintk("%s() status 0x%08x\n", __func__, *status); return 0; } static int s5h1409_qam256_lookup_snr(struct dvb_frontend *fe, u16 *snr, u16 v) { int i, ret = -EINVAL; dprintk("%s()\n", __func__); for (i = 0; i < ARRAY_SIZE(qam256_snr_tab); i++) { if (v < qam256_snr_tab[i].val) { *snr = qam256_snr_tab[i].data; ret = 0; break; } } return ret; } static int s5h1409_qam64_lookup_snr(struct dvb_frontend *fe, u16 *snr, u16 v) { int i, ret = -EINVAL; dprintk("%s()\n", __func__); for (i = 0; i < ARRAY_SIZE(qam64_snr_tab); i++) { if (v < qam64_snr_tab[i].val) { *snr = qam64_snr_tab[i].data; ret = 0; break; } } return ret; } static int s5h1409_vsb_lookup_snr(struct dvb_frontend *fe, u16 *snr, u16 v) { int i, ret = -EINVAL; dprintk("%s()\n", __func__); for (i = 0; i < ARRAY_SIZE(vsb_snr_tab); i++) { if (v > vsb_snr_tab[i].val) { *snr = vsb_snr_tab[i].data; ret = 0; break; } } dprintk("%s() snr=%d\n", __func__, *snr); return ret; } static int s5h1409_read_snr(struct dvb_frontend *fe, u16 *snr) { struct s5h1409_state *state = fe->demodulator_priv; u16 reg; dprintk("%s()\n", __func__); switch (state->current_modulation) { case QAM_64: reg = 
s5h1409_readreg(state, 0xf0) & 0xff; return s5h1409_qam64_lookup_snr(fe, snr, reg); case QAM_256: reg = s5h1409_readreg(state, 0xf0) & 0xff; return s5h1409_qam256_lookup_snr(fe, snr, reg); case VSB_8: reg = s5h1409_readreg(state, 0xf1) & 0x3ff; return s5h1409_vsb_lookup_snr(fe, snr, reg); default: break; } return -EINVAL; } static int s5h1409_read_signal_strength(struct dvb_frontend *fe, u16 *signal_strength) { return s5h1409_read_snr(fe, signal_strength); } static int s5h1409_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) { struct s5h1409_state *state = fe->demodulator_priv; *ucblocks = s5h1409_readreg(state, 0xb5); return 0; } static int s5h1409_read_ber(struct dvb_frontend *fe, u32 *ber) { return s5h1409_read_ucblocks(fe, ber); } static int s5h1409_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *p) { struct s5h1409_state *state = fe->demodulator_priv; p->frequency = state->current_frequency; p->u.vsb.modulation = state->current_modulation; return 0; } static int s5h1409_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *tune) { tune->min_delay_ms = 1000; return 0; } static void s5h1409_release(struct dvb_frontend *fe) { struct s5h1409_state *state = fe->demodulator_priv; kfree(state); } static struct dvb_frontend_ops s5h1409_ops; struct dvb_frontend *s5h1409_attach(const struct s5h1409_config *config, struct i2c_adapter *i2c) { struct s5h1409_state *state = NULL; u16 reg; /* allocate memory for the internal state */ state = kzalloc(sizeof(struct s5h1409_state), GFP_KERNEL); if (state == NULL) goto error; /* setup the state */ state->config = config; state->i2c = i2c; state->current_modulation = 0; state->if_freq = S5H1409_VSB_IF_FREQ; /* check if the demod exists */ reg = s5h1409_readreg(state, 0x04); if ((reg != 0x0066) && (reg != 0x007f)) goto error; /* create dvb_frontend */ memcpy(&state->frontend.ops, &s5h1409_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; if 
(s5h1409_init(&state->frontend) != 0) { printk(KERN_ERR "%s: Failed to initialize correctly\n", __func__); goto error; } /* Note: Leaving the I2C gate open here. */ s5h1409_i2c_gate_ctrl(&state->frontend, 1); return &state->frontend; error: kfree(state); return NULL; } EXPORT_SYMBOL(s5h1409_attach); static struct dvb_frontend_ops s5h1409_ops = { .info = { .name = "Samsung S5H1409 QAM/8VSB Frontend", .type = FE_ATSC, .frequency_min = 54000000, .frequency_max = 858000000, .frequency_stepsize = 62500, .caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB }, .init = s5h1409_init, .i2c_gate_ctrl = s5h1409_i2c_gate_ctrl, .set_frontend = s5h1409_set_frontend, .get_frontend = s5h1409_get_frontend, .get_tune_settings = s5h1409_get_tune_settings, .read_status = s5h1409_read_status, .read_ber = s5h1409_read_ber, .read_signal_strength = s5h1409_read_signal_strength, .read_snr = s5h1409_read_snr, .read_ucblocks = s5h1409_read_ucblocks, .release = s5h1409_release, }; MODULE_DESCRIPTION("Samsung S5H1409 QAM-B/ATSC Demodulator driver"); MODULE_AUTHOR("Steven Toth"); MODULE_LICENSE("GPL"); /* * Local variables: * c-basic-offset: 8 */
gpl-2.0
netico-solutions/linux-amxx
arch/sh/mm/alignment.c
4559
4750
/* * Alignment access counters and corresponding user-space interfaces. * * Copyright (C) 2009 ST Microelectronics * Copyright (C) 2009 - 2010 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/seq_file.h> #include <linux/proc_fs.h> #include <linux/uaccess.h> #include <linux/ratelimit.h> #include <asm/alignment.h> #include <asm/processor.h> static unsigned long se_user; static unsigned long se_sys; static unsigned long se_half; static unsigned long se_word; static unsigned long se_dword; static unsigned long se_multi; /* bitfield: 1: warn 2: fixup 4: signal -> combinations 2|4 && 1|2|4 are not valid! */ static int se_usermode = UM_WARN | UM_FIXUP; /* 0: no warning 1: print a warning message, disabled by default */ static int se_kernmode_warn; core_param(alignment, se_usermode, int, 0600); void inc_unaligned_byte_access(void) { se_half++; } void inc_unaligned_word_access(void) { se_word++; } void inc_unaligned_dword_access(void) { se_dword++; } void inc_unaligned_multi_access(void) { se_multi++; } void inc_unaligned_user_access(void) { se_user++; } void inc_unaligned_kernel_access(void) { se_sys++; } /* * This defaults to the global policy which can be set from the command * line, while processes can overload their preferences via prctl(). 
*/ unsigned int unaligned_user_action(void) { unsigned int action = se_usermode; if (current->thread.flags & SH_THREAD_UAC_SIGBUS) { action &= ~UM_FIXUP; action |= UM_SIGNAL; } if (current->thread.flags & SH_THREAD_UAC_NOPRINT) action &= ~UM_WARN; return action; } int get_unalign_ctl(struct task_struct *tsk, unsigned long addr) { return put_user(tsk->thread.flags & SH_THREAD_UAC_MASK, (unsigned int __user *)addr); } int set_unalign_ctl(struct task_struct *tsk, unsigned int val) { tsk->thread.flags = (tsk->thread.flags & ~SH_THREAD_UAC_MASK) | (val & SH_THREAD_UAC_MASK); return 0; } void unaligned_fixups_notify(struct task_struct *tsk, insn_size_t insn, struct pt_regs *regs) { if (user_mode(regs) && (se_usermode & UM_WARN)) pr_notice_ratelimited("Fixing up unaligned userspace access " "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", tsk->comm, task_pid_nr(tsk), (void *)instruction_pointer(regs), insn); else if (se_kernmode_warn) pr_notice_ratelimited("Fixing up unaligned kernel access " "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", tsk->comm, task_pid_nr(tsk), (void *)instruction_pointer(regs), insn); } static const char *se_usermode_action[] = { "ignored", "warn", "fixup", "fixup+warn", "signal", "signal+warn" }; static int alignment_proc_show(struct seq_file *m, void *v) { seq_printf(m, "User:\t\t%lu\n", se_user); seq_printf(m, "System:\t\t%lu\n", se_sys); seq_printf(m, "Half:\t\t%lu\n", se_half); seq_printf(m, "Word:\t\t%lu\n", se_word); seq_printf(m, "DWord:\t\t%lu\n", se_dword); seq_printf(m, "Multi:\t\t%lu\n", se_multi); seq_printf(m, "User faults:\t%i (%s)\n", se_usermode, se_usermode_action[se_usermode]); seq_printf(m, "Kernel faults:\t%i (fixup%s)\n", se_kernmode_warn, se_kernmode_warn ? 
"+warn" : ""); return 0; } static int alignment_proc_open(struct inode *inode, struct file *file) { return single_open(file, alignment_proc_show, NULL); } static ssize_t alignment_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { int *data = PDE_DATA(file_inode(file)); char mode; if (count > 0) { if (get_user(mode, buffer)) return -EFAULT; if (mode >= '0' && mode <= '5') *data = mode - '0'; } return count; } static const struct file_operations alignment_proc_fops = { .owner = THIS_MODULE, .open = alignment_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = alignment_proc_write, }; /* * This needs to be done after sysctl_init, otherwise sys/ will be * overwritten. Actually, this shouldn't be in sys/ at all since * it isn't a sysctl, and it doesn't contain sysctl information. * We now locate it in /proc/cpu/alignment instead. */ static int __init alignment_init(void) { struct proc_dir_entry *dir, *res; dir = proc_mkdir("cpu", NULL); if (!dir) return -ENOMEM; res = proc_create_data("alignment", S_IWUSR | S_IRUGO, dir, &alignment_proc_fops, &se_usermode); if (!res) return -ENOMEM; res = proc_create_data("kernel_alignment", S_IWUSR | S_IRUGO, dir, &alignment_proc_fops, &se_kernmode_warn); if (!res) return -ENOMEM; return 0; } fs_initcall(alignment_init);
gpl-2.0
nimon/GPSense_1
drivers/staging/rts5139/rts51x_fop.c
5071
7100
/* Driver for Realtek RTS51xx USB card reader * * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, see <http://www.gnu.org/licenses/>. * * Author: * wwang (wei_wang@realsil.com.cn) * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China * Maintainer: * Edwin Rong (edwin_rong@realsil.com.cn) * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China */ #include "rts51x.h" #ifdef SUPPORT_FILE_OP #include <linux/types.h> #include <linux/stat.h> #include <linux/kref.h> #include <linux/slab.h> #include "rts51x_chip.h" #include "rts51x_card.h" #include "rts51x_fop.h" #include "sd_cprm.h" #include "rts51x.h" #define RTS5139_IOC_MAGIC 0x39 #define RTS5139_IOC_SD_DIRECT _IOWR(RTS5139_IOC_MAGIC, 0xA0, int) #define RTS5139_IOC_SD_GET_RSP _IOWR(RTS5139_IOC_MAGIC, 0xA1, int) static int rts51x_sd_direct_cmnd(struct rts51x_chip *chip, struct sd_direct_cmnd *cmnd) { int retval; u8 dir, cmd12, standby, acmd, cmd_idx, rsp_code; u8 *buf; u32 arg, len; dir = (cmnd->cmnd[0] >> 3) & 0x03; cmd12 = (cmnd->cmnd[0] >> 2) & 0x01; standby = (cmnd->cmnd[0] >> 1) & 0x01; acmd = cmnd->cmnd[0] & 0x01; cmd_idx = cmnd->cmnd[1]; arg = ((u32) (cmnd->cmnd[2]) << 24) | ((u32) (cmnd->cmnd[3]) << 16) | ((u32) (cmnd->cmnd[4]) << 8) | cmnd->cmnd[5]; len = ((u32) (cmnd->cmnd[6]) << 16) | ((u32) (cmnd->cmnd[7]) << 8) | cmnd->cmnd[8]; rsp_code = cmnd->cmnd[9]; if (dir) { if (!cmnd->buf || (cmnd->buf_len < len)) 
TRACE_RET(chip, STATUS_FAIL); } switch (dir) { case 0: /* No data */ retval = ext_sd_execute_no_data(chip, chip->card2lun[SD_CARD], cmd_idx, standby, acmd, rsp_code, arg); if (retval != TRANSPORT_GOOD) TRACE_RET(chip, STATUS_FAIL); break; case 1: /* Read from card */ buf = kmalloc(cmnd->buf_len, GFP_KERNEL); if (!buf) TRACE_RET(chip, STATUS_NOMEM); retval = ext_sd_execute_read_data(chip, chip->card2lun[SD_CARD], cmd_idx, cmd12, standby, acmd, rsp_code, arg, len, buf, cmnd->buf_len, 0); if (retval != TRANSPORT_GOOD) { kfree(buf); TRACE_RET(chip, STATUS_FAIL); } retval = copy_to_user((void *)cmnd->buf, (void *)buf, cmnd->buf_len); if (retval) { kfree(buf); TRACE_RET(chip, STATUS_NOMEM); } kfree(buf); break; case 2: /* Write to card */ buf = kmalloc(cmnd->buf_len, GFP_KERNEL); if (!buf) TRACE_RET(chip, STATUS_NOMEM); retval = copy_from_user((void *)buf, (void *)cmnd->buf, cmnd->buf_len); if (retval) { kfree(buf); TRACE_RET(chip, STATUS_NOMEM); } retval = ext_sd_execute_write_data(chip, chip->card2lun[SD_CARD], cmd_idx, cmd12, standby, acmd, rsp_code, arg, len, buf, cmnd->buf_len, 0); if (retval != TRANSPORT_GOOD) { kfree(buf); TRACE_RET(chip, STATUS_FAIL); } kfree(buf); break; default: TRACE_RET(chip, STATUS_FAIL); } return STATUS_SUCCESS; } static int rts51x_sd_get_rsp(struct rts51x_chip *chip, struct sd_rsp *rsp) { struct sd_info *sd_card = &(chip->sd_card); int count = 0, retval; if (sd_card->pre_cmd_err) { sd_card->pre_cmd_err = 0; TRACE_RET(chip, STATUS_FAIL); } if (sd_card->last_rsp_type == SD_RSP_TYPE_R0) TRACE_RET(chip, STATUS_FAIL); else if (sd_card->last_rsp_type == SD_RSP_TYPE_R2) count = (rsp->rsp_len < 17) ? rsp->rsp_len : 17; else count = (rsp->rsp_len < 6) ? 
rsp->rsp_len : 6; retval = copy_to_user((void *)rsp->rsp, (void *)sd_card->rsp, count); if (retval) TRACE_RET(chip, STATUS_NOMEM); RTS51X_DEBUGP("Response length: %d\n", count); RTS51X_DEBUGP("Response: 0x%x 0x%x 0x%x 0x%x\n", sd_card->rsp[0], sd_card->rsp[1], sd_card->rsp[2], sd_card->rsp[3]); return STATUS_SUCCESS; } int rts51x_open(struct inode *inode, struct file *filp) { struct rts51x_chip *chip; struct usb_interface *interface; int subminor; int retval = 0; subminor = iminor(inode); interface = usb_find_interface(&rts51x_driver, subminor); if (!interface) { RTS51X_DEBUGP("%s - error, can't find device for minor %d\n", __func__, subminor); retval = -ENODEV; goto exit; } chip = (struct rts51x_chip *)usb_get_intfdata(interface); if (!chip) { RTS51X_DEBUGP("Can't find chip\n"); retval = -ENODEV; goto exit; } /* Increase our reference to the host */ scsi_host_get(rts51x_to_host(chip)); /* lock the device pointers */ mutex_lock(&(chip->usb->dev_mutex)); /* save our object in the file's private structure */ filp->private_data = chip; /* unlock the device pointers */ mutex_unlock(&chip->usb->dev_mutex); exit: return retval; } int rts51x_release(struct inode *inode, struct file *filp) { struct rts51x_chip *chip; chip = (struct rts51x_chip *)filp->private_data; if (chip == NULL) return -ENODEV; /* Drop our reference to the host; the SCSI core will free it * (and "chip" along with it) when the refcount becomes 0. 
*/ scsi_host_put(rts51x_to_host(chip)); return 0; } ssize_t rts51x_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos) { return 0; } ssize_t rts51x_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos) { return 0; } #if 0 /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) */ int rts51x_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) #else long rts51x_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) #endif { struct rts51x_chip *chip; struct sd_direct_cmnd cmnd; struct sd_rsp rsp; int retval = 0; chip = (struct rts51x_chip *)filp->private_data; if (chip == NULL) return -ENODEV; /* lock the device pointers */ mutex_lock(&(chip->usb->dev_mutex)); switch (cmd) { case RTS5139_IOC_SD_DIRECT: retval = copy_from_user((void *)&cmnd, (void *)arg, sizeof(struct sd_direct_cmnd)); if (retval) { retval = -ENOMEM; TRACE_GOTO(chip, exit); } retval = rts51x_sd_direct_cmnd(chip, &cmnd); if (retval != STATUS_SUCCESS) { retval = -EIO; TRACE_GOTO(chip, exit); } break; case RTS5139_IOC_SD_GET_RSP: retval = copy_from_user((void *)&rsp, (void *)arg, sizeof(struct sd_rsp)); if (retval) { retval = -ENOMEM; TRACE_GOTO(chip, exit); } retval = rts51x_sd_get_rsp(chip, &rsp); if (retval != STATUS_SUCCESS) { retval = -EIO; TRACE_GOTO(chip, exit); } break; default: break; } exit: /* unlock the device pointers */ mutex_unlock(&chip->usb->dev_mutex); return retval; } #endif
gpl-2.0
ali-filth/android_kernel_samsung_msm8226
drivers/watchdog/davinci_wdt.c
5071
6806
/* * drivers/char/watchdog/davinci_wdt.c * * Watchdog driver for DaVinci DM644x/DM646x processors * * Copyright (C) 2006 Texas Instruments. * * 2007 (c) MontaVista Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/init.h> #include <linux/bitops.h> #include <linux/platform_device.h> #include <linux/spinlock.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/device.h> #include <linux/clk.h> #include <linux/slab.h> #define MODULE_NAME "DAVINCI-WDT: " #define DEFAULT_HEARTBEAT 60 #define MAX_HEARTBEAT 600 /* really the max margin is 264/27MHz*/ /* Timer register set definition */ #define PID12 (0x0) #define EMUMGT (0x4) #define TIM12 (0x10) #define TIM34 (0x14) #define PRD12 (0x18) #define PRD34 (0x1C) #define TCR (0x20) #define TGCR (0x24) #define WDTCR (0x28) /* TCR bit definitions */ #define ENAMODE12_DISABLED (0 << 6) #define ENAMODE12_ONESHOT (1 << 6) #define ENAMODE12_PERIODIC (2 << 6) /* TGCR bit definitions */ #define TIM12RS_UNRESET (1 << 0) #define TIM34RS_UNRESET (1 << 1) #define TIMMODE_64BIT_WDOG (2 << 2) /* WDTCR bit definitions */ #define WDEN (1 << 14) #define WDFLAG (1 << 15) #define WDKEY_SEQ0 (0xa5c6 << 16) #define WDKEY_SEQ1 (0xda7e << 16) static int heartbeat = DEFAULT_HEARTBEAT; static DEFINE_SPINLOCK(io_lock); static unsigned long wdt_status; #define WDT_IN_USE 0 #define WDT_OK_TO_CLOSE 1 #define WDT_REGION_INITED 2 #define WDT_DEVICE_INITED 3 static struct resource *wdt_mem; static void __iomem *wdt_base; struct clk *wdt_clk; static void wdt_service(void) { spin_lock(&io_lock); /* put watchdog in service state */ iowrite32(WDKEY_SEQ0, wdt_base + WDTCR); /* put watchdog in active 
state */ iowrite32(WDKEY_SEQ1, wdt_base + WDTCR); spin_unlock(&io_lock); } static void wdt_enable(void) { u32 tgcr; u32 timer_margin; unsigned long wdt_freq; wdt_freq = clk_get_rate(wdt_clk); spin_lock(&io_lock); /* disable, internal clock source */ iowrite32(0, wdt_base + TCR); /* reset timer, set mode to 64-bit watchdog, and unreset */ iowrite32(0, wdt_base + TGCR); tgcr = TIMMODE_64BIT_WDOG | TIM12RS_UNRESET | TIM34RS_UNRESET; iowrite32(tgcr, wdt_base + TGCR); /* clear counter regs */ iowrite32(0, wdt_base + TIM12); iowrite32(0, wdt_base + TIM34); /* set timeout period */ timer_margin = (((u64)heartbeat * wdt_freq) & 0xffffffff); iowrite32(timer_margin, wdt_base + PRD12); timer_margin = (((u64)heartbeat * wdt_freq) >> 32); iowrite32(timer_margin, wdt_base + PRD34); /* enable run continuously */ iowrite32(ENAMODE12_PERIODIC, wdt_base + TCR); /* Once the WDT is in pre-active state write to * TIM12, TIM34, PRD12, PRD34, TCR, TGCR, WDTCR are * write protected (except for the WDKEY field) */ /* put watchdog in pre-active state */ iowrite32(WDKEY_SEQ0 | WDEN, wdt_base + WDTCR); /* put watchdog in active state */ iowrite32(WDKEY_SEQ1 | WDEN, wdt_base + WDTCR); spin_unlock(&io_lock); } static int davinci_wdt_open(struct inode *inode, struct file *file) { if (test_and_set_bit(WDT_IN_USE, &wdt_status)) return -EBUSY; wdt_enable(); return nonseekable_open(inode, file); } static ssize_t davinci_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos) { if (len) wdt_service(); return len; } static const struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING, .identity = "DaVinci Watchdog", }; static long davinci_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret = -ENOTTY; switch (cmd) { case WDIOC_GETSUPPORT: ret = copy_to_user((struct watchdog_info *)arg, &ident, sizeof(ident)) ? 
-EFAULT : 0; break; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: ret = put_user(0, (int *)arg); break; case WDIOC_KEEPALIVE: wdt_service(); ret = 0; break; case WDIOC_GETTIMEOUT: ret = put_user(heartbeat, (int *)arg); break; } return ret; } static int davinci_wdt_release(struct inode *inode, struct file *file) { wdt_service(); clear_bit(WDT_IN_USE, &wdt_status); return 0; } static const struct file_operations davinci_wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = davinci_wdt_write, .unlocked_ioctl = davinci_wdt_ioctl, .open = davinci_wdt_open, .release = davinci_wdt_release, }; static struct miscdevice davinci_wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &davinci_wdt_fops, }; static int __devinit davinci_wdt_probe(struct platform_device *pdev) { int ret = 0, size; struct device *dev = &pdev->dev; wdt_clk = clk_get(dev, NULL); if (WARN_ON(IS_ERR(wdt_clk))) return PTR_ERR(wdt_clk); clk_enable(wdt_clk); if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT) heartbeat = DEFAULT_HEARTBEAT; dev_info(dev, "heartbeat %d sec\n", heartbeat); wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (wdt_mem == NULL) { dev_err(dev, "failed to get memory region resource\n"); return -ENOENT; } size = resource_size(wdt_mem); if (!request_mem_region(wdt_mem->start, size, pdev->name)) { dev_err(dev, "failed to get memory region\n"); return -ENOENT; } wdt_base = ioremap(wdt_mem->start, size); if (!wdt_base) { dev_err(dev, "failed to map memory region\n"); release_mem_region(wdt_mem->start, size); wdt_mem = NULL; return -ENOMEM; } ret = misc_register(&davinci_wdt_miscdev); if (ret < 0) { dev_err(dev, "cannot register misc device\n"); release_mem_region(wdt_mem->start, size); wdt_mem = NULL; } else { set_bit(WDT_DEVICE_INITED, &wdt_status); } iounmap(wdt_base); return ret; } static int __devexit davinci_wdt_remove(struct platform_device *pdev) { misc_deregister(&davinci_wdt_miscdev); if (wdt_mem) { release_mem_region(wdt_mem->start, 
resource_size(wdt_mem)); wdt_mem = NULL; } clk_disable(wdt_clk); clk_put(wdt_clk); return 0; } static struct platform_driver platform_wdt_driver = { .driver = { .name = "watchdog", .owner = THIS_MODULE, }, .probe = davinci_wdt_probe, .remove = __devexit_p(davinci_wdt_remove), }; module_platform_driver(platform_wdt_driver); MODULE_AUTHOR("Texas Instruments"); MODULE_DESCRIPTION("DaVinci Watchdog Driver"); module_param(heartbeat, int, 0); MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat period in seconds from 1 to " __MODULE_STRING(MAX_HEARTBEAT) ", default " __MODULE_STRING(DEFAULT_HEARTBEAT)); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:watchdog");
gpl-2.0
paloda/android_kernel_htc_evita
drivers/staging/rts5139/rts51x_fop.c
5071
7100
/* Driver for Realtek RTS51xx USB card reader * * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, see <http://www.gnu.org/licenses/>. * * Author: * wwang (wei_wang@realsil.com.cn) * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China * Maintainer: * Edwin Rong (edwin_rong@realsil.com.cn) * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China */ #include "rts51x.h" #ifdef SUPPORT_FILE_OP #include <linux/types.h> #include <linux/stat.h> #include <linux/kref.h> #include <linux/slab.h> #include "rts51x_chip.h" #include "rts51x_card.h" #include "rts51x_fop.h" #include "sd_cprm.h" #include "rts51x.h" #define RTS5139_IOC_MAGIC 0x39 #define RTS5139_IOC_SD_DIRECT _IOWR(RTS5139_IOC_MAGIC, 0xA0, int) #define RTS5139_IOC_SD_GET_RSP _IOWR(RTS5139_IOC_MAGIC, 0xA1, int) static int rts51x_sd_direct_cmnd(struct rts51x_chip *chip, struct sd_direct_cmnd *cmnd) { int retval; u8 dir, cmd12, standby, acmd, cmd_idx, rsp_code; u8 *buf; u32 arg, len; dir = (cmnd->cmnd[0] >> 3) & 0x03; cmd12 = (cmnd->cmnd[0] >> 2) & 0x01; standby = (cmnd->cmnd[0] >> 1) & 0x01; acmd = cmnd->cmnd[0] & 0x01; cmd_idx = cmnd->cmnd[1]; arg = ((u32) (cmnd->cmnd[2]) << 24) | ((u32) (cmnd->cmnd[3]) << 16) | ((u32) (cmnd->cmnd[4]) << 8) | cmnd->cmnd[5]; len = ((u32) (cmnd->cmnd[6]) << 16) | ((u32) (cmnd->cmnd[7]) << 8) | cmnd->cmnd[8]; rsp_code = cmnd->cmnd[9]; if (dir) { if (!cmnd->buf || (cmnd->buf_len < len)) 
TRACE_RET(chip, STATUS_FAIL); } switch (dir) { case 0: /* No data */ retval = ext_sd_execute_no_data(chip, chip->card2lun[SD_CARD], cmd_idx, standby, acmd, rsp_code, arg); if (retval != TRANSPORT_GOOD) TRACE_RET(chip, STATUS_FAIL); break; case 1: /* Read from card */ buf = kmalloc(cmnd->buf_len, GFP_KERNEL); if (!buf) TRACE_RET(chip, STATUS_NOMEM); retval = ext_sd_execute_read_data(chip, chip->card2lun[SD_CARD], cmd_idx, cmd12, standby, acmd, rsp_code, arg, len, buf, cmnd->buf_len, 0); if (retval != TRANSPORT_GOOD) { kfree(buf); TRACE_RET(chip, STATUS_FAIL); } retval = copy_to_user((void *)cmnd->buf, (void *)buf, cmnd->buf_len); if (retval) { kfree(buf); TRACE_RET(chip, STATUS_NOMEM); } kfree(buf); break; case 2: /* Write to card */ buf = kmalloc(cmnd->buf_len, GFP_KERNEL); if (!buf) TRACE_RET(chip, STATUS_NOMEM); retval = copy_from_user((void *)buf, (void *)cmnd->buf, cmnd->buf_len); if (retval) { kfree(buf); TRACE_RET(chip, STATUS_NOMEM); } retval = ext_sd_execute_write_data(chip, chip->card2lun[SD_CARD], cmd_idx, cmd12, standby, acmd, rsp_code, arg, len, buf, cmnd->buf_len, 0); if (retval != TRANSPORT_GOOD) { kfree(buf); TRACE_RET(chip, STATUS_FAIL); } kfree(buf); break; default: TRACE_RET(chip, STATUS_FAIL); } return STATUS_SUCCESS; } static int rts51x_sd_get_rsp(struct rts51x_chip *chip, struct sd_rsp *rsp) { struct sd_info *sd_card = &(chip->sd_card); int count = 0, retval; if (sd_card->pre_cmd_err) { sd_card->pre_cmd_err = 0; TRACE_RET(chip, STATUS_FAIL); } if (sd_card->last_rsp_type == SD_RSP_TYPE_R0) TRACE_RET(chip, STATUS_FAIL); else if (sd_card->last_rsp_type == SD_RSP_TYPE_R2) count = (rsp->rsp_len < 17) ? rsp->rsp_len : 17; else count = (rsp->rsp_len < 6) ? 
rsp->rsp_len : 6; retval = copy_to_user((void *)rsp->rsp, (void *)sd_card->rsp, count); if (retval) TRACE_RET(chip, STATUS_NOMEM); RTS51X_DEBUGP("Response length: %d\n", count); RTS51X_DEBUGP("Response: 0x%x 0x%x 0x%x 0x%x\n", sd_card->rsp[0], sd_card->rsp[1], sd_card->rsp[2], sd_card->rsp[3]); return STATUS_SUCCESS; } int rts51x_open(struct inode *inode, struct file *filp) { struct rts51x_chip *chip; struct usb_interface *interface; int subminor; int retval = 0; subminor = iminor(inode); interface = usb_find_interface(&rts51x_driver, subminor); if (!interface) { RTS51X_DEBUGP("%s - error, can't find device for minor %d\n", __func__, subminor); retval = -ENODEV; goto exit; } chip = (struct rts51x_chip *)usb_get_intfdata(interface); if (!chip) { RTS51X_DEBUGP("Can't find chip\n"); retval = -ENODEV; goto exit; } /* Increase our reference to the host */ scsi_host_get(rts51x_to_host(chip)); /* lock the device pointers */ mutex_lock(&(chip->usb->dev_mutex)); /* save our object in the file's private structure */ filp->private_data = chip; /* unlock the device pointers */ mutex_unlock(&chip->usb->dev_mutex); exit: return retval; } int rts51x_release(struct inode *inode, struct file *filp) { struct rts51x_chip *chip; chip = (struct rts51x_chip *)filp->private_data; if (chip == NULL) return -ENODEV; /* Drop our reference to the host; the SCSI core will free it * (and "chip" along with it) when the refcount becomes 0. 
*/ scsi_host_put(rts51x_to_host(chip)); return 0; } ssize_t rts51x_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos) { return 0; } ssize_t rts51x_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos) { return 0; } #if 0 /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) */ int rts51x_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) #else long rts51x_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) #endif { struct rts51x_chip *chip; struct sd_direct_cmnd cmnd; struct sd_rsp rsp; int retval = 0; chip = (struct rts51x_chip *)filp->private_data; if (chip == NULL) return -ENODEV; /* lock the device pointers */ mutex_lock(&(chip->usb->dev_mutex)); switch (cmd) { case RTS5139_IOC_SD_DIRECT: retval = copy_from_user((void *)&cmnd, (void *)arg, sizeof(struct sd_direct_cmnd)); if (retval) { retval = -ENOMEM; TRACE_GOTO(chip, exit); } retval = rts51x_sd_direct_cmnd(chip, &cmnd); if (retval != STATUS_SUCCESS) { retval = -EIO; TRACE_GOTO(chip, exit); } break; case RTS5139_IOC_SD_GET_RSP: retval = copy_from_user((void *)&rsp, (void *)arg, sizeof(struct sd_rsp)); if (retval) { retval = -ENOMEM; TRACE_GOTO(chip, exit); } retval = rts51x_sd_get_rsp(chip, &rsp); if (retval != STATUS_SUCCESS) { retval = -EIO; TRACE_GOTO(chip, exit); } break; default: break; } exit: /* unlock the device pointers */ mutex_unlock(&chip->usb->dev_mutex); return retval; } #endif
gpl-2.0
cyaniris/sgs4duos_kernel
drivers/input/misc/bfin_rotary.c
5071
6208
/* * Rotary counter driver for Analog Devices Blackfin Processors * * Copyright 2008-2009 Analog Devices Inc. * Licensed under the GPL-2 or later. */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/pm.h> #include <linux/platform_device.h> #include <linux/input.h> #include <linux/slab.h> #include <asm/portmux.h> #include <asm/bfin_rotary.h> static const u16 per_cnt[] = { P_CNT_CUD, P_CNT_CDG, P_CNT_CZM, 0 }; struct bfin_rot { struct input_dev *input; int irq; unsigned int up_key; unsigned int down_key; unsigned int button_key; unsigned int rel_code; unsigned short cnt_config; unsigned short cnt_imask; unsigned short cnt_debounce; }; static void report_key_event(struct input_dev *input, int keycode) { /* simulate a press-n-release */ input_report_key(input, keycode, 1); input_sync(input); input_report_key(input, keycode, 0); input_sync(input); } static void report_rotary_event(struct bfin_rot *rotary, int delta) { struct input_dev *input = rotary->input; if (rotary->up_key) { report_key_event(input, delta > 0 ? 
rotary->up_key : rotary->down_key); } else { input_report_rel(input, rotary->rel_code, delta); input_sync(input); } } static irqreturn_t bfin_rotary_isr(int irq, void *dev_id) { struct platform_device *pdev = dev_id; struct bfin_rot *rotary = platform_get_drvdata(pdev); int delta; switch (bfin_read_CNT_STATUS()) { case ICII: break; case UCII: case DCII: delta = bfin_read_CNT_COUNTER(); if (delta) report_rotary_event(rotary, delta); break; case CZMII: report_key_event(rotary->input, rotary->button_key); break; default: break; } bfin_write_CNT_COMMAND(W1LCNT_ZERO); /* Clear COUNTER */ bfin_write_CNT_STATUS(-1); /* Clear STATUS */ return IRQ_HANDLED; } static int __devinit bfin_rotary_probe(struct platform_device *pdev) { struct bfin_rotary_platform_data *pdata = pdev->dev.platform_data; struct bfin_rot *rotary; struct input_dev *input; int error; /* Basic validation */ if ((pdata->rotary_up_key && !pdata->rotary_down_key) || (!pdata->rotary_up_key && pdata->rotary_down_key)) { return -EINVAL; } error = peripheral_request_list(per_cnt, dev_name(&pdev->dev)); if (error) { dev_err(&pdev->dev, "requesting peripherals failed\n"); return error; } rotary = kzalloc(sizeof(struct bfin_rot), GFP_KERNEL); input = input_allocate_device(); if (!rotary || !input) { error = -ENOMEM; goto out1; } rotary->input = input; rotary->up_key = pdata->rotary_up_key; rotary->down_key = pdata->rotary_down_key; rotary->button_key = pdata->rotary_button_key; rotary->rel_code = pdata->rotary_rel_code; error = rotary->irq = platform_get_irq(pdev, 0); if (error < 0) goto out1; input->name = pdev->name; input->phys = "bfin-rotary/input0"; input->dev.parent = &pdev->dev; input_set_drvdata(input, rotary); input->id.bustype = BUS_HOST; input->id.vendor = 0x0001; input->id.product = 0x0001; input->id.version = 0x0100; if (rotary->up_key) { __set_bit(EV_KEY, input->evbit); __set_bit(rotary->up_key, input->keybit); __set_bit(rotary->down_key, input->keybit); } else { __set_bit(EV_REL, input->evbit); 
__set_bit(rotary->rel_code, input->relbit); } if (rotary->button_key) { __set_bit(EV_KEY, input->evbit); __set_bit(rotary->button_key, input->keybit); } error = request_irq(rotary->irq, bfin_rotary_isr, 0, dev_name(&pdev->dev), pdev); if (error) { dev_err(&pdev->dev, "unable to claim irq %d; error %d\n", rotary->irq, error); goto out1; } error = input_register_device(input); if (error) { dev_err(&pdev->dev, "unable to register input device (%d)\n", error); goto out2; } if (pdata->rotary_button_key) bfin_write_CNT_IMASK(CZMIE); if (pdata->mode & ROT_DEBE) bfin_write_CNT_DEBOUNCE(pdata->debounce & DPRESCALE); if (pdata->mode) bfin_write_CNT_CONFIG(bfin_read_CNT_CONFIG() | (pdata->mode & ~CNTE)); bfin_write_CNT_IMASK(bfin_read_CNT_IMASK() | UCIE | DCIE); bfin_write_CNT_CONFIG(bfin_read_CNT_CONFIG() | CNTE); platform_set_drvdata(pdev, rotary); device_init_wakeup(&pdev->dev, 1); return 0; out2: free_irq(rotary->irq, pdev); out1: input_free_device(input); kfree(rotary); peripheral_free_list(per_cnt); return error; } static int __devexit bfin_rotary_remove(struct platform_device *pdev) { struct bfin_rot *rotary = platform_get_drvdata(pdev); bfin_write_CNT_CONFIG(0); bfin_write_CNT_IMASK(0); free_irq(rotary->irq, pdev); input_unregister_device(rotary->input); peripheral_free_list(per_cnt); kfree(rotary); platform_set_drvdata(pdev, NULL); return 0; } #ifdef CONFIG_PM static int bfin_rotary_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct bfin_rot *rotary = platform_get_drvdata(pdev); rotary->cnt_config = bfin_read_CNT_CONFIG(); rotary->cnt_imask = bfin_read_CNT_IMASK(); rotary->cnt_debounce = bfin_read_CNT_DEBOUNCE(); if (device_may_wakeup(&pdev->dev)) enable_irq_wake(rotary->irq); return 0; } static int bfin_rotary_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct bfin_rot *rotary = platform_get_drvdata(pdev); bfin_write_CNT_DEBOUNCE(rotary->cnt_debounce); 
bfin_write_CNT_IMASK(rotary->cnt_imask); bfin_write_CNT_CONFIG(rotary->cnt_config & ~CNTE); if (device_may_wakeup(&pdev->dev)) disable_irq_wake(rotary->irq); if (rotary->cnt_config & CNTE) bfin_write_CNT_CONFIG(rotary->cnt_config); return 0; } static const struct dev_pm_ops bfin_rotary_pm_ops = { .suspend = bfin_rotary_suspend, .resume = bfin_rotary_resume, }; #endif static struct platform_driver bfin_rotary_device_driver = { .probe = bfin_rotary_probe, .remove = __devexit_p(bfin_rotary_remove), .driver = { .name = "bfin-rotary", .owner = THIS_MODULE, #ifdef CONFIG_PM .pm = &bfin_rotary_pm_ops, #endif }, }; module_platform_driver(bfin_rotary_device_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); MODULE_DESCRIPTION("Rotary Counter driver for Blackfin Processors"); MODULE_ALIAS("platform:bfin-rotary");
gpl-2.0
PsychoGame/omnirom_kernel_lge_msm8974-old
drivers/staging/rts5139/rts51x_fop.c
5071
7100
/* Driver for Realtek RTS51xx USB card reader * * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, see <http://www.gnu.org/licenses/>. * * Author: * wwang (wei_wang@realsil.com.cn) * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China * Maintainer: * Edwin Rong (edwin_rong@realsil.com.cn) * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China */ #include "rts51x.h" #ifdef SUPPORT_FILE_OP #include <linux/types.h> #include <linux/stat.h> #include <linux/kref.h> #include <linux/slab.h> #include "rts51x_chip.h" #include "rts51x_card.h" #include "rts51x_fop.h" #include "sd_cprm.h" #include "rts51x.h" #define RTS5139_IOC_MAGIC 0x39 #define RTS5139_IOC_SD_DIRECT _IOWR(RTS5139_IOC_MAGIC, 0xA0, int) #define RTS5139_IOC_SD_GET_RSP _IOWR(RTS5139_IOC_MAGIC, 0xA1, int) static int rts51x_sd_direct_cmnd(struct rts51x_chip *chip, struct sd_direct_cmnd *cmnd) { int retval; u8 dir, cmd12, standby, acmd, cmd_idx, rsp_code; u8 *buf; u32 arg, len; dir = (cmnd->cmnd[0] >> 3) & 0x03; cmd12 = (cmnd->cmnd[0] >> 2) & 0x01; standby = (cmnd->cmnd[0] >> 1) & 0x01; acmd = cmnd->cmnd[0] & 0x01; cmd_idx = cmnd->cmnd[1]; arg = ((u32) (cmnd->cmnd[2]) << 24) | ((u32) (cmnd->cmnd[3]) << 16) | ((u32) (cmnd->cmnd[4]) << 8) | cmnd->cmnd[5]; len = ((u32) (cmnd->cmnd[6]) << 16) | ((u32) (cmnd->cmnd[7]) << 8) | cmnd->cmnd[8]; rsp_code = cmnd->cmnd[9]; if (dir) { if (!cmnd->buf || (cmnd->buf_len < len)) 
TRACE_RET(chip, STATUS_FAIL); } switch (dir) { case 0: /* No data */ retval = ext_sd_execute_no_data(chip, chip->card2lun[SD_CARD], cmd_idx, standby, acmd, rsp_code, arg); if (retval != TRANSPORT_GOOD) TRACE_RET(chip, STATUS_FAIL); break; case 1: /* Read from card */ buf = kmalloc(cmnd->buf_len, GFP_KERNEL); if (!buf) TRACE_RET(chip, STATUS_NOMEM); retval = ext_sd_execute_read_data(chip, chip->card2lun[SD_CARD], cmd_idx, cmd12, standby, acmd, rsp_code, arg, len, buf, cmnd->buf_len, 0); if (retval != TRANSPORT_GOOD) { kfree(buf); TRACE_RET(chip, STATUS_FAIL); } retval = copy_to_user((void *)cmnd->buf, (void *)buf, cmnd->buf_len); if (retval) { kfree(buf); TRACE_RET(chip, STATUS_NOMEM); } kfree(buf); break; case 2: /* Write to card */ buf = kmalloc(cmnd->buf_len, GFP_KERNEL); if (!buf) TRACE_RET(chip, STATUS_NOMEM); retval = copy_from_user((void *)buf, (void *)cmnd->buf, cmnd->buf_len); if (retval) { kfree(buf); TRACE_RET(chip, STATUS_NOMEM); } retval = ext_sd_execute_write_data(chip, chip->card2lun[SD_CARD], cmd_idx, cmd12, standby, acmd, rsp_code, arg, len, buf, cmnd->buf_len, 0); if (retval != TRANSPORT_GOOD) { kfree(buf); TRACE_RET(chip, STATUS_FAIL); } kfree(buf); break; default: TRACE_RET(chip, STATUS_FAIL); } return STATUS_SUCCESS; } static int rts51x_sd_get_rsp(struct rts51x_chip *chip, struct sd_rsp *rsp) { struct sd_info *sd_card = &(chip->sd_card); int count = 0, retval; if (sd_card->pre_cmd_err) { sd_card->pre_cmd_err = 0; TRACE_RET(chip, STATUS_FAIL); } if (sd_card->last_rsp_type == SD_RSP_TYPE_R0) TRACE_RET(chip, STATUS_FAIL); else if (sd_card->last_rsp_type == SD_RSP_TYPE_R2) count = (rsp->rsp_len < 17) ? rsp->rsp_len : 17; else count = (rsp->rsp_len < 6) ? 
rsp->rsp_len : 6; retval = copy_to_user((void *)rsp->rsp, (void *)sd_card->rsp, count); if (retval) TRACE_RET(chip, STATUS_NOMEM); RTS51X_DEBUGP("Response length: %d\n", count); RTS51X_DEBUGP("Response: 0x%x 0x%x 0x%x 0x%x\n", sd_card->rsp[0], sd_card->rsp[1], sd_card->rsp[2], sd_card->rsp[3]); return STATUS_SUCCESS; } int rts51x_open(struct inode *inode, struct file *filp) { struct rts51x_chip *chip; struct usb_interface *interface; int subminor; int retval = 0; subminor = iminor(inode); interface = usb_find_interface(&rts51x_driver, subminor); if (!interface) { RTS51X_DEBUGP("%s - error, can't find device for minor %d\n", __func__, subminor); retval = -ENODEV; goto exit; } chip = (struct rts51x_chip *)usb_get_intfdata(interface); if (!chip) { RTS51X_DEBUGP("Can't find chip\n"); retval = -ENODEV; goto exit; } /* Increase our reference to the host */ scsi_host_get(rts51x_to_host(chip)); /* lock the device pointers */ mutex_lock(&(chip->usb->dev_mutex)); /* save our object in the file's private structure */ filp->private_data = chip; /* unlock the device pointers */ mutex_unlock(&chip->usb->dev_mutex); exit: return retval; } int rts51x_release(struct inode *inode, struct file *filp) { struct rts51x_chip *chip; chip = (struct rts51x_chip *)filp->private_data; if (chip == NULL) return -ENODEV; /* Drop our reference to the host; the SCSI core will free it * (and "chip" along with it) when the refcount becomes 0. 
*/ scsi_host_put(rts51x_to_host(chip)); return 0; } ssize_t rts51x_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos) { return 0; } ssize_t rts51x_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos) { return 0; } #if 0 /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) */ int rts51x_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) #else long rts51x_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) #endif { struct rts51x_chip *chip; struct sd_direct_cmnd cmnd; struct sd_rsp rsp; int retval = 0; chip = (struct rts51x_chip *)filp->private_data; if (chip == NULL) return -ENODEV; /* lock the device pointers */ mutex_lock(&(chip->usb->dev_mutex)); switch (cmd) { case RTS5139_IOC_SD_DIRECT: retval = copy_from_user((void *)&cmnd, (void *)arg, sizeof(struct sd_direct_cmnd)); if (retval) { retval = -ENOMEM; TRACE_GOTO(chip, exit); } retval = rts51x_sd_direct_cmnd(chip, &cmnd); if (retval != STATUS_SUCCESS) { retval = -EIO; TRACE_GOTO(chip, exit); } break; case RTS5139_IOC_SD_GET_RSP: retval = copy_from_user((void *)&rsp, (void *)arg, sizeof(struct sd_rsp)); if (retval) { retval = -ENOMEM; TRACE_GOTO(chip, exit); } retval = rts51x_sd_get_rsp(chip, &rsp); if (retval != STATUS_SUCCESS) { retval = -EIO; TRACE_GOTO(chip, exit); } break; default: break; } exit: /* unlock the device pointers */ mutex_unlock(&chip->usb->dev_mutex); return retval; } #endif
gpl-2.0
zarboz/Ville-Z.238
drivers/char/agp/ati-agp.c
9167
14939
/* * ATi AGPGART routines. */ #include <linux/types.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/agp_backend.h> #include <asm/agp.h> #include "agp.h" #define ATI_GART_MMBASE_ADDR 0x14 #define ATI_RS100_APSIZE 0xac #define ATI_RS100_IG_AGPMODE 0xb0 #define ATI_RS300_APSIZE 0xf8 #define ATI_RS300_IG_AGPMODE 0xfc #define ATI_GART_FEATURE_ID 0x00 #define ATI_GART_BASE 0x04 #define ATI_GART_CACHE_SZBASE 0x08 #define ATI_GART_CACHE_CNTRL 0x0c #define ATI_GART_CACHE_ENTRY_CNTRL 0x10 static const struct aper_size_info_lvl2 ati_generic_sizes[7] = { {2048, 524288, 0x0000000c}, {1024, 262144, 0x0000000a}, {512, 131072, 0x00000008}, {256, 65536, 0x00000006}, {128, 32768, 0x00000004}, {64, 16384, 0x00000002}, {32, 8192, 0x00000000} }; static struct gatt_mask ati_generic_masks[] = { { .mask = 1, .type = 0} }; struct ati_page_map { unsigned long *real; unsigned long __iomem *remapped; }; static struct _ati_generic_private { volatile u8 __iomem *registers; struct ati_page_map **gatt_pages; int num_tables; } ati_generic_private; static int ati_create_page_map(struct ati_page_map *page_map) { int i, err = 0; page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL); if (page_map->real == NULL) return -ENOMEM; set_memory_uc((unsigned long)page_map->real, 1); err = map_page_into_agp(virt_to_page(page_map->real)); page_map->remapped = page_map->real; for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) { writel(agp_bridge->scratch_page, page_map->remapped+i); readl(page_map->remapped+i); /* PCI Posting. 
*/ } return 0; } static void ati_free_page_map(struct ati_page_map *page_map) { unmap_page_from_agp(virt_to_page(page_map->real)); set_memory_wb((unsigned long)page_map->real, 1); free_page((unsigned long) page_map->real); } static void ati_free_gatt_pages(void) { int i; struct ati_page_map **tables; struct ati_page_map *entry; tables = ati_generic_private.gatt_pages; for (i = 0; i < ati_generic_private.num_tables; i++) { entry = tables[i]; if (entry != NULL) { if (entry->real != NULL) ati_free_page_map(entry); kfree(entry); } } kfree(tables); } static int ati_create_gatt_pages(int nr_tables) { struct ati_page_map **tables; struct ati_page_map *entry; int retval = 0; int i; tables = kzalloc((nr_tables + 1) * sizeof(struct ati_page_map *),GFP_KERNEL); if (tables == NULL) return -ENOMEM; for (i = 0; i < nr_tables; i++) { entry = kzalloc(sizeof(struct ati_page_map), GFP_KERNEL); tables[i] = entry; if (entry == NULL) { retval = -ENOMEM; break; } retval = ati_create_page_map(entry); if (retval != 0) break; } ati_generic_private.num_tables = i; ati_generic_private.gatt_pages = tables; if (retval != 0) ati_free_gatt_pages(); return retval; } static int is_r200(void) { if ((agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS100) || (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS200) || (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS200_B) || (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS250)) return 1; return 0; } static int ati_fetch_size(void) { int i; u32 temp; struct aper_size_info_lvl2 *values; if (is_r200()) pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp); else pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp); temp = (temp & 0x0000000e); values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes); for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { if (temp == values[i].size_value) { agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); agp_bridge->aperture_size_idx = i; return values[i].size; } } return 
0; } static void ati_tlbflush(struct agp_memory * mem) { writel(1, ati_generic_private.registers+ATI_GART_CACHE_CNTRL); readl(ati_generic_private.registers+ATI_GART_CACHE_CNTRL); /* PCI Posting. */ } static void ati_cleanup(void) { struct aper_size_info_lvl2 *previous_size; u32 temp; previous_size = A_SIZE_LVL2(agp_bridge->previous_size); /* Write back the previous size and disable gart translation */ if (is_r200()) { pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp); temp = ((temp & ~(0x0000000f)) | previous_size->size_value); pci_write_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, temp); } else { pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp); temp = ((temp & ~(0x0000000f)) | previous_size->size_value); pci_write_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, temp); } iounmap((volatile u8 __iomem *)ati_generic_private.registers); } static int ati_configure(void) { u32 temp; /* Get the memory mapped registers */ pci_read_config_dword(agp_bridge->dev, ATI_GART_MMBASE_ADDR, &temp); temp = (temp & 0xfffff000); ati_generic_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096); if (!ati_generic_private.registers) return -ENOMEM; if (is_r200()) pci_write_config_dword(agp_bridge->dev, ATI_RS100_IG_AGPMODE, 0x20000); else pci_write_config_dword(agp_bridge->dev, ATI_RS300_IG_AGPMODE, 0x20000); /* address to map too */ /* pci_read_config_dword(agp_bridge.dev, AGP_APBASE, &temp); agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); printk(KERN_INFO PFX "IGP320 gart_bus_addr: %x\n", agp_bridge.gart_bus_addr); */ writel(0x60000, ati_generic_private.registers+ATI_GART_FEATURE_ID); readl(ati_generic_private.registers+ATI_GART_FEATURE_ID); /* PCI Posting.*/ /* SIGNALED_SYSTEM_ERROR @ NB_STATUS */ pci_read_config_dword(agp_bridge->dev, 4, &temp); pci_write_config_dword(agp_bridge->dev, 4, temp | (1<<14)); /* Write out the address of the gatt table */ writel(agp_bridge->gatt_bus_addr, 
ati_generic_private.registers+ATI_GART_BASE); readl(ati_generic_private.registers+ATI_GART_BASE); /* PCI Posting. */ return 0; } #ifdef CONFIG_PM static int agp_ati_suspend(struct pci_dev *dev, pm_message_t state) { pci_save_state(dev); pci_set_power_state(dev, 3); return 0; } static int agp_ati_resume(struct pci_dev *dev) { pci_set_power_state(dev, 0); pci_restore_state(dev); return ati_configure(); } #endif /* *Since we don't need contiguous memory we just try * to get the gatt table once */ #define GET_PAGE_DIR_OFF(addr) (addr >> 22) #define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \ GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr)) #define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) #undef GET_GATT #define GET_GATT(addr) (ati_generic_private.gatt_pages[\ GET_PAGE_DIR_IDX(addr)]->remapped) static int ati_insert_memory(struct agp_memory * mem, off_t pg_start, int type) { int i, j, num_entries; unsigned long __iomem *cur_gatt; unsigned long addr; int mask_type; num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; mask_type = agp_generic_type_to_mask_type(mem->bridge, type); if (mask_type != 0 || type != mem->type) return -EINVAL; if (mem->page_count == 0) return 0; if ((pg_start + mem->page_count) > num_entries) return -EINVAL; j = pg_start; while (j < (pg_start + mem->page_count)) { addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = GET_GATT(addr); if (!PGE_EMPTY(agp_bridge,readl(cur_gatt+GET_GATT_OFF(addr)))) return -EBUSY; j++; } if (!mem->is_flushed) { /*CACHE_FLUSH(); */ global_cache_flush(); mem->is_flushed = true; } for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = GET_GATT(addr); writel(agp_bridge->driver->mask_memory(agp_bridge, page_to_phys(mem->pages[i]), mem->type), cur_gatt+GET_GATT_OFF(addr)); } readl(GET_GATT(agp_bridge->gart_bus_addr)); /* PCI posting */ agp_bridge->driver->tlb_flush(mem); return 0; } static int ati_remove_memory(struct agp_memory 
* mem, off_t pg_start, int type) { int i; unsigned long __iomem *cur_gatt; unsigned long addr; int mask_type; mask_type = agp_generic_type_to_mask_type(mem->bridge, type); if (mask_type != 0 || type != mem->type) return -EINVAL; if (mem->page_count == 0) return 0; for (i = pg_start; i < (mem->page_count + pg_start); i++) { addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = GET_GATT(addr); writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); } readl(GET_GATT(agp_bridge->gart_bus_addr)); /* PCI posting */ agp_bridge->driver->tlb_flush(mem); return 0; } static int ati_create_gatt_table(struct agp_bridge_data *bridge) { struct aper_size_info_lvl2 *value; struct ati_page_map page_dir; unsigned long __iomem *cur_gatt; unsigned long addr; int retval; u32 temp; int i; struct aper_size_info_lvl2 *current_size; value = A_SIZE_LVL2(agp_bridge->current_size); retval = ati_create_page_map(&page_dir); if (retval != 0) return retval; retval = ati_create_gatt_pages(value->num_entries / 1024); if (retval != 0) { ati_free_page_map(&page_dir); return retval; } agp_bridge->gatt_table_real = (u32 *)page_dir.real; agp_bridge->gatt_table = (u32 __iomem *) page_dir.remapped; agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real); /* Write out the size register */ current_size = A_SIZE_LVL2(agp_bridge->current_size); if (is_r200()) { pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp); temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 0x00000001); pci_write_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, temp); pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp); } else { pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp); temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 0x00000001); pci_write_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, temp); pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp); } /* * Get the address for the gart region. 
* This is a bus address even on the alpha, b/c its * used to program the agp master not the cpu */ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); agp_bridge->gart_bus_addr = addr; /* Calculate the agp offset */ for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) { writel(virt_to_phys(ati_generic_private.gatt_pages[i]->real) | 1, page_dir.remapped+GET_PAGE_DIR_OFF(addr)); readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */ } for (i = 0; i < value->num_entries; i++) { addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = GET_GATT(addr); writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); } return 0; } static int ati_free_gatt_table(struct agp_bridge_data *bridge) { struct ati_page_map page_dir; page_dir.real = (unsigned long *)agp_bridge->gatt_table_real; page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table; ati_free_gatt_pages(); ati_free_page_map(&page_dir); return 0; } static const struct agp_bridge_driver ati_generic_bridge = { .owner = THIS_MODULE, .aperture_sizes = ati_generic_sizes, .size_type = LVL2_APER_SIZE, .num_aperture_sizes = 7, .needs_scratch_page = true, .configure = ati_configure, .fetch_size = ati_fetch_size, .cleanup = ati_cleanup, .tlb_flush = ati_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = ati_generic_masks, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, .create_gatt_table = ati_create_gatt_table, .free_gatt_table = ati_free_gatt_table, .insert_memory = ati_insert_memory, .remove_memory = ati_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; static struct agp_device_ids ati_agp_device_ids[] __devinitdata = 
{ { .device_id = PCI_DEVICE_ID_ATI_RS100, .chipset_name = "IGP320/M", }, { .device_id = PCI_DEVICE_ID_ATI_RS200, .chipset_name = "IGP330/340/345/350/M", }, { .device_id = PCI_DEVICE_ID_ATI_RS200_B, .chipset_name = "IGP345M", }, { .device_id = PCI_DEVICE_ID_ATI_RS250, .chipset_name = "IGP7000/M", }, { .device_id = PCI_DEVICE_ID_ATI_RS300_100, .chipset_name = "IGP9100/M", }, { .device_id = PCI_DEVICE_ID_ATI_RS300_133, .chipset_name = "IGP9100/M", }, { .device_id = PCI_DEVICE_ID_ATI_RS300_166, .chipset_name = "IGP9100/M", }, { .device_id = PCI_DEVICE_ID_ATI_RS300_200, .chipset_name = "IGP9100/M", }, { .device_id = PCI_DEVICE_ID_ATI_RS350_133, .chipset_name = "IGP9000/M", }, { .device_id = PCI_DEVICE_ID_ATI_RS350_200, .chipset_name = "IGP9100/M", }, { }, /* dummy final entry, always present */ }; static int __devinit agp_ati_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct agp_device_ids *devs = ati_agp_device_ids; struct agp_bridge_data *bridge; u8 cap_ptr; int j; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); if (!cap_ptr) return -ENODEV; /* probe for known chipsets */ for (j = 0; devs[j].chipset_name; j++) { if (pdev->device == devs[j].device_id) goto found; } dev_err(&pdev->dev, "unsupported Ati chipset [%04x/%04x])\n", pdev->vendor, pdev->device); return -ENODEV; found: bridge = agp_alloc_bridge(); if (!bridge) return -ENOMEM; bridge->dev = pdev; bridge->capndx = cap_ptr; bridge->driver = &ati_generic_bridge; dev_info(&pdev->dev, "Ati %s chipset\n", devs[j].chipset_name); /* Fill in the mode register */ pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode); pci_set_drvdata(pdev, bridge); return agp_add_bridge(bridge); } static void __devexit agp_ati_remove(struct pci_dev *pdev) { struct agp_bridge_data *bridge = pci_get_drvdata(pdev); agp_remove_bridge(bridge); agp_put_bridge(bridge); } static struct pci_device_id agp_ati_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = 
PCI_VENDOR_ID_ATI, .device = PCI_ANY_ID, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { } }; MODULE_DEVICE_TABLE(pci, agp_ati_pci_table); static struct pci_driver agp_ati_pci_driver = { .name = "agpgart-ati", .id_table = agp_ati_pci_table, .probe = agp_ati_probe, .remove = agp_ati_remove, #ifdef CONFIG_PM .suspend = agp_ati_suspend, .resume = agp_ati_resume, #endif }; static int __init agp_ati_init(void) { if (agp_off) return -EINVAL; return pci_register_driver(&agp_ati_pci_driver); } static void __exit agp_ati_cleanup(void) { pci_unregister_driver(&agp_ati_pci_driver); } module_init(agp_ati_init); module_exit(agp_ati_cleanup); MODULE_AUTHOR("Dave Jones <davej@redhat.com>"); MODULE_LICENSE("GPL and additional rights");
gpl-2.0
ztemt/N939Sc_5.1_kenel
drivers/video/console/mdacon.c
11471
14127
/* * linux/drivers/video/mdacon.c -- Low level MDA based console driver * * (c) 1998 Andrew Apted <ajapted@netspace.net.au> * * including portions (c) 1995-1998 Patrick Caulfield. * * slight improvements (c) 2000 Edward Betts <edward@debian.org> * * This file is based on the VGA console driver (vgacon.c): * * Created 28 Sep 1997 by Geert Uytterhoeven * * Rewritten by Martin Mares <mj@ucw.cz>, July 1998 * * and on the old console.c, vga.c and vesa_blank.c drivers: * * Copyright (C) 1991, 1992 Linus Torvalds * 1995 Jay Estabrook * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * * Changelog: * Paul G. (03/2001) Fix mdacon= boot prompt to use __setup(). */ #include <linux/types.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/console.h> #include <linux/string.h> #include <linux/kd.h> #include <linux/vt_kern.h> #include <linux/vt_buffer.h> #include <linux/selection.h> #include <linux/spinlock.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/init.h> #include <asm/io.h> #include <asm/vga.h> static DEFINE_SPINLOCK(mda_lock); /* description of the hardware layout */ static unsigned long mda_vram_base; /* Base of video memory */ static unsigned long mda_vram_len; /* Size of video memory */ static unsigned int mda_num_columns; /* Number of text columns */ static unsigned int mda_num_lines; /* Number of text lines */ static unsigned int mda_index_port; /* Register select port */ static unsigned int mda_value_port; /* Register value port */ static unsigned int mda_mode_port; /* Mode control port */ static unsigned int mda_status_port; /* Status and Config port */ static unsigned int mda_gfx_port; /* Graphics control port */ /* current hardware state */ static int mda_cursor_loc=-1; static int mda_cursor_size_from=-1; static int mda_cursor_size_to=-1; static enum { TYPE_MDA, TYPE_HERC, 
TYPE_HERCPLUS, TYPE_HERCCOLOR } mda_type; static char *mda_type_name; /* console information */ static int mda_first_vc = 13; static int mda_last_vc = 16; static struct vc_data *mda_display_fg = NULL; module_param(mda_first_vc, int, 0); MODULE_PARM_DESC(mda_first_vc, "First virtual console. Default: 13"); module_param(mda_last_vc, int, 0); MODULE_PARM_DESC(mda_last_vc, "Last virtual console. Default: 16"); /* MDA register values */ #define MDA_CURSOR_BLINKING 0x00 #define MDA_CURSOR_OFF 0x20 #define MDA_CURSOR_SLOWBLINK 0x60 #define MDA_MODE_GRAPHICS 0x02 #define MDA_MODE_VIDEO_EN 0x08 #define MDA_MODE_BLINK_EN 0x20 #define MDA_MODE_GFX_PAGE1 0x80 #define MDA_STATUS_HSYNC 0x01 #define MDA_STATUS_VSYNC 0x80 #define MDA_STATUS_VIDEO 0x08 #define MDA_CONFIG_COL132 0x08 #define MDA_GFX_MODE_EN 0x01 #define MDA_GFX_PAGE_EN 0x02 /* * MDA could easily be classified as "pre-dinosaur hardware". */ static void write_mda_b(unsigned int val, unsigned char reg) { unsigned long flags; spin_lock_irqsave(&mda_lock, flags); outb_p(reg, mda_index_port); outb_p(val, mda_value_port); spin_unlock_irqrestore(&mda_lock, flags); } static void write_mda_w(unsigned int val, unsigned char reg) { unsigned long flags; spin_lock_irqsave(&mda_lock, flags); outb_p(reg, mda_index_port); outb_p(val >> 8, mda_value_port); outb_p(reg+1, mda_index_port); outb_p(val & 0xff, mda_value_port); spin_unlock_irqrestore(&mda_lock, flags); } #ifdef TEST_MDA_B static int test_mda_b(unsigned char val, unsigned char reg) { unsigned long flags; spin_lock_irqsave(&mda_lock, flags); outb_p(reg, mda_index_port); outb (val, mda_value_port); udelay(20); val = (inb_p(mda_value_port) == val); spin_unlock_irqrestore(&mda_lock, flags); return val; } #endif static inline void mda_set_cursor(unsigned int location) { if (mda_cursor_loc == location) return; write_mda_w(location >> 1, 0x0e); mda_cursor_loc = location; } static inline void mda_set_cursor_size(int from, int to) { if (mda_cursor_size_from==from && 
mda_cursor_size_to==to) return; if (from > to) { write_mda_b(MDA_CURSOR_OFF, 0x0a); /* disable cursor */ } else { write_mda_b(from, 0x0a); /* cursor start */ write_mda_b(to, 0x0b); /* cursor end */ } mda_cursor_size_from = from; mda_cursor_size_to = to; } #ifndef MODULE static int __init mdacon_setup(char *str) { /* command line format: mdacon=<first>,<last> */ int ints[3]; str = get_options(str, ARRAY_SIZE(ints), ints); if (ints[0] < 2) return 0; if (ints[1] < 1 || ints[1] > MAX_NR_CONSOLES || ints[2] < 1 || ints[2] > MAX_NR_CONSOLES) return 0; mda_first_vc = ints[1]; mda_last_vc = ints[2]; return 1; } __setup("mdacon=", mdacon_setup); #endif static int mda_detect(void) { int count=0; u16 *p, p_save; u16 *q, q_save; /* do a memory check */ p = (u16 *) mda_vram_base; q = (u16 *) (mda_vram_base + 0x01000); p_save = scr_readw(p); q_save = scr_readw(q); scr_writew(0xAA55, p); if (scr_readw(p) == 0xAA55) count++; scr_writew(0x55AA, p); if (scr_readw(p) == 0x55AA) count++; scr_writew(p_save, p); if (count != 2) { return 0; } /* check if we have 4K or 8K */ scr_writew(0xA55A, q); scr_writew(0x0000, p); if (scr_readw(q) == 0xA55A) count++; scr_writew(0x5AA5, q); scr_writew(0x0000, p); if (scr_readw(q) == 0x5AA5) count++; scr_writew(p_save, p); scr_writew(q_save, q); if (count == 4) { mda_vram_len = 0x02000; } /* Ok, there is definitely a card registering at the correct * memory location, so now we do an I/O port test. */ #ifdef TEST_MDA_B /* Edward: These two mess `tests' mess up my cursor on bootup */ /* cursor low register */ if (! test_mda_b(0x66, 0x0f)) { return 0; } /* cursor low register */ if (! test_mda_b(0x99, 0x0f)) { return 0; } #endif /* See if the card is a Hercules, by checking whether the vsync * bit of the status register is changing. This test lasts for * approximately 1/10th of a second. 
*/ p_save = q_save = inb_p(mda_status_port) & MDA_STATUS_VSYNC; for (count=0; count < 50000 && p_save == q_save; count++) { q_save = inb(mda_status_port) & MDA_STATUS_VSYNC; udelay(2); } if (p_save != q_save) { switch (inb_p(mda_status_port) & 0x70) { case 0x10: mda_type = TYPE_HERCPLUS; mda_type_name = "HerculesPlus"; break; case 0x50: mda_type = TYPE_HERCCOLOR; mda_type_name = "HerculesColor"; break; default: mda_type = TYPE_HERC; mda_type_name = "Hercules"; break; } } return 1; } static void mda_initialize(void) { write_mda_b(97, 0x00); /* horizontal total */ write_mda_b(80, 0x01); /* horizontal displayed */ write_mda_b(82, 0x02); /* horizontal sync pos */ write_mda_b(15, 0x03); /* horizontal sync width */ write_mda_b(25, 0x04); /* vertical total */ write_mda_b(6, 0x05); /* vertical total adjust */ write_mda_b(25, 0x06); /* vertical displayed */ write_mda_b(25, 0x07); /* vertical sync pos */ write_mda_b(2, 0x08); /* interlace mode */ write_mda_b(13, 0x09); /* maximum scanline */ write_mda_b(12, 0x0a); /* cursor start */ write_mda_b(13, 0x0b); /* cursor end */ write_mda_w(0x0000, 0x0c); /* start address */ write_mda_w(0x0000, 0x0e); /* cursor location */ outb_p(MDA_MODE_VIDEO_EN | MDA_MODE_BLINK_EN, mda_mode_port); outb_p(0x00, mda_status_port); outb_p(0x00, mda_gfx_port); } static const char *mdacon_startup(void) { mda_num_columns = 80; mda_num_lines = 25; mda_vram_len = 0x01000; mda_vram_base = VGA_MAP_MEM(0xb0000, mda_vram_len); mda_index_port = 0x3b4; mda_value_port = 0x3b5; mda_mode_port = 0x3b8; mda_status_port = 0x3ba; mda_gfx_port = 0x3bf; mda_type = TYPE_MDA; mda_type_name = "MDA"; if (! 
mda_detect()) { printk("mdacon: MDA card not detected.\n"); return NULL; } if (mda_type != TYPE_MDA) { mda_initialize(); } /* cursor looks ugly during boot-up, so turn it off */ mda_set_cursor(mda_vram_len - 1); printk("mdacon: %s with %ldK of memory detected.\n", mda_type_name, mda_vram_len/1024); return "MDA-2"; } static void mdacon_init(struct vc_data *c, int init) { c->vc_complement_mask = 0x0800; /* reverse video */ c->vc_display_fg = &mda_display_fg; if (init) { c->vc_cols = mda_num_columns; c->vc_rows = mda_num_lines; } else vc_resize(c, mda_num_columns, mda_num_lines); /* make the first MDA console visible */ if (mda_display_fg == NULL) mda_display_fg = c; } static void mdacon_deinit(struct vc_data *c) { /* con_set_default_unimap(c->vc_num); */ if (mda_display_fg == c) mda_display_fg = NULL; } static inline u16 mda_convert_attr(u16 ch) { u16 attr = 0x0700; /* Underline and reverse-video are mutually exclusive on MDA. * Since reverse-video is used for cursors and selected areas, * it takes precedence. 
*/ if (ch & 0x0800) attr = 0x7000; /* reverse */ else if (ch & 0x0400) attr = 0x0100; /* underline */ return ((ch & 0x0200) << 2) | /* intensity */ (ch & 0x8000) | /* blink */ (ch & 0x00ff) | attr; } static u8 mdacon_build_attr(struct vc_data *c, u8 color, u8 intensity, u8 blink, u8 underline, u8 reverse, u8 italic) { /* The attribute is just a bit vector: * * Bit 0..1 : intensity (0..2) * Bit 2 : underline * Bit 3 : reverse * Bit 7 : blink */ return (intensity & 3) | ((underline & 1) << 2) | ((reverse & 1) << 3) | (!!italic << 4) | ((blink & 1) << 7); } static void mdacon_invert_region(struct vc_data *c, u16 *p, int count) { for (; count > 0; count--) { scr_writew(scr_readw(p) ^ 0x0800, p); p++; } } #define MDA_ADDR(x,y) ((u16 *) mda_vram_base + (y)*mda_num_columns + (x)) static void mdacon_putc(struct vc_data *c, int ch, int y, int x) { scr_writew(mda_convert_attr(ch), MDA_ADDR(x, y)); } static void mdacon_putcs(struct vc_data *c, const unsigned short *s, int count, int y, int x) { u16 *dest = MDA_ADDR(x, y); for (; count > 0; count--) { scr_writew(mda_convert_attr(scr_readw(s++)), dest++); } } static void mdacon_clear(struct vc_data *c, int y, int x, int height, int width) { u16 *dest = MDA_ADDR(x, y); u16 eattr = mda_convert_attr(c->vc_video_erase_char); if (width <= 0 || height <= 0) return; if (x==0 && width==mda_num_columns) { scr_memsetw(dest, eattr, height*width*2); } else { for (; height > 0; height--, dest+=mda_num_columns) scr_memsetw(dest, eattr, width*2); } } static void mdacon_bmove(struct vc_data *c, int sy, int sx, int dy, int dx, int height, int width) { u16 *src, *dest; if (width <= 0 || height <= 0) return; if (sx==0 && dx==0 && width==mda_num_columns) { scr_memmovew(MDA_ADDR(0,dy), MDA_ADDR(0,sy), height*width*2); } else if (dy < sy || (dy == sy && dx < sx)) { src = MDA_ADDR(sx, sy); dest = MDA_ADDR(dx, dy); for (; height > 0; height--) { scr_memmovew(dest, src, width*2); src += mda_num_columns; dest += mda_num_columns; } } else { src = 
MDA_ADDR(sx, sy+height-1); dest = MDA_ADDR(dx, dy+height-1); for (; height > 0; height--) { scr_memmovew(dest, src, width*2); src -= mda_num_columns; dest -= mda_num_columns; } } } static int mdacon_switch(struct vc_data *c) { return 1; /* redrawing needed */ } static int mdacon_set_palette(struct vc_data *c, unsigned char *table) { return -EINVAL; } static int mdacon_blank(struct vc_data *c, int blank, int mode_switch) { if (mda_type == TYPE_MDA) { if (blank) scr_memsetw((void *)mda_vram_base, mda_convert_attr(c->vc_video_erase_char), c->vc_screenbuf_size); /* Tell console.c that it has to restore the screen itself */ return 1; } else { if (blank) outb_p(0x00, mda_mode_port); /* disable video */ else outb_p(MDA_MODE_VIDEO_EN | MDA_MODE_BLINK_EN, mda_mode_port); return 0; } } static int mdacon_scrolldelta(struct vc_data *c, int lines) { return 0; } static void mdacon_cursor(struct vc_data *c, int mode) { if (mode == CM_ERASE) { mda_set_cursor(mda_vram_len - 1); return; } mda_set_cursor(c->vc_y*mda_num_columns*2 + c->vc_x*2); switch (c->vc_cursor_type & 0x0f) { case CUR_LOWER_THIRD: mda_set_cursor_size(10, 13); break; case CUR_LOWER_HALF: mda_set_cursor_size(7, 13); break; case CUR_TWO_THIRDS: mda_set_cursor_size(4, 13); break; case CUR_BLOCK: mda_set_cursor_size(1, 13); break; case CUR_NONE: mda_set_cursor_size(14, 13); break; default: mda_set_cursor_size(12, 13); break; } } static int mdacon_scroll(struct vc_data *c, int t, int b, int dir, int lines) { u16 eattr = mda_convert_attr(c->vc_video_erase_char); if (!lines) return 0; if (lines > c->vc_rows) /* maximum realistic size */ lines = c->vc_rows; switch (dir) { case SM_UP: scr_memmovew(MDA_ADDR(0,t), MDA_ADDR(0,t+lines), (b-t-lines)*mda_num_columns*2); scr_memsetw(MDA_ADDR(0,b-lines), eattr, lines*mda_num_columns*2); break; case SM_DOWN: scr_memmovew(MDA_ADDR(0,t+lines), MDA_ADDR(0,t), (b-t-lines)*mda_num_columns*2); scr_memsetw(MDA_ADDR(0,t), eattr, lines*mda_num_columns*2); break; } return 0; } /* * The 
console `switch' structure for the MDA based console */ static const struct consw mda_con = { .owner = THIS_MODULE, .con_startup = mdacon_startup, .con_init = mdacon_init, .con_deinit = mdacon_deinit, .con_clear = mdacon_clear, .con_putc = mdacon_putc, .con_putcs = mdacon_putcs, .con_cursor = mdacon_cursor, .con_scroll = mdacon_scroll, .con_bmove = mdacon_bmove, .con_switch = mdacon_switch, .con_blank = mdacon_blank, .con_set_palette = mdacon_set_palette, .con_scrolldelta = mdacon_scrolldelta, .con_build_attr = mdacon_build_attr, .con_invert_region = mdacon_invert_region, }; int __init mda_console_init(void) { if (mda_first_vc > mda_last_vc) return 1; return take_over_console(&mda_con, mda_first_vc-1, mda_last_vc-1, 0); } static void __exit mda_console_exit(void) { give_up_console(&mda_con); } module_init(mda_console_init); module_exit(mda_console_exit); MODULE_LICENSE("GPL");
gpl-2.0
deadlyindian/android_kernel_oneplus_msm8974
fs/proc/loadavg.c
11983
1141
#include <linux/fs.h> #include <linux/init.h> #include <linux/pid_namespace.h> #include <linux/proc_fs.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/seqlock.h> #include <linux/time.h> #define LOAD_INT(x) ((x) >> FSHIFT) #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) static int loadavg_proc_show(struct seq_file *m, void *v) { unsigned long avnrun[3]; get_avenrun(avnrun, FIXED_1/200, 0); seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu %ld/%d %d\n", LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]), LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]), LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]), nr_running(), nr_threads, task_active_pid_ns(current)->last_pid); return 0; } static int loadavg_proc_open(struct inode *inode, struct file *file) { return single_open(file, loadavg_proc_show, NULL); } static const struct file_operations loadavg_proc_fops = { .open = loadavg_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init proc_loadavg_init(void) { proc_create("loadavg", 0, NULL, &loadavg_proc_fops); return 0; } module_init(proc_loadavg_init);
gpl-2.0
sunny256/linux
arch/c6x/platforms/megamod-pic.c
208
8245
/*
 * Support for C64x+ Megamodule Interrupt Controller
 *
 * Copyright (C) 2010, 2011 Texas Instruments Incorporated
 * Contributed by: Mark Salter <msalter@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <asm/soc.h>
#include <asm/megamod-pic.h>

/* Four event combiners, each folding 32 megamodule events into one output. */
#define NR_COMBINERS	4
/* Up to 12 mux outputs feed core priority interrupts 4..15. */
#define NR_MUX_OUTPUTS  12

#define IRQ_UNMAPPED 0xffff

/*
 * Megamodule Interrupt Controller register layout
 */
struct megamod_regs {
	u32	evtflag[8];
	u32	evtset[8];
	u32	evtclr[8];
	u32	reserved0[8];
	u32	evtmask[8];
	u32	mevtflag[8];
	u32	expmask[8];
	u32	mexpflag[8];
	u32	intmux_unused;
	u32	intmux[7];
	u32	reserved1[8];
	u32	aegmux[2];
	u32	reserved2[14];
	u32	intxstat;
	u32	intxclr;
	u32	intdmask;
	u32	reserved3[13];
	u32	evtasrt;
};

struct megamod_pic {
	struct irq_domain *irqhost;
	struct megamod_regs __iomem *regs;
	raw_spinlock_t lock;		/* protects read-modify-write of evtmask */

	/* hw mux mapping */
	unsigned int output_to_irq[NR_MUX_OUTPUTS];
};

static struct megamod_pic *mm_pic;

/* Per-combiner handler data: which PIC and which combiner index. */
struct megamod_cascade_data {
	struct megamod_pic *pic;
	int index;
};

static struct megamod_cascade_data cascade_data[NR_COMBINERS];

/* Mask a megamodule event: set its bit in the relevant EVTMASK word. */
static void mask_megamod(struct irq_data *data)
{
	struct megamod_pic *pic = irq_data_get_irq_chip_data(data);
	irq_hw_number_t src = irqd_to_hwirq(data);
	u32 __iomem *evtmask = &pic->regs->evtmask[src / 32];

	raw_spin_lock(&pic->lock);
	soc_writel(soc_readl(evtmask) | (1 << (src & 31)), evtmask);
	raw_spin_unlock(&pic->lock);
}

/* Unmask a megamodule event: clear its bit in the relevant EVTMASK word. */
static void unmask_megamod(struct irq_data *data)
{
	struct megamod_pic *pic = irq_data_get_irq_chip_data(data);
	irq_hw_number_t src = irqd_to_hwirq(data);
	u32 __iomem *evtmask = &pic->regs->evtmask[src / 32];

	raw_spin_lock(&pic->lock);
	soc_writel(soc_readl(evtmask) & ~(1 << (src & 31)), evtmask);
	raw_spin_unlock(&pic->lock);
}

static struct irq_chip megamod_chip = {
	.name		= "megamod",
	.irq_mask	= mask_megamod,
	.irq_unmask	= unmask_megamod,
};

/*
 * Chained handler for a combined interrupt: drain MEVTFLAG for this
 * combiner, acknowledging (EVTCLR) and dispatching each pending event.
 */
static void megamod_irq_cascade(struct irq_desc *desc)
{
	struct megamod_cascade_data *cascade;
	struct megamod_pic *pic;
	unsigned int irq;
	u32 events;
	int n, idx;

	cascade = irq_desc_get_handler_data(desc);

	pic = cascade->pic;
	idx = cascade->index;

	while ((events = soc_readl(&pic->regs->mevtflag[idx])) != 0) {
		n = __ffs(events);

		irq = irq_linear_revmap(pic->irqhost, idx * 32 + n);

		soc_writel(1 << n, &pic->regs->evtclr[idx]);

		generic_handle_irq(irq);
	}
}

/* irq_domain map callback: wire a virq to the megamod chip/handler. */
static int megamod_map(struct irq_domain *h, unsigned int virq,
		       irq_hw_number_t hw)
{
	struct megamod_pic *pic = h->host_data;
	int i;

	/* We shouldn't see a hwirq which is muxed to core controller */
	for (i = 0; i < NR_MUX_OUTPUTS; i++)
		if (pic->output_to_irq[i] == hw)
			return -1;

	irq_set_chip_data(virq, pic);
	irq_set_chip_and_handler(virq, &megamod_chip, handle_level_irq);

	/* Set default irq type */
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	return 0;
}

static const struct irq_domain_ops megamod_domain_ops = {
	.map	= megamod_map,
	.xlate	= irq_domain_xlate_onecell,
};

/*
 * Route megamodule event 'src' to mux output 'output'.  An out-of-range
 * src marks the output unmapped instead of touching the hardware.
 */
static void __init set_megamod_mux(struct megamod_pic *pic, int src, int output)
{
	int index, offset;
	u32 val;

	if (src < 0 || src >= (NR_COMBINERS * 32)) {
		pic->output_to_irq[output] = IRQ_UNMAPPED;
		return;
	}

	/* four mappings per mux register */
	index = output / 4;
	offset = (output & 3) * 8;

	val = soc_readl(&pic->regs->intmux[index]);
	val &= ~(0xff << offset);
	val |= src << offset;
	soc_writel(val, &pic->regs->intmux[index]);
}

/*
 * Parse the MUX mapping, if one exists.
 *
 * The MUX map is an array of up to 12 cells; one for each usable core priority
 * interrupt. The value of a given cell is the megamodule interrupt source
 * which is to be MUXed to the output corresponding to the cell position
 * within the array. The first cell in the array corresponds to priority
 * 4 and the last (12th) cell corresponds to priority 15. The allowed
 * values are 4 - ((NR_COMBINERS * 32) - 1). Note that the combined interrupt
 * sources (0 - 3) are not allowed to be mapped through this property. They
 * are handled through the "interrupts" property. This allows us to use a
 * value of zero as a "do not map" placeholder.
 */
static void __init parse_priority_map(struct megamod_pic *pic,
				      int *mapping, int size)
{
	struct device_node *np = irq_domain_get_of_node(pic->irqhost);
	const __be32 *map;
	int i, maplen;
	u32 val;

	map = of_get_property(np, "ti,c64x+megamod-pic-mux", &maplen);
	if (map) {
		maplen /= 4;
		if (maplen > size)
			maplen = size;

		for (i = 0; i < maplen; i++) {
			val = be32_to_cpup(map);
			/* zero and 1-3 are "do not map" placeholders */
			if (val && val >= 4)
				mapping[i] = val;
			++map;
		}
	}
}

/*
 * Probe and set up one megamodule PIC from its device-tree node:
 * allocate state, create the irq domain, map registers, parse the mux
 * map, hook up combiner cascades, and program the INTMUX registers.
 * Returns NULL on any failure.
 */
static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
{
	struct megamod_pic *pic;
	int i, irq;
	int mapping[NR_MUX_OUTPUTS];

	pr_info("Initializing C64x+ Megamodule PIC\n");

	pic = kzalloc(sizeof(struct megamod_pic), GFP_KERNEL);
	if (!pic) {
		pr_err("%pOF: Could not alloc PIC structure.\n", np);
		return NULL;
	}

	pic->irqhost = irq_domain_add_linear(np, NR_COMBINERS * 32,
					     &megamod_domain_ops, pic);
	if (!pic->irqhost) {
		pr_err("%pOF: Could not alloc host.\n", np);
		goto error_free;
	}

	pic->irqhost->host_data = pic;

	raw_spin_lock_init(&pic->lock);

	pic->regs = of_iomap(np, 0);
	if (!pic->regs) {
		pr_err("%pOF: Could not map registers.\n", np);
		goto error_free;
	}

	/* Initialize MUX map */
	for (i = 0; i < ARRAY_SIZE(mapping); i++)
		mapping[i] = IRQ_UNMAPPED;

	parse_priority_map(pic, mapping, ARRAY_SIZE(mapping));

	/*
	 * We can have up to 12 interrupts cascading to the core controller.
	 * These cascades can be from the combined interrupt sources or for
	 * individual interrupt sources. The "interrupts" property only
	 * deals with the cascaded combined interrupts. The individual
	 * interrupts muxed to the core controller use the core controller
	 * as their interrupt parent.
	 */
	for (i = 0; i < NR_COMBINERS; i++) {
		struct irq_data *irq_data;
		irq_hw_number_t hwirq;

		irq = irq_of_parse_and_map(np, i);
		if (irq == NO_IRQ)
			continue;

		irq_data = irq_get_irq_data(irq);
		if (!irq_data) {
			pr_err("%pOF: combiner-%d no irq_data for virq %d!\n",
			       np, i, irq);
			continue;
		}

		hwirq = irq_data->hwirq;

		/*
		 * Check that device tree provided something in the range
		 * of the core priority interrupts (4 - 15).
		 */
		if (hwirq < 4 || hwirq >= NR_PRIORITY_IRQS) {
			pr_err("%pOF: combiner-%d core irq %ld out of range!\n",
			       np, i, hwirq);
			continue;
		}

		/* record the mapping */
		mapping[hwirq - 4] = i;

		pr_debug("%pOF: combiner-%d cascading to hwirq %ld\n",
			 np, i, hwirq);

		cascade_data[i].pic = pic;
		cascade_data[i].index = i;

		/* mask and clear all events in combiner */
		soc_writel(~0, &pic->regs->evtmask[i]);
		soc_writel(~0, &pic->regs->evtclr[i]);

		irq_set_chained_handler_and_data(irq, megamod_irq_cascade,
						 &cascade_data[i]);
	}

	/* Finally, set up the MUX registers */
	for (i = 0; i < NR_MUX_OUTPUTS; i++) {
		if (mapping[i] != IRQ_UNMAPPED) {
			pr_debug("%pOF: setting mux %d to priority %d\n",
				 np, mapping[i], i + 4);
			set_megamod_mux(pic, mapping[i], i);
		}
	}

	return pic;

error_free:
	kfree(pic);

	return NULL;
}

/*
 * Return next active event after ACK'ing it.
 * Return -1 if no events active.
 */
static int get_exception(void)
{
	int i, bit;
	u32 mask;

	for (i = 0; i < NR_COMBINERS; i++) {
		mask = soc_readl(&mm_pic->regs->mexpflag[i]);
		if (mask) {
			bit = __ffs(mask);
			soc_writel(1 << bit, &mm_pic->regs->evtclr[i]);
			return (i * 32) + bit;
		}
	}
	return -1;
}

/* Software-assert a megamodule event by writing its number to EVTASRT. */
static void assert_event(unsigned int val)
{
	soc_writel(val, &mm_pic->regs->evtasrt);
}

/*
 * Boot-time entry point: find the DT node, initialize the PIC, and
 * install the SoC exception hooks.  Silently returns if no compatible
 * node exists.
 */
void __init megamod_pic_init(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "ti,c64x+megamod-pic");
	if (!np)
		return;

	mm_pic = init_megamod_pic(np);
	of_node_put(np);

	soc_ops.get_exception = get_exception;
	soc_ops.assert_event = assert_event;

	return;
}
gpl-2.0
greguu/linux-4.9-rc3-c3x00
drivers/watchdog/f71808e_wdt.c
208
22084
/*************************************************************************** * Copyright (C) 2006 by Hans Edgington <hans@edgington.nl> * * Copyright (C) 2007-2009 Hans de Goede <hdegoede@redhat.com> * * Copyright (C) 2010 Giel van Schijndel <me@mortis.eu> * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the * * Free Software Foundation, Inc., * * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * ***************************************************************************/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/err.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/notifier.h> #include <linux/reboot.h> #include <linux/uaccess.h> #include <linux/watchdog.h> #define DRVNAME "f71808e_wdt" #define SIO_F71808FG_LD_WDT 0x07 /* Watchdog timer logical device */ #define SIO_UNLOCK_KEY 0x87 /* Key to enable Super-I/O */ #define SIO_LOCK_KEY 0xAA /* Key to disable Super-I/O */ #define SIO_REG_LDSEL 0x07 /* Logical device select */ #define SIO_REG_DEVID 0x20 /* Device ID (2 bytes) */ #define SIO_REG_DEVREV 0x22 /* Device revision */ #define SIO_REG_MANID 0x23 /* Fintek ID (2 bytes) */ #define SIO_REG_ROM_ADDR_SEL 0x27 /* ROM address select */ #define SIO_F81866_REG_PORT_SEL 0x27 /* F81866 Multi-Function Register */ #define 
SIO_REG_MFUNCT1 0x29 /* Multi function select 1 */ #define SIO_REG_MFUNCT2 0x2a /* Multi function select 2 */ #define SIO_REG_MFUNCT3 0x2b /* Multi function select 3 */ #define SIO_F81866_REG_GPIO1 0x2c /* F81866 GPIO1 Enable Register */ #define SIO_REG_ENABLE 0x30 /* Logical device enable */ #define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */ #define SIO_FINTEK_ID 0x1934 /* Manufacturers ID */ #define SIO_F71808_ID 0x0901 /* Chipset ID */ #define SIO_F71858_ID 0x0507 /* Chipset ID */ #define SIO_F71862_ID 0x0601 /* Chipset ID */ #define SIO_F71869_ID 0x0814 /* Chipset ID */ #define SIO_F71869A_ID 0x1007 /* Chipset ID */ #define SIO_F71882_ID 0x0541 /* Chipset ID */ #define SIO_F71889_ID 0x0723 /* Chipset ID */ #define SIO_F81865_ID 0x0704 /* Chipset ID */ #define SIO_F81866_ID 0x1010 /* Chipset ID */ #define F71808FG_REG_WDO_CONF 0xf0 #define F71808FG_REG_WDT_CONF 0xf5 #define F71808FG_REG_WD_TIME 0xf6 #define F71808FG_FLAG_WDOUT_EN 7 #define F71808FG_FLAG_WDTMOUT_STS 6 #define F71808FG_FLAG_WD_EN 5 #define F71808FG_FLAG_WD_PULSE 4 #define F71808FG_FLAG_WD_UNIT 3 #define F81865_REG_WDO_CONF 0xfa #define F81865_FLAG_WDOUT_EN 0 /* Default values */ #define WATCHDOG_TIMEOUT 60 /* 1 minute default timeout */ #define WATCHDOG_MAX_TIMEOUT (60 * 255) #define WATCHDOG_PULSE_WIDTH 125 /* 125 ms, default pulse width for watchdog signal */ #define WATCHDOG_F71862FG_PIN 63 /* default watchdog reset output pin number 63 */ static unsigned short force_id; module_param(force_id, ushort, 0); MODULE_PARM_DESC(force_id, "Override the detected device ID"); static const int max_timeout = WATCHDOG_MAX_TIMEOUT; static int timeout = WATCHDOG_TIMEOUT; /* default timeout in seconds */ module_param(timeout, int, 0); MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. 
1<= timeout <=" __MODULE_STRING(WATCHDOG_MAX_TIMEOUT) " (default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")"); static unsigned int pulse_width = WATCHDOG_PULSE_WIDTH; module_param(pulse_width, uint, 0); MODULE_PARM_DESC(pulse_width, "Watchdog signal pulse width. 0(=level), 1 ms, 25 ms, 125 ms or 5000 ms" " (default=" __MODULE_STRING(WATCHDOG_PULSE_WIDTH) ")"); static unsigned int f71862fg_pin = WATCHDOG_F71862FG_PIN; module_param(f71862fg_pin, uint, 0); MODULE_PARM_DESC(f71862fg_pin, "Watchdog f71862fg reset output pin configuration. Choose pin 56 or 63" " (default=" __MODULE_STRING(WATCHDOG_F71862FG_PIN)")"); static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0444); MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close"); static unsigned int start_withtimeout; module_param(start_withtimeout, uint, 0); MODULE_PARM_DESC(start_withtimeout, "Start watchdog timer on module load with" " given initial timeout. Zero (default) disables this feature."); enum chips { f71808fg, f71858fg, f71862fg, f71869, f71882fg, f71889fg, f81865, f81866}; static const char *f71808e_names[] = { "f71808fg", "f71858fg", "f71862fg", "f71869", "f71882fg", "f71889fg", "f81865", "f81866", }; /* Super-I/O Function prototypes */ static inline int superio_inb(int base, int reg); static inline int superio_inw(int base, int reg); static inline void superio_outb(int base, int reg, u8 val); static inline void superio_set_bit(int base, int reg, int bit); static inline void superio_clear_bit(int base, int reg, int bit); static inline int superio_enter(int base); static inline void superio_select(int base, int ld); static inline void superio_exit(int base); struct watchdog_data { unsigned short sioaddr; enum chips type; unsigned long opened; struct mutex lock; char expect_close; struct watchdog_info ident; unsigned short timeout; u8 timer_val; /* content for the wd_time register */ char minutes_mode; u8 pulse_val; /* pulse width flag */ char pulse_mode; /* enable pulse output mode? 
*/ char caused_reboot; /* last reboot was by the watchdog */ }; static struct watchdog_data watchdog = { .lock = __MUTEX_INITIALIZER(watchdog.lock), }; /* Super I/O functions */ static inline int superio_inb(int base, int reg) { outb(reg, base); return inb(base + 1); } static int superio_inw(int base, int reg) { int val; val = superio_inb(base, reg) << 8; val |= superio_inb(base, reg + 1); return val; } static inline void superio_outb(int base, int reg, u8 val) { outb(reg, base); outb(val, base + 1); } static inline void superio_set_bit(int base, int reg, int bit) { unsigned long val = superio_inb(base, reg); __set_bit(bit, &val); superio_outb(base, reg, val); } static inline void superio_clear_bit(int base, int reg, int bit) { unsigned long val = superio_inb(base, reg); __clear_bit(bit, &val); superio_outb(base, reg, val); } static inline int superio_enter(int base) { /* Don't step on other drivers' I/O space by accident */ if (!request_muxed_region(base, 2, DRVNAME)) { pr_err("I/O address 0x%04x already in use\n", (int)base); return -EBUSY; } /* according to the datasheet the key must be sent twice! 
*/ outb(SIO_UNLOCK_KEY, base); outb(SIO_UNLOCK_KEY, base); return 0; } static inline void superio_select(int base, int ld) { outb(SIO_REG_LDSEL, base); outb(ld, base + 1); } static inline void superio_exit(int base) { outb(SIO_LOCK_KEY, base); release_region(base, 2); } static int watchdog_set_timeout(int timeout) { if (timeout <= 0 || timeout > max_timeout) { pr_err("watchdog timeout out of range\n"); return -EINVAL; } mutex_lock(&watchdog.lock); watchdog.timeout = timeout; if (timeout > 0xff) { watchdog.timer_val = DIV_ROUND_UP(timeout, 60); watchdog.minutes_mode = true; } else { watchdog.timer_val = timeout; watchdog.minutes_mode = false; } mutex_unlock(&watchdog.lock); return 0; } static int watchdog_set_pulse_width(unsigned int pw) { int err = 0; mutex_lock(&watchdog.lock); if (pw <= 1) { watchdog.pulse_val = 0; } else if (pw <= 25) { watchdog.pulse_val = 1; } else if (pw <= 125) { watchdog.pulse_val = 2; } else if (pw <= 5000) { watchdog.pulse_val = 3; } else { pr_err("pulse width out of range\n"); err = -EINVAL; goto exit_unlock; } watchdog.pulse_mode = pw; exit_unlock: mutex_unlock(&watchdog.lock); return err; } static int watchdog_keepalive(void) { int err = 0; mutex_lock(&watchdog.lock); err = superio_enter(watchdog.sioaddr); if (err) goto exit_unlock; superio_select(watchdog.sioaddr, SIO_F71808FG_LD_WDT); if (watchdog.minutes_mode) /* select minutes for timer units */ superio_set_bit(watchdog.sioaddr, F71808FG_REG_WDT_CONF, F71808FG_FLAG_WD_UNIT); else /* select seconds for timer units */ superio_clear_bit(watchdog.sioaddr, F71808FG_REG_WDT_CONF, F71808FG_FLAG_WD_UNIT); /* Set timer value */ superio_outb(watchdog.sioaddr, F71808FG_REG_WD_TIME, watchdog.timer_val); superio_exit(watchdog.sioaddr); exit_unlock: mutex_unlock(&watchdog.lock); return err; } static int f71862fg_pin_configure(unsigned short ioaddr) { /* When ioaddr is non-zero the calling function has to take care of mutex handling and superio preparation! 
*/ if (f71862fg_pin == 63) { if (ioaddr) { /* SPI must be disabled first to use this pin! */ superio_clear_bit(ioaddr, SIO_REG_ROM_ADDR_SEL, 6); superio_set_bit(ioaddr, SIO_REG_MFUNCT3, 4); } } else if (f71862fg_pin == 56) { if (ioaddr) superio_set_bit(ioaddr, SIO_REG_MFUNCT1, 1); } else { pr_err("Invalid argument f71862fg_pin=%d\n", f71862fg_pin); return -EINVAL; } return 0; } static int watchdog_start(void) { /* Make sure we don't die as soon as the watchdog is enabled below */ int err = watchdog_keepalive(); if (err) return err; mutex_lock(&watchdog.lock); err = superio_enter(watchdog.sioaddr); if (err) goto exit_unlock; superio_select(watchdog.sioaddr, SIO_F71808FG_LD_WDT); /* Watchdog pin configuration */ switch (watchdog.type) { case f71808fg: /* Set pin 21 to GPIO23/WDTRST#, then to WDTRST# */ superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT2, 3); superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT3, 3); break; case f71862fg: err = f71862fg_pin_configure(watchdog.sioaddr); if (err) goto exit_superio; break; case f71869: /* GPIO14 --> WDTRST# */ superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT1, 4); break; case f71882fg: /* Set pin 56 to WDTRST# */ superio_set_bit(watchdog.sioaddr, SIO_REG_MFUNCT1, 1); break; case f71889fg: /* set pin 40 to WDTRST# */ superio_outb(watchdog.sioaddr, SIO_REG_MFUNCT3, superio_inb(watchdog.sioaddr, SIO_REG_MFUNCT3) & 0xcf); break; case f81865: /* Set pin 70 to WDTRST# */ superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT3, 5); break; case f81866: /* Set pin 70 to WDTRST# */ superio_clear_bit(watchdog.sioaddr, SIO_F81866_REG_PORT_SEL, BIT(3) | BIT(0)); superio_set_bit(watchdog.sioaddr, SIO_F81866_REG_PORT_SEL, BIT(2)); /* * GPIO1 Control Register when 27h BIT3:2 = 01 & BIT0 = 0. 
* The PIN 70(GPIO15/WDTRST) is controlled by 2Ch: * BIT5: 0 -> WDTRST# * 1 -> GPIO15 */ superio_clear_bit(watchdog.sioaddr, SIO_F81866_REG_GPIO1, BIT(5)); break; default: /* * 'default' label to shut up the compiler and catch * programmer errors */ err = -ENODEV; goto exit_superio; } superio_select(watchdog.sioaddr, SIO_F71808FG_LD_WDT); superio_set_bit(watchdog.sioaddr, SIO_REG_ENABLE, 0); if (watchdog.type == f81865 || watchdog.type == f81866) superio_set_bit(watchdog.sioaddr, F81865_REG_WDO_CONF, F81865_FLAG_WDOUT_EN); else superio_set_bit(watchdog.sioaddr, F71808FG_REG_WDO_CONF, F71808FG_FLAG_WDOUT_EN); superio_set_bit(watchdog.sioaddr, F71808FG_REG_WDT_CONF, F71808FG_FLAG_WD_EN); if (watchdog.pulse_mode) { /* Select "pulse" output mode with given duration */ u8 wdt_conf = superio_inb(watchdog.sioaddr, F71808FG_REG_WDT_CONF); /* Set WD_PSWIDTH bits (1:0) */ wdt_conf = (wdt_conf & 0xfc) | (watchdog.pulse_val & 0x03); /* Set WD_PULSE to "pulse" mode */ wdt_conf |= BIT(F71808FG_FLAG_WD_PULSE); superio_outb(watchdog.sioaddr, F71808FG_REG_WDT_CONF, wdt_conf); } else { /* Select "level" output mode */ superio_clear_bit(watchdog.sioaddr, F71808FG_REG_WDT_CONF, F71808FG_FLAG_WD_PULSE); } exit_superio: superio_exit(watchdog.sioaddr); exit_unlock: mutex_unlock(&watchdog.lock); return err; } static int watchdog_stop(void) { int err = 0; mutex_lock(&watchdog.lock); err = superio_enter(watchdog.sioaddr); if (err) goto exit_unlock; superio_select(watchdog.sioaddr, SIO_F71808FG_LD_WDT); superio_clear_bit(watchdog.sioaddr, F71808FG_REG_WDT_CONF, F71808FG_FLAG_WD_EN); superio_exit(watchdog.sioaddr); exit_unlock: mutex_unlock(&watchdog.lock); return err; } static int watchdog_get_status(void) { int status = 0; mutex_lock(&watchdog.lock); status = (watchdog.caused_reboot) ? 
WDIOF_CARDRESET : 0; mutex_unlock(&watchdog.lock); return status; } static bool watchdog_is_running(void) { /* * if we fail to determine the watchdog's status assume it to be * running to be on the safe side */ bool is_running = true; mutex_lock(&watchdog.lock); if (superio_enter(watchdog.sioaddr)) goto exit_unlock; superio_select(watchdog.sioaddr, SIO_F71808FG_LD_WDT); is_running = (superio_inb(watchdog.sioaddr, SIO_REG_ENABLE) & BIT(0)) && (superio_inb(watchdog.sioaddr, F71808FG_REG_WDT_CONF) & F71808FG_FLAG_WD_EN); superio_exit(watchdog.sioaddr); exit_unlock: mutex_unlock(&watchdog.lock); return is_running; } /* /dev/watchdog api */ static int watchdog_open(struct inode *inode, struct file *file) { int err; /* If the watchdog is alive we don't need to start it again */ if (test_and_set_bit(0, &watchdog.opened)) return -EBUSY; err = watchdog_start(); if (err) { clear_bit(0, &watchdog.opened); return err; } if (nowayout) __module_get(THIS_MODULE); watchdog.expect_close = 0; return nonseekable_open(inode, file); } static int watchdog_release(struct inode *inode, struct file *file) { clear_bit(0, &watchdog.opened); if (!watchdog.expect_close) { watchdog_keepalive(); pr_crit("Unexpected close, not stopping watchdog!\n"); } else if (!nowayout) { watchdog_stop(); } return 0; } /* * watchdog_write: * @file: file handle to the watchdog * @buf: buffer to write * @count: count of bytes * @ppos: pointer to the position to write. No seeks allowed * * A write to a watchdog device is defined as a keepalive signal. Any * write of data will do, as we we don't define content meaning. 
*/ static ssize_t watchdog_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { if (count) { if (!nowayout) { size_t i; /* In case it was set long ago */ bool expect_close = false; for (i = 0; i != count; i++) { char c; if (get_user(c, buf + i)) return -EFAULT; expect_close = (c == 'V'); } /* Properly order writes across fork()ed processes */ mutex_lock(&watchdog.lock); watchdog.expect_close = expect_close; mutex_unlock(&watchdog.lock); } /* someone wrote to us, we should restart timer */ watchdog_keepalive(); } return count; } /* * watchdog_ioctl: * @inode: inode of the device * @file: file handle to the device * @cmd: watchdog command * @arg: argument pointer * * The watchdog API defines a common set of functions for all watchdogs * according to their available features. */ static long watchdog_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int status; int new_options; int new_timeout; union { struct watchdog_info __user *ident; int __user *i; } uarg; uarg.i = (int __user *)arg; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(uarg.ident, &watchdog.ident, sizeof(watchdog.ident)) ? 
-EFAULT : 0; case WDIOC_GETSTATUS: status = watchdog_get_status(); if (status < 0) return status; return put_user(status, uarg.i); case WDIOC_GETBOOTSTATUS: return put_user(0, uarg.i); case WDIOC_SETOPTIONS: if (get_user(new_options, uarg.i)) return -EFAULT; if (new_options & WDIOS_DISABLECARD) watchdog_stop(); if (new_options & WDIOS_ENABLECARD) return watchdog_start(); case WDIOC_KEEPALIVE: watchdog_keepalive(); return 0; case WDIOC_SETTIMEOUT: if (get_user(new_timeout, uarg.i)) return -EFAULT; if (watchdog_set_timeout(new_timeout)) return -EINVAL; watchdog_keepalive(); /* Fall */ case WDIOC_GETTIMEOUT: return put_user(watchdog.timeout, uarg.i); default: return -ENOTTY; } } static int watchdog_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) watchdog_stop(); return NOTIFY_DONE; } static const struct file_operations watchdog_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .open = watchdog_open, .release = watchdog_release, .write = watchdog_write, .unlocked_ioctl = watchdog_ioctl, }; static struct miscdevice watchdog_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &watchdog_fops, }; static struct notifier_block watchdog_notifier = { .notifier_call = watchdog_notify_sys, }; static int __init watchdog_init(int sioaddr) { int wdt_conf, err = 0; /* No need to lock watchdog.lock here because no entry points * into the module have been registered yet. 
*/ watchdog.sioaddr = sioaddr; watchdog.ident.options = WDIOC_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING; snprintf(watchdog.ident.identity, sizeof(watchdog.ident.identity), "%s watchdog", f71808e_names[watchdog.type]); err = superio_enter(sioaddr); if (err) return err; superio_select(watchdog.sioaddr, SIO_F71808FG_LD_WDT); wdt_conf = superio_inb(sioaddr, F71808FG_REG_WDT_CONF); watchdog.caused_reboot = wdt_conf & BIT(F71808FG_FLAG_WDTMOUT_STS); superio_exit(sioaddr); err = watchdog_set_timeout(timeout); if (err) return err; err = watchdog_set_pulse_width(pulse_width); if (err) return err; err = register_reboot_notifier(&watchdog_notifier); if (err) return err; err = misc_register(&watchdog_miscdev); if (err) { pr_err("cannot register miscdev on minor=%d\n", watchdog_miscdev.minor); goto exit_reboot; } if (start_withtimeout) { if (start_withtimeout <= 0 || start_withtimeout > max_timeout) { pr_err("starting timeout out of range\n"); err = -EINVAL; goto exit_miscdev; } err = watchdog_start(); if (err) { pr_err("cannot start watchdog timer\n"); goto exit_miscdev; } mutex_lock(&watchdog.lock); err = superio_enter(sioaddr); if (err) goto exit_unlock; superio_select(watchdog.sioaddr, SIO_F71808FG_LD_WDT); if (start_withtimeout > 0xff) { /* select minutes for timer units */ superio_set_bit(sioaddr, F71808FG_REG_WDT_CONF, F71808FG_FLAG_WD_UNIT); superio_outb(sioaddr, F71808FG_REG_WD_TIME, DIV_ROUND_UP(start_withtimeout, 60)); } else { /* select seconds for timer units */ superio_clear_bit(sioaddr, F71808FG_REG_WDT_CONF, F71808FG_FLAG_WD_UNIT); superio_outb(sioaddr, F71808FG_REG_WD_TIME, start_withtimeout); } superio_exit(sioaddr); mutex_unlock(&watchdog.lock); if (nowayout) __module_get(THIS_MODULE); pr_info("watchdog started with initial timeout of %u sec\n", start_withtimeout); } return 0; exit_unlock: mutex_unlock(&watchdog.lock); exit_miscdev: misc_deregister(&watchdog_miscdev); exit_reboot: unregister_reboot_notifier(&watchdog_notifier); return err; } static 
/*
 * Probe the Super-I/O chip at config port @sioaddr and record the detected
 * watchdog variant in the file-scope "watchdog" state.
 *
 * Returns 0 on success, the error from superio_enter() if config mode
 * cannot be entered, or -ENODEV for non-Fintek / unrecognized / known
 * watchdog-less chips.  (The leading "static" for this definition sits on
 * the preceding line.)
 */
int __init f71808e_find(int sioaddr)
{
	u16 devid;
	int err = superio_enter(sioaddr);
	if (err)
		return err;

	/* Manufacturer ID must identify Fintek before the device ID is read. */
	devid = superio_inw(sioaddr, SIO_REG_MANID);
	if (devid != SIO_FINTEK_ID) {
		pr_debug("Not a Fintek device\n");
		err = -ENODEV;
		goto exit;
	}

	/* A non-zero force_id module parameter overrides the probed ID. */
	devid = force_id ? force_id : superio_inw(sioaddr, SIO_REG_DEVID);
	switch (devid) {
	case SIO_F71808_ID:
		watchdog.type = f71808fg;
		break;
	case SIO_F71862_ID:
		watchdog.type = f71862fg;
		err = f71862fg_pin_configure(0); /* validate module parameter */
		break;
	case SIO_F71869_ID:
	case SIO_F71869A_ID:
		watchdog.type = f71869;
		break;
	case SIO_F71882_ID:
		watchdog.type = f71882fg;
		break;
	case SIO_F71889_ID:
		watchdog.type = f71889fg;
		break;
	case SIO_F71858_ID:
		/* Confirmed (by datasheet) not to have a watchdog. */
		err = -ENODEV;
		goto exit;
	case SIO_F81865_ID:
		watchdog.type = f81865;
		break;
	case SIO_F81866_ID:
		watchdog.type = f81866;
		break;
	default:
		pr_info("Unrecognized Fintek device: %04x\n",
			(unsigned int)devid);
		err = -ENODEV;
		goto exit;
	}

	/*
	 * NOTE(review): for F71862 a failed f71862fg_pin_configure() still
	 * falls through to this "Found ..." message before err is returned.
	 */
	pr_info("Found %s watchdog chip, revision %d\n",
		f71808e_names[watchdog.type],
		(int)superio_inb(sioaddr, SIO_REG_DEVREV));
exit:
	superio_exit(sioaddr);
	return err;
}

/*
 * Module entry point: try both standard Super-I/O config ports (0x2e,
 * 0x4e) and initialize the watchdog on the first port that answers with a
 * supported chip.  Returns the last probe error if neither port matches.
 */
static int __init f71808e_init(void)
{
	static const unsigned short addrs[] = { 0x2e, 0x4e };
	int err = -ENODEV;
	int i;

	for (i = 0; i < ARRAY_SIZE(addrs); i++) {
		err = f71808e_find(addrs[i]);
		if (err == 0)
			break;
	}
	if (i == ARRAY_SIZE(addrs))
		return err;

	return watchdog_init(addrs[i]);
}

/*
 * Module exit: stop a still-running timer first (so the machine is not
 * reset after the driver is gone), then tear down the misc device and the
 * reboot notifier registered at init time.
 */
static void __exit f71808e_exit(void)
{
	if (watchdog_is_running()) {
		pr_warn("Watchdog timer still running, stopping it\n");
		watchdog_stop();
	}
	misc_deregister(&watchdog_miscdev);
	unregister_reboot_notifier(&watchdog_notifier);
}

MODULE_DESCRIPTION("F71808E Watchdog Driver");
MODULE_AUTHOR("Giel van Schijndel <me@mortis.eu>");
MODULE_LICENSE("GPL");

module_init(f71808e_init);
module_exit(f71808e_exit);
gpl-2.0
mirror-androidarmv6/android_kernel_huawei_msm7x25
drivers/net/wireless/mwl8k.c
464
74363
/* * drivers/net/wireless/mwl8k.c * Driver for Marvell TOPDOG 802.11 Wireless cards * * Copyright (C) 2008-2009 Marvell Semiconductor Inc. * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/list.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/completion.h> #include <linux/etherdevice.h> #include <net/mac80211.h> #include <linux/moduleparam.h> #include <linux/firmware.h> #include <linux/workqueue.h> #define MWL8K_DESC "Marvell TOPDOG(R) 802.11 Wireless Network Driver" #define MWL8K_NAME KBUILD_MODNAME #define MWL8K_VERSION "0.10" MODULE_DESCRIPTION(MWL8K_DESC); MODULE_VERSION(MWL8K_VERSION); MODULE_AUTHOR("Lennert Buytenhek <buytenh@marvell.com>"); MODULE_LICENSE("GPL"); static DEFINE_PCI_DEVICE_TABLE(mwl8k_table) = { { PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = 8687, }, { PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = 8687, }, { } }; MODULE_DEVICE_TABLE(pci, mwl8k_table); /* Register definitions */ #define MWL8K_HIU_GEN_PTR 0x00000c10 #define MWL8K_MODE_STA 0x0000005a #define MWL8K_MODE_AP 0x000000a5 #define MWL8K_HIU_INT_CODE 0x00000c14 #define MWL8K_FWSTA_READY 0xf0f1f2f4 #define MWL8K_FWAP_READY 0xf1f2f4a5 #define MWL8K_INT_CODE_CMD_FINISHED 0x00000005 #define MWL8K_HIU_SCRATCH 0x00000c40 /* Host->device communications */ #define MWL8K_HIU_H2A_INTERRUPT_EVENTS 0x00000c18 #define MWL8K_HIU_H2A_INTERRUPT_STATUS 0x00000c1c #define MWL8K_HIU_H2A_INTERRUPT_MASK 0x00000c20 #define MWL8K_HIU_H2A_INTERRUPT_CLEAR_SEL 0x00000c24 #define MWL8K_HIU_H2A_INTERRUPT_STATUS_MASK 0x00000c28 #define MWL8K_H2A_INT_DUMMY (1 << 20) #define MWL8K_H2A_INT_RESET (1 << 15) #define MWL8K_H2A_INT_DOORBELL (1 << 1) #define MWL8K_H2A_INT_PPA_READY (1 << 0) /* Device->host communications */ #define 
MWL8K_HIU_A2H_INTERRUPT_EVENTS 0x00000c2c #define MWL8K_HIU_A2H_INTERRUPT_STATUS 0x00000c30 #define MWL8K_HIU_A2H_INTERRUPT_MASK 0x00000c34 #define MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL 0x00000c38 #define MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK 0x00000c3c #define MWL8K_A2H_INT_DUMMY (1 << 20) #define MWL8K_A2H_INT_CHNL_SWITCHED (1 << 11) #define MWL8K_A2H_INT_QUEUE_EMPTY (1 << 10) #define MWL8K_A2H_INT_RADAR_DETECT (1 << 7) #define MWL8K_A2H_INT_RADIO_ON (1 << 6) #define MWL8K_A2H_INT_RADIO_OFF (1 << 5) #define MWL8K_A2H_INT_MAC_EVENT (1 << 3) #define MWL8K_A2H_INT_OPC_DONE (1 << 2) #define MWL8K_A2H_INT_RX_READY (1 << 1) #define MWL8K_A2H_INT_TX_DONE (1 << 0) #define MWL8K_A2H_EVENTS (MWL8K_A2H_INT_DUMMY | \ MWL8K_A2H_INT_CHNL_SWITCHED | \ MWL8K_A2H_INT_QUEUE_EMPTY | \ MWL8K_A2H_INT_RADAR_DETECT | \ MWL8K_A2H_INT_RADIO_ON | \ MWL8K_A2H_INT_RADIO_OFF | \ MWL8K_A2H_INT_MAC_EVENT | \ MWL8K_A2H_INT_OPC_DONE | \ MWL8K_A2H_INT_RX_READY | \ MWL8K_A2H_INT_TX_DONE) /* WME stream classes */ #define WME_AC_BE 0 /* best effort */ #define WME_AC_BK 1 /* background */ #define WME_AC_VI 2 /* video */ #define WME_AC_VO 3 /* voice */ #define MWL8K_RX_QUEUES 1 #define MWL8K_TX_QUEUES 4 struct mwl8k_rx_queue { int rx_desc_count; /* hw receives here */ int rx_head; /* refill descs here */ int rx_tail; struct mwl8k_rx_desc *rx_desc_area; dma_addr_t rx_desc_dma; struct sk_buff **rx_skb; }; struct mwl8k_tx_queue { /* hw transmits here */ int tx_head; /* sw appends here */ int tx_tail; struct ieee80211_tx_queue_stats tx_stats; struct mwl8k_tx_desc *tx_desc_area; dma_addr_t tx_desc_dma; struct sk_buff **tx_skb; }; /* Pointers to the firmware data and meta information about it. 
*/ struct mwl8k_firmware { /* Microcode */ struct firmware *ucode; /* Boot helper code */ struct firmware *helper; }; struct mwl8k_priv { void __iomem *regs; struct ieee80211_hw *hw; struct pci_dev *pdev; u8 name[16]; /* firmware files and meta data */ struct mwl8k_firmware fw; u32 part_num; /* firmware access */ struct mutex fw_mutex; struct task_struct *fw_mutex_owner; int fw_mutex_depth; struct completion *tx_wait; struct completion *hostcmd_wait; /* lock held over TX and TX reap */ spinlock_t tx_lock; struct ieee80211_vif *vif; struct ieee80211_channel *current_channel; /* power management status cookie from firmware */ u32 *cookie; dma_addr_t cookie_dma; u16 num_mcaddrs; u8 hw_rev; u32 fw_rev; /* * Running count of TX packets in flight, to avoid * iterating over the transmit rings each time. */ int pending_tx_pkts; struct mwl8k_rx_queue rxq[MWL8K_RX_QUEUES]; struct mwl8k_tx_queue txq[MWL8K_TX_QUEUES]; /* PHY parameters */ struct ieee80211_supported_band band; struct ieee80211_channel channels[14]; struct ieee80211_rate rates[12]; bool radio_on; bool radio_short_preamble; bool wmm_enabled; /* XXX need to convert this to handle multiple interfaces */ bool capture_beacon; u8 capture_bssid[ETH_ALEN]; struct sk_buff *beacon_skb; /* * This FJ worker has to be global as it is scheduled from the * RX handler. At this point we don't know which interface it * belongs to until the list of bssids waiting to complete join * is checked. 
*/ struct work_struct finalize_join_worker; /* Tasklet to reclaim TX descriptors and buffers after tx */ struct tasklet_struct tx_reclaim_task; /* Work thread to serialize configuration requests */ struct workqueue_struct *config_wq; }; /* Per interface specific private data */ struct mwl8k_vif { /* backpointer to parent config block */ struct mwl8k_priv *priv; /* BSS config of AP or IBSS from mac80211*/ struct ieee80211_bss_conf bss_info; /* BSSID of AP or IBSS */ u8 bssid[ETH_ALEN]; u8 mac_addr[ETH_ALEN]; /* * Subset of supported legacy rates. * Intersection of AP and STA supported rates. */ struct ieee80211_rate legacy_rates[12]; /* number of supported legacy rates */ u8 legacy_nrates; /* Index into station database.Returned by update_sta_db call */ u8 peer_id; /* Non AMPDU sequence number assigned by driver */ u16 seqno; }; #define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv)) static const struct ieee80211_channel mwl8k_channels[] = { { .center_freq = 2412, .hw_value = 1, }, { .center_freq = 2417, .hw_value = 2, }, { .center_freq = 2422, .hw_value = 3, }, { .center_freq = 2427, .hw_value = 4, }, { .center_freq = 2432, .hw_value = 5, }, { .center_freq = 2437, .hw_value = 6, }, { .center_freq = 2442, .hw_value = 7, }, { .center_freq = 2447, .hw_value = 8, }, { .center_freq = 2452, .hw_value = 9, }, { .center_freq = 2457, .hw_value = 10, }, { .center_freq = 2462, .hw_value = 11, }, }; static const struct ieee80211_rate mwl8k_rates[] = { { .bitrate = 10, .hw_value = 2, }, { .bitrate = 20, .hw_value = 4, }, { .bitrate = 55, .hw_value = 11, }, { .bitrate = 60, .hw_value = 12, }, { .bitrate = 90, .hw_value = 18, }, { .bitrate = 110, .hw_value = 22, }, { .bitrate = 120, .hw_value = 24, }, { .bitrate = 180, .hw_value = 36, }, { .bitrate = 240, .hw_value = 48, }, { .bitrate = 360, .hw_value = 72, }, { .bitrate = 480, .hw_value = 96, }, { .bitrate = 540, .hw_value = 108, }, }; /* Set or get info from Firmware */ #define MWL8K_CMD_SET 0x0001 #define 
MWL8K_CMD_GET 0x0000 /* Firmware command codes */ #define MWL8K_CMD_CODE_DNLD 0x0001 #define MWL8K_CMD_GET_HW_SPEC 0x0003 #define MWL8K_CMD_MAC_MULTICAST_ADR 0x0010 #define MWL8K_CMD_GET_STAT 0x0014 #define MWL8K_CMD_RADIO_CONTROL 0x001c #define MWL8K_CMD_RF_TX_POWER 0x001e #define MWL8K_CMD_SET_PRE_SCAN 0x0107 #define MWL8K_CMD_SET_POST_SCAN 0x0108 #define MWL8K_CMD_SET_RF_CHANNEL 0x010a #define MWL8K_CMD_SET_AID 0x010d #define MWL8K_CMD_SET_RATE 0x0110 #define MWL8K_CMD_SET_FINALIZE_JOIN 0x0111 #define MWL8K_CMD_RTS_THRESHOLD 0x0113 #define MWL8K_CMD_SET_SLOT 0x0114 #define MWL8K_CMD_SET_EDCA_PARAMS 0x0115 #define MWL8K_CMD_SET_WMM_MODE 0x0123 #define MWL8K_CMD_MIMO_CONFIG 0x0125 #define MWL8K_CMD_USE_FIXED_RATE 0x0126 #define MWL8K_CMD_ENABLE_SNIFFER 0x0150 #define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203 #define MWL8K_CMD_UPDATE_STADB 0x1123 static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize) { #define MWL8K_CMDNAME(x) case MWL8K_CMD_##x: do {\ snprintf(buf, bufsize, "%s", #x);\ return buf;\ } while (0) switch (cmd & ~0x8000) { MWL8K_CMDNAME(CODE_DNLD); MWL8K_CMDNAME(GET_HW_SPEC); MWL8K_CMDNAME(MAC_MULTICAST_ADR); MWL8K_CMDNAME(GET_STAT); MWL8K_CMDNAME(RADIO_CONTROL); MWL8K_CMDNAME(RF_TX_POWER); MWL8K_CMDNAME(SET_PRE_SCAN); MWL8K_CMDNAME(SET_POST_SCAN); MWL8K_CMDNAME(SET_RF_CHANNEL); MWL8K_CMDNAME(SET_AID); MWL8K_CMDNAME(SET_RATE); MWL8K_CMDNAME(SET_FINALIZE_JOIN); MWL8K_CMDNAME(RTS_THRESHOLD); MWL8K_CMDNAME(SET_SLOT); MWL8K_CMDNAME(SET_EDCA_PARAMS); MWL8K_CMDNAME(SET_WMM_MODE); MWL8K_CMDNAME(MIMO_CONFIG); MWL8K_CMDNAME(USE_FIXED_RATE); MWL8K_CMDNAME(ENABLE_SNIFFER); MWL8K_CMDNAME(SET_RATEADAPT_MODE); MWL8K_CMDNAME(UPDATE_STADB); default: snprintf(buf, bufsize, "0x%x", cmd); } #undef MWL8K_CMDNAME return buf; } /* Hardware and firmware reset */ static void mwl8k_hw_reset(struct mwl8k_priv *priv) { iowrite32(MWL8K_H2A_INT_RESET, priv->regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS); iowrite32(MWL8K_H2A_INT_RESET, priv->regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS); 
msleep(20); /* tail of mwl8k_hw_reset(): let the reset settle */
}

/*
 * Release fw image.  Frees @*fw via release_firmware() and NULLs the
 * pointer so that a second release is a harmless no-op.
 */
static void mwl8k_release_fw(struct firmware **fw)
{
	if (*fw == NULL)
		return;
	release_firmware(*fw);
	*fw = NULL;
}

/* Drop both held firmware images (microcode and boot helper). */
static void mwl8k_release_firmware(struct mwl8k_priv *priv)
{
	mwl8k_release_fw(&priv->fw.ucode);
	mwl8k_release_fw(&priv->fw.helper);
}

/*
 * Request fw image @fname from userspace; any image previously held in
 * @*fw is released first.  Returns the request_firmware() result (0 on
 * success); on success *fw owns the new image.
 */
static int mwl8k_request_fw(struct mwl8k_priv *priv,
				const char *fname, struct firmware **fw)
{
	/* release current image */
	if (*fw != NULL)
		mwl8k_release_fw(fw);

	return request_firmware((const struct firmware **)fw,
						fname, &priv->pdev->dev);
}

/*
 * Fetch both firmware images for part @part_num: "mwl8k/helper_<N>.fw"
 * then "mwl8k/fmimage_<N>.fw".  If the second request fails, the
 * already-loaded helper image is released again.  Returns 0 on success.
 *
 * NOTE(review): filename is u8[] but snprintf() takes char * — works, but
 * may trigger a pointer-signedness warning depending on build flags.
 */
static int mwl8k_request_firmware(struct mwl8k_priv *priv, u32 part_num)
{
	u8 filename[64];
	int rc;

	priv->part_num = part_num;

	snprintf(filename, sizeof(filename),
		 "mwl8k/helper_%u.fw", priv->part_num);
	rc = mwl8k_request_fw(priv, filename, &priv->fw.helper);
	if (rc) {
		printk(KERN_ERR
			"%s Error requesting helper firmware file %s\n",
			pci_name(priv->pdev), filename);
		return rc;
	}

	snprintf(filename, sizeof(filename),
		 "mwl8k/fmimage_%u.fw", priv->part_num);
	rc = mwl8k_request_fw(priv, filename, &priv->fw.ucode);
	if (rc) {
		printk(KERN_ERR "%s Error requesting firmware file %s\n",
			pci_name(priv->pdev), filename);
		mwl8k_release_fw(&priv->fw.helper);
		return rc;
	}

	return 0;
}

/*
 * Host command packet header, shared with the firmware; a variable-length
 * command-specific payload follows the fixed fields.
 */
struct mwl8k_cmd_pkt {
	__le16 code;
	__le16 length;
	__le16 seq_num;
	__le16 result;
	char payload[0];
} __attribute__((packed));

/*
 * Firmware loading.
*/ static int mwl8k_send_fw_load_cmd(struct mwl8k_priv *priv, void *data, int length) { void __iomem *regs = priv->regs; dma_addr_t dma_addr; int loops; dma_addr = pci_map_single(priv->pdev, data, length, PCI_DMA_TODEVICE); if (pci_dma_mapping_error(priv->pdev, dma_addr)) return -ENOMEM; iowrite32(dma_addr, regs + MWL8K_HIU_GEN_PTR); iowrite32(0, regs + MWL8K_HIU_INT_CODE); iowrite32(MWL8K_H2A_INT_DOORBELL, regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS); iowrite32(MWL8K_H2A_INT_DUMMY, regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS); loops = 1000; do { u32 int_code; int_code = ioread32(regs + MWL8K_HIU_INT_CODE); if (int_code == MWL8K_INT_CODE_CMD_FINISHED) { iowrite32(0, regs + MWL8K_HIU_INT_CODE); break; } udelay(1); } while (--loops); pci_unmap_single(priv->pdev, dma_addr, length, PCI_DMA_TODEVICE); return loops ? 0 : -ETIMEDOUT; } static int mwl8k_load_fw_image(struct mwl8k_priv *priv, const u8 *data, size_t length) { struct mwl8k_cmd_pkt *cmd; int done; int rc = 0; cmd = kmalloc(sizeof(*cmd) + 256, GFP_KERNEL); if (cmd == NULL) return -ENOMEM; cmd->code = cpu_to_le16(MWL8K_CMD_CODE_DNLD); cmd->seq_num = 0; cmd->result = 0; done = 0; while (length) { int block_size = length > 256 ? 
256 : length; memcpy(cmd->payload, data + done, block_size); cmd->length = cpu_to_le16(block_size); rc = mwl8k_send_fw_load_cmd(priv, cmd, sizeof(*cmd) + block_size); if (rc) break; done += block_size; length -= block_size; } if (!rc) { cmd->length = 0; rc = mwl8k_send_fw_load_cmd(priv, cmd, sizeof(*cmd)); } kfree(cmd); return rc; } static int mwl8k_feed_fw_image(struct mwl8k_priv *priv, const u8 *data, size_t length) { unsigned char *buffer; int may_continue, rc = 0; u32 done, prev_block_size; buffer = kmalloc(1024, GFP_KERNEL); if (buffer == NULL) return -ENOMEM; done = 0; prev_block_size = 0; may_continue = 1000; while (may_continue > 0) { u32 block_size; block_size = ioread32(priv->regs + MWL8K_HIU_SCRATCH); if (block_size & 1) { block_size &= ~1; may_continue--; } else { done += prev_block_size; length -= prev_block_size; } if (block_size > 1024 || block_size > length) { rc = -EOVERFLOW; break; } if (length == 0) { rc = 0; break; } if (block_size == 0) { rc = -EPROTO; may_continue--; udelay(1); continue; } prev_block_size = block_size; memcpy(buffer, data + done, block_size); rc = mwl8k_send_fw_load_cmd(priv, buffer, block_size); if (rc) break; } if (!rc && length != 0) rc = -EREMOTEIO; kfree(buffer); return rc; } static int mwl8k_load_firmware(struct mwl8k_priv *priv) { int loops, rc; const u8 *ucode = priv->fw.ucode->data; size_t ucode_len = priv->fw.ucode->size; const u8 *helper = priv->fw.helper->data; size_t helper_len = priv->fw.helper->size; if (!memcmp(ucode, "\x01\x00\x00\x00", 4)) { rc = mwl8k_load_fw_image(priv, helper, helper_len); if (rc) { printk(KERN_ERR "%s: unable to load firmware " "helper image\n", pci_name(priv->pdev)); return rc; } msleep(1); rc = mwl8k_feed_fw_image(priv, ucode, ucode_len); } else { rc = mwl8k_load_fw_image(priv, ucode, ucode_len); } if (rc) { printk(KERN_ERR "%s: unable to load firmware data\n", pci_name(priv->pdev)); return rc; } iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR); msleep(1); loops = 200000; do { 
if (ioread32(priv->regs + MWL8K_HIU_INT_CODE) == MWL8K_FWSTA_READY) break; udelay(1); } while (--loops); return loops ? 0 : -ETIMEDOUT; } /* * Defines shared between transmission and reception. */ /* HT control fields for firmware */ struct ewc_ht_info { __le16 control1; __le16 control2; __le16 control3; } __attribute__((packed)); /* Firmware Station database operations */ #define MWL8K_STA_DB_ADD_ENTRY 0 #define MWL8K_STA_DB_MODIFY_ENTRY 1 #define MWL8K_STA_DB_DEL_ENTRY 2 #define MWL8K_STA_DB_FLUSH 3 /* Peer Entry flags - used to define the type of the peer node */ #define MWL8K_PEER_TYPE_ACCESSPOINT 2 #define MWL8K_IEEE_LEGACY_DATA_RATES 12 #define MWL8K_MCS_BITMAP_SIZE 16 struct peer_capability_info { /* Peer type - AP vs. STA. */ __u8 peer_type; /* Basic 802.11 capabilities from assoc resp. */ __le16 basic_caps; /* Set if peer supports 802.11n high throughput (HT). */ __u8 ht_support; /* Valid if HT is supported. */ __le16 ht_caps; __u8 extended_ht_caps; struct ewc_ht_info ewc_info; /* Legacy rate table. Intersection of our rates and peer rates. */ __u8 legacy_rates[MWL8K_IEEE_LEGACY_DATA_RATES]; /* HT rate table. Intersection of our rates and peer rates. */ __u8 ht_rates[MWL8K_MCS_BITMAP_SIZE]; __u8 pad[16]; /* If set, interoperability mode, no proprietary extensions. */ __u8 interop; __u8 pad2; __u8 station_id; __le16 amsdu_enabled; } __attribute__((packed)); /* Inline functions to manipulate QoS field in data descriptor. 
*/

/* Set the EOSP flag in a QoS-control field value. */
static inline u16 mwl8k_qos_setbit_eosp(u16 qos)
{
	u16 val_mask = 1 << 4;

	/* End of Service Period Bit 4 */
	return qos | val_mask;
}

/* Replace the 2-bit ack-policy field (bits 5-6) with @ack_policy. */
static inline u16 mwl8k_qos_setbit_ack(u16 qos, u8 ack_policy)
{
	u16 val_mask = 0x3;
	u8 shift = 5;
	u16 qos_mask = ~(val_mask << shift);

	/* Ack Policy Bit 5-6 */
	return (qos & qos_mask) | ((ack_policy & val_mask) << shift);
}

/* Set the A-MSDU-present flag in a QoS-control field value. */
static inline u16 mwl8k_qos_setbit_amsdu(u16 qos)
{
	u16 val_mask = 1 << 7;

	/* AMSDU present Bit 7 */
	return qos | val_mask;
}

/* Replace the 8-bit queue-length field (bits 8-15) with @len. */
static inline u16 mwl8k_qos_setbit_qlen(u16 qos, u8 len)
{
	u16 val_mask = 0xff;
	u8 shift = 8;
	u16 qos_mask = ~(val_mask << shift);

	/* Queue Length Bits 8-15 */
	return (qos & qos_mask) | ((len & val_mask) << shift);
}

/*
 * DMA header used by firmware and hardware: a 2-byte firmware length
 * followed by a full 4-address 802.11 header.
 */
struct mwl8k_dma_data {
	__le16 fwlen;
	struct ieee80211_hdr wh;
} __attribute__((packed));

/* Routines to add/remove DMA header from skb.  */

/*
 * Strip the DMA header: slide the (possibly shorter) 802.11 header up
 * over the fwlen field and any 4-address padding, then skb_pull() the
 * freed space so skb->data points at the real frame again.
 */
static inline void mwl8k_remove_dma_header(struct sk_buff *skb)
{
	struct mwl8k_dma_data *tr = (struct mwl8k_dma_data *)skb->data;
	void *dst, *src = &tr->wh;
	int hdrlen = ieee80211_hdrlen(tr->wh.frame_control);
	u16 space = sizeof(struct mwl8k_dma_data) - hdrlen;

	dst = (void *)tr + space;
	if (dst != src) {
		memmove(dst, src, hdrlen);
		skb_pull(skb, space);
	}
}

/*
 * Prepend the DMA header expected by the firmware in front of the frame's
 * 802.11 header.  (Continues past this chunk boundary.)
 */
static inline void mwl8k_add_dma_header(struct sk_buff *skb)
{
	struct ieee80211_hdr *wh;
	u32 hdrlen, pktlen;
	struct mwl8k_dma_data *tr;

	wh = (struct ieee80211_hdr *)skb->data;
	hdrlen = ieee80211_hdrlen(wh->frame_control);
	pktlen = skb->len;

	/*
	 * Copy up/down the 802.11 header; the firmware requires
	 * we present a 2-byte payload length followed by a
	 * 4-address header (w/o QoS), followed (optionally) by
	 * any WEP/ExtIV header (but only filled in for CCMP).
*/ if (hdrlen != sizeof(struct mwl8k_dma_data)) skb_push(skb, sizeof(struct mwl8k_dma_data) - hdrlen); tr = (struct mwl8k_dma_data *)skb->data; if (wh != &tr->wh) memmove(&tr->wh, wh, hdrlen); /* Clear addr4 */ memset(tr->wh.addr4, 0, ETH_ALEN); /* * Firmware length is the length of the fully formed "802.11 * payload". That is, everything except for the 802.11 header. * This includes all crypto material including the MIC. */ tr->fwlen = cpu_to_le16(pktlen - hdrlen); } /* * Packet reception. */ #define MWL8K_RX_CTRL_OWNED_BY_HOST 0x02 struct mwl8k_rx_desc { __le16 pkt_len; __u8 link_quality; __u8 noise_level; __le32 pkt_phys_addr; __le32 next_rx_desc_phys_addr; __le16 qos_control; __le16 rate_info; __le32 pad0[4]; __u8 rssi; __u8 channel; __le16 pad1; __u8 rx_ctrl; __u8 rx_status; __u8 pad2[2]; } __attribute__((packed)); #define MWL8K_RX_DESCS 256 #define MWL8K_RX_MAXSZ 3800 static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index) { struct mwl8k_priv *priv = hw->priv; struct mwl8k_rx_queue *rxq = priv->rxq + index; int size; int i; rxq->rx_desc_count = 0; rxq->rx_head = 0; rxq->rx_tail = 0; size = MWL8K_RX_DESCS * sizeof(struct mwl8k_rx_desc); rxq->rx_desc_area = pci_alloc_consistent(priv->pdev, size, &rxq->rx_desc_dma); if (rxq->rx_desc_area == NULL) { printk(KERN_ERR "%s: failed to alloc RX descriptors\n", priv->name); return -ENOMEM; } memset(rxq->rx_desc_area, 0, size); rxq->rx_skb = kmalloc(MWL8K_RX_DESCS * sizeof(*rxq->rx_skb), GFP_KERNEL); if (rxq->rx_skb == NULL) { printk(KERN_ERR "%s: failed to alloc RX skbuff list\n", priv->name); pci_free_consistent(priv->pdev, size, rxq->rx_desc_area, rxq->rx_desc_dma); return -ENOMEM; } memset(rxq->rx_skb, 0, MWL8K_RX_DESCS * sizeof(*rxq->rx_skb)); for (i = 0; i < MWL8K_RX_DESCS; i++) { struct mwl8k_rx_desc *rx_desc; int nexti; rx_desc = rxq->rx_desc_area + i; nexti = (i + 1) % MWL8K_RX_DESCS; rx_desc->next_rx_desc_phys_addr = cpu_to_le32(rxq->rx_desc_dma + nexti * sizeof(*rx_desc)); rx_desc->rx_ctrl = 
MWL8K_RX_CTRL_OWNED_BY_HOST; } return 0; } static int rxq_refill(struct ieee80211_hw *hw, int index, int limit) { struct mwl8k_priv *priv = hw->priv; struct mwl8k_rx_queue *rxq = priv->rxq + index; int refilled; refilled = 0; while (rxq->rx_desc_count < MWL8K_RX_DESCS && limit--) { struct sk_buff *skb; int rx; skb = dev_alloc_skb(MWL8K_RX_MAXSZ); if (skb == NULL) break; rxq->rx_desc_count++; rx = rxq->rx_tail; rxq->rx_tail = (rx + 1) % MWL8K_RX_DESCS; rxq->rx_desc_area[rx].pkt_phys_addr = cpu_to_le32(pci_map_single(priv->pdev, skb->data, MWL8K_RX_MAXSZ, DMA_FROM_DEVICE)); rxq->rx_desc_area[rx].pkt_len = cpu_to_le16(MWL8K_RX_MAXSZ); rxq->rx_skb[rx] = skb; wmb(); rxq->rx_desc_area[rx].rx_ctrl = 0; refilled++; } return refilled; } /* Must be called only when the card's reception is completely halted */ static void mwl8k_rxq_deinit(struct ieee80211_hw *hw, int index) { struct mwl8k_priv *priv = hw->priv; struct mwl8k_rx_queue *rxq = priv->rxq + index; int i; for (i = 0; i < MWL8K_RX_DESCS; i++) { if (rxq->rx_skb[i] != NULL) { unsigned long addr; addr = le32_to_cpu(rxq->rx_desc_area[i].pkt_phys_addr); pci_unmap_single(priv->pdev, addr, MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE); kfree_skb(rxq->rx_skb[i]); rxq->rx_skb[i] = NULL; } } kfree(rxq->rx_skb); rxq->rx_skb = NULL; pci_free_consistent(priv->pdev, MWL8K_RX_DESCS * sizeof(struct mwl8k_rx_desc), rxq->rx_desc_area, rxq->rx_desc_dma); rxq->rx_desc_area = NULL; } /* * Scan a list of BSSIDs to process for finalize join. * Allows for extension to process multiple BSSIDs. 
*/ static inline int mwl8k_capture_bssid(struct mwl8k_priv *priv, struct ieee80211_hdr *wh) { return priv->capture_beacon && ieee80211_is_beacon(wh->frame_control) && !compare_ether_addr(wh->addr3, priv->capture_bssid); } static inline void mwl8k_save_beacon(struct mwl8k_priv *priv, struct sk_buff *skb) { priv->capture_beacon = false; memset(priv->capture_bssid, 0, ETH_ALEN); /* * Use GFP_ATOMIC as rxq_process is called from * the primary interrupt handler, memory allocation call * must not sleep. */ priv->beacon_skb = skb_copy(skb, GFP_ATOMIC); if (priv->beacon_skb != NULL) queue_work(priv->config_wq, &priv->finalize_join_worker); } static int rxq_process(struct ieee80211_hw *hw, int index, int limit) { struct mwl8k_priv *priv = hw->priv; struct mwl8k_rx_queue *rxq = priv->rxq + index; int processed; processed = 0; while (rxq->rx_desc_count && limit--) { struct mwl8k_rx_desc *rx_desc; struct sk_buff *skb; struct ieee80211_rx_status status; unsigned long addr; struct ieee80211_hdr *wh; rx_desc = rxq->rx_desc_area + rxq->rx_head; if (!(rx_desc->rx_ctrl & MWL8K_RX_CTRL_OWNED_BY_HOST)) break; rmb(); skb = rxq->rx_skb[rxq->rx_head]; if (skb == NULL) break; rxq->rx_skb[rxq->rx_head] = NULL; rxq->rx_head = (rxq->rx_head + 1) % MWL8K_RX_DESCS; rxq->rx_desc_count--; addr = le32_to_cpu(rx_desc->pkt_phys_addr); pci_unmap_single(priv->pdev, addr, MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE); skb_put(skb, le16_to_cpu(rx_desc->pkt_len)); mwl8k_remove_dma_header(skb); wh = (struct ieee80211_hdr *)skb->data; /* * Check for pending join operation. save a copy of * the beacon and schedule a tasklet to send finalize * join command to the firmware. 
*/ if (mwl8k_capture_bssid(priv, wh)) mwl8k_save_beacon(priv, skb); memset(&status, 0, sizeof(status)); status.mactime = 0; status.signal = -rx_desc->rssi; status.noise = -rx_desc->noise_level; status.qual = rx_desc->link_quality; status.antenna = 1; status.rate_idx = 1; status.flag = 0; status.band = IEEE80211_BAND_2GHZ; status.freq = ieee80211_channel_to_frequency(rx_desc->channel); memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); ieee80211_rx_irqsafe(hw, skb); processed++; } return processed; } /* * Packet transmission. */ /* Transmit queue assignment. */ enum { MWL8K_WME_AC_BK = 0, /* background access */ MWL8K_WME_AC_BE = 1, /* best effort access */ MWL8K_WME_AC_VI = 2, /* video access */ MWL8K_WME_AC_VO = 3, /* voice access */ }; /* Transmit packet ACK policy */ #define MWL8K_TXD_ACK_POLICY_NORMAL 0 #define MWL8K_TXD_ACK_POLICY_BLOCKACK 3 #define GET_TXQ(_ac) (\ ((_ac) == WME_AC_VO) ? MWL8K_WME_AC_VO : \ ((_ac) == WME_AC_VI) ? MWL8K_WME_AC_VI : \ ((_ac) == WME_AC_BK) ? MWL8K_WME_AC_BK : \ MWL8K_WME_AC_BE) #define MWL8K_TXD_STATUS_OK 0x00000001 #define MWL8K_TXD_STATUS_OK_RETRY 0x00000002 #define MWL8K_TXD_STATUS_OK_MORE_RETRY 0x00000004 #define MWL8K_TXD_STATUS_MULTICAST_TX 0x00000008 #define MWL8K_TXD_STATUS_FW_OWNED 0x80000000 struct mwl8k_tx_desc { __le32 status; __u8 data_rate; __u8 tx_priority; __le16 qos_control; __le32 pkt_phys_addr; __le16 pkt_len; __u8 dest_MAC_addr[ETH_ALEN]; __le32 next_tx_desc_phys_addr; __le32 reserved; __le16 rate_info; __u8 peer_id; __u8 tx_frag_cnt; } __attribute__((packed)); #define MWL8K_TX_DESCS 128 static int mwl8k_txq_init(struct ieee80211_hw *hw, int index) { struct mwl8k_priv *priv = hw->priv; struct mwl8k_tx_queue *txq = priv->txq + index; int size; int i; memset(&txq->tx_stats, 0, sizeof(struct ieee80211_tx_queue_stats)); txq->tx_stats.limit = MWL8K_TX_DESCS; txq->tx_head = 0; txq->tx_tail = 0; size = MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc); txq->tx_desc_area = pci_alloc_consistent(priv->pdev, size, 
&txq->tx_desc_dma); if (txq->tx_desc_area == NULL) { printk(KERN_ERR "%s: failed to alloc TX descriptors\n", priv->name); return -ENOMEM; } memset(txq->tx_desc_area, 0, size); txq->tx_skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->tx_skb), GFP_KERNEL); if (txq->tx_skb == NULL) { printk(KERN_ERR "%s: failed to alloc TX skbuff list\n", priv->name); pci_free_consistent(priv->pdev, size, txq->tx_desc_area, txq->tx_desc_dma); return -ENOMEM; } memset(txq->tx_skb, 0, MWL8K_TX_DESCS * sizeof(*txq->tx_skb)); for (i = 0; i < MWL8K_TX_DESCS; i++) { struct mwl8k_tx_desc *tx_desc; int nexti; tx_desc = txq->tx_desc_area + i; nexti = (i + 1) % MWL8K_TX_DESCS; tx_desc->status = 0; tx_desc->next_tx_desc_phys_addr = cpu_to_le32(txq->tx_desc_dma + nexti * sizeof(*tx_desc)); } return 0; } static inline void mwl8k_tx_start(struct mwl8k_priv *priv) { iowrite32(MWL8K_H2A_INT_PPA_READY, priv->regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS); iowrite32(MWL8K_H2A_INT_DUMMY, priv->regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS); ioread32(priv->regs + MWL8K_HIU_INT_CODE); } static inline int mwl8k_txq_busy(struct mwl8k_priv *priv) { return priv->pending_tx_pkts; } struct mwl8k_txq_info { u32 fw_owned; u32 drv_owned; u32 unused; u32 len; u32 head; u32 tail; }; static int mwl8k_scan_tx_ring(struct mwl8k_priv *priv, struct mwl8k_txq_info *txinfo) { int count, desc, status; struct mwl8k_tx_queue *txq; struct mwl8k_tx_desc *tx_desc; int ndescs = 0; memset(txinfo, 0, MWL8K_TX_QUEUES * sizeof(struct mwl8k_txq_info)); spin_lock_bh(&priv->tx_lock); for (count = 0; count < MWL8K_TX_QUEUES; count++) { txq = priv->txq + count; txinfo[count].len = txq->tx_stats.len; txinfo[count].head = txq->tx_head; txinfo[count].tail = txq->tx_tail; for (desc = 0; desc < MWL8K_TX_DESCS; desc++) { tx_desc = txq->tx_desc_area + desc; status = le32_to_cpu(tx_desc->status); if (status & MWL8K_TXD_STATUS_FW_OWNED) txinfo[count].fw_owned++; else txinfo[count].drv_owned++; if (tx_desc->pkt_len == 0) txinfo[count].unused++; } } 
spin_unlock_bh(&priv->tx_lock); return ndescs; } /* * Must be called with hw->fw_mutex held and tx queues stopped. */ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw) { struct mwl8k_priv *priv = hw->priv; DECLARE_COMPLETION_ONSTACK(cmd_wait); u32 count; unsigned long timeout; might_sleep(); spin_lock_bh(&priv->tx_lock); count = mwl8k_txq_busy(priv); if (count) { priv->tx_wait = &cmd_wait; if (priv->radio_on) mwl8k_tx_start(priv); } spin_unlock_bh(&priv->tx_lock); if (count) { struct mwl8k_txq_info txinfo[MWL8K_TX_QUEUES]; int index; int newcount; timeout = wait_for_completion_timeout(&cmd_wait, msecs_to_jiffies(5000)); if (timeout) return 0; spin_lock_bh(&priv->tx_lock); priv->tx_wait = NULL; newcount = mwl8k_txq_busy(priv); spin_unlock_bh(&priv->tx_lock); printk(KERN_ERR "%s(%u) TIMEDOUT:5000ms Pend:%u-->%u\n", __func__, __LINE__, count, newcount); mwl8k_scan_tx_ring(priv, txinfo); for (index = 0; index < MWL8K_TX_QUEUES; index++) printk(KERN_ERR "TXQ:%u L:%u H:%u T:%u FW:%u DRV:%u U:%u\n", index, txinfo[index].len, txinfo[index].head, txinfo[index].tail, txinfo[index].fw_owned, txinfo[index].drv_owned, txinfo[index].unused); return -ETIMEDOUT; } return 0; } #define MWL8K_TXD_SUCCESS(status) \ ((status) & (MWL8K_TXD_STATUS_OK | \ MWL8K_TXD_STATUS_OK_RETRY | \ MWL8K_TXD_STATUS_OK_MORE_RETRY)) static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force) { struct mwl8k_priv *priv = hw->priv; struct mwl8k_tx_queue *txq = priv->txq + index; int wake = 0; while (txq->tx_stats.len > 0) { int tx; struct mwl8k_tx_desc *tx_desc; unsigned long addr; int size; struct sk_buff *skb; struct ieee80211_tx_info *info; u32 status; tx = txq->tx_head; tx_desc = txq->tx_desc_area + tx; status = le32_to_cpu(tx_desc->status); if (status & MWL8K_TXD_STATUS_FW_OWNED) { if (!force) break; tx_desc->status &= ~cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED); } txq->tx_head = (tx + 1) % MWL8K_TX_DESCS; BUG_ON(txq->tx_stats.len == 0); txq->tx_stats.len--; priv->pending_tx_pkts--; 
addr = le32_to_cpu(tx_desc->pkt_phys_addr); size = le16_to_cpu(tx_desc->pkt_len); skb = txq->tx_skb[tx]; txq->tx_skb[tx] = NULL; BUG_ON(skb == NULL); pci_unmap_single(priv->pdev, addr, size, PCI_DMA_TODEVICE); mwl8k_remove_dma_header(skb); /* Mark descriptor as unused */ tx_desc->pkt_phys_addr = 0; tx_desc->pkt_len = 0; info = IEEE80211_SKB_CB(skb); ieee80211_tx_info_clear_status(info); if (MWL8K_TXD_SUCCESS(status)) info->flags |= IEEE80211_TX_STAT_ACK; ieee80211_tx_status_irqsafe(hw, skb); wake = 1; } if (wake && priv->radio_on && !mutex_is_locked(&priv->fw_mutex)) ieee80211_wake_queue(hw, index); } /* must be called only when the card's transmit is completely halted */ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index) { struct mwl8k_priv *priv = hw->priv; struct mwl8k_tx_queue *txq = priv->txq + index; mwl8k_txq_reclaim(hw, index, 1); kfree(txq->tx_skb); txq->tx_skb = NULL; pci_free_consistent(priv->pdev, MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc), txq->tx_desc_area, txq->tx_desc_dma); txq->tx_desc_area = NULL; } static int mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb) { struct mwl8k_priv *priv = hw->priv; struct ieee80211_tx_info *tx_info; struct mwl8k_vif *mwl8k_vif; struct ieee80211_hdr *wh; struct mwl8k_tx_queue *txq; struct mwl8k_tx_desc *tx; dma_addr_t dma; u32 txstatus; u8 txdatarate; u16 qos; wh = (struct ieee80211_hdr *)skb->data; if (ieee80211_is_data_qos(wh->frame_control)) qos = le16_to_cpu(*((__le16 *)ieee80211_get_qos_ctl(wh))); else qos = 0; mwl8k_add_dma_header(skb); wh = &((struct mwl8k_dma_data *)skb->data)->wh; tx_info = IEEE80211_SKB_CB(skb); mwl8k_vif = MWL8K_VIF(tx_info->control.vif); if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { u16 seqno = mwl8k_vif->seqno; wh->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); wh->seq_ctrl |= cpu_to_le16(seqno << 4); mwl8k_vif->seqno = seqno++ % 4096; } /* Setup firmware control bit fields for each frame type. 
*/ txstatus = 0; txdatarate = 0; if (ieee80211_is_mgmt(wh->frame_control) || ieee80211_is_ctl(wh->frame_control)) { txdatarate = 0; qos = mwl8k_qos_setbit_eosp(qos); /* Set Queue size to unspecified */ qos = mwl8k_qos_setbit_qlen(qos, 0xff); } else if (ieee80211_is_data(wh->frame_control)) { txdatarate = 1; if (is_multicast_ether_addr(wh->addr1)) txstatus |= MWL8K_TXD_STATUS_MULTICAST_TX; /* Send pkt in an aggregate if AMPDU frame. */ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) qos = mwl8k_qos_setbit_ack(qos, MWL8K_TXD_ACK_POLICY_BLOCKACK); else qos = mwl8k_qos_setbit_ack(qos, MWL8K_TXD_ACK_POLICY_NORMAL); if (qos & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT) qos = mwl8k_qos_setbit_amsdu(qos); } dma = pci_map_single(priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); if (pci_dma_mapping_error(priv->pdev, dma)) { printk(KERN_DEBUG "%s: failed to dma map skb, " "dropping TX frame.\n", priv->name); dev_kfree_skb(skb); return NETDEV_TX_OK; } spin_lock_bh(&priv->tx_lock); txq = priv->txq + index; BUG_ON(txq->tx_skb[txq->tx_tail] != NULL); txq->tx_skb[txq->tx_tail] = skb; tx = txq->tx_desc_area + txq->tx_tail; tx->data_rate = txdatarate; tx->tx_priority = index; tx->qos_control = cpu_to_le16(qos); tx->pkt_phys_addr = cpu_to_le32(dma); tx->pkt_len = cpu_to_le16(skb->len); tx->rate_info = 0; tx->peer_id = mwl8k_vif->peer_id; wmb(); tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus); txq->tx_stats.count++; txq->tx_stats.len++; priv->pending_tx_pkts++; txq->tx_tail++; if (txq->tx_tail == MWL8K_TX_DESCS) txq->tx_tail = 0; if (txq->tx_head == txq->tx_tail) ieee80211_stop_queue(hw, index); mwl8k_tx_start(priv); spin_unlock_bh(&priv->tx_lock); return NETDEV_TX_OK; } /* * Firmware access. * * We have the following requirements for issuing firmware commands: * - Some commands require that the packet transmit path is idle when * the command is issued. (For simplicity, we'll just quiesce the * transmit path for every command.) 
 * - There are certain sequences of commands that need to be issued to
 *   the hardware sequentially, with no other intervening commands.
 *
 * This leads to an implementation of a "firmware lock" as a mutex that
 * can be taken recursively, and which is taken by both the low-level
 * command submission function (mwl8k_post_cmd) as well as any users of
 * that function that require issuing of an atomic sequence of commands,
 * and quiesces the transmit path whenever it's taken.
 */

/*
 * Acquire the recursive firmware lock: stop the mac80211 queues and
 * drain the transmit path before taking ownership.  A nested call from
 * the owning task only bumps the depth counter.
 */
static int mwl8k_fw_lock(struct ieee80211_hw *hw)
{
	struct mwl8k_priv *priv = hw->priv;

	if (priv->fw_mutex_owner != current) {
		int rc;

		mutex_lock(&priv->fw_mutex);
		ieee80211_stop_queues(hw);

		rc = mwl8k_tx_wait_empty(hw);
		if (rc) {
			/* TX path never drained: undo and report. */
			ieee80211_wake_queues(hw);
			mutex_unlock(&priv->fw_mutex);
			return rc;
		}

		priv->fw_mutex_owner = current;
	}

	priv->fw_mutex_depth++;

	return 0;
}

/* Drop one level of the firmware lock; restart TX on the final unlock. */
static void mwl8k_fw_unlock(struct ieee80211_hw *hw)
{
	struct mwl8k_priv *priv = hw->priv;

	if (!--priv->fw_mutex_depth) {
		ieee80211_wake_queues(hw);
		priv->fw_mutex_owner = NULL;
		mutex_unlock(&priv->fw_mutex);
	}
}


/*
 * Command processing.
 */

/* Timeout firmware commands after 2000ms */
#define MWL8K_CMD_TIMEOUT_MS	2000

/*
 * Synchronously post one command to the firmware and wait for it to
 * complete.  The command buffer is DMA-mapped bidirectionally because
 * the firmware writes its result back into the same buffer; completion
 * is signalled from the interrupt handler through priv->hostcmd_wait.
 * Takes the firmware lock, so the TX path is quiesced for the duration.
 */
static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
{
	DECLARE_COMPLETION_ONSTACK(cmd_wait);
	struct mwl8k_priv *priv = hw->priv;
	void __iomem *regs = priv->regs;
	dma_addr_t dma_addr;
	unsigned int dma_size;
	int rc;
	unsigned long timeout = 0;
	u8 buf[32];

	/* Pre-set to an error value; firmware overwrites it on success. */
	cmd->result = 0xFFFF;
	dma_size = le16_to_cpu(cmd->length);
	dma_addr = pci_map_single(priv->pdev, cmd, dma_size,
				  PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(priv->pdev, dma_addr))
		return -ENOMEM;

	rc = mwl8k_fw_lock(hw);
	if (rc) {
		pci_unmap_single(priv->pdev, dma_addr, dma_size,
					PCI_DMA_BIDIRECTIONAL);
		return rc;
	}

	priv->hostcmd_wait = &cmd_wait;
	/* Hand the command address to the HIU and ring the doorbell. */
	iowrite32(dma_addr, regs + MWL8K_HIU_GEN_PTR);
	iowrite32(MWL8K_H2A_INT_DOORBELL,
		regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
	iowrite32(MWL8K_H2A_INT_DUMMY,
		regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);

	timeout = wait_for_completion_timeout(&cmd_wait,
				msecs_to_jiffies(MWL8K_CMD_TIMEOUT_MS));

	priv->hostcmd_wait = NULL;

	mwl8k_fw_unlock(hw);

	pci_unmap_single(priv->pdev, dma_addr, dma_size,
					PCI_DMA_BIDIRECTIONAL);

	if (!timeout) {
		printk(KERN_ERR "%s: Command %s timeout after %u ms\n",
		       priv->name,
		       mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
		       MWL8K_CMD_TIMEOUT_MS);
		rc = -ETIMEDOUT;
	} else {
		/* Non-zero result from firmware means command failure. */
		rc = cmd->result ? -EINVAL : 0;
		if (rc)
			printk(KERN_ERR "%s: Command %s error 0x%x\n",
			       priv->name,
			       mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
			       le16_to_cpu(cmd->result));
	}

	return rc;
}

/*
 * GET_HW_SPEC.
 */
struct mwl8k_cmd_get_hw_spec {
	struct mwl8k_cmd_pkt header;
	__u8 hw_rev;
	__u8 host_interface;
	__le16 num_mcaddrs;
	__u8 perm_addr[ETH_ALEN];
	__le16 region_code;
	__le32 fw_rev;
	__le32 ps_cookie;
	__le32 caps;
	__u8 mcs_bitmap[16];
	__le32 rx_queue_ptr;
	__le32 num_tx_queues;
	__le32 tx_queue_ptrs[MWL8K_TX_QUEUES];
	__le32 caps2;
	__le32 num_tx_desc_per_queue;
	__le32 total_rx_desc;
} __attribute__((packed));

/*
 * Exchange hardware capabilities with the firmware: hand it the host
 * ring/cookie DMA addresses and read back the permanent MAC address,
 * firmware revision and multicast filter size.
 */
static int mwl8k_cmd_get_hw_spec(struct ieee80211_hw *hw)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_cmd_get_hw_spec *cmd;
	int rc;
	int i;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_HW_SPEC);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));

	memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr));
	cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
	cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rx_desc_dma);
	cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
	for (i = 0; i < MWL8K_TX_QUEUES; i++)
		cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].tx_desc_dma);
	cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
	cmd->total_rx_desc = cpu_to_le32(MWL8K_RX_DESCS);

	rc = mwl8k_post_cmd(hw, &cmd->header);

	if (!rc) {
		SET_IEEE80211_PERM_ADDR(hw, cmd->perm_addr);
		priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
		priv->fw_rev = le32_to_cpu(cmd->fw_rev);
		priv->hw_rev = cmd->hw_rev;
	}

	kfree(cmd);
	return rc;
}

/*
 * CMD_MAC_MULTICAST_ADR.
 */
struct mwl8k_cmd_mac_multicast_adr {
	struct mwl8k_cmd_pkt header;
	__le16 action;
	__le16 numaddr;
	__u8 addr[0][ETH_ALEN];
};

#define MWL8K_ENABLE_RX_MULTICAST 0x000F

/*
 * Build (but do not post) a MAC_MULTICAST_ADR command packet from the
 * device's multicast list.  Returns NULL on allocation failure or on a
 * malformed list entry; the caller owns (and must kfree) the packet.
 * GFP_ATOMIC because this can run from the filter-config path.
 */
static struct mwl8k_cmd_pkt *
__mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw,
			      int mc_count, struct dev_addr_list *mclist)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_cmd_mac_multicast_adr *cmd;
	int size;
	int i;

	/* Clamp to what the firmware said it can filter. */
	if (mc_count > priv->num_mcaddrs)
		mc_count = priv->num_mcaddrs;

	size = sizeof(*cmd) + mc_count * ETH_ALEN;

	cmd = kzalloc(size, GFP_ATOMIC);
	if (cmd == NULL)
		return NULL;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_MAC_MULTICAST_ADR);
	cmd->header.length = cpu_to_le16(size);
	cmd->action = cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST);
	cmd->numaddr = cpu_to_le16(mc_count);

	for (i = 0; i < mc_count && mclist; i++) {
		if (mclist->da_addrlen != ETH_ALEN) {
			kfree(cmd);
			return NULL;
		}
		memcpy(cmd->addr[i], mclist->da_addr, ETH_ALEN);
		mclist = mclist->next;
	}

	return &cmd->header;
}

/*
 * CMD_802_11_GET_STAT.
 */
struct mwl8k_cmd_802_11_get_stat {
	struct mwl8k_cmd_pkt header;
	__le16 action;
	__le32 stats[64];
} __attribute__((packed));

/* Indices into the stats[] array returned by the firmware. */
#define MWL8K_STAT_ACK_FAILURE	9
#define MWL8K_STAT_RTS_FAILURE	12
#define MWL8K_STAT_FCS_ERROR	24
#define MWL8K_STAT_RTS_SUCCESS	11

/* Fetch low-level counters from the firmware into mac80211's stats. */
static int mwl8k_cmd_802_11_get_stat(struct ieee80211_hw *hw,
				struct ieee80211_low_level_stats *stats)
{
	struct mwl8k_cmd_802_11_get_stat *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_STAT);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le16(MWL8K_CMD_GET);

	rc = mwl8k_post_cmd(hw, &cmd->header);
	if (!rc) {
		stats->dot11ACKFailureCount =
			le32_to_cpu(cmd->stats[MWL8K_STAT_ACK_FAILURE]);
		stats->dot11RTSFailureCount =
			le32_to_cpu(cmd->stats[MWL8K_STAT_RTS_FAILURE]);
		stats->dot11FCSErrorCount =
			le32_to_cpu(cmd->stats[MWL8K_STAT_FCS_ERROR]);
		stats->dot11RTSSuccessCount =
			le32_to_cpu(cmd->stats[MWL8K_STAT_RTS_SUCCESS]);
	}

	kfree(cmd);
	return
rc;
}

/*
 * CMD_802_11_RADIO_CONTROL.
 */
struct mwl8k_cmd_802_11_radio_control {
	struct mwl8k_cmd_pkt header;
	__le16 action;
	__le16 control;
	__le16 radio_on;
} __attribute__((packed));

/*
 * Turn the radio on or off.  Skips the firmware round-trip when the
 * radio is already in the requested state, unless 'force' is set
 * (used to re-push the preamble setting).
 */
static int mwl8k_cmd_802_11_radio_control(struct ieee80211_hw *hw,
		bool enable, bool force)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_cmd_802_11_radio_control *cmd;
	int rc;

	if (enable == priv->radio_on && !force)
		return 0;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_RADIO_CONTROL);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le16(MWL8K_CMD_SET);
	/* control: 3 = short preamble, 1 = long preamble. */
	cmd->control = cpu_to_le16(priv->radio_short_preamble ? 3 : 1);
	cmd->radio_on = cpu_to_le16(enable ? 0x0001 : 0x0000);

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	if (!rc)
		priv->radio_on = enable;

	return rc;
}

/* Convenience wrapper: radio off, no force. */
static int mwl8k_cmd_802_11_radio_disable(struct ieee80211_hw *hw)
{
	return mwl8k_cmd_802_11_radio_control(hw, 0, 0);
}

/* Convenience wrapper: radio on, no force. */
static int mwl8k_cmd_802_11_radio_enable(struct ieee80211_hw *hw)
{
	return mwl8k_cmd_802_11_radio_control(hw, 1, 0);
}

/*
 * Record the preamble preference and force-push it to the firmware
 * (radio control carries the preamble bit, hence force=1).
 */
static int
mwl8k_set_radio_preamble(struct ieee80211_hw *hw, bool short_preamble)
{
	struct mwl8k_priv *priv;

	if (hw == NULL || hw->priv == NULL)
		return -EINVAL;
	priv = hw->priv;

	priv->radio_short_preamble = short_preamble;

	return mwl8k_cmd_802_11_radio_control(hw, 1, 1);
}

/*
 * CMD_802_11_RF_TX_POWER.
*/ #define MWL8K_TX_POWER_LEVEL_TOTAL 8 struct mwl8k_cmd_802_11_rf_tx_power { struct mwl8k_cmd_pkt header; __le16 action; __le16 support_level; __le16 current_level; __le16 reserved; __le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL]; } __attribute__((packed)); static int mwl8k_cmd_802_11_rf_tx_power(struct ieee80211_hw *hw, int dBm) { struct mwl8k_cmd_802_11_rf_tx_power *cmd; int rc; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) return -ENOMEM; cmd->header.code = cpu_to_le16(MWL8K_CMD_RF_TX_POWER); cmd->header.length = cpu_to_le16(sizeof(*cmd)); cmd->action = cpu_to_le16(MWL8K_CMD_SET); cmd->support_level = cpu_to_le16(dBm); rc = mwl8k_post_cmd(hw, &cmd->header); kfree(cmd); return rc; } /* * CMD_SET_PRE_SCAN. */ struct mwl8k_cmd_set_pre_scan { struct mwl8k_cmd_pkt header; } __attribute__((packed)); static int mwl8k_cmd_set_pre_scan(struct ieee80211_hw *hw) { struct mwl8k_cmd_set_pre_scan *cmd; int rc; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) return -ENOMEM; cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_PRE_SCAN); cmd->header.length = cpu_to_le16(sizeof(*cmd)); rc = mwl8k_post_cmd(hw, &cmd->header); kfree(cmd); return rc; } /* * CMD_SET_POST_SCAN. */ struct mwl8k_cmd_set_post_scan { struct mwl8k_cmd_pkt header; __le32 isibss; __u8 bssid[ETH_ALEN]; } __attribute__((packed)); static int mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, __u8 *mac) { struct mwl8k_cmd_set_post_scan *cmd; int rc; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) return -ENOMEM; cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_POST_SCAN); cmd->header.length = cpu_to_le16(sizeof(*cmd)); cmd->isibss = 0; memcpy(cmd->bssid, mac, ETH_ALEN); rc = mwl8k_post_cmd(hw, &cmd->header); kfree(cmd); return rc; } /* * CMD_SET_RF_CHANNEL. 
 */
struct mwl8k_cmd_set_rf_channel {
	struct mwl8k_cmd_pkt header;
	__le16 action;
	__u8 current_channel;
	__le32 channel_flags;
} __attribute__((packed));

/* Tune the radio to the given mac80211 channel. */
static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
				    struct ieee80211_channel *channel)
{
	struct mwl8k_cmd_set_rf_channel *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RF_CHANNEL);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le16(MWL8K_CMD_SET);
	cmd->current_channel = channel->hw_value;
	/* 0x81 selects the 2.4GHz band in the firmware's flag encoding. */
	if (channel->band == IEEE80211_BAND_2GHZ)
		cmd->channel_flags = cpu_to_le32(0x00000081);
	else
		cmd->channel_flags = cpu_to_le32(0x00000000);

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_SET_SLOT.
 */
struct mwl8k_cmd_set_slot {
	struct mwl8k_cmd_pkt header;
	__le16 action;
	__u8 short_slot;
} __attribute__((packed));

/* Select short or long 802.11 slot time. */
static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time)
{
	struct mwl8k_cmd_set_slot *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_SLOT);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le16(MWL8K_CMD_SET);
	cmd->short_slot = short_slot_time;

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_MIMO_CONFIG.
 */
struct mwl8k_cmd_mimo_config {
	struct mwl8k_cmd_pkt header;
	__le32 action;
	__u8 rx_antenna_map;
	__u8 tx_antenna_map;
} __attribute__((packed));

/* Configure the RX/TX antenna bitmaps. */
static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx)
{
	struct mwl8k_cmd_mimo_config *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_MIMO_CONFIG);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le32((u32)MWL8K_CMD_SET);
	cmd->rx_antenna_map = rx;
	cmd->tx_antenna_map = tx;

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_ENABLE_SNIFFER.
 */
struct mwl8k_cmd_enable_sniffer {
	struct mwl8k_cmd_pkt header;
	__le32 action;
} __attribute__((packed));

/* Toggle promiscuous (sniffer) mode in the firmware. */
static int mwl8k_enable_sniffer(struct ieee80211_hw *hw, bool enable)
{
	struct mwl8k_cmd_enable_sniffer *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_ENABLE_SNIFFER);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le32(!!enable);

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_SET_RATEADAPT_MODE.
 */
struct mwl8k_cmd_set_rate_adapt_mode {
	struct mwl8k_cmd_pkt header;
	__le16 action;
	__le16 mode;
} __attribute__((packed));

/* Select the firmware's rate-adaptation mode. */
static int mwl8k_cmd_setrateadaptmode(struct ieee80211_hw *hw, __u16 mode)
{
	struct mwl8k_cmd_set_rate_adapt_mode *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATEADAPT_MODE);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le16(MWL8K_CMD_SET);
	cmd->mode = cpu_to_le16(mode);

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_SET_WMM_MODE.
 */
struct mwl8k_cmd_set_wmm {
	struct mwl8k_cmd_pkt header;
	__le16 action;
} __attribute__((packed));

/* Enable/disable WMM; mirrors the result into priv->wmm_enabled. */
static int mwl8k_set_wmm(struct ieee80211_hw *hw, bool enable)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_cmd_set_wmm *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_WMM_MODE);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le16(!!enable);

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	if (!rc)
		priv->wmm_enabled = enable;

	return rc;
}

/*
 * CMD_SET_RTS_THRESHOLD.
 */
struct mwl8k_cmd_rts_threshold {
	struct mwl8k_cmd_pkt header;
	__le16 action;
	__le16 threshold;
} __attribute__((packed));

/* Get or set the RTS threshold, depending on 'action'. */
static int mwl8k_rts_threshold(struct ieee80211_hw *hw,
			       u16 action, u16 threshold)
{
	struct mwl8k_cmd_rts_threshold *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_RTS_THRESHOLD);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le16(action);
	cmd->threshold = cpu_to_le16(threshold);

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_SET_EDCA_PARAMS.
 */
struct mwl8k_cmd_set_edca_params {
	struct mwl8k_cmd_pkt header;

	/* See MWL8K_SET_EDCA_XXX below */
	__le16 action;

	/* TX opportunity in units of 32 us */
	__le16 txop;

	/* Log exponent of max contention period: 0...15*/
	__u8 log_cw_max;

	/* Log exponent of min contention period: 0...15 */
	__u8 log_cw_min;

	/* Adaptive interframe spacing in units of 32us */
	__u8 aifs;

	/* TX queue to configure */
	__u8 txq;
} __attribute__((packed));

#define MWL8K_SET_EDCA_CW	0x01
#define MWL8K_SET_EDCA_TXOP	0x02
#define MWL8K_SET_EDCA_AIFS	0x04

#define MWL8K_SET_EDCA_ALL	(MWL8K_SET_EDCA_CW | \
				 MWL8K_SET_EDCA_TXOP | \
				 MWL8K_SET_EDCA_AIFS)

/*
 * Program EDCA (QoS contention) parameters for one TX queue.  The
 * contention windows are converted to log2 exponents; this assumes
 * cw_min/cw_max are of the usual 2^n - 1 form.
 */
static int
mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
		__u16 cw_min, __u16 cw_max,
		__u8 aifs, __u16 txop)
{
	struct mwl8k_cmd_set_edca_params *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_EDCA_PARAMS);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->action = cpu_to_le16(MWL8K_SET_EDCA_ALL);
	cmd->txop = cpu_to_le16(txop);
	cmd->log_cw_max = (u8)ilog2(cw_max + 1);
	cmd->log_cw_min = (u8)ilog2(cw_min + 1);
	cmd->aifs = aifs;
	cmd->txq = qnum;

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_FINALIZE_JOIN.
 */

/* FJ beacon buffer size is compiled into the firmware.
*/ #define MWL8K_FJ_BEACON_MAXLEN 128 struct mwl8k_cmd_finalize_join { struct mwl8k_cmd_pkt header; __le32 sleep_interval; /* Number of beacon periods to sleep */ __u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN]; } __attribute__((packed)); static int mwl8k_finalize_join(struct ieee80211_hw *hw, void *frame, __u16 framelen, __u16 dtim) { struct mwl8k_cmd_finalize_join *cmd; struct ieee80211_mgmt *payload = frame; u16 hdrlen; u32 payload_len; int rc; if (frame == NULL) return -EINVAL; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) return -ENOMEM; cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_FINALIZE_JOIN); cmd->header.length = cpu_to_le16(sizeof(*cmd)); cmd->sleep_interval = cpu_to_le32(dtim ? dtim : 1); hdrlen = ieee80211_hdrlen(payload->frame_control); payload_len = framelen > hdrlen ? framelen - hdrlen : 0; /* XXX TBD Might just have to abort and return an error */ if (payload_len > MWL8K_FJ_BEACON_MAXLEN) printk(KERN_ERR "%s(): WARNING: Incomplete beacon " "sent to firmware. Sz=%u MAX=%u\n", __func__, payload_len, MWL8K_FJ_BEACON_MAXLEN); if (payload_len > MWL8K_FJ_BEACON_MAXLEN) payload_len = MWL8K_FJ_BEACON_MAXLEN; if (payload && payload_len) memcpy(cmd->beacon_data, &payload->u.beacon, payload_len); rc = mwl8k_post_cmd(hw, &cmd->header); kfree(cmd); return rc; } /* * CMD_UPDATE_STADB. */ struct mwl8k_cmd_update_sta_db { struct mwl8k_cmd_pkt header; /* See STADB_ACTION_TYPE */ __le32 action; /* Peer MAC address */ __u8 peer_addr[ETH_ALEN]; __le32 reserved; /* Peer info - valid during add/update. 
	 */
	struct peer_capability_info	peer_info;
} __attribute__((packed));

/*
 * Add, modify, delete or flush a station-database entry for the vif's
 * BSSID.  On a successful add/modify the firmware-assigned station id
 * is cached in the vif (used later in TX descriptors).
 */
static int mwl8k_cmd_update_sta_db(struct ieee80211_hw *hw,
		struct ieee80211_vif *vif, __u32 action)
{
	struct mwl8k_vif *mv_vif = MWL8K_VIF(vif);
	struct ieee80211_bss_conf *info = &mv_vif->bss_info;
	struct mwl8k_cmd_update_sta_db *cmd;
	struct peer_capability_info *peer_info;
	struct ieee80211_rate *bitrates = mv_vif->legacy_rates;
	int rc;
	__u8 count, *rates;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));

	cmd->action = cpu_to_le32(action);
	peer_info = &cmd->peer_info;
	memcpy(cmd->peer_addr, mv_vif->bssid, ETH_ALEN);

	switch (action) {
	case MWL8K_STA_DB_ADD_ENTRY:
	case MWL8K_STA_DB_MODIFY_ENTRY:
		/* Build peer_info block */
		peer_info->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
		peer_info->basic_caps = cpu_to_le16(info->assoc_capability);
		peer_info->interop = 1;
		peer_info->amsdu_enabled = 0;

		rates = peer_info->legacy_rates;
		for (count = 0; count < mv_vif->legacy_nrates; count++)
			rates[count] = bitrates[count].hw_value;

		rc = mwl8k_post_cmd(hw, &cmd->header);
		if (rc == 0)
			/* Firmware wrote its station id back into the cmd. */
			mv_vif->peer_id = peer_info->station_id;

		break;

	case MWL8K_STA_DB_DEL_ENTRY:
	case MWL8K_STA_DB_FLUSH:
	default:
		rc = mwl8k_post_cmd(hw, &cmd->header);
		if (rc == 0)
			mv_vif->peer_id = 0;
		break;
	}
	kfree(cmd);

	return rc;
}

/*
 * CMD_SET_AID.
 */
#define MWL8K_RATE_INDEX_MAX_ARRAY	14

#define MWL8K_FRAME_PROT_DISABLED		0x00
#define MWL8K_FRAME_PROT_11G			0x07
#define MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY	0x02
#define MWL8K_FRAME_PROT_11N_HT_ALL		0x06

struct mwl8k_cmd_update_set_aid {
	struct	mwl8k_cmd_pkt header;
	__le16	aid;

	/* AP's MAC address (BSSID) */
	__u8	bssid[ETH_ALEN];
	__le16	protection_mode;
	__u8	supp_rates[MWL8K_RATE_INDEX_MAX_ARRAY];
} __attribute__((packed));

/*
 * Push the association id, BSSID, frame-protection mode (derived from
 * CTS-protection / HT operation mode) and supported legacy rates to
 * the firmware after association.
 */
static int mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif)
{
	struct mwl8k_vif *mv_vif = MWL8K_VIF(vif);
	struct ieee80211_bss_conf *info = &mv_vif->bss_info;
	struct mwl8k_cmd_update_set_aid *cmd;
	struct ieee80211_rate *bitrates = mv_vif->legacy_rates;
	int count;
	u16 prot_mode;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_AID);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->aid = cpu_to_le16(info->aid);

	memcpy(cmd->bssid, mv_vif->bssid, ETH_ALEN);

	if (info->use_cts_prot) {
		prot_mode = MWL8K_FRAME_PROT_11G;
	} else {
		switch (info->ht_operation_mode &
			IEEE80211_HT_OP_MODE_PROTECTION) {
		case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
			prot_mode = MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY;
			break;
		case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
			prot_mode = MWL8K_FRAME_PROT_11N_HT_ALL;
			break;
		default:
			prot_mode = MWL8K_FRAME_PROT_DISABLED;
			break;
		}
	}
	cmd->protection_mode = cpu_to_le16(prot_mode);

	for (count = 0; count < mv_vif->legacy_nrates; count++)
		cmd->supp_rates[count] = bitrates[count].hw_value;

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_SET_RATE.
 */
struct mwl8k_cmd_update_rateset {
	struct	mwl8k_cmd_pkt header;
	__u8	legacy_rates[MWL8K_RATE_INDEX_MAX_ARRAY];

	/* Bitmap for supported MCS codes.
	*/
	__u8	mcs_set[MWL8K_IEEE_LEGACY_DATA_RATES];
	__u8	reserved[MWL8K_IEEE_LEGACY_DATA_RATES];
} __attribute__((packed));

/* Send the vif's legacy rate set to the firmware. */
static int mwl8k_update_rateset(struct ieee80211_hw *hw,
		struct ieee80211_vif *vif)
{
	struct mwl8k_vif *mv_vif = MWL8K_VIF(vif);
	struct mwl8k_cmd_update_rateset *cmd;
	struct ieee80211_rate *bitrates = mv_vif->legacy_rates;
	int count;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATE);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));

	for (count = 0; count < mv_vif->legacy_nrates; count++)
		cmd->legacy_rates[count] = bitrates[count].hw_value;

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * CMD_USE_FIXED_RATE.
 */
#define MWL8K_RATE_TABLE_SIZE	8
#define MWL8K_UCAST_RATE	0
#define MWL8K_USE_AUTO_RATE	0x0002

struct mwl8k_rate_entry {
	/* Set to 1 if HT rate, 0 if legacy.  */
	__le32	is_ht_rate;

	/* Set to 1 to use retry_count field.  */
	__le32	enable_retry;

	/* Specified legacy rate or MCS.  */
	__le32	rate;

	/* Number of allowed retries.
	*/
	__le32	retry_count;
} __attribute__((packed));

struct mwl8k_rate_table {
	/* 1 to allow specified rate and below */
	__le32	allow_rate_drop;
	__le32	num_rates;
	struct mwl8k_rate_entry rate_entry[MWL8K_RATE_TABLE_SIZE];
} __attribute__((packed));

struct mwl8k_cmd_use_fixed_rate {
	struct	mwl8k_cmd_pkt header;
	__le32	action;
	struct mwl8k_rate_table rate_table;

	/* Unicast, Broadcast or Multicast */
	__le32	rate_type;
	__le32	reserved1;
	__le32	reserved2;
} __attribute__((packed));

/*
 * Select fixed-rate or auto-rate operation.  A NULL rate_table means
 * no explicit rate list (e.g. MWL8K_USE_AUTO_RATE).
 *
 * NOTE(review): the input table's fields are declared __le32 but are
 * read here as host-order values and re-converted with cpu_to_le32 —
 * presumably callers fill the table in host order; verify on big-endian.
 */
static int mwl8k_cmd_use_fixed_rate(struct ieee80211_hw *hw,
	u32 action, u32 rate_type, struct mwl8k_rate_table *rate_table)
{
	struct mwl8k_cmd_use_fixed_rate *cmd;
	int count;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
	cmd->header.length = cpu_to_le16(sizeof(*cmd));

	cmd->action = cpu_to_le32(action);
	cmd->rate_type = cpu_to_le32(rate_type);

	if (rate_table != NULL) {
		/* Copy over each field manually so
		* that bitflipping can be done
		*/
		cmd->rate_table.allow_rate_drop =
				cpu_to_le32(rate_table->allow_rate_drop);
		cmd->rate_table.num_rates =
				cpu_to_le32(rate_table->num_rates);

		for (count = 0; count < rate_table->num_rates; count++) {
			struct mwl8k_rate_entry *dst =
				&cmd->rate_table.rate_entry[count];
			struct mwl8k_rate_entry *src =
				&rate_table->rate_entry[count];

			dst->is_ht_rate = cpu_to_le32(src->is_ht_rate);
			dst->enable_retry = cpu_to_le32(src->enable_retry);
			dst->rate = cpu_to_le32(src->rate);
			dst->retry_count = cpu_to_le32(src->retry_count);
		}
	}

	rc = mwl8k_post_cmd(hw, &cmd->header);
	kfree(cmd);

	return rc;
}

/*
 * Interrupt handling.
 */
/*
 * Shared IRQ handler: ack all pending A2H interrupt bits, then kick
 * TX reclaim (tasklet), RX processing (inline), command completion and
 * the TX-restart path as indicated by the status word.
 */
static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
{
	struct ieee80211_hw *hw = dev_id;
	struct mwl8k_priv *priv = hw->priv;
	u32 status;

	status = ioread32(priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
	/* Acknowledge exactly the bits we just read. */
	iowrite32(~status, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);

	if (!status)
		return IRQ_NONE;

	if (status & MWL8K_A2H_INT_TX_DONE)
		tasklet_schedule(&priv->tx_reclaim_task);

	if (status & MWL8K_A2H_INT_RX_READY) {
		while (rxq_process(hw, 0, 1))
			rxq_refill(hw, 0, 1);
	}

	if (status & MWL8K_A2H_INT_OPC_DONE) {
		if (priv->hostcmd_wait != NULL)
			complete(priv->hostcmd_wait);
	}

	if (status & MWL8K_A2H_INT_QUEUE_EMPTY) {
		if (!mutex_is_locked(&priv->fw_mutex) &&
		    priv->radio_on && mwl8k_txq_busy(priv))
			mwl8k_tx_start(priv);
	}

	return IRQ_HANDLED;
}


/*
 * Core driver operations.
 */
/* mac80211 .tx: drop frames while the radio is off, else queue them. */
static int mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct mwl8k_priv *priv = hw->priv;
	int index = skb_get_queue_mapping(skb);
	int rc;

	if (priv->current_channel == NULL) {
		printk(KERN_DEBUG "%s: dropped TX frame since radio "
		       "disabled\n", priv->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	rc = mwl8k_txq_xmit(hw, index, skb);

	return rc;
}

/*
 * mac80211 .start: install the IRQ handler, enable the reclaim tasklet
 * and interrupts, then bring the firmware into a known default state
 * under the firmware lock.
 */
static int mwl8k_start(struct ieee80211_hw *hw)
{
	struct mwl8k_priv *priv = hw->priv;
	int rc;

	rc = request_irq(priv->pdev->irq, &mwl8k_interrupt,
			 IRQF_SHARED, MWL8K_NAME, hw);
	if (rc) {
		printk(KERN_ERR "%s: failed to register IRQ handler\n",
		       priv->name);
		return -EIO;
	}

	/* Enable tx reclaim tasklet */
	tasklet_enable(&priv->tx_reclaim_task);

	/* Enable interrupts */
	iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);

	rc = mwl8k_fw_lock(hw);
	if (!rc) {
		rc = mwl8k_cmd_802_11_radio_enable(hw);

		if (!rc)
			rc = mwl8k_cmd_set_pre_scan(hw);

		if (!rc)
			rc = mwl8k_cmd_set_post_scan(hw,
					"\x00\x00\x00\x00\x00\x00");

		if (!rc)
			rc = mwl8k_cmd_setrateadaptmode(hw, 0);

		if (!rc)
			rc = mwl8k_set_wmm(hw, 0);

		if (!rc)
			rc = mwl8k_enable_sniffer(hw, 0);

		mwl8k_fw_unlock(hw);
	}

	if (rc) {
		/* Undo IRQ/tasklet setup on any init-command failure. */
		iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
		free_irq(priv->pdev->irq, hw);
		tasklet_disable(&priv->tx_reclaim_task);
	}

	return rc;
}

/*
 * mac80211 .stop: radio off, interrupts off, cancel deferred work and
 * return all outstanding TX skbs to mac80211.
 */
static void mwl8k_stop(struct ieee80211_hw *hw)
{
	struct mwl8k_priv *priv = hw->priv;
	int i;

	mwl8k_cmd_802_11_radio_disable(hw);

	ieee80211_stop_queues(hw);

	/* Disable interrupts */
	iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
	free_irq(priv->pdev->irq, hw);

	/* Stop finalize join worker */
	cancel_work_sync(&priv->finalize_join_worker);
	/* NOTE(review): beacon_skb is freed but not set to NULL here —
	 * presumably safe because the worker was just cancelled; confirm. */
	if (priv->beacon_skb != NULL)
		dev_kfree_skb(priv->beacon_skb);

	/* Stop tx reclaim tasklet */
	tasklet_disable(&priv->tx_reclaim_task);

	/* Stop config thread */
	flush_workqueue(priv->config_wq);

	/* Return all skbs to mac80211 */
	for (i = 0; i < MWL8K_TX_QUEUES; i++)
		mwl8k_txq_reclaim(hw, i, 1);
}

/*
 * mac80211 .add_interface: accept a single station-mode interface and
 * initialise its driver-private state.
 */
static int mwl8k_add_interface(struct ieee80211_hw *hw,
				struct ieee80211_if_init_conf *conf)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_vif *mwl8k_vif;

	/*
	 * We only support one active interface at a time.
	 */
	if (priv->vif != NULL)
		return -EBUSY;

	/*
	 * We only support managed interfaces for now.
	 */
	if (conf->type != NL80211_IFTYPE_STATION)
		return -EINVAL;

	/* Clean out driver private area */
	mwl8k_vif = MWL8K_VIF(conf->vif);
	memset(mwl8k_vif, 0, sizeof(*mwl8k_vif));

	/* Save the mac address */
	memcpy(mwl8k_vif->mac_addr, conf->mac_addr, ETH_ALEN);

	/* Back pointer to parent config block */
	mwl8k_vif->priv = priv;

	/* Setup initial PHY parameters */
	memcpy(mwl8k_vif->legacy_rates,
		priv->rates, sizeof(mwl8k_vif->legacy_rates));
	mwl8k_vif->legacy_nrates = ARRAY_SIZE(priv->rates);

	/* Set Initial sequence number to zero */
	mwl8k_vif->seqno = 0;

	priv->vif = conf->vif;
	priv->current_channel = NULL;

	return 0;
}

/* mac80211 .remove_interface: forget the (single) active vif. */
static void mwl8k_remove_interface(struct ieee80211_hw *hw,
				   struct ieee80211_if_init_conf *conf)
{
	struct mwl8k_priv *priv = hw->priv;

	if (priv->vif == NULL)
		return;

	priv->vif = NULL;
}

/*
 * mac80211 .config: power the radio down when going idle, otherwise
 * apply channel, TX power (capped at 18 dBm) and antenna settings
 * under the firmware lock.
 */
static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
{
	struct ieee80211_conf *conf = &hw->conf;
	struct mwl8k_priv *priv = hw->priv;
	int rc;

	if (conf->flags & IEEE80211_CONF_IDLE) {
		mwl8k_cmd_802_11_radio_disable(hw);
		priv->current_channel = NULL;
		return 0;
	}

	rc = mwl8k_fw_lock(hw);
	if (rc)
		return rc;

	rc = mwl8k_cmd_802_11_radio_enable(hw);
	if (rc)
		goto out;

	rc = mwl8k_cmd_set_rf_channel(hw, conf->channel);
	if (rc)
		goto out;

	priv->current_channel = conf->channel;

	if (conf->power_level > 18)
		conf->power_level = 18;
	rc = mwl8k_cmd_802_11_rf_tx_power(hw, conf->power_level);
	if (rc)
		goto out;

	if (mwl8k_cmd_mimo_config(hw, 0x7, 0x7))
		rc = -EINVAL;

out:
	mwl8k_fw_unlock(hw);

	return rc;
}

/*
 * mac80211 .bss_info_changed: on (dis)association, reprogram rates,
 * preamble, slot time, station DB and AID as an atomic command
 * sequence under the firmware lock.
 */
static void mwl8k_bss_info_changed(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_bss_conf *info,
				   u32 changed)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
	int rc;

	if (changed & BSS_CHANGED_BSSID)
		memcpy(mwl8k_vif->bssid, info->bssid, ETH_ALEN);

	if ((changed & BSS_CHANGED_ASSOC) == 0)
		return;

	priv->capture_beacon = false;

	rc = mwl8k_fw_lock(hw);
	if (rc)
		return;

	if (info->assoc) {
		memcpy(&mwl8k_vif->bss_info, info,
			sizeof(struct
ieee80211_bss_conf));

		/* Install rates */
		rc = mwl8k_update_rateset(hw, vif);
		if (rc)
			goto out;

		/* Turn on rate adaptation */
		rc = mwl8k_cmd_use_fixed_rate(hw, MWL8K_USE_AUTO_RATE,
			MWL8K_UCAST_RATE, NULL);
		if (rc)
			goto out;

		/* Set radio preamble */
		rc = mwl8k_set_radio_preamble(hw, info->use_short_preamble);
		if (rc)
			goto out;

		/* Set slot time */
		rc = mwl8k_cmd_set_slot(hw, info->use_short_slot);
		if (rc)
			goto out;

		/* Update peer rate info */
		rc = mwl8k_cmd_update_sta_db(hw, vif,
				MWL8K_STA_DB_MODIFY_ENTRY);
		if (rc)
			goto out;

		/* Set AID */
		rc = mwl8k_cmd_set_aid(hw, vif);
		if (rc)
			goto out;

		/*
		 * Finalize the join.  Tell rx handler to process
		 * next beacon from our BSSID.
		 */
		memcpy(priv->capture_bssid, mwl8k_vif->bssid, ETH_ALEN);
		priv->capture_beacon = true;
	} else {
		rc = mwl8k_cmd_update_sta_db(hw, vif, MWL8K_STA_DB_DEL_ENTRY);
		memset(&mwl8k_vif->bss_info, 0,
			sizeof(struct ieee80211_bss_conf));
		memset(mwl8k_vif->bssid, 0, ETH_ALEN);
	}

out:
	mwl8k_fw_unlock(hw);
}

/*
 * mac80211 .prepare_multicast: pre-build the multicast command here
 * (atomic context) and smuggle the pointer to configure_filter via
 * the u64 multicast cookie.
 */
static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
				   int mc_count, struct dev_addr_list *mclist)
{
	struct mwl8k_cmd_pkt *cmd;

	cmd = __mwl8k_cmd_mac_multicast_adr(hw, mc_count, mclist);

	return (unsigned long)cmd;
}

/*
 * mac80211 .configure_filter: apply BSSID-promisc (scan) state and
 * post the multicast command prepared above, then free it.
 */
static void mwl8k_configure_filter(struct ieee80211_hw *hw,
				   unsigned int changed_flags,
				   unsigned int *total_flags,
				   u64 multicast)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_cmd_pkt *multicast_adr_cmd;

	/* Clear unsupported feature flags */
	*total_flags &= FIF_BCN_PRBRESP_PROMISC;

	if (mwl8k_fw_lock(hw))
		return;

	if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
		if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
			mwl8k_cmd_set_pre_scan(hw);
		else {
			u8 *bssid;

			bssid = "\x00\x00\x00\x00\x00\x00";
			if (priv->vif != NULL)
				bssid = MWL8K_VIF(priv->vif)->bssid;

			mwl8k_cmd_set_post_scan(hw, bssid);
		}
	}

	multicast_adr_cmd = (void *)(unsigned long)multicast;
	if (multicast_adr_cmd != NULL) {
		mwl8k_post_cmd(hw, multicast_adr_cmd);
		kfree(multicast_adr_cmd);
	}

	mwl8k_fw_unlock(hw);
}

static int
mwl8k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
	return mwl8k_rts_threshold(hw, MWL8K_CMD_SET, value);
}

/*
 * mac80211 .conf_tx: lazily enable WMM on the first queue config,
 * then program the queue's EDCA parameters.
 */
static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
			 const struct ieee80211_tx_queue_params *params)
{
	struct mwl8k_priv *priv = hw->priv;
	int rc;

	rc = mwl8k_fw_lock(hw);
	if (!rc) {
		if (!priv->wmm_enabled)
			rc = mwl8k_set_wmm(hw, 1);

		if (!rc)
			rc = mwl8k_set_edca_params(hw, queue,
						   params->cw_min,
						   params->cw_max,
						   params->aifs,
						   params->txop);

		mwl8k_fw_unlock(hw);
	}

	return rc;
}

/* mac80211 .get_tx_stats: snapshot per-queue counters under tx_lock. */
static int mwl8k_get_tx_stats(struct ieee80211_hw *hw,
			      struct ieee80211_tx_queue_stats *stats)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_tx_queue *txq;
	int index;

	spin_lock_bh(&priv->tx_lock);
	for (index = 0; index < MWL8K_TX_QUEUES; index++) {
		txq = priv->txq + index;
		memcpy(&stats[index], &txq->tx_stats,
			sizeof(struct ieee80211_tx_queue_stats));
	}
	spin_unlock_bh(&priv->tx_lock);

	return 0;
}

/* mac80211 .get_stats: defer to the firmware statistics command. */
static int mwl8k_get_stats(struct ieee80211_hw *hw,
			   struct ieee80211_low_level_stats *stats)
{
	return mwl8k_cmd_802_11_get_stat(hw, stats);
}

/* mac80211 callback table for this driver. */
static const struct ieee80211_ops mwl8k_ops = {
	.tx			= mwl8k_tx,
	.start			= mwl8k_start,
	.stop			= mwl8k_stop,
	.add_interface		= mwl8k_add_interface,
	.remove_interface	= mwl8k_remove_interface,
	.config			= mwl8k_config,
	.bss_info_changed	= mwl8k_bss_info_changed,
	.prepare_multicast	= mwl8k_prepare_multicast,
	.configure_filter	= mwl8k_configure_filter,
	.set_rts_threshold	= mwl8k_set_rts_threshold,
	.conf_tx		= mwl8k_conf_tx,
	.get_tx_stats		= mwl8k_get_tx_stats,
	.get_stats		= mwl8k_get_stats,
};

/*
 * TX-done tasklet: reclaim completed descriptors on every queue and
 * wake anyone waiting for the TX path to drain (mwl8k_tx_wait_empty).
 */
static void mwl8k_tx_reclaim_handler(unsigned long data)
{
	int i;
	struct ieee80211_hw *hw = (struct ieee80211_hw *) data;
	struct mwl8k_priv *priv = hw->priv;

	spin_lock_bh(&priv->tx_lock);
	for (i = 0; i < MWL8K_TX_QUEUES; i++)
		mwl8k_txq_reclaim(hw, i, 0);

	if (priv->tx_wait != NULL && mwl8k_txq_busy(priv) == 0) {
		complete(priv->tx_wait);
		priv->tx_wait = NULL;
	}
	spin_unlock_bh(&priv->tx_lock);
}

/*
 * Worker that sends the captured beacon to the firmware to finalize
 * the join (scheduled from the RX path after association).
 */
static void mwl8k_finalize_join_worker(struct work_struct *work)
{
struct mwl8k_priv *priv = container_of(work, struct mwl8k_priv, finalize_join_worker); struct sk_buff *skb = priv->beacon_skb; u8 dtim = MWL8K_VIF(priv->vif)->bss_info.dtim_period; mwl8k_finalize_join(priv->hw, skb->data, skb->len, dtim); dev_kfree_skb(skb); priv->beacon_skb = NULL; } static int __devinit mwl8k_probe(struct pci_dev *pdev, const struct pci_device_id *id) { static int printed_version = 0; struct ieee80211_hw *hw; struct mwl8k_priv *priv; int rc; int i; if (!printed_version) { printk(KERN_INFO "%s version %s\n", MWL8K_DESC, MWL8K_VERSION); printed_version = 1; } rc = pci_enable_device(pdev); if (rc) { printk(KERN_ERR "%s: Cannot enable new PCI device\n", MWL8K_NAME); return rc; } rc = pci_request_regions(pdev, MWL8K_NAME); if (rc) { printk(KERN_ERR "%s: Cannot obtain PCI resources\n", MWL8K_NAME); return rc; } pci_set_master(pdev); hw = ieee80211_alloc_hw(sizeof(*priv), &mwl8k_ops); if (hw == NULL) { printk(KERN_ERR "%s: ieee80211 alloc failed\n", MWL8K_NAME); rc = -ENOMEM; goto err_free_reg; } priv = hw->priv; priv->hw = hw; priv->pdev = pdev; priv->wmm_enabled = false; priv->pending_tx_pkts = 0; strncpy(priv->name, MWL8K_NAME, sizeof(priv->name)); SET_IEEE80211_DEV(hw, &pdev->dev); pci_set_drvdata(pdev, hw); priv->regs = pci_iomap(pdev, 1, 0x10000); if (priv->regs == NULL) { printk(KERN_ERR "%s: Cannot map device memory\n", priv->name); goto err_iounmap; } memcpy(priv->channels, mwl8k_channels, sizeof(mwl8k_channels)); priv->band.band = IEEE80211_BAND_2GHZ; priv->band.channels = priv->channels; priv->band.n_channels = ARRAY_SIZE(mwl8k_channels); priv->band.bitrates = priv->rates; priv->band.n_bitrates = ARRAY_SIZE(mwl8k_rates); hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; BUILD_BUG_ON(sizeof(priv->rates) != sizeof(mwl8k_rates)); memcpy(priv->rates, mwl8k_rates, sizeof(mwl8k_rates)); /* * Extra headroom is the size of the required DMA header * minus the size of the smallest 802.11 frame (CTS frame). 
*/ hw->extra_tx_headroom = sizeof(struct mwl8k_dma_data) - sizeof(struct ieee80211_cts); hw->channel_change_time = 10; hw->queues = MWL8K_TX_QUEUES; hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); /* Set rssi and noise values to dBm */ hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM; hw->vif_data_size = sizeof(struct mwl8k_vif); priv->vif = NULL; /* Set default radio state and preamble */ priv->radio_on = 0; priv->radio_short_preamble = 0; /* Finalize join worker */ INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker); /* TX reclaim tasklet */ tasklet_init(&priv->tx_reclaim_task, mwl8k_tx_reclaim_handler, (unsigned long)hw); tasklet_disable(&priv->tx_reclaim_task); /* Config workthread */ priv->config_wq = create_singlethread_workqueue("mwl8k_config"); if (priv->config_wq == NULL) goto err_iounmap; /* Power management cookie */ priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma); if (priv->cookie == NULL) goto err_iounmap; rc = mwl8k_rxq_init(hw, 0); if (rc) goto err_iounmap; rxq_refill(hw, 0, INT_MAX); mutex_init(&priv->fw_mutex); priv->fw_mutex_owner = NULL; priv->fw_mutex_depth = 0; priv->tx_wait = NULL; priv->hostcmd_wait = NULL; spin_lock_init(&priv->tx_lock); for (i = 0; i < MWL8K_TX_QUEUES; i++) { rc = mwl8k_txq_init(hw, i); if (rc) goto err_free_queues; } iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS); iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL); iowrite32(0xffffffff, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK); rc = request_irq(priv->pdev->irq, &mwl8k_interrupt, IRQF_SHARED, MWL8K_NAME, hw); if (rc) { printk(KERN_ERR "%s: failed to register IRQ handler\n", priv->name); goto err_free_queues; } /* Reset firmware and hardware */ mwl8k_hw_reset(priv); /* Ask userland hotplug daemon for the device firmware */ rc = mwl8k_request_firmware(priv, (u32)id->driver_data); if (rc) { printk(KERN_ERR "%s: Firmware files not 
found\n", priv->name); goto err_free_irq; } /* Load firmware into hardware */ rc = mwl8k_load_firmware(priv); if (rc) { printk(KERN_ERR "%s: Cannot start firmware\n", priv->name); goto err_stop_firmware; } /* Reclaim memory once firmware is successfully loaded */ mwl8k_release_firmware(priv); /* * Temporarily enable interrupts. Initial firmware host * commands use interrupts and avoids polling. Disable * interrupts when done. */ iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); /* Get config data, mac addrs etc */ rc = mwl8k_cmd_get_hw_spec(hw); if (rc) { printk(KERN_ERR "%s: Cannot initialise firmware\n", priv->name); goto err_stop_firmware; } /* Turn radio off */ rc = mwl8k_cmd_802_11_radio_disable(hw); if (rc) { printk(KERN_ERR "%s: Cannot disable\n", priv->name); goto err_stop_firmware; } /* Disable interrupts */ iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); free_irq(priv->pdev->irq, hw); rc = ieee80211_register_hw(hw); if (rc) { printk(KERN_ERR "%s: Cannot register device\n", priv->name); goto err_stop_firmware; } printk(KERN_INFO "%s: 88w%u v%d, %pM, firmware version %u.%u.%u.%u\n", wiphy_name(hw->wiphy), priv->part_num, priv->hw_rev, hw->wiphy->perm_addr, (priv->fw_rev >> 24) & 0xff, (priv->fw_rev >> 16) & 0xff, (priv->fw_rev >> 8) & 0xff, priv->fw_rev & 0xff); return 0; err_stop_firmware: mwl8k_hw_reset(priv); mwl8k_release_firmware(priv); err_free_irq: iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); free_irq(priv->pdev->irq, hw); err_free_queues: for (i = 0; i < MWL8K_TX_QUEUES; i++) mwl8k_txq_deinit(hw, i); mwl8k_rxq_deinit(hw, 0); err_iounmap: if (priv->cookie != NULL) pci_free_consistent(priv->pdev, 4, priv->cookie, priv->cookie_dma); if (priv->regs != NULL) pci_iounmap(pdev, priv->regs); if (priv->config_wq != NULL) destroy_workqueue(priv->config_wq); pci_set_drvdata(pdev, NULL); ieee80211_free_hw(hw); err_free_reg: pci_release_regions(pdev); pci_disable_device(pdev); return rc; } static void __devexit 
mwl8k_shutdown(struct pci_dev *pdev) { printk(KERN_ERR "===>%s(%u)\n", __func__, __LINE__); } static void __devexit mwl8k_remove(struct pci_dev *pdev) { struct ieee80211_hw *hw = pci_get_drvdata(pdev); struct mwl8k_priv *priv; int i; if (hw == NULL) return; priv = hw->priv; ieee80211_stop_queues(hw); ieee80211_unregister_hw(hw); /* Remove tx reclaim tasklet */ tasklet_kill(&priv->tx_reclaim_task); /* Stop config thread */ destroy_workqueue(priv->config_wq); /* Stop hardware */ mwl8k_hw_reset(priv); /* Return all skbs to mac80211 */ for (i = 0; i < MWL8K_TX_QUEUES; i++) mwl8k_txq_reclaim(hw, i, 1); for (i = 0; i < MWL8K_TX_QUEUES; i++) mwl8k_txq_deinit(hw, i); mwl8k_rxq_deinit(hw, 0); pci_free_consistent(priv->pdev, 4, priv->cookie, priv->cookie_dma); pci_iounmap(pdev, priv->regs); pci_set_drvdata(pdev, NULL); ieee80211_free_hw(hw); pci_release_regions(pdev); pci_disable_device(pdev); } static struct pci_driver mwl8k_driver = { .name = MWL8K_NAME, .id_table = mwl8k_table, .probe = mwl8k_probe, .remove = __devexit_p(mwl8k_remove), .shutdown = __devexit_p(mwl8k_shutdown), }; static int __init mwl8k_init(void) { return pci_register_driver(&mwl8k_driver); } static void __exit mwl8k_exit(void) { pci_unregister_driver(&mwl8k_driver); } module_init(mwl8k_init); module_exit(mwl8k_exit);
gpl-2.0
onyx-intl/ak98_kernel
drivers/video/omap/lcd_2430sdp.c
464
5308
/* * LCD panel support for the TI 2430SDP board * * Copyright (C) 2007 MontaVista * Author: Hunyue Yau <hyau@mvista.com> * * Derived from drivers/video/omap/lcd-apollon.c * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/i2c/twl4030.h> #include <mach/mux.h> #include <mach/omapfb.h> #include <asm/mach-types.h> #define SDP2430_LCD_PANEL_BACKLIGHT_GPIO 91 #define SDP2430_LCD_PANEL_ENABLE_GPIO 154 #define SDP3430_LCD_PANEL_BACKLIGHT_GPIO 24 #define SDP3430_LCD_PANEL_ENABLE_GPIO 28 static unsigned backlight_gpio; static unsigned enable_gpio; #define LCD_PIXCLOCK_MAX 5400 /* freq 5.4 MHz */ #define PM_RECEIVER TWL4030_MODULE_PM_RECEIVER #define ENABLE_VAUX2_DEDICATED 0x09 #define ENABLE_VAUX2_DEV_GRP 0x20 #define ENABLE_VAUX3_DEDICATED 0x03 #define ENABLE_VAUX3_DEV_GRP 0x20 #define ENABLE_VPLL2_DEDICATED 0x05 #define ENABLE_VPLL2_DEV_GRP 0xE0 #define TWL4030_VPLL2_DEV_GRP 0x33 #define TWL4030_VPLL2_DEDICATED 0x36 #define t2_out(c, r, v) twl4030_i2c_write_u8(c, r, v) static int sdp2430_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev) { if (machine_is_omap_3430sdp()) { enable_gpio = SDP3430_LCD_PANEL_ENABLE_GPIO; backlight_gpio = SDP3430_LCD_PANEL_BACKLIGHT_GPIO; } else { enable_gpio = 
SDP2430_LCD_PANEL_ENABLE_GPIO; backlight_gpio = SDP2430_LCD_PANEL_BACKLIGHT_GPIO; } gpio_request(enable_gpio, "LCD enable"); /* LCD panel */ gpio_request(backlight_gpio, "LCD bl"); /* LCD backlight */ gpio_direction_output(enable_gpio, 0); gpio_direction_output(backlight_gpio, 0); return 0; } static void sdp2430_panel_cleanup(struct lcd_panel *panel) { gpio_free(backlight_gpio); gpio_free(enable_gpio); } static int sdp2430_panel_enable(struct lcd_panel *panel) { u8 ded_val, ded_reg; u8 grp_val, grp_reg; if (machine_is_omap_3430sdp()) { ded_reg = TWL4030_VAUX3_DEDICATED; ded_val = ENABLE_VAUX3_DEDICATED; grp_reg = TWL4030_VAUX3_DEV_GRP; grp_val = ENABLE_VAUX3_DEV_GRP; if (omap_rev() > OMAP3430_REV_ES1_0) { t2_out(PM_RECEIVER, ENABLE_VPLL2_DEDICATED, TWL4030_VPLL2_DEDICATED); t2_out(PM_RECEIVER, ENABLE_VPLL2_DEV_GRP, TWL4030_VPLL2_DEV_GRP); } } else { ded_reg = TWL4030_VAUX2_DEDICATED; ded_val = ENABLE_VAUX2_DEDICATED; grp_reg = TWL4030_VAUX2_DEV_GRP; grp_val = ENABLE_VAUX2_DEV_GRP; } gpio_set_value(enable_gpio, 1); gpio_set_value(backlight_gpio, 1); if (0 != t2_out(PM_RECEIVER, ded_val, ded_reg)) return -EIO; if (0 != t2_out(PM_RECEIVER, grp_val, grp_reg)) return -EIO; return 0; } static void sdp2430_panel_disable(struct lcd_panel *panel) { gpio_set_value(enable_gpio, 0); gpio_set_value(backlight_gpio, 0); if (omap_rev() > OMAP3430_REV_ES1_0) { t2_out(PM_RECEIVER, 0x0, TWL4030_VPLL2_DEDICATED); t2_out(PM_RECEIVER, 0x0, TWL4030_VPLL2_DEV_GRP); msleep(4); } } static unsigned long sdp2430_panel_get_caps(struct lcd_panel *panel) { return 0; } struct lcd_panel sdp2430_panel = { .name = "sdp2430", .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC | OMAP_LCDC_INV_HSYNC, .bpp = 16, .data_lines = 16, .x_res = 240, .y_res = 320, .hsw = 3, /* hsync_len (4) - 1 */ .hfp = 3, /* right_margin (4) - 1 */ .hbp = 39, /* left_margin (40) - 1 */ .vsw = 1, /* vsync_len (2) - 1 */ .vfp = 2, /* lower_margin */ .vbp = 7, /* upper_margin (8) - 1 */ .pixel_clock = LCD_PIXCLOCK_MAX, .init = 
sdp2430_panel_init, .cleanup = sdp2430_panel_cleanup, .enable = sdp2430_panel_enable, .disable = sdp2430_panel_disable, .get_caps = sdp2430_panel_get_caps, }; static int sdp2430_panel_probe(struct platform_device *pdev) { omapfb_register_panel(&sdp2430_panel); return 0; } static int sdp2430_panel_remove(struct platform_device *pdev) { return 0; } static int sdp2430_panel_suspend(struct platform_device *pdev, pm_message_t mesg) { return 0; } static int sdp2430_panel_resume(struct platform_device *pdev) { return 0; } struct platform_driver sdp2430_panel_driver = { .probe = sdp2430_panel_probe, .remove = sdp2430_panel_remove, .suspend = sdp2430_panel_suspend, .resume = sdp2430_panel_resume, .driver = { .name = "sdp2430_lcd", .owner = THIS_MODULE, }, }; static int __init sdp2430_panel_drv_init(void) { return platform_driver_register(&sdp2430_panel_driver); } static void __exit sdp2430_panel_drv_exit(void) { platform_driver_unregister(&sdp2430_panel_driver); } module_init(sdp2430_panel_drv_init); module_exit(sdp2430_panel_drv_exit);
gpl-2.0
bebek15/samsung_kernel_msm7x27
drivers/gpio/gpiolib.c
720
43087
#include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/spinlock.h> #include <linux/device.h> #include <linux/err.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/gpio.h> #include <linux/idr.h> #include <linux/slab.h> /* Optional implementation infrastructure for GPIO interfaces. * * Platforms may want to use this if they tend to use very many GPIOs * that aren't part of a System-On-Chip core; or across I2C/SPI/etc. * * When kernel footprint or instruction count is an issue, simpler * implementations may be preferred. The GPIO programming interface * allows for inlining speed-critical get/set operations for common * cases, so that access to SOC-integrated GPIOs can sometimes cost * only an instruction or two per bit. */ /* When debugging, extend minimal trust to callers and platform code. * Also emit diagnostic messages that may help initial bringup, when * board setup or driver bugs are most common. * * Otherwise, minimize overhead in what may be bitbanging codepaths. */ #ifdef DEBUG #define extra_checks 1 #else #define extra_checks 0 #endif /* gpio_lock prevents conflicts during gpio_desc[] table updates. * While any GPIO is requested, its gpio_chip is not removable; * each GPIO's "requested" flag serves as a lock and refcount. 
*/ static DEFINE_SPINLOCK(gpio_lock); struct gpio_desc { struct gpio_chip *chip; unsigned long flags; /* flag symbols are bit numbers */ #define FLAG_REQUESTED 0 #define FLAG_IS_OUT 1 #define FLAG_RESERVED 2 #define FLAG_EXPORT 3 /* protected by sysfs_lock */ #define FLAG_SYSFS 4 /* exported via /sys/class/gpio/control */ #define FLAG_TRIG_FALL 5 /* trigger on falling edge */ #define FLAG_TRIG_RISE 6 /* trigger on rising edge */ #define FLAG_ACTIVE_LOW 7 /* sysfs value has active low */ #define PDESC_ID_SHIFT 16 /* add new flags before this one */ #define GPIO_FLAGS_MASK ((1 << PDESC_ID_SHIFT) - 1) #define GPIO_TRIGGER_MASK (BIT(FLAG_TRIG_FALL) | BIT(FLAG_TRIG_RISE)) #ifdef CONFIG_DEBUG_FS const char *label; #endif }; static struct gpio_desc gpio_desc[ARCH_NR_GPIOS]; #ifdef CONFIG_GPIO_SYSFS struct poll_desc { struct work_struct work; struct sysfs_dirent *value_sd; }; static struct idr pdesc_idr; #endif static inline void desc_set_label(struct gpio_desc *d, const char *label) { #ifdef CONFIG_DEBUG_FS d->label = label; #endif } /* Warn when drivers omit gpio_request() calls -- legal but ill-advised * when setting direction, and otherwise illegal. Until board setup code * and drivers use explicit requests everywhere (which won't happen when * those calls have no teeth) we can't avoid autorequesting. This nag * message should motivate switching to explicit requests... so should * the weaker cleanup after faults, compared to gpio_request(). * * NOTE: the autorequest mechanism is going away; at this point it's * only "legal" in the sense that (old) code using it won't break yet, * but instead only triggers a WARN() stack dump. 
*/ static int gpio_ensure_requested(struct gpio_desc *desc, unsigned offset) { const struct gpio_chip *chip = desc->chip; const int gpio = chip->base + offset; if (WARN(test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0, "autorequest GPIO-%d\n", gpio)) { if (!try_module_get(chip->owner)) { pr_err("GPIO-%d: module can't be gotten \n", gpio); clear_bit(FLAG_REQUESTED, &desc->flags); /* lose */ return -EIO; } desc_set_label(desc, "[auto]"); /* caller must chip->request() w/o spinlock */ if (chip->request) return 1; } return 0; } /* caller holds gpio_lock *OR* gpio is marked as requested */ static inline struct gpio_chip *gpio_to_chip(unsigned gpio) { return gpio_desc[gpio].chip; } /* dynamic allocation of GPIOs, e.g. on a hotplugged device */ static int gpiochip_find_base(int ngpio) { int i; int spare = 0; int base = -ENOSPC; for (i = ARCH_NR_GPIOS - 1; i >= 0 ; i--) { struct gpio_desc *desc = &gpio_desc[i]; struct gpio_chip *chip = desc->chip; if (!chip && !test_bit(FLAG_RESERVED, &desc->flags)) { spare++; if (spare == ngpio) { base = i; break; } } else { spare = 0; if (chip) i -= chip->ngpio - 1; } } if (gpio_is_valid(base)) pr_debug("%s: found new base at %d\n", __func__, base); return base; } /** * gpiochip_reserve() - reserve range of gpios to use with platform code only * @start: starting gpio number * @ngpio: number of gpios to reserve * Context: platform init, potentially before irqs or kmalloc will work * * Returns a negative errno if any gpio within the range is already reserved * or registered, else returns zero as a success code. Use this function * to mark a range of gpios as unavailable for dynamic gpio number allocation, * for example because its driver support is not yet loaded. 
*/ int __init gpiochip_reserve(int start, int ngpio) { int ret = 0; unsigned long flags; int i; if (!gpio_is_valid(start) || !gpio_is_valid(start + ngpio - 1)) return -EINVAL; spin_lock_irqsave(&gpio_lock, flags); for (i = start; i < start + ngpio; i++) { struct gpio_desc *desc = &gpio_desc[i]; if (desc->chip || test_bit(FLAG_RESERVED, &desc->flags)) { ret = -EBUSY; goto err; } set_bit(FLAG_RESERVED, &desc->flags); } pr_debug("%s: reserved gpios from %d to %d\n", __func__, start, start + ngpio - 1); err: spin_unlock_irqrestore(&gpio_lock, flags); return ret; } #ifdef CONFIG_GPIO_SYSFS /* lock protects against unexport_gpio() being called while * sysfs files are active. */ static DEFINE_MUTEX(sysfs_lock); /* * /sys/class/gpio/gpioN... only for GPIOs that are exported * /direction * * MAY BE OMITTED if kernel won't allow direction changes * * is read/write as "in" or "out" * * may also be written as "high" or "low", initializing * output value as specified ("out" implies "low") * /value * * always readable, subject to hardware behavior * * may be writable, as zero/nonzero * /edge * * configures behavior of poll(2) on /value * * available only if pin can generate IRQs on input * * is read/write as "none", "falling", "rising", or "both" * /active_low * * configures polarity of /value * * is read/write as zero/nonzero * * also affects existing and subsequent "falling" and "rising" * /edge configuration */ static ssize_t gpio_direction_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_desc *desc = dev_get_drvdata(dev); ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; else status = sprintf(buf, "%s\n", test_bit(FLAG_IS_OUT, &desc->flags) ? 
"out" : "in"); mutex_unlock(&sysfs_lock); return status; } static ssize_t gpio_direction_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { const struct gpio_desc *desc = dev_get_drvdata(dev); unsigned gpio = desc - gpio_desc; ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; else if (sysfs_streq(buf, "high")) status = gpio_direction_output(gpio, 1); else if (sysfs_streq(buf, "out") || sysfs_streq(buf, "low")) status = gpio_direction_output(gpio, 0); else if (sysfs_streq(buf, "in")) status = gpio_direction_input(gpio); else status = -EINVAL; mutex_unlock(&sysfs_lock); return status ? : size; } static /* const */ DEVICE_ATTR(direction, 0644, gpio_direction_show, gpio_direction_store); static ssize_t gpio_value_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_desc *desc = dev_get_drvdata(dev); unsigned gpio = desc - gpio_desc; ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) { status = -EIO; } else { int value; value = !!gpio_get_value_cansleep(gpio); if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) value = !value; status = sprintf(buf, "%d\n", value); } mutex_unlock(&sysfs_lock); return status; } static ssize_t gpio_value_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { const struct gpio_desc *desc = dev_get_drvdata(dev); unsigned gpio = desc - gpio_desc; ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; else if (!test_bit(FLAG_IS_OUT, &desc->flags)) status = -EPERM; else { long value; status = strict_strtol(buf, 0, &value); if (status == 0) { if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) value = !value; gpio_set_value_cansleep(gpio, value != 0); status = size; } } mutex_unlock(&sysfs_lock); return status; } static const DEVICE_ATTR(value, 0644, gpio_value_show, gpio_value_store); static irqreturn_t gpio_sysfs_irq(int irq, void 
*priv) { struct work_struct *work = priv; schedule_work(work); return IRQ_HANDLED; } static void gpio_notify_sysfs(struct work_struct *work) { struct poll_desc *pdesc; pdesc = container_of(work, struct poll_desc, work); sysfs_notify_dirent(pdesc->value_sd); } static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev, unsigned long gpio_flags) { struct poll_desc *pdesc; unsigned long irq_flags; int ret, irq, id; if ((desc->flags & GPIO_TRIGGER_MASK) == gpio_flags) return 0; irq = gpio_to_irq(desc - gpio_desc); if (irq < 0) return -EIO; id = desc->flags >> PDESC_ID_SHIFT; pdesc = idr_find(&pdesc_idr, id); if (pdesc) { free_irq(irq, &pdesc->work); cancel_work_sync(&pdesc->work); } desc->flags &= ~GPIO_TRIGGER_MASK; if (!gpio_flags) { ret = 0; goto free_sd; } irq_flags = IRQF_SHARED; if (test_bit(FLAG_TRIG_FALL, &gpio_flags)) irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING; if (test_bit(FLAG_TRIG_RISE, &gpio_flags)) irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? 
IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING; if (!pdesc) { pdesc = kmalloc(sizeof(*pdesc), GFP_KERNEL); if (!pdesc) { ret = -ENOMEM; goto err_out; } do { ret = -ENOMEM; if (idr_pre_get(&pdesc_idr, GFP_KERNEL)) ret = idr_get_new_above(&pdesc_idr, pdesc, 1, &id); } while (ret == -EAGAIN); if (ret) goto free_mem; desc->flags &= GPIO_FLAGS_MASK; desc->flags |= (unsigned long)id << PDESC_ID_SHIFT; if (desc->flags >> PDESC_ID_SHIFT != id) { ret = -ERANGE; goto free_id; } pdesc->value_sd = sysfs_get_dirent(dev->kobj.sd, NULL, "value"); if (!pdesc->value_sd) { ret = -ENODEV; goto free_id; } INIT_WORK(&pdesc->work, gpio_notify_sysfs); } ret = request_irq(irq, gpio_sysfs_irq, irq_flags, "gpiolib", &pdesc->work); if (ret) goto free_sd; desc->flags |= gpio_flags; return 0; free_sd: if (pdesc) sysfs_put(pdesc->value_sd); free_id: idr_remove(&pdesc_idr, id); desc->flags &= GPIO_FLAGS_MASK; free_mem: kfree(pdesc); err_out: return ret; } static const struct { const char *name; unsigned long flags; } trigger_types[] = { { "none", 0 }, { "falling", BIT(FLAG_TRIG_FALL) }, { "rising", BIT(FLAG_TRIG_RISE) }, { "both", BIT(FLAG_TRIG_FALL) | BIT(FLAG_TRIG_RISE) }, }; static ssize_t gpio_edge_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_desc *desc = dev_get_drvdata(dev); ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; else { int i; status = 0; for (i = 0; i < ARRAY_SIZE(trigger_types); i++) if ((desc->flags & GPIO_TRIGGER_MASK) == trigger_types[i].flags) { status = sprintf(buf, "%s\n", trigger_types[i].name); break; } } mutex_unlock(&sysfs_lock); return status; } static ssize_t gpio_edge_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct gpio_desc *desc = dev_get_drvdata(dev); ssize_t status; int i; for (i = 0; i < ARRAY_SIZE(trigger_types); i++) if (sysfs_streq(trigger_types[i].name, buf)) goto found; return -EINVAL; found: mutex_lock(&sysfs_lock); if 
(!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; else { status = gpio_setup_irq(desc, dev, trigger_types[i].flags); if (!status) status = size; } mutex_unlock(&sysfs_lock); return status; } static DEVICE_ATTR(edge, 0644, gpio_edge_show, gpio_edge_store); static int sysfs_set_active_low(struct gpio_desc *desc, struct device *dev, int value) { int status = 0; if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value) return 0; if (value) set_bit(FLAG_ACTIVE_LOW, &desc->flags); else clear_bit(FLAG_ACTIVE_LOW, &desc->flags); /* reconfigure poll(2) support if enabled on one edge only */ if (dev != NULL && (!!test_bit(FLAG_TRIG_RISE, &desc->flags) ^ !!test_bit(FLAG_TRIG_FALL, &desc->flags))) { unsigned long trigger_flags = desc->flags & GPIO_TRIGGER_MASK; gpio_setup_irq(desc, dev, 0); status = gpio_setup_irq(desc, dev, trigger_flags); } return status; } static ssize_t gpio_active_low_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_desc *desc = dev_get_drvdata(dev); ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; else status = sprintf(buf, "%d\n", !!test_bit(FLAG_ACTIVE_LOW, &desc->flags)); mutex_unlock(&sysfs_lock); return status; } static ssize_t gpio_active_low_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct gpio_desc *desc = dev_get_drvdata(dev); ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) { status = -EIO; } else { long value; status = strict_strtol(buf, 0, &value); if (status == 0) status = sysfs_set_active_low(desc, dev, value != 0); } mutex_unlock(&sysfs_lock); return status ? 
: size; } static const DEVICE_ATTR(active_low, 0644, gpio_active_low_show, gpio_active_low_store); static const struct attribute *gpio_attrs[] = { &dev_attr_value.attr, &dev_attr_active_low.attr, NULL, }; static const struct attribute_group gpio_attr_group = { .attrs = (struct attribute **) gpio_attrs, }; /* * /sys/class/gpio/gpiochipN/ * /base ... matching gpio_chip.base (N) * /label ... matching gpio_chip.label * /ngpio ... matching gpio_chip.ngpio */ static ssize_t chip_base_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_chip *chip = dev_get_drvdata(dev); return sprintf(buf, "%d\n", chip->base); } static DEVICE_ATTR(base, 0444, chip_base_show, NULL); static ssize_t chip_label_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_chip *chip = dev_get_drvdata(dev); return sprintf(buf, "%s\n", chip->label ? : ""); } static DEVICE_ATTR(label, 0444, chip_label_show, NULL); static ssize_t chip_ngpio_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_chip *chip = dev_get_drvdata(dev); return sprintf(buf, "%u\n", chip->ngpio); } static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL); static const struct attribute *gpiochip_attrs[] = { &dev_attr_base.attr, &dev_attr_label.attr, &dev_attr_ngpio.attr, NULL, }; static const struct attribute_group gpiochip_attr_group = { .attrs = (struct attribute **) gpiochip_attrs, }; /* * /sys/class/gpio/export ... write-only * integer N ... number of GPIO to export (full access) * /sys/class/gpio/unexport ... write-only * integer N ... number of GPIO to unexport */ static ssize_t export_store(struct class *class, struct class_attribute *attr, const char *buf, size_t len) { long gpio; int status; status = strict_strtol(buf, 0, &gpio); if (status < 0) goto done; /* No extra locking here; FLAG_SYSFS just signifies that the * request and export were done by on behalf of userspace, so * they may be undone on its behalf too. 
*/ status = gpio_request(gpio, "sysfs"); if (status < 0) goto done; status = gpio_export(gpio, true); if (status < 0) gpio_free(gpio); else set_bit(FLAG_SYSFS, &gpio_desc[gpio].flags); done: if (status) pr_debug("%s: status %d\n", __func__, status); return status ? : len; } static ssize_t unexport_store(struct class *class, struct class_attribute *attr, const char *buf, size_t len) { long gpio; int status; status = strict_strtol(buf, 0, &gpio); if (status < 0) goto done; status = -EINVAL; /* reject bogus commands (gpio_unexport ignores them) */ if (!gpio_is_valid(gpio)) goto done; /* No extra locking here; FLAG_SYSFS just signifies that the * request and export were done by on behalf of userspace, so * they may be undone on its behalf too. */ if (test_and_clear_bit(FLAG_SYSFS, &gpio_desc[gpio].flags)) { status = 0; gpio_free(gpio); } done: if (status) pr_debug("%s: status %d\n", __func__, status); return status ? : len; } static struct class_attribute gpio_class_attrs[] = { __ATTR(export, 0200, NULL, export_store), __ATTR(unexport, 0200, NULL, unexport_store), __ATTR_NULL, }; static struct class gpio_class = { .name = "gpio", .owner = THIS_MODULE, .class_attrs = gpio_class_attrs, }; /** * gpio_export - export a GPIO through sysfs * @gpio: gpio to make available, already requested * @direction_may_change: true if userspace may change gpio direction * Context: arch_initcall or later * * When drivers want to make a GPIO accessible to userspace after they * have requested it -- perhaps while debugging, or as part of their * public interface -- they may use this routine. If the GPIO can * change direction (some can't) and the caller allows it, userspace * will see "direction" sysfs attribute which may be used to change * the gpio's direction. A "value" attribute will always be provided. * * Returns zero on success, else an error. 
*/ int gpio_export(unsigned gpio, bool direction_may_change) { unsigned long flags; struct gpio_desc *desc; int status = -EINVAL; const char *ioname = NULL; /* can't export until sysfs is available ... */ if (!gpio_class.p) { pr_debug("%s: called too early!\n", __func__); return -ENOENT; } if (!gpio_is_valid(gpio)) goto done; mutex_lock(&sysfs_lock); spin_lock_irqsave(&gpio_lock, flags); desc = &gpio_desc[gpio]; if (test_bit(FLAG_REQUESTED, &desc->flags) && !test_bit(FLAG_EXPORT, &desc->flags)) { status = 0; if (!desc->chip->direction_input || !desc->chip->direction_output) direction_may_change = false; } spin_unlock_irqrestore(&gpio_lock, flags); if (desc->chip->names && desc->chip->names[gpio - desc->chip->base]) ioname = desc->chip->names[gpio - desc->chip->base]; if (status == 0) { struct device *dev; dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0), desc, ioname ? ioname : "gpio%u", gpio); if (!IS_ERR(dev)) { status = sysfs_create_group(&dev->kobj, &gpio_attr_group); if (!status && direction_may_change) status = device_create_file(dev, &dev_attr_direction); if (!status && gpio_to_irq(gpio) >= 0 && (direction_may_change || !test_bit(FLAG_IS_OUT, &desc->flags))) status = device_create_file(dev, &dev_attr_edge); if (status != 0) device_unregister(dev); } else status = PTR_ERR(dev); if (status == 0) set_bit(FLAG_EXPORT, &desc->flags); } mutex_unlock(&sysfs_lock); done: if (status) pr_debug("%s: gpio%d status %d\n", __func__, gpio, status); return status; } EXPORT_SYMBOL_GPL(gpio_export); static int match_export(struct device *dev, void *data) { return dev_get_drvdata(dev) == data; } /** * gpio_export_link - create a sysfs link to an exported GPIO node * @dev: device under which to create symlink * @name: name of the symlink * @gpio: gpio to create symlink to, already exported * * Set up a symlink from /sys/.../dev/name to /sys/class/gpio/gpioN * node. Caller is responsible for unlinking. * * Returns zero on success, else an error. 
*/ int gpio_export_link(struct device *dev, const char *name, unsigned gpio) { struct gpio_desc *desc; int status = -EINVAL; if (!gpio_is_valid(gpio)) goto done; mutex_lock(&sysfs_lock); desc = &gpio_desc[gpio]; if (test_bit(FLAG_EXPORT, &desc->flags)) { struct device *tdev; tdev = class_find_device(&gpio_class, NULL, desc, match_export); if (tdev != NULL) { status = sysfs_create_link(&dev->kobj, &tdev->kobj, name); } else { status = -ENODEV; } } mutex_unlock(&sysfs_lock); done: if (status) pr_debug("%s: gpio%d status %d\n", __func__, gpio, status); return status; } EXPORT_SYMBOL_GPL(gpio_export_link); /** * gpio_sysfs_set_active_low - set the polarity of gpio sysfs value * @gpio: gpio to change * @value: non-zero to use active low, i.e. inverted values * * Set the polarity of /sys/class/gpio/gpioN/value sysfs attribute. * The GPIO does not have to be exported yet. If poll(2) support has * been enabled for either rising or falling edge, it will be * reconfigured to follow the new polarity. * * Returns zero on success, else an error. */ int gpio_sysfs_set_active_low(unsigned gpio, int value) { struct gpio_desc *desc; struct device *dev = NULL; int status = -EINVAL; if (!gpio_is_valid(gpio)) goto done; mutex_lock(&sysfs_lock); desc = &gpio_desc[gpio]; if (test_bit(FLAG_EXPORT, &desc->flags)) { dev = class_find_device(&gpio_class, NULL, desc, match_export); if (dev == NULL) { status = -ENODEV; goto unlock; } } status = sysfs_set_active_low(desc, dev, value); unlock: mutex_unlock(&sysfs_lock); done: if (status) pr_debug("%s: gpio%d status %d\n", __func__, gpio, status); return status; } EXPORT_SYMBOL_GPL(gpio_sysfs_set_active_low); /** * gpio_unexport - reverse effect of gpio_export() * @gpio: gpio to make unavailable * * This is implicit on gpio_free(). 
*/ void gpio_unexport(unsigned gpio) { struct gpio_desc *desc; int status = 0; if (!gpio_is_valid(gpio)) { status = -EINVAL; goto done; } mutex_lock(&sysfs_lock); desc = &gpio_desc[gpio]; if (test_bit(FLAG_EXPORT, &desc->flags)) { struct device *dev = NULL; dev = class_find_device(&gpio_class, NULL, desc, match_export); if (dev) { gpio_setup_irq(desc, dev, 0); clear_bit(FLAG_EXPORT, &desc->flags); put_device(dev); device_unregister(dev); } else status = -ENODEV; } mutex_unlock(&sysfs_lock); done: if (status) pr_debug("%s: gpio%d status %d\n", __func__, gpio, status); } EXPORT_SYMBOL_GPL(gpio_unexport); static int gpiochip_export(struct gpio_chip *chip) { int status; struct device *dev; /* Many systems register gpio chips for SOC support very early, * before driver model support is available. In those cases we * export this later, in gpiolib_sysfs_init() ... here we just * verify that _some_ field of gpio_class got initialized. */ if (!gpio_class.p) return 0; /* use chip->base for the ID; it's already known to be unique */ mutex_lock(&sysfs_lock); dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip, "gpiochip%d", chip->base); if (!IS_ERR(dev)) { status = sysfs_create_group(&dev->kobj, &gpiochip_attr_group); } else status = PTR_ERR(dev); chip->exported = (status == 0); mutex_unlock(&sysfs_lock); if (status) { unsigned long flags; unsigned gpio; spin_lock_irqsave(&gpio_lock, flags); gpio = chip->base; while (gpio_desc[gpio].chip == chip) gpio_desc[gpio++].chip = NULL; spin_unlock_irqrestore(&gpio_lock, flags); pr_debug("%s: chip %s status %d\n", __func__, chip->label, status); } return status; } static void gpiochip_unexport(struct gpio_chip *chip) { int status; struct device *dev; mutex_lock(&sysfs_lock); dev = class_find_device(&gpio_class, NULL, chip, match_export); if (dev) { put_device(dev); device_unregister(dev); chip->exported = 0; status = 0; } else status = -ENODEV; mutex_unlock(&sysfs_lock); if (status) pr_debug("%s: chip %s status %d\n", 
__func__, chip->label, status); } static int __init gpiolib_sysfs_init(void) { int status; unsigned long flags; unsigned gpio; idr_init(&pdesc_idr); status = class_register(&gpio_class); if (status < 0) return status; /* Scan and register the gpio_chips which registered very * early (e.g. before the class_register above was called). * * We run before arch_initcall() so chip->dev nodes can have * registered, and so arch_initcall() can always gpio_export(). */ spin_lock_irqsave(&gpio_lock, flags); for (gpio = 0; gpio < ARCH_NR_GPIOS; gpio++) { struct gpio_chip *chip; chip = gpio_desc[gpio].chip; if (!chip || chip->exported) continue; spin_unlock_irqrestore(&gpio_lock, flags); status = gpiochip_export(chip); spin_lock_irqsave(&gpio_lock, flags); } spin_unlock_irqrestore(&gpio_lock, flags); return status; } postcore_initcall(gpiolib_sysfs_init); #else static inline int gpiochip_export(struct gpio_chip *chip) { return 0; } static inline void gpiochip_unexport(struct gpio_chip *chip) { } #endif /* CONFIG_GPIO_SYSFS */ /** * gpiochip_add() - register a gpio_chip * @chip: the chip to register, with chip->base initialized * Context: potentially before irqs or kmalloc will work * * Returns a negative errno if the chip can't be registered, such as * because the chip->base is invalid or already associated with a * different chip. Otherwise it returns zero as a success code. * * When gpiochip_add() is called very early during boot, so that GPIOs * can be freely used, the chip->dev device must be registered before * the gpio framework's arch_initcall(). Otherwise sysfs initialization * for GPIOs will fail rudely. * * If chip->base is negative, this requests dynamic assignment of * a range of valid GPIOs. 
*/ int gpiochip_add(struct gpio_chip *chip) { unsigned long flags; int status = 0; unsigned id; int base = chip->base; if ((!gpio_is_valid(base) || !gpio_is_valid(base + chip->ngpio - 1)) && base >= 0) { status = -EINVAL; goto fail; } spin_lock_irqsave(&gpio_lock, flags); if (base < 0) { base = gpiochip_find_base(chip->ngpio); if (base < 0) { status = base; goto unlock; } chip->base = base; } /* these GPIO numbers must not be managed by another gpio_chip */ for (id = base; id < base + chip->ngpio; id++) { if (gpio_desc[id].chip != NULL) { status = -EBUSY; break; } } if (status == 0) { for (id = base; id < base + chip->ngpio; id++) { gpio_desc[id].chip = chip; /* REVISIT: most hardware initializes GPIOs as * inputs (often with pullups enabled) so power * usage is minimized. Linux code should set the * gpio direction first thing; but until it does, * we may expose the wrong direction in sysfs. */ gpio_desc[id].flags = !chip->direction_input ? (1 << FLAG_IS_OUT) : 0; } } unlock: spin_unlock_irqrestore(&gpio_lock, flags); if (status == 0) status = gpiochip_export(chip); fail: /* failures here can mean systems won't boot... */ if (status) pr_err("gpiochip_add: gpios %d..%d (%s) failed to register\n", chip->base, chip->base + chip->ngpio - 1, chip->label ? : "generic"); return status; } EXPORT_SYMBOL_GPL(gpiochip_add); /** * gpiochip_remove() - unregister a gpio_chip * @chip: the chip to unregister * * A gpio_chip with any GPIOs still requested may not be removed. 
*/ int gpiochip_remove(struct gpio_chip *chip) { unsigned long flags; int status = 0; unsigned id; spin_lock_irqsave(&gpio_lock, flags); for (id = chip->base; id < chip->base + chip->ngpio; id++) { if (test_bit(FLAG_REQUESTED, &gpio_desc[id].flags)) { status = -EBUSY; break; } } if (status == 0) { for (id = chip->base; id < chip->base + chip->ngpio; id++) gpio_desc[id].chip = NULL; } spin_unlock_irqrestore(&gpio_lock, flags); if (status == 0) gpiochip_unexport(chip); return status; } EXPORT_SYMBOL_GPL(gpiochip_remove); /* These "optional" allocation calls help prevent drivers from stomping * on each other, and help provide better diagnostics in debugfs. * They're called even less than the "set direction" calls. */ int gpio_request(unsigned gpio, const char *label) { struct gpio_desc *desc; struct gpio_chip *chip; int status = -EINVAL; unsigned long flags; spin_lock_irqsave(&gpio_lock, flags); if (!gpio_is_valid(gpio)) goto done; desc = &gpio_desc[gpio]; chip = desc->chip; if (chip == NULL) goto done; if (!try_module_get(chip->owner)) goto done; /* NOTE: gpio_request() can be called in early boot, * before IRQs are enabled, for non-sleeping (SOC) GPIOs. */ if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) { desc_set_label(desc, label ? : "?"); status = 0; } else { status = -EBUSY; module_put(chip->owner); goto done; } if (chip->request) { /* chip->request may sleep */ spin_unlock_irqrestore(&gpio_lock, flags); status = chip->request(chip, gpio - chip->base); spin_lock_irqsave(&gpio_lock, flags); if (status < 0) { desc_set_label(desc, NULL); module_put(chip->owner); clear_bit(FLAG_REQUESTED, &desc->flags); } } done: if (status) pr_debug("gpio_request: gpio-%d (%s) status %d\n", gpio, label ? 
: "?", status); spin_unlock_irqrestore(&gpio_lock, flags); return status; } EXPORT_SYMBOL_GPL(gpio_request); void gpio_free(unsigned gpio) { unsigned long flags; struct gpio_desc *desc; struct gpio_chip *chip; might_sleep(); if (!gpio_is_valid(gpio)) { WARN_ON(extra_checks); return; } gpio_unexport(gpio); spin_lock_irqsave(&gpio_lock, flags); desc = &gpio_desc[gpio]; chip = desc->chip; if (chip && test_bit(FLAG_REQUESTED, &desc->flags)) { if (chip->free) { spin_unlock_irqrestore(&gpio_lock, flags); might_sleep_if(extra_checks && chip->can_sleep); chip->free(chip, gpio - chip->base); spin_lock_irqsave(&gpio_lock, flags); } desc_set_label(desc, NULL); module_put(desc->chip->owner); clear_bit(FLAG_ACTIVE_LOW, &desc->flags); clear_bit(FLAG_REQUESTED, &desc->flags); } else WARN_ON(extra_checks); spin_unlock_irqrestore(&gpio_lock, flags); } EXPORT_SYMBOL_GPL(gpio_free); /** * gpio_request_one - request a single GPIO with initial configuration * @gpio: the GPIO number * @flags: GPIO configuration as specified by GPIOF_* * @label: a literal description string of this GPIO */ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) { int err; err = gpio_request(gpio, label); if (err) return err; if (flags & GPIOF_DIR_IN) err = gpio_direction_input(gpio); else err = gpio_direction_output(gpio, (flags & GPIOF_INIT_HIGH) ? 
1 : 0); return err; } EXPORT_SYMBOL_GPL(gpio_request_one); /** * gpio_request_array - request multiple GPIOs in a single call * @array: array of the 'struct gpio' * @num: how many GPIOs in the array */ int gpio_request_array(struct gpio *array, size_t num) { int i, err; for (i = 0; i < num; i++, array++) { err = gpio_request_one(array->gpio, array->flags, array->label); if (err) goto err_free; } return 0; err_free: while (i--) gpio_free((--array)->gpio); return err; } EXPORT_SYMBOL_GPL(gpio_request_array); /** * gpio_free_array - release multiple GPIOs in a single call * @array: array of the 'struct gpio' * @num: how many GPIOs in the array */ void gpio_free_array(struct gpio *array, size_t num) { while (num--) gpio_free((array++)->gpio); } EXPORT_SYMBOL_GPL(gpio_free_array); /** * gpiochip_is_requested - return string iff signal was requested * @chip: controller managing the signal * @offset: of signal within controller's 0..(ngpio - 1) range * * Returns NULL if the GPIO is not currently requested, else a string. * If debugfs support is enabled, the string returned is the label passed * to gpio_request(); otherwise it is a meaningless constant. * * This function is for use by GPIO controller drivers. The label can * help with diagnostics, and knowing that the signal is used as a GPIO * can help avoid accidentally multiplexing it to another controller. */ const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset) { unsigned gpio = chip->base + offset; if (!gpio_is_valid(gpio) || gpio_desc[gpio].chip != chip) return NULL; if (test_bit(FLAG_REQUESTED, &gpio_desc[gpio].flags) == 0) return NULL; #ifdef CONFIG_DEBUG_FS return gpio_desc[gpio].label; #else return "?"; #endif } EXPORT_SYMBOL_GPL(gpiochip_is_requested); /* Drivers MUST set GPIO direction before making get/set calls. In * some cases this is done in early boot, before IRQs are enabled. 
* * As a rule these aren't called more than once (except for drivers * using the open-drain emulation idiom) so these are natural places * to accumulate extra debugging checks. Note that we can't (yet) * rely on gpio_request() having been called beforehand. */ int gpio_direction_input(unsigned gpio) { unsigned long flags; struct gpio_chip *chip; struct gpio_desc *desc = &gpio_desc[gpio]; int status = -EINVAL; spin_lock_irqsave(&gpio_lock, flags); if (!gpio_is_valid(gpio)) goto fail; chip = desc->chip; if (!chip || !chip->get || !chip->direction_input) goto fail; gpio -= chip->base; if (gpio >= chip->ngpio) goto fail; status = gpio_ensure_requested(desc, gpio); if (status < 0) goto fail; /* now we know the gpio is valid and chip won't vanish */ spin_unlock_irqrestore(&gpio_lock, flags); might_sleep_if(extra_checks && chip->can_sleep); if (status) { status = chip->request(chip, gpio); if (status < 0) { pr_debug("GPIO-%d: chip request fail, %d\n", chip->base + gpio, status); /* and it's not available to anyone else ... * gpio_request() is the fully clean solution. 
*/ goto lose; } } status = chip->direction_input(chip, gpio); if (status == 0) clear_bit(FLAG_IS_OUT, &desc->flags); lose: return status; fail: spin_unlock_irqrestore(&gpio_lock, flags); if (status) pr_debug("%s: gpio-%d status %d\n", __func__, gpio, status); return status; } EXPORT_SYMBOL_GPL(gpio_direction_input); int gpio_direction_output(unsigned gpio, int value) { unsigned long flags; struct gpio_chip *chip; struct gpio_desc *desc = &gpio_desc[gpio]; int status = -EINVAL; spin_lock_irqsave(&gpio_lock, flags); if (!gpio_is_valid(gpio)) goto fail; chip = desc->chip; if (!chip || !chip->set || !chip->direction_output) goto fail; gpio -= chip->base; if (gpio >= chip->ngpio) goto fail; status = gpio_ensure_requested(desc, gpio); if (status < 0) goto fail; /* now we know the gpio is valid and chip won't vanish */ spin_unlock_irqrestore(&gpio_lock, flags); might_sleep_if(extra_checks && chip->can_sleep); if (status) { status = chip->request(chip, gpio); if (status < 0) { pr_debug("GPIO-%d: chip request fail, %d\n", chip->base + gpio, status); /* and it's not available to anyone else ... * gpio_request() is the fully clean solution. 
*/ goto lose; } } status = chip->direction_output(chip, gpio, value); if (status == 0) set_bit(FLAG_IS_OUT, &desc->flags); lose: return status; fail: spin_unlock_irqrestore(&gpio_lock, flags); if (status) pr_debug("%s: gpio-%d status %d\n", __func__, gpio, status); return status; } EXPORT_SYMBOL_GPL(gpio_direction_output); /** * gpio_set_debounce - sets @debounce time for a @gpio * @gpio: the gpio to set debounce time * @debounce: debounce time is microseconds */ int gpio_set_debounce(unsigned gpio, unsigned debounce) { unsigned long flags; struct gpio_chip *chip; struct gpio_desc *desc = &gpio_desc[gpio]; int status = -EINVAL; spin_lock_irqsave(&gpio_lock, flags); if (!gpio_is_valid(gpio)) goto fail; chip = desc->chip; if (!chip || !chip->set || !chip->set_debounce) goto fail; gpio -= chip->base; if (gpio >= chip->ngpio) goto fail; status = gpio_ensure_requested(desc, gpio); if (status < 0) goto fail; /* now we know the gpio is valid and chip won't vanish */ spin_unlock_irqrestore(&gpio_lock, flags); might_sleep_if(extra_checks && chip->can_sleep); return chip->set_debounce(chip, gpio, debounce); fail: spin_unlock_irqrestore(&gpio_lock, flags); if (status) pr_debug("%s: gpio-%d status %d\n", __func__, gpio, status); return status; } EXPORT_SYMBOL_GPL(gpio_set_debounce); /* I/O calls are only valid after configuration completed; the relevant * "is this a valid GPIO" error checks should already have been done. * * "Get" operations are often inlinable as reading a pin value register, * and masking the relevant bit in that register. * * When "set" operations are inlinable, they involve writing that mask to * one register to set a low value, or a different register to set it high. * Otherwise locking is needed, so there may be little value to inlining. * *------------------------------------------------------------------------ * * IMPORTANT!!! The hot paths -- get/set value -- assume that callers * have requested the GPIO. 
That can include implicit requesting by * a direction setting call. Marking a gpio as requested locks its chip * in memory, guaranteeing that these table lookups need no more locking * and that gpiochip_remove() will fail. * * REVISIT when debugging, consider adding some instrumentation to ensure * that the GPIO was actually requested. */ /** * __gpio_get_value() - return a gpio's value * @gpio: gpio whose value will be returned * Context: any * * This is used directly or indirectly to implement gpio_get_value(). * It returns the zero or nonzero value provided by the associated * gpio_chip.get() method; or zero if no such method is provided. */ int __gpio_get_value(unsigned gpio) { struct gpio_chip *chip; chip = gpio_to_chip(gpio); WARN_ON(extra_checks && chip->can_sleep); return chip->get ? chip->get(chip, gpio - chip->base) : 0; } EXPORT_SYMBOL_GPL(__gpio_get_value); /** * __gpio_set_value() - assign a gpio's value * @gpio: gpio whose value will be assigned * @value: value to assign * Context: any * * This is used directly or indirectly to implement gpio_set_value(). * It invokes the associated gpio_chip.set() method. */ void __gpio_set_value(unsigned gpio, int value) { struct gpio_chip *chip; chip = gpio_to_chip(gpio); WARN_ON(extra_checks && chip->can_sleep); chip->set(chip, gpio - chip->base, value); } EXPORT_SYMBOL_GPL(__gpio_set_value); /** * __gpio_cansleep() - report whether gpio value access will sleep * @gpio: gpio in question * Context: any * * This is used directly or indirectly to implement gpio_cansleep(). It * returns nonzero if access reading or writing the GPIO value can sleep. */ int __gpio_cansleep(unsigned gpio) { struct gpio_chip *chip; /* only call this on GPIOs that are valid! 
*/ chip = gpio_to_chip(gpio); return chip->can_sleep; } EXPORT_SYMBOL_GPL(__gpio_cansleep); /** * __gpio_to_irq() - return the IRQ corresponding to a GPIO * @gpio: gpio whose IRQ will be returned (already requested) * Context: any * * This is used directly or indirectly to implement gpio_to_irq(). * It returns the number of the IRQ signaled by this (input) GPIO, * or a negative errno. */ int __gpio_to_irq(unsigned gpio) { struct gpio_chip *chip; chip = gpio_to_chip(gpio); return chip->to_irq ? chip->to_irq(chip, gpio - chip->base) : -ENXIO; } EXPORT_SYMBOL_GPL(__gpio_to_irq); /* There's no value in making it easy to inline GPIO calls that may sleep. * Common examples include ones connected to I2C or SPI chips. */ int gpio_get_value_cansleep(unsigned gpio) { struct gpio_chip *chip; might_sleep_if(extra_checks); chip = gpio_to_chip(gpio); return chip->get ? chip->get(chip, gpio - chip->base) : 0; } EXPORT_SYMBOL_GPL(gpio_get_value_cansleep); void gpio_set_value_cansleep(unsigned gpio, int value) { struct gpio_chip *chip; might_sleep_if(extra_checks); chip = gpio_to_chip(gpio); chip->set(chip, gpio - chip->base, value); } EXPORT_SYMBOL_GPL(gpio_set_value_cansleep); #ifdef CONFIG_DEBUG_FS static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip) { unsigned i; unsigned gpio = chip->base; struct gpio_desc *gdesc = &gpio_desc[gpio]; int is_out; for (i = 0; i < chip->ngpio; i++, gpio++, gdesc++) { if (!test_bit(FLAG_REQUESTED, &gdesc->flags)) continue; is_out = test_bit(FLAG_IS_OUT, &gdesc->flags); seq_printf(s, " gpio-%-3d (%-20.20s) %s %s", gpio, gdesc->label, is_out ? "out" : "in ", chip->get ? (chip->get(chip, i) ? "hi" : "lo") : "? "); if (!is_out) { int irq = gpio_to_irq(gpio); struct irq_desc *desc = irq_to_desc(irq); /* This races with request_irq(), set_irq_type(), * and set_irq_wake() ... but those are "rare". * * More significantly, trigger type flags aren't * currently maintained by genirq. 
*/ if (irq >= 0 && desc->action) { char *trigger; switch (desc->status & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_NONE: trigger = "(default)"; break; case IRQ_TYPE_EDGE_FALLING: trigger = "edge-falling"; break; case IRQ_TYPE_EDGE_RISING: trigger = "edge-rising"; break; case IRQ_TYPE_EDGE_BOTH: trigger = "edge-both"; break; case IRQ_TYPE_LEVEL_HIGH: trigger = "level-high"; break; case IRQ_TYPE_LEVEL_LOW: trigger = "level-low"; break; default: trigger = "?trigger?"; break; } seq_printf(s, " irq-%d %s%s", irq, trigger, (desc->status & IRQ_WAKEUP) ? " wakeup" : ""); } } seq_printf(s, "\n"); } } static int gpiolib_show(struct seq_file *s, void *unused) { struct gpio_chip *chip = NULL; unsigned gpio; int started = 0; /* REVISIT this isn't locked against gpio_chip removal ... */ for (gpio = 0; gpio_is_valid(gpio); gpio++) { struct device *dev; if (chip == gpio_desc[gpio].chip) continue; chip = gpio_desc[gpio].chip; if (!chip) continue; seq_printf(s, "%sGPIOs %d-%d", started ? "\n" : "", chip->base, chip->base + chip->ngpio - 1); dev = chip->dev; if (dev) seq_printf(s, ", %s/%s", dev->bus ? dev->bus->name : "no-bus", dev_name(dev)); if (chip->label) seq_printf(s, ", %s", chip->label); if (chip->can_sleep) seq_printf(s, ", can sleep"); seq_printf(s, ":\n"); started = 1; if (chip->dbg_show) chip->dbg_show(s, chip); else gpiolib_dbg_show(s, chip); } return 0; } static int gpiolib_open(struct inode *inode, struct file *file) { return single_open(file, gpiolib_show, NULL); } static const struct file_operations gpiolib_operations = { .open = gpiolib_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init gpiolib_debugfs_init(void) { /* /sys/kernel/debug/gpio */ (void) debugfs_create_file("gpio", S_IFREG | S_IRUGO, NULL, NULL, &gpiolib_operations); return 0; } subsys_initcall(gpiolib_debugfs_init); #endif /* DEBUG_FS */
gpl-2.0
cwallac/KernelMerge
net/sched/cls_u32.c
720
16562
/* * net/sched/cls_u32.c Ugly (or Universal) 32bit key Packet Classifier. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * * The filters are packed to hash tables of key nodes * with a set of 32bit key/mask pairs at every node. * Nodes reference next level hash tables etc. * * This scheme is the best universal classifier I managed to * invent; it is not super-fast, but it is not slow (provided you * program it correctly), and general enough. And its relative * speed grows as the number of rules becomes larger. * * It seems that it represents the best middle point between * speed and manageability both by human and by machine. * * It is especially useful for link sharing combined with QoS; * pure RSVP doesn't need such a general approach and can use * much simpler (and faster) schemes, sort of cls_rsvp.c. 
* * JHS: We should remove the CONFIG_NET_CLS_IND from here * eventually when the meta match extension is made available * * nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro> */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> #include <net/netlink.h> #include <net/act_api.h> #include <net/pkt_cls.h> struct tc_u_knode { struct tc_u_knode *next; u32 handle; struct tc_u_hnode *ht_up; struct tcf_exts exts; #ifdef CONFIG_NET_CLS_IND char indev[IFNAMSIZ]; #endif u8 fshift; struct tcf_result res; struct tc_u_hnode *ht_down; #ifdef CONFIG_CLS_U32_PERF struct tc_u32_pcnt *pf; #endif #ifdef CONFIG_CLS_U32_MARK struct tc_u32_mark mark; #endif struct tc_u32_sel sel; }; struct tc_u_hnode { struct tc_u_hnode *next; u32 handle; u32 prio; struct tc_u_common *tp_c; int refcnt; unsigned divisor; struct tc_u_knode *ht[1]; }; struct tc_u_common { struct tc_u_hnode *hlist; struct Qdisc *q; int refcnt; u32 hgenerator; }; static const struct tcf_ext_map u32_ext_map = { .action = TCA_U32_ACT, .police = TCA_U32_POLICE }; static __inline__ unsigned u32_hash_fold(__be32 key, struct tc_u32_sel *sel, u8 fshift) { unsigned h = ntohl(key & sel->hmask)>>fshift; return h; } static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res) { struct { struct tc_u_knode *knode; u8 *ptr; } stack[TC_U32_MAXDEPTH]; struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root; u8 *ptr = skb_network_header(skb); struct tc_u_knode *n; int sdepth = 0; int off2 = 0; int sel = 0; #ifdef CONFIG_CLS_U32_PERF int j; #endif int i, r; next_ht: n = ht->ht[sel]; next_knode: if (n) { struct tc_u32_key *key = n->sel.keys; #ifdef CONFIG_CLS_U32_PERF n->pf->rcnt +=1; j = 0; #endif #ifdef CONFIG_CLS_U32_MARK if ((skb->mark & n->mark.mask) != n->mark.val) { n = n->next; goto next_knode; } else { n->mark.success++; } #endif for (i = n->sel.nkeys; i>0; 
i--, key++) { if ((*(__be32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) { n = n->next; goto next_knode; } #ifdef CONFIG_CLS_U32_PERF n->pf->kcnts[j] +=1; j++; #endif } if (n->ht_down == NULL) { check_terminal: if (n->sel.flags&TC_U32_TERMINAL) { *res = n->res; #ifdef CONFIG_NET_CLS_IND if (!tcf_match_indev(skb, n->indev)) { n = n->next; goto next_knode; } #endif #ifdef CONFIG_CLS_U32_PERF n->pf->rhit +=1; #endif r = tcf_exts_exec(skb, &n->exts, res); if (r < 0) { n = n->next; goto next_knode; } return r; } n = n->next; goto next_knode; } /* PUSH */ if (sdepth >= TC_U32_MAXDEPTH) goto deadloop; stack[sdepth].knode = n; stack[sdepth].ptr = ptr; sdepth++; ht = n->ht_down; sel = 0; if (ht->divisor) sel = ht->divisor&u32_hash_fold(*(__be32*)(ptr+n->sel.hoff), &n->sel,n->fshift); if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT))) goto next_ht; if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) { off2 = n->sel.off + 3; if (n->sel.flags&TC_U32_VAROFFSET) off2 += ntohs(n->sel.offmask & *(__be16*)(ptr+n->sel.offoff)) >>n->sel.offshift; off2 &= ~3; } if (n->sel.flags&TC_U32_EAT) { ptr += off2; off2 = 0; } if (ptr < skb_tail_pointer(skb)) goto next_ht; } /* POP */ if (sdepth--) { n = stack[sdepth].knode; ht = n->ht_up; ptr = stack[sdepth].ptr; goto check_terminal; } return -1; deadloop: if (net_ratelimit()) printk("cls_u32: dead loop\n"); return -1; } static __inline__ struct tc_u_hnode * u32_lookup_ht(struct tc_u_common *tp_c, u32 handle) { struct tc_u_hnode *ht; for (ht = tp_c->hlist; ht; ht = ht->next) if (ht->handle == handle) break; return ht; } static __inline__ struct tc_u_knode * u32_lookup_key(struct tc_u_hnode *ht, u32 handle) { unsigned sel; struct tc_u_knode *n = NULL; sel = TC_U32_HASH(handle); if (sel > ht->divisor) goto out; for (n = ht->ht[sel]; n; n = n->next) if (n->handle == handle) break; out: return n; } static unsigned long u32_get(struct tcf_proto *tp, u32 handle) { struct tc_u_hnode *ht; struct tc_u_common *tp_c = tp->data; if 
(TC_U32_HTID(handle) == TC_U32_ROOT) ht = tp->root; else ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle)); if (!ht) return 0; if (TC_U32_KEY(handle) == 0) return (unsigned long)ht; return (unsigned long)u32_lookup_key(ht, handle); } static void u32_put(struct tcf_proto *tp, unsigned long f) { } static u32 gen_new_htid(struct tc_u_common *tp_c) { int i = 0x800; do { if (++tp_c->hgenerator == 0x7FF) tp_c->hgenerator = 1; } while (--i>0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20)); return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0; } static int u32_init(struct tcf_proto *tp) { struct tc_u_hnode *root_ht; struct tc_u_common *tp_c; tp_c = tp->q->u32_node; root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL); if (root_ht == NULL) return -ENOBUFS; root_ht->divisor = 0; root_ht->refcnt++; root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000; root_ht->prio = tp->prio; if (tp_c == NULL) { tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL); if (tp_c == NULL) { kfree(root_ht); return -ENOBUFS; } tp_c->q = tp->q; tp->q->u32_node = tp_c; } tp_c->refcnt++; root_ht->next = tp_c->hlist; tp_c->hlist = root_ht; root_ht->tp_c = tp_c; tp->root = root_ht; tp->data = tp_c; return 0; } static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n) { tcf_unbind_filter(tp, &n->res); tcf_exts_destroy(tp, &n->exts); if (n->ht_down) n->ht_down->refcnt--; #ifdef CONFIG_CLS_U32_PERF kfree(n->pf); #endif kfree(n); return 0; } static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key) { struct tc_u_knode **kp; struct tc_u_hnode *ht = key->ht_up; if (ht) { for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) { if (*kp == key) { tcf_tree_lock(tp); *kp = key->next; tcf_tree_unlock(tp); u32_destroy_key(tp, key); return 0; } } } WARN_ON(1); return 0; } static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht) { struct tc_u_knode *n; unsigned h; for (h=0; h<=ht->divisor; h++) { while ((n = ht->ht[h]) != NULL) { ht->ht[h] = n->next; u32_destroy_key(tp, n); } 
} } static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht) { struct tc_u_common *tp_c = tp->data; struct tc_u_hnode **hn; WARN_ON(ht->refcnt); u32_clear_hnode(tp, ht); for (hn = &tp_c->hlist; *hn; hn = &(*hn)->next) { if (*hn == ht) { *hn = ht->next; kfree(ht); return 0; } } WARN_ON(1); return -ENOENT; } static void u32_destroy(struct tcf_proto *tp) { struct tc_u_common *tp_c = tp->data; struct tc_u_hnode *root_ht = tp->root; WARN_ON(root_ht == NULL); if (root_ht && --root_ht->refcnt == 0) u32_destroy_hnode(tp, root_ht); if (--tp_c->refcnt == 0) { struct tc_u_hnode *ht; tp->q->u32_node = NULL; for (ht = tp_c->hlist; ht; ht = ht->next) { ht->refcnt--; u32_clear_hnode(tp, ht); } while ((ht = tp_c->hlist) != NULL) { tp_c->hlist = ht->next; WARN_ON(ht->refcnt != 0); kfree(ht); } kfree(tp_c); } tp->data = NULL; } static int u32_delete(struct tcf_proto *tp, unsigned long arg) { struct tc_u_hnode *ht = (struct tc_u_hnode*)arg; if (ht == NULL) return 0; if (TC_U32_KEY(ht->handle)) return u32_delete_key(tp, (struct tc_u_knode*)ht); if (tp->root == ht) return -EINVAL; if (ht->refcnt == 1) { ht->refcnt--; u32_destroy_hnode(tp, ht); } else { return -EBUSY; } return 0; } static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle) { struct tc_u_knode *n; unsigned i = 0x7FF; for (n=ht->ht[TC_U32_HASH(handle)]; n; n = n->next) if (i < TC_U32_NODE(n->handle)) i = TC_U32_NODE(n->handle); i++; return handle|(i>0xFFF ? 
0xFFF : i); } static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = { [TCA_U32_CLASSID] = { .type = NLA_U32 }, [TCA_U32_HASH] = { .type = NLA_U32 }, [TCA_U32_LINK] = { .type = NLA_U32 }, [TCA_U32_DIVISOR] = { .type = NLA_U32 }, [TCA_U32_SEL] = { .len = sizeof(struct tc_u32_sel) }, [TCA_U32_INDEV] = { .type = NLA_STRING, .len = IFNAMSIZ }, [TCA_U32_MARK] = { .len = sizeof(struct tc_u32_mark) }, }; static int u32_set_parms(struct tcf_proto *tp, unsigned long base, struct tc_u_hnode *ht, struct tc_u_knode *n, struct nlattr **tb, struct nlattr *est) { int err; struct tcf_exts e; err = tcf_exts_validate(tp, tb, est, &e, &u32_ext_map); if (err < 0) return err; err = -EINVAL; if (tb[TCA_U32_LINK]) { u32 handle = nla_get_u32(tb[TCA_U32_LINK]); struct tc_u_hnode *ht_down = NULL, *ht_old; if (TC_U32_KEY(handle)) goto errout; if (handle) { ht_down = u32_lookup_ht(ht->tp_c, handle); if (ht_down == NULL) goto errout; ht_down->refcnt++; } tcf_tree_lock(tp); ht_old = n->ht_down; n->ht_down = ht_down; tcf_tree_unlock(tp); if (ht_old) ht_old->refcnt--; } if (tb[TCA_U32_CLASSID]) { n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]); tcf_bind_filter(tp, &n->res, base); } #ifdef CONFIG_NET_CLS_IND if (tb[TCA_U32_INDEV]) { err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV]); if (err < 0) goto errout; } #endif tcf_exts_change(tp, &n->exts, &e); return 0; errout: tcf_exts_destroy(tp, &e); return err; } static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle, struct nlattr **tca, unsigned long *arg) { struct tc_u_common *tp_c = tp->data; struct tc_u_hnode *ht; struct tc_u_knode *n; struct tc_u32_sel *s; struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_U32_MAX + 1]; u32 htid; int err; if (opt == NULL) return handle ? 
-EINVAL : 0; err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy); if (err < 0) return err; if ((n = (struct tc_u_knode*)*arg) != NULL) { if (TC_U32_KEY(n->handle) == 0) return -EINVAL; return u32_set_parms(tp, base, n->ht_up, n, tb, tca[TCA_RATE]); } if (tb[TCA_U32_DIVISOR]) { unsigned divisor = nla_get_u32(tb[TCA_U32_DIVISOR]); if (--divisor > 0x100) return -EINVAL; if (TC_U32_KEY(handle)) return -EINVAL; if (handle == 0) { handle = gen_new_htid(tp->data); if (handle == 0) return -ENOMEM; } ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL); if (ht == NULL) return -ENOBUFS; ht->tp_c = tp_c; ht->refcnt = 1; ht->divisor = divisor; ht->handle = handle; ht->prio = tp->prio; ht->next = tp_c->hlist; tp_c->hlist = ht; *arg = (unsigned long)ht; return 0; } if (tb[TCA_U32_HASH]) { htid = nla_get_u32(tb[TCA_U32_HASH]); if (TC_U32_HTID(htid) == TC_U32_ROOT) { ht = tp->root; htid = ht->handle; } else { ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid)); if (ht == NULL) return -EINVAL; } } else { ht = tp->root; htid = ht->handle; } if (ht->divisor < TC_U32_HASH(htid)) return -EINVAL; if (handle) { if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid)) return -EINVAL; handle = htid | TC_U32_NODE(handle); } else handle = gen_new_kid(ht, htid); if (tb[TCA_U32_SEL] == NULL) return -EINVAL; s = nla_data(tb[TCA_U32_SEL]); n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL); if (n == NULL) return -ENOBUFS; #ifdef CONFIG_CLS_U32_PERF n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL); if (n->pf == NULL) { kfree(n); return -ENOBUFS; } #endif memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key)); n->ht_up = ht; n->handle = handle; n->fshift = s->hmask ? 
ffs(ntohl(s->hmask)) - 1 : 0; #ifdef CONFIG_CLS_U32_MARK if (tb[TCA_U32_MARK]) { struct tc_u32_mark *mark; mark = nla_data(tb[TCA_U32_MARK]); memcpy(&n->mark, mark, sizeof(struct tc_u32_mark)); n->mark.success = 0; } #endif err = u32_set_parms(tp, base, ht, n, tb, tca[TCA_RATE]); if (err == 0) { struct tc_u_knode **ins; for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next) if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle)) break; n->next = *ins; tcf_tree_lock(tp); *ins = n; tcf_tree_unlock(tp); *arg = (unsigned long)n; return 0; } #ifdef CONFIG_CLS_U32_PERF kfree(n->pf); #endif kfree(n); return err; } static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg) { struct tc_u_common *tp_c = tp->data; struct tc_u_hnode *ht; struct tc_u_knode *n; unsigned h; if (arg->stop) return; for (ht = tp_c->hlist; ht; ht = ht->next) { if (ht->prio != tp->prio) continue; if (arg->count >= arg->skip) { if (arg->fn(tp, (unsigned long)ht, arg) < 0) { arg->stop = 1; return; } } arg->count++; for (h = 0; h <= ht->divisor; h++) { for (n = ht->ht[h]; n; n = n->next) { if (arg->count < arg->skip) { arg->count++; continue; } if (arg->fn(tp, (unsigned long)n, arg) < 0) { arg->stop = 1; return; } arg->count++; } } } } static int u32_dump(struct tcf_proto *tp, unsigned long fh, struct sk_buff *skb, struct tcmsg *t) { struct tc_u_knode *n = (struct tc_u_knode*)fh; struct nlattr *nest; if (n == NULL) return skb->len; t->tcm_handle = n->handle; nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; if (TC_U32_KEY(n->handle) == 0) { struct tc_u_hnode *ht = (struct tc_u_hnode*)fh; u32 divisor = ht->divisor+1; NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor); } else { NLA_PUT(skb, TCA_U32_SEL, sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key), &n->sel); if (n->ht_up) { u32 htid = n->handle & 0xFFFFF000; NLA_PUT_U32(skb, TCA_U32_HASH, htid); } if (n->res.classid) NLA_PUT_U32(skb, TCA_U32_CLASSID, n->res.classid); if (n->ht_down) NLA_PUT_U32(skb, 
TCA_U32_LINK, n->ht_down->handle); #ifdef CONFIG_CLS_U32_MARK if (n->mark.val || n->mark.mask) NLA_PUT(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark); #endif if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0) goto nla_put_failure; #ifdef CONFIG_NET_CLS_IND if(strlen(n->indev)) NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev); #endif #ifdef CONFIG_CLS_U32_PERF NLA_PUT(skb, TCA_U32_PCNT, sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64), n->pf); #endif } nla_nest_end(skb, nest); if (TC_U32_KEY(n->handle)) if (tcf_exts_dump_stats(skb, &n->exts, &u32_ext_map) < 0) goto nla_put_failure; return skb->len; nla_put_failure: nla_nest_cancel(skb, nest); return -1; } static struct tcf_proto_ops cls_u32_ops __read_mostly = { .kind = "u32", .classify = u32_classify, .init = u32_init, .destroy = u32_destroy, .get = u32_get, .put = u32_put, .change = u32_change, .delete = u32_delete, .walk = u32_walk, .dump = u32_dump, .owner = THIS_MODULE, }; static int __init init_u32(void) { printk("u32 classifier\n"); #ifdef CONFIG_CLS_U32_PERF printk(" Performance counters on\n"); #endif #ifdef CONFIG_NET_CLS_IND printk(" input device check on \n"); #endif #ifdef CONFIG_NET_CLS_ACT printk(" Actions configured \n"); #endif return register_tcf_proto_ops(&cls_u32_ops); } static void __exit exit_u32(void) { unregister_tcf_proto_ops(&cls_u32_ops); } module_init(init_u32) module_exit(exit_u32) MODULE_LICENSE("GPL");
gpl-2.0
ntb-ch/linux
arch/arm/mach-davinci/pm.c
976
3858
/*
 * DaVinci Power Management Routines
 *
 * Copyright (C) 2009 Texas Instruments, Inc. http://www.ti.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/spinlock.h>

#include <asm/cacheflush.h>
#include <asm/delay.h>
#include <asm/io.h>

#include <mach/common.h>
#include <mach/da8xx.h>
#include <mach/sram.h>
#include <mach/pm.h>

#include "clock.h"

#define DEEPSLEEP_SLEEPCOUNT_MASK	0xFFFF

/* SRAM-resident suspend routine; set up once in davinci_pm_probe(). */
static void (*davinci_sram_suspend) (struct davinci_pm_config *);
static struct davinci_pm_config *pdata;

/*
 * Copy the suspend code into SRAM and flush the icache range so the
 * CPU does not execute stale instructions from the destination.
 */
static void davinci_sram_push(void *dest, void *src, unsigned int size)
{
	memcpy(dest, src, size);
	flush_icache_range((unsigned long)dest,
			   (unsigned long)(dest + size));
}

/*
 * Enter deep sleep: bypass and power down the CPU PLL (only when it is
 * not shared with the DDR PLL), program the sleep count, then jump to
 * the SRAM-resident suspend routine.  On wake-up, sequence the CPU PLL
 * back up and re-enable it.
 */
static void davinci_pm_suspend(void)
{
	unsigned val;

	if (pdata->cpupll_reg_base != pdata->ddrpll_reg_base) {

		/* Switch CPU PLL to bypass mode */
		val = __raw_readl(pdata->cpupll_reg_base + PLLCTL);
		val &= ~(PLLCTL_PLLENSRC | PLLCTL_PLLEN);
		__raw_writel(val, pdata->cpupll_reg_base + PLLCTL);

		udelay(PLL_BYPASS_TIME);

		/* Powerdown CPU PLL */
		val = __raw_readl(pdata->cpupll_reg_base + PLLCTL);
		val |= PLLCTL_PLLPWRDN;
		__raw_writel(val, pdata->cpupll_reg_base + PLLCTL);
	}

	/* Configure sleep count in deep sleep register */
	val = __raw_readl(pdata->deepsleep_reg);
	val &= ~DEEPSLEEP_SLEEPCOUNT_MASK;	/* was a comma operator; use a proper statement */
	val |= pdata->sleepcount;
	__raw_writel(val, pdata->deepsleep_reg);

	/* System goes to sleep in this call */
	davinci_sram_suspend(pdata);

	if (pdata->cpupll_reg_base != pdata->ddrpll_reg_base) {

		/*
		 * Assert CPU PLL reset.  PLLRST is active-low here: the
		 * "out of reset" step below *sets* the bit, so clearing
		 * it holds the PLL in reset.
		 */
		val = __raw_readl(pdata->cpupll_reg_base + PLLCTL);
		val &= ~PLLCTL_PLLRST;
		__raw_writel(val, pdata->cpupll_reg_base + PLLCTL);

		/* Bring CPU PLL out of power down (clear PLLPWRDN) */
		val = __raw_readl(pdata->cpupll_reg_base + PLLCTL);
		val &= ~PLLCTL_PLLPWRDN;
		__raw_writel(val, pdata->cpupll_reg_base + PLLCTL);

		/* wait for CPU PLL reset */
		udelay(PLL_RESET_TIME);

		/* bring CPU PLL out of reset */
		val = __raw_readl(pdata->cpupll_reg_base + PLLCTL);
		val |= PLLCTL_PLLRST;
		__raw_writel(val, pdata->cpupll_reg_base + PLLCTL);

		/* Wait for CPU PLL to lock */
		udelay(PLL_LOCK_TIME);

		/* Remove CPU PLL from bypass mode */
		val = __raw_readl(pdata->cpupll_reg_base + PLLCTL);
		val &= ~PLLCTL_PLLENSRC;
		val |= PLLCTL_PLLEN;
		__raw_writel(val, pdata->cpupll_reg_base + PLLCTL);
	}
}

/*
 * suspend_ops .enter hook: both standby and mem map onto the same deep
 * sleep sequence; anything else is rejected.
 */
static int davinci_pm_enter(suspend_state_t state)
{
	int ret = 0;

	switch (state) {
	case PM_SUSPEND_STANDBY:
	case PM_SUSPEND_MEM:
		davinci_pm_suspend();
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static const struct platform_suspend_ops davinci_pm_ops = {
	.enter		= davinci_pm_enter,
	.valid		= suspend_valid_only_mem,
};

/*
 * Probe: validate platform data, carve out SRAM for the suspend
 * routine, copy it there, and register the suspend ops.
 */
static int __init davinci_pm_probe(struct platform_device *pdev)
{
	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "cannot get platform data\n");
		return -ENOENT;
	}

	davinci_sram_suspend = sram_alloc(davinci_cpu_suspend_sz, NULL);
	if (!davinci_sram_suspend) {
		dev_err(&pdev->dev, "cannot allocate SRAM memory\n");
		return -ENOMEM;
	}

	davinci_sram_push(davinci_sram_suspend, davinci_cpu_suspend,
						davinci_cpu_suspend_sz);

	suspend_set_ops(&davinci_pm_ops);

	return 0;
}

static int __exit davinci_pm_remove(struct platform_device *pdev)
{
	sram_free(davinci_sram_suspend, davinci_cpu_suspend_sz);
	return 0;
}

static struct platform_driver davinci_pm_driver = {
	.driver = {
		.name	 = "pm-davinci",
	},
	.remove = __exit_p(davinci_pm_remove),
};

int __init davinci_pm_init(void)
{
	return platform_driver_probe(&davinci_pm_driver, davinci_pm_probe);
}
gpl-2.0
GHackAnonymous/linux
arch/cris/arch-v10/lib/usercopy.c
1232
16191
/* * User address space access functions. * The non-inlined parts of asm-cris/uaccess.h are here. * * Copyright (C) 2000, Axis Communications AB. * * Written by Hans-Peter Nilsson. * Pieces used from memcpy, originally by Kenny Ranerup long time ago. */ #include <asm/uaccess.h> /* Asm:s have been tweaked (within the domain of correctness) to give satisfactory results for "gcc version 2.96 20000427 (experimental)". Check regularly... Note that the PC saved at a bus-fault is the address *after* the faulting instruction, which means the branch-target for instructions in delay-slots for taken branches. Note also that the postincrement in the instruction is performed regardless of bus-fault; the register is seen updated in fault handlers. Oh, and on the code formatting issue, to whomever feels like "fixing it" to Conformity: I'm too "lazy", but why don't you go ahead and "fix" string.c too. I just don't think too many people will hack this file for the code format to be an issue. */ /* Copy to userspace. This is based on the memcpy used for kernel-to-kernel copying; see "string.c". */ unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn) { /* We want the parameters put in special registers. Make sure the compiler is able to make something useful of this. As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop). FIXME: Comment for old gcc version. Check. If gcc was alright, it really would need no temporaries, and no stack space to save stuff on. */ register char *dst __asm__ ("r13") = pdst; register const char *src __asm__ ("r11") = psrc; register int n __asm__ ("r12") = pn; register int retn __asm__ ("r10") = 0; /* When src is aligned but not dst, this makes a few extra needless cycles. I believe it would take as many to check that the re-alignment was unnecessary. */ if (((unsigned long) dst & 3) != 0 /* Don't align if we wouldn't copy more than a few bytes; so we don't have to check further for overflows. 
*/ && n >= 3) { if ((unsigned long) dst & 1) { __asm_copy_to_user_1 (dst, src, retn); n--; } if ((unsigned long) dst & 2) { __asm_copy_to_user_2 (dst, src, retn); n -= 2; } } /* Decide which copying method to use. */ if (n >= 44*2) /* Break even between movem and move16 is at 38.7*2, but modulo 44. */ { /* For large copies we use 'movem'. */ /* It is not optimal to tell the compiler about clobbering any registers; that will move the saving/restoring of those registers to the function prologue/epilogue, and make non-movem sizes suboptimal. This method is not foolproof; it assumes that the "asm reg" declarations at the beginning of the function really are used here (beware: they may be moved to temporary registers). This way, we do not have to save/move the registers around into temporaries; we can safely use them straight away. If you want to check that the allocation was right; then check the equalities in the first comment. It should say "r13=r13, r11=r11, r12=r12". */ __asm__ volatile ("\ .ifnc %0%1%2%3,$r13$r11$r12$r10 \n\ .err \n\ .endif \n\ \n\ ;; Save the registers we'll use in the movem process \n\ ;; on the stack. \n\ subq 11*4,$sp \n\ movem $r10,[$sp] \n\ \n\ ;; Now we've got this: \n\ ;; r11 - src \n\ ;; r13 - dst \n\ ;; r12 - n \n\ \n\ ;; Update n for the first loop \n\ subq 44,$r12 \n\ \n\ ; Since the noted PC of a faulting instruction in a delay-slot of a taken \n\ ; branch, is that of the branch target, we actually point at the from-movem \n\ ; for this case. There is no ambiguity here; if there was a fault in that \n\ ; instruction (meaning a kernel oops), the faulted PC would be the address \n\ ; after *that* movem. 
\n\ \n\ 0: \n\ movem [$r11+],$r10 \n\ subq 44,$r12 \n\ bge 0b \n\ movem $r10,[$r13+] \n\ 1: \n\ addq 44,$r12 ;; compensate for last loop underflowing n \n\ \n\ ;; Restore registers from stack \n\ movem [$sp+],$r10 \n\ 2: \n\ .section .fixup,\"ax\" \n\ \n\ ; To provide a correct count in r10 of bytes that failed to be copied, \n\ ; we jump back into the loop if the loop-branch was taken. There is no \n\ ; performance penalty for sany use; the program will segfault soon enough.\n\ \n\ 3: \n\ move.d [$sp],$r10 \n\ addq 44,$r10 \n\ move.d $r10,[$sp] \n\ jump 0b \n\ 4: \n\ movem [$sp+],$r10 \n\ addq 44,$r10 \n\ addq 44,$r12 \n\ jump 2b \n\ \n\ .previous \n\ .section __ex_table,\"a\" \n\ .dword 0b,3b \n\ .dword 1b,4b \n\ .previous" /* Outputs */ : "=r" (dst), "=r" (src), "=r" (n), "=r" (retn) /* Inputs */ : "0" (dst), "1" (src), "2" (n), "3" (retn)); } /* Either we directly start copying, using dword copying in a loop, or we copy as much as possible with 'movem' and then the last block (<44 bytes) is copied here. This will work since 'movem' will have updated SRC, DST and N. */ while (n >= 16) { __asm_copy_to_user_16 (dst, src, retn); n -= 16; } /* Having a separate by-four loops cuts down on cache footprint. FIXME: Test with and without; increasing switch to be 0..15. */ while (n >= 4) { __asm_copy_to_user_4 (dst, src, retn); n -= 4; } switch (n) { case 0: break; case 1: __asm_copy_to_user_1 (dst, src, retn); break; case 2: __asm_copy_to_user_2 (dst, src, retn); break; case 3: __asm_copy_to_user_3 (dst, src, retn); break; } return retn; } EXPORT_SYMBOL(__copy_user); /* Copy from user to kernel, zeroing the bytes that were inaccessible in userland. The return-value is the number of bytes that were inaccessible. */ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, unsigned long pn) { /* We want the parameters put in special registers. Make sure the compiler is able to make something useful of this. 
As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop). FIXME: Comment for old gcc version. Check. If gcc was alright, it really would need no temporaries, and no stack space to save stuff on. */ register char *dst __asm__ ("r13") = pdst; register const char *src __asm__ ("r11") = psrc; register int n __asm__ ("r12") = pn; register int retn __asm__ ("r10") = 0; /* The best reason to align src is that we then know that a read-fault was for aligned bytes; there's no 1..3 remaining good bytes to pickle. */ if (((unsigned long) src & 3) != 0) { if (((unsigned long) src & 1) && n != 0) { __asm_copy_from_user_1 (dst, src, retn); n--; } if (((unsigned long) src & 2) && n >= 2) { __asm_copy_from_user_2 (dst, src, retn); n -= 2; } /* We only need one check after the unalignment-adjustments, because if both adjustments were done, either both or neither reference had an exception. */ if (retn != 0) goto copy_exception_bytes; } /* Decide which copying method to use. */ if (n >= 44*2) /* Break even between movem and move16 is at 38.7*2, but modulo 44. FIXME: We use move4 now. */ { /* For large copies we use 'movem' */ /* It is not optimal to tell the compiler about clobbering any registers; that will move the saving/restoring of those registers to the function prologue/epilogue, and make non-movem sizes suboptimal. This method is not foolproof; it assumes that the "asm reg" declarations at the beginning of the function really are used here (beware: they may be moved to temporary registers). This way, we do not have to save/move the registers around into temporaries; we can safely use them straight away. If you want to check that the allocation was right; then check the equalities in the first comment. It should say "r13=r13, r11=r11, r12=r12" */ __asm__ volatile ("\n\ .ifnc %0%1%2%3,$r13$r11$r12$r10 \n\ .err \n\ .endif \n\ \n\ ;; Save the registers we'll use in the movem process \n\ ;; on the stack. 
\n\ subq 11*4,$sp \n\ movem $r10,[$sp] \n\ \n\ ;; Now we've got this: \n\ ;; r11 - src \n\ ;; r13 - dst \n\ ;; r12 - n \n\ \n\ ;; Update n for the first loop \n\ subq 44,$r12 \n\ 0: \n\ movem [$r11+],$r10 \n\ 1: \n\ subq 44,$r12 \n\ bge 0b \n\ movem $r10,[$r13+] \n\ \n\ addq 44,$r12 ;; compensate for last loop underflowing n \n\ \n\ ;; Restore registers from stack \n\ movem [$sp+],$r10 \n\ 4: \n\ .section .fixup,\"ax\" \n\ \n\ ;; Do not jump back into the loop if we fail. For some uses, we get a \n\ ;; page fault somewhere on the line. Without checking for page limits, \n\ ;; we don't know where, but we need to copy accurately and keep an \n\ ;; accurate count; not just clear the whole line. To do that, we fall \n\ ;; down in the code below, proceeding with smaller amounts. It should \n\ ;; be kept in mind that we have to cater to code like what at one time \n\ ;; was in fs/super.c: \n\ ;; i = size - copy_from_user((void *)page, data, size); \n\ ;; which would cause repeated faults while clearing the remainder of \n\ ;; the SIZE bytes at PAGE after the first fault. \n\ ;; A caveat here is that we must not fall through from a failing page \n\ ;; to a valid page. \n\ \n\ 3: \n\ movem [$sp+],$r10 \n\ addq 44,$r12 ;; Get back count before faulting point. \n\ subq 44,$r11 ;; Get back pointer to faulting movem-line. \n\ jump 4b ;; Fall through, pretending the fault didn't happen.\n\ \n\ .previous \n\ .section __ex_table,\"a\" \n\ .dword 1b,3b \n\ .previous" /* Outputs */ : "=r" (dst), "=r" (src), "=r" (n), "=r" (retn) /* Inputs */ : "0" (dst), "1" (src), "2" (n), "3" (retn)); } /* Either we directly start copying here, using dword copying in a loop, or we copy as much as possible with 'movem' and then the last block (<44 bytes) is copied here. This will work since 'movem' will have updated src, dst and n. (Except with failing src.) 
Since we want to keep src accurate, we can't use __asm_copy_from_user_N with N != (1, 2, 4); it updates dst and retn, but not src (by design; it's value is ignored elsewhere). */ while (n >= 4) { __asm_copy_from_user_4 (dst, src, retn); n -= 4; if (retn) goto copy_exception_bytes; } /* If we get here, there were no memory read faults. */ switch (n) { /* These copies are at least "naturally aligned" (so we don't have to check each byte), due to the src alignment code before the movem loop. The *_3 case *will* get the correct count for retn. */ case 0: /* This case deliberately left in (if you have doubts check the generated assembly code). */ break; case 1: __asm_copy_from_user_1 (dst, src, retn); break; case 2: __asm_copy_from_user_2 (dst, src, retn); break; case 3: __asm_copy_from_user_3 (dst, src, retn); break; } /* If we get here, retn correctly reflects the number of failing bytes. */ return retn; copy_exception_bytes: /* We already have "retn" bytes cleared, and need to clear the remaining "n" bytes. A non-optimized simple byte-for-byte in-line memset is preferred here, since this isn't speed-critical code and we'd rather have this a leaf-function than calling memset. */ { char *endp; for (endp = dst + n; dst < endp; dst++) *dst = 0; } return retn + n; } EXPORT_SYMBOL(__copy_user_zeroing); /* Zero userspace. */ unsigned long __do_clear_user(void __user *pto, unsigned long pn) { /* We want the parameters put in special registers. Make sure the compiler is able to make something useful of this. As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop). FIXME: Comment for old gcc version. Check. If gcc was alright, it really would need no temporaries, and no stack space to save stuff on. */ register char *dst __asm__ ("r13") = pto; register int n __asm__ ("r12") = pn; register int retn __asm__ ("r10") = 0; if (((unsigned long) dst & 3) != 0 /* Don't align if we wouldn't copy more than a few bytes. 
*/ && n >= 3) { if ((unsigned long) dst & 1) { __asm_clear_1 (dst, retn); n--; } if ((unsigned long) dst & 2) { __asm_clear_2 (dst, retn); n -= 2; } } /* Decide which copying method to use. FIXME: This number is from the "ordinary" kernel memset. */ if (n >= (1*48)) { /* For large clears we use 'movem' */ /* It is not optimal to tell the compiler about clobbering any call-saved registers; that will move the saving/restoring of those registers to the function prologue/epilogue, and make non-movem sizes suboptimal. This method is not foolproof; it assumes that the "asm reg" declarations at the beginning of the function really are used here (beware: they may be moved to temporary registers). This way, we do not have to save/move the registers around into temporaries; we can safely use them straight away. If you want to check that the allocation was right; then check the equalities in the first comment. It should say something like "r13=r13, r11=r11, r12=r12". */ __asm__ volatile ("\n\ .ifnc %0%1%2,$r13$r12$r10 \n\ .err \n\ .endif \n\ \n\ ;; Save the registers we'll clobber in the movem process \n\ ;; on the stack. Don't mention them to gcc, it will only be \n\ ;; upset. 
\n\ subq 11*4,$sp \n\ movem $r10,[$sp] \n\ \n\ clear.d $r0 \n\ clear.d $r1 \n\ clear.d $r2 \n\ clear.d $r3 \n\ clear.d $r4 \n\ clear.d $r5 \n\ clear.d $r6 \n\ clear.d $r7 \n\ clear.d $r8 \n\ clear.d $r9 \n\ clear.d $r10 \n\ clear.d $r11 \n\ \n\ ;; Now we've got this: \n\ ;; r13 - dst \n\ ;; r12 - n \n\ \n\ ;; Update n for the first loop \n\ subq 12*4,$r12 \n\ 0: \n\ subq 12*4,$r12 \n\ bge 0b \n\ movem $r11,[$r13+] \n\ 1: \n\ addq 12*4,$r12 ;; compensate for last loop underflowing n\n\ \n\ ;; Restore registers from stack \n\ movem [$sp+],$r10 \n\ 2: \n\ .section .fixup,\"ax\" \n\ 3: \n\ move.d [$sp],$r10 \n\ addq 12*4,$r10 \n\ move.d $r10,[$sp] \n\ clear.d $r10 \n\ jump 0b \n\ \n\ 4: \n\ movem [$sp+],$r10 \n\ addq 12*4,$r10 \n\ addq 12*4,$r12 \n\ jump 2b \n\ \n\ .previous \n\ .section __ex_table,\"a\" \n\ .dword 0b,3b \n\ .dword 1b,4b \n\ .previous" /* Outputs */ : "=r" (dst), "=r" (n), "=r" (retn) /* Inputs */ : "0" (dst), "1" (n), "2" (retn) /* Clobber */ : "r11"); } while (n >= 16) { __asm_clear_16 (dst, retn); n -= 16; } /* Having a separate by-four loops cuts down on cache footprint. FIXME: Test with and without; increasing switch to be 0..15. */ while (n >= 4) { __asm_clear_4 (dst, retn); n -= 4; } switch (n) { case 0: break; case 1: __asm_clear_1 (dst, retn); break; case 2: __asm_clear_2 (dst, retn); break; case 3: __asm_clear_3 (dst, retn); break; } return retn; } EXPORT_SYMBOL(__do_clear_user);
gpl-2.0
PlayOSS-Dev/acer_picasso_kernel
drivers/scsi/bnx2i/bnx2i_init.c
2512
12515
/* bnx2i.c: Broadcom NetXtreme II iSCSI driver. * * Copyright (c) 2006 - 2010 Broadcom Corporation * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. * Copyright (c) 2007, 2008 Mike Christie * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) * Maintained by: Eddie Wai (eddie.wai@broadcom.com) */ #include "bnx2i.h" static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list); static u32 adapter_count; #define DRV_MODULE_NAME "bnx2i" #define DRV_MODULE_VERSION "2.6.2.3" #define DRV_MODULE_RELDATE "Dec 31, 2010" static char version[] __devinitdata = "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com> and " "Eddie Wai <eddie.wai@broadcom.com>"); MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711/57712" " iSCSI Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); static DEFINE_MUTEX(bnx2i_dev_lock); unsigned int event_coal_min = 24; module_param(event_coal_min, int, 0664); MODULE_PARM_DESC(event_coal_min, "Event Coalescing Minimum Commands"); unsigned int event_coal_div = 1; module_param(event_coal_div, int, 0664); MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor"); unsigned int en_tcp_dack = 1; module_param(en_tcp_dack, int, 0664); MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK"); unsigned int error_mask1 = 0x00; module_param(error_mask1, int, 0664); MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1"); unsigned int error_mask2 = 0x00; module_param(error_mask2, int, 0664); MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2"); unsigned int sq_size; module_param(sq_size, int, 0664); MODULE_PARM_DESC(sq_size, "Configure SQ size"); unsigned int rq_size = BNX2I_RQ_WQES_DEFAULT; 
module_param(rq_size, int, 0664); MODULE_PARM_DESC(rq_size, "Configure RQ size"); u64 iscsi_error_mask = 0x00; /** * bnx2i_identify_device - identifies NetXtreme II device type * @hba: Adapter structure pointer * * This function identifies the NX2 device type and sets appropriate * queue mailbox register access method, 5709 requires driver to * access MBOX regs using *bin* mode */ void bnx2i_identify_device(struct bnx2i_hba *hba) { hba->cnic_dev_type = 0; if ((hba->pci_did == PCI_DEVICE_ID_NX2_5706) || (hba->pci_did == PCI_DEVICE_ID_NX2_5706S)) set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type); else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5708) || (hba->pci_did == PCI_DEVICE_ID_NX2_5708S)) set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type); else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5709) || (hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) { set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type); hba->mail_queue_access = BNX2I_MQ_BIN_MODE; } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 || hba->pci_did == PCI_DEVICE_ID_NX2_57711 || hba->pci_did == PCI_DEVICE_ID_NX2_57711E || hba->pci_did == PCI_DEVICE_ID_NX2_57712 || hba->pci_did == PCI_DEVICE_ID_NX2_57712E) set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type); else printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n", hba->pci_did); } /** * get_adapter_list_head - returns head of adapter list */ struct bnx2i_hba *get_adapter_list_head(void) { struct bnx2i_hba *hba = NULL; struct bnx2i_hba *tmp_hba; if (!adapter_count) goto hba_not_found; mutex_lock(&bnx2i_dev_lock); list_for_each_entry(tmp_hba, &adapter_list, link) { if (tmp_hba->cnic && tmp_hba->cnic->cm_select_dev) { hba = tmp_hba; break; } } mutex_unlock(&bnx2i_dev_lock); hba_not_found: return hba; } /** * bnx2i_find_hba_for_cnic - maps cnic device instance to bnx2i adapter instance * @cnic: pointer to cnic device instance * */ struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic) { struct bnx2i_hba *hba, *temp; mutex_lock(&bnx2i_dev_lock); 
list_for_each_entry_safe(hba, temp, &adapter_list, link) { if (hba->cnic == cnic) { mutex_unlock(&bnx2i_dev_lock); return hba; } } mutex_unlock(&bnx2i_dev_lock); return NULL; } /** * bnx2i_start - cnic callback to initialize & start adapter instance * @handle: transparent handle pointing to adapter structure * * This function maps adapter structure to pcidev structure and initiates * firmware handshake to enable/initialize on chip iscsi components * This bnx2i - cnic interface api callback is issued after following * 2 conditions are met - * a) underlying network interface is up (marked by event 'NETDEV_UP' * from netdev * b) bnx2i adapter instance is registered */ void bnx2i_start(void *handle) { #define BNX2I_INIT_POLL_TIME (1000 / HZ) struct bnx2i_hba *hba = handle; int i = HZ; if (!hba->cnic->max_iscsi_conn) { printk(KERN_ALERT "bnx2i: dev %s does not support " "iSCSI\n", hba->netdev->name); if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { mutex_lock(&bnx2i_dev_lock); list_del_init(&hba->link); adapter_count--; hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); mutex_unlock(&bnx2i_dev_lock); bnx2i_free_hba(hba); } return; } bnx2i_send_fw_iscsi_init_msg(hba); while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--) msleep(BNX2I_INIT_POLL_TIME); } /** * bnx2i_chip_cleanup - local routine to handle chip cleanup * @hba: Adapter instance to register * * Driver checks if adapter still has any active connections before * executing the cleanup process */ static void bnx2i_chip_cleanup(struct bnx2i_hba *hba) { struct bnx2i_endpoint *bnx2i_ep; struct list_head *pos, *tmp; if (hba->ofld_conns_active) { /* Stage to force the disconnection * This is the case where the daemon is either slow or * not present */ printk(KERN_ALERT "bnx2i: (%s) chip cleanup for %d active " "connections\n", hba->netdev->name, hba->ofld_conns_active); mutex_lock(&hba->net_dev_lock); list_for_each_safe(pos, tmp, 
&hba->ep_active_list) { bnx2i_ep = list_entry(pos, struct bnx2i_endpoint, link); /* Clean up the chip only */ bnx2i_hw_ep_disconnect(bnx2i_ep); bnx2i_ep->cm_sk = NULL; } mutex_unlock(&hba->net_dev_lock); } } /** * bnx2i_stop - cnic callback to shutdown adapter instance * @handle: transparent handle pointing to adapter structure * * driver checks if adapter is already in shutdown mode, if not start * the shutdown process */ void bnx2i_stop(void *handle) { struct bnx2i_hba *hba = handle; int conns_active; int wait_delay = 1 * HZ; /* check if cleanup happened in GOING_DOWN context */ if (!test_and_set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state)) { iscsi_host_for_each_session(hba->shost, bnx2i_drop_session); wait_delay = hba->hba_shutdown_tmo; } /* Wait for inflight offload connection tasks to complete before * proceeding. Forcefully terminate all connection recovery in * progress at the earliest, either in bind(), send_pdu(LOGIN), * or conn_start() */ wait_event_interruptible_timeout(hba->eh_wait, (list_empty(&hba->ep_ofld_list) && list_empty(&hba->ep_destroy_list)), 2 * HZ); /* Wait for all endpoints to be torn down, Chip will be reset once * control returns to network driver. So it is required to cleanup and * release all connection resources before returning from this routine. */ while (hba->ofld_conns_active) { conns_active = hba->ofld_conns_active; wait_event_interruptible_timeout(hba->eh_wait, (hba->ofld_conns_active != conns_active), wait_delay); if (hba->ofld_conns_active == conns_active) break; } bnx2i_chip_cleanup(hba); /* This flag should be cleared last so that ep_disconnect() gracefully * cleans up connection context */ clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); } /** * bnx2i_init_one - initialize an adapter instance and allocate memory resources * @hba: bnx2i adapter instance * @cnic: cnic device handle * * Global resource lock is held during critical sections below. 
This routine is * called from either cnic_register_driver() or device hot plug context and * and does majority of device specific initialization */ static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic) { int rc; mutex_lock(&bnx2i_dev_lock); hba->cnic = cnic; rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba); if (!rc) { hba->age++; set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); list_add_tail(&hba->link, &adapter_list); adapter_count++; } else if (rc == -EBUSY) /* duplicate registration */ printk(KERN_ALERT "bnx2i, duplicate registration" "hba=%p, cnic=%p\n", hba, cnic); else if (rc == -EAGAIN) printk(KERN_ERR "bnx2i, driver not registered\n"); else if (rc == -EINVAL) printk(KERN_ERR "bnx2i, invalid type %d\n", CNIC_ULP_ISCSI); else printk(KERN_ERR "bnx2i dev reg, unknown error, %d\n", rc); mutex_unlock(&bnx2i_dev_lock); return rc; } /** * bnx2i_ulp_init - initialize an adapter instance * @dev: cnic device handle * * Called from cnic_register_driver() context to initialize all enumerated * cnic devices. This routine allocate adapter structure and other * device specific resources. 
*/ void bnx2i_ulp_init(struct cnic_dev *dev) { struct bnx2i_hba *hba; /* Allocate a HBA structure for this device */ hba = bnx2i_alloc_hba(dev); if (!hba) { printk(KERN_ERR "bnx2i init: hba initialization failed\n"); return; } /* Get PCI related information and update hba struct members */ clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); if (bnx2i_init_one(hba, dev)) { printk(KERN_ERR "bnx2i - hba %p init failed\n", hba); bnx2i_free_hba(hba); } } /** * bnx2i_ulp_exit - shuts down adapter instance and frees all resources * @dev: cnic device handle * */ void bnx2i_ulp_exit(struct cnic_dev *dev) { struct bnx2i_hba *hba; hba = bnx2i_find_hba_for_cnic(dev); if (!hba) { printk(KERN_INFO "bnx2i_ulp_exit: hba not " "found, dev 0x%p\n", dev); return; } mutex_lock(&bnx2i_dev_lock); list_del_init(&hba->link); adapter_count--; if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); } mutex_unlock(&bnx2i_dev_lock); bnx2i_free_hba(hba); } /** * bnx2i_mod_init - module init entry point * * initialize any driver wide global data structures such as endpoint pool, * tcp port manager/queue, sysfs. 
finally driver will register itself * with the cnic module */ static int __init bnx2i_mod_init(void) { int err; printk(KERN_INFO "%s", version); if (sq_size && !is_power_of_2(sq_size)) sq_size = roundup_pow_of_two(sq_size); mutex_init(&bnx2i_dev_lock); bnx2i_scsi_xport_template = iscsi_register_transport(&bnx2i_iscsi_transport); if (!bnx2i_scsi_xport_template) { printk(KERN_ERR "Could not register bnx2i transport.\n"); err = -ENOMEM; goto out; } err = cnic_register_driver(CNIC_ULP_ISCSI, &bnx2i_cnic_cb); if (err) { printk(KERN_ERR "Could not register bnx2i cnic driver.\n"); goto unreg_xport; } return 0; unreg_xport: iscsi_unregister_transport(&bnx2i_iscsi_transport); out: return err; } /** * bnx2i_mod_exit - module cleanup/exit entry point * * Global resource lock and host adapter lock is held during critical sections * in this function. Driver will browse through the adapter list, cleans-up * each instance, unregisters iscsi transport name and finally driver will * unregister itself with the cnic module */ static void __exit bnx2i_mod_exit(void) { struct bnx2i_hba *hba; mutex_lock(&bnx2i_dev_lock); while (!list_empty(&adapter_list)) { hba = list_entry(adapter_list.next, struct bnx2i_hba, link); list_del(&hba->link); adapter_count--; if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { bnx2i_chip_cleanup(hba); hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); } bnx2i_free_hba(hba); } mutex_unlock(&bnx2i_dev_lock); iscsi_unregister_transport(&bnx2i_iscsi_transport); cnic_unregister_driver(CNIC_ULP_ISCSI); } module_init(bnx2i_mod_init); module_exit(bnx2i_mod_exit);
gpl-2.0
randomblame/android_kernel_acer_t20-common
drivers/usb/serial/usb_debug.c
3280
2428
/*
 * USB Debug cable driver
 *
 * Copyright (C) 2006 Greg Kroah-Hartman <greg@kroah.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

#define USB_DEBUG_MAX_PACKET_SIZE	8
#define USB_DEBUG_BRK_SIZE		8

/* In-band byte pattern the device recognises as an emulated break. */
static char USB_DEBUG_BRK[USB_DEBUG_BRK_SIZE] = {
	0x00,
	0xff,
	0x01,
	0xfe,
	0x00,
	0xfe,
	0x01,
	0xff,
};

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(0x0525, 0x127a) },
	{ },
};
MODULE_DEVICE_TABLE(usb, id_table);

static struct usb_driver debug_driver = {
	.name =		"debug",
	.probe =	usb_serial_probe,
	.disconnect =	usb_serial_disconnect,
	.id_table =	id_table,
	.no_dynamic_id = 1,
};

/*
 * The hardware has no real break line, so a break request is emulated
 * by writing the magic USB_DEBUG_BRK byte sequence; clearing the break
 * state needs nothing.
 */
static void usb_debug_break_ctl(struct tty_struct *tty, int break_state)
{
	struct usb_serial_port *port = tty->driver_data;

	if (break_state)
		usb_serial_generic_write(tty, port, USB_DEBUG_BRK,
					 USB_DEBUG_BRK_SIZE);
}

/*
 * Bulk-in completion: a packet that exactly matches the break pattern
 * is reported to the tty layer as a break (and the read URB resubmitted);
 * anything else takes the generic receive path.
 */
static void usb_debug_read_bulk_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;

	if (urb->actual_length != USB_DEBUG_BRK_SIZE ||
	    memcmp(urb->transfer_buffer, USB_DEBUG_BRK,
		   USB_DEBUG_BRK_SIZE) != 0) {
		usb_serial_generic_read_bulk_callback(urb);
		return;
	}

	usb_serial_handle_break(port);
	usb_serial_generic_submit_read_urb(port, GFP_ATOMIC);
}

static struct usb_serial_driver debug_device = {
	.driver = {
		.owner =	THIS_MODULE,
		.name =		"debug",
	},
	.id_table =		id_table,
	.usb_driver =		&debug_driver,
	.num_ports =		1,
	.bulk_out_size =	USB_DEBUG_MAX_PACKET_SIZE,
	.break_ctl =		usb_debug_break_ctl,
	.read_bulk_callback =	usb_debug_read_bulk_callback,
};

static int __init debug_init(void)
{
	int retval;

	retval = usb_serial_register(&debug_device);
	if (retval)
		goto out;

	retval = usb_register(&debug_driver);
	if (retval)
		/* Undo the serial registration on partial failure. */
		usb_serial_deregister(&debug_device);
out:
	return retval;
}

static void __exit debug_exit(void)
{
	usb_deregister(&debug_driver);
	usb_serial_deregister(&debug_device);
}

module_init(debug_init);
module_exit(debug_exit);
MODULE_LICENSE("GPL");
gpl-2.0
jeehyn/NewWorld_kernel_ef52
drivers/usb/serial/ssu100.c
3280
16925
/*
 * usb-serial driver for Quatech SSU-100
 *
 * based on ftdi_sio.c and the original serqt_usb.c from Quatech
 *
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/serial.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/serial_reg.h>
#include <linux/uaccess.h>

/* Vendor-specific control request codes for the SSU-100. */
#define QT_OPEN_CLOSE_CHANNEL       0xca
#define QT_SET_GET_DEVICE           0xc2
#define QT_SET_GET_REGISTER         0xc0
#define QT_GET_SET_PREBUF_TRIG_LVL  0xcc
#define QT_SET_ATF                  0xcd
#define QT_GET_SET_UART             0xc1
#define QT_TRANSFER_IN              0xc0
#define QT_HW_FLOW_CONTROL_MASK     0xc5
#define QT_SW_FLOW_CONTROL_MASK     0xc6

#define SERIAL_MSR_MASK 0xf0

#define SERIAL_CRTSCTS ((UART_MCR_RTS << 8) | UART_MSR_CTS)

#define SERIAL_EVEN_PARITY (UART_LCR_PARITY | UART_LCR_EPAR)

/* Base clock for baud divisor computation (divisor = MAX_BAUD_RATE/baud). */
#define MAX_BAUD_RATE 460800

#define ATC_DISABLED                0x00
#define DUPMODE_BITS        0xc0
#define RR_BITS             0x03
#define LOOPMODE_BITS       0x41
#define RS232_MODE          0x00
#define RTSCTS_TO_CONNECTOR 0x40
#define CLKS_X4             0x02
#define FULLPWRBIT          0x00000080
#define NEXT_BOARD_POWER_BIT        0x00000004

static bool debug;

/* Version Information */
#define DRIVER_VERSION "v0.1"
#define DRIVER_DESC "Quatech SSU-100 USB to Serial Driver"

#define USB_VENDOR_ID_QUATECH	0x061d	/* Quatech VID */
#define QUATECH_SSU100	0xC020	/* SSU100 */

static const struct usb_device_id id_table[] = {
	{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_SSU100)},
	{}			/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);

static struct usb_driver ssu100_driver = {
	.name			       = "ssu100",
	.probe			       = usb_serial_probe,
	.disconnect		       = usb_serial_disconnect,
	.id_table		       = id_table,
	.suspend		       = usb_serial_suspend,
	.resume			       = usb_serial_resume,
	.supports_autosuspend	       = 1,
};

/* Per-port state; status_lock guards the shadow registers and icount. */
struct ssu100_port_private {
	spinlock_t status_lock;
	u8 shadowLSR;
	u8 shadowMSR;
	wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
	struct async_icount icount;
};

/* Free the per-port private data allocated in ssu100_attach(). */
static void ssu100_release(struct usb_serial *serial)
{
	struct ssu100_port_private *priv = usb_get_serial_port_data(*serial->port);

	dbg("%s", __func__);
	kfree(priv);
}

/* Issue a data-less vendor OUT control request (300 ms timeout). */
static inline int ssu100_control_msg(struct usb_device *dev,
				     u8 request, u16 data, u16 index)
{
	return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
			       request, 0x40, data, index,
			       NULL, 0, 300);
}

/* Write the two device-config bytes packed into wValue (data[1]:data[0]). */
static inline int ssu100_setdevice(struct usb_device *dev, u8 *data)
{
	u16 x = ((u16)(data[1] << 8) | (u16)(data[0]));

	return ssu100_control_msg(dev, QT_SET_GET_DEVICE, x, 0);
}

/* Read the 3 device-config bytes into data (caller supplies >= 3 bytes). */
static inline int ssu100_getdevice(struct usb_device *dev, u8 *data)
{
	return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
			       QT_SET_GET_DEVICE, 0xc0, 0, 0,
			       data, 3, 300);
}

/* Read one byte from a 16550-style UART register of the given uart. */
static inline int ssu100_getregister(struct usb_device *dev,
				     unsigned short uart,
				     unsigned short reg,
				     u8 *data)
{
	return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
			       QT_SET_GET_REGISTER, 0xc0, reg,
			       uart, data, sizeof(*data), 300);
}

/* Write one byte to a UART register; value and reg share wValue. */
static inline int ssu100_setregister(struct usb_device *dev,
				     unsigned short uart,
				     unsigned short reg,
				     u16 data)
{
	u16 value = (data << 8) | reg;

	return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
			       QT_SET_GET_REGISTER, 0x40, value, uart,
			       NULL, 0, 300);
}

#define set_mctrl(dev, set)	update_mctrl((dev), (set), 0)
#define clear_mctrl(dev, clear)	update_mctrl((dev), 0, (clear))

/* these do not deal with device that have more than 1 port */
static inline int update_mctrl(struct usb_device *dev, unsigned int set,
			       unsigned int clear)
{
	unsigned urb_value;
	int result;

	if (((set | clear) & (TIOCM_DTR | TIOCM_RTS)) == 0) {
		dbg("%s - DTR|RTS not being set|cleared", __func__);
		return 0;	/* no change */
	}

	clear &= ~set;	/* 'set' takes precedence over 'clear' */
	urb_value = 0;
	if (set & TIOCM_DTR)
		urb_value |= UART_MCR_DTR;
	if (set & TIOCM_RTS)
		urb_value |= UART_MCR_RTS;

	/* NOTE(review): bits not in 'set' are written as 0, so any modem
	 * control bit not explicitly set here is cleared in MCR. */
	result = ssu100_setregister(dev, 0, UART_MCR, urb_value);
	if (result < 0)
		dbg("%s Error from MODEM_CTRL urb", __func__);

	return result;
}

/*
 * One-time hardware setup: drop full-power bit, set prebuffer trigger
 * level, disable ATF, and select x4 clocking / RS232 mode.
 */
static int ssu100_initdevice(struct usb_device *dev)
{
	u8 *data;
	int result = 0;

	dbg("%s", __func__);

	data = kzalloc(3, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	result = ssu100_getdevice(dev, data);
	if (result < 0) {
		dbg("%s - get_device failed %i", __func__, result);
		goto out;
	}

	data[1] &= ~FULLPWRBIT;

	result = ssu100_setdevice(dev, data);
	if (result < 0) {
		dbg("%s - setdevice failed %i", __func__, result);
		goto out;
	}

	result = ssu100_control_msg(dev, QT_GET_SET_PREBUF_TRIG_LVL, 128, 0);
	if (result < 0) {
		dbg("%s - set prebuffer level failed %i", __func__, result);
		goto out;
	}

	result = ssu100_control_msg(dev, QT_SET_ATF, ATC_DISABLED, 0);
	if (result < 0) {
		dbg("%s - set ATFprebuffer level failed %i", __func__, result);
		goto out;
	}

	/* Re-read config and apply clocking/mode bits on top of it. */
	result = ssu100_getdevice(dev, data);
	if (result < 0) {
		dbg("%s - get_device failed %i", __func__, result);
		goto out;
	}

	data[0] &= ~(RR_BITS | DUPMODE_BITS);
	data[0] |= CLKS_X4;
	data[1] &= ~(LOOPMODE_BITS);
	data[1] |= RS232_MODE;

	result = ssu100_setdevice(dev, data);
	if (result < 0) {
		dbg("%s - setdevice failed %i", __func__, result);
		goto out;
	}

out:
	kfree(data);
	return result;
}

/*
 * Push tty termios settings (parity, word length, baud divisor and
 * HW/SW flow control) to the device via vendor control requests.
 */
static void ssu100_set_termios(struct tty_struct *tty,
			       struct usb_serial_port *port,
			       struct ktermios *old_termios)
{
	struct usb_device *dev = port->serial->dev;
	struct ktermios *termios = tty->termios;
	u16 baud, divisor, remainder;
	unsigned int cflag = termios->c_cflag;
	u16 urb_value = 0; /* will hold the new flags */
	int result;

	dbg("%s", __func__);

	if (cflag & PARENB) {
		if (cflag & PARODD)
			urb_value |= UART_LCR_PARITY;
		else
			urb_value |= SERIAL_EVEN_PARITY;
	}

	switch (cflag & CSIZE) {
	case CS5:
		urb_value |= UART_LCR_WLEN5;
		break;
	case CS6:
		urb_value |= UART_LCR_WLEN6;
		break;
	case CS7:
		urb_value |= UART_LCR_WLEN7;
		break;
	default:
	case CS8:
		urb_value |= UART_LCR_WLEN8;
		break;
	}

	baud = tty_get_baud_rate(tty);
	if (!baud)
		baud = 9600;

	dbg("%s - got baud = %d\n", __func__, baud);

	/* Round the divisor to the nearest value (110 baud excluded). */
	divisor = MAX_BAUD_RATE / baud;
	remainder = MAX_BAUD_RATE % baud;
	if (((remainder * 2) >= baud) && (baud != 110))
		divisor++;

	/* LCR bits travel in the high byte of wIndex. */
	urb_value = urb_value << 8;

	result = ssu100_control_msg(dev, QT_GET_SET_UART, divisor, urb_value);
	if (result < 0)
		dbg("%s - set uart failed", __func__);

	if (cflag & CRTSCTS)
		result = ssu100_control_msg(dev, QT_HW_FLOW_CONTROL_MASK,
					    SERIAL_CRTSCTS, 0);
	else
		result = ssu100_control_msg(dev, QT_HW_FLOW_CONTROL_MASK,
					    0, 0);
	if (result < 0)
		dbg("%s - set HW flow control failed", __func__);

	if (I_IXOFF(tty) || I_IXON(tty)) {
		u16 x = ((u16)(START_CHAR(tty) << 8) | (u16)(STOP_CHAR(tty)));

		result = ssu100_control_msg(dev, QT_SW_FLOW_CONTROL_MASK,
					    x, 0);
	} else
		result = ssu100_control_msg(dev, QT_SW_FLOW_CONTROL_MASK,
					    0, 0);
	if (result < 0)
		dbg("%s - set SW flow control failed", __func__);
}

/*
 * Open the channel on the device, snapshot the initial LSR/MSR the
 * device returns, then apply termios and fall through to the generic
 * open path.
 */
static int ssu100_open(struct tty_struct *tty, struct usb_serial_port *port)
{
	struct usb_device *dev = port->serial->dev;
	struct ssu100_port_private *priv = usb_get_serial_port_data(port);
	u8 *data;
	int result;
	unsigned long flags;

	dbg("%s - port %d", __func__, port->number);

	data = kzalloc(2, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
				 QT_OPEN_CLOSE_CHANNEL,
				 QT_TRANSFER_IN, 0x01,
				 0, data, 2, 300);
	if (result < 0) {
		dbg("%s - open failed %i", __func__, result);
		kfree(data);
		return result;
	}

	spin_lock_irqsave(&priv->status_lock, flags);
	priv->shadowLSR = data[0];
	priv->shadowMSR = data[1];
	spin_unlock_irqrestore(&priv->status_lock, flags);

	kfree(data);

	/* set to 9600 */
	result = ssu100_control_msg(dev, QT_GET_SET_UART, 0x30, 0x0300);
	if (result < 0)
		dbg("%s - set uart failed", __func__);

	if (tty)
		ssu100_set_termios(tty, port, tty->termios);

	return usb_serial_generic_open(tty, port);
}

static void ssu100_close(struct usb_serial_port *port)
{
	dbg("%s", __func__);
	usb_serial_generic_close(port);
}

/* TIOCGSERIAL: fill a serial_struct with mostly-fixed values. */
static int get_serial_info(struct usb_serial_port *port,
			   struct serial_struct __user *retinfo)
{
	struct serial_struct tmp;

	if (!retinfo)
		return -EFAULT;

	memset(&tmp, 0, sizeof(tmp));
	tmp.line		= port->serial->minor;
	tmp.port		= 0;
	tmp.irq			= 0;
	tmp.flags		= ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ;
	tmp.xmit_fifo_size	= port->bulk_out_size;
	tmp.baud_base		= 9600;
	tmp.close_delay		= 5*HZ;
	tmp.closing_wait	= 30*HZ;

	if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
		return -EFAULT;
	return 0;
}

/*
 * TIOCMIWAIT: sleep until one of the modem-status counters selected by
 * arg changes.  Returns -EIO if the counters did not change across a
 * wakeup (historically used to signal disconnect).
 *
 * NOTE(review): the wait_event_interruptible() condition reads
 * priv->icount without taking status_lock; the before/after snapshots
 * are taken under the lock.  Looks intentional (racy read only triggers
 * a re-check) — confirm against the tty layer conventions.
 */
static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
{
	struct ssu100_port_private *priv = usb_get_serial_port_data(port);
	struct async_icount prev, cur;
	unsigned long flags;

	spin_lock_irqsave(&priv->status_lock, flags);
	prev = priv->icount;
	spin_unlock_irqrestore(&priv->status_lock, flags);

	while (1) {
		wait_event_interruptible(priv->delta_msr_wait,
					 ((priv->icount.rng != prev.rng) ||
					  (priv->icount.dsr != prev.dsr) ||
					  (priv->icount.dcd != prev.dcd) ||
					  (priv->icount.cts != prev.cts)));

		if (signal_pending(current))
			return -ERESTARTSYS;

		spin_lock_irqsave(&priv->status_lock, flags);
		cur = priv->icount;
		spin_unlock_irqrestore(&priv->status_lock, flags);

		if ((prev.rng == cur.rng) &&
		    (prev.dsr == cur.dsr) &&
		    (prev.dcd == cur.dcd) &&
		    (prev.cts == cur.cts))
			return -EIO;

		if ((arg & TIOCM_RNG && (prev.rng != cur.rng)) ||
		    (arg & TIOCM_DSR && (prev.dsr != cur.dsr)) ||
		    (arg & TIOCM_CD  && (prev.dcd != cur.dcd)) ||
		    (arg & TIOCM_CTS && (prev.cts != cur.cts)))
			return 0;
	}
	return 0;
}

/* TIOCGICOUNT: copy the cumulative interrupt counters to user space. */
static int ssu100_get_icount(struct tty_struct *tty,
			struct serial_icounter_struct *icount)
{
	struct usb_serial_port *port = tty->driver_data;
	struct ssu100_port_private *priv = usb_get_serial_port_data(port);
	struct async_icount cnow = priv->icount;

	icount->cts = cnow.cts;
	icount->dsr = cnow.dsr;
	icount->rng = cnow.rng;
	icount->dcd = cnow.dcd;
	icount->rx = cnow.rx;
	icount->tx = cnow.tx;
	icount->frame = cnow.frame;
	icount->overrun = cnow.overrun;
	icount->parity = cnow.parity;
	icount->brk = cnow.brk;
	icount->buf_overrun = cnow.buf_overrun;

	return 0;
}

static int ssu100_ioctl(struct tty_struct *tty,
		    unsigned int cmd, unsigned long arg)
{
	struct usb_serial_port *port = tty->driver_data;

	dbg("%s cmd 0x%04x", __func__, cmd);

	switch (cmd) {
	case TIOCGSERIAL:
		return get_serial_info(port,
				       (struct serial_struct __user *) arg);
	case TIOCMIWAIT:
		return wait_modem_info(port, arg);
	default:
		break;
	}

	dbg("%s arg not supported", __func__);

	return -ENOIOCTLCMD;
}

/* Allocate per-port state and run the one-time hardware init. */
static int ssu100_attach(struct usb_serial *serial)
{
	struct ssu100_port_private *priv;
	struct usb_serial_port *port = *serial->port;

	dbg("%s", __func__);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&port->dev, "%s- kmalloc(%Zd) failed.\n", __func__,
			sizeof(*priv));
		return -ENOMEM;
	}

	spin_lock_init(&priv->status_lock);
	init_waitqueue_head(&priv->delta_msr_wait);
	usb_set_serial_port_data(port, priv);

	return ssu100_initdevice(serial->dev);
}

/* TIOCMGET: read MCR and MSR from the device and translate to TIOCM_*. */
static int ssu100_tiocmget(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct usb_device *dev = port->serial->dev;
	u8 *d;
	int r;

	dbg("%s\n", __func__);

	d = kzalloc(2, GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	r = ssu100_getregister(dev, 0, UART_MCR, d);
	if (r < 0)
		goto mget_out;

	r = ssu100_getregister(dev, 0, UART_MSR, d+1);
	if (r < 0)
		goto mget_out;

	r = (d[0] & UART_MCR_DTR ? TIOCM_DTR : 0) |
		(d[0] & UART_MCR_RTS ? TIOCM_RTS : 0) |
		(d[1] & UART_MSR_CTS ? TIOCM_CTS : 0) |
		(d[1] & UART_MSR_DCD ? TIOCM_CAR : 0) |
		(d[1] & UART_MSR_RI ? TIOCM_RI : 0) |
		(d[1] & UART_MSR_DSR ? TIOCM_DSR : 0);

mget_out:
	kfree(d);
	return r;
}

static int ssu100_tiocmset(struct tty_struct *tty,
			   unsigned int set, unsigned int clear)
{
	struct usb_serial_port *port = tty->driver_data;
	struct usb_device *dev = port->serial->dev;

	dbg("%s\n", __func__);
	return update_mctrl(dev, set, clear);
}

/* Raise or drop DTR/RTS on open/close (unless already disconnected). */
static void ssu100_dtr_rts(struct usb_serial_port *port, int on)
{
	struct usb_device *dev = port->serial->dev;

	dbg("%s\n", __func__);

	mutex_lock(&port->serial->disc_mutex);
	if (!port->serial->disconnected) {
		/* Disable flow control */
		if (!on &&
		    ssu100_setregister(dev, 0, UART_MCR, 0) < 0)
			dev_err(&port->dev, "error from flowcontrol urb\n");
		/* drop RTS and DTR */
		if (on)
			set_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
		else
			clear_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
	}
	mutex_unlock(&port->serial->disc_mutex);
}

/*
 * Record a new MSR value, bump the delta counters and wake any
 * TIOCMIWAIT sleeper if any delta bit is set.
 */
static void ssu100_update_msr(struct usb_serial_port *port, u8 msr)
{
	struct ssu100_port_private *priv = usb_get_serial_port_data(port);
	unsigned long flags;

	spin_lock_irqsave(&priv->status_lock, flags);
	priv->shadowMSR = msr;
	spin_unlock_irqrestore(&priv->status_lock, flags);

	if (msr & UART_MSR_ANY_DELTA) {
		/* update input line counters */
		if (msr & UART_MSR_DCTS)
			priv->icount.cts++;
		if (msr & UART_MSR_DDSR)
			priv->icount.dsr++;
		if (msr & UART_MSR_DDCD)
			priv->icount.dcd++;
		if (msr & UART_MSR_TERI)
			priv->icount.rng++;
		wake_up_interruptible(&priv->delta_msr_wait);
	}
}

/*
 * Record a new LSR value, bump the error counters and report a single
 * tty flag for the receive path (break > parity > frame > overrun).
 */
static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr,
			      char *tty_flag)
{
	struct ssu100_port_private *priv = usb_get_serial_port_data(port);
	unsigned long flags;

	spin_lock_irqsave(&priv->status_lock, flags);
	priv->shadowLSR = lsr;
	spin_unlock_irqrestore(&priv->status_lock, flags);

	*tty_flag = TTY_NORMAL;
	if (lsr & UART_LSR_BRK_ERROR_BITS) {
		/* we always want to update icount, but we only want to
		 * update tty_flag for one case */
		if (lsr & UART_LSR_BI) {
			priv->icount.brk++;
			*tty_flag = TTY_BREAK;
			usb_serial_handle_break(port);
		}
		if (lsr & UART_LSR_PE) {
			priv->icount.parity++;
			if (*tty_flag == TTY_NORMAL)
				*tty_flag = TTY_PARITY;
		}
		if (lsr & UART_LSR_FE) {
			priv->icount.frame++;
			if (*tty_flag == TTY_NORMAL)
				*tty_flag = TTY_FRAME;
		}
		if (lsr & UART_LSR_OE) {
			priv->icount.overrun++;
			if (*tty_flag == TTY_NORMAL)
				*tty_flag = TTY_OVERRUN;
		}
	}
}

/*
 * Parse one bulk-in packet.  A 4-byte 0x1b 0x1b {00|01} <val> prefix is
 * an in-band status escape carrying LSR (00) or MSR (01); the remainder
 * (if any) is data.  Returns the number of data bytes pushed to the tty.
 */
static int ssu100_process_packet(struct urb *urb,
				 struct tty_struct *tty)
{
	struct usb_serial_port *port = urb->context;
	char *packet = (char *)urb->transfer_buffer;
	char flag = TTY_NORMAL;
	u32 len = urb->actual_length;
	int i;
	char *ch;

	dbg("%s - port %d", __func__, port->number);

	if ((len >= 4) &&
	    (packet[0] == 0x1b) && (packet[1] == 0x1b) &&
	    ((packet[2] == 0x00) || (packet[2] == 0x01))) {
		if (packet[2] == 0x00) {
			ssu100_update_lsr(port, packet[3], &flag);
			if (flag == TTY_OVERRUN)
				tty_insert_flip_char(tty, 0, TTY_OVERRUN);
		}
		if (packet[2] == 0x01)
			ssu100_update_msr(port, packet[3]);

		len -= 4;
		ch = packet + 4;
	} else
		ch = packet;

	if (!len)
		return 0;	/* status only */

	if (port->port.console && port->sysrq) {
		for (i = 0; i < len; i++, ch++) {
			if (!usb_serial_handle_sysrq_char(port, *ch))
				tty_insert_flip_char(tty, *ch, flag);
		}
	} else
		tty_insert_flip_string_fixed_flag(tty, ch, flag, len);

	return len;
}

static void ssu100_process_read_urb(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	struct tty_struct *tty;
	int count;

	dbg("%s", __func__);

	tty = tty_port_tty_get(&port->port);
	if (!tty)
		return;

	count = ssu100_process_packet(urb, tty);

	if (count)
		tty_flip_buffer_push(tty);
	tty_kref_put(tty);
}

static struct usb_serial_driver ssu100_device = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "ssu100",
	},
	.description	     = DRIVER_DESC,
	.id_table	     = id_table,
	.num_ports	     = 1,
	.open		     = ssu100_open,
	.close		     = ssu100_close,
	.attach		     = ssu100_attach,
	.release	     = ssu100_release,
	.dtr_rts	     = ssu100_dtr_rts,
	.process_read_urb    = ssu100_process_read_urb,
	.tiocmget	     = ssu100_tiocmget,
	.tiocmset	     = ssu100_tiocmset,
	.get_icount	     = ssu100_get_icount,
	.ioctl		     = ssu100_ioctl,
	.set_termios	     = ssu100_set_termios,
	.disconnect	     = usb_serial_generic_disconnect,
};
/* NULL-terminated list handed to the usb-serial core. */
static struct usb_serial_driver * const serial_drivers[] = {
	&ssu100_device, NULL
};

module_usb_serial_driver(ssu100_driver, serial_drivers);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");

module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug enabled or not");
gpl-2.0
nok07635/UnleaZhed_XTZ
arch/mips/mm/tlbex.c
3792
58320
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Synthesize TLB refill handlers at runtime. * * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 2008, 2009 Cavium Networks, Inc. * * ... and the days got worse and worse and now you see * I've gone completly out of my mind. * * They're coming to take me a away haha * they're coming to take me a away hoho hihi haha * to the funny farm where code is beautiful all the time ... * * (Condolences to Napoleon XIV) */ #include <linux/bug.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/smp.h> #include <linux/string.h> #include <linux/init.h> #include <linux/cache.h> #include <asm/cacheflush.h> #include <asm/pgtable.h> #include <asm/war.h> #include <asm/uasm.h> #include <asm/setup.h> /* * TLB load/store/modify handlers. * * Only the fastpath gets synthesized at runtime, the slowpath for * do_page_fault remains normal asm. */ extern void tlb_do_page_fault_0(void); extern void tlb_do_page_fault_1(void); struct work_registers { int r1; int r2; int r3; }; struct tlb_reg_save { unsigned long a; unsigned long b; } ____cacheline_aligned_in_smp; static struct tlb_reg_save handler_reg_save[NR_CPUS]; static inline int r45k_bvahwbug(void) { /* XXX: We should probe for the presence of this bug, but we don't. */ return 0; } static inline int r4k_250MHZhwbug(void) { /* XXX: We should probe for the presence of this bug, but we don't. 
*/ return 0; } static inline int __maybe_unused bcm1250_m3_war(void) { return BCM1250_M3_WAR; } static inline int __maybe_unused r10000_llsc_war(void) { return R10000_LLSC_WAR; } static int use_bbit_insns(void) { switch (current_cpu_type()) { case CPU_CAVIUM_OCTEON: case CPU_CAVIUM_OCTEON_PLUS: case CPU_CAVIUM_OCTEON2: return 1; default: return 0; } } static int use_lwx_insns(void) { switch (current_cpu_type()) { case CPU_CAVIUM_OCTEON2: return 1; default: return 0; } } #if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \ CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 static bool scratchpad_available(void) { return true; } static int scratchpad_offset(int i) { /* * CVMSEG starts at address -32768 and extends for * CAVIUM_OCTEON_CVMSEG_SIZE 128 byte cache lines. */ i += 1; /* Kernel use starts at the top and works down. */ return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768; } #else static bool scratchpad_available(void) { return false; } static int scratchpad_offset(int i) { BUG(); /* Really unreachable, but evidently some GCC want this. */ return 0; } #endif /* * Found by experiment: At least some revisions of the 4kc throw under * some circumstances a machine check exception, triggered by invalid * values in the index register. Delaying the tlbp instruction until * after the next branch, plus adding an additional nop in front of * tlbwi/tlbwr avoids the invalid index register values. Nobody knows * why; it's not an issue caused by the core RTL. * */ static int __cpuinit m4kc_tlbp_war(void) { return (current_cpu_data.processor_id & 0xffff00) == (PRID_COMP_MIPS | PRID_IMP_4KC); } /* Handle labels (which must be positive integers). 
*/ enum label_id { label_second_part = 1, label_leave, label_vmalloc, label_vmalloc_done, label_tlbw_hazard, label_split, label_tlbl_goaround1, label_tlbl_goaround2, label_nopage_tlbl, label_nopage_tlbs, label_nopage_tlbm, label_smp_pgtable_change, label_r3000_write_probe_fail, label_large_segbits_fault, #ifdef CONFIG_HUGETLB_PAGE label_tlb_huge_update, #endif }; UASM_L_LA(_second_part) UASM_L_LA(_leave) UASM_L_LA(_vmalloc) UASM_L_LA(_vmalloc_done) UASM_L_LA(_tlbw_hazard) UASM_L_LA(_split) UASM_L_LA(_tlbl_goaround1) UASM_L_LA(_tlbl_goaround2) UASM_L_LA(_nopage_tlbl) UASM_L_LA(_nopage_tlbs) UASM_L_LA(_nopage_tlbm) UASM_L_LA(_smp_pgtable_change) UASM_L_LA(_r3000_write_probe_fail) UASM_L_LA(_large_segbits_fault) #ifdef CONFIG_HUGETLB_PAGE UASM_L_LA(_tlb_huge_update) #endif /* * For debug purposes. */ static inline void dump_handler(const u32 *handler, int count) { int i; pr_debug("\t.set push\n"); pr_debug("\t.set noreorder\n"); for (i = 0; i < count; i++) pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]); pr_debug("\t.set pop\n"); } /* The only general purpose registers allowed in TLB handlers. */ #define K0 26 #define K1 27 /* Some CP0 registers */ #define C0_INDEX 0, 0 #define C0_ENTRYLO0 2, 0 #define C0_TCBIND 2, 2 #define C0_ENTRYLO1 3, 0 #define C0_CONTEXT 4, 0 #define C0_PAGEMASK 5, 0 #define C0_BADVADDR 8, 0 #define C0_ENTRYHI 10, 0 #define C0_EPC 14, 0 #define C0_XCONTEXT 20, 0 #ifdef CONFIG_64BIT # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT) #else # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT) #endif /* The worst case length of the handler is around 18 instructions for * R3000-style TLBs and up to 63 instructions for R4000-style TLBs. * Maximum space available is 32 instructions for R3000 and 64 * instructions for R4000. * * We deliberately chose a buffer size of 128, so we won't scribble * over anything important on overflow before we panic. 
*/ static u32 tlb_handler[128] __cpuinitdata; /* simply assume worst case size for labels and relocs */ static struct uasm_label labels[128] __cpuinitdata; static struct uasm_reloc relocs[128] __cpuinitdata; #ifdef CONFIG_64BIT static int check_for_high_segbits __cpuinitdata; #endif static int check_for_high_segbits __cpuinitdata; static unsigned int kscratch_used_mask __cpuinitdata; static int __cpuinit allocate_kscratch(void) { int r; unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask; r = ffs(a); if (r == 0) return -1; r--; /* make it zero based */ kscratch_used_mask |= (1 << r); return r; } static int scratch_reg __cpuinitdata; static int pgd_reg __cpuinitdata; enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch}; static struct work_registers __cpuinit build_get_work_registers(u32 **p) { struct work_registers r; int smp_processor_id_reg; int smp_processor_id_sel; int smp_processor_id_shift; if (scratch_reg > 0) { /* Save in CPU local C0_KScratch? */ UASM_i_MTC0(p, 1, 31, scratch_reg); r.r1 = K0; r.r2 = K1; r.r3 = 1; return r; } if (num_possible_cpus() > 1) { #ifdef CONFIG_MIPS_PGD_C0_CONTEXT smp_processor_id_shift = 51; smp_processor_id_reg = 20; /* XContext */ smp_processor_id_sel = 0; #else # ifdef CONFIG_32BIT smp_processor_id_shift = 25; smp_processor_id_reg = 4; /* Context */ smp_processor_id_sel = 0; # endif # ifdef CONFIG_64BIT smp_processor_id_shift = 26; smp_processor_id_reg = 4; /* Context */ smp_processor_id_sel = 0; # endif #endif /* Get smp_processor_id */ UASM_i_MFC0(p, K0, smp_processor_id_reg, smp_processor_id_sel); UASM_i_SRL_SAFE(p, K0, K0, smp_processor_id_shift); /* handler_reg_save index in K0 */ UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save))); UASM_i_LA(p, K1, (long)&handler_reg_save); UASM_i_ADDU(p, K0, K0, K1); } else { UASM_i_LA(p, K0, (long)&handler_reg_save); } /* K0 now points to save area, save $1 and $2 */ UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0); UASM_i_SW(p, 2, offsetof(struct 
tlb_reg_save, b), K0); r.r1 = K1; r.r2 = 1; r.r3 = 2; return r; } static void __cpuinit build_restore_work_registers(u32 **p) { if (scratch_reg > 0) { UASM_i_MFC0(p, 1, 31, scratch_reg); return; } /* K0 already points to save area, restore $1 and $2 */ UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0); UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0); } #ifndef CONFIG_MIPS_PGD_C0_CONTEXT /* * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current, * we cannot do r3000 under these circumstances. * * Declare pgd_current here instead of including mmu_context.h to avoid type * conflicts for tlbmiss_handler_setup_pgd */ extern unsigned long pgd_current[]; /* * The R3000 TLB handler is simple. */ static void __cpuinit build_r3000_tlb_refill_handler(void) { long pgdc = (long)pgd_current; u32 *p; memset(tlb_handler, 0, sizeof(tlb_handler)); p = tlb_handler; uasm_i_mfc0(&p, K0, C0_BADVADDR); uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */ uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1); uasm_i_srl(&p, K0, K0, 22); /* load delay */ uasm_i_sll(&p, K0, K0, 2); uasm_i_addu(&p, K1, K1, K0); uasm_i_mfc0(&p, K0, C0_CONTEXT); uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */ uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */ uasm_i_addu(&p, K1, K1, K0); uasm_i_lw(&p, K0, 0, K1); uasm_i_nop(&p); /* load delay */ uasm_i_mtc0(&p, K0, C0_ENTRYLO0); uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */ uasm_i_tlbwr(&p); /* cp0 delay */ uasm_i_jr(&p, K1); uasm_i_rfe(&p); /* branch delay */ if (p > tlb_handler + 32) panic("TLB refill handler space exceeded"); pr_debug("Wrote TLB refill handler (%u instructions).\n", (unsigned int)(p - tlb_handler)); memcpy((void *)ebase, tlb_handler, 0x80); dump_handler((u32 *)ebase, 32); } #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */ /* * The R4000 TLB handler is much more complicated. We have two * consecutive handler areas with 32 instructions space each. 
* Since they aren't used at the same time, we can overflow in the * other one.To keep things simple, we first assume linear space, * then we relocate it to the final handler layout as needed. */ static u32 final_handler[64] __cpuinitdata; /* * Hazards * * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0: * 2. A timing hazard exists for the TLBP instruction. * * stalling_instruction * TLBP * * The JTLB is being read for the TLBP throughout the stall generated by the * previous instruction. This is not really correct as the stalling instruction * can modify the address used to access the JTLB. The failure symptom is that * the TLBP instruction will use an address created for the stalling instruction * and not the address held in C0_ENHI and thus report the wrong results. * * The software work-around is to not allow the instruction preceding the TLBP * to stall - make it an NOP or some other instruction guaranteed not to stall. * * Errata 2 will not be fixed. This errata is also on the R5000. * * As if we MIPS hackers wouldn't know how to nop pipelines happy ... */ static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p) { switch (current_cpu_type()) { /* Found by experiment: R4600 v2.0/R4700 needs this, too. */ case CPU_R4600: case CPU_R4700: case CPU_R5000: case CPU_R5000A: case CPU_NEVADA: uasm_i_nop(p); uasm_i_tlbp(p); break; default: uasm_i_tlbp(p); break; } } /* * Write random or indexed TLB entry, and care about the hazards from * the preceding mtc0 and for the following eret. 
*/ enum tlb_write_entry { tlb_random, tlb_indexed }; static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l, struct uasm_reloc **r, enum tlb_write_entry wmode) { void(*tlbw)(u32 **) = NULL; switch (wmode) { case tlb_random: tlbw = uasm_i_tlbwr; break; case tlb_indexed: tlbw = uasm_i_tlbwi; break; } if (cpu_has_mips_r2) { if (cpu_has_mips_r2_exec_hazard) uasm_i_ehb(p); tlbw(p); return; } switch (current_cpu_type()) { case CPU_R4000PC: case CPU_R4000SC: case CPU_R4000MC: case CPU_R4400PC: case CPU_R4400SC: case CPU_R4400MC: /* * This branch uses up a mtc0 hazard nop slot and saves * two nops after the tlbw instruction. */ uasm_il_bgezl(p, r, 0, label_tlbw_hazard); tlbw(p); uasm_l_tlbw_hazard(l, *p); uasm_i_nop(p); break; case CPU_R4600: case CPU_R4700: case CPU_R5000: case CPU_R5000A: uasm_i_nop(p); tlbw(p); uasm_i_nop(p); break; case CPU_R4300: case CPU_5KC: case CPU_TX49XX: case CPU_PR4450: case CPU_XLR: uasm_i_nop(p); tlbw(p); break; case CPU_R10000: case CPU_R12000: case CPU_R14000: case CPU_4KC: case CPU_4KEC: case CPU_SB1: case CPU_SB1A: case CPU_4KSC: case CPU_20KC: case CPU_25KF: case CPU_BMIPS32: case CPU_BMIPS3300: case CPU_BMIPS4350: case CPU_BMIPS4380: case CPU_BMIPS5000: case CPU_LOONGSON2: case CPU_R5500: if (m4kc_tlbp_war()) uasm_i_nop(p); case CPU_ALCHEMY: tlbw(p); break; case CPU_NEVADA: uasm_i_nop(p); /* QED specifies 2 nops hazard */ /* * This branch uses up a mtc0 hazard nop slot and saves * a nop after the tlbw instruction. */ uasm_il_bgezl(p, r, 0, label_tlbw_hazard); tlbw(p); uasm_l_tlbw_hazard(l, *p); break; case CPU_RM7000: uasm_i_nop(p); uasm_i_nop(p); uasm_i_nop(p); uasm_i_nop(p); tlbw(p); break; case CPU_RM9000: /* * When the JTLB is updated by tlbwi or tlbwr, a subsequent * use of the JTLB for instructions should not occur for 4 * cpu cycles and use for data translations should not occur * for 3 cpu cycles. 
*/ uasm_i_ssnop(p); uasm_i_ssnop(p); uasm_i_ssnop(p); uasm_i_ssnop(p); tlbw(p); uasm_i_ssnop(p); uasm_i_ssnop(p); uasm_i_ssnop(p); uasm_i_ssnop(p); break; case CPU_VR4111: case CPU_VR4121: case CPU_VR4122: case CPU_VR4181: case CPU_VR4181A: uasm_i_nop(p); uasm_i_nop(p); tlbw(p); uasm_i_nop(p); uasm_i_nop(p); break; case CPU_VR4131: case CPU_VR4133: case CPU_R5432: uasm_i_nop(p); uasm_i_nop(p); tlbw(p); break; case CPU_JZRISC: tlbw(p); uasm_i_nop(p); break; default: panic("No TLB refill handler yet (CPU type: %d)", current_cpu_data.cputype); break; } } static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p, unsigned int reg) { if (kernel_uses_smartmips_rixi) { UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC)); UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); } else { #ifdef CONFIG_64BIT_PHYS_ADDR uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL)); #else UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL)); #endif } } #ifdef CONFIG_HUGETLB_PAGE static __cpuinit void build_restore_pagemask(u32 **p, struct uasm_reloc **r, unsigned int tmp, enum label_id lid, int restore_scratch) { if (restore_scratch) { /* Reset default page size */ if (PM_DEFAULT_MASK >> 16) { uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16); uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff); uasm_i_mtc0(p, tmp, C0_PAGEMASK); uasm_il_b(p, r, lid); } else if (PM_DEFAULT_MASK) { uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK); uasm_i_mtc0(p, tmp, C0_PAGEMASK); uasm_il_b(p, r, lid); } else { uasm_i_mtc0(p, 0, C0_PAGEMASK); uasm_il_b(p, r, lid); } if (scratch_reg > 0) UASM_i_MFC0(p, 1, 31, scratch_reg); else UASM_i_LW(p, 1, scratchpad_offset(0), 0); } else { /* Reset default page size */ if (PM_DEFAULT_MASK >> 16) { uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16); uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff); uasm_il_b(p, r, lid); uasm_i_mtc0(p, tmp, C0_PAGEMASK); } else if (PM_DEFAULT_MASK) { uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK); uasm_il_b(p, r, lid); uasm_i_mtc0(p, tmp, C0_PAGEMASK); } else 
{ uasm_il_b(p, r, lid); uasm_i_mtc0(p, 0, C0_PAGEMASK); } } } static __cpuinit void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l, struct uasm_reloc **r, unsigned int tmp, enum tlb_write_entry wmode, int restore_scratch) { /* Set huge page tlb entry size */ uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16); uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff); uasm_i_mtc0(p, tmp, C0_PAGEMASK); build_tlb_write_entry(p, l, r, wmode); build_restore_pagemask(p, r, tmp, label_leave, restore_scratch); } /* * Check if Huge PTE is present, if so then jump to LABEL. */ static void __cpuinit build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp, unsigned int pmd, int lid) { UASM_i_LW(p, tmp, 0, pmd); if (use_bbit_insns()) { uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid); } else { uasm_i_andi(p, tmp, tmp, _PAGE_HUGE); uasm_il_bnez(p, r, tmp, lid); } } static __cpuinit void build_huge_update_entries(u32 **p, unsigned int pte, unsigned int tmp) { int small_sequence; /* * A huge PTE describes an area the size of the * configured huge page size. This is twice the * of the large TLB entry size we intend to use. * A TLB entry half the size of the configured * huge page size is configured into entrylo0 * and entrylo1 to cover the contiguous huge PTE * address space. */ small_sequence = (HPAGE_SIZE >> 7) < 0x10000; /* We can clobber tmp. 
	   It isn't used after this.*/
	if (!small_sequence)
		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));

	build_convert_pte_to_entrylo(p, pte);
	UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
	/* convert to entrylo1 */
	if (small_sequence)
		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
	else
		UASM_i_ADDU(p, pte, pte, tmp);

	UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
}

/*
 * Store the updated huge PTE back to memory, then build the pair of
 * half-size TLB entries for it and write them with an indexed TLB
 * write.
 */
static __cpuinit void build_huge_handler_tail(u32 **p,
					      struct uasm_reloc **r,
					      struct uasm_label **l,
					      unsigned int pte,
					      unsigned int ptr)
{
#ifdef CONFIG_SMP
	/* Store-conditional; retry from label_tlb_huge_update on failure. */
	UASM_i_SC(p, pte, 0, ptr);
	uasm_il_beqz(p, r, pte, label_tlb_huge_update);
	UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
#else
	UASM_i_SW(p, pte, 0, ptr);
#endif
	build_huge_update_entries(p, pte, ptr);
	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_64BIT
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static void __cpuinit
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		 unsigned int tmp, unsigned int ptr)
{
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
	long pgdc = (long)pgd_current;
#endif
	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);

	if (check_for_high_segbits) {
		/*
		 * The kernel currently implicitly assumes that the
		 * MIPS SEGBITS parameter for the processor is
		 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
		 * allocate virtual addresses outside the maximum
		 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
		 * that doesn't prevent user code from accessing the
		 * higher xuseg addresses. Here, we make sure that
		 * everything but the lower xuseg addresses goes down
		 * the module_alloc/vmalloc path.
		 */
		uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
		uasm_il_bnez(p, r, ptr, label_vmalloc);
	} else {
		uasm_il_bltz(p, r, tmp, label_vmalloc);
	}
	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP.
*/ #ifdef CONFIG_MIPS_PGD_C0_CONTEXT if (pgd_reg != -1) { /* pgd is in pgd_reg */ UASM_i_MFC0(p, ptr, 31, pgd_reg); } else { /* * &pgd << 11 stored in CONTEXT [23..63]. */ UASM_i_MFC0(p, ptr, C0_CONTEXT); /* Clear lower 23 bits of context. */ uasm_i_dins(p, ptr, 0, 0, 23); /* 1 0 1 0 1 << 6 xkphys cached */ uasm_i_ori(p, ptr, ptr, 0x540); uasm_i_drotr(p, ptr, ptr, 11); } #elif defined(CONFIG_SMP) # ifdef CONFIG_MIPS_MT_SMTC /* * SMTC uses TCBind value as "CPU" index */ uasm_i_mfc0(p, ptr, C0_TCBIND); uasm_i_dsrl_safe(p, ptr, ptr, 19); # else /* * 64 bit SMP running in XKPHYS has smp_processor_id() << 3 * stored in CONTEXT. */ uasm_i_dmfc0(p, ptr, C0_CONTEXT); uasm_i_dsrl_safe(p, ptr, ptr, 23); # endif UASM_i_LA_mostly(p, tmp, pgdc); uasm_i_daddu(p, ptr, ptr, tmp); uasm_i_dmfc0(p, tmp, C0_BADVADDR); uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr); #else UASM_i_LA_mostly(p, ptr, pgdc); uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr); #endif uasm_l_vmalloc_done(l, *p); /* get pgd offset in bytes */ uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3); uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3); uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */ #ifndef __PAGETABLE_PMD_FOLDED uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */ uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */ uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */ uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3); uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */ #endif } /* * BVADDR is the faulting address, PTR is scratch. * PTR will hold the pgd for vmalloc. 
 */
static void __cpuinit
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
			unsigned int bvaddr, unsigned int ptr,
			enum vmalloc64_mode mode)
{
	long swpd = (long)swapper_pg_dir;
	int single_insn_swpd;
	int did_vmalloc_branch = 0;

	/* True when &swapper_pg_dir can be materialized with a single lui. */
	single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);

	uasm_l_vmalloc(l, *p);

	if (mode != not_refill && check_for_high_segbits) {
		if (single_insn_swpd) {
			uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
			/* delay slot: lui is harmless on either path */
			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
			did_vmalloc_branch = 1;
			/* fall through */
		} else {
			uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
		}
	}
	if (!did_vmalloc_branch) {
		if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
			uasm_il_b(p, r, label_vmalloc_done);
			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
		} else {
			UASM_i_LA_mostly(p, ptr, swpd);
			uasm_il_b(p, r, label_vmalloc_done);
			/* delay slot: finish loading the low half of swpd */
			if (uasm_in_compat_space_p(swpd))
				uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
			else
				uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
		}
	}
	if (mode != not_refill && check_for_high_segbits) {
		uasm_l_large_segbits_fault(l, *p);
		/*
		 * We get here if we are an xsseg address, or if we are
		 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
		 *
		 * Ignoring xsseg (assume disabled so would generate
		 * (address errors?), the only remaining possibility
		 * is the upper xuseg addresses.  On processors with
		 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
		 * addresses would have taken an address error. We try
		 * to mimic that here by taking a load/istream page
		 * fault.
		 */
		UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
		uasm_i_jr(p, ptr);

		if (mode == refill_scratch) {
			/* delay slot: restore the clobbered scratch reg */
			if (scratch_reg > 0)
				UASM_i_MFC0(p, 1, 31, scratch_reg);
			else
				UASM_i_LW(p, 1, scratchpad_offset(0), 0);
		} else {
			uasm_i_nop(p);
		}
	}
}

#else /* !CONFIG_64BIT */

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
static void __cpuinit __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 19);
#else
	/*
	 * smp_processor_id() << 3 is stored in CONTEXT.
	 */
	uasm_i_mfc0(p, ptr, C0_CONTEXT);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 23);
#endif
	uasm_i_addu(p, ptr, tmp, ptr);	/* &pgd_current[smp_processor_id()] */
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
#endif
	uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}

#endif /* !CONFIG_64BIT */

/*
 * Shift/mask c0_context (in CTX) down to the pte-pair offset within
 * the page table.  The VR41xx family needs 2 extra shift bits.
 */
static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
{
	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);

	switch (current_cpu_type()) {
	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4131:
	case CPU_VR4181:
	case CPU_VR4181A:
	case CPU_VR4133:
		shift += 2;
		break;

	default:
		break;
	}

	if (shift)
		UASM_i_SRL(p, ctx, ctx, shift);
	uasm_i_andi(p, ctx, ctx, mask);
}

/*
 * Compute the even-pte address into PTR from the page-table pointer in
 * PTR and c0_context; TMP is scratch.
 */
static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
	/*
	 * Bug workaround for the Nevada. It seems as if under certain
	 * circumstances the move from cp0_context might produce a
	 * bogus result when the mfc0 instruction and its consumer are
	 * in a different cacheline or a load instruction, probably any
	 * memory reference, is between them.
*/ switch (current_cpu_type()) { case CPU_NEVADA: UASM_i_LW(p, ptr, 0, ptr); GET_CONTEXT(p, tmp); /* get context reg */ break; default: GET_CONTEXT(p, tmp); /* get context reg */ UASM_i_LW(p, ptr, 0, ptr); break; } build_adjust_context(p, tmp); UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */ } static void __cpuinit build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep) { /* * 64bit address support (36bit on a 32bit CPU) in a 32bit * Kernel is a special case. Only a few CPUs use it. */ #ifdef CONFIG_64BIT_PHYS_ADDR if (cpu_has_64bits) { uasm_i_ld(p, tmp, 0, ptep); /* get even pte */ uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ if (kernel_uses_smartmips_rixi) { UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC)); UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC)); UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); } else { uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */ UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */ } UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ } else { int pte_off_even = sizeof(pte_t) / 2; int pte_off_odd = pte_off_even + sizeof(pte_t); /* The pte entries are pre-shifted */ uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */ UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */ UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ } #else UASM_i_LW(p, tmp, 0, ptep); /* get even pte */ UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ if (r45k_bvahwbug()) build_tlb_probe_entry(p); if (kernel_uses_smartmips_rixi) { UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC)); UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC)); UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); if (r4k_250MHZhwbug()) UASM_i_MTC0(p, 0, 
C0_ENTRYLO0); UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); } else { UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */ if (r4k_250MHZhwbug()) UASM_i_MTC0(p, 0, C0_ENTRYLO0); UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */ if (r45k_bvahwbug()) uasm_i_mfc0(p, tmp, C0_INDEX); } if (r4k_250MHZhwbug()) UASM_i_MTC0(p, 0, C0_ENTRYLO1); UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ #endif } struct mips_huge_tlb_info { int huge_pte; int restore_scratch; }; static struct mips_huge_tlb_info __cpuinit build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, struct uasm_reloc **r, unsigned int tmp, unsigned int ptr, int c0_scratch) { struct mips_huge_tlb_info rv; unsigned int even, odd; int vmalloc_branch_delay_filled = 0; const int scratch = 1; /* Our extra working register */ rv.huge_pte = scratch; rv.restore_scratch = 0; if (check_for_high_segbits) { UASM_i_MFC0(p, tmp, C0_BADVADDR); if (pgd_reg != -1) UASM_i_MFC0(p, ptr, 31, pgd_reg); else UASM_i_MFC0(p, ptr, C0_CONTEXT); if (c0_scratch >= 0) UASM_i_MTC0(p, scratch, 31, c0_scratch); else UASM_i_SW(p, scratch, scratchpad_offset(0), 0); uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); uasm_il_bnez(p, r, scratch, label_vmalloc); if (pgd_reg == -1) { vmalloc_branch_delay_filled = 1; /* Clear lower 23 bits of context. */ uasm_i_dins(p, ptr, 0, 0, 23); } } else { if (pgd_reg != -1) UASM_i_MFC0(p, ptr, 31, pgd_reg); else UASM_i_MFC0(p, ptr, C0_CONTEXT); UASM_i_MFC0(p, tmp, C0_BADVADDR); if (c0_scratch >= 0) UASM_i_MTC0(p, scratch, 31, c0_scratch); else UASM_i_SW(p, scratch, scratchpad_offset(0), 0); if (pgd_reg == -1) /* Clear lower 23 bits of context. 
*/ uasm_i_dins(p, ptr, 0, 0, 23); uasm_il_bltz(p, r, tmp, label_vmalloc); } if (pgd_reg == -1) { vmalloc_branch_delay_filled = 1; /* 1 0 1 0 1 << 6 xkphys cached */ uasm_i_ori(p, ptr, ptr, 0x540); uasm_i_drotr(p, ptr, ptr, 11); } #ifdef __PAGETABLE_PMD_FOLDED #define LOC_PTEP scratch #else #define LOC_PTEP ptr #endif if (!vmalloc_branch_delay_filled) /* get pgd offset in bytes */ uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3); uasm_l_vmalloc_done(l, *p); /* * tmp ptr * fall-through case = badvaddr *pgd_current * vmalloc case = badvaddr swapper_pg_dir */ if (vmalloc_branch_delay_filled) /* get pgd offset in bytes */ uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3); #ifdef __PAGETABLE_PMD_FOLDED GET_CONTEXT(p, tmp); /* get context reg */ #endif uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3); if (use_lwx_insns()) { UASM_i_LWX(p, LOC_PTEP, scratch, ptr); } else { uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */ uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */ } #ifndef __PAGETABLE_PMD_FOLDED /* get pmd offset in bytes */ uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3); uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3); GET_CONTEXT(p, tmp); /* get context reg */ if (use_lwx_insns()) { UASM_i_LWX(p, scratch, scratch, ptr); } else { uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */ UASM_i_LW(p, scratch, 0, ptr); } #endif /* Adjust the context during the load latency. */ build_adjust_context(p, tmp); #ifdef CONFIG_HUGETLB_PAGE uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update); /* * The in the LWX case we don't want to do the load in the * delay slot. It cannot issue in the same cycle and may be * speculative and unneeded. 
*/ if (use_lwx_insns()) uasm_i_nop(p); #endif /* CONFIG_HUGETLB_PAGE */ /* build_update_entries */ if (use_lwx_insns()) { even = ptr; odd = tmp; UASM_i_LWX(p, even, scratch, tmp); UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t)); UASM_i_LWX(p, odd, scratch, tmp); } else { UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */ even = tmp; odd = ptr; UASM_i_LW(p, even, 0, ptr); /* get even pte */ UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */ } if (kernel_uses_smartmips_rixi) { uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_NO_EXEC)); uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_NO_EXEC)); uasm_i_drotr(p, even, even, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */ uasm_i_drotr(p, odd, odd, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); } else { uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL)); UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */ uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL)); } UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */ if (c0_scratch >= 0) { UASM_i_MFC0(p, scratch, 31, c0_scratch); build_tlb_write_entry(p, l, r, tlb_random); uasm_l_leave(l, *p); rv.restore_scratch = 1; } else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13) { build_tlb_write_entry(p, l, r, tlb_random); uasm_l_leave(l, *p); UASM_i_LW(p, scratch, scratchpad_offset(0), 0); } else { UASM_i_LW(p, scratch, scratchpad_offset(0), 0); build_tlb_write_entry(p, l, r, tlb_random); uasm_l_leave(l, *p); rv.restore_scratch = 1; } uasm_i_eret(p); /* return from trap */ return rv; } /* * For a 64-bit kernel, we are using the 64-bit XTLB refill exception * because EXL == 0. If we wrap, we can also use the 32 instruction * slots before the XTLB refill exception handler which belong to the * unused TLB refill exception. 
*/ #define MIPS64_REFILL_INSNS 32 static void __cpuinit build_r4000_tlb_refill_handler(void) { u32 *p = tlb_handler; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; u32 *f; unsigned int final_len; struct mips_huge_tlb_info htlb_info __maybe_unused; enum vmalloc64_mode vmalloc_mode __maybe_unused; memset(tlb_handler, 0, sizeof(tlb_handler)); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); memset(final_handler, 0, sizeof(final_handler)); if ((scratch_reg > 0 || scratchpad_available()) && use_bbit_insns()) { htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1, scratch_reg); vmalloc_mode = refill_scratch; } else { htlb_info.huge_pte = K0; htlb_info.restore_scratch = 0; vmalloc_mode = refill_noscratch; /* * create the plain linear handler */ if (bcm1250_m3_war()) { unsigned int segbits = 44; uasm_i_dmfc0(&p, K0, C0_BADVADDR); uasm_i_dmfc0(&p, K1, C0_ENTRYHI); uasm_i_xor(&p, K0, K0, K1); uasm_i_dsrl_safe(&p, K1, K0, 62); uasm_i_dsrl_safe(&p, K0, K0, 12 + 1); uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits); uasm_i_or(&p, K0, K0, K1); uasm_il_bnez(&p, &r, K0, label_leave); /* No need for uasm_i_nop */ } #ifdef CONFIG_64BIT build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */ #else build_get_pgde32(&p, K0, K1); /* get pgd in K1 */ #endif #ifdef CONFIG_HUGETLB_PAGE build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update); #endif build_get_ptep(&p, K0, K1); build_update_entries(&p, K0, K1); build_tlb_write_entry(&p, &l, &r, tlb_random); uasm_l_leave(&l, p); uasm_i_eret(&p); /* return from trap */ } #ifdef CONFIG_HUGETLB_PAGE uasm_l_tlb_huge_update(&l, p); build_huge_update_entries(&p, htlb_info.huge_pte, K1); build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random, htlb_info.restore_scratch); #endif #ifdef CONFIG_64BIT build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode); #endif /* * Overflow check: For the 64bit handler, we need at least one * free instruction slot for the wrap-around branch. 
In worst * case, if the intended insertion point is a delay slot, we * need three, with the second nop'ed and the third being * unused. */ /* Loongson2 ebase is different than r4k, we have more space */ #if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2) if ((p - tlb_handler) > 64) panic("TLB refill handler space exceeded"); #else if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1) || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3) && uasm_insn_has_bdelay(relocs, tlb_handler + MIPS64_REFILL_INSNS - 3))) panic("TLB refill handler space exceeded"); #endif /* * Now fold the handler in the TLB refill handler space. */ #if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2) f = final_handler; /* Simplest case, just copy the handler. */ uasm_copy_handler(relocs, labels, tlb_handler, p, f); final_len = p - tlb_handler; #else /* CONFIG_64BIT */ f = final_handler + MIPS64_REFILL_INSNS; if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) { /* Just copy the handler. */ uasm_copy_handler(relocs, labels, tlb_handler, p, f); final_len = p - tlb_handler; } else { #if defined(CONFIG_HUGETLB_PAGE) const enum label_id ls = label_tlb_huge_update; #else const enum label_id ls = label_vmalloc; #endif u32 *split; int ov = 0; int i; for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++) ; BUG_ON(i == ARRAY_SIZE(labels)); split = labels[i].addr; /* * See if we have overflown one way or the other. */ if (split > tlb_handler + MIPS64_REFILL_INSNS || split < p - MIPS64_REFILL_INSNS) ov = 1; if (ov) { /* * Split two instructions before the end. One * for the branch and one for the instruction * in the delay slot. */ split = tlb_handler + MIPS64_REFILL_INSNS - 2; /* * If the branch would fall in a delay slot, * we must back up an additional instruction * so that it is no longer in a delay slot. */ if (uasm_insn_has_bdelay(relocs, split - 1)) split--; } /* Copy first part of the handler. 
*/ uasm_copy_handler(relocs, labels, tlb_handler, split, f); f += split - tlb_handler; if (ov) { /* Insert branch. */ uasm_l_split(&l, final_handler); uasm_il_b(&f, &r, label_split); if (uasm_insn_has_bdelay(relocs, split)) uasm_i_nop(&f); else { uasm_copy_handler(relocs, labels, split, split + 1, f); uasm_move_labels(labels, f, f + 1, -1); f++; split++; } } /* Copy the rest of the handler. */ uasm_copy_handler(relocs, labels, split, p, final_handler); final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) + (p - split); } #endif /* CONFIG_64BIT */ uasm_resolve_relocs(relocs, labels); pr_debug("Wrote TLB refill handler (%u instructions).\n", final_len); memcpy((void *)ebase, final_handler, 0x100); dump_handler((u32 *)ebase, 64); } /* * 128 instructions for the fastpath handler is generous and should * never be exceeded. */ #define FASTPATH_SIZE 128 u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned; u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned; u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned; #ifdef CONFIG_MIPS_PGD_C0_CONTEXT u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned; static void __cpuinit build_r4000_setup_pgd(void) { const int a0 = 4; const int a1 = 5; u32 *p = tlbmiss_handler_setup_pgd; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd)); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); pgd_reg = allocate_kscratch(); if (pgd_reg == -1) { /* PGD << 11 in c0_Context */ /* * If it is a ckseg0 address, convert to a physical * address. Shifting right by 29 and adding 4 will * result in zero for these addresses. 
* */ UASM_i_SRA(&p, a1, a0, 29); UASM_i_ADDIU(&p, a1, a1, 4); uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1); uasm_i_nop(&p); uasm_i_dinsm(&p, a0, 0, 29, 64 - 29); uasm_l_tlbl_goaround1(&l, p); UASM_i_SLL(&p, a0, a0, 11); uasm_i_jr(&p, 31); UASM_i_MTC0(&p, a0, C0_CONTEXT); } else { /* PGD in c0_KScratch */ uasm_i_jr(&p, 31); UASM_i_MTC0(&p, a0, 31, pgd_reg); } if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd)) panic("tlbmiss_handler_setup_pgd space exceeded"); uasm_resolve_relocs(relocs, labels); pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n", (unsigned int)(p - tlbmiss_handler_setup_pgd)); dump_handler(tlbmiss_handler_setup_pgd, ARRAY_SIZE(tlbmiss_handler_setup_pgd)); } #endif static void __cpuinit iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) { #ifdef CONFIG_SMP # ifdef CONFIG_64BIT_PHYS_ADDR if (cpu_has_64bits) uasm_i_lld(p, pte, 0, ptr); else # endif UASM_i_LL(p, pte, 0, ptr); #else # ifdef CONFIG_64BIT_PHYS_ADDR if (cpu_has_64bits) uasm_i_ld(p, pte, 0, ptr); else # endif UASM_i_LW(p, pte, 0, ptr); #endif } static void __cpuinit iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, unsigned int mode) { #ifdef CONFIG_64BIT_PHYS_ADDR unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY); #endif uasm_i_ori(p, pte, pte, mode); #ifdef CONFIG_SMP # ifdef CONFIG_64BIT_PHYS_ADDR if (cpu_has_64bits) uasm_i_scd(p, pte, 0, ptr); else # endif UASM_i_SC(p, pte, 0, ptr); if (r10000_llsc_war()) uasm_il_beqzl(p, r, pte, label_smp_pgtable_change); else uasm_il_beqz(p, r, pte, label_smp_pgtable_change); # ifdef CONFIG_64BIT_PHYS_ADDR if (!cpu_has_64bits) { /* no uasm_i_nop needed */ uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr); uasm_i_ori(p, pte, pte, hwmode); uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr); uasm_il_beqz(p, r, pte, label_smp_pgtable_change); /* no uasm_i_nop needed */ uasm_i_lw(p, pte, 0, ptr); } else uasm_i_nop(p); # else uasm_i_nop(p); # endif #else # ifdef CONFIG_64BIT_PHYS_ADDR if 
	   (cpu_has_64bits)
		uasm_i_sd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SW(p, pte, 0, ptr);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		/* Update the hardware half of the 64-bit PTE as well. */
		uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_lw(p, pte, 0, ptr);
	}
# endif
#endif
}

/*
 * Check if PTE is present, if not then jump to LABEL. PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with its original value.
 */
static void __cpuinit
build_pte_present(u32 **p, struct uasm_reloc **r,
		  int pte, int ptr, int scratch, enum label_id lid)
{
	/* Do the test in a scratch register if we have one, else in PTE. */
	int t = scratch >= 0 ? scratch : pte;

	if (kernel_uses_smartmips_rixi) {
		if (use_bbit_insns()) {
			uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
			uasm_i_nop(p);
		} else {
			uasm_i_andi(p, t, pte, _PAGE_PRESENT);
			uasm_il_beqz(p, r, t, lid);
			if (pte == t)
				/* You lose the SMP race :-(*/
				iPTE_LW(p, pte, ptr);
		}
	} else {
		/* Without RIXI, a present page must also be readable. */
		uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ);
		uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ);
		uasm_il_bnez(p, r, t, lid);
		if (pte == t)
			/* You lose the SMP race :-(*/
			iPTE_LW(p, pte, ptr);
	}
}

/* Make PTE valid, store result in PTR. */
static void __cpuinit
build_make_valid(u32 **p, struct uasm_reloc **r,
		 unsigned int pte, unsigned int ptr)
{
	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

	iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be written to, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_writable(u32 **p, struct uasm_reloc **r,
		   unsigned int pte, unsigned int ptr, int scratch,
		   enum label_id lid)
{
	int t = scratch >= 0 ? scratch : pte;

	uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_il_bnez(p, r, t, lid);
	if (pte == t)
		/* You lose the SMP race :-(*/
		iPTE_LW(p, pte, ptr);
	else
		uasm_i_nop(p);
}

/* Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
static void __cpuinit
build_make_write(u32 **p, struct uasm_reloc **r,
		 unsigned int pte, unsigned int ptr)
{
	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
			     | _PAGE_DIRTY);

	iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be modified, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_modifiable(u32 **p, struct uasm_reloc **r,
		     unsigned int pte, unsigned int ptr, int scratch,
		     enum label_id lid)
{
	if (use_bbit_insns()) {
		/* Branch directly on the _PAGE_WRITE bit. */
		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
		uasm_i_nop(p);
	} else {
		int t = scratch >= 0 ? scratch : pte;
		uasm_i_andi(p, t, pte, _PAGE_WRITE);
		uasm_il_beqz(p, r, t, lid);
		if (pte == t)
			/* You lose the SMP race :-(*/
			iPTE_LW(p, pte, ptr);
	}
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
/*
 * R3000 style TLB load/store/modify handlers.
 */

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
static void __cpuinit
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
	uasm_i_tlbwi(p);
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate.  This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill.  Then it returns.
*/ static void __cpuinit build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l, struct uasm_reloc **r, unsigned int pte, unsigned int tmp) { uasm_i_mfc0(p, tmp, C0_INDEX); uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */ uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */ uasm_i_tlbwi(p); /* cp0 delay */ uasm_i_jr(p, tmp); uasm_i_rfe(p); /* branch delay */ uasm_l_r3000_write_probe_fail(l, *p); uasm_i_tlbwr(p); /* cp0 delay */ uasm_i_jr(p, tmp); uasm_i_rfe(p); /* branch delay */ } static void __cpuinit build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte, unsigned int ptr) { long pgdc = (long)pgd_current; uasm_i_mfc0(p, pte, C0_BADVADDR); uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */ uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr); uasm_i_srl(p, pte, pte, 22); /* load delay */ uasm_i_sll(p, pte, pte, 2); uasm_i_addu(p, ptr, ptr, pte); uasm_i_mfc0(p, pte, C0_CONTEXT); uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */ uasm_i_andi(p, pte, pte, 0xffc); /* load delay */ uasm_i_addu(p, ptr, ptr, pte); uasm_i_lw(p, pte, 0, ptr); uasm_i_tlbp(p); /* load delay */ } static void __cpuinit build_r3000_tlb_load_handler(void) { u32 *p = handle_tlbl; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; memset(handle_tlbl, 0, sizeof(handle_tlbl)); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); build_r3000_tlbchange_handler_head(&p, K0, K1); build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl); uasm_i_nop(&p); /* load delay */ build_make_valid(&p, &r, K0, K1); build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); uasm_l_nopage_tlbl(&l, p); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); uasm_i_nop(&p); if ((p - handle_tlbl) > FASTPATH_SIZE) panic("TLB load handler fastpath space exceeded"); uasm_resolve_relocs(relocs, labels); pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", (unsigned int)(p - handle_tlbl)); dump_handler(handle_tlbl, 
ARRAY_SIZE(handle_tlbl)); } static void __cpuinit build_r3000_tlb_store_handler(void) { u32 *p = handle_tlbs; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; memset(handle_tlbs, 0, sizeof(handle_tlbs)); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); build_r3000_tlbchange_handler_head(&p, K0, K1); build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs); uasm_i_nop(&p); /* load delay */ build_make_write(&p, &r, K0, K1); build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); uasm_l_nopage_tlbs(&l, p); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_nop(&p); if ((p - handle_tlbs) > FASTPATH_SIZE) panic("TLB store handler fastpath space exceeded"); uasm_resolve_relocs(relocs, labels); pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", (unsigned int)(p - handle_tlbs)); dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs)); } static void __cpuinit build_r3000_tlb_modify_handler(void) { u32 *p = handle_tlbm; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; memset(handle_tlbm, 0, sizeof(handle_tlbm)); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); build_r3000_tlbchange_handler_head(&p, K0, K1); build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm); uasm_i_nop(&p); /* load delay */ build_make_write(&p, &r, K0, K1); build_r3000_pte_reload_tlbwi(&p, K0, K1); uasm_l_nopage_tlbm(&l, p); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_nop(&p); if ((p - handle_tlbm) > FASTPATH_SIZE) panic("TLB modify handler fastpath space exceeded"); uasm_resolve_relocs(relocs, labels); pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", (unsigned int)(p - handle_tlbm)); dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm)); } #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */ /* * R4000 style TLB load/store/modify handlers. 
*/ static struct work_registers __cpuinit build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, struct uasm_reloc **r) { struct work_registers wr = build_get_work_registers(p); #ifdef CONFIG_64BIT build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */ #else build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */ #endif #ifdef CONFIG_HUGETLB_PAGE /* * For huge tlb entries, pmd doesn't contain an address but * instead contains the tlb pte. Check the PAGE_HUGE bit and * see if we need to jump to huge tlb processing. */ build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update); #endif UASM_i_MFC0(p, wr.r1, C0_BADVADDR); UASM_i_LW(p, wr.r2, 0, wr.r2); UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2); uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2); UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1); #ifdef CONFIG_SMP uasm_l_smp_pgtable_change(l, *p); #endif iPTE_LW(p, wr.r1, wr.r2); /* get even pte */ if (!m4kc_tlbp_war()) build_tlb_probe_entry(p); return wr; } static void __cpuinit build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l, struct uasm_reloc **r, unsigned int tmp, unsigned int ptr) { uasm_i_ori(p, ptr, ptr, sizeof(pte_t)); uasm_i_xori(p, ptr, ptr, sizeof(pte_t)); build_update_entries(p, tmp, ptr); build_tlb_write_entry(p, l, r, tlb_indexed); uasm_l_leave(l, *p); build_restore_work_registers(p); uasm_i_eret(p); /* return from trap */ #ifdef CONFIG_64BIT build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill); #endif } static void __cpuinit build_r4000_tlb_load_handler(void) { u32 *p = handle_tlbl; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; struct work_registers wr; memset(handle_tlbl, 0, sizeof(handle_tlbl)); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); if (bcm1250_m3_war()) { unsigned int segbits = 44; uasm_i_dmfc0(&p, K0, C0_BADVADDR); uasm_i_dmfc0(&p, K1, C0_ENTRYHI); uasm_i_xor(&p, K0, K0, K1); uasm_i_dsrl_safe(&p, K1, K0, 62); uasm_i_dsrl_safe(&p, K0, K0, 12 + 
	   1);
		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
		uasm_i_or(&p, K0, K0, K1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}

	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);

	if (kernel_uses_smartmips_rixi) {
		/*
		 * If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it.  Skip the expensive test..
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
				      label_tlbl_goaround1);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
		}
		uasm_i_nop(&p);

		uasm_i_tlbr(&p);
		/* Examine entrylo 0 or 1 based on ptr. */
		if (use_bbit_insns()) {
			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
			uasm_i_beqz(&p, wr.r3, 8);
		}
		/* load it in the delay slot*/
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
		/* load it if ptr is odd */
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
		/*
		 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
		 * XI must have triggered it.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
			uasm_i_nop(&p);
			/*
			 * NOTE(review): on this path label_tlbl_goaround1 is
			 * bound here AND again immediately after the if/else
			 * below with no instructions emitted in between; the
			 * inner binding looks redundant and burns a labels[]
			 * slot -- confirm and drop one of the two.
			 */
			uasm_l_tlbl_goaround1(&l, p);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r3, 2);
			uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
			uasm_i_nop(&p);
		}
		uasm_l_tlbl_goaround1(&l, p);
	}
	build_make_valid(&p, &r, wr.r1, wr.r2);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, wr.r1, wr.r2);
	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
	build_tlb_probe_entry(&p);

	if (kernel_uses_smartmips_rixi) {
		/*
		 * If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it.  Skip the expensive test..
*/ if (use_bbit_insns()) { uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID), label_tlbl_goaround2); } else { uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID); uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2); } uasm_i_nop(&p); uasm_i_tlbr(&p); /* Examine entrylo 0 or 1 based on ptr. */ if (use_bbit_insns()) { uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8); } else { uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t)); uasm_i_beqz(&p, wr.r3, 8); } /* load it in the delay slot*/ UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0); /* load it if ptr is odd */ UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1); /* * If the entryLo (now in wr.r3) is valid (bit 1), RI or * XI must have triggered it. */ if (use_bbit_insns()) { uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2); } else { uasm_i_andi(&p, wr.r3, wr.r3, 2); uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2); } if (PM_DEFAULT_MASK == 0) uasm_i_nop(&p); /* * We clobbered C0_PAGEMASK, restore it. On the other branch * it is restored in build_huge_tlb_write_entry. */ build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0); uasm_l_tlbl_goaround2(&l, p); } uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID)); build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); #endif uasm_l_nopage_tlbl(&l, p); build_restore_work_registers(&p); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); uasm_i_nop(&p); if ((p - handle_tlbl) > FASTPATH_SIZE) panic("TLB load handler fastpath space exceeded"); uasm_resolve_relocs(relocs, labels); pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", (unsigned int)(p - handle_tlbl)); dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl)); } static void __cpuinit build_r4000_tlb_store_handler(void) { u32 *p = handle_tlbs; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; struct work_registers wr; memset(handle_tlbs, 0, sizeof(handle_tlbs)); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); wr = build_r4000_tlbchange_handler_head(&p, &l, &r); build_pte_writable(&p, &r, wr.r1, 
wr.r2, wr.r3, label_nopage_tlbs); if (m4kc_tlbp_war()) build_tlb_probe_entry(&p); build_make_write(&p, &r, wr.r1, wr.r2); build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); #ifdef CONFIG_HUGETLB_PAGE /* * This is the entry point when * build_r4000_tlbchange_handler_head spots a huge page. */ uasm_l_tlb_huge_update(&l, p); iPTE_LW(&p, wr.r1, wr.r2); build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs); build_tlb_probe_entry(&p); uasm_i_ori(&p, wr.r1, wr.r1, _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); #endif uasm_l_nopage_tlbs(&l, p); build_restore_work_registers(&p); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_nop(&p); if ((p - handle_tlbs) > FASTPATH_SIZE) panic("TLB store handler fastpath space exceeded"); uasm_resolve_relocs(relocs, labels); pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", (unsigned int)(p - handle_tlbs)); dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs)); } static void __cpuinit build_r4000_tlb_modify_handler(void) { u32 *p = handle_tlbm; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; struct work_registers wr; memset(handle_tlbm, 0, sizeof(handle_tlbm)); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); wr = build_r4000_tlbchange_handler_head(&p, &l, &r); build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm); if (m4kc_tlbp_war()) build_tlb_probe_entry(&p); /* Present and writable bits set, set accessed and dirty bits. */ build_make_write(&p, &r, wr.r1, wr.r2); build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); #ifdef CONFIG_HUGETLB_PAGE /* * This is the entry point when * build_r4000_tlbchange_handler_head spots a huge page. 
*/ uasm_l_tlb_huge_update(&l, p); iPTE_LW(&p, wr.r1, wr.r2); build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm); build_tlb_probe_entry(&p); uasm_i_ori(&p, wr.r1, wr.r1, _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); #endif uasm_l_nopage_tlbm(&l, p); build_restore_work_registers(&p); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_nop(&p); if ((p - handle_tlbm) > FASTPATH_SIZE) panic("TLB modify handler fastpath space exceeded"); uasm_resolve_relocs(relocs, labels); pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", (unsigned int)(p - handle_tlbm)); dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm)); } void __cpuinit build_tlb_refill_handler(void) { /* * The refill handler is generated per-CPU, multi-node systems * may have local storage for it. The other handlers are only * needed once. */ static int run_once = 0; #ifdef CONFIG_64BIT check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); #endif switch (current_cpu_type()) { case CPU_R2000: case CPU_R3000: case CPU_R3000A: case CPU_R3081E: case CPU_TX3912: case CPU_TX3922: case CPU_TX3927: #ifndef CONFIG_MIPS_PGD_C0_CONTEXT build_r3000_tlb_refill_handler(); if (!run_once) { build_r3000_tlb_load_handler(); build_r3000_tlb_store_handler(); build_r3000_tlb_modify_handler(); run_once++; } #else panic("No R3000 TLB refill handler"); #endif break; case CPU_R6000: case CPU_R6000A: panic("No R6000 TLB refill handler yet"); break; case CPU_R8000: panic("No R8000 TLB refill handler yet"); break; default: if (!run_once) { scratch_reg = allocate_kscratch(); #ifdef CONFIG_MIPS_PGD_C0_CONTEXT build_r4000_setup_pgd(); #endif build_r4000_tlb_load_handler(); build_r4000_tlb_store_handler(); build_r4000_tlb_modify_handler(); run_once++; } build_r4000_tlb_refill_handler(); } } void __cpuinit flush_tlb_handlers(void) { local_flush_icache_range((unsigned long)handle_tlbl, 
(unsigned long)handle_tlbl + sizeof(handle_tlbl)); local_flush_icache_range((unsigned long)handle_tlbs, (unsigned long)handle_tlbs + sizeof(handle_tlbs)); local_flush_icache_range((unsigned long)handle_tlbm, (unsigned long)handle_tlbm + sizeof(handle_tlbm)); #ifdef CONFIG_MIPS_PGD_C0_CONTEXT local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd, (unsigned long)tlbmiss_handler_setup_pgd + sizeof(handle_tlbm)); #endif }
gpl-2.0
FlukeNetworks/snackers-kernel
drivers/isdn/gigaset/dummyll.c
4048
1479
/*
 * Dummy LL interface for the Gigaset driver
 *
 * Copyright (c) 2009 by Tilman Schmidt <tilman@imap.cc>.
 *
 * =====================================================================
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License as
 *	published by the Free Software Foundation; either version 2 of
 *	the License, or (at your option) any later version.
 * =====================================================================
 */

#include "gigaset.h"

/*
 * No-op stand-ins for the ISDN line-discipline ("LL") interface, used when
 * the driver is built without any ISDN subsystem.  Each function satisfies
 * the link-time interface but performs no work.
 */

/* outgoing skb fully transmitted on channel bcs: nothing to notify */
void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
{
}
EXPORT_SYMBOL_GPL(gigaset_skb_sent);

/* skb received on channel bcs: dropped, no LL to deliver it to */
void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
{
}
EXPORT_SYMBOL_GPL(gigaset_skb_rcvd);

/* receive error on channel bcs: ignored */
void gigaset_isdn_rcv_err(struct bc_state *bcs)
{
}
EXPORT_SYMBOL_GPL(gigaset_isdn_rcv_err);

/* incoming call: always tell the caller to ignore it */
int gigaset_isdn_icall(struct at_state_t *at_state)
{
	return ICALL_IGNORE;
}

/* D-channel connect/hangup events: no-ops */
void gigaset_isdn_connD(struct bc_state *bcs)
{
}

void gigaset_isdn_hupD(struct bc_state *bcs)
{
}

/* B-channel connect/hangup events: no-ops */
void gigaset_isdn_connB(struct bc_state *bcs)
{
}

void gigaset_isdn_hupB(struct bc_state *bcs)
{
}

/* device start/stop notifications: no-ops */
void gigaset_isdn_start(struct cardstate *cs)
{
}

void gigaset_isdn_stop(struct cardstate *cs)
{
}

/* device registration: report success without registering anything */
int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
{
	return 1;
}

void gigaset_isdn_unregdev(struct cardstate *cs)
{
}

/* driver registration: only log that no ISDN interface is present */
void gigaset_isdn_regdrv(void)
{
	pr_info("no ISDN subsystem interface\n");
}

void gigaset_isdn_unregdrv(void)
{
}
gpl-2.0
zzpianoman/android_kernel_samsung_tuna
arch/h8300/platform/h8300h/irq.c
4560
1624
/*
 * Interrupt handling H8/300H depend.
 * Yoshinori Sato <ysato@users.sourceforge.jp>
 *
 */

#include <linux/init.h>
#include <linux/errno.h>

#include <asm/ptrace.h>
#include <asm/traps.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/gpio.h>
#include <asm/regs306x.h>

/* exception vectors that must be preserved across handler setup */
const int __initdata h8300_saved_vectors[] = {
#if defined(CONFIG_GDB_DEBUG)
	TRAP3_VEC,	/* TRAPA #3 is GDB breakpoint */
#endif
	-1,
};

/* trap vector table; only the syscall and trace-break slots are wired up */
const h8300_vector __initdata h8300_trap_table[] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	system_call,
	0, 0,
	trace_break,
};

/*
 * Claim the GPIO pin backing an external interrupt line and switch it to
 * input mode.  IRQs outside EXT_IRQ0..EXT_IRQ5 need no pin setup and
 * succeed trivially; returns -EBUSY if the pin is already reserved.
 */
int h8300_enable_irq_pin(unsigned int irq)
{
	int mask;

	if (irq < EXT_IRQ0 || irq > EXT_IRQ5)
		return 0;

	/* one bit per external IRQ line within its port */
	mask = 1 << (irq - EXT_IRQ0);
	switch (irq) {
	case EXT_IRQ0:
	case EXT_IRQ1:
	case EXT_IRQ2:
	case EXT_IRQ3:
		/* IRQ0..3 live on port 8 */
		if (H8300_GPIO_RESERVE(H8300_GPIO_P8, mask) == 0)
			return -EBUSY;
		H8300_GPIO_DDR(H8300_GPIO_P8, mask, H8300_GPIO_INPUT);
		break;
	case EXT_IRQ4:
	case EXT_IRQ5:
		/* IRQ4..5 live on port 9 */
		if (H8300_GPIO_RESERVE(H8300_GPIO_P9, mask) == 0)
			return -EBUSY;
		H8300_GPIO_DDR(H8300_GPIO_P9, mask, H8300_GPIO_INPUT);
		break;
	}
	return 0;
}

/*
 * Mask an external interrupt line in IER and release its GPIO pin.
 * IRQs outside EXT_IRQ0..EXT_IRQ5 are ignored.
 */
void h8300_disable_irq_pin(unsigned int irq)
{
	int mask;

	if (irq < EXT_IRQ0 || irq > EXT_IRQ5)
		return;

	/* mask the line first, then hand the pin back to GPIO */
	mask = 1 << (irq - EXT_IRQ0);
	*(volatile unsigned char *)IER &= ~mask;

	switch (irq) {
	case EXT_IRQ0:
	case EXT_IRQ1:
	case EXT_IRQ2:
	case EXT_IRQ3:
		H8300_GPIO_FREE(H8300_GPIO_P8, mask);
		break;
	case EXT_IRQ4:
	case EXT_IRQ5:
		H8300_GPIO_FREE(H8300_GPIO_P9, mask);
		break;
	}
}
gpl-2.0